aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2018-08-05 20:36:01 -0400
committerDavid S. Miller <davem@davemloft.net>2018-08-05 20:36:01 -0400
commitb9a7f2ee569296656a3debdabf3c5473a3923f8a (patch)
tree3cdbc30ad70fca07b874fa46b55c8bca8befe3bf
parent6277547f33ecd6beaca373cb0858df69706e466a (diff)
parente800a333135bef633ffb21bdd471b8ffc491db7b (diff)
Merge tag 'wireless-drivers-next-for-davem-2018-08-05' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next
Kalle Valo says: ==================== wireless-drivers-next patches for 4.19 This time a bigger pull request as we have two new Mediatek drivers MT76x2u (CONFIG_MT76x2U) and MT76x0U (CONFIG_MT76x0U). Also iwlwifi got support for the new IEEE 802.11ax standard, the successor for 802.11ac. And naturally smaller new features and bugfixes all over. Major changes: wcn36xx * fix WEP in client mode wil6210 * add support for Talyn-MB (Talyn ver 2.0) device * add support for enhanced DMA firmware feature iwlwifi * implement 802.11ax D2.0 * support for the new 22560 device family * new PCI IDs for 22000 and 22560 qtnfmac * implement cfg80211 power management callback * enable multiple SSIDs scan support * qtnfmac: implement basic WoWLAN support mt7601u * fall back to software encryption for hw unsupported ciphers * enable 802.11 Management Frame Protection (MFP) mt76 * support setting RTS threshold * add USB support * add support for MT76x2u devices * add support for MT76x0U devices mwifiex * allow user space to set all other IEs except WMM IE rsi * add firmware support for AP+BT dual mode ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.h8
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c11
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c30
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c95
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h3
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c12
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c490
-rw-r--r--drivers/net/wireless/ath/wil6210/ethtool.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c225
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c360
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c73
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c59
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c26
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.h59
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c649
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h105
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c1598
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.h562
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h228
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_crash_dump.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c502
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h167
-rw-r--r--drivers/net/wireless/atmel/atmel.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c38
-rw-r--r--drivers/net/wireless/cisco/airo.c8
-rw-r--r--drivers/net/wireless/cisco/airo_cs.c3
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c7
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_wx.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c10
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c163
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h172
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h250
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/common_rx.c88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c284
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h286
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c74
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c110
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c50
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c205
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c364
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c207
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h294
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c388
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c235
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c192
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c92
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.c8
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c17
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_proc.c10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c95
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c34
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ie.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c33
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h17
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_tx.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_txrx.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c25
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig26
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile20
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c77
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h43
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h162
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/Makefile7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/core.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c166
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/dma.c522
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/dma.h126
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c445
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h149
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c720
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h282
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h772
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mac.c660
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mac.h154
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c403
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c656
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h101
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h330
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c1008
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.h81
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/regs.h651
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/trace.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/trace.h313
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/tx.c270
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c381
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.h61
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/util.c42
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2.h91
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_common.c350
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dma.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_dma.h38
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_init.c305
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c259
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mac.c656
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c699
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_main.c323
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_phy.c347
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c349
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_regs.h30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_tx.c128
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c149
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_usb.c142
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u.h83
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_core.c108
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_init.c318
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c240
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_main.c185
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c463
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c303
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c85
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c845
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_mcu.c242
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.h71
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/init.c1
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c11
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c100
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c154
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c1
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h85
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c18
-rw-r--r--drivers/net/wireless/ray_cs.c6
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c4
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c28
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c3
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c7
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c23
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c5
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h2
-rw-r--r--drivers/net/wireless/rsi/rsi_sdio.h3
-rw-r--r--drivers/net/wireless/rsi/rsi_usb.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c10
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c8
211 files changed, 22060 insertions, 3866 deletions
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 8902720b4e49..331b8d558791 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -274,7 +274,7 @@ ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
274 struct ath10k *ar = htc->ar; 274 struct ath10k *ar = htc->ar;
275 int bundle_cnt = len / sizeof(*report); 275 int bundle_cnt = len / sizeof(*report);
276 276
277 if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_BUNDLE)) { 277 if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE)) {
278 ath10k_warn(ar, "Invalid lookahead bundle count: %d\n", 278 ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
279 bundle_cnt); 279 bundle_cnt);
280 return -EINVAL; 280 return -EINVAL;
@@ -655,7 +655,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
655 sizeof(msg->hdr) + sizeof(msg->ready_ext)) { 655 sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
656 htc->max_msgs_per_htc_bundle = 656 htc->max_msgs_per_htc_bundle =
657 min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle, 657 min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
658 HTC_HOST_MAX_MSG_PER_BUNDLE); 658 HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
659 ath10k_dbg(ar, ATH10K_DBG_HTC, 659 ath10k_dbg(ar, ATH10K_DBG_HTC,
660 "Extended ready message. RX bundle size: %d\n", 660 "Extended ready message. RX bundle size: %d\n",
661 htc->max_msgs_per_htc_bundle); 661 htc->max_msgs_per_htc_bundle);
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index 34877597dd6a..51fda6c23f69 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -50,7 +50,8 @@ struct ath10k;
50 * 4-byte aligned. 50 * 4-byte aligned.
51 */ 51 */
52 52
53#define HTC_HOST_MAX_MSG_PER_BUNDLE 8 53#define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 8
54#define HTC_HOST_MAX_MSG_PER_TX_BUNDLE 16
54 55
55enum ath10k_htc_tx_flags { 56enum ath10k_htc_tx_flags {
56 ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01, 57 ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
@@ -58,6 +59,7 @@ enum ath10k_htc_tx_flags {
58}; 59};
59 60
60enum ath10k_htc_rx_flags { 61enum ath10k_htc_rx_flags {
62 ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK = 0x01,
61 ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02, 63 ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02,
62 ATH10K_HTC_FLAG_BUNDLE_MASK = 0xF0 64 ATH10K_HTC_FLAG_BUNDLE_MASK = 0xF0
63}; 65};
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index c72d8af122a2..4d1cd90d6d27 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -268,11 +268,12 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
268 spin_lock_bh(&htt->rx_ring.lock); 268 spin_lock_bh(&htt->rx_ring.lock);
269 ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level - 269 ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
270 htt->rx_ring.fill_cnt)); 270 htt->rx_ring.fill_cnt));
271 spin_unlock_bh(&htt->rx_ring.lock);
272 271
273 if (ret) 272 if (ret)
274 ath10k_htt_rx_ring_free(htt); 273 ath10k_htt_rx_ring_free(htt);
275 274
275 spin_unlock_bh(&htt->rx_ring.lock);
276
276 return ret; 277 return ret;
277} 278}
278 279
@@ -284,7 +285,9 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
284 skb_queue_purge(&htt->rx_in_ord_compl_q); 285 skb_queue_purge(&htt->rx_in_ord_compl_q);
285 skb_queue_purge(&htt->tx_fetch_ind_q); 286 skb_queue_purge(&htt->tx_fetch_ind_q);
286 287
288 spin_lock_bh(&htt->rx_ring.lock);
287 ath10k_htt_rx_ring_free(htt); 289 ath10k_htt_rx_ring_free(htt);
290 spin_unlock_bh(&htt->rx_ring.lock);
288 291
289 dma_free_coherent(htt->ar->dev, 292 dma_free_coherent(htt->ar->dev,
290 ath10k_htt_get_rx_ring_size(htt), 293 ath10k_htt_get_rx_ring_size(htt),
@@ -1089,7 +1092,7 @@ static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
1089 status = IEEE80211_SKB_RXCB(skb); 1092 status = IEEE80211_SKB_RXCB(skb);
1090 *status = *rx_status; 1093 *status = *rx_status;
1091 1094
1092 __skb_queue_tail(&ar->htt.rx_msdus_q, skb); 1095 skb_queue_tail(&ar->htt.rx_msdus_q, skb);
1093} 1096}
1094 1097
1095static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb) 1098static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
@@ -2810,7 +2813,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2810 break; 2813 break;
2811 } 2814 }
2812 case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: { 2815 case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
2813 __skb_queue_tail(&htt->rx_in_ord_compl_q, skb); 2816 skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
2814 return false; 2817 return false;
2815 } 2818 }
2816 case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: 2819 case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
@@ -2874,7 +2877,7 @@ static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
2874 if (skb_queue_empty(&ar->htt.rx_msdus_q)) 2877 if (skb_queue_empty(&ar->htt.rx_msdus_q))
2875 break; 2878 break;
2876 2879
2877 skb = __skb_dequeue(&ar->htt.rx_msdus_q); 2880 skb = skb_dequeue(&ar->htt.rx_msdus_q);
2878 if (!skb) 2881 if (!skb)
2879 break; 2882 break;
2880 ath10k_process_rx(ar, skb); 2883 ath10k_process_rx(ar, skb);
@@ -2905,7 +2908,7 @@ int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
2905 goto exit; 2908 goto exit;
2906 } 2909 }
2907 2910
2908 while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) { 2911 while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
2909 spin_lock_bh(&htt->rx_ring.lock); 2912 spin_lock_bh(&htt->rx_ring.lock);
2910 ret = ath10k_htt_rx_in_ord_ind(ar, skb); 2913 ret = ath10k_htt_rx_in_ord_ind(ar, skb);
2911 spin_unlock_bh(&htt->rx_ring.lock); 2914 spin_unlock_bh(&htt->rx_ring.lock);
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 89157c5b5e5f..be5b52aaffa6 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -1056,7 +1056,7 @@ static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
1056 if (!is_eth && ieee80211_is_mgmt(hdr->frame_control)) 1056 if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
1057 return HTT_DATA_TX_EXT_TID_MGMT; 1057 return HTT_DATA_TX_EXT_TID_MGMT;
1058 else if (cb->flags & ATH10K_SKB_F_QOS) 1058 else if (cb->flags & ATH10K_SKB_F_QOS)
1059 return skb->priority % IEEE80211_QOS_CTL_TID_MASK; 1059 return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
1060 else 1060 else
1061 return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; 1061 return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
1062} 1062}
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 541bc1c4b2f7..95243b48a179 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -4026,7 +4026,7 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar)
4026 drv_priv); 4026 drv_priv);
4027 4027
4028 /* Prevent aggressive sta/tid taking over tx queue */ 4028 /* Prevent aggressive sta/tid taking over tx queue */
4029 max = 16; 4029 max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE;
4030 ret = 0; 4030 ret = 0;
4031 while (ath10k_mac_tx_can_push(hw, txq) && max--) { 4031 while (ath10k_mac_tx_can_push(hw, txq) && max--) {
4032 ret = ath10k_mac_tx_push_txq(hw, txq); 4032 ret = ath10k_mac_tx_push_txq(hw, txq);
@@ -4047,6 +4047,7 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar)
4047 rcu_read_unlock(); 4047 rcu_read_unlock();
4048 spin_unlock_bh(&ar->txqs_lock); 4048 spin_unlock_bh(&ar->txqs_lock);
4049} 4049}
4050EXPORT_SYMBOL(ath10k_mac_tx_push_pending);
4050 4051
4051/************/ 4052/************/
4052/* Scanning */ 4053/* Scanning */
@@ -4287,7 +4288,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
4287 struct ieee80211_txq *f_txq; 4288 struct ieee80211_txq *f_txq;
4288 struct ath10k_txq *f_artxq; 4289 struct ath10k_txq *f_artxq;
4289 int ret = 0; 4290 int ret = 0;
4290 int max = 16; 4291 int max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE;
4291 4292
4292 spin_lock_bh(&ar->txqs_lock); 4293 spin_lock_bh(&ar->txqs_lock);
4293 if (list_empty(&artxq->list)) 4294 if (list_empty(&artxq->list))
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index d612ce8c9cff..7f61591ce0de 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -30,6 +30,7 @@
30#include "debug.h" 30#include "debug.h"
31#include "hif.h" 31#include "hif.h"
32#include "htc.h" 32#include "htc.h"
33#include "mac.h"
33#include "targaddrs.h" 34#include "targaddrs.h"
34#include "trace.h" 35#include "trace.h"
35#include "sdio.h" 36#include "sdio.h"
@@ -396,6 +397,7 @@ static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
396 int ret; 397 int ret;
397 398
398 payload_len = le16_to_cpu(htc_hdr->len); 399 payload_len = le16_to_cpu(htc_hdr->len);
400 skb->len = payload_len + sizeof(struct ath10k_htc_hdr);
399 401
400 if (trailer_present) { 402 if (trailer_present) {
401 trailer = skb->data + sizeof(*htc_hdr) + 403 trailer = skb->data + sizeof(*htc_hdr) +
@@ -434,12 +436,14 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
434 enum ath10k_htc_ep_id id; 436 enum ath10k_htc_ep_id id;
435 int ret, i, *n_lookahead_local; 437 int ret, i, *n_lookahead_local;
436 u32 *lookaheads_local; 438 u32 *lookaheads_local;
439 int lookahead_idx = 0;
437 440
438 for (i = 0; i < ar_sdio->n_rx_pkts; i++) { 441 for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
439 lookaheads_local = lookaheads; 442 lookaheads_local = lookaheads;
440 n_lookahead_local = n_lookahead; 443 n_lookahead_local = n_lookahead;
441 444
442 id = ((struct ath10k_htc_hdr *)&lookaheads[i])->eid; 445 id = ((struct ath10k_htc_hdr *)
446 &lookaheads[lookahead_idx++])->eid;
443 447
444 if (id >= ATH10K_HTC_EP_COUNT) { 448 if (id >= ATH10K_HTC_EP_COUNT) {
445 ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n", 449 ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
@@ -462,6 +466,7 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
462 /* Only read lookahead's from RX trailers 466 /* Only read lookahead's from RX trailers
463 * for the last packet in a bundle. 467 * for the last packet in a bundle.
464 */ 468 */
469 lookahead_idx--;
465 lookaheads_local = NULL; 470 lookaheads_local = NULL;
466 n_lookahead_local = NULL; 471 n_lookahead_local = NULL;
467 } 472 }
@@ -505,11 +510,11 @@ static int ath10k_sdio_mbox_alloc_pkt_bundle(struct ath10k *ar,
505 510
506 *bndl_cnt = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, htc_hdr->flags); 511 *bndl_cnt = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, htc_hdr->flags);
507 512
508 if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_BUNDLE) { 513 if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE) {
509 ath10k_warn(ar, 514 ath10k_warn(ar,
510 "HTC bundle length %u exceeds maximum %u\n", 515 "HTC bundle length %u exceeds maximum %u\n",
511 le16_to_cpu(htc_hdr->len), 516 le16_to_cpu(htc_hdr->len),
512 HTC_HOST_MAX_MSG_PER_BUNDLE); 517 HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
513 return -ENOMEM; 518 return -ENOMEM;
514 } 519 }
515 520
@@ -600,6 +605,9 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
600 * ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled 605 * ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled
601 * packet skb's have been allocated in the previous step. 606 * packet skb's have been allocated in the previous step.
602 */ 607 */
608 if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
609 full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;
610
603 ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[i], 611 ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[i],
604 act_len, 612 act_len,
605 full_len, 613 full_len,
@@ -1342,6 +1350,8 @@ static void ath10k_sdio_irq_handler(struct sdio_func *func)
1342 break; 1350 break;
1343 } while (time_before(jiffies, timeout) && !done); 1351 } while (time_before(jiffies, timeout) && !done);
1344 1352
1353 ath10k_mac_tx_push_pending(ar);
1354
1345 sdio_claim_host(ar_sdio->func); 1355 sdio_claim_host(ar_sdio->func);
1346 1356
1347 if (ret && ret != -ECANCELED) 1357 if (ret && ret != -ECANCELED)
diff --git a/drivers/net/wireless/ath/ath10k/sdio.h b/drivers/net/wireless/ath/ath10k/sdio.h
index 4ff7b545293b..453eb6263143 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.h
+++ b/drivers/net/wireless/ath/ath10k/sdio.h
@@ -96,14 +96,14 @@
96 * way: 96 * way:
97 * 97 *
98 * Let's assume that each packet in a bundle of the maximum bundle size 98 * Let's assume that each packet in a bundle of the maximum bundle size
99 * (HTC_HOST_MAX_MSG_PER_BUNDLE) has the HTC header bundle count set 99 * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE) has the HTC header bundle count set
100 * to the maximum value (HTC_HOST_MAX_MSG_PER_BUNDLE). 100 * to the maximum value (HTC_HOST_MAX_MSG_PER_RX_BUNDLE).
101 * 101 *
102 * in this case the driver must allocate 102 * in this case the driver must allocate
103 * (HTC_HOST_MAX_MSG_PER_BUNDLE * HTC_HOST_MAX_MSG_PER_BUNDLE) skb's. 103 * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE) skb's.
104 */ 104 */
105#define ATH10K_SDIO_MAX_RX_MSGS \ 105#define ATH10K_SDIO_MAX_RX_MSGS \
106 (HTC_HOST_MAX_MSG_PER_BUNDLE * HTC_HOST_MAX_MSG_PER_BUNDLE) 106 (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE)
107 107
108#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL 0x00000868u 108#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL 0x00000868u
109#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF 109#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 8c49a26fc571..b04f86f8038a 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1076,6 +1076,8 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
1076 arg->phy_capab = ev->phy_capability; 1076 arg->phy_capab = ev->phy_capability;
1077 arg->num_rf_chains = ev->num_rf_chains; 1077 arg->num_rf_chains = ev->num_rf_chains;
1078 arg->eeprom_rd = reg->eeprom_rd; 1078 arg->eeprom_rd = reg->eeprom_rd;
1079 arg->low_5ghz_chan = reg->low_5ghz_chan;
1080 arg->high_5ghz_chan = reg->high_5ghz_chan;
1079 arg->num_mem_reqs = ev->num_mem_reqs; 1081 arg->num_mem_reqs = ev->num_mem_reqs;
1080 arg->service_map = svc_bmap; 1082 arg->service_map = svc_bmap;
1081 arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap); 1083 arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
@@ -1614,10 +1616,10 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
1614 bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr); 1616 bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
1615 ie_len = roundup(arg->ie_len, 4); 1617 ie_len = roundup(arg->ie_len, 4);
1616 len = (sizeof(*tlv) + sizeof(*cmd)) + 1618 len = (sizeof(*tlv) + sizeof(*cmd)) +
1617 (arg->n_channels ? sizeof(*tlv) + chan_len : 0) + 1619 sizeof(*tlv) + chan_len +
1618 (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) + 1620 sizeof(*tlv) + ssid_len +
1619 (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) + 1621 sizeof(*tlv) + bssid_len +
1620 (arg->ie_len ? sizeof(*tlv) + ie_len : 0); 1622 sizeof(*tlv) + ie_len;
1621 1623
1622 skb = ath10k_wmi_alloc_skb(ar, len); 1624 skb = ath10k_wmi_alloc_skb(ar, len);
1623 if (!skb) 1625 if (!skb)
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index ef0de4f1312c..21ba20981a80 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -342,7 +342,7 @@ struct ath_chanctx {
342 342
343 struct ath_beacon_config beacon; 343 struct ath_beacon_config beacon;
344 struct ath9k_hw_cal_data caldata; 344 struct ath9k_hw_cal_data caldata;
345 struct timespec tsf_ts; 345 struct timespec64 tsf_ts;
346 u64 tsf_val; 346 u64 tsf_val;
347 u32 last_beacon; 347 u32 last_beacon;
348 348
@@ -1021,7 +1021,7 @@ struct ath_softc {
1021 struct ath_offchannel offchannel; 1021 struct ath_offchannel offchannel;
1022 struct ath_chanctx *next_chan; 1022 struct ath_chanctx *next_chan;
1023 struct completion go_beacon; 1023 struct completion go_beacon;
1024 struct timespec last_event_time; 1024 struct timespec64 last_event_time;
1025#endif 1025#endif
1026 1026
1027 unsigned long driver_data; 1027 unsigned long driver_data;
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 1b05b5d7a038..fd61ae4782b6 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -233,9 +233,9 @@ static const char *chanctx_state_string(enum ath_chanctx_state state)
233static u32 chanctx_event_delta(struct ath_softc *sc) 233static u32 chanctx_event_delta(struct ath_softc *sc)
234{ 234{
235 u64 ms; 235 u64 ms;
236 struct timespec ts, *old; 236 struct timespec64 ts, *old;
237 237
238 getrawmonotonic(&ts); 238 ktime_get_raw_ts64(&ts);
239 old = &sc->last_event_time; 239 old = &sc->last_event_time;
240 ms = ts.tv_sec * 1000 + ts.tv_nsec / 1000000; 240 ms = ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
241 ms -= old->tv_sec * 1000 + old->tv_nsec / 1000000; 241 ms -= old->tv_sec * 1000 + old->tv_nsec / 1000000;
@@ -334,7 +334,7 @@ ath_chanctx_get_next(struct ath_softc *sc, struct ath_chanctx *ctx)
334static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc) 334static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc)
335{ 335{
336 struct ath_chanctx *prev, *cur; 336 struct ath_chanctx *prev, *cur;
337 struct timespec ts; 337 struct timespec64 ts;
338 u32 cur_tsf, prev_tsf, beacon_int; 338 u32 cur_tsf, prev_tsf, beacon_int;
339 s32 offset; 339 s32 offset;
340 340
@@ -346,7 +346,7 @@ static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc)
346 if (!prev->switch_after_beacon) 346 if (!prev->switch_after_beacon)
347 return; 347 return;
348 348
349 getrawmonotonic(&ts); 349 ktime_get_raw_ts64(&ts);
350 cur_tsf = (u32) cur->tsf_val + 350 cur_tsf = (u32) cur->tsf_val +
351 ath9k_hw_get_tsf_offset(&cur->tsf_ts, &ts); 351 ath9k_hw_get_tsf_offset(&cur->tsf_ts, &ts);
352 352
@@ -1230,7 +1230,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
1230{ 1230{
1231 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1231 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1232 struct ath_chanctx *old_ctx; 1232 struct ath_chanctx *old_ctx;
1233 struct timespec ts; 1233 struct timespec64 ts;
1234 bool measure_time = false; 1234 bool measure_time = false;
1235 bool send_ps = false; 1235 bool send_ps = false;
1236 bool queues_stopped = false; 1236 bool queues_stopped = false;
@@ -1260,7 +1260,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
1260 spin_unlock_bh(&sc->chan_lock); 1260 spin_unlock_bh(&sc->chan_lock);
1261 1261
1262 if (sc->next_chan == &sc->offchannel.chan) { 1262 if (sc->next_chan == &sc->offchannel.chan) {
1263 getrawmonotonic(&ts); 1263 ktime_get_raw_ts64(&ts);
1264 measure_time = true; 1264 measure_time = true;
1265 } 1265 }
1266 1266
@@ -1277,7 +1277,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
1277 spin_lock_bh(&sc->chan_lock); 1277 spin_lock_bh(&sc->chan_lock);
1278 1278
1279 if (sc->cur_chan != &sc->offchannel.chan) { 1279 if (sc->cur_chan != &sc->offchannel.chan) {
1280 getrawmonotonic(&sc->cur_chan->tsf_ts); 1280 ktime_get_raw_ts64(&sc->cur_chan->tsf_ts);
1281 sc->cur_chan->tsf_val = ath9k_hw_gettsf64(sc->sc_ah); 1281 sc->cur_chan->tsf_val = ath9k_hw_gettsf64(sc->sc_ah);
1282 } 1282 }
1283 } 1283 }
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index cb0eef13af1c..fb649d85b8fc 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -138,6 +138,7 @@ static void hif_usb_mgmt_cb(struct urb *urb)
138{ 138{
139 struct cmd_buf *cmd = (struct cmd_buf *)urb->context; 139 struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
140 struct hif_device_usb *hif_dev; 140 struct hif_device_usb *hif_dev;
141 unsigned long flags;
141 bool txok = true; 142 bool txok = true;
142 143
143 if (!cmd || !cmd->skb || !cmd->hif_dev) 144 if (!cmd || !cmd->skb || !cmd->hif_dev)
@@ -158,14 +159,14 @@ static void hif_usb_mgmt_cb(struct urb *urb)
158 * If the URBs are being flushed, no need to complete 159 * If the URBs are being flushed, no need to complete
159 * this packet. 160 * this packet.
160 */ 161 */
161 spin_lock(&hif_dev->tx.tx_lock); 162 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
162 if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) { 163 if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
163 spin_unlock(&hif_dev->tx.tx_lock); 164 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
164 dev_kfree_skb_any(cmd->skb); 165 dev_kfree_skb_any(cmd->skb);
165 kfree(cmd); 166 kfree(cmd);
166 return; 167 return;
167 } 168 }
168 spin_unlock(&hif_dev->tx.tx_lock); 169 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
169 170
170 break; 171 break;
171 default: 172 default:
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 585736a837ed..799010ed04e0 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -1107,25 +1107,26 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
1107 struct ath_hw *ah = priv->ah; 1107 struct ath_hw *ah = priv->ah;
1108 struct ath_common *common = ath9k_hw_common(ah); 1108 struct ath_common *common = ath9k_hw_common(ah);
1109 struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL; 1109 struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
1110 unsigned long flags;
1110 1111
1111 spin_lock(&priv->rx.rxbuflock); 1112 spin_lock_irqsave(&priv->rx.rxbuflock, flags);
1112 list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) { 1113 list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
1113 if (!tmp_buf->in_process) { 1114 if (!tmp_buf->in_process) {
1114 rxbuf = tmp_buf; 1115 rxbuf = tmp_buf;
1115 break; 1116 break;
1116 } 1117 }
1117 } 1118 }
1118 spin_unlock(&priv->rx.rxbuflock); 1119 spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
1119 1120
1120 if (rxbuf == NULL) { 1121 if (rxbuf == NULL) {
1121 ath_dbg(common, ANY, "No free RX buffer\n"); 1122 ath_dbg(common, ANY, "No free RX buffer\n");
1122 goto err; 1123 goto err;
1123 } 1124 }
1124 1125
1125 spin_lock(&priv->rx.rxbuflock); 1126 spin_lock_irqsave(&priv->rx.rxbuflock, flags);
1126 rxbuf->skb = skb; 1127 rxbuf->skb = skb;
1127 rxbuf->in_process = true; 1128 rxbuf->in_process = true;
1128 spin_unlock(&priv->rx.rxbuflock); 1129 spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
1129 1130
1130 tasklet_schedule(&priv->rx_tasklet); 1131 tasklet_schedule(&priv->rx_tasklet);
1131 return; 1132 return;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 1665066f4e24..32fb85e076d6 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1835,13 +1835,13 @@ fail:
1835 return -EINVAL; 1835 return -EINVAL;
1836} 1836}
1837 1837
1838u32 ath9k_hw_get_tsf_offset(struct timespec *last, struct timespec *cur) 1838u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur)
1839{ 1839{
1840 struct timespec ts; 1840 struct timespec64 ts;
1841 s64 usec; 1841 s64 usec;
1842 1842
1843 if (!cur) { 1843 if (!cur) {
1844 getrawmonotonic(&ts); 1844 ktime_get_raw_ts64(&ts);
1845 cur = &ts; 1845 cur = &ts;
1846 } 1846 }
1847 1847
@@ -1859,7 +1859,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1859 u32 saveLedState; 1859 u32 saveLedState;
1860 u32 saveDefAntenna; 1860 u32 saveDefAntenna;
1861 u32 macStaId1; 1861 u32 macStaId1;
1862 struct timespec tsf_ts; 1862 struct timespec64 tsf_ts;
1863 u32 tsf_offset; 1863 u32 tsf_offset;
1864 u64 tsf = 0; 1864 u64 tsf = 0;
1865 int r; 1865 int r;
@@ -1905,7 +1905,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1905 macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B; 1905 macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
1906 1906
1907 /* Save TSF before chip reset, a cold reset clears it */ 1907 /* Save TSF before chip reset, a cold reset clears it */
1908 getrawmonotonic(&tsf_ts); 1908 ktime_get_raw_ts64(&tsf_ts);
1909 tsf = ath9k_hw_gettsf64(ah); 1909 tsf = ath9k_hw_gettsf64(ah);
1910 1910
1911 saveLedState = REG_READ(ah, AR_CFG_LED) & 1911 saveLedState = REG_READ(ah, AR_CFG_LED) &
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 9804a24a2dc0..68956cdc8c9a 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -1060,7 +1060,7 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah);
1060u64 ath9k_hw_gettsf64(struct ath_hw *ah); 1060u64 ath9k_hw_gettsf64(struct ath_hw *ah);
1061void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64); 1061void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
1062void ath9k_hw_reset_tsf(struct ath_hw *ah); 1062void ath9k_hw_reset_tsf(struct ath_hw *ah);
1063u32 ath9k_hw_get_tsf_offset(struct timespec *last, struct timespec *cur); 1063u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur);
1064void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set); 1064void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set);
1065void ath9k_hw_init_global_settings(struct ath_hw *ah); 1065void ath9k_hw_init_global_settings(struct ath_hw *ah);
1066u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah); 1066u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 5eb1c0aea41d..1049773378f2 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1865,7 +1865,7 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw,
1865 mutex_lock(&sc->mutex); 1865 mutex_lock(&sc->mutex);
1866 ath9k_ps_wakeup(sc); 1866 ath9k_ps_wakeup(sc);
1867 tsf -= le64_to_cpu(avp->tsf_adjust); 1867 tsf -= le64_to_cpu(avp->tsf_adjust);
1868 getrawmonotonic(&avp->chanctx->tsf_ts); 1868 ktime_get_raw_ts64(&avp->chanctx->tsf_ts);
1869 if (sc->cur_chan == avp->chanctx) 1869 if (sc->cur_chan == avp->chanctx)
1870 ath9k_hw_settsf64(sc->sc_ah, tsf); 1870 ath9k_hw_settsf64(sc->sc_ah, tsf);
1871 avp->chanctx->tsf_val = tsf; 1871 avp->chanctx->tsf_val = tsf;
@@ -1881,7 +1881,7 @@ static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1881 mutex_lock(&sc->mutex); 1881 mutex_lock(&sc->mutex);
1882 1882
1883 ath9k_ps_wakeup(sc); 1883 ath9k_ps_wakeup(sc);
1884 getrawmonotonic(&avp->chanctx->tsf_ts); 1884 ktime_get_raw_ts64(&avp->chanctx->tsf_ts);
1885 if (sc->cur_chan == avp->chanctx) 1885 if (sc->cur_chan == avp->chanctx)
1886 ath9k_hw_reset_tsf(sc->sc_ah); 1886 ath9k_hw_reset_tsf(sc->sc_ah);
1887 avp->chanctx->tsf_val = 0; 1887 avp->chanctx->tsf_val = 0;
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index b0b5579b7560..d1f6710ca63b 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -209,6 +209,7 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
209{ 209{
210 struct wmi *wmi = priv; 210 struct wmi *wmi = priv;
211 struct wmi_cmd_hdr *hdr; 211 struct wmi_cmd_hdr *hdr;
212 unsigned long flags;
212 u16 cmd_id; 213 u16 cmd_id;
213 214
214 if (unlikely(wmi->stopped)) 215 if (unlikely(wmi->stopped))
@@ -218,20 +219,20 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
218 cmd_id = be16_to_cpu(hdr->command_id); 219 cmd_id = be16_to_cpu(hdr->command_id);
219 220
220 if (cmd_id & 0x1000) { 221 if (cmd_id & 0x1000) {
221 spin_lock(&wmi->wmi_lock); 222 spin_lock_irqsave(&wmi->wmi_lock, flags);
222 __skb_queue_tail(&wmi->wmi_event_queue, skb); 223 __skb_queue_tail(&wmi->wmi_event_queue, skb);
223 spin_unlock(&wmi->wmi_lock); 224 spin_unlock_irqrestore(&wmi->wmi_lock, flags);
224 tasklet_schedule(&wmi->wmi_event_tasklet); 225 tasklet_schedule(&wmi->wmi_event_tasklet);
225 return; 226 return;
226 } 227 }
227 228
228 /* Check if there has been a timeout. */ 229 /* Check if there has been a timeout. */
229 spin_lock(&wmi->wmi_lock); 230 spin_lock_irqsave(&wmi->wmi_lock, flags);
230 if (be16_to_cpu(hdr->seq_no) != wmi->last_seq_id) { 231 if (be16_to_cpu(hdr->seq_no) != wmi->last_seq_id) {
231 spin_unlock(&wmi->wmi_lock); 232 spin_unlock_irqrestore(&wmi->wmi_lock, flags);
232 goto free_skb; 233 goto free_skb;
233 } 234 }
234 spin_unlock(&wmi->wmi_lock); 235 spin_unlock_irqrestore(&wmi->wmi_lock, flags);
235 236
236 /* WMI command response */ 237 /* WMI command response */
237 ath9k_wmi_rsp_callback(wmi, skb); 238 ath9k_wmi_rsp_callback(wmi, skb);
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index aeb5e6e806be..79998a3ddb7a 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -493,7 +493,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
493{ 493{
494 struct wcn36xx *wcn = hw->priv; 494 struct wcn36xx *wcn = hw->priv;
495 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); 495 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
496 struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta); 496 struct wcn36xx_sta *sta_priv = sta ? wcn36xx_sta_to_priv(sta) : NULL;
497 int ret = 0; 497 int ret = 0;
498 u8 key[WLAN_MAX_KEY_LEN]; 498 u8 key[WLAN_MAX_KEY_LEN];
499 499
@@ -512,7 +512,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
512 vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40; 512 vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
513 break; 513 break;
514 case WLAN_CIPHER_SUITE_WEP104: 514 case WLAN_CIPHER_SUITE_WEP104:
515 vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40; 515 vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP104;
516 break; 516 break;
517 case WLAN_CIPHER_SUITE_CCMP: 517 case WLAN_CIPHER_SUITE_CCMP:
518 vif_priv->encrypt_type = WCN36XX_HAL_ED_CCMP; 518 vif_priv->encrypt_type = WCN36XX_HAL_ED_CCMP;
@@ -567,15 +567,19 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
567 key_conf->keyidx, 567 key_conf->keyidx,
568 key_conf->keylen, 568 key_conf->keylen,
569 key); 569 key);
570
570 if ((WLAN_CIPHER_SUITE_WEP40 == key_conf->cipher) || 571 if ((WLAN_CIPHER_SUITE_WEP40 == key_conf->cipher) ||
571 (WLAN_CIPHER_SUITE_WEP104 == key_conf->cipher)) { 572 (WLAN_CIPHER_SUITE_WEP104 == key_conf->cipher)) {
572 sta_priv->is_data_encrypted = true; 573 list_for_each_entry(sta_priv,
573 wcn36xx_smd_set_stakey(wcn, 574 &vif_priv->sta_list, list) {
574 vif_priv->encrypt_type, 575 sta_priv->is_data_encrypted = true;
575 key_conf->keyidx, 576 wcn36xx_smd_set_stakey(wcn,
576 key_conf->keylen, 577 vif_priv->encrypt_type,
577 key, 578 key_conf->keyidx,
578 get_sta_index(vif, sta_priv)); 579 key_conf->keylen,
580 key,
581 get_sta_index(vif, sta_priv));
582 }
579 } 583 }
580 } 584 }
581 break; 585 break;
@@ -984,6 +988,7 @@ static int wcn36xx_add_interface(struct ieee80211_hw *hw,
984 mutex_lock(&wcn->conf_mutex); 988 mutex_lock(&wcn->conf_mutex);
985 989
986 vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX; 990 vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
991 INIT_LIST_HEAD(&vif_priv->sta_list);
987 list_add(&vif_priv->list, &wcn->vif_list); 992 list_add(&vif_priv->list, &wcn->vif_list);
988 wcn36xx_smd_add_sta_self(wcn, vif); 993 wcn36xx_smd_add_sta_self(wcn, vif);
989 994
@@ -1005,6 +1010,8 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1005 1010
1006 spin_lock_init(&sta_priv->ampdu_lock); 1011 spin_lock_init(&sta_priv->ampdu_lock);
1007 sta_priv->vif = vif_priv; 1012 sta_priv->vif = vif_priv;
1013 list_add(&sta_priv->list, &vif_priv->sta_list);
1014
1008 /* 1015 /*
1009 * For STA mode HW will be configured on BSS_CHANGED_ASSOC because 1016 * For STA mode HW will be configured on BSS_CHANGED_ASSOC because
1010 * at this stage AID is not available yet. 1017 * at this stage AID is not available yet.
@@ -1032,6 +1039,7 @@ static int wcn36xx_sta_remove(struct ieee80211_hw *hw,
1032 1039
1033 mutex_lock(&wcn->conf_mutex); 1040 mutex_lock(&wcn->conf_mutex);
1034 1041
1042 list_del(&sta_priv->list);
1035 wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index); 1043 wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index);
1036 sta_priv->vif = NULL; 1044 sta_priv->vif = NULL;
1037 1045
@@ -1153,8 +1161,6 @@ static const struct ieee80211_ops wcn36xx_ops = {
1153 1161
1154static int wcn36xx_init_ieee80211(struct wcn36xx *wcn) 1162static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
1155{ 1163{
1156 int ret = 0;
1157
1158 static const u32 cipher_suites[] = { 1164 static const u32 cipher_suites[] = {
1159 WLAN_CIPHER_SUITE_WEP40, 1165 WLAN_CIPHER_SUITE_WEP40,
1160 WLAN_CIPHER_SUITE_WEP104, 1166 WLAN_CIPHER_SUITE_WEP104,
@@ -1201,7 +1207,7 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
1201 wiphy_ext_feature_set(wcn->hw->wiphy, 1207 wiphy_ext_feature_set(wcn->hw->wiphy,
1202 NL80211_EXT_FEATURE_CQM_RSSI_LIST); 1208 NL80211_EXT_FEATURE_CQM_RSSI_LIST);
1203 1209
1204 return ret; 1210 return 0;
1205} 1211}
1206 1212
1207static int wcn36xx_platform_get_resources(struct wcn36xx *wcn, 1213static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index b4dadf75d565..00098f24116d 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -250,7 +250,7 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
250 250
251static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len) 251static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
252{ 252{
253 int ret = 0; 253 int ret;
254 unsigned long start; 254 unsigned long start;
255 struct wcn36xx_hal_msg_header *hdr = 255 struct wcn36xx_hal_msg_header *hdr =
256 (struct wcn36xx_hal_msg_header *)wcn->hal_buf; 256 (struct wcn36xx_hal_msg_header *)wcn->hal_buf;
@@ -446,7 +446,7 @@ static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
446int wcn36xx_smd_start(struct wcn36xx *wcn) 446int wcn36xx_smd_start(struct wcn36xx *wcn)
447{ 447{
448 struct wcn36xx_hal_mac_start_req_msg msg_body, *body; 448 struct wcn36xx_hal_mac_start_req_msg msg_body, *body;
449 int ret = 0; 449 int ret;
450 int i; 450 int i;
451 size_t len; 451 size_t len;
452 452
@@ -493,7 +493,7 @@ out:
493int wcn36xx_smd_stop(struct wcn36xx *wcn) 493int wcn36xx_smd_stop(struct wcn36xx *wcn)
494{ 494{
495 struct wcn36xx_hal_mac_stop_req_msg msg_body; 495 struct wcn36xx_hal_mac_stop_req_msg msg_body;
496 int ret = 0; 496 int ret;
497 497
498 mutex_lock(&wcn->hal_mutex); 498 mutex_lock(&wcn->hal_mutex);
499 INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_REQ); 499 INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_REQ);
@@ -520,7 +520,7 @@ out:
520int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode) 520int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode)
521{ 521{
522 struct wcn36xx_hal_init_scan_req_msg msg_body; 522 struct wcn36xx_hal_init_scan_req_msg msg_body;
523 int ret = 0; 523 int ret;
524 524
525 mutex_lock(&wcn->hal_mutex); 525 mutex_lock(&wcn->hal_mutex);
526 INIT_HAL_MSG(msg_body, WCN36XX_HAL_INIT_SCAN_REQ); 526 INIT_HAL_MSG(msg_body, WCN36XX_HAL_INIT_SCAN_REQ);
@@ -549,7 +549,7 @@ out:
549int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel) 549int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel)
550{ 550{
551 struct wcn36xx_hal_start_scan_req_msg msg_body; 551 struct wcn36xx_hal_start_scan_req_msg msg_body;
552 int ret = 0; 552 int ret;
553 553
554 mutex_lock(&wcn->hal_mutex); 554 mutex_lock(&wcn->hal_mutex);
555 INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_REQ); 555 INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_REQ);
@@ -579,7 +579,7 @@ out:
579int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel) 579int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel)
580{ 580{
581 struct wcn36xx_hal_end_scan_req_msg msg_body; 581 struct wcn36xx_hal_end_scan_req_msg msg_body;
582 int ret = 0; 582 int ret;
583 583
584 mutex_lock(&wcn->hal_mutex); 584 mutex_lock(&wcn->hal_mutex);
585 INIT_HAL_MSG(msg_body, WCN36XX_HAL_END_SCAN_REQ); 585 INIT_HAL_MSG(msg_body, WCN36XX_HAL_END_SCAN_REQ);
@@ -610,7 +610,7 @@ int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
610 enum wcn36xx_hal_sys_mode mode) 610 enum wcn36xx_hal_sys_mode mode)
611{ 611{
612 struct wcn36xx_hal_finish_scan_req_msg msg_body; 612 struct wcn36xx_hal_finish_scan_req_msg msg_body;
613 int ret = 0; 613 int ret;
614 614
615 mutex_lock(&wcn->hal_mutex); 615 mutex_lock(&wcn->hal_mutex);
616 INIT_HAL_MSG(msg_body, WCN36XX_HAL_FINISH_SCAN_REQ); 616 INIT_HAL_MSG(msg_body, WCN36XX_HAL_FINISH_SCAN_REQ);
@@ -732,7 +732,7 @@ out:
732static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len) 732static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len)
733{ 733{
734 struct wcn36xx_hal_switch_channel_rsp_msg *rsp; 734 struct wcn36xx_hal_switch_channel_rsp_msg *rsp;
735 int ret = 0; 735 int ret;
736 736
737 ret = wcn36xx_smd_rsp_status_check(buf, len); 737 ret = wcn36xx_smd_rsp_status_check(buf, len);
738 if (ret) 738 if (ret)
@@ -747,7 +747,7 @@ int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
747 struct ieee80211_vif *vif, int ch) 747 struct ieee80211_vif *vif, int ch)
748{ 748{
749 struct wcn36xx_hal_switch_channel_req_msg msg_body; 749 struct wcn36xx_hal_switch_channel_req_msg msg_body;
750 int ret = 0; 750 int ret;
751 751
752 mutex_lock(&wcn->hal_mutex); 752 mutex_lock(&wcn->hal_mutex);
753 INIT_HAL_MSG(msg_body, WCN36XX_HAL_CH_SWITCH_REQ); 753 INIT_HAL_MSG(msg_body, WCN36XX_HAL_CH_SWITCH_REQ);
@@ -860,7 +860,7 @@ int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn,
860 u8 *channels, size_t channel_count) 860 u8 *channels, size_t channel_count)
861{ 861{
862 struct wcn36xx_hal_update_scan_params_req_ex msg_body; 862 struct wcn36xx_hal_update_scan_params_req_ex msg_body;
863 int ret = 0; 863 int ret;
864 864
865 mutex_lock(&wcn->hal_mutex); 865 mutex_lock(&wcn->hal_mutex);
866 INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ); 866 INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ);
@@ -931,7 +931,7 @@ static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn,
931int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif) 931int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif)
932{ 932{
933 struct wcn36xx_hal_add_sta_self_req msg_body; 933 struct wcn36xx_hal_add_sta_self_req msg_body;
934 int ret = 0; 934 int ret;
935 935
936 mutex_lock(&wcn->hal_mutex); 936 mutex_lock(&wcn->hal_mutex);
937 INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_STA_SELF_REQ); 937 INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_STA_SELF_REQ);
@@ -965,7 +965,7 @@ out:
965int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr) 965int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr)
966{ 966{
967 struct wcn36xx_hal_del_sta_self_req_msg msg_body; 967 struct wcn36xx_hal_del_sta_self_req_msg msg_body;
968 int ret = 0; 968 int ret;
969 969
970 mutex_lock(&wcn->hal_mutex); 970 mutex_lock(&wcn->hal_mutex);
971 INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_STA_SELF_REQ); 971 INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_STA_SELF_REQ);
@@ -993,7 +993,7 @@ out:
993int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index) 993int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index)
994{ 994{
995 struct wcn36xx_hal_delete_sta_req_msg msg_body; 995 struct wcn36xx_hal_delete_sta_req_msg msg_body;
996 int ret = 0; 996 int ret;
997 997
998 mutex_lock(&wcn->hal_mutex); 998 mutex_lock(&wcn->hal_mutex);
999 INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_STA_REQ); 999 INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_STA_REQ);
@@ -1040,7 +1040,7 @@ static int wcn36xx_smd_join_rsp(void *buf, size_t len)
1040int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch) 1040int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch)
1041{ 1041{
1042 struct wcn36xx_hal_join_req_msg msg_body; 1042 struct wcn36xx_hal_join_req_msg msg_body;
1043 int ret = 0; 1043 int ret;
1044 1044
1045 mutex_lock(&wcn->hal_mutex); 1045 mutex_lock(&wcn->hal_mutex);
1046 INIT_HAL_MSG(msg_body, WCN36XX_HAL_JOIN_REQ); 1046 INIT_HAL_MSG(msg_body, WCN36XX_HAL_JOIN_REQ);
@@ -1089,7 +1089,7 @@ int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
1089 enum wcn36xx_hal_link_state state) 1089 enum wcn36xx_hal_link_state state)
1090{ 1090{
1091 struct wcn36xx_hal_set_link_state_req_msg msg_body; 1091 struct wcn36xx_hal_set_link_state_req_msg msg_body;
1092 int ret = 0; 1092 int ret;
1093 1093
1094 mutex_lock(&wcn->hal_mutex); 1094 mutex_lock(&wcn->hal_mutex);
1095 INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_LINK_ST_REQ); 1095 INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_LINK_ST_REQ);
@@ -1215,7 +1215,7 @@ int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
1215{ 1215{
1216 struct wcn36xx_hal_config_sta_req_msg msg; 1216 struct wcn36xx_hal_config_sta_req_msg msg;
1217 struct wcn36xx_hal_config_sta_params *sta_params; 1217 struct wcn36xx_hal_config_sta_params *sta_params;
1218 int ret = 0; 1218 int ret;
1219 1219
1220 mutex_lock(&wcn->hal_mutex); 1220 mutex_lock(&wcn->hal_mutex);
1221 INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_STA_REQ); 1221 INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_STA_REQ);
@@ -1414,7 +1414,7 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
1414 struct wcn36xx_hal_config_bss_params *bss; 1414 struct wcn36xx_hal_config_bss_params *bss;
1415 struct wcn36xx_hal_config_sta_params *sta_params; 1415 struct wcn36xx_hal_config_sta_params *sta_params;
1416 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); 1416 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
1417 int ret = 0; 1417 int ret;
1418 1418
1419 mutex_lock(&wcn->hal_mutex); 1419 mutex_lock(&wcn->hal_mutex);
1420 INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_BSS_REQ); 1420 INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_BSS_REQ);
@@ -1579,7 +1579,7 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
1579 u16 p2p_off) 1579 u16 p2p_off)
1580{ 1580{
1581 struct wcn36xx_hal_send_beacon_req_msg msg_body; 1581 struct wcn36xx_hal_send_beacon_req_msg msg_body;
1582 int ret = 0, pad, pvm_len; 1582 int ret, pad, pvm_len;
1583 1583
1584 mutex_lock(&wcn->hal_mutex); 1584 mutex_lock(&wcn->hal_mutex);
1585 INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ); 1585 INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ);
@@ -1653,7 +1653,7 @@ int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
1653 struct sk_buff *skb) 1653 struct sk_buff *skb)
1654{ 1654{
1655 struct wcn36xx_hal_send_probe_resp_req_msg msg; 1655 struct wcn36xx_hal_send_probe_resp_req_msg msg;
1656 int ret = 0; 1656 int ret;
1657 1657
1658 mutex_lock(&wcn->hal_mutex); 1658 mutex_lock(&wcn->hal_mutex);
1659 INIT_HAL_MSG(msg, WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ); 1659 INIT_HAL_MSG(msg, WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ);
@@ -1700,7 +1700,7 @@ int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
1700 u8 sta_index) 1700 u8 sta_index)
1701{ 1701{
1702 struct wcn36xx_hal_set_sta_key_req_msg msg_body; 1702 struct wcn36xx_hal_set_sta_key_req_msg msg_body;
1703 int ret = 0; 1703 int ret;
1704 1704
1705 mutex_lock(&wcn->hal_mutex); 1705 mutex_lock(&wcn->hal_mutex);
1706 INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_STAKEY_REQ); 1706 INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_STAKEY_REQ);
@@ -1708,12 +1708,20 @@ int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
1708 msg_body.set_sta_key_params.sta_index = sta_index; 1708 msg_body.set_sta_key_params.sta_index = sta_index;
1709 msg_body.set_sta_key_params.enc_type = enc_type; 1709 msg_body.set_sta_key_params.enc_type = enc_type;
1710 1710
1711 msg_body.set_sta_key_params.key[0].id = keyidx; 1711 if (enc_type == WCN36XX_HAL_ED_WEP104 ||
1712 msg_body.set_sta_key_params.key[0].unicast = 1; 1712 enc_type == WCN36XX_HAL_ED_WEP40) {
1713 msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX; 1713 /* Use bss key for wep (static) */
1714 msg_body.set_sta_key_params.key[0].pae_role = 0; 1714 msg_body.set_sta_key_params.def_wep_idx = keyidx;
1715 msg_body.set_sta_key_params.key[0].length = keylen; 1715 msg_body.set_sta_key_params.wep_type = 0;
1716 memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen); 1716 } else {
1717 msg_body.set_sta_key_params.key[0].id = keyidx;
1718 msg_body.set_sta_key_params.key[0].unicast = 1;
1719 msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX;
1720 msg_body.set_sta_key_params.key[0].pae_role = 0;
1721 msg_body.set_sta_key_params.key[0].length = keylen;
1722 memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen);
1723 }
1724
1717 msg_body.set_sta_key_params.single_tid_rc = 1; 1725 msg_body.set_sta_key_params.single_tid_rc = 1;
1718 1726
1719 PREPARE_HAL_BUF(wcn->hal_buf, msg_body); 1727 PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -1741,7 +1749,7 @@ int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
1741 u8 *key) 1749 u8 *key)
1742{ 1750{
1743 struct wcn36xx_hal_set_bss_key_req_msg msg_body; 1751 struct wcn36xx_hal_set_bss_key_req_msg msg_body;
1744 int ret = 0; 1752 int ret;
1745 1753
1746 mutex_lock(&wcn->hal_mutex); 1754 mutex_lock(&wcn->hal_mutex);
1747 INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_BSSKEY_REQ); 1755 INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_BSSKEY_REQ);
@@ -1778,7 +1786,7 @@ int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
1778 u8 sta_index) 1786 u8 sta_index)
1779{ 1787{
1780 struct wcn36xx_hal_remove_sta_key_req_msg msg_body; 1788 struct wcn36xx_hal_remove_sta_key_req_msg msg_body;
1781 int ret = 0; 1789 int ret;
1782 1790
1783 mutex_lock(&wcn->hal_mutex); 1791 mutex_lock(&wcn->hal_mutex);
1784 INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_STAKEY_REQ); 1792 INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_STAKEY_REQ);
@@ -1810,7 +1818,7 @@ int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
1810 u8 keyidx) 1818 u8 keyidx)
1811{ 1819{
1812 struct wcn36xx_hal_remove_bss_key_req_msg msg_body; 1820 struct wcn36xx_hal_remove_bss_key_req_msg msg_body;
1813 int ret = 0; 1821 int ret;
1814 1822
1815 mutex_lock(&wcn->hal_mutex); 1823 mutex_lock(&wcn->hal_mutex);
1816 INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_BSSKEY_REQ); 1824 INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_BSSKEY_REQ);
@@ -1839,7 +1847,7 @@ int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
1839{ 1847{
1840 struct wcn36xx_hal_enter_bmps_req_msg msg_body; 1848 struct wcn36xx_hal_enter_bmps_req_msg msg_body;
1841 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); 1849 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
1842 int ret = 0; 1850 int ret;
1843 1851
1844 mutex_lock(&wcn->hal_mutex); 1852 mutex_lock(&wcn->hal_mutex);
1845 INIT_HAL_MSG(msg_body, WCN36XX_HAL_ENTER_BMPS_REQ); 1853 INIT_HAL_MSG(msg_body, WCN36XX_HAL_ENTER_BMPS_REQ);
@@ -1869,7 +1877,7 @@ int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
1869{ 1877{
1870 struct wcn36xx_hal_exit_bmps_req_msg msg_body; 1878 struct wcn36xx_hal_exit_bmps_req_msg msg_body;
1871 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); 1879 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
1872 int ret = 0; 1880 int ret;
1873 1881
1874 mutex_lock(&wcn->hal_mutex); 1882 mutex_lock(&wcn->hal_mutex);
1875 INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_BMPS_REQ); 1883 INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_BMPS_REQ);
@@ -1895,7 +1903,7 @@ out:
1895int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim) 1903int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim)
1896{ 1904{
1897 struct wcn36xx_hal_set_power_params_req_msg msg_body; 1905 struct wcn36xx_hal_set_power_params_req_msg msg_body;
1898 int ret = 0; 1906 int ret;
1899 1907
1900 mutex_lock(&wcn->hal_mutex); 1908 mutex_lock(&wcn->hal_mutex);
1901 INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_POWER_PARAMS_REQ); 1909 INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_POWER_PARAMS_REQ);
@@ -1930,7 +1938,7 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
1930{ 1938{
1931 struct wcn36xx_hal_keep_alive_req_msg msg_body; 1939 struct wcn36xx_hal_keep_alive_req_msg msg_body;
1932 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); 1940 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
1933 int ret = 0; 1941 int ret;
1934 1942
1935 mutex_lock(&wcn->hal_mutex); 1943 mutex_lock(&wcn->hal_mutex);
1936 INIT_HAL_MSG(msg_body, WCN36XX_HAL_KEEP_ALIVE_REQ); 1944 INIT_HAL_MSG(msg_body, WCN36XX_HAL_KEEP_ALIVE_REQ);
@@ -1968,7 +1976,7 @@ int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
1968 u32 arg3, u32 arg4, u32 arg5) 1976 u32 arg3, u32 arg4, u32 arg5)
1969{ 1977{
1970 struct wcn36xx_hal_dump_cmd_req_msg msg_body; 1978 struct wcn36xx_hal_dump_cmd_req_msg msg_body;
1971 int ret = 0; 1979 int ret;
1972 1980
1973 mutex_lock(&wcn->hal_mutex); 1981 mutex_lock(&wcn->hal_mutex);
1974 INIT_HAL_MSG(msg_body, WCN36XX_HAL_DUMP_COMMAND_REQ); 1982 INIT_HAL_MSG(msg_body, WCN36XX_HAL_DUMP_COMMAND_REQ);
@@ -2013,7 +2021,6 @@ void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
2013int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap) 2021int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
2014{ 2022{
2015 int arr_idx, bit_idx; 2023 int arr_idx, bit_idx;
2016 int ret = 0;
2017 2024
2018 if (cap < 0 || cap > 127) { 2025 if (cap < 0 || cap > 127) {
2019 wcn36xx_warn("error cap idx %d\n", cap); 2026 wcn36xx_warn("error cap idx %d\n", cap);
@@ -2022,8 +2029,8 @@ int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
2022 2029
2023 arr_idx = cap / 32; 2030 arr_idx = cap / 32;
2024 bit_idx = cap % 32; 2031 bit_idx = cap % 32;
2025 ret = (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0; 2032
2026 return ret; 2033 return (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
2027} 2034}
2028 2035
2029void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap) 2036void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
@@ -2043,7 +2050,7 @@ void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
2043int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn) 2050int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
2044{ 2051{
2045 struct wcn36xx_hal_feat_caps_msg msg_body, *rsp; 2052 struct wcn36xx_hal_feat_caps_msg msg_body, *rsp;
2046 int ret = 0, i; 2053 int ret, i;
2047 2054
2048 mutex_lock(&wcn->hal_mutex); 2055 mutex_lock(&wcn->hal_mutex);
2049 INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ); 2056 INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
@@ -2079,7 +2086,7 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
2079 u8 sta_index) 2086 u8 sta_index)
2080{ 2087{
2081 struct wcn36xx_hal_add_ba_session_req_msg msg_body; 2088 struct wcn36xx_hal_add_ba_session_req_msg msg_body;
2082 int ret = 0; 2089 int ret;
2083 2090
2084 mutex_lock(&wcn->hal_mutex); 2091 mutex_lock(&wcn->hal_mutex);
2085 INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_SESSION_REQ); 2092 INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_SESSION_REQ);
@@ -2117,7 +2124,7 @@ out:
2117int wcn36xx_smd_add_ba(struct wcn36xx *wcn) 2124int wcn36xx_smd_add_ba(struct wcn36xx *wcn)
2118{ 2125{
2119 struct wcn36xx_hal_add_ba_req_msg msg_body; 2126 struct wcn36xx_hal_add_ba_req_msg msg_body;
2120 int ret = 0; 2127 int ret;
2121 2128
2122 mutex_lock(&wcn->hal_mutex); 2129 mutex_lock(&wcn->hal_mutex);
2123 INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_REQ); 2130 INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_REQ);
@@ -2145,7 +2152,7 @@ out:
2145int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index) 2152int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index)
2146{ 2153{
2147 struct wcn36xx_hal_del_ba_req_msg msg_body; 2154 struct wcn36xx_hal_del_ba_req_msg msg_body;
2148 int ret = 0; 2155 int ret;
2149 2156
2150 mutex_lock(&wcn->hal_mutex); 2157 mutex_lock(&wcn->hal_mutex);
2151 INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_BA_REQ); 2158 INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_BA_REQ);
@@ -2185,7 +2192,7 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
2185{ 2192{
2186 struct wcn36xx_hal_trigger_ba_req_msg msg_body; 2193 struct wcn36xx_hal_trigger_ba_req_msg msg_body;
2187 struct wcn36xx_hal_trigger_ba_req_candidate *candidate; 2194 struct wcn36xx_hal_trigger_ba_req_candidate *candidate;
2188 int ret = 0; 2195 int ret;
2189 2196
2190 mutex_lock(&wcn->hal_mutex); 2197 mutex_lock(&wcn->hal_mutex);
2191 INIT_HAL_MSG(msg_body, WCN36XX_HAL_TRIGGER_BA_REQ); 2198 INIT_HAL_MSG(msg_body, WCN36XX_HAL_TRIGGER_BA_REQ);
@@ -2364,7 +2371,7 @@ int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value)
2364{ 2371{
2365 struct wcn36xx_hal_update_cfg_req_msg msg_body, *body; 2372 struct wcn36xx_hal_update_cfg_req_msg msg_body, *body;
2366 size_t len; 2373 size_t len;
2367 int ret = 0; 2374 int ret;
2368 2375
2369 mutex_lock(&wcn->hal_mutex); 2376 mutex_lock(&wcn->hal_mutex);
2370 INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_CFG_REQ); 2377 INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_CFG_REQ);
@@ -2399,7 +2406,7 @@ int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
2399{ 2406{
2400 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); 2407 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
2401 struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *msg_body = NULL; 2408 struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *msg_body = NULL;
2402 int ret = 0; 2409 int ret;
2403 2410
2404 mutex_lock(&wcn->hal_mutex); 2411 mutex_lock(&wcn->hal_mutex);
2405 2412
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 11e74015c79a..a58f313983b9 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -129,6 +129,8 @@ struct wcn36xx_vif {
129 u8 self_sta_index; 129 u8 self_sta_index;
130 u8 self_dpu_desc_index; 130 u8 self_dpu_desc_index;
131 u8 self_ucast_dpu_sign; 131 u8 self_ucast_dpu_sign;
132
133 struct list_head sta_list;
132}; 134};
133 135
134/** 136/**
@@ -154,6 +156,7 @@ struct wcn36xx_vif {
154 * |______________|_____________|_______________| 156 * |______________|_____________|_______________|
155 */ 157 */
156struct wcn36xx_sta { 158struct wcn36xx_sta {
159 struct list_head list;
157 struct wcn36xx_vif *vif; 160 struct wcn36xx_vif *vif;
158 u16 aid; 161 u16 aid;
159 u16 tid; 162 u16 tid;
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 398edd2a7f2b..d3d61ae459e2 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -9,6 +9,7 @@ wil6210-$(CONFIG_WIL6210_DEBUGFS) += debugfs.o
9wil6210-y += wmi.o 9wil6210-y += wmi.o
10wil6210-y += interrupt.o 10wil6210-y += interrupt.o
11wil6210-y += txrx.o 11wil6210-y += txrx.o
12wil6210-y += txrx_edma.o
12wil6210-y += debug.o 13wil6210-y += debug.o
13wil6210-y += rx_reorder.o 14wil6210-y += rx_reorder.o
14wil6210-y += fw.o 15wil6210-y += fw.o
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 013d056a7a4c..e63b07830f2c 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1726,7 +1726,7 @@ static int wil_cfg80211_change_station(struct wiphy *wiphy,
1726 struct wil6210_priv *wil = wiphy_to_wil(wiphy); 1726 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
1727 int authorize; 1727 int authorize;
1728 int cid, i; 1728 int cid, i;
1729 struct vring_tx_data *txdata = NULL; 1729 struct wil_ring_tx_data *txdata = NULL;
1730 1730
1731 wil_dbg_misc(wil, "change station %pM mask 0x%x set 0x%x mid %d\n", 1731 wil_dbg_misc(wil, "change station %pM mask 0x%x set 0x%x mid %d\n",
1732 mac, params->sta_flags_mask, params->sta_flags_set, 1732 mac, params->sta_flags_mask, params->sta_flags_set,
@@ -1746,20 +1746,20 @@ static int wil_cfg80211_change_station(struct wiphy *wiphy,
1746 return -ENOLINK; 1746 return -ENOLINK;
1747 } 1747 }
1748 1748
1749 for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) 1749 for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++)
1750 if (wil->vring2cid_tid[i][0] == cid) { 1750 if (wil->ring2cid_tid[i][0] == cid) {
1751 txdata = &wil->vring_tx_data[i]; 1751 txdata = &wil->ring_tx_data[i];
1752 break; 1752 break;
1753 } 1753 }
1754 1754
1755 if (!txdata) { 1755 if (!txdata) {
1756 wil_err(wil, "vring data not found\n"); 1756 wil_err(wil, "ring data not found\n");
1757 return -ENOLINK; 1757 return -ENOLINK;
1758 } 1758 }
1759 1759
1760 authorize = params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED); 1760 authorize = params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED);
1761 txdata->dot1x_open = authorize ? 1 : 0; 1761 txdata->dot1x_open = authorize ? 1 : 0;
1762 wil_dbg_misc(wil, "cid %d vring %d authorize %d\n", cid, i, 1762 wil_dbg_misc(wil, "cid %d ring %d authorize %d\n", cid, i,
1763 txdata->dot1x_open); 1763 txdata->dot1x_open);
1764 1764
1765 return 0; 1765 return 0;
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index ebfdff4d328c..58ce044b1130 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -29,7 +29,10 @@
29/* Nasty hack. Better have per device instances */ 29/* Nasty hack. Better have per device instances */
30static u32 mem_addr; 30static u32 mem_addr;
31static u32 dbg_txdesc_index; 31static u32 dbg_txdesc_index;
32static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */ 32static u32 dbg_ring_index; /* 24+ for Rx, 0..23 for Tx */
33static u32 dbg_status_msg_index;
34/* 0..wil->num_rx_status_rings-1 for Rx, wil->tx_sring_idx for Tx */
35static u32 dbg_sring_index;
33 36
34enum dbg_off_type { 37enum dbg_off_type {
35 doff_u32 = 0, 38 doff_u32 = 0,
@@ -47,20 +50,53 @@ struct dbg_off {
47 enum dbg_off_type type; 50 enum dbg_off_type type;
48}; 51};
49 52
50static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil, 53static void wil_print_desc_edma(struct seq_file *s, struct wil6210_priv *wil,
51 const char *name, struct vring *vring, 54 struct wil_ring *ring,
52 char _s, char _h) 55 char _s, char _h, int idx)
53{ 56{
54 void __iomem *x = wmi_addr(wil, vring->hwtail); 57 u8 num_of_descs;
58 bool has_skb = false;
59
60 if (ring->is_rx) {
61 struct wil_rx_enhanced_desc *rx_d =
62 (struct wil_rx_enhanced_desc *)
63 &ring->va[idx].rx.enhanced;
64 u16 buff_id = le16_to_cpu(rx_d->mac.buff_id);
65
66 has_skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
67 seq_printf(s, "%c", (has_skb) ? _h : _s);
68 } else {
69 struct wil_tx_enhanced_desc *d =
70 (struct wil_tx_enhanced_desc *)
71 &ring->va[idx].tx.enhanced;
72
73 num_of_descs = (u8)d->mac.d[2];
74 has_skb = ring->ctx[idx].skb;
75 if (num_of_descs >= 1)
76 seq_printf(s, "%c", ring->ctx[idx].skb ? _h : _s);
77 else
78 /* num_of_descs == 0, it's a frag in a list of descs */
79 seq_printf(s, "%c", has_skb ? 'h' : _s);
80 }
81}
82
83static void wil_print_ring(struct seq_file *s, struct wil6210_priv *wil,
84 const char *name, struct wil_ring *ring,
85 char _s, char _h)
86{
87 void __iomem *x = wmi_addr(wil, ring->hwtail);
55 u32 v; 88 u32 v;
56 89
57 seq_printf(s, "VRING %s = {\n", name); 90 seq_printf(s, "RING %s = {\n", name);
58 seq_printf(s, " pa = %pad\n", &vring->pa); 91 seq_printf(s, " pa = %pad\n", &ring->pa);
59 seq_printf(s, " va = 0x%p\n", vring->va); 92 seq_printf(s, " va = 0x%p\n", ring->va);
60 seq_printf(s, " size = %d\n", vring->size); 93 seq_printf(s, " size = %d\n", ring->size);
61 seq_printf(s, " swtail = %d\n", vring->swtail); 94 if (wil->use_enhanced_dma_hw && ring->is_rx)
62 seq_printf(s, " swhead = %d\n", vring->swhead); 95 seq_printf(s, " swtail = %u\n", *ring->edma_rx_swtail.va);
63 seq_printf(s, " hwtail = [0x%08x] -> ", vring->hwtail); 96 else
97 seq_printf(s, " swtail = %d\n", ring->swtail);
98 seq_printf(s, " swhead = %d\n", ring->swhead);
99 seq_printf(s, " hwtail = [0x%08x] -> ", ring->hwtail);
64 if (x) { 100 if (x) {
65 v = readl(x); 101 v = readl(x);
66 seq_printf(s, "0x%08x = %d\n", v, v); 102 seq_printf(s, "0x%08x = %d\n", v, v);
@@ -68,41 +104,45 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
68 seq_puts(s, "???\n"); 104 seq_puts(s, "???\n");
69 } 105 }
70 106
71 if (vring->va && (vring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) { 107 if (ring->va && (ring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
72 uint i; 108 uint i;
73 109
74 for (i = 0; i < vring->size; i++) { 110 for (i = 0; i < ring->size; i++) {
75 volatile struct vring_tx_desc *d = &vring->va[i].tx; 111 if ((i % 128) == 0 && i != 0)
76
77 if ((i % 128) == 0 && (i != 0))
78 seq_puts(s, "\n"); 112 seq_puts(s, "\n");
79 seq_printf(s, "%c", (d->dma.status & BIT(0)) ? 113 if (wil->use_enhanced_dma_hw) {
80 _s : (vring->ctx[i].skb ? _h : 'h')); 114 wil_print_desc_edma(s, wil, ring, _s, _h, i);
115 } else {
116 volatile struct vring_tx_desc *d =
117 &ring->va[i].tx.legacy;
118 seq_printf(s, "%c", (d->dma.status & BIT(0)) ?
119 _s : (ring->ctx[i].skb ? _h : 'h'));
120 }
81 } 121 }
82 seq_puts(s, "\n"); 122 seq_puts(s, "\n");
83 } 123 }
84 seq_puts(s, "}\n"); 124 seq_puts(s, "}\n");
85} 125}
86 126
87static int wil_vring_debugfs_show(struct seq_file *s, void *data) 127static int wil_ring_debugfs_show(struct seq_file *s, void *data)
88{ 128{
89 uint i; 129 uint i;
90 struct wil6210_priv *wil = s->private; 130 struct wil6210_priv *wil = s->private;
91 131
92 wil_print_vring(s, wil, "rx", &wil->vring_rx, 'S', '_'); 132 wil_print_ring(s, wil, "rx", &wil->ring_rx, 'S', '_');
93 133
94 for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) { 134 for (i = 0; i < ARRAY_SIZE(wil->ring_tx); i++) {
95 struct vring *vring = &wil->vring_tx[i]; 135 struct wil_ring *ring = &wil->ring_tx[i];
96 struct vring_tx_data *txdata = &wil->vring_tx_data[i]; 136 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
97 137
98 if (vring->va) { 138 if (ring->va) {
99 int cid = wil->vring2cid_tid[i][0]; 139 int cid = wil->ring2cid_tid[i][0];
100 int tid = wil->vring2cid_tid[i][1]; 140 int tid = wil->ring2cid_tid[i][1];
101 u32 swhead = vring->swhead; 141 u32 swhead = ring->swhead;
102 u32 swtail = vring->swtail; 142 u32 swtail = ring->swtail;
103 int used = (vring->size + swhead - swtail) 143 int used = (ring->size + swhead - swtail)
104 % vring->size; 144 % ring->size;
105 int avail = vring->size - used - 1; 145 int avail = ring->size - used - 1;
106 char name[10]; 146 char name[10];
107 char sidle[10]; 147 char sidle[10];
108 /* performance monitoring */ 148 /* performance monitoring */
@@ -137,20 +177,88 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
137 txdata->dot1x_open ? "+" : "-", 177 txdata->dot1x_open ? "+" : "-",
138 used, avail, sidle); 178 used, avail, sidle);
139 179
140 wil_print_vring(s, wil, name, vring, '_', 'H'); 180 wil_print_ring(s, wil, name, ring, '_', 'H');
181 }
182 }
183
184 return 0;
185}
186
187static int wil_ring_seq_open(struct inode *inode, struct file *file)
188{
189 return single_open(file, wil_ring_debugfs_show, inode->i_private);
190}
191
192static const struct file_operations fops_ring = {
193 .open = wil_ring_seq_open,
194 .release = single_release,
195 .read = seq_read,
196 .llseek = seq_lseek,
197};
198
199static void wil_print_sring(struct seq_file *s, struct wil6210_priv *wil,
200 struct wil_status_ring *sring)
201{
202 void __iomem *x = wmi_addr(wil, sring->hwtail);
203 int sring_idx = sring - wil->srings;
204 u32 v;
205
206 seq_printf(s, "Status Ring %s [ %d ] = {\n",
207 sring->is_rx ? "RX" : "TX", sring_idx);
208 seq_printf(s, " pa = %pad\n", &sring->pa);
209 seq_printf(s, " va = 0x%pK\n", sring->va);
210 seq_printf(s, " size = %d\n", sring->size);
211 seq_printf(s, " elem_size = %zu\n", sring->elem_size);
212 seq_printf(s, " swhead = %d\n", sring->swhead);
213 seq_printf(s, " hwtail = [0x%08x] -> ", sring->hwtail);
214 if (x) {
215 v = readl_relaxed(x);
216 seq_printf(s, "0x%08x = %d\n", v, v);
217 } else {
218 seq_puts(s, "???\n");
219 }
220 seq_printf(s, " desc_rdy_pol = %d\n", sring->desc_rdy_pol);
221
222 if (sring->va && (sring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
223 uint i;
224
225 for (i = 0; i < sring->size; i++) {
226 u32 *sdword_0 =
227 (u32 *)(sring->va + (sring->elem_size * i));
228
229 if ((i % 128) == 0 && i != 0)
230 seq_puts(s, "\n");
231 if (i == sring->swhead)
232 seq_printf(s, "%c", (*sdword_0 & BIT(31)) ?
233 'X' : 'x');
234 else
235 seq_printf(s, "%c", (*sdword_0 & BIT(31)) ?
236 '1' : '0');
141 } 237 }
238 seq_puts(s, "\n");
142 } 239 }
240 seq_puts(s, "}\n");
241}
242
243static int wil_srings_debugfs_show(struct seq_file *s, void *data)
244{
245 struct wil6210_priv *wil = s->private;
246 int i = 0;
247
248 for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++)
249 if (wil->srings[i].va)
250 wil_print_sring(s, wil, &wil->srings[i]);
143 251
144 return 0; 252 return 0;
145} 253}
146 254
147static int wil_vring_seq_open(struct inode *inode, struct file *file) 255static int wil_srings_seq_open(struct inode *inode, struct file *file)
148{ 256{
149 return single_open(file, wil_vring_debugfs_show, inode->i_private); 257 return single_open(file, wil_srings_debugfs_show, inode->i_private);
150} 258}
151 259
152static const struct file_operations fops_vring = { 260static const struct file_operations fops_srings = {
153 .open = wil_vring_seq_open, 261 .open = wil_srings_seq_open,
154 .release = single_release, 262 .release = single_release,
155 .read = seq_read, 263 .read = seq_read,
156 .llseek = seq_lseek, 264 .llseek = seq_lseek,
@@ -162,8 +270,8 @@ static void wil_seq_hexdump(struct seq_file *s, void *p, int len,
162 seq_hex_dump(s, prefix, DUMP_PREFIX_NONE, 16, 1, p, len, false); 270 seq_hex_dump(s, prefix, DUMP_PREFIX_NONE, 16, 1, p, len, false);
163} 271}
164 272
165static void wil_print_ring(struct seq_file *s, const char *prefix, 273static void wil_print_mbox_ring(struct seq_file *s, const char *prefix,
166 void __iomem *off) 274 void __iomem *off)
167{ 275{
168 struct wil6210_priv *wil = s->private; 276 struct wil6210_priv *wil = s->private;
169 struct wil6210_mbox_ring r; 277 struct wil6210_mbox_ring r;
@@ -249,9 +357,9 @@ static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
249 if (ret < 0) 357 if (ret < 0)
250 return ret; 358 return ret;
251 359
252 wil_print_ring(s, "tx", wil->csr + HOST_MBOX + 360 wil_print_mbox_ring(s, "tx", wil->csr + HOST_MBOX +
253 offsetof(struct wil6210_mbox_ctl, tx)); 361 offsetof(struct wil6210_mbox_ctl, tx));
254 wil_print_ring(s, "rx", wil->csr + HOST_MBOX + 362 wil_print_mbox_ring(s, "rx", wil->csr + HOST_MBOX +
255 offsetof(struct wil6210_mbox_ctl, rx)); 363 offsetof(struct wil6210_mbox_ctl, rx));
256 364
257 wil_pm_runtime_put(wil); 365 wil_pm_runtime_put(wil);
@@ -719,13 +827,13 @@ static ssize_t wil_write_back(struct file *file, const char __user *buf,
719 827
720 if ((strcmp(cmd, "add") == 0) || 828 if ((strcmp(cmd, "add") == 0) ||
721 (strcmp(cmd, "del_tx") == 0)) { 829 (strcmp(cmd, "del_tx") == 0)) {
722 struct vring_tx_data *txdata; 830 struct wil_ring_tx_data *txdata;
723 831
724 if (p1 < 0 || p1 >= WIL6210_MAX_TX_RINGS) { 832 if (p1 < 0 || p1 >= WIL6210_MAX_TX_RINGS) {
725 wil_err(wil, "BACK: invalid ring id %d\n", p1); 833 wil_err(wil, "BACK: invalid ring id %d\n", p1);
726 return -EINVAL; 834 return -EINVAL;
727 } 835 }
728 txdata = &wil->vring_tx_data[p1]; 836 txdata = &wil->ring_tx_data[p1];
729 if (strcmp(cmd, "add") == 0) { 837 if (strcmp(cmd, "add") == 0) {
730 if (rc < 3) { 838 if (rc < 3) {
731 wil_err(wil, "BACK: add require at least 2 params\n"); 839 wil_err(wil, "BACK: add require at least 2 params\n");
@@ -972,54 +1080,93 @@ static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb)
972static int wil_txdesc_debugfs_show(struct seq_file *s, void *data) 1080static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
973{ 1081{
974 struct wil6210_priv *wil = s->private; 1082 struct wil6210_priv *wil = s->private;
975 struct vring *vring; 1083 struct wil_ring *ring;
976 bool tx = (dbg_vring_index < WIL6210_MAX_TX_RINGS); 1084 bool tx;
1085 int ring_idx = dbg_ring_index;
1086 int txdesc_idx = dbg_txdesc_index;
1087 volatile struct vring_tx_desc *d;
1088 volatile u32 *u;
1089 struct sk_buff *skb;
1090
1091 if (wil->use_enhanced_dma_hw) {
1092 /* RX ring index == 0 */
1093 if (ring_idx >= WIL6210_MAX_TX_RINGS) {
1094 seq_printf(s, "invalid ring index %d\n", ring_idx);
1095 return 0;
1096 }
1097 tx = ring_idx > 0; /* desc ring 0 is reserved for RX */
1098 } else {
1099 /* RX ring index == WIL6210_MAX_TX_RINGS */
1100 if (ring_idx > WIL6210_MAX_TX_RINGS) {
1101 seq_printf(s, "invalid ring index %d\n", ring_idx);
1102 return 0;
1103 }
1104 tx = (ring_idx < WIL6210_MAX_TX_RINGS);
1105 }
977 1106
978 vring = tx ? &wil->vring_tx[dbg_vring_index] : &wil->vring_rx; 1107 ring = tx ? &wil->ring_tx[ring_idx] : &wil->ring_rx;
979 1108
980 if (!vring->va) { 1109 if (!ring->va) {
981 if (tx) 1110 if (tx)
982 seq_printf(s, "No Tx[%2d] VRING\n", dbg_vring_index); 1111 seq_printf(s, "No Tx[%2d] RING\n", ring_idx);
983 else 1112 else
984 seq_puts(s, "No Rx VRING\n"); 1113 seq_puts(s, "No Rx RING\n");
985 return 0; 1114 return 0;
986 } 1115 }
987 1116
988 if (dbg_txdesc_index < vring->size) { 1117 if (txdesc_idx >= ring->size) {
989 /* use struct vring_tx_desc for Rx as well,
990 * only field used, .dma.length, is the same
991 */
992 volatile struct vring_tx_desc *d =
993 &vring->va[dbg_txdesc_index].tx;
994 volatile u32 *u = (volatile u32 *)d;
995 struct sk_buff *skb = vring->ctx[dbg_txdesc_index].skb;
996
997 if (tx) 1118 if (tx)
998 seq_printf(s, "Tx[%2d][%3d] = {\n", dbg_vring_index, 1119 seq_printf(s, "[%2d] TxDesc index (%d) >= size (%d)\n",
999 dbg_txdesc_index); 1120 ring_idx, txdesc_idx, ring->size);
1000 else 1121 else
1001 seq_printf(s, "Rx[%3d] = {\n", dbg_txdesc_index); 1122 seq_printf(s, "RxDesc index (%d) >= size (%d)\n",
1002 seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n", 1123 txdesc_idx, ring->size);
1003 u[0], u[1], u[2], u[3]); 1124 return 0;
1004 seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n", 1125 }
1005 u[4], u[5], u[6], u[7]);
1006 seq_printf(s, " SKB = 0x%p\n", skb);
1007 1126
1008 if (skb) { 1127 /* use struct vring_tx_desc for Rx as well,
1009 skb_get(skb); 1128 * only field used, .dma.length, is the same
1010 wil_seq_print_skb(s, skb); 1129 */
1011 kfree_skb(skb); 1130 d = &ring->va[txdesc_idx].tx.legacy;
1131 u = (volatile u32 *)d;
1132 skb = NULL;
1133
1134 if (wil->use_enhanced_dma_hw) {
1135 if (tx) {
1136 skb = ring->ctx[txdesc_idx].skb;
1137 } else {
1138 struct wil_rx_enhanced_desc *rx_d =
1139 (struct wil_rx_enhanced_desc *)
1140 &ring->va[txdesc_idx].rx.enhanced;
1141 u16 buff_id = le16_to_cpu(rx_d->mac.buff_id);
1142
1143 if (!wil_val_in_range(buff_id, 0,
1144 wil->rx_buff_mgmt.size)) {
1145 seq_printf(s, "invalid buff_id %d\n", buff_id);
1146 return 0;
1147 }
1148 skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
1012 } 1149 }
1013 seq_puts(s, "}\n");
1014 } else { 1150 } else {
1015 if (tx) 1151 skb = ring->ctx[txdesc_idx].skb;
1016 seq_printf(s, "[%2d] TxDesc index (%d) >= size (%d)\n",
1017 dbg_vring_index, dbg_txdesc_index,
1018 vring->size);
1019 else
1020 seq_printf(s, "RxDesc index (%d) >= size (%d)\n",
1021 dbg_txdesc_index, vring->size);
1022 } 1152 }
1153 if (tx)
1154 seq_printf(s, "Tx[%2d][%3d] = {\n", ring_idx,
1155 txdesc_idx);
1156 else
1157 seq_printf(s, "Rx[%3d] = {\n", txdesc_idx);
1158 seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
1159 u[0], u[1], u[2], u[3]);
1160 seq_printf(s, " DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
1161 u[4], u[5], u[6], u[7]);
1162 seq_printf(s, " SKB = 0x%p\n", skb);
1163
1164 if (skb) {
1165 skb_get(skb);
1166 wil_seq_print_skb(s, skb);
1167 kfree_skb(skb);
1168 }
1169 seq_puts(s, "}\n");
1023 1170
1024 return 0; 1171 return 0;
1025} 1172}
@@ -1036,6 +1183,115 @@ static const struct file_operations fops_txdesc = {
1036 .llseek = seq_lseek, 1183 .llseek = seq_lseek,
1037}; 1184};
1038 1185
1186/*---------Tx/Rx status message------------*/
1187static int wil_status_msg_debugfs_show(struct seq_file *s, void *data)
1188{
1189 struct wil6210_priv *wil = s->private;
1190 int sring_idx = dbg_sring_index;
1191 struct wil_status_ring *sring;
1192 bool tx = sring_idx == wil->tx_sring_idx ? 1 : 0;
1193 u32 status_msg_idx = dbg_status_msg_index;
1194 u32 *u;
1195
1196 if (sring_idx >= WIL6210_MAX_STATUS_RINGS) {
1197 seq_printf(s, "invalid status ring index %d\n", sring_idx);
1198 return 0;
1199 }
1200
1201 sring = &wil->srings[sring_idx];
1202
1203 if (!sring->va) {
1204 seq_printf(s, "No %cX status ring\n", tx ? 'T' : 'R');
1205 return 0;
1206 }
1207
1208 if (status_msg_idx >= sring->size) {
1209 seq_printf(s, "%cxDesc index (%d) >= size (%d)\n",
1210 tx ? 'T' : 'R', status_msg_idx, sring->size);
1211 return 0;
1212 }
1213
1214 u = sring->va + (sring->elem_size * status_msg_idx);
1215
1216 seq_printf(s, "%cx[%d][%3d] = {\n",
1217 tx ? 'T' : 'R', sring_idx, status_msg_idx);
1218
1219 seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x\n",
1220 u[0], u[1], u[2], u[3]);
1221 if (!tx && !wil->use_compressed_rx_status)
1222 seq_printf(s, " 0x%08x 0x%08x 0x%08x 0x%08x\n",
1223 u[4], u[5], u[6], u[7]);
1224
1225 seq_puts(s, "}\n");
1226
1227 return 0;
1228}
1229
1230static int wil_status_msg_seq_open(struct inode *inode, struct file *file)
1231{
1232 return single_open(file, wil_status_msg_debugfs_show,
1233 inode->i_private);
1234}
1235
1236static const struct file_operations fops_status_msg = {
1237 .open = wil_status_msg_seq_open,
1238 .release = single_release,
1239 .read = seq_read,
1240 .llseek = seq_lseek,
1241};
1242
1243static int wil_print_rx_buff(struct seq_file *s, struct list_head *lh)
1244{
1245 struct wil_rx_buff *it;
1246 int i = 0;
1247
1248 list_for_each_entry(it, lh, list) {
1249 if ((i % 16) == 0 && i != 0)
1250 seq_puts(s, "\n ");
1251 seq_printf(s, "[%4d] ", it->id);
1252 i++;
1253 }
1254 seq_printf(s, "\nNumber of buffers: %u\n", i);
1255
1256 return i;
1257}
1258
1259static int wil_rx_buff_mgmt_debugfs_show(struct seq_file *s, void *data)
1260{
1261 struct wil6210_priv *wil = s->private;
1262 struct wil_rx_buff_mgmt *rbm = &wil->rx_buff_mgmt;
1263 int num_active;
1264 int num_free;
1265
1266 seq_printf(s, " size = %zu\n", rbm->size);
1267 seq_printf(s, " free_list_empty_cnt = %lu\n",
1268 rbm->free_list_empty_cnt);
1269
1270 /* Print active list */
1271 seq_puts(s, " Active list:\n");
1272 num_active = wil_print_rx_buff(s, &rbm->active);
1273 seq_puts(s, "\n Free list:\n");
1274 num_free = wil_print_rx_buff(s, &rbm->free);
1275
1276 seq_printf(s, " Total number of buffers: %u\n",
1277 num_active + num_free);
1278
1279 return 0;
1280}
1281
1282static int wil_rx_buff_mgmt_seq_open(struct inode *inode, struct file *file)
1283{
1284 return single_open(file, wil_rx_buff_mgmt_debugfs_show,
1285 inode->i_private);
1286}
1287
1288static const struct file_operations fops_rx_buff_mgmt = {
1289 .open = wil_rx_buff_mgmt_seq_open,
1290 .release = single_release,
1291 .read = seq_read,
1292 .llseek = seq_lseek,
1293};
1294
1039/*---------beamforming------------*/ 1295/*---------beamforming------------*/
1040static char *wil_bfstatus_str(u32 status) 1296static char *wil_bfstatus_str(u32 status)
1041{ 1297{
@@ -1478,6 +1734,13 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
1478 p->stats.rx_large_frame, 1734 p->stats.rx_large_frame,
1479 p->stats.rx_replay); 1735 p->stats.rx_replay);
1480 1736
1737 if (wil->use_enhanced_dma_hw)
1738 seq_printf(s,
1739 "mic error %lu, key error %lu, amsdu error %lu\n",
1740 p->stats.rx_mic_error,
1741 p->stats.rx_key_error,
1742 p->stats.rx_amsdu_error);
1743
1481 seq_puts(s, "Rx/MCS:"); 1744 seq_puts(s, "Rx/MCS:");
1482 for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs); 1745 for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs);
1483 mcs++) 1746 mcs++)
@@ -1760,6 +2023,60 @@ static const struct file_operations fops_suspend_stats = {
1760 .open = simple_open, 2023 .open = simple_open,
1761}; 2024};
1762 2025
2026/*---------compressed_rx_status---------*/
2027static ssize_t wil_compressed_rx_status_write(struct file *file,
2028 const char __user *buf,
2029 size_t len, loff_t *ppos)
2030{
2031 struct seq_file *s = file->private_data;
2032 struct wil6210_priv *wil = s->private;
2033 int compressed_rx_status;
2034 int rc;
2035
2036 rc = kstrtoint_from_user(buf, len, 0, &compressed_rx_status);
2037 if (rc) {
2038 wil_err(wil, "Invalid argument\n");
2039 return rc;
2040 }
2041
2042 if (wil_has_active_ifaces(wil, true, false)) {
2043 wil_err(wil, "cannot change edma config after iface is up\n");
2044 return -EPERM;
2045 }
2046
2047 wil_info(wil, "%sable compressed_rx_status\n",
2048 compressed_rx_status ? "En" : "Dis");
2049
2050 wil->use_compressed_rx_status = compressed_rx_status;
2051
2052 return len;
2053}
2054
2055static int
2056wil_compressed_rx_status_show(struct seq_file *s, void *data)
2057{
2058 struct wil6210_priv *wil = s->private;
2059
2060 seq_printf(s, "%d\n", wil->use_compressed_rx_status);
2061
2062 return 0;
2063}
2064
2065static int
2066wil_compressed_rx_status_seq_open(struct inode *inode, struct file *file)
2067{
2068 return single_open(file, wil_compressed_rx_status_show,
2069 inode->i_private);
2070}
2071
2072static const struct file_operations fops_compressed_rx_status = {
2073 .open = wil_compressed_rx_status_seq_open,
2074 .release = single_release,
2075 .read = seq_read,
2076 .write = wil_compressed_rx_status_write,
2077 .llseek = seq_lseek,
2078};
2079
1763/*----------------*/ 2080/*----------------*/
1764static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil, 2081static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
1765 struct dentry *dbg) 2082 struct dentry *dbg)
@@ -1790,7 +2107,7 @@ static const struct {
1790 const struct file_operations *fops; 2107 const struct file_operations *fops;
1791} dbg_files[] = { 2108} dbg_files[] = {
1792 {"mbox", 0444, &fops_mbox}, 2109 {"mbox", 0444, &fops_mbox},
1793 {"vrings", 0444, &fops_vring}, 2110 {"rings", 0444, &fops_ring},
1794 {"stations", 0444, &fops_sta}, 2111 {"stations", 0444, &fops_sta},
1795 {"mids", 0444, &fops_mids}, 2112 {"mids", 0444, &fops_mids},
1796 {"desc", 0444, &fops_txdesc}, 2113 {"desc", 0444, &fops_txdesc},
@@ -1813,6 +2130,10 @@ static const struct {
1813 {"fw_capabilities", 0444, &fops_fw_capabilities}, 2130 {"fw_capabilities", 0444, &fops_fw_capabilities},
1814 {"fw_version", 0444, &fops_fw_version}, 2131 {"fw_version", 0444, &fops_fw_version},
1815 {"suspend_stats", 0644, &fops_suspend_stats}, 2132 {"suspend_stats", 0644, &fops_suspend_stats},
2133 {"compressed_rx_status", 0644, &fops_compressed_rx_status},
2134 {"srings", 0444, &fops_srings},
2135 {"status_msg", 0444, &fops_status_msg},
2136 {"rx_buff_mgmt", 0444, &fops_rx_buff_mgmt},
1816}; 2137};
1817 2138
1818static void wil6210_debugfs_init_files(struct wil6210_priv *wil, 2139static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@@ -1858,7 +2179,12 @@ static const struct dbg_off dbg_wil_off[] = {
1858 WIL_FIELD(chip_revision, 0444, doff_u8), 2179 WIL_FIELD(chip_revision, 0444, doff_u8),
1859 WIL_FIELD(abft_len, 0644, doff_u8), 2180 WIL_FIELD(abft_len, 0644, doff_u8),
1860 WIL_FIELD(wakeup_trigger, 0644, doff_u8), 2181 WIL_FIELD(wakeup_trigger, 0644, doff_u8),
1861 WIL_FIELD(vring_idle_trsh, 0644, doff_u32), 2182 WIL_FIELD(ring_idle_trsh, 0644, doff_u32),
2183 WIL_FIELD(num_rx_status_rings, 0644, doff_u8),
2184 WIL_FIELD(rx_status_ring_order, 0644, doff_u32),
2185 WIL_FIELD(tx_status_ring_order, 0644, doff_u32),
2186 WIL_FIELD(rx_buff_id_count, 0644, doff_u32),
2187 WIL_FIELD(amsdu_en, 0644, doff_u8),
1862 {}, 2188 {},
1863}; 2189};
1864 2190
@@ -1872,9 +2198,11 @@ static const struct dbg_off dbg_wil_regs[] = {
1872/* static parameters */ 2198/* static parameters */
1873static const struct dbg_off dbg_statics[] = { 2199static const struct dbg_off dbg_statics[] = {
1874 {"desc_index", 0644, (ulong)&dbg_txdesc_index, doff_u32}, 2200 {"desc_index", 0644, (ulong)&dbg_txdesc_index, doff_u32},
1875 {"vring_index", 0644, (ulong)&dbg_vring_index, doff_u32}, 2201 {"ring_index", 0644, (ulong)&dbg_ring_index, doff_u32},
1876 {"mem_addr", 0644, (ulong)&mem_addr, doff_u32}, 2202 {"mem_addr", 0644, (ulong)&mem_addr, doff_u32},
1877 {"led_polarity", 0644, (ulong)&led_polarity, doff_u8}, 2203 {"led_polarity", 0644, (ulong)&led_polarity, doff_u8},
2204 {"status_index", 0644, (ulong)&dbg_status_msg_index, doff_u32},
2205 {"sring_index", 0644, (ulong)&dbg_sring_index, doff_u32},
1878 {}, 2206 {},
1879}; 2207};
1880 2208
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index e7ff41e623d2..a04c87ffd37b 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -101,7 +101,7 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
101 if (ret < 0) 101 if (ret < 0)
102 return ret; 102 return ret;
103 103
104 wil_configure_interrupt_moderation(wil); 104 wil->txrx_ops.configure_interrupt_moderation(wil);
105 105
106 wil_pm_runtime_put(wil); 106 wil_pm_runtime_put(wil);
107 107
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 84e9840c1752..d7e112da6a8d 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -44,6 +44,8 @@
44 (~(BIT_DMA_EP_RX_ICR_RX_HTRSH))) 44 (~(BIT_DMA_EP_RX_ICR_RX_HTRSH)))
45#define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \ 45#define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \
46 BIT_DMA_EP_TX_ICR_TX_DONE_N(0)) 46 BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
47#define WIL6210_IMC_TX_EDMA BIT_TX_STATUS_IRQ
48#define WIL6210_IMC_RX_EDMA BIT_RX_STATUS_IRQ
47#define WIL6210_IMC_MISC_NO_HALP (ISR_MISC_FW_READY | \ 49#define WIL6210_IMC_MISC_NO_HALP (ISR_MISC_FW_READY | \
48 ISR_MISC_MBOX_EVT | \ 50 ISR_MISC_MBOX_EVT | \
49 ISR_MISC_FW_ERROR) 51 ISR_MISC_FW_ERROR)
@@ -87,12 +89,24 @@ static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
87 WIL6210_IRQ_DISABLE); 89 WIL6210_IRQ_DISABLE);
88} 90}
89 91
92static void wil6210_mask_irq_tx_edma(struct wil6210_priv *wil)
93{
94 wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, IMS),
95 WIL6210_IRQ_DISABLE);
96}
97
90static void wil6210_mask_irq_rx(struct wil6210_priv *wil) 98static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
91{ 99{
92 wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMS), 100 wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMS),
93 WIL6210_IRQ_DISABLE); 101 WIL6210_IRQ_DISABLE);
94} 102}
95 103
104static void wil6210_mask_irq_rx_edma(struct wil6210_priv *wil)
105{
106 wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, IMS),
107 WIL6210_IRQ_DISABLE);
108}
109
96static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp) 110static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
97{ 111{
98 wil_dbg_irq(wil, "mask_irq_misc: mask_halp(%s)\n", 112 wil_dbg_irq(wil, "mask_irq_misc: mask_halp(%s)\n",
@@ -125,6 +139,12 @@ void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
125 WIL6210_IMC_TX); 139 WIL6210_IMC_TX);
126} 140}
127 141
142void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil)
143{
144 wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, IMC),
145 WIL6210_IMC_TX_EDMA);
146}
147
128void wil6210_unmask_irq_rx(struct wil6210_priv *wil) 148void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
129{ 149{
130 bool unmask_rx_htrsh = atomic_read(&wil->connected_vifs) > 0; 150 bool unmask_rx_htrsh = atomic_read(&wil->connected_vifs) > 0;
@@ -133,6 +153,12 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
133 unmask_rx_htrsh ? WIL6210_IMC_RX : WIL6210_IMC_RX_NO_RX_HTRSH); 153 unmask_rx_htrsh ? WIL6210_IMC_RX : WIL6210_IMC_RX_NO_RX_HTRSH);
134} 154}
135 155
156void wil6210_unmask_irq_rx_edma(struct wil6210_priv *wil)
157{
158 wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, IMC),
159 WIL6210_IMC_RX_EDMA);
160}
161
136static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp) 162static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp)
137{ 163{
138 wil_dbg_irq(wil, "unmask_irq_misc: unmask_halp(%s)\n", 164 wil_dbg_irq(wil, "unmask_irq_misc: unmask_halp(%s)\n",
@@ -164,7 +190,9 @@ void wil_mask_irq(struct wil6210_priv *wil)
164 wil_dbg_irq(wil, "mask_irq\n"); 190 wil_dbg_irq(wil, "mask_irq\n");
165 191
166 wil6210_mask_irq_tx(wil); 192 wil6210_mask_irq_tx(wil);
193 wil6210_mask_irq_tx_edma(wil);
167 wil6210_mask_irq_rx(wil); 194 wil6210_mask_irq_rx(wil);
195 wil6210_mask_irq_rx_edma(wil);
168 wil6210_mask_irq_misc(wil, true); 196 wil6210_mask_irq_misc(wil, true);
169 wil6210_mask_irq_pseudo(wil); 197 wil6210_mask_irq_pseudo(wil);
170} 198}
@@ -179,13 +207,43 @@ void wil_unmask_irq(struct wil6210_priv *wil)
179 WIL_ICR_ICC_VALUE); 207 WIL_ICR_ICC_VALUE);
180 wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC), 208 wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
181 WIL_ICR_ICC_MISC_VALUE); 209 WIL_ICR_ICC_MISC_VALUE);
210 wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, ICC),
211 WIL_ICR_ICC_VALUE);
212 wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, ICC),
213 WIL_ICR_ICC_VALUE);
182 214
183 wil6210_unmask_irq_pseudo(wil); 215 wil6210_unmask_irq_pseudo(wil);
184 wil6210_unmask_irq_tx(wil); 216 if (wil->use_enhanced_dma_hw) {
185 wil6210_unmask_irq_rx(wil); 217 wil6210_unmask_irq_tx_edma(wil);
218 wil6210_unmask_irq_rx_edma(wil);
219 } else {
220 wil6210_unmask_irq_tx(wil);
221 wil6210_unmask_irq_rx(wil);
222 }
186 wil6210_unmask_irq_misc(wil, true); 223 wil6210_unmask_irq_misc(wil, true);
187} 224}
188 225
226void wil_configure_interrupt_moderation_edma(struct wil6210_priv *wil)
227{
228 u32 moderation;
229
230 wil_s(wil, RGF_INT_GEN_IDLE_TIME_LIMIT, WIL_EDMA_IDLE_TIME_LIMIT_USEC);
231
232 wil_s(wil, RGF_INT_GEN_TIME_UNIT_LIMIT, WIL_EDMA_TIME_UNIT_CLK_CYCLES);
233
234 /* Update RX and TX moderation */
235 moderation = wil->rx_max_burst_duration |
236 (WIL_EDMA_AGG_WATERMARK << WIL_EDMA_AGG_WATERMARK_POS);
237 wil_w(wil, RGF_INT_CTRL_INT_GEN_CFG_0, moderation);
238 wil_w(wil, RGF_INT_CTRL_INT_GEN_CFG_1, moderation);
239
240 /* Treat special events as regular
241 * (set bit 0 to 0x1 and clear bits 1-8)
242 */
243 wil_c(wil, RGF_INT_COUNT_ON_SPECIAL_EVT, 0x1FE);
244 wil_s(wil, RGF_INT_COUNT_ON_SPECIAL_EVT, 0x1);
245}
246
189void wil_configure_interrupt_moderation(struct wil6210_priv *wil) 247void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
190{ 248{
191 struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr; 249 struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr;
@@ -294,6 +352,97 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
294 return IRQ_HANDLED; 352 return IRQ_HANDLED;
295} 353}
296 354
355static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie)
356{
357 struct wil6210_priv *wil = cookie;
358 u32 isr = wil_ioread32_and_clear(wil->csr +
359 HOSTADDR(RGF_INT_GEN_RX_ICR) +
360 offsetof(struct RGF_ICR, ICR));
361 bool need_unmask = true;
362
363 trace_wil6210_irq_rx(isr);
364 wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
365
366 if (unlikely(!isr)) {
367 wil_err(wil, "spurious IRQ: RX\n");
368 return IRQ_NONE;
369 }
370
371 wil6210_mask_irq_rx_edma(wil);
372
373 if (likely(isr & BIT_RX_STATUS_IRQ)) {
374 wil_dbg_irq(wil, "RX status ring\n");
375 isr &= ~BIT_RX_STATUS_IRQ;
376 if (likely(test_bit(wil_status_fwready, wil->status))) {
377 if (likely(test_bit(wil_status_napi_en, wil->status))) {
378 wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
379 need_unmask = false;
380 napi_schedule(&wil->napi_rx);
381 } else {
382 wil_err(wil,
383 "Got Rx interrupt while stopping interface\n");
384 }
385 } else {
386 wil_err(wil, "Got Rx interrupt while in reset\n");
387 }
388 }
389
390 if (unlikely(isr))
391 wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
392
393 /* Rx IRQ will be enabled when NAPI processing finished */
394
395 atomic_inc(&wil->isr_count_rx);
396
397 if (unlikely(need_unmask))
398 wil6210_unmask_irq_rx_edma(wil);
399
400 return IRQ_HANDLED;
401}
402
403static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie)
404{
405 struct wil6210_priv *wil = cookie;
406 u32 isr = wil_ioread32_and_clear(wil->csr +
407 HOSTADDR(RGF_INT_GEN_TX_ICR) +
408 offsetof(struct RGF_ICR, ICR));
409 bool need_unmask = true;
410
411 trace_wil6210_irq_tx(isr);
412 wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
413
414 if (unlikely(!isr)) {
415 wil_err(wil, "spurious IRQ: TX\n");
416 return IRQ_NONE;
417 }
418
419 wil6210_mask_irq_tx_edma(wil);
420
421 if (likely(isr & BIT_TX_STATUS_IRQ)) {
422 wil_dbg_irq(wil, "TX status ring\n");
423 isr &= ~BIT_TX_STATUS_IRQ;
424 if (likely(test_bit(wil_status_fwready, wil->status))) {
425 wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
426 need_unmask = false;
427 napi_schedule(&wil->napi_tx);
428 } else {
429 wil_err(wil, "Got Tx status ring IRQ while in reset\n");
430 }
431 }
432
433 if (unlikely(isr))
434 wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);
435
436 /* Tx IRQ will be enabled when NAPI processing finished */
437
438 atomic_inc(&wil->isr_count_tx);
439
440 if (unlikely(need_unmask))
441 wil6210_unmask_irq_tx_edma(wil);
442
443 return IRQ_HANDLED;
444}
445
297static irqreturn_t wil6210_irq_tx(int irq, void *cookie) 446static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
298{ 447{
299 struct wil6210_priv *wil = cookie; 448 struct wil6210_priv *wil = cookie;
@@ -510,30 +659,53 @@ static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
510 */ 659 */
511static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause) 660static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
512{ 661{
662 u32 icm_rx, icr_rx, imv_rx;
663 u32 icm_tx, icr_tx, imv_tx;
664 u32 icm_misc, icr_misc, imv_misc;
665
513 if (!test_bit(wil_status_irqen, wil->status)) { 666 if (!test_bit(wil_status_irqen, wil->status)) {
514 u32 icm_rx = wil_ioread32_and_clear(wil->csr + 667 if (wil->use_enhanced_dma_hw) {
515 HOSTADDR(RGF_DMA_EP_RX_ICR) + 668 icm_rx = wil_ioread32_and_clear(wil->csr +
516 offsetof(struct RGF_ICR, ICM)); 669 HOSTADDR(RGF_INT_GEN_RX_ICR) +
517 u32 icr_rx = wil_ioread32_and_clear(wil->csr + 670 offsetof(struct RGF_ICR, ICM));
518 HOSTADDR(RGF_DMA_EP_RX_ICR) + 671 icr_rx = wil_ioread32_and_clear(wil->csr +
519 offsetof(struct RGF_ICR, ICR)); 672 HOSTADDR(RGF_INT_GEN_RX_ICR) +
520 u32 imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR + 673 offsetof(struct RGF_ICR, ICR));
674 imv_rx = wil_r(wil, RGF_INT_GEN_RX_ICR +
521 offsetof(struct RGF_ICR, IMV)); 675 offsetof(struct RGF_ICR, IMV));
522 u32 icm_tx = wil_ioread32_and_clear(wil->csr + 676 icm_tx = wil_ioread32_and_clear(wil->csr +
523 HOSTADDR(RGF_DMA_EP_TX_ICR) + 677 HOSTADDR(RGF_INT_GEN_TX_ICR) +
524 offsetof(struct RGF_ICR, ICM)); 678 offsetof(struct RGF_ICR, ICM));
525 u32 icr_tx = wil_ioread32_and_clear(wil->csr + 679 icr_tx = wil_ioread32_and_clear(wil->csr +
526 HOSTADDR(RGF_DMA_EP_TX_ICR) + 680 HOSTADDR(RGF_INT_GEN_TX_ICR) +
527 offsetof(struct RGF_ICR, ICR)); 681 offsetof(struct RGF_ICR, ICR));
528 u32 imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR + 682 imv_tx = wil_r(wil, RGF_INT_GEN_TX_ICR +
683 offsetof(struct RGF_ICR, IMV));
684 } else {
685 icm_rx = wil_ioread32_and_clear(wil->csr +
686 HOSTADDR(RGF_DMA_EP_RX_ICR) +
687 offsetof(struct RGF_ICR, ICM));
688 icr_rx = wil_ioread32_and_clear(wil->csr +
689 HOSTADDR(RGF_DMA_EP_RX_ICR) +
690 offsetof(struct RGF_ICR, ICR));
691 imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR +
529 offsetof(struct RGF_ICR, IMV)); 692 offsetof(struct RGF_ICR, IMV));
530 u32 icm_misc = wil_ioread32_and_clear(wil->csr + 693 icm_tx = wil_ioread32_and_clear(wil->csr +
694 HOSTADDR(RGF_DMA_EP_TX_ICR) +
695 offsetof(struct RGF_ICR, ICM));
696 icr_tx = wil_ioread32_and_clear(wil->csr +
697 HOSTADDR(RGF_DMA_EP_TX_ICR) +
698 offsetof(struct RGF_ICR, ICR));
699 imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR +
700 offsetof(struct RGF_ICR, IMV));
701 }
702 icm_misc = wil_ioread32_and_clear(wil->csr +
531 HOSTADDR(RGF_DMA_EP_MISC_ICR) + 703 HOSTADDR(RGF_DMA_EP_MISC_ICR) +
532 offsetof(struct RGF_ICR, ICM)); 704 offsetof(struct RGF_ICR, ICM));
533 u32 icr_misc = wil_ioread32_and_clear(wil->csr + 705 icr_misc = wil_ioread32_and_clear(wil->csr +
534 HOSTADDR(RGF_DMA_EP_MISC_ICR) + 706 HOSTADDR(RGF_DMA_EP_MISC_ICR) +
535 offsetof(struct RGF_ICR, ICR)); 707 offsetof(struct RGF_ICR, ICR));
536 u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR + 708 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
537 offsetof(struct RGF_ICR, IMV)); 709 offsetof(struct RGF_ICR, IMV));
538 710
539 /* HALP interrupt can be unmasked when misc interrupts are 711 /* HALP interrupt can be unmasked when misc interrupts are
@@ -592,11 +764,11 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
592 * voting for wake thread - need at least 1 vote 764 * voting for wake thread - need at least 1 vote
593 */ 765 */
594 if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_RX) && 766 if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_RX) &&
595 (wil6210_irq_rx(irq, cookie) == IRQ_WAKE_THREAD)) 767 (wil->txrx_ops.irq_rx(irq, cookie) == IRQ_WAKE_THREAD))
596 rc = IRQ_WAKE_THREAD; 768 rc = IRQ_WAKE_THREAD;
597 769
598 if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_TX) && 770 if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_TX) &&
599 (wil6210_irq_tx(irq, cookie) == IRQ_WAKE_THREAD)) 771 (wil->txrx_ops.irq_tx(irq, cookie) == IRQ_WAKE_THREAD))
600 rc = IRQ_WAKE_THREAD; 772 rc = IRQ_WAKE_THREAD;
601 773
602 if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_MISC) && 774 if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_MISC) &&
@@ -624,6 +796,10 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
624 offsetof(struct RGF_ICR, ICR)); 796 offsetof(struct RGF_ICR, ICR));
625 wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) + 797 wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
626 offsetof(struct RGF_ICR, ICR)); 798 offsetof(struct RGF_ICR, ICR));
799 wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_RX_ICR) +
800 offsetof(struct RGF_ICR, ICR));
801 wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_TX_ICR) +
802 offsetof(struct RGF_ICR, ICR));
627 wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) + 803 wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
628 offsetof(struct RGF_ICR, ICR)); 804 offsetof(struct RGF_ICR, ICR));
629 wmb(); /* make sure write completed */ 805 wmb(); /* make sure write completed */
@@ -652,6 +828,13 @@ int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
652 828
653 wil_dbg_misc(wil, "init_irq: %s\n", use_msi ? "MSI" : "INTx"); 829 wil_dbg_misc(wil, "init_irq: %s\n", use_msi ? "MSI" : "INTx");
654 830
831 if (wil->use_enhanced_dma_hw) {
832 wil->txrx_ops.irq_tx = wil6210_irq_tx_edma;
833 wil->txrx_ops.irq_rx = wil6210_irq_rx_edma;
834 } else {
835 wil->txrx_ops.irq_tx = wil6210_irq_tx;
836 wil->txrx_ops.irq_rx = wil6210_irq_rx;
837 }
655 rc = request_threaded_irq(irq, wil6210_hardirq, 838 rc = request_threaded_irq(irq, wil6210_hardirq,
656 wil6210_thread_irq, 839 wil6210_thread_irq,
657 use_msi ? 0 : IRQF_SHARED, 840 use_msi ? 0 : IRQF_SHARED,
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index e7006c2428a0..4de19bd40a58 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -21,11 +21,13 @@
21 21
22#include "wil6210.h" 22#include "wil6210.h"
23#include "txrx.h" 23#include "txrx.h"
24#include "txrx_edma.h"
24#include "wmi.h" 25#include "wmi.h"
25#include "boot_loader.h" 26#include "boot_loader.h"
26 27
27#define WAIT_FOR_HALP_VOTE_MS 100 28#define WAIT_FOR_HALP_VOTE_MS 100
28#define WAIT_FOR_SCAN_ABORT_MS 1000 29#define WAIT_FOR_SCAN_ABORT_MS 1000
30#define WIL_DEFAULT_NUM_RX_STATUS_RINGS 1
29 31
30bool debug_fw; /* = false; */ 32bool debug_fw; /* = false; */
31module_param(debug_fw, bool, 0444); 33module_param(debug_fw, bool, 0444);
@@ -110,9 +112,29 @@ MODULE_PARM_DESC(tx_ring_order, " Tx ring order; size = 1 << order");
110module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, 0444); 112module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, 0444);
111MODULE_PARM_DESC(bcast_ring_order, " Bcast ring order; size = 1 << order"); 113MODULE_PARM_DESC(bcast_ring_order, " Bcast ring order; size = 1 << order");
112 114
113#define RST_DELAY (20) /* msec, for loop in @wil_target_reset */ 115enum {
116 WIL_BOOT_ERR,
117 WIL_BOOT_VANILLA,
118 WIL_BOOT_PRODUCTION,
119 WIL_BOOT_DEVELOPMENT,
120};
121
122enum {
123 WIL_SIG_STATUS_VANILLA = 0x0,
124 WIL_SIG_STATUS_DEVELOPMENT = 0x1,
125 WIL_SIG_STATUS_PRODUCTION = 0x2,
126 WIL_SIG_STATUS_CORRUPTED_PRODUCTION = 0x3,
127};
128
129#define RST_DELAY (20) /* msec, for loop in @wil_wait_device_ready */
114#define RST_COUNT (1 + 1000/RST_DELAY) /* round up to be above 1 sec total */ 130#define RST_COUNT (1 + 1000/RST_DELAY) /* round up to be above 1 sec total */
115 131
132#define PMU_READY_DELAY_MS (4) /* ms, for sleep in @wil_wait_device_ready */
133
134#define OTP_HW_DELAY (200) /* usec, loop in @wil_wait_device_ready_talyn_mb */
135/* round up to be above 2 ms total */
136#define OTP_HW_COUNT (1 + 2000 / OTP_HW_DELAY)
137
116/* 138/*
117 * Due to a hardware issue, 139 * Due to a hardware issue,
118 * one has to read/write to/from NIC in 32-bit chunks; 140 * one has to read/write to/from NIC in 32-bit chunks;
@@ -160,6 +182,37 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
160 } 182 }
161} 183}
162 184
185static void wil_ring_fini_tx(struct wil6210_priv *wil, int id)
186{
187 struct wil_ring *ring = &wil->ring_tx[id];
188 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
189
190 lockdep_assert_held(&wil->mutex);
191
192 if (!ring->va)
193 return;
194
195 wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);
196
197 spin_lock_bh(&txdata->lock);
198 txdata->dot1x_open = false;
199 txdata->mid = U8_MAX;
200 txdata->enabled = 0; /* no Tx can be in progress or start anew */
201 spin_unlock_bh(&txdata->lock);
202 /* napi_synchronize waits for completion of the current NAPI but will
203 * not prevent the next NAPI run.
204 * Add a memory barrier to guarantee that txdata->enabled is zeroed
205 * before napi_synchronize so that the next scheduled NAPI will not
206 * handle this vring
207 */
208 wmb();
209 /* make sure NAPI won't touch this vring */
210 if (test_bit(wil_status_napi_en, wil->status))
211 napi_synchronize(&wil->napi_tx);
212
213 wil->txrx_ops.ring_fini_tx(wil, ring);
214}
215
163static void wil_disconnect_cid(struct wil6210_vif *vif, int cid, 216static void wil_disconnect_cid(struct wil6210_vif *vif, int cid,
164 u16 reason_code, bool from_event) 217 u16 reason_code, bool from_event)
165__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) 218__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
@@ -219,9 +272,9 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
219 memset(sta->tid_crypto_rx, 0, sizeof(sta->tid_crypto_rx)); 272 memset(sta->tid_crypto_rx, 0, sizeof(sta->tid_crypto_rx));
220 memset(&sta->group_crypto_rx, 0, sizeof(sta->group_crypto_rx)); 273 memset(&sta->group_crypto_rx, 0, sizeof(sta->group_crypto_rx));
221 /* release vrings */ 274 /* release vrings */
222 for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) { 275 for (i = 0; i < ARRAY_SIZE(wil->ring_tx); i++) {
223 if (wil->vring2cid_tid[i][0] == cid) 276 if (wil->ring2cid_tid[i][0] == cid)
224 wil_vring_fini_tx(wil, i); 277 wil_ring_fini_tx(wil, i);
225 } 278 }
226 /* statistics */ 279 /* statistics */
227 memset(&sta->stats, 0, sizeof(sta->stats)); 280 memset(&sta->stats, 0, sizeof(sta->stats));
@@ -453,18 +506,19 @@ static void wil_fw_error_worker(struct work_struct *work)
453 mutex_unlock(&wil->mutex); 506 mutex_unlock(&wil->mutex);
454} 507}
455 508
456static int wil_find_free_vring(struct wil6210_priv *wil) 509static int wil_find_free_ring(struct wil6210_priv *wil)
457{ 510{
458 int i; 511 int i;
512 int min_ring_id = wil_get_min_tx_ring_id(wil);
459 513
460 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { 514 for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
461 if (!wil->vring_tx[i].va) 515 if (!wil->ring_tx[i].va)
462 return i; 516 return i;
463 } 517 }
464 return -EINVAL; 518 return -EINVAL;
465} 519}
466 520
467int wil_tx_init(struct wil6210_vif *vif, int cid) 521int wil_ring_init_tx(struct wil6210_vif *vif, int cid)
468{ 522{
469 struct wil6210_priv *wil = vif_to_wil(vif); 523 struct wil6210_priv *wil = vif_to_wil(vif);
470 int rc = -EINVAL, ringid; 524 int rc = -EINVAL, ringid;
@@ -473,16 +527,17 @@ int wil_tx_init(struct wil6210_vif *vif, int cid)
473 wil_err(wil, "No connection pending\n"); 527 wil_err(wil, "No connection pending\n");
474 goto out; 528 goto out;
475 } 529 }
476 ringid = wil_find_free_vring(wil); 530 ringid = wil_find_free_ring(wil);
477 if (ringid < 0) { 531 if (ringid < 0) {
478 wil_err(wil, "No free vring found\n"); 532 wil_err(wil, "No free vring found\n");
479 goto out; 533 goto out;
480 } 534 }
481 535
482 wil_dbg_wmi(wil, "Configure for connection CID %d MID %d vring %d\n", 536 wil_dbg_wmi(wil, "Configure for connection CID %d MID %d ring %d\n",
483 cid, vif->mid, ringid); 537 cid, vif->mid, ringid);
484 538
485 rc = wil_vring_init_tx(vif, ringid, 1 << tx_ring_order, cid, 0); 539 rc = wil->txrx_ops.ring_init_tx(vif, ringid, 1 << tx_ring_order,
540 cid, 0);
486 if (rc) 541 if (rc)
487 wil_err(wil, "init TX for CID %d MID %d vring %d failed\n", 542 wil_err(wil, "init TX for CID %d MID %d vring %d failed\n",
488 cid, vif->mid, ringid); 543 cid, vif->mid, ringid);
@@ -494,19 +549,19 @@ out:
494int wil_bcast_init(struct wil6210_vif *vif) 549int wil_bcast_init(struct wil6210_vif *vif)
495{ 550{
496 struct wil6210_priv *wil = vif_to_wil(vif); 551 struct wil6210_priv *wil = vif_to_wil(vif);
497 int ri = vif->bcast_vring, rc; 552 int ri = vif->bcast_ring, rc;
498 553
499 if ((ri >= 0) && wil->vring_tx[ri].va) 554 if (ri >= 0 && wil->ring_tx[ri].va)
500 return 0; 555 return 0;
501 556
502 ri = wil_find_free_vring(wil); 557 ri = wil_find_free_ring(wil);
503 if (ri < 0) 558 if (ri < 0)
504 return ri; 559 return ri;
505 560
506 vif->bcast_vring = ri; 561 vif->bcast_ring = ri;
507 rc = wil_vring_init_bcast(vif, ri, 1 << bcast_ring_order); 562 rc = wil->txrx_ops.ring_init_bcast(vif, ri, 1 << bcast_ring_order);
508 if (rc) 563 if (rc)
509 vif->bcast_vring = -1; 564 vif->bcast_ring = -1;
510 565
511 return rc; 566 return rc;
512} 567}
@@ -514,13 +569,13 @@ int wil_bcast_init(struct wil6210_vif *vif)
514void wil_bcast_fini(struct wil6210_vif *vif) 569void wil_bcast_fini(struct wil6210_vif *vif)
515{ 570{
516 struct wil6210_priv *wil = vif_to_wil(vif); 571 struct wil6210_priv *wil = vif_to_wil(vif);
517 int ri = vif->bcast_vring; 572 int ri = vif->bcast_ring;
518 573
519 if (ri < 0) 574 if (ri < 0)
520 return; 575 return;
521 576
522 vif->bcast_vring = -1; 577 vif->bcast_ring = -1;
523 wil_vring_fini_tx(wil, ri); 578 wil_ring_fini_tx(wil, ri);
524} 579}
525 580
526void wil_bcast_fini_all(struct wil6210_priv *wil) 581void wil_bcast_fini_all(struct wil6210_priv *wil)
@@ -548,7 +603,7 @@ int wil_priv_init(struct wil6210_priv *wil)
548 } 603 }
549 604
550 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) 605 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++)
551 spin_lock_init(&wil->vring_tx_data[i].lock); 606 spin_lock_init(&wil->ring_tx_data[i].lock);
552 607
553 mutex_init(&wil->mutex); 608 mutex_init(&wil->mutex);
554 mutex_init(&wil->vif_mutex); 609 mutex_init(&wil->vif_mutex);
@@ -589,11 +644,30 @@ int wil_priv_init(struct wil6210_priv *wil)
589 wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST | 644 wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
590 WMI_WAKEUP_TRIGGER_BCAST; 645 WMI_WAKEUP_TRIGGER_BCAST;
591 memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats)); 646 memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
592 wil->vring_idle_trsh = 16; 647 wil->ring_idle_trsh = 16;
593 648
594 wil->reply_mid = U8_MAX; 649 wil->reply_mid = U8_MAX;
595 wil->max_vifs = 1; 650 wil->max_vifs = 1;
596 651
652 /* edma configuration can be updated via debugfs before allocation */
653 wil->num_rx_status_rings = WIL_DEFAULT_NUM_RX_STATUS_RINGS;
654 wil->use_compressed_rx_status = true;
655 wil->use_rx_hw_reordering = true;
656 wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;
657
658 /* Rx status ring size should be bigger than the number of RX buffers
659 * in order to prevent backpressure on the status ring, which may
660 * cause HW freeze.
661 */
662 wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;
663 /* Number of RX buffer IDs should be bigger than the RX descriptor
664 * ring size as in HW reorder flow, the HW can consume additional
665 * buffers before releasing the previous ones.
666 */
667 wil->rx_buff_id_count = WIL_RX_BUFF_ARR_SIZE_DEFAULT;
668
669 wil->amsdu_en = 1;
670
597 return 0; 671 return 0;
598 672
599out_wmi_wq: 673out_wmi_wq:
@@ -736,14 +810,24 @@ static void wil_bl_prepare_halt(struct wil6210_priv *wil)
736 810
737static inline void wil_halt_cpu(struct wil6210_priv *wil) 811static inline void wil_halt_cpu(struct wil6210_priv *wil)
738{ 812{
739 wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST); 813 if (wil->hw_version >= HW_VER_TALYN_MB) {
740 wil_w(wil, RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST); 814 wil_w(wil, RGF_USER_USER_CPU_0_TALYN_MB,
815 BIT_USER_USER_CPU_MAN_RST);
816 wil_w(wil, RGF_USER_MAC_CPU_0_TALYN_MB,
817 BIT_USER_MAC_CPU_MAN_RST);
818 } else {
819 wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
820 wil_w(wil, RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST);
821 }
741} 822}
742 823
743static inline void wil_release_cpu(struct wil6210_priv *wil) 824static inline void wil_release_cpu(struct wil6210_priv *wil)
744{ 825{
745 /* Start CPU */ 826 /* Start CPU */
746 wil_w(wil, RGF_USER_USER_CPU_0, 1); 827 if (wil->hw_version >= HW_VER_TALYN_MB)
828 wil_w(wil, RGF_USER_USER_CPU_0_TALYN_MB, 1);
829 else
830 wil_w(wil, RGF_USER_USER_CPU_0, 1);
747} 831}
748 832
749static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode) 833static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode)
@@ -767,11 +851,146 @@ static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode)
767 } 851 }
768} 852}
769 853
770static int wil_target_reset(struct wil6210_priv *wil, int no_flash) 854static int wil_wait_device_ready(struct wil6210_priv *wil, int no_flash)
771{ 855{
772 int delay = 0; 856 int delay = 0;
773 u32 x, x1 = 0; 857 u32 x, x1 = 0;
774 858
859 /* wait until device ready. */
860 if (no_flash) {
861 msleep(PMU_READY_DELAY_MS);
862
863 wil_dbg_misc(wil, "Reset completed\n");
864 } else {
865 do {
866 msleep(RST_DELAY);
867 x = wil_r(wil, RGF_USER_BL +
868 offsetof(struct bl_dedicated_registers_v0,
869 boot_loader_ready));
870 if (x1 != x) {
871 wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n",
872 x1, x);
873 x1 = x;
874 }
875 if (delay++ > RST_COUNT) {
876 wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
877 x);
878 return -ETIME;
879 }
880 } while (x != BL_READY);
881
882 wil_dbg_misc(wil, "Reset completed in %d ms\n",
883 delay * RST_DELAY);
884 }
885
886 return 0;
887}
888
889static int wil_wait_device_ready_talyn_mb(struct wil6210_priv *wil)
890{
891 u32 otp_hw;
892 u8 signature_status;
893 bool otp_signature_err;
894 bool hw_section_done;
895 u32 otp_qc_secured;
896 int delay = 0;
897
898 /* Wait for OTP signature test to complete */
899 usleep_range(2000, 2200);
900
901 wil->boot_config = WIL_BOOT_ERR;
902
903 /* Poll until OTP signature status is valid.
904 * In vanilla and development modes, when signature test is complete
905 * HW sets BIT_OTP_SIGNATURE_ERR_TALYN_MB.
906 * In production mode BIT_OTP_SIGNATURE_ERR_TALYN_MB remains 0, poll
907 * for signature status change to 2 or 3.
908 */
909 do {
910 otp_hw = wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1);
911 signature_status = WIL_GET_BITS(otp_hw, 8, 9);
912 otp_signature_err = otp_hw & BIT_OTP_SIGNATURE_ERR_TALYN_MB;
913
914 if (otp_signature_err &&
915 signature_status == WIL_SIG_STATUS_VANILLA) {
916 wil->boot_config = WIL_BOOT_VANILLA;
917 break;
918 }
919 if (otp_signature_err &&
920 signature_status == WIL_SIG_STATUS_DEVELOPMENT) {
921 wil->boot_config = WIL_BOOT_DEVELOPMENT;
922 break;
923 }
924 if (!otp_signature_err &&
925 signature_status == WIL_SIG_STATUS_PRODUCTION) {
926 wil->boot_config = WIL_BOOT_PRODUCTION;
927 break;
928 }
929 if (!otp_signature_err &&
930 signature_status ==
931 WIL_SIG_STATUS_CORRUPTED_PRODUCTION) {
932 /* Unrecognized OTP signature found. Possibly a
933 * corrupted production signature, access control
934 * is applied as in production mode, therefore
935 * do not fail
936 */
937 wil->boot_config = WIL_BOOT_PRODUCTION;
938 break;
939 }
940 if (delay++ > OTP_HW_COUNT)
941 break;
942
943 usleep_range(OTP_HW_DELAY, OTP_HW_DELAY + 10);
944 } while (!otp_signature_err && signature_status == 0);
945
946 if (wil->boot_config == WIL_BOOT_ERR) {
947 wil_err(wil,
948 "invalid boot config, signature_status %d otp_signature_err %d\n",
949 signature_status, otp_signature_err);
950 return -ETIME;
951 }
952
953 wil_dbg_misc(wil,
954 "signature test done in %d usec, otp_hw 0x%x, boot_config %d\n",
955 delay * OTP_HW_DELAY, otp_hw, wil->boot_config);
956
957 if (wil->boot_config == WIL_BOOT_VANILLA)
958 /* Assuming not SPI boot (currently not supported) */
959 goto out;
960
961 hw_section_done = otp_hw & BIT_OTP_HW_SECTION_DONE_TALYN_MB;
962 delay = 0;
963
964 while (!hw_section_done) {
965 msleep(RST_DELAY);
966
967 otp_hw = wil_r(wil, RGF_USER_OTP_HW_RD_MACHINE_1);
968 hw_section_done = otp_hw & BIT_OTP_HW_SECTION_DONE_TALYN_MB;
969
970 if (delay++ > RST_COUNT) {
971 wil_err(wil, "TO waiting for hw_section_done\n");
972 return -ETIME;
973 }
974 }
975
976 wil_dbg_misc(wil, "HW section done in %d ms\n", delay * RST_DELAY);
977
978 otp_qc_secured = wil_r(wil, RGF_OTP_QC_SECURED);
979 wil->secured_boot = otp_qc_secured & BIT_BOOT_FROM_ROM ? 1 : 0;
980 wil_dbg_misc(wil, "secured boot is %sabled\n",
981 wil->secured_boot ? "en" : "dis");
982
983out:
984 wil_dbg_misc(wil, "Reset completed\n");
985
986 return 0;
987}
988
989static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
990{
991 u32 x;
992 int rc;
993
775 wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name); 994 wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
776 995
777 /* Clear MAC link up */ 996 /* Clear MAC link up */
@@ -811,10 +1030,17 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
811 wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f); 1030 wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
812 wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf); 1031 wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
813 1032
814 wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000); 1033 if (wil->hw_version >= HW_VER_TALYN_MB) {
815 wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F); 1034 wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x7e000000);
816 wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0); 1035 wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003f);
817 wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00); 1036 wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0xc00000f0);
1037 wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xffe7fe00);
1038 } else {
1039 wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xfe000000);
1040 wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003f);
1041 wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
1042 wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xffe7fe00);
1043 }
818 1044
819 wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0); 1045 wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
820 wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0); 1046 wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
@@ -830,34 +1056,12 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
830 1056
831 wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); 1057 wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
832 1058
833 /* wait until device ready. typical time is 20..80 msec */ 1059 if (wil->hw_version == HW_VER_TALYN_MB)
834 if (no_flash) 1060 rc = wil_wait_device_ready_talyn_mb(wil);
835 do {
836 msleep(RST_DELAY);
837 x = wil_r(wil, USER_EXT_USER_PMU_3);
838 if (delay++ > RST_COUNT) {
839 wil_err(wil, "Reset not completed, PMU_3 0x%08x\n",
840 x);
841 return -ETIME;
842 }
843 } while ((x & BIT_PMU_DEVICE_RDY) == 0);
844 else 1061 else
845 do { 1062 rc = wil_wait_device_ready(wil, no_flash);
846 msleep(RST_DELAY); 1063 if (rc)
847 x = wil_r(wil, RGF_USER_BL + 1064 return rc;
848 offsetof(struct bl_dedicated_registers_v0,
849 boot_loader_ready));
850 if (x1 != x) {
851 wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n",
852 x1, x);
853 x1 = x;
854 }
855 if (delay++ > RST_COUNT) {
856 wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
857 x);
858 return -ETIME;
859 }
860 } while (x != BL_READY);
861 1065
862 wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD); 1066 wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
863 1067
@@ -865,7 +1069,7 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
865 wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN | 1069 wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
866 BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC); 1070 BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
867 1071
868 if (no_flash) { 1072 if (wil->hw_version < HW_VER_TALYN_MB && no_flash) {
869 /* Reset OTP HW vectors to fit 40MHz */ 1073 /* Reset OTP HW vectors to fit 40MHz */
870 wil_w(wil, RGF_USER_XPM_IFC_RD_TIME1, 0x60001); 1074 wil_w(wil, RGF_USER_XPM_IFC_RD_TIME1, 0x60001);
871 wil_w(wil, RGF_USER_XPM_IFC_RD_TIME2, 0x20027); 1075 wil_w(wil, RGF_USER_XPM_IFC_RD_TIME2, 0x20027);
@@ -880,7 +1084,6 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
880 wil_w(wil, RGF_USER_XPM_RD_DOUT_SAMPLE_TIME, 0x57); 1084 wil_w(wil, RGF_USER_XPM_RD_DOUT_SAMPLE_TIME, 0x57);
881 } 1085 }
882 1086
883 wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
884 return 0; 1087 return 0;
885} 1088}
886 1089
@@ -1042,8 +1245,14 @@ static int wil_get_otp_info(struct wil6210_priv *wil)
1042 struct net_device *ndev = wil->main_ndev; 1245 struct net_device *ndev = wil->main_ndev;
1043 struct wiphy *wiphy = wil_to_wiphy(wil); 1246 struct wiphy *wiphy = wil_to_wiphy(wil);
1044 u8 mac[8]; 1247 u8 mac[8];
1248 int mac_addr;
1249
1250 if (wil->hw_version >= HW_VER_TALYN_MB)
1251 mac_addr = RGF_OTP_MAC_TALYN_MB;
1252 else
1253 mac_addr = RGF_OTP_MAC;
1045 1254
1046 wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(RGF_OTP_MAC), 1255 wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr),
1047 sizeof(mac)); 1256 sizeof(mac));
1048 if (!is_valid_ether_addr(mac)) { 1257 if (!is_valid_ether_addr(mac)) {
1049 wil_err(wil, "Invalid MAC %pM\n", mac); 1258 wil_err(wil, "Invalid MAC %pM\n", mac);
@@ -1147,8 +1356,13 @@ static void wil_pre_fw_config(struct wil6210_priv *wil)
1147 /* it is W1C, clear by writing back same value */ 1356 /* it is W1C, clear by writing back same value */
1148 wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0); 1357 wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
1149 wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0); 1358 wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
1150 /* clear PAL_UNIT_ICR (potential D0->D3 leftover) */ 1359 /* clear PAL_UNIT_ICR (potential D0->D3 leftover)
1151 wil_s(wil, RGF_PAL_UNIT_ICR + offsetof(struct RGF_ICR, ICR), 0); 1360 * In Talyn-MB host cannot access this register due to
1361 * access control, hence PAL_UNIT_ICR is cleared by the FW
1362 */
1363 if (wil->hw_version < HW_VER_TALYN_MB)
1364 wil_s(wil, RGF_PAL_UNIT_ICR + offsetof(struct RGF_ICR, ICR),
1365 0);
1152 1366
1153 if (wil->fw_calib_result > 0) { 1367 if (wil->fw_calib_result > 0) {
1154 __le32 val = cpu_to_le32(wil->fw_calib_result | 1368 __le32 val = cpu_to_le32(wil->fw_calib_result |
@@ -1284,7 +1498,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
1284 rc = wil_target_reset(wil, no_flash); 1498 rc = wil_target_reset(wil, no_flash);
1285 wil6210_clear_irq(wil); 1499 wil6210_clear_irq(wil);
1286 wil_enable_irq(wil); 1500 wil_enable_irq(wil);
1287 wil_rx_fini(wil); 1501 wil->txrx_ops.rx_fini(wil);
1502 wil->txrx_ops.tx_fini(wil);
1288 if (rc) { 1503 if (rc) {
1289 if (!no_flash) 1504 if (!no_flash)
1290 wil_bl_crash_info(wil, true); 1505 wil_bl_crash_info(wil, true);
@@ -1337,7 +1552,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
1337 clear_bit(wil_status_resetting, wil->status); 1552 clear_bit(wil_status_resetting, wil->status);
1338 1553
1339 if (load_fw) { 1554 if (load_fw) {
1340 wil_configure_interrupt_moderation(wil);
1341 wil_unmask_irq(wil); 1555 wil_unmask_irq(wil);
1342 1556
1343 /* we just started MAC, wait for FW ready */ 1557 /* we just started MAC, wait for FW ready */
@@ -1352,6 +1566,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
1352 return rc; 1566 return rc;
1353 } 1567 }
1354 1568
1569 wil->txrx_ops.configure_interrupt_moderation(wil);
1570
1355 rc = wil_restore_vifs(wil); 1571 rc = wil_restore_vifs(wil);
1356 if (rc) { 1572 if (rc) {
1357 wil_err(wil, "failed to restore vifs, rc %d\n", rc); 1573 wil_err(wil, "failed to restore vifs, rc %d\n", rc);
@@ -1406,8 +1622,12 @@ int __wil_up(struct wil6210_priv *wil)
1406 if (rc) 1622 if (rc)
1407 return rc; 1623 return rc;
1408 1624
1409 /* Rx VRING. After MAC and beacon */ 1625 /* Rx RING. After MAC and beacon */
1410 rc = wil_rx_init(wil, 1 << rx_ring_order); 1626 rc = wil->txrx_ops.rx_init(wil, 1 << rx_ring_order);
1627 if (rc)
1628 return rc;
1629
1630 rc = wil->txrx_ops.tx_init(wil);
1411 if (rc) 1631 if (rc)
1412 return rc; 1632 return rc;
1413 1633
@@ -1568,3 +1788,11 @@ void wil_halp_unvote(struct wil6210_priv *wil)
1568 1788
1569 mutex_unlock(&wil->halp.lock); 1789 mutex_unlock(&wil->halp.lock);
1570} 1790}
1791
1792void wil_init_txrx_ops(struct wil6210_priv *wil)
1793{
1794 if (wil->use_enhanced_dma_hw)
1795 wil_init_txrx_ops_edma(wil);
1796 else
1797 wil_init_txrx_ops_legacy_dma(wil);
1798}
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index eb6c14ed65a4..7a78a06bd356 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -120,6 +120,27 @@ static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
120 return done; 120 return done;
121} 121}
122 122
123static int wil6210_netdev_poll_rx_edma(struct napi_struct *napi, int budget)
124{
125 struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
126 napi_rx);
127 int quota = budget;
128 int done;
129
130 wil_rx_handle_edma(wil, &quota);
131 done = budget - quota;
132
133 if (done < budget) {
134 napi_complete_done(napi, done);
135 wil6210_unmask_irq_rx_edma(wil);
136 wil_dbg_txrx(wil, "NAPI RX complete\n");
137 }
138
139 wil_dbg_txrx(wil, "NAPI RX poll(%d) done %d\n", budget, done);
140
141 return done;
142}
143
123static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget) 144static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
124{ 145{
125 struct wil6210_priv *wil = container_of(napi, struct wil6210_priv, 146 struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
@@ -129,11 +150,11 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
129 150
130 /* always process ALL Tx complete, regardless budget - it is fast */ 151 /* always process ALL Tx complete, regardless budget - it is fast */
131 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { 152 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
132 struct vring *vring = &wil->vring_tx[i]; 153 struct wil_ring *ring = &wil->ring_tx[i];
133 struct vring_tx_data *txdata = &wil->vring_tx_data[i]; 154 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
134 struct wil6210_vif *vif; 155 struct wil6210_vif *vif;
135 156
136 if (!vring->va || !txdata->enabled || 157 if (!ring->va || !txdata->enabled ||
137 txdata->mid >= wil->max_vifs) 158 txdata->mid >= wil->max_vifs)
138 continue; 159 continue;
139 160
@@ -157,6 +178,30 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
157 return min(tx_done, budget); 178 return min(tx_done, budget);
158} 179}
159 180
181static int wil6210_netdev_poll_tx_edma(struct napi_struct *napi, int budget)
182{
183 struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
184 napi_tx);
185 int tx_done;
186 /* There is only one status TX ring */
187 struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx];
188
189 if (!sring->va)
190 return 0;
191
192 tx_done = wil_tx_sring_handler(wil, sring);
193
194 if (tx_done < budget) {
195 napi_complete(napi);
196 wil6210_unmask_irq_tx_edma(wil);
197 wil_dbg_txrx(wil, "NAPI TX complete\n");
198 }
199
200 wil_dbg_txrx(wil, "NAPI TX poll(%d) done %d\n", budget, tx_done);
201
202 return min(tx_done, budget);
203}
204
160static void wil_dev_setup(struct net_device *dev) 205static void wil_dev_setup(struct net_device *dev)
161{ 206{
162 ether_setup(dev); 207 ether_setup(dev);
@@ -228,7 +273,7 @@ static void wil_p2p_discovery_timer_fn(struct timer_list *t)
228 273
229static void wil_vif_init(struct wil6210_vif *vif) 274static void wil_vif_init(struct wil6210_vif *vif)
230{ 275{
231 vif->bcast_vring = -1; 276 vif->bcast_ring = -1;
232 277
233 mutex_init(&vif->probe_client_mutex); 278 mutex_init(&vif->probe_client_mutex);
234 279
@@ -418,11 +463,21 @@ int wil_if_add(struct wil6210_priv *wil)
418 } 463 }
419 464
420 init_dummy_netdev(&wil->napi_ndev); 465 init_dummy_netdev(&wil->napi_ndev);
421 netif_napi_add(&wil->napi_ndev, &wil->napi_rx, wil6210_netdev_poll_rx, 466 if (wil->use_enhanced_dma_hw) {
422 WIL6210_NAPI_BUDGET); 467 netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
423 netif_tx_napi_add(&wil->napi_ndev, 468 wil6210_netdev_poll_rx_edma,
424 &wil->napi_tx, wil6210_netdev_poll_tx, 469 WIL6210_NAPI_BUDGET);
425 WIL6210_NAPI_BUDGET); 470 netif_tx_napi_add(&wil->napi_ndev,
471 &wil->napi_tx, wil6210_netdev_poll_tx_edma,
472 WIL6210_NAPI_BUDGET);
473 } else {
474 netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
475 wil6210_netdev_poll_rx,
476 WIL6210_NAPI_BUDGET);
477 netif_tx_napi_add(&wil->napi_ndev,
478 &wil->napi_tx, wil6210_netdev_poll_tx,
479 WIL6210_NAPI_BUDGET);
480 }
426 481
427 wil_update_net_queues_bh(wil, vif, NULL, true); 482 wil_update_net_queues_bh(wil, vif, NULL, true);
428 483
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 19cbc6add637..8b148cb91372 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -85,7 +85,7 @@ int wil_set_capabilities(struct wil6210_priv *wil)
85 wil->rgf_ucode_assert_code_addr = SPARROW_RGF_UCODE_ASSERT_CODE; 85 wil->rgf_ucode_assert_code_addr = SPARROW_RGF_UCODE_ASSERT_CODE;
86 break; 86 break;
87 case JTAG_DEV_ID_TALYN: 87 case JTAG_DEV_ID_TALYN:
88 wil->hw_name = "Talyn"; 88 wil->hw_name = "Talyn-MA";
89 wil->hw_version = HW_VER_TALYN; 89 wil->hw_version = HW_VER_TALYN;
90 memcpy(fw_mapping, talyn_fw_mapping, sizeof(talyn_fw_mapping)); 90 memcpy(fw_mapping, talyn_fw_mapping, sizeof(talyn_fw_mapping));
91 wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE; 91 wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
@@ -94,6 +94,17 @@ int wil_set_capabilities(struct wil6210_priv *wil)
94 BIT_NO_FLASH_INDICATION) 94 BIT_NO_FLASH_INDICATION)
95 set_bit(hw_capa_no_flash, wil->hw_capa); 95 set_bit(hw_capa_no_flash, wil->hw_capa);
96 break; 96 break;
97 case JTAG_DEV_ID_TALYN_MB:
98 wil->hw_name = "Talyn-MB";
99 wil->hw_version = HW_VER_TALYN_MB;
100 memcpy(fw_mapping, talyn_mb_fw_mapping,
101 sizeof(talyn_mb_fw_mapping));
102 wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
103 wil->rgf_ucode_assert_code_addr = TALYN_RGF_UCODE_ASSERT_CODE;
104 set_bit(hw_capa_no_flash, wil->hw_capa);
105 wil->use_enhanced_dma_hw = true;
106 wil->use_rx_hw_reordering = true;
107 break;
97 default: 108 default:
98 wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n", 109 wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n",
99 jtag_id, chip_revision); 110 jtag_id, chip_revision);
@@ -102,6 +113,8 @@ int wil_set_capabilities(struct wil6210_priv *wil)
102 return -EINVAL; 113 return -EINVAL;
103 } 114 }
104 115
116 wil_init_txrx_ops(wil);
117
105 iccm_section = wil_find_fw_mapping("fw_code"); 118 iccm_section = wil_find_fw_mapping("fw_code");
106 if (!iccm_section) { 119 if (!iccm_section) {
107 wil_err(wil, "fw_code section not found in fw_mapping\n"); 120 wil_err(wil, "fw_code section not found in fw_mapping\n");
@@ -257,8 +270,8 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
257 .fw_recovery = wil_platform_rop_fw_recovery, 270 .fw_recovery = wil_platform_rop_fw_recovery,
258 }; 271 };
259 u32 bar_size = pci_resource_len(pdev, 0); 272 u32 bar_size = pci_resource_len(pdev, 0);
260 int dma_addr_size[] = {48, 40, 32}; /* keep descending order */ 273 int dma_addr_size[] = {64, 48, 40, 32}; /* keep descending order */
261 int i; 274 int i, start_idx;
262 275
263 /* check HW */ 276 /* check HW */
264 dev_info(&pdev->dev, WIL_NAME 277 dev_info(&pdev->dev, WIL_NAME
@@ -293,24 +306,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
293 goto if_free; 306 goto if_free;
294 } 307 }
295 /* rollback to err_plat */ 308 /* rollback to err_plat */
296
297 /* device supports >32bit addresses */
298 for (i = 0; i < ARRAY_SIZE(dma_addr_size); i++) {
299 rc = dma_set_mask_and_coherent(dev,
300 DMA_BIT_MASK(dma_addr_size[i]));
301 if (rc) {
302 dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
303 dma_addr_size[i], rc);
304 continue;
305 }
306 dev_info(dev, "using dma mask %d", dma_addr_size[i]);
307 wil->dma_addr_size = dma_addr_size[i];
308 break;
309 }
310
311 if (wil->dma_addr_size == 0)
312 goto err_plat;
313
314 rc = pci_enable_device(pdev); 309 rc = pci_enable_device(pdev);
315 if (rc && pdev->msi_enabled == 0) { 310 if (rc && pdev->msi_enabled == 0) {
316 wil_err(wil, 311 wil_err(wil,
@@ -350,6 +345,28 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
350 wil_err(wil, "wil_set_capabilities failed, rc %d\n", rc); 345 wil_err(wil, "wil_set_capabilities failed, rc %d\n", rc);
351 goto err_iounmap; 346 goto err_iounmap;
352 } 347 }
348
349 /* device supports >32bit addresses.
350 * for legacy DMA start from 48 bit.
351 */
352 start_idx = wil->use_enhanced_dma_hw ? 0 : 1;
353
354 for (i = start_idx; i < ARRAY_SIZE(dma_addr_size); i++) {
355 rc = dma_set_mask_and_coherent(dev,
356 DMA_BIT_MASK(dma_addr_size[i]));
357 if (rc) {
358 dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
359 dma_addr_size[i], rc);
360 continue;
361 }
362 dev_info(dev, "using dma mask %d", dma_addr_size[i]);
363 wil->dma_addr_size = dma_addr_size[i];
364 break;
365 }
366
367 if (wil->dma_addr_size == 0)
368 goto err_iounmap;
369
353 wil6210_clear_irq(wil); 370 wil6210_clear_irq(wil);
354 371
355 /* FW should raise IRQ when ready */ 372 /* FW should raise IRQ when ready */
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index ba81fb3ac96f..3a4194779ddf 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -211,7 +211,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
211 goto reject_suspend; 211 goto reject_suspend;
212 } 212 }
213 213
214 if (!wil_is_rx_idle(wil)) { 214 if (!wil->txrx_ops.is_rx_idle(wil)) {
215 wil_dbg_pm(wil, "Pending RX data, reject suspend\n"); 215 wil_dbg_pm(wil, "Pending RX data, reject suspend\n");
216 wil->suspend_stats.rejected_by_host++; 216 wil->suspend_stats.rejected_by_host++;
217 goto reject_suspend; 217 goto reject_suspend;
@@ -235,9 +235,9 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
235 start = jiffies; 235 start = jiffies;
236 data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS); 236 data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS);
237 if (test_bit(wil_status_napi_en, wil->status)) { 237 if (test_bit(wil_status_napi_en, wil->status)) {
238 while (!wil_is_rx_idle(wil)) { 238 while (!wil->txrx_ops.is_rx_idle(wil)) {
239 if (time_after(jiffies, data_comp_to)) { 239 if (time_after(jiffies, data_comp_to)) {
240 if (wil_is_rx_idle(wil)) 240 if (wil->txrx_ops.is_rx_idle(wil))
241 break; 241 break;
242 wil_err(wil, 242 wil_err(wil,
243 "TO waiting for idle RX, suspend failed\n"); 243 "TO waiting for idle RX, suspend failed\n");
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 76f8084c1fd8..22475a1ddb7f 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -95,17 +95,17 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
95{ 95{
96 struct wil6210_vif *vif; 96 struct wil6210_vif *vif;
97 struct net_device *ndev; 97 struct net_device *ndev;
98 struct vring_rx_desc *d = wil_skb_rxdesc(skb); 98 int tid, cid, mid, mcast;
99 int tid = wil_rxdesc_tid(d); 99 u16 seq;
100 int cid = wil_rxdesc_cid(d); 100 struct wil_sta_info *sta;
101 int mid = wil_rxdesc_mid(d);
102 u16 seq = wil_rxdesc_seq(d);
103 int mcast = wil_rxdesc_mcast(d);
104 struct wil_sta_info *sta = &wil->sta[cid];
105 struct wil_tid_ampdu_rx *r; 101 struct wil_tid_ampdu_rx *r;
106 u16 hseq; 102 u16 hseq;
107 int index; 103 int index;
108 104
105 wil->txrx_ops.get_reorder_params(wil, skb, &tid, &cid, &mid, &seq,
106 &mcast);
107 sta = &wil->sta[cid];
108
109 wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n", 109 wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n",
110 mid, cid, tid, seq, mcast); 110 mid, cid, tid, seq, mcast);
111 111
@@ -315,7 +315,10 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
315 * bits 6..15: buffer size 315 * bits 6..15: buffer size
316 */ 316 */
317 u16 req_agg_wsize = WIL_GET_BITS(param_set, 6, 15); 317 u16 req_agg_wsize = WIL_GET_BITS(param_set, 6, 15);
318 bool agg_amsdu = !!(param_set & BIT(0)); 318 bool agg_amsdu = wil->use_enhanced_dma_hw &&
319 wil->use_rx_hw_reordering &&
320 test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) &&
321 wil->amsdu_en && (param_set & BIT(0));
319 int ba_policy = param_set & BIT(1); 322 int ba_policy = param_set & BIT(1);
320 u16 status = WLAN_STATUS_SUCCESS; 323 u16 status = WLAN_STATUS_SUCCESS;
321 u16 ssn = seq_ctrl >> 4; 324 u16 ssn = seq_ctrl >> 4;
@@ -360,8 +363,9 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
360 } 363 }
361 } 364 }
362 365
363 rc = wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token, status, 366 rc = wil->txrx_ops.wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token,
364 agg_amsdu, agg_wsize, agg_timeout); 367 status, agg_amsdu, agg_wsize,
368 agg_timeout);
365 if (rc || (status != WLAN_STATUS_SUCCESS)) { 369 if (rc || (status != WLAN_STATUS_SUCCESS)) {
366 wil_err(wil, "do not apply ba, rc(%d), status(%d)\n", rc, 370 wil_err(wil, "do not apply ba, rc(%d), status(%d)\n", rc,
367 status); 371 status);
@@ -384,7 +388,7 @@ int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
384{ 388{
385 u8 agg_wsize = wil_agg_size(wil, wsize); 389 u8 agg_wsize = wil_agg_size(wil, wsize);
386 u16 agg_timeout = 0; 390 u16 agg_timeout = 0;
387 struct vring_tx_data *txdata = &wil->vring_tx_data[ringid]; 391 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
388 int rc = 0; 392 int rc = 0;
389 393
390 if (txdata->addba_in_progress) { 394 if (txdata->addba_in_progress) {
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
index c4db2a9d9f7f..853abc3a73e4 100644
--- a/drivers/net/wireless/ath/wil6210/trace.h
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -187,6 +187,40 @@ TRACE_EVENT(wil6210_rx,
187 __entry->seq, __entry->type, __entry->subtype) 187 __entry->seq, __entry->type, __entry->subtype)
188); 188);
189 189
190TRACE_EVENT(wil6210_rx_status,
191 TP_PROTO(struct wil6210_priv *wil, u8 use_compressed, u16 buff_id,
192 void *msg),
193 TP_ARGS(wil, use_compressed, buff_id, msg),
194 TP_STRUCT__entry(__field(u8, use_compressed)
195 __field(u16, buff_id)
196 __field(unsigned int, len)
197 __field(u8, mid)
198 __field(u8, cid)
199 __field(u8, tid)
200 __field(u8, type)
201 __field(u8, subtype)
202 __field(u16, seq)
203 __field(u8, mcs)
204 ),
205 TP_fast_assign(__entry->use_compressed = use_compressed;
206 __entry->buff_id = buff_id;
207 __entry->len = wil_rx_status_get_length(msg);
208 __entry->mid = wil_rx_status_get_mid(msg);
209 __entry->cid = wil_rx_status_get_cid(msg);
210 __entry->tid = wil_rx_status_get_tid(msg);
211 __entry->type = wil_rx_status_get_frame_type(wil,
212 msg);
213 __entry->subtype = wil_rx_status_get_fc1(wil, msg);
214 __entry->seq = wil_rx_status_get_seq(wil, msg);
215 __entry->mcs = wil_rx_status_get_mcs(msg);
216 ),
217 TP_printk(
218 "compressed %d buff_id %d len %d mid %d cid %d tid %d mcs %d seq 0x%03x type 0x%1x subtype 0x%1x",
219 __entry->use_compressed, __entry->buff_id, __entry->len,
220 __entry->mid, __entry->cid, __entry->tid, __entry->mcs,
221 __entry->seq, __entry->type, __entry->subtype)
222);
223
190TRACE_EVENT(wil6210_tx, 224TRACE_EVENT(wil6210_tx,
191 TP_PROTO(u8 vring, u16 index, unsigned int len, u8 frags), 225 TP_PROTO(u8 vring, u16 index, unsigned int len, u8 frags),
192 TP_ARGS(vring, index, len, frags), 226 TP_ARGS(vring, index, len, frags),
@@ -226,6 +260,31 @@ TRACE_EVENT(wil6210_tx_done,
226 __entry->err) 260 __entry->err)
227); 261);
228 262
263TRACE_EVENT(wil6210_tx_status,
264 TP_PROTO(struct wil_ring_tx_status *msg, u16 index,
265 unsigned int len),
266 TP_ARGS(msg, index, len),
267 TP_STRUCT__entry(__field(u16, index)
268 __field(unsigned int, len)
269 __field(u8, num_descs)
270 __field(u8, ring_id)
271 __field(u8, status)
272 __field(u8, mcs)
273
274 ),
275 TP_fast_assign(__entry->index = index;
276 __entry->len = len;
277 __entry->num_descs = msg->num_descriptors;
278 __entry->ring_id = msg->ring_id;
279 __entry->status = msg->status;
280 __entry->mcs = wil_tx_status_get_mcs(msg);
281 ),
282 TP_printk(
283 "ring_id %d swtail 0x%x len %d num_descs %d status 0x%x mcs %d",
284 __entry->ring_id, __entry->index, __entry->len,
285 __entry->num_descs, __entry->status, __entry->mcs)
286);
287
229#endif /* WIL6210_TRACE_H || TRACE_HEADER_MULTI_READ*/ 288#endif /* WIL6210_TRACE_H || TRACE_HEADER_MULTI_READ*/
230 289
231#if defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__) 290#if defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__)
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index b9a9fa828961..2098f3cc1cec 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -28,6 +28,7 @@
28#include "wmi.h" 28#include "wmi.h"
29#include "txrx.h" 29#include "txrx.h"
30#include "trace.h" 30#include "trace.h"
31#include "txrx_edma.h"
31 32
32static bool rtap_include_phy_info; 33static bool rtap_include_phy_info;
33module_param(rtap_include_phy_info, bool, 0444); 34module_param(rtap_include_phy_info, bool, 0444);
@@ -47,62 +48,28 @@ static inline uint wil_rx_snaplen(void)
47 return rx_align_2 ? 6 : 0; 48 return rx_align_2 ? 6 : 0;
48} 49}
49 50
50static inline int wil_vring_is_empty(struct vring *vring) 51/* wil_ring_wmark_low - low watermark for available descriptor space */
52static inline int wil_ring_wmark_low(struct wil_ring *ring)
51{ 53{
52 return vring->swhead == vring->swtail; 54 return ring->size / 8;
53} 55}
54 56
55static inline u32 wil_vring_next_tail(struct vring *vring) 57/* wil_ring_wmark_high - high watermark for available descriptor space */
58static inline int wil_ring_wmark_high(struct wil_ring *ring)
56{ 59{
57 return (vring->swtail + 1) % vring->size; 60 return ring->size / 4;
58}
59
60static inline void wil_vring_advance_head(struct vring *vring, int n)
61{
62 vring->swhead = (vring->swhead + n) % vring->size;
63}
64
65static inline int wil_vring_is_full(struct vring *vring)
66{
67 return wil_vring_next_tail(vring) == vring->swhead;
68}
69
70/* Used space in Tx Vring */
71static inline int wil_vring_used_tx(struct vring *vring)
72{
73 u32 swhead = vring->swhead;
74 u32 swtail = vring->swtail;
75 return (vring->size + swhead - swtail) % vring->size;
76}
77
78/* Available space in Tx Vring */
79static inline int wil_vring_avail_tx(struct vring *vring)
80{
81 return vring->size - wil_vring_used_tx(vring) - 1;
82}
83
84/* wil_vring_wmark_low - low watermark for available descriptor space */
85static inline int wil_vring_wmark_low(struct vring *vring)
86{
87 return vring->size/8;
88}
89
90/* wil_vring_wmark_high - high watermark for available descriptor space */
91static inline int wil_vring_wmark_high(struct vring *vring)
92{
93 return vring->size/4;
94} 61}
95 62
96/* returns true if num avail descriptors is lower than wmark_low */ 63/* returns true if num avail descriptors is lower than wmark_low */
97static inline int wil_vring_avail_low(struct vring *vring) 64static inline int wil_ring_avail_low(struct wil_ring *ring)
98{ 65{
99 return wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring); 66 return wil_ring_avail_tx(ring) < wil_ring_wmark_low(ring);
100} 67}
101 68
102/* returns true if num avail descriptors is higher than wmark_high */ 69/* returns true if num avail descriptors is higher than wmark_high */
103static inline int wil_vring_avail_high(struct vring *vring) 70static inline int wil_ring_avail_high(struct wil_ring *ring)
104{ 71{
105 return wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring); 72 return wil_ring_avail_tx(ring) > wil_ring_wmark_high(ring);
106} 73}
107 74
108/* returns true when all tx vrings are empty */ 75/* returns true when all tx vrings are empty */
@@ -112,9 +79,10 @@ bool wil_is_tx_idle(struct wil6210_priv *wil)
112 unsigned long data_comp_to; 79 unsigned long data_comp_to;
113 80
114 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { 81 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
115 struct vring *vring = &wil->vring_tx[i]; 82 struct wil_ring *vring = &wil->ring_tx[i];
116 int vring_index = vring - wil->vring_tx; 83 int vring_index = vring - wil->ring_tx;
117 struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index]; 84 struct wil_ring_tx_data *txdata =
85 &wil->ring_tx_data[vring_index];
118 86
119 spin_lock(&txdata->lock); 87 spin_lock(&txdata->lock);
120 88
@@ -126,7 +94,7 @@ bool wil_is_tx_idle(struct wil6210_priv *wil)
126 data_comp_to = jiffies + msecs_to_jiffies( 94 data_comp_to = jiffies + msecs_to_jiffies(
127 WIL_DATA_COMPLETION_TO_MS); 95 WIL_DATA_COMPLETION_TO_MS);
128 if (test_bit(wil_status_napi_en, wil->status)) { 96 if (test_bit(wil_status_napi_en, wil->status)) {
129 while (!wil_vring_is_empty(vring)) { 97 while (!wil_ring_is_empty(vring)) {
130 if (time_after(jiffies, data_comp_to)) { 98 if (time_after(jiffies, data_comp_to)) {
131 wil_dbg_pm(wil, 99 wil_dbg_pm(wil,
132 "TO waiting for idle tx\n"); 100 "TO waiting for idle tx\n");
@@ -150,13 +118,7 @@ bool wil_is_tx_idle(struct wil6210_priv *wil)
150 return true; 118 return true;
151} 119}
152 120
153/* wil_val_in_range - check if value in [min,max) */ 121static int wil_vring_alloc(struct wil6210_priv *wil, struct wil_ring *vring)
154static inline bool wil_val_in_range(int val, int min, int max)
155{
156 return val >= min && val < max;
157}
158
159static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
160{ 122{
161 struct device *dev = wil_to_dev(wil); 123 struct device *dev = wil_to_dev(wil);
162 size_t sz = vring->size * sizeof(vring->va[0]); 124 size_t sz = vring->size * sizeof(vring->va[0]);
@@ -205,7 +167,8 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
205 * we can use any 167 * we can use any
206 */ 168 */
207 for (i = 0; i < vring->size; i++) { 169 for (i = 0; i < vring->size; i++) {
208 volatile struct vring_tx_desc *_d = &vring->va[i].tx; 170 volatile struct vring_tx_desc *_d =
171 &vring->va[i].tx.legacy;
209 172
210 _d->dma.status = TX_DMA_STATUS_DU; 173 _d->dma.status = TX_DMA_STATUS_DU;
211 } 174 }
@@ -216,9 +179,10 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
216 return 0; 179 return 0;
217} 180}
218 181
219static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d, 182static void wil_txdesc_unmap(struct device *dev, union wil_tx_desc *desc,
220 struct wil_ctx *ctx) 183 struct wil_ctx *ctx)
221{ 184{
185 struct vring_tx_desc *d = &desc->legacy;
222 dma_addr_t pa = wil_desc_addr(&d->dma.addr); 186 dma_addr_t pa = wil_desc_addr(&d->dma.addr);
223 u16 dmalen = le16_to_cpu(d->dma.length); 187 u16 dmalen = le16_to_cpu(d->dma.length);
224 188
@@ -234,15 +198,14 @@ static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
234 } 198 }
235} 199}
236 200
237static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring, 201static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring)
238 int tx)
239{ 202{
240 struct device *dev = wil_to_dev(wil); 203 struct device *dev = wil_to_dev(wil);
241 size_t sz = vring->size * sizeof(vring->va[0]); 204 size_t sz = vring->size * sizeof(vring->va[0]);
242 205
243 lockdep_assert_held(&wil->mutex); 206 lockdep_assert_held(&wil->mutex);
244 if (tx) { 207 if (!vring->is_rx) {
245 int vring_index = vring - wil->vring_tx; 208 int vring_index = vring - wil->ring_tx;
246 209
247 wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n", 210 wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
248 vring_index, vring->size, vring->va, 211 vring_index, vring->size, vring->va,
@@ -253,33 +216,33 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
253 &vring->pa, vring->ctx); 216 &vring->pa, vring->ctx);
254 } 217 }
255 218
256 while (!wil_vring_is_empty(vring)) { 219 while (!wil_ring_is_empty(vring)) {
257 dma_addr_t pa; 220 dma_addr_t pa;
258 u16 dmalen; 221 u16 dmalen;
259 struct wil_ctx *ctx; 222 struct wil_ctx *ctx;
260 223
261 if (tx) { 224 if (!vring->is_rx) {
262 struct vring_tx_desc dd, *d = &dd; 225 struct vring_tx_desc dd, *d = &dd;
263 volatile struct vring_tx_desc *_d = 226 volatile struct vring_tx_desc *_d =
264 &vring->va[vring->swtail].tx; 227 &vring->va[vring->swtail].tx.legacy;
265 228
266 ctx = &vring->ctx[vring->swtail]; 229 ctx = &vring->ctx[vring->swtail];
267 if (!ctx) { 230 if (!ctx) {
268 wil_dbg_txrx(wil, 231 wil_dbg_txrx(wil,
269 "ctx(%d) was already completed\n", 232 "ctx(%d) was already completed\n",
270 vring->swtail); 233 vring->swtail);
271 vring->swtail = wil_vring_next_tail(vring); 234 vring->swtail = wil_ring_next_tail(vring);
272 continue; 235 continue;
273 } 236 }
274 *d = *_d; 237 *d = *_d;
275 wil_txdesc_unmap(dev, d, ctx); 238 wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
276 if (ctx->skb) 239 if (ctx->skb)
277 dev_kfree_skb_any(ctx->skb); 240 dev_kfree_skb_any(ctx->skb);
278 vring->swtail = wil_vring_next_tail(vring); 241 vring->swtail = wil_ring_next_tail(vring);
279 } else { /* rx */ 242 } else { /* rx */
280 struct vring_rx_desc dd, *d = &dd; 243 struct vring_rx_desc dd, *d = &dd;
281 volatile struct vring_rx_desc *_d = 244 volatile struct vring_rx_desc *_d =
282 &vring->va[vring->swhead].rx; 245 &vring->va[vring->swhead].rx.legacy;
283 246
284 ctx = &vring->ctx[vring->swhead]; 247 ctx = &vring->ctx[vring->swhead];
285 *d = *_d; 248 *d = *_d;
@@ -287,7 +250,7 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
287 dmalen = le16_to_cpu(d->dma.length); 250 dmalen = le16_to_cpu(d->dma.length);
288 dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE); 251 dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
289 kfree_skb(ctx->skb); 252 kfree_skb(ctx->skb);
290 wil_vring_advance_head(vring, 1); 253 wil_ring_advance_head(vring, 1);
291 } 254 }
292 } 255 }
293 dma_free_coherent(dev, sz, (void *)vring->va, vring->pa); 256 dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
@@ -302,13 +265,13 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
302 * 265 *
303 * Safe to call from IRQ 266 * Safe to call from IRQ
304 */ 267 */
305static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring, 268static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct wil_ring *vring,
306 u32 i, int headroom) 269 u32 i, int headroom)
307{ 270{
308 struct device *dev = wil_to_dev(wil); 271 struct device *dev = wil_to_dev(wil);
309 unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen(); 272 unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
310 struct vring_rx_desc dd, *d = &dd; 273 struct vring_rx_desc dd, *d = &dd;
311 volatile struct vring_rx_desc *_d = &vring->va[i].rx; 274 volatile struct vring_rx_desc *_d = &vring->va[i].rx.legacy;
312 dma_addr_t pa; 275 dma_addr_t pa;
313 struct sk_buff *skb = dev_alloc_skb(sz + headroom); 276 struct sk_buff *skb = dev_alloc_skb(sz + headroom);
314 277
@@ -445,19 +408,12 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
445 } 408 }
446} 409}
447 410
448/* similar to ieee80211_ version, but FC contain only 1-st byte */ 411static bool wil_is_rx_idle(struct wil6210_priv *wil)
449static inline int wil_is_back_req(u8 fc)
450{
451 return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
452 (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
453}
454
455bool wil_is_rx_idle(struct wil6210_priv *wil)
456{ 412{
457 struct vring_rx_desc *_d; 413 struct vring_rx_desc *_d;
458 struct vring *vring = &wil->vring_rx; 414 struct wil_ring *ring = &wil->ring_rx;
459 415
460 _d = (struct vring_rx_desc *)&vring->va[vring->swhead].rx; 416 _d = (struct vring_rx_desc *)&ring->va[ring->swhead].rx.legacy;
461 if (_d->dma.status & RX_DMA_STATUS_DU) 417 if (_d->dma.status & RX_DMA_STATUS_DU)
462 return false; 418 return false;
463 419
@@ -472,7 +428,7 @@ bool wil_is_rx_idle(struct wil6210_priv *wil)
472 * Safe to call from IRQ 428 * Safe to call from IRQ
473 */ 429 */
474static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, 430static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
475 struct vring *vring) 431 struct wil_ring *vring)
476{ 432{
477 struct device *dev = wil_to_dev(wil); 433 struct device *dev = wil_to_dev(wil);
478 struct wil6210_vif *vif; 434 struct wil6210_vif *vif;
@@ -492,11 +448,11 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
492 BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb)); 448 BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
493 449
494again: 450again:
495 if (unlikely(wil_vring_is_empty(vring))) 451 if (unlikely(wil_ring_is_empty(vring)))
496 return NULL; 452 return NULL;
497 453
498 i = (int)vring->swhead; 454 i = (int)vring->swhead;
499 _d = &vring->va[i].rx; 455 _d = &vring->va[i].rx.legacy;
500 if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) { 456 if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
501 /* it is not error, we just reached end of Rx done area */ 457 /* it is not error, we just reached end of Rx done area */
502 return NULL; 458 return NULL;
@@ -504,7 +460,7 @@ again:
504 460
505 skb = vring->ctx[i].skb; 461 skb = vring->ctx[i].skb;
506 vring->ctx[i].skb = NULL; 462 vring->ctx[i].skb = NULL;
507 wil_vring_advance_head(vring, 1); 463 wil_ring_advance_head(vring, 1);
508 if (!skb) { 464 if (!skb) {
509 wil_err(wil, "No Rx skb at [%d]\n", i); 465 wil_err(wil, "No Rx skb at [%d]\n", i);
510 goto again; 466 goto again;
@@ -641,15 +597,15 @@ again:
641static int wil_rx_refill(struct wil6210_priv *wil, int count) 597static int wil_rx_refill(struct wil6210_priv *wil, int count)
642{ 598{
643 struct net_device *ndev = wil->main_ndev; 599 struct net_device *ndev = wil->main_ndev;
644 struct vring *v = &wil->vring_rx; 600 struct wil_ring *v = &wil->ring_rx;
645 u32 next_tail; 601 u32 next_tail;
646 int rc = 0; 602 int rc = 0;
647 int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ? 603 int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
648 WIL6210_RTAP_SIZE : 0; 604 WIL6210_RTAP_SIZE : 0;
649 605
650 for (; next_tail = wil_vring_next_tail(v), 606 for (; next_tail = wil_ring_next_tail(v),
651 (next_tail != v->swhead) && (count-- > 0); 607 (next_tail != v->swhead) && (count-- > 0);
652 v->swtail = next_tail) { 608 v->swtail = next_tail) {
653 rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom); 609 rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
654 if (unlikely(rc)) { 610 if (unlikely(rc)) {
655 wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n", 611 wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n",
@@ -677,7 +633,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
677 * Cut'n'paste from original memcmp (see lib/string.c) 633 * Cut'n'paste from original memcmp (see lib/string.c)
678 * with minimal modifications 634 * with minimal modifications
679 */ 635 */
680static int reverse_memcmp(const void *cs, const void *ct, size_t count) 636int reverse_memcmp(const void *cs, const void *ct, size_t count)
681{ 637{
682 const unsigned char *su1, *su2; 638 const unsigned char *su1, *su2;
683 int res = 0; 639 int res = 0;
@@ -722,6 +678,15 @@ static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
722 return 0; 678 return 0;
723} 679}
724 680
681static void wil_get_netif_rx_params(struct sk_buff *skb, int *cid,
682 int *security)
683{
684 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
685
686 *cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
687 *security = wil_rxdesc_security(d);
688}
689
725/* 690/*
726 * Pass Rx packet to the netif. Update statistics. 691 * Pass Rx packet to the netif. Update statistics.
727 * Called in softirq context (NAPI poll). 692 * Called in softirq context (NAPI poll).
@@ -733,15 +698,14 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
733 struct wil6210_priv *wil = ndev_to_wil(ndev); 698 struct wil6210_priv *wil = ndev_to_wil(ndev);
734 struct wireless_dev *wdev = vif_to_wdev(vif); 699 struct wireless_dev *wdev = vif_to_wdev(vif);
735 unsigned int len = skb->len; 700 unsigned int len = skb->len;
736 struct vring_rx_desc *d = wil_skb_rxdesc(skb); 701 int cid;
737 int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */ 702 int security;
738 int security = wil_rxdesc_security(d);
739 struct ethhdr *eth = (void *)skb->data; 703 struct ethhdr *eth = (void *)skb->data;
740 /* here looking for DA, not A1, thus Rxdesc's 'mcast' indication 704 /* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
741 * is not suitable, need to look at data 705 * is not suitable, need to look at data
742 */ 706 */
743 int mcast = is_multicast_ether_addr(eth->h_dest); 707 int mcast = is_multicast_ether_addr(eth->h_dest);
744 struct wil_net_stats *stats = &wil->sta[cid].stats; 708 struct wil_net_stats *stats;
745 struct sk_buff *xmit_skb = NULL; 709 struct sk_buff *xmit_skb = NULL;
746 static const char * const gro_res_str[] = { 710 static const char * const gro_res_str[] = {
747 [GRO_MERGED] = "GRO_MERGED", 711 [GRO_MERGED] = "GRO_MERGED",
@@ -751,6 +715,10 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
751 [GRO_DROP] = "GRO_DROP", 715 [GRO_DROP] = "GRO_DROP",
752 }; 716 };
753 717
718 wil->txrx_ops.get_netif_rx_params(skb, &cid, &security);
719
720 stats = &wil->sta[cid].stats;
721
754 if (ndev->features & NETIF_F_RXHASH) 722 if (ndev->features & NETIF_F_RXHASH)
755 /* fake L4 to ensure it won't be re-calculated later 723 /* fake L4 to ensure it won't be re-calculated later
756 * set hash to any non-zero value to activate rps 724 * set hash to any non-zero value to activate rps
@@ -761,7 +729,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
761 729
762 skb_orphan(skb); 730 skb_orphan(skb);
763 731
764 if (security && (wil_rx_crypto_check(wil, skb) != 0)) { 732 if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) {
765 rc = GRO_DROP; 733 rc = GRO_DROP;
766 dev_kfree_skb(skb); 734 dev_kfree_skb(skb);
767 stats->rx_replay++; 735 stats->rx_replay++;
@@ -835,7 +803,7 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
835{ 803{
836 struct net_device *ndev = wil->main_ndev; 804 struct net_device *ndev = wil->main_ndev;
837 struct wireless_dev *wdev = ndev->ieee80211_ptr; 805 struct wireless_dev *wdev = ndev->ieee80211_ptr;
838 struct vring *v = &wil->vring_rx; 806 struct wil_ring *v = &wil->ring_rx;
839 struct sk_buff *skb; 807 struct sk_buff *skb;
840 808
841 if (unlikely(!v->va)) { 809 if (unlikely(!v->va)) {
@@ -875,9 +843,9 @@ static void wil_rx_buf_len_init(struct wil6210_priv *wil)
875 } 843 }
876} 844}
877 845
878int wil_rx_init(struct wil6210_priv *wil, u16 size) 846static int wil_rx_init(struct wil6210_priv *wil, u16 size)
879{ 847{
880 struct vring *vring = &wil->vring_rx; 848 struct wil_ring *vring = &wil->ring_rx;
881 int rc; 849 int rc;
882 850
883 wil_dbg_misc(wil, "rx_init\n"); 851 wil_dbg_misc(wil, "rx_init\n");
@@ -890,6 +858,7 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size)
890 wil_rx_buf_len_init(wil); 858 wil_rx_buf_len_init(wil);
891 859
892 vring->size = size; 860 vring->size = size;
861 vring->is_rx = true;
893 rc = wil_vring_alloc(wil, vring); 862 rc = wil_vring_alloc(wil, vring);
894 if (rc) 863 if (rc)
895 return rc; 864 return rc;
@@ -904,22 +873,46 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size)
904 873
905 return 0; 874 return 0;
906 err_free: 875 err_free:
907 wil_vring_free(wil, vring, 0); 876 wil_vring_free(wil, vring);
908 877
909 return rc; 878 return rc;
910} 879}
911 880
912void wil_rx_fini(struct wil6210_priv *wil) 881static void wil_rx_fini(struct wil6210_priv *wil)
913{ 882{
914 struct vring *vring = &wil->vring_rx; 883 struct wil_ring *vring = &wil->ring_rx;
915 884
916 wil_dbg_misc(wil, "rx_fini\n"); 885 wil_dbg_misc(wil, "rx_fini\n");
917 886
918 if (vring->va) 887 if (vring->va)
919 wil_vring_free(wil, vring, 0); 888 wil_vring_free(wil, vring);
920} 889}
921 890
922static inline void wil_tx_data_init(struct vring_tx_data *txdata) 891static int wil_tx_desc_map(union wil_tx_desc *desc, dma_addr_t pa,
892 u32 len, int vring_index)
893{
894 struct vring_tx_desc *d = &desc->legacy;
895
896 wil_desc_addr_set(&d->dma.addr, pa);
897 d->dma.ip_length = 0;
898 /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
899 d->dma.b11 = 0/*14 | BIT(7)*/;
900 d->dma.error = 0;
901 d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
902 d->dma.length = cpu_to_le16((u16)len);
903 d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
904 d->mac.d[0] = 0;
905 d->mac.d[1] = 0;
906 d->mac.d[2] = 0;
907 d->mac.ucode_cmd = 0;
908 /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
909 d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
910 (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
911
912 return 0;
913}
914
915void wil_tx_data_init(struct wil_ring_tx_data *txdata)
923{ 916{
924 spin_lock_bh(&txdata->lock); 917 spin_lock_bh(&txdata->lock);
925 txdata->dot1x_open = 0; 918 txdata->dot1x_open = 0;
@@ -935,8 +928,8 @@ static inline void wil_tx_data_init(struct vring_tx_data *txdata)
935 spin_unlock_bh(&txdata->lock); 928 spin_unlock_bh(&txdata->lock);
936} 929}
937 930
938int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size, 931static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
939 int cid, int tid) 932 int cid, int tid)
940{ 933{
941 struct wil6210_priv *wil = vif_to_wil(vif); 934 struct wil6210_priv *wil = vif_to_wil(vif);
942 int rc; 935 int rc;
@@ -966,8 +959,8 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
966 } __packed reply = { 959 } __packed reply = {
967 .cmd = {.status = WMI_FW_STATUS_FAILURE}, 960 .cmd = {.status = WMI_FW_STATUS_FAILURE},
968 }; 961 };
969 struct vring *vring = &wil->vring_tx[id]; 962 struct wil_ring *vring = &wil->ring_tx[id];
970 struct vring_tx_data *txdata = &wil->vring_tx_data[id]; 963 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
971 964
972 wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n", 965 wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
973 cmd.vring_cfg.tx_sw_ring.max_mpdu_size); 966 cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
@@ -980,13 +973,14 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
980 } 973 }
981 974
982 wil_tx_data_init(txdata); 975 wil_tx_data_init(txdata);
976 vring->is_rx = false;
983 vring->size = size; 977 vring->size = size;
984 rc = wil_vring_alloc(wil, vring); 978 rc = wil_vring_alloc(wil, vring);
985 if (rc) 979 if (rc)
986 goto out; 980 goto out;
987 981
988 wil->vring2cid_tid[id][0] = cid; 982 wil->ring2cid_tid[id][0] = cid;
989 wil->vring2cid_tid[id][1] = tid; 983 wil->ring2cid_tid[id][1] = tid;
990 984
991 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); 985 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
992 986
@@ -1019,9 +1013,9 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
1019 txdata->dot1x_open = false; 1013 txdata->dot1x_open = false;
1020 txdata->enabled = 0; 1014 txdata->enabled = 0;
1021 spin_unlock_bh(&txdata->lock); 1015 spin_unlock_bh(&txdata->lock);
1022 wil_vring_free(wil, vring, 1); 1016 wil_vring_free(wil, vring);
1023 wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; 1017 wil->ring2cid_tid[id][0] = WIL6210_MAX_CID;
1024 wil->vring2cid_tid[id][1] = 0; 1018 wil->ring2cid_tid[id][1] = 0;
1025 1019
1026 out: 1020 out:
1027 1021
@@ -1050,8 +1044,8 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
1050 } __packed reply = { 1044 } __packed reply = {
1051 .cmd = {.status = WMI_FW_STATUS_FAILURE}, 1045 .cmd = {.status = WMI_FW_STATUS_FAILURE},
1052 }; 1046 };
1053 struct vring *vring = &wil->vring_tx[id]; 1047 struct wil_ring *vring = &wil->ring_tx[id];
1054 struct vring_tx_data *txdata = &wil->vring_tx_data[id]; 1048 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
1055 1049
1056 wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n", 1050 wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
1057 cmd.vring_cfg.tx_sw_ring.max_mpdu_size); 1051 cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
@@ -1064,13 +1058,14 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
1064 } 1058 }
1065 1059
1066 wil_tx_data_init(txdata); 1060 wil_tx_data_init(txdata);
1061 vring->is_rx = false;
1067 vring->size = size; 1062 vring->size = size;
1068 rc = wil_vring_alloc(wil, vring); 1063 rc = wil_vring_alloc(wil, vring);
1069 if (rc) 1064 if (rc)
1070 goto out; 1065 goto out;
1071 1066
1072 wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */ 1067 wil->ring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */
1073 wil->vring2cid_tid[id][1] = 0; /* TID */ 1068 wil->ring2cid_tid[id][1] = 0; /* TID */
1074 1069
1075 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); 1070 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
1076 1071
@@ -1101,62 +1096,32 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
1101 txdata->enabled = 0; 1096 txdata->enabled = 0;
1102 txdata->dot1x_open = false; 1097 txdata->dot1x_open = false;
1103 spin_unlock_bh(&txdata->lock); 1098 spin_unlock_bh(&txdata->lock);
1104 wil_vring_free(wil, vring, 1); 1099 wil_vring_free(wil, vring);
1105 out: 1100 out:
1106 1101
1107 return rc; 1102 return rc;
1108} 1103}
1109 1104
1110void wil_vring_fini_tx(struct wil6210_priv *wil, int id) 1105static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil,
1111{ 1106 struct wil6210_vif *vif,
1112 struct vring *vring = &wil->vring_tx[id]; 1107 struct sk_buff *skb)
1113 struct vring_tx_data *txdata = &wil->vring_tx_data[id];
1114
1115 lockdep_assert_held(&wil->mutex);
1116
1117 if (!vring->va)
1118 return;
1119
1120 wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);
1121
1122 spin_lock_bh(&txdata->lock);
1123 txdata->dot1x_open = false;
1124 txdata->mid = U8_MAX;
1125 txdata->enabled = 0; /* no Tx can be in progress or start anew */
1126 spin_unlock_bh(&txdata->lock);
1127 /* napi_synchronize waits for completion of the current NAPI but will
1128 * not prevent the next NAPI run.
1129 * Add a memory barrier to guarantee that txdata->enabled is zeroed
1130 * before napi_synchronize so that the next scheduled NAPI will not
1131 * handle this vring
1132 */
1133 wmb();
1134 /* make sure NAPI won't touch this vring */
1135 if (test_bit(wil_status_napi_en, wil->status))
1136 napi_synchronize(&wil->napi_tx);
1137
1138 wil_vring_free(wil, vring, 1);
1139}
1140
1141static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
1142 struct wil6210_vif *vif,
1143 struct sk_buff *skb)
1144{ 1108{
1145 int i; 1109 int i;
1146 struct ethhdr *eth = (void *)skb->data; 1110 struct ethhdr *eth = (void *)skb->data;
1147 int cid = wil_find_cid(wil, vif->mid, eth->h_dest); 1111 int cid = wil_find_cid(wil, vif->mid, eth->h_dest);
1112 int min_ring_id = wil_get_min_tx_ring_id(wil);
1148 1113
1149 if (cid < 0) 1114 if (cid < 0)
1150 return NULL; 1115 return NULL;
1151 1116
1152 /* TODO: fix for multiple TID */ 1117 /* TODO: fix for multiple TID */
1153 for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) { 1118 for (i = min_ring_id; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
1154 if (!wil->vring_tx_data[i].dot1x_open && 1119 if (!wil->ring_tx_data[i].dot1x_open &&
1155 (skb->protocol != cpu_to_be16(ETH_P_PAE))) 1120 skb->protocol != cpu_to_be16(ETH_P_PAE))
1156 continue; 1121 continue;
1157 if (wil->vring2cid_tid[i][0] == cid) { 1122 if (wil->ring2cid_tid[i][0] == cid) {
1158 struct vring *v = &wil->vring_tx[i]; 1123 struct wil_ring *v = &wil->ring_tx[i];
1159 struct vring_tx_data *txdata = &wil->vring_tx_data[i]; 1124 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
1160 1125
1161 wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n", 1126 wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
1162 eth->h_dest, i); 1127 eth->h_dest, i);
@@ -1174,42 +1139,43 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
1174 return NULL; 1139 return NULL;
1175} 1140}
1176 1141
1177static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, 1142static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
1178 struct vring *vring, struct sk_buff *skb); 1143 struct wil_ring *ring, struct sk_buff *skb);
1179 1144
1180static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil, 1145static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil,
1181 struct wil6210_vif *vif, 1146 struct wil6210_vif *vif,
1182 struct sk_buff *skb) 1147 struct sk_buff *skb)
1183{ 1148{
1184 struct vring *v; 1149 struct wil_ring *ring;
1185 int i; 1150 int i;
1186 u8 cid; 1151 u8 cid;
1187 struct vring_tx_data *txdata; 1152 struct wil_ring_tx_data *txdata;
1153 int min_ring_id = wil_get_min_tx_ring_id(wil);
1188 1154
1189 /* In the STA mode, it is expected to have only 1 VRING 1155 /* In the STA mode, it is expected to have only 1 VRING
1190 * for the AP we connected to. 1156 * for the AP we connected to.
1191 * find 1-st vring eligible for this skb and use it. 1157 * find 1-st vring eligible for this skb and use it.
1192 */ 1158 */
1193 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { 1159 for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
1194 v = &wil->vring_tx[i]; 1160 ring = &wil->ring_tx[i];
1195 txdata = &wil->vring_tx_data[i]; 1161 txdata = &wil->ring_tx_data[i];
1196 if (!v->va || !txdata->enabled || txdata->mid != vif->mid) 1162 if (!ring->va || !txdata->enabled || txdata->mid != vif->mid)
1197 continue; 1163 continue;
1198 1164
1199 cid = wil->vring2cid_tid[i][0]; 1165 cid = wil->ring2cid_tid[i][0];
1200 if (cid >= WIL6210_MAX_CID) /* skip BCAST */ 1166 if (cid >= WIL6210_MAX_CID) /* skip BCAST */
1201 continue; 1167 continue;
1202 1168
1203 if (!wil->vring_tx_data[i].dot1x_open && 1169 if (!wil->ring_tx_data[i].dot1x_open &&
1204 (skb->protocol != cpu_to_be16(ETH_P_PAE))) 1170 skb->protocol != cpu_to_be16(ETH_P_PAE))
1205 continue; 1171 continue;
1206 1172
1207 wil_dbg_txrx(wil, "Tx -> ring %d\n", i); 1173 wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
1208 1174
1209 return v; 1175 return ring;
1210 } 1176 }
1211 1177
1212 wil_dbg_txrx(wil, "Tx while no vrings active?\n"); 1178 wil_dbg_txrx(wil, "Tx while no rings active?\n");
1213 1179
1214 return NULL; 1180 return NULL;
1215} 1181}
@@ -1225,22 +1191,22 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
1225 * Use old strategy when new is not supported yet: 1191 * Use old strategy when new is not supported yet:
1226 * - for PBSS 1192 * - for PBSS
1227 */ 1193 */
1228static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil, 1194static struct wil_ring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
1229 struct wil6210_vif *vif, 1195 struct wil6210_vif *vif,
1230 struct sk_buff *skb) 1196 struct sk_buff *skb)
1231{ 1197{
1232 struct vring *v; 1198 struct wil_ring *v;
1233 struct vring_tx_data *txdata; 1199 struct wil_ring_tx_data *txdata;
1234 int i = vif->bcast_vring; 1200 int i = vif->bcast_ring;
1235 1201
1236 if (i < 0) 1202 if (i < 0)
1237 return NULL; 1203 return NULL;
1238 v = &wil->vring_tx[i]; 1204 v = &wil->ring_tx[i];
1239 txdata = &wil->vring_tx_data[i]; 1205 txdata = &wil->ring_tx_data[i];
1240 if (!v->va || !txdata->enabled) 1206 if (!v->va || !txdata->enabled)
1241 return NULL; 1207 return NULL;
1242 if (!wil->vring_tx_data[i].dot1x_open && 1208 if (!wil->ring_tx_data[i].dot1x_open &&
1243 (skb->protocol != cpu_to_be16(ETH_P_PAE))) 1209 skb->protocol != cpu_to_be16(ETH_P_PAE))
1244 return NULL; 1210 return NULL;
1245 1211
1246 return v; 1212 return v;
@@ -1250,35 +1216,36 @@ static void wil_set_da_for_vring(struct wil6210_priv *wil,
1250 struct sk_buff *skb, int vring_index) 1216 struct sk_buff *skb, int vring_index)
1251{ 1217{
1252 struct ethhdr *eth = (void *)skb->data; 1218 struct ethhdr *eth = (void *)skb->data;
1253 int cid = wil->vring2cid_tid[vring_index][0]; 1219 int cid = wil->ring2cid_tid[vring_index][0];
1254 1220
1255 ether_addr_copy(eth->h_dest, wil->sta[cid].addr); 1221 ether_addr_copy(eth->h_dest, wil->sta[cid].addr);
1256} 1222}
1257 1223
1258static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil, 1224static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
1259 struct wil6210_vif *vif, 1225 struct wil6210_vif *vif,
1260 struct sk_buff *skb) 1226 struct sk_buff *skb)
1261{ 1227{
1262 struct vring *v, *v2; 1228 struct wil_ring *v, *v2;
1263 struct sk_buff *skb2; 1229 struct sk_buff *skb2;
1264 int i; 1230 int i;
1265 u8 cid; 1231 u8 cid;
1266 struct ethhdr *eth = (void *)skb->data; 1232 struct ethhdr *eth = (void *)skb->data;
1267 char *src = eth->h_source; 1233 char *src = eth->h_source;
1268 struct vring_tx_data *txdata, *txdata2; 1234 struct wil_ring_tx_data *txdata, *txdata2;
1235 int min_ring_id = wil_get_min_tx_ring_id(wil);
1269 1236
1270 /* find 1-st vring eligible for data */ 1237 /* find 1-st vring eligible for data */
1271 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { 1238 for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
1272 v = &wil->vring_tx[i]; 1239 v = &wil->ring_tx[i];
1273 txdata = &wil->vring_tx_data[i]; 1240 txdata = &wil->ring_tx_data[i];
1274 if (!v->va || !txdata->enabled || txdata->mid != vif->mid) 1241 if (!v->va || !txdata->enabled || txdata->mid != vif->mid)
1275 continue; 1242 continue;
1276 1243
1277 cid = wil->vring2cid_tid[i][0]; 1244 cid = wil->ring2cid_tid[i][0];
1278 if (cid >= WIL6210_MAX_CID) /* skip BCAST */ 1245 if (cid >= WIL6210_MAX_CID) /* skip BCAST */
1279 continue; 1246 continue;
1280 if (!wil->vring_tx_data[i].dot1x_open && 1247 if (!wil->ring_tx_data[i].dot1x_open &&
1281 (skb->protocol != cpu_to_be16(ETH_P_PAE))) 1248 skb->protocol != cpu_to_be16(ETH_P_PAE))
1282 continue; 1249 continue;
1283 1250
1284 /* don't Tx back to source when re-routing Rx->Tx at the AP */ 1251 /* don't Tx back to source when re-routing Rx->Tx at the AP */
@@ -1298,15 +1265,15 @@ found:
1298 1265
1299 /* find other active vrings and duplicate skb for each */ 1266 /* find other active vrings and duplicate skb for each */
1300 for (i++; i < WIL6210_MAX_TX_RINGS; i++) { 1267 for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
1301 v2 = &wil->vring_tx[i]; 1268 v2 = &wil->ring_tx[i];
1302 txdata2 = &wil->vring_tx_data[i]; 1269 txdata2 = &wil->ring_tx_data[i];
1303 if (!v2->va || txdata2->mid != vif->mid) 1270 if (!v2->va || txdata2->mid != vif->mid)
1304 continue; 1271 continue;
1305 cid = wil->vring2cid_tid[i][0]; 1272 cid = wil->ring2cid_tid[i][0];
1306 if (cid >= WIL6210_MAX_CID) /* skip BCAST */ 1273 if (cid >= WIL6210_MAX_CID) /* skip BCAST */
1307 continue; 1274 continue;
1308 if (!wil->vring_tx_data[i].dot1x_open && 1275 if (!wil->ring_tx_data[i].dot1x_open &&
1309 (skb->protocol != cpu_to_be16(ETH_P_PAE))) 1276 skb->protocol != cpu_to_be16(ETH_P_PAE))
1310 continue; 1277 continue;
1311 1278
1312 if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN)) 1279 if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
@@ -1316,7 +1283,7 @@ found:
1316 if (skb2) { 1283 if (skb2) {
1317 wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i); 1284 wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
1318 wil_set_da_for_vring(wil, skb2, i); 1285 wil_set_da_for_vring(wil, skb2, i);
1319 wil_tx_vring(wil, vif, v2, skb2); 1286 wil_tx_ring(wil, vif, v2, skb2);
1320 } else { 1287 } else {
1321 wil_err(wil, "skb_copy failed\n"); 1288 wil_err(wil, "skb_copy failed\n");
1322 } 1289 }
@@ -1325,28 +1292,6 @@ found:
1325 return v; 1292 return v;
1326} 1293}
1327 1294
1328static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
1329 int vring_index)
1330{
1331 wil_desc_addr_set(&d->dma.addr, pa);
1332 d->dma.ip_length = 0;
1333 /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
1334 d->dma.b11 = 0/*14 | BIT(7)*/;
1335 d->dma.error = 0;
1336 d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
1337 d->dma.length = cpu_to_le16((u16)len);
1338 d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
1339 d->mac.d[0] = 0;
1340 d->mac.d[1] = 0;
1341 d->mac.d[2] = 0;
1342 d->mac.ucode_cmd = 0;
1343 /* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
1344 d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
1345 (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
1346
1347 return 0;
1348}
1349
1350static inline 1295static inline
1351void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags) 1296void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
1352{ 1297{
@@ -1454,7 +1399,7 @@ static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
1454} 1399}
1455 1400
1456static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif, 1401static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
1457 struct vring *vring, struct sk_buff *skb) 1402 struct wil_ring *vring, struct sk_buff *skb)
1458{ 1403{
1459 struct device *dev = wil_to_dev(wil); 1404 struct device *dev = wil_to_dev(wil);
1460 1405
@@ -1474,13 +1419,13 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
1474 int sg_desc_cnt = 0; /* number of descriptors for current mss*/ 1419 int sg_desc_cnt = 0; /* number of descriptors for current mss*/
1475 1420
1476 u32 swhead = vring->swhead; 1421 u32 swhead = vring->swhead;
1477 int used, avail = wil_vring_avail_tx(vring); 1422 int used, avail = wil_ring_avail_tx(vring);
1478 int nr_frags = skb_shinfo(skb)->nr_frags; 1423 int nr_frags = skb_shinfo(skb)->nr_frags;
1479 int min_desc_required = nr_frags + 1; 1424 int min_desc_required = nr_frags + 1;
1480 int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */ 1425 int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */
1481 int f, len, hdrlen, headlen; 1426 int f, len, hdrlen, headlen;
1482 int vring_index = vring - wil->vring_tx; 1427 int vring_index = vring - wil->ring_tx;
1483 struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index]; 1428 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[vring_index];
1484 uint i = swhead; 1429 uint i = swhead;
1485 dma_addr_t pa; 1430 dma_addr_t pa;
1486 const skb_frag_t *frag = NULL; 1431 const skb_frag_t *frag = NULL;
@@ -1548,7 +1493,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
1548 tcp_hdr_len = tcp_hdrlen(skb); 1493 tcp_hdr_len = tcp_hdrlen(skb);
1549 skb_net_hdr_len = skb_network_header_len(skb); 1494 skb_net_hdr_len = skb_network_header_len(skb);
1550 1495
1551 _hdr_desc = &vring->va[i].tx; 1496 _hdr_desc = &vring->va[i].tx.legacy;
1552 1497
1553 pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE); 1498 pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
1554 if (unlikely(dma_mapping_error(dev, pa))) { 1499 if (unlikely(dma_mapping_error(dev, pa))) {
@@ -1556,7 +1501,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
1556 goto err_exit; 1501 goto err_exit;
1557 } 1502 }
1558 1503
1559 wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index); 1504 wil->txrx_ops.tx_desc_map((union wil_tx_desc *)hdr_desc, pa,
1505 hdrlen, vring_index);
1560 wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4, 1506 wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
1561 tcp_hdr_len, skb_net_hdr_len); 1507 tcp_hdr_len, skb_net_hdr_len);
1562 wil_tx_last_desc(hdr_desc); 1508 wil_tx_last_desc(hdr_desc);
@@ -1613,7 +1559,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
1613 goto mem_error; 1559 goto mem_error;
1614 } 1560 }
1615 1561
1616 _desc = &vring->va[i].tx; 1562 _desc = &vring->va[i].tx.legacy;
1617 1563
1618 if (!_first_desc) { 1564 if (!_first_desc) {
1619 _first_desc = _desc; 1565 _first_desc = _desc;
@@ -1623,7 +1569,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
1623 d = &desc_mem; 1569 d = &desc_mem;
1624 } 1570 }
1625 1571
1626 wil_tx_desc_map(d, pa, lenmss, vring_index); 1572 wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
1573 pa, lenmss, vring_index);
1627 wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type, 1574 wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
1628 is_ipv4, tcp_hdr_len, 1575 is_ipv4, tcp_hdr_len,
1629 skb_net_hdr_len); 1576 skb_net_hdr_len);
@@ -1701,8 +1648,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
1701 vring->ctx[i].skb = skb_get(skb); 1648 vring->ctx[i].skb = skb_get(skb);
1702 1649
1703 /* performance monitoring */ 1650 /* performance monitoring */
1704 used = wil_vring_used_tx(vring); 1651 used = wil_ring_used_tx(vring);
1705 if (wil_val_in_range(wil->vring_idle_trsh, 1652 if (wil_val_in_range(wil->ring_idle_trsh,
1706 used, used + descs_used)) { 1653 used, used + descs_used)) {
1707 txdata->idle += get_cycles() - txdata->last_idle; 1654 txdata->idle += get_cycles() - txdata->last_idle;
1708 wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n", 1655 wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
@@ -1717,7 +1664,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
1717 wmb(); 1664 wmb();
1718 1665
1719 /* advance swhead */ 1666 /* advance swhead */
1720 wil_vring_advance_head(vring, descs_used); 1667 wil_ring_advance_head(vring, descs_used);
1721 wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead); 1668 wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
1722 1669
1723 /* make sure all writes to descriptors (shared memory) are done before 1670 /* make sure all writes to descriptors (shared memory) are done before
@@ -1733,12 +1680,12 @@ mem_error:
1733 struct wil_ctx *ctx; 1680 struct wil_ctx *ctx;
1734 1681
1735 i = (swhead + descs_used - 1) % vring->size; 1682 i = (swhead + descs_used - 1) % vring->size;
1736 d = (struct vring_tx_desc *)&vring->va[i].tx; 1683 d = (struct vring_tx_desc *)&vring->va[i].tx.legacy;
1737 _desc = &vring->va[i].tx; 1684 _desc = &vring->va[i].tx.legacy;
1738 *d = *_desc; 1685 *d = *_desc;
1739 _desc->dma.status = TX_DMA_STATUS_DU; 1686 _desc->dma.status = TX_DMA_STATUS_DU;
1740 ctx = &vring->ctx[i]; 1687 ctx = &vring->ctx[i];
1741 wil_txdesc_unmap(dev, d, ctx); 1688 wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
1742 memset(ctx, 0, sizeof(*ctx)); 1689 memset(ctx, 0, sizeof(*ctx));
1743 descs_used--; 1690 descs_used--;
1744 } 1691 }
@@ -1746,26 +1693,26 @@ err_exit:
1746 return rc; 1693 return rc;
1747} 1694}
1748 1695
1749static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, 1696static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
1750 struct vring *vring, struct sk_buff *skb) 1697 struct wil_ring *ring, struct sk_buff *skb)
1751{ 1698{
1752 struct device *dev = wil_to_dev(wil); 1699 struct device *dev = wil_to_dev(wil);
1753 struct vring_tx_desc dd, *d = &dd; 1700 struct vring_tx_desc dd, *d = &dd;
1754 volatile struct vring_tx_desc *_d; 1701 volatile struct vring_tx_desc *_d;
1755 u32 swhead = vring->swhead; 1702 u32 swhead = ring->swhead;
1756 int avail = wil_vring_avail_tx(vring); 1703 int avail = wil_ring_avail_tx(ring);
1757 int nr_frags = skb_shinfo(skb)->nr_frags; 1704 int nr_frags = skb_shinfo(skb)->nr_frags;
1758 uint f = 0; 1705 uint f = 0;
1759 int vring_index = vring - wil->vring_tx; 1706 int ring_index = ring - wil->ring_tx;
1760 struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index]; 1707 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
1761 uint i = swhead; 1708 uint i = swhead;
1762 dma_addr_t pa; 1709 dma_addr_t pa;
1763 int used; 1710 int used;
1764 bool mcast = (vring_index == vif->bcast_vring); 1711 bool mcast = (ring_index == vif->bcast_ring);
1765 uint len = skb_headlen(skb); 1712 uint len = skb_headlen(skb);
1766 1713
1767 wil_dbg_txrx(wil, "tx_vring: %d bytes to vring %d\n", skb->len, 1714 wil_dbg_txrx(wil, "tx_ring: %d bytes to ring %d, nr_frags %d\n",
1768 vring_index); 1715 skb->len, ring_index, nr_frags);
1769 1716
1770 if (unlikely(!txdata->enabled)) 1717 if (unlikely(!txdata->enabled))
1771 return -EINVAL; 1718 return -EINVAL;
@@ -1773,23 +1720,24 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
1773 if (unlikely(avail < 1 + nr_frags)) { 1720 if (unlikely(avail < 1 + nr_frags)) {
1774 wil_err_ratelimited(wil, 1721 wil_err_ratelimited(wil,
1775 "Tx ring[%2d] full. No space for %d fragments\n", 1722 "Tx ring[%2d] full. No space for %d fragments\n",
1776 vring_index, 1 + nr_frags); 1723 ring_index, 1 + nr_frags);
1777 return -ENOMEM; 1724 return -ENOMEM;
1778 } 1725 }
1779 _d = &vring->va[i].tx; 1726 _d = &ring->va[i].tx.legacy;
1780 1727
1781 pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); 1728 pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
1782 1729
1783 wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", vring_index, 1730 wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", ring_index,
1784 skb_headlen(skb), skb->data, &pa); 1731 skb_headlen(skb), skb->data, &pa);
1785 wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1, 1732 wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
1786 skb->data, skb_headlen(skb), false); 1733 skb->data, skb_headlen(skb), false);
1787 1734
1788 if (unlikely(dma_mapping_error(dev, pa))) 1735 if (unlikely(dma_mapping_error(dev, pa)))
1789 return -EINVAL; 1736 return -EINVAL;
1790 vring->ctx[i].mapped_as = wil_mapped_as_single; 1737 ring->ctx[i].mapped_as = wil_mapped_as_single;
1791 /* 1-st segment */ 1738 /* 1-st segment */
1792 wil_tx_desc_map(d, pa, len, vring_index); 1739 wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa, len,
1740 ring_index);
1793 if (unlikely(mcast)) { 1741 if (unlikely(mcast)) {
1794 d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */ 1742 d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
1795 if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */ 1743 if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
@@ -1798,11 +1746,11 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
1798 /* Process TCP/UDP checksum offloading */ 1746 /* Process TCP/UDP checksum offloading */
1799 if (unlikely(wil_tx_desc_offload_setup(d, skb))) { 1747 if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
1800 wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n", 1748 wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
1801 vring_index); 1749 ring_index);
1802 goto dma_error; 1750 goto dma_error;
1803 } 1751 }
1804 1752
1805 vring->ctx[i].nr_frags = nr_frags; 1753 ring->ctx[i].nr_frags = nr_frags;
1806 wil_tx_desc_set_nr_frags(d, nr_frags + 1); 1754 wil_tx_desc_set_nr_frags(d, nr_frags + 1);
1807 1755
1808 /* middle segments */ 1756 /* middle segments */
@@ -1812,20 +1760,21 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
1812 int len = skb_frag_size(frag); 1760 int len = skb_frag_size(frag);
1813 1761
1814 *_d = *d; 1762 *_d = *d;
1815 wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i); 1763 wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
1816 wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4, 1764 wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
1817 (const void *)d, sizeof(*d), false); 1765 (const void *)d, sizeof(*d), false);
1818 i = (swhead + f + 1) % vring->size; 1766 i = (swhead + f + 1) % ring->size;
1819 _d = &vring->va[i].tx; 1767 _d = &ring->va[i].tx.legacy;
1820 pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag), 1768 pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
1821 DMA_TO_DEVICE); 1769 DMA_TO_DEVICE);
1822 if (unlikely(dma_mapping_error(dev, pa))) { 1770 if (unlikely(dma_mapping_error(dev, pa))) {
1823 wil_err(wil, "Tx[%2d] failed to map fragment\n", 1771 wil_err(wil, "Tx[%2d] failed to map fragment\n",
1824 vring_index); 1772 ring_index);
1825 goto dma_error; 1773 goto dma_error;
1826 } 1774 }
1827 vring->ctx[i].mapped_as = wil_mapped_as_page; 1775 ring->ctx[i].mapped_as = wil_mapped_as_page;
1828 wil_tx_desc_map(d, pa, len, vring_index); 1776 wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
1777 pa, len, ring_index);
1829 /* no need to check return code - 1778 /* no need to check return code -
1830 * if it succeeded for 1-st descriptor, 1779 * if it succeeded for 1-st descriptor,
1831 * it will succeed here too 1780 * it will succeed here too
@@ -1837,7 +1786,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
1837 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS); 1786 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
1838 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS); 1787 d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
1839 *_d = *d; 1788 *_d = *d;
1840 wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", vring_index, i); 1789 wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
1841 wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4, 1790 wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
1842 (const void *)d, sizeof(*d), false); 1791 (const void *)d, sizeof(*d), false);
1843 1792
@@ -1845,15 +1794,15 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
1845 * to prevent skb release before accounting 1794 * to prevent skb release before accounting
1846 * in case of immediate "tx done" 1795 * in case of immediate "tx done"
1847 */ 1796 */
1848 vring->ctx[i].skb = skb_get(skb); 1797 ring->ctx[i].skb = skb_get(skb);
1849 1798
1850 /* performance monitoring */ 1799 /* performance monitoring */
1851 used = wil_vring_used_tx(vring); 1800 used = wil_ring_used_tx(ring);
1852 if (wil_val_in_range(wil->vring_idle_trsh, 1801 if (wil_val_in_range(wil->ring_idle_trsh,
1853 used, used + nr_frags + 1)) { 1802 used, used + nr_frags + 1)) {
1854 txdata->idle += get_cycles() - txdata->last_idle; 1803 txdata->idle += get_cycles() - txdata->last_idle;
1855 wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n", 1804 wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
1856 vring_index, used, used + nr_frags + 1); 1805 ring_index, used, used + nr_frags + 1);
1857 } 1806 }
1858 1807
1859 /* Make sure to advance the head only after descriptor update is done. 1808 /* Make sure to advance the head only after descriptor update is done.
@@ -1864,17 +1813,17 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
1864 wmb(); 1813 wmb();
1865 1814
1866 /* advance swhead */ 1815 /* advance swhead */
1867 wil_vring_advance_head(vring, nr_frags + 1); 1816 wil_ring_advance_head(ring, nr_frags + 1);
1868 wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead, 1817 wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", ring_index, swhead,
1869 vring->swhead); 1818 ring->swhead);
1870 trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags); 1819 trace_wil6210_tx(ring_index, swhead, skb->len, nr_frags);
1871 1820
1872 /* make sure all writes to descriptors (shared memory) are done before 1821 /* make sure all writes to descriptors (shared memory) are done before
1873 * committing them to HW 1822 * committing them to HW
1874 */ 1823 */
1875 wmb(); 1824 wmb();
1876 1825
1877 wil_w(wil, vring->hwtail, vring->swhead); 1826 wil_w(wil, ring->hwtail, ring->swhead);
1878 1827
1879 return 0; 1828 return 0;
1880 dma_error: 1829 dma_error:
@@ -1883,12 +1832,14 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
1883 for (f = 0; f < nr_frags; f++) { 1832 for (f = 0; f < nr_frags; f++) {
1884 struct wil_ctx *ctx; 1833 struct wil_ctx *ctx;
1885 1834
1886 i = (swhead + f) % vring->size; 1835 i = (swhead + f) % ring->size;
1887 ctx = &vring->ctx[i]; 1836 ctx = &ring->ctx[i];
1888 _d = &vring->va[i].tx; 1837 _d = &ring->va[i].tx.legacy;
1889 *d = *_d; 1838 *d = *_d;
1890 _d->dma.status = TX_DMA_STATUS_DU; 1839 _d->dma.status = TX_DMA_STATUS_DU;
1891 wil_txdesc_unmap(dev, d, ctx); 1840 wil->txrx_ops.tx_desc_unmap(dev,
1841 (union wil_tx_desc *)d,
1842 ctx);
1892 1843
1893 memset(ctx, 0, sizeof(*ctx)); 1844 memset(ctx, 0, sizeof(*ctx));
1894 } 1845 }
@@ -1896,11 +1847,11 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
1896 return -EINVAL; 1847 return -EINVAL;
1897} 1848}
1898 1849
1899static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif, 1850static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
1900 struct vring *vring, struct sk_buff *skb) 1851 struct wil_ring *ring, struct sk_buff *skb)
1901{ 1852{
1902 int vring_index = vring - wil->vring_tx; 1853 int ring_index = ring - wil->ring_tx;
1903 struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index]; 1854 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
1904 int rc; 1855 int rc;
1905 1856
1906 spin_lock(&txdata->lock); 1857 spin_lock(&txdata->lock);
@@ -1914,8 +1865,8 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
1914 return -EINVAL; 1865 return -EINVAL;
1915 } 1866 }
1916 1867
1917 rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring) 1868 rc = (skb_is_gso(skb) ? wil->txrx_ops.tx_ring_tso : __wil_tx_ring)
1918 (wil, vif, vring, skb); 1869 (wil, vif, ring, skb);
1919 1870
1920 spin_unlock(&txdata->lock); 1871 spin_unlock(&txdata->lock);
1921 1872
@@ -1941,7 +1892,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct wil6210_vif *vif,
1941 */ 1892 */
1942static inline void __wil_update_net_queues(struct wil6210_priv *wil, 1893static inline void __wil_update_net_queues(struct wil6210_priv *wil,
1943 struct wil6210_vif *vif, 1894 struct wil6210_vif *vif,
1944 struct vring *vring, 1895 struct wil_ring *ring,
1945 bool check_stop) 1896 bool check_stop)
1946{ 1897{
1947 int i; 1898 int i;
@@ -1949,9 +1900,9 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
1949 if (unlikely(!vif)) 1900 if (unlikely(!vif))
1950 return; 1901 return;
1951 1902
1952 if (vring) 1903 if (ring)
1953 wil_dbg_txrx(wil, "vring %d, mid %d, check_stop=%d, stopped=%d", 1904 wil_dbg_txrx(wil, "vring %d, mid %d, check_stop=%d, stopped=%d",
1954 (int)(vring - wil->vring_tx), vif->mid, check_stop, 1905 (int)(ring - wil->ring_tx), vif->mid, check_stop,
1955 vif->net_queue_stopped); 1906 vif->net_queue_stopped);
1956 else 1907 else
1957 wil_dbg_txrx(wil, "check_stop=%d, mid=%d, stopped=%d", 1908 wil_dbg_txrx(wil, "check_stop=%d, mid=%d, stopped=%d",
@@ -1962,7 +1913,7 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
1962 return; 1913 return;
1963 1914
1964 if (check_stop) { 1915 if (check_stop) {
1965 if (!vring || unlikely(wil_vring_avail_low(vring))) { 1916 if (!ring || unlikely(wil_ring_avail_low(ring))) {
1966 /* not enough room in the vring */ 1917 /* not enough room in the vring */
1967 netif_tx_stop_all_queues(vif_to_ndev(vif)); 1918 netif_tx_stop_all_queues(vif_to_ndev(vif));
1968 vif->net_queue_stopped = true; 1919 vif->net_queue_stopped = true;
@@ -1978,22 +1929,22 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
1978 1929
1979 /* check wake */ 1930 /* check wake */
1980 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { 1931 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
1981 struct vring *cur_vring = &wil->vring_tx[i]; 1932 struct wil_ring *cur_ring = &wil->ring_tx[i];
1982 struct vring_tx_data *txdata = &wil->vring_tx_data[i]; 1933 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
1983 1934
1984 if (txdata->mid != vif->mid || !cur_vring->va || 1935 if (txdata->mid != vif->mid || !cur_ring->va ||
1985 !txdata->enabled || cur_vring == vring) 1936 !txdata->enabled || cur_ring == ring)
1986 continue; 1937 continue;
1987 1938
1988 if (wil_vring_avail_low(cur_vring)) { 1939 if (wil_ring_avail_low(cur_ring)) {
1989 wil_dbg_txrx(wil, "vring %d full, can't wake\n", 1940 wil_dbg_txrx(wil, "ring %d full, can't wake\n",
1990 (int)(cur_vring - wil->vring_tx)); 1941 (int)(cur_ring - wil->ring_tx));
1991 return; 1942 return;
1992 } 1943 }
1993 } 1944 }
1994 1945
1995 if (!vring || wil_vring_avail_high(vring)) { 1946 if (!ring || wil_ring_avail_high(ring)) {
1996 /* enough room in the vring */ 1947 /* enough room in the ring */
1997 wil_dbg_txrx(wil, "calling netif_tx_wake\n"); 1948 wil_dbg_txrx(wil, "calling netif_tx_wake\n");
1998 netif_tx_wake_all_queues(vif_to_ndev(vif)); 1949 netif_tx_wake_all_queues(vif_to_ndev(vif));
1999 vif->net_queue_stopped = false; 1950 vif->net_queue_stopped = false;
@@ -2001,18 +1952,18 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
2001} 1952}
2002 1953
2003void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif, 1954void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
2004 struct vring *vring, bool check_stop) 1955 struct wil_ring *ring, bool check_stop)
2005{ 1956{
2006 spin_lock(&wil->net_queue_lock); 1957 spin_lock(&wil->net_queue_lock);
2007 __wil_update_net_queues(wil, vif, vring, check_stop); 1958 __wil_update_net_queues(wil, vif, ring, check_stop);
2008 spin_unlock(&wil->net_queue_lock); 1959 spin_unlock(&wil->net_queue_lock);
2009} 1960}
2010 1961
2011void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif, 1962void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
2012 struct vring *vring, bool check_stop) 1963 struct wil_ring *ring, bool check_stop)
2013{ 1964{
2014 spin_lock_bh(&wil->net_queue_lock); 1965 spin_lock_bh(&wil->net_queue_lock);
2015 __wil_update_net_queues(wil, vif, vring, check_stop); 1966 __wil_update_net_queues(wil, vif, ring, check_stop);
2016 spin_unlock_bh(&wil->net_queue_lock); 1967 spin_unlock_bh(&wil->net_queue_lock);
2017} 1968}
2018 1969
@@ -2022,7 +1973,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2022 struct wil6210_priv *wil = vif_to_wil(vif); 1973 struct wil6210_priv *wil = vif_to_wil(vif);
2023 struct ethhdr *eth = (void *)skb->data; 1974 struct ethhdr *eth = (void *)skb->data;
2024 bool bcast = is_multicast_ether_addr(eth->h_dest); 1975 bool bcast = is_multicast_ether_addr(eth->h_dest);
2025 struct vring *vring; 1976 struct wil_ring *ring;
2026 static bool pr_once_fw; 1977 static bool pr_once_fw;
2027 int rc; 1978 int rc;
2028 1979
@@ -2048,36 +1999,36 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2048 /* find vring */ 1999 /* find vring */
2049 if (vif->wdev.iftype == NL80211_IFTYPE_STATION && !vif->pbss) { 2000 if (vif->wdev.iftype == NL80211_IFTYPE_STATION && !vif->pbss) {
2050 /* in STA mode (ESS), all to same VRING (to AP) */ 2001 /* in STA mode (ESS), all to same VRING (to AP) */
2051 vring = wil_find_tx_vring_sta(wil, vif, skb); 2002 ring = wil_find_tx_ring_sta(wil, vif, skb);
2052 } else if (bcast) { 2003 } else if (bcast) {
2053 if (vif->pbss) 2004 if (vif->pbss)
2054 /* in pbss, no bcast VRING - duplicate skb in 2005 /* in pbss, no bcast VRING - duplicate skb in
2055 * all stations VRINGs 2006 * all stations VRINGs
2056 */ 2007 */
2057 vring = wil_find_tx_bcast_2(wil, vif, skb); 2008 ring = wil_find_tx_bcast_2(wil, vif, skb);
2058 else if (vif->wdev.iftype == NL80211_IFTYPE_AP) 2009 else if (vif->wdev.iftype == NL80211_IFTYPE_AP)
2059 /* AP has a dedicated bcast VRING */ 2010 /* AP has a dedicated bcast VRING */
2060 vring = wil_find_tx_bcast_1(wil, vif, skb); 2011 ring = wil_find_tx_bcast_1(wil, vif, skb);
2061 else 2012 else
2062 /* unexpected combination, fallback to duplicating 2013 /* unexpected combination, fallback to duplicating
2063 * the skb in all stations VRINGs 2014 * the skb in all stations VRINGs
2064 */ 2015 */
2065 vring = wil_find_tx_bcast_2(wil, vif, skb); 2016 ring = wil_find_tx_bcast_2(wil, vif, skb);
2066 } else { 2017 } else {
2067 /* unicast, find specific VRING by dest. address */ 2018 /* unicast, find specific VRING by dest. address */
2068 vring = wil_find_tx_ucast(wil, vif, skb); 2019 ring = wil_find_tx_ucast(wil, vif, skb);
2069 } 2020 }
2070 if (unlikely(!vring)) { 2021 if (unlikely(!ring)) {
2071 wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest); 2022 wil_dbg_txrx(wil, "No Tx RING found for %pM\n", eth->h_dest);
2072 goto drop; 2023 goto drop;
2073 } 2024 }
2074 /* set up vring entry */ 2025 /* set up vring entry */
2075 rc = wil_tx_vring(wil, vif, vring, skb); 2026 rc = wil_tx_ring(wil, vif, ring, skb);
2076 2027
2077 switch (rc) { 2028 switch (rc) {
2078 case 0: 2029 case 0:
2079 /* shall we stop net queues? */ 2030 /* shall we stop net queues? */
2080 wil_update_net_queues_bh(wil, vif, vring, true); 2031 wil_update_net_queues_bh(wil, vif, ring, true);
2081 /* statistics will be updated on the tx_complete */ 2032 /* statistics will be updated on the tx_complete */
2082 dev_kfree_skb_any(skb); 2033 dev_kfree_skb_any(skb);
2083 return NETDEV_TX_OK; 2034 return NETDEV_TX_OK;
@@ -2093,22 +2044,6 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2093 return NET_XMIT_DROP; 2044 return NET_XMIT_DROP;
2094} 2045}
2095 2046
2096static inline bool wil_need_txstat(struct sk_buff *skb)
2097{
2098 struct ethhdr *eth = (void *)skb->data;
2099
2100 return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
2101 (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
2102}
2103
2104static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
2105{
2106 if (unlikely(wil_need_txstat(skb)))
2107 skb_complete_wifi_ack(skb, acked);
2108 else
2109 acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
2110}
2111
2112/** 2047/**
2113 * Clean up transmitted skb's from the Tx VRING 2048 * Clean up transmitted skb's from the Tx VRING
2114 * 2049 *
@@ -2121,10 +2056,10 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
2121 struct wil6210_priv *wil = vif_to_wil(vif); 2056 struct wil6210_priv *wil = vif_to_wil(vif);
2122 struct net_device *ndev = vif_to_ndev(vif); 2057 struct net_device *ndev = vif_to_ndev(vif);
2123 struct device *dev = wil_to_dev(wil); 2058 struct device *dev = wil_to_dev(wil);
2124 struct vring *vring = &wil->vring_tx[ringid]; 2059 struct wil_ring *vring = &wil->ring_tx[ringid];
2125 struct vring_tx_data *txdata = &wil->vring_tx_data[ringid]; 2060 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
2126 int done = 0; 2061 int done = 0;
2127 int cid = wil->vring2cid_tid[ringid][0]; 2062 int cid = wil->ring2cid_tid[ringid][0];
2128 struct wil_net_stats *stats = NULL; 2063 struct wil_net_stats *stats = NULL;
2129 volatile struct vring_tx_desc *_d; 2064 volatile struct vring_tx_desc *_d;
2130 int used_before_complete; 2065 int used_before_complete;
@@ -2142,12 +2077,12 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
2142 2077
2143 wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid); 2078 wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);
2144 2079
2145 used_before_complete = wil_vring_used_tx(vring); 2080 used_before_complete = wil_ring_used_tx(vring);
2146 2081
2147 if (cid < WIL6210_MAX_CID) 2082 if (cid < WIL6210_MAX_CID)
2148 stats = &wil->sta[cid].stats; 2083 stats = &wil->sta[cid].stats;
2149 2084
2150 while (!wil_vring_is_empty(vring)) { 2085 while (!wil_ring_is_empty(vring)) {
2151 int new_swtail; 2086 int new_swtail;
2152 struct wil_ctx *ctx = &vring->ctx[vring->swtail]; 2087 struct wil_ctx *ctx = &vring->ctx[vring->swtail];
2153 /** 2088 /**
@@ -2158,7 +2093,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
2158 int lf = (vring->swtail + ctx->nr_frags) % vring->size; 2093 int lf = (vring->swtail + ctx->nr_frags) % vring->size;
2159 /* TODO: check we are not past head */ 2094 /* TODO: check we are not past head */
2160 2095
2161 _d = &vring->va[lf].tx; 2096 _d = &vring->va[lf].tx.legacy;
2162 if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU))) 2097 if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
2163 break; 2098 break;
2164 2099
@@ -2170,7 +2105,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
2170 2105
2171 ctx = &vring->ctx[vring->swtail]; 2106 ctx = &vring->ctx[vring->swtail];
2172 skb = ctx->skb; 2107 skb = ctx->skb;
2173 _d = &vring->va[vring->swtail].tx; 2108 _d = &vring->va[vring->swtail].tx.legacy;
2174 2109
2175 *d = *_d; 2110 *d = *_d;
2176 2111
@@ -2184,7 +2119,9 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
2184 wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4, 2119 wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
2185 (const void *)d, sizeof(*d), false); 2120 (const void *)d, sizeof(*d), false);
2186 2121
2187 wil_txdesc_unmap(dev, d, ctx); 2122 wil->txrx_ops.tx_desc_unmap(dev,
2123 (union wil_tx_desc *)d,
2124 ctx);
2188 2125
2189 if (skb) { 2126 if (skb) {
2190 if (likely(d->dma.error == 0)) { 2127 if (likely(d->dma.error == 0)) {
@@ -2203,7 +2140,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
2203 } 2140 }
2204 memset(ctx, 0, sizeof(*ctx)); 2141 memset(ctx, 0, sizeof(*ctx));
2205 /* Make sure the ctx is zeroed before updating the tail 2142 /* Make sure the ctx is zeroed before updating the tail
2206 * to prevent a case where wil_tx_vring will see 2143 * to prevent a case where wil_tx_ring will see
2207 * this descriptor as used and handle it before ctx zero 2144 * this descriptor as used and handle it before ctx zero
2208 * is completed. 2145 * is completed.
2209 */ 2146 */
@@ -2213,14 +2150,14 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
2213 * so hardware will not try to process this desc., 2150 * so hardware will not try to process this desc.,
2214 * - rest of descriptor will be initialized on Tx. 2151 * - rest of descriptor will be initialized on Tx.
2215 */ 2152 */
2216 vring->swtail = wil_vring_next_tail(vring); 2153 vring->swtail = wil_ring_next_tail(vring);
2217 done++; 2154 done++;
2218 } 2155 }
2219 } 2156 }
2220 2157
2221 /* performance monitoring */ 2158 /* performance monitoring */
2222 used_new = wil_vring_used_tx(vring); 2159 used_new = wil_ring_used_tx(vring);
2223 if (wil_val_in_range(wil->vring_idle_trsh, 2160 if (wil_val_in_range(wil->ring_idle_trsh,
2224 used_new, used_before_complete)) { 2161 used_new, used_before_complete)) {
2225 wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n", 2162 wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
2226 ringid, used_before_complete, used_new); 2163 ringid, used_before_complete, used_new);
@@ -2233,3 +2170,47 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
2233 2170
2234 return done; 2171 return done;
2235} 2172}
2173
2174static inline int wil_tx_init(struct wil6210_priv *wil)
2175{
2176 return 0;
2177}
2178
2179static inline void wil_tx_fini(struct wil6210_priv *wil) {}
2180
2181static void wil_get_reorder_params(struct wil6210_priv *wil,
2182 struct sk_buff *skb, int *tid, int *cid,
2183 int *mid, u16 *seq, int *mcast)
2184{
2185 struct vring_rx_desc *d = wil_skb_rxdesc(skb);
2186
2187 *tid = wil_rxdesc_tid(d);
2188 *cid = wil_rxdesc_cid(d);
2189 *mid = wil_rxdesc_mid(d);
2190 *seq = wil_rxdesc_seq(d);
2191 *mcast = wil_rxdesc_mcast(d);
2192}
2193
2194void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil)
2195{
2196 wil->txrx_ops.configure_interrupt_moderation =
2197 wil_configure_interrupt_moderation;
2198 /* TX ops */
2199 wil->txrx_ops.tx_desc_map = wil_tx_desc_map;
2200 wil->txrx_ops.tx_desc_unmap = wil_txdesc_unmap;
2201 wil->txrx_ops.tx_ring_tso = __wil_tx_vring_tso;
2202 wil->txrx_ops.ring_init_tx = wil_vring_init_tx;
2203 wil->txrx_ops.ring_fini_tx = wil_vring_free;
2204 wil->txrx_ops.ring_init_bcast = wil_vring_init_bcast;
2205 wil->txrx_ops.tx_init = wil_tx_init;
2206 wil->txrx_ops.tx_fini = wil_tx_fini;
2207 /* RX ops */
2208 wil->txrx_ops.rx_init = wil_rx_init;
2209 wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp;
2210 wil->txrx_ops.get_reorder_params = wil_get_reorder_params;
2211 wil->txrx_ops.get_netif_rx_params =
2212 wil_get_netif_rx_params;
2213 wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check;
2214 wil->txrx_ops.is_rx_idle = wil_is_rx_idle;
2215 wil->txrx_ops.rx_fini = wil_rx_fini;
2216}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 5f07717acc2c..f361423628f5 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -18,6 +18,9 @@
18#ifndef WIL6210_TXRX_H 18#ifndef WIL6210_TXRX_H
19#define WIL6210_TXRX_H 19#define WIL6210_TXRX_H
20 20
21#include "wil6210.h"
22#include "txrx_edma.h"
23
21#define BUF_SW_OWNED (1) 24#define BUF_SW_OWNED (1)
22#define BUF_HW_OWNED (0) 25#define BUF_HW_OWNED (0)
23 26
@@ -29,19 +32,13 @@
29 32
30/* Tx/Rx path */ 33/* Tx/Rx path */
31 34
32/* Common representation of physical address in Vring */ 35static inline dma_addr_t wil_desc_addr(struct wil_ring_dma_addr *addr)
33struct vring_dma_addr {
34 __le32 addr_low;
35 __le16 addr_high;
36} __packed;
37
38static inline dma_addr_t wil_desc_addr(struct vring_dma_addr *addr)
39{ 36{
40 return le32_to_cpu(addr->addr_low) | 37 return le32_to_cpu(addr->addr_low) |
41 ((u64)le16_to_cpu(addr->addr_high) << 32); 38 ((u64)le16_to_cpu(addr->addr_high) << 32);
42} 39}
43 40
44static inline void wil_desc_addr_set(struct vring_dma_addr *addr, 41static inline void wil_desc_addr_set(struct wil_ring_dma_addr *addr,
45 dma_addr_t pa) 42 dma_addr_t pa)
46{ 43{
47 addr->addr_low = cpu_to_le32(lower_32_bits(pa)); 44 addr->addr_low = cpu_to_le32(lower_32_bits(pa));
@@ -294,7 +291,7 @@ struct vring_tx_mac {
294 */ 291 */
295struct vring_tx_dma { 292struct vring_tx_dma {
296 u32 d0; 293 u32 d0;
297 struct vring_dma_addr addr; 294 struct wil_ring_dma_addr addr;
298 u8 ip_length; 295 u8 ip_length;
299 u8 b11; /* 0..6: mac_length; 7:ip_version */ 296 u8 b11; /* 0..6: mac_length; 7:ip_version */
300 u8 error; /* 0..2: err; 3..7: reserved; */ 297 u8 error; /* 0..2: err; 3..7: reserved; */
@@ -428,7 +425,7 @@ struct vring_rx_mac {
428 425
429struct vring_rx_dma { 426struct vring_rx_dma {
430 u32 d0; 427 u32 d0;
431 struct vring_dma_addr addr; 428 struct wil_ring_dma_addr addr;
432 u8 ip_length; 429 u8 ip_length;
433 u8 b11; 430 u8 b11;
434 u8 error; 431 u8 error;
@@ -441,14 +438,24 @@ struct vring_tx_desc {
441 struct vring_tx_dma dma; 438 struct vring_tx_dma dma;
442} __packed; 439} __packed;
443 440
441union wil_tx_desc {
442 struct vring_tx_desc legacy;
443 struct wil_tx_enhanced_desc enhanced;
444} __packed;
445
444struct vring_rx_desc { 446struct vring_rx_desc {
445 struct vring_rx_mac mac; 447 struct vring_rx_mac mac;
446 struct vring_rx_dma dma; 448 struct vring_rx_dma dma;
447} __packed; 449} __packed;
448 450
449union vring_desc { 451union wil_rx_desc {
450 struct vring_tx_desc tx; 452 struct vring_rx_desc legacy;
451 struct vring_rx_desc rx; 453 struct wil_rx_enhanced_desc enhanced;
454} __packed;
455
456union wil_ring_desc {
457 union wil_tx_desc tx;
458 union wil_rx_desc rx;
452} __packed; 459} __packed;
453 460
454static inline int wil_rxdesc_tid(struct vring_rx_desc *d) 461static inline int wil_rxdesc_tid(struct vring_rx_desc *d)
@@ -528,6 +535,76 @@ static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb)
528 return (void *)skb->cb; 535 return (void *)skb->cb;
529} 536}
530 537
538static inline int wil_ring_is_empty(struct wil_ring *ring)
539{
540 return ring->swhead == ring->swtail;
541}
542
543static inline u32 wil_ring_next_tail(struct wil_ring *ring)
544{
545 return (ring->swtail + 1) % ring->size;
546}
547
548static inline void wil_ring_advance_head(struct wil_ring *ring, int n)
549{
550 ring->swhead = (ring->swhead + n) % ring->size;
551}
552
553static inline int wil_ring_is_full(struct wil_ring *ring)
554{
555 return wil_ring_next_tail(ring) == ring->swhead;
556}
557
558static inline bool wil_need_txstat(struct sk_buff *skb)
559{
560 struct ethhdr *eth = (void *)skb->data;
561
562 return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
563 (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
564}
565
566static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
567{
568 if (unlikely(wil_need_txstat(skb)))
569 skb_complete_wifi_ack(skb, acked);
570 else
571 acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
572}
573
574/* Used space in Tx ring */
575static inline int wil_ring_used_tx(struct wil_ring *ring)
576{
577 u32 swhead = ring->swhead;
578 u32 swtail = ring->swtail;
579
580 return (ring->size + swhead - swtail) % ring->size;
581}
582
583/* Available space in Tx ring */
584static inline int wil_ring_avail_tx(struct wil_ring *ring)
585{
586 return ring->size - wil_ring_used_tx(ring) - 1;
587}
588
589static inline int wil_get_min_tx_ring_id(struct wil6210_priv *wil)
590{
591 /* In Enhanced DMA ring 0 is reserved for RX */
592 return wil->use_enhanced_dma_hw ? 1 : 0;
593}
594
595/* similar to ieee80211_ version, but FC contain only 1-st byte */
596static inline int wil_is_back_req(u8 fc)
597{
598 return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
599 (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
600}
601
602/* wil_val_in_range - check if value in [min,max) */
603static inline bool wil_val_in_range(int val, int min, int max)
604{
605 return val >= min && val < max;
606}
607
531void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev); 608void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev);
532void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb); 609void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb);
533void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif, 610void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif,
@@ -536,5 +613,7 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
536 int size, u16 ssn); 613 int size, u16 ssn);
537void wil_tid_ampdu_rx_free(struct wil6210_priv *wil, 614void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
538 struct wil_tid_ampdu_rx *r); 615 struct wil_tid_ampdu_rx *r);
616void wil_tx_data_init(struct wil_ring_tx_data *txdata);
617void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil);
539 618
540#endif /* WIL6210_TXRX_H */ 619#endif /* WIL6210_TXRX_H */
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
new file mode 100644
index 000000000000..95f38e65d969
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -0,0 +1,1598 @@
1/*
2 * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/etherdevice.h>
18#include <linux/moduleparam.h>
19#include <linux/prefetch.h>
20#include <linux/types.h>
21#include <linux/list.h>
22#include <linux/ip.h>
23#include <linux/ipv6.h>
24#include "wil6210.h"
25#include "txrx_edma.h"
26#include "txrx.h"
27#include "trace.h"
28
29#define WIL_EDMA_MAX_DATA_OFFSET (2)
30
/* Undo the DMA mapping of one enhanced Tx descriptor, using the same
 * method (single buffer vs. page fragment) recorded at map time in
 * ctx->mapped_as.
 */
static void wil_tx_desc_unmap_edma(struct device *dev,
				   union wil_tx_desc *desc,
				   struct wil_ctx *ctx)
{
	struct wil_tx_enhanced_desc *d = (struct wil_tx_enhanced_desc *)desc;
	dma_addr_t pa = wil_tx_desc_get_addr_edma(&d->dma);
	u16 dmalen = le16_to_cpu(d->dma.length);

	switch (ctx->mapped_as) {
	case wil_mapped_as_single:
		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	case wil_mapped_as_page:
		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		break;
	default:
		/* descriptor carries no mapping - nothing to undo */
		break;
	}
}
50
51static int wil_find_free_sring(struct wil6210_priv *wil)
52{
53 int i;
54
55 for (i = 0; i < WIL6210_MAX_STATUS_RINGS; i++) {
56 if (!wil->srings[i].va)
57 return i;
58 }
59
60 return -EINVAL;
61}
62
/* Free the DMA-coherent memory of a status ring and mark its slot free.
 * Safe to call with a NULL or never-allocated ring (no-op).
 */
static void wil_sring_free(struct wil6210_priv *wil,
			   struct wil_status_ring *sring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz;

	if (!sring || !sring->va)
		return;

	sz = sring->elem_size * sring->size;

	wil_dbg_misc(wil, "status_ring_free, size(bytes)=%zu, 0x%p:%pad\n",
		     sz, sring->va, &sring->pa);

	dma_free_coherent(dev, sz, (void *)sring->va, sring->pa);
	/* va == NULL is how wil_find_free_sring() detects a free slot */
	sring->pa = 0;
	sring->va = NULL;
}
81
/* Allocate the DMA-coherent memory for a status ring.
 * Caller must set sring->size and sring->elem_size beforehand.
 * Returns 0 on success, -EINVAL for a zero-sized ring, -ENOMEM on
 * allocation failure.
 */
static int wil_sring_alloc(struct wil6210_priv *wil,
			   struct wil_status_ring *sring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = sring->elem_size * sring->size;

	wil_dbg_misc(wil, "status_ring_alloc: size=%zu\n", sz);

	if (sz == 0) {
		wil_err(wil, "Cannot allocate a zero size status ring\n");
		return -EINVAL;
	}

	sring->swhead = 0;

	/* Status messages are allocated and initialized to 0. This is necessary
	 * since DR bit should be initialized to 0.
	 */
	sring->va = dma_zalloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
	if (!sring->va)
		return -ENOMEM;

	wil_dbg_misc(wil, "status_ring[%d] 0x%p:%pad\n", sring->size, sring->va,
		     &sring->pa);

	return 0;
}
109
/* Allocate and configure the (single) Tx status ring and register it with
 * FW.  Tx descriptor rings themselves are allocated later, on WMI connect.
 * Returns 0 on success or a negative errno.
 */
static int wil_tx_init_edma(struct wil6210_priv *wil)
{
	int ring_id = wil_find_free_sring(wil);
	struct wil_status_ring *sring;
	int rc;
	u16 status_ring_size;

	/* clamp a debugfs/module-supplied ring order into the valid range */
	if (wil->tx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
	    wil->tx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
		wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;

	status_ring_size = 1 << wil->tx_status_ring_order;

	/* NOTE(review): ring_id may still be negative (-EINVAL) at this
	 * point; it is only logged before the check below, which is
	 * harmless but prints a bogus ring_id on failure.
	 */
	wil_dbg_misc(wil, "init TX sring: size=%u, ring_id=%u\n",
		     status_ring_size, ring_id);

	if (ring_id < 0)
		return ring_id;

	/* Allocate Tx status ring. Tx descriptor rings will be
	 * allocated on WMI connect event
	 */
	sring = &wil->srings[ring_id];

	sring->is_rx = false;
	sring->size = status_ring_size;
	sring->elem_size = sizeof(struct wil_ring_tx_status);
	rc = wil_sring_alloc(wil, sring);
	if (rc)
		return rc;

	/* let FW know about the new status ring */
	rc = wil_wmi_tx_sring_cfg(wil, ring_id);
	if (rc)
		goto out_free;

	/* on the first pass over the ring, ready messages carry DR bit 1 */
	sring->desc_rdy_pol = 1;
	wil->tx_sring_idx = ring_id;

	return 0;
out_free:
	wil_sring_free(wil, sring);
	return rc;
}
153
/**
 * Allocate one skb for Rx descriptor RING
 *
 * Takes a buffer ID from the free list, allocates and DMA-maps an skb,
 * writes the descriptor at index @i and remembers the mapping in skb->cb.
 * Returns 0, -EAGAIN when no buffer ID is free, or -ENOMEM.
 */
static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil,
				   struct wil_ring *ring, u32 i)
{
	struct device *dev = wil_to_dev(wil);
	/* room for the payload plus ethernet header and HW data alignment */
	unsigned int sz = wil->rx_buf_len + ETH_HLEN +
		WIL_EDMA_MAX_DATA_OFFSET;
	dma_addr_t pa;
	u16 buff_id;
	struct list_head *active = &wil->rx_buff_mgmt.active;
	struct list_head *free = &wil->rx_buff_mgmt.free;
	struct wil_rx_buff *rx_buff;
	struct wil_rx_buff *buff_arr = wil->rx_buff_mgmt.buff_arr;
	struct sk_buff *skb;
	/* compose the descriptor in a cached local copy, then store it to
	 * the (uncached, DMA-coherent) ring in a single struct assignment
	 */
	struct wil_rx_enhanced_desc dd, *d = &dd;
	struct wil_rx_enhanced_desc *_d = (struct wil_rx_enhanced_desc *)
		&ring->va[i].rx.enhanced;

	if (unlikely(list_empty(free))) {
		wil->rx_buff_mgmt.free_list_empty_cnt++;
		return -EAGAIN;
	}

	skb = dev_alloc_skb(sz);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, sz);

	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, pa))) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	/* Get the buffer ID - the index of the rx buffer in the buff_arr */
	rx_buff = list_first_entry(free, struct wil_rx_buff, list);
	buff_id = rx_buff->id;

	/* Move a buffer from the free list to the active list */
	list_move(&rx_buff->list, active);

	buff_arr[buff_id].skb = skb;

	wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);
	d->dma.length = cpu_to_le16(sz);
	d->mac.buff_id = cpu_to_le16(buff_id);
	*_d = *d;

	/* Save the physical address in skb->cb for later use in dma_unmap */
	memcpy(skb->cb, &pa, sizeof(pa));

	return 0;
}
210
/* Copy the status message at swhead out of the (DMA-coherent) status
 * ring into the caller-provided buffer; does not advance swhead.
 */
static inline
void wil_get_next_rx_status_msg(struct wil_status_ring *sring, void *msg)
{
	memcpy(msg, (void *)(sring->va + (sring->elem_size * sring->swhead)),
	       sring->elem_size);
}
217
218static inline void wil_sring_advance_swhead(struct wil_status_ring *sring)
219{
220 sring->swhead = (sring->swhead + 1) % sring->size;
221 if (sring->swhead == 0)
222 sring->desc_rdy_pol = 1 - sring->desc_rdy_pol;
223}
224
/* Refill the Rx descriptor ring with freshly allocated skbs until it is
 * full (next head would collide with the HW-reported swtail) or until an
 * allocation fails.  Commits the new head to HW in any case.
 * Returns 0 or the last allocation error.
 */
static int wil_rx_refill_edma(struct wil6210_priv *wil)
{
	struct wil_ring *ring = &wil->ring_rx;
	u32 next_head;
	int rc = 0;
	/* swtail is written back by HW into this coherent location */
	u32 swtail = *ring->edma_rx_swtail.va;

	for (; next_head = wil_ring_next_head(ring), (next_head != swtail);
	     ring->swhead = next_head) {
		rc = wil_ring_alloc_skb_edma(wil, ring, ring->swhead);
		if (unlikely(rc)) {
			if (rc == -EAGAIN)
				wil_dbg_txrx(wil, "No free buffer ID found\n");
			else
				wil_err_ratelimited(wil,
						    "Error %d in refill desc[%d]\n",
						    rc, ring->swhead);
			break;
		}
	}

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, ring->hwtail, ring->swhead);

	return rc;
}
255
/* Walk the Rx descriptor ring from swtail up to (but not including) the
 * slot before swhead, unmap and free every attached skb, and return the
 * associated buffer IDs to the free list.  Used at ring teardown.
 */
static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
					      struct wil_ring *ring)
{
	struct device *dev = wil_to_dev(wil);
	u32 next_tail;
	/* stop one slot past swhead - that slot was never handed to HW */
	u32 swhead = (ring->swhead + 1) % ring->size;
	dma_addr_t pa;
	u16 dmalen;

	for (; next_tail = wil_ring_next_tail(ring), (next_tail != swhead);
	     ring->swtail = next_tail) {
		/* copy the descriptor out of uncached DMA memory first */
		struct wil_rx_enhanced_desc dd, *d = &dd;
		struct wil_rx_enhanced_desc *_d =
			(struct wil_rx_enhanced_desc *)
			&ring->va[ring->swtail].rx.enhanced;
		struct sk_buff *skb;
		u16 buff_id;

		*d = *_d;
		pa = wil_rx_desc_get_addr_edma(&d->dma);
		dmalen = le16_to_cpu(d->dma.length);
		dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);

		/* Extract the SKB from the rx_buff management array */
		buff_id = __le16_to_cpu(d->mac.buff_id);
		if (buff_id >= wil->rx_buff_mgmt.size) {
			/* corrupt descriptor - skip rather than index out
			 * of bounds; that buffer slot is left unreclaimed
			 */
			wil_err(wil, "invalid buff_id %d\n", buff_id);
			continue;
		}
		skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
		wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
		if (unlikely(!skb))
			wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
		else
			kfree_skb(skb);

		/* Move the buffer from the active to the free list */
		list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
			  &wil->rx_buff_mgmt.free);
	}
}
297
/* Release the Rx buffer management array, first freeing any skbs that are
 * still attached to active buffers.  No-op when never allocated.
 */
static void wil_free_rx_buff_arr(struct wil6210_priv *wil)
{
	struct wil_ring *ring = &wil->ring_rx;

	if (!wil->rx_buff_mgmt.buff_arr)
		return;

	/* Move all the buffers to the free list in case active list is
	 * not empty in order to release all SKBs before deleting the array
	 */
	wil_move_all_rx_buff_to_free_list(wil, ring);

	kfree(wil->rx_buff_mgmt.buff_arr);
	wil->rx_buff_mgmt.buff_arr = NULL;
}
313
/* Allocate the Rx buffer management array of @size entries, assign each
 * entry its index as buffer ID and place all entries on the free list.
 * Returns 0 or -ENOMEM.
 */
static int wil_init_rx_buff_arr(struct wil6210_priv *wil,
				size_t size)
{
	struct wil_rx_buff *buff_arr;
	struct list_head *active = &wil->rx_buff_mgmt.active;
	struct list_head *free = &wil->rx_buff_mgmt.free;
	int i;

	wil->rx_buff_mgmt.buff_arr = kcalloc(size, sizeof(struct wil_rx_buff),
					     GFP_KERNEL);
	if (!wil->rx_buff_mgmt.buff_arr)
		return -ENOMEM;

	/* Set list heads */
	INIT_LIST_HEAD(active);
	INIT_LIST_HEAD(free);

	/* Linkify the list */
	buff_arr = wil->rx_buff_mgmt.buff_arr;
	for (i = 0; i < size; i++) {
		list_add(&buff_arr[i].list, free);
		buff_arr[i].id = i;
	}

	wil->rx_buff_mgmt.size = size;

	return 0;
}
342
343static int wil_init_rx_sring(struct wil6210_priv *wil,
344 u16 status_ring_size,
345 size_t elem_size,
346 u16 ring_id)
347{
348 struct wil_status_ring *sring = &wil->srings[ring_id];
349 int rc;
350
351 wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n", sring->size,
352 ring_id);
353
354 memset(&sring->rx_data, 0, sizeof(sring->rx_data));
355
356 sring->is_rx = true;
357 sring->size = status_ring_size;
358 sring->elem_size = elem_size;
359 rc = wil_sring_alloc(wil, sring);
360 if (rc)
361 return rc;
362
363 rc = wil_wmi_rx_sring_add(wil, ring_id);
364 if (rc)
365 goto out_free;
366
367 sring->desc_rdy_pol = 1;
368
369 return 0;
370out_free:
371 wil_sring_free(wil, sring);
372 return rc;
373}
374
/* Allocate a descriptor ring (Rx or Tx) plus its software context array.
 * For Rx rings, also allocate the coherent word HW uses to report the
 * software tail.  Returns 0 or -ENOMEM, rolling back partial allocations.
 */
static int wil_ring_alloc_desc_ring(struct wil6210_priv *wil,
				    struct wil_ring *ring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = ring->size * sizeof(ring->va[0]);

	wil_dbg_misc(wil, "alloc_desc_ring:\n");

	/* HW expects 32-byte descriptors */
	BUILD_BUG_ON(sizeof(ring->va[0]) != 32);

	ring->swhead = 0;
	ring->swtail = 0;
	ring->ctx = kcalloc(ring->size, sizeof(ring->ctx[0]), GFP_KERNEL);
	if (!ring->ctx)
		goto err;

	ring->va = dma_zalloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
	if (!ring->va)
		goto err_free_ctx;

	if (ring->is_rx) {
		/* location where HW writes back the Rx software tail */
		sz = sizeof(*ring->edma_rx_swtail.va);
		ring->edma_rx_swtail.va =
			dma_zalloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
					    GFP_KERNEL);
		if (!ring->edma_rx_swtail.va)
			goto err_free_va;
	}

	wil_dbg_misc(wil, "%s ring[%d] 0x%p:%pad 0x%p\n",
		     ring->is_rx ? "RX" : "TX",
		     ring->size, ring->va, &ring->pa, ring->ctx);

	return 0;
err_free_va:
	dma_free_coherent(dev, ring->size * sizeof(ring->va[0]),
			  (void *)ring->va, ring->pa);
	ring->va = NULL;
err_free_ctx:
	kfree(ring->ctx);
	ring->ctx = NULL;
err:
	return -ENOMEM;
}
419
420static void wil_ring_free_edma(struct wil6210_priv *wil, struct wil_ring *ring)
421{
422 struct device *dev = wil_to_dev(wil);
423 size_t sz;
424 int ring_index = 0;
425
426 if (!ring->va)
427 return;
428
429 sz = ring->size * sizeof(ring->va[0]);
430
431 lockdep_assert_held(&wil->mutex);
432 if (ring->is_rx) {
433 wil_dbg_misc(wil, "free Rx ring [%d] 0x%p:%pad 0x%p\n",
434 ring->size, ring->va,
435 &ring->pa, ring->ctx);
436
437 wil_move_all_rx_buff_to_free_list(wil, ring);
438 goto out;
439 }
440
441 /* TX ring */
442 ring_index = ring - wil->ring_tx;
443
444 wil_dbg_misc(wil, "free Tx ring %d [%d] 0x%p:%pad 0x%p\n",
445 ring_index, ring->size, ring->va,
446 &ring->pa, ring->ctx);
447
448 while (!wil_ring_is_empty(ring)) {
449 struct wil_ctx *ctx;
450
451 struct wil_tx_enhanced_desc dd, *d = &dd;
452 struct wil_tx_enhanced_desc *_d =
453 (struct wil_tx_enhanced_desc *)
454 &ring->va[ring->swtail].tx.enhanced;
455
456 ctx = &ring->ctx[ring->swtail];
457 if (!ctx) {
458 wil_dbg_txrx(wil,
459 "ctx(%d) was already completed\n",
460 ring->swtail);
461 ring->swtail = wil_ring_next_tail(ring);
462 continue;
463 }
464 *d = *_d;
465 wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
466 if (ctx->skb)
467 dev_kfree_skb_any(ctx->skb);
468 ring->swtail = wil_ring_next_tail(ring);
469 }
470
471out:
472 dma_free_coherent(dev, sz, (void *)ring->va, ring->pa);
473 kfree(ring->ctx);
474 ring->pa = 0;
475 ring->va = NULL;
476 ring->ctx = NULL;
477}
478
/* Allocate the Rx descriptor ring and register it with FW, bound to the
 * given status ring.  Returns 0 or a negative errno; the ring is freed
 * again on WMI failure.
 */
static int wil_init_rx_desc_ring(struct wil6210_priv *wil, u16 desc_ring_size,
				 int status_ring_id)
{
	struct wil_ring *ring = &wil->ring_rx;
	int rc;

	wil_dbg_misc(wil, "init RX desc ring\n");

	ring->size = desc_ring_size;
	ring->is_rx = true;
	rc = wil_ring_alloc_desc_ring(wil, ring);
	if (rc)
		return rc;

	rc = wil_wmi_rx_desc_ring_add(wil, status_ring_id);
	if (rc)
		goto out_free;

	return 0;
out_free:
	wil_ring_free_edma(wil, ring);
	return rc;
}
502
/* Extract the reorder-relevant fields (TID/CID/MID/sequence/mcast) from
 * the Rx status message previously saved in skb->cb.
 */
static void wil_get_reorder_params_edma(struct wil6210_priv *wil,
					struct sk_buff *skb, int *tid,
					int *cid, int *mid, u16 *seq,
					int *mcast)
{
	struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);

	*tid = wil_rx_status_get_tid(s);
	*cid = wil_rx_status_get_cid(s);
	*mid = wil_rx_status_get_mid(s);
	*seq = le16_to_cpu(wil_rx_status_get_seq(wil, s));
	*mcast = wil_rx_status_get_mcast(s);
}
516
/* Extract the CID and security flag from the Rx status message saved in
 * skb->cb, for use on the netif delivery path.
 */
static void wil_get_netif_rx_params_edma(struct sk_buff *skb, int *cid,
					 int *security)
{
	struct wil_rx_status_extended *s = wil_skb_rxstatus(skb);

	*cid = wil_rx_status_get_cid(s);
	*security = wil_rx_status_get_security(s);
}
525
/* Replay (PN) check for a received frame when reordering is done in SW.
 * Returns 0 when the frame passes (PN recorded), -EINVAL on a missing key
 * or a non-increasing PN (replay).
 */
static int wil_rx_crypto_check_edma(struct wil6210_priv *wil,
				    struct sk_buff *skb)
{
	struct wil_rx_status_extended *st;
	int cid, tid, key_id, mc;
	struct wil_sta_info *s;
	struct wil_tid_crypto_rx *c;
	struct wil_tid_crypto_rx_single *cc;
	const u8 *pn;

	/* In HW reorder, HW is responsible for crypto check */
	if (wil->use_rx_hw_reordering)
		return 0;

	st = wil_skb_rxstatus(skb);

	/* NOTE(review): cid/tid/key_id come from the HW status message and
	 * are used as array indices without range validation here -
	 * presumably guaranteed in-range upstream; verify against callers.
	 */
	cid = wil_rx_status_get_cid(st);
	tid = wil_rx_status_get_tid(st);
	key_id = wil_rx_status_get_key_id(st);
	mc = wil_rx_status_get_mcast(st);
	s = &wil->sta[cid];
	c = mc ? &s->group_crypto_rx : &s->tid_crypto_rx[tid];
	cc = &c->key_id[key_id];
	pn = (u8 *)&st->ext.pn_15_0;

	if (!cc->key_set) {
		wil_err_ratelimited(wil,
				    "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
				    cid, tid, mc, key_id);
		return -EINVAL;
	}

	/* PN must strictly increase; <= 0 means replayed or duplicated */
	if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
		wil_err_ratelimited(wil,
				    "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
				    cid, tid, mc, key_id, pn, cc->pn);
		return -EINVAL;
	}
	memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);

	return 0;
}
568
/* Return true when no allocated Rx status ring has a pending (unhandled)
 * status message, i.e. the Rx path is fully drained.
 */
static bool wil_is_rx_idle_edma(struct wil6210_priv *wil)
{
	struct wil_status_ring *sring;
	struct wil_rx_status_extended msg1;
	void *msg = &msg1;
	u8 dr_bit;
	int i;

	for (i = 0; i < wil->num_rx_status_rings; i++) {
		sring = &wil->srings[i];
		if (!sring->va)
			continue;

		wil_get_next_rx_status_msg(sring, msg);
		dr_bit = wil_rx_status_get_desc_rdy_bit(msg);

		/* Check if there are unhandled RX status messages */
		if (dr_bit == sring->desc_rdy_pol)
			return false;
	}

	return true;
}
592
593static void wil_rx_buf_len_init_edma(struct wil6210_priv *wil)
594{
595 wil->rx_buf_len = rx_large_buf ?
596 WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
597}
598
/* Initialize enhanced-DMA Rx: allocate the status ring(s), the descriptor
 * ring and the buffer-ID management array, then pre-fill the descriptor
 * ring with skbs.  Returns 0 or a negative errno, unwinding on failure.
 */
static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size)
{
	u16 status_ring_size;
	struct wil_ring *ring = &wil->ring_rx;
	int rc;
	size_t elem_size = wil->use_compressed_rx_status ?
		sizeof(struct wil_rx_status_compressed) :
		sizeof(struct wil_rx_status_extended);
	int i;
	u16 max_rx_pl_per_desc;

	/* In SW reorder one must use extended status messages */
	if (wil->use_compressed_rx_status && !wil->use_rx_hw_reordering) {
		wil_err(wil,
			"compressed RX status cannot be used with SW reorder\n");
		return -EINVAL;
	}

	/* clamp a debugfs/module-supplied ring order into the valid range */
	if (wil->rx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
	    wil->rx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
		wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;

	status_ring_size = 1 << wil->rx_status_ring_order;

	wil_dbg_misc(wil,
		     "rx_init, desc_ring_size=%u, status_ring_size=%u, elem_size=%zu\n",
		     desc_ring_size, status_ring_size, elem_size);

	wil_rx_buf_len_init_edma(wil);

	max_rx_pl_per_desc = wil->rx_buf_len + ETH_HLEN +
		WIL_EDMA_MAX_DATA_OFFSET;

	/* Use debugfs dbg_num_rx_srings if set, reserve one sring for TX */
	if (wil->num_rx_status_rings > WIL6210_MAX_STATUS_RINGS - 1)
		wil->num_rx_status_rings = WIL6210_MAX_STATUS_RINGS - 1;

	wil_dbg_misc(wil, "rx_init: allocate %d status rings\n",
		     wil->num_rx_status_rings);

	rc = wil_wmi_cfg_def_rx_offload(wil, max_rx_pl_per_desc);
	if (rc)
		return rc;

	/* Allocate status ring */
	for (i = 0; i < wil->num_rx_status_rings; i++) {
		int sring_id = wil_find_free_sring(wil);

		if (sring_id < 0) {
			rc = -EFAULT;
			goto err_free_status;
		}
		rc = wil_init_rx_sring(wil, status_ring_size, elem_size,
				       sring_id);
		if (rc)
			goto err_free_status;
	}

	/* Allocate descriptor ring */
	rc = wil_init_rx_desc_ring(wil, desc_ring_size,
				   WIL_DEFAULT_RX_STATUS_RING_ID);
	if (rc)
		goto err_free_status;

	if (wil->rx_buff_id_count >= status_ring_size) {
		wil_info(wil,
			 "rx_buff_id_count %d exceeds sring_size %d. set it to %d\n",
			 wil->rx_buff_id_count, status_ring_size,
			 status_ring_size - 1);
		wil->rx_buff_id_count = status_ring_size - 1;
	}

	/* Allocate Rx buffer array */
	rc = wil_init_rx_buff_arr(wil, wil->rx_buff_id_count);
	if (rc)
		goto err_free_desc;

	/* Fill descriptor ring with credits */
	rc = wil_rx_refill_edma(wil);
	if (rc)
		goto err_free_rx_buff_arr;

	return 0;
err_free_rx_buff_arr:
	wil_free_rx_buff_arr(wil);
err_free_desc:
	wil_ring_free_edma(wil, ring);
err_free_status:
	/* NOTE(review): this frees srings[0..num_rx_status_rings-1] by
	 * index; it assumes the Tx status ring does not occupy one of
	 * those slots at this point - confirm the init ordering.
	 */
	for (i = 0; i < wil->num_rx_status_rings; i++)
		wil_sring_free(wil, &wil->srings[i]);

	return rc;
}
692
/* Allocate a Tx descriptor ring for the given CID/TID and register it
 * with FW.  Must be called with wil->mutex held.
 * Returns 0 or a negative errno; on WMI failure the ring is freed and the
 * ring-to-CID/TID mapping reset.
 */
static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id,
				 int size, int cid, int tid)
{
	struct wil6210_priv *wil = vif_to_wil(vif);
	int rc;
	struct wil_ring *ring = &wil->ring_tx[ring_id];
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];

	lockdep_assert_held(&wil->mutex);

	wil_dbg_misc(wil,
		     "init TX ring: ring_id=%u, cid=%u, tid=%u, sring_id=%u\n",
		     ring_id, cid, tid, wil->tx_sring_idx);

	wil_tx_data_init(txdata);
	ring->size = size;
	rc = wil_ring_alloc_desc_ring(wil, ring);
	if (rc)
		goto out;

	wil->ring2cid_tid[ring_id][0] = cid;
	wil->ring2cid_tid[ring_id][1] = tid;
	/* without privacy there is no 802.1x handshake gating data flow */
	if (!vif->privacy)
		txdata->dot1x_open = true;

	rc = wil_wmi_tx_desc_ring_add(vif, ring_id, cid, tid);
	if (rc) {
		wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed\n");
		goto out_free;
	}

	/* request block-ack aggregation right away when permitted */
	if (txdata->dot1x_open && agg_wsize >= 0)
		wil_addba_tx_request(wil, ring_id, agg_wsize);

	return 0;
 out_free:
	spin_lock_bh(&txdata->lock);
	txdata->dot1x_open = false;
	txdata->enabled = 0;
	spin_unlock_bh(&txdata->lock);
	wil_ring_free_edma(wil, ring);
	wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID;
	wil->ring2cid_tid[ring_id][1] = 0;

 out:
	return rc;
}
740
/* This function is used only for RX SW reorder.
 * Handle a non-data frame delivered through the Rx path: dispatch a BAR
 * to the reorder logic, dump anything else for debugging.
 * Returns 0 for data frames (caller proceeds) and -EAGAIN otherwise
 * (caller drops the skb).
 */
static int wil_check_bar(struct wil6210_priv *wil, void *msg, int cid,
			 struct sk_buff *skb, struct wil_net_stats *stats)
{
	u8 ftype;
	u8 fc1;
	int mid;
	int tid;
	u16 seq;
	struct wil6210_vif *vif;

	ftype = wil_rx_status_get_frame_type(wil, msg);
	if (ftype == IEEE80211_FTYPE_DATA)
		return 0;

	fc1 = wil_rx_status_get_fc1(wil, msg);
	mid = wil_rx_status_get_mid(msg);
	tid = wil_rx_status_get_tid(msg);
	seq = le16_to_cpu(wil_rx_status_get_seq(wil, msg));
	vif = wil->vifs[mid];

	if (unlikely(!vif)) {
		wil_dbg_txrx(wil, "RX descriptor with invalid mid %d", mid);
		return -EAGAIN;
	}

	wil_dbg_txrx(wil,
		     "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
		     fc1, mid, cid, tid, seq);
	if (stats)
		stats->rx_non_data_frame++;
	if (wil_is_back_req(fc1)) {
		wil_dbg_txrx(wil,
			     "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
			     mid, cid, tid, seq);
		wil_rx_bar(wil, vif, cid, tid, seq);
	} else {
		u32 sz = wil->use_compressed_rx_status ?
			sizeof(struct wil_rx_status_compressed) :
			sizeof(struct wil_rx_status_extended);

		/* print again all info. One can enable only this
		 * without overhead for printing every Rx frame
		 */
		wil_dbg_txrx(wil,
			     "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
			     fc1, mid, cid, tid, seq);
		wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
				  (const void *)msg, sz, false);
		wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
				  skb->data, skb_headlen(skb), false);
	}

	return -EAGAIN;
}
796
/* Inspect the HW error bits of one Rx status message.
 * Returns 0 when the frame may be delivered (CHECKSUM_UNNECESSARY set if
 * HW validated it), -EFAULT on an L2 error - the frame must be dropped
 * and the matching per-station error counter is bumped.
 */
static int wil_rx_edma_check_errors(struct wil6210_priv *wil, void *msg,
				    struct wil_net_stats *stats,
				    struct sk_buff *skb)
{
	int error;
	int l2_rx_status;
	int l3_rx_status;
	int l4_rx_status;

	error = wil_rx_status_get_error(msg);
	if (!error) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return 0;
	}

	l2_rx_status = wil_rx_status_get_l2_rx_status(msg);
	if (l2_rx_status != 0) {
		wil_dbg_txrx(wil, "L2 RX error, l2_rx_status=0x%x\n",
			     l2_rx_status);
		/* Due to HW issue, KEY error will trigger a MIC error */
		if (l2_rx_status & WIL_RX_EDMA_ERROR_MIC) {
			wil_dbg_txrx(wil,
				     "L2 MIC/KEY error, dropping packet\n");
			stats->rx_mic_error++;
		}
		if (l2_rx_status & WIL_RX_EDMA_ERROR_KEY) {
			wil_dbg_txrx(wil, "L2 KEY error, dropping packet\n");
			stats->rx_key_error++;
		}
		if (l2_rx_status & WIL_RX_EDMA_ERROR_REPLAY) {
			wil_dbg_txrx(wil,
				     "L2 REPLAY error, dropping packet\n");
			stats->rx_replay++;
		}
		if (l2_rx_status & WIL_RX_EDMA_ERROR_AMSDU) {
			wil_dbg_txrx(wil,
				     "L2 AMSDU error, dropping packet\n");
			stats->rx_amsdu_error++;
		}
		return -EFAULT;
	}

	l3_rx_status = wil_rx_status_get_l3_rx_status(msg);
	l4_rx_status = wil_rx_status_get_l4_rx_status(msg);
	if (!l3_rx_status && !l4_rx_status)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	/* If HW reports bad checksum, let IP stack re-check it
	 * For example, HW don't understand Microsoft IP stack that
	 * mis-calculates TCP checksum - if it should be 0x0,
	 * it writes 0xffff in violation of RFC 1624
	 */

	return 0;
}
851
/* Reap one complete Rx packet from a status ring.
 * Handles multi-descriptor (chained) packets by coalescing skbs until the
 * EOP bit is seen; a bad fragment poisons the rest of its chain via
 * rxdata->skipping.  Returns the assembled skb with the status message
 * copied into skb->cb, or NULL when no more ready messages exist.
 */
static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
					      struct wil_status_ring *sring)
{
	struct device *dev = wil_to_dev(wil);
	struct wil_rx_status_extended msg1;
	void *msg = &msg1;
	u16 buff_id;
	struct sk_buff *skb;
	dma_addr_t pa;
	struct wil_ring_rx_data *rxdata = &sring->rx_data;
	unsigned int sz = wil->rx_buf_len + ETH_HLEN +
		WIL_EDMA_MAX_DATA_OFFSET;
	struct wil_net_stats *stats = NULL;
	u16 dmalen;
	int cid;
	int rc;
	bool eop, headstolen;
	int delta;
	u8 dr_bit;
	u8 data_offset;
	struct wil_rx_status_extended *s;
	u16 sring_idx = sring - wil->srings;

	/* the status message is stashed in skb->cb below - it must fit */
	BUILD_BUG_ON(sizeof(struct wil_rx_status_extended) > sizeof(skb->cb));

again:
	wil_get_next_rx_status_msg(sring, msg);
	dr_bit = wil_rx_status_get_desc_rdy_bit(msg);

	/* Completed handling all the ready status messages */
	if (dr_bit != sring->desc_rdy_pol)
		return NULL;

	/* Extract the buffer ID from the status message */
	buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
	if (unlikely(!wil_val_in_range(buff_id, 0, wil->rx_buff_mgmt.size))) {
		wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
			buff_id, sring->swhead);
		wil_sring_advance_swhead(sring);
		goto again;
	}

	wil_sring_advance_swhead(sring);

	/* Extract the SKB from the rx_buff management array */
	skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
	wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
	if (!skb) {
		wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
		goto again;
	}

	/* skb->cb has held the DMA address since refill time */
	memcpy(&pa, skb->cb, sizeof(pa));
	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
	dmalen = le16_to_cpu(wil_rx_status_get_length(msg));

	trace_wil6210_rx_status(wil, wil->use_compressed_rx_status, buff_id,
				msg);
	wil_dbg_txrx(wil, "Rx, buff_id=%u, sring_idx=%u, dmalen=%u bytes\n",
		     buff_id, sring_idx, dmalen);
	wil_hex_dump_txrx("RxS ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)msg, wil->use_compressed_rx_status ?
			  sizeof(struct wil_rx_status_compressed) :
			  sizeof(struct wil_rx_status_extended), false);

	/* Move the buffer from the active list to the free list */
	list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
		  &wil->rx_buff_mgmt.free);

	eop = wil_rx_status_get_eop(msg);

	cid = wil_rx_status_get_cid(msg);
	if (unlikely(!wil_val_in_range(cid, 0, WIL6210_MAX_CID))) {
		wil_err(wil, "Corrupt cid=%d, sring->swhead=%d\n",
			cid, sring->swhead);
		rxdata->skipping = true;
		goto skipping;
	}
	stats = &wil->sta[cid].stats;

	if (unlikely(skb->len < ETH_HLEN)) {
		wil_dbg_txrx(wil, "Short frame, len = %d\n", skb->len);
		stats->rx_short_frame++;
		rxdata->skipping = true;
		goto skipping;
	}

	/* Check and treat errors reported by HW */
	rc = wil_rx_edma_check_errors(wil, msg, stats, skb);
	if (rc) {
		rxdata->skipping = true;
		goto skipping;
	}

	/* falls through to the skipping label either way */
	if (unlikely(dmalen > sz)) {
		wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
		stats->rx_large_frame++;
		rxdata->skipping = true;
	}

skipping:
	/* skipping indicates if a certain SKB should be dropped.
	 * It is set in case there is an error on the current SKB or in case
	 * of RX chaining: as long as we manage to merge the SKBs it will
	 * be false. once we have a bad SKB or we don't manage to merge SKBs
	 * it will be set to the !EOP value of the current SKB.
	 * This guarantees that all the following SKBs until EOP will also
	 * get dropped.
	 */
	if (unlikely(rxdata->skipping)) {
		kfree_skb(skb);
		if (rxdata->skb) {
			kfree_skb(rxdata->skb);
			rxdata->skb = NULL;
		}
		rxdata->skipping = !eop;
		goto again;
	}

	skb_trim(skb, dmalen);

	prefetch(skb->data);

	if (!rxdata->skb) {
		rxdata->skb = skb;
	} else {
		/* multi-descriptor packet - append this fragment */
		if (likely(skb_try_coalesce(rxdata->skb, skb, &headstolen,
					    &delta))) {
			kfree_skb_partial(skb, headstolen);
		} else {
			wil_err(wil, "failed to merge skbs!\n");
			kfree_skb(skb);
			kfree_skb(rxdata->skb);
			rxdata->skb = NULL;
			rxdata->skipping = !eop;
			goto again;
		}
	}

	if (!eop)
		goto again;

	/* reaching here rxdata->skb always contains a full packet */
	skb = rxdata->skb;
	rxdata->skb = NULL;
	rxdata->skipping = false;

	if (stats) {
		stats->last_mcs_rx = wil_rx_status_get_mcs(msg);
		if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
			stats->rx_per_mcs[stats->last_mcs_rx]++;
	}

	if (!wil->use_rx_hw_reordering && !wil->use_compressed_rx_status &&
	    wil_check_bar(wil, msg, cid, skb, stats) == -EAGAIN) {
		kfree_skb(skb);
		goto again;
	}

	/* Compensate for the HW data alignment according to the status
	 * message
	 */
	data_offset = wil_rx_status_get_data_offset(msg);
	if (data_offset == 0xFF ||
	    data_offset > WIL_EDMA_MAX_DATA_OFFSET) {
		wil_err(wil, "Unexpected data offset %d\n", data_offset);
		kfree_skb(skb);
		goto again;
	}

	skb_pull(skb, data_offset);

	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	/* Has to be done after dma_unmap_single as skb->cb is also
	 * used for holding the pa
	 */
	s = wil_skb_rxstatus(skb);
	memcpy(s, msg, sring->elem_size);

	return skb;
}
1035
/* Rx handler (NAPI context): drain up to *quota packets from all Rx
 * status rings, deliver each either directly to the net stack or via the
 * SW reorder buffer, acknowledge progress to HW and refill descriptors.
 */
void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota)
{
	struct net_device *ndev;
	struct wil_ring *ring = &wil->ring_rx;
	struct wil_status_ring *sring;
	struct sk_buff *skb;
	int i;

	if (unlikely(!ring->va)) {
		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
		return;
	}
	wil_dbg_txrx(wil, "rx_handle\n");

	for (i = 0; i < wil->num_rx_status_rings; i++) {
		sring = &wil->srings[i];
		if (unlikely(!sring->va)) {
			wil_err(wil,
				"Rx IRQ while Rx status ring %d not yet initialized\n",
				i);
			continue;
		}

		while ((*quota > 0) &&
		       (NULL != (skb =
			wil_sring_reap_rx_edma(wil, sring)))) {
			(*quota)--;
			if (wil->use_rx_hw_reordering) {
				void *msg = wil_skb_rxstatus(skb);
				int mid = wil_rx_status_get_mid(msg);
				struct wil6210_vif *vif = wil->vifs[mid];

				if (unlikely(!vif)) {
					wil_dbg_txrx(wil,
						     "RX desc invalid mid %d",
						     mid);
					kfree_skb(skb);
					continue;
				}
				ndev = vif_to_ndev(vif);
				wil_netif_rx_any(skb, ndev);
			} else {
				wil_rx_reorder(wil, skb);
			}
		}

		/* tell HW how far we got; size is a power of two, so the
		 * unsigned (swhead - 1) % size also wraps correctly at 0
		 */
		wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);
	}

	wil_rx_refill_edma(wil);
}
1087
/* Fill one enhanced Tx descriptor with the DMA address/length of a buffer
 * and the static MAC configuration (queue ID, SNAP header insertion, eth
 * translation mode).  Always returns 0.
 */
static int wil_tx_desc_map_edma(union wil_tx_desc *desc,
				dma_addr_t pa,
				u32 len,
				int ring_index)
{
	struct wil_tx_enhanced_desc *d =
		(struct wil_tx_enhanced_desc *)&desc->enhanced;

	memset(d, 0, sizeof(struct wil_tx_enhanced_desc));

	wil_desc_set_addr_edma(&d->dma.addr, &d->dma.addr_high_high, pa);

	/* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
	d->dma.length = cpu_to_le16((u16)len);
	d->mac.d[0] = (ring_index << WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS);
	/* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi;
	 * 3 - eth mode
	 */
	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
		      (0x3 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

	return 0;
}
1111
/* Copy the Tx status message at swhead out of the (DMA-coherent) status
 * ring into the caller-provided struct; does not advance swhead.
 */
static inline void
wil_get_next_tx_status_msg(struct wil_status_ring *sring,
			   struct wil_ring_tx_status *msg)
{
	struct wil_ring_tx_status *_msg = (struct wil_ring_tx_status *)
		(sring->va + (sring->elem_size * sring->swhead));

	*msg = *_msg;
}
1121
/**
 * Clean up transmitted skb's from the Tx descriptor RING.
 * Return number of descriptors cleared.
 *
 * Walks the TX status ring while the Descriptor Ready bit matches the
 * expected polarity (the HW flips DR each wrap-around); each status
 * message names a descriptor ring and how many of its descriptors
 * completed. Completed buffers are unmapped, accounted and freed, and
 * the status ring's HW tail pointer is advanced at the end.
 */
int wil_tx_sring_handler(struct wil6210_priv *wil,
			 struct wil_status_ring *sring)
{
	struct net_device *ndev;
	struct device *dev = wil_to_dev(wil);
	struct wil_ring *ring = NULL;
	struct wil_ring_tx_data *txdata;
	/* Total number of completed descriptors in all descriptor rings */
	int desc_cnt = 0;
	int cid;
	struct wil_net_stats *stats = NULL;
	struct wil_tx_enhanced_desc *_d;
	unsigned int ring_id;
	unsigned int num_descs;
	int i;
	u8 dr_bit; /* Descriptor Ready bit */
	struct wil_ring_tx_status msg;
	struct wil6210_vif *vif;
	int used_before_complete;
	int used_new;

	wil_get_next_tx_status_msg(sring, &msg);
	dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS;

	/* Process completion messages while DR bit has the expected polarity */
	while (dr_bit == sring->desc_rdy_pol) {
		num_descs = msg.num_descriptors;
		if (!num_descs) {
			/* malformed message: skip it but still advance
			 * the status ring below
			 */
			wil_err(wil, "invalid num_descs 0\n");
			goto again;
		}

		/* Find the corresponding descriptor ring */
		ring_id = msg.ring_id;

		if (unlikely(ring_id >= WIL6210_MAX_TX_RINGS)) {
			wil_err(wil, "invalid ring id %d\n", ring_id);
			goto again;
		}
		ring = &wil->ring_tx[ring_id];
		if (unlikely(!ring->va)) {
			wil_err(wil, "Tx irq[%d]: ring not initialized\n",
				ring_id);
			goto again;
		}
		txdata = &wil->ring_tx_data[ring_id];
		if (unlikely(!txdata->enabled)) {
			wil_info(wil, "Tx irq[%d]: ring disabled\n", ring_id);
			goto again;
		}
		vif = wil->vifs[txdata->mid];
		if (unlikely(!vif)) {
			wil_dbg_txrx(wil, "invalid MID %d for ring %d\n",
				     txdata->mid, ring_id);
			goto again;
		}

		ndev = vif_to_ndev(vif);

		/* per-STA stats only when the ring maps to a real CID */
		cid = wil->ring2cid_tid[ring_id][0];
		if (cid < WIL6210_MAX_CID)
			stats = &wil->sta[cid].stats;

		wil_dbg_txrx(wil,
			     "tx_status: completed desc_ring (%d), num_descs (%d)\n",
			     ring_id, num_descs);

		used_before_complete = wil_ring_used_tx(ring);

		/* release num_descs descriptors starting at the ring tail */
		for (i = 0 ; i < num_descs; ++i) {
			struct wil_ctx *ctx = &ring->ctx[ring->swtail];
			/* stack copy: _d points into DMA-coherent memory */
			struct wil_tx_enhanced_desc dd, *d = &dd;
			u16 dmalen;
			struct sk_buff *skb = ctx->skb;

			_d = (struct wil_tx_enhanced_desc *)
				&ring->va[ring->swtail].tx.enhanced;
			*d = *_d;

			dmalen = le16_to_cpu(d->dma.length);
			trace_wil6210_tx_status(&msg, ring->swtail, dmalen);
			wil_dbg_txrx(wil,
				     "TxC[%2d][%3d] : %d bytes, status 0x%02x\n",
				     ring_id, ring->swtail, dmalen,
				     msg.status);
			wil_hex_dump_txrx("TxS ", DUMP_PREFIX_NONE, 32, 4,
					  (const void *)&msg, sizeof(msg),
					  false);

			wil_tx_desc_unmap_edma(dev,
					       (union wil_tx_desc *)d,
					       ctx);

			/* only the last descriptor of a packet carries the
			 * skb (see wil_tx_tso_gen_desc)
			 */
			if (skb) {
				if (likely(msg.status == 0)) {
					ndev->stats.tx_packets++;
					ndev->stats.tx_bytes += skb->len;
					if (stats) {
						stats->tx_packets++;
						stats->tx_bytes += skb->len;
					}
				} else {
					ndev->stats.tx_errors++;
					if (stats)
						stats->tx_errors++;
				}
				wil_consume_skb(skb, msg.status == 0);
			}
			memset(ctx, 0, sizeof(*ctx));
			/* Make sure the ctx is zeroed before updating the tail
			 * to prevent a case where wil_tx_ring will see
			 * this descriptor as used and handle it before ctx zero
			 * is completed.
			 */
			wmb();

			ring->swtail = wil_ring_next_tail(ring);

			desc_cnt++;
		}

		/* performance monitoring */
		used_new = wil_ring_used_tx(ring);
		if (wil_val_in_range(wil->ring_idle_trsh,
				     used_new, used_before_complete)) {
			wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
				     ring_id, used_before_complete, used_new);
			txdata->last_idle = get_cycles();
		}

again:
		wil_sring_advance_swhead(sring);

		wil_get_next_tx_status_msg(sring, &msg);
		dr_bit = msg.desc_ready >> TX_STATUS_DESC_READY_POS;
	}

	/* shall we wake net queues? */
	/* NOTE(review): vif is the one from the last loop iteration; if the
	 * last message hit the invalid-MID path, vif may be NULL here while
	 * desc_cnt > 0 from earlier iterations — confirm wil_update_net_queues
	 * tolerates a NULL vif.
	 */
	if (desc_cnt)
		wil_update_net_queues(wil, vif, NULL, false);

	/* Update the HW tail ptr (RD ptr) */
	/* swhead is unsigned; (swhead - 1) wraps correctly because sring
	 * sizes are powers of two (allocated by size order)
	 */
	wil_w(wil, sring->hwtail, (sring->swhead - 1) % sring->size);

	return desc_cnt;
}
1272
/**
 * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
 * @skb is used to obtain the protocol and headers length.
 * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
 * 2 - middle, 3 - last descriptor.
 *
 * All writes below are OR-ed into the descriptor, so the caller must have
 * zeroed it first (wil_tx_desc_map_edma does a full memset).
 */
static void wil_tx_desc_offload_setup_tso_edma(struct wil_tx_enhanced_desc *d,
					       int tso_desc_type, bool is_ipv4,
					       int tcp_hdr_len,
					       int skb_net_hdr_len,
					       int mss)
{
	/* Number of descriptors */
	d->mac.d[2] |= 1;
	/* Maximum Segment Size */
	/* programmed as mss / 4 — presumably HW units of 4 bytes; confirm
	 * against the HW spec
	 */
	d->mac.tso_mss |= cpu_to_le16(mss >> 2);
	/* L4 header len: TCP header length */
	d->dma.l4_hdr_len |= tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK;
	/* EOP, TSO desc type, Segmentation enable,
	 * Insert IPv4 and TCP / UDP Checksum
	 */
	d->dma.cmd |= BIT(WIL_EDMA_DESC_TX_CFG_EOP_POS) |
		      tso_desc_type << WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS |
		      BIT(WIL_EDMA_DESC_TX_CFG_SEG_EN_POS) |
		      BIT(WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS) |
		      BIT(WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS);
	/* Calculate pseudo-header */
	d->dma.w1 |= BIT(WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS) |
		     BIT(WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS);
	/* IP Header Length */
	d->dma.ip_length |= skb_net_hdr_len;
	/* MAC header length and IP address family*/
	d->dma.b11 |= ETH_HLEN |
		      is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
}
1308
/* Build and publish one TSO TX descriptor at ring slot @i.
 *
 * When @frag is NULL the data is linear skb memory at @buff_addr;
 * otherwise it comes from page fragment @frag. The buffer is DMA-mapped,
 * the descriptor is composed in a stack copy and then written into the
 * DMA-visible ring slot as the last step. On success *@descs_used is
 * incremented.
 *
 * Return: 0 on success (including len == 0, which generates nothing),
 * -EINVAL on DMA mapping failure.
 */
static int wil_tx_tso_gen_desc(struct wil6210_priv *wil, void *buff_addr,
			       int len, uint i, int tso_desc_type,
			       skb_frag_t *frag, struct wil_ring *ring,
			       struct sk_buff *skb, bool is_ipv4,
			       int tcp_hdr_len, int skb_net_hdr_len,
			       int mss, int *descs_used)
{
	struct device *dev = wil_to_dev(wil);
	struct wil_tx_enhanced_desc *_desc = (struct wil_tx_enhanced_desc *)
		&ring->va[i].tx.enhanced;
	struct wil_tx_enhanced_desc desc_mem, *d = &desc_mem;
	int ring_index = ring - wil->ring_tx;
	dma_addr_t pa;

	if (len == 0)
		return 0;

	/* record the mapping kind so the completion path can unmap it
	 * correctly
	 */
	if (!frag) {
		pa = dma_map_single(dev, buff_addr, len, DMA_TO_DEVICE);
		ring->ctx[i].mapped_as = wil_mapped_as_single;
	} else {
		pa = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		ring->ctx[i].mapped_as = wil_mapped_as_page;
	}
	if (unlikely(dma_mapping_error(dev, pa))) {
		wil_err(wil, "TSO: Skb DMA map error\n");
		return -EINVAL;
	}

	wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa,
				  len, ring_index);
	wil_tx_desc_offload_setup_tso_edma(d, tso_desc_type, is_ipv4,
					   tcp_hdr_len,
					   skb_net_hdr_len, mss);

	/* hold reference to skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	if (tso_desc_type == wil_tso_type_lst)
		ring->ctx[i].skb = skb_get(skb);

	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	/* publish: copy the finished stack copy into the ring */
	*_desc = *d;
	(*descs_used)++;

	return 0;
}
1359
/* Transmit one TSO skb on @ring, splitting it into enhanced-DMA
 * descriptors: one for the protocol headers, one for the rest of the
 * linear head, and one per page fragment. The HW performs the actual
 * segmentation.
 *
 * Must be called with the ring available-space already implying the
 * caller may transmit; rechecks and returns -ENOMEM when the ring
 * cannot hold all descriptors. Returns 0 on success, -EINVAL on
 * unsupported gso/checksum configuration, negative error otherwise.
 * On descriptor-generation failure all already-built descriptors are
 * unmapped and released (mem_error path).
 */
static int __wil_tx_ring_tso_edma(struct wil6210_priv *wil,
				  struct wil6210_vif *vif,
				  struct wil_ring *ring,
				  struct sk_buff *skb)
{
	int ring_index = ring - wil->ring_tx;
	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int min_desc_required = nr_frags + 2; /* Headers, Head, Fragments */
	int used, avail = wil_ring_avail_tx(ring);
	int f, hdrlen, headlen;
	int gso_type;
	bool is_ipv4;
	u32 swhead = ring->swhead;
	int descs_used = 0; /* total number of used descriptors */
	int rc = -EINVAL;
	int tcp_hdr_len;
	int skb_net_hdr_len;
	int mss = skb_shinfo(skb)->gso_size;

	wil_dbg_txrx(wil, "tx_ring_tso: %d bytes to ring %d\n", skb->len,
		     ring_index);

	if (unlikely(!txdata->enabled))
		return -EINVAL;

	if (unlikely(avail < min_desc_required)) {
		wil_err_ratelimited(wil,
				    "TSO: Tx ring[%2d] full. No space for %d fragments\n",
				    ring_index, min_desc_required);
		return -ENOMEM;
	}

	/* only plain TCPv4/TCPv6 segmentation is supported */
	gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
	switch (gso_type) {
	case SKB_GSO_TCPV4:
		is_ipv4 = true;
		break;
	case SKB_GSO_TCPV6:
		is_ipv4 = false;
		break;
	default:
		return -EINVAL;
	}

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return -EINVAL;

	/* tcp header length and skb network header length are fixed for all
	 * packet's descriptors - read them once here
	 */
	tcp_hdr_len = tcp_hdrlen(skb);
	skb_net_hdr_len = skb_network_header_len(skb);

	/* First descriptor must contain the header only
	 * Header Length = MAC header len + IP header len + TCP header len
	 */
	hdrlen = ETH_HLEN + tcp_hdr_len + skb_net_hdr_len;
	wil_dbg_txrx(wil, "TSO: process header descriptor, hdrlen %u\n",
		     hdrlen);
	rc = wil_tx_tso_gen_desc(wil, skb->data, hdrlen, swhead,
				 wil_tso_type_hdr, NULL, ring, skb,
				 is_ipv4, tcp_hdr_len, skb_net_hdr_len,
				 mss, &descs_used);
	if (rc)
		return -EINVAL;

	/* Second descriptor contains the head */
	/* if there are no fragments this is also the last descriptor */
	headlen = skb_headlen(skb) - hdrlen;
	wil_dbg_txrx(wil, "TSO: process skb head, headlen %u\n", headlen);
	rc = wil_tx_tso_gen_desc(wil, skb->data + hdrlen, headlen,
				 (swhead + descs_used) % ring->size,
				 (nr_frags != 0) ? wil_tso_type_first :
				 wil_tso_type_lst, NULL, ring, skb,
				 is_ipv4, tcp_hdr_len, skb_net_hdr_len,
				 mss, &descs_used);
	if (rc)
		goto mem_error;

	/* Rest of the descriptors are from the SKB fragments */
	for (f = 0; f < nr_frags; f++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
		int len = frag->size;

		wil_dbg_txrx(wil, "TSO: frag[%d]: len %u, descs_used %d\n", f,
			     len, descs_used);

		rc = wil_tx_tso_gen_desc(wil, NULL, len,
					 (swhead + descs_used) % ring->size,
					 (f != nr_frags - 1) ?
					 wil_tso_type_mid : wil_tso_type_lst,
					 frag, ring, skb, is_ipv4,
					 tcp_hdr_len, skb_net_hdr_len,
					 mss, &descs_used);
		if (rc)
			goto mem_error;
	}

	/* performance monitoring */
	used = wil_ring_used_tx(ring);
	if (wil_val_in_range(wil->ring_idle_trsh,
			     used, used + descs_used)) {
		txdata->idle += get_cycles() - txdata->last_idle;
		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
			     ring_index, used, used + descs_used);
	}

	/* advance swhead */
	wil_ring_advance_head(ring, descs_used);
	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, ring->swhead);

	/* make sure all writes to descriptors (shared memory) are done before
	 * committing them to HW
	 */
	wmb();

	wil_w(wil, ring->hwtail, ring->swhead);

	return 0;

mem_error:
	/* unwind in reverse order: unmap and clear every descriptor that
	 * was already generated (the ring head was never advanced)
	 */
	while (descs_used > 0) {
		struct device *dev = wil_to_dev(wil);
		struct wil_ctx *ctx;
		int i = (swhead + descs_used - 1) % ring->size;
		struct wil_tx_enhanced_desc dd, *d = &dd;
		struct wil_tx_enhanced_desc *_desc =
			(struct wil_tx_enhanced_desc *)
			&ring->va[i].tx.enhanced;

		*d = *_desc;
		ctx = &ring->ctx[i];
		wil_tx_desc_unmap_edma(dev, (union wil_tx_desc *)d, ctx);
		memset(ctx, 0, sizeof(*ctx));
		descs_used--;
	}
	return rc;
}
1498
1499static int wil_ring_init_bcast_edma(struct wil6210_vif *vif, int ring_id,
1500 int size)
1501{
1502 struct wil6210_priv *wil = vif_to_wil(vif);
1503 struct wil_ring *ring = &wil->ring_tx[ring_id];
1504 int rc;
1505 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
1506
1507 wil_dbg_misc(wil, "init bcast: ring_id=%d, sring_id=%d\n",
1508 ring_id, wil->tx_sring_idx);
1509
1510 lockdep_assert_held(&wil->mutex);
1511
1512 wil_tx_data_init(txdata);
1513 ring->size = size;
1514 ring->is_rx = false;
1515 rc = wil_ring_alloc_desc_ring(wil, ring);
1516 if (rc)
1517 goto out;
1518
1519 wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID; /* CID */
1520 wil->ring2cid_tid[ring_id][1] = 0; /* TID */
1521 if (!vif->privacy)
1522 txdata->dot1x_open = true;
1523
1524 rc = wil_wmi_bcast_desc_ring_add(vif, ring_id);
1525 if (rc)
1526 goto out_free;
1527
1528 return 0;
1529
1530 out_free:
1531 spin_lock_bh(&txdata->lock);
1532 txdata->enabled = 0;
1533 txdata->dot1x_open = false;
1534 spin_unlock_bh(&txdata->lock);
1535 wil_ring_free_edma(wil, ring);
1536
1537out:
1538 return rc;
1539}
1540
1541static void wil_tx_fini_edma(struct wil6210_priv *wil)
1542{
1543 struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx];
1544
1545 wil_dbg_misc(wil, "free TX sring\n");
1546
1547 wil_sring_free(wil, sring);
1548}
1549
1550static void wil_rx_data_free(struct wil_status_ring *sring)
1551{
1552 if (!sring)
1553 return;
1554
1555 kfree_skb(sring->rx_data.skb);
1556 sring->rx_data.skb = NULL;
1557}
1558
1559static void wil_rx_fini_edma(struct wil6210_priv *wil)
1560{
1561 struct wil_ring *ring = &wil->ring_rx;
1562 int i;
1563
1564 wil_dbg_misc(wil, "rx_fini_edma\n");
1565
1566 wil_ring_free_edma(wil, ring);
1567
1568 for (i = 0; i < wil->num_rx_status_rings; i++) {
1569 wil_rx_data_free(&wil->srings[i]);
1570 wil_sring_free(wil, &wil->srings[i]);
1571 }
1572
1573 wil_free_rx_buff_arr(wil);
1574}
1575
/* Populate the txrx_ops dispatch table with the enhanced-DMA (EDMA)
 * implementations; selected for Talyn-class HW instead of the legacy
 * txrx path.
 */
void wil_init_txrx_ops_edma(struct wil6210_priv *wil)
{
	wil->txrx_ops.configure_interrupt_moderation =
		wil_configure_interrupt_moderation_edma;
	/* TX ops */
	wil->txrx_ops.ring_init_tx = wil_ring_init_tx_edma;
	wil->txrx_ops.ring_fini_tx = wil_ring_free_edma;
	wil->txrx_ops.ring_init_bcast = wil_ring_init_bcast_edma;
	wil->txrx_ops.tx_init = wil_tx_init_edma;
	wil->txrx_ops.tx_fini = wil_tx_fini_edma;
	wil->txrx_ops.tx_desc_map = wil_tx_desc_map_edma;
	wil->txrx_ops.tx_desc_unmap = wil_tx_desc_unmap_edma;
	wil->txrx_ops.tx_ring_tso = __wil_tx_ring_tso_edma;
	/* RX ops */
	wil->txrx_ops.rx_init = wil_rx_init_edma;
	wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp_edma;
	wil->txrx_ops.get_reorder_params = wil_get_reorder_params_edma;
	wil->txrx_ops.get_netif_rx_params = wil_get_netif_rx_params_edma;
	wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check_edma;
	wil->txrx_ops.is_rx_idle = wil_is_rx_idle_edma;
	wil->txrx_ops.rx_fini = wil_rx_fini_edma;
}
1598
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
new file mode 100644
index 000000000000..e86fc2dc0ce0
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
@@ -0,0 +1,562 @@
1/*
2 * Copyright (c) 2012-2016,2018, The Linux Foundation. All rights reserved.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef WIL6210_TXRX_EDMA_H
18#define WIL6210_TXRX_EDMA_H
19
20#include "wil6210.h"
21
/* limit status ring size in range [ring size..max ring size] */
#define WIL_SRING_SIZE_ORDER_MIN (WIL_RING_SIZE_ORDER_MIN)
#define WIL_SRING_SIZE_ORDER_MAX (WIL_RING_SIZE_ORDER_MAX)
/* RX sring order should be bigger than RX ring order */
#define WIL_RX_SRING_SIZE_ORDER_DEFAULT (11)
#define WIL_TX_SRING_SIZE_ORDER_DEFAULT (12)
#define WIL_RX_BUFF_ARR_SIZE_DEFAULT (1536)

#define WIL_DEFAULT_RX_STATUS_RING_ID 0
#define WIL_RX_DESC_RING_ID 0
#define WIL_RX_STATUS_IRQ_IDX 0
#define WIL_TX_STATUS_IRQ_IDX 1

#define WIL_EDMA_AGG_WATERMARK (0xffff)
#define WIL_EDMA_AGG_WATERMARK_POS (16)

#define WIL_EDMA_IDLE_TIME_LIMIT_USEC (50)
#define WIL_EDMA_TIME_UNIT_CLK_CYCLES (330) /* fits 1 usec */

/* Error field */
/* values of the 3-bit L2 RX status field (see wil_rx_status_compressed) */
#define WIL_RX_EDMA_ERROR_MIC	(1)
#define WIL_RX_EDMA_ERROR_KEY	(2) /* Key missing */
#define WIL_RX_EDMA_ERROR_REPLAY	(3)
#define WIL_RX_EDMA_ERROR_AMSDU	(4)
#define WIL_RX_EDMA_ERROR_FCS	(7)

/* L3/L4 status are each 2 bits: bit0 - identified, bit1 - checksum error;
 * identical masks are intentional
 */
#define WIL_RX_EDMA_ERROR_L3_ERR	(BIT(0) | BIT(1))
#define WIL_RX_EDMA_ERROR_L4_ERR	(BIT(0) | BIT(1))

/* flow-id parsing; layout differs for DLPF lookup hit vs miss — see the
 * comment above wil_rx_status_get_cid()
 */
#define WIL_RX_EDMA_DLPF_LU_MISS_BIT		BIT(11)
#define WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK	0x7
#define WIL_RX_EDMA_DLPF_LU_HIT_CID_TID_MASK	0xf

#define WIL_RX_EDMA_DLPF_LU_MISS_CID_POS	2
#define WIL_RX_EDMA_DLPF_LU_HIT_CID_POS		4

#define WIL_RX_EDMA_DLPF_LU_MISS_TID_POS	5

#define WIL_RX_EDMA_MID_VALID_BIT		BIT(22)

#define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_POS 16
#define WIL_EDMA_DESC_TX_MAC_CFG_0_QID_LEN 6

/* bit positions below are within the TX DMA descriptor's cmd byte,
 * except PSEUDO_HEADER_CALC_EN and L4_TYPE which are applied to w1
 * (see wil_tx_desc_offload_setup_tso_edma)
 */
#define WIL_EDMA_DESC_TX_CFG_EOP_POS 0
#define WIL_EDMA_DESC_TX_CFG_EOP_LEN 1

#define WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_POS 3
#define WIL_EDMA_DESC_TX_CFG_TSO_DESC_TYPE_LEN 2

#define WIL_EDMA_DESC_TX_CFG_SEG_EN_POS 5
#define WIL_EDMA_DESC_TX_CFG_SEG_EN_LEN 1

#define WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_POS 6
#define WIL_EDMA_DESC_TX_CFG_INSERT_IP_CHKSUM_LEN 1

#define WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_POS 7
#define WIL_EDMA_DESC_TX_CFG_INSERT_TCP_CHKSUM_LEN 1

#define WIL_EDMA_DESC_TX_CFG_L4_TYPE_POS 15
#define WIL_EDMA_DESC_TX_CFG_L4_TYPE_LEN 1

#define WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_POS 5
#define WIL_EDMA_DESC_TX_CFG_PSEUDO_HEADER_CALC_EN_LEN 1
85
86/* Enhanced Rx descriptor - MAC part
87 * [dword 0] : Reserved
88 * [dword 1] : Reserved
89 * [dword 2] : Reserved
90 * [dword 3]
91 * bit 0..15 : Buffer ID
92 * bit 16..31 : Reserved
93 */
/* HW layout of the enhanced RX descriptor MAC part; see comment above */
struct wil_ring_rx_enhanced_mac {
	u32 d[3];	/* dwords 0..2: reserved */
	__le16 buff_id;	/* buffer ID (dword 3, bits 0..15) */
	u16 reserved;
} __packed;
99
100/* Enhanced Rx descriptor - DMA part
101 * [dword 0] - Reserved
102 * [dword 1]
103 * bit 0..31 : addr_low:32 The payload buffer address, bits 0-31
104 * [dword 2]
105 * bit 0..15 : addr_high_low:16 The payload buffer address, bits 32-47
106 * bit 16..31 : Reserved
107 * [dword 3]
108 * bit 0..15 : addr_high_high:16 The payload buffer address, bits 48-63
109 * bit 16..31 : length
110 */
/* HW layout of the enhanced RX descriptor DMA part; see comment above */
struct wil_ring_rx_enhanced_dma {
	u32 d0;				/* dword 0: reserved */
	struct wil_ring_dma_addr addr;	/* payload address bits 0..47 */
	u16 w5;				/* reserved (dword 2, bits 16..31) */
	__le16 addr_high_high;		/* payload address bits 48..63 */
	__le16 length;
} __packed;
118
/* Complete enhanced RX descriptor: MAC part followed by DMA part */
struct wil_rx_enhanced_desc {
	struct wil_ring_rx_enhanced_mac mac;
	struct wil_ring_rx_enhanced_dma dma;
} __packed;
123
124/* Enhanced Tx descriptor - DMA part
125 * [dword 0]
126 * Same as legacy
127 * [dword 1]
128 * bit 0..31 : addr_low:32 The payload buffer address, bits 0-31
129 * [dword 2]
130 * bit 0..15 : addr_high_low:16 The payload buffer address, bits 32-47
131 * bit 16..23 : ip_length:8 The IP header length for the TX IP checksum
132 * offload feature
133 * bit 24..30 : mac_length:7
134 * bit 31 : ip_version:1 1 - IPv4, 0 - IPv6
135 * [dword 3]
136 * bit 0..15 : addr_high_high:16 The payload buffer address, bits 48-63
137 * bit 16..31 : length
138 */
/* HW layout of the enhanced TX descriptor DMA part; see comment above */
struct wil_ring_tx_enhanced_dma {
	u8 l4_hdr_len;			/* L4 (TCP) header length */
	u8 cmd;				/* WIL_EDMA_DESC_TX_CFG_* flags */
	u16 w1;				/* incl. pseudo-header calc / L4 type */
	struct wil_ring_dma_addr addr;	/* payload address bits 0..47 */
	u8 ip_length;			/* IP header length for csum offload */
	u8 b11; /* 0..6: mac_length; 7:ip_version */
	__le16 addr_high_high;		/* payload address bits 48..63 */
	__le16 length;
} __packed;
149
150/* Enhanced Tx descriptor - MAC part
151 * [dword 0]
152 * bit 0.. 9 : lifetime_expiry_value:10
153 * bit 10 : interrupt_en:1
154 * bit 11 : status_en:1
155 * bit 12..13 : txss_override:2
156 * bit 14 : timestamp_insertion:1
157 * bit 15 : duration_preserve:1
158 * bit 16..21 : reserved0:6
159 * bit 22..26 : mcs_index:5
160 * bit 27 : mcs_en:1
161 * bit 28..30 : reserved1:3
162 * bit 31 : sn_preserved:1
163 * [dword 1]
164 * bit 0.. 3 : pkt_mode:4
165 * bit 4 : pkt_mode_en:1
166 * bit 5..14 : reserved0:10
167 * bit 15 : ack_policy_en:1
168 * bit 16..19 : dst_index:4
169 * bit 20 : dst_index_en:1
170 * bit 21..22 : ack_policy:2
171 * bit 23 : lifetime_en:1
172 * bit 24..30 : max_retry:7
173 * bit 31 : max_retry_en:1
174 * [dword 2]
175 * bit 0.. 7 : num_of_descriptors:8
176 * bit 8..17 : reserved:10
177 * bit 18..19 : l2_translation_type:2 00 - bypass, 01 - 802.3, 10 - 802.11
178 * bit 20 : snap_hdr_insertion_en:1
179 * bit 21 : vlan_removal_en:1
180 * bit 22..23 : reserved0:2
181 * bit 24 : Dest ID extension:1
182 * bit 25..31 : reserved0:7
183 * [dword 3]
184 * bit 0..15 : tso_mss:16
185 * bit 16..31 : descriptor_scratchpad:16 - mailbox between driver and ucode
186 */
/* HW layout of the enhanced TX descriptor MAC part; see comment above */
struct wil_ring_tx_enhanced_mac {
	u32 d[3];	/* dwords 0..2 per the layout comment */
	__le16 tso_mss;	/* maximum segment size for TSO */
	u16 scratchpad;	/* driver/ucode mailbox */
} __packed;
192
/* Complete enhanced TX descriptor: MAC part followed by DMA part */
struct wil_tx_enhanced_desc {
	struct wil_ring_tx_enhanced_mac mac;
	struct wil_ring_tx_enhanced_dma dma;
} __packed;
197
198#define TX_STATUS_DESC_READY_POS 7
199
200/* Enhanced TX status message
201 * [dword 0]
202 * bit 0.. 7 : Number of Descriptor:8 - The number of descriptors that
203 * are used to form the packets. It is needed for WB when
204 * releasing the packet
205 * bit 8..15 : tx_ring_id:8 The transmission ring ID that is related to
206 * the message
207 * bit 16..23 : Status:8 - The TX status Code
208 * 0x0 - A successful transmission
209 * 0x1 - Retry expired
210 * 0x2 - Lifetime Expired
211 * 0x3 - Released
212 * 0x4-0xFF - Reserved
213 * bit 24..30 : Reserved:7
214 * bit 31 : Descriptor Ready bit:1 - It is initiated to
215 * zero by the driver when the ring is created. It is set by the HW
216 * to one for each completed status message. Each wrap around,
217 * the DR bit value is flipped.
218 * [dword 1]
219 * bit 0..31 : timestamp:32 - Set when MPDU is transmitted.
220 * [dword 2]
221 * bit 0.. 4 : MCS:5 - The transmitted MCS value
222 * bit 5 : Reserved:1
223 * bit 6.. 7 : CB mode:2 - 0-DMG 1-EDMG 2-Wide
224 * bit 8..12 : QID:5 - The QID that was used for the transmission
225 * bit 13..15 : Reserved:3
226 * bit 16..20 : Num of MSDUs:5 - Number of MSDUs in the aggregation
227 * bit 21..22 : Reserved:2
228 * bit 23 : Retry:1 - An indication that the transmission was retried
229 * bit 24..31 : TX-Sector:8 - the antenna sector that was used for
230 * transmission
231 * [dword 3]
232 * bit 0..11 : Sequence number:12 - The Sequence Number that was used
233 * for the MPDU transmission
234 * bit 12..31 : Reserved:20
235 */
/* One TX completion message in the status ring; see layout comment above */
struct wil_ring_tx_status {
	u8 num_descriptors;	/* descriptors consumed by the packet */
	u8 ring_id;		/* originating TX descriptor ring */
	u8 status;		/* 0 = success, see codes above */
	u8 desc_ready; /* Only the last bit should be set */
	u32 timestamp;
	u32 d2;			/* MCS/CB mode/QID/retry/TX-sector */
	u16 seq_number; /* Only the first 12 bits */
	u16 w7;
} __packed;
246
247/* Enhanced Rx status message - compressed part
248 * [dword 0]
249 * bit 0.. 2 : L2 Rx Status:3 - The L2 packet reception Status
250 * 0-Success, 1-MIC Error, 2-Key Error, 3-Replay Error,
251 * 4-A-MSDU Error, 5-Reserved, 6-Reserved, 7-FCS Error
252 * bit 3.. 4 : L3 Rx Status:2 - Bit0 - L3I - L3 identified and checksum
253 * calculated, Bit1- L3Err - IPv4 Checksum Error
254 * bit 5.. 6 : L4 Rx Status:2 - Bit0 - L4I - L4 identified and checksum
255 * calculated, Bit1- L4Err - TCP/UDP Checksum Error
256 * bit 7 : Reserved:1
257 * bit 8..19 : Flow ID:12 - MSDU flow ID
258 * bit 20..21 : MID:2 - The MAC ID
259 * bit 22 : MID_V:1 - The MAC ID field is valid
260 * bit 23 : L3T:1 - IP types: 0-IPv6, 1-IPv4
261 * bit 24 : L4T:1 - Layer 4 Type: 0-UDP, 1-TCP
262 * bit 25 : BC:1 - The received MPDU is broadcast
263 * bit 26 : MC:1 - The received MPDU is multicast
264 * bit 27 : Raw:1 - The MPDU received with no translation
265 * bit 28 : Sec:1 - The FC control (b14) - Frame Protected
266 * bit 29 : Error:1 - An error is set when (L2 status != 0) ||
267 * (L3 status == 3) || (L4 status == 3)
268 * bit 30 : EOP:1 - End of MSDU signaling. It is set to mark the end
269 * of the transfer, otherwise the status indicates buffer
270 * only completion.
271 * bit 31 : Descriptor Ready bit:1 - It is initiated to
272 * zero by the driver when the ring is created. It is set
273 * by the HW to one for each completed status message.
274 * Each wrap around, the DR bit value is flipped.
275 * [dword 1]
276 * bit 0.. 5 : MAC Len:6 - The number of bytes that are used for L2 header
277 * bit 6..11 : IPLEN:6 - The number of DW that are used for L3 header
278 * bit 12..15 : I4Len:4 - The number of DW that are used for L4 header
279 * bit 16..21 : MCS:6 - The received MCS field from the PLCP Header
280 * bit 22..23 : CB mode:2 - The CB Mode: 0-DMG, 1-EDMG, 2-Wide
281 * bit 24..27 : Data Offset:4 - The data offset, a code that describe the
282 * payload shift from the beginning of the buffer:
283 * 0 - 0 Bytes, 3 - 2 Bytes
284 * bit 28 : A-MSDU Present:1 - The QoS (b7) A-MSDU present field
285 * bit 29 : A-MSDU Type:1 The QoS (b8) A-MSDU Type field
286 * bit 30 : A-MPDU:1 - Packet is part of aggregated MPDU
287 * bit 31 : Key ID:1 - The extracted Key ID from the encryption header
288 * [dword 2]
289 * bit 0..15 : Buffer ID:16 - The Buffer Identifier
290 * bit 16..31 : Length:16 - It indicates the valid bytes that are stored
291 * in the current descriptor buffer. For multiple buffer
292 * descriptor, SW need to sum the total descriptor length
293 * in all buffers to produce the packet length
294 * [dword 3]
295 * bit 0..31 : timestamp:32 - The MPDU Timestamp.
296 */
/* Compressed RX status message; bit fields of d0/d1 are decoded by the
 * wil_rx_status_get_* helpers below (layout in the comment above)
 */
struct wil_rx_status_compressed {
	u32 d0;
	u32 d1;
	__le16 buff_id;
	__le16 length;
	u32 timestamp;
} __packed;
304
305/* Enhanced Rx status message - extension part
306 * [dword 0]
307 * bit 0.. 4 : QID:5 - The Queue Identifier that the packet is received
308 * from
309 * bit 5.. 7 : Reserved:3
310 * bit 8..11 : TID:4 - The QoS (b3-0) TID Field
 * bit 12..15 : Source index:4 - The Source index that was found
 *             during parsing of the TA. This field is used to define
 *             the source of the packet
 * bit 16..18 : Destination index:3 - The Destination index that
 *             was found during parsing of the RA.
 * bit 19..20 : DS Type:2 - The FC Control (b9-8) - From / To DS
 * bit 21..22 : MIC ICR:2 - this signal tells the DMA to assert an
 *             interrupt after it writes the packet
 * bit 23 : ESOP:1 - The QoS (b4) ESOP field
 * bit 24 : RDG:1
 * bit 25..31 : Reserved:7
 * [dword 1]
 * bit 0.. 1 : Frame Type:2 - The FC Control (b3-2) - MPDU Type
 *             (management, data, control and extension)
 * bit 2.. 5 : Sub type:4 - The FC Control (b7-4) - Frame Subtype
326 * bit 6..11 : Ext sub type:6 - The FC Control (b11-8) - Frame Extended
327 * Subtype
328 * bit 12..13 : ACK Policy:2 - The QoS (b6-5) ACK Policy fields
329 * bit 14 : DECRYPT_BYP:1 - The MPDU is bypass by the decryption unit
330 * bit 15..23 : Reserved:9
331 * bit 24..31 : RSSI/SNR:8 - The RSSI / SNR measurement for the received
332 * MPDU
333 * [dword 2]
334 * bit 0..11 : SN:12 - The received Sequence number field
335 * bit 12..15 : Reserved:4
336 * bit 16..31 : PN bits [15:0]:16
337 * [dword 3]
338 * bit 0..31 : PN bits [47:16]:32
339 */
/* Extension part of the extended RX status; layout in the comment above */
struct wil_rx_status_extension {
	u32 d0;
	u32 d1;
	__le16 seq_num; /* only lower 12 bits */
	u16 pn_15_0;	/* PN bits 0..15 */
	u32 pn_47_16;	/* PN bits 16..47 */
} __packed;
347
/* Full (non-compressed) RX status: compressed part plus extension */
struct wil_rx_status_extended {
	struct wil_rx_status_compressed comp;
	struct wil_rx_status_extension ext;
} __packed;
352
353static inline void *wil_skb_rxstatus(struct sk_buff *skb)
354{
355 return (void *)skb->cb;
356}
357
358static inline __le16 wil_rx_status_get_length(void *msg)
359{
360 return ((struct wil_rx_status_compressed *)msg)->length;
361}
362
363static inline u8 wil_rx_status_get_mcs(void *msg)
364{
365 return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
366 16, 21);
367}
368
369static inline u16 wil_rx_status_get_flow_id(void *msg)
370{
371 return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
372 8, 19);
373}
374
375static inline u8 wil_rx_status_get_mcast(void *msg)
376{
377 return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
378 26, 26);
379}
380
381/**
382 * In case of DLPF miss the parsing of flow Id should be as follows:
383 * dest_id:2
384 * src_id :3 - cid
385 * tid:3
386 * Otherwise:
387 * tid:4
388 * cid:4
389 */
390
391static inline u8 wil_rx_status_get_cid(void *msg)
392{
393 u16 val = wil_rx_status_get_flow_id(msg);
394
395 if (val & WIL_RX_EDMA_DLPF_LU_MISS_BIT)
396 /* CID is in bits 2..4 */
397 return (val >> WIL_RX_EDMA_DLPF_LU_MISS_CID_POS) &
398 WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK;
399 else
400 /* CID is in bits 4..7 */
401 return (val >> WIL_RX_EDMA_DLPF_LU_HIT_CID_POS) &
402 WIL_RX_EDMA_DLPF_LU_HIT_CID_TID_MASK;
403}
404
405static inline u8 wil_rx_status_get_tid(void *msg)
406{
407 u16 val = wil_rx_status_get_flow_id(msg);
408
409 if (val & WIL_RX_EDMA_DLPF_LU_MISS_BIT)
410 /* TID is in bits 5..7 */
411 return (val >> WIL_RX_EDMA_DLPF_LU_MISS_TID_POS) &
412 WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK;
413 else
414 /* TID is in bits 0..3 */
415 return val & WIL_RX_EDMA_DLPF_LU_MISS_CID_TID_MASK;
416}
417
418static inline int wil_rx_status_get_desc_rdy_bit(void *msg)
419{
420 return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
421 31, 31);
422}
423
424static inline int wil_rx_status_get_eop(void *msg) /* EoP = End of Packet */
425{
426 return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
427 30, 30);
428}
429
430static inline __le16 wil_rx_status_get_buff_id(void *msg)
431{
432 return ((struct wil_rx_status_compressed *)msg)->buff_id;
433}
434
435static inline u8 wil_rx_status_get_data_offset(void *msg)
436{
437 u8 val = WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
438 24, 27);
439
440 switch (val) {
441 case 0: return 0;
442 case 3: return 2;
443 default: return 0xFF;
444 }
445}
446
447static inline int wil_rx_status_get_frame_type(struct wil6210_priv *wil,
448 void *msg)
449{
450 if (wil->use_compressed_rx_status)
451 return IEEE80211_FTYPE_DATA;
452
453 return WIL_GET_BITS(((struct wil_rx_status_extended *)msg)->ext.d1,
454 0, 1) << 2;
455}
456
457static inline int wil_rx_status_get_fc1(struct wil6210_priv *wil, void *msg)
458{
459 if (wil->use_compressed_rx_status)
460 return 0;
461
462 return WIL_GET_BITS(((struct wil_rx_status_extended *)msg)->ext.d1,
463 0, 5) << 2;
464}
465
466static inline __le16 wil_rx_status_get_seq(struct wil6210_priv *wil, void *msg)
467{
468 if (wil->use_compressed_rx_status)
469 return 0;
470
471 return ((struct wil_rx_status_extended *)msg)->ext.seq_num;
472}
473
474static inline int wil_rx_status_get_mid(void *msg)
475{
476 if (!(((struct wil_rx_status_compressed *)msg)->d0 &
477 WIL_RX_EDMA_MID_VALID_BIT))
478 return 0; /* use the default MID */
479
480 return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
481 20, 21);
482}
483
484static inline int wil_rx_status_get_error(void *msg)
485{
486 return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
487 29, 29);
488}
489
490static inline int wil_rx_status_get_l2_rx_status(void *msg)
491{
492 return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
493 0, 2);
494}
495
496static inline int wil_rx_status_get_l3_rx_status(void *msg)
497{
498 return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
499 3, 4);
500}
501
502static inline int wil_rx_status_get_l4_rx_status(void *msg)
503{
504 return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
505 5, 6);
506}
507
508static inline int wil_rx_status_get_security(void *msg)
509{
510 return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
511 28, 28);
512}
513
514static inline u8 wil_rx_status_get_key_id(void *msg)
515{
516 return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
517 31, 31);
518}
519
520static inline u8 wil_tx_status_get_mcs(struct wil_ring_tx_status *msg)
521{
522 return WIL_GET_BITS(msg->d2, 0, 4);
523}
524
525static inline u32 wil_ring_next_head(struct wil_ring *ring)
526{
527 return (ring->swhead + 1) % ring->size;
528}
529
530static inline void wil_desc_set_addr_edma(struct wil_ring_dma_addr *addr,
531 __le16 *addr_high_high,
532 dma_addr_t pa)
533{
534 addr->addr_low = cpu_to_le32(lower_32_bits(pa));
535 addr->addr_high = cpu_to_le16((u16)upper_32_bits(pa));
536 *addr_high_high = cpu_to_le16((u16)(upper_32_bits(pa) >> 16));
537}
538
539static inline
540dma_addr_t wil_tx_desc_get_addr_edma(struct wil_ring_tx_enhanced_dma *dma)
541{
542 return le32_to_cpu(dma->addr.addr_low) |
543 ((u64)le16_to_cpu(dma->addr.addr_high) << 32) |
544 ((u64)le16_to_cpu(dma->addr_high_high) << 48);
545}
546
547static inline
548dma_addr_t wil_rx_desc_get_addr_edma(struct wil_ring_rx_enhanced_dma *dma)
549{
550 return le32_to_cpu(dma->addr.addr_low) |
551 ((u64)le16_to_cpu(dma->addr.addr_high) << 32) |
552 ((u64)le16_to_cpu(dma->addr_high_high) << 48);
553}
554
555void wil_configure_interrupt_moderation_edma(struct wil6210_priv *wil);
556int wil_tx_sring_handler(struct wil6210_priv *wil,
557 struct wil_status_ring *sring);
558void wil_rx_handle_edma(struct wil6210_priv *wil, int *quota);
559void wil_init_txrx_ops_edma(struct wil6210_priv *wil);
560
561#endif /* WIL6210_TXRX_EDMA_H */
562
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index b623510c6f6c..d963c76b679e 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -24,6 +24,7 @@
24#include <net/cfg80211.h> 24#include <net/cfg80211.h>
25#include <linux/timex.h> 25#include <linux/timex.h>
26#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/irqreturn.h>
27#include "wmi.h" 28#include "wmi.h"
28#include "wil_platform.h" 29#include "wil_platform.h"
29#include "fw.h" 30#include "fw.h"
@@ -37,6 +38,10 @@ extern bool rx_large_buf;
37extern bool debug_fw; 38extern bool debug_fw;
38extern bool disable_ap_sme; 39extern bool disable_ap_sme;
39 40
41struct wil6210_priv;
42struct wil6210_vif;
43union wil_tx_desc;
44
40#define WIL_NAME "wil6210" 45#define WIL_NAME "wil6210"
41 46
42#define WIL_FW_NAME_DEFAULT "wil6210.fw" 47#define WIL_FW_NAME_DEFAULT "wil6210.fw"
@@ -80,6 +85,8 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
80#define WIL6210_NAPI_BUDGET (16) /* arbitrary */ 85#define WIL6210_NAPI_BUDGET (16) /* arbitrary */
81#define WIL_MAX_AMPDU_SIZE (64 * 1024) /* FW/HW limit */ 86#define WIL_MAX_AMPDU_SIZE (64 * 1024) /* FW/HW limit */
82#define WIL_MAX_AGG_WSIZE (32) /* FW/HW limit */ 87#define WIL_MAX_AGG_WSIZE (32) /* FW/HW limit */
88#define WIL6210_MAX_STATUS_RINGS (8)
89
83/* Hardware offload block adds the following: 90/* Hardware offload block adds the following:
84 * 26 bytes - 3-address QoS data header 91 * 26 bytes - 3-address QoS data header
85 * 8 bytes - IV + EIV (for GCMP) 92 * 8 bytes - IV + EIV (for GCMP)
@@ -203,7 +210,9 @@ struct RGF_ICR {
203#define RGF_USER_SPARROW_M_4 (0x880c50) /* Sparrow */ 210#define RGF_USER_SPARROW_M_4 (0x880c50) /* Sparrow */
204 #define BIT_SPARROW_M_4_SEL_SLEEP_OR_REF BIT(2) 211 #define BIT_SPARROW_M_4_SEL_SLEEP_OR_REF BIT(2)
205#define RGF_USER_OTP_HW_RD_MACHINE_1 (0x880ce0) 212#define RGF_USER_OTP_HW_RD_MACHINE_1 (0x880ce0)
206 #define BIT_NO_FLASH_INDICATION BIT(8) 213 #define BIT_OTP_SIGNATURE_ERR_TALYN_MB BIT(0)
214 #define BIT_OTP_HW_SECTION_DONE_TALYN_MB BIT(2)
215 #define BIT_NO_FLASH_INDICATION BIT(8)
207#define RGF_USER_XPM_IFC_RD_TIME1 (0x880cec) 216#define RGF_USER_XPM_IFC_RD_TIME1 (0x880cec)
208#define RGF_USER_XPM_IFC_RD_TIME2 (0x880cf0) 217#define RGF_USER_XPM_IFC_RD_TIME2 (0x880cf0)
209#define RGF_USER_XPM_IFC_RD_TIME3 (0x880cf4) 218#define RGF_USER_XPM_IFC_RD_TIME3 (0x880cf4)
@@ -305,20 +314,49 @@ struct RGF_ICR {
305#define RGF_CAF_PLL_LOCK_STATUS (0x88afec) 314#define RGF_CAF_PLL_LOCK_STATUS (0x88afec)
306 #define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0) 315 #define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0)
307 316
317#define RGF_OTP_QC_SECURED (0x8a0038)
318 #define BIT_BOOT_FROM_ROM BIT(31)
319
320/* eDMA */
321#define RGF_INT_COUNT_ON_SPECIAL_EVT (0x8b62d8)
322
323#define RGF_INT_CTRL_INT_GEN_CFG_0 (0x8bc000)
324#define RGF_INT_CTRL_INT_GEN_CFG_1 (0x8bc004)
325#define RGF_INT_GEN_TIME_UNIT_LIMIT (0x8bc0c8)
326
327#define RGF_INT_GEN_CTRL (0x8bc0ec)
328 #define BIT_CONTROL_0 BIT(0)
329
330/* eDMA status interrupts */
331#define RGF_INT_GEN_RX_ICR (0x8bc0f4)
332 #define BIT_RX_STATUS_IRQ BIT(WIL_RX_STATUS_IRQ_IDX)
333#define RGF_INT_GEN_TX_ICR (0x8bc110)
334 #define BIT_TX_STATUS_IRQ BIT(WIL_TX_STATUS_IRQ_IDX)
335#define RGF_INT_CTRL_RX_INT_MASK (0x8bc12c)
336#define RGF_INT_CTRL_TX_INT_MASK (0x8bc130)
337
338#define RGF_INT_GEN_IDLE_TIME_LIMIT (0x8bc134)
339
308#define USER_EXT_USER_PMU_3 (0x88d00c) 340#define USER_EXT_USER_PMU_3 (0x88d00c)
309 #define BIT_PMU_DEVICE_RDY BIT(0) 341 #define BIT_PMU_DEVICE_RDY BIT(0)
310 342
311#define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */ 343#define RGF_USER_JTAG_DEV_ID (0x880b34) /* device ID */
312 #define JTAG_DEV_ID_SPARROW (0x2632072f) 344 #define JTAG_DEV_ID_SPARROW (0x2632072f)
313 #define JTAG_DEV_ID_TALYN (0x7e0e1) 345 #define JTAG_DEV_ID_TALYN (0x7e0e1)
346 #define JTAG_DEV_ID_TALYN_MB (0x1007e0e1)
314 347
315#define RGF_USER_REVISION_ID (0x88afe4) 348#define RGF_USER_REVISION_ID (0x88afe4)
316#define RGF_USER_REVISION_ID_MASK (3) 349#define RGF_USER_REVISION_ID_MASK (3)
317 #define REVISION_ID_SPARROW_B0 (0x0) 350 #define REVISION_ID_SPARROW_B0 (0x0)
318 #define REVISION_ID_SPARROW_D0 (0x3) 351 #define REVISION_ID_SPARROW_D0 (0x3)
319 352
353#define RGF_OTP_MAC_TALYN_MB (0x8a0304)
320#define RGF_OTP_MAC (0x8a0620) 354#define RGF_OTP_MAC (0x8a0620)
321 355
356/* Talyn-MB */
357#define RGF_USER_USER_CPU_0_TALYN_MB (0x8c0138)
358#define RGF_USER_MAC_CPU_0_TALYN_MB (0x8c0154)
359
322/* crash codes for FW/Ucode stored here */ 360/* crash codes for FW/Ucode stored here */
323 361
324/* ASSERT RGFs */ 362/* ASSERT RGFs */
@@ -332,6 +370,7 @@ enum {
332 HW_VER_SPARROW_B0, /* REVISION_ID_SPARROW_B0 */ 370 HW_VER_SPARROW_B0, /* REVISION_ID_SPARROW_B0 */
333 HW_VER_SPARROW_D0, /* REVISION_ID_SPARROW_D0 */ 371 HW_VER_SPARROW_D0, /* REVISION_ID_SPARROW_D0 */
334 HW_VER_TALYN, /* JTAG_DEV_ID_TALYN */ 372 HW_VER_TALYN, /* JTAG_DEV_ID_TALYN */
373 HW_VER_TALYN_MB /* JTAG_DEV_ID_TALYN_MB */
335}; 374};
336 375
337/* popular locations */ 376/* popular locations */
@@ -349,7 +388,14 @@ enum {
349/* Hardware definitions end */ 388/* Hardware definitions end */
350#define SPARROW_FW_MAPPING_TABLE_SIZE 10 389#define SPARROW_FW_MAPPING_TABLE_SIZE 10
351#define TALYN_FW_MAPPING_TABLE_SIZE 13 390#define TALYN_FW_MAPPING_TABLE_SIZE 13
352#define MAX_FW_MAPPING_TABLE_SIZE 13 391#define TALYN_MB_FW_MAPPING_TABLE_SIZE 19
392#define MAX_FW_MAPPING_TABLE_SIZE 19
393
394/* Common representation of physical address in wil ring */
395struct wil_ring_dma_addr {
396 __le32 addr_low;
397 __le16 addr_high;
398} __packed;
353 399
354struct fw_map { 400struct fw_map {
355 u32 from; /* linker address - from, inclusive */ 401 u32 from; /* linker address - from, inclusive */
@@ -357,12 +403,14 @@ struct fw_map {
357 u32 host; /* PCI/Host address - BAR0 + 0x880000 */ 403 u32 host; /* PCI/Host address - BAR0 + 0x880000 */
358 const char *name; /* for debugfs */ 404 const char *name; /* for debugfs */
359 bool fw; /* true if FW mapping, false if UCODE mapping */ 405 bool fw; /* true if FW mapping, false if UCODE mapping */
406 bool crash_dump; /* true if should be dumped during crash dump */
360}; 407};
361 408
362/* array size should be in sync with actual definition in the wmi.c */ 409/* array size should be in sync with actual definition in the wmi.c */
363extern const struct fw_map sparrow_fw_mapping[SPARROW_FW_MAPPING_TABLE_SIZE]; 410extern const struct fw_map sparrow_fw_mapping[SPARROW_FW_MAPPING_TABLE_SIZE];
364extern const struct fw_map sparrow_d0_mac_rgf_ext; 411extern const struct fw_map sparrow_d0_mac_rgf_ext;
365extern const struct fw_map talyn_fw_mapping[TALYN_FW_MAPPING_TABLE_SIZE]; 412extern const struct fw_map talyn_fw_mapping[TALYN_FW_MAPPING_TABLE_SIZE];
413extern const struct fw_map talyn_mb_fw_mapping[TALYN_MB_FW_MAPPING_TABLE_SIZE];
366extern struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE]; 414extern struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
367 415
368/** 416/**
@@ -438,7 +486,7 @@ enum { /* for wil_ctx.mapped_as */
438}; 486};
439 487
440/** 488/**
441 * struct wil_ctx - software context for Vring descriptor 489 * struct wil_ctx - software context for ring descriptor
442 */ 490 */
443struct wil_ctx { 491struct wil_ctx {
444 struct sk_buff *skb; 492 struct sk_buff *skb;
@@ -446,22 +494,96 @@ struct wil_ctx {
446 u8 mapped_as; 494 u8 mapped_as;
447}; 495};
448 496
449union vring_desc; 497struct wil_desc_ring_rx_swtail { /* relevant for enhanced DMA only */
498 u32 *va;
499 dma_addr_t pa;
500};
450 501
451struct vring { 502/**
503 * A general ring structure, used for RX and TX.
504 * In legacy DMA it represents the vring,
 504 * In enhanced DMA it represents the descriptor ring (vrings are handled by FW)
506 */
507struct wil_ring {
452 dma_addr_t pa; 508 dma_addr_t pa;
453 volatile union vring_desc *va; /* vring_desc[size], WriteBack by DMA */ 509 volatile union wil_ring_desc *va;
454 u16 size; /* number of vring_desc elements */ 510 u16 size; /* number of wil_ring_desc elements */
455 u32 swtail; 511 u32 swtail;
456 u32 swhead; 512 u32 swhead;
457 u32 hwtail; /* write here to inform hw */ 513 u32 hwtail; /* write here to inform hw */
458 struct wil_ctx *ctx; /* ctx[size] - software context */ 514 struct wil_ctx *ctx; /* ctx[size] - software context */
515 struct wil_desc_ring_rx_swtail edma_rx_swtail;
516 bool is_rx;
459}; 517};
460 518
461/** 519/**
462 * Additional data for Tx Vring 520 * Additional data for Rx ring.
521 * Used for enhanced DMA RX chaining.
463 */ 522 */
464struct vring_tx_data { 523struct wil_ring_rx_data {
524 /* the skb being assembled */
525 struct sk_buff *skb;
526 /* true if we are skipping a bad fragmented packet */
527 bool skipping;
528 u16 buff_size;
529};
530
531/**
532 * Status ring structure, used for enhanced DMA completions for RX and TX.
533 */
534struct wil_status_ring {
535 dma_addr_t pa;
536 void *va; /* pointer to ring_[tr]x_status elements */
537 u16 size; /* number of status elements */
538 size_t elem_size; /* status element size in bytes */
539 u32 swhead;
540 u32 hwtail; /* write here to inform hw */
541 bool is_rx;
542 u8 desc_rdy_pol; /* Expected descriptor ready bit polarity */
543 struct wil_ring_rx_data rx_data;
544};
545
546/**
 547 * struct wil_txrx_ops - different TX/RX ops for legacy and enhanced
548 * DMA flow
549 */
550struct wil_txrx_ops {
551 void (*configure_interrupt_moderation)(struct wil6210_priv *wil);
552 /* TX ops */
553 int (*ring_init_tx)(struct wil6210_vif *vif, int ring_id,
554 int size, int cid, int tid);
555 void (*ring_fini_tx)(struct wil6210_priv *wil, struct wil_ring *ring);
556 int (*ring_init_bcast)(struct wil6210_vif *vif, int id, int size);
557 int (*tx_init)(struct wil6210_priv *wil);
558 void (*tx_fini)(struct wil6210_priv *wil);
559 int (*tx_desc_map)(union wil_tx_desc *desc, dma_addr_t pa,
560 u32 len, int ring_index);
561 void (*tx_desc_unmap)(struct device *dev,
562 union wil_tx_desc *desc,
563 struct wil_ctx *ctx);
564 int (*tx_ring_tso)(struct wil6210_priv *wil, struct wil6210_vif *vif,
565 struct wil_ring *ring, struct sk_buff *skb);
566 irqreturn_t (*irq_tx)(int irq, void *cookie);
567 /* RX ops */
568 int (*rx_init)(struct wil6210_priv *wil, u16 ring_size);
569 void (*rx_fini)(struct wil6210_priv *wil);
570 int (*wmi_addba_rx_resp)(struct wil6210_priv *wil, u8 mid, u8 cid,
571 u8 tid, u8 token, u16 status, bool amsdu,
572 u16 agg_wsize, u16 timeout);
573 void (*get_reorder_params)(struct wil6210_priv *wil,
574 struct sk_buff *skb, int *tid, int *cid,
575 int *mid, u16 *seq, int *mcast);
576 void (*get_netif_rx_params)(struct sk_buff *skb,
577 int *cid, int *security);
578 int (*rx_crypto_check)(struct wil6210_priv *wil, struct sk_buff *skb);
579 bool (*is_rx_idle)(struct wil6210_priv *wil);
580 irqreturn_t (*irq_rx)(int irq, void *cookie);
581};
582
583/**
584 * Additional data for Tx ring
585 */
586struct wil_ring_tx_data {
465 bool dot1x_open; 587 bool dot1x_open;
466 int enabled; 588 int enabled;
467 cycles_t idle, last_idle, begin; 589 cycles_t idle, last_idle, begin;
@@ -564,6 +686,9 @@ struct wil_net_stats {
564 unsigned long rx_short_frame; 686 unsigned long rx_short_frame;
565 unsigned long rx_large_frame; 687 unsigned long rx_large_frame;
566 unsigned long rx_replay; 688 unsigned long rx_replay;
689 unsigned long rx_mic_error; /* eDMA specific */
690 unsigned long rx_key_error; /* eDMA specific */
691 unsigned long rx_amsdu_error; /* eDMA specific */
567 u16 last_mcs_rx; 692 u16 last_mcs_rx;
568 u64 rx_per_mcs[WIL_MCS_MAX + 1]; 693 u64 rx_per_mcs[WIL_MCS_MAX + 1];
569}; 694};
@@ -681,7 +806,7 @@ struct wil6210_vif {
681 u8 hidden_ssid; /* relevant in AP mode */ 806 u8 hidden_ssid; /* relevant in AP mode */
682 u32 ap_isolate; /* no intra-BSS communication */ 807 u32 ap_isolate; /* no intra-BSS communication */
683 bool pbss; 808 bool pbss;
684 int bcast_vring; 809 int bcast_ring;
685 struct cfg80211_bss *bss; /* connected bss, relevant in STA mode */ 810 struct cfg80211_bss *bss; /* connected bss, relevant in STA mode */
686 int locally_generated_disc; /* relevant in STA mode */ 811 int locally_generated_disc; /* relevant in STA mode */
687 struct timer_list connect_timer; 812 struct timer_list connect_timer;
@@ -697,6 +822,31 @@ struct wil6210_vif {
697 int net_queue_stopped; /* netif_tx_stop_all_queues invoked */ 822 int net_queue_stopped; /* netif_tx_stop_all_queues invoked */
698}; 823};
699 824
825/**
826 * RX buffer allocated for enhanced DMA RX descriptors
827 */
828struct wil_rx_buff {
829 struct sk_buff *skb;
830 struct list_head list;
831 int id;
832};
833
834/**
835 * During Rx completion processing, the driver extracts a buffer ID which
836 * is used as an index to the rx_buff_mgmt.buff_arr array and then the SKB
837 * is given to the network stack and the buffer is moved from the 'active'
838 * list to the 'free' list.
839 * During Rx refill, SKBs are attached to free buffers and moved to the
840 * 'active' list.
841 */
842struct wil_rx_buff_mgmt {
843 struct wil_rx_buff *buff_arr;
844 size_t size; /* number of items in buff_arr */
845 struct list_head active;
846 struct list_head free;
847 unsigned long free_list_empty_cnt; /* statistics */
848};
849
700struct wil6210_priv { 850struct wil6210_priv {
701 struct pci_dev *pdev; 851 struct pci_dev *pdev;
702 u32 bar_size; 852 u32 bar_size;
@@ -761,14 +911,20 @@ struct wil6210_priv {
761 struct net_device napi_ndev; /* dummy net_device serving all VIFs */ 911 struct net_device napi_ndev; /* dummy net_device serving all VIFs */
762 912
763 /* DMA related */ 913 /* DMA related */
764 struct vring vring_rx; 914 struct wil_ring ring_rx;
765 unsigned int rx_buf_len; 915 unsigned int rx_buf_len;
766 struct vring vring_tx[WIL6210_MAX_TX_RINGS]; 916 struct wil_ring ring_tx[WIL6210_MAX_TX_RINGS];
767 struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS]; 917 struct wil_ring_tx_data ring_tx_data[WIL6210_MAX_TX_RINGS];
768 u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */ 918 struct wil_status_ring srings[WIL6210_MAX_STATUS_RINGS];
919 u8 num_rx_status_rings;
920 int tx_sring_idx;
921 u8 ring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
769 struct wil_sta_info sta[WIL6210_MAX_CID]; 922 struct wil_sta_info sta[WIL6210_MAX_CID];
770 u32 vring_idle_trsh; /* HW fetches up to 16 descriptors at once */ 923 u32 ring_idle_trsh; /* HW fetches up to 16 descriptors at once */
771 u32 dma_addr_size; /* indicates dma addr size */ 924 u32 dma_addr_size; /* indicates dma addr size */
925 struct wil_rx_buff_mgmt rx_buff_mgmt;
926 bool use_enhanced_dma_hw;
927 struct wil_txrx_ops txrx_ops;
772 928
773 struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */ 929 struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */
774 /* statistics */ 930 /* statistics */
@@ -811,6 +967,16 @@ struct wil6210_priv {
811 u32 rgf_fw_assert_code_addr; 967 u32 rgf_fw_assert_code_addr;
812 u32 rgf_ucode_assert_code_addr; 968 u32 rgf_ucode_assert_code_addr;
813 u32 iccm_base; 969 u32 iccm_base;
970
971 /* relevant only for eDMA */
972 bool use_compressed_rx_status;
973 u32 rx_status_ring_order;
974 u32 tx_status_ring_order;
975 u32 rx_buff_id_count;
976 bool amsdu_en;
977 bool use_rx_hw_reordering;
978 bool secured_boot;
979 u8 boot_config;
814}; 980};
815 981
816#define wil_to_wiphy(i) (i->wiphy) 982#define wil_to_wiphy(i) (i->wiphy)
@@ -990,7 +1156,7 @@ int wmi_add_cipher_key(struct wil6210_vif *vif, u8 key_index,
990 int key_usage); 1156 int key_usage);
991int wmi_echo(struct wil6210_priv *wil); 1157int wmi_echo(struct wil6210_priv *wil);
992int wmi_set_ie(struct wil6210_vif *vif, u8 type, u16 ie_len, const void *ie); 1158int wmi_set_ie(struct wil6210_vif *vif, u8 type, u16 ie_len, const void *ie);
993int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring); 1159int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring);
994int wmi_rxon(struct wil6210_priv *wil, bool on); 1160int wmi_rxon(struct wil6210_priv *wil, bool on);
995int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r); 1161int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
996int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, 1162int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac,
@@ -1083,30 +1249,28 @@ void wil_probe_client_flush(struct wil6210_vif *vif);
1083void wil_probe_client_worker(struct work_struct *work); 1249void wil_probe_client_worker(struct work_struct *work);
1084void wil_disconnect_worker(struct work_struct *work); 1250void wil_disconnect_worker(struct work_struct *work);
1085 1251
1086int wil_rx_init(struct wil6210_priv *wil, u16 size); 1252void wil_init_txrx_ops(struct wil6210_priv *wil);
1087void wil_rx_fini(struct wil6210_priv *wil);
1088 1253
1089/* TX API */ 1254/* TX API */
1090int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size, 1255int wil_ring_init_tx(struct wil6210_vif *vif, int cid);
1091 int cid, int tid);
1092void wil_vring_fini_tx(struct wil6210_priv *wil, int id);
1093int wil_tx_init(struct wil6210_vif *vif, int cid);
1094int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size); 1256int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size);
1095int wil_bcast_init(struct wil6210_vif *vif); 1257int wil_bcast_init(struct wil6210_vif *vif);
1096void wil_bcast_fini(struct wil6210_vif *vif); 1258void wil_bcast_fini(struct wil6210_vif *vif);
1097void wil_bcast_fini_all(struct wil6210_priv *wil); 1259void wil_bcast_fini_all(struct wil6210_priv *wil);
1098 1260
1099void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif, 1261void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
1100 struct vring *vring, bool should_stop); 1262 struct wil_ring *ring, bool should_stop);
1101void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif, 1263void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
1102 struct vring *vring, bool check_stop); 1264 struct wil_ring *ring, bool check_stop);
1103netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev); 1265netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
1104int wil_tx_complete(struct wil6210_vif *vif, int ringid); 1266int wil_tx_complete(struct wil6210_vif *vif, int ringid);
1105void wil6210_unmask_irq_tx(struct wil6210_priv *wil); 1267void wil6210_unmask_irq_tx(struct wil6210_priv *wil);
1268void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil);
1106 1269
1107/* RX API */ 1270/* RX API */
1108void wil_rx_handle(struct wil6210_priv *wil, int *quota); 1271void wil_rx_handle(struct wil6210_priv *wil, int *quota);
1109void wil6210_unmask_irq_rx(struct wil6210_priv *wil); 1272void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
1273void wil6210_unmask_irq_rx_edma(struct wil6210_priv *wil);
1110 1274
1111int wil_iftype_nl2wmi(enum nl80211_iftype type); 1275int wil_iftype_nl2wmi(enum nl80211_iftype type);
1112 1276
@@ -1127,7 +1291,6 @@ bool wil_is_wmi_idle(struct wil6210_priv *wil);
1127int wmi_resume(struct wil6210_priv *wil); 1291int wmi_resume(struct wil6210_priv *wil);
1128int wmi_suspend(struct wil6210_priv *wil); 1292int wmi_suspend(struct wil6210_priv *wil);
1129bool wil_is_tx_idle(struct wil6210_priv *wil); 1293bool wil_is_tx_idle(struct wil6210_priv *wil);
1130bool wil_is_rx_idle(struct wil6210_priv *wil);
1131 1294
1132int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size); 1295int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size);
1133void wil_fw_core_dump(struct wil6210_priv *wil); 1296void wil_fw_core_dump(struct wil6210_priv *wil);
@@ -1142,4 +1305,19 @@ int wmi_start_sched_scan(struct wil6210_priv *wil,
1142int wmi_stop_sched_scan(struct wil6210_priv *wil); 1305int wmi_stop_sched_scan(struct wil6210_priv *wil);
1143int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len); 1306int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len);
1144 1307
1308int reverse_memcmp(const void *cs, const void *ct, size_t count);
1309
1310/* WMI for enhanced DMA */
1311int wil_wmi_tx_sring_cfg(struct wil6210_priv *wil, int ring_id);
1312int wil_wmi_cfg_def_rx_offload(struct wil6210_priv *wil,
1313 u16 max_rx_pl_per_desc);
1314int wil_wmi_rx_sring_add(struct wil6210_priv *wil, u16 ring_id);
1315int wil_wmi_rx_desc_ring_add(struct wil6210_priv *wil, int status_ring_id);
1316int wil_wmi_tx_desc_ring_add(struct wil6210_vif *vif, int ring_id, int cid,
1317 int tid);
1318int wil_wmi_bcast_desc_ring_add(struct wil6210_vif *vif, int ring_id);
1319int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid,
1320 u8 tid, u8 token, u16 status, bool amsdu,
1321 u16 agg_wsize, u16 timeout);
1322
1145#endif /* __WIL6210_H__ */ 1323#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index 1ed330674d9b..dc33a0b4c3fa 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2015,2017 Qualcomm Atheros, Inc. 2 * Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
3 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
3 * 4 *
4 * Permission to use, copy, modify, and/or distribute this software for any 5 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
@@ -36,7 +37,7 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
36 for (i = 1; i < ARRAY_SIZE(fw_mapping); i++) { 37 for (i = 1; i < ARRAY_SIZE(fw_mapping); i++) {
37 map = &fw_mapping[i]; 38 map = &fw_mapping[i];
38 39
39 if (!map->fw) 40 if (!map->crash_dump)
40 continue; 41 continue;
41 42
42 if (map->host < host_min) 43 if (map->host < host_min)
@@ -85,7 +86,7 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
85 for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) { 86 for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
86 map = &fw_mapping[i]; 87 map = &fw_mapping[i];
87 88
88 if (!map->fw) 89 if (!map->crash_dump)
89 continue; 90 continue;
90 91
91 data = (void * __force)wil->csr + HOSTADDR(map->host); 92 data = (void * __force)wil->csr + HOSTADDR(map->host);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 5d991243cdb5..71056c834fff 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -89,28 +89,28 @@ MODULE_PARM_DESC(led_id,
89 */ 89 */
90const struct fw_map sparrow_fw_mapping[] = { 90const struct fw_map sparrow_fw_mapping[] = {
91 /* FW code RAM 256k */ 91 /* FW code RAM 256k */
92 {0x000000, 0x040000, 0x8c0000, "fw_code", true}, 92 {0x000000, 0x040000, 0x8c0000, "fw_code", true, true},
93 /* FW data RAM 32k */ 93 /* FW data RAM 32k */
94 {0x800000, 0x808000, 0x900000, "fw_data", true}, 94 {0x800000, 0x808000, 0x900000, "fw_data", true, true},
95 /* periph data 128k */ 95 /* periph data 128k */
96 {0x840000, 0x860000, 0x908000, "fw_peri", true}, 96 {0x840000, 0x860000, 0x908000, "fw_peri", true, true},
97 /* various RGF 40k */ 97 /* various RGF 40k */
98 {0x880000, 0x88a000, 0x880000, "rgf", true}, 98 {0x880000, 0x88a000, 0x880000, "rgf", true, true},
99 /* AGC table 4k */ 99 /* AGC table 4k */
100 {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true}, 100 {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
101 /* Pcie_ext_rgf 4k */ 101 /* Pcie_ext_rgf 4k */
102 {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true}, 102 {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
103 /* mac_ext_rgf 512b */ 103 /* mac_ext_rgf 512b */
104 {0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext", true}, 104 {0x88c000, 0x88c200, 0x88c000, "mac_rgf_ext", true, true},
105 /* upper area 548k */ 105 /* upper area 548k */
106 {0x8c0000, 0x949000, 0x8c0000, "upper", true}, 106 {0x8c0000, 0x949000, 0x8c0000, "upper", true, true},
107 /* UCODE areas - accessible by debugfs blobs but not by 107 /* UCODE areas - accessible by debugfs blobs but not by
108 * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas! 108 * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
109 */ 109 */
110 /* ucode code RAM 128k */ 110 /* ucode code RAM 128k */
111 {0x000000, 0x020000, 0x920000, "uc_code", false}, 111 {0x000000, 0x020000, 0x920000, "uc_code", false, false},
112 /* ucode data RAM 16k */ 112 /* ucode data RAM 16k */
113 {0x800000, 0x804000, 0x940000, "uc_data", false}, 113 {0x800000, 0x804000, 0x940000, "uc_data", false, false},
114}; 114};
115 115
116/** 116/**
@@ -118,7 +118,7 @@ const struct fw_map sparrow_fw_mapping[] = {
118 * it is a bit larger to support extra features 118 * it is a bit larger to support extra features
119 */ 119 */
120const struct fw_map sparrow_d0_mac_rgf_ext = { 120const struct fw_map sparrow_d0_mac_rgf_ext = {
121 0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true 121 0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true, true
122}; 122};
123 123
124/** 124/**
@@ -134,34 +134,89 @@ const struct fw_map sparrow_d0_mac_rgf_ext = {
134 */ 134 */
135const struct fw_map talyn_fw_mapping[] = { 135const struct fw_map talyn_fw_mapping[] = {
136 /* FW code RAM 1M */ 136 /* FW code RAM 1M */
137 {0x000000, 0x100000, 0x900000, "fw_code", true}, 137 {0x000000, 0x100000, 0x900000, "fw_code", true, true},
138 /* FW data RAM 128k */ 138 /* FW data RAM 128k */
139 {0x800000, 0x820000, 0xa00000, "fw_data", true}, 139 {0x800000, 0x820000, 0xa00000, "fw_data", true, true},
140 /* periph. data RAM 96k */ 140 /* periph. data RAM 96k */
141 {0x840000, 0x858000, 0xa20000, "fw_peri", true}, 141 {0x840000, 0x858000, 0xa20000, "fw_peri", true, true},
142 /* various RGF 40k */ 142 /* various RGF 40k */
143 {0x880000, 0x88a000, 0x880000, "rgf", true}, 143 {0x880000, 0x88a000, 0x880000, "rgf", true, true},
144 /* AGC table 4k */ 144 /* AGC table 4k */
145 {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true}, 145 {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
146 /* Pcie_ext_rgf 4k */ 146 /* Pcie_ext_rgf 4k */
147 {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true}, 147 {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
148 /* mac_ext_rgf 1344b */ 148 /* mac_ext_rgf 1344b */
149 {0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true}, 149 {0x88c000, 0x88c540, 0x88c000, "mac_rgf_ext", true, true},
150 /* ext USER RGF 4k */ 150 /* ext USER RGF 4k */
151 {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true}, 151 {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true, true},
152 /* OTP 4k */ 152 /* OTP 4k */
153 {0x8a0000, 0x8a1000, 0x8a0000, "otp", true}, 153 {0x8a0000, 0x8a1000, 0x8a0000, "otp", true, false},
154 /* DMA EXT RGF 64k */ 154 /* DMA EXT RGF 64k */
155 {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true}, 155 {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true, true},
156 /* upper area 1536k */ 156 /* upper area 1536k */
157 {0x900000, 0xa80000, 0x900000, "upper", true}, 157 {0x900000, 0xa80000, 0x900000, "upper", true, true},
158 /* UCODE areas - accessible by debugfs blobs but not by 158 /* UCODE areas - accessible by debugfs blobs but not by
159 * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas! 159 * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
160 */ 160 */
161 /* ucode code RAM 256k */ 161 /* ucode code RAM 256k */
162 {0x000000, 0x040000, 0xa38000, "uc_code", false}, 162 {0x000000, 0x040000, 0xa38000, "uc_code", false, false},
163 /* ucode data RAM 32k */ 163 /* ucode data RAM 32k */
164 {0x800000, 0x808000, 0xa78000, "uc_data", false}, 164 {0x800000, 0x808000, 0xa78000, "uc_data", false, false},
165};
166
167/**
168 * @talyn_mb_fw_mapping provides memory remapping table for Talyn-MB
169 *
170 * array size should be in sync with the declaration in the wil6210.h
171 *
172 * Talyn MB memory mapping:
173 * Linker address PCI/Host address
174 * 0x880000 .. 0xc80000 4Mb BAR0
175 * 0x800000 .. 0x820000 0xa00000 .. 0xa20000 128k DCCM
176 * 0x840000 .. 0x858000 0xa20000 .. 0xa38000 96k PERIPH
177 */
178const struct fw_map talyn_mb_fw_mapping[] = {
179 /* FW code RAM 768k */
180 {0x000000, 0x0c0000, 0x900000, "fw_code", true, true},
181 /* FW data RAM 128k */
182 {0x800000, 0x820000, 0xa00000, "fw_data", true, true},
183 /* periph. data RAM 96k */
184 {0x840000, 0x858000, 0xa20000, "fw_peri", true, true},
185 /* various RGF 40k */
186 {0x880000, 0x88a000, 0x880000, "rgf", true, true},
187 /* AGC table 4k */
188 {0x88a000, 0x88b000, 0x88a000, "AGC_tbl", true, true},
189 /* Pcie_ext_rgf 4k */
190 {0x88b000, 0x88c000, 0x88b000, "rgf_ext", true, true},
191 /* mac_ext_rgf 2256b */
192 {0x88c000, 0x88c8d0, 0x88c000, "mac_rgf_ext", true, true},
193 /* ext USER RGF 4k */
194 {0x88d000, 0x88e000, 0x88d000, "ext_user_rgf", true, true},
195 /* SEC PKA 16k */
196 {0x890000, 0x894000, 0x890000, "sec_pka", true, true},
197 /* SEC KDF RGF 3096b */
198 {0x898000, 0x898c18, 0x898000, "sec_kdf_rgf", true, true},
199 /* SEC MAIN 2124b */
200 {0x89a000, 0x89a84c, 0x89a000, "sec_main", true, true},
201 /* OTP 4k */
202 {0x8a0000, 0x8a1000, 0x8a0000, "otp", true, false},
203 /* DMA EXT RGF 64k */
204 {0x8b0000, 0x8c0000, 0x8b0000, "dma_ext_rgf", true, true},
205 /* DUM USER RGF 528b */
206 {0x8c0000, 0x8c0210, 0x8c0000, "dum_user_rgf", true, true},
207 /* DMA OFU 296b */
208 {0x8c2000, 0x8c2128, 0x8c2000, "dma_ofu", true, true},
209 /* ucode debug 4k */
210 {0x8c3000, 0x8c4000, 0x8c3000, "ucode_debug", true, true},
211 /* upper area 1536k */
212 {0x900000, 0xa80000, 0x900000, "upper", true, true},
213 /* UCODE areas - accessible by debugfs blobs but not by
214 * wmi_addr_remap. UCODE areas MUST be added AFTER FW areas!
215 */
216 /* ucode code RAM 256k */
217 {0x000000, 0x040000, 0xa38000, "uc_code", false, false},
218 /* ucode data RAM 32k */
219 {0x800000, 0x808000, 0xa78000, "uc_data", false, false},
165}; 220};
166 221
167struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE]; 222struct fw_map fw_mapping[MAX_FW_MAPPING_TABLE_SIZE];
@@ -365,14 +420,16 @@ static const char *cmdid2name(u16 cmdid)
365 return "WMI_DEL_STA_CMD"; 420 return "WMI_DEL_STA_CMD";
366 case WMI_DISCONNECT_STA_CMDID: 421 case WMI_DISCONNECT_STA_CMDID:
367 return "WMI_DISCONNECT_STA_CMD"; 422 return "WMI_DISCONNECT_STA_CMD";
368 case WMI_VRING_BA_EN_CMDID: 423 case WMI_RING_BA_EN_CMDID:
369 return "WMI_VRING_BA_EN_CMD"; 424 return "WMI_RING_BA_EN_CMD";
370 case WMI_VRING_BA_DIS_CMDID: 425 case WMI_RING_BA_DIS_CMDID:
371 return "WMI_VRING_BA_DIS_CMD"; 426 return "WMI_RING_BA_DIS_CMD";
372 case WMI_RCP_DELBA_CMDID: 427 case WMI_RCP_DELBA_CMDID:
373 return "WMI_RCP_DELBA_CMD"; 428 return "WMI_RCP_DELBA_CMD";
374 case WMI_RCP_ADDBA_RESP_CMDID: 429 case WMI_RCP_ADDBA_RESP_CMDID:
375 return "WMI_RCP_ADDBA_RESP_CMD"; 430 return "WMI_RCP_ADDBA_RESP_CMD";
431 case WMI_RCP_ADDBA_RESP_EDMA_CMDID:
432 return "WMI_RCP_ADDBA_RESP_EDMA_CMD";
376 case WMI_PS_DEV_PROFILE_CFG_CMDID: 433 case WMI_PS_DEV_PROFILE_CFG_CMDID:
377 return "WMI_PS_DEV_PROFILE_CFG_CMD"; 434 return "WMI_PS_DEV_PROFILE_CFG_CMD";
378 case WMI_SET_MGMT_RETRY_LIMIT_CMDID: 435 case WMI_SET_MGMT_RETRY_LIMIT_CMDID:
@@ -395,6 +452,18 @@ static const char *cmdid2name(u16 cmdid)
395 return "WMI_START_SCHED_SCAN_CMD"; 452 return "WMI_START_SCHED_SCAN_CMD";
396 case WMI_STOP_SCHED_SCAN_CMDID: 453 case WMI_STOP_SCHED_SCAN_CMDID:
397 return "WMI_STOP_SCHED_SCAN_CMD"; 454 return "WMI_STOP_SCHED_SCAN_CMD";
455 case WMI_TX_STATUS_RING_ADD_CMDID:
456 return "WMI_TX_STATUS_RING_ADD_CMD";
457 case WMI_RX_STATUS_RING_ADD_CMDID:
458 return "WMI_RX_STATUS_RING_ADD_CMD";
459 case WMI_TX_DESC_RING_ADD_CMDID:
460 return "WMI_TX_DESC_RING_ADD_CMD";
461 case WMI_RX_DESC_RING_ADD_CMDID:
462 return "WMI_RX_DESC_RING_ADD_CMD";
463 case WMI_BCAST_DESC_RING_ADD_CMDID:
464 return "WMI_BCAST_DESC_RING_ADD_CMD";
465 case WMI_CFG_DEF_RX_OFFLOAD_CMDID:
466 return "WMI_CFG_DEF_RX_OFFLOAD_CMD";
398 default: 467 default:
399 return "Untracked CMD"; 468 return "Untracked CMD";
400 } 469 }
@@ -449,8 +518,8 @@ static const char *eventid2name(u16 eventid)
449 return "WMI_RCP_ADDBA_REQ_EVENT"; 518 return "WMI_RCP_ADDBA_REQ_EVENT";
450 case WMI_DELBA_EVENTID: 519 case WMI_DELBA_EVENTID:
451 return "WMI_DELBA_EVENT"; 520 return "WMI_DELBA_EVENT";
452 case WMI_VRING_EN_EVENTID: 521 case WMI_RING_EN_EVENTID:
453 return "WMI_VRING_EN_EVENT"; 522 return "WMI_RING_EN_EVENT";
454 case WMI_DATA_PORT_OPEN_EVENTID: 523 case WMI_DATA_PORT_OPEN_EVENTID:
455 return "WMI_DATA_PORT_OPEN_EVENT"; 524 return "WMI_DATA_PORT_OPEN_EVENT";
456 case WMI_AOA_MEAS_EVENTID: 525 case WMI_AOA_MEAS_EVENTID:
@@ -519,6 +588,16 @@ static const char *eventid2name(u16 eventid)
519 return "WMI_STOP_SCHED_SCAN_EVENT"; 588 return "WMI_STOP_SCHED_SCAN_EVENT";
520 case WMI_SCHED_SCAN_RESULT_EVENTID: 589 case WMI_SCHED_SCAN_RESULT_EVENTID:
521 return "WMI_SCHED_SCAN_RESULT_EVENT"; 590 return "WMI_SCHED_SCAN_RESULT_EVENT";
591 case WMI_TX_STATUS_RING_CFG_DONE_EVENTID:
592 return "WMI_TX_STATUS_RING_CFG_DONE_EVENT";
593 case WMI_RX_STATUS_RING_CFG_DONE_EVENTID:
594 return "WMI_RX_STATUS_RING_CFG_DONE_EVENT";
595 case WMI_TX_DESC_RING_CFG_DONE_EVENTID:
596 return "WMI_TX_DESC_RING_CFG_DONE_EVENT";
597 case WMI_RX_DESC_RING_CFG_DONE_EVENTID:
598 return "WMI_RX_DESC_RING_CFG_DONE_EVENT";
599 case WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID:
600 return "WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENT";
522 default: 601 default:
523 return "Untracked EVENT"; 602 return "Untracked EVENT";
524 } 603 }
@@ -906,7 +985,7 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len)
906 wil->sta[evt->cid].mid = vif->mid; 985 wil->sta[evt->cid].mid = vif->mid;
907 wil->sta[evt->cid].status = wil_sta_conn_pending; 986 wil->sta[evt->cid].status = wil_sta_conn_pending;
908 987
909 rc = wil_tx_init(vif, evt->cid); 988 rc = wil_ring_init_tx(vif, evt->cid);
910 if (rc) { 989 if (rc) {
911 wil_err(wil, "config tx vring failed for CID %d, rc (%d)\n", 990 wil_err(wil, "config tx vring failed for CID %d, rc (%d)\n",
912 evt->cid, rc); 991 evt->cid, rc);
@@ -1063,16 +1142,16 @@ static void wmi_evt_eapol_rx(struct wil6210_vif *vif, int id, void *d, int len)
1063 } 1142 }
1064} 1143}
1065 1144
1066static void wmi_evt_vring_en(struct wil6210_vif *vif, int id, void *d, int len) 1145static void wmi_evt_ring_en(struct wil6210_vif *vif, int id, void *d, int len)
1067{ 1146{
1068 struct wil6210_priv *wil = vif_to_wil(vif); 1147 struct wil6210_priv *wil = vif_to_wil(vif);
1069 struct wmi_vring_en_event *evt = d; 1148 struct wmi_ring_en_event *evt = d;
1070 u8 vri = evt->vring_index; 1149 u8 vri = evt->ring_index;
1071 struct wireless_dev *wdev = vif_to_wdev(vif); 1150 struct wireless_dev *wdev = vif_to_wdev(vif);
1072 1151
1073 wil_dbg_wmi(wil, "Enable vring %d MID %d\n", vri, vif->mid); 1152 wil_dbg_wmi(wil, "Enable vring %d MID %d\n", vri, vif->mid);
1074 1153
1075 if (vri >= ARRAY_SIZE(wil->vring_tx)) { 1154 if (vri >= ARRAY_SIZE(wil->ring_tx)) {
1076 wil_err(wil, "Enable for invalid vring %d\n", vri); 1155 wil_err(wil, "Enable for invalid vring %d\n", vri);
1077 return; 1156 return;
1078 } 1157 }
@@ -1081,8 +1160,8 @@ static void wmi_evt_vring_en(struct wil6210_vif *vif, int id, void *d, int len)
1081 /* in AP mode with disable_ap_sme, this is done by 1160 /* in AP mode with disable_ap_sme, this is done by
1082 * wil_cfg80211_change_station() 1161 * wil_cfg80211_change_station()
1083 */ 1162 */
1084 wil->vring_tx_data[vri].dot1x_open = true; 1163 wil->ring_tx_data[vri].dot1x_open = true;
1085 if (vri == vif->bcast_vring) /* no BA for bcast */ 1164 if (vri == vif->bcast_ring) /* no BA for bcast */
1086 return; 1165 return;
1087 if (agg_wsize >= 0) 1166 if (agg_wsize >= 0)
1088 wil_addba_tx_request(wil, vri, agg_wsize); 1167 wil_addba_tx_request(wil, vri, agg_wsize);
@@ -1093,7 +1172,7 @@ static void wmi_evt_ba_status(struct wil6210_vif *vif, int id,
1093{ 1172{
1094 struct wil6210_priv *wil = vif_to_wil(vif); 1173 struct wil6210_priv *wil = vif_to_wil(vif);
1095 struct wmi_ba_status_event *evt = d; 1174 struct wmi_ba_status_event *evt = d;
1096 struct vring_tx_data *txdata; 1175 struct wil_ring_tx_data *txdata;
1097 1176
1098 wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d AMSDU%s\n", 1177 wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d AMSDU%s\n",
1099 evt->ringid, 1178 evt->ringid,
@@ -1112,7 +1191,7 @@ static void wmi_evt_ba_status(struct wil6210_vif *vif, int id,
1112 evt->amsdu = 0; 1191 evt->amsdu = 0;
1113 } 1192 }
1114 1193
1115 txdata = &wil->vring_tx_data[evt->ringid]; 1194 txdata = &wil->ring_tx_data[evt->ringid];
1116 1195
1117 txdata->agg_timeout = le16_to_cpu(evt->ba_timeout); 1196 txdata->agg_timeout = le16_to_cpu(evt->ba_timeout);
1118 txdata->agg_wsize = evt->agg_wsize; 1197 txdata->agg_wsize = evt->agg_wsize;
@@ -1150,11 +1229,11 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
1150 if (!evt->from_initiator) { 1229 if (!evt->from_initiator) {
1151 int i; 1230 int i;
1152 /* find Tx vring it belongs to */ 1231 /* find Tx vring it belongs to */
1153 for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) { 1232 for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
1154 if ((wil->vring2cid_tid[i][0] == cid) && 1233 if (wil->ring2cid_tid[i][0] == cid &&
1155 (wil->vring2cid_tid[i][1] == tid)) { 1234 wil->ring2cid_tid[i][1] == tid) {
1156 struct vring_tx_data *txdata = 1235 struct wil_ring_tx_data *txdata =
1157 &wil->vring_tx_data[i]; 1236 &wil->ring_tx_data[i];
1158 1237
1159 wil_dbg_wmi(wil, "DELBA Tx vring %d\n", i); 1238 wil_dbg_wmi(wil, "DELBA Tx vring %d\n", i);
1160 txdata->agg_timeout = 0; 1239 txdata->agg_timeout = 0;
@@ -1164,7 +1243,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
1164 break; /* max. 1 matching ring */ 1243 break; /* max. 1 matching ring */
1165 } 1244 }
1166 } 1245 }
1167 if (i >= ARRAY_SIZE(wil->vring2cid_tid)) 1246 if (i >= ARRAY_SIZE(wil->ring2cid_tid))
1168 wil_err(wil, "DELBA: unable to find Tx vring\n"); 1247 wil_err(wil, "DELBA: unable to find Tx vring\n");
1169 return; 1248 return;
1170 } 1249 }
@@ -1277,7 +1356,7 @@ static const struct {
1277 {WMI_BA_STATUS_EVENTID, wmi_evt_ba_status}, 1356 {WMI_BA_STATUS_EVENTID, wmi_evt_ba_status},
1278 {WMI_RCP_ADDBA_REQ_EVENTID, wmi_evt_addba_rx_req}, 1357 {WMI_RCP_ADDBA_REQ_EVENTID, wmi_evt_addba_rx_req},
1279 {WMI_DELBA_EVENTID, wmi_evt_delba}, 1358 {WMI_DELBA_EVENTID, wmi_evt_delba},
1280 {WMI_VRING_EN_EVENTID, wmi_evt_vring_en}, 1359 {WMI_RING_EN_EVENTID, wmi_evt_ring_en},
1281 {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_ignore}, 1360 {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_ignore},
1282 {WMI_SCHED_SCAN_RESULT_EVENTID, wmi_evt_sched_scan_result}, 1361 {WMI_SCHED_SCAN_RESULT_EVENTID, wmi_evt_sched_scan_result},
1283}; 1362};
@@ -1909,7 +1988,7 @@ int wmi_rxon(struct wil6210_priv *wil, bool on)
1909 return rc; 1988 return rc;
1910} 1989}
1911 1990
1912int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring) 1991int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring)
1913{ 1992{
1914 struct net_device *ndev = wil->main_ndev; 1993 struct net_device *ndev = wil->main_ndev;
1915 struct wireless_dev *wdev = ndev->ieee80211_ptr; 1994 struct wireless_dev *wdev = ndev->ieee80211_ptr;
@@ -2063,29 +2142,32 @@ int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac,
2063int wmi_addba(struct wil6210_priv *wil, u8 mid, 2142int wmi_addba(struct wil6210_priv *wil, u8 mid,
2064 u8 ringid, u8 size, u16 timeout) 2143 u8 ringid, u8 size, u16 timeout)
2065{ 2144{
2066 struct wmi_vring_ba_en_cmd cmd = { 2145 u8 amsdu = wil->use_enhanced_dma_hw && wil->use_rx_hw_reordering &&
2067 .ringid = ringid, 2146 test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) &&
2147 wil->amsdu_en;
2148 struct wmi_ring_ba_en_cmd cmd = {
2149 .ring_id = ringid,
2068 .agg_max_wsize = size, 2150 .agg_max_wsize = size,
2069 .ba_timeout = cpu_to_le16(timeout), 2151 .ba_timeout = cpu_to_le16(timeout),
2070 .amsdu = 0, 2152 .amsdu = amsdu,
2071 }; 2153 };
2072 2154
2073 wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d)\n", ringid, size, 2155 wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d amsdu %d)\n",
2074 timeout); 2156 ringid, size, timeout, amsdu);
2075 2157
2076 return wmi_send(wil, WMI_VRING_BA_EN_CMDID, mid, &cmd, sizeof(cmd)); 2158 return wmi_send(wil, WMI_RING_BA_EN_CMDID, mid, &cmd, sizeof(cmd));
2077} 2159}
2078 2160
2079int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason) 2161int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason)
2080{ 2162{
2081 struct wmi_vring_ba_dis_cmd cmd = { 2163 struct wmi_ring_ba_dis_cmd cmd = {
2082 .ringid = ringid, 2164 .ring_id = ringid,
2083 .reason = cpu_to_le16(reason), 2165 .reason = cpu_to_le16(reason),
2084 }; 2166 };
2085 2167
2086 wil_dbg_wmi(wil, "delba_tx: (ring %d reason %d)\n", ringid, reason); 2168 wil_dbg_wmi(wil, "delba_tx: (ring %d reason %d)\n", ringid, reason);
2087 2169
2088 return wmi_send(wil, WMI_VRING_BA_DIS_CMDID, mid, &cmd, sizeof(cmd)); 2170 return wmi_send(wil, WMI_RING_BA_DIS_CMDID, mid, &cmd, sizeof(cmd));
2089} 2171}
2090 2172
2091int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cidxtid, u16 reason) 2173int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cidxtid, u16 reason)
@@ -2146,6 +2228,54 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil,
2146 return rc; 2228 return rc;
2147} 2229}
2148 2230
2231int wmi_addba_rx_resp_edma(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid,
2232 u8 token, u16 status, bool amsdu, u16 agg_wsize,
2233 u16 timeout)
2234{
2235 int rc;
2236 struct wmi_rcp_addba_resp_edma_cmd cmd = {
2237 .cid = cid,
2238 .tid = tid,
2239 .dialog_token = token,
2240 .status_code = cpu_to_le16(status),
2241 /* bit 0: A-MSDU supported
2242 * bit 1: policy (should be 0 for us)
2243 * bits 2..5: TID
2244 * bits 6..15: buffer size
2245 */
2246 .ba_param_set = cpu_to_le16((amsdu ? 1 : 0) | (tid << 2) |
2247 (agg_wsize << 6)),
2248 .ba_timeout = cpu_to_le16(timeout),
2249 /* route all the connections to status ring 0 */
2250 .status_ring_id = WIL_DEFAULT_RX_STATUS_RING_ID,
2251 };
2252 struct {
2253 struct wmi_cmd_hdr wmi;
2254 struct wmi_rcp_addba_resp_sent_event evt;
2255 } __packed reply = {
2256 .evt = {.status = cpu_to_le16(WMI_FW_STATUS_FAILURE)},
2257 };
2258
2259 wil_dbg_wmi(wil,
2260 "ADDBA response for CID %d TID %d size %d timeout %d status %d AMSDU%s, sring_id %d\n",
2261 cid, tid, agg_wsize, timeout, status, amsdu ? "+" : "-",
2262 WIL_DEFAULT_RX_STATUS_RING_ID);
2263
2264 rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_EDMA_CMDID, mid, &cmd,
2265 sizeof(cmd), WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply,
2266 sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
2267 if (rc)
2268 return rc;
2269
2270 if (reply.evt.status) {
2271 wil_err(wil, "ADDBA response failed with status %d\n",
2272 le16_to_cpu(reply.evt.status));
2273 rc = -EINVAL;
2274 }
2275
2276 return rc;
2277}
2278
2149int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil, 2279int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil,
2150 enum wmi_ps_profile_type ps_profile) 2280 enum wmi_ps_profile_type ps_profile)
2151{ 2281{
@@ -2852,3 +2982,263 @@ int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len)
2852 2982
2853 return rc; 2983 return rc;
2854} 2984}
2985
2986int wil_wmi_tx_sring_cfg(struct wil6210_priv *wil, int ring_id)
2987{
2988 int rc;
2989 struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
2990 struct wil_status_ring *sring = &wil->srings[ring_id];
2991 struct wmi_tx_status_ring_add_cmd cmd = {
2992 .ring_cfg = {
2993 .ring_size = cpu_to_le16(sring->size),
2994 },
2995 .irq_index = WIL_TX_STATUS_IRQ_IDX
2996 };
2997 struct {
2998 struct wmi_cmd_hdr hdr;
2999 struct wmi_tx_status_ring_cfg_done_event evt;
3000 } __packed reply = {
3001 .evt = {.status = WMI_FW_STATUS_FAILURE},
3002 };
3003
3004 cmd.ring_cfg.ring_id = ring_id;
3005
3006 cmd.ring_cfg.ring_mem_base = cpu_to_le64(sring->pa);
3007 rc = wmi_call(wil, WMI_TX_STATUS_RING_ADD_CMDID, vif->mid, &cmd,
3008 sizeof(cmd), WMI_TX_STATUS_RING_CFG_DONE_EVENTID,
3009 &reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
3010 if (rc) {
3011 wil_err(wil, "TX_STATUS_RING_ADD_CMD failed, rc %d\n", rc);
3012 return rc;
3013 }
3014
3015 if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
3016 wil_err(wil, "TX_STATUS_RING_ADD_CMD failed, status %d\n",
3017 reply.evt.status);
3018 return -EINVAL;
3019 }
3020
3021 sring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
3022
3023 return 0;
3024}
3025
3026int wil_wmi_cfg_def_rx_offload(struct wil6210_priv *wil, u16 max_rx_pl_per_desc)
3027{
3028 struct net_device *ndev = wil->main_ndev;
3029 struct wil6210_vif *vif = ndev_to_vif(ndev);
3030 int rc;
3031 struct wmi_cfg_def_rx_offload_cmd cmd = {
3032 .max_msdu_size = cpu_to_le16(wil_mtu2macbuf(WIL_MAX_ETH_MTU)),
3033 .max_rx_pl_per_desc = cpu_to_le16(max_rx_pl_per_desc),
3034 .decap_trans_type = WMI_DECAP_TYPE_802_3,
3035 .l2_802_3_offload_ctrl = 0,
3036 .l3_l4_ctrl = 1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS,
3037 };
3038 struct {
3039 struct wmi_cmd_hdr hdr;
3040 struct wmi_cfg_def_rx_offload_done_event evt;
3041 } __packed reply = {
3042 .evt = {.status = WMI_FW_STATUS_FAILURE},
3043 };
3044
3045 rc = wmi_call(wil, WMI_CFG_DEF_RX_OFFLOAD_CMDID, vif->mid, &cmd,
3046 sizeof(cmd), WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID, &reply,
3047 sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
3048 if (rc) {
3049 wil_err(wil, "WMI_CFG_DEF_RX_OFFLOAD_CMD failed, rc %d\n", rc);
3050 return rc;
3051 }
3052
3053 if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
3054 wil_err(wil, "WMI_CFG_DEF_RX_OFFLOAD_CMD failed, status %d\n",
3055 reply.evt.status);
3056 return -EINVAL;
3057 }
3058
3059 return 0;
3060}
3061
3062int wil_wmi_rx_sring_add(struct wil6210_priv *wil, u16 ring_id)
3063{
3064 struct net_device *ndev = wil->main_ndev;
3065 struct wil6210_vif *vif = ndev_to_vif(ndev);
3066 struct wil_status_ring *sring = &wil->srings[ring_id];
3067 int rc;
3068 struct wmi_rx_status_ring_add_cmd cmd = {
3069 .ring_cfg = {
3070 .ring_size = cpu_to_le16(sring->size),
3071 .ring_id = ring_id,
3072 },
3073 .rx_msg_type = wil->use_compressed_rx_status ?
3074 WMI_RX_MSG_TYPE_COMPRESSED :
3075 WMI_RX_MSG_TYPE_EXTENDED,
3076 .irq_index = WIL_RX_STATUS_IRQ_IDX,
3077 };
3078 struct {
3079 struct wmi_cmd_hdr hdr;
3080 struct wmi_rx_status_ring_cfg_done_event evt;
3081 } __packed reply = {
3082 .evt = {.status = WMI_FW_STATUS_FAILURE},
3083 };
3084
3085 cmd.ring_cfg.ring_mem_base = cpu_to_le64(sring->pa);
3086 rc = wmi_call(wil, WMI_RX_STATUS_RING_ADD_CMDID, vif->mid, &cmd,
3087 sizeof(cmd), WMI_RX_STATUS_RING_CFG_DONE_EVENTID, &reply,
3088 sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
3089 if (rc) {
3090 wil_err(wil, "RX_STATUS_RING_ADD_CMD failed, rc %d\n", rc);
3091 return rc;
3092 }
3093
3094 if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
3095 wil_err(wil, "RX_STATUS_RING_ADD_CMD failed, status %d\n",
3096 reply.evt.status);
3097 return -EINVAL;
3098 }
3099
3100 sring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
3101
3102 return 0;
3103}
3104
3105int wil_wmi_rx_desc_ring_add(struct wil6210_priv *wil, int status_ring_id)
3106{
3107 struct net_device *ndev = wil->main_ndev;
3108 struct wil6210_vif *vif = ndev_to_vif(ndev);
3109 struct wil_ring *ring = &wil->ring_rx;
3110 int rc;
3111 struct wmi_rx_desc_ring_add_cmd cmd = {
3112 .ring_cfg = {
3113 .ring_size = cpu_to_le16(ring->size),
3114 .ring_id = WIL_RX_DESC_RING_ID,
3115 },
3116 .status_ring_id = status_ring_id,
3117 .irq_index = WIL_RX_STATUS_IRQ_IDX,
3118 };
3119 struct {
3120 struct wmi_cmd_hdr hdr;
3121 struct wmi_rx_desc_ring_cfg_done_event evt;
3122 } __packed reply = {
3123 .evt = {.status = WMI_FW_STATUS_FAILURE},
3124 };
3125
3126 cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
3127 cmd.sw_tail_host_addr = cpu_to_le64(ring->edma_rx_swtail.pa);
3128 rc = wmi_call(wil, WMI_RX_DESC_RING_ADD_CMDID, vif->mid, &cmd,
3129 sizeof(cmd), WMI_RX_DESC_RING_CFG_DONE_EVENTID, &reply,
3130 sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
3131 if (rc) {
3132 wil_err(wil, "WMI_RX_DESC_RING_ADD_CMD failed, rc %d\n", rc);
3133 return rc;
3134 }
3135
3136 if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
3137 wil_err(wil, "WMI_RX_DESC_RING_ADD_CMD failed, status %d\n",
3138 reply.evt.status);
3139 return -EINVAL;
3140 }
3141
3142 ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
3143
3144 return 0;
3145}
3146
3147int wil_wmi_tx_desc_ring_add(struct wil6210_vif *vif, int ring_id, int cid,
3148 int tid)
3149{
3150 struct wil6210_priv *wil = vif_to_wil(vif);
3151 int sring_id = wil->tx_sring_idx; /* there is only one TX sring */
3152 int rc;
3153 struct wil_ring *ring = &wil->ring_tx[ring_id];
3154 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
3155 struct wmi_tx_desc_ring_add_cmd cmd = {
3156 .ring_cfg = {
3157 .ring_size = cpu_to_le16(ring->size),
3158 .ring_id = ring_id,
3159 },
3160 .status_ring_id = sring_id,
3161 .cid = cid,
3162 .tid = tid,
3163 .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
3164 .max_msdu_size = cpu_to_le16(wil_mtu2macbuf(mtu_max)),
3165 .schd_params = {
3166 .priority = cpu_to_le16(0),
3167 .timeslot_us = cpu_to_le16(0xfff),
3168 }
3169 };
3170 struct {
3171 struct wmi_cmd_hdr hdr;
3172 struct wmi_tx_desc_ring_cfg_done_event evt;
3173 } __packed reply = {
3174 .evt = {.status = WMI_FW_STATUS_FAILURE},
3175 };
3176
3177 cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
3178 rc = wmi_call(wil, WMI_TX_DESC_RING_ADD_CMDID, vif->mid, &cmd,
3179 sizeof(cmd), WMI_TX_DESC_RING_CFG_DONE_EVENTID, &reply,
3180 sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
3181 if (rc) {
3182 wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed, rc %d\n", rc);
3183 return rc;
3184 }
3185
3186 if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
3187 wil_err(wil, "WMI_TX_DESC_RING_ADD_CMD failed, status %d\n",
3188 reply.evt.status);
3189 return -EINVAL;
3190 }
3191
3192 spin_lock_bh(&txdata->lock);
3193 ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
3194 txdata->mid = vif->mid;
3195 txdata->enabled = 1;
3196 spin_unlock_bh(&txdata->lock);
3197
3198 return 0;
3199}
3200
3201int wil_wmi_bcast_desc_ring_add(struct wil6210_vif *vif, int ring_id)
3202{
3203 struct wil6210_priv *wil = vif_to_wil(vif);
3204 struct wil_ring *ring = &wil->ring_tx[ring_id];
3205 int rc;
3206 struct wmi_bcast_desc_ring_add_cmd cmd = {
3207 .ring_cfg = {
3208 .ring_size = cpu_to_le16(ring->size),
3209 .ring_id = ring_id,
3210 },
3211 .status_ring_id = wil->tx_sring_idx,
3212 .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
3213 };
3214 struct {
3215 struct wmi_cmd_hdr hdr;
3216 struct wmi_rx_desc_ring_cfg_done_event evt;
3217 } __packed reply = {
3218 .evt = {.status = WMI_FW_STATUS_FAILURE},
3219 };
3220 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
3221
3222 cmd.ring_cfg.ring_mem_base = cpu_to_le64(ring->pa);
3223 rc = wmi_call(wil, WMI_BCAST_DESC_RING_ADD_CMDID, vif->mid, &cmd,
3224 sizeof(cmd), WMI_TX_DESC_RING_CFG_DONE_EVENTID, &reply,
3225 sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
3226 if (rc) {
3227 wil_err(wil, "WMI_BCAST_DESC_RING_ADD_CMD failed, rc %d\n", rc);
3228 return rc;
3229 }
3230
3231 if (reply.evt.status != WMI_FW_STATUS_SUCCESS) {
3232 wil_err(wil, "Broadcast Tx config failed, status %d\n",
3233 reply.evt.status);
3234 return -EINVAL;
3235 }
3236
3237 spin_lock_bh(&txdata->lock);
3238 ring->hwtail = le32_to_cpu(reply.evt.ring_tail_ptr);
3239 txdata->mid = vif->mid;
3240 txdata->enabled = 1;
3241 spin_unlock_bh(&txdata->lock);
3242
3243 return 0;
3244}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index dc503d903786..abf6f05c4801 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -86,6 +86,7 @@ enum wmi_fw_capability {
86 WMI_FW_CAPABILITY_PNO = 15, 86 WMI_FW_CAPABILITY_PNO = 15,
87 WMI_FW_CAPABILITY_REF_CLOCK_CONTROL = 18, 87 WMI_FW_CAPABILITY_REF_CLOCK_CONTROL = 18,
88 WMI_FW_CAPABILITY_AP_SME_OFFLOAD_NONE = 19, 88 WMI_FW_CAPABILITY_AP_SME_OFFLOAD_NONE = 19,
89 WMI_FW_CAPABILITY_AMSDU = 23,
89 WMI_FW_CAPABILITY_MAX, 90 WMI_FW_CAPABILITY_MAX,
90}; 91};
91 92
@@ -148,8 +149,8 @@ enum wmi_command_id {
148 WMI_CFG_RX_CHAIN_CMDID = 0x820, 149 WMI_CFG_RX_CHAIN_CMDID = 0x820,
149 WMI_VRING_CFG_CMDID = 0x821, 150 WMI_VRING_CFG_CMDID = 0x821,
150 WMI_BCAST_VRING_CFG_CMDID = 0x822, 151 WMI_BCAST_VRING_CFG_CMDID = 0x822,
151 WMI_VRING_BA_EN_CMDID = 0x823, 152 WMI_RING_BA_EN_CMDID = 0x823,
152 WMI_VRING_BA_DIS_CMDID = 0x824, 153 WMI_RING_BA_DIS_CMDID = 0x824,
153 WMI_RCP_ADDBA_RESP_CMDID = 0x825, 154 WMI_RCP_ADDBA_RESP_CMDID = 0x825,
154 WMI_RCP_DELBA_CMDID = 0x826, 155 WMI_RCP_DELBA_CMDID = 0x826,
155 WMI_SET_SSID_CMDID = 0x827, 156 WMI_SET_SSID_CMDID = 0x827,
@@ -163,6 +164,7 @@ enum wmi_command_id {
163 WMI_BF_SM_MGMT_CMDID = 0x838, 164 WMI_BF_SM_MGMT_CMDID = 0x838,
164 WMI_BF_RXSS_MGMT_CMDID = 0x839, 165 WMI_BF_RXSS_MGMT_CMDID = 0x839,
165 WMI_BF_TRIG_CMDID = 0x83A, 166 WMI_BF_TRIG_CMDID = 0x83A,
167 WMI_RCP_ADDBA_RESP_EDMA_CMDID = 0x83B,
166 WMI_LINK_MAINTAIN_CFG_WRITE_CMDID = 0x842, 168 WMI_LINK_MAINTAIN_CFG_WRITE_CMDID = 0x842,
167 WMI_LINK_MAINTAIN_CFG_READ_CMDID = 0x843, 169 WMI_LINK_MAINTAIN_CFG_READ_CMDID = 0x843,
168 WMI_SET_SECTORS_CMDID = 0x849, 170 WMI_SET_SECTORS_CMDID = 0x849,
@@ -235,6 +237,12 @@ enum wmi_command_id {
235 WMI_PRIO_TX_SECTORS_NUMBER_CMDID = 0x9A6, 237 WMI_PRIO_TX_SECTORS_NUMBER_CMDID = 0x9A6,
236 WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID = 0x9A7, 238 WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID = 0x9A7,
237 WMI_BF_CONTROL_CMDID = 0x9AA, 239 WMI_BF_CONTROL_CMDID = 0x9AA,
240 WMI_TX_STATUS_RING_ADD_CMDID = 0x9C0,
241 WMI_RX_STATUS_RING_ADD_CMDID = 0x9C1,
242 WMI_TX_DESC_RING_ADD_CMDID = 0x9C2,
243 WMI_RX_DESC_RING_ADD_CMDID = 0x9C3,
244 WMI_BCAST_DESC_RING_ADD_CMDID = 0x9C4,
245 WMI_CFG_DEF_RX_OFFLOAD_CMDID = 0x9C5,
238 WMI_SCHEDULING_SCHEME_CMDID = 0xA01, 246 WMI_SCHEDULING_SCHEME_CMDID = 0xA01,
239 WMI_FIXED_SCHEDULING_CONFIG_CMDID = 0xA02, 247 WMI_FIXED_SCHEDULING_CONFIG_CMDID = 0xA02,
240 WMI_ENABLE_FIXED_SCHEDULING_CMDID = 0xA03, 248 WMI_ENABLE_FIXED_SCHEDULING_CMDID = 0xA03,
@@ -781,18 +789,90 @@ struct wmi_lo_power_calib_from_otp_event {
781 u8 reserved[3]; 789 u8 reserved[3];
782} __packed; 790} __packed;
783 791
784/* WMI_VRING_BA_EN_CMDID */ 792struct wmi_edma_ring_cfg {
785struct wmi_vring_ba_en_cmd { 793 __le64 ring_mem_base;
786 u8 ringid; 794 /* size in number of items */
795 __le16 ring_size;
796 u8 ring_id;
797 u8 reserved;
798} __packed;
799
800enum wmi_rx_msg_type {
801 WMI_RX_MSG_TYPE_COMPRESSED = 0x00,
802 WMI_RX_MSG_TYPE_EXTENDED = 0x01,
803};
804
805struct wmi_tx_status_ring_add_cmd {
806 struct wmi_edma_ring_cfg ring_cfg;
807 u8 irq_index;
808 u8 reserved[3];
809} __packed;
810
811struct wmi_rx_status_ring_add_cmd {
812 struct wmi_edma_ring_cfg ring_cfg;
813 u8 irq_index;
814 /* wmi_rx_msg_type */
815 u8 rx_msg_type;
816 u8 reserved[2];
817} __packed;
818
819struct wmi_cfg_def_rx_offload_cmd {
820 __le16 max_msdu_size;
821 __le16 max_rx_pl_per_desc;
822 u8 decap_trans_type;
823 u8 l2_802_3_offload_ctrl;
824 u8 l2_nwifi_offload_ctrl;
825 u8 vlan_id;
826 u8 nwifi_ds_trans_type;
827 u8 l3_l4_ctrl;
828 u8 reserved[6];
829} __packed;
830
831struct wmi_tx_desc_ring_add_cmd {
832 struct wmi_edma_ring_cfg ring_cfg;
833 __le16 max_msdu_size;
834 /* Correlated status ring (0-63) */
835 u8 status_ring_id;
836 u8 cid;
837 u8 tid;
838 u8 encap_trans_type;
839 u8 mac_ctrl;
840 u8 to_resolution;
841 u8 agg_max_wsize;
842 u8 reserved[3];
843 struct wmi_vring_cfg_schd schd_params;
844} __packed;
845
846struct wmi_rx_desc_ring_add_cmd {
847 struct wmi_edma_ring_cfg ring_cfg;
848 u8 irq_index;
849 /* 0-63 status rings */
850 u8 status_ring_id;
851 u8 reserved[2];
852 __le64 sw_tail_host_addr;
853} __packed;
854
855struct wmi_bcast_desc_ring_add_cmd {
856 struct wmi_edma_ring_cfg ring_cfg;
857 __le16 max_msdu_size;
858 /* Correlated status ring (0-63) */
859 u8 status_ring_id;
860 u8 encap_trans_type;
861 u8 reserved[4];
862} __packed;
863
864/* WMI_RING_BA_EN_CMDID */
865struct wmi_ring_ba_en_cmd {
866 u8 ring_id;
787 u8 agg_max_wsize; 867 u8 agg_max_wsize;
788 __le16 ba_timeout; 868 __le16 ba_timeout;
789 u8 amsdu; 869 u8 amsdu;
790 u8 reserved[3]; 870 u8 reserved[3];
791} __packed; 871} __packed;
792 872
793/* WMI_VRING_BA_DIS_CMDID */ 873/* WMI_RING_BA_DIS_CMDID */
794struct wmi_vring_ba_dis_cmd { 874struct wmi_ring_ba_dis_cmd {
795 u8 ringid; 875 u8 ring_id;
796 u8 reserved; 876 u8 reserved;
797 __le16 reason; 877 __le16 reason;
798} __packed; 878} __packed;
@@ -950,6 +1030,21 @@ struct wmi_rcp_addba_resp_cmd {
950 u8 reserved[2]; 1030 u8 reserved[2];
951} __packed; 1031} __packed;
952 1032
1033/* WMI_RCP_ADDBA_RESP_EDMA_CMDID */
1034struct wmi_rcp_addba_resp_edma_cmd {
1035 u8 cid;
1036 u8 tid;
1037 u8 dialog_token;
1038 u8 reserved;
1039 __le16 status_code;
1040 /* ieee80211_ba_parameterset field to send */
1041 __le16 ba_param_set;
1042 __le16 ba_timeout;
1043 u8 status_ring_id;
1044 /* wmi_cfg_rx_chain_cmd_reorder_type */
1045 u8 reorder_type;
1046} __packed;
1047
953/* WMI_RCP_DELBA_CMDID */ 1048/* WMI_RCP_DELBA_CMDID */
954struct wmi_rcp_delba_cmd { 1049struct wmi_rcp_delba_cmd {
955 /* Used for cid less than 8. For higher cid set 1050 /* Used for cid less than 8. For higher cid set
@@ -1535,7 +1630,7 @@ enum wmi_event_id {
1535 WMI_BF_CTRL_DONE_EVENTID = 0x1862, 1630 WMI_BF_CTRL_DONE_EVENTID = 0x1862,
1536 WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863, 1631 WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
1537 WMI_GET_STATUS_DONE_EVENTID = 0x1864, 1632 WMI_GET_STATUS_DONE_EVENTID = 0x1864,
1538 WMI_VRING_EN_EVENTID = 0x1865, 1633 WMI_RING_EN_EVENTID = 0x1865,
1539 WMI_GET_RF_STATUS_EVENTID = 0x1866, 1634 WMI_GET_RF_STATUS_EVENTID = 0x1866,
1540 WMI_GET_BASEBAND_TYPE_EVENTID = 0x1867, 1635 WMI_GET_BASEBAND_TYPE_EVENTID = 0x1867,
1541 WMI_VRING_SWITCH_TIMING_CONFIG_EVENTID = 0x1868, 1636 WMI_VRING_SWITCH_TIMING_CONFIG_EVENTID = 0x1868,
@@ -1587,6 +1682,11 @@ enum wmi_event_id {
1587 WMI_PRIO_TX_SECTORS_NUMBER_EVENTID = 0x19A6, 1682 WMI_PRIO_TX_SECTORS_NUMBER_EVENTID = 0x19A6,
1588 WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID = 0x19A7, 1683 WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID = 0x19A7,
1589 WMI_BF_CONTROL_EVENTID = 0x19AA, 1684 WMI_BF_CONTROL_EVENTID = 0x19AA,
1685 WMI_TX_STATUS_RING_CFG_DONE_EVENTID = 0x19C0,
1686 WMI_RX_STATUS_RING_CFG_DONE_EVENTID = 0x19C1,
1687 WMI_TX_DESC_RING_CFG_DONE_EVENTID = 0x19C2,
1688 WMI_RX_DESC_RING_CFG_DONE_EVENTID = 0x19C3,
1689 WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID = 0x19C5,
1590 WMI_SCHEDULING_SCHEME_EVENTID = 0x1A01, 1690 WMI_SCHEDULING_SCHEME_EVENTID = 0x1A01,
1591 WMI_FIXED_SCHEDULING_CONFIG_COMPLETE_EVENTID = 0x1A02, 1691 WMI_FIXED_SCHEDULING_CONFIG_COMPLETE_EVENTID = 0x1A02,
1592 WMI_ENABLE_FIXED_SCHEDULING_COMPLETE_EVENTID = 0x1A03, 1692 WMI_ENABLE_FIXED_SCHEDULING_COMPLETE_EVENTID = 0x1A03,
@@ -1997,6 +2097,49 @@ struct wmi_rcp_addba_resp_sent_event {
1997 u8 reserved2[2]; 2097 u8 reserved2[2];
1998} __packed; 2098} __packed;
1999 2099
2100/* WMI_TX_STATUS_RING_CFG_DONE_EVENTID */
2101struct wmi_tx_status_ring_cfg_done_event {
2102 u8 ring_id;
2103 /* wmi_fw_status */
2104 u8 status;
2105 u8 reserved[2];
2106 __le32 ring_tail_ptr;
2107} __packed;
2108
2109/* WMI_RX_STATUS_RING_CFG_DONE_EVENTID */
2110struct wmi_rx_status_ring_cfg_done_event {
2111 u8 ring_id;
2112 /* wmi_fw_status */
2113 u8 status;
2114 u8 reserved[2];
2115 __le32 ring_tail_ptr;
2116} __packed;
2117
2118/* WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID */
2119struct wmi_cfg_def_rx_offload_done_event {
2120 /* wmi_fw_status */
2121 u8 status;
2122 u8 reserved[3];
2123} __packed;
2124
2125/* WMI_TX_DESC_RING_CFG_DONE_EVENTID */
2126struct wmi_tx_desc_ring_cfg_done_event {
2127 u8 ring_id;
2128 /* wmi_fw_status */
2129 u8 status;
2130 u8 reserved[2];
2131 __le32 ring_tail_ptr;
2132} __packed;
2133
2134/* WMI_RX_DESC_RING_CFG_DONE_EVENTID */
2135struct wmi_rx_desc_ring_cfg_done_event {
2136 u8 ring_id;
2137 /* wmi_fw_status */
2138 u8 status;
2139 u8 reserved[2];
2140 __le32 ring_tail_ptr;
2141} __packed;
2142
2000/* WMI_RCP_ADDBA_REQ_EVENTID */ 2143/* WMI_RCP_ADDBA_REQ_EVENTID */
2001struct wmi_rcp_addba_req_event { 2144struct wmi_rcp_addba_req_event {
2002 /* Used for cid less than 8. For higher cid set 2145 /* Used for cid less than 8. For higher cid set
@@ -2047,9 +2190,9 @@ struct wmi_data_port_open_event {
2047 u8 reserved[3]; 2190 u8 reserved[3];
2048} __packed; 2191} __packed;
2049 2192
2050/* WMI_VRING_EN_EVENTID */ 2193/* WMI_RING_EN_EVENTID */
2051struct wmi_vring_en_event { 2194struct wmi_ring_en_event {
2052 u8 vring_index; 2195 u8 ring_index;
2053 u8 reserved[3]; 2196 u8 reserved[3];
2054} __packed; 2197} __packed;
2055 2198
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index 3ed3d9f6aae9..74538085cfb7 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -1399,6 +1399,7 @@ static int atmel_validate_channel(struct atmel_private *priv, int channel)
1399 return 0; 1399 return 0;
1400} 1400}
1401 1401
1402#ifdef CONFIG_PROC_FS
1402static int atmel_proc_show(struct seq_file *m, void *v) 1403static int atmel_proc_show(struct seq_file *m, void *v)
1403{ 1404{
1404 struct atmel_private *priv = m->private; 1405 struct atmel_private *priv = m->private;
@@ -1481,6 +1482,7 @@ static int atmel_proc_show(struct seq_file *m, void *v)
1481 seq_printf(m, "Current state:\t\t%s\n", s); 1482 seq_printf(m, "Current state:\t\t%s\n", s);
1482 return 0; 1483 return 0;
1483} 1484}
1485#endif
1484 1486
1485static const struct net_device_ops atmel_netdev_ops = { 1487static const struct net_device_ops atmel_netdev_ops = {
1486 .ndo_open = atmel_open, 1488 .ndo_open = atmel_open,
@@ -3675,7 +3677,7 @@ static int probe_atmel_card(struct net_device *dev)
3675 atmel_write16(dev, GCR, 0x0060); 3677 atmel_write16(dev, GCR, 0x0060);
3676 3678
3677 atmel_write16(dev, GCR, 0x0040); 3679 atmel_write16(dev, GCR, 0x0040);
3678 mdelay(500); 3680 msleep(500);
3679 3681
3680 if (atmel_read16(dev, MR2) == 0) { 3682 if (atmel_read16(dev, MR2) == 0) {
3681 /* No stored firmware so load a small stub which just 3683 /* No stored firmware so load a small stub which just
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 4db4d444407a..8347da632a5b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -93,6 +93,42 @@ static int brcmf_feat_debugfs_read(struct seq_file *seq, void *data)
93} 93}
94#endif /* DEBUG */ 94#endif /* DEBUG */
95 95
96struct brcmf_feat_fwfeat {
97 const char * const fwid;
98 u32 feat_flags;
99};
100
101static const struct brcmf_feat_fwfeat brcmf_feat_fwfeat_map[] = {
102 /* brcmfmac43602-pcie.ap.bin from linux-firmware.git commit ea1178515b88 */
103 { "01-6cb8e269", BIT(BRCMF_FEAT_MONITOR) },
104 /* brcmfmac4366b-pcie.bin from linux-firmware.git commit 52442afee990 */
105 { "01-c47a91a4", BIT(BRCMF_FEAT_MONITOR) },
106};
107
108static void brcmf_feat_firmware_overrides(struct brcmf_pub *drv)
109{
110 const struct brcmf_feat_fwfeat *e;
111 u32 feat_flags = 0;
112 int i;
113
114 for (i = 0; i < ARRAY_SIZE(brcmf_feat_fwfeat_map); i++) {
115 e = &brcmf_feat_fwfeat_map[i];
116 if (!strcmp(e->fwid, drv->fwver)) {
117 feat_flags = e->feat_flags;
118 break;
119 }
120 }
121
122 if (!feat_flags)
123 return;
124
125 for (i = 0; i < BRCMF_FEAT_LAST; i++)
126 if (feat_flags & BIT(i))
127 brcmf_dbg(INFO, "enabling firmware feature: %s\n",
128 brcmf_feat_names[i]);
129 drv->feat_flags |= feat_flags;
130}
131
96/** 132/**
97 * brcmf_feat_iovar_int_get() - determine feature through iovar query. 133 * brcmf_feat_iovar_int_get() - determine feature through iovar query.
98 * 134 *
@@ -253,6 +289,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
253 } 289 }
254 brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa"); 290 brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
255 291
292 brcmf_feat_firmware_overrides(drvr);
293
256 /* set chip related quirks */ 294 /* set chip related quirks */
257 switch (drvr->bus_if->chip) { 295 switch (drvr->bus_if->chip) {
258 case BRCM_CC_43236_CHIP_ID: 296 case BRCM_CC_43236_CHIP_ID:
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 72046e182745..04dd7a936593 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -3419,7 +3419,7 @@ done:
3419 3419
3420static void airo_handle_tx(struct airo_info *ai, u16 status) 3420static void airo_handle_tx(struct airo_info *ai, u16 status)
3421{ 3421{
3422 int i, len = 0, index = -1; 3422 int i, index = -1;
3423 u16 fid; 3423 u16 fid;
3424 3424
3425 if (test_bit(FLAG_MPI, &ai->flags)) { 3425 if (test_bit(FLAG_MPI, &ai->flags)) {
@@ -3443,11 +3443,9 @@ static void airo_handle_tx(struct airo_info *ai, u16 status)
3443 3443
3444 fid = IN4500(ai, TXCOMPLFID); 3444 fid = IN4500(ai, TXCOMPLFID);
3445 3445
3446 for(i = 0; i < MAX_FIDS; i++) { 3446 for (i = 0; i < MAX_FIDS; i++) {
3447 if ((ai->fids[i] & 0xffff) == fid) { 3447 if ((ai->fids[i] & 0xffff) == fid)
3448 len = ai->fids[i] >> 16;
3449 index = i; 3448 index = i;
3450 }
3451 } 3449 }
3452 3450
3453 if (index != -1) { 3451 if (index != -1) {
diff --git a/drivers/net/wireless/cisco/airo_cs.c b/drivers/net/wireless/cisco/airo_cs.c
index d9ed22b4cc6b..3718f958c0fc 100644
--- a/drivers/net/wireless/cisco/airo_cs.c
+++ b/drivers/net/wireless/cisco/airo_cs.c
@@ -102,11 +102,8 @@ static int airo_cs_config_check(struct pcmcia_device *p_dev, void *priv_data)
102 102
103static int airo_config(struct pcmcia_device *link) 103static int airo_config(struct pcmcia_device *link)
104{ 104{
105 struct local_info *dev;
106 int ret; 105 int ret;
107 106
108 dev = link->priv;
109
110 dev_dbg(&link->dev, "airo_config\n"); 107 dev_dbg(&link->dev, "airo_config\n");
111 108
112 link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP | 109 link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP |
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 1ad83ef5f202..910db46db6a1 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -5112,11 +5112,9 @@ static int ipw2100_disassociate_bssid(struct ipw2100_priv *priv)
5112 .host_command_length = ETH_ALEN 5112 .host_command_length = ETH_ALEN
5113 }; 5113 };
5114 int err; 5114 int err;
5115 int len;
5116 5115
5117 IPW_DEBUG_HC("DISASSOCIATION_BSSID\n"); 5116 IPW_DEBUG_HC("DISASSOCIATION_BSSID\n");
5118 5117
5119 len = ETH_ALEN;
5120 /* The Firmware currently ignores the BSSID and just disassociates from 5118 /* The Firmware currently ignores the BSSID and just disassociates from
5121 * the currently associated AP -- but in the off chance that a future 5119 * the currently associated AP -- but in the off chance that a future
5122 * firmware does use the BSSID provided here, we go ahead and try and 5120 * firmware does use the BSSID provided here, we go ahead and try and
@@ -7723,7 +7721,6 @@ static int ipw2100_wx_get_auth(struct net_device *dev,
7723 struct libipw_device *ieee = priv->ieee; 7721 struct libipw_device *ieee = priv->ieee;
7724 struct lib80211_crypt_data *crypt; 7722 struct lib80211_crypt_data *crypt;
7725 struct iw_param *param = &wrqu->param; 7723 struct iw_param *param = &wrqu->param;
7726 int ret = 0;
7727 7724
7728 switch (param->flags & IW_AUTH_INDEX) { 7725 switch (param->flags & IW_AUTH_INDEX) {
7729 case IW_AUTH_WPA_VERSION: 7726 case IW_AUTH_WPA_VERSION:
@@ -7733,7 +7730,6 @@ static int ipw2100_wx_get_auth(struct net_device *dev,
7733 /* 7730 /*
7734 * wpa_supplicant will control these internally 7731 * wpa_supplicant will control these internally
7735 */ 7732 */
7736 ret = -EOPNOTSUPP;
7737 break; 7733 break;
7738 7734
7739 case IW_AUTH_TKIP_COUNTERMEASURES: 7735 case IW_AUTH_TKIP_COUNTERMEASURES:
@@ -7801,9 +7797,6 @@ static int ipw2100_wx_set_mlme(struct net_device *dev,
7801{ 7797{
7802 struct ipw2100_priv *priv = libipw_priv(dev); 7798 struct ipw2100_priv *priv = libipw_priv(dev);
7803 struct iw_mlme *mlme = (struct iw_mlme *)extra; 7799 struct iw_mlme *mlme = (struct iw_mlme *)extra;
7804 __le16 reason;
7805
7806 reason = cpu_to_le16(mlme->reason_code);
7807 7800
7808 switch (mlme->cmd) { 7801 switch (mlme->cmd) {
7809 case IW_MLME_DEAUTH: 7802 case IW_MLME_DEAUTH:
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
index dd29f46d086b..d32d39fa2686 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
@@ -479,7 +479,6 @@ int libipw_wx_get_encode(struct libipw_device *ieee,
479{ 479{
480 struct iw_point *erq = &(wrqu->encoding); 480 struct iw_point *erq = &(wrqu->encoding);
481 int len, key; 481 int len, key;
482 struct lib80211_crypt_data *crypt;
483 struct libipw_security *sec = &ieee->sec; 482 struct libipw_security *sec = &ieee->sec;
484 483
485 LIBIPW_DEBUG_WX("GET_ENCODE\n"); 484 LIBIPW_DEBUG_WX("GET_ENCODE\n");
@@ -492,7 +491,6 @@ int libipw_wx_get_encode(struct libipw_device *ieee,
492 } else 491 } else
493 key = ieee->crypt_info.tx_keyidx; 492 key = ieee->crypt_info.tx_keyidx;
494 493
495 crypt = ieee->crypt_info.crypt[key];
496 erq->flags = key + 1; 494 erq->flags = key + 1;
497 495
498 if (!sec->enabled) { 496 if (!sec->enabled) {
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 62a9794f952b..57e3b6cca234 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -476,8 +476,6 @@ il3945_tx_skb(struct il_priv *il,
476 int txq_id = skb_get_queue_mapping(skb); 476 int txq_id = skb_get_queue_mapping(skb);
477 u16 len, idx, hdr_len; 477 u16 len, idx, hdr_len;
478 u16 firstlen, secondlen; 478 u16 firstlen, secondlen;
479 u8 id;
480 u8 unicast;
481 u8 sta_id; 479 u8 sta_id;
482 u8 tid = 0; 480 u8 tid = 0;
483 __le16 fc; 481 __le16 fc;
@@ -496,9 +494,6 @@ il3945_tx_skb(struct il_priv *il,
496 goto drop_unlock; 494 goto drop_unlock;
497 } 495 }
498 496
499 unicast = !is_multicast_ether_addr(hdr->addr1);
500 id = 0;
501
502 fc = hdr->frame_control; 497 fc = hdr->frame_control;
503 498
504#ifdef CONFIG_IWLEGACY_DEBUG 499#ifdef CONFIG_IWLEGACY_DEBUG
@@ -957,10 +952,8 @@ il3945_rx_queue_restock(struct il_priv *il)
957 struct list_head *element; 952 struct list_head *element;
958 struct il_rx_buf *rxb; 953 struct il_rx_buf *rxb;
959 unsigned long flags; 954 unsigned long flags;
960 int write;
961 955
962 spin_lock_irqsave(&rxq->lock, flags); 956 spin_lock_irqsave(&rxq->lock, flags);
963 write = rxq->write & ~0x7;
964 while (il_rx_queue_space(rxq) > 0 && rxq->free_count) { 957 while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
965 /* Get next free Rx buffer, remove from free list */ 958 /* Get next free Rx buffer, remove from free list */
966 element = rxq->rx_free.next; 959 element = rxq->rx_free.next;
@@ -2725,7 +2718,6 @@ void
2725il3945_post_associate(struct il_priv *il) 2718il3945_post_associate(struct il_priv *il)
2726{ 2719{
2727 int rc = 0; 2720 int rc = 0;
2728 struct ieee80211_conf *conf = NULL;
2729 2721
2730 if (!il->vif || !il->is_open) 2722 if (!il->vif || !il->is_open)
2731 return; 2723 return;
@@ -2738,8 +2730,6 @@ il3945_post_associate(struct il_priv *il)
2738 2730
2739 il_scan_cancel_timeout(il, 200); 2731 il_scan_cancel_timeout(il, 200);
2740 2732
2741 conf = &il->hw->conf;
2742
2743 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2733 il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2744 il3945_commit_rxon(il); 2734 il3945_commit_rxon(il);
2745 2735
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index dbf164d48ed3..3e568ce2fb20 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -1634,7 +1634,6 @@ il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
1634{ 1634{
1635 struct il_channel_info *ch_info; 1635 struct il_channel_info *ch_info;
1636 s8 max_power; 1636 s8 max_power;
1637 u8 a_band;
1638 u8 i; 1637 u8 i;
1639 1638
1640 if (il->tx_power_user_lmt == power) { 1639 if (il->tx_power_user_lmt == power) {
@@ -1650,7 +1649,6 @@ il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
1650 1649
1651 for (i = 0; i < il->channel_count; i++) { 1650 for (i = 0; i < il->channel_count; i++) {
1652 ch_info = &il->channel_info[i]; 1651 ch_info = &il->channel_info[i];
1653 a_band = il_is_channel_a_band(ch_info);
1654 1652
1655 /* find minimum power of all user and regulatory constraints 1653 /* find minimum power of all user and regulatory constraints
1656 * (does not consider h/w clipping limitations) */ 1654 * (does not consider h/w clipping limitations) */
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 562e94870a9c..280cd8ae1696 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -1338,15 +1338,12 @@ il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
1338 u32 *accum_stats; 1338 u32 *accum_stats;
1339 u32 *delta, *max_delta; 1339 u32 *delta, *max_delta;
1340 struct stats_general_common *general, *accum_general; 1340 struct stats_general_common *general, *accum_general;
1341 struct stats_tx *tx, *accum_tx;
1342 1341
1343 prev_stats = (__le32 *) &il->_4965.stats; 1342 prev_stats = (__le32 *) &il->_4965.stats;
1344 accum_stats = (u32 *) &il->_4965.accum_stats; 1343 accum_stats = (u32 *) &il->_4965.accum_stats;
1345 size = sizeof(struct il_notif_stats); 1344 size = sizeof(struct il_notif_stats);
1346 general = &il->_4965.stats.general.common; 1345 general = &il->_4965.stats.general.common;
1347 accum_general = &il->_4965.accum_stats.general.common; 1346 accum_general = &il->_4965.accum_stats.general.common;
1348 tx = &il->_4965.stats.tx;
1349 accum_tx = &il->_4965.accum_stats.tx;
1350 delta = (u32 *) &il->_4965.delta_stats; 1347 delta = (u32 *) &il->_4965.delta_stats;
1351 max_delta = (u32 *) &il->_4965.max_delta; 1348 max_delta = (u32 *) &il->_4965.max_delta;
1352 1349
@@ -4784,7 +4781,6 @@ static void
4784il4965_ucode_callback(const struct firmware *ucode_raw, void *context) 4781il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
4785{ 4782{
4786 struct il_priv *il = context; 4783 struct il_priv *il = context;
4787 struct il_ucode_header *ucode;
4788 int err; 4784 int err;
4789 struct il4965_firmware_pieces pieces; 4785 struct il4965_firmware_pieces pieces;
4790 const unsigned int api_max = il->cfg->ucode_api_max; 4786 const unsigned int api_max = il->cfg->ucode_api_max;
@@ -4814,8 +4810,6 @@ il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
4814 } 4810 }
4815 4811
4816 /* Data from ucode file: header followed by uCode images */ 4812 /* Data from ucode file: header followed by uCode images */
4817 ucode = (struct il_ucode_header *)ucode_raw->data;
4818
4819 err = il4965_load_firmware(il, ucode_raw, &pieces); 4813 err = il4965_load_firmware(il, ucode_raw, &pieces);
4820 4814
4821 if (err) 4815 if (err)
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 4d08d78c6b71..04e376cc898c 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -7,13 +7,13 @@ iwlwifi-objs += iwl-debug.o
7iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o 7iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
8iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o 8iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
9iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o 9iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
10iwlwifi-objs += pcie/ctxt-info.o pcie/trans-gen2.o pcie/tx-gen2.o 10iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o
11iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o
11iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o 12iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
12iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o 13iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
13iwlwifi-objs += iwl-trans.o 14iwlwifi-objs += iwl-trans.o
14iwlwifi-objs += fw/notif-wait.o 15iwlwifi-objs += fw/notif-wait.o
15iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o 16iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
16iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o
17iwlwifi-$(CONFIG_ACPI) += fw/acpi.o 17iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
18iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o 18iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
19 19
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
index a63ca8820568..fedb108db68f 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
@@ -63,6 +63,7 @@
63static const struct iwl_base_params iwl2000_base_params = { 63static const struct iwl_base_params iwl2000_base_params = {
64 .eeprom_size = OTP_LOW_IMAGE_SIZE, 64 .eeprom_size = OTP_LOW_IMAGE_SIZE,
65 .num_of_queues = IWLAGN_NUM_QUEUES, 65 .num_of_queues = IWLAGN_NUM_QUEUES,
66 .max_tfd_queue_size = 256,
66 .max_ll_items = OTP_MAX_LL_ITEMS_2x00, 67 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
67 .shadow_ram_support = true, 68 .shadow_ram_support = true,
68 .led_compensation = 51, 69 .led_compensation = 51,
@@ -76,6 +77,7 @@ static const struct iwl_base_params iwl2000_base_params = {
76static const struct iwl_base_params iwl2030_base_params = { 77static const struct iwl_base_params iwl2030_base_params = {
77 .eeprom_size = OTP_LOW_IMAGE_SIZE, 78 .eeprom_size = OTP_LOW_IMAGE_SIZE,
78 .num_of_queues = IWLAGN_NUM_QUEUES, 79 .num_of_queues = IWLAGN_NUM_QUEUES,
80 .max_tfd_queue_size = 256,
79 .max_ll_items = OTP_MAX_LL_ITEMS_2x00, 81 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
80 .shadow_ram_support = true, 82 .shadow_ram_support = true,
81 .led_compensation = 57, 83 .led_compensation = 57,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index d4ba66aecdc9..91ca77c7571c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -59,7 +59,7 @@
59#define IWL_22000_UCODE_API_MAX 38 59#define IWL_22000_UCODE_API_MAX 38
60 60
61/* Lowest firmware API version supported */ 61/* Lowest firmware API version supported */
62#define IWL_22000_UCODE_API_MIN 24 62#define IWL_22000_UCODE_API_MIN 39
63 63
64/* NVM versions */ 64/* NVM versions */
65#define IWL_22000_NVM_VERSION 0x0a1d 65#define IWL_22000_NVM_VERSION 0x0a1d
@@ -73,29 +73,48 @@
73#define IWL_22000_SMEM_OFFSET 0x400000 73#define IWL_22000_SMEM_OFFSET 0x400000
74#define IWL_22000_SMEM_LEN 0xD0000 74#define IWL_22000_SMEM_LEN 0xD0000
75 75
76#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-" 76#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
77#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-" 77#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
78#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-" 78#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
79#define IWL_22000_HR_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-" 79#define IWL_22000_HR_A_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-"
80#define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-" 80#define IWL_22000_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
81#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" 81#define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-"
82#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
83#define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-"
82 84
83#define IWL_22000_HR_MODULE_FIRMWARE(api) \ 85#define IWL_22000_HR_MODULE_FIRMWARE(api) \
84 IWL_22000_HR_FW_PRE __stringify(api) ".ucode" 86 IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
85#define IWL_22000_JF_MODULE_FIRMWARE(api) \ 87#define IWL_22000_JF_MODULE_FIRMWARE(api) \
86 IWL_22000_JF_FW_PRE __stringify(api) ".ucode" 88 IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
87#define IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(api) \ 89#define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \
88 IWL_22000_HR_F0_FW_PRE __stringify(api) ".ucode" 90 IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode"
91#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
92 IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
89#define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \ 93#define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
90 IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode" 94 IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode"
91#define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \ 95#define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
92 IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode" 96 IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
97#define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
98 IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
93 99
94#define NVM_HW_SECTION_NUM_FAMILY_22000 10 100#define NVM_HW_SECTION_NUM_FAMILY_22000 10
95 101
96static const struct iwl_base_params iwl_22000_base_params = { 102static const struct iwl_base_params iwl_22000_base_params = {
97 .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000, 103 .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
98 .num_of_queues = 512, 104 .num_of_queues = 512,
105 .max_tfd_queue_size = 256,
106 .shadow_ram_support = true,
107 .led_compensation = 57,
108 .wd_timeout = IWL_LONG_WD_TIMEOUT,
109 .max_event_log_size = 512,
110 .shadow_reg_enable = true,
111 .pcie_l1_allowed = true,
112};
113
114static const struct iwl_base_params iwl_22560_base_params = {
115 .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
116 .num_of_queues = 512,
117 .max_tfd_queue_size = 65536,
99 .shadow_ram_support = true, 118 .shadow_ram_support = true,
100 .led_compensation = 57, 119 .led_compensation = 57,
101 .wd_timeout = IWL_LONG_WD_TIMEOUT, 120 .wd_timeout = IWL_LONG_WD_TIMEOUT,
@@ -110,11 +129,9 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
110 .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), 129 .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
111}; 130};
112 131
113#define IWL_DEVICE_22000 \ 132#define IWL_DEVICE_22000_COMMON \
114 .ucode_api_max = IWL_22000_UCODE_API_MAX, \ 133 .ucode_api_max = IWL_22000_UCODE_API_MAX, \
115 .ucode_api_min = IWL_22000_UCODE_API_MIN, \ 134 .ucode_api_min = IWL_22000_UCODE_API_MIN, \
116 .device_family = IWL_DEVICE_FAMILY_22000, \
117 .base_params = &iwl_22000_base_params, \
118 .led_mode = IWL_LED_RF_STATE, \ 135 .led_mode = IWL_LED_RF_STATE, \
119 .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \ 136 .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \
120 .non_shared_ant = ANT_A, \ 137 .non_shared_ant = ANT_A, \
@@ -129,6 +146,10 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
129 .mq_rx_supported = true, \ 146 .mq_rx_supported = true, \
130 .vht_mu_mimo_supported = true, \ 147 .vht_mu_mimo_supported = true, \
131 .mac_addr_from_csr = true, \ 148 .mac_addr_from_csr = true, \
149 .ht_params = &iwl_22000_ht_params, \
150 .nvm_ver = IWL_22000_NVM_VERSION, \
151 .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, \
152 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
132 .use_tfh = true, \ 153 .use_tfh = true, \
133 .rf_id = true, \ 154 .rf_id = true, \
134 .gen2 = true, \ 155 .gen2 = true, \
@@ -136,86 +157,114 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
136 .dbgc_supported = true, \ 157 .dbgc_supported = true, \
137 .min_umac_error_event_table = 0x400000 158 .min_umac_error_event_table = 0x400000
138 159
160#define IWL_DEVICE_22500 \
161 IWL_DEVICE_22000_COMMON, \
162 .device_family = IWL_DEVICE_FAMILY_22000, \
163 .base_params = &iwl_22000_base_params, \
164 .csr = &iwl_csr_v1
165
166#define IWL_DEVICE_22560 \
167 IWL_DEVICE_22000_COMMON, \
168 .device_family = IWL_DEVICE_FAMILY_22560, \
169 .base_params = &iwl_22560_base_params, \
170 .csr = &iwl_csr_v2
171
139const struct iwl_cfg iwl22000_2ac_cfg_hr = { 172const struct iwl_cfg iwl22000_2ac_cfg_hr = {
140 .name = "Intel(R) Dual Band Wireless AC 22000", 173 .name = "Intel(R) Dual Band Wireless AC 22000",
141 .fw_name_pre = IWL_22000_HR_FW_PRE, 174 .fw_name_pre = IWL_22000_HR_FW_PRE,
142 IWL_DEVICE_22000, 175 IWL_DEVICE_22500,
143 .csr = &iwl_csr_v1,
144 .ht_params = &iwl_22000_ht_params,
145 .nvm_ver = IWL_22000_NVM_VERSION,
146 .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
147 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
148}; 176};
149 177
150const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = { 178const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = {
151 .name = "Intel(R) Dual Band Wireless AC 22000", 179 .name = "Intel(R) Dual Band Wireless AC 22000",
152 .fw_name_pre = IWL_22000_HR_CDB_FW_PRE, 180 .fw_name_pre = IWL_22000_HR_CDB_FW_PRE,
153 IWL_DEVICE_22000, 181 IWL_DEVICE_22500,
154 .csr = &iwl_csr_v1,
155 .ht_params = &iwl_22000_ht_params,
156 .nvm_ver = IWL_22000_NVM_VERSION,
157 .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
158 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
159 .cdb = true, 182 .cdb = true,
160}; 183};
161 184
162const struct iwl_cfg iwl22000_2ac_cfg_jf = { 185const struct iwl_cfg iwl22000_2ac_cfg_jf = {
163 .name = "Intel(R) Dual Band Wireless AC 22000", 186 .name = "Intel(R) Dual Band Wireless AC 22000",
164 .fw_name_pre = IWL_22000_JF_FW_PRE, 187 .fw_name_pre = IWL_22000_JF_FW_PRE,
165 IWL_DEVICE_22000, 188 IWL_DEVICE_22500,
166 .csr = &iwl_csr_v1,
167 .ht_params = &iwl_22000_ht_params,
168 .nvm_ver = IWL_22000_NVM_VERSION,
169 .nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
170 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
171}; 189};
172 190
173const struct iwl_cfg iwl22000_2ax_cfg_hr = { 191const struct iwl_cfg iwl22000_2ax_cfg_hr = {
174 .name = "Intel(R) Dual Band Wireless AX 22000", 192 .name = "Intel(R) Dual Band Wireless AX 22000",
175 .fw_name_pre = IWL_22000_HR_FW_PRE, 193 .fw_name_pre = IWL_22000_HR_FW_PRE,
176 IWL_DEVICE_22000, 194 IWL_DEVICE_22500,
177 .csr = &iwl_csr_v1, 195 /*
178 .ht_params = &iwl_22000_ht_params, 196 * This device doesn't support receiving BlockAck with a large bitmap
179 .nvm_ver = IWL_22000_NVM_VERSION, 197 * so we need to restrict the size of transmitted aggregation to the
180 .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, 198 * HT size; mac80211 would otherwise pick the HE max (256) by default.
181 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, 199 */
200 .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
182}; 201};
183 202
184const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0 = { 203const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = {
185 .name = "Intel(R) Dual Band Wireless AX 22000", 204 .name = "Intel(R) Dual Band Wireless AX 22000",
186 .fw_name_pre = IWL_22000_HR_F0_FW_PRE, 205 .fw_name_pre = IWL_22000_HR_A_F0_FW_PRE,
187 IWL_DEVICE_22000, 206 IWL_DEVICE_22500,
188 .csr = &iwl_csr_v1, 207 /*
189 .ht_params = &iwl_22000_ht_params, 208 * This device doesn't support receiving BlockAck with a large bitmap
190 .nvm_ver = IWL_22000_NVM_VERSION, 209 * so we need to restrict the size of transmitted aggregation to the
191 .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, 210 * HT size; mac80211 would otherwise pick the HE max (256) by default.
192 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, 211 */
212 .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
213};
214
215const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
216 .name = "Intel(R) Dual Band Wireless AX 22000",
217 .fw_name_pre = IWL_22000_HR_B_FW_PRE,
218 IWL_DEVICE_22500,
219 /*
220 * This device doesn't support receiving BlockAck with a large bitmap
221 * so we need to restrict the size of transmitted aggregation to the
222 * HT size; mac80211 would otherwise pick the HE max (256) by default.
223 */
224 .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
193}; 225};
194 226
195const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = { 227const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = {
196 .name = "Intel(R) Dual Band Wireless AX 22000", 228 .name = "Intel(R) Dual Band Wireless AX 22000",
197 .fw_name_pre = IWL_22000_JF_B0_FW_PRE, 229 .fw_name_pre = IWL_22000_JF_B0_FW_PRE,
198 IWL_DEVICE_22000, 230 IWL_DEVICE_22500,
199 .csr = &iwl_csr_v1, 231 /*
200 .ht_params = &iwl_22000_ht_params, 232 * This device doesn't support receiving BlockAck with a large bitmap
201 .nvm_ver = IWL_22000_NVM_VERSION, 233 * so we need to restrict the size of transmitted aggregation to the
202 .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, 234 * HT size; mac80211 would otherwise pick the HE max (256) by default.
203 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, 235 */
236 .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
204}; 237};
205 238
206const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = { 239const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
207 .name = "Intel(R) Dual Band Wireless AX 22000", 240 .name = "Intel(R) Dual Band Wireless AX 22000",
208 .fw_name_pre = IWL_22000_HR_A0_FW_PRE, 241 .fw_name_pre = IWL_22000_HR_A0_FW_PRE,
209 IWL_DEVICE_22000, 242 IWL_DEVICE_22500,
210 .csr = &iwl_csr_v1, 243 /*
211 .ht_params = &iwl_22000_ht_params, 244 * This device doesn't support receiving BlockAck with a large bitmap
212 .nvm_ver = IWL_22000_NVM_VERSION, 245 * so we need to restrict the size of transmitted aggregation to the
213 .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, 246 * HT size; mac80211 would otherwise pick the HE max (256) by default.
214 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, 247 */
248 .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
249};
250
251const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = {
252 .name = "Intel(R) Dual Band Wireless AX 22560",
253 .fw_name_pre = IWL_22000_SU_Z0_FW_PRE,
254 IWL_DEVICE_22560,
255 .cdb = true,
256 /*
257 * This device doesn't support receiving BlockAck with a large bitmap
258 * so we need to restrict the size of transmitted aggregation to the
259 * HT size; mac80211 would otherwise pick the HE max (256) by default.
260 */
261 .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
215}; 262};
216 263
217MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); 264MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
218MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); 265MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
219MODULE_FIRMWARE(IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); 266MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
267MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
220MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); 268MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
221MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); 269MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
270MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
index a224f1be1ec2..36151e61a26f 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
@@ -53,6 +53,7 @@
53static const struct iwl_base_params iwl5000_base_params = { 53static const struct iwl_base_params iwl5000_base_params = {
54 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, 54 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
55 .num_of_queues = IWLAGN_NUM_QUEUES, 55 .num_of_queues = IWLAGN_NUM_QUEUES,
56 .max_tfd_queue_size = 256,
56 .pll_cfg = true, 57 .pll_cfg = true,
57 .led_compensation = 51, 58 .led_compensation = 51,
58 .wd_timeout = IWL_WATCHDOG_DISABLED, 59 .wd_timeout = IWL_WATCHDOG_DISABLED,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
index dbcec7ce7863..b5d8274761d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
@@ -72,6 +72,7 @@
72static const struct iwl_base_params iwl6000_base_params = { 72static const struct iwl_base_params iwl6000_base_params = {
73 .eeprom_size = OTP_LOW_IMAGE_SIZE, 73 .eeprom_size = OTP_LOW_IMAGE_SIZE,
74 .num_of_queues = IWLAGN_NUM_QUEUES, 74 .num_of_queues = IWLAGN_NUM_QUEUES,
75 .max_tfd_queue_size = 256,
75 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 76 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
76 .shadow_ram_support = true, 77 .shadow_ram_support = true,
77 .led_compensation = 51, 78 .led_compensation = 51,
@@ -84,6 +85,7 @@ static const struct iwl_base_params iwl6000_base_params = {
84static const struct iwl_base_params iwl6050_base_params = { 85static const struct iwl_base_params iwl6050_base_params = {
85 .eeprom_size = OTP_LOW_IMAGE_SIZE, 86 .eeprom_size = OTP_LOW_IMAGE_SIZE,
86 .num_of_queues = IWLAGN_NUM_QUEUES, 87 .num_of_queues = IWLAGN_NUM_QUEUES,
88 .max_tfd_queue_size = 256,
87 .max_ll_items = OTP_MAX_LL_ITEMS_6x50, 89 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
88 .shadow_ram_support = true, 90 .shadow_ram_support = true,
89 .led_compensation = 51, 91 .led_compensation = 51,
@@ -96,6 +98,7 @@ static const struct iwl_base_params iwl6050_base_params = {
96static const struct iwl_base_params iwl6000_g2_base_params = { 98static const struct iwl_base_params iwl6000_g2_base_params = {
97 .eeprom_size = OTP_LOW_IMAGE_SIZE, 99 .eeprom_size = OTP_LOW_IMAGE_SIZE,
98 .num_of_queues = IWLAGN_NUM_QUEUES, 100 .num_of_queues = IWLAGN_NUM_QUEUES,
101 .max_tfd_queue_size = 256,
99 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 102 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
100 .shadow_ram_support = true, 103 .shadow_ram_support = true,
101 .led_compensation = 57, 104 .led_compensation = 57,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
index 69bfa827e82a..a62c8346f13a 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
@@ -123,6 +123,7 @@
123static const struct iwl_base_params iwl7000_base_params = { 123static const struct iwl_base_params iwl7000_base_params = {
124 .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000, 124 .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
125 .num_of_queues = 31, 125 .num_of_queues = 31,
126 .max_tfd_queue_size = 256,
126 .shadow_ram_support = true, 127 .shadow_ram_support = true,
127 .led_compensation = 57, 128 .led_compensation = 57,
128 .wd_timeout = IWL_LONG_WD_TIMEOUT, 129 .wd_timeout = IWL_LONG_WD_TIMEOUT,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index 7262e973e0d6..c46fa712985b 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -104,6 +104,7 @@
104static const struct iwl_base_params iwl8000_base_params = { 104static const struct iwl_base_params iwl8000_base_params = {
105 .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000, 105 .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
106 .num_of_queues = 31, 106 .num_of_queues = 31,
107 .max_tfd_queue_size = 256,
107 .shadow_ram_support = true, 108 .shadow_ram_support = true,
108 .led_compensation = 57, 109 .led_compensation = 57,
109 .wd_timeout = IWL_LONG_WD_TIMEOUT, 110 .wd_timeout = IWL_LONG_WD_TIMEOUT,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index c8ea63d02619..24b2f7cbb308 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -95,6 +95,7 @@
95static const struct iwl_base_params iwl9000_base_params = { 95static const struct iwl_base_params iwl9000_base_params = {
96 .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_9000, 96 .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_9000,
97 .num_of_queues = 31, 97 .num_of_queues = 31,
98 .max_tfd_queue_size = 256,
98 .shadow_ram_support = true, 99 .shadow_ram_support = true,
99 .led_compensation = 57, 100 .led_compensation = 57,
100 .wd_timeout = IWL_LONG_WD_TIMEOUT, 101 .wd_timeout = IWL_LONG_WD_TIMEOUT,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
index 007bfe7656a4..08d3d8a190f6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
@@ -8,6 +8,7 @@
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 * Copyright (C) 2018 Intel Corporation
11 * 12 *
12 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as 14 * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
30 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 31 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
31 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
32 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 33 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
34 * Copyright (C) 2018 Intel Corporation
33 * All rights reserved. 35 * All rights reserved.
34 * 36 *
35 * Redistribution and use in source and binary forms, with or without 37 * Redistribution and use in source and binary forms, with or without
@@ -187,20 +189,4 @@ struct iwl_card_state_notif {
187 __le32 flags; 189 __le32 flags;
188} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */ 190} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
189 191
190/**
191 * struct iwl_fseq_ver_mismatch_nty - Notification about version
192 *
193 * This notification does not have a direct impact on the init flow.
194 * It means that another core (not WiFi) has initiated the FSEQ flow
195 * and updated the FSEQ version. The driver only prints an error when
196 * this occurs.
197 *
198 * @aux_read_fseq_ver: auxiliary read FSEQ version
199 * @wifi_fseq_ver: FSEQ version (embedded in WiFi)
200 */
201struct iwl_fseq_ver_mismatch_ntf {
202 __le32 aux_read_fseq_ver;
203 __le32 wifi_fseq_ver;
204} __packed; /* FSEQ_VER_MISMATCH_NTFY_API_S_VER_1 */
205
206#endif /* __iwl_fw_api_alive_h__ */ 192#endif /* __iwl_fw_api_alive_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index f285bacc8726..6dad748e5cdc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -193,7 +193,8 @@ enum iwl_legacy_cmds {
193 FW_GET_ITEM_CMD = 0x1a, 193 FW_GET_ITEM_CMD = 0x1a,
194 194
195 /** 195 /**
196 * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2, 196 * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or
197 * &struct iwl_tx_cmd_gen3,
197 * response in &struct iwl_mvm_tx_resp or 198 * response in &struct iwl_mvm_tx_resp or
198 * &struct iwl_mvm_tx_resp_v3 199 * &struct iwl_mvm_tx_resp_v3
199 */ 200 */
@@ -646,13 +647,6 @@ enum iwl_system_subcmd_ids {
646 * @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd 647 * @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd
647 */ 648 */
648 INIT_EXTENDED_CFG_CMD = 0x03, 649 INIT_EXTENDED_CFG_CMD = 0x03,
649
650 /**
651 * @FSEQ_VER_MISMATCH_NTF: Notification about fseq version
652 * mismatch during init. The format is specified in
653 * &struct iwl_fseq_ver_mismatch_ntf.
654 */
655 FSEQ_VER_MISMATCH_NTF = 0xFF,
656}; 650};
657 651
658#endif /* __iwl_fw_api_commands_h__ */ 652#endif /* __iwl_fw_api_commands_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 5f6e855006dd..59b3c6e8f37b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -8,6 +8,7 @@
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 * Copyright (C) 2018 Intel Corporation
11 * 12 *
12 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as 14 * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
30 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 31 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
31 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
32 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 33 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
34 * Copyright (C) 2018 Intel Corporation
33 * All rights reserved. 35 * All rights reserved.
34 * 36 *
35 * Redistribution and use in source and binary forms, with or without 37 * Redistribution and use in source and binary forms, with or without
@@ -83,6 +85,16 @@ enum iwl_data_path_subcmd_ids {
83 TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2, 85 TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
84 86
85 /** 87 /**
88 * @STA_HE_CTXT_CMD: &struct iwl_he_sta_context_cmd
89 */
90 STA_HE_CTXT_CMD = 0x7,
91
92 /**
93 * @RFH_QUEUE_CONFIG_CMD: &struct iwl_rfh_queue_config
94 */
95 RFH_QUEUE_CONFIG_CMD = 0xD,
96
97 /**
86 * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd 98 * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
87 */ 99 */
88 TLC_MNG_CONFIG_CMD = 0xF, 100 TLC_MNG_CONFIG_CMD = 0xF,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index f2e31e040a7b..55594c93b014 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2017 Intel Deutschland GmbH 9 * Copyright(c) 2017 Intel Deutschland GmbH
10 * Copyright(c) 2018 Intel Corporation
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 13 * it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +29,7 @@
28 * 29 *
29 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 30 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
30 * Copyright(c) 2017 Intel Deutschland GmbH 31 * Copyright(c) 2017 Intel Deutschland GmbH
32 * Copyright(c) 2018 Intel Corporation
31 * All rights reserved. 33 * All rights reserved.
32 * 34 *
33 * Redistribution and use in source and binary forms, with or without 35 * Redistribution and use in source and binary forms, with or without
@@ -279,6 +281,10 @@ enum iwl_mac_filter_flags {
279 MAC_FILTER_OUT_BCAST = BIT(8), 281 MAC_FILTER_OUT_BCAST = BIT(8),
280 MAC_FILTER_IN_CRC32 = BIT(11), 282 MAC_FILTER_IN_CRC32 = BIT(11),
281 MAC_FILTER_IN_PROBE_REQUEST = BIT(12), 283 MAC_FILTER_IN_PROBE_REQUEST = BIT(12),
284 /**
285 * @MAC_FILTER_IN_11AX: mark BSS as supporting 802.11ax
286 */
287 MAC_FILTER_IN_11AX = BIT(14),
282}; 288};
283 289
284/** 290/**
@@ -406,4 +412,170 @@ struct iwl_missed_beacons_notif {
406 __le32 num_recvd_beacons; 412 __le32 num_recvd_beacons;
407} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */ 413} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */
408 414
415/**
416 * struct iwl_he_backoff_conf - used for backoff configuration
417 * Per each trigger-based AC, (set by MU EDCA Parameter set info-element)
418 * used for backoff configuration of TXF5..TXF8 trigger based.
419 * The MU-TIMER is reloaded w/ MU_TIME each time a frame from the AC is sent via
420 * trigger-based TX.
421 * @cwmin: CW min
422 * @cwmax: CW max
423 * @aifsn: AIFSN
424 * AIFSN=0, means that no backoff from the specified TRIG-BASED AC is
425 * allowed till the MU-TIMER is 0
426 * @mu_time: MU time in 8TU units
427 */
428struct iwl_he_backoff_conf {
429 __le16 cwmin;
430 __le16 cwmax;
431 __le16 aifsn;
432 __le16 mu_time;
433} __packed; /* AC_QOS_DOT11AX_API_S */
434
435#define MAX_HE_SUPP_NSS 2
436#define MAX_HE_CHANNEL_BW_INDX 4
437
438/**
439 * struct iwl_he_pkt_ext - QAM thresholds
440 * The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
441 * The IE is organized in the following way:
442 * Support for Nss x BW (or RU) matrix:
443 * (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz)
444 * Each entry contains 2 QAM thresholds for 8us and 16us:
445 * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6/7=RES
446 * i.e. QAM_th1 < QAM_th2 such if TX uses QAM_tx:
447 * QAM_tx < QAM_th1 --> PPE=0us
448 * QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us
449 * QAM_th2 <= QAM_tx --> PPE=16us
450 * @pkt_ext_qam_th: QAM thresholds
451 * For each Nss/Bw define 2 QAM thrsholds (0..5)
452 * For rates below the low_th, no need for PPE
453 * For rates between low_th and high_th, need 8us PPE
454 * For rates equal or higher then the high_th, need 16us PPE
455 * Nss (0-siso, 1-mimo2) x BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) x
456 * (0-low_th, 1-high_th)
457 */
458struct iwl_he_pkt_ext {
459 u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_HE_CHANNEL_BW_INDX][2];
460} __packed; /* PKT_EXT_DOT11AX_API_S */
461
462/**
463 * enum iwl_he_sta_ctxt_flags - HE STA context flags
464 * @STA_CTXT_HE_REF_BSSID_VALID: ref bssid addr valid (for receiving specific
465 * control frames such as TRIG, NDPA, BACK)
466 * @STA_CTXT_HE_BSS_COLOR_DIS: BSS color disable, don't use the BSS
467 * color for RX filter but use MAC header
468 * @STA_CTXT_HE_PARTIAL_BSS_COLOR: partial BSS color allocation
469 * @STA_CTXT_HE_32BIT_BA_BITMAP: indicates the receiver supports BA bitmap
470 * of 32-bits
471 * @STA_CTXT_HE_PACKET_EXT: indicates that the packet-extension info is valid
472 * and should be used
473 * @STA_CTXT_HE_TRIG_RND_ALLOC: indicates that trigger based random allocation
474 * is enabled according to UORA element existence
475 * @STA_CTXT_HE_CONST_TRIG_RND_ALLOC: used for AV testing
476 * @STA_CTXT_HE_ACK_ENABLED: indicates that the AP supports receiving ACK-
477 * enabled AGG, i.e. both BACK and non-BACK frames in a single AGG
478 * @STA_CTXT_HE_MU_EDCA_CW: indicates that there is an element of MU EDCA
479 * parameter set, i.e. the backoff counters for trig-based ACs
480 */
481enum iwl_he_sta_ctxt_flags {
482 STA_CTXT_HE_REF_BSSID_VALID = BIT(4),
483 STA_CTXT_HE_BSS_COLOR_DIS = BIT(5),
484 STA_CTXT_HE_PARTIAL_BSS_COLOR = BIT(6),
485 STA_CTXT_HE_32BIT_BA_BITMAP = BIT(7),
486 STA_CTXT_HE_PACKET_EXT = BIT(8),
487 STA_CTXT_HE_TRIG_RND_ALLOC = BIT(9),
488 STA_CTXT_HE_CONST_TRIG_RND_ALLOC = BIT(10),
489 STA_CTXT_HE_ACK_ENABLED = BIT(11),
490 STA_CTXT_HE_MU_EDCA_CW = BIT(12),
491};
492
493/**
494 * enum iwl_he_htc_flags - HE HTC support flags
495 * @IWL_HE_HTC_SUPPORT: HE-HTC support
496 * @IWL_HE_HTC_UL_MU_RESP_SCHED: HE UL MU response schedule
497 * support via A-control field
498 * @IWL_HE_HTC_BSR_SUPP: BSR support in A-control field
499 * @IWL_HE_HTC_OMI_SUPP: A-OMI support in A-control field
500 * @IWL_HE_HTC_BQR_SUPP: A-BQR support in A-control field
501 */
502enum iwl_he_htc_flags {
503 IWL_HE_HTC_SUPPORT = BIT(0),
504 IWL_HE_HTC_UL_MU_RESP_SCHED = BIT(3),
505 IWL_HE_HTC_BSR_SUPP = BIT(4),
506 IWL_HE_HTC_OMI_SUPP = BIT(5),
507 IWL_HE_HTC_BQR_SUPP = BIT(6),
508};
509
510/*
511 * @IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK: the STA does not provide HE MFB
512 * @IWL_HE_HTC_LINK_ADAP_UNSOLICITED: the STA provides only unsolicited HE MFB
513 * @IWL_HE_HTC_LINK_ADAP_BOTH: the STA is capable of providing HE MFB in
514 * response to HE MRQ and if the STA provides unsolicited HE MFB
515 */
516#define IWL_HE_HTC_LINK_ADAP_POS (1)
517#define IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK (0)
518#define IWL_HE_HTC_LINK_ADAP_UNSOLICITED (2 << IWL_HE_HTC_LINK_ADAP_POS)
519#define IWL_HE_HTC_LINK_ADAP_BOTH (3 << IWL_HE_HTC_LINK_ADAP_POS)
520
521/**
522 * struct iwl_he_sta_context_cmd - configure FW to work with HE AP
523 * @sta_id: STA id
524 * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
525 * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
526 * @reserved1: reserved byte for future use
527 * @reserved2: reserved byte for future use
528 * @flags: see %iwl_11ax_sta_ctxt_flags
529 * @ref_bssid_addr: reference BSSID used by the AP
530 * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes
531 * @htc_flags: which features are supported in HTC
532 * @frag_flags: frag support in A-MSDU
533 * @frag_level: frag support level
534 * @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2)
535 * @frag_min_size: min frag size (except last frag)
536 * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa
537 * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
538 * @htc_trig_based_pkt_ext: default PE in 4us units
539 * @frame_time_rts_th: HE duration RTS threshold, in units of 32us
540 * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1
541 * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
542 * @reserved3: reserved byte for future use
543 * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
544 */
545struct iwl_he_sta_context_cmd {
546 u8 sta_id;
547 u8 tid_limit;
548 u8 reserved1;
549 u8 reserved2;
550 __le32 flags;
551
552 /* The below fields are set via Multiple BSSID IE */
553 u8 ref_bssid_addr[6];
554 __le16 reserved0;
555
556 /* The below fields are set via HE-capabilities IE */
557 __le32 htc_flags;
558
559 u8 frag_flags;
560 u8 frag_level;
561 u8 frag_max_num;
562 u8 frag_min_size;
563
564 /* The below fields are set via PPE thresholds element */
565 struct iwl_he_pkt_ext pkt_ext;
566
567 /* The below fields are set via HE-Operation IE */
568 u8 bss_color;
569 u8 htc_trig_based_pkt_ext;
570 __le16 frame_time_rts_th;
571
572 /* Random access parameter set (i.e. RAPS) */
573 u8 rand_alloc_ecwmin;
574 u8 rand_alloc_ecwmax;
575 __le16 reserved3;
576
577 /* The below fields are set via MU EDCA parameter set element */
578 struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
579} __packed; /* STA_CONTEXT_DOT11AX_API_S */
580
409#endif /* __iwl_fw_api_mac_h__ */ 581#endif /* __iwl_fw_api_mac_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 8d6dc9189985..6c5338364794 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -195,7 +195,6 @@ struct iwl_nvm_get_info_general {
195 * @NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED: true if 5.2 band enabled 195 * @NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED: true if 5.2 band enabled
196 * @NVM_MAC_SKU_FLAGS_802_11N_ENABLED: true if 11n enabled 196 * @NVM_MAC_SKU_FLAGS_802_11N_ENABLED: true if 11n enabled
197 * @NVM_MAC_SKU_FLAGS_802_11AC_ENABLED: true if 11ac enabled 197 * @NVM_MAC_SKU_FLAGS_802_11AC_ENABLED: true if 11ac enabled
198 * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled
199 * @NVM_MAC_SKU_FLAGS_MIMO_DISABLED: true if MIMO disabled 198 * @NVM_MAC_SKU_FLAGS_MIMO_DISABLED: true if MIMO disabled
200 * @NVM_MAC_SKU_FLAGS_WAPI_ENABLED: true if WAPI enabled 199 * @NVM_MAC_SKU_FLAGS_WAPI_ENABLED: true if WAPI enabled
201 * @NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED: true if regulatory checker enabled 200 * @NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED: true if regulatory checker enabled
@@ -206,6 +205,9 @@ enum iwl_nvm_mac_sku_flags {
206 NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED = BIT(1), 205 NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED = BIT(1),
207 NVM_MAC_SKU_FLAGS_802_11N_ENABLED = BIT(2), 206 NVM_MAC_SKU_FLAGS_802_11N_ENABLED = BIT(2),
208 NVM_MAC_SKU_FLAGS_802_11AC_ENABLED = BIT(3), 207 NVM_MAC_SKU_FLAGS_802_11AC_ENABLED = BIT(3),
208 /**
209 * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled
210 */
209 NVM_MAC_SKU_FLAGS_802_11AX_ENABLED = BIT(4), 211 NVM_MAC_SKU_FLAGS_802_11AX_ENABLED = BIT(4),
210 NVM_MAC_SKU_FLAGS_MIMO_DISABLED = BIT(5), 212 NVM_MAC_SKU_FLAGS_MIMO_DISABLED = BIT(5),
211 NVM_MAC_SKU_FLAGS_WAPI_ENABLED = BIT(8), 213 NVM_MAC_SKU_FLAGS_WAPI_ENABLED = BIT(8),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 21e13a315421..087fae91baef 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -314,8 +314,11 @@ enum {
314 IWL_RATE_MCS_8_INDEX, 314 IWL_RATE_MCS_8_INDEX,
315 IWL_RATE_MCS_9_INDEX, 315 IWL_RATE_MCS_9_INDEX,
316 IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX, 316 IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX,
317 IWL_RATE_MCS_10_INDEX,
318 IWL_RATE_MCS_11_INDEX,
319 IWL_LAST_HE_RATE = IWL_RATE_MCS_11_INDEX,
317 IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1, 320 IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
318 IWL_RATE_COUNT = IWL_LAST_VHT_RATE + 1, 321 IWL_RATE_COUNT = IWL_LAST_HE_RATE + 1,
319}; 322};
320 323
321#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX) 324#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)
@@ -440,8 +443,8 @@ enum {
440#define RATE_LEGACY_RATE_MSK 0xff 443#define RATE_LEGACY_RATE_MSK 0xff
441 444
442/* Bit 10 - OFDM HE */ 445/* Bit 10 - OFDM HE */
443#define RATE_MCS_OFDM_HE_POS 10 446#define RATE_MCS_HE_POS 10
444#define RATE_MCS_OFDM_HE_MSK BIT(RATE_MCS_OFDM_HE_POS) 447#define RATE_MCS_HE_MSK BIT(RATE_MCS_HE_POS)
445 448
446/* 449/*
447 * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz 450 * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz
@@ -482,15 +485,33 @@ enum {
482#define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS) 485#define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS)
483 486
484/* 487/*
485 * Bit 20-21: HE guard interval and LTF type. 488 * Bit 20-21: HE LTF type and guard interval
486 * (0) 1xLTF+1.6us, (1) 2xLTF+0.8us, 489 * HE (ext) SU:
487 * (2) 2xLTF+1.6us, (3) 4xLTF+3.2us 490 * 0 1xLTF+0.8us
491 * 1 2xLTF+0.8us
492 * 2 2xLTF+1.6us
493 * 3 & SGI (bit 13) clear 4xLTF+3.2us
494 * 3 & SGI (bit 13) set 4xLTF+0.8us
495 * HE MU:
496 * 0 4xLTF+0.8us
497 * 1 2xLTF+0.8us
498 * 2 2xLTF+1.6us
499 * 3 4xLTF+3.2us
500 * HE TRIG:
501 * 0 1xLTF+1.6us
502 * 1 2xLTF+1.6us
503 * 2 4xLTF+3.2us
504 * 3 (does not occur)
488 */ 505 */
489#define RATE_MCS_HE_GI_LTF_POS 20 506#define RATE_MCS_HE_GI_LTF_POS 20
490#define RATE_MCS_HE_GI_LTF_MSK (3 << RATE_MCS_HE_GI_LTF_POS) 507#define RATE_MCS_HE_GI_LTF_MSK (3 << RATE_MCS_HE_GI_LTF_POS)
491 508
492/* Bit 22-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */ 509/* Bit 22-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */
493#define RATE_MCS_HE_TYPE_POS 22 510#define RATE_MCS_HE_TYPE_POS 22
511#define RATE_MCS_HE_TYPE_SU (0 << RATE_MCS_HE_TYPE_POS)
512#define RATE_MCS_HE_TYPE_EXT_SU (1 << RATE_MCS_HE_TYPE_POS)
513#define RATE_MCS_HE_TYPE_MU (2 << RATE_MCS_HE_TYPE_POS)
514#define RATE_MCS_HE_TYPE_TRIG (3 << RATE_MCS_HE_TYPE_POS)
494#define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS) 515#define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS)
495 516
496/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */ 517/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */
@@ -501,6 +522,9 @@ enum {
501#define RATE_MCS_LDPC_POS 27 522#define RATE_MCS_LDPC_POS 27
502#define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS) 523#define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS)
503 524
525/* Bit 28: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */
526#define RATE_MCS_HE_106T_POS 28
527#define RATE_MCS_HE_106T_MSK (1 << RATE_MCS_HE_106T_POS)
504 528
505/* Link Quality definitions */ 529/* Link Quality definitions */
506 530
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 7e570c4a9df0..2f599353c885 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -8,6 +8,7 @@
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 10 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation
11 * 12 *
12 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as 14 * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
30 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 31 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
31 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
32 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 33 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
34 * Copyright(c) 2018 Intel Corporation
33 * All rights reserved. 35 * All rights reserved.
34 * 36 *
35 * Redistribution and use in source and binary forms, with or without 37 * Redistribution and use in source and binary forms, with or without
@@ -343,6 +345,169 @@ enum iwl_rx_mpdu_mac_info {
343 IWL_RX_MPDU_PHY_PHY_INDEX_MASK = 0xf0, 345 IWL_RX_MPDU_PHY_PHY_INDEX_MASK = 0xf0,
344}; 346};
345 347
348/*
349 * enum iwl_rx_he_phy - HE PHY data
350 */
351enum iwl_rx_he_phy {
352 IWL_RX_HE_PHY_BEAM_CHNG = BIT(0),
353 IWL_RX_HE_PHY_UPLINK = BIT(1),
354 IWL_RX_HE_PHY_BSS_COLOR_MASK = 0xfc,
355 IWL_RX_HE_PHY_SPATIAL_REUSE_MASK = 0xf00,
356 IWL_RX_HE_PHY_SU_EXT_BW10 = BIT(12),
357 IWL_RX_HE_PHY_TXOP_DUR_MASK = 0xfe000,
358 IWL_RX_HE_PHY_LDPC_EXT_SYM = BIT(20),
359 IWL_RX_HE_PHY_PRE_FEC_PAD_MASK = 0x600000,
360 IWL_RX_HE_PHY_PE_DISAMBIG = BIT(23),
361 IWL_RX_HE_PHY_DOPPLER = BIT(24),
362 /* 6 bits reserved */
363 IWL_RX_HE_PHY_DELIM_EOF = BIT(31),
364
365 /* second dword - MU data */
366 IWL_RX_HE_PHY_SIGB_COMPRESSION = BIT_ULL(32 + 0),
367 IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK = 0x1e00000000ULL,
368 IWL_RX_HE_PHY_HE_LTF_NUM_MASK = 0xe000000000ULL,
369 IWL_RX_HE_PHY_RU_ALLOC_SEC80 = BIT_ULL(32 + 8),
370 /* trigger encoded */
371 IWL_RX_HE_PHY_RU_ALLOC_MASK = 0xfe0000000000ULL,
372 IWL_RX_HE_PHY_SIGB_MCS_MASK = 0xf000000000000ULL,
373 /* 1 bit reserved */
374 IWL_RX_HE_PHY_SIGB_DCM = BIT_ULL(32 + 21),
375 IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK = 0xc0000000000000ULL,
376 /* 8 bits reserved */
377};
378
379/**
380 * struct iwl_rx_mpdu_desc_v1 - RX MPDU descriptor
381 */
382struct iwl_rx_mpdu_desc_v1 {
383 /* DW7 - carries rss_hash only when rpa_en == 1 */
384 /**
385 * @rss_hash: RSS hash value
386 */
387 __le32 rss_hash;
388 /* DW8 - carries filter_match only when rpa_en == 1 */
389 /**
390 * @filter_match: filter match value
391 */
392 __le32 filter_match;
393 /* DW9 */
394 /**
395 * @rate_n_flags: RX rate/flags encoding
396 */
397 __le32 rate_n_flags;
398 /* DW10 */
399 /**
400 * @energy_a: energy chain A
401 */
402 u8 energy_a;
403 /**
404 * @energy_b: energy chain B
405 */
406 u8 energy_b;
407 /**
408 * @channel: channel number
409 */
410 u8 channel;
411 /**
412 * @mac_context: MAC context mask
413 */
414 u8 mac_context;
415 /* DW11 */
416 /**
417 * @gp2_on_air_rise: GP2 timer value on air rise (INA)
418 */
419 __le32 gp2_on_air_rise;
420 /* DW12 & DW13 */
421 union {
422 /**
423 * @tsf_on_air_rise:
424 * TSF value on air rise (INA), only valid if
425 * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
426 */
427 __le64 tsf_on_air_rise;
428 /**
429 * @he_phy_data:
430 * HE PHY data, see &enum iwl_rx_he_phy, valid
431 * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set
432 */
433 __le64 he_phy_data;
434 };
435} __packed;
436
437/**
438 * struct iwl_rx_mpdu_desc_v3 - RX MPDU descriptor
439 */
440struct iwl_rx_mpdu_desc_v3 {
441 /* DW7 - carries filter_match only when rpa_en == 1 */
442 /**
443 * @filter_match: filter match value
444 */
445 __le32 filter_match;
446 /* DW8 - carries rss_hash only when rpa_en == 1 */
447 /**
448 * @rss_hash: RSS hash value
449 */
450 __le32 rss_hash;
451 /* DW9 */
452 /**
453 * @partial_hash: 31:0 ip/tcp header hash
454 * w/o some fields (such as IP SRC addr)
455 */
456 __le32 partial_hash;
457 /* DW10 */
458 /**
459 * @raw_xsum: raw xsum value
460 */
461 __le32 raw_xsum;
462 /* DW11 */
463 /**
464 * @rate_n_flags: RX rate/flags encoding
465 */
466 __le32 rate_n_flags;
467 /* DW12 */
468 /**
469 * @energy_a: energy chain A
470 */
471 u8 energy_a;
472 /**
473 * @energy_b: energy chain B
474 */
475 u8 energy_b;
476 /**
477 * @channel: channel number
478 */
479 u8 channel;
480 /**
481 * @mac_context: MAC context mask
482 */
483 u8 mac_context;
484 /* DW13 */
485 /**
486 * @gp2_on_air_rise: GP2 timer value on air rise (INA)
487 */
488 __le32 gp2_on_air_rise;
489 /* DW14 & DW15 */
490 union {
491 /**
492 * @tsf_on_air_rise:
493 * TSF value on air rise (INA), only valid if
494 * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
495 */
496 __le64 tsf_on_air_rise;
497 /**
498 * @he_phy_data:
499 * HE PHY data, see &enum iwl_rx_he_phy, valid
500 * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set
501 */
502 __le64 he_phy_data;
503 };
504 /* DW16 & DW17 */
505 /**
506 * @reserved: reserved
507 */
508 __le32 reserved[2];
509} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */
510
346/** 511/**
347 * struct iwl_rx_mpdu_desc - RX MPDU descriptor 512 * struct iwl_rx_mpdu_desc - RX MPDU descriptor
348 */ 513 */
@@ -400,51 +565,14 @@ struct iwl_rx_mpdu_desc {
400 * @reorder_data: &enum iwl_rx_mpdu_reorder_data 565 * @reorder_data: &enum iwl_rx_mpdu_reorder_data
401 */ 566 */
402 __le32 reorder_data; 567 __le32 reorder_data;
403 /* DW7 - carries rss_hash only when rpa_en == 1 */ 568
404 /** 569 union {
405 * @rss_hash: RSS hash value 570 struct iwl_rx_mpdu_desc_v1 v1;
406 */ 571 struct iwl_rx_mpdu_desc_v3 v3;
407 __le32 rss_hash; 572 };
408 /* DW8 - carries filter_match only when rpa_en == 1 */ 573} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */
409 /** 574
410 * @filter_match: filter match value 575#define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1)
411 */
412 __le32 filter_match;
413 /* DW9 */
414 /**
415 * @rate_n_flags: RX rate/flags encoding
416 */
417 __le32 rate_n_flags;
418 /* DW10 */
419 /**
420 * @energy_a: energy chain A
421 */
422 u8 energy_a;
423 /**
424 * @energy_b: energy chain B
425 */
426 u8 energy_b;
427 /**
428 * @channel: channel number
429 */
430 u8 channel;
431 /**
432 * @mac_context: MAC context mask
433 */
434 u8 mac_context;
435 /* DW11 */
436 /**
437 * @gp2_on_air_rise: GP2 timer value on air rise (INA)
438 */
439 __le32 gp2_on_air_rise;
440 /* DW12 & DW13 */
441 /**
442 * @tsf_on_air_rise:
443 * TSF value on air rise (INA), only valid if
444 * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
445 */
446 __le64 tsf_on_air_rise;
447} __packed;
448 576
449struct iwl_frame_release { 577struct iwl_frame_release {
450 u8 baid; 578 u8 baid;
@@ -587,4 +715,36 @@ struct iwl_ba_window_status_notif {
587 __le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX]; 715 __le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX];
588} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */ 716} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */
589 717
718/**
719 * struct iwl_rfh_queue_config - RX queue configuration
720 * @q_num: Q num
721 * @enable: enable queue
722 * @reserved: alignment
723 * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
724 * @fr_bd_cb: DMA address of freeRB table
725 * @ur_bd_cb: DMA address of used RB table
726 * @fr_bd_wid: Initial index of the free table
727 */
728struct iwl_rfh_queue_data {
729 u8 q_num;
730 u8 enable;
731 __le16 reserved;
732 __le64 urbd_stts_wrptr;
733 __le64 fr_bd_cb;
734 __le64 ur_bd_cb;
735 __le32 fr_bd_wid;
736} __packed; /* RFH_QUEUE_CONFIG_S_VER_1 */
737
738/**
739 * struct iwl_rfh_queue_config - RX queue configuration
740 * @num_queues: number of queues configured
741 * @reserved: alignment
742 * @data: DMA addresses per-queue
743 */
744struct iwl_rfh_queue_config {
745 u8 num_queues;
746 u8 reserved[3];
747 struct iwl_rfh_queue_data data[];
748} __packed; /* RFH_QUEUE_CONFIG_API_S_VER_1 */
749
590#endif /* __iwl_fw_api_rx_h__ */ 750#endif /* __iwl_fw_api_rx_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index a2a40b515a3c..514b86123d3d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 9 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
10 * Copyright(c) 2018 Intel Corporation
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 13 * it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +29,7 @@
28 * 29 *
29 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 30 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
30 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 31 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
32 * Copyright(c) 2018 Intel Corporation
31 * All rights reserved. 33 * All rights reserved.
32 * 34 *
33 * Redistribution and use in source and binary forms, with or without 35 * Redistribution and use in source and binary forms, with or without
@@ -320,6 +322,29 @@ struct iwl_tx_cmd_gen2 {
320 struct ieee80211_hdr hdr[0]; 322 struct ieee80211_hdr hdr[0];
321} __packed; /* TX_CMD_API_S_VER_7 */ 323} __packed; /* TX_CMD_API_S_VER_7 */
322 324
325/**
326 * struct iwl_tx_cmd_gen3 - TX command struct to FW for 22560 devices
327 * ( TX_CMD = 0x1c )
328 * @len: in bytes of the payload, see below for details
329 * @flags: combination of &enum iwl_tx_cmd_flags
330 * @offload_assist: TX offload configuration
331 * @dram_info: FW internal DRAM storage
332 * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
333 * cleared. Combination of RATE_MCS_*
334 * @ttl: time to live - packet lifetime limit. The FW should drop if
335 * passed.
336 * @hdr: 802.11 header
337 */
338struct iwl_tx_cmd_gen3 {
339 __le16 len;
340 __le16 flags;
341 __le32 offload_assist;
342 struct iwl_dram_sec_info dram_info;
343 __le32 rate_n_flags;
344 __le64 ttl;
345 struct ieee80211_hdr hdr[0];
346} __packed; /* TX_CMD_API_S_VER_8 */
347
323/* 348/*
324 * TX response related data 349 * TX response related data
325 */ 350 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c b/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c
deleted file mode 100644
index 6f75985eea66..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c
+++ /dev/null
@@ -1,88 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2017 Intel Deutschland GmbH
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * The full GNU General Public License is included in this distribution
20 * in the file called COPYING.
21 *
22 * Contact Information:
23 * Intel Linux Wireless <linuxwifi@intel.com>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 * BSD LICENSE
27 *
28 * Copyright(c) 2017 Intel Deutschland GmbH
29 * All rights reserved.
30 *
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
33 * are met:
34 *
35 * * Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * * Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in
39 * the documentation and/or other materials provided with the
40 * distribution.
41 * * Neither the name Intel Corporation nor the names of its
42 * contributors may be used to endorse or promote products derived
43 * from this software without specific prior written permission.
44 *
45 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
46 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
47 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
48 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
49 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
50 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
51 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
52 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
53 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
54 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
55 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
56 *
57 *****************************************************************************/
58#include "iwl-drv.h"
59#include "runtime.h"
60#include "fw/api/commands.h"
61#include "fw/api/alive.h"
62
63static void iwl_fwrt_fseq_ver_mismatch(struct iwl_fw_runtime *fwrt,
64 struct iwl_rx_cmd_buffer *rxb)
65{
66 struct iwl_rx_packet *pkt = rxb_addr(rxb);
67 struct iwl_fseq_ver_mismatch_ntf *fseq = (void *)pkt->data;
68
69 IWL_ERR(fwrt, "FSEQ version mismatch (aux: %d, wifi: %d)\n",
70 __le32_to_cpu(fseq->aux_read_fseq_ver),
71 __le32_to_cpu(fseq->wifi_fseq_ver));
72}
73
74void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt,
75 struct iwl_rx_cmd_buffer *rxb)
76{
77 struct iwl_rx_packet *pkt = rxb_addr(rxb);
78 u32 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
79
80 switch (cmd) {
81 case WIDE_ID(SYSTEM_GROUP, FSEQ_VER_MISMATCH_NTF):
82 iwl_fwrt_fseq_ver_mismatch(fwrt, rxb);
83 break;
84 default:
85 break;
86 }
87}
88IWL_EXPORT_SYMBOL(iwl_fwrt_handle_notification);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index fa283285fcbe..a31a42e673c4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -243,39 +243,47 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
243 if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) 243 if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
244 return; 244 return;
245 245
246 /* Pull RXF1 */ 246 if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
247 iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0); 247 /* Pull RXF1 */
248 /* Pull RXF2 */ 248 iwl_fwrt_dump_rxf(fwrt, dump_data,
249 iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size, 249 cfg->lmac[0].rxfifo1_size, 0, 0);
250 RXF_DIFF_FROM_PREV, 1); 250 /* Pull RXF2 */
251 /* Pull LMAC2 RXF1 */ 251 iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
252 if (fwrt->smem_cfg.num_lmacs > 1) 252 RXF_DIFF_FROM_PREV, 1);
253 iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[1].rxfifo1_size, 253 /* Pull LMAC2 RXF1 */
254 LMAC2_PRPH_OFFSET, 2); 254 if (fwrt->smem_cfg.num_lmacs > 1)
255 255 iwl_fwrt_dump_rxf(fwrt, dump_data,
256 /* Pull TXF data from LMAC1 */ 256 cfg->lmac[1].rxfifo1_size,
257 for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { 257 LMAC2_PRPH_OFFSET, 2);
258 /* Mark the number of TXF we're pulling now */
259 iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
260 iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[0].txfifo_size[i],
261 0, i);
262 } 258 }
263 259
264 /* Pull TXF data from LMAC2 */ 260 if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
265 if (fwrt->smem_cfg.num_lmacs > 1) { 261 /* Pull TXF data from LMAC1 */
266 for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { 262 for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
267 /* Mark the number of TXF we're pulling now */ 263 /* Mark the number of TXF we're pulling now */
268 iwl_trans_write_prph(fwrt->trans, 264 iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
269 TXF_LARC_NUM + LMAC2_PRPH_OFFSET,
270 i);
271 iwl_fwrt_dump_txf(fwrt, dump_data, 265 iwl_fwrt_dump_txf(fwrt, dump_data,
272 cfg->lmac[1].txfifo_size[i], 266 cfg->lmac[0].txfifo_size[i], 0, i);
273 LMAC2_PRPH_OFFSET, 267 }
274 i + cfg->num_txfifo_entries); 268
269 /* Pull TXF data from LMAC2 */
270 if (fwrt->smem_cfg.num_lmacs > 1) {
271 for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries;
272 i++) {
273 /* Mark the number of TXF we're pulling now */
274 iwl_trans_write_prph(fwrt->trans,
275 TXF_LARC_NUM +
276 LMAC2_PRPH_OFFSET, i);
277 iwl_fwrt_dump_txf(fwrt, dump_data,
278 cfg->lmac[1].txfifo_size[i],
279 LMAC2_PRPH_OFFSET,
280 i + cfg->num_txfifo_entries);
281 }
275 } 282 }
276 } 283 }
277 284
278 if (fw_has_capa(&fwrt->fw->ucode_capa, 285 if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
286 fw_has_capa(&fwrt->fw->ucode_capa,
279 IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { 287 IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
280 /* Pull UMAC internal TXF data from all TXFs */ 288 /* Pull UMAC internal TXF data from all TXFs */
281 for (i = 0; 289 for (i = 0;
@@ -600,42 +608,54 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
600 if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { 608 if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
601 fifo_data_len = 0; 609 fifo_data_len = 0;
602 610
603 /* Count RXF2 size */ 611 if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
604 if (mem_cfg->rxfifo2_size) {
605 /* Add header info */
606 fifo_data_len += mem_cfg->rxfifo2_size +
607 sizeof(*dump_data) +
608 sizeof(struct iwl_fw_error_dump_fifo);
609 }
610
611 /* Count RXF1 sizes */
612 for (i = 0; i < mem_cfg->num_lmacs; i++) {
613 if (!mem_cfg->lmac[i].rxfifo1_size)
614 continue;
615
616 /* Add header info */
617 fifo_data_len += mem_cfg->lmac[i].rxfifo1_size +
618 sizeof(*dump_data) +
619 sizeof(struct iwl_fw_error_dump_fifo);
620 }
621 612
622 /* Count TXF sizes */ 613 /* Count RXF2 size */
623 for (i = 0; i < mem_cfg->num_lmacs; i++) { 614 if (mem_cfg->rxfifo2_size) {
624 int j; 615 /* Add header info */
616 fifo_data_len +=
617 mem_cfg->rxfifo2_size +
618 sizeof(*dump_data) +
619 sizeof(struct iwl_fw_error_dump_fifo);
620 }
625 621
626 for (j = 0; j < mem_cfg->num_txfifo_entries; j++) { 622 /* Count RXF1 sizes */
627 if (!mem_cfg->lmac[i].txfifo_size[j]) 623 for (i = 0; i < mem_cfg->num_lmacs; i++) {
624 if (!mem_cfg->lmac[i].rxfifo1_size)
628 continue; 625 continue;
629 626
630 /* Add header info */ 627 /* Add header info */
631 fifo_data_len += 628 fifo_data_len +=
632 mem_cfg->lmac[i].txfifo_size[j] + 629 mem_cfg->lmac[i].rxfifo1_size +
633 sizeof(*dump_data) + 630 sizeof(*dump_data) +
634 sizeof(struct iwl_fw_error_dump_fifo); 631 sizeof(struct iwl_fw_error_dump_fifo);
635 } 632 }
636 } 633 }
637 634
638 if (fw_has_capa(&fwrt->fw->ucode_capa, 635 if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
636 size_t fifo_const_len = sizeof(*dump_data) +
637 sizeof(struct iwl_fw_error_dump_fifo);
638
639 /* Count TXF sizes */
640 for (i = 0; i < mem_cfg->num_lmacs; i++) {
641 int j;
642
643 for (j = 0; j < mem_cfg->num_txfifo_entries;
644 j++) {
645 if (!mem_cfg->lmac[i].txfifo_size[j])
646 continue;
647
648 /* Add header info */
649 fifo_data_len +=
650 fifo_const_len +
651 mem_cfg->lmac[i].txfifo_size[j];
652 }
653 }
654 }
655
656 if ((fwrt->fw->dbg_dump_mask &
657 BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) &&
658 fw_has_capa(&fwrt->fw->ucode_capa,
639 IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { 659 IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
640 for (i = 0; 660 for (i = 0;
641 i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); 661 i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
@@ -652,7 +672,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
652 } 672 }
653 673
654 /* Make room for PRPH registers */ 674 /* Make room for PRPH registers */
655 if (!fwrt->trans->cfg->gen2) { 675 if (!fwrt->trans->cfg->gen2 &&
676 fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
656 for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm); 677 for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
657 i++) { 678 i++) {
658 /* The range includes both boundaries */ 679 /* The range includes both boundaries */
@@ -667,7 +688,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
667 } 688 }
668 689
669 if (!fwrt->trans->cfg->gen2 && 690 if (!fwrt->trans->cfg->gen2 &&
670 fwrt->trans->cfg->mq_rx_supported) { 691 fwrt->trans->cfg->mq_rx_supported &&
692 fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
671 for (i = 0; i < 693 for (i = 0; i <
672 ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) { 694 ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
673 /* The range includes both boundaries */ 695 /* The range includes both boundaries */
@@ -681,34 +703,42 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
681 } 703 }
682 } 704 }
683 705
684 if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) 706 if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
707 fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
685 radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ; 708 radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
686 } 709 }
687 710
688 file_len = sizeof(*dump_file) + 711 file_len = sizeof(*dump_file) +
689 sizeof(*dump_data) * 3 +
690 sizeof(*dump_smem_cfg) +
691 fifo_data_len + 712 fifo_data_len +
692 prph_len + 713 prph_len +
693 radio_len + 714 radio_len;
694 sizeof(*dump_info); 715
695 716 if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO))
696 /* Make room for the SMEM, if it exists */ 717 file_len += sizeof(*dump_data) + sizeof(*dump_info);
697 if (smem_len) 718 if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG))
698 file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len; 719 file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);
699 720
700 /* Make room for the secondary SRAM, if it exists */ 721 if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
701 if (sram2_len) 722 /* Make room for the SMEM, if it exists */
702 file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; 723 if (smem_len)
703 724 file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
704 /* Make room for MEM segments */ 725 smem_len;
705 for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) { 726
706 file_len += sizeof(*dump_data) + sizeof(*dump_mem) + 727 /* Make room for the secondary SRAM, if it exists */
707 le32_to_cpu(fw_dbg_mem[i].len); 728 if (sram2_len)
729 file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
730 sram2_len;
731
732 /* Make room for MEM segments */
733 for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
734 file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
735 le32_to_cpu(fw_dbg_mem[i].len);
736 }
708 } 737 }
709 738
710 /* Make room for fw's virtual image pages, if it exists */ 739 /* Make room for fw's virtual image pages, if it exists */
711 if (!fwrt->trans->cfg->gen2 && 740 if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
741 !fwrt->trans->cfg->gen2 &&
712 fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && 742 fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
713 fwrt->fw_paging_db[0].fw_paging_block) 743 fwrt->fw_paging_db[0].fw_paging_block)
714 file_len += fwrt->num_of_paging_blk * 744 file_len += fwrt->num_of_paging_blk *
@@ -722,12 +752,14 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
722 sizeof(*dump_info) + sizeof(*dump_smem_cfg); 752 sizeof(*dump_info) + sizeof(*dump_smem_cfg);
723 } 753 }
724 754
725 if (fwrt->dump.desc) 755 if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
756 fwrt->dump.desc)
726 file_len += sizeof(*dump_data) + sizeof(*dump_trig) + 757 file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
727 fwrt->dump.desc->len; 758 fwrt->dump.desc->len;
728 759
729 if (!fwrt->fw->n_dbg_mem_tlv) 760 if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM) &&
730 file_len += sram_len + sizeof(*dump_mem); 761 !fwrt->fw->n_dbg_mem_tlv)
762 file_len += sizeof(*dump_data) + sram_len + sizeof(*dump_mem);
731 763
732 dump_file = vzalloc(file_len); 764 dump_file = vzalloc(file_len);
733 if (!dump_file) { 765 if (!dump_file) {
@@ -740,48 +772,56 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
740 dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); 772 dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
741 dump_data = (void *)dump_file->data; 773 dump_data = (void *)dump_file->data;
742 774
743 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO); 775 if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
744 dump_data->len = cpu_to_le32(sizeof(*dump_info)); 776 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
745 dump_info = (void *)dump_data->data; 777 dump_data->len = cpu_to_le32(sizeof(*dump_info));
746 dump_info->device_family = 778 dump_info = (void *)dump_data->data;
747 fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ? 779 dump_info->device_family =
748 cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) : 780 fwrt->trans->cfg->device_family ==
749 cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8); 781 IWL_DEVICE_FAMILY_7000 ?
750 dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev)); 782 cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
751 memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable, 783 cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
752 sizeof(dump_info->fw_human_readable)); 784 dump_info->hw_step =
753 strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name, 785 cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
754 sizeof(dump_info->dev_human_readable)); 786 memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
755 strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name, 787 sizeof(dump_info->fw_human_readable));
756 sizeof(dump_info->bus_human_readable)); 788 strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
757 789 sizeof(dump_info->dev_human_readable) - 1);
758 dump_data = iwl_fw_error_next_data(dump_data); 790 strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
759 791 sizeof(dump_info->bus_human_readable) - 1);
760 /* Dump shared memory configuration */ 792
761 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG); 793 dump_data = iwl_fw_error_next_data(dump_data);
762 dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
763 dump_smem_cfg = (void *)dump_data->data;
764 dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
765 dump_smem_cfg->num_txfifo_entries =
766 cpu_to_le32(mem_cfg->num_txfifo_entries);
767 for (i = 0; i < MAX_NUM_LMAC; i++) {
768 int j;
769
770 for (j = 0; j < TX_FIFO_MAX_NUM; j++)
771 dump_smem_cfg->lmac[i].txfifo_size[j] =
772 cpu_to_le32(mem_cfg->lmac[i].txfifo_size[j]);
773 dump_smem_cfg->lmac[i].rxfifo1_size =
774 cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
775 }
776 dump_smem_cfg->rxfifo2_size = cpu_to_le32(mem_cfg->rxfifo2_size);
777 dump_smem_cfg->internal_txfifo_addr =
778 cpu_to_le32(mem_cfg->internal_txfifo_addr);
779 for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
780 dump_smem_cfg->internal_txfifo_size[i] =
781 cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
782 } 794 }
783 795
784 dump_data = iwl_fw_error_next_data(dump_data); 796 if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) {
797 /* Dump shared memory configuration */
798 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
799 dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
800 dump_smem_cfg = (void *)dump_data->data;
801 dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
802 dump_smem_cfg->num_txfifo_entries =
803 cpu_to_le32(mem_cfg->num_txfifo_entries);
804 for (i = 0; i < MAX_NUM_LMAC; i++) {
805 int j;
806 u32 *txf_size = mem_cfg->lmac[i].txfifo_size;
807
808 for (j = 0; j < TX_FIFO_MAX_NUM; j++)
809 dump_smem_cfg->lmac[i].txfifo_size[j] =
810 cpu_to_le32(txf_size[j]);
811 dump_smem_cfg->lmac[i].rxfifo1_size =
812 cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
813 }
814 dump_smem_cfg->rxfifo2_size =
815 cpu_to_le32(mem_cfg->rxfifo2_size);
816 dump_smem_cfg->internal_txfifo_addr =
817 cpu_to_le32(mem_cfg->internal_txfifo_addr);
818 for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
819 dump_smem_cfg->internal_txfifo_size[i] =
820 cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
821 }
822
823 dump_data = iwl_fw_error_next_data(dump_data);
824 }
785 825
786 /* We only dump the FIFOs if the FW is in error state */ 826 /* We only dump the FIFOs if the FW is in error state */
787 if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { 827 if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
@@ -790,7 +830,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
790 iwl_read_radio_regs(fwrt, &dump_data); 830 iwl_read_radio_regs(fwrt, &dump_data);
791 } 831 }
792 832
793 if (fwrt->dump.desc) { 833 if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
834 fwrt->dump.desc) {
794 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO); 835 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
795 dump_data->len = cpu_to_le32(sizeof(*dump_trig) + 836 dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
796 fwrt->dump.desc->len); 837 fwrt->dump.desc->len);
@@ -805,7 +846,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
805 if (monitor_dump_only) 846 if (monitor_dump_only)
806 goto dump_trans_data; 847 goto dump_trans_data;
807 848
808 if (!fwrt->fw->n_dbg_mem_tlv) { 849 if (!fwrt->fw->n_dbg_mem_tlv &&
850 fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
809 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); 851 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
810 dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); 852 dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
811 dump_mem = (void *)dump_data->data; 853 dump_mem = (void *)dump_data->data;
@@ -821,6 +863,9 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
821 u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs); 863 u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
822 bool success; 864 bool success;
823 865
866 if (!(fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)))
867 break;
868
824 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); 869 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
825 dump_data->len = cpu_to_le32(len + sizeof(*dump_mem)); 870 dump_data->len = cpu_to_le32(len + sizeof(*dump_mem));
826 dump_mem = (void *)dump_data->data; 871 dump_mem = (void *)dump_data->data;
@@ -854,7 +899,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
854 dump_data = iwl_fw_error_next_data(dump_data); 899 dump_data = iwl_fw_error_next_data(dump_data);
855 } 900 }
856 901
857 if (smem_len) { 902 if (smem_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
858 IWL_DEBUG_INFO(fwrt, "WRT SMEM dump\n"); 903 IWL_DEBUG_INFO(fwrt, "WRT SMEM dump\n");
859 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); 904 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
860 dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem)); 905 dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
@@ -867,7 +912,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
867 dump_data = iwl_fw_error_next_data(dump_data); 912 dump_data = iwl_fw_error_next_data(dump_data);
868 } 913 }
869 914
870 if (sram2_len) { 915 if (sram2_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
871 IWL_DEBUG_INFO(fwrt, "WRT SRAM dump\n"); 916 IWL_DEBUG_INFO(fwrt, "WRT SRAM dump\n");
872 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); 917 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
873 dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem)); 918 dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
@@ -881,7 +926,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
881 } 926 }
882 927
883 /* Dump fw's virtual image */ 928 /* Dump fw's virtual image */
884 if (!fwrt->trans->cfg->gen2 && 929 if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
930 !fwrt->trans->cfg->gen2 &&
885 fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && 931 fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
886 fwrt->fw_paging_db[0].fw_paging_block) { 932 fwrt->fw_paging_db[0].fw_paging_block) {
887 IWL_DEBUG_INFO(fwrt, "WRT paging dump\n"); 933 IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 9d939cbaf6c6..bbf2b265a06a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -146,6 +146,9 @@ enum iwl_ucode_tlv_type {
146 IWL_UCODE_TLV_FW_GSCAN_CAPA = 50, 146 IWL_UCODE_TLV_FW_GSCAN_CAPA = 50,
147 IWL_UCODE_TLV_FW_MEM_SEG = 51, 147 IWL_UCODE_TLV_FW_MEM_SEG = 51,
148 IWL_UCODE_TLV_IML = 52, 148 IWL_UCODE_TLV_IML = 52,
149
150 /* TLVs 0x1000-0x2000 are for internal driver usage */
151 IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000,
149}; 152};
150 153
151struct iwl_ucode_tlv { 154struct iwl_ucode_tlv {
@@ -318,7 +321,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
318 * IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR 321 * IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
319 * is supported. 322 * is supported.
320 * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC 323 * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
321 * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan 324 * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan (no longer used)
322 * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification 325 * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
323 * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm 326 * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
324 * @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related 327 * @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related
@@ -889,39 +892,4 @@ struct iwl_fw_dbg_conf_tlv {
889 struct iwl_fw_dbg_conf_hcmd hcmd; 892 struct iwl_fw_dbg_conf_hcmd hcmd;
890} __packed; 893} __packed;
891 894
892/**
893 * struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW
894 * @max_scan_cache_size: total space allocated for scan results (in bytes).
895 * @max_scan_buckets: maximum number of channel buckets.
896 * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
897 * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
898 * @max_scan_reporting_threshold: max possible report threshold. in percentage.
899 * @max_hotlist_aps: maximum number of entries for hotlist APs.
900 * @max_significant_change_aps: maximum number of entries for significant
901 * change APs.
902 * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
903 * hold.
904 * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs.
905 * @max_number_epno_networks: max number of epno entries.
906 * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is
907 * specified.
908 * @max_number_of_white_listed_ssid: max number of white listed SSIDs.
909 * @max_number_of_black_listed_ssid: max number of black listed SSIDs.
910 */
911struct iwl_fw_gscan_capabilities {
912 __le32 max_scan_cache_size;
913 __le32 max_scan_buckets;
914 __le32 max_ap_cache_per_scan;
915 __le32 max_rssi_sample_size;
916 __le32 max_scan_reporting_threshold;
917 __le32 max_hotlist_aps;
918 __le32 max_significant_change_aps;
919 __le32 max_bssid_history_entries;
920 __le32 max_hotlist_ssids;
921 __le32 max_number_epno_networks;
922 __le32 max_number_epno_networks_by_ssid;
923 __le32 max_number_of_white_listed_ssid;
924 __le32 max_number_of_black_listed_ssid;
925} __packed;
926
927#endif /* __iwl_fw_file_h__ */ 895#endif /* __iwl_fw_file_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index f4912382b6af..0861b97c4233 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -193,41 +193,6 @@ struct iwl_fw_cscheme_list {
193} __packed; 193} __packed;
194 194
195/** 195/**
196 * struct iwl_gscan_capabilities - gscan capabilities supported by FW
197 * @max_scan_cache_size: total space allocated for scan results (in bytes).
198 * @max_scan_buckets: maximum number of channel buckets.
199 * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
200 * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
201 * @max_scan_reporting_threshold: max possible report threshold. in percentage.
202 * @max_hotlist_aps: maximum number of entries for hotlist APs.
203 * @max_significant_change_aps: maximum number of entries for significant
204 * change APs.
205 * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
206 * hold.
207 * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs.
208 * @max_number_epno_networks: max number of epno entries.
209 * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is
210 * specified.
211 * @max_number_of_white_listed_ssid: max number of white listed SSIDs.
212 * @max_number_of_black_listed_ssid: max number of black listed SSIDs.
213 */
214struct iwl_gscan_capabilities {
215 u32 max_scan_cache_size;
216 u32 max_scan_buckets;
217 u32 max_ap_cache_per_scan;
218 u32 max_rssi_sample_size;
219 u32 max_scan_reporting_threshold;
220 u32 max_hotlist_aps;
221 u32 max_significant_change_aps;
222 u32 max_bssid_history_entries;
223 u32 max_hotlist_ssids;
224 u32 max_number_epno_networks;
225 u32 max_number_epno_networks_by_ssid;
226 u32 max_number_of_white_listed_ssid;
227 u32 max_number_of_black_listed_ssid;
228};
229
230/**
231 * enum iwl_fw_type - iwlwifi firmware type 196 * enum iwl_fw_type - iwlwifi firmware type
232 * @IWL_FW_DVM: DVM firmware 197 * @IWL_FW_DVM: DVM firmware
233 * @IWL_FW_MVM: MVM firmware 198 * @IWL_FW_MVM: MVM firmware
@@ -298,7 +263,7 @@ struct iwl_fw {
298 size_t n_dbg_mem_tlv; 263 size_t n_dbg_mem_tlv;
299 size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; 264 size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
300 u8 dbg_dest_reg_num; 265 u8 dbg_dest_reg_num;
301 struct iwl_gscan_capabilities gscan_capa; 266 u32 dbg_dump_mask;
302}; 267};
303 268
304static inline const char *get_fw_dbg_mode_string(int mode) 269static inline const char *get_fw_dbg_mode_string(int mode)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index d8db1dd100b0..ed23367f7088 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -168,7 +168,4 @@ void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt);
168 168
169void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt); 169void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt);
170 170
171void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt,
172 struct iwl_rx_cmd_buffer *rxb);
173
174#endif /* __iwl_fw_runtime_h__ */ 171#endif /* __iwl_fw_runtime_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
index fb4b6442b4d7..ff85d69c2a8c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
@@ -8,6 +8,7 @@
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation
11 * 12 *
12 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as 14 * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
30 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 31 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
31 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
32 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 33 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
34 * Copyright(c) 2018 Intel Corporation
33 * All rights reserved. 35 * All rights reserved.
34 * 36 *
35 * Redistribution and use in source and binary forms, with or without 37 * Redistribution and use in source and binary forms, with or without
@@ -143,7 +145,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
143 return; 145 return;
144 146
145 pkt = cmd.resp_pkt; 147 pkt = cmd.resp_pkt;
146 if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) 148 if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
147 iwl_parse_shared_mem_22000(fwrt, pkt); 149 iwl_parse_shared_mem_22000(fwrt, pkt);
148 else 150 else
149 iwl_parse_shared_mem(fwrt, pkt); 151 iwl_parse_shared_mem(fwrt, pkt);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 84a816809723..12fddcf15bab 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -93,6 +93,7 @@ enum iwl_device_family {
93 IWL_DEVICE_FAMILY_8000, 93 IWL_DEVICE_FAMILY_8000,
94 IWL_DEVICE_FAMILY_9000, 94 IWL_DEVICE_FAMILY_9000,
95 IWL_DEVICE_FAMILY_22000, 95 IWL_DEVICE_FAMILY_22000,
96 IWL_DEVICE_FAMILY_22560,
96}; 97};
97 98
98/* 99/*
@@ -176,6 +177,7 @@ static inline u8 num_of_ant(u8 mask)
176 * @apmg_wake_up_wa: should the MAC access REQ be asserted when a command 177 * @apmg_wake_up_wa: should the MAC access REQ be asserted when a command
177 * is in flight. This is due to a HW bug in 7260, 3160 and 7265. 178 * is in flight. This is due to a HW bug in 7260, 3160 and 7265.
178 * @scd_chain_ext_wa: should the chain extension feature in SCD be disabled. 179 * @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
180 * @max_tfd_queue_size: max number of entries in tfd queue.
179 */ 181 */
180struct iwl_base_params { 182struct iwl_base_params {
181 unsigned int wd_timeout; 183 unsigned int wd_timeout;
@@ -191,6 +193,7 @@ struct iwl_base_params {
191 scd_chain_ext_wa:1; 193 scd_chain_ext_wa:1;
192 194
193 u16 num_of_queues; /* def: HW dependent */ 195 u16 num_of_queues; /* def: HW dependent */
196 u32 max_tfd_queue_size; /* def: HW dependent */
194 197
195 u8 max_ll_items; 198 u8 max_ll_items;
196 u8 led_compensation; 199 u8 led_compensation;
@@ -571,9 +574,11 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
571extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb; 574extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
572extern const struct iwl_cfg iwl22000_2ac_cfg_jf; 575extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
573extern const struct iwl_cfg iwl22000_2ax_cfg_hr; 576extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
574extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0; 577extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0;
578extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
575extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0; 579extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0;
576extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0; 580extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
581extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb;
577#endif /* CONFIG_IWLMVM */ 582#endif /* CONFIG_IWLMVM */
578 583
579#endif /* __IWL_CONFIG_H__ */ 584#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
new file mode 100644
index 000000000000..ebea99189ca9
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -0,0 +1,286 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2018 Intel Corporation
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * BSD LICENSE
20 *
21 * Copyright(c) 2018 Intel Corporation
22 * All rights reserved.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 *
28 * * Redistributions of source code must retain the above copyright
29 * notice, this list of conditions and the following disclaimer.
30 * * Redistributions in binary form must reproduce the above copyright
31 * notice, this list of conditions and the following disclaimer in
32 * the documentation and/or other materials provided with the
33 * distribution.
34 * * Neither the name Intel Corporation nor the names of its
35 * contributors may be used to endorse or promote products derived
36 * from this software without specific prior written permission.
37 *
38 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
39 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
40 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
41 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
42 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
43 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
44 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
45 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
46 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
47 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
48 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
49 *
50 *****************************************************************************/
51#ifndef __iwl_context_info_file_gen3_h__
52#define __iwl_context_info_file_gen3_h__
53
54#include "iwl-context-info.h"
55
56#define CSR_CTXT_INFO_BOOT_CTRL 0x0
57#define CSR_CTXT_INFO_ADDR 0x118
58#define CSR_IML_DATA_ADDR 0x120
59#define CSR_IML_SIZE_ADDR 0x128
60#define CSR_IML_RESP_ADDR 0x12c
61
62/* Set bit for enabling automatic function boot */
63#define CSR_AUTO_FUNC_BOOT_ENA BIT(1)
64/* Set bit for initiating function boot */
65#define CSR_AUTO_FUNC_INIT BIT(7)
66
67/**
68 * enum iwl_prph_scratch_mtr_format - tfd size configuration
69 * @IWL_PRPH_MTR_FORMAT_16B: 16 bit tfd
70 * @IWL_PRPH_MTR_FORMAT_32B: 32 bit tfd
71 * @IWL_PRPH_MTR_FORMAT_64B: 64 bit tfd
72 * @IWL_PRPH_MTR_FORMAT_256B: 256 bit tfd
73 */
74enum iwl_prph_scratch_mtr_format {
75 IWL_PRPH_MTR_FORMAT_16B = 0x0,
76 IWL_PRPH_MTR_FORMAT_32B = 0x40000,
77 IWL_PRPH_MTR_FORMAT_64B = 0x80000,
78 IWL_PRPH_MTR_FORMAT_256B = 0xC0000,
79};
80
81/**
82 * enum iwl_prph_scratch_flags - PRPH scratch control flags
83 * @IWL_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf
84 * @IWL_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated
85 * in hwm config.
86 * @IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL: use buffer on SRAM
87 * @IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER: use st arbiter, mainly for
88 * multicomm.
89 * @IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF: route debug data to SoC HW
90 * @IWL_PRPH_SCTATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K)
91 * @IWL_PRPH_SCRATCH_MTR_MODE: format used for completion - 0: for
92 * completion descriptor, 1 for responses (legacy)
93 * @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd.
94 * There are 4 optional values: 0: 16 bit, 1: 32 bit, 2: 64 bit,
95 * 3: 256 bit.
96 */
97enum iwl_prph_scratch_flags {
98 IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4),
99 IWL_PRPH_SCRATCH_EDBG_DEST_DRAM = BIT(8),
100 IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL = BIT(9),
101 IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER = BIT(10),
102 IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF = BIT(11),
103 IWL_PRPH_SCRATCH_RB_SIZE_4K = BIT(16),
104 IWL_PRPH_SCRATCH_MTR_MODE = BIT(17),
105 IWL_PRPH_SCRATCH_MTR_FORMAT = BIT(18) | BIT(19),
106};
107
108/*
109 * struct iwl_prph_scratch_version - version structure
110 * @mac_id: SKU and revision id
111 * @version: prph scratch information version id
112 * @size: the size of the context information in DWs
113 * @reserved: reserved
114 */
115struct iwl_prph_scratch_version {
116 __le16 mac_id;
117 __le16 version;
118 __le16 size;
119 __le16 reserved;
120} __packed; /* PERIPH_SCRATCH_VERSION_S */
121
122/*
123 * struct iwl_prph_scratch_control - control structure
124 * @control_flags: context information flags see &enum iwl_prph_scratch_flags
125 * @reserved: reserved
126 */
127struct iwl_prph_scratch_control {
128 __le32 control_flags;
129 __le32 reserved;
130} __packed; /* PERIPH_SCRATCH_CONTROL_S */
131
132/*
133 * struct iwl_prph_scratch_ror_cfg - ror config
134 * @ror_base_addr: ror start address
135 * @ror_size: ror size in DWs
136 * @reserved: reserved
137 */
138struct iwl_prph_scratch_ror_cfg {
139 __le64 ror_base_addr;
140 __le32 ror_size;
141 __le32 reserved;
142} __packed; /* PERIPH_SCRATCH_ROR_CFG_S */
143
144/*
145 * struct iwl_prph_scratch_hwm_cfg - hwm config
146 * @hwm_base_addr: hwm start address
147 * @hwm_size: hwm size in DWs
148 * @reserved: reserved
149 */
150struct iwl_prph_scratch_hwm_cfg {
151 __le64 hwm_base_addr;
152 __le32 hwm_size;
153 __le32 reserved;
154} __packed; /* PERIPH_SCRATCH_HWM_CFG_S */
155
156/*
157 * struct iwl_prph_scratch_rbd_cfg - RBDs configuration
158 * @free_rbd_addr: default queue free RB CB base address
159 * @reserved: reserved
160 */
161struct iwl_prph_scratch_rbd_cfg {
162 __le64 free_rbd_addr;
163 __le32 reserved;
164} __packed; /* PERIPH_SCRATCH_RBD_CFG_S */
165
166/*
167 * struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config
168 * @version: version information of context info and HW
169 * @control: control flags of FH configurations
170 * @ror_cfg: ror configuration
171 * @hwm_cfg: hwm configuration
172 * @rbd_cfg: default RX queue configuration
173 */
174struct iwl_prph_scratch_ctrl_cfg {
175 struct iwl_prph_scratch_version version;
176 struct iwl_prph_scratch_control control;
177 struct iwl_prph_scratch_ror_cfg ror_cfg;
178 struct iwl_prph_scratch_hwm_cfg hwm_cfg;
179 struct iwl_prph_scratch_rbd_cfg rbd_cfg;
180} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
181
182/*
183 * struct iwl_prph_scratch - peripheral scratch mapping
184 * @ctrl_cfg: control and configuration of prph scratch
185 * @dram: firmware images addresses in DRAM
186 * @reserved: reserved
187 */
188struct iwl_prph_scratch {
189 struct iwl_prph_scratch_ctrl_cfg ctrl_cfg;
190 __le32 reserved[16];
191 struct iwl_context_info_dram dram;
192} __packed; /* PERIPH_SCRATCH_S */
193
194/*
195 * struct iwl_prph_info - peripheral information
196 * @boot_stage_mirror: reflects the value in the Boot Stage CSR register
197 * @ipc_status_mirror: reflects the value in the IPC Status CSR register
198 * @sleep_notif: indicates the peripheral sleep status
199 * @reserved: reserved
200 */
201struct iwl_prph_info {
202 __le32 boot_stage_mirror;
203 __le32 ipc_status_mirror;
204 __le32 sleep_notif;
205 __le32 reserved;
206} __packed; /* PERIPH_INFO_S */
207
208/*
209 * struct iwl_context_info_gen3 - device INIT configuration
210 * @version: version of the context information
211 * @size: size of context information in DWs
212 * @config: context in which the peripheral would execute - a subset of
213 * capability csr register published by the peripheral
214 * @prph_info_base_addr: the peripheral information structure start address
215 * @cr_head_idx_arr_base_addr: the completion ring head index array
216 * start address
217 * @tr_tail_idx_arr_base_addr: the transfer ring tail index array
218 * start address
219 * @cr_tail_idx_arr_base_addr: the completion ring tail index array
220 * start address
221 * @tr_head_idx_arr_base_addr: the transfer ring head index array
222 * start address
223 * @cr_idx_arr_size: number of entries in the completion ring index array
224 * @tr_idx_arr_size: number of entries in the transfer ring index array
225 * @mtr_base_addr: the message transfer ring start address
226 * @mcr_base_addr: the message completion ring start address
227 * @mtr_size: number of entries which the message transfer ring can hold
228 * @mcr_size: number of entries which the message completion ring can hold
229 * @mtr_doorbell_vec: the doorbell vector associated with the message
230 * transfer ring
231 * @mcr_doorbell_vec: the doorbell vector associated with the message
232 * completion ring
233 * @mtr_msi_vec: the MSI which shall be generated by the peripheral after
234 * completing a transfer descriptor in the message transfer ring
235 * @mcr_msi_vec: the MSI which shall be generated by the peripheral after
236 * completing a completion descriptor in the message completion ring
237 * @mtr_opt_header_size: the size of the optional header in the transfer
238 * descriptor associated with the message transfer ring in DWs
239 * @mtr_opt_footer_size: the size of the optional footer in the transfer
240 * descriptor associated with the message transfer ring in DWs
241 * @mcr_opt_header_size: the size of the optional header in the completion
242 * descriptor associated with the message completion ring in DWs
243 * @mcr_opt_footer_size: the size of the optional footer in the completion
244 * descriptor associated with the message completion ring in DWs
245 * @msg_rings_ctrl_flags: message rings control flags
246 * @prph_info_msi_vec: the MSI which shall be generated by the peripheral
247 * after updating the Peripheral Information structure
248 * @prph_scratch_base_addr: the peripheral scratch structure start address
249 * @prph_scratch_size: the size of the peripheral scratch structure in DWs
250 * @reserved: reserved
251 */
252struct iwl_context_info_gen3 {
253 __le16 version;
254 __le16 size;
255 __le32 config;
256 __le64 prph_info_base_addr;
257 __le64 cr_head_idx_arr_base_addr;
258 __le64 tr_tail_idx_arr_base_addr;
259 __le64 cr_tail_idx_arr_base_addr;
260 __le64 tr_head_idx_arr_base_addr;
261 __le16 cr_idx_arr_size;
262 __le16 tr_idx_arr_size;
263 __le64 mtr_base_addr;
264 __le64 mcr_base_addr;
265 __le16 mtr_size;
266 __le16 mcr_size;
267 __le16 mtr_doorbell_vec;
268 __le16 mcr_doorbell_vec;
269 __le16 mtr_msi_vec;
270 __le16 mcr_msi_vec;
271 u8 mtr_opt_header_size;
272 u8 mtr_opt_footer_size;
273 u8 mcr_opt_header_size;
274 u8 mcr_opt_footer_size;
275 __le16 msg_rings_ctrl_flags;
276 __le16 prph_info_msi_vec;
277 __le64 prph_scratch_base_addr;
278 __le32 prph_scratch_size;
279 __le32 reserved;
280} __packed; /* IPC_CONTEXT_INFO_S */
281
282int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
283 const struct fw_img *fw);
284void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans);
285
286#endif /* __iwl_context_info_file_gen3_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
index b870c0986744..4b6fdf3b15fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
@@ -6,6 +6,7 @@
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2017 Intel Deutschland GmbH 8 * Copyright(c) 2017 Intel Deutschland GmbH
9 * Copyright(c) 2018 Intel Corporation
9 * 10 *
10 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 12 * it under the terms of version 2 of the GNU General Public License as
@@ -19,6 +20,7 @@
19 * BSD LICENSE 20 * BSD LICENSE
20 * 21 *
21 * Copyright(c) 2017 Intel Deutschland GmbH 22 * Copyright(c) 2017 Intel Deutschland GmbH
23 * Copyright(c) 2018 Intel Corporation
22 * All rights reserved. 24 * All rights reserved.
23 * 25 *
24 * Redistribution and use in source and binary forms, with or without 26 * Redistribution and use in source and binary forms, with or without
@@ -199,5 +201,8 @@ struct iwl_context_info {
199int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw); 201int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw);
200void iwl_pcie_ctxt_info_free(struct iwl_trans *trans); 202void iwl_pcie_ctxt_info_free(struct iwl_trans *trans);
201void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans); 203void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans);
204int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
205 const struct fw_img *fw,
206 struct iwl_context_info_dram *ctxt_dram);
202 207
203#endif /* __iwl_context_info_file_h__ */ 208#endif /* __iwl_context_info_file_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index ba971d3946e2..9019de99f077 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -339,6 +339,9 @@ enum {
339/* HW_RF CHIP ID */ 339/* HW_RF CHIP ID */
340#define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF) 340#define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF)
341 341
342/* HW_RF CHIP STEP */
343#define CSR_HW_RF_STEP(_val) (((_val) >> 8) & 0xF)
344
342/* EEPROM REG */ 345/* EEPROM REG */
343#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) 346#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
344#define CSR_EEPROM_REG_BIT_CMD (0x00000002) 347#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
@@ -592,6 +595,8 @@ enum msix_fh_int_causes {
592enum msix_hw_int_causes { 595enum msix_hw_int_causes {
593 MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0), 596 MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0),
594 MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1), 597 MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1),
598 MSIX_HW_INT_CAUSES_REG_IPC = BIT(1),
599 MSIX_HW_INT_CAUSES_REG_SW_ERR_V2 = BIT(5),
595 MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6), 600 MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6),
596 MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7), 601 MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7),
597 MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8), 602 MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8),
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index c59ce4f8a5ed..c0631255aee7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -402,35 +402,6 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
402 return 0; 402 return 0;
403} 403}
404 404
405static void iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
406 const u32 len)
407{
408 struct iwl_fw_gscan_capabilities *fw_capa = (void *)data;
409 struct iwl_gscan_capabilities *capa = &fw->gscan_capa;
410
411 capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size);
412 capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets);
413 capa->max_ap_cache_per_scan =
414 le32_to_cpu(fw_capa->max_ap_cache_per_scan);
415 capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size);
416 capa->max_scan_reporting_threshold =
417 le32_to_cpu(fw_capa->max_scan_reporting_threshold);
418 capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps);
419 capa->max_significant_change_aps =
420 le32_to_cpu(fw_capa->max_significant_change_aps);
421 capa->max_bssid_history_entries =
422 le32_to_cpu(fw_capa->max_bssid_history_entries);
423 capa->max_hotlist_ssids = le32_to_cpu(fw_capa->max_hotlist_ssids);
424 capa->max_number_epno_networks =
425 le32_to_cpu(fw_capa->max_number_epno_networks);
426 capa->max_number_epno_networks_by_ssid =
427 le32_to_cpu(fw_capa->max_number_epno_networks_by_ssid);
428 capa->max_number_of_white_listed_ssid =
429 le32_to_cpu(fw_capa->max_number_of_white_listed_ssid);
430 capa->max_number_of_black_listed_ssid =
431 le32_to_cpu(fw_capa->max_number_of_black_listed_ssid);
432}
433
434/* 405/*
435 * Gets uCode section from tlv. 406 * Gets uCode section from tlv.
436 */ 407 */
@@ -644,7 +615,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
644 u32 build, paging_mem_size; 615 u32 build, paging_mem_size;
645 int num_of_cpus; 616 int num_of_cpus;
646 bool usniffer_req = false; 617 bool usniffer_req = false;
647 bool gscan_capa = false;
648 618
649 if (len < sizeof(*ucode)) { 619 if (len < sizeof(*ucode)) {
650 IWL_ERR(drv, "uCode has invalid length: %zd\n", len); 620 IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@@ -1043,6 +1013,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
1043 pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len; 1013 pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len;
1044 break; 1014 break;
1045 } 1015 }
1016 case IWL_UCODE_TLV_FW_DBG_DUMP_LST: {
1017 if (tlv_len != sizeof(u32)) {
1018 IWL_ERR(drv,
1019 "dbg lst mask size incorrect, skip\n");
1020 break;
1021 }
1022
1023 drv->fw.dbg_dump_mask =
1024 le32_to_cpup((__le32 *)tlv_data);
1025 break;
1026 }
1046 case IWL_UCODE_TLV_SEC_RT_USNIFFER: 1027 case IWL_UCODE_TLV_SEC_RT_USNIFFER:
1047 *usniffer_images = true; 1028 *usniffer_images = true;
1048 iwl_store_ucode_sec(pieces, tlv_data, 1029 iwl_store_ucode_sec(pieces, tlv_data,
@@ -1079,16 +1060,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
1079 paging_mem_size; 1060 paging_mem_size;
1080 break; 1061 break;
1081 case IWL_UCODE_TLV_FW_GSCAN_CAPA: 1062 case IWL_UCODE_TLV_FW_GSCAN_CAPA:
1082 /* 1063 /* ignored */
1083 * Don't return an error in case of a shorter tlv_len
1084 * to enable loading of FW that has an old format
1085 * of GSCAN capabilities TLV.
1086 */
1087 if (tlv_len < sizeof(struct iwl_fw_gscan_capabilities))
1088 break;
1089
1090 iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len);
1091 gscan_capa = true;
1092 break; 1064 break;
1093 case IWL_UCODE_TLV_FW_MEM_SEG: { 1065 case IWL_UCODE_TLV_FW_MEM_SEG: {
1094 struct iwl_fw_dbg_mem_seg_tlv *dbg_mem = 1066 struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
@@ -1153,19 +1125,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
1153 return -EINVAL; 1125 return -EINVAL;
1154 } 1126 }
1155 1127
1156 /*
1157 * If ucode advertises that it supports GSCAN but GSCAN
1158 * capabilities TLV is not present, or if it has an old format,
1159 * warn and continue without GSCAN.
1160 */
1161 if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
1162 !gscan_capa) {
1163 IWL_DEBUG_INFO(drv,
1164 "GSCAN is supported but capabilities TLV is unavailable\n");
1165 __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
1166 capa->_capa);
1167 }
1168
1169 return 0; 1128 return 0;
1170 1129
1171 invalid_tlv_len: 1130 invalid_tlv_len:
@@ -1316,6 +1275,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
1316 fw->ucode_capa.standard_phy_calibration_size = 1275 fw->ucode_capa.standard_phy_calibration_size =
1317 IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; 1276 IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
1318 fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS; 1277 fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
1278 /* dump all fw memory areas by default */
1279 fw->dbg_dump_mask = 0xffffffff;
1319 1280
1320 pieces = kzalloc(sizeof(*pieces), GFP_KERNEL); 1281 pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
1321 if (!pieces) 1282 if (!pieces)
@@ -1787,7 +1748,8 @@ MODULE_PARM_DESC(11n_disable,
1787 "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX"); 1748 "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
1788module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, 0444); 1749module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, 0444);
1789MODULE_PARM_DESC(amsdu_size, 1750MODULE_PARM_DESC(amsdu_size,
1790 "amsdu size 0: 12K for multi Rx queue devices, 4K for other devices 1:4K 2:8K 3:12K (default 0)"); 1751 "amsdu size 0: 12K for multi Rx queue devices, 2K for 22560 devices, "
1752 "4K for other devices 1:4K 2:8K 3:12K 4: 2K (default 0)");
1791module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444); 1753module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444);
1792MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)"); 1754MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)");
1793 1755
@@ -1856,3 +1818,7 @@ module_param_named(remove_when_gone,
1856 0444); 1818 0444);
1857MODULE_PARM_DESC(remove_when_gone, 1819MODULE_PARM_DESC(remove_when_gone,
1858 "Remove dev from PCIe bus if it is deemed inaccessible (default: false)"); 1820 "Remove dev from PCIe bus if it is deemed inaccessible (default: false)");
1821
1822module_param_named(disable_11ax, iwlwifi_mod_params.disable_11ax, bool,
1823 S_IRUGO);
1824MODULE_PARM_DESC(disable_11ax, "Disable HE capabilities (default: false)");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index 777f5df8a0c6..a4c96215933b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2018 Intel Corporation
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 13 * it under the terms of version 2 of the GNU General Public License as
@@ -18,9 +19,7 @@
18 * General Public License for more details. 19 * General Public License for more details.
19 * 20 *
20 * You should have received a copy of the GNU General Public License 21 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 22 * along with this program;
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 * 23 *
25 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING. 25 * in the file called COPYING.
@@ -33,6 +32,7 @@
33 * 32 *
34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2015 Intel Mobile Communications GmbH 34 * Copyright(c) 2015 Intel Mobile Communications GmbH
35 * Copyright(c) 2018 Intel Corporation
36 * All rights reserved. 36 * All rights reserved.
37 * 37 *
38 * Redistribution and use in source and binary forms, with or without 38 * Redistribution and use in source and binary forms, with or without
@@ -767,7 +767,7 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
767 ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING; 767 ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
768 768
769 if ((cfg->mq_rx_supported && 769 if ((cfg->mq_rx_supported &&
770 iwlwifi_mod_params.amsdu_size != IWL_AMSDU_4K) || 770 iwlwifi_mod_params.amsdu_size == IWL_AMSDU_DEF) ||
771 iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K) 771 iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
772 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; 772 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
773 773
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 11789ffb6512..df0e9ffff706 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 9 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
10 * Copyright(c) 2018 Intel Corporation
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 13 * it under the terms of version 2 of the GNU General Public License as
@@ -18,9 +19,7 @@
18 * General Public License for more details. 19 * General Public License for more details.
19 * 20 *
20 * You should have received a copy of the GNU General Public License 21 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 22 * along with this program.
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 * 23 *
25 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
26 * in the file called COPYING. 25 * in the file called COPYING.
@@ -33,6 +32,7 @@
33 * 32 *
34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 34 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
35 * Copyright(c) 2018 Intel Corporation
36 * All rights reserved. 36 * All rights reserved.
37 * 37 *
38 * Redistribution and use in source and binary forms, with or without 38 * Redistribution and use in source and binary forms, with or without
@@ -434,13 +434,15 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
434 * RXF to DRAM. 434 * RXF to DRAM.
435 * Once the RXF-to-DRAM DMA is active, this flag is immediately turned off. 435 * Once the RXF-to-DRAM DMA is active, this flag is immediately turned off.
436 */ 436 */
437#define RFH_GEN_STATUS 0xA09808 437#define RFH_GEN_STATUS 0xA09808
438#define RFH_GEN_STATUS_GEN3 0xA07824
438#define RBD_FETCH_IDLE BIT(29) 439#define RBD_FETCH_IDLE BIT(29)
439#define SRAM_DMA_IDLE BIT(30) 440#define SRAM_DMA_IDLE BIT(30)
440#define RXF_DMA_IDLE BIT(31) 441#define RXF_DMA_IDLE BIT(31)
441 442
442/* DMA configuration */ 443/* DMA configuration */
443#define RFH_RXF_DMA_CFG 0xA09820 444#define RFH_RXF_DMA_CFG 0xA09820
445#define RFH_RXF_DMA_CFG_GEN3 0xA07880
444/* RB size */ 446/* RB size */
445#define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */ 447#define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */
446#define RFH_RXF_DMA_RB_SIZE_POS 16 448#define RFH_RXF_DMA_RB_SIZE_POS 16
@@ -643,10 +645,13 @@ struct iwl_rb_status {
643 645
644 646
645#define TFD_QUEUE_SIZE_MAX (256) 647#define TFD_QUEUE_SIZE_MAX (256)
648#define TFD_QUEUE_SIZE_MAX_GEN3 (65536)
646/* cb size is the exponent - 3 */ 649/* cb size is the exponent - 3 */
647#define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3) 650#define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3)
648#define TFD_QUEUE_SIZE_BC_DUP (64) 651#define TFD_QUEUE_SIZE_BC_DUP (64)
649#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP) 652#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
653#define TFD_QUEUE_BC_SIZE_GEN3 (TFD_QUEUE_SIZE_MAX_GEN3 + \
654 TFD_QUEUE_SIZE_BC_DUP)
650#define IWL_TX_DMA_MASK DMA_BIT_MASK(36) 655#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
651#define IWL_NUM_OF_TBS 20 656#define IWL_NUM_OF_TBS 20
652#define IWL_TFH_NUM_TBS 25 657#define IWL_TFH_NUM_TBS 25
@@ -753,7 +758,7 @@ struct iwl_tfh_tfd {
753 * For devices up to 22000: 758 * For devices up to 22000:
754 * @tfd_offset 0-12 - tx command byte count 759 * @tfd_offset 0-12 - tx command byte count
755 * 12-16 - station index 760 * 12-16 - station index
756 * For 22000 and on: 761 * For 22000:
757 * @tfd_offset 0-12 - tx command byte count 762 * @tfd_offset 0-12 - tx command byte count
758 * 12-13 - number of 64 byte chunks 763 * 12-13 - number of 64 byte chunks
759 * 14-16 - reserved 764 * 14-16 - reserved
@@ -762,4 +767,15 @@ struct iwlagn_scd_bc_tbl {
762 __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; 767 __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
763} __packed; 768} __packed;
764 769
770/**
771 * struct iwl_gen3_bc_tbl scheduler byte count table gen3
772 * For 22560 and on:
773 * @tfd_offset: 0-12 - tx command byte count
774 * 12-13 - number of 64 byte chunks
775 * 14-16 - reserved
776 */
777struct iwl_gen3_bc_tbl {
778 __le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3];
779} __packed;
780
765#endif /* !__iwl_fh_h__ */ 781#endif /* !__iwl_fh_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index a7dd8a8cddf9..97072cf75bca 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -6,6 +6,7 @@
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2018 Intel Corporation
9 * 10 *
10 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 12 * it under the terms of version 2 of the GNU General Public License as
@@ -17,9 +18,7 @@
17 * General Public License for more details. 18 * General Public License for more details.
18 * 19 *
19 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software 21 * along with this program;
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 * 22 *
24 * The full GNU General Public License is included in this distribution 23 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING. 24 * in the file called COPYING.
@@ -31,6 +30,7 @@
31 * BSD LICENSE 30 * BSD LICENSE
32 * 31 *
33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 32 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
33 * Copyright(c) 2018 Intel Corporation
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -90,6 +90,8 @@ enum iwl_amsdu_size {
90 IWL_AMSDU_4K = 1, 90 IWL_AMSDU_4K = 1,
91 IWL_AMSDU_8K = 2, 91 IWL_AMSDU_8K = 2,
92 IWL_AMSDU_12K = 3, 92 IWL_AMSDU_12K = 3,
93 /* Add 2K at the end to avoid breaking current API */
94 IWL_AMSDU_2K = 4,
93}; 95};
94 96
95enum iwl_uapsd_disable { 97enum iwl_uapsd_disable {
@@ -144,6 +146,10 @@ struct iwl_mod_params {
144 bool lar_disable; 146 bool lar_disable;
145 bool fw_monitor; 147 bool fw_monitor;
146 bool disable_11ac; 148 bool disable_11ac;
149 /**
150 * @disable_11ax: disable HE capabilities, default = false
151 */
152 bool disable_11ax;
147 bool remove_when_gone; 153 bool remove_when_gone;
148}; 154};
149 155
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index b815ba38dbdb..b4c3a957c102 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -430,6 +430,13 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
430 else 430 else
431 vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895; 431 vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
432 break; 432 break;
433 case IWL_AMSDU_2K:
434 if (cfg->mq_rx_supported)
435 vht_cap->cap |=
436 IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
437 else
438 WARN(1, "RB size of 2K is not supported by this device\n");
439 break;
433 case IWL_AMSDU_4K: 440 case IWL_AMSDU_4K:
434 vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895; 441 vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
435 break; 442 break;
@@ -463,6 +470,101 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
463 vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map; 470 vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
464} 471}
465 472
473static struct ieee80211_sband_iftype_data iwl_he_capa = {
474 .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP),
475 .he_cap = {
476 .has_he = true,
477 .he_cap_elem = {
478 .mac_cap_info[0] =
479 IEEE80211_HE_MAC_CAP0_HTC_HE,
480 .mac_cap_info[1] =
481 IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
482 IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8,
483 .mac_cap_info[2] =
484 IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP |
485 IEEE80211_HE_MAC_CAP2_ACK_EN,
486 .mac_cap_info[3] =
487 IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU |
488 IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2,
489 .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
490 .phy_cap_info[0] =
491 IEEE80211_HE_PHY_CAP0_DUAL_BAND |
492 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
493 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
494 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
495 .phy_cap_info[1] =
496 IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
497 IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
498 IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS,
499 .phy_cap_info[2] =
500 IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
501 IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
502 IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ,
503 .phy_cap_info[3] =
504 IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
505 IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
506 IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
507 IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
508 .phy_cap_info[4] =
509 IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
510 IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
511 IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
512 .phy_cap_info[5] =
513 IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
514 IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
515 .phy_cap_info[6] =
516 IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
517 .phy_cap_info[7] =
518 IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
519 IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
520 IEEE80211_HE_PHY_CAP7_MAX_NC_7,
521 .phy_cap_info[8] =
522 IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
523 IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
524 IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
525 IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU,
526 },
527 /*
528 * Set default Tx/Rx HE MCS NSS Support field. Indicate support
529 * for up to 2 spatial streams and all MCS, without any special
530 * cases
531 */
532 .he_mcs_nss_supp = {
533 .rx_mcs_80 = cpu_to_le16(0xfffa),
534 .tx_mcs_80 = cpu_to_le16(0xfffa),
535 .rx_mcs_160 = cpu_to_le16(0xfffa),
536 .tx_mcs_160 = cpu_to_le16(0xfffa),
537 .rx_mcs_80p80 = cpu_to_le16(0xffff),
538 .tx_mcs_80p80 = cpu_to_le16(0xffff),
539 },
540 /*
541 * Set default PPE thresholds, with PPET16 set to 0, PPET8 set
542 * to 7
543 */
544 .ppe_thres = {0x61, 0x1c, 0xc7, 0x71},
545 },
546};
547
548static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband,
549 u8 tx_chains, u8 rx_chains)
550{
551 if (sband->band == NL80211_BAND_2GHZ ||
552 sband->band == NL80211_BAND_5GHZ)
553 sband->iftype_data = &iwl_he_capa;
554 else
555 return;
556
557 sband->n_iftype_data = 1;
558
559 /* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
560 if ((tx_chains & rx_chains) != ANT_AB) {
561 iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[1] &=
562 ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS;
563 iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[2] &=
564 ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS;
565 }
566}
567
466static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, 568static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
467 struct iwl_nvm_data *data, 569 struct iwl_nvm_data *data,
468 const __le16 *nvm_ch_flags, u8 tx_chains, 570 const __le16 *nvm_ch_flags, u8 tx_chains,
@@ -483,6 +585,9 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
483 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ, 585 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
484 tx_chains, rx_chains); 586 tx_chains, rx_chains);
485 587
588 if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
589 iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
590
486 sband = &data->bands[NL80211_BAND_5GHZ]; 591 sband = &data->bands[NL80211_BAND_5GHZ];
487 sband->band = NL80211_BAND_5GHZ; 592 sband->band = NL80211_BAND_5GHZ;
488 sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS]; 593 sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
@@ -495,6 +600,9 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
495 iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap, 600 iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
496 tx_chains, rx_chains); 601 tx_chains, rx_chains);
497 602
603 if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
604 iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
605
498 if (n_channels != n_used) 606 if (n_channels != n_used)
499 IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n", 607 IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
500 n_used, n_channels); 608 n_used, n_channels);
@@ -1293,6 +1401,8 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
1293 !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED); 1401 !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
1294 nvm->sku_cap_11n_enable = 1402 nvm->sku_cap_11n_enable =
1295 !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED); 1403 !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
1404 nvm->sku_cap_11ax_enable =
1405 !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
1296 nvm->sku_cap_band_24ghz_enable = 1406 nvm->sku_cap_band_24ghz_enable =
1297 !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED); 1407 !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
1298 nvm->sku_cap_band_52ghz_enable = 1408 nvm->sku_cap_band_52ghz_enable =
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 1b9c627ee34d..279dd7b7a3fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -350,6 +350,8 @@ static inline int
350iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size) 350iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
351{ 351{
352 switch (rb_size) { 352 switch (rb_size) {
353 case IWL_AMSDU_2K:
354 return get_order(2 * 1024);
353 case IWL_AMSDU_4K: 355 case IWL_AMSDU_4K:
354 return get_order(4 * 1024); 356 return get_order(4 * 1024);
355 case IWL_AMSDU_8K: 357 case IWL_AMSDU_8K:
@@ -438,6 +440,20 @@ struct iwl_trans_txq_scd_cfg {
438}; 440};
439 441
440/** 442/**
443 * struct iwl_trans_rxq_dma_data - RX queue DMA data
444 * @fr_bd_cb: DMA address of free BD cyclic buffer
445 * @fr_bd_wid: Initial write index of the free BD cyclic buffer
446 * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
447 * @ur_bd_cb: DMA address of used BD cyclic buffer
448 */
449struct iwl_trans_rxq_dma_data {
450 u64 fr_bd_cb;
451 u32 fr_bd_wid;
452 u64 urbd_stts_wrptr;
453 u64 ur_bd_cb;
454};
455
456/**
441 * struct iwl_trans_ops - transport specific operations 457 * struct iwl_trans_ops - transport specific operations
442 * 458 *
443 * All the handlers MUST be implemented 459 * All the handlers MUST be implemented
@@ -557,6 +573,8 @@ struct iwl_trans_ops {
557 int cmd_id, int size, 573 int cmd_id, int size,
558 unsigned int queue_wdg_timeout); 574 unsigned int queue_wdg_timeout);
559 void (*txq_free)(struct iwl_trans *trans, int queue); 575 void (*txq_free)(struct iwl_trans *trans, int queue);
576 int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
577 struct iwl_trans_rxq_dma_data *data);
560 578
561 void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id, 579 void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
562 bool shared); 580 bool shared);
@@ -753,6 +771,7 @@ struct iwl_trans {
753 const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv; 771 const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
754 const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; 772 const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
755 struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv; 773 struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
774 u32 dbg_dump_mask;
756 u8 dbg_dest_reg_num; 775 u8 dbg_dest_reg_num;
757 776
758 enum iwl_plat_pm_mode system_pm_mode; 777 enum iwl_plat_pm_mode system_pm_mode;
@@ -945,6 +964,16 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
945 cfg, queue_wdg_timeout); 964 cfg, queue_wdg_timeout);
946} 965}
947 966
967static inline int
968iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
969 struct iwl_trans_rxq_dma_data *data)
970{
971 if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
972 return -ENOTSUPP;
973
974 return trans->ops->rxq_dma_data(trans, queue, data);
975}
976
948static inline void 977static inline void
949iwl_trans_txq_free(struct iwl_trans *trans, int queue) 978iwl_trans_txq_free(struct iwl_trans *trans, int queue)
950{ 979{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 3fcf489f3120..79bdae994822 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1037,6 +1037,13 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1037 cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR); 1037 cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
1038#endif 1038#endif
1039 1039
1040 /*
1041 * TODO: this is needed because the firmware is not stopping
1042 * the recording automatically before entering D3. This can
1043 * be removed once the FW starts doing that.
1044 */
1045 iwl_fw_dbg_stop_recording(&mvm->fwrt);
1046
1040 /* must be last -- this switches firmware state */ 1047 /* must be last -- this switches firmware state */
1041 ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd); 1048 ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
1042 if (ret) 1049 if (ret)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 1c4178f20441..05b77419953c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1150,6 +1150,10 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
1150 struct iwl_rx_mpdu_desc *desc; 1150 struct iwl_rx_mpdu_desc *desc;
1151 int bin_len = count / 2; 1151 int bin_len = count / 2;
1152 int ret = -EINVAL; 1152 int ret = -EINVAL;
1153 size_t mpdu_cmd_hdr_size =
1154 (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
1155 sizeof(struct iwl_rx_mpdu_desc) :
1156 IWL_RX_DESC_SIZE_V1;
1153 1157
1154 if (!iwl_mvm_firmware_running(mvm)) 1158 if (!iwl_mvm_firmware_running(mvm))
1155 return -EIO; 1159 return -EIO;
@@ -1168,7 +1172,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
1168 goto out; 1172 goto out;
1169 1173
1170 /* avoid invalid memory access */ 1174 /* avoid invalid memory access */
1171 if (bin_len < sizeof(*pkt) + sizeof(*desc)) 1175 if (bin_len < sizeof(*pkt) + mpdu_cmd_hdr_size)
1172 goto out; 1176 goto out;
1173 1177
1174 /* check this is RX packet */ 1178 /* check this is RX packet */
@@ -1179,7 +1183,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
1179 /* check the length in metadata matches actual received length */ 1183 /* check the length in metadata matches actual received length */
1180 desc = (void *)pkt->data; 1184 desc = (void *)pkt->data;
1181 if (le16_to_cpu(desc->mpdu_len) != 1185 if (le16_to_cpu(desc->mpdu_len) !=
1182 (bin_len - sizeof(*desc) - sizeof(*pkt))) 1186 (bin_len - mpdu_cmd_hdr_size - sizeof(*pkt)))
1183 goto out; 1187 goto out;
1184 1188
1185 local_bh_disable(); 1189 local_bh_disable();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 866c91c923be..6bb1a99a197a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -130,6 +130,41 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
130 return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); 130 return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
131} 131}
132 132
133static int iwl_configure_rxq(struct iwl_mvm *mvm)
134{
135 int i, num_queues, size;
136 struct iwl_rfh_queue_config *cmd;
137
138 /* Do not configure default queue, it is configured via context info */
139 num_queues = mvm->trans->num_rx_queues - 1;
140
141 size = sizeof(*cmd) + num_queues * sizeof(struct iwl_rfh_queue_data);
142
143 cmd = kzalloc(size, GFP_KERNEL);
144 if (!cmd)
145 return -ENOMEM;
146
147 cmd->num_queues = num_queues;
148
149 for (i = 0; i < num_queues; i++) {
150 struct iwl_trans_rxq_dma_data data;
151
152 cmd->data[i].q_num = i + 1;
153 iwl_trans_get_rxq_dma_data(mvm->trans, i + 1, &data);
154
155 cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb);
156 cmd->data[i].urbd_stts_wrptr =
157 cpu_to_le64(data.urbd_stts_wrptr);
158 cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb);
159 cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid);
160 }
161
162 return iwl_mvm_send_cmd_pdu(mvm,
163 WIDE_ID(DATA_PATH_GROUP,
164 RFH_QUEUE_CONFIG_CMD),
165 0, size, cmd);
166}
167
133static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm) 168static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
134{ 169{
135 struct iwl_dqa_enable_cmd dqa_cmd = { 170 struct iwl_dqa_enable_cmd dqa_cmd = {
@@ -301,7 +336,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
301 if (ret) { 336 if (ret) {
302 struct iwl_trans *trans = mvm->trans; 337 struct iwl_trans *trans = mvm->trans;
303 338
304 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) 339 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
305 IWL_ERR(mvm, 340 IWL_ERR(mvm,
306 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", 341 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
307 iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS), 342 iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
@@ -1007,9 +1042,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
1007 goto error; 1042 goto error;
1008 1043
1009 /* Init RSS configuration */ 1044 /* Init RSS configuration */
1010 /* TODO - remove 22000 disablement when we have RXQ config API */ 1045 if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
1011 if (iwl_mvm_has_new_rx_api(mvm) && 1046 ret = iwl_configure_rxq(mvm);
1012 mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_22000) { 1047 if (ret) {
1048 IWL_ERR(mvm, "Failed to configure RX queues: %d\n",
1049 ret);
1050 goto error;
1051 }
1052 }
1053
1054 if (iwl_mvm_has_new_rx_api(mvm)) {
1013 ret = iwl_send_rss_cfg_cmd(mvm); 1055 ret = iwl_send_rss_cfg_cmd(mvm);
1014 if (ret) { 1056 if (ret) {
1015 IWL_ERR(mvm, "Failed to configure RSS queues: %d\n", 1057 IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 8ba16fc24e3a..b3fd20502abb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -780,6 +780,10 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
780 if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p) 780 if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p)
781 cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); 781 cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
782 782
783 if (vif->bss_conf.assoc && vif->bss_conf.he_support &&
784 !iwlwifi_mod_params.disable_11ax)
785 cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
786
783 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); 787 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
784} 788}
785 789
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 26021bc55e98..b15b0d84bb7e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -36,6 +36,7 @@
36 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 36 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
37 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 37 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
38 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 38 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
39 * Copyright(c) 2018 Intel Corporation
39 * All rights reserved. 40 * All rights reserved.
40 * 41 *
41 * Redistribution and use in source and binary forms, with or without 42 * Redistribution and use in source and binary forms, with or without
@@ -914,7 +915,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
914 enum ieee80211_ampdu_mlme_action action = params->action; 915 enum ieee80211_ampdu_mlme_action action = params->action;
915 u16 tid = params->tid; 916 u16 tid = params->tid;
916 u16 *ssn = &params->ssn; 917 u16 *ssn = &params->ssn;
917 u8 buf_size = params->buf_size; 918 u16 buf_size = params->buf_size;
918 bool amsdu = params->amsdu; 919 bool amsdu = params->amsdu;
919 u16 timeout = params->timeout; 920 u16 timeout = params->timeout;
920 921
@@ -1897,6 +1898,194 @@ void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
1897 iwl_mvm_mu_mimo_iface_iterator, notif); 1898 iwl_mvm_mu_mimo_iface_iterator, notif);
1898} 1899}
1899 1900
1901static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
1902{
1903 u8 byte_num = ppe_pos_bit / 8;
1904 u8 bit_num = ppe_pos_bit % 8;
1905 u8 residue_bits;
1906 u8 res;
1907
1908 if (bit_num <= 5)
1909 return (ppe[byte_num] >> bit_num) &
1910 (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1);
1911
1912 /*
1913 * If bit_num > 5, we have to combine bits with next byte.
1914 * Calculate how many bits we need to take from current byte (called
1915 * here "residue_bits"), and add them to bits from next byte.
1916 */
1917
1918 residue_bits = 8 - bit_num;
1919
1920 res = (ppe[byte_num + 1] &
1921 (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) <<
1922 residue_bits;
1923 res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1);
1924
1925 return res;
1926}
1927
1928static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
1929 struct ieee80211_vif *vif, u8 sta_id)
1930{
1931 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1932 struct iwl_he_sta_context_cmd sta_ctxt_cmd = {
1933 .sta_id = sta_id,
1934 .tid_limit = IWL_MAX_TID_COUNT,
1935 .bss_color = vif->bss_conf.bss_color,
1936 .htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext,
1937 .frame_time_rts_th =
1938 cpu_to_le16(vif->bss_conf.frame_time_rts_th),
1939 };
1940 struct ieee80211_sta *sta;
1941 u32 flags;
1942 int i;
1943
1944 rcu_read_lock();
1945
1946 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
1947 if (IS_ERR(sta)) {
1948 rcu_read_unlock();
1949 WARN(1, "Can't find STA to configure HE\n");
1950 return;
1951 }
1952
1953 if (!sta->he_cap.has_he) {
1954 rcu_read_unlock();
1955 return;
1956 }
1957
1958 flags = 0;
1959
1960 /* HTC flags */
1961 if (sta->he_cap.he_cap_elem.mac_cap_info[0] &
1962 IEEE80211_HE_MAC_CAP0_HTC_HE)
1963 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT);
1964 if ((sta->he_cap.he_cap_elem.mac_cap_info[1] &
1965 IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) ||
1966 (sta->he_cap.he_cap_elem.mac_cap_info[2] &
1967 IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) {
1968 u8 link_adap =
1969 ((sta->he_cap.he_cap_elem.mac_cap_info[2] &
1970 IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) +
1971 (sta->he_cap.he_cap_elem.mac_cap_info[1] &
1972 IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION);
1973
1974 if (link_adap == 2)
1975 sta_ctxt_cmd.htc_flags |=
1976 cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED);
1977 else if (link_adap == 3)
1978 sta_ctxt_cmd.htc_flags |=
1979 cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH);
1980 }
1981 if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
1982 IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED)
1983 sta_ctxt_cmd.htc_flags |=
1984 cpu_to_le32(IWL_HE_HTC_UL_MU_RESP_SCHED);
1985 if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
1986 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP);
1987 if (sta->he_cap.he_cap_elem.mac_cap_info[3] &
1988 IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
1989 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP);
1990 if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
1991 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP);
1992
1993 /* If PPE Thresholds exist, parse them into a FW-familiar format */
1994 if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
1995 IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
1996 u8 nss = (sta->he_cap.ppe_thres[0] &
1997 IEEE80211_PPE_THRES_NSS_MASK) + 1;
1998 u8 ru_index_bitmap =
1999 (sta->he_cap.ppe_thres[0] &
2000 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
2001 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
2002 u8 *ppe = &sta->he_cap.ppe_thres[0];
2003 u8 ppe_pos_bit = 7; /* Starting after PPE header */
2004
2005 /*
2006 * FW currently supports only nss == MAX_HE_SUPP_NSS
2007 *
2008 * If nss > MAX: we can ignore values we don't support
2009 * If nss < MAX: we can set zeros in other streams
2010 */
2011 if (nss > MAX_HE_SUPP_NSS) {
2012 IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
2013 MAX_HE_SUPP_NSS);
2014 nss = MAX_HE_SUPP_NSS;
2015 }
2016
2017 for (i = 0; i < nss; i++) {
2018 u8 ru_index_tmp = ru_index_bitmap << 1;
2019 u8 bw;
2020
2021 for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) {
2022 ru_index_tmp >>= 1;
2023 if (!(ru_index_tmp & 1))
2024 continue;
2025
2026 sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] =
2027 iwl_mvm_he_get_ppe_val(ppe,
2028 ppe_pos_bit);
2029 ppe_pos_bit +=
2030 IEEE80211_PPE_THRES_INFO_PPET_SIZE;
2031 sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] =
2032 iwl_mvm_he_get_ppe_val(ppe,
2033 ppe_pos_bit);
2034 ppe_pos_bit +=
2035 IEEE80211_PPE_THRES_INFO_PPET_SIZE;
2036 }
2037 }
2038
2039 flags |= STA_CTXT_HE_PACKET_EXT;
2040 }
2041 rcu_read_unlock();
2042
2043 /* Mark MU EDCA as enabled, unless none detected on some AC */
2044 flags |= STA_CTXT_HE_MU_EDCA_CW;
2045 for (i = 0; i < AC_NUM; i++) {
2046 struct ieee80211_he_mu_edca_param_ac_rec *mu_edca =
2047 &mvmvif->queue_params[i].mu_edca_param_rec;
2048
2049 if (!mvmvif->queue_params[i].mu_edca) {
2050 flags &= ~STA_CTXT_HE_MU_EDCA_CW;
2051 break;
2052 }
2053
2054 sta_ctxt_cmd.trig_based_txf[i].cwmin =
2055 cpu_to_le16(mu_edca->ecw_min_max & 0xf);
2056 sta_ctxt_cmd.trig_based_txf[i].cwmax =
2057 cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4);
2058 sta_ctxt_cmd.trig_based_txf[i].aifsn =
2059 cpu_to_le16(mu_edca->aifsn);
2060 sta_ctxt_cmd.trig_based_txf[i].mu_time =
2061 cpu_to_le16(mu_edca->mu_edca_timer);
2062 }
2063
2064 if (vif->bss_conf.multi_sta_back_32bit)
2065 flags |= STA_CTXT_HE_32BIT_BA_BITMAP;
2066
2067 if (vif->bss_conf.ack_enabled)
2068 flags |= STA_CTXT_HE_ACK_ENABLED;
2069
2070 if (vif->bss_conf.uora_exists) {
2071 flags |= STA_CTXT_HE_TRIG_RND_ALLOC;
2072
2073 sta_ctxt_cmd.rand_alloc_ecwmin =
2074 vif->bss_conf.uora_ocw_range & 0x7;
2075 sta_ctxt_cmd.rand_alloc_ecwmax =
2076 (vif->bss_conf.uora_ocw_range >> 3) & 0x7;
2077 }
2078
2079 /* TODO: support Multi BSSID IE */
2080
2081 sta_ctxt_cmd.flags = cpu_to_le32(flags);
2082
2083 if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD,
2084 DATA_PATH_GROUP, 0),
2085 0, sizeof(sta_ctxt_cmd), &sta_ctxt_cmd))
2086 IWL_ERR(mvm, "Failed to config FW to work HE!\n");
2087}
2088
1900static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, 2089static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
1901 struct ieee80211_vif *vif, 2090 struct ieee80211_vif *vif,
1902 struct ieee80211_bss_conf *bss_conf, 2091 struct ieee80211_bss_conf *bss_conf,
@@ -1910,8 +2099,13 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
1910 * beacon interval, which was not known when the station interface was 2099 * beacon interval, which was not known when the station interface was
1911 * added. 2100 * added.
1912 */ 2101 */
1913 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) 2102 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
2103 if (vif->bss_conf.he_support &&
2104 !iwlwifi_mod_params.disable_11ax)
2105 iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
2106
1914 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); 2107 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2108 }
1915 2109
1916 /* 2110 /*
1917 * If we're not associated yet, take the (new) BSSID before associating 2111 * If we're not associated yet, take the (new) BSSID before associating
@@ -4364,13 +4558,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
4364 atomic_set(&mvm->queue_sync_counter, 4558 atomic_set(&mvm->queue_sync_counter,
4365 mvm->trans->num_rx_queues); 4559 mvm->trans->num_rx_queues);
4366 4560
4367 /* TODO - remove this when we have RXQ config API */
4368 if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) {
4369 qmask = BIT(0);
4370 if (notif->sync)
4371 atomic_set(&mvm->queue_sync_counter, 1);
4372 }
4373
4374 ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size); 4561 ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
4375 if (ret) { 4562 if (ret) {
4376 IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret); 4563 IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 6a4ba160c59e..b3987a0a7018 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -654,7 +654,7 @@ struct iwl_mvm_tcm {
654struct iwl_mvm_reorder_buffer { 654struct iwl_mvm_reorder_buffer {
655 u16 head_sn; 655 u16 head_sn;
656 u16 num_stored; 656 u16 num_stored;
657 u8 buf_size; 657 u16 buf_size;
658 int queue; 658 int queue;
659 u16 last_amsdu; 659 u16 last_amsdu;
660 u8 last_sub_index; 660 u8 last_sub_index;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index ff1e518096c5..0e26619fb330 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -448,6 +448,8 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
448 HCMD_NAME(DQA_ENABLE_CMD), 448 HCMD_NAME(DQA_ENABLE_CMD),
449 HCMD_NAME(UPDATE_MU_GROUPS_CMD), 449 HCMD_NAME(UPDATE_MU_GROUPS_CMD),
450 HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), 450 HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
451 HCMD_NAME(STA_HE_CTXT_CMD),
452 HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
451 HCMD_NAME(STA_PM_NOTIF), 453 HCMD_NAME(STA_PM_NOTIF),
452 HCMD_NAME(MU_GROUP_MGMT_NOTIF), 454 HCMD_NAME(MU_GROUP_MGMT_NOTIF),
453 HCMD_NAME(RX_QUEUES_NOTIFICATION), 455 HCMD_NAME(RX_QUEUES_NOTIFICATION),
@@ -620,7 +622,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
620 622
621 if (iwl_mvm_has_new_rx_api(mvm)) { 623 if (iwl_mvm_has_new_rx_api(mvm)) {
622 op_mode->ops = &iwl_mvm_ops_mq; 624 op_mode->ops = &iwl_mvm_ops_mq;
623 trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc); 625 trans->rx_mpdu_cmd_hdr_size =
626 (trans->cfg->device_family >=
627 IWL_DEVICE_FAMILY_22560) ?
628 sizeof(struct iwl_rx_mpdu_desc) :
629 IWL_RX_DESC_SIZE_V1;
624 } else { 630 } else {
625 op_mode->ops = &iwl_mvm_ops; 631 op_mode->ops = &iwl_mvm_ops;
626 trans->rx_mpdu_cmd_hdr_size = 632 trans->rx_mpdu_cmd_hdr_size =
@@ -703,11 +709,17 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
703 } 709 }
704 710
705 /* the hardware splits the A-MSDU */ 711 /* the hardware splits the A-MSDU */
706 if (mvm->cfg->mq_rx_supported) 712 if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
713 trans_cfg.rx_buf_size = IWL_AMSDU_2K;
714 /* TODO: remove when balanced power mode is fw supported */
715 iwlmvm_mod_params.power_scheme = IWL_POWER_SCHEME_CAM;
716 } else if (mvm->cfg->mq_rx_supported) {
707 trans_cfg.rx_buf_size = IWL_AMSDU_4K; 717 trans_cfg.rx_buf_size = IWL_AMSDU_4K;
718 }
708 719
709 trans->wide_cmd_header = true; 720 trans->wide_cmd_header = true;
710 trans_cfg.bc_table_dword = true; 721 trans_cfg.bc_table_dword =
722 mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560;
711 723
712 trans_cfg.command_groups = iwl_mvm_groups; 724 trans_cfg.command_groups = iwl_mvm_groups;
713 trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups); 725 trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
@@ -738,6 +750,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
738 memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv, 750 memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
739 sizeof(trans->dbg_conf_tlv)); 751 sizeof(trans->dbg_conf_tlv));
740 trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv; 752 trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
753 trans->dbg_dump_mask = mvm->fw->dbg_dump_mask;
741 754
742 trans->iml = mvm->fw->iml; 755 trans->iml = mvm->fw->iml;
743 trans->iml_len = mvm->fw->iml_len; 756 trans->iml_len = mvm->fw->iml_len;
@@ -1003,10 +1016,8 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
1003 list_add_tail(&entry->list, &mvm->async_handlers_list); 1016 list_add_tail(&entry->list, &mvm->async_handlers_list);
1004 spin_unlock(&mvm->async_handlers_lock); 1017 spin_unlock(&mvm->async_handlers_lock);
1005 schedule_work(&mvm->async_handlers_wk); 1018 schedule_work(&mvm->async_handlers_wk);
1006 return; 1019 break;
1007 } 1020 }
1008
1009 iwl_fwrt_handle_notification(&mvm->fwrt, rxb);
1010} 1021}
1011 1022
1012static void iwl_mvm_rx(struct iwl_op_mode *op_mode, 1023static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index b8b2b819e8e7..8169d1450b3b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -183,6 +183,43 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
183 } 183 }
184} 184}
185 185
186static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs)
187{
188 switch (mcs) {
189 case IEEE80211_HE_MCS_SUPPORT_0_7:
190 return BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1;
191 case IEEE80211_HE_MCS_SUPPORT_0_9:
192 return BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1;
193 case IEEE80211_HE_MCS_SUPPORT_0_11:
194 return BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1;
195 case IEEE80211_HE_MCS_NOT_SUPPORTED:
196 return 0;
197 }
198
199 WARN(1, "invalid HE MCS %d\n", mcs);
200 return 0;
201}
202
203static void
204rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
205 const struct ieee80211_sta_he_cap *he_cap,
206 struct iwl_tlc_config_cmd *cmd)
207{
208 u16 mcs_160 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_160);
209 u16 mcs_80 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80);
210 int i;
211
212 for (i = 0; i < sta->rx_nss && i < MAX_NSS; i++) {
213 u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
214 u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
215
216 cmd->ht_rates[i][0] =
217 cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80));
218 cmd->ht_rates[i][1] =
219 cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160));
220 }
221}
222
186static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, 223static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
187 struct ieee80211_supported_band *sband, 224 struct ieee80211_supported_band *sband,
188 struct iwl_tlc_config_cmd *cmd) 225 struct iwl_tlc_config_cmd *cmd)
@@ -192,6 +229,7 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
192 unsigned long supp; /* must be unsigned long for for_each_set_bit */ 229 unsigned long supp; /* must be unsigned long for for_each_set_bit */
193 const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 230 const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
194 const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; 231 const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
232 const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
195 233
196 /* non HT rates */ 234 /* non HT rates */
197 supp = 0; 235 supp = 0;
@@ -202,7 +240,11 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
202 cmd->non_ht_rates = cpu_to_le16(supp); 240 cmd->non_ht_rates = cpu_to_le16(supp);
203 cmd->mode = IWL_TLC_MNG_MODE_NON_HT; 241 cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
204 242
205 if (vht_cap && vht_cap->vht_supported) { 243 /* HT/VHT rates */
244 if (he_cap && he_cap->has_he) {
245 cmd->mode = IWL_TLC_MNG_MODE_HE;
246 rs_fw_he_set_enabled_rates(sta, he_cap, cmd);
247 } else if (vht_cap && vht_cap->vht_supported) {
206 cmd->mode = IWL_TLC_MNG_MODE_VHT; 248 cmd->mode = IWL_TLC_MNG_MODE_VHT;
207 rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd); 249 rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
208 } else if (ht_cap && ht_cap->ht_supported) { 250 } else if (ht_cap && ht_cap->ht_supported) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 642da10b0b7f..30cfd7d50bc9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -363,7 +363,8 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
363 idx += 1; 363 idx += 1;
364 if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE)) 364 if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE))
365 return idx; 365 return idx;
366 } else if (rate_n_flags & RATE_MCS_VHT_MSK) { 366 } else if (rate_n_flags & RATE_MCS_VHT_MSK ||
367 rate_n_flags & RATE_MCS_HE_MSK) {
367 idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; 368 idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
368 idx += IWL_RATE_MCS_0_INDEX; 369 idx += IWL_RATE_MCS_0_INDEX;
369 370
@@ -372,6 +373,9 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
372 idx++; 373 idx++;
373 if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE)) 374 if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE))
374 return idx; 375 return idx;
376 if ((rate_n_flags & RATE_MCS_HE_MSK) &&
377 (idx <= IWL_LAST_HE_RATE))
378 return idx;
375 } else { 379 } else {
376 /* legacy rate format, search for match in table */ 380 /* legacy rate format, search for match in table */
377 381
@@ -516,6 +520,8 @@ static const char *rs_pretty_lq_type(enum iwl_table_type type)
516 [LQ_HT_MIMO2] = "HT MIMO", 520 [LQ_HT_MIMO2] = "HT MIMO",
517 [LQ_VHT_SISO] = "VHT SISO", 521 [LQ_VHT_SISO] = "VHT SISO",
518 [LQ_VHT_MIMO2] = "VHT MIMO", 522 [LQ_VHT_MIMO2] = "VHT MIMO",
523 [LQ_HE_SISO] = "HE SISO",
524 [LQ_HE_MIMO2] = "HE MIMO",
519 }; 525 };
520 526
521 if (type < LQ_NONE || type >= LQ_MAX) 527 if (type < LQ_NONE || type >= LQ_MAX)
@@ -900,7 +906,8 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
900 906
901 /* Legacy */ 907 /* Legacy */
902 if (!(ucode_rate & RATE_MCS_HT_MSK) && 908 if (!(ucode_rate & RATE_MCS_HT_MSK) &&
903 !(ucode_rate & RATE_MCS_VHT_MSK)) { 909 !(ucode_rate & RATE_MCS_VHT_MSK) &&
910 !(ucode_rate & RATE_MCS_HE_MSK)) {
904 if (num_of_ant == 1) { 911 if (num_of_ant == 1) {
905 if (band == NL80211_BAND_5GHZ) 912 if (band == NL80211_BAND_5GHZ)
906 rate->type = LQ_LEGACY_A; 913 rate->type = LQ_LEGACY_A;
@@ -911,7 +918,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
911 return 0; 918 return 0;
912 } 919 }
913 920
914 /* HT or VHT */ 921 /* HT, VHT or HE */
915 if (ucode_rate & RATE_MCS_SGI_MSK) 922 if (ucode_rate & RATE_MCS_SGI_MSK)
916 rate->sgi = true; 923 rate->sgi = true;
917 if (ucode_rate & RATE_MCS_LDPC_MSK) 924 if (ucode_rate & RATE_MCS_LDPC_MSK)
@@ -953,10 +960,24 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
953 } else { 960 } else {
954 WARN_ON_ONCE(1); 961 WARN_ON_ONCE(1);
955 } 962 }
963 } else if (ucode_rate & RATE_MCS_HE_MSK) {
964 nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >>
965 RATE_VHT_MCS_NSS_POS) + 1;
966
967 if (nss == 1) {
968 rate->type = LQ_HE_SISO;
969 WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
970 "stbc %d bfer %d", rate->stbc, rate->bfer);
971 } else if (nss == 2) {
972 rate->type = LQ_HE_MIMO2;
973 WARN_ON_ONCE(num_of_ant != 2);
974 } else {
975 WARN_ON_ONCE(1);
976 }
956 } 977 }
957 978
958 WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 && 979 WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 &&
959 !is_vht(rate)); 980 !is_he(rate) && !is_vht(rate));
960 981
961 return 0; 982 return 0;
962} 983}
@@ -3606,7 +3627,8 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
3606 u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS; 3627 u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
3607 3628
3608 if (!(rate & RATE_MCS_HT_MSK) && 3629 if (!(rate & RATE_MCS_HT_MSK) &&
3609 !(rate & RATE_MCS_VHT_MSK)) { 3630 !(rate & RATE_MCS_VHT_MSK) &&
3631 !(rate & RATE_MCS_HE_MSK)) {
3610 int index = iwl_hwrate_to_plcp_idx(rate); 3632 int index = iwl_hwrate_to_plcp_idx(rate);
3611 3633
3612 return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps\n", 3634 return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps\n",
@@ -3625,6 +3647,11 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
3625 mcs = rate & RATE_HT_MCS_INDEX_MSK; 3647 mcs = rate & RATE_HT_MCS_INDEX_MSK;
3626 nss = ((rate & RATE_HT_MCS_NSS_MSK) 3648 nss = ((rate & RATE_HT_MCS_NSS_MSK)
3627 >> RATE_HT_MCS_NSS_POS) + 1; 3649 >> RATE_HT_MCS_NSS_POS) + 1;
3650 } else if (rate & RATE_MCS_HE_MSK) {
3651 type = "HE";
3652 mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
3653 nss = ((rate & RATE_VHT_MCS_NSS_MSK)
3654 >> RATE_VHT_MCS_NSS_POS) + 1;
3628 } else { 3655 } else {
3629 type = "Unknown"; /* shouldn't happen */ 3656 type = "Unknown"; /* shouldn't happen */
3630 } 3657 }
@@ -3886,6 +3913,8 @@ static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
3886 [IWL_RATE_MCS_7_INDEX] = "MCS7", 3913 [IWL_RATE_MCS_7_INDEX] = "MCS7",
3887 [IWL_RATE_MCS_8_INDEX] = "MCS8", 3914 [IWL_RATE_MCS_8_INDEX] = "MCS8",
3888 [IWL_RATE_MCS_9_INDEX] = "MCS9", 3915 [IWL_RATE_MCS_9_INDEX] = "MCS9",
3916 [IWL_RATE_MCS_10_INDEX] = "MCS10",
3917 [IWL_RATE_MCS_11_INDEX] = "MCS11",
3889 }; 3918 };
3890 3919
3891 char *buff, *pos, *endpos; 3920 char *buff, *pos, *endpos;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index cffb8c852934..d2cf484e2b73 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -144,8 +144,13 @@ enum {
144 144
145#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63) 145#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63)
146#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63) 146#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
147#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (64) 147/*
148#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (64) 148 * FIXME - various places in firmware API still use u8,
149 * e.g. LQ command and SCD config command.
150 * This should be 256 instead.
151 */
152#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (255)
153#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (255)
149#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) 154#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
150 155
151#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ 156#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
@@ -162,6 +167,8 @@ enum iwl_table_type {
162 LQ_HT_MIMO2, 167 LQ_HT_MIMO2,
163 LQ_VHT_SISO, /* VHT types */ 168 LQ_VHT_SISO, /* VHT types */
164 LQ_VHT_MIMO2, 169 LQ_VHT_MIMO2,
170 LQ_HE_SISO, /* HE types */
171 LQ_HE_MIMO2,
165 LQ_MAX, 172 LQ_MAX,
166}; 173};
167 174
@@ -183,11 +190,16 @@ struct rs_rate {
183#define is_type_ht_mimo2(type) ((type) == LQ_HT_MIMO2) 190#define is_type_ht_mimo2(type) ((type) == LQ_HT_MIMO2)
184#define is_type_vht_siso(type) ((type) == LQ_VHT_SISO) 191#define is_type_vht_siso(type) ((type) == LQ_VHT_SISO)
185#define is_type_vht_mimo2(type) ((type) == LQ_VHT_MIMO2) 192#define is_type_vht_mimo2(type) ((type) == LQ_VHT_MIMO2)
186#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type)) 193#define is_type_he_siso(type) ((type) == LQ_HE_SISO)
187#define is_type_mimo2(type) (is_type_ht_mimo2(type) || is_type_vht_mimo2(type)) 194#define is_type_he_mimo2(type) ((type) == LQ_HE_MIMO2)
195#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type) || \
196 is_type_he_siso(type))
197#define is_type_mimo2(type) (is_type_ht_mimo2(type) || \
198 is_type_vht_mimo2(type) || is_type_he_mimo2(type))
188#define is_type_mimo(type) (is_type_mimo2(type)) 199#define is_type_mimo(type) (is_type_mimo2(type))
189#define is_type_ht(type) (is_type_ht_siso(type) || is_type_ht_mimo2(type)) 200#define is_type_ht(type) (is_type_ht_siso(type) || is_type_ht_mimo2(type))
190#define is_type_vht(type) (is_type_vht_siso(type) || is_type_vht_mimo2(type)) 201#define is_type_vht(type) (is_type_vht_siso(type) || is_type_vht_mimo2(type))
202#define is_type_he(type) (is_type_he_siso(type) || is_type_he_mimo2(type))
191#define is_type_a_band(type) ((type) == LQ_LEGACY_A) 203#define is_type_a_band(type) ((type) == LQ_LEGACY_A)
192#define is_type_g_band(type) ((type) == LQ_LEGACY_G) 204#define is_type_g_band(type) ((type) == LQ_LEGACY_G)
193 205
@@ -201,6 +213,7 @@ struct rs_rate {
201#define is_mimo(rate) is_type_mimo((rate)->type) 213#define is_mimo(rate) is_type_mimo((rate)->type)
202#define is_ht(rate) is_type_ht((rate)->type) 214#define is_ht(rate) is_type_ht((rate)->type)
203#define is_vht(rate) is_type_vht((rate)->type) 215#define is_vht(rate) is_type_vht((rate)->type)
216#define is_he(rate) is_type_he((rate)->type)
204#define is_a_band(rate) is_type_a_band((rate)->type) 217#define is_a_band(rate) is_type_a_band((rate)->type)
205#define is_g_band(rate) is_type_g_band((rate)->type) 218#define is_g_band(rate) is_type_g_band((rate)->type)
206 219
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 129c4c09648d..b53148f972a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -8,6 +8,7 @@
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 10 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation
11 * 12 *
12 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as 14 * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
30 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 31 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
31 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
32 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 33 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
34 * Copyright(c) 2018 Intel Corporation
33 * All rights reserved. 35 * All rights reserved.
34 * 36 *
35 * Redistribution and use in source and binary forms, with or without 37 * Redistribution and use in source and binary forms, with or without
@@ -196,22 +198,31 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
196 struct sk_buff *skb, int queue, 198 struct sk_buff *skb, int queue,
197 struct ieee80211_sta *sta) 199 struct ieee80211_sta *sta)
198{ 200{
199 if (iwl_mvm_check_pn(mvm, skb, queue, sta)) 201 struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
202
203 if (iwl_mvm_check_pn(mvm, skb, queue, sta)) {
200 kfree_skb(skb); 204 kfree_skb(skb);
201 else 205 } else {
206 unsigned int radiotap_len = 0;
207
208 if (rx_status->flag & RX_FLAG_RADIOTAP_HE)
209 radiotap_len += sizeof(struct ieee80211_radiotap_he);
210 if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU)
211 radiotap_len += sizeof(struct ieee80211_radiotap_he_mu);
212 __skb_push(skb, radiotap_len);
202 ieee80211_rx_napi(mvm->hw, sta, skb, napi); 213 ieee80211_rx_napi(mvm->hw, sta, skb, napi);
214 }
203} 215}
204 216
205static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, 217static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
206 struct iwl_rx_mpdu_desc *desc, 218 struct ieee80211_rx_status *rx_status,
207 struct ieee80211_rx_status *rx_status) 219 u32 rate_n_flags, int energy_a,
220 int energy_b)
208{ 221{
209 int energy_a, energy_b, max_energy; 222 int max_energy;
210 u32 rate_flags = le32_to_cpu(desc->rate_n_flags); 223 u32 rate_flags = rate_n_flags;
211 224
212 energy_a = desc->energy_a;
213 energy_a = energy_a ? -energy_a : S8_MIN; 225 energy_a = energy_a ? -energy_a : S8_MIN;
214 energy_b = desc->energy_b;
215 energy_b = energy_b ? -energy_b : S8_MIN; 226 energy_b = energy_b ? -energy_b : S8_MIN;
216 max_energy = max(energy_a, energy_b); 227 max_energy = max(energy_a, energy_b);
217 228
@@ -356,7 +367,8 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
356 tid = IWL_MAX_TID_COUNT; 367 tid = IWL_MAX_TID_COUNT;
357 368
358 /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */ 369 /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
359 sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; 370 sub_frame_idx = desc->amsdu_info &
371 IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
360 372
361 if (unlikely(ieee80211_has_retry(hdr->frame_control) && 373 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
362 dup_data->last_seq[tid] == hdr->seq_ctrl && 374 dup_data->last_seq[tid] == hdr->seq_ctrl &&
@@ -850,17 +862,41 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
850 struct ieee80211_rx_status *rx_status; 862 struct ieee80211_rx_status *rx_status;
851 struct iwl_rx_packet *pkt = rxb_addr(rxb); 863 struct iwl_rx_packet *pkt = rxb_addr(rxb);
852 struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; 864 struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
853 struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc)); 865 struct ieee80211_hdr *hdr;
854 u32 len = le16_to_cpu(desc->mpdu_len); 866 u32 len = le16_to_cpu(desc->mpdu_len);
855 u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags); 867 u32 rate_n_flags, gp2_on_air_rise;
856 u16 phy_info = le16_to_cpu(desc->phy_info); 868 u16 phy_info = le16_to_cpu(desc->phy_info);
857 struct ieee80211_sta *sta = NULL; 869 struct ieee80211_sta *sta = NULL;
858 struct sk_buff *skb; 870 struct sk_buff *skb;
859 u8 crypt_len = 0; 871 u8 crypt_len = 0, channel, energy_a, energy_b;
872 struct ieee80211_radiotap_he *he = NULL;
873 struct ieee80211_radiotap_he_mu *he_mu = NULL;
874 u32 he_type = 0xffffffff;
875 /* this is invalid e.g. because puncture type doesn't allow 0b11 */
876#define HE_PHY_DATA_INVAL ((u64)-1)
877 u64 he_phy_data = HE_PHY_DATA_INVAL;
878 size_t desc_size;
860 879
861 if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) 880 if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
862 return; 881 return;
863 882
883 if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
884 rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
885 channel = desc->v3.channel;
886 gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
887 energy_a = desc->v3.energy_a;
888 energy_b = desc->v3.energy_b;
889 desc_size = sizeof(*desc);
890 } else {
891 rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags);
892 channel = desc->v1.channel;
893 gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise);
894 energy_a = desc->v1.energy_a;
895 energy_b = desc->v1.energy_b;
896 desc_size = IWL_RX_DESC_SIZE_V1;
897 }
898
899 hdr = (void *)(pkt->data + desc_size);
864 /* Dont use dev_alloc_skb(), we'll have enough headroom once 900 /* Dont use dev_alloc_skb(), we'll have enough headroom once
865 * ieee80211_hdr pulled. 901 * ieee80211_hdr pulled.
866 */ 902 */
@@ -882,6 +918,51 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
882 918
883 rx_status = IEEE80211_SKB_RXCB(skb); 919 rx_status = IEEE80211_SKB_RXCB(skb);
884 920
921 if (rate_n_flags & RATE_MCS_HE_MSK) {
922 static const struct ieee80211_radiotap_he known = {
923 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
924 IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
925 IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
926 IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
927 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
928 IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
929 };
930 static const struct ieee80211_radiotap_he_mu mu_known = {
931 .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
932 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
933 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
934 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
935 .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN),
936 };
937 unsigned int radiotap_len = 0;
938
939 he = skb_put_data(skb, &known, sizeof(known));
940 radiotap_len += sizeof(known);
941 rx_status->flag |= RX_FLAG_RADIOTAP_HE;
942
943 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
944
945 if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) {
946 if (mvm->trans->cfg->device_family >=
947 IWL_DEVICE_FAMILY_22560)
948 he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
949 else
950 he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
951
952 if (he_type == RATE_MCS_HE_TYPE_MU) {
953 he_mu = skb_put_data(skb, &mu_known,
954 sizeof(mu_known));
955 radiotap_len += sizeof(mu_known);
956 rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
957 }
958 }
959
960 /* temporarily hide the radiotap data */
961 __skb_pull(skb, radiotap_len);
962 }
963
964 rx_status = IEEE80211_SKB_RXCB(skb);
965
885 if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc, 966 if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc,
886 le32_to_cpu(pkt->len_n_flags), queue, 967 le32_to_cpu(pkt->len_n_flags), queue,
887 &crypt_len)) { 968 &crypt_len)) {
@@ -904,20 +985,80 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
904 rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; 985 rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
905 986
906 if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) { 987 if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
907 rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise); 988 u64 tsf_on_air_rise;
989
990 if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
991 tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise);
992 else
993 tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise);
994
995 rx_status->mactime = tsf_on_air_rise;
908 /* TSF as indicated by the firmware is at INA time */ 996 /* TSF as indicated by the firmware is at INA time */
909 rx_status->flag |= RX_FLAG_MACTIME_PLCP_START; 997 rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
998 } else if (he_type == RATE_MCS_HE_TYPE_SU) {
999 u64 he_phy_data;
1000
1001 if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1002 he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
1003 else
1004 he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
1005
1006 he->data1 |=
1007 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
1008 if (FIELD_GET(IWL_RX_HE_PHY_UPLINK,
1009 he_phy_data))
1010 he->data3 |=
1011 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
1012
1013 if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
1014 rx_status->ampdu_reference = mvm->ampdu_ref;
1015 mvm->ampdu_ref++;
1016
1017 rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
1018 rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
1019 if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
1020 he_phy_data))
1021 rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
1022 }
1023 } else if (he_mu && he_phy_data != HE_PHY_DATA_INVAL) {
1024 he_mu->flags1 |=
1025 le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK,
1026 he_phy_data),
1027 IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
1028 he_mu->flags1 |=
1029 le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_DCM,
1030 he_phy_data),
1031 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
1032 he_mu->flags1 |=
1033 le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_MCS_MASK,
1034 he_phy_data),
1035 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
1036 he_mu->flags2 |=
1037 le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_COMPRESSION,
1038 he_phy_data),
1039 IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
1040 he_mu->flags2 |=
1041 le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK,
1042 he_phy_data),
1043 IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
910 } 1044 }
911 rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise); 1045 rx_status->device_timestamp = gp2_on_air_rise;
912 rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ : 1046 rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
913 NL80211_BAND_2GHZ; 1047 NL80211_BAND_2GHZ;
914 rx_status->freq = ieee80211_channel_to_frequency(desc->channel, 1048 rx_status->freq = ieee80211_channel_to_frequency(channel,
915 rx_status->band); 1049 rx_status->band);
916 iwl_mvm_get_signal_strength(mvm, desc, rx_status); 1050 iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
1051 energy_b);
917 1052
918 /* update aggregation data for monitor sake on default queue */ 1053 /* update aggregation data for monitor sake on default queue */
919 if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) { 1054 if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
920 bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; 1055 bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
1056 u64 he_phy_data;
1057
1058 if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1059 he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
1060 else
1061 he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
921 1062
922 rx_status->flag |= RX_FLAG_AMPDU_DETAILS; 1063 rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
923 rx_status->ampdu_reference = mvm->ampdu_ref; 1064 rx_status->ampdu_reference = mvm->ampdu_ref;
@@ -925,6 +1066,15 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
925 if (toggle_bit != mvm->ampdu_toggle) { 1066 if (toggle_bit != mvm->ampdu_toggle) {
926 mvm->ampdu_ref++; 1067 mvm->ampdu_ref++;
927 mvm->ampdu_toggle = toggle_bit; 1068 mvm->ampdu_toggle = toggle_bit;
1069
1070 if (he_phy_data != HE_PHY_DATA_INVAL &&
1071 he_type == RATE_MCS_HE_TYPE_MU) {
1072 rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
1073 if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
1074 he_phy_data))
1075 rx_status->flag |=
1076 RX_FLAG_AMPDU_EOF_BIT;
1077 }
928 } 1078 }
929 } 1079 }
930 1080
@@ -1033,7 +1183,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
1033 } 1183 }
1034 } 1184 }
1035 1185
1036 /* Set up the HT phy flags */
1037 switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { 1186 switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
1038 case RATE_MCS_CHAN_WIDTH_20: 1187 case RATE_MCS_CHAN_WIDTH_20:
1039 break; 1188 break;
@@ -1048,6 +1197,70 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
1048 break; 1197 break;
1049 } 1198 }
1050 1199
1200 if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
1201 rate_n_flags & RATE_MCS_HE_106T_MSK) {
1202 rx_status->bw = RATE_INFO_BW_HE_RU;
1203 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
1204 }
1205
1206 if (rate_n_flags & RATE_MCS_HE_MSK &&
1207 phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD &&
1208 he_type == RATE_MCS_HE_TYPE_MU) {
1209 /*
1210 * Unfortunately, we have to leave the mac80211 data
1211 * incorrect for the case that we receive an HE-MU
1212 * transmission and *don't* have the he_mu pointer,
1213 * i.e. we don't have the phy data (due to the bits
1214 * being used for TSF). This shouldn't happen though
1215 * as management frames where we need the TSF/timers
1216 * are not be transmitted in HE-MU, I think.
1217 */
1218 u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
1219 u8 offs = 0;
1220
1221 rx_status->bw = RATE_INFO_BW_HE_RU;
1222
1223 switch (ru) {
1224 case 0 ... 36:
1225 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
1226 offs = ru;
1227 break;
1228 case 37 ... 52:
1229 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
1230 offs = ru - 37;
1231 break;
1232 case 53 ... 60:
1233 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
1234 offs = ru - 53;
1235 break;
1236 case 61 ... 64:
1237 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
1238 offs = ru - 61;
1239 break;
1240 case 65 ... 66:
1241 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
1242 offs = ru - 65;
1243 break;
1244 case 67:
1245 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
1246 break;
1247 case 68:
1248 rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
1249 break;
1250 }
1251 he->data2 |=
1252 le16_encode_bits(offs,
1253 IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
1254 he->data2 |=
1255 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN);
1256 if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
1257 he->data2 |=
1258 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
1259 } else if (he) {
1260 he->data1 |=
1261 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
1262 }
1263
1051 if (!(rate_n_flags & RATE_MCS_CCK_MSK) && 1264 if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
1052 rate_n_flags & RATE_MCS_SGI_MSK) 1265 rate_n_flags & RATE_MCS_SGI_MSK)
1053 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 1266 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
@@ -1072,6 +1285,119 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
1072 rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; 1285 rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
1073 if (rate_n_flags & RATE_MCS_BF_MSK) 1286 if (rate_n_flags & RATE_MCS_BF_MSK)
1074 rx_status->enc_flags |= RX_ENC_FLAG_BF; 1287 rx_status->enc_flags |= RX_ENC_FLAG_BF;
1288 } else if (he) {
1289 u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
1290 RATE_MCS_STBC_POS;
1291 rx_status->nss =
1292 ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
1293 RATE_VHT_MCS_NSS_POS) + 1;
1294 rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
1295 rx_status->encoding = RX_ENC_HE;
1296 rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
1297 if (rate_n_flags & RATE_MCS_BF_MSK)
1298 rx_status->enc_flags |= RX_ENC_FLAG_BF;
1299
1300 rx_status->he_dcm =
1301 !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
1302
1303#define CHECK_TYPE(F) \
1304 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
1305 (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
1306
1307 CHECK_TYPE(SU);
1308 CHECK_TYPE(EXT_SU);
1309 CHECK_TYPE(MU);
1310 CHECK_TYPE(TRIG);
1311
1312 he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
1313
1314 if (rate_n_flags & RATE_MCS_BF_POS)
1315 he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
1316
1317 switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
1318 RATE_MCS_HE_GI_LTF_POS) {
1319 case 0:
1320 rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
1321 break;
1322 case 1:
1323 rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
1324 break;
1325 case 2:
1326 rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
1327 break;
1328 case 3:
1329 if (rate_n_flags & RATE_MCS_SGI_MSK)
1330 rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
1331 else
1332 rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
1333 break;
1334 }
1335
1336 switch (he_type) {
1337 case RATE_MCS_HE_TYPE_SU: {
1338 u16 val;
1339
1340 /* LTF syms correspond to streams */
1341 he->data2 |=
1342 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
1343 switch (rx_status->nss) {
1344 case 1:
1345 val = 0;
1346 break;
1347 case 2:
1348 val = 1;
1349 break;
1350 case 3:
1351 case 4:
1352 val = 2;
1353 break;
1354 case 5:
1355 case 6:
1356 val = 3;
1357 break;
1358 case 7:
1359 case 8:
1360 val = 4;
1361 break;
1362 default:
1363 WARN_ONCE(1, "invalid nss: %d\n",
1364 rx_status->nss);
1365 val = 0;
1366 }
1367 he->data5 |=
1368 le16_encode_bits(val,
1369 IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
1370 }
1371 break;
1372 case RATE_MCS_HE_TYPE_MU: {
1373 u16 val;
1374 u64 he_phy_data;
1375
1376 if (mvm->trans->cfg->device_family >=
1377 IWL_DEVICE_FAMILY_22560)
1378 he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
1379 else
1380 he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
1381
1382 if (he_phy_data == HE_PHY_DATA_INVAL)
1383 break;
1384
1385 val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
1386 he_phy_data);
1387
1388 he->data2 |=
1389 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
1390 he->data5 |=
1391 cpu_to_le16(FIELD_PREP(
1392 IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS,
1393 val));
1394 }
1395 break;
1396 case RATE_MCS_HE_TYPE_EXT_SU:
1397 case RATE_MCS_HE_TYPE_TRIG:
1398 /* not supported yet */
1399 break;
1400 }
1075 } else { 1401 } else {
1076 int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, 1402 int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
1077 rx_status->band); 1403 rx_status->band);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 9263b9aa8b72..18db1ed92d9b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2184,7 +2184,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2184 2184
2185static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, 2185static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2186 struct iwl_mvm_baid_data *data, 2186 struct iwl_mvm_baid_data *data,
2187 u16 ssn, u8 buf_size) 2187 u16 ssn, u16 buf_size)
2188{ 2188{
2189 int i; 2189 int i;
2190 2190
@@ -2211,7 +2211,7 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2211} 2211}
2212 2212
2213int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 2213int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2214 int tid, u16 ssn, bool start, u8 buf_size, u16 timeout) 2214 int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
2215{ 2215{
2216 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 2216 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2217 struct iwl_mvm_add_sta_cmd cmd = {}; 2217 struct iwl_mvm_add_sta_cmd cmd = {};
@@ -2273,7 +2273,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2273 if (start) { 2273 if (start) {
2274 cmd.add_immediate_ba_tid = (u8) tid; 2274 cmd.add_immediate_ba_tid = (u8) tid;
2275 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); 2275 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
2276 cmd.rx_ba_window = cpu_to_le16((u16)buf_size); 2276 cmd.rx_ba_window = cpu_to_le16(buf_size);
2277 } else { 2277 } else {
2278 cmd.remove_immediate_ba_tid = (u8) tid; 2278 cmd.remove_immediate_ba_tid = (u8) tid;
2279 } 2279 }
@@ -2559,7 +2559,7 @@ out:
2559} 2559}
2560 2560
2561int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2561int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2562 struct ieee80211_sta *sta, u16 tid, u8 buf_size, 2562 struct ieee80211_sta *sta, u16 tid, u16 buf_size,
2563 bool amsdu) 2563 bool amsdu)
2564{ 2564{
2565 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 2565 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 1c43ea8dd8cc..0fc211108149 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -412,7 +412,7 @@ struct iwl_mvm_sta {
412 u32 tfd_queue_msk; 412 u32 tfd_queue_msk;
413 u32 mac_id_n_color; 413 u32 mac_id_n_color;
414 u16 tid_disable_agg; 414 u16 tid_disable_agg;
415 u8 max_agg_bufsize; 415 u16 max_agg_bufsize;
416 enum iwl_sta_type sta_type; 416 enum iwl_sta_type sta_type;
417 enum ieee80211_sta_state sta_state; 417 enum ieee80211_sta_state sta_state;
418 bool bt_reduced_txpower; 418 bool bt_reduced_txpower;
@@ -518,11 +518,11 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
518 518
519/* AMPDU */ 519/* AMPDU */
520int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 520int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
521 int tid, u16 ssn, bool start, u8 buf_size, u16 timeout); 521 int tid, u16 ssn, bool start, u16 buf_size, u16 timeout);
522int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 522int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
523 struct ieee80211_sta *sta, u16 tid, u16 *ssn); 523 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
524int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 524int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
525 struct ieee80211_sta *sta, u16 tid, u8 buf_size, 525 struct ieee80211_sta *sta, u16 tid, u16 buf_size,
526 bool amsdu); 526 bool amsdu);
527int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 527int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
528 struct ieee80211_sta *sta, u16 tid); 528 struct ieee80211_sta *sta, u16 tid);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index cf2591f2ac23..ff193dca2020 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -484,13 +484,15 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
484 484
485 /* Make sure we zero enough of dev_cmd */ 485 /* Make sure we zero enough of dev_cmd */
486 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd)); 486 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
487 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));
487 488
488 memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd)); 489 memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
489 dev_cmd->hdr.cmd = TX_CMD; 490 dev_cmd->hdr.cmd = TX_CMD;
490 491
491 if (iwl_mvm_has_new_tx_api(mvm)) { 492 if (iwl_mvm_has_new_tx_api(mvm)) {
492 struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
493 u16 offload_assist = 0; 493 u16 offload_assist = 0;
494 u32 rate_n_flags = 0;
495 u16 flags = 0;
494 496
495 if (ieee80211_is_data_qos(hdr->frame_control)) { 497 if (ieee80211_is_data_qos(hdr->frame_control)) {
496 u8 *qc = ieee80211_get_qos_ctl(hdr); 498 u8 *qc = ieee80211_get_qos_ctl(hdr);
@@ -507,25 +509,43 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
507 !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) 509 !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
508 offload_assist |= BIT(TX_CMD_OFFLD_PAD); 510 offload_assist |= BIT(TX_CMD_OFFLD_PAD);
509 511
510 cmd->offload_assist |= cpu_to_le16(offload_assist); 512 if (!info->control.hw_key)
513 flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
511 514
512 /* Total # bytes to be transmitted */ 515 /* For data packets rate info comes from the fw */
513 cmd->len = cpu_to_le16((u16)skb->len); 516 if (!(ieee80211_is_data(hdr->frame_control) && sta)) {
517 flags |= IWL_TX_FLAGS_CMD_RATE;
518 rate_n_flags = iwl_mvm_get_tx_rate(mvm, info, sta);
519 }
514 520
515 /* Copy MAC header from skb into command buffer */ 521 if (mvm->trans->cfg->device_family >=
516 memcpy(cmd->hdr, hdr, hdrlen); 522 IWL_DEVICE_FAMILY_22560) {
523 struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;
517 524
518 if (!info->control.hw_key) 525 cmd->offload_assist |= cpu_to_le32(offload_assist);
519 cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_ENCRYPT_DIS);
520 526
521 /* For data packets rate info comes from the fw */ 527 /* Total # bytes to be transmitted */
522 if (ieee80211_is_data(hdr->frame_control) && sta) 528 cmd->len = cpu_to_le16((u16)skb->len);
523 goto out;
524 529
525 cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_CMD_RATE); 530 /* Copy MAC header from skb into command buffer */
526 cmd->rate_n_flags = 531 memcpy(cmd->hdr, hdr, hdrlen);
527 cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
528 532
533 cmd->flags = cpu_to_le16(flags);
534 cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
535 } else {
536 struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
537
538 cmd->offload_assist |= cpu_to_le16(offload_assist);
539
540 /* Total # bytes to be transmitted */
541 cmd->len = cpu_to_le16((u16)skb->len);
542
543 /* Copy MAC header from skb into command buffer */
544 memcpy(cmd->hdr, hdr, hdrlen);
545
546 cmd->flags = cpu_to_le32(flags);
547 cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
548 }
529 goto out; 549 goto out;
530 } 550 }
531 551
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
new file mode 100644
index 000000000000..2146fda8da2f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -0,0 +1,207 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2018 Intel Corporation
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * BSD LICENSE
20 *
21 * Copyright(c) 2018 Intel Corporation
22 * All rights reserved.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 *
28 * * Redistributions of source code must retain the above copyright
29 * notice, this list of conditions and the following disclaimer.
30 * * Redistributions in binary form must reproduce the above copyright
31 * notice, this list of conditions and the following disclaimer in
32 * the documentation and/or other materials provided with the
33 * distribution.
34 * * Neither the name Intel Corporation nor the names of its
35 * contributors may be used to endorse or promote products derived
36 * from this software without specific prior written permission.
37 *
38 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
39 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
40 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
41 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
42 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
43 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
44 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
45 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
46 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
47 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
48 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
49 *
50 *****************************************************************************/
51
52#include "iwl-trans.h"
53#include "iwl-fh.h"
54#include "iwl-context-info-gen3.h"
55#include "internal.h"
56#include "iwl-prph.h"
57
58int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
59 const struct fw_img *fw)
60{
61 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
62 struct iwl_context_info_gen3 *ctxt_info_gen3;
63 struct iwl_prph_scratch *prph_scratch;
64 struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
65 struct iwl_prph_info *prph_info;
66 void *iml_img;
67 u32 control_flags = 0;
68 int ret;
69
70 /* Allocate prph scratch */
71 prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
72 &trans_pcie->prph_scratch_dma_addr,
73 GFP_KERNEL);
74 if (!prph_scratch)
75 return -ENOMEM;
76
77 prph_sc_ctrl = &prph_scratch->ctrl_cfg;
78
79 prph_sc_ctrl->version.version = 0;
80 prph_sc_ctrl->version.mac_id =
81 cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
82 prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
83
84 control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K |
85 IWL_PRPH_SCRATCH_MTR_MODE |
86 (IWL_PRPH_MTR_FORMAT_256B &
87 IWL_PRPH_SCRATCH_MTR_FORMAT) |
88 IWL_PRPH_SCRATCH_EARLY_DEBUG_EN |
89 IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
90 prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
91
92 /* initialize RX default queue */
93 prph_sc_ctrl->rbd_cfg.free_rbd_addr =
94 cpu_to_le64(trans_pcie->rxq->bd_dma);
95
96 /* Configure debug, for integration */
97 iwl_pcie_alloc_fw_monitor(trans, 0);
98 prph_sc_ctrl->hwm_cfg.hwm_base_addr =
99 cpu_to_le64(trans_pcie->fw_mon_phys);
100 prph_sc_ctrl->hwm_cfg.hwm_size =
101 cpu_to_le32(trans_pcie->fw_mon_size);
102
103 /* allocate ucode sections in dram and set addresses */
104 ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
105 if (ret) {
106 dma_free_coherent(trans->dev,
107 sizeof(*prph_scratch),
108 prph_scratch,
109 trans_pcie->prph_scratch_dma_addr);
110 return ret;
111 }
112
113 /* Allocate prph information
114 * currently we don't assign to the prph info anything, but it would get
115 * assigned later */
116 prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info),
117 &trans_pcie->prph_info_dma_addr,
118 GFP_KERNEL);
119 if (!prph_info)
120 return -ENOMEM;
121
122 /* Allocate context info */
123 ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
124 sizeof(*ctxt_info_gen3),
125 &trans_pcie->ctxt_info_dma_addr,
126 GFP_KERNEL);
127 if (!ctxt_info_gen3)
128 return -ENOMEM;
129
130 ctxt_info_gen3->prph_info_base_addr =
131 cpu_to_le64(trans_pcie->prph_info_dma_addr);
132 ctxt_info_gen3->prph_scratch_base_addr =
133 cpu_to_le64(trans_pcie->prph_scratch_dma_addr);
134 ctxt_info_gen3->prph_scratch_size =
135 cpu_to_le32(sizeof(*prph_scratch));
136 ctxt_info_gen3->cr_head_idx_arr_base_addr =
137 cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
138 ctxt_info_gen3->tr_tail_idx_arr_base_addr =
139 cpu_to_le64(trans_pcie->rxq->tr_tail_dma);
140 ctxt_info_gen3->cr_tail_idx_arr_base_addr =
141 cpu_to_le64(trans_pcie->rxq->cr_tail_dma);
142 ctxt_info_gen3->cr_idx_arr_size =
143 cpu_to_le16(IWL_NUM_OF_COMPLETION_RINGS);
144 ctxt_info_gen3->tr_idx_arr_size =
145 cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS);
146 ctxt_info_gen3->mtr_base_addr =
147 cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
148 ctxt_info_gen3->mcr_base_addr =
149 cpu_to_le64(trans_pcie->rxq->used_bd_dma);
150 ctxt_info_gen3->mtr_size =
151 cpu_to_le16(TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS));
152 ctxt_info_gen3->mcr_size =
153 cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE));
154
155 trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
156 trans_pcie->prph_info = prph_info;
157 trans_pcie->prph_scratch = prph_scratch;
158
159 /* Allocate IML */
160 iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
161 &trans_pcie->iml_dma_addr, GFP_KERNEL);
162 if (!iml_img)
163 return -ENOMEM;
164
165 memcpy(iml_img, trans->iml, trans->iml_len);
166
167 iwl_enable_interrupts(trans);
168
169 /* kick FW self load */
170 iwl_write64(trans, CSR_CTXT_INFO_ADDR,
171 trans_pcie->ctxt_info_dma_addr);
172 iwl_write64(trans, CSR_IML_DATA_ADDR,
173 trans_pcie->iml_dma_addr);
174 iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len);
175 iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA);
176 iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
177
178 return 0;
179}
180
181void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
182{
183 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
184
185 if (!trans_pcie->ctxt_info_gen3)
186 return;
187
188 dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
189 trans_pcie->ctxt_info_gen3,
190 trans_pcie->ctxt_info_dma_addr);
191 trans_pcie->ctxt_info_dma_addr = 0;
192 trans_pcie->ctxt_info_gen3 = NULL;
193
194 iwl_pcie_ctxt_info_free_fw_img(trans);
195
196 dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
197 trans_pcie->prph_scratch,
198 trans_pcie->prph_scratch_dma_addr);
199 trans_pcie->prph_scratch_dma_addr = 0;
200 trans_pcie->prph_scratch = NULL;
201
202 dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_info),
203 trans_pcie->prph_info,
204 trans_pcie->prph_info_dma_addr);
205 trans_pcie->prph_info_dma_addr = 0;
206 trans_pcie->prph_info = NULL;
207}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 3fc4343581ee..b2cd7ef5fc3a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -6,6 +6,7 @@
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2017 Intel Deutschland GmbH 8 * Copyright(c) 2017 Intel Deutschland GmbH
9 * Copyright(c) 2018 Intel Corporation
9 * 10 *
10 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 12 * it under the terms of version 2 of the GNU General Public License as
@@ -19,6 +20,7 @@
19 * BSD LICENSE 20 * BSD LICENSE
20 * 21 *
21 * Copyright(c) 2017 Intel Deutschland GmbH 22 * Copyright(c) 2017 Intel Deutschland GmbH
23 * Copyright(c) 2018 Intel Corporation
22 * All rights reserved. 24 * All rights reserved.
23 * 25 *
24 * Redistribution and use in source and binary forms, with or without 26 * Redistribution and use in source and binary forms, with or without
@@ -55,57 +57,6 @@
55#include "internal.h" 57#include "internal.h"
56#include "iwl-prph.h" 58#include "iwl-prph.h"
57 59
58static int iwl_pcie_get_num_sections(const struct fw_img *fw,
59 int start)
60{
61 int i = 0;
62
63 while (start < fw->num_sec &&
64 fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
65 fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
66 start++;
67 i++;
68 }
69
70 return i;
71}
72
73static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
74 const struct fw_desc *sec,
75 struct iwl_dram_data *dram)
76{
77 dram->block = dma_alloc_coherent(trans->dev, sec->len,
78 &dram->physical,
79 GFP_KERNEL);
80 if (!dram->block)
81 return -ENOMEM;
82
83 dram->size = sec->len;
84 memcpy(dram->block, sec->data, sec->len);
85
86 return 0;
87}
88
89static void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
90{
91 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
92 struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
93 int i;
94
95 if (!dram->fw) {
96 WARN_ON(dram->fw_cnt);
97 return;
98 }
99
100 for (i = 0; i < dram->fw_cnt; i++)
101 dma_free_coherent(trans->dev, dram->fw[i].size,
102 dram->fw[i].block, dram->fw[i].physical);
103
104 kfree(dram->fw);
105 dram->fw_cnt = 0;
106 dram->fw = NULL;
107}
108
109void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans) 60void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
110{ 61{
111 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 62 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -128,13 +79,12 @@ void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
128 dram->paging = NULL; 79 dram->paging = NULL;
129} 80}
130 81
131static int iwl_pcie_ctxt_info_init_fw_sec(struct iwl_trans *trans, 82int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
132 const struct fw_img *fw, 83 const struct fw_img *fw,
133 struct iwl_context_info *ctxt_info) 84 struct iwl_context_info_dram *ctxt_dram)
134{ 85{
135 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 86 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
136 struct iwl_self_init_dram *dram = &trans_pcie->init_dram; 87 struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
137 struct iwl_context_info_dram *ctxt_dram = &ctxt_info->dram;
138 int i, ret, lmac_cnt, umac_cnt, paging_cnt; 88 int i, ret, lmac_cnt, umac_cnt, paging_cnt;
139 89
140 if (WARN(dram->paging, 90 if (WARN(dram->paging,
@@ -247,7 +197,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
247 TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS); 197 TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);
248 198
249 /* allocate ucode sections in dram and set addresses */ 199 /* allocate ucode sections in dram and set addresses */
250 ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info); 200 ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
251 if (ret) { 201 if (ret) {
252 dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info), 202 dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
253 ctxt_info, trans_pcie->ctxt_info_dma_addr); 203 ctxt_info, trans_pcie->ctxt_info_dma_addr);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 8520523b91b4..562cc79288a6 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -828,19 +828,32 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
828 {IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)}, 828 {IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)},
829 829
830/* 22000 Series */ 830/* 22000 Series */
831 {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)},
832 {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
833 {IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)}, 831 {IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)},
834 {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)}, 832 {IWL_PCI_DEVICE(0x2720, 0x0040, iwl22000_2ax_cfg_hr)},
835 {IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)}, 833 {IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)},
836 {IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)}, 834 {IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)},
837 {IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)}, 835 {IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)},
838 {IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)}, 836 {IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)},
839 {IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)}, 837 {IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
840 {IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)}, 838 {IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)},
841 {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22000_2ax_cfg_hr)}, 839 {IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22000_2ax_cfg_hr)},
842 {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22000_2ax_cfg_hr)}, 840 {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
841 {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22000_2ax_cfg_hr)},
842 {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
843 {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)},
844 {IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)},
845 {IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)},
846 {IWL_PCI_DEVICE(0x40C0, 0x0310, iwl22560_2ax_cfg_su_cdb)},
847 {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22560_2ax_cfg_su_cdb)},
848 {IWL_PCI_DEVICE(0x43F0, 0x0040, iwl22000_2ax_cfg_hr)},
849 {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl22000_2ax_cfg_hr)},
850 {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl22000_2ax_cfg_hr)},
843 {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)}, 851 {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)},
852 {IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl22000_2ax_cfg_hr)},
853 {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl22000_2ax_cfg_hr)},
854 {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl22000_2ax_cfg_hr)},
855 {IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl22000_2ax_cfg_hr)},
856 {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl22000_2ax_cfg_hr)},
844 857
845#endif /* CONFIG_IWLMVM */ 858#endif /* CONFIG_IWLMVM */
846 859
@@ -1003,6 +1016,10 @@ static int iwl_pci_resume(struct device *device)
1003 if (!trans->op_mode) 1016 if (!trans->op_mode)
1004 return 0; 1017 return 0;
1005 1018
1019 /* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */
1020 if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
1021 return 0;
1022
1006 /* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */ 1023 /* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */
1007 iwl_pcie_conf_msix_hw(trans_pcie); 1024 iwl_pcie_conf_msix_hw(trans_pcie);
1008 1025
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 45ea32796cda..b63d44b7cd7c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -3,6 +3,7 @@
3 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
5 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 5 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
6 * Copyright(c) 2018 Intel Corporation
6 * 7 *
7 * Portions of this file are derived from the ipw3945 project, as well 8 * Portions of this file are derived from the ipw3945 project, as well
8 * as portions of the ieee80211 subsystem header files. 9 * as portions of the ieee80211 subsystem header files.
@@ -17,8 +18,7 @@
17 * more details. 18 * more details.
18 * 19 *
19 * You should have received a copy of the GNU General Public License along with 20 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 21 * this program.
21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
22 * 22 *
23 * The full GNU General Public License is included in this distribution in the 23 * The full GNU General Public License is included in this distribution in the
24 * file called LICENSE. 24 * file called LICENSE.
@@ -45,6 +45,7 @@
45#include "iwl-debug.h" 45#include "iwl-debug.h"
46#include "iwl-io.h" 46#include "iwl-io.h"
47#include "iwl-op-mode.h" 47#include "iwl-op-mode.h"
48#include "iwl-drv.h"
48 49
49/* We need 2 entries for the TX command and header, and another one might 50/* We need 2 entries for the TX command and header, and another one might
50 * be needed for potential data in the SKB's head. The remaining ones can 51 * be needed for potential data in the SKB's head. The remaining ones can
@@ -59,6 +60,7 @@
59#define RX_POST_REQ_ALLOC 2 60#define RX_POST_REQ_ALLOC 2
60#define RX_CLAIM_REQ_ALLOC 8 61#define RX_CLAIM_REQ_ALLOC 8
61#define RX_PENDING_WATERMARK 16 62#define RX_PENDING_WATERMARK 16
63#define FIRST_RX_QUEUE 512
62 64
63struct iwl_host_cmd; 65struct iwl_host_cmd;
64 66
@@ -71,6 +73,7 @@ struct iwl_host_cmd;
71 * @page: driver's pointer to the rxb page 73 * @page: driver's pointer to the rxb page
72 * @invalid: rxb is in driver ownership - not owned by HW 74 * @invalid: rxb is in driver ownership - not owned by HW
73 * @vid: index of this rxb in the global table 75 * @vid: index of this rxb in the global table
76 * @size: size used from the buffer
74 */ 77 */
75struct iwl_rx_mem_buffer { 78struct iwl_rx_mem_buffer {
76 dma_addr_t page_dma; 79 dma_addr_t page_dma;
@@ -78,6 +81,7 @@ struct iwl_rx_mem_buffer {
78 u16 vid; 81 u16 vid;
79 bool invalid; 82 bool invalid;
80 struct list_head list; 83 struct list_head list;
84 u32 size;
81}; 85};
82 86
83/** 87/**
@@ -98,14 +102,121 @@ struct isr_statistics {
98 u32 unhandled; 102 u32 unhandled;
99}; 103};
100 104
105#define IWL_CD_STTS_OPTIMIZED_POS 0
106#define IWL_CD_STTS_OPTIMIZED_MSK 0x01
107#define IWL_CD_STTS_TRANSFER_STATUS_POS 1
108#define IWL_CD_STTS_TRANSFER_STATUS_MSK 0x0E
109#define IWL_CD_STTS_WIFI_STATUS_POS 4
110#define IWL_CD_STTS_WIFI_STATUS_MSK 0xF0
111
112/**
113 * enum iwl_completion_desc_transfer_status - transfer status (bits 1-3)
114 * @IWL_CD_STTS_END_TRANSFER: successful transfer complete.
115 * In sniffer mode, when split is used, set in last CD completion. (RX)
116 * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for
117 * all CD completion. (RX)
118 * @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX)
119 */
120enum iwl_completion_desc_transfer_status {
121 IWL_CD_STTS_UNUSED,
122 IWL_CD_STTS_UNUSED_2,
123 IWL_CD_STTS_END_TRANSFER,
124 IWL_CD_STTS_OVERFLOW,
125 IWL_CD_STTS_ABORTED,
126 IWL_CD_STTS_ERROR,
127};
128
129/**
130 * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7)
131 * @IWL_CD_STTS_VALID: the packet is valid (RX)
132 * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX)
133 * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX)
134 * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX)
135 * @IWL_CD_STTS_DUP: duplicate packet (RX)
136 * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX)
137 * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX)
138 * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX)
139 * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX)
140 * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX)
141 * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX)
142 * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX)
143 * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX)
144 * @IWL_CD_STTS_NOT_USED: completed but not used (RX)
145 * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX)
146 */
147enum iwl_completion_desc_wifi_status {
148 IWL_CD_STTS_VALID,
149 IWL_CD_STTS_FCS_ERR,
150 IWL_CD_STTS_SEC_KEY_ERR,
151 IWL_CD_STTS_DECRYPTION_ERR,
152 IWL_CD_STTS_DUP,
153 IWL_CD_STTS_ICV_MIC_ERR,
154 IWL_CD_STTS_INTERNAL_SNAP_ERR,
155 IWL_CD_STTS_SEC_PORT_FAIL,
156 IWL_CD_STTS_BA_OLD_SN,
157 IWL_CD_STTS_QOS_NULL,
158 IWL_CD_STTS_MAC_HDR_ERR,
159 IWL_CD_STTS_MAX_RETRANS,
160 IWL_CD_STTS_EX_LIFETIME,
161 IWL_CD_STTS_NOT_USED,
162 IWL_CD_STTS_REPLAY_ERR,
163};
164
165#define IWL_RX_TD_TYPE_MSK 0xff000000
166#define IWL_RX_TD_SIZE_MSK 0x00ffffff
167#define IWL_RX_TD_SIZE_2K BIT(11)
168#define IWL_RX_TD_TYPE 0
169
170/**
171 * struct iwl_rx_transfer_desc - transfer descriptor
172 * @type_n_size: buffer type (bit 0: external buff valid,
173 * bit 1: optional footer valid, bit 2-7: reserved)
174 * and buffer size
175 * @addr: ptr to free buffer start address
176 * @rbid: unique tag of the buffer
177 * @reserved: reserved
178 */
179struct iwl_rx_transfer_desc {
180 __le32 type_n_size;
181 __le64 addr;
182 __le16 rbid;
183 __le16 reserved;
184} __packed;
185
186#define IWL_RX_CD_SIZE 0xffffff00
187
188/**
189 * struct iwl_rx_completion_desc - completion descriptor
190 * @type: buffer type (bit 0: external buff valid,
191 * bit 1: optional footer valid, bit 2-7: reserved)
192 * @status: status of the completion
193 * @reserved1: reserved
194 * @rbid: unique tag of the received buffer
195 * @size: buffer size, masked by IWL_RX_CD_SIZE
196 * @reserved2: reserved
197 */
198struct iwl_rx_completion_desc {
199 u8 type;
200 u8 status;
201 __le16 reserved1;
202 __le16 rbid;
203 __le32 size;
204 u8 reserved2[22];
205} __packed;
206
101/** 207/**
102 * struct iwl_rxq - Rx queue 208 * struct iwl_rxq - Rx queue
103 * @id: queue index 209 * @id: queue index
104 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd). 210 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
105 * Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices. 211 * Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
212 * In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
106 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) 213 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
107 * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd) 214 * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
108 * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd) 215 * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
216 * @tr_tail: driver's pointer to the transmission ring tail buffer
217 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
218 * @cr_tail: driver's pointer to the completion ring tail buffer
219 * @cr_tail_dma: physical address of the buffer for the completion ring tail
109 * @read: Shared index to newest available Rx buffer 220 * @read: Shared index to newest available Rx buffer
110 * @write: Shared index to oldest written Rx packet 221 * @write: Shared index to oldest written Rx packet
111 * @free_count: Number of pre-allocated buffers in rx_free 222 * @free_count: Number of pre-allocated buffers in rx_free
@@ -125,8 +236,16 @@ struct iwl_rxq {
125 int id; 236 int id;
126 void *bd; 237 void *bd;
127 dma_addr_t bd_dma; 238 dma_addr_t bd_dma;
128 __le32 *used_bd; 239 union {
240 void *used_bd;
241 __le32 *bd_32;
242 struct iwl_rx_completion_desc *cd;
243 };
129 dma_addr_t used_bd_dma; 244 dma_addr_t used_bd_dma;
245 __le16 *tr_tail;
246 dma_addr_t tr_tail_dma;
247 __le16 *cr_tail;
248 dma_addr_t cr_tail_dma;
130 u32 read; 249 u32 read;
131 u32 write; 250 u32 write;
132 u32 free_count; 251 u32 free_count;
@@ -136,7 +255,7 @@ struct iwl_rxq {
136 struct list_head rx_free; 255 struct list_head rx_free;
137 struct list_head rx_used; 256 struct list_head rx_used;
138 bool need_update; 257 bool need_update;
139 struct iwl_rb_status *rb_stts; 258 void *rb_stts;
140 dma_addr_t rb_stts_dma; 259 dma_addr_t rb_stts_dma;
141 spinlock_t lock; 260 spinlock_t lock;
142 struct napi_struct napi; 261 struct napi_struct napi;
@@ -175,18 +294,36 @@ struct iwl_dma_ptr {
175 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning 294 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
176 * @index -- current index 295 * @index -- current index
177 */ 296 */
178static inline int iwl_queue_inc_wrap(int index) 297static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
179{ 298{
180 return ++index & (TFD_QUEUE_SIZE_MAX - 1); 299 return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
300}
301
302/**
303 * iwl_get_closed_rb_stts - get closed rb stts from different structs
304 * @rxq - the rxq to get the rb stts from
305 */
306static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
307 struct iwl_rxq *rxq)
308{
309 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
310 __le16 *rb_stts = rxq->rb_stts;
311
312 return READ_ONCE(*rb_stts);
313 } else {
314 struct iwl_rb_status *rb_stts = rxq->rb_stts;
315
316 return READ_ONCE(rb_stts->closed_rb_num);
317 }
181} 318}
182 319
183/** 320/**
184 * iwl_queue_dec_wrap - decrement queue index, wrap back to end 321 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
185 * @index -- current index 322 * @index -- current index
186 */ 323 */
187static inline int iwl_queue_dec_wrap(int index) 324static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
188{ 325{
189 return --index & (TFD_QUEUE_SIZE_MAX - 1); 326 return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
190} 327}
191 328
192struct iwl_cmd_meta { 329struct iwl_cmd_meta {
@@ -315,6 +452,18 @@ enum iwl_shared_irq_flags {
315}; 452};
316 453
317/** 454/**
455 * enum iwl_image_response_code - image response values
456 * @IWL_IMAGE_RESP_DEF: the default value of the register
457 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
458 * @IWL_IMAGE_RESP_FAIL: iml reading failed
459 */
460enum iwl_image_response_code {
461 IWL_IMAGE_RESP_DEF = 0,
462 IWL_IMAGE_RESP_SUCCESS = 1,
463 IWL_IMAGE_RESP_FAIL = 2,
464};
465
466/**
318 * struct iwl_dram_data 467 * struct iwl_dram_data
319 * @physical: page phy pointer 468 * @physical: page phy pointer
320 * @block: pointer to the allocated block/page 469 * @block: pointer to the allocated block/page
@@ -347,6 +496,12 @@ struct iwl_self_init_dram {
347 * @global_table: table mapping received VID from hw to rxb 496 * @global_table: table mapping received VID from hw to rxb
348 * @rba: allocator for RX replenishing 497 * @rba: allocator for RX replenishing
349 * @ctxt_info: context information for FW self init 498 * @ctxt_info: context information for FW self init
499 * @ctxt_info_gen3: context information for gen3 devices
500 * @prph_info: prph info for self init
501 * @prph_scratch: prph scratch for self init
502 * @ctxt_info_dma_addr: dma addr of context information
503 * @prph_info_dma_addr: dma addr of prph info
504 * @prph_scratch_dma_addr: dma addr of prph scratch
350 * @ctxt_info_dma_addr: dma addr of context information 505 * @ctxt_info_dma_addr: dma addr of context information
351 * @init_dram: DRAM data of firmware image (including paging). 506 * @init_dram: DRAM data of firmware image (including paging).
352 * Context information addresses will be taken from here. 507 * Context information addresses will be taken from here.
@@ -391,8 +546,16 @@ struct iwl_trans_pcie {
391 struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE]; 546 struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
392 struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE]; 547 struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
393 struct iwl_rb_allocator rba; 548 struct iwl_rb_allocator rba;
394 struct iwl_context_info *ctxt_info; 549 union {
550 struct iwl_context_info *ctxt_info;
551 struct iwl_context_info_gen3 *ctxt_info_gen3;
552 };
553 struct iwl_prph_info *prph_info;
554 struct iwl_prph_scratch *prph_scratch;
395 dma_addr_t ctxt_info_dma_addr; 555 dma_addr_t ctxt_info_dma_addr;
556 dma_addr_t prph_info_dma_addr;
557 dma_addr_t prph_scratch_dma_addr;
558 dma_addr_t iml_dma_addr;
396 struct iwl_self_init_dram init_dram; 559 struct iwl_self_init_dram init_dram;
397 struct iwl_trans *trans; 560 struct iwl_trans *trans;
398 561
@@ -477,6 +640,20 @@ IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
477 return (void *)trans->trans_specific; 640 return (void *)trans->trans_specific;
478} 641}
479 642
643static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
644 struct msix_entry *entry)
645{
646 /*
647 * Before sending the interrupt the HW disables it to prevent
648 * a nested interrupt. This is done by writing 1 to the corresponding
649 * bit in the mask register. After handling the interrupt, it should be
650 * re-enabled by clearing this bit. This register is defined as
651 * write 1 clear (W1C) register, meaning that it's being clear
652 * by writing 1 to the bit.
653 */
654 iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
655}
656
480static inline struct iwl_trans * 657static inline struct iwl_trans *
481iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie) 658iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
482{ 659{
@@ -504,6 +681,11 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
504irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id); 681irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
505int iwl_pcie_rx_stop(struct iwl_trans *trans); 682int iwl_pcie_rx_stop(struct iwl_trans *trans);
506void iwl_pcie_rx_free(struct iwl_trans *trans); 683void iwl_pcie_rx_free(struct iwl_trans *trans);
684void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
685void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
686int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
687void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
688 struct iwl_rxq *rxq);
507 689
508/***************************************************** 690/*****************************************************
509* ICT - interrupt handling 691* ICT - interrupt handling
@@ -588,6 +770,60 @@ static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
588 IWL_DEBUG_ISR(trans, "Disabled interrupts\n"); 770 IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
589} 771}
590 772
773#define IWL_NUM_OF_COMPLETION_RINGS 31
774#define IWL_NUM_OF_TRANSFER_RINGS 527
775
776static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
777 int start)
778{
779 int i = 0;
780
781 while (start < fw->num_sec &&
782 fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
783 fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
784 start++;
785 i++;
786 }
787
788 return i;
789}
790
791static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
792 const struct fw_desc *sec,
793 struct iwl_dram_data *dram)
794{
795 dram->block = dma_alloc_coherent(trans->dev, sec->len,
796 &dram->physical,
797 GFP_KERNEL);
798 if (!dram->block)
799 return -ENOMEM;
800
801 dram->size = sec->len;
802 memcpy(dram->block, sec->data, sec->len);
803
804 return 0;
805}
806
807static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
808{
809 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
810 struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
811 int i;
812
813 if (!dram->fw) {
814 WARN_ON(dram->fw_cnt);
815 return;
816 }
817
818 for (i = 0; i < dram->fw_cnt; i++)
819 dma_free_coherent(trans->dev, dram->fw[i].size,
820 dram->fw[i].block, dram->fw[i].physical);
821
822 kfree(dram->fw);
823 dram->fw_cnt = 0;
824 dram->fw = NULL;
825}
826
591static inline void iwl_disable_interrupts(struct iwl_trans *trans) 827static inline void iwl_disable_interrupts(struct iwl_trans *trans)
592{ 828{
593 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 829 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -660,7 +896,7 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
660 } 896 }
661} 897}
662 898
663static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index) 899static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
664{ 900{
665 return index & (q->n_window - 1); 901 return index & (q->n_window - 1);
666} 902}
@@ -676,6 +912,29 @@ static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
676 return txq->tfds + trans_pcie->tfd_size * idx; 912 return txq->tfds + trans_pcie->tfd_size * idx;
677} 913}
678 914
915static inline const char *queue_name(struct device *dev,
916 struct iwl_trans_pcie *trans_p, int i)
917{
918 if (trans_p->shared_vec_mask) {
919 int vec = trans_p->shared_vec_mask &
920 IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
921
922 if (i == 0)
923 return DRV_NAME ": shared IRQ";
924
925 return devm_kasprintf(dev, GFP_KERNEL,
926 DRV_NAME ": queue %d", i + vec);
927 }
928 if (i == 0)
929 return DRV_NAME ": default queue";
930
931 if (i == trans_p->alloc_vecs - 1)
932 return DRV_NAME ": exception";
933
934 return devm_kasprintf(dev, GFP_KERNEL,
935 DRV_NAME ": queue %d", i);
936}
937
679static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) 938static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
680{ 939{
681 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 940 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -730,9 +989,13 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
730 989
731static inline bool iwl_queue_used(const struct iwl_txq *q, int i) 990static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
732{ 991{
733 return q->write_ptr >= q->read_ptr ? 992 int index = iwl_pcie_get_cmd_index(q, i);
734 (i >= q->read_ptr && i < q->write_ptr) : 993 int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
735 !(i < q->read_ptr && i >= q->write_ptr); 994 int w = iwl_pcie_get_cmd_index(q, q->write_ptr);
995
996 return w >= r ?
997 (index >= r && index < w) :
998 !(index < r && index >= w);
736} 999}
737 1000
738static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) 1001static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
@@ -801,7 +1064,7 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
801void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, 1064void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
802 bool was_in_rfkill); 1065 bool was_in_rfkill);
803void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq); 1066void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
804int iwl_queue_space(const struct iwl_txq *q); 1067int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
805void iwl_pcie_apm_stop_master(struct iwl_trans *trans); 1068void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
806void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie); 1069void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
807int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, 1070int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
@@ -818,6 +1081,9 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
818struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len); 1081struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
819#endif 1082#endif
820 1083
1084/* common functions that are used by gen3 transport */
1085void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
1086
821/* transport gen 2 exported functions */ 1087/* transport gen 2 exported functions */
822int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, 1088int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
823 const struct fw_img *fw, bool run_in_rfkill); 1089 const struct fw_img *fw, bool run_in_rfkill);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index d15f5ba2dc77..d017aa2a0a8b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -18,8 +18,7 @@
18 * more details. 18 * more details.
19 * 19 *
20 * You should have received a copy of the GNU General Public License along with 20 * You should have received a copy of the GNU General Public License along with
21 * this program; if not, write to the Free Software Foundation, Inc., 21 * this program.
22 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
23 * 22 *
24 * The full GNU General Public License is included in this distribution in the 23 * The full GNU General Public License is included in this distribution in the
25 * file called LICENSE. 24 * file called LICENSE.
@@ -37,6 +36,7 @@
37#include "iwl-io.h" 36#include "iwl-io.h"
38#include "internal.h" 37#include "internal.h"
39#include "iwl-op-mode.h" 38#include "iwl-op-mode.h"
39#include "iwl-context-info-gen3.h"
40 40
41/****************************************************************************** 41/******************************************************************************
42 * 42 *
@@ -167,7 +167,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
167 */ 167 */
168int iwl_pcie_rx_stop(struct iwl_trans *trans) 168int iwl_pcie_rx_stop(struct iwl_trans *trans)
169{ 169{
170 if (trans->cfg->mq_rx_supported) { 170 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
171 /* TODO: remove this for 22560 once fw does it */
172 iwl_write_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
173 return iwl_poll_prph_bit(trans, RFH_GEN_STATUS_GEN3,
174 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
175 } else if (trans->cfg->mq_rx_supported) {
171 iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0); 176 iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
172 return iwl_poll_prph_bit(trans, RFH_GEN_STATUS, 177 return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
173 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); 178 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
@@ -209,7 +214,11 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
209 } 214 }
210 215
211 rxq->write_actual = round_down(rxq->write, 8); 216 rxq->write_actual = round_down(rxq->write, 8);
212 if (trans->cfg->mq_rx_supported) 217 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
218 iwl_write32(trans, HBUS_TARG_WRPTR,
219 (rxq->write_actual |
220 ((FIRST_RX_QUEUE + rxq->id) << 16)));
221 else if (trans->cfg->mq_rx_supported)
213 iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id), 222 iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
214 rxq->write_actual); 223 rxq->write_actual);
215 else 224 else
@@ -233,6 +242,25 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
233 } 242 }
234} 243}
235 244
245static void iwl_pcie_restock_bd(struct iwl_trans *trans,
246 struct iwl_rxq *rxq,
247 struct iwl_rx_mem_buffer *rxb)
248{
249 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
250 struct iwl_rx_transfer_desc *bd = rxq->bd;
251
252 bd[rxq->write].type_n_size =
253 cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
254 ((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
255 bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
256 bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
257 } else {
258 __le64 *bd = rxq->bd;
259
260 bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
261 }
262}
263
236/* 264/*
237 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx 265 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
238 */ 266 */
@@ -254,8 +282,6 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
254 282
255 spin_lock(&rxq->lock); 283 spin_lock(&rxq->lock);
256 while (rxq->free_count) { 284 while (rxq->free_count) {
257 __le64 *bd = (__le64 *)rxq->bd;
258
259 /* Get next free Rx buffer, remove from free list */ 285 /* Get next free Rx buffer, remove from free list */
260 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, 286 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
261 list); 287 list);
@@ -264,7 +290,7 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
264 /* 12 first bits are expected to be empty */ 290 /* 12 first bits are expected to be empty */
265 WARN_ON(rxb->page_dma & DMA_BIT_MASK(12)); 291 WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
266 /* Point to Rx buffer via next RBD in circular buffer */ 292 /* Point to Rx buffer via next RBD in circular buffer */
267 bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid); 293 iwl_pcie_restock_bd(trans, rxq, rxb);
268 rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK; 294 rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
269 rxq->free_count--; 295 rxq->free_count--;
270 } 296 }
@@ -391,8 +417,8 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
391 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly 417 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
392 * allocated buffers. 418 * allocated buffers.
393 */ 419 */
394static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, 420void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
395 struct iwl_rxq *rxq) 421 struct iwl_rxq *rxq)
396{ 422{
397 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 423 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
398 struct iwl_rx_mem_buffer *rxb; 424 struct iwl_rx_mem_buffer *rxb;
@@ -448,7 +474,7 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
448 } 474 }
449} 475}
450 476
451static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) 477void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
452{ 478{
453 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 479 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
454 int i; 480 int i;
@@ -608,89 +634,174 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data)
608 iwl_pcie_rx_allocator(trans_pcie->trans); 634 iwl_pcie_rx_allocator(trans_pcie->trans);
609} 635}
610 636
611static int iwl_pcie_rx_alloc(struct iwl_trans *trans) 637static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
612{ 638{
613 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 639 struct iwl_rx_transfer_desc *rx_td;
614 struct iwl_rb_allocator *rba = &trans_pcie->rba;
615 struct device *dev = trans->dev;
616 int i;
617 int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
618 sizeof(__le32);
619 640
620 if (WARN_ON(trans_pcie->rxq)) 641 if (use_rx_td)
621 return -EINVAL; 642 return sizeof(*rx_td);
643 else
644 return trans->cfg->mq_rx_supported ? sizeof(__le64) :
645 sizeof(__le32);
646}
622 647
623 trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), 648static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
624 GFP_KERNEL); 649 struct iwl_rxq *rxq)
625 if (!trans_pcie->rxq) 650{
626 return -EINVAL; 651 struct device *dev = trans->dev;
652 bool use_rx_td = (trans->cfg->device_family >=
653 IWL_DEVICE_FAMILY_22560);
654 int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
655
656 if (rxq->bd)
657 dma_free_coherent(trans->dev,
658 free_size * rxq->queue_size,
659 rxq->bd, rxq->bd_dma);
660 rxq->bd_dma = 0;
661 rxq->bd = NULL;
662
663 if (rxq->rb_stts)
664 dma_free_coherent(trans->dev,
665 use_rx_td ? sizeof(__le16) :
666 sizeof(struct iwl_rb_status),
667 rxq->rb_stts, rxq->rb_stts_dma);
668 rxq->rb_stts_dma = 0;
669 rxq->rb_stts = NULL;
670
671 if (rxq->used_bd)
672 dma_free_coherent(trans->dev,
673 (use_rx_td ? sizeof(*rxq->cd) :
674 sizeof(__le32)) * rxq->queue_size,
675 rxq->used_bd, rxq->used_bd_dma);
676 rxq->used_bd_dma = 0;
677 rxq->used_bd = NULL;
678
679 if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
680 return;
627 681
628 spin_lock_init(&rba->lock); 682 if (rxq->tr_tail)
683 dma_free_coherent(dev, sizeof(__le16),
684 rxq->tr_tail, rxq->tr_tail_dma);
685 rxq->tr_tail_dma = 0;
686 rxq->tr_tail = NULL;
687
688 if (rxq->cr_tail)
689 dma_free_coherent(dev, sizeof(__le16),
690 rxq->cr_tail, rxq->cr_tail_dma);
691 rxq->cr_tail_dma = 0;
692 rxq->cr_tail = NULL;
693}
629 694
630 for (i = 0; i < trans->num_rx_queues; i++) { 695static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
631 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 696 struct iwl_rxq *rxq)
697{
698 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
699 struct device *dev = trans->dev;
700 int i;
701 int free_size;
702 bool use_rx_td = (trans->cfg->device_family >=
703 IWL_DEVICE_FAMILY_22560);
632 704
633 spin_lock_init(&rxq->lock); 705 spin_lock_init(&rxq->lock);
634 if (trans->cfg->mq_rx_supported) 706 if (trans->cfg->mq_rx_supported)
635 rxq->queue_size = MQ_RX_TABLE_SIZE; 707 rxq->queue_size = MQ_RX_TABLE_SIZE;
636 else 708 else
637 rxq->queue_size = RX_QUEUE_SIZE; 709 rxq->queue_size = RX_QUEUE_SIZE;
638 710
639 /* 711 free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
640 * Allocate the circular buffer of Read Buffer Descriptors
641 * (RBDs)
642 */
643 rxq->bd = dma_zalloc_coherent(dev,
644 free_size * rxq->queue_size,
645 &rxq->bd_dma, GFP_KERNEL);
646 if (!rxq->bd)
647 goto err;
648 712
649 if (trans->cfg->mq_rx_supported) { 713 /*
650 rxq->used_bd = dma_zalloc_coherent(dev, 714 * Allocate the circular buffer of Read Buffer Descriptors
651 sizeof(__le32) * 715 * (RBDs)
652 rxq->queue_size, 716 */
653 &rxq->used_bd_dma, 717 rxq->bd = dma_zalloc_coherent(dev,
654 GFP_KERNEL); 718 free_size * rxq->queue_size,
655 if (!rxq->used_bd) 719 &rxq->bd_dma, GFP_KERNEL);
656 goto err; 720 if (!rxq->bd)
657 } 721 goto err;
658 722
659 /*Allocate the driver's pointer to receive buffer status */ 723 if (trans->cfg->mq_rx_supported) {
660 rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), 724 rxq->used_bd = dma_zalloc_coherent(dev,
661 &rxq->rb_stts_dma, 725 (use_rx_td ?
726 sizeof(*rxq->cd) :
727 sizeof(__le32)) *
728 rxq->queue_size,
729 &rxq->used_bd_dma,
662 GFP_KERNEL); 730 GFP_KERNEL);
663 if (!rxq->rb_stts) 731 if (!rxq->used_bd)
664 goto err; 732 goto err;
665 } 733 }
734
735 /* Allocate the driver's pointer to receive buffer status */
736 rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ?
737 sizeof(__le16) :
738 sizeof(struct iwl_rb_status),
739 &rxq->rb_stts_dma,
740 GFP_KERNEL);
741 if (!rxq->rb_stts)
742 goto err;
743
744 if (!use_rx_td)
745 return 0;
746
747 /* Allocate the driver's pointer to TR tail */
748 rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
749 &rxq->tr_tail_dma,
750 GFP_KERNEL);
751 if (!rxq->tr_tail)
752 goto err;
753
754 /* Allocate the driver's pointer to CR tail */
755 rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
756 &rxq->cr_tail_dma,
757 GFP_KERNEL);
758 if (!rxq->cr_tail)
759 goto err;
760 /*
761 * W/A 22560 device step Z0 must be non zero bug
762 * TODO: remove this when stop supporting Z0
763 */
764 *rxq->cr_tail = cpu_to_le16(500);
765
666 return 0; 766 return 0;
667 767
668err: 768err:
669 for (i = 0; i < trans->num_rx_queues; i++) { 769 for (i = 0; i < trans->num_rx_queues; i++) {
670 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 770 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
671 771
672 if (rxq->bd) 772 iwl_pcie_free_rxq_dma(trans, rxq);
673 dma_free_coherent(dev, free_size * rxq->queue_size,
674 rxq->bd, rxq->bd_dma);
675 rxq->bd_dma = 0;
676 rxq->bd = NULL;
677
678 if (rxq->rb_stts)
679 dma_free_coherent(trans->dev,
680 sizeof(struct iwl_rb_status),
681 rxq->rb_stts, rxq->rb_stts_dma);
682
683 if (rxq->used_bd)
684 dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
685 rxq->used_bd, rxq->used_bd_dma);
686 rxq->used_bd_dma = 0;
687 rxq->used_bd = NULL;
688 } 773 }
689 kfree(trans_pcie->rxq); 774 kfree(trans_pcie->rxq);
690 775
691 return -ENOMEM; 776 return -ENOMEM;
692} 777}
693 778
779static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
780{
781 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
782 struct iwl_rb_allocator *rba = &trans_pcie->rba;
783 int i, ret;
784
785 if (WARN_ON(trans_pcie->rxq))
786 return -EINVAL;
787
788 trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
789 GFP_KERNEL);
790 if (!trans_pcie->rxq)
791 return -EINVAL;
792
793 spin_lock_init(&rba->lock);
794
795 for (i = 0; i < trans->num_rx_queues; i++) {
796 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
797
798 ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
799 if (ret)
800 return ret;
801 }
802 return 0;
803}
804
694static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) 805static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
695{ 806{
696 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 807 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -792,6 +903,9 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
792 int i; 903 int i;
793 904
794 switch (trans_pcie->rx_buf_size) { 905 switch (trans_pcie->rx_buf_size) {
906 case IWL_AMSDU_2K:
907 rb_size = RFH_RXF_DMA_RB_SIZE_2K;
908 break;
795 case IWL_AMSDU_4K: 909 case IWL_AMSDU_4K:
796 rb_size = RFH_RXF_DMA_RB_SIZE_4K; 910 rb_size = RFH_RXF_DMA_RB_SIZE_4K;
797 break; 911 break;
@@ -872,7 +986,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
872 iwl_pcie_enable_rx_wake(trans, true); 986 iwl_pcie_enable_rx_wake(trans, true);
873} 987}
874 988
875static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) 989void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
876{ 990{
877 lockdep_assert_held(&rxq->lock); 991 lockdep_assert_held(&rxq->lock);
878 992
@@ -882,7 +996,7 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
882 rxq->used_count = 0; 996 rxq->used_count = 0;
883} 997}
884 998
885static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) 999int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
886{ 1000{
887 WARN_ON(1); 1001 WARN_ON(1);
888 return 0; 1002 return 0;
@@ -931,7 +1045,9 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
931 rxq->read = 0; 1045 rxq->read = 0;
932 rxq->write = 0; 1046 rxq->write = 0;
933 rxq->write_actual = 0; 1047 rxq->write_actual = 0;
934 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); 1048 memset(rxq->rb_stts, 0,
1049 (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
1050 sizeof(__le16) : sizeof(struct iwl_rb_status));
935 1051
936 iwl_pcie_rx_init_rxb_lists(rxq); 1052 iwl_pcie_rx_init_rxb_lists(rxq);
937 1053
@@ -1002,8 +1118,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
1002{ 1118{
1003 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1119 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1004 struct iwl_rb_allocator *rba = &trans_pcie->rba; 1120 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1005 int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
1006 sizeof(__le32);
1007 int i; 1121 int i;
1008 1122
1009 /* 1123 /*
@@ -1022,27 +1136,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
1022 for (i = 0; i < trans->num_rx_queues; i++) { 1136 for (i = 0; i < trans->num_rx_queues; i++) {
1023 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 1137 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1024 1138
1025 if (rxq->bd) 1139 iwl_pcie_free_rxq_dma(trans, rxq);
1026 dma_free_coherent(trans->dev,
1027 free_size * rxq->queue_size,
1028 rxq->bd, rxq->bd_dma);
1029 rxq->bd_dma = 0;
1030 rxq->bd = NULL;
1031
1032 if (rxq->rb_stts)
1033 dma_free_coherent(trans->dev,
1034 sizeof(struct iwl_rb_status),
1035 rxq->rb_stts, rxq->rb_stts_dma);
1036 else
1037 IWL_DEBUG_INFO(trans,
1038 "Free rxq->rb_stts which is NULL\n");
1039
1040 if (rxq->used_bd)
1041 dma_free_coherent(trans->dev,
1042 sizeof(__le32) * rxq->queue_size,
1043 rxq->used_bd, rxq->used_bd_dma);
1044 rxq->used_bd_dma = 0;
1045 rxq->used_bd = NULL;
1046 1140
1047 if (rxq->napi.poll) 1141 if (rxq->napi.poll)
1048 netif_napi_del(&rxq->napi); 1142 netif_napi_del(&rxq->napi);
@@ -1202,6 +1296,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
1202 } 1296 }
1203 1297
1204 page_stolen |= rxcb._page_stolen; 1298 page_stolen |= rxcb._page_stolen;
1299 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1300 break;
1205 offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); 1301 offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
1206 } 1302 }
1207 1303
@@ -1236,6 +1332,45 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
1236 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); 1332 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1237} 1333}
1238 1334
1335static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
1336 struct iwl_rxq *rxq, int i)
1337{
1338 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1339 struct iwl_rx_mem_buffer *rxb;
1340 u16 vid;
1341
1342 if (!trans->cfg->mq_rx_supported) {
1343 rxb = rxq->queue[i];
1344 rxq->queue[i] = NULL;
1345 return rxb;
1346 }
1347
1348 /* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */
1349 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1350 vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
1351 else
1352 vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
1353
1354 if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
1355 goto out_err;
1356
1357 rxb = trans_pcie->global_table[vid - 1];
1358 if (rxb->invalid)
1359 goto out_err;
1360
1361 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1362 rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;
1363
1364 rxb->invalid = true;
1365
1366 return rxb;
1367
1368out_err:
1369 WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
1370 iwl_force_nmi(trans);
1371 return NULL;
1372}
1373
1239/* 1374/*
1240 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw 1375 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
1241 */ 1376 */
@@ -1250,7 +1385,7 @@ restart:
1250 spin_lock(&rxq->lock); 1385 spin_lock(&rxq->lock);
1251 /* uCode's read index (stored in shared DRAM) indicates the last Rx 1386 /* uCode's read index (stored in shared DRAM) indicates the last Rx
1252 * buffer that the driver may process (last buffer filled by ucode). */ 1387 * buffer that the driver may process (last buffer filled by ucode). */
1253 r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; 1388 r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
1254 i = rxq->read; 1389 i = rxq->read;
1255 1390
1256 /* W/A 9000 device step A0 wrap-around bug */ 1391 /* W/A 9000 device step A0 wrap-around bug */
@@ -1266,30 +1401,9 @@ restart:
1266 if (unlikely(rxq->used_count == rxq->queue_size / 2)) 1401 if (unlikely(rxq->used_count == rxq->queue_size / 2))
1267 emergency = true; 1402 emergency = true;
1268 1403
1269 if (trans->cfg->mq_rx_supported) { 1404 rxb = iwl_pcie_get_rxb(trans, rxq, i);
1270 /* 1405 if (!rxb)
1271 * used_bd is a 32 bit but only 12 are used to retrieve 1406 goto out;
1272 * the vid
1273 */
1274 u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;
1275
1276 if (WARN(!vid ||
1277 vid > ARRAY_SIZE(trans_pcie->global_table),
1278 "Invalid rxb index from HW %u\n", (u32)vid)) {
1279 iwl_force_nmi(trans);
1280 goto out;
1281 }
1282 rxb = trans_pcie->global_table[vid - 1];
1283 if (WARN(rxb->invalid,
1284 "Invalid rxb from HW %u\n", (u32)vid)) {
1285 iwl_force_nmi(trans);
1286 goto out;
1287 }
1288 rxb->invalid = true;
1289 } else {
1290 rxb = rxq->queue[i];
1291 rxq->queue[i] = NULL;
1292 }
1293 1407
1294 IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); 1408 IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
1295 iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency); 1409 iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
@@ -1331,6 +1445,9 @@ restart:
1331out: 1445out:
1332 /* Backtrack one entry */ 1446 /* Backtrack one entry */
1333 rxq->read = i; 1447 rxq->read = i;
1448 /* update cr tail with the rxq read pointer */
1449 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1450 *rxq->cr_tail = cpu_to_le16(r);
1334 spin_unlock(&rxq->lock); 1451 spin_unlock(&rxq->lock);
1335 1452
1336 /* 1453 /*
@@ -1362,20 +1479,6 @@ static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
1362 return container_of(entries, struct iwl_trans_pcie, msix_entries[0]); 1479 return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
1363} 1480}
1364 1481
1365static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
1366 struct msix_entry *entry)
1367{
1368 /*
1369 * Before sending the interrupt the HW disables it to prevent
1370 * a nested interrupt. This is done by writing 1 to the corresponding
1371 * bit in the mask register. After handling the interrupt, it should be
1372 * re-enabled by clearing this bit. This register is defined as
1373 * write 1 clear (W1C) register, meaning that it's being clear
1374 * by writing 1 to the bit.
1375 */
1376 iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
1377}
1378
1379/* 1482/*
1380 * iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw 1483 * iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw
1381 * This interrupt handler should be used with RSS queue only. 1484 * This interrupt handler should be used with RSS queue only.
@@ -1970,7 +2073,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
1970 2073
1971 /* Error detected by uCode */ 2074 /* Error detected by uCode */
1972 if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || 2075 if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
1973 (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) { 2076 (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
2077 (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
1974 IWL_ERR(trans, 2078 IWL_ERR(trans,
1975 "Microcode SW error detected. Restarting 0x%X.\n", 2079 "Microcode SW error detected. Restarting 0x%X.\n",
1976 inta_fh); 2080 inta_fh);
@@ -1995,8 +2099,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
1995 } 2099 }
1996 } 2100 }
1997 2101
1998 /* uCode wakes up after power-down sleep */ 2102 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 &&
1999 if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) { 2103 inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
2104 /* Reflect IML transfer status */
2105 int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
2106
2107 IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
2108 if (res == IWL_IMAGE_RESP_FAIL) {
2109 isr_stats->sw++;
2110 iwl_pcie_irq_handle_error(trans);
2111 }
2112 } else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
2113 /* uCode wakes up after power-down sleep */
2000 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); 2114 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
2001 iwl_pcie_rxq_check_wrptr(trans); 2115 iwl_pcie_rxq_check_wrptr(trans);
2002 iwl_pcie_txq_check_wrptrs(trans); 2116 iwl_pcie_txq_check_wrptrs(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index b8e8dac2895d..2bc67219ed3e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -53,6 +53,7 @@
53#include "iwl-trans.h" 53#include "iwl-trans.h"
54#include "iwl-prph.h" 54#include "iwl-prph.h"
55#include "iwl-context-info.h" 55#include "iwl-context-info.h"
56#include "iwl-context-info-gen3.h"
56#include "internal.h" 57#include "internal.h"
57 58
58/* 59/*
@@ -188,7 +189,10 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
188 } 189 }
189 190
190 iwl_pcie_ctxt_info_free_paging(trans); 191 iwl_pcie_ctxt_info_free_paging(trans);
191 iwl_pcie_ctxt_info_free(trans); 192 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
193 iwl_pcie_ctxt_info_gen3_free(trans);
194 else
195 iwl_pcie_ctxt_info_free(trans);
192 196
193 /* Make sure (redundant) we've released our request to stay awake */ 197 /* Make sure (redundant) we've released our request to stay awake */
194 iwl_clear_bit(trans, CSR_GP_CNTRL, 198 iwl_clear_bit(trans, CSR_GP_CNTRL,
@@ -346,7 +350,10 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
346 goto out; 350 goto out;
347 } 351 }
348 352
349 ret = iwl_pcie_ctxt_info_init(trans, fw); 353 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
354 ret = iwl_pcie_ctxt_info_gen3_init(trans, fw);
355 else
356 ret = iwl_pcie_ctxt_info_init(trans, fw);
350 if (ret) 357 if (ret)
351 goto out; 358 goto out;
352 359
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 7229991ae70d..7d319b6863fe 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -84,6 +84,7 @@
84#include "iwl-scd.h" 84#include "iwl-scd.h"
85#include "iwl-agn-hw.h" 85#include "iwl-agn-hw.h"
86#include "fw/error-dump.h" 86#include "fw/error-dump.h"
87#include "fw/dbg.h"
87#include "internal.h" 88#include "internal.h"
88#include "iwl-fh.h" 89#include "iwl-fh.h"
89 90
@@ -203,7 +204,7 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
203 trans_pcie->fw_mon_size = 0; 204 trans_pcie->fw_mon_size = 0;
204} 205}
205 206
206static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power) 207void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
207{ 208{
208 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 209 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
209 struct page *page = NULL; 210 struct page *page = NULL;
@@ -1132,21 +1133,44 @@ static struct iwl_causes_list causes_list[] = {
1132 {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E}, 1133 {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
1133}; 1134};
1134 1135
1136static struct iwl_causes_list causes_list_v2[] = {
1137 {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
1138 {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
1139 {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
1140 {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
1141 {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
1142 {MSIX_HW_INT_CAUSES_REG_IPC, CSR_MSIX_HW_INT_MASK_AD, 0x11},
1143 {MSIX_HW_INT_CAUSES_REG_SW_ERR_V2, CSR_MSIX_HW_INT_MASK_AD, 0x15},
1144 {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
1145 {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
1146 {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
1147 {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
1148 {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
1149 {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
1150 {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
1151};
1152
1135static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans) 1153static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
1136{ 1154{
1137 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1155 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1138 int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE; 1156 int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
1139 int i; 1157 int i, arr_size =
1158 (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ?
1159 ARRAY_SIZE(causes_list) : ARRAY_SIZE(causes_list_v2);
1140 1160
1141 /* 1161 /*
1142 * Access all non RX causes and map them to the default irq. 1162 * Access all non RX causes and map them to the default irq.
1143 * In case we are missing at least one interrupt vector, 1163 * In case we are missing at least one interrupt vector,
1144 * the first interrupt vector will serve non-RX and FBQ causes. 1164 * the first interrupt vector will serve non-RX and FBQ causes.
1145 */ 1165 */
1146 for (i = 0; i < ARRAY_SIZE(causes_list); i++) { 1166 for (i = 0; i < arr_size; i++) {
1147 iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val); 1167 struct iwl_causes_list *causes =
1148 iwl_clear_bit(trans, causes_list[i].mask_reg, 1168 (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ?
1149 causes_list[i].cause_num); 1169 causes_list : causes_list_v2;
1170
1171 iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
1172 iwl_clear_bit(trans, causes[i].mask_reg,
1173 causes[i].cause_num);
1150 } 1174 }
1151} 1175}
1152 1176
@@ -1539,18 +1563,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
1539 1563
1540 iwl_pcie_enable_rx_wake(trans, true); 1564 iwl_pcie_enable_rx_wake(trans, true);
1541 1565
1542 /*
1543 * Reconfigure IVAR table in case of MSIX or reset ict table in
1544 * MSI mode since HW reset erased it.
1545 * Also enables interrupts - none will happen as
1546 * the device doesn't know we're waking it up, only when
1547 * the opmode actually tells it after this call.
1548 */
1549 iwl_pcie_conf_msix_hw(trans_pcie);
1550 if (!trans_pcie->msix_enabled)
1551 iwl_pcie_reset_ict(trans);
1552 iwl_enable_interrupts(trans);
1553
1554 iwl_set_bit(trans, CSR_GP_CNTRL, 1566 iwl_set_bit(trans, CSR_GP_CNTRL,
1555 BIT(trans->cfg->csr->flag_mac_access_req)); 1567 BIT(trans->cfg->csr->flag_mac_access_req));
1556 iwl_set_bit(trans, CSR_GP_CNTRL, 1568 iwl_set_bit(trans, CSR_GP_CNTRL,
@@ -1568,6 +1580,18 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
1568 return ret; 1580 return ret;
1569 } 1581 }
1570 1582
1583 /*
1584 * Reconfigure IVAR table in case of MSIX or reset ict table in
1585 * MSI mode since HW reset erased it.
1586 * Also enables interrupts - none will happen as
1587 * the device doesn't know we're waking it up, only when
1588 * the opmode actually tells it after this call.
1589 */
1590 iwl_pcie_conf_msix_hw(trans_pcie);
1591 if (!trans_pcie->msix_enabled)
1592 iwl_pcie_reset_ict(trans);
1593 iwl_enable_interrupts(trans);
1594
1571 iwl_pcie_set_pwr(trans, false); 1595 iwl_pcie_set_pwr(trans, false);
1572 1596
1573 if (!reset) { 1597 if (!reset) {
@@ -1685,29 +1709,6 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
1685 } 1709 }
1686} 1710}
1687 1711
1688static const char *queue_name(struct device *dev,
1689 struct iwl_trans_pcie *trans_p, int i)
1690{
1691 if (trans_p->shared_vec_mask) {
1692 int vec = trans_p->shared_vec_mask &
1693 IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
1694
1695 if (i == 0)
1696 return DRV_NAME ": shared IRQ";
1697
1698 return devm_kasprintf(dev, GFP_KERNEL,
1699 DRV_NAME ": queue %d", i + vec);
1700 }
1701 if (i == 0)
1702 return DRV_NAME ": default queue";
1703
1704 if (i == trans_p->alloc_vecs - 1)
1705 return DRV_NAME ": exception";
1706
1707 return devm_kasprintf(dev, GFP_KERNEL,
1708 DRV_NAME ": queue %d", i);
1709}
1710
1711static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, 1712static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
1712 struct iwl_trans_pcie *trans_pcie) 1713 struct iwl_trans_pcie *trans_pcie)
1713{ 1714{
@@ -2236,12 +2237,28 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
2236 jiffies_to_msecs(txq->wd_timeout), 2237 jiffies_to_msecs(txq->wd_timeout),
2237 txq->read_ptr, txq->write_ptr, 2238 txq->read_ptr, txq->write_ptr,
2238 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & 2239 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
2239 (TFD_QUEUE_SIZE_MAX - 1), 2240 (trans->cfg->base_params->max_tfd_queue_size - 1),
2240 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & 2241 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
2241 (TFD_QUEUE_SIZE_MAX - 1), 2242 (trans->cfg->base_params->max_tfd_queue_size - 1),
2242 iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); 2243 iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
2243} 2244}
2244 2245
2246static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
2247 struct iwl_trans_rxq_dma_data *data)
2248{
2249 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2250
2251 if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
2252 return -EINVAL;
2253
2254 data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
2255 data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
2256 data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
2257 data->fr_bd_wid = 0;
2258
2259 return 0;
2260}
2261
2245static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) 2262static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
2246{ 2263{
2247 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2264 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2522,10 +2539,11 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
2522 pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n", 2539 pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
2523 rxq->free_count); 2540 rxq->free_count);
2524 if (rxq->rb_stts) { 2541 if (rxq->rb_stts) {
2542 u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans,
2543 rxq));
2525 pos += scnprintf(buf + pos, bufsz - pos, 2544 pos += scnprintf(buf + pos, bufsz - pos,
2526 "\tclosed_rb_num: %u\n", 2545 "\tclosed_rb_num: %u\n",
2527 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 2546 r & 0x0FFF);
2528 0x0FFF);
2529 } else { 2547 } else {
2530 pos += scnprintf(buf + pos, bufsz - pos, 2548 pos += scnprintf(buf + pos, bufsz - pos,
2531 "\tclosed_rb_num: Not Allocated\n"); 2549 "\tclosed_rb_num: Not Allocated\n");
@@ -2731,7 +2749,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
2731 2749
2732 spin_lock(&rxq->lock); 2750 spin_lock(&rxq->lock);
2733 2751
2734 r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; 2752 r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
2735 2753
2736 for (i = rxq->read, j = 0; 2754 for (i = rxq->read, j = 0;
2737 i != r && j < allocated_rb_nums; 2755 i != r && j < allocated_rb_nums;
@@ -2934,11 +2952,12 @@ static struct iwl_trans_dump_data
2934 struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue]; 2952 struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
2935 struct iwl_fw_error_dump_txcmd *txcmd; 2953 struct iwl_fw_error_dump_txcmd *txcmd;
2936 struct iwl_trans_dump_data *dump_data; 2954 struct iwl_trans_dump_data *dump_data;
2937 u32 len, num_rbs; 2955 u32 len, num_rbs = 0;
2938 u32 monitor_len; 2956 u32 monitor_len;
2939 int i, ptr; 2957 int i, ptr;
2940 bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && 2958 bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
2941 !trans->cfg->mq_rx_supported; 2959 !trans->cfg->mq_rx_supported &&
2960 trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
2942 2961
2943 /* transport dump header */ 2962 /* transport dump header */
2944 len = sizeof(*dump_data); 2963 len = sizeof(*dump_data);
@@ -2990,6 +3009,10 @@ static struct iwl_trans_dump_data
2990 } 3009 }
2991 3010
2992 if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) { 3011 if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
3012 if (!(trans->dbg_dump_mask &
3013 BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)))
3014 return NULL;
3015
2993 dump_data = vzalloc(len); 3016 dump_data = vzalloc(len);
2994 if (!dump_data) 3017 if (!dump_data)
2995 return NULL; 3018 return NULL;
@@ -3002,22 +3025,28 @@ static struct iwl_trans_dump_data
3002 } 3025 }
3003 3026
3004 /* CSR registers */ 3027 /* CSR registers */
3005 len += sizeof(*data) + IWL_CSR_TO_DUMP; 3028 if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
3029 len += sizeof(*data) + IWL_CSR_TO_DUMP;
3006 3030
3007 /* FH registers */ 3031 /* FH registers */
3008 if (trans->cfg->gen2) 3032 if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
3009 len += sizeof(*data) + 3033 if (trans->cfg->gen2)
3010 (FH_MEM_UPPER_BOUND_GEN2 - FH_MEM_LOWER_BOUND_GEN2); 3034 len += sizeof(*data) +
3011 else 3035 (FH_MEM_UPPER_BOUND_GEN2 -
3012 len += sizeof(*data) + 3036 FH_MEM_LOWER_BOUND_GEN2);
3013 (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND); 3037 else
3038 len += sizeof(*data) +
3039 (FH_MEM_UPPER_BOUND -
3040 FH_MEM_LOWER_BOUND);
3041 }
3014 3042
3015 if (dump_rbs) { 3043 if (dump_rbs) {
3016 /* Dump RBs is supported only for pre-9000 devices (1 queue) */ 3044 /* Dump RBs is supported only for pre-9000 devices (1 queue) */
3017 struct iwl_rxq *rxq = &trans_pcie->rxq[0]; 3045 struct iwl_rxq *rxq = &trans_pcie->rxq[0];
3018 /* RBs */ 3046 /* RBs */
3019 num_rbs = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) 3047 num_rbs =
3020 & 0x0FFF; 3048 le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
3049 & 0x0FFF;
3021 num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; 3050 num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
3022 len += num_rbs * (sizeof(*data) + 3051 len += num_rbs * (sizeof(*data) +
3023 sizeof(struct iwl_fw_error_dump_rb) + 3052 sizeof(struct iwl_fw_error_dump_rb) +
@@ -3025,7 +3054,8 @@ static struct iwl_trans_dump_data
3025 } 3054 }
3026 3055
3027 /* Paged memory for gen2 HW */ 3056 /* Paged memory for gen2 HW */
3028 if (trans->cfg->gen2) 3057 if (trans->cfg->gen2 &&
3058 trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
3029 for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) 3059 for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++)
3030 len += sizeof(*data) + 3060 len += sizeof(*data) +
3031 sizeof(struct iwl_fw_error_dump_paging) + 3061 sizeof(struct iwl_fw_error_dump_paging) +
@@ -3037,41 +3067,51 @@ static struct iwl_trans_dump_data
3037 3067
3038 len = 0; 3068 len = 0;
3039 data = (void *)dump_data->data; 3069 data = (void *)dump_data->data;
3040 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); 3070
3041 txcmd = (void *)data->data; 3071 if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) {
3042 spin_lock_bh(&cmdq->lock); 3072 u16 tfd_size = trans_pcie->tfd_size;
3043 ptr = cmdq->write_ptr; 3073
3044 for (i = 0; i < cmdq->n_window; i++) { 3074 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
3045 u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr); 3075 txcmd = (void *)data->data;
3046 u32 caplen, cmdlen; 3076 spin_lock_bh(&cmdq->lock);
3047 3077 ptr = cmdq->write_ptr;
3048 cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds + 3078 for (i = 0; i < cmdq->n_window; i++) {
3049 trans_pcie->tfd_size * ptr); 3079 u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
3050 caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen); 3080 u32 caplen, cmdlen;
3051 3081
3052 if (cmdlen) { 3082 cmdlen = iwl_trans_pcie_get_cmdlen(trans,
3053 len += sizeof(*txcmd) + caplen; 3083 cmdq->tfds +
3054 txcmd->cmdlen = cpu_to_le32(cmdlen); 3084 tfd_size * ptr);
3055 txcmd->caplen = cpu_to_le32(caplen); 3085 caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
3056 memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen); 3086
3057 txcmd = (void *)((u8 *)txcmd->data + caplen); 3087 if (cmdlen) {
3088 len += sizeof(*txcmd) + caplen;
3089 txcmd->cmdlen = cpu_to_le32(cmdlen);
3090 txcmd->caplen = cpu_to_le32(caplen);
3091 memcpy(txcmd->data, cmdq->entries[idx].cmd,
3092 caplen);
3093 txcmd = (void *)((u8 *)txcmd->data + caplen);
3094 }
3095
3096 ptr = iwl_queue_dec_wrap(trans, ptr);
3058 } 3097 }
3098 spin_unlock_bh(&cmdq->lock);
3059 3099
3060 ptr = iwl_queue_dec_wrap(ptr); 3100 data->len = cpu_to_le32(len);
3101 len += sizeof(*data);
3102 data = iwl_fw_error_next_data(data);
3061 } 3103 }
3062 spin_unlock_bh(&cmdq->lock);
3063 3104
3064 data->len = cpu_to_le32(len); 3105 if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
3065 len += sizeof(*data); 3106 len += iwl_trans_pcie_dump_csr(trans, &data);
3066 data = iwl_fw_error_next_data(data); 3107 if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
3067 3108 len += iwl_trans_pcie_fh_regs_dump(trans, &data);
3068 len += iwl_trans_pcie_dump_csr(trans, &data);
3069 len += iwl_trans_pcie_fh_regs_dump(trans, &data);
3070 if (dump_rbs) 3109 if (dump_rbs)
3071 len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); 3110 len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
3072 3111
3073 /* Paged memory for gen2 HW */ 3112 /* Paged memory for gen2 HW */
3074 if (trans->cfg->gen2) { 3113 if (trans->cfg->gen2 &&
3114 trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
3075 for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) { 3115 for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) {
3076 struct iwl_fw_error_dump_paging *paging; 3116 struct iwl_fw_error_dump_paging *paging;
3077 dma_addr_t addr = 3117 dma_addr_t addr =
@@ -3091,8 +3131,8 @@ static struct iwl_trans_dump_data
3091 len += sizeof(*data) + sizeof(*paging) + page_len; 3131 len += sizeof(*data) + sizeof(*paging) + page_len;
3092 } 3132 }
3093 } 3133 }
3094 3134 if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
3095 len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); 3135 len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
3096 3136
3097 dump_data->len = len; 3137 dump_data->len = len;
3098 3138
@@ -3187,6 +3227,7 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
3187 .txq_alloc = iwl_trans_pcie_dyn_txq_alloc, 3227 .txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
3188 .txq_free = iwl_trans_pcie_dyn_txq_free, 3228 .txq_free = iwl_trans_pcie_dyn_txq_free,
3189 .wait_txq_empty = iwl_trans_pcie_wait_txq_empty, 3229 .wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
3230 .rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
3190}; 3231};
3191 3232
3192struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, 3233struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -3349,14 +3390,26 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
3349 3390
3350#if IS_ENABLED(CONFIG_IWLMVM) 3391#if IS_ENABLED(CONFIG_IWLMVM)
3351 trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID); 3392 trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
3352 if (trans->hw_rf_id == CSR_HW_RF_ID_TYPE_HR) { 3393
3394 if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3395 CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
3353 u32 hw_status; 3396 u32 hw_status;
3354 3397
3355 hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS); 3398 hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
3356 if (hw_status & UMAG_GEN_HW_IS_FPGA) 3399 if (CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_B_STEP)
3357 trans->cfg = &iwl22000_2ax_cfg_qnj_hr_f0; 3400 /*
3358 else 3401 * b step fw is the same for physical card and fpga
3402 */
3403 trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
3404 else if ((hw_status & UMAG_GEN_HW_IS_FPGA) &&
3405 CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_A_STEP) {
3406 trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
3407 } else {
3408 /*
3409 * a step no FPGA
3410 */
3359 trans->cfg = &iwl22000_2ac_cfg_hr; 3411 trans->cfg = &iwl22000_2ac_cfg_hr;
3412 }
3360 } 3413 }
3361#endif 3414#endif
3362 3415
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 48890a1c825f..b99f33ff9123 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -6,6 +6,7 @@
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2017 Intel Deutschland GmbH 8 * Copyright(c) 2017 Intel Deutschland GmbH
9 * Copyright(c) 2018 Intel Corporation
9 * 10 *
10 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 12 * it under the terms of version 2 of the GNU General Public License as
@@ -19,6 +20,7 @@
19 * BSD LICENSE 20 * BSD LICENSE
20 * 21 *
21 * Copyright(c) 2017 Intel Deutschland GmbH 22 * Copyright(c) 2017 Intel Deutschland GmbH
23 * Copyright(c) 2018 Intel Corporation
22 * All rights reserved. 24 * All rights reserved.
23 * 25 *
24 * Redistribution and use in source and binary forms, with or without 26 * Redistribution and use in source and binary forms, with or without
@@ -50,6 +52,7 @@
50 *****************************************************************************/ 52 *****************************************************************************/
51#include <linux/pm_runtime.h> 53#include <linux/pm_runtime.h>
52#include <net/tso.h> 54#include <net/tso.h>
55#include <linux/tcp.h>
53 56
54#include "iwl-debug.h" 57#include "iwl-debug.h"
55#include "iwl-csr.h" 58#include "iwl-csr.h"
@@ -84,16 +87,20 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
84/* 87/*
85 * iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array 88 * iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array
86 */ 89 */
87static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt, 90static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
91 struct iwl_txq *txq, u16 byte_cnt,
88 int num_tbs) 92 int num_tbs)
89{ 93{
90 struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; 94 struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
95 struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
96 struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
91 int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); 97 int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
92 u8 filled_tfd_size, num_fetch_chunks; 98 u8 filled_tfd_size, num_fetch_chunks;
93 u16 len = byte_cnt; 99 u16 len = byte_cnt;
94 __le16 bc_ent; 100 __le16 bc_ent;
95 101
96 len = DIV_ROUND_UP(len, 4); 102 if (trans_pcie->bc_table_dword)
103 len = DIV_ROUND_UP(len, 4);
97 104
98 if (WARN_ON(len > 0xFFF || idx >= txq->n_window)) 105 if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
99 return; 106 return;
@@ -111,7 +118,10 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
111 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; 118 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
112 119
113 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); 120 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
114 scd_bc_tbl->tfd_offset[idx] = bc_ent; 121 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
122 scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
123 else
124 scd_bc_tbl->tfd_offset[idx] = bc_ent;
115} 125}
116 126
117/* 127/*
@@ -355,52 +365,89 @@ out_err:
355 return -EINVAL; 365 return -EINVAL;
356} 366}
357 367
358static 368static struct
359struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, 369iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
360 struct iwl_txq *txq, 370 struct iwl_txq *txq,
361 struct iwl_device_cmd *dev_cmd, 371 struct iwl_device_cmd *dev_cmd,
362 struct sk_buff *skb, 372 struct sk_buff *skb,
363 struct iwl_cmd_meta *out_meta) 373 struct iwl_cmd_meta *out_meta,
374 int hdr_len,
375 int tx_cmd_len)
364{ 376{
365 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
366 int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); 377 int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
367 struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx); 378 struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
368 dma_addr_t tb_phys; 379 dma_addr_t tb_phys;
369 bool amsdu; 380 int len;
370 int i, len, tb1_len, tb2_len, hdr_len;
371 void *tb1_addr; 381 void *tb1_addr;
372 382
373 memset(tfd, 0, sizeof(*tfd)); 383 tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
374 384
375 amsdu = ieee80211_is_data_qos(hdr->frame_control) && 385 iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
376 (*ieee80211_get_qos_ctl(hdr) & 386
377 IEEE80211_QOS_CTL_A_MSDU_PRESENT); 387 /*
388 * The second TB (tb1) points to the remainder of the TX command
389 * and the 802.11 header - dword aligned size
390 * (This calculation modifies the TX command, so do it before the
391 * setup of the first TB)
392 */
393 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
394 IWL_FIRST_TB_SIZE;
395
396 /* do not align A-MSDU to dword as the subframe header aligns it */
397
398 /* map the data for TB1 */
399 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
400 tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
401 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
402 goto out_err;
403 iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
404
405 if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
406 len + IWL_FIRST_TB_SIZE,
407 hdr_len, dev_cmd))
408 goto out_err;
409
410 /* building the A-MSDU might have changed this data, memcpy it now */
411 memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
412 return tfd;
413
414out_err:
415 iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
416 return NULL;
417}
418
419static struct
420iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
421 struct iwl_txq *txq,
422 struct iwl_device_cmd *dev_cmd,
423 struct sk_buff *skb,
424 struct iwl_cmd_meta *out_meta,
425 int hdr_len,
426 int tx_cmd_len)
427{
428 int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
429 struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
430 dma_addr_t tb_phys;
431 int i, len, tb1_len, tb2_len;
432 void *tb1_addr;
378 433
379 tb_phys = iwl_pcie_get_first_tb_dma(txq, idx); 434 tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
435
380 /* The first TB points to bi-directional DMA data */ 436 /* The first TB points to bi-directional DMA data */
381 if (!amsdu) 437 memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
382 memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
383 IWL_FIRST_TB_SIZE);
384 438
385 iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); 439 iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
386 440
387 /* there must be data left over for TB1 or this code must be changed */
388 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
389
390 /* 441 /*
391 * The second TB (tb1) points to the remainder of the TX command 442 * The second TB (tb1) points to the remainder of the TX command
392 * and the 802.11 header - dword aligned size 443 * and the 802.11 header - dword aligned size
393 * (This calculation modifies the TX command, so do it before the 444 * (This calculation modifies the TX command, so do it before the
394 * setup of the first TB) 445 * setup of the first TB)
395 */ 446 */
396 len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) + 447 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
397 ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE; 448 IWL_FIRST_TB_SIZE;
398 449
399 /* do not align A-MSDU to dword as the subframe header aligns it */ 450 tb1_len = ALIGN(len, 4);
400 if (amsdu)
401 tb1_len = len;
402 else
403 tb1_len = ALIGN(len, 4);
404 451
405 /* map the data for TB1 */ 452 /* map the data for TB1 */
406 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; 453 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
@@ -409,23 +456,6 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
409 goto out_err; 456 goto out_err;
410 iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len); 457 iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
411 458
412 hdr_len = ieee80211_hdrlen(hdr->frame_control);
413
414 if (amsdu) {
415 if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
416 tb1_len + IWL_FIRST_TB_SIZE,
417 hdr_len, dev_cmd))
418 goto out_err;
419
420 /*
421 * building the A-MSDU might have changed this data, so memcpy
422 * it now
423 */
424 memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
425 IWL_FIRST_TB_SIZE);
426 return tfd;
427 }
428
429 /* set up TFD's third entry to point to remainder of skb's head */ 459 /* set up TFD's third entry to point to remainder of skb's head */
430 tb2_len = skb_headlen(skb) - hdr_len; 460 tb2_len = skb_headlen(skb) - hdr_len;
431 461
@@ -467,13 +497,50 @@ out_err:
467 return NULL; 497 return NULL;
468} 498}
469 499
500static
501struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
502 struct iwl_txq *txq,
503 struct iwl_device_cmd *dev_cmd,
504 struct sk_buff *skb,
505 struct iwl_cmd_meta *out_meta)
506{
507 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
508 int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
509 struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
510 int len, hdr_len;
511 bool amsdu;
512
513 /* There must be data left over for TB1 or this code must be changed */
514 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
515
516 memset(tfd, 0, sizeof(*tfd));
517
518 if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
519 len = sizeof(struct iwl_tx_cmd_gen2);
520 else
521 len = sizeof(struct iwl_tx_cmd_gen3);
522
523 amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
524 (*ieee80211_get_qos_ctl(hdr) &
525 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
526
527 hdr_len = ieee80211_hdrlen(hdr->frame_control);
528
529 if (amsdu)
530 return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
531 out_meta, hdr_len, len);
532
533 return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
534 hdr_len, len);
535}
536
470int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, 537int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
471 struct iwl_device_cmd *dev_cmd, int txq_id) 538 struct iwl_device_cmd *dev_cmd, int txq_id)
472{ 539{
473 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 540 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
474 struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
475 struct iwl_cmd_meta *out_meta; 541 struct iwl_cmd_meta *out_meta;
476 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 542 struct iwl_txq *txq = trans_pcie->txq[txq_id];
543 u16 cmd_len;
477 int idx; 544 int idx;
478 void *tfd; 545 void *tfd;
479 546
@@ -488,11 +555,23 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
488 555
489 spin_lock(&txq->lock); 556 spin_lock(&txq->lock);
490 557
491 if (iwl_queue_space(txq) < txq->high_mark) { 558 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
559 struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
560 (void *)dev_cmd->payload;
561
562 cmd_len = le16_to_cpu(tx_cmd_gen3->len);
563 } else {
564 struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
565 (void *)dev_cmd->payload;
566
567 cmd_len = le16_to_cpu(tx_cmd_gen2->len);
568 }
569
570 if (iwl_queue_space(trans, txq) < txq->high_mark) {
492 iwl_stop_queue(trans, txq); 571 iwl_stop_queue(trans, txq);
493 572
494 /* don't put the packet on the ring, if there is no room */ 573 /* don't put the packet on the ring, if there is no room */
495 if (unlikely(iwl_queue_space(txq) < 3)) { 574 if (unlikely(iwl_queue_space(trans, txq) < 3)) {
496 struct iwl_device_cmd **dev_cmd_ptr; 575 struct iwl_device_cmd **dev_cmd_ptr;
497 576
498 dev_cmd_ptr = (void *)((u8 *)skb->cb + 577 dev_cmd_ptr = (void *)((u8 *)skb->cb +
@@ -526,7 +605,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
526 } 605 }
527 606
528 /* Set up entry for this TFD in Tx byte-count array */ 607 /* Set up entry for this TFD in Tx byte-count array */
529 iwl_pcie_gen2_update_byte_tbl(txq, le16_to_cpu(tx_cmd->len), 608 iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len,
530 iwl_pcie_gen2_get_num_tbs(trans, tfd)); 609 iwl_pcie_gen2_get_num_tbs(trans, tfd));
531 610
532 /* start timer if queue currently empty */ 611 /* start timer if queue currently empty */
@@ -538,7 +617,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
538 } 617 }
539 618
540 /* Tell device the write index *just past* this latest filled TFD */ 619 /* Tell device the write index *just past* this latest filled TFD */
541 txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr); 620 txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
542 iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq); 621 iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
543 /* 622 /*
544 * At this point the frame is "transmitted" successfully 623 * At this point the frame is "transmitted" successfully
@@ -650,7 +729,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
650 tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr); 729 tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
651 memset(tfd, 0, sizeof(*tfd)); 730 memset(tfd, 0, sizeof(*tfd));
652 731
653 if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { 732 if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
654 spin_unlock_bh(&txq->lock); 733 spin_unlock_bh(&txq->lock);
655 734
656 IWL_ERR(trans, "No space in command queue\n"); 735 IWL_ERR(trans, "No space in command queue\n");
@@ -787,7 +866,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
787 iwl_trans_ref(trans); 866 iwl_trans_ref(trans);
788 } 867 }
789 /* Increment and update queue's write index */ 868 /* Increment and update queue's write index */
790 txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr); 869 txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
791 iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq); 870 iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
792 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 871 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
793 872
@@ -954,7 +1033,7 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
954 iwl_pcie_free_tso_page(trans_pcie, skb); 1033 iwl_pcie_free_tso_page(trans_pcie, skb);
955 } 1034 }
956 iwl_pcie_gen2_free_tfd(trans, txq); 1035 iwl_pcie_gen2_free_tfd(trans, txq);
957 txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr); 1036 txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
958 1037
959 if (txq->read_ptr == txq->write_ptr) { 1038 if (txq->read_ptr == txq->write_ptr) {
960 unsigned long flags; 1039 unsigned long flags;
@@ -1062,6 +1141,9 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
1062 if (!txq) 1141 if (!txq)
1063 return -ENOMEM; 1142 return -ENOMEM;
1064 ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl, 1143 ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
1144 (trans->cfg->device_family >=
1145 IWL_DEVICE_FAMILY_22560) ?
1146 sizeof(struct iwl_gen3_bc_tbl) :
1065 sizeof(struct iwlagn_scd_bc_tbl)); 1147 sizeof(struct iwlagn_scd_bc_tbl));
1066 if (ret) { 1148 if (ret) {
1067 IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); 1149 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
@@ -1113,7 +1195,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
1113 1195
1114 txq->id = qid; 1196 txq->id = qid;
1115 trans_pcie->txq[qid] = txq; 1197 trans_pcie->txq[qid] = txq;
1116 wr_ptr &= (TFD_QUEUE_SIZE_MAX - 1); 1198 wr_ptr &= (trans->cfg->base_params->max_tfd_queue_size - 1);
1117 1199
1118 /* Place first TFD at index corresponding to start sequence number */ 1200 /* Place first TFD at index corresponding to start sequence number */
1119 txq->read_ptr = wr_ptr; 1201 txq->read_ptr = wr_ptr;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 473fe7ccb07c..93f0d387688a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -71,27 +71,28 @@
71 * 71 *
72 ***************************************************/ 72 ***************************************************/
73 73
74int iwl_queue_space(const struct iwl_txq *q) 74int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q)
75{ 75{
76 unsigned int max; 76 unsigned int max;
77 unsigned int used; 77 unsigned int used;
78 78
79 /* 79 /*
80 * To avoid ambiguity between empty and completely full queues, there 80 * To avoid ambiguity between empty and completely full queues, there
81 * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue. 81 * should always be less than max_tfd_queue_size elements in the queue.
82 * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need 82 * If q->n_window is smaller than max_tfd_queue_size, there is no need
83 * to reserve any queue entries for this purpose. 83 * to reserve any queue entries for this purpose.
84 */ 84 */
85 if (q->n_window < TFD_QUEUE_SIZE_MAX) 85 if (q->n_window < trans->cfg->base_params->max_tfd_queue_size)
86 max = q->n_window; 86 max = q->n_window;
87 else 87 else
88 max = TFD_QUEUE_SIZE_MAX - 1; 88 max = trans->cfg->base_params->max_tfd_queue_size - 1;
89 89
90 /* 90 /*
91 * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to 91 * max_tfd_queue_size is a power of 2, so the following is equivalent to
92 * modulo by TFD_QUEUE_SIZE_MAX and is well defined. 92 * modulo by max_tfd_queue_size and is well defined.
93 */ 93 */
94 used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1); 94 used = (q->write_ptr - q->read_ptr) &
95 (trans->cfg->base_params->max_tfd_queue_size - 1);
95 96
96 if (WARN_ON(used > max)) 97 if (WARN_ON(used > max))
97 return 0; 98 return 0;
@@ -489,7 +490,8 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
489 int slots_num, bool cmd_queue) 490 int slots_num, bool cmd_queue)
490{ 491{
491 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 492 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
492 size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX; 493 size_t tfd_sz = trans_pcie->tfd_size *
494 trans->cfg->base_params->max_tfd_queue_size;
493 size_t tb0_buf_sz; 495 size_t tb0_buf_sz;
494 int i; 496 int i;
495 497
@@ -555,12 +557,16 @@ int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
555 int slots_num, bool cmd_queue) 557 int slots_num, bool cmd_queue)
556{ 558{
557 int ret; 559 int ret;
560 u32 tfd_queue_max_size = trans->cfg->base_params->max_tfd_queue_size;
558 561
559 txq->need_update = false; 562 txq->need_update = false;
560 563
561 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 564 /* max_tfd_queue_size must be power-of-two size, otherwise
562 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ 565 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
563 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); 566 if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
567 "Max tfd queue size must be a power of two, but is %d",
568 tfd_queue_max_size))
569 return -EINVAL;
564 570
565 /* Initialize queue's high/low-water marks, and head/tail indexes */ 571 /* Initialize queue's high/low-water marks, and head/tail indexes */
566 ret = iwl_queue_init(txq, slots_num); 572 ret = iwl_queue_init(txq, slots_num);
@@ -637,7 +643,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
637 iwl_pcie_free_tso_page(trans_pcie, skb); 643 iwl_pcie_free_tso_page(trans_pcie, skb);
638 } 644 }
639 iwl_pcie_txq_free_tfd(trans, txq); 645 iwl_pcie_txq_free_tfd(trans, txq);
640 txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr); 646 txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
641 647
642 if (txq->read_ptr == txq->write_ptr) { 648 if (txq->read_ptr == txq->write_ptr) {
643 unsigned long flags; 649 unsigned long flags;
@@ -696,7 +702,8 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
696 /* De-alloc circular buffer of TFDs */ 702 /* De-alloc circular buffer of TFDs */
697 if (txq->tfds) { 703 if (txq->tfds) {
698 dma_free_coherent(dev, 704 dma_free_coherent(dev,
699 trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX, 705 trans_pcie->tfd_size *
706 trans->cfg->base_params->max_tfd_queue_size,
700 txq->tfds, txq->dma_addr); 707 txq->tfds, txq->dma_addr);
701 txq->dma_addr = 0; 708 txq->dma_addr = 0;
702 txq->tfds = NULL; 709 txq->tfds = NULL;
@@ -916,9 +923,11 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
916 int ret; 923 int ret;
917 int txq_id, slots_num; 924 int txq_id, slots_num;
918 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 925 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
926 u16 bc_tbls_size = trans->cfg->base_params->num_of_queues;
919 927
920 u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues * 928 bc_tbls_size *= (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
921 sizeof(struct iwlagn_scd_bc_tbl); 929 sizeof(struct iwl_gen3_bc_tbl) :
930 sizeof(struct iwlagn_scd_bc_tbl);
922 931
923 /*It is not allowed to alloc twice, so warn when this happens. 932 /*It is not allowed to alloc twice, so warn when this happens.
924 * We cannot rely on the previous allocation, so free and fail */ 933 * We cannot rely on the previous allocation, so free and fail */
@@ -928,7 +937,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
928 } 937 }
929 938
930 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls, 939 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
931 scd_bc_tbls_size); 940 bc_tbls_size);
932 if (ret) { 941 if (ret) {
933 IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); 942 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
934 goto error; 943 goto error;
@@ -1064,7 +1073,8 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1064{ 1073{
1065 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1074 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1066 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 1075 struct iwl_txq *txq = trans_pcie->txq[txq_id];
1067 int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1); 1076 int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
1077 int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
1068 int last_to_free; 1078 int last_to_free;
1069 1079
1070 /* This function is not meant to release cmd queue*/ 1080 /* This function is not meant to release cmd queue*/
@@ -1079,7 +1089,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1079 goto out; 1089 goto out;
1080 } 1090 }
1081 1091
1082 if (txq->read_ptr == tfd_num) 1092 if (read_ptr == tfd_num)
1083 goto out; 1093 goto out;
1084 1094
1085 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", 1095 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
@@ -1087,12 +1097,13 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1087 1097
1088 /*Since we free until index _not_ inclusive, the one before index is 1098 /*Since we free until index _not_ inclusive, the one before index is
1089 * the last we will free. This one must be used */ 1099 * the last we will free. This one must be used */
1090 last_to_free = iwl_queue_dec_wrap(tfd_num); 1100 last_to_free = iwl_queue_dec_wrap(trans, tfd_num);
1091 1101
1092 if (!iwl_queue_used(txq, last_to_free)) { 1102 if (!iwl_queue_used(txq, last_to_free)) {
1093 IWL_ERR(trans, 1103 IWL_ERR(trans,
1094 "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", 1104 "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
1095 __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX, 1105 __func__, txq_id, last_to_free,
1106 trans->cfg->base_params->max_tfd_queue_size,
1096 txq->write_ptr, txq->read_ptr); 1107 txq->write_ptr, txq->read_ptr);
1097 goto out; 1108 goto out;
1098 } 1109 }
@@ -1101,10 +1112,10 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1101 goto out; 1112 goto out;
1102 1113
1103 for (; 1114 for (;
1104 txq->read_ptr != tfd_num; 1115 read_ptr != tfd_num;
1105 txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) { 1116 txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr),
1106 int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr); 1117 read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) {
1107 struct sk_buff *skb = txq->entries[idx].skb; 1118 struct sk_buff *skb = txq->entries[read_ptr].skb;
1108 1119
1109 if (WARN_ON_ONCE(!skb)) 1120 if (WARN_ON_ONCE(!skb))
1110 continue; 1121 continue;
@@ -1113,7 +1124,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1113 1124
1114 __skb_queue_tail(skbs, skb); 1125 __skb_queue_tail(skbs, skb);
1115 1126
1116 txq->entries[idx].skb = NULL; 1127 txq->entries[read_ptr].skb = NULL;
1117 1128
1118 if (!trans->cfg->use_tfh) 1129 if (!trans->cfg->use_tfh)
1119 iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); 1130 iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
@@ -1123,7 +1134,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1123 1134
1124 iwl_pcie_txq_progress(txq); 1135 iwl_pcie_txq_progress(txq);
1125 1136
1126 if (iwl_queue_space(txq) > txq->low_mark && 1137 if (iwl_queue_space(trans, txq) > txq->low_mark &&
1127 test_bit(txq_id, trans_pcie->queue_stopped)) { 1138 test_bit(txq_id, trans_pcie->queue_stopped)) {
1128 struct sk_buff_head overflow_skbs; 1139 struct sk_buff_head overflow_skbs;
1129 1140
@@ -1155,7 +1166,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1155 } 1166 }
1156 spin_lock_bh(&txq->lock); 1167 spin_lock_bh(&txq->lock);
1157 1168
1158 if (iwl_queue_space(txq) > txq->low_mark) 1169 if (iwl_queue_space(trans, txq) > txq->low_mark)
1159 iwl_wake_queue(trans, txq); 1170 iwl_wake_queue(trans, txq);
1160 } 1171 }
1161 1172
@@ -1225,23 +1236,30 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
1225 struct iwl_txq *txq = trans_pcie->txq[txq_id]; 1236 struct iwl_txq *txq = trans_pcie->txq[txq_id];
1226 unsigned long flags; 1237 unsigned long flags;
1227 int nfreed = 0; 1238 int nfreed = 0;
1239 u16 r;
1228 1240
1229 lockdep_assert_held(&txq->lock); 1241 lockdep_assert_held(&txq->lock);
1230 1242
1231 if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) { 1243 idx = iwl_pcie_get_cmd_index(txq, idx);
1244 r = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
1245
1246 if (idx >= trans->cfg->base_params->max_tfd_queue_size ||
1247 (!iwl_queue_used(txq, idx))) {
1232 IWL_ERR(trans, 1248 IWL_ERR(trans,
1233 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", 1249 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
1234 __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX, 1250 __func__, txq_id, idx,
1251 trans->cfg->base_params->max_tfd_queue_size,
1235 txq->write_ptr, txq->read_ptr); 1252 txq->write_ptr, txq->read_ptr);
1236 return; 1253 return;
1237 } 1254 }
1238 1255
1239 for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx; 1256 for (idx = iwl_queue_inc_wrap(trans, idx); r != idx;
1240 txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) { 1257 r = iwl_queue_inc_wrap(trans, r)) {
1258 txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
1241 1259
1242 if (nfreed++ > 0) { 1260 if (nfreed++ > 0) {
1243 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", 1261 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
1244 idx, txq->write_ptr, txq->read_ptr); 1262 idx, txq->write_ptr, r);
1245 iwl_force_nmi(trans); 1263 iwl_force_nmi(trans);
1246 } 1264 }
1247 } 1265 }
@@ -1555,7 +1573,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1555 1573
1556 spin_lock_bh(&txq->lock); 1574 spin_lock_bh(&txq->lock);
1557 1575
1558 if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { 1576 if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
1559 spin_unlock_bh(&txq->lock); 1577 spin_unlock_bh(&txq->lock);
1560 1578
1561 IWL_ERR(trans, "No space in command queue\n"); 1579 IWL_ERR(trans, "No space in command queue\n");
@@ -1711,7 +1729,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1711 } 1729 }
1712 1730
1713 /* Increment and update queue's write index */ 1731 /* Increment and update queue's write index */
1714 txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr); 1732 txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
1715 iwl_pcie_txq_inc_wr_ptr(trans, txq); 1733 iwl_pcie_txq_inc_wr_ptr(trans, txq);
1716 1734
1717 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); 1735 spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -2311,11 +2329,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
2311 2329
2312 spin_lock(&txq->lock); 2330 spin_lock(&txq->lock);
2313 2331
2314 if (iwl_queue_space(txq) < txq->high_mark) { 2332 if (iwl_queue_space(trans, txq) < txq->high_mark) {
2315 iwl_stop_queue(trans, txq); 2333 iwl_stop_queue(trans, txq);
2316 2334
2317 /* don't put the packet on the ring, if there is no room */ 2335 /* don't put the packet on the ring, if there is no room */
2318 if (unlikely(iwl_queue_space(txq) < 3)) { 2336 if (unlikely(iwl_queue_space(trans, txq) < 3)) {
2319 struct iwl_device_cmd **dev_cmd_ptr; 2337 struct iwl_device_cmd **dev_cmd_ptr;
2320 2338
2321 dev_cmd_ptr = (void *)((u8 *)skb->cb + 2339 dev_cmd_ptr = (void *)((u8 *)skb->cb +
@@ -2444,7 +2462,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
2444 } 2462 }
2445 2463
2446 /* Tell device the write index *just past* this latest filled TFD */ 2464 /* Tell device the write index *just past* this latest filled TFD */
2447 txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr); 2465 txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
2448 if (!wait_write_ptr) 2466 if (!wait_write_ptr)
2449 iwl_pcie_txq_inc_wr_ptr(trans, txq); 2467 iwl_pcie_txq_inc_wr_ptr(trans, txq);
2450 2468
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c
index d1884b8913e7..0094b1d2b577 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ap.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c
@@ -66,7 +66,7 @@ static void prism2_send_mgmt(struct net_device *dev,
66#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ 66#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
67 67
68 68
69#ifndef PRISM2_NO_PROCFS_DEBUG 69#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
70static int ap_debug_proc_show(struct seq_file *m, void *v) 70static int ap_debug_proc_show(struct seq_file *m, void *v)
71{ 71{
72 struct ap_data *ap = PDE_DATA(file_inode(m->file)); 72 struct ap_data *ap = PDE_DATA(file_inode(m->file));
@@ -81,8 +81,7 @@ static int ap_debug_proc_show(struct seq_file *m, void *v)
81 seq_printf(m, "tx_drop_nonassoc=%u\n", ap->tx_drop_nonassoc); 81 seq_printf(m, "tx_drop_nonassoc=%u\n", ap->tx_drop_nonassoc);
82 return 0; 82 return 0;
83} 83}
84#endif /* PRISM2_NO_PROCFS_DEBUG */ 84#endif
85
86 85
87static void ap_sta_hash_add(struct ap_data *ap, struct sta_info *sta) 86static void ap_sta_hash_add(struct ap_data *ap, struct sta_info *sta)
88{ 87{
@@ -990,7 +989,7 @@ static void prism2_send_mgmt(struct net_device *dev,
990} 989}
991#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ 990#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
992 991
993 992#ifdef CONFIG_PROC_FS
994static int prism2_sta_proc_show(struct seq_file *m, void *v) 993static int prism2_sta_proc_show(struct seq_file *m, void *v)
995{ 994{
996 struct sta_info *sta = m->private; 995 struct sta_info *sta = m->private;
@@ -1059,6 +1058,7 @@ static int prism2_sta_proc_show(struct seq_file *m, void *v)
1059 1058
1060 return 0; 1059 return 0;
1061} 1060}
1061#endif
1062 1062
1063static void handle_add_proc_queue(struct work_struct *work) 1063static void handle_add_proc_queue(struct work_struct *work)
1064{ 1064{
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index 2720aa39f530..ad1aa65fee7f 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -151,13 +151,6 @@ static int prism2_get_ram_size(local_info_t *local);
151#define HFA384X_MAGIC 0x8A32 151#define HFA384X_MAGIC 0x8A32
152#endif 152#endif
153 153
154
155static u16 hfa384x_read_reg(struct net_device *dev, u16 reg)
156{
157 return HFA384X_INW(reg);
158}
159
160
161static void hfa384x_read_regs(struct net_device *dev, 154static void hfa384x_read_regs(struct net_device *dev,
162 struct hfa384x_regs *regs) 155 struct hfa384x_regs *regs)
163{ 156{
@@ -2897,7 +2890,12 @@ static void hostap_tick_timer(struct timer_list *t)
2897} 2890}
2898 2891
2899 2892
2900#ifndef PRISM2_NO_PROCFS_DEBUG 2893#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
2894static u16 hfa384x_read_reg(struct net_device *dev, u16 reg)
2895{
2896 return HFA384X_INW(reg);
2897}
2898
2901static int prism2_registers_proc_show(struct seq_file *m, void *v) 2899static int prism2_registers_proc_show(struct seq_file *m, void *v)
2902{ 2900{
2903 local_info_t *local = m->private; 2901 local_info_t *local = m->private;
@@ -2951,8 +2949,7 @@ static int prism2_registers_proc_show(struct seq_file *m, void *v)
2951 2949
2952 return 0; 2950 return 0;
2953} 2951}
2954#endif /* PRISM2_NO_PROCFS_DEBUG */ 2952#endif
2955
2956 2953
2957struct set_tim_data { 2954struct set_tim_data {
2958 struct list_head list; 2955 struct list_head list;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_proc.c b/drivers/net/wireless/intersil/hostap/hostap_proc.c
index 5b33ccab9188..703d74cea3c2 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_proc.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_proc.c
@@ -11,8 +11,7 @@
11 11
12#define PROC_LIMIT (PAGE_SIZE - 80) 12#define PROC_LIMIT (PAGE_SIZE - 80)
13 13
14 14#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
15#ifndef PRISM2_NO_PROCFS_DEBUG
16static int prism2_debug_proc_show(struct seq_file *m, void *v) 15static int prism2_debug_proc_show(struct seq_file *m, void *v)
17{ 16{
18 local_info_t *local = m->private; 17 local_info_t *local = m->private;
@@ -43,9 +42,9 @@ static int prism2_debug_proc_show(struct seq_file *m, void *v)
43 42
44 return 0; 43 return 0;
45} 44}
46#endif /* PRISM2_NO_PROCFS_DEBUG */ 45#endif
47
48 46
47#ifdef CONFIG_PROC_FS
49static int prism2_stats_proc_show(struct seq_file *m, void *v) 48static int prism2_stats_proc_show(struct seq_file *m, void *v)
50{ 49{
51 local_info_t *local = m->private; 50 local_info_t *local = m->private;
@@ -82,6 +81,7 @@ static int prism2_stats_proc_show(struct seq_file *m, void *v)
82 81
83 return 0; 82 return 0;
84} 83}
84#endif
85 85
86static int prism2_wds_proc_show(struct seq_file *m, void *v) 86static int prism2_wds_proc_show(struct seq_file *m, void *v)
87{ 87{
@@ -174,6 +174,7 @@ static const struct seq_operations prism2_bss_list_proc_seqops = {
174 .show = prism2_bss_list_proc_show, 174 .show = prism2_bss_list_proc_show,
175}; 175};
176 176
177#ifdef CONFIG_PROC_FS
177static int prism2_crypt_proc_show(struct seq_file *m, void *v) 178static int prism2_crypt_proc_show(struct seq_file *m, void *v)
178{ 179{
179 local_info_t *local = m->private; 180 local_info_t *local = m->private;
@@ -190,6 +191,7 @@ static int prism2_crypt_proc_show(struct seq_file *m, void *v)
190 } 191 }
191 return 0; 192 return 0;
192} 193}
194#endif
193 195
194static ssize_t prism2_pda_proc_read(struct file *file, char __user *buf, 196static ssize_t prism2_pda_proc_read(struct file *file, char __user *buf,
195 size_t count, loff_t *_pos) 197 size_t count, loff_t *_pos)
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 5d75c971004b..e2addd8b878b 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -696,10 +696,11 @@ void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
696 "Send delba to tid=%d, %pM\n", 696 "Send delba to tid=%d, %pM\n",
697 tid, rx_reor_tbl_ptr->ta); 697 tid, rx_reor_tbl_ptr->ta);
698 mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0); 698 mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0);
699 goto exit; 699 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
700 flags);
701 return;
700 } 702 }
701 } 703 }
702exit:
703 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); 704 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
704} 705}
705 706
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 7ab44cd32a9d..8e63d14c1e1c 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -103,6 +103,8 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
103 * There could be holes in the buffer, which are skipped by the function. 103 * There could be holes in the buffer, which are skipped by the function.
104 * Since the buffer is linear, the function uses rotation to simulate 104 * Since the buffer is linear, the function uses rotation to simulate
105 * circular buffer. 105 * circular buffer.
106 *
107 * The caller must hold rx_reorder_tbl_lock spinlock.
106 */ 108 */
107static void 109static void
108mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, 110mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
@@ -111,25 +113,21 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
111{ 113{
112 int pkt_to_send, i; 114 int pkt_to_send, i;
113 void *rx_tmp_ptr; 115 void *rx_tmp_ptr;
114 unsigned long flags;
115 116
116 pkt_to_send = (start_win > tbl->start_win) ? 117 pkt_to_send = (start_win > tbl->start_win) ?
117 min((start_win - tbl->start_win), tbl->win_size) : 118 min((start_win - tbl->start_win), tbl->win_size) :
118 tbl->win_size; 119 tbl->win_size;
119 120
120 for (i = 0; i < pkt_to_send; ++i) { 121 for (i = 0; i < pkt_to_send; ++i) {
121 spin_lock_irqsave(&priv->rx_pkt_lock, flags);
122 rx_tmp_ptr = NULL; 122 rx_tmp_ptr = NULL;
123 if (tbl->rx_reorder_ptr[i]) { 123 if (tbl->rx_reorder_ptr[i]) {
124 rx_tmp_ptr = tbl->rx_reorder_ptr[i]; 124 rx_tmp_ptr = tbl->rx_reorder_ptr[i];
125 tbl->rx_reorder_ptr[i] = NULL; 125 tbl->rx_reorder_ptr[i] = NULL;
126 } 126 }
127 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
128 if (rx_tmp_ptr) 127 if (rx_tmp_ptr)
129 mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr); 128 mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
130 } 129 }
131 130
132 spin_lock_irqsave(&priv->rx_pkt_lock, flags);
133 /* 131 /*
134 * We don't have a circular buffer, hence use rotation to simulate 132 * We don't have a circular buffer, hence use rotation to simulate
135 * circular buffer 133 * circular buffer
@@ -140,7 +138,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
140 } 138 }
141 139
142 tbl->start_win = start_win; 140 tbl->start_win = start_win;
143 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
144} 141}
145 142
146/* 143/*
@@ -150,6 +147,8 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
150 * The start window is adjusted automatically when a hole is located. 147 * The start window is adjusted automatically when a hole is located.
151 * Since the buffer is linear, the function uses rotation to simulate 148 * Since the buffer is linear, the function uses rotation to simulate
152 * circular buffer. 149 * circular buffer.
150 *
151 * The caller must hold rx_reorder_tbl_lock spinlock.
153 */ 152 */
154static void 153static void
155mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, 154mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
@@ -157,21 +156,15 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
157{ 156{
158 int i, j, xchg; 157 int i, j, xchg;
159 void *rx_tmp_ptr; 158 void *rx_tmp_ptr;
160 unsigned long flags;
161 159
162 for (i = 0; i < tbl->win_size; ++i) { 160 for (i = 0; i < tbl->win_size; ++i) {
163 spin_lock_irqsave(&priv->rx_pkt_lock, flags); 161 if (!tbl->rx_reorder_ptr[i])
164 if (!tbl->rx_reorder_ptr[i]) {
165 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
166 break; 162 break;
167 }
168 rx_tmp_ptr = tbl->rx_reorder_ptr[i]; 163 rx_tmp_ptr = tbl->rx_reorder_ptr[i];
169 tbl->rx_reorder_ptr[i] = NULL; 164 tbl->rx_reorder_ptr[i] = NULL;
170 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
171 mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr); 165 mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
172 } 166 }
173 167
174 spin_lock_irqsave(&priv->rx_pkt_lock, flags);
175 /* 168 /*
176 * We don't have a circular buffer, hence use rotation to simulate 169 * We don't have a circular buffer, hence use rotation to simulate
177 * circular buffer 170 * circular buffer
@@ -184,7 +177,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
184 } 177 }
185 } 178 }
186 tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1); 179 tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
187 spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
188} 180}
189 181
190/* 182/*
@@ -192,6 +184,8 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
192 * 184 *
193 * The function stops the associated timer and dispatches all the 185 * The function stops the associated timer and dispatches all the
194 * pending packets in the Rx reorder table before deletion. 186 * pending packets in the Rx reorder table before deletion.
187 *
188 * The caller must hold rx_reorder_tbl_lock spinlock.
195 */ 189 */
196static void 190static void
197mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv, 191mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
@@ -217,11 +211,7 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
217 211
218 del_timer_sync(&tbl->timer_context.timer); 212 del_timer_sync(&tbl->timer_context.timer);
219 tbl->timer_context.timer_is_set = false; 213 tbl->timer_context.timer_is_set = false;
220
221 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
222 list_del(&tbl->list); 214 list_del(&tbl->list);
223 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
224
225 kfree(tbl->rx_reorder_ptr); 215 kfree(tbl->rx_reorder_ptr);
226 kfree(tbl); 216 kfree(tbl);
227 217
@@ -234,22 +224,17 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
234/* 224/*
235 * This function returns the pointer to an entry in Rx reordering 225 * This function returns the pointer to an entry in Rx reordering
236 * table which matches the given TA/TID pair. 226 * table which matches the given TA/TID pair.
227 *
228 * The caller must hold rx_reorder_tbl_lock spinlock.
237 */ 229 */
238struct mwifiex_rx_reorder_tbl * 230struct mwifiex_rx_reorder_tbl *
239mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta) 231mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
240{ 232{
241 struct mwifiex_rx_reorder_tbl *tbl; 233 struct mwifiex_rx_reorder_tbl *tbl;
242 unsigned long flags;
243 234
244 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); 235 list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
245 list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) { 236 if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid)
246 if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
247 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
248 flags);
249 return tbl; 237 return tbl;
250 }
251 }
252 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
253 238
254 return NULL; 239 return NULL;
255} 240}
@@ -266,14 +251,9 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
266 return; 251 return;
267 252
268 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); 253 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
269 list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) { 254 list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list)
270 if (!memcmp(tbl->ta, ta, ETH_ALEN)) { 255 if (!memcmp(tbl->ta, ta, ETH_ALEN))
271 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
272 flags);
273 mwifiex_del_rx_reorder_entry(priv, tbl); 256 mwifiex_del_rx_reorder_entry(priv, tbl);
274 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
275 }
276 }
277 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); 257 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
278 258
279 return; 259 return;
@@ -282,24 +262,18 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
282/* 262/*
283 * This function finds the last sequence number used in the packets 263 * This function finds the last sequence number used in the packets
284 * buffered in Rx reordering table. 264 * buffered in Rx reordering table.
265 *
266 * The caller must hold rx_reorder_tbl_lock spinlock.
285 */ 267 */
286static int 268static int
287mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx) 269mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
288{ 270{
289 struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr; 271 struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
290 struct mwifiex_private *priv = ctx->priv;
291 unsigned long flags;
292 int i; 272 int i;
293 273
294 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); 274 for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i)
295 for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) { 275 if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
296 if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
297 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
298 flags);
299 return i; 276 return i;
300 }
301 }
302 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
303 277
304 return -1; 278 return -1;
305} 279}
@@ -317,17 +291,22 @@ mwifiex_flush_data(struct timer_list *t)
317 struct reorder_tmr_cnxt *ctx = 291 struct reorder_tmr_cnxt *ctx =
318 from_timer(ctx, t, timer); 292 from_timer(ctx, t, timer);
319 int start_win, seq_num; 293 int start_win, seq_num;
294 unsigned long flags;
320 295
321 ctx->timer_is_set = false; 296 ctx->timer_is_set = false;
297 spin_lock_irqsave(&ctx->priv->rx_reorder_tbl_lock, flags);
322 seq_num = mwifiex_11n_find_last_seq_num(ctx); 298 seq_num = mwifiex_11n_find_last_seq_num(ctx);
323 299
324 if (seq_num < 0) 300 if (seq_num < 0) {
301 spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
325 return; 302 return;
303 }
326 304
327 mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num); 305 mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
328 start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1); 306 start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
329 mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr, 307 mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
330 start_win); 308 start_win);
309 spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
331} 310}
332 311
333/* 312/*
@@ -354,11 +333,14 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
354 * If we get a TID, ta pair which is already present dispatch all the 333 * If we get a TID, ta pair which is already present dispatch all the
355 * the packets and move the window size until the ssn 334 * the packets and move the window size until the ssn
356 */ 335 */
336 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
357 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta); 337 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
358 if (tbl) { 338 if (tbl) {
359 mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num); 339 mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
340 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
360 return; 341 return;
361 } 342 }
343 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
362 /* if !tbl then create one */ 344 /* if !tbl then create one */
363 new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL); 345 new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
364 if (!new_node) 346 if (!new_node)
@@ -569,16 +551,20 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
569 int prev_start_win, start_win, end_win, win_size; 551 int prev_start_win, start_win, end_win, win_size;
570 u16 pkt_index; 552 u16 pkt_index;
571 bool init_window_shift = false; 553 bool init_window_shift = false;
554 unsigned long flags;
572 int ret = 0; 555 int ret = 0;
573 556
557 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
574 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta); 558 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
575 if (!tbl) { 559 if (!tbl) {
560 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
576 if (pkt_type != PKT_TYPE_BAR) 561 if (pkt_type != PKT_TYPE_BAR)
577 mwifiex_11n_dispatch_pkt(priv, payload); 562 mwifiex_11n_dispatch_pkt(priv, payload);
578 return ret; 563 return ret;
579 } 564 }
580 565
581 if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) { 566 if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
567 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
582 mwifiex_11n_dispatch_pkt(priv, payload); 568 mwifiex_11n_dispatch_pkt(priv, payload);
583 return ret; 569 return ret;
584 } 570 }
@@ -665,6 +651,8 @@ done:
665 if (!tbl->timer_context.timer_is_set || 651 if (!tbl->timer_context.timer_is_set ||
666 prev_start_win != tbl->start_win) 652 prev_start_win != tbl->start_win)
667 mwifiex_11n_rxreorder_timer_restart(tbl); 653 mwifiex_11n_rxreorder_timer_restart(tbl);
654
655 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
668 return ret; 656 return ret;
669} 657}
670 658
@@ -693,14 +681,18 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
693 peer_mac, tid, initiator); 681 peer_mac, tid, initiator);
694 682
695 if (cleanup_rx_reorder_tbl) { 683 if (cleanup_rx_reorder_tbl) {
684 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
696 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, 685 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
697 peer_mac); 686 peer_mac);
698 if (!tbl) { 687 if (!tbl) {
688 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
689 flags);
699 mwifiex_dbg(priv->adapter, EVENT, 690 mwifiex_dbg(priv->adapter, EVENT,
700 "event: TID, TA not found in table\n"); 691 "event: TID, TA not found in table\n");
701 return; 692 return;
702 } 693 }
703 mwifiex_del_rx_reorder_entry(priv, tbl); 694 mwifiex_del_rx_reorder_entry(priv, tbl);
695 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
704 } else { 696 } else {
705 ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac); 697 ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
706 if (!ptx_tbl) { 698 if (!ptx_tbl) {
@@ -734,6 +726,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
734 int tid, win_size; 726 int tid, win_size;
735 struct mwifiex_rx_reorder_tbl *tbl; 727 struct mwifiex_rx_reorder_tbl *tbl;
736 uint16_t block_ack_param_set; 728 uint16_t block_ack_param_set;
729 unsigned long flags;
737 730
738 block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set); 731 block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
739 732
@@ -747,17 +740,20 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
747 mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n", 740 mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
748 add_ba_rsp->peer_mac_addr, tid); 741 add_ba_rsp->peer_mac_addr, tid);
749 742
743 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
750 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, 744 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
751 add_ba_rsp->peer_mac_addr); 745 add_ba_rsp->peer_mac_addr);
752 if (tbl) 746 if (tbl)
753 mwifiex_del_rx_reorder_entry(priv, tbl); 747 mwifiex_del_rx_reorder_entry(priv, tbl);
754 748
749 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
755 return 0; 750 return 0;
756 } 751 }
757 752
758 win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) 753 win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
759 >> BLOCKACKPARAM_WINSIZE_POS; 754 >> BLOCKACKPARAM_WINSIZE_POS;
760 755
756 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
761 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, 757 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
762 add_ba_rsp->peer_mac_addr); 758 add_ba_rsp->peer_mac_addr);
763 if (tbl) { 759 if (tbl) {
@@ -768,6 +764,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
768 else 764 else
769 tbl->amsdu = false; 765 tbl->amsdu = false;
770 } 766 }
767 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
771 768
772 mwifiex_dbg(priv->adapter, CMD, 769 mwifiex_dbg(priv->adapter, CMD,
773 "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n", 770 "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
@@ -807,11 +804,8 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
807 804
808 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); 805 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
809 list_for_each_entry_safe(del_tbl_ptr, tmp_node, 806 list_for_each_entry_safe(del_tbl_ptr, tmp_node,
810 &priv->rx_reorder_tbl_ptr, list) { 807 &priv->rx_reorder_tbl_ptr, list)
811 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
812 mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr); 808 mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
813 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
814 }
815 INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr); 809 INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
816 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); 810 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
817 811
@@ -935,6 +929,7 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
935 int tlv_buf_left = len; 929 int tlv_buf_left = len;
936 int ret; 930 int ret;
937 u8 *tmp; 931 u8 *tmp;
932 unsigned long flags;
938 933
939 mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:", 934 mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
940 event_buf, len); 935 event_buf, len);
@@ -954,14 +949,18 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
954 tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num, 949 tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
955 tlv_bitmap_len); 950 tlv_bitmap_len);
956 951
952 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
957 rx_reor_tbl_ptr = 953 rx_reor_tbl_ptr =
958 mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid, 954 mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
959 tlv_rxba->mac); 955 tlv_rxba->mac);
960 if (!rx_reor_tbl_ptr) { 956 if (!rx_reor_tbl_ptr) {
957 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
958 flags);
961 mwifiex_dbg(priv->adapter, ERROR, 959 mwifiex_dbg(priv->adapter, ERROR,
962 "Can not find rx_reorder_tbl!"); 960 "Can not find rx_reorder_tbl!");
963 return; 961 return;
964 } 962 }
963 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
965 964
966 for (i = 0; i < tlv_bitmap_len; i++) { 965 for (i = 0; i < tlv_bitmap_len; i++) {
967 for (j = 0 ; j < 8; j++) { 966 for (j = 0 ; j < 8; j++) {
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index c02e02c17c9c..adc88433faa8 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -2322,7 +2322,8 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
2322 if (priv->scan_block) 2322 if (priv->scan_block)
2323 priv->scan_block = false; 2323 priv->scan_block = false;
2324 2324
2325 if (adapter->surprise_removed || adapter->is_cmd_timedout) { 2325 if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags) ||
2326 test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) {
2326 mwifiex_dbg(adapter, ERROR, 2327 mwifiex_dbg(adapter, ERROR,
2327 "%s: Ignore connection.\t" 2328 "%s: Ignore connection.\t"
2328 "Card removed or FW in bad state\n", 2329 "Card removed or FW in bad state\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 9cfcdf6bec52..60db2b969e20 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -372,7 +372,7 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
372 adapter->ps_state = PS_STATE_SLEEP_CFM; 372 adapter->ps_state = PS_STATE_SLEEP_CFM;
373 373
374 if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl) && 374 if (!le16_to_cpu(sleep_cfm_buf->resp_ctrl) &&
375 (adapter->is_hs_configured && 375 (test_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags) &&
376 !adapter->sleep_period.period)) { 376 !adapter->sleep_period.period)) {
377 adapter->pm_wakeup_card_req = true; 377 adapter->pm_wakeup_card_req = true;
378 mwifiex_hs_activated_event(mwifiex_get_priv 378 mwifiex_hs_activated_event(mwifiex_get_priv
@@ -564,25 +564,26 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
564 return -1; 564 return -1;
565 } 565 }
566 566
567 if (adapter->is_suspended) { 567 if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
568 mwifiex_dbg(adapter, ERROR, 568 mwifiex_dbg(adapter, ERROR,
569 "PREP_CMD: device in suspended state\n"); 569 "PREP_CMD: device in suspended state\n");
570 return -1; 570 return -1;
571 } 571 }
572 572
573 if (adapter->hs_enabling && cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) { 573 if (test_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags) &&
574 cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) {
574 mwifiex_dbg(adapter, ERROR, 575 mwifiex_dbg(adapter, ERROR,
575 "PREP_CMD: host entering sleep state\n"); 576 "PREP_CMD: host entering sleep state\n");
576 return -1; 577 return -1;
577 } 578 }
578 579
579 if (adapter->surprise_removed) { 580 if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) {
580 mwifiex_dbg(adapter, ERROR, 581 mwifiex_dbg(adapter, ERROR,
581 "PREP_CMD: card is removed\n"); 582 "PREP_CMD: card is removed\n");
582 return -1; 583 return -1;
583 } 584 }
584 585
585 if (adapter->is_cmd_timedout) { 586 if (test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) {
586 mwifiex_dbg(adapter, ERROR, 587 mwifiex_dbg(adapter, ERROR,
587 "PREP_CMD: FW is in bad state\n"); 588 "PREP_CMD: FW is in bad state\n");
588 return -1; 589 return -1;
@@ -789,7 +790,8 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
789 if (priv && (host_cmd->command != 790 if (priv && (host_cmd->command !=
790 cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH))) { 791 cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH))) {
791 if (adapter->hs_activated) { 792 if (adapter->hs_activated) {
792 adapter->is_hs_configured = false; 793 clear_bit(MWIFIEX_IS_HS_CONFIGURED,
794 &adapter->work_flags);
793 mwifiex_hs_activated_event(priv, false); 795 mwifiex_hs_activated_event(priv, false);
794 } 796 }
795 } 797 }
@@ -825,7 +827,7 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
825 return -1; 827 return -1;
826 } 828 }
827 829
828 adapter->is_cmd_timedout = 0; 830 clear_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags);
829 831
830 resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data; 832 resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
831 if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) { 833 if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
@@ -927,7 +929,7 @@ mwifiex_cmd_timeout_func(struct timer_list *t)
927 struct mwifiex_adapter *adapter = from_timer(adapter, t, cmd_timer); 929 struct mwifiex_adapter *adapter = from_timer(adapter, t, cmd_timer);
928 struct cmd_ctrl_node *cmd_node; 930 struct cmd_ctrl_node *cmd_node;
929 931
930 adapter->is_cmd_timedout = 1; 932 set_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags);
931 if (!adapter->curr_cmd) { 933 if (!adapter->curr_cmd) {
932 mwifiex_dbg(adapter, ERROR, 934 mwifiex_dbg(adapter, ERROR,
933 "cmd: empty curr_cmd\n"); 935 "cmd: empty curr_cmd\n");
@@ -953,7 +955,8 @@ mwifiex_cmd_timeout_func(struct timer_list *t)
953 955
954 mwifiex_dbg(adapter, MSG, 956 mwifiex_dbg(adapter, MSG,
955 "is_cmd_timedout = %d\n", 957 "is_cmd_timedout = %d\n",
956 adapter->is_cmd_timedout); 958 test_bit(MWIFIEX_IS_CMD_TIMEDOUT,
959 &adapter->work_flags));
957 mwifiex_dbg(adapter, MSG, 960 mwifiex_dbg(adapter, MSG,
958 "num_tx_timeout = %d\n", 961 "num_tx_timeout = %d\n",
959 adapter->dbg.num_tx_timeout); 962 adapter->dbg.num_tx_timeout);
@@ -1135,7 +1138,8 @@ void
1135mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated) 1138mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated)
1136{ 1139{
1137 if (activated) { 1140 if (activated) {
1138 if (priv->adapter->is_hs_configured) { 1141 if (test_bit(MWIFIEX_IS_HS_CONFIGURED,
1142 &priv->adapter->work_flags)) {
1139 priv->adapter->hs_activated = true; 1143 priv->adapter->hs_activated = true;
1140 mwifiex_update_rxreor_flags(priv->adapter, 1144 mwifiex_update_rxreor_flags(priv->adapter,
1141 RXREOR_FORCE_NO_DROP); 1145 RXREOR_FORCE_NO_DROP);
@@ -1186,11 +1190,11 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
1186 phs_cfg->params.hs_config.gap); 1190 phs_cfg->params.hs_config.gap);
1187 } 1191 }
1188 if (conditions != HS_CFG_CANCEL) { 1192 if (conditions != HS_CFG_CANCEL) {
1189 adapter->is_hs_configured = true; 1193 set_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
1190 if (adapter->iface_type == MWIFIEX_USB) 1194 if (adapter->iface_type == MWIFIEX_USB)
1191 mwifiex_hs_activated_event(priv, true); 1195 mwifiex_hs_activated_event(priv, true);
1192 } else { 1196 } else {
1193 adapter->is_hs_configured = false; 1197 clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
1194 if (adapter->hs_activated) 1198 if (adapter->hs_activated)
1195 mwifiex_hs_activated_event(priv, false); 1199 mwifiex_hs_activated_event(priv, false);
1196 } 1200 }
@@ -1212,8 +1216,8 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
1212 1216
1213 adapter->if_ops.wakeup(adapter); 1217 adapter->if_ops.wakeup(adapter);
1214 adapter->hs_activated = false; 1218 adapter->hs_activated = false;
1215 adapter->is_hs_configured = false; 1219 clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
1216 adapter->is_suspended = false; 1220 clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
1217 mwifiex_hs_activated_event(mwifiex_get_priv(adapter, 1221 mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
1218 MWIFIEX_BSS_ROLE_ANY), 1222 MWIFIEX_BSS_ROLE_ANY),
1219 false); 1223 false);
@@ -1273,7 +1277,7 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
1273 return; 1277 return;
1274 } 1278 }
1275 adapter->pm_wakeup_card_req = true; 1279 adapter->pm_wakeup_card_req = true;
1276 if (adapter->is_hs_configured) 1280 if (test_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags))
1277 mwifiex_hs_activated_event(mwifiex_get_priv 1281 mwifiex_hs_activated_event(mwifiex_get_priv
1278 (adapter, MWIFIEX_BSS_ROLE_ANY), 1282 (adapter, MWIFIEX_BSS_ROLE_ANY),
1279 true); 1283 true);
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index 07453932f703..cce70252fd96 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -813,7 +813,7 @@ mwifiex_hscfg_write(struct file *file, const char __user *ubuf,
813 MWIFIEX_SYNC_CMD, &hscfg); 813 MWIFIEX_SYNC_CMD, &hscfg);
814 814
815 mwifiex_enable_hs(priv->adapter); 815 mwifiex_enable_hs(priv->adapter);
816 priv->adapter->hs_enabling = false; 816 clear_bit(MWIFIEX_IS_HS_ENABLING, &priv->adapter->work_flags);
817 ret = count; 817 ret = count;
818done: 818done:
819 kfree(buf); 819 kfree(buf);
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index b10baacb51c9..75cbd609d606 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -355,8 +355,14 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
355 case WLAN_EID_HT_OPERATION: 355 case WLAN_EID_HT_OPERATION:
356 case WLAN_EID_VHT_CAPABILITY: 356 case WLAN_EID_VHT_CAPABILITY:
357 case WLAN_EID_VHT_OPERATION: 357 case WLAN_EID_VHT_OPERATION:
358 case WLAN_EID_VENDOR_SPECIFIC:
359 break; 358 break;
359 case WLAN_EID_VENDOR_SPECIFIC:
360 /* Skip only Microsoft WMM IE */
361 if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
362 WLAN_OUI_TYPE_MICROSOFT_WMM,
363 (const u8 *)hdr,
364 hdr->len + sizeof(struct ieee_types_header)))
365 break;
360 default: 366 default:
361 memcpy(gen_ie->ie_buffer + ie_len, hdr, 367 memcpy(gen_ie->ie_buffer + ie_len, hdr,
362 hdr->len + sizeof(struct ieee_types_header)); 368 hdr->len + sizeof(struct ieee_types_header));
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index d239e9248c05..673e89dff0b5 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -233,7 +233,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
233 adapter->event_received = false; 233 adapter->event_received = false;
234 adapter->data_received = false; 234 adapter->data_received = false;
235 235
236 adapter->surprise_removed = false; 236 clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
237 237
238 adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING; 238 adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
239 239
@@ -270,7 +270,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
270 270
271 adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K; 271 adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
272 272
273 adapter->is_hs_configured = false; 273 clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
274 adapter->hs_cfg.conditions = cpu_to_le32(HS_CFG_COND_DEF); 274 adapter->hs_cfg.conditions = cpu_to_le32(HS_CFG_COND_DEF);
275 adapter->hs_cfg.gpio = HS_CFG_GPIO_DEF; 275 adapter->hs_cfg.gpio = HS_CFG_GPIO_DEF;
276 adapter->hs_cfg.gap = HS_CFG_GAP_DEF; 276 adapter->hs_cfg.gap = HS_CFG_GAP_DEF;
@@ -439,7 +439,6 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
439 for (i = 0; i < adapter->priv_num; i++) { 439 for (i = 0; i < adapter->priv_num; i++) {
440 if (adapter->priv[i]) { 440 if (adapter->priv[i]) {
441 priv = adapter->priv[i]; 441 priv = adapter->priv[i];
442 spin_lock_init(&priv->rx_pkt_lock);
443 spin_lock_init(&priv->wmm.ra_list_spinlock); 442 spin_lock_init(&priv->wmm.ra_list_spinlock);
444 spin_lock_init(&priv->curr_bcn_buf_lock); 443 spin_lock_init(&priv->curr_bcn_buf_lock);
445 spin_lock_init(&priv->sta_list_spinlock); 444 spin_lock_init(&priv->sta_list_spinlock);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index fa3e8ddfe9a9..20cee5c397fb 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -404,7 +404,8 @@ process_start:
404 !skb_queue_empty(&adapter->tx_data_q)) { 404 !skb_queue_empty(&adapter->tx_data_q)) {
405 mwifiex_process_tx_queue(adapter); 405 mwifiex_process_tx_queue(adapter);
406 if (adapter->hs_activated) { 406 if (adapter->hs_activated) {
407 adapter->is_hs_configured = false; 407 clear_bit(MWIFIEX_IS_HS_CONFIGURED,
408 &adapter->work_flags);
408 mwifiex_hs_activated_event 409 mwifiex_hs_activated_event
409 (mwifiex_get_priv 410 (mwifiex_get_priv
410 (adapter, MWIFIEX_BSS_ROLE_ANY), 411 (adapter, MWIFIEX_BSS_ROLE_ANY),
@@ -420,7 +421,8 @@ process_start:
420 (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) { 421 (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
421 mwifiex_process_bypass_tx(adapter); 422 mwifiex_process_bypass_tx(adapter);
422 if (adapter->hs_activated) { 423 if (adapter->hs_activated) {
423 adapter->is_hs_configured = false; 424 clear_bit(MWIFIEX_IS_HS_CONFIGURED,
425 &adapter->work_flags);
424 mwifiex_hs_activated_event 426 mwifiex_hs_activated_event
425 (mwifiex_get_priv 427 (mwifiex_get_priv
426 (adapter, MWIFIEX_BSS_ROLE_ANY), 428 (adapter, MWIFIEX_BSS_ROLE_ANY),
@@ -435,7 +437,8 @@ process_start:
435 (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) { 437 (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
436 mwifiex_wmm_process_tx(adapter); 438 mwifiex_wmm_process_tx(adapter);
437 if (adapter->hs_activated) { 439 if (adapter->hs_activated) {
438 adapter->is_hs_configured = false; 440 clear_bit(MWIFIEX_IS_HS_CONFIGURED,
441 &adapter->work_flags);
439 mwifiex_hs_activated_event 442 mwifiex_hs_activated_event
440 (mwifiex_get_priv 443 (mwifiex_get_priv
441 (adapter, MWIFIEX_BSS_ROLE_ANY), 444 (adapter, MWIFIEX_BSS_ROLE_ANY),
@@ -647,7 +650,7 @@ err_dnld_fw:
647 if (adapter->if_ops.unregister_dev) 650 if (adapter->if_ops.unregister_dev)
648 adapter->if_ops.unregister_dev(adapter); 651 adapter->if_ops.unregister_dev(adapter);
649 652
650 adapter->surprise_removed = true; 653 set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
651 mwifiex_terminate_workqueue(adapter); 654 mwifiex_terminate_workqueue(adapter);
652 655
653 if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { 656 if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
@@ -870,7 +873,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
870 "data: %lu BSS(%d-%d): Data <= kernel\n", 873 "data: %lu BSS(%d-%d): Data <= kernel\n",
871 jiffies, priv->bss_type, priv->bss_num); 874 jiffies, priv->bss_type, priv->bss_num);
872 875
873 if (priv->adapter->surprise_removed) { 876 if (test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags)) {
874 kfree_skb(skb); 877 kfree_skb(skb);
875 priv->stats.tx_dropped++; 878 priv->stats.tx_dropped++;
876 return 0; 879 return 0;
@@ -1372,7 +1375,7 @@ static void mwifiex_rx_work_queue(struct work_struct *work)
1372 struct mwifiex_adapter *adapter = 1375 struct mwifiex_adapter *adapter =
1373 container_of(work, struct mwifiex_adapter, rx_work); 1376 container_of(work, struct mwifiex_adapter, rx_work);
1374 1377
1375 if (adapter->surprise_removed) 1378 if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
1376 return; 1379 return;
1377 mwifiex_process_rx(adapter); 1380 mwifiex_process_rx(adapter);
1378} 1381}
@@ -1388,7 +1391,7 @@ static void mwifiex_main_work_queue(struct work_struct *work)
1388 struct mwifiex_adapter *adapter = 1391 struct mwifiex_adapter *adapter =
1389 container_of(work, struct mwifiex_adapter, main_work); 1392 container_of(work, struct mwifiex_adapter, main_work);
1390 1393
1391 if (adapter->surprise_removed) 1394 if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
1392 return; 1395 return;
1393 mwifiex_main_process(adapter); 1396 mwifiex_main_process(adapter);
1394} 1397}
@@ -1405,7 +1408,7 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter)
1405 if (adapter->if_ops.disable_int) 1408 if (adapter->if_ops.disable_int)
1406 adapter->if_ops.disable_int(adapter); 1409 adapter->if_ops.disable_int(adapter);
1407 1410
1408 adapter->surprise_removed = true; 1411 set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
1409 mwifiex_terminate_workqueue(adapter); 1412 mwifiex_terminate_workqueue(adapter);
1410 adapter->int_status = 0; 1413 adapter->int_status = 0;
1411 1414
@@ -1493,11 +1496,11 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter)
1493 adapter->if_ops.up_dev(adapter); 1496 adapter->if_ops.up_dev(adapter);
1494 1497
1495 adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING; 1498 adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
1496 adapter->surprise_removed = false; 1499 clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
1497 init_waitqueue_head(&adapter->init_wait_q); 1500 init_waitqueue_head(&adapter->init_wait_q);
1498 adapter->is_suspended = false; 1501 clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
1499 adapter->hs_activated = false; 1502 adapter->hs_activated = false;
1500 adapter->is_cmd_timedout = 0; 1503 clear_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags);
1501 init_waitqueue_head(&adapter->hs_activate_wait_q); 1504 init_waitqueue_head(&adapter->hs_activate_wait_q);
1502 init_waitqueue_head(&adapter->cmd_wait_q.wait); 1505 init_waitqueue_head(&adapter->cmd_wait_q.wait);
1503 adapter->cmd_wait_q.status = 0; 1506 adapter->cmd_wait_q.status = 0;
@@ -1552,7 +1555,7 @@ err_init_fw:
1552 adapter->if_ops.unregister_dev(adapter); 1555 adapter->if_ops.unregister_dev(adapter);
1553 1556
1554err_kmalloc: 1557err_kmalloc:
1555 adapter->surprise_removed = true; 1558 set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
1556 mwifiex_terminate_workqueue(adapter); 1559 mwifiex_terminate_workqueue(adapter);
1557 if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { 1560 if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
1558 mwifiex_dbg(adapter, ERROR, 1561 mwifiex_dbg(adapter, ERROR,
@@ -1649,9 +1652,9 @@ mwifiex_add_card(void *card, struct completion *fw_done,
1649 adapter->fw_done = fw_done; 1652 adapter->fw_done = fw_done;
1650 1653
1651 adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING; 1654 adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
1652 adapter->surprise_removed = false; 1655 clear_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
1653 init_waitqueue_head(&adapter->init_wait_q); 1656 init_waitqueue_head(&adapter->init_wait_q);
1654 adapter->is_suspended = false; 1657 clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
1655 adapter->hs_activated = false; 1658 adapter->hs_activated = false;
1656 init_waitqueue_head(&adapter->hs_activate_wait_q); 1659 init_waitqueue_head(&adapter->hs_activate_wait_q);
1657 init_waitqueue_head(&adapter->cmd_wait_q.wait); 1660 init_waitqueue_head(&adapter->cmd_wait_q.wait);
@@ -1699,7 +1702,7 @@ err_init_fw:
1699 if (adapter->if_ops.unregister_dev) 1702 if (adapter->if_ops.unregister_dev)
1700 adapter->if_ops.unregister_dev(adapter); 1703 adapter->if_ops.unregister_dev(adapter);
1701err_registerdev: 1704err_registerdev:
1702 adapter->surprise_removed = true; 1705 set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
1703 mwifiex_terminate_workqueue(adapter); 1706 mwifiex_terminate_workqueue(adapter);
1704 if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { 1707 if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
1705 pr_debug("info: %s: shutdown mwifiex\n", __func__); 1708 pr_debug("info: %s: shutdown mwifiex\n", __func__);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 69ac0a22c28c..b025ba164412 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -517,6 +517,14 @@ enum mwifiex_iface_work_flags {
517 MWIFIEX_IFACE_WORK_CARD_RESET, 517 MWIFIEX_IFACE_WORK_CARD_RESET,
518}; 518};
519 519
520enum mwifiex_adapter_work_flags {
521 MWIFIEX_SURPRISE_REMOVED,
522 MWIFIEX_IS_CMD_TIMEDOUT,
523 MWIFIEX_IS_SUSPENDED,
524 MWIFIEX_IS_HS_CONFIGURED,
525 MWIFIEX_IS_HS_ENABLING,
526};
527
520struct mwifiex_band_config { 528struct mwifiex_band_config {
521 u8 chan_band:2; 529 u8 chan_band:2;
522 u8 chan_width:2; 530 u8 chan_width:2;
@@ -616,9 +624,6 @@ struct mwifiex_private {
616 struct list_head rx_reorder_tbl_ptr; 624 struct list_head rx_reorder_tbl_ptr;
617 /* spin lock for rx_reorder_tbl_ptr queue */ 625 /* spin lock for rx_reorder_tbl_ptr queue */
618 spinlock_t rx_reorder_tbl_lock; 626 spinlock_t rx_reorder_tbl_lock;
619 /* spin lock for Rx packets */
620 spinlock_t rx_pkt_lock;
621
622#define MWIFIEX_ASSOC_RSP_BUF_SIZE 500 627#define MWIFIEX_ASSOC_RSP_BUF_SIZE 500
623 u8 assoc_rsp_buf[MWIFIEX_ASSOC_RSP_BUF_SIZE]; 628 u8 assoc_rsp_buf[MWIFIEX_ASSOC_RSP_BUF_SIZE];
624 u32 assoc_rsp_size; 629 u32 assoc_rsp_size;
@@ -875,7 +880,7 @@ struct mwifiex_adapter {
875 struct device *dev; 880 struct device *dev;
876 struct wiphy *wiphy; 881 struct wiphy *wiphy;
877 u8 perm_addr[ETH_ALEN]; 882 u8 perm_addr[ETH_ALEN];
878 bool surprise_removed; 883 unsigned long work_flags;
879 u32 fw_release_number; 884 u32 fw_release_number;
880 u8 intf_hdr_len; 885 u8 intf_hdr_len;
881 u16 init_wait_q_woken; 886 u16 init_wait_q_woken;
@@ -929,7 +934,6 @@ struct mwifiex_adapter {
929 struct cmd_ctrl_node *curr_cmd; 934 struct cmd_ctrl_node *curr_cmd;
930 /* spin lock for command */ 935 /* spin lock for command */
931 spinlock_t mwifiex_cmd_lock; 936 spinlock_t mwifiex_cmd_lock;
932 u8 is_cmd_timedout;
933 u16 last_init_cmd; 937 u16 last_init_cmd;
934 struct timer_list cmd_timer; 938 struct timer_list cmd_timer;
935 struct list_head cmd_free_q; 939 struct list_head cmd_free_q;
@@ -979,13 +983,10 @@ struct mwifiex_adapter {
979 u16 pps_uapsd_mode; 983 u16 pps_uapsd_mode;
980 u32 pm_wakeup_fw_try; 984 u32 pm_wakeup_fw_try;
981 struct timer_list wakeup_timer; 985 struct timer_list wakeup_timer;
982 u8 is_hs_configured;
983 struct mwifiex_hs_config_param hs_cfg; 986 struct mwifiex_hs_config_param hs_cfg;
984 u8 hs_activated; 987 u8 hs_activated;
985 u16 hs_activate_wait_q_woken; 988 u16 hs_activate_wait_q_woken;
986 wait_queue_head_t hs_activate_wait_q; 989 wait_queue_head_t hs_activate_wait_q;
987 bool is_suspended;
988 bool hs_enabling;
989 u8 event_body[MAX_EVENT_SIZE]; 990 u8 event_body[MAX_EVENT_SIZE];
990 u32 hw_dot_11n_dev_cap; 991 u32 hw_dot_11n_dev_cap;
991 u8 hw_dev_mcs_support; 992 u8 hw_dev_mcs_support;
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 0c42b7296ddd..3fe81b2a929a 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -170,7 +170,7 @@ static int mwifiex_pcie_suspend(struct device *dev)
170 if (!mwifiex_enable_hs(adapter)) { 170 if (!mwifiex_enable_hs(adapter)) {
171 mwifiex_dbg(adapter, ERROR, 171 mwifiex_dbg(adapter, ERROR,
172 "cmd: failed to suspend\n"); 172 "cmd: failed to suspend\n");
173 adapter->hs_enabling = false; 173 clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
174 mwifiex_disable_wake(adapter); 174 mwifiex_disable_wake(adapter);
175 return -EFAULT; 175 return -EFAULT;
176 } 176 }
@@ -178,8 +178,8 @@ static int mwifiex_pcie_suspend(struct device *dev)
178 flush_workqueue(adapter->workqueue); 178 flush_workqueue(adapter->workqueue);
179 179
180 /* Indicate device suspended */ 180 /* Indicate device suspended */
181 adapter->is_suspended = true; 181 set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
182 adapter->hs_enabling = false; 182 clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
183 183
184 return 0; 184 return 0;
185} 185}
@@ -207,13 +207,13 @@ static int mwifiex_pcie_resume(struct device *dev)
207 207
208 adapter = card->adapter; 208 adapter = card->adapter;
209 209
210 if (!adapter->is_suspended) { 210 if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
211 mwifiex_dbg(adapter, WARN, 211 mwifiex_dbg(adapter, WARN,
212 "Device already resumed\n"); 212 "Device already resumed\n");
213 return 0; 213 return 0;
214 } 214 }
215 215
216 adapter->is_suspended = false; 216 clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
217 217
218 mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA), 218 mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
219 MWIFIEX_ASYNC_CMD); 219 MWIFIEX_ASYNC_CMD);
@@ -2430,7 +2430,7 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
2430 } 2430 }
2431 adapter = card->adapter; 2431 adapter = card->adapter;
2432 2432
2433 if (adapter->surprise_removed) 2433 if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
2434 goto exit; 2434 goto exit;
2435 2435
2436 if (card->msix_enable) 2436 if (card->msix_enable)
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 895b806cdb03..8e483b0bc3b1 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1495,7 +1495,8 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
1495 return -EBUSY; 1495 return -EBUSY;
1496 } 1496 }
1497 1497
1498 if (adapter->surprise_removed || adapter->is_cmd_timedout) { 1498 if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags) ||
1499 test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags)) {
1499 mwifiex_dbg(adapter, ERROR, 1500 mwifiex_dbg(adapter, ERROR,
1500 "Ignore scan. Card removed or firmware in bad state\n"); 1501 "Ignore scan. Card removed or firmware in bad state\n");
1501 return -EFAULT; 1502 return -EFAULT;
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index dfdcbc4f141a..d49fbd58afa7 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -181,13 +181,13 @@ static int mwifiex_sdio_resume(struct device *dev)
181 181
182 adapter = card->adapter; 182 adapter = card->adapter;
183 183
184 if (!adapter->is_suspended) { 184 if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
185 mwifiex_dbg(adapter, WARN, 185 mwifiex_dbg(adapter, WARN,
186 "device already resumed\n"); 186 "device already resumed\n");
187 return 0; 187 return 0;
188 } 188 }
189 189
190 adapter->is_suspended = false; 190 clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
191 191
192 /* Disable Host Sleep */ 192 /* Disable Host Sleep */
193 mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA), 193 mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
@@ -260,7 +260,7 @@ mwifiex_write_data_sync(struct mwifiex_adapter *adapter,
260 MWIFIEX_SDIO_BLOCK_SIZE) : pkt_len; 260 MWIFIEX_SDIO_BLOCK_SIZE) : pkt_len;
261 u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK); 261 u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
262 262
263 if (adapter->is_suspended) { 263 if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
264 mwifiex_dbg(adapter, ERROR, 264 mwifiex_dbg(adapter, ERROR,
265 "%s: not allowed while suspended\n", __func__); 265 "%s: not allowed while suspended\n", __func__);
266 return -1; 266 return -1;
@@ -450,7 +450,7 @@ static int mwifiex_sdio_suspend(struct device *dev)
450 if (!mwifiex_enable_hs(adapter)) { 450 if (!mwifiex_enable_hs(adapter)) {
451 mwifiex_dbg(adapter, ERROR, 451 mwifiex_dbg(adapter, ERROR,
452 "cmd: failed to suspend\n"); 452 "cmd: failed to suspend\n");
453 adapter->hs_enabling = false; 453 clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
454 mwifiex_disable_wake(adapter); 454 mwifiex_disable_wake(adapter);
455 return -EFAULT; 455 return -EFAULT;
456 } 456 }
@@ -460,8 +460,8 @@ static int mwifiex_sdio_suspend(struct device *dev)
460 ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); 460 ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
461 461
462 /* Indicate device suspended */ 462 /* Indicate device suspended */
463 adapter->is_suspended = true; 463 set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
464 adapter->hs_enabling = false; 464 clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
465 465
466 return ret; 466 return ret;
467} 467}
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 03a6492662ca..a327fc5b36e3 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -224,7 +224,8 @@ void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code,
224 adapter->tx_lock_flag = false; 224 adapter->tx_lock_flag = false;
225 adapter->pps_uapsd_mode = false; 225 adapter->pps_uapsd_mode = false;
226 226
227 if (adapter->is_cmd_timedout && adapter->curr_cmd) 227 if (test_bit(MWIFIEX_IS_CMD_TIMEDOUT, &adapter->work_flags) &&
228 adapter->curr_cmd)
228 return; 229 return;
229 priv->media_connected = false; 230 priv->media_connected = false;
230 mwifiex_dbg(adapter, MSG, 231 mwifiex_dbg(adapter, MSG,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index 5414b755cf82..b454b5f85503 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -419,7 +419,8 @@ int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
419 } 419 }
420 if (hs_cfg->is_invoke_hostcmd) { 420 if (hs_cfg->is_invoke_hostcmd) {
421 if (hs_cfg->conditions == HS_CFG_CANCEL) { 421 if (hs_cfg->conditions == HS_CFG_CANCEL) {
422 if (!adapter->is_hs_configured) 422 if (!test_bit(MWIFIEX_IS_HS_CONFIGURED,
423 &adapter->work_flags))
423 /* Already cancelled */ 424 /* Already cancelled */
424 break; 425 break;
425 /* Save previous condition */ 426 /* Save previous condition */
@@ -535,7 +536,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
535 memset(&hscfg, 0, sizeof(hscfg)); 536 memset(&hscfg, 0, sizeof(hscfg));
536 hscfg.is_invoke_hostcmd = true; 537 hscfg.is_invoke_hostcmd = true;
537 538
538 adapter->hs_enabling = true; 539 set_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
539 mwifiex_cancel_all_pending_cmd(adapter); 540 mwifiex_cancel_all_pending_cmd(adapter);
540 541
541 if (mwifiex_set_hs_params(mwifiex_get_priv(adapter, 542 if (mwifiex_set_hs_params(mwifiex_get_priv(adapter,
@@ -601,7 +602,8 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv,
601 else 602 else
602 info->wep_status = false; 603 info->wep_status = false;
603 604
604 info->is_hs_configured = adapter->is_hs_configured; 605 info->is_hs_configured = test_bit(MWIFIEX_IS_HS_CONFIGURED,
606 &adapter->work_flags);
605 info->is_deep_sleep = adapter->is_deep_sleep; 607 info->is_deep_sleep = adapter->is_deep_sleep;
606 608
607 return 0; 609 return 0;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_tx.c b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
index 620f8650a742..37c24b95e642 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_tx.c
@@ -143,7 +143,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
143 int ret; 143 int ret;
144 struct mwifiex_txinfo *tx_info = NULL; 144 struct mwifiex_txinfo *tx_info = NULL;
145 145
146 if (adapter->surprise_removed) 146 if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags))
147 return -1; 147 return -1;
148 148
149 if (!priv->media_connected) 149 if (!priv->media_connected)
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 5ce85d5727e4..a83c5afc256a 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -421,12 +421,15 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
421 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); 421 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
422 } 422 }
423 423
424 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
424 if (!priv->ap_11n_enabled || 425 if (!priv->ap_11n_enabled ||
425 (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) && 426 (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
426 (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) { 427 (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
427 ret = mwifiex_handle_uap_rx_forward(priv, skb); 428 ret = mwifiex_handle_uap_rx_forward(priv, skb);
429 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
428 return ret; 430 return ret;
429 } 431 }
432 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
430 433
431 /* Reorder and send to kernel */ 434 /* Reorder and send to kernel */
432 pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type); 435 pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 88f4c89f89ba..433c6a16870b 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -181,7 +181,8 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
181 atomic_dec(&card->rx_data_urb_pending); 181 atomic_dec(&card->rx_data_urb_pending);
182 182
183 if (recv_length) { 183 if (recv_length) {
184 if (urb->status || (adapter->surprise_removed)) { 184 if (urb->status ||
185 test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) {
185 mwifiex_dbg(adapter, ERROR, 186 mwifiex_dbg(adapter, ERROR,
186 "URB status is failed: %d\n", urb->status); 187 "URB status is failed: %d\n", urb->status);
187 /* Do not free skb in case of command ep */ 188 /* Do not free skb in case of command ep */
@@ -218,10 +219,10 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
218 dev_kfree_skb_any(skb); 219 dev_kfree_skb_any(skb);
219 } 220 }
220 } else if (urb->status) { 221 } else if (urb->status) {
221 if (!adapter->is_suspended) { 222 if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
222 mwifiex_dbg(adapter, FATAL, 223 mwifiex_dbg(adapter, FATAL,
223 "Card is removed: %d\n", urb->status); 224 "Card is removed: %d\n", urb->status);
224 adapter->surprise_removed = true; 225 set_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags);
225 } 226 }
226 dev_kfree_skb_any(skb); 227 dev_kfree_skb_any(skb);
227 return; 228 return;
@@ -529,7 +530,7 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
529 return 0; 530 return 0;
530 } 531 }
531 532
532 if (unlikely(adapter->is_suspended)) 533 if (unlikely(test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)))
533 mwifiex_dbg(adapter, WARN, 534 mwifiex_dbg(adapter, WARN,
534 "Device already suspended\n"); 535 "Device already suspended\n");
535 536
@@ -537,19 +538,19 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
537 if (!mwifiex_enable_hs(adapter)) { 538 if (!mwifiex_enable_hs(adapter)) {
538 mwifiex_dbg(adapter, ERROR, 539 mwifiex_dbg(adapter, ERROR,
539 "cmd: failed to suspend\n"); 540 "cmd: failed to suspend\n");
540 adapter->hs_enabling = false; 541 clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
541 return -EFAULT; 542 return -EFAULT;
542 } 543 }
543 544
544 545
545 /* 'is_suspended' flag indicates device is suspended. 546 /* 'MWIFIEX_IS_SUSPENDED' bit indicates device is suspended.
546 * It must be set here before the usb_kill_urb() calls. Reason 547 * It must be set here before the usb_kill_urb() calls. Reason
547 * is in the complete handlers, urb->status(= -ENOENT) and 548 * is in the complete handlers, urb->status(= -ENOENT) and
548 * this flag is used in combination to distinguish between a 549 * this flag is used in combination to distinguish between a
549 * 'suspended' state and a 'disconnect' one. 550 * 'suspended' state and a 'disconnect' one.
550 */ 551 */
551 adapter->is_suspended = true; 552 set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
552 adapter->hs_enabling = false; 553 clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
553 554
554 if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb) 555 if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
555 usb_kill_urb(card->rx_cmd.urb); 556 usb_kill_urb(card->rx_cmd.urb);
@@ -593,7 +594,7 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
593 } 594 }
594 adapter = card->adapter; 595 adapter = card->adapter;
595 596
596 if (unlikely(!adapter->is_suspended)) { 597 if (unlikely(!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags))) {
597 mwifiex_dbg(adapter, WARN, 598 mwifiex_dbg(adapter, WARN,
598 "Device already resumed\n"); 599 "Device already resumed\n");
599 return 0; 600 return 0;
@@ -602,7 +603,7 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
602 /* Indicate device resumed. The netdev queue will be resumed only 603 /* Indicate device resumed. The netdev queue will be resumed only
603 * after the urbs have been re-submitted 604 * after the urbs have been re-submitted
604 */ 605 */
605 adapter->is_suspended = false; 606 clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
606 607
607 if (!atomic_read(&card->rx_data_urb_pending)) 608 if (!atomic_read(&card->rx_data_urb_pending))
608 for (i = 0; i < MWIFIEX_RX_DATA_URB; i++) 609 for (i = 0; i < MWIFIEX_RX_DATA_URB; i++)
@@ -1158,13 +1159,13 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
1158 unsigned long flags; 1159 unsigned long flags;
1159 int idx, ret; 1160 int idx, ret;
1160 1161
1161 if (adapter->is_suspended) { 1162 if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
1162 mwifiex_dbg(adapter, ERROR, 1163 mwifiex_dbg(adapter, ERROR,
1163 "%s: not allowed while suspended\n", __func__); 1164 "%s: not allowed while suspended\n", __func__);
1164 return -1; 1165 return -1;
1165 } 1166 }
1166 1167
1167 if (adapter->surprise_removed) { 1168 if (test_bit(MWIFIEX_SURPRISE_REMOVED, &adapter->work_flags)) {
1168 mwifiex_dbg(adapter, ERROR, "%s: device removed\n", __func__); 1169 mwifiex_dbg(adapter, ERROR, "%s: device removed\n", __func__);
1169 return -1; 1170 return -1;
1170 } 1171 }
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index 6dd212898117..f9b71539d33e 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -197,9 +197,11 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
197 info->is_deep_sleep = adapter->is_deep_sleep; 197 info->is_deep_sleep = adapter->is_deep_sleep;
198 info->pm_wakeup_card_req = adapter->pm_wakeup_card_req; 198 info->pm_wakeup_card_req = adapter->pm_wakeup_card_req;
199 info->pm_wakeup_fw_try = adapter->pm_wakeup_fw_try; 199 info->pm_wakeup_fw_try = adapter->pm_wakeup_fw_try;
200 info->is_hs_configured = adapter->is_hs_configured; 200 info->is_hs_configured = test_bit(MWIFIEX_IS_HS_CONFIGURED,
201 &adapter->work_flags);
201 info->hs_activated = adapter->hs_activated; 202 info->hs_activated = adapter->hs_activated;
202 info->is_cmd_timedout = adapter->is_cmd_timedout; 203 info->is_cmd_timedout = test_bit(MWIFIEX_IS_CMD_TIMEDOUT,
204 &adapter->work_flags);
203 info->num_cmd_host_to_card_failure 205 info->num_cmd_host_to_card_failure
204 = adapter->dbg.num_cmd_host_to_card_failure; 206 = adapter->dbg.num_cmd_host_to_card_failure;
205 info->num_cmd_sleep_cfm_host_to_card_failure 207 info->num_cmd_sleep_cfm_host_to_card_failure
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 936a0a841af8..407b9932ca4d 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -599,7 +599,7 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
599 memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid)); 599 memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
600 600
601 if (priv->adapter->if_ops.clean_pcie_ring && 601 if (priv->adapter->if_ops.clean_pcie_ring &&
602 !priv->adapter->surprise_removed) 602 !test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags))
603 priv->adapter->if_ops.clean_pcie_ring(priv->adapter); 603 priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
604 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); 604 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
605 605
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
index fc05d79c80d0..850611ad347a 100644
--- a/drivers/net/wireless/mediatek/mt76/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -1,10 +1,36 @@
1config MT76_CORE 1config MT76_CORE
2 tristate 2 tristate
3 3
4config MT76_USB
5 tristate
6 depends on MT76_CORE
7
8config MT76x2_COMMON
9 tristate
10 depends on MT76_CORE
11
12config MT76x0U
13 tristate "MediaTek MT76x0U (USB) support"
14 depends on MAC80211
15 depends on USB
16 help
17 This adds support for MT7610U-based wireless USB dongles.
18
4config MT76x2E 19config MT76x2E
5 tristate "MediaTek MT76x2E (PCIe) support" 20 tristate "MediaTek MT76x2E (PCIe) support"
6 select MT76_CORE 21 select MT76_CORE
22 select MT76x2_COMMON
7 depends on MAC80211 23 depends on MAC80211
8 depends on PCI 24 depends on PCI
9 ---help--- 25 ---help---
10 This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices. 26 This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices.
27
28config MT76x2U
29 tristate "MediaTek MT76x2U (USB) support"
30 select MT76_CORE
31 select MT76_USB
32 select MT76x2_COMMON
33 depends on MAC80211
34 depends on USB
35 help
36 This adds support for MT7612U-based wireless USB dongles.
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index a0156bc01dea..158d10d2716c 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -1,15 +1,31 @@
1obj-$(CONFIG_MT76_CORE) += mt76.o 1obj-$(CONFIG_MT76_CORE) += mt76.o
2obj-$(CONFIG_MT76_USB) += mt76-usb.o
3obj-$(CONFIG_MT76x0U) += mt76x0/
4obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o
2obj-$(CONFIG_MT76x2E) += mt76x2e.o 5obj-$(CONFIG_MT76x2E) += mt76x2e.o
6obj-$(CONFIG_MT76x2U) += mt76x2u.o
3 7
4mt76-y := \ 8mt76-y := \
5 mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o 9 mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o
6 10
11mt76-usb-y := usb.o usb_trace.o usb_mcu.o
12
7CFLAGS_trace.o := -I$(src) 13CFLAGS_trace.o := -I$(src)
14CFLAGS_usb_trace.o := -I$(src)
15
16mt76x2-common-y := \
17 mt76x2_eeprom.o mt76x2_tx_common.o mt76x2_mac_common.o \
18 mt76x2_init_common.o mt76x2_common.o mt76x2_phy_common.o \
19 mt76x2_debugfs.o
8 20
9mt76x2e-y := \ 21mt76x2e-y := \
10 mt76x2_pci.o mt76x2_dma.o \ 22 mt76x2_pci.o mt76x2_dma.o \
11 mt76x2_main.o mt76x2_init.o mt76x2_debugfs.o mt76x2_tx.o \ 23 mt76x2_main.o mt76x2_init.o mt76x2_tx.o \
12 mt76x2_core.o mt76x2_mac.o mt76x2_eeprom.o mt76x2_mcu.o mt76x2_phy.o \ 24 mt76x2_core.o mt76x2_mac.o mt76x2_mcu.o mt76x2_phy.o \
13 mt76x2_dfs.o mt76x2_trace.o 25 mt76x2_dfs.o mt76x2_trace.o
14 26
27mt76x2u-y := \
28 mt76x2_usb.o mt76x2u_init.o mt76x2u_main.o mt76x2u_mac.o \
29 mt76x2u_mcu.o mt76x2u_phy.o mt76x2u_core.o
30
15CFLAGS_mt76x2_trace.o := -I$(src) 31CFLAGS_mt76x2_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 1e8cdce919d9..73c8b2805c97 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -113,7 +113,7 @@ mt76_rx_aggr_reorder_work(struct work_struct *work)
113 if (nframes) 113 if (nframes)
114 ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work, 114 ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
115 REORDER_TIMEOUT); 115 REORDER_TIMEOUT);
116 mt76_rx_complete(dev, &frames, -1); 116 mt76_rx_complete(dev, &frames, NULL);
117 117
118 rcu_read_unlock(); 118 rcu_read_unlock();
119 local_bh_enable(); 119 local_bh_enable();
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 3dbedcedc2c4..c51da2205b93 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -239,6 +239,80 @@ mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
239 iowrite32(q->head, &q->regs->cpu_idx); 239 iowrite32(q->head, &q->regs->cpu_idx);
240} 240}
241 241
242int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
243 struct sk_buff *skb, struct mt76_wcid *wcid,
244 struct ieee80211_sta *sta)
245{
246 struct mt76_queue_entry e;
247 struct mt76_txwi_cache *t;
248 struct mt76_queue_buf buf[32];
249 struct sk_buff *iter;
250 dma_addr_t addr;
251 int len;
252 u32 tx_info = 0;
253 int n, ret;
254
255 t = mt76_get_txwi(dev);
256 if (!t) {
257 ieee80211_free_txskb(dev->hw, skb);
258 return -ENOMEM;
259 }
260
261 dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
262 DMA_TO_DEVICE);
263 ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
264 &tx_info);
265 dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
266 DMA_TO_DEVICE);
267 if (ret < 0)
268 goto free;
269
270 len = skb->len - skb->data_len;
271 addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
272 if (dma_mapping_error(dev->dev, addr)) {
273 ret = -ENOMEM;
274 goto free;
275 }
276
277 n = 0;
278 buf[n].addr = t->dma_addr;
279 buf[n++].len = dev->drv->txwi_size;
280 buf[n].addr = addr;
281 buf[n++].len = len;
282
283 skb_walk_frags(skb, iter) {
284 if (n == ARRAY_SIZE(buf))
285 goto unmap;
286
287 addr = dma_map_single(dev->dev, iter->data, iter->len,
288 DMA_TO_DEVICE);
289 if (dma_mapping_error(dev->dev, addr))
290 goto unmap;
291
292 buf[n].addr = addr;
293 buf[n++].len = iter->len;
294 }
295
296 if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
297 goto unmap;
298
299 return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);
300
301unmap:
302 ret = -ENOMEM;
303 for (n--; n > 0; n--)
304 dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
305 DMA_TO_DEVICE);
306
307free:
308 e.skb = skb;
309 e.txwi = t;
310 dev->drv->tx_complete_skb(dev, q, &e, true);
311 mt76_put_txwi(dev, t);
312 return ret;
313}
314EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb);
315
242static int 316static int
243mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi) 317mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
244{ 318{
@@ -400,7 +474,7 @@ mt76_dma_rx_poll(struct napi_struct *napi, int budget)
400 474
401 do { 475 do {
402 cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done); 476 cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
403 mt76_rx_poll_complete(dev, qid); 477 mt76_rx_poll_complete(dev, qid, napi);
404 done += cur; 478 done += cur;
405 } while (cur && done < budget); 479 } while (cur && done < budget);
406 480
@@ -436,6 +510,7 @@ static const struct mt76_queue_ops mt76_dma_ops = {
436 .init = mt76_dma_init, 510 .init = mt76_dma_init,
437 .alloc = mt76_dma_alloc_queue, 511 .alloc = mt76_dma_alloc_queue,
438 .add_buf = mt76_dma_add_buf, 512 .add_buf = mt76_dma_add_buf,
513 .tx_queue_skb = mt76_dma_tx_queue_skb,
439 .tx_cleanup = mt76_dma_tx_cleanup, 514 .tx_cleanup = mt76_dma_tx_cleanup,
440 .rx_reset = mt76_dma_rx_reset, 515 .rx_reset = mt76_dma_rx_reset,
441 .kick = mt76_dma_kick_queue, 516 .kick = mt76_dma_kick_queue,
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index 1dad39697929..27248e24a19b 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -25,6 +25,39 @@
25#define MT_DMA_CTL_LAST_SEC0 BIT(30) 25#define MT_DMA_CTL_LAST_SEC0 BIT(30)
26#define MT_DMA_CTL_DMA_DONE BIT(31) 26#define MT_DMA_CTL_DMA_DONE BIT(31)
27 27
28#define MT_TXD_INFO_LEN GENMASK(15, 0)
29#define MT_TXD_INFO_NEXT_VLD BIT(16)
30#define MT_TXD_INFO_TX_BURST BIT(17)
31#define MT_TXD_INFO_80211 BIT(19)
32#define MT_TXD_INFO_TSO BIT(20)
33#define MT_TXD_INFO_CSO BIT(21)
34#define MT_TXD_INFO_WIV BIT(24)
35#define MT_TXD_INFO_QSEL GENMASK(26, 25)
36#define MT_TXD_INFO_DPORT GENMASK(29, 27)
37#define MT_TXD_INFO_TYPE GENMASK(31, 30)
38
39#define MT_RX_FCE_INFO_LEN GENMASK(13, 0)
40#define MT_RX_FCE_INFO_SELF_GEN BIT(15)
41#define MT_RX_FCE_INFO_CMD_SEQ GENMASK(19, 16)
42#define MT_RX_FCE_INFO_EVT_TYPE GENMASK(23, 20)
43#define MT_RX_FCE_INFO_PCIE_INTR BIT(24)
44#define MT_RX_FCE_INFO_QSEL GENMASK(26, 25)
45#define MT_RX_FCE_INFO_D_PORT GENMASK(29, 27)
46#define MT_RX_FCE_INFO_TYPE GENMASK(31, 30)
47
48/* MCU request message header */
49#define MT_MCU_MSG_LEN GENMASK(15, 0)
50#define MT_MCU_MSG_CMD_SEQ GENMASK(19, 16)
51#define MT_MCU_MSG_CMD_TYPE GENMASK(26, 20)
52#define MT_MCU_MSG_PORT GENMASK(29, 27)
53#define MT_MCU_MSG_TYPE GENMASK(31, 30)
54#define MT_MCU_MSG_TYPE_CMD BIT(30)
55
56#define MT_DMA_HDR_LEN 4
57#define MT_RX_INFO_LEN 4
58#define MT_FCE_INFO_LEN 4
59#define MT_RX_RXWI_LEN 32
60
28struct mt76_desc { 61struct mt76_desc {
29 __le32 buf0; 62 __le32 buf0;
30 __le32 ctrl; 63 __le32 ctrl;
@@ -32,6 +65,16 @@ struct mt76_desc {
32 __le32 info; 65 __le32 info;
33} __packed __aligned(4); 66} __packed __aligned(4);
34 67
68enum dma_msg_port {
69 WLAN_PORT,
70 CPU_RX_PORT,
71 CPU_TX_PORT,
72 HOST_PORT,
73 VIRTUAL_CPU_RX_PORT,
74 VIRTUAL_CPU_TX_PORT,
75 DISCARD,
76};
77
35int mt76_dma_attach(struct mt76_dev *dev); 78int mt76_dma_attach(struct mt76_dev *dev);
36void mt76_dma_cleanup(struct mt76_dev *dev); 79void mt76_dma_cleanup(struct mt76_dev *dev);
37 80
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index d62e34e7eadf..029d54bce9e8 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -303,14 +303,6 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
303 SET_IEEE80211_DEV(hw, dev->dev); 303 SET_IEEE80211_DEV(hw, dev->dev);
304 SET_IEEE80211_PERM_ADDR(hw, dev->macaddr); 304 SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
305 305
306 wiphy->interface_modes =
307 BIT(NL80211_IFTYPE_STATION) |
308 BIT(NL80211_IFTYPE_AP) |
309#ifdef CONFIG_MAC80211_MESH
310 BIT(NL80211_IFTYPE_MESH_POINT) |
311#endif
312 BIT(NL80211_IFTYPE_ADHOC);
313
314 wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; 306 wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
315 307
316 wiphy->available_antennas_tx = dev->antenna_mask; 308 wiphy->available_antennas_tx = dev->antenna_mask;
@@ -591,15 +583,11 @@ mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb)
591} 583}
592 584
593void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, 585void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
594 int queue) 586 struct napi_struct *napi)
595{ 587{
596 struct napi_struct *napi = NULL;
597 struct ieee80211_sta *sta; 588 struct ieee80211_sta *sta;
598 struct sk_buff *skb; 589 struct sk_buff *skb;
599 590
600 if (queue >= 0)
601 napi = &dev->napi[queue];
602
603 spin_lock(&dev->rx_lock); 591 spin_lock(&dev->rx_lock);
604 while ((skb = __skb_dequeue(frames)) != NULL) { 592 while ((skb = __skb_dequeue(frames)) != NULL) {
605 if (mt76_check_ccmp_pn(skb)) { 593 if (mt76_check_ccmp_pn(skb)) {
@@ -613,7 +601,8 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
613 spin_unlock(&dev->rx_lock); 601 spin_unlock(&dev->rx_lock);
614} 602}
615 603
616void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q) 604void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
605 struct napi_struct *napi)
617{ 606{
618 struct sk_buff_head frames; 607 struct sk_buff_head frames;
619 struct sk_buff *skb; 608 struct sk_buff *skb;
@@ -625,5 +614,6 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q)
625 mt76_rx_aggr_reorder(skb, &frames); 614 mt76_rx_aggr_reorder(skb, &frames);
626 } 615 }
627 616
628 mt76_rx_complete(dev, &frames, q); 617 mt76_rx_complete(dev, &frames, napi);
629} 618}
619EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 96e9798bb8a0..2eab35879163 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -22,6 +22,7 @@
22#include <linux/spinlock.h> 22#include <linux/spinlock.h>
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <linux/leds.h> 24#include <linux/leds.h>
25#include <linux/usb.h>
25#include <net/mac80211.h> 26#include <net/mac80211.h>
26#include "util.h" 27#include "util.h"
27 28
@@ -30,6 +31,7 @@
30#define MT_RX_BUF_SIZE 2048 31#define MT_RX_BUF_SIZE 2048
31 32
32struct mt76_dev; 33struct mt76_dev;
34struct mt76_wcid;
33 35
34struct mt76_bus_ops { 36struct mt76_bus_ops {
35 u32 (*rr)(struct mt76_dev *dev, u32 offset); 37 u32 (*rr)(struct mt76_dev *dev, u32 offset);
@@ -62,12 +64,22 @@ struct mt76_queue_buf {
62 int len; 64 int len;
63}; 65};
64 66
67struct mt76u_buf {
68 struct mt76_dev *dev;
69 struct urb *urb;
70 size_t len;
71 bool done;
72};
73
65struct mt76_queue_entry { 74struct mt76_queue_entry {
66 union { 75 union {
67 void *buf; 76 void *buf;
68 struct sk_buff *skb; 77 struct sk_buff *skb;
69 }; 78 };
70 struct mt76_txwi_cache *txwi; 79 union {
80 struct mt76_txwi_cache *txwi;
81 struct mt76u_buf ubuf;
82 };
71 bool schedule; 83 bool schedule;
72}; 84};
73 85
@@ -88,6 +100,7 @@ struct mt76_queue {
88 struct list_head swq; 100 struct list_head swq;
89 int swq_queued; 101 int swq_queued;
90 102
103 u16 first;
91 u16 head; 104 u16 head;
92 u16 tail; 105 u16 tail;
93 int ndesc; 106 int ndesc;
@@ -110,6 +123,10 @@ struct mt76_queue_ops {
110 struct mt76_queue_buf *buf, int nbufs, u32 info, 123 struct mt76_queue_buf *buf, int nbufs, u32 info,
111 struct sk_buff *skb, void *txwi); 124 struct sk_buff *skb, void *txwi);
112 125
126 int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
127 struct sk_buff *skb, struct mt76_wcid *wcid,
128 struct ieee80211_sta *sta);
129
113 void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush, 130 void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
114 int *len, u32 *info, bool *more); 131 int *len, u32 *info, bool *more);
115 132
@@ -187,9 +204,13 @@ struct mt76_rx_tid {
187enum { 204enum {
188 MT76_STATE_INITIALIZED, 205 MT76_STATE_INITIALIZED,
189 MT76_STATE_RUNNING, 206 MT76_STATE_RUNNING,
207 MT76_STATE_MCU_RUNNING,
190 MT76_SCANNING, 208 MT76_SCANNING,
191 MT76_RESET, 209 MT76_RESET,
192 MT76_OFFCHANNEL, 210 MT76_OFFCHANNEL,
211 MT76_REMOVED,
212 MT76_READING_STATS,
213 MT76_MORE_STATS,
193}; 214};
194 215
195struct mt76_hw_cap { 216struct mt76_hw_cap {
@@ -210,6 +231,8 @@ struct mt76_driver_ops {
210 void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q, 231 void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
211 struct mt76_queue_entry *e, bool flush); 232 struct mt76_queue_entry *e, bool flush);
212 233
234 bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);
235
213 void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q, 236 void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
214 struct sk_buff *skb); 237 struct sk_buff *skb);
215 238
@@ -229,6 +252,64 @@ struct mt76_sband {
229 struct mt76_channel_state *chan; 252 struct mt76_channel_state *chan;
230}; 253};
231 254
255/* addr req mask */
256#define MT_VEND_TYPE_EEPROM BIT(31)
257#define MT_VEND_TYPE_CFG BIT(30)
258#define MT_VEND_TYPE_MASK (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)
259
260#define MT_VEND_ADDR(type, n) (MT_VEND_TYPE_##type | (n))
261enum mt_vendor_req {
262 MT_VEND_DEV_MODE = 0x1,
263 MT_VEND_WRITE = 0x2,
264 MT_VEND_MULTI_WRITE = 0x6,
265 MT_VEND_MULTI_READ = 0x7,
266 MT_VEND_READ_EEPROM = 0x9,
267 MT_VEND_WRITE_FCE = 0x42,
268 MT_VEND_WRITE_CFG = 0x46,
269 MT_VEND_READ_CFG = 0x47,
270};
271
272enum mt76u_in_ep {
273 MT_EP_IN_PKT_RX,
274 MT_EP_IN_CMD_RESP,
275 __MT_EP_IN_MAX,
276};
277
278enum mt76u_out_ep {
279 MT_EP_OUT_INBAND_CMD,
280 MT_EP_OUT_AC_BK,
281 MT_EP_OUT_AC_BE,
282 MT_EP_OUT_AC_VI,
283 MT_EP_OUT_AC_VO,
284 MT_EP_OUT_HCCA,
285 __MT_EP_OUT_MAX,
286};
287
288#define MT_SG_MAX_SIZE 8
289#define MT_NUM_TX_ENTRIES 256
290#define MT_NUM_RX_ENTRIES 128
291#define MCU_RESP_URB_SIZE 1024
292struct mt76_usb {
293 struct mutex usb_ctrl_mtx;
294 u8 data[32];
295
296 struct tasklet_struct rx_tasklet;
297 struct tasklet_struct tx_tasklet;
298 struct delayed_work stat_work;
299
300 u8 out_ep[__MT_EP_OUT_MAX];
301 u16 out_max_packet;
302 u8 in_ep[__MT_EP_IN_MAX];
303 u16 in_max_packet;
304
305 struct mt76u_mcu {
306 struct mutex mutex;
307 struct completion cmpl;
308 struct mt76u_buf res;
309 u32 msg_seq;
310 } mcu;
311};
312
232struct mt76_dev { 313struct mt76_dev {
233 struct ieee80211_hw *hw; 314 struct ieee80211_hw *hw;
234 struct cfg80211_chan_def chandef; 315 struct cfg80211_chan_def chandef;
@@ -271,6 +352,8 @@ struct mt76_dev {
271 char led_name[32]; 352 char led_name[32];
272 bool led_al; 353 bool led_al;
273 u8 led_pin; 354 u8 led_pin;
355
356 struct mt76_usb usb;
274}; 357};
275 358
276enum mt76_phy_type { 359enum mt76_phy_type {
@@ -402,6 +485,14 @@ static inline int mt76_decr(int val, int size)
402 return (val - 1) & (size - 1); 485 return (val - 1) & (size - 1);
403} 486}
404 487
488/* Hardware uses mirrored order of queues with Q3
489 * having the highest priority
490 */
491static inline u8 q2hwq(u8 q)
492{
493 return q ^ 0x3;
494}
495
405static inline struct ieee80211_txq * 496static inline struct ieee80211_txq *
406mtxq_to_txq(struct mt76_txq *mtxq) 497mtxq_to_txq(struct mt76_txq *mtxq)
407{ 498{
@@ -421,9 +512,9 @@ wcid_to_sta(struct mt76_wcid *wcid)
421 return container_of(ptr, struct ieee80211_sta, drv_priv); 512 return container_of(ptr, struct ieee80211_sta, drv_priv);
422} 513}
423 514
424int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, 515int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
425 struct sk_buff *skb, struct mt76_wcid *wcid, 516 struct sk_buff *skb, struct mt76_wcid *wcid,
426 struct ieee80211_sta *sta); 517 struct ieee80211_sta *sta);
427 518
428void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb); 519void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
429void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta, 520void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
@@ -454,10 +545,69 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
454 545
455/* internal */ 546/* internal */
456void mt76_tx_free(struct mt76_dev *dev); 547void mt76_tx_free(struct mt76_dev *dev);
548struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
457void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t); 549void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
458void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, 550void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
459 int queue); 551 struct napi_struct *napi);
460void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q); 552void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
553 struct napi_struct *napi);
461void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames); 554void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
462 555
556/* usb */
557static inline bool mt76u_urb_error(struct urb *urb)
558{
559 return urb->status &&
560 urb->status != -ECONNRESET &&
561 urb->status != -ESHUTDOWN &&
562 urb->status != -ENOENT;
563}
564
565/* Map hardware queues to usb endpoints */
566static inline u8 q2ep(u8 qid)
567{
568 /* TODO: take management packets to queue 5 */
569 return qid + 1;
570}
571
572static inline bool mt76u_check_sg(struct mt76_dev *dev)
573{
574 struct usb_interface *intf = to_usb_interface(dev->dev);
575 struct usb_device *udev = interface_to_usbdev(intf);
576
577 return (udev->bus->sg_tablesize > 0 &&
578 (udev->bus->no_sg_constraint ||
579 udev->speed == USB_SPEED_WIRELESS));
580}
581
582int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
583 u8 req_type, u16 val, u16 offset,
584 void *buf, size_t len);
585void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
586 const u16 offset, const u32 val);
587u32 mt76u_rr(struct mt76_dev *dev, u32 addr);
588void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val);
589int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
590void mt76u_deinit(struct mt76_dev *dev);
591int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
592 int nsgs, int len, int sglen, gfp_t gfp);
593void mt76u_buf_free(struct mt76u_buf *buf);
594int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
595 struct mt76u_buf *buf, gfp_t gfp,
596 usb_complete_t complete_fn, void *context);
597int mt76u_submit_rx_buffers(struct mt76_dev *dev);
598int mt76u_alloc_queues(struct mt76_dev *dev);
599void mt76u_stop_queues(struct mt76_dev *dev);
600void mt76u_stop_stat_wk(struct mt76_dev *dev);
601void mt76u_queues_deinit(struct mt76_dev *dev);
602int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
603
604int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
605 int data_len, u32 max_payload, u32 offset);
606void mt76u_mcu_complete_urb(struct urb *urb);
607struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len);
608int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
609 int cmd, bool wait_resp);
610void mt76u_mcu_fw_reset(struct mt76_dev *dev);
611int mt76u_mcu_init_rx(struct mt76_dev *dev);
612
463#endif 613#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
new file mode 100644
index 000000000000..7843908261ba
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
@@ -0,0 +1,7 @@
1obj-$(CONFIG_MT76x0U) += mt76x0.o
2
3mt76x0-objs = \
4 usb.o init.o main.o mcu.o trace.o dma.o eeprom.o phy.o \
5 mac.o util.o debugfs.o tx.o core.o
6# ccflags-y := -DDEBUG
7CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/core.c b/drivers/net/wireless/mediatek/mt76/mt76x0/core.c
new file mode 100644
index 000000000000..892803fce842
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/core.c
@@ -0,0 +1,34 @@
1/*
2 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
3 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include "mt76x0.h"
16
17int mt76x0_wait_asic_ready(struct mt76x0_dev *dev)
18{
19 int i = 100;
20 u32 val;
21
22 do {
23 if (test_bit(MT76_REMOVED, &dev->mt76.state))
24 return -EIO;
25
26 val = mt76_rr(dev, MT_MAC_CSR0);
27 if (val && ~val)
28 return 0;
29
30 udelay(10);
31 } while (i--);
32
33 return -EIO;
34}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c
new file mode 100644
index 000000000000..e7a77a886068
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c
@@ -0,0 +1,166 @@
1/*
2 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
3 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
4 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/debugfs.h>
17
18#include "mt76x0.h"
19#include "eeprom.h"
20
21static int
22mt76_reg_set(void *data, u64 val)
23{
24 struct mt76x0_dev *dev = data;
25
26 mt76_wr(dev, dev->debugfs_reg, val);
27 return 0;
28}
29
30static int
31mt76_reg_get(void *data, u64 *val)
32{
33 struct mt76x0_dev *dev = data;
34
35 *val = mt76_rr(dev, dev->debugfs_reg);
36 return 0;
37}
38
39DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
40
41static int
42mt76x0_ampdu_stat_read(struct seq_file *file, void *data)
43{
44 struct mt76x0_dev *dev = file->private;
45 int i, j;
46
47#define stat_printf(grp, off, name) \
48 seq_printf(file, #name ":\t%llu\n", dev->stats.grp[off])
49
50 stat_printf(rx_stat, 0, rx_crc_err);
51 stat_printf(rx_stat, 1, rx_phy_err);
52 stat_printf(rx_stat, 2, rx_false_cca);
53 stat_printf(rx_stat, 3, rx_plcp_err);
54 stat_printf(rx_stat, 4, rx_fifo_overflow);
55 stat_printf(rx_stat, 5, rx_duplicate);
56
57 stat_printf(tx_stat, 0, tx_fail_cnt);
58 stat_printf(tx_stat, 1, tx_bcn_cnt);
59 stat_printf(tx_stat, 2, tx_success);
60 stat_printf(tx_stat, 3, tx_retransmit);
61 stat_printf(tx_stat, 4, tx_zero_len);
62 stat_printf(tx_stat, 5, tx_underflow);
63
64 stat_printf(aggr_stat, 0, non_aggr_tx);
65 stat_printf(aggr_stat, 1, aggr_tx);
66
67 stat_printf(zero_len_del, 0, tx_zero_len_del);
68 stat_printf(zero_len_del, 1, rx_zero_len_del);
69#undef stat_printf
70
71 seq_puts(file, "Aggregations stats:\n");
72 for (i = 0; i < 4; i++) {
73 for (j = 0; j < 8; j++)
74 seq_printf(file, "%08llx ",
75 dev->stats.aggr_n[i * 8 + j]);
76 seq_putc(file, '\n');
77 }
78
79 seq_printf(file, "recent average AMPDU len: %d\n",
80 atomic_read(&dev->avg_ampdu_len));
81
82 return 0;
83}
84
85static int
86mt76x0_ampdu_stat_open(struct inode *inode, struct file *f)
87{
88 return single_open(f, mt76x0_ampdu_stat_read, inode->i_private);
89}
90
91static const struct file_operations fops_ampdu_stat = {
92 .open = mt76x0_ampdu_stat_open,
93 .read = seq_read,
94 .llseek = seq_lseek,
95 .release = single_release,
96};
97
98static int
99mt76x0_eeprom_param_read(struct seq_file *file, void *data)
100{
101 struct mt76x0_dev *dev = file->private;
102 int i;
103
104 seq_printf(file, "RF freq offset: %hhx\n", dev->ee->rf_freq_off);
105 seq_printf(file, "RSSI offset 2GHz: %hhx %hhx\n",
106 dev->ee->rssi_offset_2ghz[0], dev->ee->rssi_offset_2ghz[1]);
107 seq_printf(file, "RSSI offset 5GHz: %hhx %hhx %hhx\n",
108 dev->ee->rssi_offset_5ghz[0], dev->ee->rssi_offset_5ghz[1],
109 dev->ee->rssi_offset_5ghz[2]);
110 seq_printf(file, "Temperature offset: %hhx\n", dev->ee->temp_off);
111 seq_printf(file, "LNA gain 2Ghz: %hhx\n", dev->ee->lna_gain_2ghz);
112 seq_printf(file, "LNA gain 5Ghz: %hhx %hhx %hhx\n",
113 dev->ee->lna_gain_5ghz[0], dev->ee->lna_gain_5ghz[1],
114 dev->ee->lna_gain_5ghz[2]);
115 seq_printf(file, "Power Amplifier type %hhx\n", dev->ee->pa_type);
116 seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start,
117 dev->ee->reg.start + dev->ee->reg.num - 1);
118
119 seq_puts(file, "Per channel power:\n");
120 for (i = 0; i < 58; i++)
121 seq_printf(file, "\t%d chan:%d pwr:%d\n", i, i,
122 dev->ee->tx_pwr_per_chan[i]);
123
124 seq_puts(file, "Per rate power 2GHz:\n");
125 for (i = 0; i < 5; i++)
126 seq_printf(file, "\t %d bw20:%d bw40:%d\n",
127 i, dev->ee->tx_pwr_cfg_2g[i][0],
128 dev->ee->tx_pwr_cfg_5g[i][1]);
129
130 seq_puts(file, "Per rate power 5GHz:\n");
131 for (i = 0; i < 5; i++)
132 seq_printf(file, "\t %d bw20:%d bw40:%d\n",
133 i, dev->ee->tx_pwr_cfg_5g[i][0],
134 dev->ee->tx_pwr_cfg_5g[i][1]);
135
136 return 0;
137}
138
139static int
140mt76x0_eeprom_param_open(struct inode *inode, struct file *f)
141{
142 return single_open(f, mt76x0_eeprom_param_read, inode->i_private);
143}
144
145static const struct file_operations fops_eeprom_param = {
146 .open = mt76x0_eeprom_param_open,
147 .read = seq_read,
148 .llseek = seq_lseek,
149 .release = single_release,
150};
151
152void mt76x0_init_debugfs(struct mt76x0_dev *dev)
153{
154 struct dentry *dir;
155
156 dir = debugfs_create_dir("mt76x0", dev->mt76.hw->wiphy->debugfsdir);
157 if (!dir)
158 return;
159
160 debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg);
161 debugfs_create_file("regval", S_IRUSR | S_IWUSR, dir, dev,
162 &fops_regval);
163 debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat);
164 debugfs_create_file("eeprom_param", S_IRUSR, dir, dev,
165 &fops_eeprom_param);
166}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c
new file mode 100644
index 000000000000..e2efb430419b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.c
@@ -0,0 +1,522 @@
1/*
2 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
3 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include "mt76x0.h"
16#include "dma.h"
17#include "usb.h"
18#include "trace.h"
19
20static int mt76x0_submit_rx_buf(struct mt76x0_dev *dev,
21 struct mt76x0_dma_buf_rx *e, gfp_t gfp);
22
23static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
24{
25 const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
26 unsigned int hdrlen;
27
28 if (unlikely(len < 10))
29 return 0;
30 hdrlen = ieee80211_hdrlen(hdr->frame_control);
31 if (unlikely(hdrlen > len))
32 return 0;
33 return hdrlen;
34}
35
36static struct sk_buff *
37mt76x0_rx_skb_from_seg(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi,
38 void *data, u32 seg_len, u32 truesize, struct page *p)
39{
40 struct sk_buff *skb;
41 u32 true_len, hdr_len = 0, copy, frag;
42
43 skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
44 if (!skb)
45 return NULL;
46
47 true_len = mt76x0_mac_process_rx(dev, skb, data, rxwi);
48 if (!true_len || true_len > seg_len)
49 goto bad_frame;
50
51 hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
52 if (!hdr_len)
53 goto bad_frame;
54
55 if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
56 memcpy(skb_put(skb, hdr_len), data, hdr_len);
57
58 data += hdr_len + 2;
59 true_len -= hdr_len;
60 hdr_len = 0;
61 }
62
63 /* If not doing paged RX allocated skb will always have enough space */
64 copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
65 frag = true_len - copy;
66
67 memcpy(skb_put(skb, copy), data, copy);
68 data += copy;
69
70 if (frag) {
71 skb_add_rx_frag(skb, 0, p, data - page_address(p),
72 frag, truesize);
73 get_page(p);
74 }
75
76 return skb;
77
78bad_frame:
79 dev_err_ratelimited(dev->mt76.dev, "Error: incorrect frame len:%u hdr:%u\n",
80 true_len, hdr_len);
81 dev_kfree_skb(skb);
82 return NULL;
83}
84
85static void mt76x0_rx_process_seg(struct mt76x0_dev *dev, u8 *data,
86 u32 seg_len, struct page *p)
87{
88 struct sk_buff *skb;
89 struct mt76x0_rxwi *rxwi;
90 u32 fce_info, truesize = seg_len;
91
92 /* DMA_INFO field at the beginning of the segment contains only some of
93 * the information, we need to read the FCE descriptor from the end.
94 */
95 fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
96 seg_len -= MT_FCE_INFO_LEN;
97
98 data += MT_DMA_HDR_LEN;
99 seg_len -= MT_DMA_HDR_LEN;
100
101 rxwi = (struct mt76x0_rxwi *) data;
102 data += sizeof(struct mt76x0_rxwi);
103 seg_len -= sizeof(struct mt76x0_rxwi);
104
105 if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
106 dev_err_once(dev->mt76.dev, "Error: RX path seen a non-pkt urb\n");
107
108 trace_mt76x0_rx(&dev->mt76, rxwi, fce_info);
109
110 skb = mt76x0_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
111 if (!skb)
112 return;
113
114 spin_lock(&dev->mac_lock);
115 ieee80211_rx(dev->mt76.hw, skb);
116 spin_unlock(&dev->mac_lock);
117}
118
119static u16 mt76x0_rx_next_seg_len(u8 *data, u32 data_len)
120{
121 u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
122 sizeof(struct mt76x0_rxwi) + MT_FCE_INFO_LEN;
123 u16 dma_len = get_unaligned_le16(data);
124
125 if (data_len < min_seg_len ||
126 WARN_ON(!dma_len) ||
127 WARN_ON(dma_len + MT_DMA_HDRS > data_len) ||
128 WARN_ON(dma_len & 0x3))
129 return 0;
130
131 return MT_DMA_HDRS + dma_len;
132}
133
134static void
135mt76x0_rx_process_entry(struct mt76x0_dev *dev, struct mt76x0_dma_buf_rx *e)
136{
137 u32 seg_len, data_len = e->urb->actual_length;
138 u8 *data = page_address(e->p);
139 struct page *new_p = NULL;
140 int cnt = 0;
141
142 if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
143 return;
144
145 /* Copy if there is very little data in the buffer. */
146 if (data_len > 512)
147 new_p = dev_alloc_pages(MT_RX_ORDER);
148
149 while ((seg_len = mt76x0_rx_next_seg_len(data, data_len))) {
150 mt76x0_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);
151
152 data_len -= seg_len;
153 data += seg_len;
154 cnt++;
155 }
156
157 if (cnt > 1)
158 trace_mt76x0_rx_dma_aggr(&dev->mt76, cnt, !!new_p);
159
160 if (new_p) {
161 /* we have one extra ref from the allocator */
162 __free_pages(e->p, MT_RX_ORDER);
163
164 e->p = new_p;
165 }
166}
167
168static struct mt76x0_dma_buf_rx *
169mt76x0_rx_get_pending_entry(struct mt76x0_dev *dev)
170{
171 struct mt76x0_rx_queue *q = &dev->rx_q;
172 struct mt76x0_dma_buf_rx *buf = NULL;
173 unsigned long flags;
174
175 spin_lock_irqsave(&dev->rx_lock, flags);
176
177 if (!q->pending)
178 goto out;
179
180 buf = &q->e[q->start];
181 q->pending--;
182 q->start = (q->start + 1) % q->entries;
183out:
184 spin_unlock_irqrestore(&dev->rx_lock, flags);
185
186 return buf;
187}
188
189static void mt76x0_complete_rx(struct urb *urb)
190{
191 struct mt76x0_dev *dev = urb->context;
192 struct mt76x0_rx_queue *q = &dev->rx_q;
193 unsigned long flags;
194
195 spin_lock_irqsave(&dev->rx_lock, flags);
196
197 if (mt76x0_urb_has_error(urb))
198 dev_err(dev->mt76.dev, "Error: RX urb failed:%d\n", urb->status);
199 if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
200 goto out;
201
202 q->end = (q->end + 1) % q->entries;
203 q->pending++;
204 tasklet_schedule(&dev->rx_tasklet);
205out:
206 spin_unlock_irqrestore(&dev->rx_lock, flags);
207}
208
209static void mt76x0_rx_tasklet(unsigned long data)
210{
211 struct mt76x0_dev *dev = (struct mt76x0_dev *) data;
212 struct mt76x0_dma_buf_rx *e;
213
214 while ((e = mt76x0_rx_get_pending_entry(dev))) {
215 if (e->urb->status)
216 continue;
217
218 mt76x0_rx_process_entry(dev, e);
219 mt76x0_submit_rx_buf(dev, e, GFP_ATOMIC);
220 }
221}
222
223static void mt76x0_complete_tx(struct urb *urb)
224{
225 struct mt76x0_tx_queue *q = urb->context;
226 struct mt76x0_dev *dev = q->dev;
227 struct sk_buff *skb;
228 unsigned long flags;
229
230 spin_lock_irqsave(&dev->tx_lock, flags);
231
232 if (mt76x0_urb_has_error(urb))
233 dev_err(dev->mt76.dev, "Error: TX urb failed:%d\n", urb->status);
234 if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
235 goto out;
236
237 skb = q->e[q->start].skb;
238 trace_mt76x0_tx_dma_done(&dev->mt76, skb);
239
240 __skb_queue_tail(&dev->tx_skb_done, skb);
241 tasklet_schedule(&dev->tx_tasklet);
242
243 if (q->used == q->entries - q->entries / 8)
244 ieee80211_wake_queue(dev->mt76.hw, skb_get_queue_mapping(skb));
245
246 q->start = (q->start + 1) % q->entries;
247 q->used--;
248out:
249 spin_unlock_irqrestore(&dev->tx_lock, flags);
250}
251
252static void mt76x0_tx_tasklet(unsigned long data)
253{
254 struct mt76x0_dev *dev = (struct mt76x0_dev *) data;
255 struct sk_buff_head skbs;
256 unsigned long flags;
257
258 __skb_queue_head_init(&skbs);
259
260 spin_lock_irqsave(&dev->tx_lock, flags);
261
262 set_bit(MT76_MORE_STATS, &dev->mt76.state);
263 if (!test_and_set_bit(MT76_READING_STATS, &dev->mt76.state))
264 queue_delayed_work(dev->stat_wq, &dev->stat_work,
265 msecs_to_jiffies(10));
266
267 skb_queue_splice_init(&dev->tx_skb_done, &skbs);
268
269 spin_unlock_irqrestore(&dev->tx_lock, flags);
270
271 while (!skb_queue_empty(&skbs)) {
272 struct sk_buff *skb = __skb_dequeue(&skbs);
273
274 mt76x0_tx_status(dev, skb);
275 }
276}
277
278static int mt76x0_dma_submit_tx(struct mt76x0_dev *dev,
279 struct sk_buff *skb, u8 ep)
280{
281 struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
282 unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_ep[ep]);
283 struct mt76x0_dma_buf_tx *e;
284 struct mt76x0_tx_queue *q = &dev->tx_q[ep];
285 unsigned long flags;
286 int ret;
287
288 spin_lock_irqsave(&dev->tx_lock, flags);
289
290 if (WARN_ON_ONCE(q->entries <= q->used)) {
291 ret = -ENOSPC;
292 goto out;
293 }
294
295 e = &q->e[q->end];
296 e->skb = skb;
297 usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
298 mt76x0_complete_tx, q);
299 ret = usb_submit_urb(e->urb, GFP_ATOMIC);
300 if (ret) {
301 /* Special-handle ENODEV from TX urb submission because it will
302 * often be the first ENODEV we see after device is removed.
303 */
304 if (ret == -ENODEV)
305 set_bit(MT76_REMOVED, &dev->mt76.state);
306 else
307 dev_err(dev->mt76.dev, "Error: TX urb submit failed:%d\n",
308 ret);
309 goto out;
310 }
311
312 q->end = (q->end + 1) % q->entries;
313 q->used++;
314
315 if (q->used >= q->entries)
316 ieee80211_stop_queue(dev->mt76.hw, skb_get_queue_mapping(skb));
317out:
318 spin_unlock_irqrestore(&dev->tx_lock, flags);
319
320 return ret;
321}
322
323/* Map USB endpoint number to Q id in the DMA engine */
324static enum mt76_qsel ep2dmaq(u8 ep)
325{
326 if (ep == 5)
327 return MT_QSEL_MGMT;
328 return MT_QSEL_EDCA;
329}
330
331int mt76x0_dma_enqueue_tx(struct mt76x0_dev *dev, struct sk_buff *skb,
332 struct mt76_wcid *wcid, int hw_q)
333{
334 u8 ep = q2ep(hw_q);
335 u32 dma_flags;
336 int ret;
337
338 dma_flags = MT_TXD_PKT_INFO_80211;
339 if (wcid->hw_key_idx == 0xff)
340 dma_flags |= MT_TXD_PKT_INFO_WIV;
341
342 ret = mt76x0_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
343 if (ret)
344 return ret;
345
346 ret = mt76x0_dma_submit_tx(dev, skb, ep);
347
348 if (ret) {
349 ieee80211_free_txskb(dev->mt76.hw, skb);
350 return ret;
351 }
352
353 return 0;
354}
355
356static void mt76x0_kill_rx(struct mt76x0_dev *dev)
357{
358 int i;
359 unsigned long flags;
360
361 spin_lock_irqsave(&dev->rx_lock, flags);
362
363 for (i = 0; i < dev->rx_q.entries; i++) {
364 int next = dev->rx_q.end;
365
366 spin_unlock_irqrestore(&dev->rx_lock, flags);
367 usb_poison_urb(dev->rx_q.e[next].urb);
368 spin_lock_irqsave(&dev->rx_lock, flags);
369 }
370
371 spin_unlock_irqrestore(&dev->rx_lock, flags);
372}
373
374static int mt76x0_submit_rx_buf(struct mt76x0_dev *dev,
375 struct mt76x0_dma_buf_rx *e, gfp_t gfp)
376{
377 struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
378 u8 *buf = page_address(e->p);
379 unsigned pipe;
380 int ret;
381
382 pipe = usb_rcvbulkpipe(usb_dev, dev->in_ep[MT_EP_IN_PKT_RX]);
383
384 usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
385 mt76x0_complete_rx, dev);
386
387 trace_mt76x0_submit_urb(&dev->mt76, e->urb);
388 ret = usb_submit_urb(e->urb, gfp);
389 if (ret)
390 dev_err(dev->mt76.dev, "Error: submit RX URB failed:%d\n", ret);
391
392 return ret;
393}
394
395static int mt76x0_submit_rx(struct mt76x0_dev *dev)
396{
397 int i, ret;
398
399 for (i = 0; i < dev->rx_q.entries; i++) {
400 ret = mt76x0_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
401 if (ret)
402 return ret;
403 }
404
405 return 0;
406}
407
408static void mt76x0_free_rx(struct mt76x0_dev *dev)
409{
410 int i;
411
412 for (i = 0; i < dev->rx_q.entries; i++) {
413 __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
414 usb_free_urb(dev->rx_q.e[i].urb);
415 }
416}
417
418static int mt76x0_alloc_rx(struct mt76x0_dev *dev)
419{
420 int i;
421
422 memset(&dev->rx_q, 0, sizeof(dev->rx_q));
423 dev->rx_q.dev = dev;
424 dev->rx_q.entries = N_RX_ENTRIES;
425
426 for (i = 0; i < N_RX_ENTRIES; i++) {
427 dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
428 dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);
429
430 if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
431 return -ENOMEM;
432 }
433
434 return 0;
435}
436
437static void mt76x0_free_tx_queue(struct mt76x0_tx_queue *q)
438{
439 int i;
440
441 WARN_ON(q->used);
442
443 for (i = 0; i < q->entries; i++) {
444 usb_poison_urb(q->e[i].urb);
445 usb_free_urb(q->e[i].urb);
446 }
447}
448
449static void mt76x0_free_tx(struct mt76x0_dev *dev)
450{
451 int i;
452
453 for (i = 0; i < __MT_EP_OUT_MAX; i++)
454 mt76x0_free_tx_queue(&dev->tx_q[i]);
455}
456
457static int mt76x0_alloc_tx_queue(struct mt76x0_dev *dev,
458 struct mt76x0_tx_queue *q)
459{
460 int i;
461
462 q->dev = dev;
463 q->entries = N_TX_ENTRIES;
464
465 for (i = 0; i < N_TX_ENTRIES; i++) {
466 q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
467 if (!q->e[i].urb)
468 return -ENOMEM;
469 }
470
471 return 0;
472}
473
474static int mt76x0_alloc_tx(struct mt76x0_dev *dev)
475{
476 int i;
477
478 dev->tx_q = devm_kcalloc(dev->mt76.dev, __MT_EP_OUT_MAX,
479 sizeof(*dev->tx_q), GFP_KERNEL);
480
481 for (i = 0; i < __MT_EP_OUT_MAX; i++)
482 if (mt76x0_alloc_tx_queue(dev, &dev->tx_q[i]))
483 return -ENOMEM;
484
485 return 0;
486}
487
488int mt76x0_dma_init(struct mt76x0_dev *dev)
489{
490 int ret = -ENOMEM;
491
492 tasklet_init(&dev->tx_tasklet, mt76x0_tx_tasklet, (unsigned long) dev);
493 tasklet_init(&dev->rx_tasklet, mt76x0_rx_tasklet, (unsigned long) dev);
494
495 ret = mt76x0_alloc_tx(dev);
496 if (ret)
497 goto err;
498 ret = mt76x0_alloc_rx(dev);
499 if (ret)
500 goto err;
501
502 ret = mt76x0_submit_rx(dev);
503 if (ret)
504 goto err;
505
506 return 0;
507err:
508 mt76x0_dma_cleanup(dev);
509 return ret;
510}
511
512void mt76x0_dma_cleanup(struct mt76x0_dev *dev)
513{
514 mt76x0_kill_rx(dev);
515
516 tasklet_kill(&dev->rx_tasklet);
517
518 mt76x0_free_rx(dev);
519 mt76x0_free_tx(dev);
520
521 tasklet_kill(&dev->tx_tasklet);
522}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h
new file mode 100644
index 000000000000..891ce1c3461f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/dma.h
@@ -0,0 +1,126 @@
1/*
2 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
3 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef __MT76X0U_DMA_H
16#define __MT76X0U_DMA_H
17
18#include <asm/unaligned.h>
19#include <linux/skbuff.h>
20
21#define MT_DMA_HDR_LEN 4
22#define MT_RX_INFO_LEN 4
23#define MT_FCE_INFO_LEN 4
24#define MT_DMA_HDRS (MT_DMA_HDR_LEN + MT_RX_INFO_LEN)
25
26/* Common Tx DMA descriptor fields */
27#define MT_TXD_INFO_LEN GENMASK(15, 0)
28#define MT_TXD_INFO_D_PORT GENMASK(29, 27)
29#define MT_TXD_INFO_TYPE GENMASK(31, 30)
30
31/* Tx DMA MCU command specific flags */
32#define MT_TXD_CMD_SEQ GENMASK(19, 16)
33#define MT_TXD_CMD_TYPE GENMASK(26, 20)
34
35enum mt76_msg_port {
36 WLAN_PORT,
37 CPU_RX_PORT,
38 CPU_TX_PORT,
39 HOST_PORT,
40 VIRTUAL_CPU_RX_PORT,
41 VIRTUAL_CPU_TX_PORT,
42 DISCARD,
43};
44
45enum mt76_info_type {
46 DMA_PACKET,
47 DMA_COMMAND,
48};
49
50/* Tx DMA packet specific flags */
51#define MT_TXD_PKT_INFO_NEXT_VLD BIT(16)
52#define MT_TXD_PKT_INFO_TX_BURST BIT(17)
53#define MT_TXD_PKT_INFO_80211 BIT(19)
54#define MT_TXD_PKT_INFO_TSO BIT(20)
55#define MT_TXD_PKT_INFO_CSO BIT(21)
56#define MT_TXD_PKT_INFO_WIV BIT(24)
57#define MT_TXD_PKT_INFO_QSEL GENMASK(26, 25)
58
59enum mt76_qsel {
60 MT_QSEL_MGMT,
61 MT_QSEL_HCCA,
62 MT_QSEL_EDCA,
63 MT_QSEL_EDCA_2,
64};
65
66
/* Prepend the 4-byte TXINFO DMA header and append trailing zero padding.
 *
 * Returns 0 on success or a negative errno from skb_put_padto().
 * NOTE(review): skb_push() assumes callers reserved at least
 * MT_DMA_HDR_LEN bytes of headroom — confirm at call sites.
 */
static inline int mt76x0_dma_skb_wrap(struct sk_buff *skb,
				      enum mt76_msg_port d_port,
				      enum mt76_info_type type, u32 flags)
{
	u32 info;

	/* Buffer layout:
	 * | 4B | xfer len | pad | 4B |
	 * | TXINFO | pkt/cmd | zero pad to 4B | zero |
	 *
	 * length field of TXINFO should be set to 'xfer len'.
	 */

	info = flags |
		FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
		FIELD_PREP(MT_TXD_INFO_D_PORT, d_port) |
		FIELD_PREP(MT_TXD_INFO_TYPE, type);

	put_unaligned_le32(info, skb_push(skb, sizeof(info)));
	/* Pad to a 4-byte boundary plus a 4-byte zero terminator. */
	return skb_put_padto(skb, round_up(skb->len, 4) + 4);
}
88
/* Wrap a data frame for DMA: select the TX queue via QSEL and route the
 * packet to the WLAN port as a plain DMA packet.
 */
static inline int
mt76x0_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
{
	flags |= FIELD_PREP(MT_TXD_PKT_INFO_QSEL, qsel);
	return mt76x0_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
}
95
96/* Common Rx DMA descriptor fields */
97#define MT_RXD_INFO_LEN GENMASK(13, 0)
98#define MT_RXD_INFO_PCIE_INTR BIT(24)
99#define MT_RXD_INFO_QSEL GENMASK(26, 25)
100#define MT_RXD_INFO_PORT GENMASK(29, 27)
101#define MT_RXD_INFO_TYPE GENMASK(31, 30)
102
103/* Rx DMA packet specific flags */
104#define MT_RXD_PKT_INFO_UDP_ERR BIT(16)
105#define MT_RXD_PKT_INFO_TCP_ERR BIT(17)
106#define MT_RXD_PKT_INFO_IP_ERR BIT(18)
107#define MT_RXD_PKT_INFO_PKT_80211 BIT(19)
108#define MT_RXD_PKT_INFO_L3L4_DONE BIT(20)
109#define MT_RXD_PKT_INFO_MAC_LEN GENMASK(23, 21)
110
111/* Rx DMA MCU command specific flags */
112#define MT_RXD_CMD_INFO_SELF_GEN BIT(15)
113#define MT_RXD_CMD_INFO_CMD_SEQ GENMASK(19, 16)
114#define MT_RXD_CMD_INFO_EVT_TYPE GENMASK(23, 20)
115
116enum mt76_evt_type {
117 CMD_DONE,
118 CMD_ERROR,
119 CMD_RETRY,
120 EVENT_PWR_RSP,
121 EVENT_WOW_RSP,
122 EVENT_CARRIER_DETECT_RSP,
123 EVENT_DFS_DETECT_RSP,
124};
125
126#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
new file mode 100644
index 000000000000..1ecd018f12b8
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -0,0 +1,445 @@
1/*
2 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
3 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
4 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/of.h>
17#include <linux/mtd/mtd.h>
18#include <linux/mtd/partitions.h>
19#include <linux/etherdevice.h>
20#include <asm/unaligned.h>
21#include "mt76x0.h"
22#include "eeprom.h"
23
24static bool
25field_valid(u8 val)
26{
27 return val != 0xff;
28}
29
/* Return the EEPROM byte, or 0 if the field is unprogrammed (0xff).
 *
 * NOTE(review): the return type is s8, so raw values above 0x7f come
 * back negative; current callers only use the raw bit pattern, but
 * confirm before adding new callers that treat the result numerically.
 */
static s8
field_validate(u8 val)
{
	if (!field_valid(val))
		return 0;

	return val;
}
38
/* Decode a 'size'-bit EEPROM value into a signed int.
 *
 * NOTE(review): this is NOT two's-complement sign extension — a set top
 * bit yields a positive result and a clear top bit a negative one. The
 * identical helper exists in the mt76x2 driver, so this appears to be
 * the vendor's EEPROM sign convention rather than a bug; confirm against
 * the EEPROM format documentation before changing.
 */
static inline int
sign_extend(u32 val, unsigned int size)
{
	bool sign = val & BIT(size - 1);

	val &= BIT(size - 1) - 1;

	return sign ? val : -val;
}
48
/* Read one 16-byte block from the efuse at 'addr' (16-byte aligned) into
 * 'data', using logical or physical addressing per 'mode'.
 *
 * Returns 0 on success (including unmapped regions, which are filled
 * with 0xff) or -ETIMEDOUT if the efuse engine does not complete.
 */
static int
mt76x0_efuse_read(struct mt76x0_dev *dev, u16 addr, u8 *data,
		   enum mt76x0_eeprom_access_modes mode)
{
	u32 val;
	int i;

	/* Program block address and mode, then kick the read. */
	val = mt76_rr(dev, MT_EFUSE_CTRL);
	val &= ~(MT_EFUSE_CTRL_AIN |
		 MT_EFUSE_CTRL_MODE);
	val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
	       FIELD_PREP(MT_EFUSE_CTRL_MODE, mode) |
	       MT_EFUSE_CTRL_KICK;
	mt76_wr(dev, MT_EFUSE_CTRL, val);

	/* KICK self-clears when the engine is done. */
	if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
		return -ETIMEDOUT;

	val = mt76_rr(dev, MT_EFUSE_CTRL);
	if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
		/* Parts of eeprom not in the usage map (0x80-0xc0,0xf0)
		 * will not return valid data but it's ok.
		 */
		memset(data, 0xff, 16);
		return 0;
	}

	/* Copy the four 32-bit data words out in little-endian order. */
	for (i = 0; i < 4; i++) {
		val = mt76_rr(dev, MT_EFUSE_DATA(i));
		put_unaligned_le32(val, data + 4 * i);
	}

	return 0;
}
83
84static int
85mt76x0_efuse_physical_size_check(struct mt76x0_dev *dev)
86{
87 const int map_reads = DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16);
88 u8 data[map_reads * 16];
89 int ret, i;
90 u32 start = 0, end = 0, cnt_free;
91
92 for (i = 0; i < map_reads; i++) {
93 ret = mt76x0_efuse_read(dev, MT_EE_USAGE_MAP_START + i * 16,
94 data + i * 16, MT_EE_PHYSICAL_READ);
95 if (ret)
96 return ret;
97 }
98
99 for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++)
100 if (!data[i]) {
101 if (!start)
102 start = MT_EE_USAGE_MAP_START + i;
103 end = MT_EE_USAGE_MAP_START + i;
104 }
105 cnt_free = end - start + 1;
106
107 if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) {
108 dev_err(dev->mt76.dev, "Error: your device needs default EEPROM file and this driver doesn't support it!\n");
109 return -EINVAL;
110 }
111
112 return 0;
113}
114
/* Parse NIC_CONF_0/1: supported bands, PA type; warn about hardware
 * configurations this driver does not support.
 */
static void
mt76x0_set_chip_cap(struct mt76x0_dev *dev, u8 *eeprom)
{
	enum mt76x2_board_type { BOARD_TYPE_2GHZ = 1, BOARD_TYPE_5GHZ = 2 };
	u16 nic_conf0 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_0);
	u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);

	dev_dbg(dev->mt76.dev, "NIC_CONF0: %04x NIC_CONF1: %04x\n", nic_conf0, nic_conf1);

	switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, nic_conf0)) {
	case BOARD_TYPE_5GHZ:
		dev->ee->has_5ghz = true;
		break;
	case BOARD_TYPE_2GHZ:
		dev->ee->has_2ghz = true;
		break;
	default:
		/* Unknown board type: assume dual band. */
		dev->ee->has_2ghz = true;
		dev->ee->has_5ghz = true;
		break;
	}

	dev_dbg(dev->mt76.dev, "Has 2GHZ %d 5GHZ %d\n", dev->ee->has_2ghz, dev->ee->has_5ghz);

	/* Discard the low byte when it is unprogrammed (0xff). */
	if (!field_valid(nic_conf1 & 0xff))
		nic_conf1 &= 0xff00;

	if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL)
		dev_err(dev->mt76.dev,
			"Error: this driver does not support HW RF ctrl\n");

	/* High byte unprogrammed: leave pa_type at its default. */
	if (!field_valid(nic_conf0 >> 8))
		return;

	if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
	    FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
		dev_err(dev->mt76.dev,
			"Error: device has more than 1 RX/TX stream!\n");

	dev->ee->pa_type = FIELD_GET(MT_EE_NIC_CONF_0_PA_TYPE, nic_conf0);
	dev_dbg(dev->mt76.dev, "PA Type %d\n", dev->ee->pa_type);
}
157
/* Load the MAC address from EEPROM (random fallback if invalid) and
 * program it into the MAC address registers. Always returns 0.
 */
static int
mt76x0_set_macaddr(struct mt76x0_dev *dev, const u8 *eeprom)
{
	const void *src = eeprom + MT_EE_MAC_ADDR;

	ether_addr_copy(dev->macaddr, src);

	if (!is_valid_ether_addr(dev->macaddr)) {
		eth_random_addr(dev->macaddr);
		dev_info(dev->mt76.dev,
			 "Invalid MAC address, using random address %pM\n",
			 dev->macaddr);
	}

	/* DW1 also carries the unicast-to-me mask alongside the top
	 * two address bytes.
	 */
	mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
	mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) |
		FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));

	return 0;
}
178
179static void
180mt76x0_set_temp_offset(struct mt76x0_dev *dev, u8 *eeprom)
181{
182 u8 temp = eeprom[MT_EE_TEMP_OFFSET];
183
184 if (field_valid(temp))
185 dev->ee->temp_off = sign_extend(temp, 8);
186 else
187 dev->ee->temp_off = -10;
188}
189
/* Map the EEPROM 2 GHz country-region code to a channel range,
 * defaulting to channels 1-14 for unknown codes.
 */
static void
mt76x0_set_country_reg(struct mt76x0_dev *dev, u8 *eeprom)
{
	/* Note: - region 31 is not valid for mt76x0 (see rtmp_init.c)
	 *	 - comments in rtmp_def.h are incorrect (see rt_channel.c)
	 */
	static const struct reg_channel_bounds chan_bounds[] = {
		/* EEPROM country regions 0 - 7 */
		{  1, 11 },	{  1, 13 },	{ 10,  2 },	{ 10,  4 },
		{ 14,  1 },	{  1, 14 },	{  3,  7 },	{  5,  9 },
		/* EEPROM country regions 32 - 33 */
		{  1, 11 },	{  1, 14 }
	};
	u8 val = eeprom[MT_EE_COUNTRY_REGION_2GHZ];
	int idx = -1;

	dev_dbg(dev->mt76.dev, "REG 2GHZ %u REG 5GHZ %u\n", val, eeprom[MT_EE_COUNTRY_REGION_5GHZ]);
	/* Regions 0-7 map directly; region 32 starts at table index 8. */
	if (val < 8)
		idx = val;
	if (val > 31 && val < 33)
		idx = val - 32 + 8;

	if (idx != -1)
		dev_info(dev->mt76.dev,
			 "EEPROM country region %02hhx (channels %hhd-%hhd)\n",
			 val, chan_bounds[idx].start,
			 chan_bounds[idx].start + chan_bounds[idx].num - 1);
	else
		idx = 5; /* channels 1 - 14 */

	dev->ee->reg = chan_bounds[idx];

	/* TODO: country region 33 is special - phy should be set to B-mode
	 *	 before entering channel 14 (see sta/connect.c)
	 */
}
226
/* Combine the base RF frequency offset with its sign-magnitude
 * compensation byte (bit 7 = subtract, low 7 bits = magnitude).
 */
static void
mt76x0_set_rf_freq_off(struct mt76x0_dev *dev, u8 *eeprom)
{
	u8 comp;

	dev->ee->rf_freq_off = field_validate(eeprom[MT_EE_FREQ_OFFSET]);
	comp = field_validate(eeprom[MT_EE_FREQ_OFFSET_COMPENSATION]);

	if (comp & BIT(7))
		dev->ee->rf_freq_off -= comp & 0x7f;
	else
		dev->ee->rf_freq_off += comp;
}
240
241static void
242mt76x0_set_lna_gain(struct mt76x0_dev *dev, u8 *eeprom)
243{
244 s8 gain;
245
246 dev->ee->lna_gain_2ghz = eeprom[MT_EE_LNA_GAIN_2GHZ];
247 dev->ee->lna_gain_5ghz[0] = eeprom[MT_EE_LNA_GAIN_5GHZ_0];
248
249 gain = eeprom[MT_EE_LNA_GAIN_5GHZ_1];
250 if (gain == 0xff || gain == 0)
251 dev->ee->lna_gain_5ghz[1] = dev->ee->lna_gain_5ghz[0];
252 else
253 dev->ee->lna_gain_5ghz[1] = gain;
254
255 gain = eeprom[MT_EE_LNA_GAIN_5GHZ_2];
256 if (gain == 0xff || gain == 0)
257 dev->ee->lna_gain_5ghz[2] = dev->ee->lna_gain_5ghz[0];
258 else
259 dev->ee->lna_gain_5ghz[2] = gain;
260}
261
262static void
263mt76x0_set_rssi_offset(struct mt76x0_dev *dev, u8 *eeprom)
264{
265 int i;
266 s8 *rssi_offset = dev->ee->rssi_offset_2ghz;
267
268 for (i = 0; i < 2; i++) {
269 rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET + i];
270
271 if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
272 dev_warn(dev->mt76.dev,
273 "Warning: EEPROM RSSI is invalid %02hhx\n",
274 rssi_offset[i]);
275 rssi_offset[i] = 0;
276 }
277 }
278
279 rssi_offset = dev->ee->rssi_offset_5ghz;
280
281 for (i = 0; i < 3; i++) {
282 rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET_5GHZ + i];
283
284 if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
285 dev_warn(dev->mt76.dev,
286 "Warning: EEPROM RSSI is invalid %02hhx\n",
287 rssi_offset[i]);
288 rssi_offset[i] = 0;
289 }
290 }
291}
292
293static u32
294calc_bw40_power_rate(u32 value, int delta)
295{
296 u32 ret = 0;
297 int i, tmp;
298
299 for (i = 0; i < 4; i++) {
300 tmp = s6_to_int((value >> i*8) & 0xff) + delta;
301 ret |= (u32)(int_to_s6(tmp)) << i*8;
302 }
303
304 return ret;
305}
306
307static s8
308get_delta(u8 val)
309{
310 s8 ret;
311
312 if (!field_valid(val) || !(val & BIT(7)))
313 return 0;
314
315 ret = val & 0x1f;
316 if (ret > 8)
317 ret = 8;
318 if (val & BIT(6))
319 ret = -ret;
320
321 return ret;
322}
323
/* Load per-rate TX power tables for 2 and 5 GHz; for each entry, column
 * 0 is the BW20 value and column 1 the BW40 value derived by applying
 * the band's BW40 delta.
 */
static void
mt76x0_set_tx_power_per_rate(struct mt76x0_dev *dev, u8 *eeprom)
{
	s8 bw40_delta_2g, bw40_delta_5g;
	u32 val;
	int i;

	bw40_delta_2g = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40]);
	bw40_delta_5g = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40 + 1]);

	for (i = 0; i < 5; i++) {
		val = get_unaligned_le32(eeprom + MT_EE_TX_POWER_BYRATE(i));

		/* Skip last 16 bits. */
		if (i == 4)
			val &= 0x0000ffff;

		dev->ee->tx_pwr_cfg_2g[i][0] = val;
		dev->ee->tx_pwr_cfg_2g[i][1] = calc_bw40_power_rate(val, bw40_delta_2g);
	}

	/* Reading per rate tx power for 5 GHz band is a bit more complex. Note
	 * we mix 16 bit and 32 bit reads and sometimes do shifts.
	 * NOTE(review): raw offsets 0x120/0x122/0x126 and 0xec/0xee are taken
	 * from the vendor layout (0x120 == MT_EE_TX_POWER_BYRATE_BASE_5GHZ);
	 * confirm against the EEPROM map before modifying.
	 */
	val = get_unaligned_le16(eeprom + 0x120);
	val <<= 16;
	dev->ee->tx_pwr_cfg_5g[0][0] = val;
	dev->ee->tx_pwr_cfg_5g[0][1] = calc_bw40_power_rate(val, bw40_delta_5g);

	val = get_unaligned_le32(eeprom + 0x122);
	dev->ee->tx_pwr_cfg_5g[1][0] = val;
	dev->ee->tx_pwr_cfg_5g[1][1] = calc_bw40_power_rate(val, bw40_delta_5g);

	val = get_unaligned_le16(eeprom + 0x126);
	dev->ee->tx_pwr_cfg_5g[2][0] = val;
	dev->ee->tx_pwr_cfg_5g[2][1] = calc_bw40_power_rate(val, bw40_delta_5g);

	val = get_unaligned_le16(eeprom + 0xec);
	val <<= 16;
	dev->ee->tx_pwr_cfg_5g[3][0] = val;
	dev->ee->tx_pwr_cfg_5g[3][1] = calc_bw40_power_rate(val, bw40_delta_5g);

	val = get_unaligned_le16(eeprom + 0xee);
	dev->ee->tx_pwr_cfg_5g[4][0] = val;
	dev->ee->tx_pwr_cfg_5g[4][1] = calc_bw40_power_rate(val, bw40_delta_5g);
}
370
371static void
372mt76x0_set_tx_power_per_chan(struct mt76x0_dev *dev, u8 *eeprom)
373{
374 int i;
375 u8 tx_pwr;
376
377 for (i = 0; i < 14; i++) {
378 tx_pwr = eeprom[MT_EE_TX_POWER_OFFSET_2GHZ + i];
379 if (tx_pwr <= 0x3f && tx_pwr > 0)
380 dev->ee->tx_pwr_per_chan[i] = tx_pwr;
381 else
382 dev->ee->tx_pwr_per_chan[i] = 5;
383 }
384
385 for (i = 0; i < 40; i++) {
386 tx_pwr = eeprom[MT_EE_TX_POWER_OFFSET_5GHZ + i];
387 if (tx_pwr <= 0x3f && tx_pwr > 0)
388 dev->ee->tx_pwr_per_chan[14 + i] = tx_pwr;
389 else
390 dev->ee->tx_pwr_per_chan[14 + i] = 5;
391 }
392
393 dev->ee->tx_pwr_per_chan[54] = dev->ee->tx_pwr_per_chan[22];
394 dev->ee->tx_pwr_per_chan[55] = dev->ee->tx_pwr_per_chan[28];
395 dev->ee->tx_pwr_per_chan[56] = dev->ee->tx_pwr_per_chan[34];
396 dev->ee->tx_pwr_per_chan[57] = dev->ee->tx_pwr_per_chan[44];
397}
398
/* Read the full EEPROM image over efuse and populate dev->ee.
 *
 * Returns 0 on success or a negative errno. At the 'out' label, 'ret'
 * holds 0 from the last successful efuse read or the first read error.
 */
int
mt76x0_eeprom_init(struct mt76x0_dev *dev)
{
	u8 *eeprom;
	int i, ret;

	ret = mt76x0_efuse_physical_size_check(dev);
	if (ret)
		return ret;

	/* devm allocation: freed automatically with the device. */
	dev->ee = devm_kzalloc(dev->mt76.dev, sizeof(*dev->ee), GFP_KERNEL);
	if (!dev->ee)
		return -ENOMEM;

	eeprom = kmalloc(MT76X0_EEPROM_SIZE, GFP_KERNEL);
	if (!eeprom)
		return -ENOMEM;

	/* Efuse reads are done in 16-byte blocks. */
	for (i = 0; i + 16 <= MT76X0_EEPROM_SIZE; i += 16) {
		ret = mt76x0_efuse_read(dev, i, eeprom + i, MT_EE_READ);
		if (ret)
			goto out;
	}

	if (eeprom[MT_EE_VERSION_EE] > MT76X0U_EE_MAX_VER)
		dev_warn(dev->mt76.dev,
			 "Warning: unsupported EEPROM version %02hhx\n",
			 eeprom[MT_EE_VERSION_EE]);
	dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n",
		 eeprom[MT_EE_VERSION_EE], eeprom[MT_EE_VERSION_FAE]);

	mt76x0_set_macaddr(dev, eeprom);
	mt76x0_set_chip_cap(dev, eeprom);
	mt76x0_set_country_reg(dev, eeprom);
	mt76x0_set_rf_freq_off(dev, eeprom);
	mt76x0_set_temp_offset(dev, eeprom);
	mt76x0_set_lna_gain(dev, eeprom);
	mt76x0_set_rssi_offset(dev, eeprom);
	/* Single-stream device: one TX and one RX chain. */
	dev->chainmask = 0x0101;

	mt76x0_set_tx_power_per_rate(dev, eeprom);
	mt76x0_set_tx_power_per_chan(dev, eeprom);

out:
	kfree(eeprom);
	return ret;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
new file mode 100644
index 000000000000..e37b573aed7b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
@@ -0,0 +1,149 @@
1/*
2 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
3 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
4 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef __MT76X0U_EEPROM_H
17#define __MT76X0U_EEPROM_H
18
19struct mt76x0_dev;
20
21#define MT76X0U_EE_MAX_VER 0x0c
22#define MT76X0_EEPROM_SIZE 512
23
24#define MT76X0U_DEFAULT_TX_POWER 6
25
26enum mt76_eeprom_field {
27 MT_EE_CHIP_ID = 0x00,
28 MT_EE_VERSION_FAE = 0x02,
29 MT_EE_VERSION_EE = 0x03,
30 MT_EE_MAC_ADDR = 0x04,
31 MT_EE_NIC_CONF_0 = 0x34,
32 MT_EE_NIC_CONF_1 = 0x36,
33 MT_EE_COUNTRY_REGION_5GHZ = 0x38,
34 MT_EE_COUNTRY_REGION_2GHZ = 0x39,
35 MT_EE_FREQ_OFFSET = 0x3a,
36 MT_EE_NIC_CONF_2 = 0x42,
37
38 MT_EE_LNA_GAIN_2GHZ = 0x44,
39 MT_EE_LNA_GAIN_5GHZ_0 = 0x45,
40 MT_EE_RSSI_OFFSET = 0x46,
41 MT_EE_RSSI_OFFSET_5GHZ = 0x4a,
42 MT_EE_LNA_GAIN_5GHZ_1 = 0x49,
43 MT_EE_LNA_GAIN_5GHZ_2 = 0x4d,
44
45 MT_EE_TX_POWER_DELTA_BW40 = 0x50,
46
47 MT_EE_TX_POWER_OFFSET_2GHZ = 0x52,
48
49 MT_EE_TX_TSSI_SLOPE = 0x6e,
50 MT_EE_TX_TSSI_OFFSET_GROUP = 0x6f,
51 MT_EE_TX_TSSI_OFFSET = 0x76,
52
53 MT_EE_TX_POWER_OFFSET_5GHZ = 0x78,
54
55 MT_EE_TEMP_OFFSET = 0xd1,
56 MT_EE_FREQ_OFFSET_COMPENSATION = 0xdb,
57 MT_EE_TX_POWER_BYRATE_BASE = 0xde,
58
59 MT_EE_TX_POWER_BYRATE_BASE_5GHZ = 0x120,
60
61 MT_EE_USAGE_MAP_START = 0x1e0,
62 MT_EE_USAGE_MAP_END = 0x1fc,
63};
64
65#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0)
66#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4)
67#define MT_EE_NIC_CONF_0_PA_TYPE GENMASK(9, 8)
68#define MT_EE_NIC_CONF_0_BOARD_TYPE GENMASK(13, 12)
69
70#define MT_EE_NIC_CONF_1_HW_RF_CTRL BIT(0)
71#define MT_EE_NIC_CONF_1_TEMP_TX_ALC BIT(1)
72#define MT_EE_NIC_CONF_1_LNA_EXT_2G BIT(2)
73#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3)
74#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13)
75
76#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0)
77#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4)
78#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8)
79#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9)
80#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11)
81#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13)
82
83#define MT_EE_TX_POWER_BYRATE(i) (MT_EE_TX_POWER_BYRATE_BASE + \
84 (i) * 4)
85
86#define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \
87 MT_EE_USAGE_MAP_START + 1)
88
89enum mt76x0_eeprom_access_modes {
90 MT_EE_READ = 0,
91 MT_EE_PHYSICAL_READ = 1,
92};
93
/* Inclusive channel range for a regulatory country region. */
struct reg_channel_bounds {
	u8 start;	/* first channel number */
	u8 num;		/* number of consecutive channels */
};

/* Calibration and capability data parsed from the device EEPROM. */
struct mt76x0_eeprom_params {
	u8 rf_freq_off;		/* RF frequency offset incl. compensation */
	s16 temp_off;		/* temperature sensor offset */
	s8 rssi_offset_2ghz[2];	/* per-chain RSSI offsets, 2 GHz */
	s8 rssi_offset_5ghz[3];	/* per-chain RSSI offsets, 5 GHz */
	s8 lna_gain_2ghz;
	s8 lna_gain_5ghz[3];
	u8 pa_type;

	/* TX_PWR_CFG_* values from EEPROM for 20 and 40 Mhz bandwidths. */
	u32 tx_pwr_cfg_2g[5][2];
	u32 tx_pwr_cfg_5g[5][2];

	/* 0-13: 2 GHz channels; 14-53: 5 GHz; 54-57: duplicated entries */
	u8 tx_pwr_per_chan[58];

	struct reg_channel_bounds reg;	/* regulatory channel bounds */

	bool has_2ghz;
	bool has_5ghz;
};
119
120int mt76x0_eeprom_init(struct mt76x0_dev *dev);
121
/* Mask a value to 6 bits, warning (once per call site) if any higher
 * bits were set.
 */
static inline u32 s6_validate(u32 reg)
{
	WARN_ON(reg & ~GENMASK(5, 0));
	return reg & GENMASK(5, 0);
}
127
128static inline int s6_to_int(u32 reg)
129{
130 int s6;
131
132 s6 = s6_validate(reg);
133 if (s6 & BIT(5))
134 s6 -= BIT(6);
135
136 return s6;
137}
138
139static inline u32 int_to_s6(int val)
140{
141 if (val < -0x20)
142 return 0x20;
143 if (val > 0x1f)
144 return 0x1f;
145
146 return val & 0x3f;
147}
148
149#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
new file mode 100644
index 000000000000..7cdb3e740522
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -0,0 +1,720 @@
1/*
2 * (c) Copyright 2002-2010, Ralink Technology, Inc.
3 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
4 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
5 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2
9 * as published by the Free Software Foundation
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include "mt76x0.h"
18#include "eeprom.h"
19#include "trace.h"
20#include "mcu.h"
21#include "usb.h"
22
23#include "initvals.h"
24
/* Enable or disable the WLAN function block, waiting for XTAL and PLL
 * to become ready on enable. 'val' is the current MT_WLAN_FUN_CTRL
 * value; caller holds hw_atomic_mutex (see mt76x0_chip_onoff).
 */
static void
mt76x0_set_wlan_state(struct mt76x0_dev *dev, u32 val, bool enable)
{
	int i;

	/* Note: we don't turn off WLAN_CLK because that makes the device
	 * not respond properly on the probe path.
	 * In case anyone (PSM?) wants to use this function we can
	 * bring the clock stuff back and fixup the probe path.
	 */

	if (enable)
		val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
			MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
	else
		val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN);

	mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
	udelay(20);

	if (!enable)
		return;

	/* Poll up to 200 * 20us for clock sources to stabilize. */
	for (i = 200; i; i--) {
		val = mt76_rr(dev, MT_CMB_CTRL);

		if (val & MT_CMB_CTRL_XTAL_RDY && val & MT_CMB_CTRL_PLL_LD)
			break;

		udelay(20);
	}

	/* Note: vendor driver tries to disable/enable wlan here and retry
	 * but the code which does it is so buggy it must have never
	 * triggered, so don't bother.
	 */
	if (!i)
		dev_err(dev->mt76.dev, "Error: PLL and XTAL check failed!\n");
}
64
/* Power the chip on or off, optionally pulsing the WLAN/RF reset lines
 * first. Serialized by hw_atomic_mutex so concurrent callers cannot
 * interleave the read-modify-write sequences on MT_WLAN_FUN_CTRL.
 */
void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset)
{
	u32 val;

	mutex_lock(&dev->hw_atomic_mutex);

	val = mt76_rr(dev, MT_WLAN_FUN_CTRL);

	if (reset) {
		val |= MT_WLAN_FUN_CTRL_GPIO_OUT_EN;
		val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;

		/* Only pulse reset if WLAN is currently enabled. */
		if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
			val |= (MT_WLAN_FUN_CTRL_WLAN_RESET |
				MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
			mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
			udelay(20);

			val &= ~(MT_WLAN_FUN_CTRL_WLAN_RESET |
				 MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
		}
	}

	mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
	udelay(20);

	mt76x0_set_wlan_state(dev, val, enable);

	mutex_unlock(&dev->hw_atomic_mutex);
}
95
96static void mt76x0_reset_csr_bbp(struct mt76x0_dev *dev)
97{
98 u32 val;
99
100 val = mt76_rr(dev, MT_PBF_SYS_CTRL);
101 val &= ~0x2000;
102 mt76_wr(dev, MT_PBF_SYS_CTRL, val);
103
104 mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR |
105 MT_MAC_SYS_CTRL_RESET_BBP);
106
107 msleep(200);
108}
109
/* Configure USB DMA: RX bulk aggregation (high-speed only) and TX/RX
 * bulk transfers, then pulse the RX drop-or-padding bit.
 */
static void mt76x0_init_usb_dma(struct mt76x0_dev *dev)
{
	u32 val;

	val = mt76_rr(dev, MT_USB_DMA_CFG);

	val |= FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) |
	       FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_LMT, MT_USB_AGGR_SIZE_LIMIT) |
	       MT_USB_DMA_CFG_RX_BULK_EN |
	       MT_USB_DMA_CFG_TX_BULK_EN;
	/* 512-byte max packet implies a high-speed link; only then is RX
	 * bulk aggregation enabled.
	 */
	if (dev->in_max_packet == 512)
		val |= MT_USB_DMA_CFG_RX_BULK_AGG_EN;
	mt76_wr(dev, MT_USB_DMA_CFG, val);

	val = mt76_rr(dev, MT_COM_REG0);
	if (val & 1)
		dev_dbg(dev->mt76.dev, "MCU not ready\n");

	val = mt76_rr(dev, MT_USB_DMA_CFG);

	/* Deliberate set-then-clear pulse of the drop/padding bit. */
	val |= MT_USB_DMA_CFG_RX_DROP_OR_PADDING;
	mt76_wr(dev, MT_USB_DMA_CFG, val);
	val &= ~MT_USB_DMA_CFG_RX_DROP_OR_PADDING;
	mt76_wr(dev, MT_USB_DMA_CFG, val);
}
135
/* Write a whole table of register/value pairs through the MCU.
 *
 * Fix: drop the trailing semicolon from the macro body. With it, every
 * call site's own ';' produced an empty statement ';;' and an unbraced
 * 'if (x) RANDOM_WRITE(...); else ...' would fail to compile.
 */
#define RANDOM_WRITE(dev, tab) \
	mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, tab, ARRAY_SIZE(tab))
138
/* Program the baseband: common init table, the 2.4 GHz/BW20 entries of
 * the band-switch table, then the DC-offset calibration table.
 *
 * Returns 0 on success or a negative errno if the BBP never came up.
 */
static int mt76x0_init_bbp(struct mt76x0_dev *dev)
{
	int ret, i;

	ret = mt76x0_wait_bbp_ready(dev);
	if (ret)
		return ret;

	RANDOM_WRITE(dev, mt76x0_bbp_init_tab);

	for (i = 0; i < ARRAY_SIZE(mt76x0_bbp_switch_tab); i++) {
		const struct mt76x0_bbp_switch_item *item = &mt76x0_bbp_switch_tab[i];
		const struct mt76_reg_pair *pair = &item->reg_pair;

		/* Apply only entries valid for G-band at 20 MHz. */
		if (((RF_G_BAND | RF_BW_20) & item->bw_band) == (RF_G_BAND | RF_BW_20))
			mt76_wr(dev, pair->reg, pair->value);
	}

	RANDOM_WRITE(dev, mt76x0_dcoc_tab);

	return 0;
}
161
162static void
163mt76_init_beacon_offsets(struct mt76x0_dev *dev)
164{
165 u16 base = MT_BEACON_BASE;
166 u32 regs[4] = {};
167 int i;
168
169 for (i = 0; i < 16; i++) {
170 u16 addr = dev->beacon_offsets[i];
171
172 regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4));
173 }
174
175 for (i = 0; i < 4; i++)
176 mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
177}
178
/* Program common and chip-specific MAC registers, release BBP/MAC
 * reset, and configure FCE TX ring routing.
 */
static void mt76x0_init_mac_registers(struct mt76x0_dev *dev)
{
	u32 reg;

	RANDOM_WRITE(dev, common_mac_reg_table);

	mt76_init_beacon_offsets(dev);

	/* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */
	RANDOM_WRITE(dev, mt76x0_mac_reg_table);

	/* Release BBP and MAC reset MAC_SYS_CTRL[1:0] = 0x0 */
	reg = mt76_rr(dev, MT_MAC_SYS_CTRL);
	reg &= ~0x3;
	mt76_wr(dev, MT_MAC_SYS_CTRL, reg);

	if (is_mt7610e(dev)) {
		/* Disable COEX_EN */
		reg = mt76_rr(dev, MT_COEXCFG0);
		reg &= 0xFFFFFFFE;
		mt76_wr(dev, MT_COEXCFG0, reg);
	}

	/* Set 0x141C[15:12]=0xF */
	reg = mt76_rr(dev, MT_EXT_CCA_CFG);
	reg |= 0x0000F000;
	mt76_wr(dev, MT_EXT_CCA_CFG, reg);

	mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);

	/*
	 * TxRing 9 is for Mgmt frame.
	 * TxRing 8 is for In-band command frame.
	 * WMM_RG0_TXQMA: This register setting is for FCE to define the rule of TxRing 9.
	 * WMM_RG1_TXQMA: This register setting is for FCE to define the rule of TxRing 8.
	 */
	reg = mt76_rr(dev, MT_WMM_CTRL);
	reg &= ~0x000003FF;
	reg |= 0x00000201;
	mt76_wr(dev, MT_WMM_CTRL, reg);

	/* TODO: Probably not needed */
	mt76_wr(dev, 0x7028, 0);
	mt76_wr(dev, 0x7010, 0);
	mt76_wr(dev, 0x7024, 0);
	msleep(10);
}
226
227static int mt76x0_init_wcid_mem(struct mt76x0_dev *dev)
228{
229 u32 *vals;
230 int i, ret;
231
232 vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
233 if (!vals)
234 return -ENOMEM;
235
236 for (i = 0; i < N_WCIDS; i++) {
237 vals[i * 2] = 0xffffffff;
238 vals[i * 2 + 1] = 0x00ffffff;
239 }
240
241 ret = mt76x0_burst_write_regs(dev, MT_WCID_ADDR_BASE,
242 vals, N_WCIDS * 2);
243 kfree(vals);
244
245 return ret;
246}
247
248static int mt76x0_init_key_mem(struct mt76x0_dev *dev)
249{
250 u32 vals[4] = {};
251
252 return mt76x0_burst_write_regs(dev, MT_SKEY_MODE_BASE_0,
253 vals, ARRAY_SIZE(vals));
254}
255
256static int mt76x0_init_wcid_attr_mem(struct mt76x0_dev *dev)
257{
258 u32 *vals;
259 int i, ret;
260
261 vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
262 if (!vals)
263 return -ENOMEM;
264
265 for (i = 0; i < N_WCIDS * 2; i++)
266 vals[i] = 1;
267
268 ret = mt76x0_burst_write_regs(dev, MT_WCID_ATTR_BASE,
269 vals, N_WCIDS * 2);
270 kfree(vals);
271
272 return ret;
273}
274
/* Read all TX/RX statistics counters, discarding the values.
 * NOTE(review): presumably these registers are clear-on-read, so the
 * reads reset them — confirm against the register documentation.
 */
static void mt76x0_reset_counters(struct mt76x0_dev *dev)
{
	mt76_rr(dev, MT_RX_STA_CNT0);
	mt76_rr(dev, MT_RX_STA_CNT1);
	mt76_rr(dev, MT_RX_STA_CNT2);
	mt76_rr(dev, MT_TX_STA_CNT0);
	mt76_rr(dev, MT_TX_STA_CNT1);
	mt76_rr(dev, MT_TX_STA_CNT2);
}
284
/* Enable MAC TX, program the RX filter, then enable RX; DMA must be
 * idle before each step.
 *
 * Returns 0 on success or -ETIMEDOUT if WPDMA stays busy.
 */
int mt76x0_mac_start(struct mt76x0_dev *dev)
{
	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);

	if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
		       MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 200000))
		return -ETIMEDOUT;

	/* Drop everything except data frames addressed to us. */
	dev->rxfilter = MT_RX_FILTR_CFG_CRC_ERR |
		MT_RX_FILTR_CFG_PHY_ERR | MT_RX_FILTR_CFG_PROMISC |
		MT_RX_FILTR_CFG_VER_ERR | MT_RX_FILTR_CFG_DUP |
		MT_RX_FILTR_CFG_CFACK | MT_RX_FILTR_CFG_CFEND |
		MT_RX_FILTR_CFG_ACK | MT_RX_FILTR_CFG_CTS |
		MT_RX_FILTR_CFG_RTS | MT_RX_FILTR_CFG_PSPOLL |
		MT_RX_FILTR_CFG_BA | MT_RX_FILTR_CFG_CTRL_RSV;
	mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);

	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);

	if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
		       MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 50))
		return -ETIMEDOUT;

	return 0;
}
311
/* Quiesce the hardware: stop beaconing, drain TX (DMA then MAC), then
 * disable RX and wait for the RX queues and DMA to drain. Warnings are
 * logged but not treated as fatal. No-op if the device is gone.
 */
static void mt76x0_mac_stop_hw(struct mt76x0_dev *dev)
{
	int i, ok;

	if (test_bit(MT76_REMOVED, &dev->mt76.state))
		return;

	mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
		   MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
		   MT_BEACON_TIME_CFG_BEACON_TX);

	if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
		dev_warn(dev->mt76.dev, "Warning: TX DMA did not stop!\n");

	/* Page count on TxQ */
	i = 200;
	while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) ||
		       (mt76_rr(dev, 0x0a30) & 0x000000ff) ||
		       (mt76_rr(dev, 0x0a34) & 0x00ff00ff)))
		msleep(10);

	if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX, 0, 1000))
		dev_warn(dev->mt76.dev, "Warning: MAC TX did not stop!\n");

	mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX |
					 MT_MAC_SYS_CTRL_ENABLE_TX);

	/* Page count on RxQ */
	ok = 0;
	i = 200;
	while (i--) {
		/* Require several consecutive idle reads before trusting
		 * that the RX queues have drained.
		 */
		if (!(mt76_rr(dev, MT_RXQ_STA) & 0x00ff0000) &&
		    !mt76_rr(dev, 0x0a30) &&
		    !mt76_rr(dev, 0x0a34)) {
			if (ok++ > 5)
				break;
			continue;
		}
		msleep(1);
	}

	if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000))
		dev_warn(dev->mt76.dev, "Warning: MAC RX did not stop!\n");

	if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000))
		dev_warn(dev->mt76.dev, "Warning: RX DMA did not stop!\n");
}
359
/* Stop the MAC and make sure the deferred stats work is fully idle. */
void mt76x0_mac_stop(struct mt76x0_dev *dev)
{
	mt76x0_mac_stop_hw(dev);
	/* Flush first so a queued work item runs to completion, then
	 * cancel to guarantee it will not run again.
	 */
	flush_delayed_work(&dev->stat_work);
	cancel_delayed_work_sync(&dev->stat_work);
}
366
/* Power the chip off without pulsing reset. */
static void mt76x0_stop_hardware(struct mt76x0_dev *dev)
{
	mt76x0_chip_onoff(dev, false, false);
}
371
/* Bring the device from power-on to a fully initialized state: chip
 * power-up and reset, MCU/firmware load, USB DMA, MAC/BBP/WCID setup,
 * EEPROM parsing and PHY init.
 *
 * Returns 0 on success or a negative errno; on failure everything set
 * up so far is torn down via the goto chain (err_rx -> err_mcu -> err).
 */
int mt76x0_init_hardware(struct mt76x0_dev *dev)
{
	static const u16 beacon_offsets[16] = {
		/* 512 byte per beacon */
		0xc000,	0xc200,	0xc400,	0xc600,
		0xc800,	0xca00,	0xcc00,	0xce00,
		0xd000,	0xd200,	0xd400,	0xd600,
		0xd800,	0xda00,	0xdc00,	0xde00
	};
	int ret;

	dev->beacon_offsets = beacon_offsets;

	mt76x0_chip_onoff(dev, true, true);

	ret = mt76x0_wait_asic_ready(dev);
	if (ret)
		goto err;
	ret = mt76x0_mcu_init(dev);
	if (ret)
		goto err;

	/* DMA must be idle before touching the MAC. */
	if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
			    MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
			    MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100)) {
		ret = -EIO;
		goto err;
	}

	/* Wait for ASIC ready after FW load. */
	ret = mt76x0_wait_asic_ready(dev);
	if (ret)
		goto err;

	mt76x0_reset_csr_bbp(dev);
	mt76x0_init_usb_dma(dev);

	/* Disable header translation and TSO. */
	mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0x0);
	mt76_wr(dev, MT_TSO_CTRL, 0x0);

	ret = mt76x0_mcu_cmd_init(dev);
	if (ret)
		goto err;
	ret = mt76x0_dma_init(dev);
	if (ret)
		goto err_mcu;

	mt76x0_init_mac_registers(dev);

	if (!mt76_poll_msec(dev, MT_MAC_STATUS,
			    MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 1000)) {
		ret = -EIO;
		goto err_rx;
	}

	ret = mt76x0_init_bbp(dev);
	if (ret)
		goto err_rx;

	ret = mt76x0_init_wcid_mem(dev);
	if (ret)
		goto err_rx;
	ret = mt76x0_init_key_mem(dev);
	if (ret)
		goto err_rx;
	ret = mt76x0_init_wcid_attr_mem(dev);
	if (ret)
		goto err_rx;

	mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
					     MT_BEACON_TIME_CFG_SYNC_MODE |
					     MT_BEACON_TIME_CFG_TBTT_EN |
					     MT_BEACON_TIME_CFG_BEACON_TX));

	mt76x0_reset_counters(dev);

	mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);

	mt76_wr(dev, MT_TXOP_CTRL_CFG,
		FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
		FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));

	ret = mt76x0_eeprom_init(dev);
	if (ret)
		goto err_rx;

	mt76x0_phy_init(dev);
	return 0;

err_rx:
	mt76x0_dma_cleanup(dev);
err_mcu:
	mt76x0_mcu_cmd_deinit(dev);
err:
	mt76x0_chip_onoff(dev, false, false);
	return ret;
}
469
/* Undo mt76x0_init_hardware().
 *
 * The test_and_clear of MT76_STATE_INITIALIZED makes this idempotent:
 * only the first caller performs the teardown.
 */
void mt76x0_cleanup(struct mt76x0_dev *dev)
{
	if (!test_and_clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
		return;

	mt76x0_stop_hardware(dev);
	mt76x0_dma_cleanup(dev);
	mt76x0_mcu_cmd_deinit(dev);
}
479
480struct mt76x0_dev *mt76x0_alloc_device(struct device *pdev)
481{
482 struct ieee80211_hw *hw;
483 struct mt76x0_dev *dev;
484
485 hw = ieee80211_alloc_hw(sizeof(*dev), &mt76x0_ops);
486 if (!hw)
487 return NULL;
488
489 dev = hw->priv;
490 dev->mt76.dev = pdev;
491 dev->mt76.hw = hw;
492 mutex_init(&dev->usb_ctrl_mtx);
493 mutex_init(&dev->reg_atomic_mutex);
494 mutex_init(&dev->hw_atomic_mutex);
495 mutex_init(&dev->mutex);
496 spin_lock_init(&dev->tx_lock);
497 spin_lock_init(&dev->rx_lock);
498 spin_lock_init(&dev->mt76.lock);
499 spin_lock_init(&dev->mac_lock);
500 spin_lock_init(&dev->con_mon_lock);
501 atomic_set(&dev->avg_ampdu_len, 1);
502 skb_queue_head_init(&dev->tx_skb_done);
503
504 dev->stat_wq = alloc_workqueue("mt76x0", WQ_UNBOUND, 0);
505 if (!dev->stat_wq) {
506 ieee80211_free_hw(hw);
507 return NULL;
508 }
509
510 return dev;
511}
512
/* Build one 2.4 GHz ieee80211_channel entry; 30 dBm max power cap. */
#define CHAN2G(_idx, _freq) { \
	.band = NL80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}

/* Channels 1-14; 14 (2484 MHz) sits outside the 5 MHz raster. */
static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};
536
/* Build one 5 GHz ieee80211_channel entry; 30 dBm max power cap. */
#define CHAN5G(_idx, _freq) { \
	.band = NL80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}

/* UNII-1/2 block (36-64) and the 100-140 block. */
static const struct ieee80211_channel mt76_channels_5ghz[] = {
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(46, 5230),
	CHAN5G(48, 5240),
	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
};
567
/* CCK rate: hw_value encodes PHY type in the high byte; the
 * short-preamble variant uses index + 8 in the low byte.
 */
#define CCK_RATE(_idx, _rate) {	\
	.bitrate = _rate,	\
	.flags = IEEE80211_RATE_SHORT_PREAMBLE,	\
	.hw_value = (MT_PHY_TYPE_CCK << 8) | _idx,	\
	.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx),	\
}

/* OFDM rate: no short-preamble distinction, both hw values identical. */
#define OFDM_RATE(_idx, _rate) {	\
	.bitrate = _rate,	\
	.hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx,	\
	.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx,	\
}

/* Legacy rate table (bitrate in units of 100 kbps).  Deliberately
 * non-const: it is handed to mt76_init_sband() through a non-const
 * struct ieee80211_rate * parameter.  The 5 GHz band skips the first
 * four (CCK) entries.
 */
static struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(0, 60),
	OFDM_RATE(1, 90),
	OFDM_RATE(2, 120),
	OFDM_RATE(3, 180),
	OFDM_RATE(4, 240),
	OFDM_RATE(5, 360),
	OFDM_RATE(6, 480),
	OFDM_RATE(7, 540),
};
595
596static int
597mt76_init_sband(struct mt76x0_dev *dev, struct ieee80211_supported_band *sband,
598 const struct ieee80211_channel *chan, int n_chan,
599 struct ieee80211_rate *rates, int n_rates)
600{
601 struct ieee80211_sta_ht_cap *ht_cap;
602 void *chanlist;
603 int size;
604
605 size = n_chan * sizeof(*chan);
606 chanlist = devm_kmemdup(dev->mt76.dev, chan, size, GFP_KERNEL);
607 if (!chanlist)
608 return -ENOMEM;
609
610 sband->channels = chanlist;
611 sband->n_channels = n_chan;
612 sband->bitrates = rates;
613 sband->n_bitrates = n_rates;
614
615 ht_cap = &sband->ht_cap;
616 ht_cap->ht_supported = true;
617 ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
618 IEEE80211_HT_CAP_GRN_FLD |
619 IEEE80211_HT_CAP_SGI_20 |
620 IEEE80211_HT_CAP_SGI_40 |
621 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
622
623 ht_cap->mcs.rx_mask[0] = 0xff;
624 ht_cap->mcs.rx_mask[4] = 0x1;
625 ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
626 ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
627 ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_2;
628
629 return 0;
630}
631
632static int
633mt76_init_sband_2g(struct mt76x0_dev *dev)
634{
635 dev->mt76.hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->mt76.sband_2g.sband;
636
637 WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num >
638 ARRAY_SIZE(mt76_channels_2ghz));
639
640
641 return mt76_init_sband(dev, &dev->mt76.sband_2g.sband,
642 mt76_channels_2ghz, ARRAY_SIZE(mt76_channels_2ghz),
643 mt76_rates, ARRAY_SIZE(mt76_rates));
644}
645
646static int
647mt76_init_sband_5g(struct mt76x0_dev *dev)
648{
649 dev->mt76.hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->mt76.sband_5g.sband;
650
651 return mt76_init_sband(dev, &dev->mt76.sband_5g.sband,
652 mt76_channels_5ghz, ARRAY_SIZE(mt76_channels_5ghz),
653 mt76_rates + 4, ARRAY_SIZE(mt76_rates) - 4);
654}
655
656
657int mt76x0_register_device(struct mt76x0_dev *dev)
658{
659 struct ieee80211_hw *hw = dev->mt76.hw;
660 struct wiphy *wiphy = hw->wiphy;
661 int ret;
662
663 /* Reserve WCID 0 for mcast - thanks to this APs WCID will go to
664 * entry no. 1 like it does in the vendor driver.
665 */
666 dev->wcid_mask[0] |= 1;
667
668 /* init fake wcid for monitor interfaces */
669 dev->mon_wcid = devm_kmalloc(dev->mt76.dev, sizeof(*dev->mon_wcid),
670 GFP_KERNEL);
671 if (!dev->mon_wcid)
672 return -ENOMEM;
673 dev->mon_wcid->idx = 0xff;
674 dev->mon_wcid->hw_key_idx = -1;
675
676 SET_IEEE80211_DEV(hw, dev->mt76.dev);
677
678 hw->queues = 4;
679 ieee80211_hw_set(hw, SIGNAL_DBM);
680 ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
681 ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
682 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
683 ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
684 hw->max_rates = 1;
685 hw->max_report_rates = 7;
686 hw->max_rate_tries = 1;
687
688 hw->sta_data_size = sizeof(struct mt76_sta);
689 hw->vif_data_size = sizeof(struct mt76_vif);
690
691 SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
692
693 wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
694 wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
695
696 if (dev->ee->has_2ghz) {
697 ret = mt76_init_sband_2g(dev);
698 if (ret)
699 return ret;
700 }
701
702 if (dev->ee->has_5ghz) {
703 ret = mt76_init_sband_5g(dev);
704 if (ret)
705 return ret;
706 }
707
708 dev->mt76.chandef.chan = &dev->mt76.sband_2g.sband.channels[0];
709
710 INIT_DELAYED_WORK(&dev->mac_work, mt76x0_mac_work);
711 INIT_DELAYED_WORK(&dev->stat_work, mt76x0_tx_stat);
712
713 ret = ieee80211_register_hw(hw);
714 if (ret)
715 return ret;
716
717 mt76x0_init_debugfs(dev);
718
719 return 0;
720}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
new file mode 100644
index 000000000000..24afcfd94b4e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
@@ -0,0 +1,282 @@
1/*
2 * (c) Copyright 2002-2010, Ralink Technology, Inc.
3 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
4 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef __MT76X0U_INITVALS_H
17#define __MT76X0U_INITVALS_H
18
19#include "phy.h"
20
21static const struct mt76_reg_pair common_mac_reg_table[] = {
22#if 1
23 {MT_BCN_OFFSET(0), 0xf8f0e8e0}, /* 0x3800(e0), 0x3A00(e8), 0x3C00(f0), 0x3E00(f8), 512B for each beacon */
24 {MT_BCN_OFFSET(1), 0x6f77d0c8}, /* 0x3200(c8), 0x3400(d0), 0x1DC0(77), 0x1BC0(6f), 512B for each beacon */
25#endif
26
27 {MT_LEGACY_BASIC_RATE, 0x0000013f}, /* Basic rate set bitmap*/
28 {MT_HT_BASIC_RATE, 0x00008003}, /* Basic HT rate set , 20M, MCS=3, MM. Format is the same as in TXWI.*/
29 {MT_MAC_SYS_CTRL, 0x00}, /* 0x1004, , default Disable RX*/
30 {MT_RX_FILTR_CFG, 0x17f97}, /*0x1400 , RX filter control, */
31 {MT_BKOFF_SLOT_CFG, 0x209}, /* default set short slot time, CC_DELAY_TIME should be 2 */
32 /*{TX_SW_CFG0, 0x40a06}, Gary,2006-08-23 */
33 {MT_TX_SW_CFG0, 0x0}, /* Gary,2008-05-21 for CWC test */
34 {MT_TX_SW_CFG1, 0x80606}, /* Gary,2006-08-23 */
35 {MT_TX_LINK_CFG, 0x1020}, /* Gary,2006-08-23 */
36 /*{TX_TIMEOUT_CFG, 0x00182090}, CCK has some problem. So increase timieout value. 2006-10-09 MArvek RT*/
37 {MT_TX_TIMEOUT_CFG, 0x000a2090}, /* CCK has some problem. So increase timieout value. 2006-10-09 MArvek RT , Modify for 2860E ,2007-08-01*/
38 {MT_MAX_LEN_CFG, 0xa0fff | 0x00001000}, /* 0x3018, MAX frame length. Max PSDU = 16kbytes.*/
39 {MT_LED_CFG, 0x7f031e46}, /* Gary, 2006-08-23*/
40
41 {MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f /*0xbfbf3f1f*/},
42 {MT_PBF_RX_MAX_PCNT, 0x9f},
43
44 /*{TX_RTY_CFG, 0x6bb80408}, Jan, 2006/11/16*/
45/* WMM_ACM_SUPPORT */
46/* {TX_RTY_CFG, 0x6bb80101}, sample*/
47 {MT_TX_RETRY_CFG, 0x47d01f0f}, /* Jan, 2006/11/16, Set TxWI->ACK =0 in Probe Rsp Modify for 2860E ,2007-08-03*/
48
49 {MT_AUTO_RSP_CFG, 0x00000013}, /* Initial Auto_Responder, because QA will turn off Auto-Responder*/
50 {MT_CCK_PROT_CFG, 0x05740003 /*0x01740003*/}, /* Initial Auto_Responder, because QA will turn off Auto-Responder. And RTS threshold is enabled. */
51 {MT_OFDM_PROT_CFG, 0x05740003 /*0x01740003*/}, /* Initial Auto_Responder, because QA will turn off Auto-Responder. And RTS threshold is enabled. */
52 {MT_PBF_CFG, 0xf40006}, /* Only enable Queue 2*/
53 {MT_MM40_PROT_CFG, 0x3F44084}, /* Initial Auto_Responder, because QA will turn off Auto-Responder*/
54 {MT_WPDMA_GLO_CFG, 0x00000030},
55 {MT_GF20_PROT_CFG, 0x01744004}, /* set 19:18 --> Short NAV for MIMO PS*/
56 {MT_GF40_PROT_CFG, 0x03F44084},
57 {MT_MM20_PROT_CFG, 0x01744004},
58 {MT_TXOP_CTRL_CFG, 0x0000583f, /*0x0000243f*/ /*0x000024bf*/}, /*Extension channel backoff.*/
59 {MT_TX_RTS_CFG, 0x00092b20},
60
61 {MT_EXP_ACK_TIME, 0x002400ca}, /* default value */
62 {MT_TXOP_HLDR_ET, 0x00000002},
63
64 /* Jerry comments 2008/01/16: we use SIFS = 10us in CCK defaultly, but it seems that 10us
65 is too small for INTEL 2200bg card, so in MBSS mode, the delta time between beacon0
66 and beacon1 is SIFS (10us), so if INTEL 2200bg card connects to BSS0, the ping
67 will always lost. So we change the SIFS of CCK from 10us to 16us. */
68 {MT_XIFS_TIME_CFG, 0x33a41010},
69 {MT_PWR_PIN_CFG, 0x00000000},
70};
71
/* MT76x0-specific MAC register overrides applied after the common
 * table.  Raw hex addresses (0xa44, 0x150C, 0x1238) have no named
 * register definition; meanings come from the vendor comments.
 */
static const struct mt76_reg_pair mt76x0_mac_reg_table[] = {
	/* {MT_IOCFG_6,		0xA0040080 }, */
	{MT_PBF_SYS_CTRL,	0x00080c00 },
	{MT_PBF_CFG,		0x77723c1f },
	{MT_FCE_PSE_CTRL,	0x00000001 },

	{MT_AMPDU_MAX_LEN_20M1S, 0xBAA99887 },

	/* Delay bb_tx_pe for proper tx_mcs_pwr update */
	{MT_TX_SW_CFG0,		0x00000601 },

	/* Set rf_tx_pe deassert time to 1us by Chee's comment @MT7650_CR_setting_1018.xlsx */
	{MT_TX_SW_CFG1,		0x00040000 },
	{MT_TX_SW_CFG2,		0x00000000 },

	/* disable Tx info report */
	{0xa44,			0x0000000 },

	{MT_HEADER_TRANS_CTRL_REG, 0x0},
	{MT_TSO_CTRL,		0x0},

	/* BB_PA_MODE_CFG0(0x1214) Keep default value @20120903 */
	{MT_BB_PA_MODE_CFG1,	0x00500055},

	/* RF_PA_MODE_CFG0(0x121C) Keep default value @20120903 */
	{MT_RF_PA_MODE_CFG1,	0x00500055},

	{MT_TX_ALC_CFG_0,	0x2F2F000C},
	{MT_TX0_BB_GAIN_ATTEN,	0x00000000}, /* set BBP atten gain = 0 */

	{MT_TX_PWR_CFG_0,	0x3A3A3A3A},
	{MT_TX_PWR_CFG_1,	0x3A3A3A3A},
	{MT_TX_PWR_CFG_2,	0x3A3A3A3A},
	{MT_TX_PWR_CFG_3,	0x3A3A3A3A},
	{MT_TX_PWR_CFG_4,	0x3A3A3A3A},
	{MT_TX_PWR_CFG_7,	0x3A3A3A3A},
	{MT_TX_PWR_CFG_8,	0x3A},
	{MT_TX_PWR_CFG_9,	0x3A},
	/* Enable Tx length > 4095 byte */
	{0x150C,		0x00000002},

	/* Disable bt_abort_tx_en(0x1238[21] = 0) which is not used at MT7650 */
	{0x1238,		0x001700C8},
	/* PMU_OCLEVEL<5:1> from default <5'b10010> to <5'b11011> for normal driver */
	/* {MT_LDO_CTRL_0,	0x00A647B6}, */

	/* Default LDO_DIG supply 1.26V, change to 1.2V */
	{MT_LDO_CTRL_1,		0x6B006464 },
/*
	{MT_HT_BASIC_RATE,	0x00004003 },
	{MT_HT_CTRL_CFG,	0x000001FF },
*/
};
125
126
/* Baseband processor initial values, grouped by BBP block (CORE, IBI,
 * AGC, TXC, RXC, TXO, TXBE, RXFE, RXO); vendor-derived constants.
 */
static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = {
	{MT_BBP(CORE, 1),	0x00000002},
	{MT_BBP(CORE, 4),	0x00000000},
	{MT_BBP(CORE, 24),	0x00000000},
	{MT_BBP(CORE, 32),	0x4003000a},
	{MT_BBP(CORE, 42),	0x00000000},
	{MT_BBP(CORE, 44),	0x00000000},

	{MT_BBP(IBI, 11),	0x00000080},

	/*
		0x2300[5] Default Antenna:
		0 for WIFI main antenna
		1 for WIFI aux  antenna
	*/
	{MT_BBP(AGC, 0),	0x00021400},
	{MT_BBP(AGC, 1),	0x00000003},
	{MT_BBP(AGC, 2),	0x003A6464},
	{MT_BBP(AGC, 15),	0x88A28CB8},
	{MT_BBP(AGC, 22),	0x00001E21},
	{MT_BBP(AGC, 23),	0x0000272C},
	{MT_BBP(AGC, 24),	0x00002F3A},
	{MT_BBP(AGC, 25),	0x8000005A},
	{MT_BBP(AGC, 26),	0x007C2005},
	{MT_BBP(AGC, 34),	0x000A0C0C},
	{MT_BBP(AGC, 37),	0x2121262C},
	{MT_BBP(AGC, 41),	0x38383E45},
	{MT_BBP(AGC, 57),	0x00001010},
	{MT_BBP(AGC, 59),	0xBAA20E96},
	{MT_BBP(AGC, 63),	0x00000001},

	{MT_BBP(TXC, 0),	0x00280403},
	{MT_BBP(TXC, 1),	0x00000000},

	{MT_BBP(RXC, 1),	0x00000012},
	{MT_BBP(RXC, 2),	0x00000011},
	{MT_BBP(RXC, 3),	0x00000005},
	{MT_BBP(RXC, 4),	0x00000000},
	{MT_BBP(RXC, 5),	0xF977C4EC},
	{MT_BBP(RXC, 7),	0x00000090},

	{MT_BBP(TXO, 8),	0x00000000},

	{MT_BBP(TXBE, 0),	0x00000000},
	{MT_BBP(TXBE, 4),	0x00000004},
	{MT_BBP(TXBE, 6),	0x00000000},
	{MT_BBP(TXBE, 8),	0x00000014},
	{MT_BBP(TXBE, 9),	0x20000000},
	{MT_BBP(TXBE, 10),	0x00000000},
	{MT_BBP(TXBE, 12),	0x00000000},
	{MT_BBP(TXBE, 13),	0x00000000},
	{MT_BBP(TXBE, 14),	0x00000000},
	{MT_BBP(TXBE, 15),	0x00000000},
	{MT_BBP(TXBE, 16),	0x00000000},
	{MT_BBP(TXBE, 17),	0x00000000},

	{MT_BBP(RXFE, 1),	0x00008800}, /* Add for E3 */
	{MT_BBP(RXFE, 3),	0x00000000},
	{MT_BBP(RXFE, 4),	0x00000000},

	{MT_BBP(RXO, 13),	0x00000092},
	{MT_BBP(RXO, 14),	0x00060612},
	{MT_BBP(RXO, 15),	0xC8321B18},
	{MT_BBP(RXO, 16),	0x0000001E},
	{MT_BBP(RXO, 17),	0x00000000},
	{MT_BBP(RXO, 18),	0xCC00A993},
	{MT_BBP(RXO, 19),	0xB9CB9CB9},
	{MT_BBP(RXO, 20),	0x26c00057},
	{MT_BBP(RXO, 21),	0x00000001},
	{MT_BBP(RXO, 24),	0x00000006},
};
199
/* BBP values selected at channel-switch time: each entry carries a
 * band/bandwidth flag mask and the register value to apply when the
 * current configuration matches it.
 */
static const struct mt76x0_bbp_switch_item mt76x0_bbp_switch_tab[] = {
	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 8), 0x0E344EF0}},
	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 8), 0x122C54F2}},

	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 14), 0x310F2E39}},
	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 14), 0x310F2A3F}},

	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 32), 0x00003230}},
	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 32), 0x0000181C}},

	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 33), 0x00003240}},
	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 33), 0x00003218}},

	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 35), 0x11112016}},
	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 35), 0x11112016}},

	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(RXO, 28), 0x0000008A}},
	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(RXO, 28), 0x0000008A}},

	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 4), 0x1FEDA049}},
	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 4), 0x1FECA054}},

	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 6), 0x00000045}},
	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 6), 0x0000000A}},

	{RF_G_BAND | RF_BW_20,				{MT_BBP(AGC, 12), 0x05052879}},
	{RF_G_BAND | RF_BW_40,				{MT_BBP(AGC, 12), 0x050528F9}},
	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 12), 0x050528F9}},

	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 13), 0x35050004}},
	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 13), 0x2C3A0406}},

	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 27), 0x000000E1}},
	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 27), 0x000000EC}},

	{RF_G_BAND | RF_BW_20,				{MT_BBP(AGC, 28), 0x00060806}},
	{RF_G_BAND | RF_BW_40,				{MT_BBP(AGC, 28), 0x00050806}},
	{RF_A_BAND | RF_BW_40,				{MT_BBP(AGC, 28), 0x00060801}},
	{RF_A_BAND | RF_BW_20 | RF_BW_80,		{MT_BBP(AGC, 28), 0x00060806}},

	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 31), 0x00000F23}},
	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 31), 0x00000F13}},

	{RF_G_BAND | RF_BW_20,				{MT_BBP(AGC, 39), 0x2A2A3036}},
	{RF_G_BAND | RF_BW_40,				{MT_BBP(AGC, 39), 0x2A2A2C36}},
	{RF_A_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 39), 0x2A2A3036}},
	{RF_A_BAND | RF_BW_80,				{MT_BBP(AGC, 39), 0x2A2A2A36}},

	{RF_G_BAND | RF_BW_20,				{MT_BBP(AGC, 43), 0x27273438}},
	{RF_G_BAND | RF_BW_40,				{MT_BBP(AGC, 43), 0x27272D38}},
	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 43), 0x27272B30}},

	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 51), 0x17171C1C}},
	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 51), 0xFFFFFFFF}},

	{RF_G_BAND | RF_BW_20,				{MT_BBP(AGC, 53), 0x26262A2F}},
	{RF_G_BAND | RF_BW_40,				{MT_BBP(AGC, 53), 0x2626322F}},
	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 53), 0xFFFFFFFF}},

	{RF_G_BAND | RF_BW_20,				{MT_BBP(AGC, 55), 0x40404E58}},
	{RF_G_BAND | RF_BW_40,				{MT_BBP(AGC, 55), 0x40405858}},
	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 55), 0xFFFFFFFF}},

	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(AGC, 58), 0x00001010}},
	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(AGC, 58), 0x00000000}},

	{RF_G_BAND | RF_BW_20 | RF_BW_40,		{MT_BBP(RXFE, 0), 0x3D5000E0}},
	{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80,	{MT_BBP(RXFE, 0), 0x895000E0}},
};
269
/* BBP CAL-block values for DC-offset calibration (DCOC). */
static const struct mt76_reg_pair mt76x0_dcoc_tab[] = {
	{MT_BBP(CAL, 47), 0x000010F0 },
	{MT_BBP(CAL, 48), 0x00008080 },
	{MT_BBP(CAL, 49), 0x00000F07 },
	{MT_BBP(CAL, 50), 0x00000040 },
	{MT_BBP(CAL, 51), 0x00000404 },
	{MT_BBP(CAL, 52), 0x00080803 },
	{MT_BBP(CAL, 53), 0x00000704 },
	{MT_BBP(CAL, 54), 0x00002828 },
	{MT_BBP(CAL, 55), 0x00005050 },
};
281
282#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h
new file mode 100644
index 000000000000..95d43efc1f3d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h
@@ -0,0 +1,772 @@
1/*
2 * (c) Copyright 2002-2010, Ralink Technology, Inc.
3 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
4 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef __MT76X0U_PHY_INITVALS_H
17#define __MT76X0U_PHY_INITVALS_H
18
/* Encode an RF (bank, reg) address pair for mt76_reg_pair tables:
 * bank in the upper 16 bits, register number in the lower 16.
 * Fix: parenthesize the value argument so expression arguments expand
 * safely inside the initializer.
 */
#define RF_REG_PAIR(bank, reg, value) \
	{ (bank) << 16 | (reg), (value) }
21
22
/* Bank 0 RF init values — central blocks shared by both bands
 * (bandgap, PLL, XTAL, LO, ADC/DAC); vendor-derived constants.
 */
static const struct mt76_reg_pair mt76x0_rf_central_tab[] = {
/*
	Bank 0 - For central blocks: BG, PLL, XTAL, LO, ADC/DAC
*/
	{ MT_RF(0, 1), 0x01},
	{ MT_RF(0, 2), 0x11},

	/*
		R3 ~ R7: VCO Cal.
	*/
	{ MT_RF(0, 3), 0x73}, /* VCO Freq Cal - No Bypass, VCO Amp Cal - No Bypass */
	{ MT_RF(0, 4), 0x30}, /* R4 b<7>=1, VCO cal */
	{ MT_RF(0, 5), 0x00},
	{ MT_RF(0, 6), 0x41}, /* Set the open loop amplitude to middle since bypassing amplitude calibration */
	{ MT_RF(0, 7), 0x00},

	/*
		XO
	*/
	{ MT_RF(0, 8), 0x00},
	{ MT_RF(0, 9), 0x00},
	{ MT_RF(0, 10), 0x0C},
	{ MT_RF(0, 11), 0x00},
	{ MT_RF(0, 12), 0x00},

	/*
		BG
	*/
	{ MT_RF(0, 13), 0x00},
	{ MT_RF(0, 14), 0x00},
	{ MT_RF(0, 15), 0x00},

	/*
		LDO
	*/
	{ MT_RF(0, 19), 0x20},
	/*
		XO
	*/
	{ MT_RF(0, 20), 0x22},
	{ MT_RF(0, 21), 0x12},
	{ MT_RF(0, 23), 0x00},
	{ MT_RF(0, 24), 0x33}, /* See band selection for R24<1:0> */
	{ MT_RF(0, 25), 0x00},

	/*
		PLL, See Freq Selection
	*/
	{ MT_RF(0, 26), 0x00},
	{ MT_RF(0, 27), 0x00},
	{ MT_RF(0, 28), 0x00},
	{ MT_RF(0, 29), 0x00},
	{ MT_RF(0, 30), 0x00},
	{ MT_RF(0, 31), 0x00},
	{ MT_RF(0, 32), 0x00},
	{ MT_RF(0, 33), 0x00},
	{ MT_RF(0, 34), 0x00},
	{ MT_RF(0, 35), 0x00},
	{ MT_RF(0, 36), 0x00},
	{ MT_RF(0, 37), 0x00},

	/*
		LO Buffer
	*/
	{ MT_RF(0, 38), 0x2F},

	/*
		Test Ports
	*/
	{ MT_RF(0, 64), 0x00},
	{ MT_RF(0, 65), 0x80},
	{ MT_RF(0, 66), 0x01},
	{ MT_RF(0, 67), 0x04},

	/*
		ADC/DAC
	*/
	{ MT_RF(0, 68), 0x00},
	{ MT_RF(0, 69), 0x08},
	{ MT_RF(0, 70), 0x08},
	{ MT_RF(0, 71), 0x40},
	{ MT_RF(0, 72), 0xD0},
	{ MT_RF(0, 73), 0x93},
};
107
/* Bank 5 RF init values — 2.4 GHz channel path (RX/TX chains, LOGEN);
 * vendor-derived constants.
 */
static const struct mt76_reg_pair mt76x0_rf_2g_channel_0_tab[] = {
/*
	Bank 5 - Channel 0 2G RF registers
*/
	/*
		RX logic operation
	*/
	/* RF_R00 Change in SelectBand6590 */

	{ MT_RF(5, 2), 0x0C}, /* 5G+2G (MT7610U) */
	{ MT_RF(5, 3), 0x00},

	/*
		TX logic operation
	*/
	{ MT_RF(5, 4), 0x00},
	{ MT_RF(5, 5), 0x84},
	{ MT_RF(5, 6), 0x02},

	/*
		LDO
	*/
	{ MT_RF(5, 7), 0x00},
	{ MT_RF(5, 8), 0x00},
	{ MT_RF(5, 9), 0x00},

	/*
		RX
	*/
	{ MT_RF(5, 10), 0x51},
	{ MT_RF(5, 11), 0x22},
	{ MT_RF(5, 12), 0x22},
	{ MT_RF(5, 13), 0x0F},
	{ MT_RF(5, 14), 0x47}, /* Increase mixer current for more gain */
	{ MT_RF(5, 15), 0x25},
	{ MT_RF(5, 16), 0xC7}, /* Tune LNA2 tank */
	{ MT_RF(5, 17), 0x00},
	{ MT_RF(5, 18), 0x00},
	{ MT_RF(5, 19), 0x30}, /* Improve max Pin */
	{ MT_RF(5, 20), 0x33},
	{ MT_RF(5, 21), 0x02},
	{ MT_RF(5, 22), 0x32}, /* Tune LNA1 tank */
	{ MT_RF(5, 23), 0x00},
	{ MT_RF(5, 24), 0x25},
	{ MT_RF(5, 26), 0x00},
	{ MT_RF(5, 27), 0x12},
	{ MT_RF(5, 28), 0x0F},
	{ MT_RF(5, 29), 0x00},

	/*
		LOGEN
	*/
	{ MT_RF(5, 30), 0x51}, /* Tune LOGEN tank */
	{ MT_RF(5, 31), 0x35},
	{ MT_RF(5, 32), 0x31},
	{ MT_RF(5, 33), 0x31},
	{ MT_RF(5, 34), 0x34},
	{ MT_RF(5, 35), 0x03},
	{ MT_RF(5, 36), 0x00},

	/*
		TX
	*/
	{ MT_RF(5, 37), 0xDD}, /* Improve 3.2GHz spur */
	{ MT_RF(5, 38), 0xB3},
	{ MT_RF(5, 39), 0x33},
	{ MT_RF(5, 40), 0xB1},
	{ MT_RF(5, 41), 0x71},
	{ MT_RF(5, 42), 0xF2},
	{ MT_RF(5, 43), 0x47},
	{ MT_RF(5, 44), 0x77},
	{ MT_RF(5, 45), 0x0E},
	{ MT_RF(5, 46), 0x10},
	{ MT_RF(5, 47), 0x00},
	{ MT_RF(5, 48), 0x53},
	{ MT_RF(5, 49), 0x03},
	{ MT_RF(5, 50), 0xEF},
	{ MT_RF(5, 51), 0xC7},
	{ MT_RF(5, 52), 0x62},
	{ MT_RF(5, 53), 0x62},
	{ MT_RF(5, 54), 0x00},
	{ MT_RF(5, 55), 0x00},
	{ MT_RF(5, 56), 0x0F},
	{ MT_RF(5, 57), 0x0F},
	{ MT_RF(5, 58), 0x16},
	{ MT_RF(5, 59), 0x16},
	{ MT_RF(5, 60), 0x10},
	{ MT_RF(5, 61), 0x10},
	{ MT_RF(5, 62), 0xD0},
	{ MT_RF(5, 63), 0x6C},
	{ MT_RF(5, 64), 0x58},
	{ MT_RF(5, 65), 0x58},
	{ MT_RF(5, 66), 0xF2},
	{ MT_RF(5, 67), 0xE8},
	{ MT_RF(5, 68), 0xF0},
	{ MT_RF(5, 69), 0xF0},
	{ MT_RF(5, 127), 0x04},
};
206
/* Bank 6 RF init values — 5 GHz channel path; vendor-derived
 * constants.  Gaps in register numbers are per-band entries handled by
 * mt76x0_rf_band_switch_tab instead.
 */
static const struct mt76_reg_pair mt76x0_rf_5g_channel_0_tab[] = {
/*
	Bank 6 - Channel 0 5G RF registers
*/
	/*
		RX logic operation
	*/
	/* RF_R00 Change in SelectBandmt76x0 */

	{ MT_RF(6, 2), 0x0C},
	{ MT_RF(6, 3), 0x00},

	/*
		TX logic operation
	*/
	{ MT_RF(6, 4), 0x00},
	{ MT_RF(6, 5), 0x84},
	{ MT_RF(6, 6), 0x02},

	/*
		LDO
	*/
	{ MT_RF(6, 7), 0x00},
	{ MT_RF(6, 8), 0x00},
	{ MT_RF(6, 9), 0x00},

	/*
		RX
	*/
	{ MT_RF(6, 10), 0x00},
	{ MT_RF(6, 11), 0x01},

	{ MT_RF(6, 13), 0x23},
	{ MT_RF(6, 14), 0x00},
	{ MT_RF(6, 15), 0x04},
	{ MT_RF(6, 16), 0x22},

	{ MT_RF(6, 18), 0x08},
	{ MT_RF(6, 19), 0x00},
	{ MT_RF(6, 20), 0x00},
	{ MT_RF(6, 21), 0x00},
	{ MT_RF(6, 22), 0xFB},

	/*
		LOGEN5G
	*/
	{ MT_RF(6, 25), 0x76},
	{ MT_RF(6, 26), 0x24},
	{ MT_RF(6, 27), 0x04},
	{ MT_RF(6, 28), 0x00},
	{ MT_RF(6, 29), 0x00},

	/*
		TX
	*/
	{ MT_RF(6, 37), 0xBB},
	{ MT_RF(6, 38), 0xB3},

	{ MT_RF(6, 40), 0x33},
	{ MT_RF(6, 41), 0x33},

	{ MT_RF(6, 43), 0x03},
	{ MT_RF(6, 44), 0xB3},

	{ MT_RF(6, 46), 0x17},
	{ MT_RF(6, 47), 0x0E},
	{ MT_RF(6, 48), 0x10},
	{ MT_RF(6, 49), 0x07},

	{ MT_RF(6, 62), 0x00},
	{ MT_RF(6, 63), 0x00},
	{ MT_RF(6, 64), 0xF1},
	{ MT_RF(6, 65), 0x0F},
};
281
/* Bank 7 RF init values — VGA block (E3 silicon revision values);
 * several registers feed DC-offset calibration ("For dcoc").
 */
static const struct mt76_reg_pair mt76x0_rf_vga_channel_0_tab[] = {
/*
	Bank 7 - Channel 0 VGA RF registers
*/
	/* E3 CR */
	{ MT_RF(7, 0), 0x47}, /* Allow BBP/MAC to do calibration */
	{ MT_RF(7, 1), 0x00},
	{ MT_RF(7, 2), 0x00},
	{ MT_RF(7, 3), 0x00},
	{ MT_RF(7, 4), 0x00},

	{ MT_RF(7, 10), 0x13},
	{ MT_RF(7, 11), 0x0F},
	{ MT_RF(7, 12), 0x13}, /* For dcoc */
	{ MT_RF(7, 13), 0x13}, /* For dcoc */
	{ MT_RF(7, 14), 0x13}, /* For dcoc */
	{ MT_RF(7, 15), 0x20}, /* For dcoc */
	{ MT_RF(7, 16), 0x22}, /* For dcoc */

	{ MT_RF(7, 17), 0x7C},

	{ MT_RF(7, 18), 0x00},
	{ MT_RF(7, 19), 0x00},
	{ MT_RF(7, 20), 0x00},
	{ MT_RF(7, 21), 0xF1},
	{ MT_RF(7, 22), 0x11},
	{ MT_RF(7, 23), 0xC2},
	{ MT_RF(7, 24), 0x41},
	{ MT_RF(7, 25), 0x20},
	{ MT_RF(7, 26), 0x40},
	{ MT_RF(7, 27), 0xD7},
	{ MT_RF(7, 28), 0xA2},
	{ MT_RF(7, 29), 0x60},
	{ MT_RF(7, 30), 0x49},
	{ MT_RF(7, 31), 0x20},
	{ MT_RF(7, 32), 0x44},
	{ MT_RF(7, 33), 0xC1},
	{ MT_RF(7, 34), 0x60},
	{ MT_RF(7, 35), 0xC0},

	{ MT_RF(7, 61), 0x01},

	{ MT_RF(7, 72), 0x3C},
	{ MT_RF(7, 73), 0x34},
	{ MT_RF(7, 74), 0x00},
};
328
/* RF values keyed on band/bandwidth, applied at channel switch.
 * Entry layout: { address, band/bw match flags, value }.
 */
static const struct mt76x0_rf_switch_item mt76x0_rf_bw_switch_tab[] = {
	/*   Bank, 	Register,	Bw/Band, 	Value */
	{ MT_RF(0, 17),		RF_G_BAND | RF_BW_20,	0x00},
	{ MT_RF(0, 17),		RF_G_BAND | RF_BW_40,	0x00},
	{ MT_RF(0, 17),		RF_A_BAND | RF_BW_20,	0x00},
	{ MT_RF(0, 17),		RF_A_BAND | RF_BW_40,	0x00},
	{ MT_RF(0, 17),		RF_A_BAND | RF_BW_80,	0x00},

	/* TODO: need to check B7.R6 & B7.R7 setting for 2.4G again @20121112 */
	{ MT_RF(7, 6),		RF_G_BAND | RF_BW_20,	0x40},
	{ MT_RF(7, 6),		RF_G_BAND | RF_BW_40,	0x1C},
	{ MT_RF(7, 6),		RF_A_BAND | RF_BW_20,	0x40},
	{ MT_RF(7, 6),		RF_A_BAND | RF_BW_40,	0x20},
	{ MT_RF(7, 6),		RF_A_BAND | RF_BW_80,	0x10},

	{ MT_RF(7, 7),		RF_G_BAND | RF_BW_20,	0x40},
	{ MT_RF(7, 7),		RF_G_BAND | RF_BW_40,	0x20},
	{ MT_RF(7, 7),		RF_A_BAND | RF_BW_20,	0x40},
	{ MT_RF(7, 7),		RF_A_BAND | RF_BW_40,	0x20},
	{ MT_RF(7, 7),		RF_A_BAND | RF_BW_80,	0x10},

	{ MT_RF(7, 8),		RF_G_BAND | RF_BW_20,	0x03},
	{ MT_RF(7, 8),		RF_G_BAND | RF_BW_40,	0x01},
	{ MT_RF(7, 8),		RF_A_BAND | RF_BW_20,	0x03},
	{ MT_RF(7, 8),		RF_A_BAND | RF_BW_40,	0x01},
	{ MT_RF(7, 8),		RF_A_BAND | RF_BW_80,	0x00},

	/* TODO: need to check B7.R58 & B7.R59 setting for 2.4G again @20121112 */
	{ MT_RF(7, 58),		RF_G_BAND | RF_BW_20,	0x40},
	{ MT_RF(7, 58),		RF_G_BAND | RF_BW_40,	0x40},
	{ MT_RF(7, 58),		RF_A_BAND | RF_BW_20,	0x40},
	{ MT_RF(7, 58),		RF_A_BAND | RF_BW_40,	0x40},
	{ MT_RF(7, 58),		RF_A_BAND | RF_BW_80,	0x10},

	{ MT_RF(7, 59),		RF_G_BAND | RF_BW_20,	0x40},
	{ MT_RF(7, 59),		RF_G_BAND | RF_BW_40,	0x40},
	{ MT_RF(7, 59),		RF_A_BAND | RF_BW_20,	0x40},
	{ MT_RF(7, 59),		RF_A_BAND | RF_BW_40,	0x40},
	{ MT_RF(7, 59),		RF_A_BAND | RF_BW_80,	0x10},

	{ MT_RF(7, 60),		RF_G_BAND | RF_BW_20,	0xAA},
	{ MT_RF(7, 60),		RF_G_BAND | RF_BW_40,	0xAA},
	{ MT_RF(7, 60),		RF_A_BAND | RF_BW_20,	0xAA},
	{ MT_RF(7, 60),		RF_A_BAND | RF_BW_40,	0xAA},
	{ MT_RF(7, 60),		RF_A_BAND | RF_BW_80,	0xAA},

	{ MT_RF(7, 76),		RF_BW_20,	0x40},
	{ MT_RF(7, 76),		RF_BW_40,	0x40},
	{ MT_RF(7, 76),		RF_BW_80,	0x10},

	{ MT_RF(7, 77),		RF_BW_20,	0x40},
	{ MT_RF(7, 77),		RF_BW_40,	0x40},
	{ MT_RF(7, 77),		RF_BW_80,	0x10},
};
383
/* RF values keyed on band only (G band, or A band split into
 * low/mid/high/11J sub-bands), applied at band switch.
 */
static const struct mt76x0_rf_switch_item mt76x0_rf_band_switch_tab[] = {
	/*   Bank, 	Register,	Bw/Band, 	Value */
	{ MT_RF(0, 16),		RF_G_BAND,	0x20},
	{ MT_RF(0, 16),		RF_A_BAND,	0x20},

	{ MT_RF(0, 18),		RF_G_BAND,	0x00},
	{ MT_RF(0, 18),		RF_A_BAND,	0x00},

	{ MT_RF(0, 39),		RF_G_BAND,	0x36},
	{ MT_RF(0, 39),		RF_A_BAND_LB,	0x34},
	{ MT_RF(0, 39),		RF_A_BAND_MB,	0x33},
	{ MT_RF(0, 39),		RF_A_BAND_HB,	0x31},
	{ MT_RF(0, 39),		RF_A_BAND_11J,	0x36},

	{ MT_RF(6, 12),		RF_A_BAND_LB,	0x44},
	{ MT_RF(6, 12),		RF_A_BAND_MB,	0x44},
	{ MT_RF(6, 12),		RF_A_BAND_HB,	0x55},
	{ MT_RF(6, 12),		RF_A_BAND_11J,	0x44},

	{ MT_RF(6, 17),		RF_A_BAND_LB,	0x02},
	{ MT_RF(6, 17),		RF_A_BAND_MB,	0x00},
	{ MT_RF(6, 17),		RF_A_BAND_HB,	0x00},
	{ MT_RF(6, 17),		RF_A_BAND_11J,	0x05},

	{ MT_RF(6, 24),		RF_A_BAND_LB,	0xA1},
	{ MT_RF(6, 24),		RF_A_BAND_MB,	0x41},
	{ MT_RF(6, 24),		RF_A_BAND_HB,	0x21},
	{ MT_RF(6, 24),		RF_A_BAND_11J,	0xE1},

	{ MT_RF(6, 39),		RF_A_BAND_LB,	0x36},
	{ MT_RF(6, 39),		RF_A_BAND_MB,	0x34},
	{ MT_RF(6, 39),		RF_A_BAND_HB,	0x32},
	{ MT_RF(6, 39),		RF_A_BAND_11J,	0x37},

	{ MT_RF(6, 42),		RF_A_BAND_LB,	0xFB},
	{ MT_RF(6, 42),		RF_A_BAND_MB,	0xF3},
	{ MT_RF(6, 42),		RF_A_BAND_HB,	0xEB},
	{ MT_RF(6, 42),		RF_A_BAND_11J,	0xEB},

	/* Move R6-R45, R50~R59 to mt76x0_RF_INT_PA_5G_Channel_0_RegTb/mt76x0_RF_EXT_PA_5G_Channel_0_RegTb */

	{ MT_RF(6, 127),	RF_G_BAND,	0x84},
	{ MT_RF(6, 127),	RF_A_BAND,	0x04},

	{ MT_RF(7, 5),		RF_G_BAND,	0x40},
	{ MT_RF(7, 5),		RF_A_BAND,	0x00},

	{ MT_RF(7, 9),		RF_G_BAND,	0x00},
	{ MT_RF(7, 9),		RF_A_BAND,	0x00},

	{ MT_RF(7, 70),		RF_G_BAND,	0x00},
	{ MT_RF(7, 70),		RF_A_BAND,	0x6D},

	{ MT_RF(7, 71),		RF_G_BAND,	0x00},
	{ MT_RF(7, 71),		RF_A_BAND,	0xB0},

	{ MT_RF(7, 78),		RF_G_BAND,	0x00},
	{ MT_RF(7, 78),		RF_A_BAND,	0x55},

	{ MT_RF(7, 79),		RF_G_BAND,	0x00},
	{ MT_RF(7, 79),		RF_A_BAND,	0x55},
};
446
/*
 * Integer-N channel/frequency plan for the MT76x0 RF synthesizer.
 * Each entry maps an IEEE 802.11 channel number (first field) plus a
 * band-selection flag mask (RF_G_BAND / RF_A_BAND / RF_A_BAND_{LB,MB,HB,11J})
 * to the raw PLL register values programmed on channel switch.
 * NOTE(review): the meaning of the remaining numeric fields is defined by
 * struct mt76x0_freq_item (declared elsewhere) — these are vendor-supplied
 * calibration constants and must not be hand-edited.
 */
447static const struct mt76x0_freq_item mt76x0_frequency_plan[] = {
	/* 2.4 GHz G-band: channels 1-14 (2412-2484 MHz) */
448	{1, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2412 */
449	{2, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xE4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA1, 0, 0x30, 0, 0, 0x1}, /* Freq 2417 */
450	{3, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xE2, 0x40, 0x07, 0x40, 0x0B, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0}, /* Freq 2422 */
451	{4, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0}, /* Freq 2427 */
452	{5, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1}, /* Freq 2432 */
453	{6, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1}, /* Freq 2437 */
454	{7, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x07, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2442 */
455	{8, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA3, 0, 0x30, 0, 0, 0x1}, /* Freq 2447 */
456	{9, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xF2, 0x40, 0x07, 0x40, 0x0D, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2452 */
457	{10, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x51, 0, 0x30, 0, 0, 0x0}, /* Freq 2457 */
458	{11, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1}, /* Freq 2462 */
459	{12, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1}, /* Freq 2467 */
460	{13, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 2472 */
461	{14, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 2484 */
462
	/* 4.9 GHz Japanese 11J band: channels 183-196 (4915-4980 MHz) */
463	{183, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 4915 */
464	{184, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x00, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4920 */
465	{185, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4925 */
466	{187, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4935 */
467	{188, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4940 */
468	{189, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4945 */
469	{192, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4960 */
470	{196, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4980 */
471
	/* 5 GHz low band: channels 36-64 (5180-5320 MHz) */
472	{36, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5180 */
473	{37, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5185 */
474	{38, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5190 */
475	{39, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5195 */
476	{40, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5200 */
477	{41, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5205 */
478	{42, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5210 */
479	{43, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5215 */
480	{44, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5220 */
481	{45, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5225 */
482	{46, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5230 */
483	{47, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5235 */
484	{48, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5240 */
485	{49, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5245 */
486	{50, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5250 */
487	{51, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5255 */
488	{52, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5260 */
489	{53, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5265 */
490	{54, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5270 */
491	{55, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5275 */
492	{56, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5280 */
493	{57, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5285 */
494	{58, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5290 */
495	{59, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5295 */
496	{60, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5300 */
497	{61, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5305 */
498	{62, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5310 */
499	{63, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5315 */
500	{64, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5320 */
501
	/* 5 GHz mid band: channels 100-136 (5500-5680 MHz) */
502	{100, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5500 */
503	{101, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5505 */
504	{102, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5510 */
505	{103, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5515 */
506	{104, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5520 */
507	{105, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5525 */
508	{106, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5530 */
509	{107, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5535 */
510	{108, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5540 */
511	{109, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5545 */
512	{110, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5550 */
513	{111, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5555 */
514	{112, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5560 */
515	{113, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5565 */
516	{114, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5570 */
517	{115, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5575 */
518	{116, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5580 */
519	{117, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5585 */
520	{118, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5590 */
521	{119, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5595 */
522	{120, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5600 */
523	{121, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5605 */
524	{122, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5610 */
525	{123, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5615 */
526	{124, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5620 */
527	{125, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5625 */
528	{126, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5630 */
529	{127, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5635 */
530	{128, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5640 */
531	{129, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5645 */
532	{130, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5650 */
533	{131, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5655 */
534	{132, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5660 */
535	{133, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5665 */
536	{134, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5670 */
537	{135, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5675 */
538	{136, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5680 */
539
	/* 5 GHz high band: channels 137-173 (5685-5865 MHz) */
540	{137, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5685 */
541	{138, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5690 */
542	{139, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5695 */
543	{140, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5700 */
544	{141, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5705 */
545	{142, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5710 */
546	{143, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5715 */
547	{144, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5720 */
548	{145, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5725 */
549	{146, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5730 */
550	{147, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5735 */
551	{148, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5740 */
552	{149, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5745 */
553	{150, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5750 */
554	{151, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5755 */
555	{152, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5760 */
556	{153, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5765 */
557	{154, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5770 */
558	{155, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5775 */
559	{156, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5780 */
560	{157, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5785 */
561	{158, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5790 */
562	{159, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5795 */
563	{160, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5800 */
564	{161, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5805 */
565	{162, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5810 */
566	{163, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5815 */
567	{164, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5820 */
568	{165, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5825 */
569	{166, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5830 */
570	{167, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5835 */
571	{168, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5840 */
572	{169, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5845 */
573	{170, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5850 */
574	{171, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5855 */
575	{172, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5860 */
576	{173, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5865 */
577};
578
579static const struct mt76x0_freq_item mt76x0_sdm_frequency_plan[] = {
580 {1, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0xCCCC, 0x3}, /* Freq 2412 */
581 {2, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x12222, 0x3}, /* Freq 2417 */
582 {3, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x17777, 0x3}, /* Freq 2422 */
583 {4, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x1CCCC, 0x3}, /* Freq 2427 */
584 {5, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x22222, 0x3}, /* Freq 2432 */
585 {6, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x27777, 0x3}, /* Freq 2437 */
586 {7, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x2CCCC, 0x3}, /* Freq 2442 */
587 {8, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x32222, 0x3}, /* Freq 2447 */
588 {9, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x37777, 0x3}, /* Freq 2452 */
589 {10, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x3CCCC, 0x3}, /* Freq 2457 */
590 {11, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x2222, 0x3}, /* Freq 2462 */
591 {12, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x7777, 0x3}, /* Freq 2467 */
592 {13, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xCCCC, 0x3}, /* Freq 2472 */
593 {14, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x19999, 0x3}, /* Freq 2484 */
594
595 {183, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 4915 */
596 {184, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x0, 0x3}, /* Freq 4920 */
597 {185, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x2AAA, 0x3}, /* Freq 4925 */
598 {187, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x8000, 0x3}, /* Freq 4935 */
599 {188, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xAAAA, 0x3}, /* Freq 4940 */
600 {189, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xD555, 0x3}, /* Freq 4945 */
601 {192, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 4960 */
602 {196, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 4980 */
603
604 {36, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0xAAAA, 0x3}, /* Freq 5180 */
605 {37, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0xD555, 0x3}, /* Freq 5185 */
606 {38, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5190 */
607 {39, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5195 */
608 {40, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5200 */
609 {41, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5205 */
610 {42, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5210 */
611 {43, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5215 */
612 {44, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5220 */
613 {45, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5225 */
614 {46, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5230 */
615 {47, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5235 */
616 {48, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5240 */
617 {49, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5245 */
618 {50, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5250 */
619 {51, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5255 */
620 {52, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5260 */
621 {53, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5265 */
622 {54, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5270 */
623 {55, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5275 */
624 {56, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5280 */
625 {57, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5285 */
626 {58, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5290 */
627 {59, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5295 */
628 {60, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5300 */
629 {61, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5305 */
630 {62, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5310 */
631 {63, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5315 */
632 {64, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5320 */
633
634 {100, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5500 */
635 {101, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5505 */
636 {102, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5510 */
637 {103, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5515 */
638 {104, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5520 */
639 {105, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5525 */
640 {106, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5530 */
641 {107, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5535 */
642 {108, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5540 */
643 {109, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5545 */
644 {110, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5550 */
645 {111, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5555 */
646 {112, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5560 */
647 {113, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5565 */
648 {114, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5570 */
649 {115, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5575 */
650 {116, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5580 */
651 {117, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5585 */
652 {118, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5590 */
653 {119, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5595 */
654 {120, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5600 */
655 {121, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5605 */
656 {122, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5610 */
657 {123, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5615 */
658 {124, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5620 */
659 {125, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5625 */
660 {126, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5630 */
661 {127, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5635 */
662 {128, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5640 */
663 {129, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5645 */
664 {130, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5650 */
665 {131, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5655 */
666 {132, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5660 */
667 {133, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5665 */
668 {134, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5670 */
669 {135, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5675 */
670 {136, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5680 */
671
672 {137, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5685 */
673 {138, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5690 */
674 {139, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5695 */
675 {140, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5700 */
676 {141, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5705 */
677 {142, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5710 */
678 {143, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5715 */
679 {144, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5720 */
680 {145, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5725 */
681 {146, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5730 */
682 {147, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5735 */
683 {148, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5740 */
684 {149, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5745 */
685 {150, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5750 */
686 {151, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5755 */
687 {152, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5760 */
688 {153, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5765 */
689 {154, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5770 */
690 {155, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5775 */
691 {156, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5780 */
692 {157, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5785 */
693 {158, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5790 */
694 {159, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5795 */
695 {160, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5800 */
696 {161, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5805 */
697 {162, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5810 */
698 {163, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5815 */
699 {164, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5820 */
700 {165, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5825 */
701 {166, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5830 */
702 {167, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5835 */
703 {168, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5840 */
704 {169, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5845 */
705 {170, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5850 */
706 {171, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5855 */
707 {172, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5860 */
708 {173, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5865 */
709};
710
/* Channels whose PLL fractional part must be generated via the SDM
 * (sigma-delta modulator) path rather than the plain fractional divider.
 * NOTE(review): list taken from the vendor driver; semantics inferred from
 * the name — confirm against the PLL programming code that consumes it.
 */
711static const u8 mt76x0_sdm_channel[] = {
712 183, 185, 43, 45, 54, 55, 57, 58, 102, 103, 105, 106, 115, 117, 126, 127, 129, 130, 139, 141, 150, 151, 153, 154, 163, 165
713};
714
/* External PA configuration: RF bank-6 register values applied per 5 GHz
 * sub-band (low/mid/high band and 11J) when the external power amplifier
 * is in use. Registers 45, 58 and 59 differ per sub-band; the remaining
 * entries carry the same value for every band.
 */
715static const struct mt76x0_rf_switch_item mt76x0_rf_ext_pa_tab[] = {
716 { MT_RF(6, 45), RF_A_BAND_LB, 0x63},
717 { MT_RF(6, 45), RF_A_BAND_MB, 0x43},
718 { MT_RF(6, 45), RF_A_BAND_HB, 0x33},
719 { MT_RF(6, 45), RF_A_BAND_11J, 0x73},
720
721 { MT_RF(6, 50), RF_A_BAND_LB, 0x02},
722 { MT_RF(6, 50), RF_A_BAND_MB, 0x02},
723 { MT_RF(6, 50), RF_A_BAND_HB, 0x02},
724 { MT_RF(6, 50), RF_A_BAND_11J, 0x02},
725
726 { MT_RF(6, 51), RF_A_BAND_LB, 0x02},
727 { MT_RF(6, 51), RF_A_BAND_MB, 0x02},
728 { MT_RF(6, 51), RF_A_BAND_HB, 0x02},
729 { MT_RF(6, 51), RF_A_BAND_11J, 0x02},
730
731 { MT_RF(6, 52), RF_A_BAND_LB, 0x08},
732 { MT_RF(6, 52), RF_A_BAND_MB, 0x08},
733 { MT_RF(6, 52), RF_A_BAND_HB, 0x08},
734 { MT_RF(6, 52), RF_A_BAND_11J, 0x08},
735
736 { MT_RF(6, 53), RF_A_BAND_LB, 0x08},
737 { MT_RF(6, 53), RF_A_BAND_MB, 0x08},
738 { MT_RF(6, 53), RF_A_BAND_HB, 0x08},
739 { MT_RF(6, 53), RF_A_BAND_11J, 0x08},
740
741 { MT_RF(6, 54), RF_A_BAND_LB, 0x0A},
742 { MT_RF(6, 54), RF_A_BAND_MB, 0x0A},
743 { MT_RF(6, 54), RF_A_BAND_HB, 0x0A},
744 { MT_RF(6, 54), RF_A_BAND_11J, 0x0A},
745
746 { MT_RF(6, 55), RF_A_BAND_LB, 0x0A},
747 { MT_RF(6, 55), RF_A_BAND_MB, 0x0A},
748 { MT_RF(6, 55), RF_A_BAND_HB, 0x0A},
749 { MT_RF(6, 55), RF_A_BAND_11J, 0x0A},
750
751 { MT_RF(6, 56), RF_A_BAND_LB, 0x05},
752 { MT_RF(6, 56), RF_A_BAND_MB, 0x05},
753 { MT_RF(6, 56), RF_A_BAND_HB, 0x05},
754 { MT_RF(6, 56), RF_A_BAND_11J, 0x05},
755
756 { MT_RF(6, 57), RF_A_BAND_LB, 0x05},
757 { MT_RF(6, 57), RF_A_BAND_MB, 0x05},
758 { MT_RF(6, 57), RF_A_BAND_HB, 0x05},
759 { MT_RF(6, 57), RF_A_BAND_11J, 0x05},
760
 /* registers 58/59 are the only band-dependent entries besides 45 */
761 { MT_RF(6, 58), RF_A_BAND_LB, 0x05},
762 { MT_RF(6, 58), RF_A_BAND_MB, 0x03},
763 { MT_RF(6, 58), RF_A_BAND_HB, 0x02},
764 { MT_RF(6, 58), RF_A_BAND_11J, 0x07},
765
766 { MT_RF(6, 59), RF_A_BAND_LB, 0x05},
767 { MT_RF(6, 59), RF_A_BAND_MB, 0x03},
768 { MT_RF(6, 59), RF_A_BAND_HB, 0x02},
769 { MT_RF(6, 59), RF_A_BAND_11J, 0x07},
770};
771
772#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
new file mode 100644
index 000000000000..95f28492a843
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
@@ -0,0 +1,660 @@
1/*
2 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
3 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
4 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include "mt76x0.h"
17#include "trace.h"
18#include <linux/etherdevice.h>
19
20static void
21mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
22 enum nl80211_band band)
23{
24 u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
25
26 txrate->idx = 0;
27 txrate->flags = 0;
28 txrate->count = 1;
29
30 switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
31 case MT_PHY_TYPE_OFDM:
32 if (band == NL80211_BAND_2GHZ)
33 idx += 4;
34
35 txrate->idx = idx;
36 return;
37 case MT_PHY_TYPE_CCK:
38 if (idx >= 8)
39 idx -= 8;
40
41 txrate->idx = idx;
42 return;
43 case MT_PHY_TYPE_HT_GF:
44 txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
45 /* fall through */
46 case MT_PHY_TYPE_HT:
47 txrate->flags |= IEEE80211_TX_RC_MCS;
48 txrate->idx = idx;
49 break;
50 case MT_PHY_TYPE_VHT:
51 txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
52 txrate->idx = idx;
53 break;
54 default:
55 WARN_ON(1);
56 return;
57 }
58
59 switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
60 case MT_PHY_BW_20:
61 break;
62 case MT_PHY_BW_40:
63 txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
64 break;
65 case MT_PHY_BW_80:
66 txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
67 break;
68 default:
69 WARN_ON(1);
70 return;
71 }
72
73 if (rate & MT_RXWI_RATE_SGI)
74 txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
75}
76
77static void
78mt76_mac_fill_tx_status(struct mt76x0_dev *dev, struct ieee80211_tx_info *info,
79 struct mt76_tx_status *st, int n_frames)
80{
81 struct ieee80211_tx_rate *rate = info->status.rates;
82 int cur_idx, last_rate;
83 int i;
84
85 if (!n_frames)
86 return;
87
88 last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
89 mt76_mac_process_tx_rate(&rate[last_rate], st->rate,
90 dev->mt76.chandef.chan->band);
91 if (last_rate < IEEE80211_TX_MAX_RATES - 1)
92 rate[last_rate + 1].idx = -1;
93
94 cur_idx = rate[last_rate].idx + last_rate;
95 for (i = 0; i <= last_rate; i++) {
96 rate[i].flags = rate[last_rate].flags;
97 rate[i].idx = max_t(int, 0, cur_idx - i);
98 rate[i].count = 1;
99 }
100
101 rate[last_rate - 1].count = st->retry + 1 - last_rate;
102
103 info->status.ampdu_len = n_frames;
104 info->status.ampdu_ack_len = st->success ? n_frames : 0;
105
106 if (st->pktid & MT_TXWI_PKTID_PROBE)
107 info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
108
109 if (st->aggr)
110 info->flags |= IEEE80211_TX_CTL_AMPDU |
111 IEEE80211_TX_STAT_AMPDU;
112
113 if (!st->ack_req)
114 info->flags |= IEEE80211_TX_CTL_NO_ACK;
115 else if (st->success)
116 info->flags |= IEEE80211_TX_STAT_ACK;
117}
118
119u16 mt76x0_mac_tx_rate_val(struct mt76x0_dev *dev,
120 const struct ieee80211_tx_rate *rate, u8 *nss_val)
121{
122 u16 rateval;
123 u8 phy, rate_idx;
124 u8 nss = 1;
125 u8 bw = 0;
126
127 if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
128 rate_idx = rate->idx;
129 nss = 1 + (rate->idx >> 4);
130 phy = MT_PHY_TYPE_VHT;
131 if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
132 bw = 2;
133 else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
134 bw = 1;
135 } else if (rate->flags & IEEE80211_TX_RC_MCS) {
136 rate_idx = rate->idx;
137 nss = 1 + (rate->idx >> 3);
138 phy = MT_PHY_TYPE_HT;
139 if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
140 phy = MT_PHY_TYPE_HT_GF;
141 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
142 bw = 1;
143 } else {
144 const struct ieee80211_rate *r;
145 int band = dev->mt76.chandef.chan->band;
146 u16 val;
147
148 r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
149 if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
150 val = r->hw_value_short;
151 else
152 val = r->hw_value;
153
154 phy = val >> 8;
155 rate_idx = val & 0xff;
156 bw = 0;
157 }
158
159 rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
160 rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
161 rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
162 if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
163 rateval |= MT_RXWI_RATE_SGI;
164
165 *nss_val = nss;
166 return cpu_to_le16(rateval);
167}
168
169void mt76x0_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid,
170 const struct ieee80211_tx_rate *rate)
171{
172 unsigned long flags;
173
174 spin_lock_irqsave(&dev->mt76.lock, flags);
175 wcid->tx_rate = mt76x0_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
176 wcid->tx_rate_set = true;
177 spin_unlock_irqrestore(&dev->mt76.lock, flags);
178}
179
/* Pop one entry from the hardware TX status FIFO.
 * NOTE(review): MT_TX_STAT_FIFO_EXT is read before MT_TX_STAT_FIFO —
 * reading the FIFO register presumably advances the hardware FIFO, so
 * the order looks deliberate; confirm against the datasheet before
 * reordering.
 */
180struct mt76_tx_status mt76x0_mac_fetch_tx_status(struct mt76x0_dev *dev)
181{
182 struct mt76_tx_status stat = {};
183 u32 stat2, stat1;
184
185 stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
186 stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
187
 /* first word: validity/outcome flags plus WCID and rate of the frame */
188 stat.valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
189 stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
190 stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
191 stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
192 stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
193 stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
194
 /* extended word: retry count and the driver-assigned packet id */
195 stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
196 stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
197
198 return stat;
199}
200
/* Report a TX status to mac80211, batching consecutive identical A-MPDU
 * statuses for the same station into a single ampdu_len report.
 * @update: in/out flag shared with the caller; when set it forces the
 * cached per-station status to be flushed instead of extended.
 */
201void mt76x0_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update)
202{
203 struct ieee80211_tx_info info = {};
204 struct ieee80211_sta *sta = NULL;
205 struct mt76_wcid *wcid = NULL;
206 struct mt76_sta *msta = NULL;
207
208 rcu_read_lock();
209 if (stat->wcid < ARRAY_SIZE(dev->wcid))
210  wcid = rcu_dereference(dev->wcid[stat->wcid]);
211
212 if (wcid) {
213  void *priv;
  /* recover the ieee80211_sta that embeds this wcid's mt76_sta */
214  priv = msta = container_of(wcid, struct mt76_sta, wcid);
215  sta = container_of(priv, struct ieee80211_sta, drv_priv);
216 }
217
218 if (msta && stat->aggr) {
  /* pack rate + retry into one word for a cheap equality test */
219  u32 stat_val, stat_cache;
220
221  stat_val = stat->rate;
222  stat_val |= ((u32) stat->retry) << 16;
223  stat_cache = msta->status.rate;
224  stat_cache |= ((u32) msta->status.retry) << 16;
225
  /* identical consecutive status: just count the frame (cap at 32) */
226  if (*update == 0 && stat_val == stat_cache &&
227      stat->wcid == msta->status.wcid && msta->n_frames < 32) {
228   msta->n_frames++;
229   goto out;
230  }
231
  /* status changed: report the accumulated batch, start a new one */
232  mt76_mac_fill_tx_status(dev, &info, &msta->status,
233     msta->n_frames);
234  msta->status = *stat;
235  msta->n_frames = 1;
236  *update = 0;
237 } else {
238  mt76_mac_fill_tx_status(dev, &info, stat, 1);
239  *update = 1;
240 }
241
242 spin_lock_bh(&dev->mac_lock);
243 ieee80211_tx_status_noskb(dev->mt76.hw, sta, &info);
244 spin_unlock_bh(&dev->mac_lock);
245out:
246 rcu_read_unlock();
247}
248
/* Program the six protection config registers (CCK, OFDM, MM20, MM40,
 * GF20, GF40) from the legacy-protection flag and the HT operation
 * protection mode.
 */
249void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
250     int ht_mode)
251{
252 int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
253 bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
 /* prot[0..5] = CCK, OFDM, MM20, MM40, GF20, GF40 register values */
254 u32 prot[6];
255 bool ht_rts[4] = {};
256 int i;
257
258 prot[0] = MT_PROT_NAV_SHORT |
259    MT_PROT_TXOP_ALLOW_ALL |
260    MT_PROT_RTS_THR_EN;
261 prot[1] = prot[0];
262 if (legacy_prot)
263  prot[1] |= MT_PROT_CTRL_CTS2SELF;
264
265 prot[2] = prot[4] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_BW20;
266 prot[3] = prot[5] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_ALL;
267
268 if (legacy_prot) {
  /* protect HT frames at a CCK rate every legacy STA can decode */
269  prot[2] |= MT_PROT_RATE_CCK_11;
270  prot[3] |= MT_PROT_RATE_CCK_11;
271  prot[4] |= MT_PROT_RATE_CCK_11;
272  prot[5] |= MT_PROT_RATE_CCK_11;
273 } else {
274  prot[2] |= MT_PROT_RATE_OFDM_24;
275  prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
276  prot[4] |= MT_PROT_RATE_OFDM_24;
277  prot[5] |= MT_PROT_RATE_DUP_OFDM_24;
278 }
279
 /* ht_rts[0..3]: whether MM20/MM40/GF20/GF40 need RTS/CTS */
280 switch (mode) {
281 case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
282  break;
283
284 case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
285  ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
286  break;
287
288 case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
  /* only the 40 MHz modes need protection */
289  ht_rts[1] = ht_rts[3] = true;
290  break;
291
292 case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
293  ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
294  break;
295 }
296
 /* greenfield frames are not decodable by non-GF HT stations */
297 if (non_gf)
298  ht_rts[2] = ht_rts[3] = true;
299
300 for (i = 0; i < 4; i++)
301  if (ht_rts[i])
302   prot[i + 2] |= MT_PROT_CTRL_RTS_CTS;
303
 /* the six registers are consecutive, 4 bytes apart */
304 for (i = 0; i < 6; i++)
305  mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
306}
307
308void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb)
309{
310 if (short_preamb)
311 mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
312 else
313 mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
314}
315
316void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval)
317{
318 u32 val = mt76_rr(dev, MT_BEACON_TIME_CFG);
319
320 val &= ~(MT_BEACON_TIME_CFG_TIMER_EN |
321 MT_BEACON_TIME_CFG_SYNC_MODE |
322 MT_BEACON_TIME_CFG_TBTT_EN);
323
324 if (!enable) {
325 mt76_wr(dev, MT_BEACON_TIME_CFG, val);
326 return;
327 }
328
329 val &= ~MT_BEACON_TIME_CFG_INTVAL;
330 val |= FIELD_PREP(MT_BEACON_TIME_CFG_INTVAL, interval << 4) |
331 MT_BEACON_TIME_CFG_TIMER_EN |
332 MT_BEACON_TIME_CFG_SYNC_MODE |
333 MT_BEACON_TIME_CFG_TBTT_EN;
334}
335
/* Detect a stuck-MAC condition and recover by pulsing the CSR reset.
 * NOTE(review): register 0x10f4 and bits 29/7/5 come from the vendor
 * driver and are otherwise undocumented here.
 */
336static void mt76x0_check_mac_err(struct mt76x0_dev *dev)
337{
338 u32 val = mt76_rr(dev, 0x10f4);
339
340 if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
341  return;
342
343 dev_err(dev->mt76.dev, "Error: MAC specific condition occurred\n");
344
 /* pulse the MAC CSR reset to bring the MAC back to a sane state */
345 mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
346 udelay(10);
347 mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
348}
/* Periodic (10 s) statistics worker: accumulates the hardware RX/TX and
 * aggregation counters into dev->stats, refreshes the average A-MPDU
 * length estimate and checks for MAC error conditions.
 */
349void mt76x0_mac_work(struct work_struct *work)
350{
351 struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
352           mac_work.work);
 /* each 32-bit register in a span packs two 16-bit counters */
353 struct {
354  u32 addr_base;
355  u32 span;
356  u64 *stat_base;
357 } spans[] = {
358  { MT_RX_STA_CNT0, 3, dev->stats.rx_stat },
359  { MT_TX_STA_CNT0, 3, dev->stats.tx_stat },
360  { MT_TX_AGG_STAT, 1, dev->stats.aggr_stat },
361  { MT_MPDU_DENSITY_CNT, 1, dev->stats.zero_len_del },
362  { MT_TX_AGG_CNT_BASE0, 8, &dev->stats.aggr_n[0] },
363  { MT_TX_AGG_CNT_BASE1, 8, &dev->stats.aggr_n[16] },
364 };
365 u32 sum, n;
366 int i, j, k;
367
368 /* Note: using MCU_RANDOM_READ is actually slower then reading all the
369  * registers by hand. MCU takes ca. 20ms to complete read of 24
370  * registers while reading them one by one will takes roughly
371  * 24*200us =~ 5ms.
372  */
373
374 k = 0;
375 n = 0;
376 sum = 0;
377 for (i = 0; i < ARRAY_SIZE(spans); i++)
378  for (j = 0; j < spans[i].span; j++) {
379   u32 val = mt76_rr(dev, spans[i].addr_base + j * 4);
380
   /* low and high 16-bit halves are separate counters */
381   spans[i].stat_base[j * 2] += val & 0xffff;
382   spans[i].stat_base[j * 2 + 1] += val >> 16;
383
384   /* Calculate average AMPDU length */
385   if (spans[i].addr_base != MT_TX_AGG_CNT_BASE0 &&
386       spans[i].addr_base != MT_TX_AGG_CNT_BASE1)
387    continue;
388
   /* bucket k counts A-MPDUs of length 2k+1 (lo) / 2k+2 (hi) */
389   n += (val >> 16) + (val & 0xffff);
390   sum += (val & 0xffff) * (1 + k * 2) +
391          (val >> 16) * (2 + k * 2);
392   k++;
393  }
394
395 atomic_set(&dev->avg_ampdu_len, n ? DIV_ROUND_CLOSEST(sum, n) : 1);
396
397 mt76x0_check_mac_err(dev);
398
399 ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, 10 * HZ);
400}
401
402void
403mt76x0_mac_wcid_setup(struct mt76x0_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
404{
405 u8 zmac[ETH_ALEN] = {};
406 u32 attr;
407
408 attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
409 FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
410
411 mt76_wr(dev, MT_WCID_ATTR(idx), attr);
412
413 if (mac)
414 memcpy(zmac, mac, sizeof(zmac));
415
416 mt76x0_addr_wr(dev, MT_WCID_ADDR(idx), zmac);
417}
418
/* Program the global maximum A-MPDU length factor as the minimum
 * advertised by all associated stations.
 * NOTE(review): the bare "return" below deliberately disables this
 * function — everything after it is dead code, presumably kept for a
 * future fix; confirm the intent before enabling or deleting it.
 */
419void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev)
420{
421 struct ieee80211_sta *sta;
422 struct mt76_wcid *wcid;
423 void *msta;
424 u8 min_factor = 3;
425 int i;
426
427 return;
428
429 rcu_read_lock();
430 for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
431  wcid = rcu_dereference(dev->wcid[i]);
432  if (!wcid)
433   continue;
434
435  msta = container_of(wcid, struct mt76_sta, wcid);
436  sta = container_of(msta, struct ieee80211_sta, drv_priv);
437
438  min_factor = min(min_factor, sta->ht_cap.ampdu_factor);
439 }
440 rcu_read_unlock();
441
442 mt76_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
443   FIELD_PREP(MT_MAX_LEN_CFG_AMPDU, min_factor));
444}
445
/* Translate a hardware RXWI rate word into mac80211 rx_status fields:
 * rate index, encoding, coding/GI/STBC flags and bandwidth.
 */
446static void
447mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
448{
449 u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
450
451 switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
452 case MT_PHY_TYPE_OFDM:
  /* clamp out-of-range indices rather than trusting the hardware */
453  if (idx >= 8)
454   idx = 0;
455
  /* OFDM rates follow the four CCK entries in the 2.4 GHz table */
456  if (status->band == NL80211_BAND_2GHZ)
457   idx += 4;
458
459  status->rate_idx = idx;
460  return;
461 case MT_PHY_TYPE_CCK:
462  if (idx >= 8) {
   /* high bit of the CCK index flags a short preamble */
463   idx -= 8;
464   status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
465  }
466
467  if (idx >= 4)
468   idx = 0;
469
470  status->rate_idx = idx;
471  return;
472 case MT_PHY_TYPE_HT_GF:
473  status->enc_flags |= RX_ENC_FLAG_HT_GF;
474  /* fall through */
475 case MT_PHY_TYPE_HT:
476  status->encoding = RX_ENC_HT;
477  status->rate_idx = idx;
478  break;
479 case MT_PHY_TYPE_VHT:
480  status->encoding = RX_ENC_VHT;
  /* VHT packs MCS and NSS into the index field */
481  status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
482  status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
483  break;
484 default:
485  WARN_ON(1);
486  return;
487 }
488
 /* HT/VHT only: LDPC, short GI, STBC and bandwidth flags */
489 if (rate & MT_RXWI_RATE_LDPC)
490  status->enc_flags |= RX_ENC_FLAG_LDPC;
491
492 if (rate & MT_RXWI_RATE_SGI)
493  status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
494
495 if (rate & MT_RXWI_RATE_STBC)
496  status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
497
498 switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
499 case MT_PHY_BW_20:
500  break;
501 case MT_PHY_BW_40:
502  status->bw = RATE_INFO_BW_40;
503  break;
504 case MT_PHY_BW_80:
505  status->bw = RATE_INFO_BW_80;
506  break;
507 default:
508  WARN_ON(1);
509  break;
510 }
511}
512
/* Track the PHY mode and RSSI of our AP's beacons for connection
 * monitoring.
 * NOTE(review): the averaging ((avg * 15) / 16 + (rssi << 8)) / 256 does
 * not match the plain 15/16-weight EWMA used for unicast frames in
 * mt76x0_mac_process_rx; the << 8 / 256 scaling gives avg an effective
 * weight of 15/4096 — this looks like a fixed-point bug; verify the
 * intended format of dev->avg_rssi.
 */
513static void
514mt76x0_rx_monitor_beacon(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi,
515    u16 rate, int rssi)
516{
517 dev->bcn_phy_mode = FIELD_GET(MT_RXWI_RATE_PHY, rate);
518 dev->avg_rssi = ((dev->avg_rssi * 15) / 16 + (rssi << 8)) / 256;
519}
520
521static int
522mt76x0_rx_is_our_beacon(struct mt76x0_dev *dev, u8 *data)
523{
524 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
525
526 return ieee80211_is_beacon(hdr->frame_control) &&
527 ether_addr_equal(hdr->addr2, dev->ap_bssid);
528}
529
/* Parse one received frame's RXWI: fill the mac80211 rx_status (crypto
 * flags, RSSI, frequency/band, rate) and update the connection-monitor
 * RSSI averages. Returns the MPDU length, or 0 on a malformed frame.
 */
530u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb,
531   u8 *data, void *rxi)
532{
533 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
534 struct mt76x0_rxwi *rxwi = rxi;
535 u32 len, ctl = le32_to_cpu(rxwi->ctl);
536 u16 rate = le16_to_cpu(rxwi->rate);
537 int rssi;
538
 /* drop frames shorter than a minimal 802.11 header */
539 len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
540 if (WARN_ON(len < 10))
541  return 0;
542
543 if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) {
544  status->flag |= RX_FLAG_DECRYPTED;
545  status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
546 }
547
 /* single RX chain on this chip */
548 status->chains = BIT(0);
549 rssi = mt76x0_phy_get_rssi(dev, rxwi);
550 status->chain_signal[0] = status->signal = rssi;
551 status->freq = dev->mt76.chandef.chan->center_freq;
552 status->band = dev->mt76.chandef.chan->band;
553
554 mt76_mac_process_rate(status, rate);
555
 /* connection monitoring: track beacon / unicast RSSI under lock */
556 spin_lock_bh(&dev->con_mon_lock);
557 if (mt76x0_rx_is_our_beacon(dev, data)) {
558  mt76x0_rx_monitor_beacon(dev, rxwi, rate, rssi);
559 } else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M)) {
  /* EWMA with 1/16 weight for unicast-to-me frames */
560  if (dev->avg_rssi == 0)
561   dev->avg_rssi = rssi;
562  else
563   dev->avg_rssi = (dev->avg_rssi * 15) / 16 + rssi / 16;
564
565 }
566 spin_unlock_bh(&dev->con_mon_lock);
567
568 return len;
569}
570
571static enum mt76_cipher_type
572mt76_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
573{
574 memset(key_data, 0, 32);
575 if (!key)
576 return MT_CIPHER_NONE;
577
578 if (key->keylen > 32)
579 return MT_CIPHER_NONE;
580
581 memcpy(key_data, key->key, key->keylen);
582
583 switch (key->cipher) {
584 case WLAN_CIPHER_SUITE_WEP40:
585 return MT_CIPHER_WEP40;
586 case WLAN_CIPHER_SUITE_WEP104:
587 return MT_CIPHER_WEP104;
588 case WLAN_CIPHER_SUITE_TKIP:
589 return MT_CIPHER_TKIP;
590 case WLAN_CIPHER_SUITE_CCMP:
591 return MT_CIPHER_AES_CCMP;
592 default:
593 return MT_CIPHER_NONE;
594 }
595}
596
/* Install (or clear, when @key is NULL) a per-station hardware key for
 * WCID @idx: key material, initial IV/EIV, and the WCID attribute bits.
 * Returns 0 on success, -EINVAL for an unsupported cipher.
 */
597int mt76x0_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx,
598   struct ieee80211_key_conf *key)
599{
600 enum mt76_cipher_type cipher;
601 u8 key_data[32];
602 u8 iv_data[8];
603 u32 val;
604
605 cipher = mt76_mac_get_key_info(key, key_data);
606 if (cipher == MT_CIPHER_NONE && key)
607  return -EINVAL;
608
609 trace_mt76x0_set_key(&dev->mt76, idx);
610
611 mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
612
 /* seed the per-WCID IV: key index in bits 7:6 of byte 3; TKIP/CCMP
  * additionally set the Extended IV bit and start the PN at 1 */
613 memset(iv_data, 0, sizeof(iv_data));
614 if (key) {
615  iv_data[3] = key->keyidx << 6;
616  if (cipher >= MT_CIPHER_TKIP) {
617   /* Note: start with 1 to comply with spec,
618    * (see comment on common/cmm_wpa.c:4291).
619    */
620   iv_data[0] |= 1;
621   iv_data[3] |= 0x20;
622  }
623 }
624 mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
625
 /* update the cipher mode (4-bit value split across two fields) and
  * the pairwise flag without disturbing other attribute bits */
626 val = mt76_rr(dev, MT_WCID_ATTR(idx));
627 val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT;
628 val |= FIELD_PREP(MT_WCID_ATTR_PKEY_MODE, cipher & 7) |
629        FIELD_PREP(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3);
630 val &= ~MT_WCID_ATTR_PAIRWISE;
631 val |= MT_WCID_ATTR_PAIRWISE *
632  !!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
633 mt76_wr(dev, MT_WCID_ATTR(idx), val);
634
635 return 0;
636}
637
/* Install (or clear, when @key is NULL) a per-VIF shared/group key in
 * slot @key_idx. Returns 0 on success, -EINVAL for unsupported ciphers.
 */
638int mt76x0_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx,
639    struct ieee80211_key_conf *key)
640{
641 enum mt76_cipher_type cipher;
642 u8 key_data[32];
643 u32 val;
644
645 cipher = mt76_mac_get_key_info(key, key_data);
646 if (cipher == MT_CIPHER_NONE && key)
647  return -EINVAL;
648
649 trace_mt76x0_set_shared_key(&dev->mt76, vif_idx, key_idx);
650
651 mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx),
652   key_data, sizeof(key_data));
653
 /* update only this slot's mode nibble in the shared-key register */
654 val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
655 val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
656 val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
657 mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
658
659 return 0;
660}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h
new file mode 100644
index 000000000000..bea067b71c13
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.h
@@ -0,0 +1,154 @@
1/*
2 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
3 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef __MT76_MAC_H
16#define __MT76_MAC_H
17
18/* Note: values in original "RSSI" and "SNR" fields are not actually what they
19 * are called for MT76X0U, names used by this driver are educated guesses
20 * (see vendor mac/ral_omac.c).
21 */
/* Hardware RX descriptor (RXWI) prepended to received frames.
 * All multi-byte fields are little endian.
 */
22struct mt76x0_rxwi {
 /* MT_RXINFO_* flag bits */
23 __le32 rxinfo;
24
 /* MT_RXWI_CTL_*: WCID, key/BSS index, MPDU length, TID */
25 __le32 ctl;
26
 /* MT_RXWI_FRAG / MT_RXWI_SN: fragment and sequence numbers */
27 __le16 tid_sn;
 /* MT_RXWI_RATE_*: index, bandwidth, SGI, STBC, PHY type */
28 __le16 rate;
29
30 s8 rssi[4];
31
32 __le32 bbp_rxinfo[4];
33} __packed __aligned(4);
34
35#define MT_RXINFO_BA BIT(0)
36#define MT_RXINFO_DATA BIT(1)
37#define MT_RXINFO_NULL BIT(2)
38#define MT_RXINFO_FRAG BIT(3)
39#define MT_RXINFO_U2M BIT(4)
40#define MT_RXINFO_MULTICAST BIT(5)
41#define MT_RXINFO_BROADCAST BIT(6)
42#define MT_RXINFO_MYBSS BIT(7)
43#define MT_RXINFO_CRCERR BIT(8)
44#define MT_RXINFO_ICVERR BIT(9)
45#define MT_RXINFO_MICERR BIT(10)
46#define MT_RXINFO_AMSDU BIT(11)
47#define MT_RXINFO_HTC BIT(12)
48#define MT_RXINFO_RSSI BIT(13)
49#define MT_RXINFO_L2PAD BIT(14)
50#define MT_RXINFO_AMPDU BIT(15)
51#define MT_RXINFO_DECRYPT BIT(16)
52#define MT_RXINFO_BSSIDX3 BIT(17)
53#define MT_RXINFO_WAPI_KEY BIT(18)
54#define MT_RXINFO_PN_LEN GENMASK(21, 19)
55#define MT_RXINFO_SW_PKT_80211 BIT(22)
56#define MT_RXINFO_TCP_SUM_BYPASS BIT(28)
57#define MT_RXINFO_IP_SUM_BYPASS BIT(29)
58#define MT_RXINFO_TCP_SUM_ERR BIT(30)
59#define MT_RXINFO_IP_SUM_ERR BIT(31)
60
61#define MT_RXWI_CTL_WCID GENMASK(7, 0)
62#define MT_RXWI_CTL_KEY_IDX GENMASK(9, 8)
63#define MT_RXWI_CTL_BSS_IDX GENMASK(12, 10)
64#define MT_RXWI_CTL_UDF GENMASK(15, 13)
65#define MT_RXWI_CTL_MPDU_LEN GENMASK(27, 16)
66#define MT_RXWI_CTL_TID GENMASK(31, 28)
67
68#define MT_RXWI_FRAG GENMASK(3, 0)
69#define MT_RXWI_SN GENMASK(15, 4)
70
71#define MT_RXWI_RATE_INDEX GENMASK(5, 0)
72#define MT_RXWI_RATE_LDPC BIT(6)
73#define MT_RXWI_RATE_BW GENMASK(8, 7)
74#define MT_RXWI_RATE_SGI BIT(9)
75#define MT_RXWI_RATE_STBC BIT(10)
76#define MT_RXWI_RATE_LDPC_ETXBF BIT(11)
77#define MT_RXWI_RATE_SND BIT(12)
78#define MT_RXWI_RATE_PHY GENMASK(15, 13)
79
80#define MT_RATE_INDEX_VHT_IDX GENMASK(3, 0)
81#define MT_RATE_INDEX_VHT_NSS GENMASK(5, 4)
82
83#define MT_RXWI_GAIN_RSSI_VAL GENMASK(5, 0)
84#define MT_RXWI_GAIN_RSSI_LNA_ID GENMASK(7, 6)
85#define MT_RXWI_ANT_AUX_LNA BIT(7)
86
87#define MT_RXWI_EANT_ENC_ANT_ID GENMASK(7, 0)
88
/* Channel bandwidth encoding used in the RXWI/TXWI rate words
 * (MT_RXWI_RATE_BW field).
 */
89enum mt76_phy_bandwidth {
90 MT_PHY_BW_20,
91 MT_PHY_BW_40,
92 MT_PHY_BW_80,
93};
94
/* Hardware TX descriptor (TXWI) prepended to transmitted frames.
 * All multi-byte fields are little endian.
 */
95struct mt76_txwi {
 /* MT_TXWI_FLAGS_*: frag/AMPDU/TXOP/rate-fallback control bits */
96 __le16 flags;
 /* MT_TXWI_RATE_*: MCS, bandwidth, SGI, STBC, PHY mode */
97 __le16 rate_ctl;
 /* MT_TXWI_ACK_CTL_*: ACK required, no-sequence, BA window */
98 u8 ack_ctl;
99 u8 wcid;
 /* MT_TXWI_LEN_BYTE_CNT */
100 __le16 len_ctl;
101 __le32 iv;
102 __le32 eiv;
103 u8 aid;
104 u8 txstream;
 /* MT_TXWI_CTL_*: TX power adjust, channel-check, PIFS */
105 u8 ctl2;
 /* MT_TXWI_PKTID_PROBE plus driver packet id for TX-status matching */
106 u8 pktid;
107} __packed __aligned(4);
108
109#define MT_TXWI_FLAGS_FRAG BIT(0)
110#define MT_TXWI_FLAGS_MMPS BIT(1)
111#define MT_TXWI_FLAGS_CFACK BIT(2)
112#define MT_TXWI_FLAGS_TS BIT(3)
113#define MT_TXWI_FLAGS_AMPDU BIT(4)
114#define MT_TXWI_FLAGS_MPDU_DENSITY GENMASK(7, 5)
115#define MT_TXWI_FLAGS_TXOP GENMASK(9, 8)
116#define MT_TXWI_FLAGS_CWMIN GENMASK(12, 10)
117#define MT_TXWI_FLAGS_NO_RATE_FALLBACK BIT(13)
118#define MT_TXWI_FLAGS_TX_RPT BIT(14)
119#define MT_TXWI_FLAGS_TX_RATE_LUT BIT(15)
120
121#define MT_TXWI_RATE_MCS GENMASK(6, 0)
122#define MT_TXWI_RATE_BW BIT(7)
123#define MT_TXWI_RATE_SGI BIT(8)
124#define MT_TXWI_RATE_STBC GENMASK(10, 9)
125#define MT_TXWI_RATE_PHY_MODE GENMASK(15, 14)
126
127#define MT_TXWI_ACK_CTL_REQ BIT(0)
128#define MT_TXWI_ACK_CTL_NSEQ BIT(1)
129#define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2)
130
131#define MT_TXWI_LEN_BYTE_CNT GENMASK(11, 0)
132
133#define MT_TXWI_CTL_TX_POWER_ADJ GENMASK(3, 0)
134#define MT_TXWI_CTL_CHAN_CHECK_PKT BIT(4)
135#define MT_TXWI_CTL_PIFS_REV BIT(6)
136
137#define MT_TXWI_PKTID_PROBE BIT(7)
138
139u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb,
140 u8 *data, void *rxi);
141int mt76x0_mac_wcid_set_key(struct mt76x0_dev *dev, u8 idx,
142 struct ieee80211_key_conf *key);
143void mt76x0_mac_wcid_set_rate(struct mt76x0_dev *dev, struct mt76_wcid *wcid,
144 const struct ieee80211_tx_rate *rate);
145
146int mt76x0_mac_shared_key_setup(struct mt76x0_dev *dev, u8 vif_idx, u8 key_idx,
147 struct ieee80211_key_conf *key);
148u16 mt76x0_mac_tx_rate_val(struct mt76x0_dev *dev,
149 const struct ieee80211_tx_rate *rate, u8 *nss_val);
150struct mt76_tx_status
151mt76x0_mac_fetch_tx_status(struct mt76x0_dev *dev);
152void mt76x0_send_tx_status(struct mt76x0_dev *dev, struct mt76_tx_status *stat, u8 *update);
153
154#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
new file mode 100644
index 000000000000..cf6ffb1ba4a2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -0,0 +1,403 @@
1/*
2 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
3 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
4 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include "mt76x0.h"
17#include "mac.h"
18#include <linux/etherdevice.h>
19
20static int mt76x0_start(struct ieee80211_hw *hw)
21{
22 struct mt76x0_dev *dev = hw->priv;
23 int ret;
24
25 mutex_lock(&dev->mutex);
26
27 ret = mt76x0_mac_start(dev);
28 if (ret)
29 goto out;
30
31 ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
32 MT_CALIBRATE_INTERVAL);
33 ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
34 MT_CALIBRATE_INTERVAL);
35out:
36 mutex_unlock(&dev->mutex);
37 return ret;
38}
39
40static void mt76x0_stop(struct ieee80211_hw *hw)
41{
42 struct mt76x0_dev *dev = hw->priv;
43
44 mutex_lock(&dev->mutex);
45
46 cancel_delayed_work_sync(&dev->cal_work);
47 cancel_delayed_work_sync(&dev->mac_work);
48 mt76x0_mac_stop(dev);
49
50 mutex_unlock(&dev->mutex);
51}
52
53
54static int mt76x0_add_interface(struct ieee80211_hw *hw,
55 struct ieee80211_vif *vif)
56{
57 struct mt76x0_dev *dev = hw->priv;
58 struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
59 unsigned int idx;
60
61 idx = ffs(~dev->vif_mask);
62 if (!idx || idx > 8)
63 return -ENOSPC;
64
65 idx--;
66 dev->vif_mask |= BIT(idx);
67
68 mvif->idx = idx;
69 mvif->group_wcid.idx = GROUP_WCID(idx);
70 mvif->group_wcid.hw_key_idx = -1;
71
72 return 0;
73}
74
75static void mt76x0_remove_interface(struct ieee80211_hw *hw,
76 struct ieee80211_vif *vif)
77{
78 struct mt76x0_dev *dev = hw->priv;
79 struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
80 unsigned int wcid = mvif->group_wcid.idx;
81
82 dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG);
83}
84
85static int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
86{
87 struct mt76x0_dev *dev = hw->priv;
88 int ret = 0;
89
90 mutex_lock(&dev->mutex);
91
92 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
93 if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
94 dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
95 else
96 dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
97
98 mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
99 }
100
101 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
102 ieee80211_stop_queues(hw);
103 ret = mt76x0_phy_set_channel(dev, &hw->conf.chandef);
104 ieee80211_wake_queues(hw);
105 }
106
107 mutex_unlock(&dev->mutex);
108
109 return ret;
110}
111
/* mac80211 .configure_filter callback.
 *
 * MT_RX_FILTR_CFG bits are *drop* filters: a set bit makes the hardware
 * discard that frame class.  MT76_FILTER() therefore clears the drop
 * bit(s) when mac80211 requested FIF_<flag> (those frames must be
 * delivered) and sets them otherwise, while accumulating into @flags
 * the subset of requested filter flags this driver honours.
 */
static void
mt76_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
		      unsigned int *total_flags, u64 multicast)
{
	struct mt76x0_dev *dev = hw->priv;
	u32 flags = 0;

#define MT76_FILTER(_flag, _hw) do { \
		flags |= *total_flags & FIF_##_flag; \
		dev->rxfilter &= ~(_hw); \
		dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
	} while (0)

	mutex_lock(&dev->mutex);

	/* Never drop frames from other BSSes. */
	dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;

	MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
	MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
	MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
			     MT_RX_FILTR_CFG_CTS |
			     MT_RX_FILTR_CFG_CFEND |
			     MT_RX_FILTR_CFG_CFACK |
			     MT_RX_FILTR_CFG_BA |
			     MT_RX_FILTR_CFG_CTRL_RSV);
	MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);

	/* Report back which filter flags are actually implemented. */
	*total_flags = flags;
	mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);

	mutex_unlock(&dev->mutex);
}
144
/* mac80211 .bss_info_changed callback: push changed BSS parameters
 * (BSSID, rates, beacon interval, protection, slot time, ...) into
 * hardware registers under dev->mutex.
 */
static void
mt76x0_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			struct ieee80211_bss_conf *info, u32 changed)
{
	struct mt76x0_dev *dev = hw->priv;

	mutex_lock(&dev->mutex);

	if (changed & BSS_CHANGED_ASSOC)
		mt76x0_phy_con_cal_onoff(dev, info);

	if (changed & BSS_CHANGED_BSSID) {
		mt76x0_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid);

		/* Note: this is a hack because beacon_int is not changed
		 * on leave nor is any more appropriate event generated.
		 * rt2x00 doesn't seem to be bothered though.
		 */
		if (is_zero_ether_addr(info->bssid))
			mt76x0_mac_config_tsf(dev, false, 0);
	}

	if (changed & BSS_CHANGED_BASIC_RATES) {
		mt76_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates);
		/* Fixed HT/legacy rate-fallback tables; vendor-provided
		 * magic values, same as the mt76x2 driver uses.
		 */
		mt76_wr(dev, MT_HT_FBK_CFG0, 0x65432100);
		mt76_wr(dev, MT_HT_FBK_CFG1, 0xedcba980);
		mt76_wr(dev, MT_LG_FBK_CFG0, 0xedcba988);
		mt76_wr(dev, MT_LG_FBK_CFG1, 0x00002100);
	}

	if (changed & BSS_CHANGED_BEACON_INT)
		mt76x0_mac_config_tsf(dev, true, info->beacon_int);

	if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT)
		mt76x0_mac_set_protection(dev, info->use_cts_prot,
					  info->ht_operation_mode);

	if (changed & BSS_CHANGED_ERP_PREAMBLE)
		mt76x0_mac_set_short_preamble(dev, info->use_short_preamble);

	if (changed & BSS_CHANGED_ERP_SLOT) {
		/* 802.11 short vs. long slot time, in microseconds. */
		int slottime = info->use_short_slot ? 9 : 20;

		mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
			       MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
	}

	if (changed & BSS_CHANGED_ASSOC)
		mt76x0_phy_recalibrate_after_assoc(dev);

	mutex_unlock(&dev->mutex);
}
197
/* mac80211 .sta_add callback: allocate a hardware WCID entry for the
 * station, program its address/vif into the WCID table, and publish it
 * for the RX path via RCU.
 */
static int
mt76x0_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	       struct ieee80211_sta *sta)
{
	struct mt76x0_dev *dev = hw->priv;
	struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
	struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
	int ret = 0;
	int idx = 0;

	mutex_lock(&dev->mutex);

	idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
	if (idx < 0) {
		ret = -ENOSPC;
		goto out;
	}

	msta->wcid.idx = idx;
	msta->wcid.hw_key_idx = -1;
	mt76x0_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
	/* Stop dropping frames for this WCID now that it is valid. */
	mt76_clear(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
	rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
	/* Recompute the global A-MPDU density limit across all stations. */
	mt76x0_mac_set_ampdu_factor(dev);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
228
/* mac80211 .sta_remove callback: unpublish the WCID entry, re-enable
 * the hardware drop filter for it, and free the WCID slot.
 */
static int
mt76x0_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  struct ieee80211_sta *sta)
{
	struct mt76x0_dev *dev = hw->priv;
	struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
	int idx = msta->wcid.idx;

	mutex_lock(&dev->mutex);
	rcu_assign_pointer(dev->wcid[idx], NULL);
	mt76_set(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
	dev->wcid_mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
	/* Clear the WCID table entry (NULL address, vif 0). */
	mt76x0_mac_wcid_setup(dev, idx, 0, NULL);
	mt76x0_mac_set_ampdu_factor(dev);
	mutex_unlock(&dev->mutex);

	return 0;
}
247
/* mac80211 .sta_notify callback: intentionally a no-op -- no
 * per-station power-save bookkeeping is done by this driver.
 */
static void
mt76x0_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
{
}
253
/* mac80211 .sw_scan_start callback: freeze gain control for the scan --
 * stop the calibration work and save the current AGC state so it can be
 * restored in mt76x0_sw_scan_complete().
 */
static void
mt76x0_sw_scan(struct ieee80211_hw *hw,
	       struct ieee80211_vif *vif,
	       const u8 *mac_addr)
{
	struct mt76x0_dev *dev = hw->priv;

	cancel_delayed_work_sync(&dev->cal_work);
	mt76x0_agc_save(dev);
	set_bit(MT76_SCANNING, &dev->mt76.state);
}
265
/* mac80211 .sw_scan_complete callback: restore the AGC state saved at
 * scan start and resume the periodic calibration work.
 */
static void
mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
			struct ieee80211_vif *vif)
{
	struct mt76x0_dev *dev = hw->priv;

	mt76x0_agc_restore(dev);
	clear_bit(MT76_SCANNING, &dev->mt76.state);

	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
				     MT_CALIBRATE_INTERVAL);
}
278
/* mac80211 .set_key callback.
 *
 * Pairwise keys (with a station) go into the per-station WCID entry;
 * group keys (no station) go into the per-vif shared-key slots, and the
 * vif's group WCID is updated as well so TX uses the right key.
 * On SET_KEY the hardware key slot is recorded in key->hw_key_idx; on
 * disable, @key is NULLed so the helpers clear the slot.
 */
static int
mt76x0_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
	       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
	       struct ieee80211_key_conf *key)
{
	struct mt76x0_dev *dev = hw->priv;
	struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
	struct mt76_sta *msta = sta ? (struct mt76_sta *) sta->drv_priv : NULL;
	struct mt76_wcid *wcid = msta ? &msta->wcid : &mvif->group_wcid;
	int idx = key->keyidx;
	int ret;

	if (cmd == SET_KEY) {
		key->hw_key_idx = wcid->idx;
		wcid->hw_key_idx = idx;
	} else {
		/* Only forget the WCID's key if this key was the active one. */
		if (idx == wcid->hw_key_idx)
			wcid->hw_key_idx = -1;

		key = NULL;
	}

	if (!msta) {
		/* Group key: update the group WCID when installing, or when
		 * removing the key that WCID currently points at.
		 */
		if (key || wcid->hw_key_idx == idx) {
			ret = mt76x0_mac_wcid_set_key(dev, wcid->idx, key);
			if (ret)
				return ret;
		}

		return mt76x0_mac_shared_key_setup(dev, mvif->idx, idx, key);
	}

	return mt76x0_mac_wcid_set_key(dev, msta->wcid.idx, key);
}
313
/* mac80211 .set_rts_threshold callback: program the RTS length
 * threshold straight into the TX RTS config register.
 */
static int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct mt76x0_dev *dev = hw->priv;

	mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value);

	return 0;
}
322
/* mac80211 .ampdu_action callback.
 *
 * RX aggregation is toggled per-TID via bit (16 + tid) of the second
 * word of the station's WCID table entry.  For TX, the hardware does
 * the aggregation itself; the driver only tracks the starting sequence
 * number and signals session state back to mac80211.
 */
static int
mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  struct ieee80211_ampdu_params *params)
{
	struct mt76x0_dev *dev = hw->priv;
	struct ieee80211_sta *sta = params->sta;
	enum ieee80211_ampdu_mlme_action action = params->action;
	u16 tid = params->tid;
	u16 *ssn = &params->ssn;
	struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;

	WARN_ON(msta->wcid.idx > N_WCIDS);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
		break;
	case IEEE80211_AMPDU_RX_STOP:
		mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ieee80211_send_bar(vif, sta->addr, tid, msta->agg_ssn[tid]);
		break;
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		/* Nothing to flush: the hardware handles TX aggregation. */
		break;
	case IEEE80211_AMPDU_TX_START:
		/* Store SSN in the frame sequence-number format (<< 4). */
		msta->agg_ssn[tid] = *ssn << 4;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;
	case IEEE80211_AMPDU_TX_STOP_CONT:
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;
	}

	return 0;
}
360
361static void
362mt76_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
363 struct ieee80211_sta *sta)
364{
365 struct mt76x0_dev *dev = hw->priv;
366 struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
367 struct ieee80211_sta_rates *rates;
368 struct ieee80211_tx_rate rate = {};
369
370 rcu_read_lock();
371 rates = rcu_dereference(sta->rates);
372
373 if (!rates)
374 goto out;
375
376 rate.idx = rates->rate[0].idx;
377 rate.flags = rates->rate[0].flags;
378 mt76x0_mac_wcid_set_rate(dev, &msta->wcid, &rate);
379
380out:
381 rcu_read_unlock();
382}
383
/* mac80211 callback table for the mt76x0 driver. */
const struct ieee80211_ops mt76x0_ops = {
	.tx = mt76x0_tx,
	.start = mt76x0_start,
	.stop = mt76x0_stop,
	.add_interface = mt76x0_add_interface,
	.remove_interface = mt76x0_remove_interface,
	.config = mt76x0_config,
	.configure_filter = mt76_configure_filter,
	.bss_info_changed = mt76x0_bss_info_changed,
	.sta_add = mt76x0_sta_add,
	.sta_remove = mt76x0_sta_remove,
	.sta_notify = mt76x0_sta_notify,
	.set_key = mt76x0_set_key,
	.conf_tx = mt76x0_conf_tx,
	.sw_scan_start = mt76x0_sw_scan,
	.sw_scan_complete = mt76x0_sw_scan_complete,
	.ampdu_action = mt76_ampdu_action,
	.sta_rate_tbl_update = mt76_sta_rate_tbl_update,
	.set_rts_threshold = mt76x0_set_rts_threshold,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c
new file mode 100644
index 000000000000..8affacbab90a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.c
@@ -0,0 +1,656 @@
1/*
2 * (c) Copyright 2002-2010, Ralink Technology, Inc.
3 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
4 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
5 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2
9 * as published by the Free Software Foundation
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/kernel.h>
18#include <linux/firmware.h>
19#include <linux/delay.h>
20#include <linux/usb.h>
21#include <linux/skbuff.h>
22
23#include "mt76x0.h"
24#include "dma.h"
25#include "mcu.h"
26#include "usb.h"
27#include "trace.h"
28
29#define MCU_FW_URB_MAX_PAYLOAD 0x38f8
30#define MCU_FW_URB_SIZE (MCU_FW_URB_MAX_PAYLOAD + 12)
31#define MCU_RESP_URB_SIZE 1024
32
/* The MCU writes 1 to MT_MCU_COM_REG0 once its firmware has booted. */
static inline int firmware_running(struct mt76x0_dev *dev)
{
	return mt76_rr(dev, MT_MCU_COM_REG0) == 1;
}
37
/* Append a little-endian u32 to the skb (handles unaligned tails). */
static inline void skb_put_le32(struct sk_buff *skb, u32 val)
{
	put_unaligned_le32(val, skb_put(skb, 4));
}
42
/* Prepend the USB DMA header marking this skb as an in-band MCU command
 * with the given sequence number.  The wrap can only fail on a
 * malformed skb, hence WARN_ON rather than error propagation.
 */
static inline void mt76x0_dma_skb_wrap_cmd(struct sk_buff *skb,
					   u8 seq, enum mcu_cmd cmd)
{
	WARN_ON(mt76x0_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND,
				    FIELD_PREP(MT_TXD_CMD_SEQ, seq) |
				    FIELD_PREP(MT_TXD_CMD_TYPE, cmd)));
}
50
51static inline void trace_mt76x0_mcu_msg_send_cs(struct mt76_dev *dev,
52 struct sk_buff *skb, bool need_resp)
53{
54 u32 i, csum = 0;
55
56 for (i = 0; i < skb->len / 4; i++)
57 csum ^= get_unaligned_le32(skb->data + i * 4);
58
59 trace_mt76x0_mcu_msg_send(dev, skb, csum, need_resp);
60}
61
62static struct sk_buff *
63mt76x0_mcu_msg_alloc(struct mt76x0_dev *dev, const void *data, int len)
64{
65 struct sk_buff *skb;
66
67 WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */
68
69 skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
70 if (skb) {
71 skb_reserve(skb, MT_DMA_HDR_LEN);
72 memcpy(skb_put(skb, len), data, len);
73 }
74 return skb;
75}
76
/* Parse a register-read response from the MCU into dev->mcu.reg_pairs.
 *
 * The response buffer starts with a 4-byte RX descriptor and ends with
 * a 4-byte trailer (hence buf += 4, len -= 8).  Burst reads return bare
 * values for consecutive registers; random reads return (reg, value)
 * pairs which are sanity-checked against the request.
 */
static void mt76x0_read_resp_regs(struct mt76x0_dev *dev, int len)
{
	int i;
	int n = dev->mcu.reg_pairs_len;
	u8 *buf = dev->mcu.resp.buf;

	buf += 4;
	len -= 8;

	if (dev->mcu.burst_read) {
		u32 reg = dev->mcu.reg_pairs[0].reg - dev->mcu.reg_base;

		WARN_ON_ONCE(len/4 != n);
		for (i = 0; i < n; i++) {
			u32 val = get_unaligned_le32(buf + 4*i);

			dev->mcu.reg_pairs[i].reg = reg++;
			dev->mcu.reg_pairs[i].value = val;
		}
	} else {
		WARN_ON_ONCE(len/8 != n);
		for (i = 0; i < n; i++) {
			u32 reg = get_unaligned_le32(buf + 8*i) - dev->mcu.reg_base;
			u32 val = get_unaligned_le32(buf + 8*i + 4);

			WARN_ON_ONCE(dev->mcu.reg_pairs[i].reg != reg);
			dev->mcu.reg_pairs[i].value = val;
		}
	}
}
107
/* Wait (up to 5 x 300 ms) for the MCU response matching sequence number
 * @seq on the command-response endpoint.
 *
 * Each iteration: wait for the response URB to complete, snapshot the
 * RX descriptor and URB status, resubmit the URB for the next response,
 * then check whether this response is the CMD_DONE event for @seq.
 * Mismatched responses are logged and skipped.
 */
static int mt76x0_mcu_wait_resp(struct mt76x0_dev *dev, u8 seq)
{
	struct urb *urb = dev->mcu.resp.urb;
	u32 rxfce;
	int urb_status, ret, try = 5;

	while (try--) {
		if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl,
						 msecs_to_jiffies(300))) {
			dev_warn(dev->mt76.dev, "Warning: %s retrying\n", __func__);
			continue;
		}

		/* Make copies of important data before reusing the urb */
		rxfce = get_unaligned_le32(dev->mcu.resp.buf);
		/* Zero unless the URB actually failed. */
		urb_status = urb->status * mt76x0_urb_has_error(urb);

		if (urb_status == 0 && dev->mcu.reg_pairs)
			mt76x0_read_resp_regs(dev, urb->actual_length);

		ret = mt76x0_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
					    &dev->mcu.resp, GFP_KERNEL,
					    mt76x0_complete_urb,
					    &dev->mcu.resp_cmpl);
		if (ret)
			return ret;

		if (urb_status)
			dev_err(dev->mt76.dev, "Error: MCU resp urb failed:%d\n",
				urb_status);

		if (FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
		    FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
			return 0;

		dev_err(dev->mt76.dev, "Error: MCU resp evt:%lx seq:%hhx-%lx!\n",
			FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
			seq, FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
	}

	dev_err(dev->mt76.dev, "Error: %s timed out\n", __func__);
	return -ETIMEDOUT;
}
151
/* Send one MCU command synchronously over the in-band command endpoint.
 * Caller holds dev->mcu.mutex and retains ownership of @skb.
 *
 * If @wait_resp, a non-zero 4-bit sequence number is assigned and the
 * function blocks in mt76x0_mcu_wait_resp() until the matching CMD_DONE
 * event arrives; otherwise seq stays 0 (the "no response" marker).
 */
static int
__mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb,
		      enum mcu_cmd cmd, bool wait_resp)
{
	struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
	unsigned cmd_pipe = usb_sndbulkpipe(usb_dev,
					    dev->out_ep[MT_EP_OUT_INBAND_CMD]);
	int sent, ret;
	u8 seq = 0;

	if (wait_resp)
		/* Skip 0: it means "no response expected". */
		while (!seq)
			seq = ++dev->mcu.msg_seq & 0xf;

	mt76x0_dma_skb_wrap_cmd(skb, seq, cmd);

	if (dev->mcu.resp_cmpl.done)
		dev_err(dev->mt76.dev, "Error: MCU response pre-completed!\n");

	trace_mt76x0_mcu_msg_send_cs(&dev->mt76, skb, wait_resp);
	trace_mt76x0_submit_urb_sync(&dev->mt76, cmd_pipe, skb->len);

	ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500);
	if (ret) {
		dev_err(dev->mt76.dev, "Error: send MCU cmd failed:%d\n", ret);
		goto out;
	}
	if (sent != skb->len)
		dev_err(dev->mt76.dev, "Error: %s sent != skb->len\n", __func__);

	if (wait_resp)
		ret = mt76x0_mcu_wait_resp(dev, seq);

out:
	return ret;
}
188
189static int
190mt76x0_mcu_msg_send(struct mt76x0_dev *dev, struct sk_buff *skb,
191 enum mcu_cmd cmd, bool wait_resp)
192{
193 int ret;
194
195 if (test_bit(MT76_REMOVED, &dev->mt76.state))
196 return 0;
197
198 mutex_lock(&dev->mcu.mutex);
199 ret = __mt76x0_mcu_msg_send(dev, skb, cmd, wait_resp);
200 mutex_unlock(&dev->mcu.mutex);
201
202 consume_skb(skb);
203
204 return ret;
205}
206
207int mt76x0_mcu_function_select(struct mt76x0_dev *dev,
208 enum mcu_function func, u32 val)
209{
210 struct sk_buff *skb;
211 struct {
212 __le32 id;
213 __le32 value;
214 } __packed __aligned(4) msg = {
215 .id = cpu_to_le32(func),
216 .value = cpu_to_le32(val),
217 };
218
219 skb = mt76x0_mcu_msg_alloc(dev, &msg, sizeof(msg));
220 if (!skb)
221 return -ENOMEM;
222 return mt76x0_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5);
223}
224
225int
226mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val)
227{
228 struct sk_buff *skb;
229 struct {
230 __le32 id;
231 __le32 value;
232 } __packed __aligned(4) msg = {
233 .id = cpu_to_le32(cal),
234 .value = cpu_to_le32(val),
235 };
236
237 skb = mt76x0_mcu_msg_alloc(dev, &msg, sizeof(msg));
238 if (!skb)
239 return -ENOMEM;
240 return mt76x0_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
241}
242
243int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base,
244 const struct mt76_reg_pair *data, int n)
245{
246 const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
247 struct sk_buff *skb;
248 int cnt, i, ret;
249
250 if (!n)
251 return 0;
252
253 cnt = min(max_vals_per_cmd, n);
254
255 skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
256 if (!skb)
257 return -ENOMEM;
258 skb_reserve(skb, MT_DMA_HDR_LEN);
259
260 for (i = 0; i < cnt; i++) {
261 skb_put_le32(skb, base + data[i].reg);
262 skb_put_le32(skb, data[i].value);
263 }
264
265 ret = mt76x0_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n);
266 if (ret)
267 return ret;
268
269 return mt76x0_write_reg_pairs(dev, base, data + cnt, n - cnt);
270}
271
/* Read @n registers via CMD_RANDOM_READ.  The request carries the
 * (reg, value) pairs to be filled; results land in @data through
 * dev->mcu.reg_pairs, populated by mt76x0_read_resp_regs() from the
 * response URB while the command waits under dev->mcu.mutex.
 * Unlike the write path, a request larger than one in-band packet is
 * rejected (-EINVAL) rather than chunked.
 */
int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base,
			  struct mt76_reg_pair *data, int n)
{
	const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
	struct sk_buff *skb;
	int cnt, i, ret;

	if (!n)
		return 0;

	cnt = min(max_vals_per_cmd, n);
	if (cnt != n)
		return -EINVAL;

	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	for (i = 0; i < cnt; i++) {
		skb_put_le32(skb, base + data[i].reg);
		skb_put_le32(skb, data[i].value);
	}

	mutex_lock(&dev->mcu.mutex);

	/* Publish the destination array for the response parser. */
	dev->mcu.reg_pairs = data;
	dev->mcu.reg_pairs_len = n;
	dev->mcu.reg_base = base;
	dev->mcu.burst_read = false;

	ret = __mt76x0_mcu_msg_send(dev, skb, CMD_RANDOM_READ, true);

	dev->mcu.reg_pairs = NULL;

	mutex_unlock(&dev->mcu.mutex);

	consume_skb(skb);

	return ret;

}
314
/* Burst-write @n consecutive u32 values starting at WLAN-memmap
 * @offset via CMD_BURST_WRITE.  The command payload is the start
 * address followed by the values; larger arrays recurse in chunks and
 * only the final chunk waits for the MCU response.
 */
int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset,
			    const u32 *data, int n)
{
	/* One slot of the packet is taken by the start address. */
	const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
	struct sk_buff *skb;
	int cnt, i, ret;

	if (!n)
		return 0;

	cnt = min(max_regs_per_cmd, n);

	skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset);
	for (i = 0; i < cnt; i++)
		skb_put_le32(skb, data[i]);

	ret = mt76x0_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n);
	if (ret)
		return ret;

	return mt76x0_burst_write_regs(dev, offset + cnt * 4,
				       data + cnt, n - cnt);
}
343
#if 0
/* Burst-read counterpart of mt76x0_burst_write_regs(), using
 * CMD_BURST_READ with a start register and a count.
 * NOTE(review): compiled out (#if 0) and unreferenced -- kept as a
 * template for future use; consider removing if it stays unused.
 */
static int mt76x0_burst_read_regs(struct mt76x0_dev *dev, u32 base,
				  struct mt76_reg_pair *data, int n)
{
	const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
	struct sk_buff *skb;
	int cnt, ret;

	if (!n)
		return 0;

	cnt = min(max_vals_per_cmd, n);
	if (cnt != n)
		return -EINVAL;

	skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	skb_put_le32(skb, base + data[0].reg);
	skb_put_le32(skb, n);

	mutex_lock(&dev->mcu.mutex);

	dev->mcu.reg_pairs = data;
	dev->mcu.reg_pairs_len = n;
	dev->mcu.reg_base = base;
	dev->mcu.burst_read = true;

	ret = __mt76x0_mcu_msg_send(dev, skb, CMD_BURST_READ, true);

	dev->mcu.reg_pairs = NULL;

	mutex_unlock(&dev->mcu.mutex);

	consume_skb(skb);

	return ret;
}
#endif
385
/* On-disk firmware image header (little-endian, as shipped in the
 * MT7610 firmware blob).
 */
struct mt76_fw_header {
	__le32 ilm_len;		/* ILM segment length, includes the IVB */
	__le32 dlm_len;		/* DLM segment length */
	__le16 build_ver;
	__le16 fw_ver;		/* BCD-style: major.minor.patch nibbles */
	u8 pad[4];
	char build_time[16];	/* not NUL-terminated; print with %.16s */
};

/* Full firmware image layout: header, interrupt vector block, then the
 * ILM payload immediately followed by the DLM payload.
 */
struct mt76_fw {
	struct mt76_fw_header hdr;
	u8 ivb[MT_MCU_IVB_SIZE];
	u8 ilm[];
};
400
/* Push one firmware fragment (@data, @len bytes) to device address
 * @dst_addr over the in-band command endpoint.
 *
 * The shared DMA buffer is reused for every fragment: a local copy of
 * the descriptor is made only to fake the transfer length.  The FCE DMA
 * address/length registers are programmed first, then the URB is sent
 * and waited on synchronously, and finally the FCE descriptor index is
 * bumped to tell the device a fragment arrived.
 */
static int __mt76x0_dma_fw(struct mt76x0_dev *dev,
			   const struct mt76x0_dma_buf *dma_buf,
			   const void *data, u32 len, u32 dst_addr)
{
	DECLARE_COMPLETION_ONSTACK(cmpl);
	struct mt76x0_dma_buf buf = *dma_buf; /* we need to fake length */
	__le32 reg;
	u32 val;
	int ret;

	/* Build the DMA header: command type, CPU TX port, payload length. */
	reg = cpu_to_le32(FIELD_PREP(MT_TXD_INFO_TYPE, DMA_COMMAND) |
			  FIELD_PREP(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
			  FIELD_PREP(MT_TXD_INFO_LEN, len));
	memcpy(buf.buf, &reg, sizeof(reg));
	memcpy(buf.buf + sizeof(reg), data, len);
	/* Zero 8-byte trailer/padding after the payload. */
	memset(buf.buf + sizeof(reg) + len, 0, 8);

	ret = mt76x0_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
				      MT_FCE_DMA_ADDR, dst_addr);
	if (ret)
		return ret;
	len = roundup(len, 4);
	ret = mt76x0_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
				      MT_FCE_DMA_LEN, len << 16);
	if (ret)
		return ret;

	buf.len = MT_DMA_HDR_LEN + len + 4;
	ret = mt76x0_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD,
				    &buf, GFP_KERNEL,
				    mt76x0_complete_urb, &cmpl);
	if (ret)
		return ret;

	if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) {
		dev_err(dev->mt76.dev, "Error: firmware upload timed out\n");
		usb_kill_urb(buf.urb);
		return -ETIMEDOUT;
	}
	if (mt76x0_urb_has_error(buf.urb)) {
		dev_err(dev->mt76.dev, "Error: firmware upload urb failed:%d\n",
			buf.urb->status);
		return buf.urb->status;
	}

	/* Advance the FCE CPU descriptor index to signal the fragment. */
	val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
	val++;
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);

	/* Brief settle delay between fragments (vendor-driver behavior). */
	msleep(5);

	return 0;
}
454
/* Upload a firmware segment in MCU_FW_URB_MAX_PAYLOAD-sized chunks,
 * recursing until @len is exhausted.  Chunks go to consecutive device
 * addresses starting at @dst_addr.
 */
static int
mt76x0_dma_fw(struct mt76x0_dev *dev, struct mt76x0_dma_buf *dma_buf,
	      const void *data, int len, u32 dst_addr)
{
	int n, ret;

	if (len == 0)
		return 0;

	n = min(MCU_FW_URB_MAX_PAYLOAD, len);
	ret = __mt76x0_dma_fw(dev, dma_buf, data, n, dst_addr);
	if (ret)
		return ret;

	/* Disabled MCU-ready poll between chunks, kept from the vendor
	 * driver for reference.
	 */
#if 0
	if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500))
		return -ETIMEDOUT;
#endif

	return mt76x0_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n);
}
476
/* Upload the parsed firmware image: ILM (minus the IVB, which occupies
 * device addresses [0, MT_MCU_IVB_SIZE)), then DLM, then the IVB via a
 * vendor control request, and finally poll (up to ~1 s) for the MCU to
 * report it is running.
 */
static int
mt76x0_upload_firmware(struct mt76x0_dev *dev, const struct mt76_fw *fw)
{
	struct mt76x0_dma_buf dma_buf;
	void *ivb;
	u32 ilm_len, dlm_len;
	int i, ret;

	/* The IVB is sent through a control request, which may need a
	 * DMA-able copy rather than a pointer into the firmware blob.
	 */
	ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL);
	if (!ivb)
		return -ENOMEM;
	if (mt76x0_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) {
		ret = -ENOMEM;
		/* NOTE(review): this path frees dma_buf below even though
		 * allocation failed -- assumes mt76x0_usb_free_buf() copes
		 * with a partially/un-initialized buffer; verify.
		 */
		goto error;
	}

	ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb);
	dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %zu\n",
		ilm_len, sizeof(fw->ivb));
	ret = mt76x0_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb));
	if (ret)
		goto error;

	dlm_len = le32_to_cpu(fw->hdr.dlm_len);
	dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len);
	/* DLM data follows the ILM payload in the image. */
	ret = mt76x0_dma_fw(dev, &dma_buf, fw->ilm + ilm_len,
			    dlm_len, MT_MCU_DLM_OFFSET);
	if (ret)
		goto error;

	ret = mt76x0_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
				    0x12, 0, ivb, sizeof(fw->ivb));
	if (ret < 0)
		goto error;
	ret = 0;

	for (i = 100; i && !firmware_running(dev); i--)
		msleep(10);
	if (!i) {
		ret = -ETIMEDOUT;
		goto error;
	}

	dev_dbg(dev->mt76.dev, "Firmware running!\n");
error:
	kfree(ivb);
	mt76x0_usb_free_buf(dev, &dma_buf);

	return ret;
}
527
/* Request the MT7610 firmware from userspace, validate its header and
 * size, prepare the USB DMA / FCE engines, and upload it.  A no-op if
 * the MCU already reports running firmware (e.g. warm reboot).
 */
static int mt76x0_load_firmware(struct mt76x0_dev *dev)
{
	const struct firmware *fw;
	const struct mt76_fw_header *hdr;
	int len, ret;
	u32 val;

	mt76_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
				      MT_USB_DMA_CFG_TX_BULK_EN));

	if (firmware_running(dev))
		return 0;

	ret = request_firmware(&fw, MT7610_FIRMWARE, dev->mt76.dev);
	if (ret)
		return ret;

	if (!fw || !fw->data || fw->size < sizeof(*hdr))
		goto err_inv_fw;

	hdr = (const struct mt76_fw_header *) fw->data;

	/* The ILM segment must at least contain the IVB. */
	if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
		goto err_inv_fw;

	/* Total image size must match header + ILM + DLM exactly. */
	len = sizeof(*hdr);
	len += le32_to_cpu(hdr->ilm_len);
	len += le32_to_cpu(hdr->dlm_len);

	if (fw->size != len)
		goto err_inv_fw;

	val = le16_to_cpu(hdr->fw_ver);
	dev_dbg(dev->mt76.dev,
		"Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
		(val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
		le16_to_cpu(hdr->build_ver), hdr->build_time);

	len = le32_to_cpu(hdr->ilm_len);

	/* Vendor init sequence; 0x1004 has no named definition here. */
	mt76_wr(dev, 0x1004, 0x2c);

	mt76_set(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
				       MT_USB_DMA_CFG_TX_BULK_EN) |
		 FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20));
	mt76x0_vendor_reset(dev);
	msleep(5);
/*
	mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
				       MT_PBF_CFG_TX1Q_EN |
				       MT_PBF_CFG_TX2Q_EN |
				       MT_PBF_CFG_TX3Q_EN));
*/

	mt76_wr(dev, MT_FCE_PSE_CTRL, 1);

	/* FCE tx_fs_base_ptr */
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
	/* FCE tx_fs_max_cnt */
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
	/* FCE pdma enable */
	mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
	/* FCE skip_fs_en */
	mt76_wr(dev, MT_FCE_SKIP_FS, 3);

	/* Pulse the TX_WL_DROP bit: set then clear (vendor sequence). */
	val = mt76_rr(dev, MT_USB_DMA_CFG);
	val |= MT_USB_DMA_CFG_TX_WL_DROP;
	mt76_wr(dev, MT_USB_DMA_CFG, val);
	val &= ~MT_USB_DMA_CFG_TX_WL_DROP;
	mt76_wr(dev, MT_USB_DMA_CFG, val);

	ret = mt76x0_upload_firmware(dev, (const struct mt76_fw *)fw->data);
	release_firmware(fw);

	mt76_wr(dev, MT_FCE_PSE_CTRL, 1);

	return ret;

err_inv_fw:
	dev_err(dev->mt76.dev, "Invalid firmware image\n");
	release_firmware(fw);
	return -ENOENT;
}
611
612int mt76x0_mcu_init(struct mt76x0_dev *dev)
613{
614 int ret;
615
616 mutex_init(&dev->mcu.mutex);
617
618 ret = mt76x0_load_firmware(dev);
619 if (ret)
620 return ret;
621
622 set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
623
624 return 0;
625}
626
/* Set up the MCU command-response path: select the command queue, then
 * allocate and submit the response URB/buffer.
 */
int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev)
{
	int ret;

	ret = mt76x0_mcu_function_select(dev, Q_SELECT, 1);
	if (ret)
		return ret;

	init_completion(&dev->mcu.resp_cmpl);
	if (mt76x0_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) {
		/* NOTE(review): frees after a failed alloc -- assumes
		 * mt76x0_usb_free_buf() handles a partially-initialized
		 * buffer; verify against its implementation.
		 */
		mt76x0_usb_free_buf(dev, &dev->mcu.resp);
		return -ENOMEM;
	}

	ret = mt76x0_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
				    &dev->mcu.resp, GFP_KERNEL,
				    mt76x0_complete_urb, &dev->mcu.resp_cmpl);
	if (ret) {
		mt76x0_usb_free_buf(dev, &dev->mcu.resp);
		return ret;
	}

	return 0;
}
651
/* Tear down the command-response path: stop the in-flight response URB
 * before releasing its buffer.
 */
void mt76x0_mcu_cmd_deinit(struct mt76x0_dev *dev)
{
	usb_kill_urb(dev->mcu.resp.urb);
	mt76x0_usb_free_buf(dev, &dev->mcu.resp);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
new file mode 100644
index 000000000000..8c2f77f4c3f5
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
@@ -0,0 +1,101 @@
1/*
2 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
3 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef __MT76X0U_MCU_H
16#define __MT76X0U_MCU_H
17
18struct mt76x0_dev;
19
/* Register definitions */
#define MT_MCU_RESET_CTL		0x070C
#define MT_MCU_INT_LEVEL		0x0718
#define MT_MCU_COM_REG0			0x0730	/* set to 1 by running firmware */
#define MT_MCU_COM_REG1			0x0734
#define MT_MCU_COM_REG2			0x0738
#define MT_MCU_COM_REG3			0x073C

#define MT_MCU_IVB_SIZE			0x40	/* interrupt vector block size */
#define MT_MCU_DLM_OFFSET		0x80000	/* DLM load address */

#define MT_MCU_MEMMAP_WLAN		0x00410000
/* We use same space for BBP as for MAC regs
 * #define MT_MCU_MEMMAP_BBP		0x40000000
 */
#define MT_MCU_MEMMAP_RF		0x80000000

/* Maximum payload of one in-band MCU command packet, in bytes. */
#define INBAND_PACKET_MAX_LEN		192

/* In-band MCU command opcodes (values fixed by firmware ABI). */
enum mcu_cmd {
	CMD_FUN_SET_OP = 1,
	CMD_LOAD_CR = 2,
	CMD_INIT_GAIN_OP = 3,
	CMD_DYNC_VGA_OP = 6,
	CMD_TDLS_CH_SW = 7,
	CMD_BURST_WRITE = 8,
	CMD_READ_MODIFY_WRITE = 9,
	CMD_RANDOM_READ = 10,
	CMD_BURST_READ = 11,
	CMD_RANDOM_WRITE = 12,
	CMD_LED_MODE_OP = 16,
	CMD_POWER_SAVING_OP = 20,
	CMD_WOW_CONFIG = 21,
	CMD_WOW_QUERY = 22,
	CMD_WOW_FEATURE = 24,
	CMD_CARRIER_DETECT_OP = 28,
	/* "RADOR" is a vendor typo for RADAR; kept as-is since callers
	 * reference this name.
	 */
	CMD_RADOR_DETECT_OP = 29,
	CMD_SWITCH_CHANNEL_OP = 30,
	CMD_CALIBRATION_OP = 31,
	CMD_BEACON_OP = 32,
	CMD_ANTENNA_OP = 33,
};

/* Function ids for CMD_FUN_SET_OP. */
enum mcu_function {
	Q_SELECT = 1,
	BW_SETTING = 2,
	ATOMIC_TSSI_SETTING = 5,
};

/* Radio states for CMD_POWER_SAVING_OP. */
enum mcu_power_mode {
	RADIO_OFF = 0x30,
	RADIO_ON = 0x31,
	RADIO_OFF_AUTO_WAKEUP = 0x32,
	RADIO_OFF_ADVANCE = 0x33,
	RADIO_ON_ADVANCE = 0x34,
};

/* Calibration ids for CMD_CALIBRATION_OP (sequential from 1). */
enum mcu_calibrate {
	MCU_CAL_R = 1,
	MCU_CAL_RXDCOC,
	MCU_CAL_LC,
	MCU_CAL_LOFT,
	MCU_CAL_TXIQ,
	MCU_CAL_BW,
	MCU_CAL_DPD,
	MCU_CAL_RXIQ,
	MCU_CAL_TXDCOC,
	MCU_CAL_RX_GROUP_DELAY,
	MCU_CAL_TX_GROUP_DELAY,
};
90
91int mt76x0_mcu_init(struct mt76x0_dev *dev);
92int mt76x0_mcu_cmd_init(struct mt76x0_dev *dev);
93void mt76x0_mcu_cmd_deinit(struct mt76x0_dev *dev);
94
95int
96mt76x0_mcu_calibrate(struct mt76x0_dev *dev, enum mcu_calibrate cal, u32 val);
97
98int
99mt76x0_mcu_function_select(struct mt76x0_dev *dev, enum mcu_function func, u32 val);
100
101#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
new file mode 100644
index 000000000000..fc9857f61771
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -0,0 +1,330 @@
1/*
2 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
3 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
4 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef MT76X0U_H
17#define MT76X0U_H
18
19#include <linux/bitfield.h>
20#include <linux/kernel.h>
21#include <linux/device.h>
22#include <linux/mutex.h>
23#include <linux/usb.h>
24#include <linux/completion.h>
25#include <net/mac80211.h>
26#include <linux/debugfs.h>
27
28#include "../mt76.h"
29#include "regs.h"
30
31#define MT_CALIBRATE_INTERVAL (4 * HZ)
32
33#define MT_FREQ_CAL_INIT_DELAY (30 * HZ)
34#define MT_FREQ_CAL_CHECK_INTERVAL (10 * HZ)
35#define MT_FREQ_CAL_ADJ_INTERVAL (HZ / 2)
36
37#define MT_BBP_REG_VERSION 0x00
38
39#define MT_USB_AGGR_SIZE_LIMIT 21 /* * 1024B */
40#define MT_USB_AGGR_TIMEOUT 0x80 /* * 33ns */
41#define MT_RX_ORDER 3
42#define MT_RX_URB_SIZE (PAGE_SIZE << MT_RX_ORDER)
43
/* A single coherent-DMA buffer tied to a USB URB. */
struct mt76x0_dma_buf {
	struct urb *urb;
	void *buf;	/* CPU address of the coherent buffer */
	dma_addr_t dma;	/* device address of the same buffer */
	size_t len;
};
50
/* Per-device MCU communication state. */
struct mt76x0_mcu {
	struct mutex mutex;	/* serializes command/response exchanges */

	u8 msg_seq;		/* sequence number stamped into commands */

	struct mt76x0_dma_buf resp;	/* buffer for MCU responses */
	struct completion resp_cmpl;	/* signalled when a response arrives */

	/* Batched register writes pending submission to the MCU. */
	struct mt76_reg_pair *reg_pairs;
	unsigned int reg_pairs_len;
	u32 reg_base;
	bool burst_read;
};
64
/* Accumulated MAC hardware counters (see debugfs / stat work). */
struct mac_stats {
	u64 rx_stat[6];
	u64 tx_stat[6];
	u64 aggr_stat[2];
	u64 aggr_n[32];		/* histogram of A-MPDU aggregation sizes */
	u64 zero_len_del[2];
};
72
#define N_RX_ENTRIES	16
/* Ring of RX URBs; entries between @start and @end are submitted. */
struct mt76x0_rx_queue {
	struct mt76x0_dev *dev;

	struct mt76x0_dma_buf_rx {
		struct urb *urb;
		struct page *p;	/* MT_RX_ORDER pages backing the URB */
	} e[N_RX_ENTRIES];

	unsigned int start;
	unsigned int end;
	unsigned int entries;
	unsigned int pending;	/* completed URBs awaiting processing */
};
87
#define N_TX_ENTRIES	64

/* Ring of in-flight TX URBs and their skbs. */
struct mt76x0_tx_queue {
	struct mt76x0_dev *dev;

	struct mt76x0_dma_buf_tx {
		struct urb *urb;
		struct sk_buff *skb;	/* skb owned until URB completes */
	} e[N_TX_ENTRIES];

	unsigned int start;
	unsigned int end;
	unsigned int entries;
	unsigned int used;	/* number of occupied ring slots */
	unsigned int fifo_seq;
};
104
/* WCID allocation:
 *     0: mcast wcid
 *     1: bssid wcid
 *  1...: STAs
 * ...7e: group wcids
 *    7f: reserved
 */
#define N_WCIDS		128
/* Parenthesize the argument: the previous "(254 - idx)" expanded wrongly
 * for compound expressions such as GROUP_WCID(a + b).
 */
#define GROUP_WCID(idx)	(254 - (idx))
114
115struct mt76x0_eeprom_params;
116
117#define MT_EE_TEMPERATURE_SLOPE 39
118#define MT_FREQ_OFFSET_INVALID -128
119
120/* addr req mask */
121#define MT_VEND_TYPE_EEPROM BIT(31)
122#define MT_VEND_TYPE_CFG BIT(30)
123#define MT_VEND_TYPE_MASK (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)
124
125#define MT_VEND_ADDR(type, n) (MT_VEND_TYPE_##type | (n))
126
/* Channel bandwidth selector. */
enum mt_bw {
	MT_BW_20,
	MT_BW_40,
};
131
/**
 * struct mt76x0_dev - adapter structure
 * @lock:		protects @wcid->tx_rate.
 * @mac_lock:		locks out mac80211's tx status and rx paths.
 * @tx_lock:		protects @tx_q and changes of MT76_STATE_*_STATS
 *			flags in @state.
 * @rx_lock:		protects @rx_q.
 * @con_mon_lock:	protects @ap_bssid, @bcn_*, @avg_rssi.
 * @mutex:		ensures exclusive access from mac80211 callbacks.
 * @reg_atomic_mutex:	ensures atomicity of indirect register accesses
 *			(accesses to RF and BBP).
 * @hw_atomic_mutex:	ensures exclusive access to HW during critical
 *			operations (power management, channel switch).
 */
struct mt76x0_dev {
	struct mt76_dev mt76; /* must be first */

	struct mutex mutex;

	struct mutex usb_ctrl_mtx;	/* serializes USB control transfers */
	u8 data[32];			/* bounce buffer for control transfers */

	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;

	/* USB endpoint addresses and max packet sizes */
	u8 out_ep[__MT_EP_OUT_MAX];
	u16 out_max_packet;
	u8 in_ep[__MT_EP_IN_MAX];
	u16 in_max_packet;

	/* allocation bitmaps for WCIDs and vif indices */
	unsigned long wcid_mask[DIV_ROUND_UP(N_WCIDS, BITS_PER_LONG)];
	unsigned long vif_mask;

	struct mt76x0_mcu mcu;

	struct delayed_work cal_work;	/* periodic calibration (VGA/temp) */
	struct delayed_work mac_work;

	struct workqueue_struct *stat_wq;
	struct delayed_work stat_work;

	struct mt76_wcid *mon_wcid;
	struct mt76_wcid __rcu *wcid[N_WCIDS];

	spinlock_t mac_lock;

	const u16 *beacon_offsets;

	u8 macaddr[ETH_ALEN];
	struct mt76x0_eeprom_params *ee;	/* parsed EEPROM data */

	struct mutex reg_atomic_mutex;
	struct mutex hw_atomic_mutex;

	u32 rxfilter;
	u32 debugfs_reg;

	/* TX */
	spinlock_t tx_lock;
	struct mt76x0_tx_queue *tx_q;
	struct sk_buff_head tx_skb_done;

	atomic_t avg_ampdu_len;

	/* RX */
	spinlock_t rx_lock;
	struct mt76x0_rx_queue rx_q;

	/* Connection monitoring things */
	spinlock_t con_mon_lock;
	u8 ap_bssid[ETH_ALEN];

	s8 bcn_freq_off;
	u8 bcn_phy_mode;

	int avg_rssi; /* starts at 0 and converges */

	u8 agc_save;	/* AGC gain saved by mt76x0_agc_save() */
	u16 chainmask;

	struct mac_stats stats;
};
214
/* Per-station driver state keyed by hardware WCID. */
struct mt76x0_wcid {
	u8 idx;		/* WCID slot index */
	u8 hw_key_idx;

	u16 tx_rate;
	bool tx_rate_set;
	u8 tx_rate_nss;
};
223
/* Per-interface driver state (stored in ieee80211_vif drv_priv). */
struct mt76_vif {
	u8 idx;		/* slot in mt76x0_dev::vif_mask */

	struct mt76_wcid group_wcid;	/* wcid for group-addressed frames */
};
229
/* TX completion info read back from hardware status FIFO.
 * Packed and 2-byte aligned: layout is shared with status parsing code.
 */
struct mt76_tx_status {
	u8 valid:1;
	u8 success:1;
	u8 aggr:1;
	u8 ack_req:1;
	u8 is_probe:1;
	u8 wcid;
	u8 pktid;
	u8 retry;
	u16 rate;
} __packed __aligned(2);
241
/* Per-station state (stored in ieee80211_sta drv_priv). */
struct mt76_sta {
	struct mt76_wcid wcid;
	struct mt76_tx_status status;	/* last reported TX status */
	int n_frames;
	u16 agg_ssn[IEEE80211_NUM_TIDS];	/* A-MPDU starting seq per TID */
};
248
/* One register/value pair, as batched through the MCU. */
struct mt76_reg_pair {
	u32 reg;
	u32 value;
};
253
254struct mt76x0_rxwi;
255
256extern const struct ieee80211_ops mt76x0_ops;
257
258static inline bool is_mt7610e(struct mt76x0_dev *dev)
259{
260 /* TODO */
261 return false;
262}
263
264void mt76x0_init_debugfs(struct mt76x0_dev *dev);
265
266int mt76x0_wait_asic_ready(struct mt76x0_dev *dev);
267
/* Compatibility with mt76 */
/* Read-modify-write @_reg: clear the bits of mask @_field and set them to
 * the field-relative value @_val. @_field must be a compile-time constant
 * mask (FIELD_PREP requirement); it is evaluated twice.
 */
#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
271
272int mt76x0_write_reg_pairs(struct mt76x0_dev *dev, u32 base,
273 const struct mt76_reg_pair *data, int len);
274int mt76x0_read_reg_pairs(struct mt76x0_dev *dev, u32 base,
275 struct mt76_reg_pair *data, int len);
276int mt76x0_burst_write_regs(struct mt76x0_dev *dev, u32 offset,
277 const u32 *data, int n);
278void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr);
279
280/* Init */
281struct mt76x0_dev *mt76x0_alloc_device(struct device *dev);
282int mt76x0_init_hardware(struct mt76x0_dev *dev);
283int mt76x0_register_device(struct mt76x0_dev *dev);
284void mt76x0_cleanup(struct mt76x0_dev *dev);
285void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset);
286
287int mt76x0_mac_start(struct mt76x0_dev *dev);
288void mt76x0_mac_stop(struct mt76x0_dev *dev);
289
290/* PHY */
291void mt76x0_phy_init(struct mt76x0_dev *dev);
292int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev);
293void mt76x0_agc_save(struct mt76x0_dev *dev);
294void mt76x0_agc_restore(struct mt76x0_dev *dev);
295int mt76x0_phy_set_channel(struct mt76x0_dev *dev,
296 struct cfg80211_chan_def *chandef);
297void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev);
298int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi);
299void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev,
300 struct ieee80211_bss_conf *info);
301
302/* MAC */
303void mt76x0_mac_work(struct work_struct *work);
304void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
305 int ht_mode);
306void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb);
307void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval);
308void
309mt76x0_mac_wcid_setup(struct mt76x0_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
310void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev);
311
312/* TX */
313void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
314 struct sk_buff *skb);
315int mt76x0_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
316 u16 queue, const struct ieee80211_tx_queue_params *params);
317void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb);
318void mt76x0_tx_stat(struct work_struct *work);
319
320/* util */
321void mt76x0_remove_hdr_pad(struct sk_buff *skb);
322int mt76x0_insert_hdr_pad(struct sk_buff *skb);
323
324int mt76x0_dma_init(struct mt76x0_dev *dev);
325void mt76x0_dma_cleanup(struct mt76x0_dev *dev);
326
327int mt76x0_dma_enqueue_tx(struct mt76x0_dev *dev, struct sk_buff *skb,
328 struct mt76_wcid *wcid, int hw_q);
329
330#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
new file mode 100644
index 000000000000..5da7bfbe907f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -0,0 +1,1008 @@
1/*
2 * (c) Copyright 2002-2010, Ralink Technology, Inc.
3 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
4 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
5 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2
9 * as published by the Free Software Foundation
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include "mt76x0.h"
18#include "mcu.h"
19#include "eeprom.h"
20#include "trace.h"
21#include "phy.h"
22#include "initvals.h"
23#include "initvals_phy.h"
24
25#include <linux/etherdevice.h>
26
27static int
28mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value)
29{
30 int ret = 0;
31 u8 bank, reg;
32
33 if (test_bit(MT76_REMOVED, &dev->mt76.state))
34 return -ENODEV;
35
36 bank = MT_RF_BANK(offset);
37 reg = MT_RF_REG(offset);
38
39 if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank) > 8)
40 return -EINVAL;
41
42 mutex_lock(&dev->reg_atomic_mutex);
43
44 if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) {
45 ret = -ETIMEDOUT;
46 goto out;
47 }
48
49 mt76_wr(dev, MT_RF_CSR_CFG,
50 FIELD_PREP(MT_RF_CSR_CFG_DATA, value) |
51 FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
52 FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) |
53 MT_RF_CSR_CFG_WR |
54 MT_RF_CSR_CFG_KICK);
55 trace_mt76x0_rf_write(&dev->mt76, bank, offset, value);
56out:
57 mutex_unlock(&dev->reg_atomic_mutex);
58
59 if (ret < 0)
60 dev_err(dev->mt76.dev, "Error: RF write %d:%d failed:%d!!\n",
61 bank, reg, ret);
62
63 return ret;
64}
65
66static int
67mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset)
68{
69 int ret = -ETIMEDOUT;
70 u32 val;
71 u8 bank, reg;
72
73 if (test_bit(MT76_REMOVED, &dev->mt76.state))
74 return -ENODEV;
75
76 bank = MT_RF_BANK(offset);
77 reg = MT_RF_REG(offset);
78
79 if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank) > 8)
80 return -EINVAL;
81
82 mutex_lock(&dev->reg_atomic_mutex);
83
84 if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
85 goto out;
86
87 mt76_wr(dev, MT_RF_CSR_CFG,
88 FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
89 FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) |
90 MT_RF_CSR_CFG_KICK);
91
92 if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
93 goto out;
94
95 val = mt76_rr(dev, MT_RF_CSR_CFG);
96 if (FIELD_GET(MT_RF_CSR_CFG_REG_ID, val) == reg &&
97 FIELD_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) {
98 ret = FIELD_GET(MT_RF_CSR_CFG_DATA, val);
99 trace_mt76x0_rf_read(&dev->mt76, bank, offset, ret);
100 }
101out:
102 mutex_unlock(&dev->reg_atomic_mutex);
103
104 if (ret < 0)
105 dev_err(dev->mt76.dev, "Error: RF read %d:%d failed:%d!!\n",
106 bank, reg, ret);
107
108 return ret;
109}
110
111static int
112rf_wr(struct mt76x0_dev *dev, u32 offset, u8 val)
113{
114 if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) {
115 struct mt76_reg_pair pair = {
116 .reg = offset,
117 .value = val,
118 };
119
120 return mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, &pair, 1);
121 } else {
122 WARN_ON_ONCE(1);
123 return mt76x0_rf_csr_wr(dev, offset, val);
124 }
125}
126
127static int
128rf_rr(struct mt76x0_dev *dev, u32 offset)
129{
130 int ret;
131 u32 val;
132
133 if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) {
134 struct mt76_reg_pair pair = {
135 .reg = offset,
136 };
137
138 ret = mt76x0_read_reg_pairs(dev, MT_MCU_MEMMAP_RF, &pair, 1);
139 val = pair.value;
140 } else {
141 WARN_ON_ONCE(1);
142 ret = val = mt76x0_rf_csr_rr(dev, offset);
143 }
144
145 return (ret < 0) ? ret : val;
146}
147
148static int
149rf_rmw(struct mt76x0_dev *dev, u32 offset, u8 mask, u8 val)
150{
151 int ret;
152
153 ret = rf_rr(dev, offset);
154 if (ret < 0)
155 return ret;
156 val |= ret & ~mask;
157 ret = rf_wr(dev, offset, val);
158 if (ret)
159 return ret;
160
161 return val;
162}
163
164static int
165rf_set(struct mt76x0_dev *dev, u32 offset, u8 val)
166{
167 return rf_rmw(dev, offset, 0, val);
168}
169
170#if 0
171static int
172rf_clear(struct mt76x0_dev *dev, u32 offset, u8 mask)
173{
174 return rf_rmw(dev, offset, mask, 0);
175}
176#endif
177
/* Push a whole table of RF reg/value pairs through the MCU.
 * Fix: the macro body used to end with a ";", so call sites (which add
 * their own semicolon) expanded to an extra empty statement and the macro
 * could not be used in an unbraced if/else.
 */
#define RF_RANDOM_WRITE(dev, tab) \
	mt76x0_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab))
180
181int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev)
182{
183 int i = 20;
184 u32 val;
185
186 do {
187 val = mt76_rr(dev, MT_BBP(CORE, 0));
188 printk("BBP version %08x\n", val);
189 if (val && ~val)
190 break;
191 } while (--i);
192
193 if (!i) {
194 dev_err(dev->mt76.dev, "Error: BBP is not ready\n");
195 return -EIO;
196 }
197
198 return 0;
199}
200
201static void
202mt76x0_bbp_set_ctrlch(struct mt76x0_dev *dev, enum nl80211_chan_width width,
203 u8 ctrl)
204{
205 int core_val, agc_val;
206
207 switch (width) {
208 case NL80211_CHAN_WIDTH_80:
209 core_val = 3;
210 agc_val = 7;
211 break;
212 case NL80211_CHAN_WIDTH_40:
213 core_val = 2;
214 agc_val = 3;
215 break;
216 default:
217 core_val = 0;
218 agc_val = 1;
219 break;
220 }
221
222 mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
223 mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
224 mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
225 mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
226}
227
228int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x0_rxwi *rxwi)
229{
230 s8 lna_gain, rssi_offset;
231 int val;
232
233 if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ) {
234 lna_gain = dev->ee->lna_gain_2ghz;
235 rssi_offset = dev->ee->rssi_offset_2ghz[0];
236 } else {
237 lna_gain = dev->ee->lna_gain_5ghz[0];
238 rssi_offset = dev->ee->rssi_offset_5ghz[0];
239 }
240
241 val = rxwi->rssi[0] + rssi_offset - lna_gain;
242
243 return val;
244}
245
/* Run VCO calibration for @channel. The register write order follows the
 * vendor driver and is assumed to be significant - do not reorder.
 */
static void mt76x0_vco_cal(struct mt76x0_dev *dev, u8 channel)
{
	u8 val;

	/* Only run when B0.R04[6:4] == 0b011 (VCO cal supported mode) -
	 * NOTE(review): meaning of this gate inferred from vendor code,
	 * confirm against datasheet.
	 */
	val = rf_rr(dev, MT_RF(0, 4));
	if ((val & 0x70) != 0x30)
		return;

	/*
	 * Calibration Mode - Open loop, closed loop, and amplitude:
	 * B0.R06.[0]: 1
	 * B0.R06.[3:1] bp_close_code: 100
	 * B0.R05.[7:0] bp_open_code: 0x0
	 * B0.R04.[2:0] cal_bits: 000
	 * B0.R03.[2:0] startup_time: 011
	 * B0.R03.[6:4] settle_time:
	 *  80MHz channel: 110
	 *  40MHz channel: 101
	 *  20MHz channel: 100
	 */
	val = rf_rr(dev, MT_RF(0, 6));
	val &= ~0xf;
	val |= 0x09;
	rf_wr(dev, MT_RF(0, 6), val);

	val = rf_rr(dev, MT_RF(0, 5));
	if (val != 0)
		rf_wr(dev, MT_RF(0, 5), 0x0);

	val = rf_rr(dev, MT_RF(0, 4));
	val &= ~0x07;
	rf_wr(dev, MT_RF(0, 4), val);

	/* Per-channel settle/startup time selection. */
	val = rf_rr(dev, MT_RF(0, 3));
	val &= ~0x77;
	if (channel == 1 || channel == 7 || channel == 9 || channel >= 13) {
		val |= 0x63;
	} else if (channel == 3 || channel == 4 || channel == 10) {
		val |= 0x53;
	} else if (channel == 2 || channel == 5 || channel == 6 ||
		   channel == 8 || channel == 11 || channel == 12) {
		val |= 0x43;
	} else {
		WARN(1, "Unknown channel %u\n", channel);
		return;
	}
	rf_wr(dev, MT_RF(0, 3), val);

	/* Kick the calibration by setting B0.R04[7]. */
	/* TODO replace by mt76x0_rf_set(dev, MT_RF(0, 4), BIT(7)); */
	val = rf_rr(dev, MT_RF(0, 4));
	val = ((val & ~(0x80)) | 0x80);
	rf_wr(dev, MT_RF(0, 4), val);

	/* Give the hardware time to finish the calibration. */
	msleep(2);
}
301
302static void
303mt76x0_mac_set_ctrlch(struct mt76x0_dev *dev, bool primary_upper)
304{
305 mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
306 primary_upper);
307}
308
309static void
310mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band)
311{
312 switch (band) {
313 case NL80211_BAND_2GHZ:
314 RF_RANDOM_WRITE(dev, mt76x0_rf_2g_channel_0_tab);
315
316 rf_wr(dev, MT_RF(5, 0), 0x45);
317 rf_wr(dev, MT_RF(6, 0), 0x44);
318
319 mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
320 mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
321
322 mt76_wr(dev, MT_TX_ALC_VGA3, 0x00050007);
323 mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x003E0002);
324 break;
325 case NL80211_BAND_5GHZ:
326 RF_RANDOM_WRITE(dev, mt76x0_rf_5g_channel_0_tab);
327
328 rf_wr(dev, MT_RF(5, 0), 0x44);
329 rf_wr(dev, MT_RF(6, 0), 0x45);
330
331 mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
332 mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
333
334 mt76_wr(dev, MT_TX_ALC_VGA3, 0x00000005);
335 mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x01010102);
336 break;
337 default:
338 break;
339 }
340}
341
/* EEPROM PA type codes (dev->ee->pa_type). */
#define EXT_PA_2G_5G	0x0
#define EXT_PA_5G_ONLY	0x1
#define EXT_PA_2G_ONLY	0x2
#define INT_PA_2G_5G	0x3

/* Program the RF PLL and bank registers for @channel. @rf_bw_band packs
 * a bandwidth code in the low byte and a band code (RF_G_BAND/RF_A_BAND)
 * in the high byte. The write sequence follows the vendor driver; the
 * order of the read-modify-write steps is assumed to matter.
 */
static void
mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band)
{
	u16 rf_band = rf_bw_band & 0xff00;
	u16 rf_bw = rf_bw_band & 0x00ff;
	u32 mac_reg;
	u8 rf_val;
	int i;
	bool bSDM = false;
	const struct mt76x0_freq_item *freq_item;

	/* Some channels need the sigma-delta-modulator frequency plan. */
	for (i = 0; i < ARRAY_SIZE(mt76x0_sdm_channel); i++) {
		if (channel == mt76x0_sdm_channel[i]) {
			bSDM = true;
			break;
		}
	}

	for (i = 0; i < ARRAY_SIZE(mt76x0_frequency_plan); i++) {
		if (channel == mt76x0_frequency_plan[i].channel) {
			rf_band = mt76x0_frequency_plan[i].band;

			if (bSDM)
				freq_item = &(mt76x0_sdm_frequency_plan[i]);
			else
				freq_item = &(mt76x0_frequency_plan[i]);

			/* PLL main dividers, R37..R33. */
			rf_wr(dev, MT_RF(0, 37), freq_item->pllR37);
			rf_wr(dev, MT_RF(0, 36), freq_item->pllR36);
			rf_wr(dev, MT_RF(0, 35), freq_item->pllR35);
			rf_wr(dev, MT_RF(0, 34), freq_item->pllR34);
			rf_wr(dev, MT_RF(0, 33), freq_item->pllR33);

			/* R32<7:5> */
			rf_val = rf_rr(dev, MT_RF(0, 32));
			rf_val &= ~0xE0;
			rf_val |= freq_item->pllR32_b7b5;
			rf_wr(dev, MT_RF(0, 32), rf_val);

			/* R32<4:0> pll_den: (Denomina - 8) */
			rf_val = rf_rr(dev, MT_RF(0, 32));
			rf_val &= ~0x1F;
			rf_val |= freq_item->pllR32_b4b0;
			rf_wr(dev, MT_RF(0, 32), rf_val);

			/* R31<7:5> */
			rf_val = rf_rr(dev, MT_RF(0, 31));
			rf_val &= ~0xE0;
			rf_val |= freq_item->pllR31_b7b5;
			rf_wr(dev, MT_RF(0, 31), rf_val);

			/* R31<4:0> pll_k(Nominator) */
			rf_val = rf_rr(dev, MT_RF(0, 31));
			rf_val &= ~0x1F;
			rf_val |= freq_item->pllR31_b4b0;
			rf_wr(dev, MT_RF(0, 31), rf_val);

			/* R30<7> sdm_reset_n - pulsed low-then-high for SDM
			 * channels to reset the modulator.
			 */
			rf_val = rf_rr(dev, MT_RF(0, 30));
			rf_val &= ~0x80;
			if (bSDM) {
				rf_wr(dev, MT_RF(0, 30), rf_val);
				rf_val |= 0x80;
				rf_wr(dev, MT_RF(0, 30), rf_val);
			} else {
				rf_val |= freq_item->pllR30_b7;
				rf_wr(dev, MT_RF(0, 30), rf_val);
			}

			/* R30<6:2> sdmmash_prbs,sin */
			rf_val = rf_rr(dev, MT_RF(0, 30));
			rf_val &= ~0x7C;
			rf_val |= freq_item->pllR30_b6b2;
			rf_wr(dev, MT_RF(0, 30), rf_val);

			/* R30<1> sdm_bp */
			rf_val = rf_rr(dev, MT_RF(0, 30));
			rf_val &= ~0x02;
			rf_val |= (freq_item->pllR30_b1 << 1);
			rf_wr(dev, MT_RF(0, 30), rf_val);

			/* R30<0> R29<7:0> (hex) pll_n */
			rf_val = freq_item->pll_n & 0x00FF;
			rf_wr(dev, MT_RF(0, 29), rf_val);

			rf_val = rf_rr(dev, MT_RF(0, 30));
			rf_val &= ~0x1;
			rf_val |= ((freq_item->pll_n >> 8) & 0x0001);
			rf_wr(dev, MT_RF(0, 30), rf_val);

			/* R28<7:6> isi_iso */
			rf_val = rf_rr(dev, MT_RF(0, 28));
			rf_val &= ~0xC0;
			rf_val |= freq_item->pllR28_b7b6;
			rf_wr(dev, MT_RF(0, 28), rf_val);

			/* R28<5:4> pfd_dly */
			rf_val = rf_rr(dev, MT_RF(0, 28));
			rf_val &= ~0x30;
			rf_val |= freq_item->pllR28_b5b4;
			rf_wr(dev, MT_RF(0, 28), rf_val);

			/* R28<3:2> clksel option */
			rf_val = rf_rr(dev, MT_RF(0, 28));
			rf_val &= ~0x0C;
			rf_val |= freq_item->pllR28_b3b2;
			rf_wr(dev, MT_RF(0, 28), rf_val);

			/* R28<1:0> R27<7:0> R26<7:0> (hex) sdm_k */
			rf_val = freq_item->pll_sdm_k & 0x000000FF;
			rf_wr(dev, MT_RF(0, 26), rf_val);

			rf_val = ((freq_item->pll_sdm_k >> 8) & 0x000000FF);
			rf_wr(dev, MT_RF(0, 27), rf_val);

			rf_val = rf_rr(dev, MT_RF(0, 28));
			rf_val &= ~0x3;
			rf_val |= ((freq_item->pll_sdm_k >> 16) & 0x0003);
			rf_wr(dev, MT_RF(0, 28), rf_val);

			/* R24<1:0> xo_div */
			rf_val = rf_rr(dev, MT_RF(0, 24));
			rf_val &= ~0x3;
			rf_val |= freq_item->pllR24_b1b0;
			rf_wr(dev, MT_RF(0, 24), rf_val);

			break;
		}
	}

	/* Bandwidth-dependent RF settings: exact bw_band match, or a
	 * bandwidth match restricted to the current band.
	 */
	for (i = 0; i < ARRAY_SIZE(mt76x0_rf_bw_switch_tab); i++) {
		if (rf_bw == mt76x0_rf_bw_switch_tab[i].bw_band) {
			rf_wr(dev, mt76x0_rf_bw_switch_tab[i].rf_bank_reg,
				   mt76x0_rf_bw_switch_tab[i].value);
		} else if ((rf_bw == (mt76x0_rf_bw_switch_tab[i].bw_band & 0xFF)) &&
			   (rf_band & mt76x0_rf_bw_switch_tab[i].bw_band)) {
			rf_wr(dev, mt76x0_rf_bw_switch_tab[i].rf_bank_reg,
				   mt76x0_rf_bw_switch_tab[i].value);
		}
	}

	/* Band-dependent RF settings. */
	for (i = 0; i < ARRAY_SIZE(mt76x0_rf_band_switch_tab); i++) {
		if (mt76x0_rf_band_switch_tab[i].bw_band & rf_band) {
			rf_wr(dev, mt76x0_rf_band_switch_tab[i].rf_bank_reg,
				   mt76x0_rf_band_switch_tab[i].value);
		}
	}

	mac_reg = mt76_rr(dev, MT_RF_MISC);
	mac_reg &= ~0xC; /* Clear 0x518[3:2] */
	mt76_wr(dev, MT_RF_MISC, mac_reg);

	if (dev->ee->pa_type == INT_PA_2G_5G ||
	    (dev->ee->pa_type == EXT_PA_5G_ONLY && (rf_band & RF_G_BAND)) ||
	    (dev->ee->pa_type == EXT_PA_2G_ONLY && (rf_band & RF_A_BAND))) {
		; /* Internal PA - nothing to do. */
	} else {
		/*
		 * MT_RF_MISC (offset: 0x0518)
		 * [2]1'b1: enable external A band PA
		 *    1'b0: disable external A band PA
		 * [3]1'b1: enable external G band PA
		 *    1'b0: disable external G band PA
		 */
		if (rf_band & RF_A_BAND) {
			mac_reg = mt76_rr(dev, MT_RF_MISC);
			mac_reg |= 0x4;
			mt76_wr(dev, MT_RF_MISC, mac_reg);
		} else {
			mac_reg = mt76_rr(dev, MT_RF_MISC);
			mac_reg |= 0x8;
			mt76_wr(dev, MT_RF_MISC, mac_reg);
		}

		/* External PA */
		for (i = 0; i < ARRAY_SIZE(mt76x0_rf_ext_pa_tab); i++)
			if (mt76x0_rf_ext_pa_tab[i].bw_band & rf_band)
				rf_wr(dev, mt76x0_rf_ext_pa_tab[i].rf_bank_reg,
					   mt76x0_rf_ext_pa_tab[i].value);
	}

	if (rf_band & RF_G_BAND) {
		mt76_wr(dev, MT_TX0_RF_GAIN_ATTEN, 0x63707400);
		/* Set Atten mode = 2 For G band, Disable Tx Inc dcoc. */
		mac_reg = mt76_rr(dev, MT_TX_ALC_CFG_1);
		mac_reg &= 0x896400FF;
		mt76_wr(dev, MT_TX_ALC_CFG_1, mac_reg);
	} else {
		mt76_wr(dev, MT_TX0_RF_GAIN_ATTEN, 0x686A7800);
		/* Set Atten mode = 0
		 * For Ext A band, Disable Tx Inc dcoc Cal.
		 */
		mac_reg = mt76_rr(dev, MT_TX_ALC_CFG_1);
		mac_reg &= 0x890400FF;
		mt76_wr(dev, MT_TX_ALC_CFG_1, mac_reg);
	}
}
539
/* Apply per-channel BBP settings from the switch table. For the AGC gain
 * register the table value is adjusted down by twice the EEPROM LNA gain
 * for the channel's sub-band before writing.
 */
static void
mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mt76x0_bbp_switch_tab); i++) {
		const struct mt76x0_bbp_switch_item *item = &mt76x0_bbp_switch_tab[i];
		const struct mt76_reg_pair *pair = &item->reg_pair;

		/* Entry must cover all requested bw/band bits. */
		if ((rf_bw_band & item->bw_band) != rf_bw_band)
			continue;

		if (pair->reg == MT_BBP(AGC, 8)) {
			u32 val = pair->value;
			/* NOTE(review): gain is u8, so a large LNA gain
			 * would wrap - presumably values are small enough;
			 * confirm against EEPROM ranges.
			 */
			u8 gain = FIELD_GET(MT_BBP_AGC_GAIN, val);

			if (channel > 14) {
				if (channel < 100)
					gain -= dev->ee->lna_gain_5ghz[0]*2;
				else if (channel < 137)
					gain -= dev->ee->lna_gain_5ghz[1]*2;
				else
					gain -= dev->ee->lna_gain_5ghz[2]*2;

			} else {
				gain -= dev->ee->lna_gain_2ghz*2;
			}

			val &= ~MT_BBP_AGC_GAIN;
			val |= FIELD_PREP(MT_BBP_AGC_GAIN, gain);
			mt76_wr(dev, pair->reg, val);
		} else {
			mt76_wr(dev, pair->reg, pair->value);
		}
	}
}
576
577#if 0
578static void
579mt76x0_extra_power_over_mac(struct mt76x0_dev *dev)
580{
581 u32 val;
582
583 val = ((mt76_rr(dev, MT_TX_PWR_CFG_1) & 0x00003f00) >> 8);
584 val |= ((mt76_rr(dev, MT_TX_PWR_CFG_2) & 0x00003f00) << 8);
585 mt76_wr(dev, MT_TX_PWR_CFG_7, val);
586
587 /* TODO: fix VHT */
588 val = ((mt76_rr(dev, MT_TX_PWR_CFG_3) & 0x0000ff00) >> 8);
589 mt76_wr(dev, MT_TX_PWR_CFG_8, val);
590
591 val = ((mt76_rr(dev, MT_TX_PWR_CFG_4) & 0x0000ff00) >> 8);
592 mt76_wr(dev, MT_TX_PWR_CFG_9, val);
593}
594
595static void
596mt76x0_phy_set_tx_power(struct mt76x0_dev *dev, u8 channel, u8 rf_bw_band)
597{
598 u32 val;
599 int i;
600 int bw = (rf_bw_band & RF_BW_20) ? 0 : 1;
601
602 for (i = 0; i < 4; i++) {
603 if (channel <= 14)
604 val = dev->ee->tx_pwr_cfg_2g[i][bw];
605 else
606 val = dev->ee->tx_pwr_cfg_5g[i][bw];
607
608 mt76_wr(dev, MT_TX_PWR_CFG_0 + 4*i, val);
609 }
610
611 mt76x0_extra_power_over_mac(dev);
612}
613#endif
614
615static void
616mt76x0_bbp_set_bw(struct mt76x0_dev *dev, enum nl80211_chan_width width)
617{
618 enum { BW_20 = 0, BW_40 = 1, BW_80 = 2, BW_10 = 4};
619 int bw;
620
621 switch (width) {
622 default:
623 case NL80211_CHAN_WIDTH_20_NOHT:
624 case NL80211_CHAN_WIDTH_20:
625 bw = BW_20;
626 break;
627 case NL80211_CHAN_WIDTH_40:
628 bw = BW_40;
629 break;
630 case NL80211_CHAN_WIDTH_80:
631 bw = BW_80;
632 break;
633 case NL80211_CHAN_WIDTH_10:
634 bw = BW_10;
635 break;
636 case NL80211_CHAN_WIDTH_80P80:
637 case NL80211_CHAN_WIDTH_160:
638 case NL80211_CHAN_WIDTH_5:
639 /* TODO error */
640 return ;
641 }
642
643 mt76x0_mcu_function_select(dev, BW_SETTING, bw);
644}
645
646static void
647mt76x0_phy_set_chan_pwr(struct mt76x0_dev *dev, u8 channel)
648{
649 static const int mt76x0_tx_pwr_ch_list[] = {
650 1,2,3,4,5,6,7,8,9,10,11,12,13,14,
651 36,38,40,44,46,48,52,54,56,60,62,64,
652 100,102,104,108,110,112,116,118,120,124,126,128,132,134,136,140,
653 149,151,153,157,159,161,165,167,169,171,173,
654 42,58,106,122,155
655 };
656 int i;
657 u32 val;
658
659 for (i = 0; i < ARRAY_SIZE(mt76x0_tx_pwr_ch_list); i++)
660 if (mt76x0_tx_pwr_ch_list[i] == channel)
661 break;
662
663 if (WARN_ON(i == ARRAY_SIZE(mt76x0_tx_pwr_ch_list)))
664 return;
665
666 val = mt76_rr(dev, MT_TX_ALC_CFG_0);
667 val &= ~0x3f3f;
668 val |= dev->ee->tx_pwr_per_chan[i];
669 val |= 0x2f2f << 16;
670 mt76_wr(dev, MT_TX_ALC_CFG_0, val);
671}
672
/* Program hardware for the channel described by @chandef. Must be called
 * with dev->hw_atomic_mutex held (see mt76x0_phy_set_channel()).
 * The step order (BBP bw -> control channel -> CCA -> band -> RF -> BBP
 * per-channel -> power) follows the vendor driver.
 */
static int
__mt76x0_phy_set_channel(struct mt76x0_dev *dev,
		       struct cfg80211_chan_def *chandef)
{
	/* Per-group CCA routing: maps the four 20 MHz sub-channels onto
	 * the CCA engines depending on which one is primary.
	 */
	u32 ext_cca_chan[4] = {
		[0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
		[1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
		[2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
		[3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
	};
	bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
	int ch_group_index, freq, freq1;
	u8 channel;
	u32 val;
	u16 rf_bw_band;

	freq = chandef->chan->center_freq;
	freq1 = chandef->center_freq1;
	channel = chandef->chan->hw_value;
	rf_bw_band = (channel <= 14) ? RF_G_BAND : RF_A_BAND;

	/* Translate the primary channel into the center channel of the
	 * 40/80 MHz block plus a sub-channel group index.
	 */
	switch (chandef->width) {
	case NL80211_CHAN_WIDTH_40:
		if (freq1 > freq)
			ch_group_index = 0;
		else
			ch_group_index = 1;
		channel += 2 - ch_group_index * 4;
		rf_bw_band |= RF_BW_40;
		break;
	case NL80211_CHAN_WIDTH_80:
		ch_group_index = (freq - freq1 + 30) / 20;
		if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
			ch_group_index = 0;
		channel += 6 - ch_group_index * 4;
		rf_bw_band |= RF_BW_80;
		break;
	default:
		ch_group_index = 0;
		rf_bw_band |= RF_BW_20;
		break;
	}

	mt76x0_bbp_set_bw(dev, chandef->width);
	mt76x0_bbp_set_ctrlch(dev, chandef->width, ch_group_index);
	mt76x0_mac_set_ctrlch(dev, ch_group_index & 1);

	mt76_rmw(dev, MT_EXT_CCA_CFG,
		 (MT_EXT_CCA_CFG_CCA0 |
		  MT_EXT_CCA_CFG_CCA1 |
		  MT_EXT_CCA_CFG_CCA2 |
		  MT_EXT_CCA_CFG_CCA3 |
		  MT_EXT_CCA_CFG_CCA_MASK),
		 ext_cca_chan[ch_group_index]);

	mt76x0_phy_set_band(dev, chandef->chan->band);
	mt76x0_phy_set_chan_rf_params(dev, channel, rf_bw_band);

	/* set Japan Tx filter at channel 14 */
	val = mt76_rr(dev, MT_BBP(CORE, 1));
	if (channel == 14)
		val |= 0x20;
	else
		val &= ~0x20;
	mt76_wr(dev, MT_BBP(CORE, 1), val);

	mt76x0_phy_set_chan_bbp_params(dev, channel, rf_bw_band);

	/* Vendor driver don't do it */
	/* mt76x0_phy_set_tx_power(dev, channel, rf_bw_band); */

	if (scan)
		mt76x0_vco_cal(dev, channel);

	mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1);
	mt76x0_phy_set_chan_pwr(dev, channel);

	dev->mt76.chandef = *chandef;
	return 0;
}
769
770int mt76x0_phy_set_channel(struct mt76x0_dev *dev,
771 struct cfg80211_chan_def *chandef)
772{
773 int ret;
774
775 mutex_lock(&dev->hw_atomic_mutex);
776 ret = __mt76x0_phy_set_channel(dev, chandef);
777 mutex_unlock(&dev->hw_atomic_mutex);
778
779 return ret;
780}
781
/* Re-run the full MCU calibration suite after association, with TX ALC
 * temporarily disabled. Register offsets and values mirror the vendor
 * driver.
 */
void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev)
{
	u32 tx_alc, reg_val;
	u8 channel = dev->mt76.chandef.chan->hw_value;
	int is_5ghz = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 1 : 0;

	mt76x0_mcu_calibrate(dev, MCU_CAL_R, 0);

	mt76x0_vco_cal(dev, channel);

	/* Disable TX ALC while calibrating, restore afterwards. */
	tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0);
	mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
	usleep_range(500, 700);

	/* Clear bit 0 and bit 7 of undocumented register 0x2124.
	 * NOTE(review): the "restore" below writes back this masked value,
	 * not the original read - matches vendor code, confirm intent.
	 */
	reg_val = mt76_rr(dev, 0x2124);
	reg_val &= 0xffffff7e;
	mt76_wr(dev, 0x2124, reg_val);

	mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0);

	mt76x0_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz);
	mt76x0_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz);
	mt76x0_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
	mt76x0_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz);
	mt76x0_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz);
	mt76x0_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz);

	mt76_wr(dev, 0x2124, reg_val);
	mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
	msleep(100);

	mt76x0_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1);
}
815
816void mt76x0_agc_save(struct mt76x0_dev *dev)
817{
818 /* Only one RX path */
819 dev->agc_save = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, 8)));
820}
821
822void mt76x0_agc_restore(struct mt76x0_dev *dev)
823{
824 mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, dev->agc_save);
825}
826
/* Run one on-die temperature measurement, restoring the RF registers that
 * the measurement borrows.
 */
static void mt76x0_temp_sensor(struct mt76x0_dev *dev)
{
	u8 rf_b7_73, rf_b0_66, rf_b0_67;
	int cycle, temp;
	u32 val;
	s32 sval;

	/* Save the registers the sensor setup clobbers.
	 * NOTE(review): rf_b0_67 is saved from (and restored to) bank 0
	 * reg 73, not 67 - looks like a vendor-code copy/paste; confirm
	 * which register is intended.
	 */
	rf_b7_73 = rf_rr(dev, MT_RF(7, 73));
	rf_b0_66 = rf_rr(dev, MT_RF(0, 66));
	rf_b0_67 = rf_rr(dev, MT_RF(0, 73));

	rf_wr(dev, MT_RF(7, 73), 0x02);
	rf_wr(dev, MT_RF(0, 66), 0x23);
	rf_wr(dev, MT_RF(0, 73), 0x01);

	/* Kick the measurement and poll the busy bit (0x10). */
	mt76_wr(dev, MT_BBP(CORE, 34), 0x00080055);

	for (cycle = 0; cycle < 2000; cycle++) {
		val = mt76_rr(dev, MT_BBP(CORE, 34));
		if (!(val & 0x10))
			break;
		udelay(3);
	}

	if (cycle >= 2000) {
		/* Timed out: clear everything but the busy bit and bail. */
		val &= 0x10;
		mt76_wr(dev, MT_BBP(CORE, 34), val);
		goto done;
	}

	/* Sign-extend the 8-bit raw reading. */
	sval = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
	if (!(sval & 0x80))
		sval &= 0x7f; /* Positive */
	else
		sval |= 0xffffff00; /* Negative */

	/* Convert to degrees C using the EEPROM offset.
	 * NOTE(review): temp is computed but not stored or reported
	 * anywhere in this function - presumably kept for future use.
	 */
	temp = (35 * (sval - dev->ee->temp_off))/ 10 + 25;

done:
	rf_wr(dev, MT_RF(7, 73), rf_b7_73);
	rf_wr(dev, MT_RF(0, 66), rf_b0_66);
	rf_wr(dev, MT_RF(0, 73), rf_b0_67);
}
870
/*
 * mt76x0_dynamic_vga_tuning - adapt the initial RX VGA gain to RSSI
 * @dev: driver instance
 *
 * Picks a band-dependent base gain (0x54 on 5 GHz, 0x4e on 2.4 GHz),
 * lowers it for strong signals based on the averaged RSSI, and writes
 * the result into bits 8..14 of MT_BBP(AGC, 8).
 */
static void mt76x0_dynamic_vga_tuning(struct mt76x0_dev *dev)
{
	u32 val, init_vga;

	init_vga = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 0x54 : 0x4E;
	/* Stronger signal -> lower initial gain. */
	if (dev->avg_rssi > -60)
		init_vga -= 0x20;
	else if (dev->avg_rssi > -70)
		init_vga -= 0x10;

	/* Replace the gain field (bits 8..14), keep all other bits. */
	val = mt76_rr(dev, MT_BBP(AGC, 8));
	val &= 0xFFFF80FF;
	val |= init_vga << 8;
	mt76_wr(dev, MT_BBP(AGC,8), val);
}
886
/*
 * mt76x0_phy_calibrate - periodic PHY calibration work
 * @work: embedded delayed-work item (dev->cal_work.work)
 *
 * Re-tunes the RX VGA gain, samples the temperature sensor, then
 * re-arms itself to run again after MT_CALIBRATE_INTERVAL.
 */
static void mt76x0_phy_calibrate(struct work_struct *work)
{
	struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
					      cal_work.work);

	mt76x0_dynamic_vga_tuning(dev);
	mt76x0_temp_sensor(dev);

	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
				     MT_CALIBRATE_INTERVAL);
}
898
/*
 * mt76x0_phy_con_cal_onoff - reset connection-monitor state for a BSS
 * @dev: driver instance
 * @info: BSS configuration of the (dis)associating interface
 *
 * Records the AP BSSID and clears the averaged RSSI and beacon
 * frequency-offset state under con_mon_lock, so beacon-based
 * monitoring restarts cleanly for the new association.
 */
void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev,
			      struct ieee80211_bss_conf *info)
{
	/* Start/stop collecting beacon data */
	spin_lock_bh(&dev->con_mon_lock);
	ether_addr_copy(dev->ap_bssid, info->bssid);
	dev->avg_rssi = 0;
	dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
	spin_unlock_bh(&dev->con_mon_lock);
}
909
/*
 * mt76x0_set_rx_chains - program the active RX chain mask into the BBP
 * @dev: driver instance
 *
 * Sets bit 3 of MT_BBP(AGC, 0) when the second chain is present in
 * dev->chainmask; bits 3 and 4 are cleared first.
 */
static void
mt76x0_set_rx_chains(struct mt76x0_dev *dev)
{
	u32 val;

	val = mt76_rr(dev, MT_BBP(AGC, 0));
	val &= ~(BIT(3) | BIT(4));

	if (dev->chainmask & BIT(1))
		val |= BIT(3);

	mt76_wr(dev, MT_BBP(AGC, 0), val);

	/* Read back after the barrier; the result is intentionally
	 * discarded — presumably this flushes the posted write, confirm.
	 */
	mb();
	val = mt76_rr(dev, MT_BBP(AGC, 0));
}
926
/*
 * mt76x0_set_tx_dac - enable one or two TX DACs
 * @dev: driver instance
 *
 * Sets both low bits of MT_BBP(TXBE, 5) when the second chain is
 * present in dev->chainmask, otherwise clears them.
 */
static void
mt76x0_set_tx_dac(struct mt76x0_dev *dev)
{
	if (dev->chainmask & BIT(1))
		mt76_set(dev, MT_BBP(TXBE, 5), 3);
	else
		mt76_clear(dev, MT_BBP(TXBE, 5), 3);
}
935
/*
 * mt76x0_rf_init - load the initial RF register configuration
 * @dev: driver instance
 *
 * Writes the static RF tables, applies the bandwidth- and band-
 * dependent switch tables for the default 20 MHz / 2.4 GHz setup,
 * programs the crystal frequency offset from EEPROM, resets the DAC
 * and finally kicks off VCO calibration.  The sequence is
 * order-sensitive: VCO calibration must come last.
 */
static void
mt76x0_rf_init(struct mt76x0_dev *dev)
{
	int i;
	u8 val;

	RF_RANDOM_WRITE(dev, mt76x0_rf_central_tab);
	RF_RANDOM_WRITE(dev, mt76x0_rf_2g_channel_0_tab);
	RF_RANDOM_WRITE(dev, mt76x0_rf_5g_channel_0_tab);
	RF_RANDOM_WRITE(dev, mt76x0_rf_vga_channel_0_tab);

	/* Apply entries valid for 20 MHz bandwidth (G band or generic). */
	for (i = 0; i < ARRAY_SIZE(mt76x0_rf_bw_switch_tab); i++) {
		const struct mt76x0_rf_switch_item *item = &mt76x0_rf_bw_switch_tab[i];

		if (item->bw_band == RF_BW_20)
			rf_wr(dev, item->rf_bank_reg, item->value);
		else if (((RF_G_BAND | RF_BW_20) & item->bw_band) == (RF_G_BAND | RF_BW_20))
			rf_wr(dev, item->rf_bank_reg, item->value);
	}

	/* Apply the 2.4 GHz (G band) entries of the band switch table. */
	for (i = 0; i < ARRAY_SIZE(mt76x0_rf_band_switch_tab); i++) {
		if (mt76x0_rf_band_switch_tab[i].bw_band & RF_G_BAND) {
			rf_wr(dev,
			      mt76x0_rf_band_switch_tab[i].rf_bank_reg,
			      mt76x0_rf_band_switch_tab[i].value);
		}
	}

	/*
	   Frequency calibration
	   E1: B0.R22<6:0>: xo_cxo<6:0>
	   E2: B0.R21<0>: xo_cxo<0>, B0.R22<7:0>: xo_cxo<8:1>
	 */
	rf_wr(dev, MT_RF(0, 22), min_t(u8, dev->ee->rf_freq_off, 0xBF));
	/* Read-back whose value is unused — presumably flushes the write
	 * before the DAC reset below; confirm.
	 */
	val = rf_rr(dev, MT_RF(0, 22));

	/*
	   Reset the DAC (Set B0.R73<7>=1, then set B0.R73<7>=0, and then set B0.R73<7>) during power up.
	 */
	val = rf_rr(dev, MT_RF(0, 73));
	val |= 0x80;
	rf_wr(dev, MT_RF(0, 73), val);
	val &= ~0x80;
	rf_wr(dev, MT_RF(0, 73), val);
	val |= 0x80;
	rf_wr(dev, MT_RF(0, 73), val);

	/*
	   vcocal_en (initiate VCO calibration (reset after completion)) - It should be at the end of RF configuration.
	 */
	rf_set(dev, MT_RF(0, 4), 0x80);
}
988
/*
 * mt76x0_ant_select - configure antenna selection
 * @dev: driver instance
 *
 * Programs the WLAN function control, combo control and BT-coexistence
 * registers for single-antenna operation.  The individual bit meanings
 * are not documented here — values come from the vendor driver.
 */
static void mt76x0_ant_select(struct mt76x0_dev *dev)
{
	/* Single antenna mode. */
	mt76_rmw(dev, MT_WLAN_FUN_CTRL, BIT(5), BIT(6));
	mt76_clear(dev, MT_CMB_CTRL, BIT(14) | BIT(12));
	mt76_clear(dev, MT_COEXCFG0, BIT(2));
	mt76_rmw(dev, MT_COEXCFG3, BIT(5) | BIT(4) | BIT(3) | BIT(2), BIT(1));
}
997
/*
 * mt76x0_phy_init - one-time PHY bring-up
 * @dev: driver instance
 *
 * Initializes the periodic calibration work item, selects the antenna
 * configuration, loads the RF register tables and programs the RX
 * chain and TX DAC settings.
 */
void mt76x0_phy_init(struct mt76x0_dev *dev)
{
	INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibrate);

	mt76x0_ant_select(dev);

	mt76x0_rf_init(dev);

	mt76x0_set_rx_chains(dev);
	mt76x0_set_tx_dac(dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h
new file mode 100644
index 000000000000..2880a43c3cb0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h
@@ -0,0 +1,81 @@
1/*
2 * (c) Copyright 2002-2010, Ralink Technology, Inc.
3 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#ifndef _MT76X0_PHY_H_
15#define _MT76X0_PHY_H_
16
17#define RF_G_BAND 0x0100
18#define RF_A_BAND 0x0200
19#define RF_A_BAND_LB 0x0400
20#define RF_A_BAND_MB 0x0800
21#define RF_A_BAND_HB 0x1000
22#define RF_A_BAND_11J 0x2000
23
24#define RF_BW_20 1
25#define RF_BW_40 2
26#define RF_BW_10 4
27#define RF_BW_80 8
28
29#define MT_RF(bank, reg) ((bank) << 16 | (reg))
30#define MT_RF_BANK(offset) (offset >> 16)
31#define MT_RF_REG(offset) (offset & 0xff)
32
/* BBP register write applied only when the current bandwidth/band
 * matches the bw_band flags (RF_BW_* | RF_*_BAND).
 */
struct mt76x0_bbp_switch_item {
	u16 bw_band;
	struct mt76_reg_pair reg_pair;
};
37
/* RF register write applied only when the current bandwidth/band
 * matches the bw_band flags; rf_bank_reg encodes bank and register
 * (see MT_RF()).
 */
struct mt76x0_rf_switch_item {
	u32 rf_bank_reg;
	u16 bw_band;
	u8 value;
};
43
/* Per-channel RF PLL programming entry: raw values for the bank-0 PLL
 * registers (R24..R37) used when tuning @channel on @band.
 */
struct mt76x0_freq_item {
	u8 channel;
	u32 band;
	u8 pllR37;
	u8 pllR36;
	u8 pllR35;
	u8 pllR34;
	u8 pllR33;
	u8 pllR32_b7b5;
	u8 pllR32_b4b0; /* PLL_DEN (denominator - 8) */
	u8 pllR31_b7b5;
	u8 pllR31_b4b0; /* PLL_K (numerator) */
	u8 pllR30_b7; /* sdm_reset_n */
	u8 pllR30_b6b2; /* sdmmash_prbs,sin */
	u8 pllR30_b1; /* sdm_bp */
	u16 pll_n; /* R30<0>, R29<7:0> (hex) */
	u8 pllR28_b7b6; /* isi,iso */
	u8 pllR28_b5b4; /* pfd_dly */
	u8 pllR28_b3b2; /* clksel option */
	u32 pll_sdm_k; /* R28<1:0>, R27<7:0>, R26<7:0> (hex) SDM_k */
	u8 pllR24_b1b0; /* xo_div */
};
66
/* TX power entry for one rate: power offset plus RF PA mode. */
struct mt76x0_rate_pwr_item {
	s8 mcs_power;
	u8 rf_pa_mode;
};
71
/* Full per-rate TX power table, grouped by modulation family. */
struct mt76x0_rate_pwr_tab {
	struct mt76x0_rate_pwr_item cck[4];
	struct mt76x0_rate_pwr_item ofdm[8];
	struct mt76x0_rate_pwr_item ht[8];
	struct mt76x0_rate_pwr_item vht[10];
	struct mt76x0_rate_pwr_item stbc[8];
	struct mt76x0_rate_pwr_item mcs32;
};
80
81#endif /* _MT76X0_PHY_H_ */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h b/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h
new file mode 100644
index 000000000000..16bed4aaa242
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/regs.h
@@ -0,0 +1,651 @@
1/*
2 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
3 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
4 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef __MT76_REGS_H
17#define __MT76_REGS_H
18
19#include <linux/bitops.h>
20
21#define MT_ASIC_VERSION 0x0000
22
23#define MT76XX_REV_E3 0x22
24#define MT76XX_REV_E4 0x33
25
26#define MT_CMB_CTRL 0x0020
27#define MT_CMB_CTRL_XTAL_RDY BIT(22)
28#define MT_CMB_CTRL_PLL_LD BIT(23)
29
30#define MT_EFUSE_CTRL 0x0024
31#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0)
32#define MT_EFUSE_CTRL_MODE GENMASK(7, 6)
33#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8)
34#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14)
35#define MT_EFUSE_CTRL_AIN GENMASK(25, 16)
36#define MT_EFUSE_CTRL_KICK BIT(30)
37#define MT_EFUSE_CTRL_SEL BIT(31)
38
39#define MT_EFUSE_DATA_BASE 0x0028
40#define MT_EFUSE_DATA(_n) (MT_EFUSE_DATA_BASE + ((_n) << 2))
41
42#define MT_COEXCFG0 0x0040
43#define MT_COEXCFG0_COEX_EN BIT(0)
44
45#define MT_COEXCFG3 0x004c
46
47#define MT_LDO_CTRL_0 0x006c
48#define MT_LDO_CTRL_1 0x0070
49
50#define MT_WLAN_FUN_CTRL 0x0080
51#define MT_WLAN_FUN_CTRL_WLAN_EN BIT(0)
52#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN BIT(1)
53#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2)
54
55#define MT_WLAN_FUN_CTRL_WLAN_RESET BIT(3) /* MT76x0 */
56#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */
57
58#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ BIT(4)
59#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL BIT(5)
60#define MT_WLAN_FUN_CTRL_INV_ANT_SEL BIT(6)
61#define MT_WLAN_FUN_CTRL_WAKE_HOST BIT(7)
62
63#define MT_WLAN_FUN_CTRL_THERM_RST BIT(8) /* MT76x2 */
64#define MT_WLAN_FUN_CTRL_THERM_CKEN BIT(9) /* MT76x2 */
65
66#define MT_WLAN_FUN_CTRL_GPIO_IN GENMASK(15, 8) /* MT76x0 */
67#define MT_WLAN_FUN_CTRL_GPIO_OUT GENMASK(23, 16) /* MT76x0 */
68#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN GENMASK(31, 24) /* MT76x0 */
69
70#define MT_XO_CTRL0 0x0100
71#define MT_XO_CTRL1 0x0104
72#define MT_XO_CTRL2 0x0108
73#define MT_XO_CTRL3 0x010c
74#define MT_XO_CTRL4 0x0110
75
76#define MT_XO_CTRL5 0x0114
77#define MT_XO_CTRL5_C2_VAL GENMASK(14, 8)
78
79#define MT_XO_CTRL6 0x0118
80#define MT_XO_CTRL6_C2_CTRL GENMASK(14, 8)
81
82#define MT_XO_CTRL7 0x011c
83
84#define MT_IOCFG_6 0x0124
85#define MT_WLAN_MTC_CTRL 0x10148
86#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
87#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12)
88#define MT_WLAN_MTC_CTRL_PWR_ACK_S BIT(13)
89#define MT_WLAN_MTC_CTRL_BBP_MEM_PD GENMASK(19, 16)
90#define MT_WLAN_MTC_CTRL_PBF_MEM_PD BIT(20)
91#define MT_WLAN_MTC_CTRL_FCE_MEM_PD BIT(21)
92#define MT_WLAN_MTC_CTRL_TSO_MEM_PD BIT(22)
93#define MT_WLAN_MTC_CTRL_BBP_MEM_RB BIT(24)
94#define MT_WLAN_MTC_CTRL_PBF_MEM_RB BIT(25)
95#define MT_WLAN_MTC_CTRL_FCE_MEM_RB BIT(26)
96#define MT_WLAN_MTC_CTRL_TSO_MEM_RB BIT(27)
97#define MT_WLAN_MTC_CTRL_STATE_UP BIT(28)
98
99#define MT_INT_SOURCE_CSR 0x0200
100#define MT_INT_MASK_CSR 0x0204
101
102#define MT_INT_RX_DONE(_n) BIT(_n)
103#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
104#define MT_INT_TX_DONE_ALL GENMASK(13, 4)
105#define MT_INT_TX_DONE(_n) BIT(_n + 4)
106#define MT_INT_RX_COHERENT BIT(16)
107#define MT_INT_TX_COHERENT BIT(17)
108#define MT_INT_ANY_COHERENT BIT(18)
109#define MT_INT_MCU_CMD BIT(19)
110#define MT_INT_TBTT BIT(20)
111#define MT_INT_PRE_TBTT BIT(21)
112#define MT_INT_TX_STAT BIT(22)
113#define MT_INT_AUTO_WAKEUP BIT(23)
114#define MT_INT_GPTIMER BIT(24)
115#define MT_INT_RXDELAYINT BIT(26)
116#define MT_INT_TXDELAYINT BIT(27)
117
118#define MT_WPDMA_GLO_CFG 0x0208
119#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
120#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
121#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2)
122#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
123#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4)
124#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
125#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
126#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8)
127#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30)
128#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
129
130#define MT_WPDMA_RST_IDX 0x020c
131
132#define MT_WPDMA_DELAY_INT_CFG 0x0210
133
134#define MT_WMM_AIFSN 0x0214
135#define MT_WMM_AIFSN_MASK GENMASK(3, 0)
136#define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4)
137
138#define MT_WMM_CWMIN 0x0218
139#define MT_WMM_CWMIN_MASK GENMASK(3, 0)
140#define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 4)
141
142#define MT_WMM_CWMAX 0x021c
143#define MT_WMM_CWMAX_MASK GENMASK(3, 0)
144#define MT_WMM_CWMAX_SHIFT(_n) ((_n) * 4)
145
146#define MT_WMM_TXOP_BASE 0x0220
147#define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
148#define MT_WMM_TXOP_SHIFT(_n) ((_n & 1) * 16)
149#define MT_WMM_TXOP_MASK GENMASK(15, 0)
150
151#define MT_WMM_CTRL 0x0230 /* MT76x0 */
152
153#define MT_FCE_DMA_ADDR 0x0230
154#define MT_FCE_DMA_LEN 0x0234
155
156#define MT_USB_DMA_CFG 0x238
157#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0)
158#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8)
159#define MT_USB_DMA_CFG_TX_WL_DROP BIT(16)
160#define MT_USB_DMA_CFG_WAKEUP_EN BIT(17)
161#define MT_USB_DMA_CFG_RX_DROP_OR_PADDING BIT(18)
162#define MT_USB_DMA_CFG_TX_CLR BIT(19)
163#define MT_USB_DMA_CFG_WL_LPK_EN BIT(20)
164#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21)
165#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22)
166#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23)
167#define MT_USB_DMA_CFG_EP_OUT_VALID GENMASK(29, 24)
168#define MT_USB_DMA_CFG_RX_BUSY BIT(30)
169#define MT_USB_DMA_CFG_TX_BUSY BIT(31)
170#if 0
171#define MT_USB_DMA_CFG_TX_CLR BIT(19)
172#define MT_USB_DMA_CFG_TXOP_HALT BIT(20)
173#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21)
174#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22)
175#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23)
176#define MT_USB_DMA_CFG_UDMA_RX_WL_DROP BIT(25)
177#endif
178
179#define MT_TSO_CTRL 0x0250
180#define MT_HEADER_TRANS_CTRL_REG 0x0260
181
182#define MT_US_CYC_CFG 0x02a4
183#define MT_US_CYC_CNT GENMASK(7, 0)
184
185#define MT_TX_RING_BASE 0x0300
186#define MT_RX_RING_BASE 0x03c0
187#define MT_RING_SIZE 0x10
188
189#define MT_TX_HW_QUEUE_MCU 8
190#define MT_TX_HW_QUEUE_MGMT 9
191
192#define MT_PBF_SYS_CTRL 0x0400
193#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0)
194#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1)
195#define MT_PBF_SYS_CTRL_MAC_RESET BIT(2)
196#define MT_PBF_SYS_CTRL_PBF_RESET BIT(3)
197#define MT_PBF_SYS_CTRL_ASY_RESET BIT(4)
198
199#define MT_PBF_CFG 0x0404
200#define MT_PBF_CFG_TX0Q_EN BIT(0)
201#define MT_PBF_CFG_TX1Q_EN BIT(1)
202#define MT_PBF_CFG_TX2Q_EN BIT(2)
203#define MT_PBF_CFG_TX3Q_EN BIT(3)
204#define MT_PBF_CFG_RX0Q_EN BIT(4)
205#define MT_PBF_CFG_RX_DROP_EN BIT(8)
206
207#define MT_PBF_TX_MAX_PCNT 0x0408
208#define MT_PBF_RX_MAX_PCNT 0x040c
209
210#define MT_BCN_OFFSET_BASE 0x041c
211#define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2))
212
213#define MT_RXQ_STA 0x0430
214#define MT_TXQ_STA 0x0434
215#define MT_RF_CSR_CFG 0x0500
216#define MT_RF_CSR_CFG_DATA GENMASK(7, 0)
217#define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8)
218#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 14)
219#define MT_RF_CSR_CFG_WR BIT(30)
220#define MT_RF_CSR_CFG_KICK BIT(31)
221
222#define MT_RF_BYPASS_0 0x0504
223#define MT_RF_BYPASS_1 0x0508
224#define MT_RF_SETTING_0 0x050c
225
226#define MT_RF_MISC 0x0518
227#define MT_RF_DATA_WRITE 0x0524
228
229#define MT_RF_CTRL 0x0528
230#define MT_RF_CTRL_ADDR GENMASK(11, 0)
231#define MT_RF_CTRL_WRITE BIT(12)
232#define MT_RF_CTRL_BUSY BIT(13)
233#define MT_RF_CTRL_IDX BIT(16)
234
235#define MT_RF_DATA_READ 0x052c
236
237#define MT_COM_REG0 0x0730
238#define MT_COM_REG1 0x0734
239#define MT_COM_REG2 0x0738
240#define MT_COM_REG3 0x073C
241
242#define MT_FCE_PSE_CTRL 0x0800
243#define MT_FCE_PARAMETERS 0x0804
244#define MT_FCE_CSO 0x0808
245
246#define MT_FCE_L2_STUFF 0x080c
247#define MT_FCE_L2_STUFF_HT_L2_EN BIT(0)
248#define MT_FCE_L2_STUFF_QOS_L2_EN BIT(1)
249#define MT_FCE_L2_STUFF_RX_STUFF_EN BIT(2)
250#define MT_FCE_L2_STUFF_TX_STUFF_EN BIT(3)
251#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4)
252#define MT_FCE_L2_STUFF_MVINV_BSWAP BIT(5)
253#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8)
254#define MT_FCE_L2_STUFF_TS_LEN_EN GENMASK(23, 16)
255#define MT_FCE_L2_STUFF_OTHER_PORT GENMASK(25, 24)
256
257#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824
258
259#define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0
260#define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4
261#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8
262
263#define MT_FCE_PDMA_GLOBAL_CONF 0x09c4
264
265#define MT_PAUSE_ENABLE_CONTROL1 0x0a38
266
267#define MT_FCE_SKIP_FS 0x0a6c
268
269#define MT_MAC_CSR0 0x1000
270#define MT_MAC_SYS_CTRL 0x1004
271#define MT_MAC_SYS_CTRL_RESET_CSR BIT(0)
272#define MT_MAC_SYS_CTRL_RESET_BBP BIT(1)
273#define MT_MAC_SYS_CTRL_ENABLE_TX BIT(2)
274#define MT_MAC_SYS_CTRL_ENABLE_RX BIT(3)
275
276#define MT_MAC_ADDR_DW0 0x1008
277#define MT_MAC_ADDR_DW1 0x100c
278#define MT_MAC_ADDR_DW1_U2ME_MASK GENMASK(23, 16)
279
280#define MT_MAC_BSSID_DW0 0x1010
281#define MT_MAC_BSSID_DW1 0x1014
282#define MT_MAC_BSSID_DW1_ADDR GENMASK(15, 0)
283#define MT_MAC_BSSID_DW1_MBSS_MODE GENMASK(17, 16)
284#define MT_MAC_BSSID_DW1_MBEACON_N GENMASK(20, 18)
285#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT BIT(21)
286#define MT_MAC_BSSID_DW1_MBSS_MODE_B2 BIT(22)
287#define MT_MAC_BSSID_DW1_MBEACON_N_B3 BIT(23)
288#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24)
289
290#define MT_MAX_LEN_CFG 0x1018
291#define MT_MAX_LEN_CFG_AMPDU GENMASK(13, 12)
292
293#define MT_LED_CFG 0x102c
294
295#define MT_AMPDU_MAX_LEN_20M1S 0x1030
296#define MT_AMPDU_MAX_LEN_20M2S 0x1034
297#define MT_AMPDU_MAX_LEN_40M1S 0x1038
298#define MT_AMPDU_MAX_LEN_40M2S 0x103c
299#define MT_AMPDU_MAX_LEN 0x1040
300
301#define MT_WCID_DROP_BASE 0x106c
302#define MT_WCID_DROP(_n) (MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
303#define MT_WCID_DROP_MASK(_n) BIT((_n) % 32)
304
305#define MT_BCN_BYPASS_MASK 0x108c
306
307#define MT_MAC_APC_BSSID_BASE 0x1090
308#define MT_MAC_APC_BSSID_L(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8))
309#define MT_MAC_APC_BSSID_H(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
310#define MT_MAC_APC_BSSID_H_ADDR GENMASK(15, 0)
311#define MT_MAC_APC_BSSID0_H_EN BIT(16)
312
313#define MT_XIFS_TIME_CFG 0x1100
314#define MT_XIFS_TIME_CFG_CCK_SIFS GENMASK(7, 0)
315#define MT_XIFS_TIME_CFG_OFDM_SIFS GENMASK(15, 8)
316#define MT_XIFS_TIME_CFG_OFDM_XIFS GENMASK(19, 16)
317#define MT_XIFS_TIME_CFG_EIFS GENMASK(28, 20)
318#define MT_XIFS_TIME_CFG_BB_RXEND_EN BIT(29)
319
320#define MT_BKOFF_SLOT_CFG 0x1104
321#define MT_BKOFF_SLOT_CFG_SLOTTIME GENMASK(7, 0)
322#define MT_BKOFF_SLOT_CFG_CC_DELAY GENMASK(11, 8)
323
324#define MT_BEACON_TIME_CFG 0x1114
325#define MT_BEACON_TIME_CFG_INTVAL GENMASK(15, 0)
326#define MT_BEACON_TIME_CFG_TIMER_EN BIT(16)
327#define MT_BEACON_TIME_CFG_SYNC_MODE GENMASK(18, 17)
328#define MT_BEACON_TIME_CFG_TBTT_EN BIT(19)
329#define MT_BEACON_TIME_CFG_BEACON_TX BIT(20)
330#define MT_BEACON_TIME_CFG_TSF_COMP GENMASK(31, 24)
331
332#define MT_TBTT_SYNC_CFG 0x1118
333#define MT_TBTT_TIMER_CFG 0x1124
334
335#define MT_INT_TIMER_CFG 0x1128
336#define MT_INT_TIMER_CFG_PRE_TBTT GENMASK(15, 0)
337#define MT_INT_TIMER_CFG_GP_TIMER GENMASK(31, 16)
338
339#define MT_INT_TIMER_EN 0x112c
340#define MT_INT_TIMER_EN_PRE_TBTT_EN BIT(0)
341#define MT_INT_TIMER_EN_GP_TIMER_EN BIT(1)
342
343#define MT_MAC_STATUS 0x1200
344#define MT_MAC_STATUS_TX BIT(0)
345#define MT_MAC_STATUS_RX BIT(1)
346
347#define MT_PWR_PIN_CFG 0x1204
348#define MT_AUX_CLK_CFG 0x120c
349
350#define MT_BB_PA_MODE_CFG0 0x1214
351#define MT_BB_PA_MODE_CFG1 0x1218
352#define MT_RF_PA_MODE_CFG0 0x121c
353#define MT_RF_PA_MODE_CFG1 0x1220
354
355#define MT_RF_PA_MODE_ADJ0 0x1228
356#define MT_RF_PA_MODE_ADJ1 0x122c
357
358#define MT_DACCLK_EN_DLY_CFG 0x1264
359
360#define MT_EDCA_CFG_BASE 0x1300
361#define MT_EDCA_CFG_AC(_n) (MT_EDCA_CFG_BASE + ((_n) << 2))
362#define MT_EDCA_CFG_TXOP GENMASK(7, 0)
363#define MT_EDCA_CFG_AIFSN GENMASK(11, 8)
364#define MT_EDCA_CFG_CWMIN GENMASK(15, 12)
365#define MT_EDCA_CFG_CWMAX GENMASK(19, 16)
366
367#define MT_TX_PWR_CFG_0 0x1314
368#define MT_TX_PWR_CFG_1 0x1318
369#define MT_TX_PWR_CFG_2 0x131c
370#define MT_TX_PWR_CFG_3 0x1320
371#define MT_TX_PWR_CFG_4 0x1324
372
373#define MT_TX_BAND_CFG 0x132c
374#define MT_TX_BAND_CFG_UPPER_40M BIT(0)
375#define MT_TX_BAND_CFG_5G BIT(1)
376#define MT_TX_BAND_CFG_2G BIT(2)
377
378#define MT_HT_FBK_TO_LEGACY 0x1384
379#define MT_TX_MPDU_ADJ_INT 0x1388
380
381#define MT_TX_PWR_CFG_7 0x13d4
382#define MT_TX_PWR_CFG_8 0x13d8
383#define MT_TX_PWR_CFG_9 0x13dc
384
385#define MT_TX_SW_CFG0 0x1330
386#define MT_TX_SW_CFG1 0x1334
387#define MT_TX_SW_CFG2 0x1338
388
389#define MT_TXOP_CTRL_CFG 0x1340
390#define MT_TXOP_TRUN_EN GENMASK(5, 0)
391#define MT_TXOP_EXT_CCA_DLY GENMASK(15, 8)
392#define MT_TXOP_CTRL
393
394#define MT_TX_RTS_CFG 0x1344
395#define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0)
396#define MT_TX_RTS_CFG_THRESH GENMASK(23, 8)
397#define MT_TX_RTS_FALLBACK BIT(24)
398
399#define MT_TX_TIMEOUT_CFG 0x1348
400#define MT_TX_RETRY_CFG 0x134c
401#define MT_TX_LINK_CFG 0x1350
402#define MT_HT_FBK_CFG0 0x1354
403#define MT_HT_FBK_CFG1 0x1358
404#define MT_LG_FBK_CFG0 0x135c
405#define MT_LG_FBK_CFG1 0x1360
406
407#define MT_CCK_PROT_CFG 0x1364
408#define MT_OFDM_PROT_CFG 0x1368
409#define MT_MM20_PROT_CFG 0x136c
410#define MT_MM40_PROT_CFG 0x1370
411#define MT_GF20_PROT_CFG 0x1374
412#define MT_GF40_PROT_CFG 0x1378
413
414#define MT_PROT_RATE GENMASK(15, 0)
415#define MT_PROT_CTRL_RTS_CTS BIT(16)
416#define MT_PROT_CTRL_CTS2SELF BIT(17)
417#define MT_PROT_NAV_SHORT BIT(18)
418#define MT_PROT_NAV_LONG BIT(19)
419#define MT_PROT_TXOP_ALLOW_CCK BIT(20)
420#define MT_PROT_TXOP_ALLOW_OFDM BIT(21)
421#define MT_PROT_TXOP_ALLOW_MM20 BIT(22)
422#define MT_PROT_TXOP_ALLOW_MM40 BIT(23)
423#define MT_PROT_TXOP_ALLOW_GF20 BIT(24)
424#define MT_PROT_TXOP_ALLOW_GF40 BIT(25)
425#define MT_PROT_RTS_THR_EN BIT(26)
426#define MT_PROT_RATE_CCK_11 0x0003
427#define MT_PROT_RATE_OFDM_6 0x4000
428#define MT_PROT_RATE_OFDM_24 0x4004
429#define MT_PROT_RATE_DUP_OFDM_24 0x4084
430#define MT_PROT_TXOP_ALLOW_ALL GENMASK(25, 20)
431#define MT_PROT_TXOP_ALLOW_BW20 (MT_PROT_TXOP_ALLOW_ALL & \
432 ~MT_PROT_TXOP_ALLOW_MM40 & \
433 ~MT_PROT_TXOP_ALLOW_GF40)
434
435#define MT_EXP_ACK_TIME 0x1380
436
437#define MT_TX_PWR_CFG_0_EXT 0x1390
438#define MT_TX_PWR_CFG_1_EXT 0x1394
439
440#define MT_TX_FBK_LIMIT 0x1398
441#define MT_TX_FBK_LIMIT_MPDU_FBK GENMASK(7, 0)
442#define MT_TX_FBK_LIMIT_AMPDU_FBK GENMASK(15, 8)
443#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR BIT(16)
444#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17)
445#define MT_TX_FBK_LIMIT_RATE_LUT BIT(18)
446
447#define MT_TX0_RF_GAIN_CORR 0x13a0
448#define MT_TX1_RF_GAIN_CORR 0x13a4
449#define MT_TX0_RF_GAIN_ATTEN 0x13a8
450
451#define MT_TX_ALC_CFG_0 0x13b0
452#define MT_TX_ALC_CFG_0_CH_INIT_0 GENMASK(5, 0)
453#define MT_TX_ALC_CFG_0_CH_INIT_1 GENMASK(13, 8)
454#define MT_TX_ALC_CFG_0_LIMIT_0 GENMASK(21, 16)
455#define MT_TX_ALC_CFG_0_LIMIT_1 GENMASK(29, 24)
456
457#define MT_TX_ALC_CFG_1 0x13b4
458#define MT_TX_ALC_CFG_1_TEMP_COMP GENMASK(5, 0)
459
460#define MT_TX_ALC_CFG_2 0x13a8
461#define MT_TX_ALC_CFG_2_TEMP_COMP GENMASK(5, 0)
462
463#define MT_TX0_BB_GAIN_ATTEN 0x13c0
464
465#define MT_TX_ALC_VGA3 0x13c8
466
467#define MT_TX_PROT_CFG6 0x13e0
468#define MT_TX_PROT_CFG7 0x13e4
469#define MT_TX_PROT_CFG8 0x13e8
470
471#define MT_PIFS_TX_CFG 0x13ec
472
473#define MT_RX_FILTR_CFG 0x1400
474
475#define MT_RX_FILTR_CFG_CRC_ERR BIT(0)
476#define MT_RX_FILTR_CFG_PHY_ERR BIT(1)
477#define MT_RX_FILTR_CFG_PROMISC BIT(2)
478#define MT_RX_FILTR_CFG_OTHER_BSS BIT(3)
479#define MT_RX_FILTR_CFG_VER_ERR BIT(4)
480#define MT_RX_FILTR_CFG_MCAST BIT(5)
481#define MT_RX_FILTR_CFG_BCAST BIT(6)
482#define MT_RX_FILTR_CFG_DUP BIT(7)
483#define MT_RX_FILTR_CFG_CFACK BIT(8)
484#define MT_RX_FILTR_CFG_CFEND BIT(9)
485#define MT_RX_FILTR_CFG_ACK BIT(10)
486#define MT_RX_FILTR_CFG_CTS BIT(11)
487#define MT_RX_FILTR_CFG_RTS BIT(12)
488#define MT_RX_FILTR_CFG_PSPOLL BIT(13)
489#define MT_RX_FILTR_CFG_BA BIT(14)
490#define MT_RX_FILTR_CFG_BAR BIT(15)
491#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16)
492
493#define MT_AUTO_RSP_CFG 0x1404
494
495#define MT_AUTO_RSP_PREAMB_SHORT BIT(4)
496
497#define MT_LEGACY_BASIC_RATE 0x1408
498#define MT_HT_BASIC_RATE 0x140c
499#define MT_HT_CTRL_CFG 0x1410
500#define MT_RX_PARSER_CFG 0x1418
501#define MT_RX_PARSER_RX_SET_NAV_ALL BIT(0)
502
503#define MT_EXT_CCA_CFG 0x141c
504#define MT_EXT_CCA_CFG_CCA0 GENMASK(1, 0)
505#define MT_EXT_CCA_CFG_CCA1 GENMASK(3, 2)
506#define MT_EXT_CCA_CFG_CCA2 GENMASK(5, 4)
507#define MT_EXT_CCA_CFG_CCA3 GENMASK(7, 6)
508#define MT_EXT_CCA_CFG_CCA_MASK GENMASK(11, 8)
509#define MT_EXT_CCA_CFG_ED_CCA_MASK GENMASK(15, 12)
510
511#define MT_TX_SW_CFG3 0x1478
512
513#define MT_PN_PAD_MODE 0x150c
514
515#define MT_TXOP_HLDR_ET 0x1608
516
517#define MT_PROT_AUTO_TX_CFG 0x1648
518
519#define MT_RX_STA_CNT0 0x1700
520#define MT_RX_STA_CNT1 0x1704
521#define MT_RX_STA_CNT2 0x1708
522#define MT_TX_STA_CNT0 0x170c
523#define MT_TX_STA_CNT1 0x1710
524#define MT_TX_STA_CNT2 0x1714
525
526/* Vendor driver defines content of the second word of STAT_FIFO as follows:
527 * MT_TX_STAT_FIFO_RATE GENMASK(26, 16)
528 * MT_TX_STAT_FIFO_ETXBF BIT(27)
529 * MT_TX_STAT_FIFO_SND BIT(28)
530 * MT_TX_STAT_FIFO_ITXBF BIT(29)
531 * However, tests show that b16-31 have the same layout as TXWI rate_ctl
532 * with rate set to rate at which frame was acked.
533 */
534#define MT_TX_STAT_FIFO 0x1718
535#define MT_TX_STAT_FIFO_VALID BIT(0)
536#define MT_TX_STAT_FIFO_SUCCESS BIT(5)
537#define MT_TX_STAT_FIFO_AGGR BIT(6)
538#define MT_TX_STAT_FIFO_ACKREQ BIT(7)
539#define MT_TX_STAT_FIFO_WCID GENMASK(15, 8)
540#define MT_TX_STAT_FIFO_RATE GENMASK(31, 16)
541
542#define MT_TX_AGG_STAT 0x171c
543
544#define MT_TX_AGG_CNT_BASE0 0x1720
545
546#define MT_MPDU_DENSITY_CNT 0x1740
547
548#define MT_TX_AGG_CNT_BASE1 0x174c
549
550#define MT_TX_AGG_CNT(_id) ((_id) < 8 ? \
551 MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
552 MT_TX_AGG_CNT_BASE1 + ((_id - 8) << 2))
553
554#define MT_TX_STAT_FIFO_EXT 0x1798
555#define MT_TX_STAT_FIFO_EXT_RETRY GENMASK(7, 0)
556#define MT_TX_STAT_FIFO_EXT_PKTID GENMASK(15, 8)
557
558#define MT_BBP_CORE_BASE 0x2000
559#define MT_BBP_IBI_BASE 0x2100
560#define MT_BBP_AGC_BASE 0x2300
561#define MT_BBP_TXC_BASE 0x2400
562#define MT_BBP_RXC_BASE 0x2500
563#define MT_BBP_TXO_BASE 0x2600
564#define MT_BBP_TXBE_BASE 0x2700
565#define MT_BBP_RXFE_BASE 0x2800
566#define MT_BBP_RXO_BASE 0x2900
567#define MT_BBP_DFS_BASE 0x2a00
568#define MT_BBP_TR_BASE 0x2b00
569#define MT_BBP_CAL_BASE 0x2c00
570#define MT_BBP_DSC_BASE 0x2e00
571#define MT_BBP_PFMU_BASE 0x2f00
572
573#define MT_BBP(_type, _n) (MT_BBP_##_type##_BASE + ((_n) << 2))
574
575#define MT_BBP_CORE_R1_BW GENMASK(4, 3)
576
577#define MT_BBP_AGC_R0_CTRL_CHAN GENMASK(9, 8)
578#define MT_BBP_AGC_R0_BW GENMASK(14, 12)
579
580/* AGC, R4/R5 */
581#define MT_BBP_AGC_LNA_GAIN GENMASK(21, 16)
582
583/* AGC, R8/R9 */
584#define MT_BBP_AGC_GAIN GENMASK(14, 8)
585
586#define MT_BBP_AGC20_RSSI0 GENMASK(7, 0)
587#define MT_BBP_AGC20_RSSI1 GENMASK(15, 8)
588
589#define MT_BBP_TXBE_R0_CTRL_CHAN GENMASK(1, 0)
590
591#define MT_WCID_ADDR_BASE 0x1800
592#define MT_WCID_ADDR(_n) (MT_WCID_ADDR_BASE + (_n) * 8)
593
594#define MT_SRAM_BASE 0x4000
595
596#define MT_WCID_KEY_BASE 0x8000
597#define MT_WCID_KEY(_n) (MT_WCID_KEY_BASE + (_n) * 32)
598
599#define MT_WCID_IV_BASE 0xa000
600#define MT_WCID_IV(_n) (MT_WCID_IV_BASE + (_n) * 8)
601
602#define MT_WCID_ATTR_BASE 0xa800
603#define MT_WCID_ATTR(_n) (MT_WCID_ATTR_BASE + (_n) * 4)
604
605#define MT_WCID_ATTR_PAIRWISE BIT(0)
606#define MT_WCID_ATTR_PKEY_MODE GENMASK(3, 1)
607#define MT_WCID_ATTR_BSS_IDX GENMASK(6, 4)
608#define MT_WCID_ATTR_RXWI_UDF GENMASK(9, 7)
609#define MT_WCID_ATTR_PKEY_MODE_EXT BIT(10)
610#define MT_WCID_ATTR_BSS_IDX_EXT BIT(11)
611#define MT_WCID_ATTR_WAPI_MCBC BIT(15)
612#define MT_WCID_ATTR_WAPI_KEYID GENMASK(31, 24)
613
614#define MT_SKEY_BASE_0 0xac00
615#define MT_SKEY_BASE_1 0xb400
616#define MT_SKEY_0(_bss, _idx) \
617 (MT_SKEY_BASE_0 + (4 * (_bss) + _idx) * 32)
618#define MT_SKEY_1(_bss, _idx) \
619 (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + _idx) * 32)
620#define MT_SKEY(_bss, _idx) \
621 ((_bss & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
622
623#define MT_SKEY_MODE_BASE_0 0xb000
624#define MT_SKEY_MODE_BASE_1 0xb3f0
625#define MT_SKEY_MODE_0(_bss) \
626 (MT_SKEY_MODE_BASE_0 + ((_bss / 2) << 2))
627#define MT_SKEY_MODE_1(_bss) \
628 (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
629#define MT_SKEY_MODE(_bss) \
630 ((_bss & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
631#define MT_SKEY_MODE_MASK GENMASK(3, 0)
632#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * (_bss & 1)))
633
634#define MT_BEACON_BASE 0xc000
635
636#define MT_TEMP_SENSOR 0x1d000
637#define MT_TEMP_SENSOR_VAL GENMASK(6, 0)
638
/* Hardware cipher identifiers programmed into WCID attribute /
 * shared-key mode fields.  NOTE(review): the numeric values appear to
 * be hardware-defined — do not reorder without checking the datasheet.
 */
enum mt76_cipher_type {
	MT_CIPHER_NONE,
	MT_CIPHER_WEP40,
	MT_CIPHER_WEP104,
	MT_CIPHER_TKIP,
	MT_CIPHER_AES_CCMP,
	MT_CIPHER_CKIP40,
	MT_CIPHER_CKIP104,
	MT_CIPHER_CKIP128,
	MT_CIPHER_WAPI,
};
650
651#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c
new file mode 100644
index 000000000000..8abdd3cd546d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c
@@ -0,0 +1,21 @@
1/*
2 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
3 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/module.h>
16
17#ifndef __CHECKER__
18#define CREATE_TRACE_POINTS
19#include "trace.h"
20
21#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h
new file mode 100644
index 000000000000..8a752a09f2dc
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h
@@ -0,0 +1,313 @@
1/*
2 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
3 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#if !defined(__MT76X0U_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
16#define __MT76X0U_TRACE_H
17
18#include <linux/tracepoint.h>
19#include "mt76x0.h"
20#include "mac.h"
21
22#undef TRACE_SYSTEM
23#define TRACE_SYSTEM mt76x0
24
25#define MAXNAME 32
26#define DEV_ENTRY __array(char, wiphy_name, 32)
27#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \
28 wiphy_name(dev->hw->wiphy), MAXNAME)
29#define DEV_PR_FMT "%s "
30#define DEV_PR_ARG __entry->wiphy_name
31
32#define REG_ENTRY __field(u32, reg) __field(u32, val)
33#define REG_ASSIGN __entry->reg = reg; __entry->val = val
34#define REG_PR_FMT "%04x=%08x"
35#define REG_PR_ARG __entry->reg, __entry->val
36
/* Trace class for register accesses over the USB vendor channel. */
DECLARE_EVENT_CLASS(dev_reg_evt,
	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
	TP_ARGS(dev, reg, val),
	TP_STRUCT__entry(
		DEV_ENTRY
		REG_ENTRY
	),
	TP_fast_assign(
		DEV_ASSIGN;
		REG_ASSIGN;
	),
	TP_printk(
		DEV_PR_FMT REG_PR_FMT,
		DEV_PR_ARG, REG_PR_ARG
	)
);

/* Register read completed; val is the value returned by the device. */
DEFINE_EVENT(dev_reg_evt, mt76x0_reg_read,
	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
	TP_ARGS(dev, reg, val)
);

/* Register write issued. */
DEFINE_EVENT(dev_reg_evt, mt76x0_reg_write,
	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
	TP_ARGS(dev, reg, val)
);

/* Bulk URB submitted: records the pipe and the transfer length. */
TRACE_EVENT(mt76x0_submit_urb,
	TP_PROTO(struct mt76_dev *dev, struct urb *u),
	TP_ARGS(dev, u),
	TP_STRUCT__entry(
		DEV_ENTRY __field(unsigned, pipe) __field(u32, len)
	),
	TP_fast_assign(
		DEV_ASSIGN;
		__entry->pipe = u->pipe;
		__entry->len = u->transfer_buffer_length;
	),
	TP_printk(DEV_PR_FMT "p:%08x len:%u",
		  DEV_PR_ARG, __entry->pipe, __entry->len)
);

/* Trace a synchronous transfer that has no real URB: build an on-stack
 * urb with only the two fields the tracepoint actually reads.
 */
#define trace_mt76x0_submit_urb_sync(__dev, __pipe, __len) ({	\
	struct urb u;						\
	u.pipe = __pipe;					\
	u.transfer_buffer_length = __len;			\
	trace_mt76x0_submit_urb(__dev, &u);			\
})
85
/* MCU command sent: first 32-bit word of the message, its checksum and
 * whether a response is expected.
 */
TRACE_EVENT(mt76x0_mcu_msg_send,
	TP_PROTO(struct mt76_dev *dev,
		 struct sk_buff *skb, u32 csum, bool resp),
	TP_ARGS(dev, skb, csum, resp),
	TP_STRUCT__entry(
		DEV_ENTRY
		__field(u32, info)
		__field(u32, csum)
		__field(bool, resp)
	),
	TP_fast_assign(
		DEV_ASSIGN;
		__entry->info = *(u32 *)skb->data;
		__entry->csum = csum;
		__entry->resp = resp;
	),
	TP_printk(DEV_PR_FMT "i:%08x c:%08x r:%d",
		  DEV_PR_ARG, __entry->info, __entry->csum, __entry->resp)
);

/* Vendor control request completed; see mt76x0_vendor_request(). */
TRACE_EVENT(mt76x0_vend_req,
	TP_PROTO(struct mt76_dev *dev, unsigned pipe, u8 req, u8 req_type,
		 u16 val, u16 offset, void *buf, size_t buflen, int ret),
	TP_ARGS(dev, pipe, req, req_type, val, offset, buf, buflen, ret),
	TP_STRUCT__entry(
		DEV_ENTRY
		__field(unsigned, pipe) __field(u8, req) __field(u8, req_type)
		__field(u16, val) __field(u16, offset) __field(void*, buf)
		__field(int, buflen) __field(int, ret)
	),
	TP_fast_assign(
		DEV_ASSIGN;
		__entry->pipe = pipe;
		__entry->req = req;
		__entry->req_type = req_type;
		__entry->val = val;
		__entry->offset = offset;
		__entry->buf = buf;
		__entry->buflen = buflen;
		__entry->ret = ret;
	),
	TP_printk(DEV_PR_FMT
		  "%d p:%08x req:%02hhx %02hhx val:%04hx %04hx buf:%d %d",
		  DEV_PR_ARG, __entry->ret, __entry->pipe, __entry->req,
		  __entry->req_type, __entry->val, __entry->offset,
		  !!__entry->buf, __entry->buflen)
);
133
/* Trace class for RF register accesses: bank/reg address plus value.
 * Reuses REG_ASSIGN for reg/val, then records the bank separately.
 */
DECLARE_EVENT_CLASS(dev_rf_reg_evt,
	TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
	TP_ARGS(dev, bank, reg, val),
	TP_STRUCT__entry(
		DEV_ENTRY
		__field(u8, bank)
		__field(u8, reg)
		__field(u8, val)
	),
	TP_fast_assign(
		DEV_ASSIGN;
		REG_ASSIGN;
		__entry->bank = bank;
	),
	TP_printk(
		DEV_PR_FMT "%02hhx:%02hhx=%02hhx",
		DEV_PR_ARG, __entry->bank, __entry->reg, __entry->val
	)
);

DEFINE_EVENT(dev_rf_reg_evt, mt76x0_rf_read,
	TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
	TP_ARGS(dev, bank, reg, val)
);

DEFINE_EVENT(dev_rf_reg_evt, mt76x0_rf_write,
	TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
	TP_ARGS(dev, bank, reg, val)
);

/* Trace class for events that carry a single byte of payload. */
DECLARE_EVENT_CLASS(dev_simple_evt,
	TP_PROTO(struct mt76_dev *dev, u8 val),
	TP_ARGS(dev, val),
	TP_STRUCT__entry(
		DEV_ENTRY
		__field(u8, val)
	),
	TP_fast_assign(
		DEV_ASSIGN;
		__entry->val = val;
	),
	TP_printk(
		DEV_PR_FMT "%02hhx", DEV_PR_ARG, __entry->val
	)
);
179
/* Frame received: copies the whole RXWI descriptor plus FCE info word. */
TRACE_EVENT(mt76x0_rx,
	TP_PROTO(struct mt76_dev *dev, struct mt76x0_rxwi *rxwi, u32 f),
	TP_ARGS(dev, rxwi, f),
	TP_STRUCT__entry(
		DEV_ENTRY
		__field_struct(struct mt76x0_rxwi, rxwi)
		__field(u32, fce_info)
	),
	TP_fast_assign(
		DEV_ASSIGN;
		__entry->rxwi = *rxwi;
		__entry->fce_info = f;
	),
	TP_printk(DEV_PR_FMT "rxi:%08x ctl:%08x", DEV_PR_ARG,
		  le32_to_cpu(__entry->rxwi.rxinfo),
		  le32_to_cpu(__entry->rxwi.ctl))
);

/* Frame queued for tx: copies the TXWI; the skb/sta pointers are logged
 * for correlation only and are never dereferenced at print time.
 */
TRACE_EVENT(mt76x0_tx,
	TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb,
		 struct mt76_sta *sta, struct mt76_txwi *h),
	TP_ARGS(dev, skb, sta, h),
	TP_STRUCT__entry(
		DEV_ENTRY
		__field_struct(struct mt76_txwi, h)
		__field(struct sk_buff *, skb)
		__field(struct mt76_sta *, sta)
	),
	TP_fast_assign(
		DEV_ASSIGN;
		__entry->h = *h;
		__entry->skb = skb;
		__entry->sta = sta;
	),
	TP_printk(DEV_PR_FMT "skb:%p sta:%p flg:%04hx rate_ctl:%04hx "
		  "ack:%02hhx wcid:%02hhx len_ctl:%05hx", DEV_PR_ARG,
		  __entry->skb, __entry->sta,
		  le16_to_cpu(__entry->h.flags),
		  le16_to_cpu(__entry->h.rate_ctl),
		  __entry->h.ack_ctl, __entry->h.wcid,
		  le16_to_cpu(__entry->h.len_ctl))
);
222
/* DMA finished pushing a frame's data to the device. */
TRACE_EVENT(mt76x0_tx_dma_done,
	TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb),
	TP_ARGS(dev, skb),
	TP_STRUCT__entry(
		DEV_ENTRY
		__field(struct sk_buff *, skb)
	),
	TP_fast_assign(
		DEV_ASSIGN;
		__entry->skb = skb;
	),
	TP_printk(DEV_PR_FMT "%p", DEV_PR_ARG, __entry->skb)
);

/* One pass of the tx-status worker: how many statuses were reaped. */
TRACE_EVENT(mt76x0_tx_status_cleaned,
	TP_PROTO(struct mt76_dev *dev, int cleaned),
	TP_ARGS(dev, cleaned),
	TP_STRUCT__entry(
		DEV_ENTRY
		__field(int, cleaned)
	),
	TP_fast_assign(
		DEV_ASSIGN;
		__entry->cleaned = cleaned;
	),
	TP_printk(DEV_PR_FMT "%d", DEV_PR_ARG, __entry->cleaned)
);

/* Raw tx-status words as fetched from the hardware. */
TRACE_EVENT(mt76x0_tx_status,
	TP_PROTO(struct mt76_dev *dev, u32 stat1, u32 stat2),
	TP_ARGS(dev, stat1, stat2),
	TP_STRUCT__entry(
		DEV_ENTRY
		__field(u32, stat1) __field(u32, stat2)
	),
	TP_fast_assign(
		DEV_ASSIGN;
		__entry->stat1 = stat1;
		__entry->stat2 = stat2;
	),
	TP_printk(DEV_PR_FMT "%08x %08x",
		  DEV_PR_ARG, __entry->stat1, __entry->stat2)
);

/* RX DMA aggregate: frame count and whether it landed in a page.
 * Note: cnt arrives as int but is stored in a u8 field, so values
 * above 255 would be truncated in the trace record.
 */
TRACE_EVENT(mt76x0_rx_dma_aggr,
	TP_PROTO(struct mt76_dev *dev, int cnt, bool paged),
	TP_ARGS(dev, cnt, paged),
	TP_STRUCT__entry(
		DEV_ENTRY
		__field(u8, cnt)
		__field(bool, paged)
	),
	TP_fast_assign(
		DEV_ASSIGN;
		__entry->cnt = cnt;
		__entry->paged = paged;
	),
	TP_printk(DEV_PR_FMT "cnt:%d paged:%d",
		  DEV_PR_ARG, __entry->cnt, __entry->paged)
);
283
/* Single-byte key event; presumably the key/wcid index — confirm
 * against the callers in mac.c.
 */
DEFINE_EVENT(dev_simple_evt, mt76x0_set_key,
	TP_PROTO(struct mt76_dev *dev, u8 val),
	TP_ARGS(dev, val)
);

/* Shared (group) key programmed; printk labels suggest vid is the
 * phy/BSS index and key the slot offset — verify at the call site.
 */
TRACE_EVENT(mt76x0_set_shared_key,
	TP_PROTO(struct mt76_dev *dev, u8 vid, u8 key),
	TP_ARGS(dev, vid, key),
	TP_STRUCT__entry(
		DEV_ENTRY
		__field(u8, vid)
		__field(u8, key)
	),
	TP_fast_assign(
		DEV_ASSIGN;
		__entry->vid = vid;
		__entry->key = key;
	),
	TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx",
		  DEV_PR_ARG, __entry->vid, __entry->key)
);
305
306#endif
307
308#undef TRACE_INCLUDE_PATH
309#define TRACE_INCLUDE_PATH .
310#undef TRACE_INCLUDE_FILE
311#define TRACE_INCLUDE_FILE trace
312
313#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c
new file mode 100644
index 000000000000..751b49c28ae5
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/tx.c
@@ -0,0 +1,270 @@
1/*
2 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
3 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include "mt76x0.h"
16#include "trace.h"
17
18/* Take mac80211 Q id from the skb and translate it to hardware Q id */
19static u8 skb2q(struct sk_buff *skb)
20{
21 int qid = skb_get_queue_mapping(skb);
22
23 if (WARN_ON(qid >= MT_TXQ_PSD)) {
24 qid = MT_TXQ_BE;
25 skb_set_queue_mapping(skb, qid);
26 }
27
28 return q2hwq(qid);
29}
30
31static void mt76x0_tx_skb_remove_dma_overhead(struct sk_buff *skb,
32 struct ieee80211_tx_info *info)
33{
34 int pkt_len = (unsigned long)info->status.status_driver_data[0];
35
36 skb_pull(skb, sizeof(struct mt76_txwi) + 4);
37 if (ieee80211_get_hdrlen_from_skb(skb) % 4)
38 mt76x0_remove_hdr_pad(skb);
39
40 skb_trim(skb, pkt_len);
41}
42
/* Report a completed transmission back to mac80211.
 *
 * Strips the driver/DMA overhead added on the tx path, fills in a
 * minimal "ACKed" status (rate idx -1: no rate information is available
 * from this status path) and hands the skb to mac80211, which consumes it.
 */
void mt76x0_tx_status(struct mt76x0_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	mt76x0_tx_skb_remove_dma_overhead(skb, info);

	ieee80211_tx_info_clear_status(info);
	info->status.rates[0].idx = -1;
	info->flags |= IEEE80211_TX_STAT_ACK;

	/* mac_lock serializes status reporting into mac80211 */
	spin_lock(&dev->mac_lock);
	ieee80211_tx_status(dev->mt76.hw, skb);
	spin_unlock(&dev->mac_lock);
}
57
58static int mt76x0_skb_rooms(struct mt76x0_dev *dev, struct sk_buff *skb)
59{
60 int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
61 u32 need_head;
62
63 need_head = sizeof(struct mt76_txwi) + 4;
64 if (hdr_len % 4)
65 need_head += 2;
66
67 return skb_cow(skb, need_head);
68}
69
/* Build and prepend a TXWI (tx descriptor) to @skb.
 *
 * The rate comes either from the per-WCID cached rate (when the skb's
 * control rates are unset) or from the first control rate; ack policy,
 * AMPDU parameters and the packet id are encoded as well. Returns a
 * pointer to the TXWI inside the skb's headroom.
 */
static struct mt76_txwi *
mt76x0_push_txwi(struct mt76x0_dev *dev, struct sk_buff *skb,
		 struct ieee80211_sta *sta, struct mt76_wcid *wcid,
		 int pkt_len)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct mt76_txwi *txwi;
	unsigned long flags;
	u16 txwi_flags = 0;
	u32 pkt_id;
	u16 rate_ctl;
	u8 nss;

	txwi = (struct mt76_txwi *)skb_push(skb, sizeof(struct mt76_txwi));
	memset(txwi, 0, sizeof(*txwi));

	/* let rate control fill in the rates if no fixed rate is cached */
	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	/* dev->mt76.lock guards the cached per-WCID rate fields */
	spin_lock_irqsave(&dev->mt76.lock, flags);
	if (rate->idx < 0 || !rate->count) {
		rate_ctl = wcid->tx_rate;
		nss = wcid->tx_rate_nss;
	} else {
		rate_ctl = mt76x0_mac_tx_rate_val(dev, rate, &nss);
	}
	spin_unlock_irqrestore(&dev->mt76.lock, flags);

	txwi->rate_ctl = cpu_to_le16(rate_ctl);

	if (info->flags & IEEE80211_TX_CTL_LDPC)
		txwi->rate_ctl |= cpu_to_le16(MT_RXWI_RATE_LDPC);
	if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
		txwi->rate_ctl |= cpu_to_le16(MT_RXWI_RATE_STBC);
	if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
		txwi_flags |= MT_TXWI_FLAGS_MMPS;

	/* pkt_id 1 marks frames that expect a tx status report */
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
		pkt_id = 1;
	} else {
		pkt_id = 0;
	}

	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
		pkt_id |= MT_TXWI_PKTID_PROBE;

	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;

	/* Derive the BA window size from the peer's HT capabilities;
	 * rate-probing frames are sent outside of any aggregate.
	 */
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;

		ba_size <<= sta->ht_cap.ampdu_factor;
		ba_size = min_t(int, 7, ba_size - 1);
		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
			ba_size = 0;
		} else {
			txwi_flags |= MT_TXWI_FLAGS_AMPDU;
			txwi_flags |= FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
						 sta->ht_cap.ampdu_density);
		}
		txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
	}

	txwi->wcid = wcid->idx;
	txwi->flags |= cpu_to_le16(txwi_flags);
	txwi->len_ctl = cpu_to_le16(pkt_len);
	txwi->pktid = pkt_id;

	return txwi;
}
144
/* mac80211 .tx handler: prepend the TXWI and hand the frame to DMA.
 *
 * The original packet length is stashed in the skb driver data so the
 * status path can trim the frame back once hardware headers are removed.
 * On headroom/pad failure the skb is freed and the frame is dropped.
 */
void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
	       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76x0_dev *dev = hw->priv;
	struct ieee80211_vif *vif = info->control.vif;
	struct ieee80211_sta *sta = control->sta;
	struct mt76_sta *msta = NULL;
	struct mt76_wcid *wcid = dev->mon_wcid;
	struct mt76_txwi *txwi;
	int pkt_len = skb->len;
	int hw_q = skb2q(skb);

	BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
	info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len;

	if (mt76x0_skb_rooms(dev, skb) || mt76x0_insert_hdr_pad(skb)) {
		ieee80211_free_txskb(dev->mt76.hw, skb);
		return;
	}

	/* Pick the WCID: the peer's when known; the vif's group WCID for
	 * sta-less frames without a hw key while the monitor WCID has a
	 * key installed; otherwise the monitor WCID.
	 */
	if (sta) {
		msta = (struct mt76_sta *) sta->drv_priv;
		wcid = &msta->wcid;
	} else if (vif && (!info->control.hw_key && wcid->hw_key_idx != -1)) {
		struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;

		wcid = &mvif->group_wcid;
	}

	txwi = mt76x0_push_txwi(dev, skb, sta, wcid, pkt_len);

	if (mt76x0_dma_enqueue_tx(dev, skb, wcid, hw_q))
		return;

	trace_mt76x0_tx(&dev->mt76, skb, msta, txwi);
}
182
/* Delayed work: drain tx-status reports and deliver them to mac80211.
 *
 * Re-arms itself after 10 ms while statuses keep arriving, backs off to
 * 20 ms when MT76_MORE_STATS is pending, and finally clears
 * MT76_READING_STATS once the hardware has gone quiet.
 */
void mt76x0_tx_stat(struct work_struct *work)
{
	struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
					      stat_work.work);
	struct mt76_tx_status stat;
	unsigned long flags;
	int cleaned = 0;
	u8 update = 1;

	while (!test_bit(MT76_REMOVED, &dev->mt76.state)) {
		stat = mt76x0_mac_fetch_tx_status(dev);
		if (!stat.valid)
			break;

		mt76x0_send_tx_status(dev, &stat, &update);

		cleaned++;
	}
	trace_mt76x0_tx_status_cleaned(&dev->mt76, cleaned);

	/* tx_lock orders the re-arm decision against new tx activity */
	spin_lock_irqsave(&dev->tx_lock, flags);
	if (cleaned)
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(10));
	else if (test_and_clear_bit(MT76_MORE_STATS, &dev->mt76.state))
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(20));
	else
		clear_bit(MT76_READING_STATS, &dev->mt76.state);
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}
214
/* mac80211 .conf_tx: program EDCA/WMM parameters for one AC.
 *
 * cw_min/cw_max are converted to window-size exponents via fls(); the
 * values are written both to the per-AC EDCA config register and to the
 * shared WMM TXOP/AIFSN/CWMIN/CWMAX registers. Always returns 0.
 */
int mt76x0_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   u16 queue, const struct ieee80211_tx_queue_params *params)
{
	struct mt76x0_dev *dev = hw->priv;
	u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue);
	u32 val;

	/* TODO: should we do funny things with the parameters?
	 *	 See what mt76x0_set_default_edca() used to do in init.c.
	 */

	if (params->cw_min)
		cw_min = fls(params->cw_min);
	if (params->cw_max)
		cw_max = fls(params->cw_max);

	/* hardware fields are 8/4 bits wide; larger values would wrap */
	WARN_ON(params->txop > 0xff);
	WARN_ON(params->aifs > 0xf);
	WARN_ON(cw_min > 0xf);
	WARN_ON(cw_max > 0xf);

	val = FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
	      FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
	      FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
	/* TODO: based on user-controlled EnableTxBurst var vendor drv sets
	 *	 a really long txop on AC0 (see connect.c:2009) but only on
	 *	 connect? When not connected should be 0.
	 */
	if (!hw_q)
		val |= 0x60;
	else
		val |= FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop);
	mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);

	val = mt76_rr(dev, MT_WMM_TXOP(hw_q));
	val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q));
	val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q);
	mt76_wr(dev, MT_WMM_TXOP(hw_q), val);

	val = mt76_rr(dev, MT_WMM_AIFSN);
	val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q));
	val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q);
	mt76_wr(dev, MT_WMM_AIFSN, val);

	val = mt76_rr(dev, MT_WMM_CWMIN);
	val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q));
	val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q);
	mt76_wr(dev, MT_WMM_CWMIN, val);

	val = mt76_rr(dev, MT_WMM_CWMAX);
	val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q));
	val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q);
	mt76_wr(dev, MT_WMM_CWMAX, val);

	return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
new file mode 100644
index 000000000000..54ae1f113be2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -0,0 +1,381 @@
1/*
2 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/usb.h>
17
18#include "mt76x0.h"
19#include "usb.h"
20#include "trace.h"
21
/* USB IDs of supported MT76x0-family dongles. The MT7630U/MT7650U
 * entries additionally match on interface class (0xff/0x02/0xff) —
 * presumably because those are multi-function devices; confirm before
 * relying on it.
 */
static struct usb_device_id mt76x0_device_table[] = {
	{ USB_DEVICE(0x148F, 0x7610) },	/* MT7610U */
	{ USB_DEVICE(0x13B1, 0x003E) },	/* Linksys AE6000 */
	{ USB_DEVICE(0x0E8D, 0x7610) },	/* Sabrent NTWLAC */
	{ USB_DEVICE(0x7392, 0xa711) },	/* Edimax 7711mac */
	{ USB_DEVICE(0x7392, 0xb711) },	/* Edimax / Elecom  */
	{ USB_DEVICE(0x148f, 0x761a) },	/* TP-Link TL-WDN5200 */
	{ USB_DEVICE(0x148f, 0x760a) },	/* TP-Link unknown */
	{ USB_DEVICE(0x0b05, 0x17d1) },	/* Asus USB-AC51 */
	{ USB_DEVICE(0x0b05, 0x17db) },	/* Asus USB-AC50 */
	{ USB_DEVICE(0x0df6, 0x0075) },	/* Sitecom WLA-3100 */
	{ USB_DEVICE(0x2019, 0xab31) },	/* Planex GW-450D */
	{ USB_DEVICE(0x2001, 0x3d02) },	/* D-LINK DWA-171 rev B1 */
	{ USB_DEVICE(0x0586, 0x3425) },	/* Zyxel NWD6505 */
	{ USB_DEVICE(0x07b8, 0x7610) },	/* AboCom AU7212 */
	{ USB_DEVICE(0x04bb, 0x0951) },	/* I-O DATA WN-AC433UK */
	{ USB_DEVICE(0x057c, 0x8502) },	/* AVM FRITZ!WLAN USB Stick AC 430 */
	{ USB_DEVICE(0x293c, 0x5702) },	/* Comcast Xfinity KXW02AAA  */
	{ USB_DEVICE(0x20f4, 0x806b) },	/* TRENDnet TEW-806UBH  */
	{ USB_DEVICE(0x7392, 0xc711) }, /* Devolo Wifi ac Stick */
	{ USB_DEVICE(0x0df6, 0x0079) }, /* Sitecom Europe B.V. ac  Stick */
	{ USB_DEVICE(0x2357, 0x0105) }, /* TP-LINK Archer T1U */
	{ USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7630, 0xff, 0x2, 0xff)}, /* MT7630U */
	{ USB_DEVICE_AND_INTERFACE_INFO(0x0E8D, 0x7650, 0xff, 0x2, 0xff)}, /* MT7650U */
	{ 0, }
};
48
/* Allocate an URB plus a DMA-coherent buffer of @len bytes into @buf.
 *
 * NOTE(review): returns true on FAILURE (either allocation missing) and
 * false on success — inverted relative to the usual bool convention. On
 * partial failure the successfully allocated half is kept; callers are
 * expected to release it via mt76x0_usb_free_buf().
 */
bool mt76x0_usb_alloc_buf(struct mt76x0_dev *dev, size_t len,
			  struct mt76x0_dma_buf *buf)
{
	struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);

	buf->len = len;
	buf->urb = usb_alloc_urb(0, GFP_KERNEL);
	buf->buf = usb_alloc_coherent(usb_dev, buf->len, GFP_KERNEL, &buf->dma);

	return !buf->urb || !buf->buf;
}
60
61void mt76x0_usb_free_buf(struct mt76x0_dev *dev, struct mt76x0_dma_buf *buf)
62{
63 struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
64
65 usb_free_coherent(usb_dev, buf->len, buf->buf, buf->dma);
66 usb_free_urb(buf->urb);
67}
68
/* Fill and submit the bulk URB backing @buf on endpoint @ep_idx.
 *
 * @dir selects USB_DIR_IN (rx) vs. anything else (tx). The buffer's
 * pre-mapped DMA address is used (URB_NO_TRANSFER_DMA_MAP). Returns 0
 * or a negative errno from usb_submit_urb().
 */
int mt76x0_usb_submit_buf(struct mt76x0_dev *dev, int dir, int ep_idx,
			  struct mt76x0_dma_buf *buf, gfp_t gfp,
			  usb_complete_t complete_fn, void *context)
{
	struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
	unsigned pipe;
	int ret;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(usb_dev, dev->in_ep[ep_idx]);
	else
		pipe = usb_sndbulkpipe(usb_dev, dev->out_ep[ep_idx]);

	usb_fill_bulk_urb(buf->urb, usb_dev, pipe, buf->buf, buf->len,
			  complete_fn, context);
	buf->urb->transfer_dma = buf->dma;
	buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	trace_mt76x0_submit_urb(&dev->mt76, buf->urb);
	ret = usb_submit_urb(buf->urb, gfp);
	if (ret)
		dev_err(dev->mt76.dev, "Error: submit URB dir:%d ep:%d failed:%d\n",
			dir, ep_idx, ret);
	return ret;
}
94
95void mt76x0_complete_urb(struct urb *urb)
96{
97 struct completion *cmpl = urb->context;
98
99 complete(cmpl);
100}
101
/* Issue a USB vendor control request, retrying transient failures.
 *
 * Retries up to MT_VEND_REQ_MAX_RETRY times with a 5 ms pause between
 * attempts. -ENODEV marks the device removed and is returned at once.
 * Returns the number of bytes transferred (>= 0) or a negative errno.
 */
int mt76x0_vendor_request(struct mt76x0_dev *dev, const u8 req,
			  const u8 direction, const u16 val, const u16 offset,
			  void *buf, const size_t buflen)
{
	int i, ret;
	struct usb_device *usb_dev = mt76x0_to_usb_dev(dev);
	const u8 req_type = direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
	const unsigned int pipe = (direction == USB_DIR_IN) ?
		usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);

	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		ret = usb_control_msg(usb_dev, pipe, req, req_type,
				      val, offset, buf, buflen,
				      MT_VEND_REQ_TOUT_MS);
		trace_mt76x0_vend_req(&dev->mt76, pipe, req, req_type, val, offset,
				      buf, buflen, ret);

		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->mt76.state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;

		msleep(5);
	}

	dev_err(dev->mt76.dev, "Vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);

	return ret;
}
132
/* Issue the vendor "device mode reset" request; best-effort, the
 * result of the control transfer is intentionally ignored.
 */
void mt76x0_vendor_reset(struct mt76x0_dev *dev)
{
	mt76x0_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
			      MT_VEND_DEV_MODE_RESET, 0, NULL, 0);
}
138
139static u32 mt76x0_rr(struct mt76_dev *dev, u32 offset)
140{
141 struct mt76x0_dev *mdev = (struct mt76x0_dev *) dev;
142 int ret;
143 u32 val = ~0;
144
145 WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset);
146
147 mutex_lock(&mdev->usb_ctrl_mtx);
148
149 ret = mt76x0_vendor_request((struct mt76x0_dev *)dev, MT_VEND_MULTI_READ, USB_DIR_IN,
150 0, offset, mdev->data, MT_VEND_BUF);
151 if (ret == MT_VEND_BUF)
152 val = get_unaligned_le32(mdev->data);
153 else if (ret > 0)
154 dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n",
155 ret, offset);
156
157 mutex_unlock(&mdev->usb_ctrl_mtx);
158
159 trace_mt76x0_reg_read(dev, offset, val);
160 return val;
161}
162
163int mt76x0_vendor_single_wr(struct mt76x0_dev *dev, const u8 req,
164 const u16 offset, const u32 val)
165{
166 struct mt76x0_dev *mdev = dev;
167 int ret;
168
169 mutex_lock(&mdev->usb_ctrl_mtx);
170
171 ret = mt76x0_vendor_request(dev, req, USB_DIR_OUT,
172 val & 0xffff, offset, NULL, 0);
173 if (!ret)
174 ret = mt76x0_vendor_request(dev, req, USB_DIR_OUT,
175 val >> 16, offset + 2, NULL, 0);
176
177 mutex_unlock(&mdev->usb_ctrl_mtx);
178
179 return ret;
180}
181
182static void mt76x0_wr(struct mt76_dev *dev, u32 offset, u32 val)
183{
184 struct mt76x0_dev *mdev = (struct mt76x0_dev *) dev;
185 int ret;
186
187 WARN_ONCE(offset > USHRT_MAX, "write high off:%08x", offset);
188
189 mutex_lock(&mdev->usb_ctrl_mtx);
190
191 put_unaligned_le32(val, mdev->data);
192 ret = mt76x0_vendor_request(mdev, MT_VEND_MULTI_WRITE, USB_DIR_OUT,
193 0, offset, mdev->data, MT_VEND_BUF);
194 trace_mt76x0_reg_write(dev, offset, val);
195
196 mutex_unlock(&mdev->usb_ctrl_mtx);
197}
198
199static u32 mt76x0_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
200{
201 val |= mt76x0_rr(dev, offset) & ~mask;
202 mt76x0_wr(dev, offset, val);
203 return val;
204}
205
206static void mt76x0_wr_copy(struct mt76_dev *dev, u32 offset,
207 const void *data, int len)
208{
209 WARN_ONCE(offset & 3, "unaligned write copy off:%08x", offset);
210 WARN_ONCE(len & 3, "short write copy off:%08x", offset);
211
212 mt76x0_burst_write_regs((struct mt76x0_dev *) dev, offset, data, len / 4);
213}
214
215void mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr)
216{
217 mt76_wr(dev, offset, get_unaligned_le32(addr));
218 mt76_wr(dev, offset + 4, addr[4] | addr[5] << 8);
219}
220
/* Discover the interface's bulk endpoints and record their addresses
 * in dev->in_ep[] / dev->out_ep[] in discovery order.
 *
 * Note the post-increment bound checks: endpoints beyond __MT_EP_*_MAX
 * still advance the counters, so a surplus is caught by the final
 * equality test. Returns 0, or -EINVAL when the endpoint counts do not
 * match what the driver expects.
 */
static int mt76x0_assign_pipes(struct usb_interface *usb_intf,
			       struct mt76x0_dev *dev)
{
	struct usb_endpoint_descriptor *ep_desc;
	struct usb_host_interface *intf_desc = usb_intf->cur_altsetting;
	unsigned i, ep_i = 0, ep_o = 0;

	BUILD_BUG_ON(sizeof(dev->in_ep) < __MT_EP_IN_MAX);
	BUILD_BUG_ON(sizeof(dev->out_ep) < __MT_EP_OUT_MAX);

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    ep_i++ < __MT_EP_IN_MAX) {
			dev->in_ep[ep_i - 1] = usb_endpoint_num(ep_desc);
			dev->in_max_packet = usb_endpoint_maxp(ep_desc);
			/* Note: this is ignored by usb sub-system but vendor
			 * code does it. We can drop this at some point.
			 */
			dev->in_ep[ep_i - 1] |= USB_DIR_IN;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   ep_o++ < __MT_EP_OUT_MAX) {
			dev->out_ep[ep_o - 1] = usb_endpoint_num(ep_desc);
			dev->out_max_packet = usb_endpoint_maxp(ep_desc);
		}
	}

	if (ep_i != __MT_EP_IN_MAX || ep_o != __MT_EP_OUT_MAX) {
		dev_err(dev->mt76.dev, "Error: wrong pipe number in:%d out:%d\n",
			ep_i, ep_o);
		return -EINVAL;
	}

	return 0;
}
257
/* USB probe: allocate the device, wire up the register accessors,
 * bring the hardware up and register with mac80211.
 *
 * On failure everything acquired so far is unwound (note: the error
 * labels also free resources that mt76x0_alloc_device() presumably
 * created — stat_wq and the hw — confirm against that function).
 */
static int mt76x0_probe(struct usb_interface *usb_intf,
			const struct usb_device_id *id)
{
	struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
	struct mt76x0_dev *dev;
	u32 asic_rev, mac_rev;
	int ret;
	/* register accessors backed by USB vendor control transfers */
	static const struct mt76_bus_ops usb_ops = {
		.rr = mt76x0_rr,
		.wr = mt76x0_wr,
		.rmw = mt76x0_rmw,
		.copy = mt76x0_wr_copy,
	};

	dev = mt76x0_alloc_device(&usb_intf->dev);
	if (!dev)
		return -ENOMEM;

	usb_dev = usb_get_dev(usb_dev);
	usb_reset_device(usb_dev);

	usb_set_intfdata(usb_intf, dev);

	dev->mt76.bus = &usb_ops;

	ret = mt76x0_assign_pipes(usb_intf, dev);
	if (ret)
		goto err;

	/* Disable the HW, otherwise MCU fail to initalize on hot reboot */
	mt76x0_chip_onoff(dev, false, false);

	ret = mt76x0_wait_asic_ready(dev);
	if (ret)
		goto err;

	asic_rev = mt76_rr(dev, MT_ASIC_VERSION);
	mac_rev = mt76_rr(dev, MT_MAC_CSR0);
	dev_info(dev->mt76.dev, "ASIC revision: %08x MAC revision: %08x\n",
		 asic_rev, mac_rev);

	/* Note: vendor driver skips this check for MT76X0U */
	if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
		dev_warn(dev->mt76.dev, "Warning: eFUSE not present\n");

	ret = mt76x0_init_hardware(dev);
	if (ret)
		goto err;

	ret = mt76x0_register_device(dev);
	if (ret)
		goto err_hw;

	set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);

	return 0;
err_hw:
	mt76x0_cleanup(dev);
err:
	usb_set_intfdata(usb_intf, NULL);
	usb_put_dev(interface_to_usbdev(usb_intf));

	destroy_workqueue(dev->stat_wq);
	ieee80211_free_hw(dev->mt76.hw);
	return ret;
}
324
/* USB disconnect: unregister from mac80211 and release all resources.
 *
 * NOTE(review): when MT76_STATE_INITIALIZED was never set this returns
 * without releasing the hw or the usb_dev reference — presumably safe
 * because probe's error path already did the teardown; confirm there is
 * no window where both paths are skipped.
 */
static void mt76x0_disconnect(struct usb_interface *usb_intf)
{
	struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
	bool initalized = test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);

	if (!initalized)
		return;

	ieee80211_unregister_hw(dev->mt76.hw);
	mt76x0_cleanup(dev);

	usb_set_intfdata(usb_intf, NULL);
	usb_put_dev(interface_to_usbdev(usb_intf));

	destroy_workqueue(dev->stat_wq);
	ieee80211_free_hw(dev->mt76.hw);
}
342
343static int mt76x0_suspend(struct usb_interface *usb_intf, pm_message_t state)
344{
345 struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
346
347 mt76x0_cleanup(dev);
348
349 return 0;
350}
351
352static int mt76x0_resume(struct usb_interface *usb_intf)
353{
354 struct mt76x0_dev *dev = usb_get_intfdata(usb_intf);
355 int ret;
356
357 ret = mt76x0_init_hardware(dev);
358 if (ret)
359 return ret;
360
361 set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
362
363 return 0;
364}
365
MODULE_DEVICE_TABLE(usb, mt76x0_device_table);
MODULE_FIRMWARE(MT7610_FIRMWARE);
MODULE_LICENSE("GPL");

/* reset_resume reuses the full resume path since the hardware is
 * reinitialized from scratch anyway. soft_unbind/disable_hub_initiated_lpm
 * follow USB-core semantics (keep device usable during unbind; opt out
 * of hub-initiated link power management).
 */
static struct usb_driver mt76x0_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= mt76x0_device_table,
	.probe		= mt76x0_probe,
	.disconnect	= mt76x0_disconnect,
	.suspend	= mt76x0_suspend,
	.resume		= mt76x0_resume,
	.reset_resume	= mt76x0_resume,
	.soft_unbind	= 1,
	.disable_hub_initiated_lpm = 1,
};
module_usb_driver(mt76x0_driver);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h
new file mode 100644
index 000000000000..492e431390a8
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.h
@@ -0,0 +1,61 @@
1/*
2 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
#ifndef __MT76X0U_USB_H
#define __MT76X0U_USB_H

#include "mt76x0.h"

#define MT7610_FIRMWARE	"mediatek/mt7610u.bin"

/* Vendor control-transfer retry policy; see mt76x0_vendor_request(). */
#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

#define MT_VEND_DEV_MODE_RESET	1

/* Size of the bounce buffer used for single-register reads/writes. */
#define MT_VEND_BUF		sizeof(__le32)
27
28static inline struct usb_device *mt76x0_to_usb_dev(struct mt76x0_dev *mt76x0)
29{
30 return interface_to_usbdev(to_usb_interface(mt76x0->mt76.dev));
31}
32
33static inline struct usb_device *mt76_to_usb_dev(struct mt76_dev *mt76)
34{
35 return interface_to_usbdev(to_usb_interface(mt76->dev));
36}
37
38static inline bool mt76x0_urb_has_error(struct urb *urb)
39{
40 return urb->status &&
41 urb->status != -ENOENT &&
42 urb->status != -ECONNRESET &&
43 urb->status != -ESHUTDOWN;
44}
45
46bool mt76x0_usb_alloc_buf(struct mt76x0_dev *dev, size_t len,
47 struct mt76x0_dma_buf *buf);
48void mt76x0_usb_free_buf(struct mt76x0_dev *dev, struct mt76x0_dma_buf *buf);
49int mt76x0_usb_submit_buf(struct mt76x0_dev *dev, int dir, int ep_idx,
50 struct mt76x0_dma_buf *buf, gfp_t gfp,
51 usb_complete_t complete_fn, void *context);
52void mt76x0_complete_urb(struct urb *urb);
53
54int mt76x0_vendor_request(struct mt76x0_dev *dev, const u8 req,
55 const u8 direction, const u16 val, const u16 offset,
56 void *buf, const size_t buflen);
57void mt76x0_vendor_reset(struct mt76x0_dev *dev);
58int mt76x0_vendor_single_wr(struct mt76x0_dev *dev, const u8 req,
59 const u16 offset, const u32 val);
60
61#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/util.c b/drivers/net/wireless/mediatek/mt76/mt76x0/util.c
new file mode 100644
index 000000000000..7856dd760419
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/util.c
@@ -0,0 +1,42 @@
1/*
2 * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include "mt76x0.h"
15
16void mt76x0_remove_hdr_pad(struct sk_buff *skb)
17{
18 int len = ieee80211_get_hdrlen_from_skb(skb);
19
20 memmove(skb->data + 2, skb->data, len);
21 skb_pull(skb, 2);
22}
23
24int mt76x0_insert_hdr_pad(struct sk_buff *skb)
25{
26 int len = ieee80211_get_hdrlen_from_skb(skb);
27 int ret;
28
29 if (len % 4 == 0)
30 return 0;
31
32 ret = skb_cow(skb, 2);
33 if (ret)
34 return ret;
35
36 skb_push(skb, 2);
37 memmove(skb->data, skb->data + 2, len);
38
39 skb->data[len] = 0;
40 skb->data[len + 1] = 0;
41 return 0;
42}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h
index 71fcfa44fb2e..dca3209bf5f1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h
@@ -33,6 +33,9 @@
33#define MT7662_ROM_PATCH "mt7662_rom_patch.bin" 33#define MT7662_ROM_PATCH "mt7662_rom_patch.bin"
34#define MT7662_EEPROM_SIZE 512 34#define MT7662_EEPROM_SIZE 512
35 35
36#define MT7662U_FIRMWARE "mediatek/mt7662u.bin"
37#define MT7662U_ROM_PATCH "mediatek/mt7662u_rom_patch.bin"
38
36#define MT76x2_RX_RING_SIZE 256 39#define MT76x2_RX_RING_SIZE 256
37#define MT_RX_HEADROOM 32 40#define MT_RX_HEADROOM 32
38 41
@@ -55,6 +58,7 @@ struct mt76x2_mcu {
55 58
56 wait_queue_head_t wait; 59 wait_queue_head_t wait;
57 struct sk_buff_head res_q; 60 struct sk_buff_head res_q;
61 struct mt76u_buf res_u;
58 62
59 u32 msg_seq; 63 u32 msg_seq;
60}; 64};
@@ -159,6 +163,23 @@ struct mt76x2_sta {
159 int inactive_count; 163 int inactive_count;
160}; 164};
161 165
166static inline bool mt76x2_wait_for_mac(struct mt76x2_dev *dev)
167{
168 int i;
169
170 for (i = 0; i < 500; i++) {
171 switch (mt76_rr(dev, MT_MAC_CSR0)) {
172 case 0:
173 case ~0:
174 break;
175 default:
176 return true;
177 }
178 usleep_range(5000, 10000);
179 }
180 return false;
181}
182
162static inline bool is_mt7612(struct mt76x2_dev *dev) 183static inline bool is_mt7612(struct mt76x2_dev *dev)
163{ 184{
164 return mt76_chip(&dev->mt76) == 0x7612; 185 return mt76_chip(&dev->mt76) == 0x7612;
@@ -166,6 +187,14 @@ static inline bool is_mt7612(struct mt76x2_dev *dev)
166 187
167void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set); 188void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set);
168 189
190static inline bool mt76x2_channel_silent(struct mt76x2_dev *dev)
191{
192 struct ieee80211_channel *chan = dev->mt76.chandef.chan;
193
194 return ((chan->flags & IEEE80211_CHAN_RADAR) &&
195 chan->dfs_state != NL80211_DFS_AVAILABLE);
196}
197
169static inline void mt76x2_irq_enable(struct mt76x2_dev *dev, u32 mask) 198static inline void mt76x2_irq_enable(struct mt76x2_dev *dev, u32 mask)
170{ 199{
171 mt76x2_set_irq_mask(dev, 0, mask); 200 mt76x2_set_irq_mask(dev, 0, mask);
@@ -176,11 +205,29 @@ static inline void mt76x2_irq_disable(struct mt76x2_dev *dev, u32 mask)
176 mt76x2_set_irq_mask(dev, mask, 0); 205 mt76x2_set_irq_mask(dev, mask, 0);
177} 206}
178 207
208static inline bool mt76x2_wait_for_bbp(struct mt76x2_dev *dev)
209{
210 return mt76_poll_msec(dev, MT_MAC_STATUS,
211 MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
212 0, 100);
213}
214
215static inline bool wait_for_wpdma(struct mt76x2_dev *dev)
216{
217 return mt76_poll(dev, MT_WPDMA_GLO_CFG,
218 MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
219 MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
220 0, 1000);
221}
222
179extern const struct ieee80211_ops mt76x2_ops; 223extern const struct ieee80211_ops mt76x2_ops;
180 224
225extern struct ieee80211_rate mt76x2_rates[12];
226
181struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev); 227struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev);
182int mt76x2_register_device(struct mt76x2_dev *dev); 228int mt76x2_register_device(struct mt76x2_dev *dev);
183void mt76x2_init_debugfs(struct mt76x2_dev *dev); 229void mt76x2_init_debugfs(struct mt76x2_dev *dev);
230void mt76x2_init_device(struct mt76x2_dev *dev);
184 231
185irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance); 232irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance);
186void mt76x2_phy_power_on(struct mt76x2_dev *dev); 233void mt76x2_phy_power_on(struct mt76x2_dev *dev);
@@ -194,7 +241,7 @@ void mt76x2_phy_set_antenna(struct mt76x2_dev *dev);
194int mt76x2_phy_start(struct mt76x2_dev *dev); 241int mt76x2_phy_start(struct mt76x2_dev *dev);
195int mt76x2_phy_set_channel(struct mt76x2_dev *dev, 242int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
196 struct cfg80211_chan_def *chandef); 243 struct cfg80211_chan_def *chandef);
197int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain); 244int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain);
198void mt76x2_phy_calibrate(struct work_struct *work); 245void mt76x2_phy_calibrate(struct work_struct *work);
199void mt76x2_phy_set_txpower(struct mt76x2_dev *dev); 246void mt76x2_phy_set_txpower(struct mt76x2_dev *dev);
200 247
@@ -222,6 +269,7 @@ int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
222 u32 *tx_info); 269 u32 *tx_info);
223void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, 270void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
224 struct mt76_queue_entry *e, bool flush); 271 struct mt76_queue_entry *e, bool flush);
272void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val);
225 273
226void mt76x2_pre_tbtt_tasklet(unsigned long arg); 274void mt76x2_pre_tbtt_tasklet(unsigned long arg);
227 275
@@ -238,4 +286,45 @@ s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
238s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj); 286s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj);
239void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr); 287void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr);
240 288
289int mt76x2_insert_hdr_pad(struct sk_buff *skb);
290
291bool mt76x2_mac_load_tx_status(struct mt76x2_dev *dev,
292 struct mt76x2_tx_status *stat);
293void mt76x2_send_tx_status(struct mt76x2_dev *dev,
294 struct mt76x2_tx_status *stat, u8 *update);
295void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable);
296void mt76x2_init_txpower(struct mt76x2_dev *dev,
297 struct ieee80211_supported_band *sband);
298void mt76_write_mac_initvals(struct mt76x2_dev *dev);
299
300int mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
301 struct ieee80211_ampdu_params *params);
302int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
303 struct ieee80211_sta *sta);
304int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
305 struct ieee80211_sta *sta);
306void mt76x2_remove_interface(struct ieee80211_hw *hw,
307 struct ieee80211_vif *vif);
308int mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
309 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
310 struct ieee80211_key_conf *key);
311int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
312 u16 queue, const struct ieee80211_tx_queue_params *params);
313void mt76x2_configure_filter(struct ieee80211_hw *hw,
314 unsigned int changed_flags,
315 unsigned int *total_flags, u64 multicast);
316void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq);
317void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw,
318 struct ieee80211_vif *vif,
319 struct ieee80211_sta *sta);
320
321void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
322 enum nl80211_band band);
323void mt76x2_configure_tx_delay(struct mt76x2_dev *dev,
324 enum nl80211_band band, u8 bw);
325void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl);
326void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper);
327int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev);
328void mt76x2_apply_gain_adj(struct mt76x2_dev *dev);
329
241#endif 330#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_common.c
new file mode 100644
index 000000000000..a2338ba139b4
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_common.c
@@ -0,0 +1,350 @@
1/*
2 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
3 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include "mt76x2.h"
19
20void mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)
21{
22 struct mt76_txq *mtxq;
23
24 if (!txq)
25 return;
26
27 mtxq = (struct mt76_txq *) txq->drv_priv;
28 if (txq->sta) {
29 struct mt76x2_sta *sta;
30
31 sta = (struct mt76x2_sta *) txq->sta->drv_priv;
32 mtxq->wcid = &sta->wcid;
33 } else {
34 struct mt76x2_vif *mvif;
35
36 mvif = (struct mt76x2_vif *) txq->vif->drv_priv;
37 mtxq->wcid = &mvif->group_wcid;
38 }
39
40 mt76_txq_init(&dev->mt76, txq);
41}
42EXPORT_SYMBOL_GPL(mt76x2_txq_init);
43
44int mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
45 struct ieee80211_ampdu_params *params)
46{
47 enum ieee80211_ampdu_mlme_action action = params->action;
48 struct ieee80211_sta *sta = params->sta;
49 struct mt76x2_dev *dev = hw->priv;
50 struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
51 struct ieee80211_txq *txq = sta->txq[params->tid];
52 u16 tid = params->tid;
53 u16 *ssn = &params->ssn;
54 struct mt76_txq *mtxq;
55
56 if (!txq)
57 return -EINVAL;
58
59 mtxq = (struct mt76_txq *)txq->drv_priv;
60
61 switch (action) {
62 case IEEE80211_AMPDU_RX_START:
63 mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, params->buf_size);
64 mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
65 break;
66 case IEEE80211_AMPDU_RX_STOP:
67 mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
68 mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
69 BIT(16 + tid));
70 break;
71 case IEEE80211_AMPDU_TX_OPERATIONAL:
72 mtxq->aggr = true;
73 mtxq->send_bar = false;
74 ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
75 break;
76 case IEEE80211_AMPDU_TX_STOP_FLUSH:
77 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
78 mtxq->aggr = false;
79 ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
80 break;
81 case IEEE80211_AMPDU_TX_START:
82 mtxq->agg_ssn = *ssn << 4;
83 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
84 break;
85 case IEEE80211_AMPDU_TX_STOP_CONT:
86 mtxq->aggr = false;
87 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
88 break;
89 }
90
91 return 0;
92}
93EXPORT_SYMBOL_GPL(mt76x2_ampdu_action);
94
95int mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
96 struct ieee80211_sta *sta)
97{
98 struct mt76x2_dev *dev = hw->priv;
99 struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
100 struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
101 int ret = 0;
102 int idx = 0;
103 int i;
104
105 mutex_lock(&dev->mutex);
106
107 idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
108 if (idx < 0) {
109 ret = -ENOSPC;
110 goto out;
111 }
112
113 msta->vif = mvif;
114 msta->wcid.sta = 1;
115 msta->wcid.idx = idx;
116 msta->wcid.hw_key_idx = -1;
117 mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
118 mt76x2_mac_wcid_set_drop(dev, idx, false);
119 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
120 mt76x2_txq_init(dev, sta->txq[i]);
121
122 if (vif->type == NL80211_IFTYPE_AP)
123 set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
124
125 ewma_signal_init(&msta->rssi);
126
127 rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
128
129out:
130 mutex_unlock(&dev->mutex);
131
132 return ret;
133}
134EXPORT_SYMBOL_GPL(mt76x2_sta_add);
135
136int mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
137 struct ieee80211_sta *sta)
138{
139 struct mt76x2_dev *dev = hw->priv;
140 struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
141 int idx = msta->wcid.idx;
142 int i;
143
144 mutex_lock(&dev->mutex);
145 rcu_assign_pointer(dev->wcid[idx], NULL);
146 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
147 mt76_txq_remove(&dev->mt76, sta->txq[i]);
148 mt76x2_mac_wcid_set_drop(dev, idx, true);
149 mt76_wcid_free(dev->wcid_mask, idx);
150 mt76x2_mac_wcid_setup(dev, idx, 0, NULL);
151 mutex_unlock(&dev->mutex);
152
153 return 0;
154}
155EXPORT_SYMBOL_GPL(mt76x2_sta_remove);
156
157void mt76x2_remove_interface(struct ieee80211_hw *hw,
158 struct ieee80211_vif *vif)
159{
160 struct mt76x2_dev *dev = hw->priv;
161
162 mt76_txq_remove(&dev->mt76, vif->txq);
163}
164EXPORT_SYMBOL_GPL(mt76x2_remove_interface);
165
166int mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
167 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
168 struct ieee80211_key_conf *key)
169{
170 struct mt76x2_dev *dev = hw->priv;
171 struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
172 struct mt76x2_sta *msta;
173 struct mt76_wcid *wcid;
174 int idx = key->keyidx;
175 int ret;
176
177 /* fall back to sw encryption for unsupported ciphers */
178 switch (key->cipher) {
179 case WLAN_CIPHER_SUITE_WEP40:
180 case WLAN_CIPHER_SUITE_WEP104:
181 case WLAN_CIPHER_SUITE_TKIP:
182 case WLAN_CIPHER_SUITE_CCMP:
183 break;
184 default:
185 return -EOPNOTSUPP;
186 }
187
188 /*
189 * The hardware does not support per-STA RX GTK, fall back
190 * to software mode for these.
191 */
192 if ((vif->type == NL80211_IFTYPE_ADHOC ||
193 vif->type == NL80211_IFTYPE_MESH_POINT) &&
194 (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
195 key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
196 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
197 return -EOPNOTSUPP;
198
199 msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL;
200 wcid = msta ? &msta->wcid : &mvif->group_wcid;
201
202 if (cmd == SET_KEY) {
203 key->hw_key_idx = wcid->idx;
204 wcid->hw_key_idx = idx;
205 if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
206 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
207 wcid->sw_iv = true;
208 }
209 } else {
210 if (idx == wcid->hw_key_idx) {
211 wcid->hw_key_idx = -1;
212 wcid->sw_iv = true;
213 }
214
215 key = NULL;
216 }
217 mt76_wcid_key_setup(&dev->mt76, wcid, key);
218
219 if (!msta) {
220 if (key || wcid->hw_key_idx == idx) {
221 ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key);
222 if (ret)
223 return ret;
224 }
225
226 return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key);
227 }
228
229 return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key);
230}
231EXPORT_SYMBOL_GPL(mt76x2_set_key);
232
233int mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
234 u16 queue, const struct ieee80211_tx_queue_params *params)
235{
236 struct mt76x2_dev *dev = hw->priv;
237 u8 cw_min = 5, cw_max = 10, qid;
238 u32 val;
239
240 qid = dev->mt76.q_tx[queue].hw_idx;
241
242 if (params->cw_min)
243 cw_min = fls(params->cw_min);
244 if (params->cw_max)
245 cw_max = fls(params->cw_max);
246
247 val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
248 FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
249 FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
250 FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
251 mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
252
253 val = mt76_rr(dev, MT_WMM_TXOP(qid));
254 val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
255 val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
256 mt76_wr(dev, MT_WMM_TXOP(qid), val);
257
258 val = mt76_rr(dev, MT_WMM_AIFSN);
259 val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
260 val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
261 mt76_wr(dev, MT_WMM_AIFSN, val);
262
263 val = mt76_rr(dev, MT_WMM_CWMIN);
264 val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
265 val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
266 mt76_wr(dev, MT_WMM_CWMIN, val);
267
268 val = mt76_rr(dev, MT_WMM_CWMAX);
269 val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
270 val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
271 mt76_wr(dev, MT_WMM_CWMAX, val);
272
273 return 0;
274}
275EXPORT_SYMBOL_GPL(mt76x2_conf_tx);
276
277void mt76x2_configure_filter(struct ieee80211_hw *hw,
278 unsigned int changed_flags,
279 unsigned int *total_flags, u64 multicast)
280{
281 struct mt76x2_dev *dev = hw->priv;
282 u32 flags = 0;
283
284#define MT76_FILTER(_flag, _hw) do { \
285 flags |= *total_flags & FIF_##_flag; \
286 dev->rxfilter &= ~(_hw); \
287 dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
288 } while (0)
289
290 mutex_lock(&dev->mutex);
291
292 dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
293
294 MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
295 MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
296 MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
297 MT_RX_FILTR_CFG_CTS |
298 MT_RX_FILTR_CFG_CFEND |
299 MT_RX_FILTR_CFG_CFACK |
300 MT_RX_FILTR_CFG_BA |
301 MT_RX_FILTR_CFG_CTRL_RSV);
302 MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
303
304 *total_flags = flags;
305 mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
306
307 mutex_unlock(&dev->mutex);
308}
309EXPORT_SYMBOL_GPL(mt76x2_configure_filter);
310
311void mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw,
312 struct ieee80211_vif *vif,
313 struct ieee80211_sta *sta)
314{
315 struct mt76x2_dev *dev = hw->priv;
316 struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
317 struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
318 struct ieee80211_tx_rate rate = {};
319
320 if (!rates)
321 return;
322
323 rate.idx = rates->rate[0].idx;
324 rate.flags = rates->rate[0].flags;
325 mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate);
326 msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate);
327}
328EXPORT_SYMBOL_GPL(mt76x2_sta_rate_tbl_update);
329
330void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
331 struct sk_buff *skb)
332{
333 struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
334 void *rxwi = skb->data;
335
336 if (q == MT_RXQ_MCU) {
337 skb_queue_tail(&dev->mcu.res_q, skb);
338 wake_up(&dev->mcu.wait);
339 return;
340 }
341
342 skb_pull(skb, sizeof(struct mt76x2_rxwi));
343 if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
344 dev_kfree_skb(skb);
345 return;
346 }
347
348 mt76_rx(&dev->mt76, q, skb);
349}
350EXPORT_SYMBOL_GPL(mt76x2_queue_rx_skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
index 74725902e6dc..77b5ff1be05f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
@@ -153,3 +153,4 @@ void mt76x2_init_debugfs(struct mt76x2_dev *dev)
153 153
154 debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc); 154 debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc);
155} 155}
156EXPORT_SYMBOL_GPL(mt76x2_init_debugfs);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
index fd1ec4743e0b..6720a6a1313f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
@@ -66,27 +66,6 @@ mt76x2_init_tx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
66 return 0; 66 return 0;
67} 67}
68 68
69void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
70 struct sk_buff *skb)
71{
72 struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
73 void *rxwi = skb->data;
74
75 if (q == MT_RXQ_MCU) {
76 skb_queue_tail(&dev->mcu.res_q, skb);
77 wake_up(&dev->mcu.wait);
78 return;
79 }
80
81 skb_pull(skb, sizeof(struct mt76x2_rxwi));
82 if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
83 dev_kfree_skb(skb);
84 return;
85 }
86
87 mt76_rx(&dev->mt76, q, skb);
88}
89
90static int 69static int
91mt76x2_init_rx_queue(struct mt76x2_dev *dev, struct mt76_queue *q, 70mt76x2_init_rx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
92 int idx, int n_desc, int bufsize) 71 int idx, int n_desc, int bufsize)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
index e9d426bbf91a..da294558c268 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
@@ -19,34 +19,6 @@
19 19
20#include "dma.h" 20#include "dma.h"
21 21
22#define MT_TXD_INFO_LEN GENMASK(15, 0)
23#define MT_TXD_INFO_NEXT_VLD BIT(16)
24#define MT_TXD_INFO_TX_BURST BIT(17)
25#define MT_TXD_INFO_80211 BIT(19)
26#define MT_TXD_INFO_TSO BIT(20)
27#define MT_TXD_INFO_CSO BIT(21)
28#define MT_TXD_INFO_WIV BIT(24)
29#define MT_TXD_INFO_QSEL GENMASK(26, 25)
30#define MT_TXD_INFO_DPORT GENMASK(29, 27)
31#define MT_TXD_INFO_TYPE GENMASK(31, 30)
32
33#define MT_RX_FCE_INFO_LEN GENMASK(13, 0)
34#define MT_RX_FCE_INFO_SELF_GEN BIT(15)
35#define MT_RX_FCE_INFO_CMD_SEQ GENMASK(19, 16)
36#define MT_RX_FCE_INFO_EVT_TYPE GENMASK(23, 20)
37#define MT_RX_FCE_INFO_PCIE_INTR BIT(24)
38#define MT_RX_FCE_INFO_QSEL GENMASK(26, 25)
39#define MT_RX_FCE_INFO_D_PORT GENMASK(29, 27)
40#define MT_RX_FCE_INFO_TYPE GENMASK(31, 30)
41
42/* MCU request message header */
43#define MT_MCU_MSG_LEN GENMASK(15, 0)
44#define MT_MCU_MSG_CMD_SEQ GENMASK(19, 16)
45#define MT_MCU_MSG_CMD_TYPE GENMASK(26, 20)
46#define MT_MCU_MSG_PORT GENMASK(29, 27)
47#define MT_MCU_MSG_TYPE GENMASK(31, 30)
48#define MT_MCU_MSG_TYPE_CMD BIT(30)
49
50enum mt76x2_qsel { 22enum mt76x2_qsel {
51 MT_QSEL_MGMT, 23 MT_QSEL_MGMT,
52 MT_QSEL_HCCA, 24 MT_QSEL_HCCA,
@@ -54,14 +26,4 @@ enum mt76x2_qsel {
54 MT_QSEL_EDCA_2, 26 MT_QSEL_EDCA_2,
55}; 27};
56 28
57enum dma_msg_port {
58 WLAN_PORT,
59 CPU_RX_PORT,
60 CPU_TX_PORT,
61 HOST_PORT,
62 VIRTUAL_CPU_RX_PORT,
63 VIRTUAL_CPU_TX_PORT,
64 DISCARD,
65};
66
67#endif 29#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
index 95d5f7d888f0..1753bcb36356 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
@@ -40,8 +40,7 @@ mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev)
40 return 0; 40 return 0;
41} 41}
42 42
43static void 43void mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
44mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
45{ 44{
46 u16 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0); 45 u16 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);
47 46
@@ -58,6 +57,7 @@ mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
58 break; 57 break;
59 } 58 }
60} 59}
60EXPORT_SYMBOL_GPL(mt76x2_eeprom_parse_hw_cap);
61 61
62static int 62static int
63mt76x2_efuse_read(struct mt76x2_dev *dev, u16 addr, u8 *data) 63mt76x2_efuse_read(struct mt76x2_dev *dev, u16 addr, u8 *data)
@@ -415,6 +415,7 @@ void mt76x2_read_rx_gain(struct mt76x2_dev *dev)
415 415
416 dev->cal.rx.lna_gain = mt76x2_sign_extend(lna, 8); 416 dev->cal.rx.lna_gain = mt76x2_sign_extend(lna, 8);
417} 417}
418EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
418 419
419static s8 420static s8
420mt76x2_rate_power_val(u8 val) 421mt76x2_rate_power_val(u8 val)
@@ -482,6 +483,7 @@ void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t,
482 val >>= 8; 483 val >>= 8;
483 t->vht[8] = t->vht[9] = mt76x2_rate_power_val(val >> 8); 484 t->vht[8] = t->vht[9] = mt76x2_rate_power_val(val >> 8);
484} 485}
486EXPORT_SYMBOL_GPL(mt76x2_get_rate_power);
485 487
486int mt76x2_get_max_rate_power(struct mt76_rate_power *r) 488int mt76x2_get_max_rate_power(struct mt76_rate_power *r)
487{ 489{
@@ -493,6 +495,7 @@ int mt76x2_get_max_rate_power(struct mt76_rate_power *r)
493 495
494 return ret; 496 return ret;
495} 497}
498EXPORT_SYMBOL_GPL(mt76x2_get_max_rate_power);
496 499
497static void 500static void
498mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t, 501mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
@@ -600,6 +603,7 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev,
600 t->delta_bw40 = mt76x2_rate_power_val(bw40); 603 t->delta_bw40 = mt76x2_rate_power_val(bw40);
601 t->delta_bw80 = mt76x2_rate_power_val(bw80); 604 t->delta_bw80 = mt76x2_rate_power_val(bw80);
602} 605}
606EXPORT_SYMBOL_GPL(mt76x2_get_power_info);
603 607
604int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t) 608int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
605{ 609{
@@ -632,6 +636,7 @@ int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
632 636
633 return 0; 637 return 0;
634} 638}
639EXPORT_SYMBOL_GPL(mt76x2_get_temp_comp);
635 640
636bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band) 641bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band)
637{ 642{
@@ -642,6 +647,7 @@ bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band)
642 else 647 else
643 return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_2G); 648 return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_2G);
644} 649}
650EXPORT_SYMBOL_GPL(mt76x2_ext_pa_enabled);
645 651
646int mt76x2_eeprom_init(struct mt76x2_dev *dev) 652int mt76x2_eeprom_init(struct mt76x2_dev *dev)
647{ 653{
@@ -658,3 +664,6 @@ int mt76x2_eeprom_init(struct mt76x2_dev *dev)
658 664
659 return 0; 665 return 0;
660} 666}
667EXPORT_SYMBOL_GPL(mt76x2_eeprom_init);
668
669MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
index aa0b0c040375..0f3e4d2f4fee 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
@@ -155,6 +155,7 @@ void mt76x2_get_power_info(struct mt76x2_dev *dev,
155int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t); 155int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t);
156bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band); 156bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band);
157void mt76x2_read_rx_gain(struct mt76x2_dev *dev); 157void mt76x2_read_rx_gain(struct mt76x2_dev *dev);
158void mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev);
158 159
159static inline bool 160static inline bool
160mt76x2_temp_tx_alc_enabled(struct mt76x2_dev *dev) 161mt76x2_temp_tx_alc_enabled(struct mt76x2_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
index 79ab93613e06..b814391f79ac 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
@@ -19,39 +19,6 @@
19#include "mt76x2_eeprom.h" 19#include "mt76x2_eeprom.h"
20#include "mt76x2_mcu.h" 20#include "mt76x2_mcu.h"
21 21
22struct mt76x2_reg_pair {
23 u32 reg;
24 u32 value;
25};
26
27static bool
28mt76x2_wait_for_mac(struct mt76x2_dev *dev)
29{
30 int i;
31
32 for (i = 0; i < 500; i++) {
33 switch (mt76_rr(dev, MT_MAC_CSR0)) {
34 case 0:
35 case ~0:
36 break;
37 default:
38 return true;
39 }
40 usleep_range(5000, 10000);
41 }
42
43 return false;
44}
45
46static bool
47wait_for_wpdma(struct mt76x2_dev *dev)
48{
49 return mt76_poll(dev, MT_WPDMA_GLO_CFG,
50 MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
51 MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
52 0, 1000);
53}
54
55static void 22static void
56mt76x2_mac_pbf_init(struct mt76x2_dev *dev) 23mt76x2_mac_pbf_init(struct mt76x2_dev *dev)
57{ 24{
@@ -71,107 +38,6 @@ mt76x2_mac_pbf_init(struct mt76x2_dev *dev)
71} 38}
72 39
73static void 40static void
74mt76x2_write_reg_pairs(struct mt76x2_dev *dev,
75 const struct mt76x2_reg_pair *data, int len)
76{
77 while (len > 0) {
78 mt76_wr(dev, data->reg, data->value);
79 len--;
80 data++;
81 }
82}
83
84static void
85mt76_write_mac_initvals(struct mt76x2_dev *dev)
86{
87#define DEFAULT_PROT_CFG \
88 (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
89 FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
90 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \
91 MT_PROT_CFG_RTS_THRESH)
92
93#define DEFAULT_PROT_CFG_20 \
94 (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
95 FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
96 FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
97 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17))
98
99#define DEFAULT_PROT_CFG_40 \
100 (FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) | \
101 FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
102 FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
103 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f))
104
105 static const struct mt76x2_reg_pair vals[] = {
106 /* Copied from MediaTek reference source */
107 { MT_PBF_SYS_CTRL, 0x00080c00 },
108 { MT_PBF_CFG, 0x1efebcff },
109 { MT_FCE_PSE_CTRL, 0x00000001 },
110 { MT_MAC_SYS_CTRL, 0x0000000c },
111 { MT_MAX_LEN_CFG, 0x003e3f00 },
112 { MT_AMPDU_MAX_LEN_20M1S, 0xaaa99887 },
113 { MT_AMPDU_MAX_LEN_20M2S, 0x000000aa },
114 { MT_XIFS_TIME_CFG, 0x33a40d0a },
115 { MT_BKOFF_SLOT_CFG, 0x00000209 },
116 { MT_TBTT_SYNC_CFG, 0x00422010 },
117 { MT_PWR_PIN_CFG, 0x00000000 },
118 { 0x1238, 0x001700c8 },
119 { MT_TX_SW_CFG0, 0x00101001 },
120 { MT_TX_SW_CFG1, 0x00010000 },
121 { MT_TX_SW_CFG2, 0x00000000 },
122 { MT_TXOP_CTRL_CFG, 0x0400583f },
123 { MT_TX_RTS_CFG, 0x00100020 },
124 { MT_TX_TIMEOUT_CFG, 0x000a2290 },
125 { MT_TX_RETRY_CFG, 0x47f01f0f },
126 { MT_EXP_ACK_TIME, 0x002c00dc },
127 { MT_TX_PROT_CFG6, 0xe3f42004 },
128 { MT_TX_PROT_CFG7, 0xe3f42084 },
129 { MT_TX_PROT_CFG8, 0xe3f42104 },
130 { MT_PIFS_TX_CFG, 0x00060fff },
131 { MT_RX_FILTR_CFG, 0x00015f97 },
132 { MT_LEGACY_BASIC_RATE, 0x0000017f },
133 { MT_HT_BASIC_RATE, 0x00004003 },
134 { MT_PN_PAD_MODE, 0x00000003 },
135 { MT_TXOP_HLDR_ET, 0x00000002 },
136 { 0xa44, 0x00000000 },
137 { MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
138 { MT_TSO_CTRL, 0x00000000 },
139 { MT_AUX_CLK_CFG, 0x00000000 },
140 { MT_DACCLK_EN_DLY_CFG, 0x00000000 },
141 { MT_TX_ALC_CFG_4, 0x00000000 },
142 { MT_TX_ALC_VGA3, 0x00000000 },
143 { MT_TX_PWR_CFG_0, 0x3a3a3a3a },
144 { MT_TX_PWR_CFG_1, 0x3a3a3a3a },
145 { MT_TX_PWR_CFG_2, 0x3a3a3a3a },
146 { MT_TX_PWR_CFG_3, 0x3a3a3a3a },
147 { MT_TX_PWR_CFG_4, 0x3a3a3a3a },
148 { MT_TX_PWR_CFG_7, 0x3a3a3a3a },
149 { MT_TX_PWR_CFG_8, 0x0000003a },
150 { MT_TX_PWR_CFG_9, 0x0000003a },
151 { MT_EFUSE_CTRL, 0x0000d000 },
152 { MT_PAUSE_ENABLE_CONTROL1, 0x0000000a },
153 { MT_FCE_WLAN_FLOW_CONTROL1, 0x60401c18 },
154 { MT_WPDMA_DELAY_INT_CFG, 0x94ff0000 },
155 { MT_TX_SW_CFG3, 0x00000004 },
156 { MT_HT_FBK_TO_LEGACY, 0x00001818 },
157 { MT_VHT_HT_FBK_CFG1, 0xedcba980 },
158 { MT_PROT_AUTO_TX_CFG, 0x00830083 },
159 { MT_HT_CTRL_CFG, 0x000001ff },
160 };
161 struct mt76x2_reg_pair prot_vals[] = {
162 { MT_CCK_PROT_CFG, DEFAULT_PROT_CFG },
163 { MT_OFDM_PROT_CFG, DEFAULT_PROT_CFG },
164 { MT_MM20_PROT_CFG, DEFAULT_PROT_CFG_20 },
165 { MT_MM40_PROT_CFG, DEFAULT_PROT_CFG_40 },
166 { MT_GF20_PROT_CFG, DEFAULT_PROT_CFG_20 },
167 { MT_GF40_PROT_CFG, DEFAULT_PROT_CFG_40 },
168 };
169
170 mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals));
171 mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals));
172}
173
174static void
175mt76x2_fixup_xtal(struct mt76x2_dev *dev) 41mt76x2_fixup_xtal(struct mt76x2_dev *dev)
176{ 42{
177 u16 eep_val; 43 u16 eep_val;
@@ -360,41 +226,6 @@ int mt76x2_mac_start(struct mt76x2_dev *dev)
360 return 0; 226 return 0;
361} 227}
362 228
363void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force)
364{
365 bool stopped = false;
366 u32 rts_cfg;
367 int i;
368
369 mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
370
371 rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
372 mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
373
374 /* Wait for MAC to become idle */
375 for (i = 0; i < 300; i++) {
376 if ((mt76_rr(dev, MT_MAC_STATUS) &
377 (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) ||
378 mt76_rr(dev, MT_BBP(IBI, 12))) {
379 udelay(1);
380 continue;
381 }
382
383 stopped = true;
384 break;
385 }
386
387 if (force && !stopped) {
388 mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
389 mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
390
391 mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
392 mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
393 }
394
395 mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
396}
397
398void mt76x2_mac_resume(struct mt76x2_dev *dev) 229void mt76x2_mac_resume(struct mt76x2_dev *dev)
399{ 230{
400 mt76_wr(dev, MT_MAC_SYS_CTRL, 231 mt76_wr(dev, MT_MAC_SYS_CTRL,
@@ -498,45 +329,6 @@ void mt76x2_set_tx_ackto(struct mt76x2_dev *dev)
498 MT_TX_TIMEOUT_CFG_ACKTO, ackto); 329 MT_TX_TIMEOUT_CFG_ACKTO, ackto);
499} 330}
500 331
501static void
502mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable)
503{
504 u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
505
506 if (enable)
507 val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
508 MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
509 else
510 val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN |
511 MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
512
513 mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
514 udelay(20);
515}
516
517static void
518mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable)
519{
520 u32 val;
521
522 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
523
524 val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
525
526 if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
527 val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
528 mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
529 udelay(20);
530
531 val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
532 }
533
534 mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
535 udelay(20);
536
537 mt76x2_set_wlan_state(dev, enable);
538}
539
540int mt76x2_init_hardware(struct mt76x2_dev *dev) 332int mt76x2_init_hardware(struct mt76x2_dev *dev)
541{ 333{
542 static const u16 beacon_offsets[16] = { 334 static const u16 beacon_offsets[16] = {
@@ -567,11 +359,6 @@ int mt76x2_init_hardware(struct mt76x2_dev *dev)
567 tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet, 359 tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet,
568 (unsigned long) dev); 360 (unsigned long) dev);
569 361
570 dev->chainmask = 0x202;
571 dev->global_wcid.idx = 255;
572 dev->global_wcid.hw_key_idx = -1;
573 dev->slottime = 9;
574
575 val = mt76_rr(dev, MT_WPDMA_GLO_CFG); 362 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
576 val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE | 363 val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
577 MT_WPDMA_GLO_CFG_BIG_ENDIAN | 364 MT_WPDMA_GLO_CFG_BIG_ENDIAN |
@@ -663,34 +450,6 @@ static void mt76x2_regd_notifier(struct wiphy *wiphy,
663 mt76x2_dfs_set_domain(dev, request->dfs_region); 450 mt76x2_dfs_set_domain(dev, request->dfs_region);
664} 451}
665 452
666#define CCK_RATE(_idx, _rate) { \
667 .bitrate = _rate, \
668 .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
669 .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
670 .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
671}
672
673#define OFDM_RATE(_idx, _rate) { \
674 .bitrate = _rate, \
675 .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
676 .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
677}
678
679static struct ieee80211_rate mt76x2_rates[] = {
680 CCK_RATE(0, 10),
681 CCK_RATE(1, 20),
682 CCK_RATE(2, 55),
683 CCK_RATE(3, 110),
684 OFDM_RATE(0, 60),
685 OFDM_RATE(1, 90),
686 OFDM_RATE(2, 120),
687 OFDM_RATE(3, 180),
688 OFDM_RATE(4, 240),
689 OFDM_RATE(5, 360),
690 OFDM_RATE(6, 480),
691 OFDM_RATE(7, 540),
692};
693
694static const struct ieee80211_iface_limit if_limits[] = { 453static const struct ieee80211_iface_limit if_limits[] = {
695 { 454 {
696 .max = 1, 455 .max = 1,
@@ -767,37 +526,6 @@ static void mt76x2_led_set_brightness(struct led_classdev *led_cdev,
767 mt76x2_led_set_config(mt76, 0xff, 0); 526 mt76x2_led_set_config(mt76, 0xff, 0);
768} 527}
769 528
770static void
771mt76x2_init_txpower(struct mt76x2_dev *dev,
772 struct ieee80211_supported_band *sband)
773{
774 struct ieee80211_channel *chan;
775 struct mt76x2_tx_power_info txp;
776 struct mt76_rate_power t = {};
777 int target_power;
778 int i;
779
780 for (i = 0; i < sband->n_channels; i++) {
781 chan = &sband->channels[i];
782
783 mt76x2_get_power_info(dev, &txp, chan);
784
785 target_power = max_t(int, (txp.chain[0].target_power +
786 txp.chain[0].delta),
787 (txp.chain[1].target_power +
788 txp.chain[1].delta));
789
790 mt76x2_get_rate_power(dev, &t, chan);
791
792 chan->max_power = mt76x2_get_max_rate_power(&t) +
793 target_power;
794 chan->max_power /= 2;
795
796 /* convert to combined output power on 2x2 devices */
797 chan->max_power += 3;
798 }
799}
800
801int mt76x2_register_device(struct mt76x2_dev *dev) 529int mt76x2_register_device(struct mt76x2_dev *dev)
802{ 530{
803 struct ieee80211_hw *hw = mt76_hw(dev); 531 struct ieee80211_hw *hw = mt76_hw(dev);
@@ -812,20 +540,15 @@ int mt76x2_register_device(struct mt76x2_dev *dev)
812 return -ENOMEM; 540 return -ENOMEM;
813 541
814 kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size); 542 kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
543 INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
544 INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);
545
546 mt76x2_init_device(dev);
815 547
816 ret = mt76x2_init_hardware(dev); 548 ret = mt76x2_init_hardware(dev);
817 if (ret) 549 if (ret)
818 return ret; 550 return ret;
819 551
820 hw->queues = 4;
821 hw->max_rates = 1;
822 hw->max_report_rates = 7;
823 hw->max_rate_tries = 1;
824 hw->extra_tx_headroom = 2;
825
826 hw->sta_data_size = sizeof(struct mt76x2_sta);
827 hw->vif_data_size = sizeof(struct mt76x2_vif);
828
829 for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) { 552 for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
830 u8 *addr = dev->macaddr_list[i].addr; 553 u8 *addr = dev->macaddr_list[i].addr;
831 554
@@ -845,16 +568,15 @@ int mt76x2_register_device(struct mt76x2_dev *dev)
845 568
846 wiphy->reg_notifier = mt76x2_regd_notifier; 569 wiphy->reg_notifier = mt76x2_regd_notifier;
847 570
848 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS); 571 wiphy->interface_modes =
849 572 BIT(NL80211_IFTYPE_STATION) |
850 ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES); 573 BIT(NL80211_IFTYPE_AP) |
851 ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); 574#ifdef CONFIG_MAC80211_MESH
852 575 BIT(NL80211_IFTYPE_MESH_POINT) |
853 INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate); 576#endif
854 INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work); 577 BIT(NL80211_IFTYPE_ADHOC);
855 578
856 dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; 579 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
857 dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
858 580
859 mt76x2_dfs_init_detector(dev); 581 mt76x2_dfs_init_detector(dev);
860 582
@@ -862,9 +584,6 @@ int mt76x2_register_device(struct mt76x2_dev *dev)
862 dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness; 584 dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
863 dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink; 585 dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
864 586
865 /* init antenna configuration */
866 dev->mt76.antenna_mask = 3;
867
868 ret = mt76_register_device(&dev->mt76, true, mt76x2_rates, 587 ret = mt76_register_device(&dev->mt76, true, mt76x2_rates,
869 ARRAY_SIZE(mt76x2_rates)); 588 ARRAY_SIZE(mt76x2_rates));
870 if (ret) 589 if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c
new file mode 100644
index 000000000000..324b2a4b8b67
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c
@@ -0,0 +1,259 @@
1/*
2 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
3 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include "mt76x2.h"
19#include "mt76x2_eeprom.h"
20
21#define CCK_RATE(_idx, _rate) { \
22 .bitrate = _rate, \
23 .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
24 .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
25 .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
26}
27
28#define OFDM_RATE(_idx, _rate) { \
29 .bitrate = _rate, \
30 .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
31 .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
32}
33
34struct ieee80211_rate mt76x2_rates[] = {
35 CCK_RATE(0, 10),
36 CCK_RATE(1, 20),
37 CCK_RATE(2, 55),
38 CCK_RATE(3, 110),
39 OFDM_RATE(0, 60),
40 OFDM_RATE(1, 90),
41 OFDM_RATE(2, 120),
42 OFDM_RATE(3, 180),
43 OFDM_RATE(4, 240),
44 OFDM_RATE(5, 360),
45 OFDM_RATE(6, 480),
46 OFDM_RATE(7, 540),
47};
48EXPORT_SYMBOL_GPL(mt76x2_rates);
49
50struct mt76x2_reg_pair {
51 u32 reg;
52 u32 value;
53};
54
55static void
56mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable)
57{
58 u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
59
60 if (enable)
61 val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
62 MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
63 else
64 val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN |
65 MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
66
67 mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
68 udelay(20);
69}
70
71void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable)
72{
73 u32 val;
74
75 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
76
77 val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
78
79 if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
80 val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
81 mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
82 udelay(20);
83
84 val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
85 }
86
87 mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
88 udelay(20);
89
90 mt76x2_set_wlan_state(dev, enable);
91}
92EXPORT_SYMBOL_GPL(mt76x2_reset_wlan);
93
94static void
95mt76x2_write_reg_pairs(struct mt76x2_dev *dev,
96 const struct mt76x2_reg_pair *data, int len)
97{
98 while (len > 0) {
99 mt76_wr(dev, data->reg, data->value);
100 len--;
101 data++;
102 }
103}
104
105void mt76_write_mac_initvals(struct mt76x2_dev *dev)
106{
107#define DEFAULT_PROT_CFG_CCK \
108 (FIELD_PREP(MT_PROT_CFG_RATE, 0x3) | \
109 FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
110 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \
111 MT_PROT_CFG_RTS_THRESH)
112
113#define DEFAULT_PROT_CFG_OFDM \
114 (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
115 FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
116 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) | \
117 MT_PROT_CFG_RTS_THRESH)
118
119#define DEFAULT_PROT_CFG_20 \
120 (FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) | \
121 FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
122 FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
123 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17))
124
125#define DEFAULT_PROT_CFG_40 \
126 (FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) | \
127 FIELD_PREP(MT_PROT_CFG_CTRL, 1) | \
128 FIELD_PREP(MT_PROT_CFG_NAV, 1) | \
129 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f))
130
131 static const struct mt76x2_reg_pair vals[] = {
132 /* Copied from MediaTek reference source */
133 { MT_PBF_SYS_CTRL, 0x00080c00 },
134 { MT_PBF_CFG, 0x1efebcff },
135 { MT_FCE_PSE_CTRL, 0x00000001 },
136 { MT_MAC_SYS_CTRL, 0x0000000c },
137 { MT_MAX_LEN_CFG, 0x003e3f00 },
138 { MT_AMPDU_MAX_LEN_20M1S, 0xaaa99887 },
139 { MT_AMPDU_MAX_LEN_20M2S, 0x000000aa },
140 { MT_XIFS_TIME_CFG, 0x33a40d0a },
141 { MT_BKOFF_SLOT_CFG, 0x00000209 },
142 { MT_TBTT_SYNC_CFG, 0x00422010 },
143 { MT_PWR_PIN_CFG, 0x00000000 },
144 { 0x1238, 0x001700c8 },
145 { MT_TX_SW_CFG0, 0x00101001 },
146 { MT_TX_SW_CFG1, 0x00010000 },
147 { MT_TX_SW_CFG2, 0x00000000 },
148 { MT_TXOP_CTRL_CFG, 0x0400583f },
149 { MT_TX_RTS_CFG, 0x00100020 },
150 { MT_TX_TIMEOUT_CFG, 0x000a2290 },
151 { MT_TX_RETRY_CFG, 0x47f01f0f },
152 { MT_EXP_ACK_TIME, 0x002c00dc },
153 { MT_TX_PROT_CFG6, 0xe3f42004 },
154 { MT_TX_PROT_CFG7, 0xe3f42084 },
155 { MT_TX_PROT_CFG8, 0xe3f42104 },
156 { MT_PIFS_TX_CFG, 0x00060fff },
157 { MT_RX_FILTR_CFG, 0x00015f97 },
158 { MT_LEGACY_BASIC_RATE, 0x0000017f },
159 { MT_HT_BASIC_RATE, 0x00004003 },
160 { MT_PN_PAD_MODE, 0x00000003 },
161 { MT_TXOP_HLDR_ET, 0x00000002 },
162 { 0xa44, 0x00000000 },
163 { MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
164 { MT_TSO_CTRL, 0x00000000 },
165 { MT_AUX_CLK_CFG, 0x00000000 },
166 { MT_DACCLK_EN_DLY_CFG, 0x00000000 },
167 { MT_TX_ALC_CFG_4, 0x00000000 },
168 { MT_TX_ALC_VGA3, 0x00000000 },
169 { MT_TX_PWR_CFG_0, 0x3a3a3a3a },
170 { MT_TX_PWR_CFG_1, 0x3a3a3a3a },
171 { MT_TX_PWR_CFG_2, 0x3a3a3a3a },
172 { MT_TX_PWR_CFG_3, 0x3a3a3a3a },
173 { MT_TX_PWR_CFG_4, 0x3a3a3a3a },
174 { MT_TX_PWR_CFG_7, 0x3a3a3a3a },
175 { MT_TX_PWR_CFG_8, 0x0000003a },
176 { MT_TX_PWR_CFG_9, 0x0000003a },
177 { MT_EFUSE_CTRL, 0x0000d000 },
178 { MT_PAUSE_ENABLE_CONTROL1, 0x0000000a },
179 { MT_FCE_WLAN_FLOW_CONTROL1, 0x60401c18 },
180 { MT_WPDMA_DELAY_INT_CFG, 0x94ff0000 },
181 { MT_TX_SW_CFG3, 0x00000004 },
182 { MT_HT_FBK_TO_LEGACY, 0x00001818 },
183 { MT_VHT_HT_FBK_CFG1, 0xedcba980 },
184 { MT_PROT_AUTO_TX_CFG, 0x00830083 },
185 { MT_HT_CTRL_CFG, 0x000001ff },
186 };
187 struct mt76x2_reg_pair prot_vals[] = {
188 { MT_CCK_PROT_CFG, DEFAULT_PROT_CFG_CCK },
189 { MT_OFDM_PROT_CFG, DEFAULT_PROT_CFG_OFDM },
190 { MT_MM20_PROT_CFG, DEFAULT_PROT_CFG_20 },
191 { MT_MM40_PROT_CFG, DEFAULT_PROT_CFG_40 },
192 { MT_GF20_PROT_CFG, DEFAULT_PROT_CFG_20 },
193 { MT_GF40_PROT_CFG, DEFAULT_PROT_CFG_40 },
194 };
195
196 mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals));
197 mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals));
198}
199EXPORT_SYMBOL_GPL(mt76_write_mac_initvals);
200
201void mt76x2_init_device(struct mt76x2_dev *dev)
202{
203 struct ieee80211_hw *hw = mt76_hw(dev);
204
205 hw->queues = 4;
206 hw->max_rates = 1;
207 hw->max_report_rates = 7;
208 hw->max_rate_tries = 1;
209 hw->extra_tx_headroom = 2;
210
211 hw->sta_data_size = sizeof(struct mt76x2_sta);
212 hw->vif_data_size = sizeof(struct mt76x2_vif);
213
214 ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
215 ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
216
217 dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
218 dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
219
220 dev->chainmask = 0x202;
221 dev->global_wcid.idx = 255;
222 dev->global_wcid.hw_key_idx = -1;
223 dev->slottime = 9;
224
225 /* init antenna configuration */
226 dev->mt76.antenna_mask = 3;
227}
228EXPORT_SYMBOL_GPL(mt76x2_init_device);
229
230void mt76x2_init_txpower(struct mt76x2_dev *dev,
231 struct ieee80211_supported_band *sband)
232{
233 struct ieee80211_channel *chan;
234 struct mt76x2_tx_power_info txp;
235 struct mt76_rate_power t = {};
236 int target_power;
237 int i;
238
239 for (i = 0; i < sband->n_channels; i++) {
240 chan = &sband->channels[i];
241
242 mt76x2_get_power_info(dev, &txp, chan);
243
244 target_power = max_t(int, (txp.chain[0].target_power +
245 txp.chain[0].delta),
246 (txp.chain[1].target_power +
247 txp.chain[1].delta));
248
249 mt76x2_get_rate_power(dev, &t, chan);
250
251 chan->max_power = mt76x2_get_max_rate_power(&t) +
252 target_power;
253 chan->max_power /= 2;
254
255 /* convert to combined output power on 2x2 devices */
256 chan->max_power += 3;
257 }
258}
259EXPORT_SYMBOL_GPL(mt76x2_init_txpower);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
index fc9af79b3e69..23cf437d14f9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
@@ -28,515 +28,12 @@ void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
28 get_unaligned_le16(addr + 4)); 28 get_unaligned_le16(addr + 4));
29} 29}
30 30
31static int
32mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate)
33{
34 u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
35
36 switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
37 case MT_PHY_TYPE_OFDM:
38 if (idx >= 8)
39 idx = 0;
40
41 if (status->band == NL80211_BAND_2GHZ)
42 idx += 4;
43
44 status->rate_idx = idx;
45 return 0;
46 case MT_PHY_TYPE_CCK:
47 if (idx >= 8) {
48 idx -= 8;
49 status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
50 }
51
52 if (idx >= 4)
53 idx = 0;
54
55 status->rate_idx = idx;
56 return 0;
57 case MT_PHY_TYPE_HT_GF:
58 status->enc_flags |= RX_ENC_FLAG_HT_GF;
59 /* fall through */
60 case MT_PHY_TYPE_HT:
61 status->encoding = RX_ENC_HT;
62 status->rate_idx = idx;
63 break;
64 case MT_PHY_TYPE_VHT:
65 status->encoding = RX_ENC_VHT;
66 status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
67 status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
68 break;
69 default:
70 return -EINVAL;
71 }
72
73 if (rate & MT_RXWI_RATE_LDPC)
74 status->enc_flags |= RX_ENC_FLAG_LDPC;
75
76 if (rate & MT_RXWI_RATE_SGI)
77 status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
78
79 if (rate & MT_RXWI_RATE_STBC)
80 status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
81
82 switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
83 case MT_PHY_BW_20:
84 break;
85 case MT_PHY_BW_40:
86 status->bw = RATE_INFO_BW_40;
87 break;
88 case MT_PHY_BW_80:
89 status->bw = RATE_INFO_BW_80;
90 break;
91 default:
92 break;
93 }
94
95 return 0;
96}
97
98static __le16
99mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
100 const struct ieee80211_tx_rate *rate, u8 *nss_val)
101{
102 u16 rateval;
103 u8 phy, rate_idx;
104 u8 nss = 1;
105 u8 bw = 0;
106
107 if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
108 rate_idx = rate->idx;
109 nss = 1 + (rate->idx >> 4);
110 phy = MT_PHY_TYPE_VHT;
111 if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
112 bw = 2;
113 else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
114 bw = 1;
115 } else if (rate->flags & IEEE80211_TX_RC_MCS) {
116 rate_idx = rate->idx;
117 nss = 1 + (rate->idx >> 3);
118 phy = MT_PHY_TYPE_HT;
119 if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
120 phy = MT_PHY_TYPE_HT_GF;
121 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
122 bw = 1;
123 } else {
124 const struct ieee80211_rate *r;
125 int band = dev->mt76.chandef.chan->band;
126 u16 val;
127
128 r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
129 if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
130 val = r->hw_value_short;
131 else
132 val = r->hw_value;
133
134 phy = val >> 8;
135 rate_idx = val & 0xff;
136 bw = 0;
137 }
138
139 rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
140 rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
141 rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
142 if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
143 rateval |= MT_RXWI_RATE_SGI;
144
145 *nss_val = nss;
146 return cpu_to_le16(rateval);
147}
148
149void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop)
150{
151 u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
152 u32 bit = MT_WCID_DROP_MASK(idx);
153
154 /* prevent unnecessary writes */
155 if ((val & bit) != (bit * drop))
156 mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
157}
158
159void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
160 const struct ieee80211_tx_rate *rate)
161{
162 spin_lock_bh(&dev->mt76.lock);
163 wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
164 wcid->tx_rate_set = true;
165 spin_unlock_bh(&dev->mt76.lock);
166}
167
168void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
169 struct sk_buff *skb, struct mt76_wcid *wcid,
170 struct ieee80211_sta *sta)
171{
172 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
173 struct ieee80211_tx_rate *rate = &info->control.rates[0];
174 struct ieee80211_key_conf *key = info->control.hw_key;
175 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
176 u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
177 u16 txwi_flags = 0;
178 u8 nss;
179 s8 txpwr_adj, max_txpwr_adj;
180 u8 ccmp_pn[8];
181
182 memset(txwi, 0, sizeof(*txwi));
183
184 if (wcid)
185 txwi->wcid = wcid->idx;
186 else
187 txwi->wcid = 0xff;
188
189 txwi->pktid = 1;
190
191 if (wcid && wcid->sw_iv && key) {
192 u64 pn = atomic64_inc_return(&key->tx_pn);
193 ccmp_pn[0] = pn;
194 ccmp_pn[1] = pn >> 8;
195 ccmp_pn[2] = 0;
196 ccmp_pn[3] = 0x20 | (key->keyidx << 6);
197 ccmp_pn[4] = pn >> 16;
198 ccmp_pn[5] = pn >> 24;
199 ccmp_pn[6] = pn >> 32;
200 ccmp_pn[7] = pn >> 40;
201 txwi->iv = *((__le32 *)&ccmp_pn[0]);
202 txwi->eiv = *((__le32 *)&ccmp_pn[1]);
203 }
204
205 spin_lock_bh(&dev->mt76.lock);
206 if (wcid && (rate->idx < 0 || !rate->count)) {
207 txwi->rate = wcid->tx_rate;
208 max_txpwr_adj = wcid->max_txpwr_adj;
209 nss = wcid->tx_rate_nss;
210 } else {
211 txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss);
212 max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate);
213 }
214 spin_unlock_bh(&dev->mt76.lock);
215
216 txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf,
217 max_txpwr_adj);
218 txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
219
220 if (mt76xx_rev(dev) >= MT76XX_REV_E4)
221 txwi->txstream = 0x13;
222 else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
223 !(txwi->rate & cpu_to_le16(rate_ht_mask)))
224 txwi->txstream = 0x93;
225
226 if (info->flags & IEEE80211_TX_CTL_LDPC)
227 txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
228 if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
229 txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
230 if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
231 txwi_flags |= MT_TXWI_FLAGS_MMPS;
232 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
233 txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
234 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
235 txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
236 if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
237 txwi->pktid |= MT_TXWI_PKTID_PROBE;
238 if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
239 u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
240
241 ba_size <<= sta->ht_cap.ampdu_factor;
242 ba_size = min_t(int, 63, ba_size - 1);
243 if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
244 ba_size = 0;
245 txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
246
247 txwi_flags |= MT_TXWI_FLAGS_AMPDU |
248 FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
249 sta->ht_cap.ampdu_density);
250 }
251
252 if (ieee80211_is_probe_resp(hdr->frame_control) ||
253 ieee80211_is_beacon(hdr->frame_control))
254 txwi_flags |= MT_TXWI_FLAGS_TS;
255
256 txwi->flags |= cpu_to_le16(txwi_flags);
257 txwi->len_ctl = cpu_to_le16(skb->len);
258}
259
260static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len)
261{
262 int hdrlen;
263
264 if (!len)
265 return;
266
267 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
268 memmove(skb->data + len, skb->data, hdrlen);
269 skb_pull(skb, len);
270}
271
272static struct mt76x2_sta *
273mt76x2_rx_get_sta(struct mt76x2_dev *dev, u8 idx)
274{
275 struct mt76_wcid *wcid;
276
277 if (idx >= ARRAY_SIZE(dev->wcid))
278 return NULL;
279
280 wcid = rcu_dereference(dev->wcid[idx]);
281 if (!wcid)
282 return NULL;
283
284 return container_of(wcid, struct mt76x2_sta, wcid);
285}
286
287static struct mt76_wcid *
288mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, struct mt76x2_sta *sta, bool unicast)
289{
290 if (!sta)
291 return NULL;
292
293 if (unicast)
294 return &sta->wcid;
295 else
296 return &sta->vif->group_wcid;
297}
298
299int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
300 void *rxi)
301{
302 struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
303 struct mt76x2_rxwi *rxwi = rxi;
304 struct mt76x2_sta *sta;
305 u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
306 u32 ctl = le32_to_cpu(rxwi->ctl);
307 u16 rate = le16_to_cpu(rxwi->rate);
308 u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
309 bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
310 int pad_len = 0;
311 u8 pn_len;
312 u8 wcid;
313 int len;
314
315 if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
316 return -EINVAL;
317
318 if (rxinfo & MT_RXINFO_L2PAD)
319 pad_len += 2;
320
321 if (rxinfo & MT_RXINFO_DECRYPT) {
322 status->flag |= RX_FLAG_DECRYPTED;
323 status->flag |= RX_FLAG_MMIC_STRIPPED;
324 status->flag |= RX_FLAG_MIC_STRIPPED;
325 status->flag |= RX_FLAG_IV_STRIPPED;
326 }
327
328 wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
329 sta = mt76x2_rx_get_sta(dev, wcid);
330 status->wcid = mt76x2_rx_get_sta_wcid(dev, sta, unicast);
331
332 len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
333 pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
334 if (pn_len) {
335 int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
336 u8 *data = skb->data + offset;
337
338 status->iv[0] = data[7];
339 status->iv[1] = data[6];
340 status->iv[2] = data[5];
341 status->iv[3] = data[4];
342 status->iv[4] = data[1];
343 status->iv[5] = data[0];
344
345 /*
346 * Driver CCMP validation can't deal with fragments.
347 * Let mac80211 take care of it.
348 */
349 if (rxinfo & MT_RXINFO_FRAG) {
350 status->flag &= ~RX_FLAG_IV_STRIPPED;
351 } else {
352 pad_len += pn_len << 2;
353 len -= pn_len << 2;
354 }
355 }
356
357 mt76x2_remove_hdr_pad(skb, pad_len);
358
359 if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
360 status->aggr = true;
361
362 if (WARN_ON_ONCE(len > skb->len))
363 return -EINVAL;
364
365 pskb_trim(skb, len);
366 status->chains = BIT(0) | BIT(1);
367 status->chain_signal[0] = mt76x2_phy_get_rssi(dev, rxwi->rssi[0], 0);
368 status->chain_signal[1] = mt76x2_phy_get_rssi(dev, rxwi->rssi[1], 1);
369 status->signal = max(status->chain_signal[0], status->chain_signal[1]);
370 status->freq = dev->mt76.chandef.chan->center_freq;
371 status->band = dev->mt76.chandef.chan->band;
372
373 status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
374 status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);
375
376 if (sta) {
377 ewma_signal_add(&sta->rssi, status->signal);
378 sta->inactive_count = 0;
379 }
380
381 return mt76x2_mac_process_rate(status, rate);
382}
383
384static int
385mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
386 enum nl80211_band band)
387{
388 u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
389
390 txrate->idx = 0;
391 txrate->flags = 0;
392 txrate->count = 1;
393
394 switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
395 case MT_PHY_TYPE_OFDM:
396 if (band == NL80211_BAND_2GHZ)
397 idx += 4;
398
399 txrate->idx = idx;
400 return 0;
401 case MT_PHY_TYPE_CCK:
402 if (idx >= 8)
403 idx -= 8;
404
405 txrate->idx = idx;
406 return 0;
407 case MT_PHY_TYPE_HT_GF:
408 txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
409 /* fall through */
410 case MT_PHY_TYPE_HT:
411 txrate->flags |= IEEE80211_TX_RC_MCS;
412 txrate->idx = idx;
413 break;
414 case MT_PHY_TYPE_VHT:
415 txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
416 txrate->idx = idx;
417 break;
418 default:
419 return -EINVAL;
420 }
421
422 switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
423 case MT_PHY_BW_20:
424 break;
425 case MT_PHY_BW_40:
426 txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
427 break;
428 case MT_PHY_BW_80:
429 txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
430 break;
431 default:
432 return -EINVAL;
433 }
434
435 if (rate & MT_RXWI_RATE_SGI)
436 txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
437
438 return 0;
439}
440
441static void
442mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
443 struct ieee80211_tx_info *info,
444 struct mt76x2_tx_status *st, int n_frames)
445{
446 struct ieee80211_tx_rate *rate = info->status.rates;
447 int cur_idx, last_rate;
448 int i;
449
450 if (!n_frames)
451 return;
452
453 last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
454 mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate,
455 dev->mt76.chandef.chan->band);
456 if (last_rate < IEEE80211_TX_MAX_RATES - 1)
457 rate[last_rate + 1].idx = -1;
458
459 cur_idx = rate[last_rate].idx + last_rate;
460 for (i = 0; i <= last_rate; i++) {
461 rate[i].flags = rate[last_rate].flags;
462 rate[i].idx = max_t(int, 0, cur_idx - i);
463 rate[i].count = 1;
464 }
465 rate[last_rate].count = st->retry + 1 - last_rate;
466
467 info->status.ampdu_len = n_frames;
468 info->status.ampdu_ack_len = st->success ? n_frames : 0;
469
470 if (st->pktid & MT_TXWI_PKTID_PROBE)
471 info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
472
473 if (st->aggr)
474 info->flags |= IEEE80211_TX_CTL_AMPDU |
475 IEEE80211_TX_STAT_AMPDU;
476
477 if (!st->ack_req)
478 info->flags |= IEEE80211_TX_CTL_NO_ACK;
479 else if (st->success)
480 info->flags |= IEEE80211_TX_STAT_ACK;
481}
482
483static void
484mt76x2_send_tx_status(struct mt76x2_dev *dev, struct mt76x2_tx_status *stat,
485 u8 *update)
486{
487 struct ieee80211_tx_info info = {};
488 struct ieee80211_sta *sta = NULL;
489 struct mt76_wcid *wcid = NULL;
490 struct mt76x2_sta *msta = NULL;
491
492 rcu_read_lock();
493 if (stat->wcid < ARRAY_SIZE(dev->wcid))
494 wcid = rcu_dereference(dev->wcid[stat->wcid]);
495
496 if (wcid) {
497 void *priv;
498
499 priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
500 sta = container_of(priv, struct ieee80211_sta,
501 drv_priv);
502 }
503
504 if (msta && stat->aggr) {
505 u32 stat_val, stat_cache;
506
507 stat_val = stat->rate;
508 stat_val |= ((u32) stat->retry) << 16;
509 stat_cache = msta->status.rate;
510 stat_cache |= ((u32) msta->status.retry) << 16;
511
512 if (*update == 0 && stat_val == stat_cache &&
513 stat->wcid == msta->status.wcid && msta->n_frames < 32) {
514 msta->n_frames++;
515 goto out;
516 }
517
518 mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
519 msta->n_frames);
520
521 msta->status = *stat;
522 msta->n_frames = 1;
523 *update = 0;
524 } else {
525 mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
526 *update = 1;
527 }
528
529 ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
530
531out:
532 rcu_read_unlock();
533}
534
535void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq) 31void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
536{ 32{
537 struct mt76x2_tx_status stat = {}; 33 struct mt76x2_tx_status stat = {};
538 unsigned long flags; 34 unsigned long flags;
539 u8 update = 1; 35 u8 update = 1;
36 bool ret;
540 37
541 if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) 38 if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
542 return; 39 return;
@@ -544,26 +41,13 @@ void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
544 trace_mac_txstat_poll(dev); 41 trace_mac_txstat_poll(dev);
545 42
546 while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) { 43 while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
547 u32 stat1, stat2;
548
549 spin_lock_irqsave(&dev->irq_lock, flags); 44 spin_lock_irqsave(&dev->irq_lock, flags);
550 stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT); 45 ret = mt76x2_mac_load_tx_status(dev, &stat);
551 stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
552 if (!(stat1 & MT_TX_STAT_FIFO_VALID)) {
553 spin_unlock_irqrestore(&dev->irq_lock, flags);
554 break;
555 }
556
557 spin_unlock_irqrestore(&dev->irq_lock, flags); 46 spin_unlock_irqrestore(&dev->irq_lock, flags);
558 47
559 stat.valid = 1; 48 if (!ret)
560 stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS); 49 break;
561 stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR); 50
562 stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
563 stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
564 stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
565 stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
566 stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
567 trace_mac_txstat_fetch(dev, &stat); 51 trace_mac_txstat_fetch(dev, &stat);
568 52
569 if (!irq) { 53 if (!irq) {
@@ -612,104 +96,6 @@ void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
612 dev_kfree_skb_any(e->skb); 96 dev_kfree_skb_any(e->skb);
613} 97}
614 98
615static enum mt76x2_cipher_type
616mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
617{
618 memset(key_data, 0, 32);
619 if (!key)
620 return MT_CIPHER_NONE;
621
622 if (key->keylen > 32)
623 return MT_CIPHER_NONE;
624
625 memcpy(key_data, key->key, key->keylen);
626
627 switch (key->cipher) {
628 case WLAN_CIPHER_SUITE_WEP40:
629 return MT_CIPHER_WEP40;
630 case WLAN_CIPHER_SUITE_WEP104:
631 return MT_CIPHER_WEP104;
632 case WLAN_CIPHER_SUITE_TKIP:
633 return MT_CIPHER_TKIP;
634 case WLAN_CIPHER_SUITE_CCMP:
635 return MT_CIPHER_AES_CCMP;
636 default:
637 return MT_CIPHER_NONE;
638 }
639}
640
641void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
642{
643 struct mt76_wcid_addr addr = {};
644 u32 attr;
645
646 attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
647 FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
648
649 mt76_wr(dev, MT_WCID_ATTR(idx), attr);
650
651 mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
652 mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
653
654 if (idx >= 128)
655 return;
656
657 if (mac)
658 memcpy(addr.macaddr, mac, ETH_ALEN);
659
660 mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
661}
662
663int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
664 struct ieee80211_key_conf *key)
665{
666 enum mt76x2_cipher_type cipher;
667 u8 key_data[32];
668 u8 iv_data[8];
669
670 cipher = mt76x2_mac_get_key_info(key, key_data);
671 if (cipher == MT_CIPHER_NONE && key)
672 return -EOPNOTSUPP;
673
674 mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
675 mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
676
677 memset(iv_data, 0, sizeof(iv_data));
678 if (key) {
679 mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
680 !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
681 iv_data[3] = key->keyidx << 6;
682 if (cipher >= MT_CIPHER_TKIP)
683 iv_data[3] |= 0x20;
684 }
685
686 mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
687
688 return 0;
689}
690
691int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
692 struct ieee80211_key_conf *key)
693{
694 enum mt76x2_cipher_type cipher;
695 u8 key_data[32];
696 u32 val;
697
698 cipher = mt76x2_mac_get_key_info(key, key_data);
699 if (cipher == MT_CIPHER_NONE && key)
700 return -EOPNOTSUPP;
701
702 val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
703 val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
704 val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
705 mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
706
707 mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
708 sizeof(key_data));
709
710 return 0;
711}
712
713static int 99static int
714mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb) 100mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
715{ 101{
@@ -719,7 +105,7 @@ mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
719 if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x2_txwi))) 105 if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x2_txwi)))
720 return -ENOSPC; 106 return -ENOSPC;
721 107
722 mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL); 108 mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
723 109
724 mt76_wr_copy(dev, offset, &txwi, sizeof(txwi)); 110 mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
725 offset += sizeof(txwi); 111 offset += sizeof(txwi);
@@ -854,3 +240,33 @@ void mt76x2_mac_work(struct work_struct *work)
854 ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work, 240 ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
855 MT_CALIBRATE_INTERVAL); 241 MT_CALIBRATE_INTERVAL);
856} 242}
243
244void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val)
245{
246 u32 data = 0;
247
248 if (val != ~0)
249 data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
250 MT_PROT_CFG_RTS_THRESH;
251
252 mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);
253
254 mt76_rmw(dev, MT_CCK_PROT_CFG,
255 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
256 mt76_rmw(dev, MT_OFDM_PROT_CFG,
257 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
258 mt76_rmw(dev, MT_MM20_PROT_CFG,
259 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
260 mt76_rmw(dev, MT_MM40_PROT_CFG,
261 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
262 mt76_rmw(dev, MT_GF20_PROT_CFG,
263 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
264 mt76_rmw(dev, MT_GF40_PROT_CFG,
265 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
266 mt76_rmw(dev, MT_TX_PROT_CFG6,
267 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
268 mt76_rmw(dev, MT_TX_PROT_CFG7,
269 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
270 mt76_rmw(dev, MT_TX_PROT_CFG8,
271 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
272}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
index c048cd06df6b..5af0107ba748 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
@@ -166,7 +166,7 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
166 void *rxi); 166 void *rxi);
167void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi, 167void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
168 struct sk_buff *skb, struct mt76_wcid *wcid, 168 struct sk_buff *skb, struct mt76_wcid *wcid,
169 struct ieee80211_sta *sta); 169 struct ieee80211_sta *sta, int len);
170void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac); 170void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
171int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx, 171int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
172 struct ieee80211_key_conf *key); 172 struct ieee80211_key_conf *key);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
new file mode 100644
index 000000000000..6542644bc325
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
@@ -0,0 +1,699 @@
1/*
2 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
3 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include "mt76x2.h"
19
20void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force)
21{
22 bool stopped = false;
23 u32 rts_cfg;
24 int i;
25
26 mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
27
28 rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
29 mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
30
31 /* Wait for MAC to become idle */
32 for (i = 0; i < 300; i++) {
33 if ((mt76_rr(dev, MT_MAC_STATUS) &
34 (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) ||
35 mt76_rr(dev, MT_BBP(IBI, 12))) {
36 udelay(1);
37 continue;
38 }
39
40 stopped = true;
41 break;
42 }
43
44 if (force && !stopped) {
45 mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
46 mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
47
48 mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
49 mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
50 }
51
52 mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
53}
54EXPORT_SYMBOL_GPL(mt76x2_mac_stop);
55
56bool mt76x2_mac_load_tx_status(struct mt76x2_dev *dev,
57 struct mt76x2_tx_status *stat)
58{
59 u32 stat1, stat2;
60
61 stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
62 stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
63
64 stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
65 if (!stat->valid)
66 return false;
67
68 stat->success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
69 stat->aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
70 stat->ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
71 stat->wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
72 stat->rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
73
74 stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
75 stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
76
77 return true;
78}
79EXPORT_SYMBOL_GPL(mt76x2_mac_load_tx_status);
80
81static int
82mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
83 enum nl80211_band band)
84{
85 u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
86
87 txrate->idx = 0;
88 txrate->flags = 0;
89 txrate->count = 1;
90
91 switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
92 case MT_PHY_TYPE_OFDM:
93 if (band == NL80211_BAND_2GHZ)
94 idx += 4;
95
96 txrate->idx = idx;
97 return 0;
98 case MT_PHY_TYPE_CCK:
99 if (idx >= 8)
100 idx -= 8;
101
102 txrate->idx = idx;
103 return 0;
104 case MT_PHY_TYPE_HT_GF:
105 txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
106 /* fall through */
107 case MT_PHY_TYPE_HT:
108 txrate->flags |= IEEE80211_TX_RC_MCS;
109 txrate->idx = idx;
110 break;
111 case MT_PHY_TYPE_VHT:
112 txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
113 txrate->idx = idx;
114 break;
115 default:
116 return -EINVAL;
117 }
118
119 switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
120 case MT_PHY_BW_20:
121 break;
122 case MT_PHY_BW_40:
123 txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
124 break;
125 case MT_PHY_BW_80:
126 txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
127 break;
128 default:
129 return -EINVAL;
130 }
131
132 if (rate & MT_RXWI_RATE_SGI)
133 txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
134
135 return 0;
136}
137
/* Fill an ieee80211_tx_info from a hardware TX status word.
 *
 * The hardware only reports the final rate used plus the total retry
 * count, so the retry chain is reconstructed: the last slot gets the
 * reported rate, and earlier slots are back-filled with successively
 * higher rate indices (clamped at 0), one attempt each.
 * @n_frames is the number of aggregated frames this status covers.
 */
static void
mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
			  struct ieee80211_tx_info *info,
			  struct mt76x2_tx_status *st, int n_frames)
{
	struct ieee80211_tx_rate *rate = info->status.rates;
	int cur_idx, last_rate;
	int i;

	if (!n_frames)
		return;

	/* index of the slot holding the final (reported) rate */
	last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
	mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate,
				   dev->mt76.chandef.chan->band);
	/* terminate the rate chain after the last used slot */
	if (last_rate < IEEE80211_TX_MAX_RATES - 1)
		rate[last_rate + 1].idx = -1;

	/* back-fill earlier attempts: assume one rate step per retry */
	cur_idx = rate[last_rate].idx + last_rate;
	for (i = 0; i <= last_rate; i++) {
		rate[i].flags = rate[last_rate].flags;
		rate[i].idx = max_t(int, 0, cur_idx - i);
		rate[i].count = 1;
	}
	/* remaining retries all happened at the final rate */
	rate[last_rate].count = st->retry + 1 - last_rate;

	info->status.ampdu_len = n_frames;
	info->status.ampdu_ack_len = st->success ? n_frames : 0;

	if (st->pktid & MT_TXWI_PKTID_PROBE)
		info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;

	if (st->aggr)
		info->flags |= IEEE80211_TX_CTL_AMPDU |
			       IEEE80211_TX_STAT_AMPDU;

	if (!st->ack_req)
		info->flags |= IEEE80211_TX_CTL_NO_ACK;
	else if (st->success)
		info->flags |= IEEE80211_TX_STAT_ACK;
}
179
/* Report a TX status entry to mac80211.
 *
 * For aggregated traffic, consecutive identical statuses for the same
 * station are batched (up to 32 frames) into a single
 * ieee80211_tx_status_noskb() call to reduce reporting overhead.
 * @update is in/out batching state shared across calls: 0 while a batch
 * is accumulating, 1 after a non-aggregated status flushed it.
 */
void mt76x2_send_tx_status(struct mt76x2_dev *dev,
			   struct mt76x2_tx_status *stat, u8 *update)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_sta *sta = NULL;
	struct mt76_wcid *wcid = NULL;
	struct mt76x2_sta *msta = NULL;

	/* wcid table entries are RCU-protected */
	rcu_read_lock();
	if (stat->wcid < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[stat->wcid]);

	if (wcid) {
		void *priv;

		/* recover the mac80211 station from its embedded drv_priv */
		priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
		sta = container_of(priv, struct ieee80211_sta,
				   drv_priv);
	}

	if (msta && stat->aggr) {
		u32 stat_val, stat_cache;

		/* pack rate+retry into one word for a cheap comparison */
		stat_val = stat->rate;
		stat_val |= ((u32) stat->retry) << 16;
		stat_cache = msta->status.rate;
		stat_cache |= ((u32) msta->status.retry) << 16;

		/* same status as the cached one: extend the batch */
		if (*update == 0 && stat_val == stat_cache &&
		    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
			msta->n_frames++;
			goto out;
		}

		/* flush the previous batch, then start a new one */
		mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
					  msta->n_frames);

		msta->status = *stat;
		msta->n_frames = 1;
		*update = 0;
	} else {
		mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
		*update = 1;
	}

	ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76x2_send_tx_status);
231
232static enum mt76x2_cipher_type
233mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
234{
235 memset(key_data, 0, 32);
236 if (!key)
237 return MT_CIPHER_NONE;
238
239 if (key->keylen > 32)
240 return MT_CIPHER_NONE;
241
242 memcpy(key_data, key->key, key->keylen);
243
244 switch (key->cipher) {
245 case WLAN_CIPHER_SUITE_WEP40:
246 return MT_CIPHER_WEP40;
247 case WLAN_CIPHER_SUITE_WEP104:
248 return MT_CIPHER_WEP104;
249 case WLAN_CIPHER_SUITE_TKIP:
250 return MT_CIPHER_TKIP;
251 case WLAN_CIPHER_SUITE_CCMP:
252 return MT_CIPHER_AES_CCMP;
253 default:
254 return MT_CIPHER_NONE;
255 }
256}
257
258int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
259 struct ieee80211_key_conf *key)
260{
261 enum mt76x2_cipher_type cipher;
262 u8 key_data[32];
263 u32 val;
264
265 cipher = mt76x2_mac_get_key_info(key, key_data);
266 if (cipher == MT_CIPHER_NONE && key)
267 return -EOPNOTSUPP;
268
269 val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
270 val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
271 val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
272 mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
273
274 mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
275 sizeof(key_data));
276
277 return 0;
278}
279EXPORT_SYMBOL_GPL(mt76x2_mac_shared_key_setup);
280
/* Program a pairwise key for WCID slot @idx.
 *
 * Writes the cipher mode and key material, then initializes the IV
 * entry (key index in the top bits of byte 3; the ExtIV bit 0x20 is set
 * for TKIP/CCMP). A NULL @key clears the slot and zeroes the IV.
 * Returns -EOPNOTSUPP for ciphers the hardware cannot handle.
 */
int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
			    struct ieee80211_key_conf *key)
{
	enum mt76x2_cipher_type cipher;
	u8 key_data[32];
	u8 iv_data[8];

	cipher = mt76x2_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
	mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));

	memset(iv_data, 0, sizeof(iv_data));
	if (key) {
		mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
			       !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
		/* key index in bits 7:6 of IV byte 3 */
		iv_data[3] = key->keyidx << 6;
		/* ExtIV bit for TKIP/CCMP */
		if (cipher >= MT_CIPHER_TKIP)
			iv_data[3] |= 0x20;
	}

	mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_key);
309
/* Encode a mac80211 TX rate into the hardware rateval format (little
 * endian), returning the number of spatial streams via @nss_val.
 *
 * For VHT, the raw idx carries both MCS and NSS (NSS in bits 7:4);
 * for HT, NSS is derived from the MCS index (8 MCS per stream).
 * Legacy rates look up the PHY type and index from the band's bitrate
 * table hw_value (PHY in the high byte, index in the low byte).
 */
static __le16
mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
		       const struct ieee80211_tx_rate *rate, u8 *nss_val)
{
	u16 rateval;
	u8 phy, rate_idx;
	u8 nss = 1;
	u8 bw = 0;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 4);
		phy = MT_PHY_TYPE_VHT;
		if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			bw = 2;
		else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			bw = 1;
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			bw = 1;
	} else {
		const struct ieee80211_rate *r;
		int band = dev->mt76.chandef.chan->band;
		u16 val;

		r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		phy = val >> 8;
		rate_idx = val & 0xff;
		bw = 0;
	}

	rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
	rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
	rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
		rateval |= MT_RXWI_RATE_SGI;

	*nss_val = nss;
	return cpu_to_le16(rateval);
}
360
/* Cache the rate-control-selected TX rate (and its NSS) in the wcid
 * entry. Serialized against readers (e.g. mt76x2_mac_write_txwi) via
 * dev->mt76.lock.
 */
void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
			      const struct ieee80211_tx_rate *rate)
{
	spin_lock_bh(&dev->mt76.lock);
	wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
	wcid->tx_rate_set = true;
	spin_unlock_bh(&dev->mt76.lock);
}
EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_rate);
370
371void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
372 struct sk_buff *skb, struct mt76_wcid *wcid,
373 struct ieee80211_sta *sta, int len)
374{
375 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
376 struct ieee80211_tx_rate *rate = &info->control.rates[0];
377 struct ieee80211_key_conf *key = info->control.hw_key;
378 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
379 u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
380 u16 txwi_flags = 0;
381 u8 nss;
382 s8 txpwr_adj, max_txpwr_adj;
383 u8 ccmp_pn[8];
384
385 memset(txwi, 0, sizeof(*txwi));
386
387 if (wcid)
388 txwi->wcid = wcid->idx;
389 else
390 txwi->wcid = 0xff;
391
392 txwi->pktid = 1;
393
394 if (wcid && wcid->sw_iv && key) {
395 u64 pn = atomic64_inc_return(&key->tx_pn);
396 ccmp_pn[0] = pn;
397 ccmp_pn[1] = pn >> 8;
398 ccmp_pn[2] = 0;
399 ccmp_pn[3] = 0x20 | (key->keyidx << 6);
400 ccmp_pn[4] = pn >> 16;
401 ccmp_pn[5] = pn >> 24;
402 ccmp_pn[6] = pn >> 32;
403 ccmp_pn[7] = pn >> 40;
404 txwi->iv = *((__le32 *)&ccmp_pn[0]);
405 txwi->eiv = *((__le32 *)&ccmp_pn[1]);
406 }
407
408 spin_lock_bh(&dev->mt76.lock);
409 if (wcid && (rate->idx < 0 || !rate->count)) {
410 txwi->rate = wcid->tx_rate;
411 max_txpwr_adj = wcid->max_txpwr_adj;
412 nss = wcid->tx_rate_nss;
413 } else {
414 txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss);
415 max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate);
416 }
417 spin_unlock_bh(&dev->mt76.lock);
418
419 txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf,
420 max_txpwr_adj);
421 txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
422
423 if (mt76xx_rev(dev) >= MT76XX_REV_E4)
424 txwi->txstream = 0x13;
425 else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
426 !(txwi->rate & cpu_to_le16(rate_ht_mask)))
427 txwi->txstream = 0x93;
428
429 if (info->flags & IEEE80211_TX_CTL_LDPC)
430 txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
431 if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
432 txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
433 if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
434 txwi_flags |= MT_TXWI_FLAGS_MMPS;
435 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
436 txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
437 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
438 txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
439 if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
440 txwi->pktid |= MT_TXWI_PKTID_PROBE;
441 if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
442 u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
443
444 ba_size <<= sta->ht_cap.ampdu_factor;
445 ba_size = min_t(int, 63, ba_size - 1);
446 if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
447 ba_size = 0;
448 txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
449
450 txwi_flags |= MT_TXWI_FLAGS_AMPDU |
451 FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
452 sta->ht_cap.ampdu_density);
453 }
454
455 if (ieee80211_is_probe_resp(hdr->frame_control) ||
456 ieee80211_is_beacon(hdr->frame_control))
457 txwi_flags |= MT_TXWI_FLAGS_TS;
458
459 txwi->flags |= cpu_to_le16(txwi_flags);
460 txwi->len_ctl = cpu_to_le16(len);
461}
462EXPORT_SYMBOL_GPL(mt76x2_mac_write_txwi);
463
464void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop)
465{
466 u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
467 u32 bit = MT_WCID_DROP_MASK(idx);
468
469 /* prevent unnecessary writes */
470 if ((val & bit) != (bit * drop))
471 mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
472}
473EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_set_drop);
474
475void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
476{
477 struct mt76_wcid_addr addr = {};
478 u32 attr;
479
480 attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
481 FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
482
483 mt76_wr(dev, MT_WCID_ATTR(idx), attr);
484
485 mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
486 mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
487
488 if (idx >= 128)
489 return;
490
491 if (mac)
492 memcpy(addr.macaddr, mac, ETH_ALEN);
493
494 mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
495}
496EXPORT_SYMBOL_GPL(mt76x2_mac_wcid_setup);
497
/* Decode a hardware RX rate word into mt76_rx_status fields
 * (rate_idx, encoding, nss, bandwidth, guard interval, coding flags).
 * Returns 0 on success, -EINVAL for an unknown PHY mode.
 */
static int
mt76x2_mac_process_rate(struct mt76_rx_status *status, u16 rate)
{
	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);

	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
	case MT_PHY_TYPE_OFDM:
		/* clamp out-of-range indices to 0 */
		if (idx >= 8)
			idx = 0;

		/* on 2.4 GHz OFDM rates follow the four CCK rates */
		if (status->band == NL80211_BAND_2GHZ)
			idx += 4;

		status->rate_idx = idx;
		return 0;
	case MT_PHY_TYPE_CCK:
		/* indices >= 8 denote short-preamble variants */
		if (idx >= 8) {
			idx -= 8;
			status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
		}

		if (idx >= 4)
			idx = 0;

		status->rate_idx = idx;
		return 0;
	case MT_PHY_TYPE_HT_GF:
		status->enc_flags |= RX_ENC_FLAG_HT_GF;
		/* fall through */
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		status->rate_idx = idx;
		break;
	case MT_PHY_TYPE_VHT:
		/* VHT packs MCS and NSS into the index field */
		status->encoding = RX_ENC_VHT;
		status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
		status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
		break;
	default:
		return -EINVAL;
	}

	if (rate & MT_RXWI_RATE_LDPC)
		status->enc_flags |= RX_ENC_FLAG_LDPC;

	if (rate & MT_RXWI_RATE_SGI)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	if (rate & MT_RXWI_RATE_STBC)
		status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;

	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
	case MT_PHY_BW_20:
		break;
	case MT_PHY_BW_40:
		status->bw = RATE_INFO_BW_40;
		break;
	case MT_PHY_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	default:
		break;
	}

	return 0;
}
564
565static void mt76x2_remove_hdr_pad(struct sk_buff *skb, int len)
566{
567 int hdrlen;
568
569 if (!len)
570 return;
571
572 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
573 memmove(skb->data + len, skb->data, hdrlen);
574 skb_pull(skb, len);
575}
576
577int mt76x2_mac_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
578{
579 struct mt76x2_rx_freq_cal *cal = &dev->cal.rx;
580
581 rssi += cal->rssi_offset[chain];
582 rssi -= cal->lna_gain;
583
584 return rssi;
585}
586
587static struct mt76x2_sta *
588mt76x2_rx_get_sta(struct mt76x2_dev *dev, u8 idx)
589{
590 struct mt76_wcid *wcid;
591
592 if (idx >= ARRAY_SIZE(dev->wcid))
593 return NULL;
594
595 wcid = rcu_dereference(dev->wcid[idx]);
596 if (!wcid)
597 return NULL;
598
599 return container_of(wcid, struct mt76x2_sta, wcid);
600}
601
602static struct mt76_wcid *
603mt76x2_rx_get_sta_wcid(struct mt76x2_dev *dev, struct mt76x2_sta *sta,
604 bool unicast)
605{
606 if (!sta)
607 return NULL;
608
609 if (unicast)
610 return &sta->wcid;
611 else
612 return &sta->vif->group_wcid;
613}
614
/* Parse the hardware RX descriptor (rxwi) for @skb and fill in
 * mt76_rx_status (skb->cb): decryption flags, PN/IV extraction, header
 * pad removal, signal/rate info. Returns 0 on success or -EINVAL when
 * the device is stopped, the reported length is bogus, or the rate
 * cannot be decoded.
 */
int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
			  void *rxi)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
	struct mt76x2_rxwi *rxwi = rxi;
	struct mt76x2_sta *sta;
	u32 rxinfo = le32_to_cpu(rxwi->rxinfo);
	u32 ctl = le32_to_cpu(rxwi->ctl);
	u16 rate = le16_to_cpu(rxwi->rate);
	u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
	bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
	int pad_len = 0;
	u8 pn_len;
	u8 wcid;
	int len;

	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
		return -EINVAL;

	/* hardware inserts 2 bytes of L2 padding after the header */
	if (rxinfo & MT_RXINFO_L2PAD)
		pad_len += 2;

	if (rxinfo & MT_RXINFO_DECRYPT) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_MMIC_STRIPPED;
		status->flag |= RX_FLAG_MIC_STRIPPED;
		status->flag |= RX_FLAG_IV_STRIPPED;
	}

	wcid = FIELD_GET(MT_RXWI_CTL_WCID, ctl);
	sta = mt76x2_rx_get_sta(dev, wcid);
	status->wcid = mt76x2_rx_get_sta_wcid(dev, sta, unicast);

	len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
	pn_len = FIELD_GET(MT_RXINFO_PN_LEN, rxinfo);
	if (pn_len) {
		int offset = ieee80211_get_hdrlen_from_skb(skb) + pad_len;
		u8 *data = skb->data + offset;

		/* reassemble the PN for mac80211 replay checking;
		 * bytes are reordered from the on-air IV/EIV layout
		 */
		status->iv[0] = data[7];
		status->iv[1] = data[6];
		status->iv[2] = data[5];
		status->iv[3] = data[4];
		status->iv[4] = data[1];
		status->iv[5] = data[0];

		/*
		 * Driver CCMP validation can't deal with fragments.
		 * Let mac80211 take care of it.
		 */
		if (rxinfo & MT_RXINFO_FRAG) {
			status->flag &= ~RX_FLAG_IV_STRIPPED;
		} else {
			/* drop the IV/EIV bytes along with the padding */
			pad_len += pn_len << 2;
			len -= pn_len << 2;
		}
	}

	mt76x2_remove_hdr_pad(skb, pad_len);

	if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
		status->aggr = true;

	/* reported MPDU length must not exceed the actual buffer */
	if (WARN_ON_ONCE(len > skb->len))
		return -EINVAL;

	pskb_trim(skb, len);
	status->chains = BIT(0) | BIT(1);
	status->chain_signal[0] = mt76x2_mac_get_rssi(dev, rxwi->rssi[0], 0);
	status->chain_signal[1] = mt76x2_mac_get_rssi(dev, rxwi->rssi[1], 1);
	status->signal = max(status->chain_signal[0], status->chain_signal[1]);
	status->freq = dev->mt76.chandef.chan->center_freq;
	status->band = dev->mt76.chandef.chan->band;

	status->tid = FIELD_GET(MT_RXWI_TID, tid_sn);
	status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn);

	if (sta) {
		/* feed the signal average and reset inactivity tracking */
		ewma_signal_add(&sta->rssi, status->signal);
		sta->inactive_count = 0;
	}

	return mt76x2_mac_process_rate(status, rate);
}
EXPORT_SYMBOL_GPL(mt76x2_mac_process_rx);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
index 3c0ebe6d231c..680a89f8aa87 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
@@ -53,30 +53,6 @@ mt76x2_stop(struct ieee80211_hw *hw)
53 mutex_unlock(&dev->mutex); 53 mutex_unlock(&dev->mutex);
54} 54}
55 55
/* Initialize a mac80211 TX queue's driver state, binding its mt76_txq
 * to the owning station's wcid (per-STA queues) or the VIF's group wcid
 * (per-VIF queues). A NULL @txq is a no-op.
 */
static void
mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq;

	if (!txq)
		return;

	mtxq = (struct mt76_txq *) txq->drv_priv;
	if (txq->sta) {
		struct mt76x2_sta *sta;

		sta = (struct mt76x2_sta *) txq->sta->drv_priv;
		mtxq->wcid = &sta->wcid;
	} else {
		struct mt76x2_vif *mvif;

		mvif = (struct mt76x2_vif *) txq->vif->drv_priv;
		mtxq->wcid = &mvif->group_wcid;
	}

	mt76_txq_init(&dev->mt76, txq);
}
79
80static int 56static int
81mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 57mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
82{ 58{
@@ -111,14 +87,6 @@ mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
111 return 0; 87 return 0;
112} 88}
113 89
/* mac80211 remove_interface callback: tear down the interface's TX
 * queue state.
 */
static void
mt76x2_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76x2_dev *dev = hw->priv;

	mt76_txq_remove(&dev->mt76, vif->txq);
}
121
122static int 90static int
123mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef) 91mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef)
124{ 92{
@@ -194,39 +162,6 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed)
194} 162}
195 163
/* mac80211 configure_filter callback: translate FIF_* monitor flags
 * into the hardware RX filter register. For each supported flag the
 * MT76_FILTER macro records what mac80211 asked for in @flags and sets
 * the corresponding hardware filter bit when the flag is NOT requested
 * (hardware bits mean "drop"). *total_flags is written back with the
 * subset of flags actually honored.
 */
static void
mt76x2_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
			unsigned int *total_flags, u64 multicast)
{
	struct mt76x2_dev *dev = hw->priv;
	u32 flags = 0;

#define MT76_FILTER(_flag, _hw) do { \
		flags |= *total_flags & FIF_##_flag;			\
		dev->rxfilter &= ~(_hw);				\
		dev->rxfilter |= !(flags & FIF_##_flag) * (_hw);	\
	} while (0)

	mutex_lock(&dev->mutex);

	dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;

	MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
	MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
	MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
			     MT_RX_FILTR_CFG_CTS |
			     MT_RX_FILTR_CFG_CFEND |
			     MT_RX_FILTR_CFG_CFACK |
			     MT_RX_FILTR_CFG_BA |
			     MT_RX_FILTR_CFG_CTRL_RSV);
	MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);

	*total_flags = flags;
	mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);

	mutex_unlock(&dev->mutex);
}
228
229static void
230mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 165mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
231 struct ieee80211_bss_conf *info, u32 changed) 166 struct ieee80211_bss_conf *info, u32 changed)
232{ 167{
@@ -263,68 +198,6 @@ mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
263 mutex_unlock(&dev->mutex); 198 mutex_unlock(&dev->mutex);
264} 199}
265 200
/* mac80211 sta_add callback: allocate a WCID slot for the new station,
 * program its hardware entry, and initialize per-station driver state.
 * Returns -ENOSPC when the WCID table is full. The RCU-visible wcid
 * pointer is published last, after the entry is fully set up.
 */
static int
mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	       struct ieee80211_sta *sta)
{
	struct mt76x2_dev *dev = hw->priv;
	struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
	struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
	int ret = 0;
	int idx = 0;
	int i;

	mutex_lock(&dev->mutex);

	idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
	if (idx < 0) {
		ret = -ENOSPC;
		goto out;
	}

	msta->vif = mvif;
	msta->wcid.sta = 1;
	msta->wcid.idx = idx;
	msta->wcid.hw_key_idx = -1;
	mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
	mt76x2_mac_wcid_set_drop(dev, idx, false);
	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
		mt76x2_txq_init(dev, sta->txq[i]);

	/* AP mode needs powersave state tracking for this station */
	if (vif->type == NL80211_IFTYPE_AP)
		set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);

	ewma_signal_init(&msta->rssi);

	rcu_assign_pointer(dev->wcid[idx], &msta->wcid);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
306
/* mac80211 sta_remove callback: unpublish the RCU wcid pointer first,
 * then tear down the station's TX queues, enable the hardware drop bit,
 * free the WCID slot, and reset the hardware entry.
 */
static int
mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  struct ieee80211_sta *sta)
{
	struct mt76x2_dev *dev = hw->priv;
	struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
	int idx = msta->wcid.idx;
	int i;

	mutex_lock(&dev->mutex);
	rcu_assign_pointer(dev->wcid[idx], NULL);
	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
		mt76_txq_remove(&dev->mt76, sta->txq[i]);
	mt76x2_mac_wcid_set_drop(dev, idx, true);
	mt76_wcid_free(dev->wcid_mask, idx);
	mt76x2_mac_wcid_setup(dev, idx, 0, NULL);
	mutex_unlock(&dev->mutex);

	return 0;
}
327
328void 201void
329mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) 202mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
330{ 203{
@@ -336,117 +209,6 @@ mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
336 mt76x2_mac_wcid_set_drop(dev, idx, ps); 209 mt76x2_mac_wcid_set_drop(dev, idx, ps);
337} 210}
338 211
339static int
340mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
341 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
342 struct ieee80211_key_conf *key)
343{
344 struct mt76x2_dev *dev = hw->priv;
345 struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
346 struct mt76x2_sta *msta;
347 struct mt76_wcid *wcid;
348 int idx = key->keyidx;
349 int ret;
350
351 /* fall back to sw encryption for unsupported ciphers */
352 switch (key->cipher) {
353 case WLAN_CIPHER_SUITE_WEP40:
354 case WLAN_CIPHER_SUITE_WEP104:
355 case WLAN_CIPHER_SUITE_TKIP:
356 case WLAN_CIPHER_SUITE_CCMP:
357 break;
358 default:
359 return -EOPNOTSUPP;
360 }
361
362 /*
363 * The hardware does not support per-STA RX GTK, fall back
364 * to software mode for these.
365 */
366 if ((vif->type == NL80211_IFTYPE_ADHOC ||
367 vif->type == NL80211_IFTYPE_MESH_POINT) &&
368 (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
369 key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
370 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
371 return -EOPNOTSUPP;
372
373 msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL;
374 wcid = msta ? &msta->wcid : &mvif->group_wcid;
375
376 if (cmd == SET_KEY) {
377 key->hw_key_idx = wcid->idx;
378 wcid->hw_key_idx = idx;
379 if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
380 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
381 wcid->sw_iv = true;
382 }
383 } else {
384 if (idx == wcid->hw_key_idx) {
385 wcid->hw_key_idx = -1;
386 wcid->sw_iv = true;
387 }
388
389 key = NULL;
390 }
391 mt76_wcid_key_setup(&dev->mt76, wcid, key);
392
393 if (!msta) {
394 if (key || wcid->hw_key_idx == idx) {
395 ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key);
396 if (ret)
397 return ret;
398 }
399
400 return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key);
401 }
402
403 return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key);
404}
405
406static int
407mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
408 const struct ieee80211_tx_queue_params *params)
409{
410 struct mt76x2_dev *dev = hw->priv;
411 u8 cw_min = 5, cw_max = 10, qid;
412 u32 val;
413
414 qid = dev->mt76.q_tx[queue].hw_idx;
415
416 if (params->cw_min)
417 cw_min = fls(params->cw_min);
418 if (params->cw_max)
419 cw_max = fls(params->cw_max);
420
421 val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
422 FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
423 FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
424 FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
425 mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
426
427 val = mt76_rr(dev, MT_WMM_TXOP(qid));
428 val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
429 val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
430 mt76_wr(dev, MT_WMM_TXOP(qid), val);
431
432 val = mt76_rr(dev, MT_WMM_AIFSN);
433 val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
434 val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
435 mt76_wr(dev, MT_WMM_AIFSN, val);
436
437 val = mt76_rr(dev, MT_WMM_CWMIN);
438 val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
439 val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
440 mt76_wr(dev, MT_WMM_CWMIN, val);
441
442 val = mt76_rr(dev, MT_WMM_CWMAX);
443 val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
444 val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
445 mt76_wr(dev, MT_WMM_CWMAX, val);
446
447 return 0;
448}
449
450static void 212static void
451mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 213mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
452 const u8 *mac) 214 const u8 *mac)
@@ -485,75 +247,6 @@ mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm)
485 return 0; 247 return 0;
486} 248}
487 249
488static int
489mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
490 struct ieee80211_ampdu_params *params)
491{
492 enum ieee80211_ampdu_mlme_action action = params->action;
493 struct ieee80211_sta *sta = params->sta;
494 struct mt76x2_dev *dev = hw->priv;
495 struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
496 struct ieee80211_txq *txq = sta->txq[params->tid];
497 u16 tid = params->tid;
498 u16 *ssn = &params->ssn;
499 struct mt76_txq *mtxq;
500
501 if (!txq)
502 return -EINVAL;
503
504 mtxq = (struct mt76_txq *)txq->drv_priv;
505
506 switch (action) {
507 case IEEE80211_AMPDU_RX_START:
508 mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, params->buf_size);
509 mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
510 break;
511 case IEEE80211_AMPDU_RX_STOP:
512 mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
513 mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
514 BIT(16 + tid));
515 break;
516 case IEEE80211_AMPDU_TX_OPERATIONAL:
517 mtxq->aggr = true;
518 mtxq->send_bar = false;
519 ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
520 break;
521 case IEEE80211_AMPDU_TX_STOP_FLUSH:
522 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
523 mtxq->aggr = false;
524 ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
525 break;
526 case IEEE80211_AMPDU_TX_START:
527 mtxq->agg_ssn = *ssn << 4;
528 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
529 break;
530 case IEEE80211_AMPDU_TX_STOP_CONT:
531 mtxq->aggr = false;
532 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
533 break;
534 }
535
536 return 0;
537}
538
539static void
540mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
541 struct ieee80211_sta *sta)
542{
543 struct mt76x2_dev *dev = hw->priv;
544 struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
545 struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
546 struct ieee80211_tx_rate rate = {};
547
548 if (!rates)
549 return;
550
551 rate.idx = rates->rate[0].idx;
552 rate.flags = rates->rate[0].flags;
553 mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate);
554 msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate);
555}
556
557static void mt76x2_set_coverage_class(struct ieee80211_hw *hw, 250static void mt76x2_set_coverage_class(struct ieee80211_hw *hw,
558 s16 coverage_class) 251 s16 coverage_class)
559{ 252{
@@ -605,6 +298,21 @@ static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
605 return 0; 298 return 0;
606} 299}
607 300
301static int
302mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
303{
304 struct mt76x2_dev *dev = hw->priv;
305
306 if (val != ~0 && val > 0xffff)
307 return -EINVAL;
308
309 mutex_lock(&dev->mutex);
310 mt76x2_mac_set_tx_protection(dev, val);
311 mutex_unlock(&dev->mutex);
312
313 return 0;
314}
315
608const struct ieee80211_ops mt76x2_ops = { 316const struct ieee80211_ops mt76x2_ops = {
609 .tx = mt76x2_tx, 317 .tx = mt76x2_tx,
610 .start = mt76x2_start, 318 .start = mt76x2_start,
@@ -631,5 +339,6 @@ const struct ieee80211_ops mt76x2_ops = {
631 .set_tim = mt76x2_set_tim, 339 .set_tim = mt76x2_set_tim,
632 .set_antenna = mt76x2_set_antenna, 340 .set_antenna = mt76x2_set_antenna,
633 .get_antenna = mt76x2_get_antenna, 341 .get_antenna = mt76x2_get_antenna,
342 .set_rts_threshold = mt76x2_set_rts_threshold,
634}; 343};
635 344
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
index dfd36d736b06..743da57760dc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
@@ -23,23 +23,6 @@
23#include "mt76x2_dma.h" 23#include "mt76x2_dma.h"
24#include "mt76x2_eeprom.h" 24#include "mt76x2_eeprom.h"
25 25
26struct mt76x2_fw_header {
27 __le32 ilm_len;
28 __le32 dlm_len;
29 __le16 build_ver;
30 __le16 fw_ver;
31 u8 pad[4];
32 char build_time[16];
33};
34
35struct mt76x2_patch_header {
36 char build_time[16];
37 char platform[4];
38 char hw_version[4];
39 char patch_version[4];
40 u8 pad[2];
41};
42
43static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len) 26static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len)
44{ 27{
45 struct sk_buff *skb; 28 struct sk_buff *skb;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
index d7a7e83262ce..e40293f21417 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
@@ -146,6 +146,23 @@ struct mt76x2_tssi_comp {
146 u8 offset1; 146 u8 offset1;
147} __packed __aligned(4); 147} __packed __aligned(4);
148 148
149struct mt76x2_fw_header {
150 __le32 ilm_len;
151 __le32 dlm_len;
152 __le16 build_ver;
153 __le16 fw_ver;
154 u8 pad[4];
155 char build_time[16];
156};
157
158struct mt76x2_patch_header {
159 char build_time[16];
160 char platform[4];
161 char hw_version[4];
162 char patch_version[4];
163 u8 pad[2];
164};
165
149int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type, 166int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
150 u32 param); 167 u32 param);
151int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev, struct mt76x2_tssi_comp *tssi_data); 168int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev, struct mt76x2_tssi_comp *tssi_data);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
index 20ffa6a40d39..84c96c0415b6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
@@ -19,172 +19,6 @@
19#include "mt76x2_mcu.h" 19#include "mt76x2_mcu.h"
20#include "mt76x2_eeprom.h" 20#include "mt76x2_eeprom.h"
21 21
22static void
23mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
24{
25 s8 gain;
26
27 gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
28 gain -= offset / 2;
29 mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
30}
31
32static void
33mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
34{
35 s8 gain;
36
37 gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
38 gain += offset;
39 mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
40}
41
42static void
43mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
44{
45 s8 *gain_adj = dev->cal.rx.high_gain;
46
47 mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
48 mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
49
50 mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
51 mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
52}
53
54static u32
55mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
56{
57 u32 val = 0;
58
59 val |= (v1 & (BIT(6) - 1)) << 0;
60 val |= (v2 & (BIT(6) - 1)) << 8;
61 val |= (v3 & (BIT(6) - 1)) << 16;
62 val |= (v4 & (BIT(6) - 1)) << 24;
63 return val;
64}
65
66int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
67{
68 struct mt76x2_rx_freq_cal *cal = &dev->cal.rx;
69
70 rssi += cal->rssi_offset[chain];
71 rssi -= cal->lna_gain;
72
73 return rssi;
74}
75
76static void
77mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset)
78{
79 int i;
80
81 for (i = 0; i < sizeof(r->all); i++)
82 r->all[i] += offset;
83}
84
85static void
86mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
87{
88 int i;
89
90 for (i = 0; i < sizeof(r->all); i++)
91 if (r->all[i] > limit)
92 r->all[i] = limit;
93}
94
95static int
96mt76x2_get_min_rate_power(struct mt76_rate_power *r)
97{
98 int i;
99 s8 ret = 0;
100
101 for (i = 0; i < sizeof(r->all); i++) {
102 if (!r->all[i])
103 continue;
104
105 if (ret)
106 ret = min(ret, r->all[i]);
107 else
108 ret = r->all[i];
109 }
110
111 return ret;
112}
113
114
115void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
116{
117 enum nl80211_chan_width width = dev->mt76.chandef.width;
118 struct ieee80211_channel *chan = dev->mt76.chandef.chan;
119 struct mt76x2_tx_power_info txp;
120 int txp_0, txp_1, delta = 0;
121 struct mt76_rate_power t = {};
122 int base_power, gain;
123
124 mt76x2_get_power_info(dev, &txp, chan);
125
126 if (width == NL80211_CHAN_WIDTH_40)
127 delta = txp.delta_bw40;
128 else if (width == NL80211_CHAN_WIDTH_80)
129 delta = txp.delta_bw80;
130
131 mt76x2_get_rate_power(dev, &t, chan);
132 mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power);
133 mt76x2_limit_rate_power(&t, dev->txpower_conf);
134 dev->txpower_cur = mt76x2_get_max_rate_power(&t);
135
136 base_power = mt76x2_get_min_rate_power(&t);
137 delta += base_power - txp.chain[0].target_power;
138 txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta;
139 txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta;
140
141 gain = min(txp_0, txp_1);
142 if (gain < 0) {
143 base_power -= gain;
144 txp_0 -= gain;
145 txp_1 -= gain;
146 } else if (gain > 0x2f) {
147 base_power -= gain - 0x2f;
148 txp_0 = 0x2f;
149 txp_1 = 0x2f;
150 }
151
152 mt76x2_add_rate_power_offset(&t, -base_power);
153 dev->target_power = txp.chain[0].target_power;
154 dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
155 dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
156 dev->rate_power = t;
157
158 mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
159 mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
160
161 mt76_wr(dev, MT_TX_PWR_CFG_0,
162 mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2]));
163 mt76_wr(dev, MT_TX_PWR_CFG_1,
164 mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2]));
165 mt76_wr(dev, MT_TX_PWR_CFG_2,
166 mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10]));
167 mt76_wr(dev, MT_TX_PWR_CFG_3,
168 mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2]));
169 mt76_wr(dev, MT_TX_PWR_CFG_4,
170 mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0));
171 mt76_wr(dev, MT_TX_PWR_CFG_7,
172 mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8]));
173 mt76_wr(dev, MT_TX_PWR_CFG_8,
174 mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0));
175 mt76_wr(dev, MT_TX_PWR_CFG_9,
176 mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0));
177}
178
179static bool
180mt76x2_channel_silent(struct mt76x2_dev *dev)
181{
182 struct ieee80211_channel *chan = dev->mt76.chandef.chan;
183
184 return ((chan->flags & IEEE80211_CHAN_RADAR) &&
185 chan->dfs_state != NL80211_DFS_AVAILABLE);
186}
187
188static bool 22static bool
189mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev) 23mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev)
190{ 24{
@@ -243,140 +77,6 @@ mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped)
243 dev->cal.channel_cal_done = true; 77 dev->cal.channel_cal_done = true;
244} 78}
245 79
246static void
247mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, enum nl80211_band band)
248{
249 u32 pa_mode[2];
250 u32 pa_mode_adj;
251
252 if (band == NL80211_BAND_2GHZ) {
253 pa_mode[0] = 0x010055ff;
254 pa_mode[1] = 0x00550055;
255
256 mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
257 mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
258
259 if (mt76x2_ext_pa_enabled(dev, band)) {
260 mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
261 mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
262 } else {
263 mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
264 mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
265 }
266 } else {
267 pa_mode[0] = 0x0000ffff;
268 pa_mode[1] = 0x00ff00ff;
269
270 if (mt76x2_ext_pa_enabled(dev, band)) {
271 mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
272 mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
273 } else {
274 mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
275 mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
276 }
277
278 if (mt76x2_ext_pa_enabled(dev, band))
279 pa_mode_adj = 0x04000000;
280 else
281 pa_mode_adj = 0;
282
283 mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
284 mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
285 }
286
287 mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
288 mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
289 mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
290 mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
291
292 if (mt76x2_ext_pa_enabled(dev, band)) {
293 u32 val;
294
295 if (band == NL80211_BAND_2GHZ)
296 val = 0x3c3c023c;
297 else
298 val = 0x363c023c;
299
300 mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
301 mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
302 mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
303 } else {
304 if (band == NL80211_BAND_2GHZ) {
305 u32 val = 0x0f3c3c3c;
306
307 mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
308 mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
309 mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
310 } else {
311 mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
312 mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
313 mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
314 }
315 }
316}
317
318static void
319mt76x2_configure_tx_delay(struct mt76x2_dev *dev, enum nl80211_band band, u8 bw)
320{
321 u32 cfg0, cfg1;
322
323 if (mt76x2_ext_pa_enabled(dev, band)) {
324 cfg0 = bw ? 0x000b0c01 : 0x00101101;
325 cfg1 = 0x00011414;
326 } else {
327 cfg0 = bw ? 0x000b0b01 : 0x00101001;
328 cfg1 = 0x00021414;
329 }
330 mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
331 mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
332
333 mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15);
334}
335
336static void
337mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl)
338{
339 int core_val, agc_val;
340
341 switch (width) {
342 case NL80211_CHAN_WIDTH_80:
343 core_val = 3;
344 agc_val = 7;
345 break;
346 case NL80211_CHAN_WIDTH_40:
347 core_val = 2;
348 agc_val = 3;
349 break;
350 default:
351 core_val = 0;
352 agc_val = 1;
353 break;
354 }
355
356 mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
357 mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
358 mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
359 mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
360}
361
362static void
363mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper)
364{
365 switch (band) {
366 case NL80211_BAND_2GHZ:
367 mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
368 mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
369 break;
370 case NL80211_BAND_5GHZ:
371 mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
372 mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
373 break;
374 }
375
376 mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
377 primary_upper);
378}
379
380void mt76x2_phy_set_antenna(struct mt76x2_dev *dev) 80void mt76x2_phy_set_antenna(struct mt76x2_dev *dev)
381{ 81{
382 u32 val; 82 u32 val;
@@ -500,53 +200,6 @@ mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev)
500 mt76x2_phy_set_gain_val(dev); 200 mt76x2_phy_set_gain_val(dev);
501} 201}
502 202
503static int
504mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev)
505{
506 struct mt76x2_sta *sta;
507 struct mt76_wcid *wcid;
508 int i, j, min_rssi = 0;
509 s8 cur_rssi;
510
511 local_bh_disable();
512 rcu_read_lock();
513
514 for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
515 unsigned long mask = dev->wcid_mask[i];
516
517 if (!mask)
518 continue;
519
520 for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
521 if (!(mask & 1))
522 continue;
523
524 wcid = rcu_dereference(dev->wcid[j]);
525 if (!wcid)
526 continue;
527
528 sta = container_of(wcid, struct mt76x2_sta, wcid);
529 spin_lock(&dev->mt76.rx_lock);
530 if (sta->inactive_count++ < 5)
531 cur_rssi = ewma_signal_read(&sta->rssi);
532 else
533 cur_rssi = 0;
534 spin_unlock(&dev->mt76.rx_lock);
535
536 if (cur_rssi < min_rssi)
537 min_rssi = cur_rssi;
538 }
539 }
540
541 rcu_read_unlock();
542 local_bh_enable();
543
544 if (!min_rssi)
545 return -75;
546
547 return min_rssi;
548}
549
550static void 203static void
551mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev) 204mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev)
552{ 205{
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c
new file mode 100644
index 000000000000..9fd6ab4cbb94
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c
@@ -0,0 +1,349 @@
1/*
2 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
3 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include "mt76x2.h"
19#include "mt76x2_eeprom.h"
20
21static void
22mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
23{
24 s8 gain;
25
26 gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
27 gain -= offset / 2;
28 mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
29}
30
31static void
32mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
33{
34 s8 gain;
35
36 gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
37 gain += offset;
38 mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
39}
40
41void mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
42{
43 s8 *gain_adj = dev->cal.rx.high_gain;
44
45 mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
46 mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
47
48 mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
49 mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
50}
51EXPORT_SYMBOL_GPL(mt76x2_apply_gain_adj);
52
53void mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev,
54 enum nl80211_band band)
55{
56 u32 pa_mode[2];
57 u32 pa_mode_adj;
58
59 if (band == NL80211_BAND_2GHZ) {
60 pa_mode[0] = 0x010055ff;
61 pa_mode[1] = 0x00550055;
62
63 mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
64 mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
65
66 if (mt76x2_ext_pa_enabled(dev, band)) {
67 mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
68 mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
69 } else {
70 mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
71 mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
72 }
73 } else {
74 pa_mode[0] = 0x0000ffff;
75 pa_mode[1] = 0x00ff00ff;
76
77 if (mt76x2_ext_pa_enabled(dev, band)) {
78 mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
79 mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
80 } else {
81 mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
82 mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
83 }
84
85 if (mt76x2_ext_pa_enabled(dev, band))
86 pa_mode_adj = 0x04000000;
87 else
88 pa_mode_adj = 0;
89
90 mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
91 mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
92 }
93
94 mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
95 mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
96 mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
97 mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
98
99 if (mt76x2_ext_pa_enabled(dev, band)) {
100 u32 val;
101
102 if (band == NL80211_BAND_2GHZ)
103 val = 0x3c3c023c;
104 else
105 val = 0x363c023c;
106
107 mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
108 mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
109 mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
110 } else {
111 if (band == NL80211_BAND_2GHZ) {
112 u32 val = 0x0f3c3c3c;
113
114 mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
115 mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
116 mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
117 } else {
118 mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
119 mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
120 mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
121 }
122 }
123}
124EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower_regs);
125
126static void
127mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
128{
129 int i;
130
131 for (i = 0; i < sizeof(r->all); i++)
132 if (r->all[i] > limit)
133 r->all[i] = limit;
134}
135
136static u32
137mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
138{
139 u32 val = 0;
140
141 val |= (v1 & (BIT(6) - 1)) << 0;
142 val |= (v2 & (BIT(6) - 1)) << 8;
143 val |= (v3 & (BIT(6) - 1)) << 16;
144 val |= (v4 & (BIT(6) - 1)) << 24;
145 return val;
146}
147
148static void
149mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset)
150{
151 int i;
152
153 for (i = 0; i < sizeof(r->all); i++)
154 r->all[i] += offset;
155}
156
157static int
158mt76x2_get_min_rate_power(struct mt76_rate_power *r)
159{
160 int i;
161 s8 ret = 0;
162
163 for (i = 0; i < sizeof(r->all); i++) {
164 if (!r->all[i])
165 continue;
166
167 if (ret)
168 ret = min(ret, r->all[i]);
169 else
170 ret = r->all[i];
171 }
172
173 return ret;
174}
175
176void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
177{
178 enum nl80211_chan_width width = dev->mt76.chandef.width;
179 struct ieee80211_channel *chan = dev->mt76.chandef.chan;
180 struct mt76x2_tx_power_info txp;
181 int txp_0, txp_1, delta = 0;
182 struct mt76_rate_power t = {};
183 int base_power, gain;
184
185 mt76x2_get_power_info(dev, &txp, chan);
186
187 if (width == NL80211_CHAN_WIDTH_40)
188 delta = txp.delta_bw40;
189 else if (width == NL80211_CHAN_WIDTH_80)
190 delta = txp.delta_bw80;
191
192 mt76x2_get_rate_power(dev, &t, chan);
193 mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power);
194 mt76x2_limit_rate_power(&t, dev->txpower_conf);
195 dev->txpower_cur = mt76x2_get_max_rate_power(&t);
196
197 base_power = mt76x2_get_min_rate_power(&t);
198 delta += base_power - txp.chain[0].target_power;
199 txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta;
200 txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta;
201
202 gain = min(txp_0, txp_1);
203 if (gain < 0) {
204 base_power -= gain;
205 txp_0 -= gain;
206 txp_1 -= gain;
207 } else if (gain > 0x2f) {
208 base_power -= gain - 0x2f;
209 txp_0 = 0x2f;
210 txp_1 = 0x2f;
211 }
212
213 mt76x2_add_rate_power_offset(&t, -base_power);
214 dev->target_power = txp.chain[0].target_power;
215 dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
216 dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
217 dev->rate_power = t;
218
219 mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
220 mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
221
222 mt76_wr(dev, MT_TX_PWR_CFG_0,
223 mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2]));
224 mt76_wr(dev, MT_TX_PWR_CFG_1,
225 mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2]));
226 mt76_wr(dev, MT_TX_PWR_CFG_2,
227 mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10]));
228 mt76_wr(dev, MT_TX_PWR_CFG_3,
229 mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2]));
230 mt76_wr(dev, MT_TX_PWR_CFG_4,
231 mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0));
232 mt76_wr(dev, MT_TX_PWR_CFG_7,
233 mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8]));
234 mt76_wr(dev, MT_TX_PWR_CFG_8,
235 mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0));
236 mt76_wr(dev, MT_TX_PWR_CFG_9,
237 mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0));
238}
239EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower);
240
241void mt76x2_configure_tx_delay(struct mt76x2_dev *dev,
242 enum nl80211_band band, u8 bw)
243{
244 u32 cfg0, cfg1;
245
246 if (mt76x2_ext_pa_enabled(dev, band)) {
247 cfg0 = bw ? 0x000b0c01 : 0x00101101;
248 cfg1 = 0x00011414;
249 } else {
250 cfg0 = bw ? 0x000b0b01 : 0x00101001;
251 cfg1 = 0x00021414;
252 }
253 mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
254 mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
255
256 mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15);
257}
258EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay);
259
260void mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl)
261{
262 int core_val, agc_val;
263
264 switch (width) {
265 case NL80211_CHAN_WIDTH_80:
266 core_val = 3;
267 agc_val = 7;
268 break;
269 case NL80211_CHAN_WIDTH_40:
270 core_val = 2;
271 agc_val = 3;
272 break;
273 default:
274 core_val = 0;
275 agc_val = 1;
276 break;
277 }
278
279 mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
280 mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
281 mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
282 mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
283}
284EXPORT_SYMBOL_GPL(mt76x2_phy_set_bw);
285
286void mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper)
287{
288 switch (band) {
289 case NL80211_BAND_2GHZ:
290 mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
291 mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
292 break;
293 case NL80211_BAND_5GHZ:
294 mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
295 mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
296 break;
297 }
298
299 mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
300 primary_upper);
301}
302EXPORT_SYMBOL_GPL(mt76x2_phy_set_band);
303
304int mt76x2_phy_get_min_avg_rssi(struct mt76x2_dev *dev)
305{
306 struct mt76x2_sta *sta;
307 struct mt76_wcid *wcid;
308 int i, j, min_rssi = 0;
309 s8 cur_rssi;
310
311 local_bh_disable();
312 rcu_read_lock();
313
314 for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
315 unsigned long mask = dev->wcid_mask[i];
316
317 if (!mask)
318 continue;
319
320 for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
321 if (!(mask & 1))
322 continue;
323
324 wcid = rcu_dereference(dev->wcid[j]);
325 if (!wcid)
326 continue;
327
328 sta = container_of(wcid, struct mt76x2_sta, wcid);
329 spin_lock(&dev->mt76.rx_lock);
330 if (sta->inactive_count++ < 5)
331 cur_rssi = ewma_signal_read(&sta->rssi);
332 else
333 cur_rssi = 0;
334 spin_unlock(&dev->mt76.rx_lock);
335
336 if (cur_rssi < min_rssi)
337 min_rssi = cur_rssi;
338 }
339 }
340
341 rcu_read_unlock();
342 local_bh_enable();
343
344 if (!min_rssi)
345 return -75;
346
347 return min_rssi;
348}
349EXPORT_SYMBOL_GPL(mt76x2_phy_get_min_avg_rssi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
index b9c334d9e5b8..1551ea453180 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
@@ -75,6 +75,21 @@
75 75
76#define MT_XO_CTRL7 0x011c 76#define MT_XO_CTRL7 0x011c
77 77
78#define MT_USB_U3DMA_CFG 0x9018
79#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0)
80#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8)
81#define MT_USB_DMA_CFG_UDMA_TX_WL_DROP BIT(16)
82#define MT_USB_DMA_CFG_WAKE_UP_EN BIT(17)
83#define MT_USB_DMA_CFG_RX_DROP_OR_PAD BIT(18)
84#define MT_USB_DMA_CFG_TX_CLR BIT(19)
85#define MT_USB_DMA_CFG_TXOP_HALT BIT(20)
86#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21)
87#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22)
88#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23)
89#define MT_USB_DMA_CFG_EP_OUT_VALID GENMASK(29, 24)
90#define MT_USB_DMA_CFG_RX_BUSY BIT(30)
91#define MT_USB_DMA_CFG_TX_BUSY BIT(31)
92
78#define MT_WLAN_MTC_CTRL 0x10148 93#define MT_WLAN_MTC_CTRL 0x10148
79#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0) 94#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
80#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12) 95#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12)
@@ -150,6 +165,9 @@
150#define MT_TX_HW_QUEUE_MCU 8 165#define MT_TX_HW_QUEUE_MCU 8
151#define MT_TX_HW_QUEUE_MGMT 9 166#define MT_TX_HW_QUEUE_MGMT 9
152 167
168#define MT_US_CYC_CFG 0x02a4
169#define MT_US_CYC_CNT GENMASK(7, 0)
170
153#define MT_PBF_SYS_CTRL 0x0400 171#define MT_PBF_SYS_CTRL 0x0400
154#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0) 172#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0)
155#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1) 173#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1)
@@ -202,6 +220,11 @@
202 220
203#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824 221#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824
204 222
223#define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0
224#define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4
225#define MT_FCE_PDMA_GLOBAL_CONF 0x09c4
226#define MT_FCE_SKIP_FS 0x0a6c
227
205#define MT_PAUSE_ENABLE_CONTROL1 0x0a38 228#define MT_PAUSE_ENABLE_CONTROL1 0x0a38
206 229
207#define MT_MAC_CSR0 0x1000 230#define MT_MAC_CSR0 0x1000
@@ -214,6 +237,7 @@
214 237
215#define MT_MAC_ADDR_DW0 0x1008 238#define MT_MAC_ADDR_DW0 0x1008
216#define MT_MAC_ADDR_DW1 0x100c 239#define MT_MAC_ADDR_DW1 0x100c
240#define MT_MAC_ADDR_DW1_U2ME_MASK GENMASK(23, 16)
217 241
218#define MT_MAC_BSSID_DW0 0x1010 242#define MT_MAC_BSSID_DW0 0x1010
219#define MT_MAC_BSSID_DW1 0x1014 243#define MT_MAC_BSSID_DW1 0x1014
@@ -351,6 +375,7 @@
351#define MT_TX_TIMEOUT_CFG_ACKTO GENMASK(15, 8) 375#define MT_TX_TIMEOUT_CFG_ACKTO GENMASK(15, 8)
352 376
353#define MT_TX_RETRY_CFG 0x134c 377#define MT_TX_RETRY_CFG 0x134c
378#define MT_TX_LINK_CFG 0x1350
354#define MT_VHT_HT_FBK_CFG1 0x1358 379#define MT_VHT_HT_FBK_CFG1 0x1358
355 380
356#define MT_PROT_CFG_RATE GENMASK(15, 0) 381#define MT_PROT_CFG_RATE GENMASK(15, 0)
@@ -425,6 +450,7 @@
425#define MT_RX_FILTR_CFG_BAR BIT(15) 450#define MT_RX_FILTR_CFG_BAR BIT(15)
426#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16) 451#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16)
427 452
453#define MT_AUTO_RSP_CFG 0x1404
428#define MT_LEGACY_BASIC_RATE 0x1408 454#define MT_LEGACY_BASIC_RATE 0x1408
429#define MT_HT_BASIC_RATE 0x140c 455#define MT_HT_BASIC_RATE 0x140c
430 456
@@ -460,6 +486,10 @@
460#define MT_RX_STAT_2_DUP_ERRORS GENMASK(15, 0) 486#define MT_RX_STAT_2_DUP_ERRORS GENMASK(15, 0)
461#define MT_RX_STAT_2_OVERFLOW_ERRORS GENMASK(31, 16) 487#define MT_RX_STAT_2_OVERFLOW_ERRORS GENMASK(31, 16)
462 488
489#define MT_TX_STA_0 0x170c
490#define MT_TX_STA_1 0x1710
491#define MT_TX_STA_2 0x1714
492
463#define MT_TX_STAT_FIFO 0x1718 493#define MT_TX_STAT_FIFO 0x1718
464#define MT_TX_STAT_FIFO_VALID BIT(0) 494#define MT_TX_STAT_FIFO_VALID BIT(0)
465#define MT_TX_STAT_FIFO_SUCCESS BIT(5) 495#define MT_TX_STAT_FIFO_SUCCESS BIT(5)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
index 560376dd1133..4c907882e8b0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
@@ -23,129 +23,6 @@ struct beacon_bc_data {
23 struct sk_buff *tail[8]; 23 struct sk_buff *tail[8];
24}; 24};
25 25
26void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
27 struct sk_buff *skb)
28{
29 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
30 struct mt76x2_dev *dev = hw->priv;
31 struct ieee80211_vif *vif = info->control.vif;
32 struct mt76_wcid *wcid = &dev->global_wcid;
33
34 if (control->sta) {
35 struct mt76x2_sta *msta;
36
37 msta = (struct mt76x2_sta *) control->sta->drv_priv;
38 wcid = &msta->wcid;
39 /* sw encrypted frames */
40 if (!info->control.hw_key && wcid->hw_key_idx != -1)
41 control->sta = NULL;
42 }
43
44 if (vif && !control->sta) {
45 struct mt76x2_vif *mvif;
46
47 mvif = (struct mt76x2_vif *) vif->drv_priv;
48 wcid = &mvif->group_wcid;
49 }
50
51 mt76_tx(&dev->mt76, control->sta, wcid, skb);
52}
53
54void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb)
55{
56 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
57
58 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
59 ieee80211_free_txskb(mt76_hw(dev), skb);
60 } else {
61 ieee80211_tx_info_clear_status(info);
62 info->status.rates[0].idx = -1;
63 info->flags |= IEEE80211_TX_STAT_ACK;
64 ieee80211_tx_status(mt76_hw(dev), skb);
65 }
66}
67
68s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
69 const struct ieee80211_tx_rate *rate)
70{
71 s8 max_txpwr;
72
73 if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
74 u8 mcs = ieee80211_rate_get_vht_mcs(rate);
75
76 if (mcs == 8 || mcs == 9) {
77 max_txpwr = dev->rate_power.vht[8];
78 } else {
79 u8 nss, idx;
80
81 nss = ieee80211_rate_get_vht_nss(rate);
82 idx = ((nss - 1) << 3) + mcs;
83 max_txpwr = dev->rate_power.ht[idx & 0xf];
84 }
85 } else if (rate->flags & IEEE80211_TX_RC_MCS) {
86 max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
87 } else {
88 enum nl80211_band band = dev->mt76.chandef.chan->band;
89
90 if (band == NL80211_BAND_2GHZ) {
91 const struct ieee80211_rate *r;
92 struct wiphy *wiphy = mt76_hw(dev)->wiphy;
93 struct mt76_rate_power *rp = &dev->rate_power;
94
95 r = &wiphy->bands[band]->bitrates[rate->idx];
96 if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
97 max_txpwr = rp->cck[r->hw_value & 0x3];
98 else
99 max_txpwr = rp->ofdm[r->hw_value & 0x7];
100 } else {
101 max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
102 }
103 }
104
105 return max_txpwr;
106}
107
108s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj)
109{
110 txpwr = min_t(s8, txpwr, dev->txpower_conf);
111 txpwr -= (dev->target_power + dev->target_power_delta[0]);
112 txpwr = min_t(s8, txpwr, max_txpwr_adj);
113
114 if (!dev->enable_tpc)
115 return 0;
116 else if (txpwr >= 0)
117 return min_t(s8, txpwr, 7);
118 else
119 return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
120}
121
122void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr)
123{
124 s8 txpwr_adj;
125
126 txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr,
127 dev->rate_power.ofdm[4]);
128 mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
129 MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
130 mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
131 MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
132}
133
134static int mt76x2_insert_hdr_pad(struct sk_buff *skb)
135{
136 int len = ieee80211_get_hdrlen_from_skb(skb);
137
138 if (len % 4 == 0)
139 return 0;
140
141 skb_push(skb, 2);
142 memmove(skb->data, skb->data + 2, len);
143
144 skb->data[len] = 0;
145 skb->data[len + 1] = 0;
146 return 2;
147}
148
149int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi, 26int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
150 struct sk_buff *skb, struct mt76_queue *q, 27 struct sk_buff *skb, struct mt76_queue *q,
151 struct mt76_wcid *wcid, struct ieee80211_sta *sta, 28 struct mt76_wcid *wcid, struct ieee80211_sta *sta,
@@ -159,7 +36,7 @@ int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
159 if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128) 36 if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
160 mt76x2_mac_wcid_set_drop(dev, wcid->idx, false); 37 mt76x2_mac_wcid_set_drop(dev, wcid->idx, false);
161 38
162 mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta); 39 mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len);
163 40
164 ret = mt76x2_insert_hdr_pad(skb); 41 ret = mt76x2_insert_hdr_pad(skb);
165 if (ret < 0) 42 if (ret < 0)
@@ -289,7 +166,8 @@ void mt76x2_pre_tbtt_tasklet(unsigned long arg)
289 struct ieee80211_vif *vif = info->control.vif; 166 struct ieee80211_vif *vif = info->control.vif;
290 struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv; 167 struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
291 168
292 mt76_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid, NULL); 169 mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
170 NULL);
293 } 171 }
294 spin_unlock_bh(&q->lock); 172 spin_unlock_bh(&q->lock);
295} 173}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c
new file mode 100644
index 000000000000..36afb166fa3f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx_common.c
@@ -0,0 +1,149 @@
1/*
2 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
3 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include "mt76x2.h"
19#include "mt76x2_dma.h"
20
21void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
22 struct sk_buff *skb)
23{
24 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
25 struct mt76x2_dev *dev = hw->priv;
26 struct ieee80211_vif *vif = info->control.vif;
27 struct mt76_wcid *wcid = &dev->global_wcid;
28
29 if (control->sta) {
30 struct mt76x2_sta *msta;
31
32 msta = (struct mt76x2_sta *)control->sta->drv_priv;
33 wcid = &msta->wcid;
34 /* sw encrypted frames */
35 if (!info->control.hw_key && wcid->hw_key_idx != -1)
36 control->sta = NULL;
37 }
38
39 if (vif && !control->sta) {
40 struct mt76x2_vif *mvif;
41
42 mvif = (struct mt76x2_vif *)vif->drv_priv;
43 wcid = &mvif->group_wcid;
44 }
45
46 mt76_tx(&dev->mt76, control->sta, wcid, skb);
47}
48EXPORT_SYMBOL_GPL(mt76x2_tx);
49
50int mt76x2_insert_hdr_pad(struct sk_buff *skb)
51{
52 int len = ieee80211_get_hdrlen_from_skb(skb);
53
54 if (len % 4 == 0)
55 return 0;
56
57 skb_push(skb, 2);
58 memmove(skb->data, skb->data + 2, len);
59
60 skb->data[len] = 0;
61 skb->data[len + 1] = 0;
62 return 2;
63}
64EXPORT_SYMBOL_GPL(mt76x2_insert_hdr_pad);
65
66s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
67 const struct ieee80211_tx_rate *rate)
68{
69 s8 max_txpwr;
70
71 if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
72 u8 mcs = ieee80211_rate_get_vht_mcs(rate);
73
74 if (mcs == 8 || mcs == 9) {
75 max_txpwr = dev->rate_power.vht[8];
76 } else {
77 u8 nss, idx;
78
79 nss = ieee80211_rate_get_vht_nss(rate);
80 idx = ((nss - 1) << 3) + mcs;
81 max_txpwr = dev->rate_power.ht[idx & 0xf];
82 }
83 } else if (rate->flags & IEEE80211_TX_RC_MCS) {
84 max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
85 } else {
86 enum nl80211_band band = dev->mt76.chandef.chan->band;
87
88 if (band == NL80211_BAND_2GHZ) {
89 const struct ieee80211_rate *r;
90 struct wiphy *wiphy = mt76_hw(dev)->wiphy;
91 struct mt76_rate_power *rp = &dev->rate_power;
92
93 r = &wiphy->bands[band]->bitrates[rate->idx];
94 if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
95 max_txpwr = rp->cck[r->hw_value & 0x3];
96 else
97 max_txpwr = rp->ofdm[r->hw_value & 0x7];
98 } else {
99 max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
100 }
101 }
102
103 return max_txpwr;
104}
105EXPORT_SYMBOL_GPL(mt76x2_tx_get_max_txpwr_adj);
106
107s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj)
108{
109 txpwr = min_t(s8, txpwr, dev->txpower_conf);
110 txpwr -= (dev->target_power + dev->target_power_delta[0]);
111 txpwr = min_t(s8, txpwr, max_txpwr_adj);
112
113 if (!dev->enable_tpc)
114 return 0;
115 else if (txpwr >= 0)
116 return min_t(s8, txpwr, 7);
117 else
118 return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
119}
120EXPORT_SYMBOL_GPL(mt76x2_tx_get_txpwr_adj);
121
122void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr)
123{
124 s8 txpwr_adj;
125
126 txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr,
127 dev->rate_power.ofdm[4]);
128 mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
129 MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
130 mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
131 MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
132}
133EXPORT_SYMBOL_GPL(mt76x2_tx_set_txpwr_auto);
134
135void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb)
136{
137 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
138
139 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
140 ieee80211_free_txskb(mt76_hw(dev), skb);
141 } else {
142 ieee80211_tx_info_clear_status(info);
143 info->status.rates[0].idx = -1;
144 info->flags |= IEEE80211_TX_STAT_ACK;
145 ieee80211_tx_status(mt76_hw(dev), skb);
146 }
147}
148EXPORT_SYMBOL_GPL(mt76x2_tx_complete);
149
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c
new file mode 100644
index 000000000000..1428cfdee579
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c
@@ -0,0 +1,142 @@
1/*
2 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19
20#include "mt76x2u.h"
21
/* USB vendor/product IDs of MT76x2U based adapters handled here */
static const struct usb_device_id mt76x2u_device_table[] = {
	{ USB_DEVICE(0x0b05, 0x1833) },	/* Asus USB-AC54 */
	{ USB_DEVICE(0x0b05, 0x17eb) },	/* Asus USB-AC55 */
	{ USB_DEVICE(0x0b05, 0x180b) },	/* Asus USB-N53 B1 */
	{ USB_DEVICE(0x0e8d, 0x7612) },	/* Aukey USB-AC1200 */
	{ USB_DEVICE(0x057c, 0x8503) },	/* Avm FRITZ!WLAN AC860 */
	{ USB_DEVICE(0x7392, 0xb711) },	/* Edimax EW 7722 UAC */
	{ USB_DEVICE(0x0846, 0x9053) },	/* Netgear A6210 */
	{ USB_DEVICE(0x045e, 0x02e6) },	/* XBox One Wireless Adapter */
	{ },	/* terminating entry */
};
33
/* USB probe: allocate the mt76x2 device, attach the mt76 USB layer and
 * register with mac80211.  On failure the hw allocation and the USB
 * device reference taken here are released again.
 */
static int mt76x2u_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct mt76x2_dev *dev;
	int err;

	dev = mt76x2u_alloc_device(&intf->dev);
	if (!dev)
		return -ENOMEM;

	/* hold a reference for the lifetime of the binding and start
	 * from a freshly reset device
	 */
	udev = usb_get_dev(udev);
	usb_reset_device(udev);

	err = mt76u_init(&dev->mt76, intf);
	if (err < 0)
		goto err;

	dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
	dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);

	err = mt76x2u_register_device(dev);
	if (err < 0)
		goto err;

	return 0;

err:
	ieee80211_free_hw(mt76_hw(dev));
	usb_set_intfdata(intf, NULL);
	usb_put_dev(udev);

	return err;
}
68
/* USB disconnect: flag the device as removed (so pending register
 * access can bail out), then unregister from mac80211 and tear down
 * the USB/MCU state before dropping the references taken in probe.
 */
static void mt76x2u_disconnect(struct usb_interface *intf)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct mt76x2_dev *dev = usb_get_intfdata(intf);
	struct ieee80211_hw *hw = mt76_hw(dev);

	/* must precede ieee80211_unregister_hw(): the device may
	 * already be physically gone
	 */
	set_bit(MT76_REMOVED, &dev->mt76.state);
	ieee80211_unregister_hw(hw);
	mt76x2u_cleanup(dev);

	ieee80211_free_hw(hw);
	usb_set_intfdata(intf, NULL);
	usb_put_dev(udev);
}
83
/* PM suspend: quiesce tx/rx queues and the MAC, and kill the pending
 * MCU response URB.  Everything is resubmitted in mt76x2u_resume().
 */
static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf,
					  pm_message_t state)
{
	struct mt76x2_dev *dev = usb_get_intfdata(intf);
	struct mt76_usb *usb = &dev->mt76.usb;

	mt76u_stop_queues(&dev->mt76);
	mt76x2u_stop_hw(dev);
	usb_kill_urb(usb->mcu.res.urb);

	return 0;
}
96
/* PM resume (also used as reset_resume): resubmit the MCU response and
 * rx URBs killed on suspend, re-enable the queue tasklets and run the
 * full hardware init again.
 */
static int __maybe_unused mt76x2u_resume(struct usb_interface *intf)
{
	struct mt76x2_dev *dev = usb_get_intfdata(intf);
	struct mt76_usb *usb = &dev->mt76.usb;
	int err;

	/* re-arm the MCU command/response buffer first so firmware
	 * traffic during re-init can complete
	 */
	reinit_completion(&usb->mcu.cmpl);
	err = mt76u_submit_buf(&dev->mt76, USB_DIR_IN,
			       MT_EP_IN_CMD_RESP,
			       &usb->mcu.res, GFP_KERNEL,
			       mt76u_mcu_complete_urb,
			       &usb->mcu.cmpl);
	if (err < 0)
		return err;

	err = mt76u_submit_rx_buffers(&dev->mt76);
	if (err < 0)
		return err;

	/* tasklets are presumably disabled by mt76u_stop_queues() on
	 * suspend — TODO confirm against the mt76 usb core
	 */
	tasklet_enable(&usb->rx_tasklet);
	tasklet_enable(&usb->tx_tasklet);

	return mt76x2u_init_hardware(dev);
}
121
MODULE_DEVICE_TABLE(usb, mt76x2u_device_table);
MODULE_FIRMWARE(MT7662U_FIRMWARE);
MODULE_FIRMWARE(MT7662U_ROM_PATCH);

/* USB driver glue; firmware state does not survive a bus reset, so
 * reset_resume reuses the full resume/re-init path.
 */
static struct usb_driver mt76x2u_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mt76x2u_device_table,
	.probe = mt76x2u_probe,
	.disconnect = mt76x2u_disconnect,
#ifdef CONFIG_PM
	.suspend = mt76x2u_suspend,
	.resume = mt76x2u_resume,
	.reset_resume = mt76x2u_resume,
#endif /* CONFIG_PM */
	.soft_unbind = 1,
	.disable_hub_initiated_lpm = 1,
};
module_usb_driver(mt76x2u_driver);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u.h b/drivers/net/wireless/mediatek/mt76/mt76x2u.h
new file mode 100644
index 000000000000..008092f0cd8a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u.h
@@ -0,0 +1,83 @@
1/*
2 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
/* Driver-internal interfaces for MT76x2U USB devices. */
#ifndef __MT76x2U_H
#define __MT76x2U_H

#include <linux/device.h>

#include "mt76x2.h"
#include "mt76x2_dma.h"
#include "mt76x2_mcu.h"

/* size of the host shadow copy of the device EEPROM */
#define MT7612U_EEPROM_SIZE		512

#define MT_USB_AGGR_SIZE_LIMIT		21 /* 1024B unit */
#define MT_USB_AGGR_TIMEOUT		0x80 /* 33ns unit */

extern const struct ieee80211_ops mt76x2u_ops;

/* device setup / teardown */
struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev);
int mt76x2u_register_device(struct mt76x2_dev *dev);
int mt76x2u_init_hardware(struct mt76x2_dev *dev);
void mt76x2u_cleanup(struct mt76x2_dev *dev);
void mt76x2u_stop_hw(struct mt76x2_dev *dev);

/* MAC helpers */
void mt76x2u_mac_setaddr(struct mt76x2_dev *dev, u8 *addr);
int mt76x2u_mac_reset(struct mt76x2_dev *dev);
void mt76x2u_mac_resume(struct mt76x2_dev *dev);
int mt76x2u_mac_start(struct mt76x2_dev *dev);
int mt76x2u_mac_stop(struct mt76x2_dev *dev);

/* PHY / calibration helpers */
int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
			    struct cfg80211_chan_def *chandef);
void mt76x2u_phy_calibrate(struct work_struct *work);
void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev);
void mt76x2u_phy_set_txdac(struct mt76x2_dev *dev);
void mt76x2u_phy_set_rxpath(struct mt76x2_dev *dev);

/* MCU messaging helpers */
void mt76x2u_mcu_complete_urb(struct urb *urb);
int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
			    u8 bw_index, bool scan);
int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
			  u32 val);
int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev,
			  struct mt76x2_tssi_comp *tssi_data);
int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
			  bool force);
int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
				bool ext, int rssi, u32 false_cca);
int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val);
int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type,
			u8 temp_level, u8 channel);
int mt76x2u_mcu_init(struct mt76x2_dev *dev);
int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev);
void mt76x2u_mcu_deinit(struct mt76x2_dev *dev);

/* USB datapath */
int mt76x2u_alloc_queues(struct mt76x2_dev *dev);
void mt76x2u_queues_deinit(struct mt76x2_dev *dev);
void mt76x2u_stop_queues(struct mt76x2_dev *dev);
bool mt76x2u_tx_status_data(struct mt76_dev *mdev, u8 *update);
int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
			   struct sk_buff *skb, struct mt76_queue *q,
			   struct mt76_wcid *wcid, struct ieee80211_sta *sta,
			   u32 *tx_info);
void mt76x2u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
			     struct mt76_queue_entry *e, bool flush);
int mt76x2u_skb_dma_info(struct sk_buff *skb, enum dma_msg_port port,
			 u32 flags);

#endif /* __MT76x2U_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c
new file mode 100644
index 000000000000..1ca5dd05b265
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_core.c
@@ -0,0 +1,108 @@
1/*
2 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "mt76x2u.h"
18#include "dma.h"
19
20static void mt76x2u_remove_dma_hdr(struct sk_buff *skb)
21{
22 int hdr_len;
23
24 skb_pull(skb, sizeof(struct mt76x2_txwi) + MT_DMA_HDR_LEN);
25 hdr_len = ieee80211_get_hdrlen_from_skb(skb);
26 if (hdr_len % 4) {
27 memmove(skb->data + 2, skb->data, hdr_len);
28 skb_pull(skb, 2);
29 }
30}
31
32static int
33mt76x2u_check_skb_rooms(struct sk_buff *skb)
34{
35 int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
36 u32 need_head;
37
38 need_head = sizeof(struct mt76x2_txwi) + MT_DMA_HDR_LEN;
39 if (hdr_len % 4)
40 need_head += 2;
41 return skb_cow(skb, need_head);
42}
43
44static int
45mt76x2u_set_txinfo(struct sk_buff *skb,
46 struct mt76_wcid *wcid, u8 ep)
47{
48 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
49 enum mt76x2_qsel qsel;
50 u32 flags;
51
52 if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
53 ep == MT_EP_OUT_HCCA)
54 qsel = MT_QSEL_MGMT;
55 else
56 qsel = MT_QSEL_EDCA;
57
58 flags = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
59 MT_TXD_INFO_80211;
60 if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
61 flags |= MT_TXD_INFO_WIV;
62
63 return mt76u_skb_dma_info(skb, WLAN_PORT, flags);
64}
65
/* mt76 .tx_status_data callback: load one entry from the hw tx status
 * FIFO and report it.  Returns false once the FIFO is empty, true
 * after an entry has been consumed (caller polls again).
 */
bool mt76x2u_tx_status_data(struct mt76_dev *mdev, u8 *update)
{
	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
	struct mt76x2_tx_status stat;

	if (!mt76x2_mac_load_tx_status(dev, &stat))
		return false;

	mt76x2_send_tx_status(dev, &stat, update);

	return true;
}
78
/* mt76 .tx_prepare_skb callback: ensure headroom, pad the 802.11
 * header, prepend the TXWI descriptor and the USB DMA info header.
 * skb->len is captured *before* any push so the TXWI carries the
 * original frame length.
 */
int mt76x2u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
			   struct sk_buff *skb, struct mt76_queue *q,
			   struct mt76_wcid *wcid, struct ieee80211_sta *sta,
			   u32 *tx_info)
{
	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
	struct mt76x2_txwi *txwi;
	int err, len = skb->len;

	err = mt76x2u_check_skb_rooms(skb);
	if (err < 0)
		return -ENOMEM;

	mt76x2_insert_hdr_pad(skb);

	txwi = skb_push(skb, sizeof(struct mt76x2_txwi));
	mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta, len);

	return mt76x2u_set_txinfo(skb, wcid, q2ep(q->hw_idx));
}
99
/* mt76 .tx_complete_skb callback: strip the TXWI/DMA headers added in
 * mt76x2u_tx_prepare_skb() and report completion to mac80211.
 */
void mt76x2u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
			     struct mt76_queue_entry *e, bool flush)
{
	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);

	mt76x2u_remove_dma_hdr(e->skb);
	mt76x2_tx_complete(dev, e->skb);
}
108
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c
new file mode 100644
index 000000000000..9b81e7641c06
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_init.c
@@ -0,0 +1,318 @@
1/*
2 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/delay.h>
18
19#include "mt76x2u.h"
20#include "mt76x2_eeprom.h"
21
/* Configure the USB DMA engine: enable bulk rx/tx and rx drop-or-pad,
 * and disable rx bulk aggregation so each rx URB carries one frame.
 */
static void mt76x2u_init_dma(struct mt76x2_dev *dev)
{
	u32 val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));

	val |= MT_USB_DMA_CFG_RX_DROP_OR_PAD |
	       MT_USB_DMA_CFG_RX_BULK_EN |
	       MT_USB_DMA_CFG_TX_BULK_EN;

	/* disable AGGR_BULK_RX in order to receive one
	 * frame in each rx urb and avoid copies
	 */
	val &= ~MT_USB_DMA_CFG_RX_BULK_AGG_EN;
	mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
}
36
/* Vendor RF power-on fixup sequence over undocumented CFG registers.
 * Offsets, values and delays follow the reference driver; the order
 * of operations is assumed to matter and must not be changed.
 */
static void mt76x2u_power_on_rf_patch(struct mt76x2_dev *dev)
{
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) | BIT(16));
	udelay(1);

	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1c), 0xff);
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x1c), 0x30);

	mt76_wr(dev, MT_VEND_ADDR(CFG, 0x14), 0x484f);
	udelay(1);

	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(17));
	usleep_range(150, 200);

	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(16));
	usleep_range(50, 100);

	mt76_set(dev, MT_VEND_ADDR(CFG, 0x14c), BIT(19) | BIT(20));
}
56
/* Power up one RF unit (0 or 1); unit 1 uses the same bit layout in
 * CFG 0x130 shifted up by 8.
 */
static void mt76x2u_power_on_rf(struct mt76x2_dev *dev, int unit)
{
	int shift = unit ? 8 : 0;
	u32 val = (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift;

	/* Enable RF BG */
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), BIT(0) << shift);
	usleep_range(10, 20);

	/* Enable RFDIG LDO/AFE/ABB/ADDA */
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x130), val);
	usleep_range(10, 20);

	/* Switch RFDIG power to internal LDO */
	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x130), BIT(2) << shift);
	usleep_range(10, 20);

	mt76x2u_power_on_rf_patch(dev);

	mt76_set(dev, 0x530, 0xf);
}
78
/* WLAN block power-up: enable the MTCMOS power domain, sequence the
 * clock/LDO bits in vendor CFG space, release the BBP reset and power
 * on both RF units.
 */
static void mt76x2u_power_on(struct mt76x2_dev *dev)
{
	u32 val;

	/* Turn on WL MTCMOS */
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x148),
		 MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP);

	val = MT_WLAN_MTC_CTRL_STATE_UP |
	      MT_WLAN_MTC_CTRL_PWR_ACK |
	      MT_WLAN_MTC_CTRL_PWR_ACK_S;

	/* NOTE(review): the poll result is ignored — the sequence
	 * proceeds even if the power-ack bits never assert
	 */
	mt76_poll(dev, MT_VEND_ADDR(CFG, 0x148), val, val, 1000);

	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0x7f << 16);
	usleep_range(10, 20);

	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
	usleep_range(10, 20);

	mt76_set(dev, MT_VEND_ADDR(CFG, 0x148), 0xf << 24);
	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x148), 0xfff);

	/* Turn on AD/DA power down */
	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x1204), BIT(3));

	/* WLAN function enable */
	mt76_set(dev, MT_VEND_ADDR(CFG, 0x80), BIT(0));

	/* Release BBP software reset */
	mt76_clear(dev, MT_VEND_ADDR(CFG, 0x64), BIT(18));

	mt76x2u_power_on_rf(dev, 0);
	mt76x2u_power_on_rf(dev, 1);
}
114
115static int mt76x2u_init_eeprom(struct mt76x2_dev *dev)
116{
117 u32 val, i;
118
119 dev->mt76.eeprom.data = devm_kzalloc(dev->mt76.dev,
120 MT7612U_EEPROM_SIZE,
121 GFP_KERNEL);
122 dev->mt76.eeprom.size = MT7612U_EEPROM_SIZE;
123 if (!dev->mt76.eeprom.data)
124 return -ENOMEM;
125
126 for (i = 0; i + 4 <= MT7612U_EEPROM_SIZE; i += 4) {
127 val = mt76_rr(dev, MT_VEND_ADDR(EEPROM, i));
128 put_unaligned_le32(val, dev->mt76.eeprom.data + i);
129 }
130
131 mt76x2_eeprom_parse_hw_cap(dev);
132 return 0;
133}
134
/* Allocate the combined mt76/mt76x2 device and wire up the USB
 * datapath callbacks.  Returns NULL on allocation failure; the caller
 * owns the embedded ieee80211_hw and must free it on error paths.
 */
struct mt76x2_dev *mt76x2u_alloc_device(struct device *pdev)
{
	static const struct mt76_driver_ops drv_ops = {
		.tx_prepare_skb = mt76x2u_tx_prepare_skb,
		.tx_complete_skb = mt76x2u_tx_complete_skb,
		.tx_status_data = mt76x2u_tx_status_data,
		.rx_skb = mt76x2_queue_rx_skb,
	};
	struct mt76x2_dev *dev;
	struct mt76_dev *mdev;

	mdev = mt76_alloc_device(sizeof(*dev), &mt76x2u_ops);
	if (!mdev)
		return NULL;

	dev = container_of(mdev, struct mt76x2_dev, mt76);
	mdev->dev = pdev;
	mdev->drv = &drv_ops;

	mutex_init(&dev->mutex);

	return dev;
}
158
159static void mt76x2u_init_beacon_offsets(struct mt76x2_dev *dev)
160{
161 mt76_wr(dev, MT_BCN_OFFSET(0), 0x18100800);
162 mt76_wr(dev, MT_BCN_OFFSET(1), 0x38302820);
163 mt76_wr(dev, MT_BCN_OFFSET(2), 0x58504840);
164 mt76_wr(dev, MT_BCN_OFFSET(3), 0x78706860);
165}
166
/* Bring the chip up from reset: power on, load firmware, configure
 * USB DMA and the MCU, reset MAC state (wcid/key tables, beacon
 * timers) and leave the MAC stopped.  Returns 0 or a negative errno.
 */
int mt76x2u_init_hardware(struct mt76x2_dev *dev)
{
	static const u16 beacon_offsets[] = {
		/* 512 byte per beacon */
		0xc000, 0xc200, 0xc400, 0xc600,
		0xc800, 0xca00, 0xcc00, 0xce00,
		0xd000, 0xd200, 0xd400, 0xd600,
		0xd800, 0xda00, 0xdc00, 0xde00
	};
	const struct mt76_wcid_addr addr = {
		.macaddr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
		.ba_mask = 0,
	};
	int i, err;

	dev->beacon_offsets = beacon_offsets;

	mt76x2_reset_wlan(dev, true);
	mt76x2u_power_on(dev);

	if (!mt76x2_wait_for_mac(dev))
		return -ETIMEDOUT;

	err = mt76x2u_mcu_fw_init(dev);
	if (err < 0)
		return err;

	/* WPDMA must be idle before reconfiguring the MAC */
	if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
			    MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
			    MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100))
		return -EIO;

	/* wait for asic ready after fw load. */
	if (!mt76x2_wait_for_mac(dev))
		return -ETIMEDOUT;

	mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0);
	mt76_wr(dev, MT_TSO_CTRL, 0);

	mt76x2u_init_dma(dev);

	err = mt76x2u_mcu_init(dev);
	if (err < 0)
		return err;

	err = mt76x2u_mac_reset(dev);
	if (err < 0)
		return err;

	mt76x2u_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
	dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);

	mt76x2u_init_beacon_offsets(dev);

	if (!mt76x2_wait_for_bbp(dev))
		return -ETIMEDOUT;

	/* reset wcid table */
	for (i = 0; i < 254; i++)
		mt76_wr_copy(dev, MT_WCID_ADDR(i), &addr,
			     sizeof(struct mt76_wcid_addr));

	/* reset shared key table and pairwise key table */
	for (i = 0; i < 4; i++)
		mt76_wr(dev, MT_SKEY_MODE_BASE_0 + 4 * i, 0);
	for (i = 0; i < 256; i++)
		mt76_wr(dev, MT_WCID_ATTR(i), 1);

	/* keep all beacon-related timers off until an interface
	 * configures them
	 */
	mt76_clear(dev, MT_BEACON_TIME_CFG,
		   MT_BEACON_TIME_CFG_TIMER_EN |
		   MT_BEACON_TIME_CFG_SYNC_MODE |
		   MT_BEACON_TIME_CFG_TBTT_EN |
		   MT_BEACON_TIME_CFG_BEACON_TX);

	/* 0x1e cycles per microsecond tick — presumably matching a
	 * 30 MHz reference; TODO confirm against vendor docs
	 */
	mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
	mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f);

	err = mt76x2u_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
	if (err < 0)
		return err;

	mt76x2u_phy_set_rxpath(dev);
	mt76x2u_phy_set_txdac(dev);

	return mt76x2u_mac_stop(dev);
}
253
/* Register the device with mac80211: load the EEPROM shadow, set up
 * the USB queues, initialize the hardware and expose a station-mode
 * only wiphy.  On failure, partially initialized state is torn down
 * via mt76x2u_cleanup().
 */
int mt76x2u_register_device(struct mt76x2_dev *dev)
{
	struct ieee80211_hw *hw = mt76_hw(dev);
	struct wiphy *wiphy = hw->wiphy;
	int err;

	INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate);
	mt76x2_init_device(dev);

	err = mt76x2u_init_eeprom(dev);
	if (err < 0)
		return err;

	err = mt76u_mcu_init_rx(&dev->mt76);
	if (err < 0)
		return err;

	err = mt76u_alloc_queues(&dev->mt76);
	if (err < 0)
		goto fail;

	err = mt76x2u_init_hardware(dev);
	if (err < 0)
		goto fail;

	/* only station mode is supported on USB */
	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);

	err = mt76_register_device(&dev->mt76, true, mt76x2_rates,
				   ARRAY_SIZE(mt76x2_rates));
	if (err)
		goto fail;

	/* check hw sg support in order to enable AMSDU */
	if (mt76u_check_sg(&dev->mt76))
		hw->max_tx_fragments = MT_SG_MAX_SIZE;
	else
		hw->max_tx_fragments = 1;

	set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);

	mt76x2_init_debugfs(dev);
	mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
	mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);

	return 0;

fail:
	mt76x2u_cleanup(dev);
	return err;
}
304
/* Stop periodic work (tx status polling, calibration) and halt the MAC. */
void mt76x2u_stop_hw(struct mt76x2_dev *dev)
{
	mt76u_stop_stat_wk(&dev->mt76);
	cancel_delayed_work_sync(&dev->cal_work);
	mt76x2u_mac_stop(dev);
}
311
/* Full teardown: turn the radio off via the MCU, stop the hardware,
 * release the USB queues and the MCU state.
 */
void mt76x2u_cleanup(struct mt76x2_dev *dev)
{
	mt76x2u_mcu_set_radio_state(dev, false);
	mt76x2u_stop_hw(dev);
	mt76u_queues_deinit(&dev->mt76);
	mt76x2u_mcu_deinit(dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c
new file mode 100644
index 000000000000..eab7ab297aa6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_mac.c
@@ -0,0 +1,240 @@
1/*
2 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "mt76x2u.h"
18#include "mt76x2_eeprom.h"
19
20static void mt76x2u_mac_reset_counters(struct mt76x2_dev *dev)
21{
22 mt76_rr(dev, MT_RX_STAT_0);
23 mt76_rr(dev, MT_RX_STAT_1);
24 mt76_rr(dev, MT_RX_STAT_2);
25 mt76_rr(dev, MT_TX_STA_0);
26 mt76_rr(dev, MT_TX_STA_1);
27 mt76_rr(dev, MT_TX_STA_2);
28}
29
/* Apply the per-board crystal trim from the EEPROM and tune related
 * MAC timing. The register values are vendor-provided magic; do not
 * change them without hardware documentation.
 */
static void mt76x2u_mac_fixup_xtal(struct mt76x2_dev *dev)
{
	s8 offset = 0;
	u16 eep_val;

	eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_2);

	/* low byte: sign-magnitude trim offset; 0xff means "unprogrammed" */
	offset = eep_val & 0x7f;
	if ((eep_val & 0xff) == 0xff)
		offset = 0;
	else if (eep_val & 0x80)
		offset = 0 - offset;

	/* high byte: base trim; fall back to TRIM_1, then to default 0x14 */
	eep_val >>= 8;
	if (eep_val == 0x00 || eep_val == 0xff) {
		eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
		eep_val &= 0xff;

		if (eep_val == 0x00 || eep_val == 0xff)
			eep_val = 0x14;
	}

	eep_val &= 0x7f;
	mt76_rmw_field(dev, MT_VEND_ADDR(CFG, MT_XO_CTRL5),
		       MT_XO_CTRL5_C2_VAL, eep_val + offset);
	mt76_set(dev, MT_VEND_ADDR(CFG, MT_XO_CTRL6), MT_XO_CTRL6_C2_CTRL);

	/* vendor magic: pulse 0x504 with 0x50c set; purpose undocumented */
	mt76_wr(dev, 0x504, 0x06000000);
	mt76_wr(dev, 0x50c, 0x08800000);
	mdelay(5);
	mt76_wr(dev, 0x504, 0x0);

	/* decrease SIFS from 16us to 13us */
	mt76_rmw_field(dev, MT_XIFS_TIME_CFG,
		       MT_XIFS_TIME_CFG_OFDM_SIFS, 0xd);
	mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, MT_BKOFF_SLOT_CFG_CC_DELAY, 1);

	/* init fce */
	mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);

	/* pick the XO control word matching the board's crystal option */
	eep_val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
	switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
	case 0:
		mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
		break;
	case 1:
		mt76_wr(dev, MT_XO_CTRL7, 0x5c1feed0);
		break;
	default:
		break;
	}
}
82
/* Reset and program the MAC with its baseline configuration: WPDMA,
 * packet buffer limits, the shared init value table and protection /
 * WMM defaults. Values are vendor defaults; always returns 0.
 */
int mt76x2u_mac_reset(struct mt76x2_dev *dev)
{
	mt76_wr(dev, MT_WPDMA_GLO_CFG, BIT(4) | BIT(5));

	/* init pbf regs */
	mt76_wr(dev, MT_PBF_TX_MAX_PCNT, 0xefef3f1f);
	mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf);

	mt76_write_mac_initvals(dev);

	mt76_wr(dev, MT_TX_LINK_CFG, 0x1020);
	mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13);
	mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00);
	mt76_wr(dev, MT_TX_RTS_CFG, 0x92b20);

	/* per-AC WMM defaults (AIFSN / CWmin / CWmax) */
	mt76_wr(dev, MT_WMM_AIFSN, 0x2273);
	mt76_wr(dev, MT_WMM_CWMIN, 0x2344);
	mt76_wr(dev, MT_WMM_CWMAX, 0x34aa);

	/* release CSR and BBP from reset */
	mt76_clear(dev, MT_MAC_SYS_CTRL,
		   MT_MAC_SYS_CTRL_RESET_CSR |
		   MT_MAC_SYS_CTRL_RESET_BBP);

	/* MT7612 has no BT coexistence block */
	if (is_mt7612(dev))
		mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN);

	mt76_set(dev, MT_EXT_CCA_CFG, 0xf000);
	mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31));

	mt76x2u_mac_fixup_xtal(dev);

	return 0;
}
116
/* Start the MAC: clear the stat counters, enable tx first so WPDMA can
 * settle, restore the rx filter and then enable both tx and rx.
 * Always returns 0.
 */
int mt76x2u_mac_start(struct mt76x2_dev *dev)
{
	mt76x2u_mac_reset_counters(dev);

	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
	wait_for_wpdma(dev);
	usleep_range(50, 100);

	mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);

	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX |
		MT_MAC_SYS_CTRL_ENABLE_RX);

	return 0;
}
133
/* Stop the MAC tx/rx engines, draining DMA and packet queues first.
 * Returns -EIO if the device has been removed, 0 otherwise (the drain
 * loops are best-effort and only warn on timeout). The RTS retry
 * limit is temporarily zeroed so in-flight frames abort quickly, and
 * restored before returning.
 */
int mt76x2u_mac_stop(struct mt76x2_dev *dev)
{
	int i, count = 0, val;
	bool stopped = false;
	u32 rts_cfg;

	if (test_bit(MT76_REMOVED, &dev->mt76.state))
		return -EIO;

	/* drop tx retries while we drain */
	rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
	mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);

	mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20));
	mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1));

	/* wait tx dma to stop */
	for (i = 0; i < 2000; i++) {
		val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
		/* i > 10: give DMA a minimum settle time before trusting
		 * the busy bit
		 */
		if (!(val & MT_USB_DMA_CFG_TX_BUSY) && i > 10)
			break;
		usleep_range(50, 100);
	}

	/* page count on TxQ */
	for (i = 0; i < 200; i++) {
		if (!(mt76_rr(dev, 0x0438) & 0xffffffff) &&
		    !(mt76_rr(dev, 0x0a30) & 0x000000ff) &&
		    !(mt76_rr(dev, 0x0a34) & 0xff00ff00))
			break;
		usleep_range(10, 20);
	}

	/* disable tx-rx */
	mt76_clear(dev, MT_MAC_SYS_CTRL,
		   MT_MAC_SYS_CTRL_ENABLE_RX |
		   MT_MAC_SYS_CTRL_ENABLE_TX);

	/* Wait for MAC to become idle */
	for (i = 0; i < 1000; i++) {
		if (!(mt76_rr(dev, MT_MAC_STATUS) & MT_MAC_STATUS_TX) &&
		    !mt76_rr(dev, MT_BBP(IBI, 12))) {
			stopped = true;
			break;
		}
		usleep_range(10, 20);
	}

	/* MAC stuck in tx: pulse BBP core reset bits to force it idle */
	if (!stopped) {
		mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
		mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));

		mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
		mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
	}

	/* page count on RxQ */
	for (i = 0; i < 200; i++) {
		/* require 10 consecutive empty readings before trusting it */
		if (!(mt76_rr(dev, 0x0430) & 0x00ff0000) &&
		    !(mt76_rr(dev, 0x0a30) & 0xffffffff) &&
		    !(mt76_rr(dev, 0x0a34) & 0xffffffff) &&
		    ++count > 10)
			break;
		msleep(50);
	}

	if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 2000))
		dev_warn(dev->mt76.dev, "MAC RX failed to stop\n");

	/* wait rx dma to stop */
	for (i = 0; i < 2000; i++) {
		val = mt76_rr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG));
		if (!(val & MT_USB_DMA_CFG_RX_BUSY) && i > 10)
			break;
		usleep_range(50, 100);
	}

	/* restore the original RTS retry configuration */
	mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);

	return 0;
}
214
/* Re-enable tx/rx and restore the TXOP bits that mt76x2u_set_channel /
 * mt76x2u_mac_stop cleared while the channel was being switched.
 */
void mt76x2u_mac_resume(struct mt76x2_dev *dev)
{
	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX |
		MT_MAC_SYS_CTRL_ENABLE_RX);
	mt76_set(dev, MT_TXOP_CTRL_CFG, BIT(20));
	mt76_set(dev, MT_TXOP_HLDR_ET, BIT(1));
}
223
/* Program the hardware MAC address. An invalid address (e.g. blank
 * EEPROM) is replaced with a random locally-administered one.
 * get_unaligned_*() is used because macaddr is a byte array with no
 * alignment guarantee.
 */
void mt76x2u_mac_setaddr(struct mt76x2_dev *dev, u8 *addr)
{
	ether_addr_copy(dev->mt76.macaddr, addr);

	if (!is_valid_ether_addr(dev->mt76.macaddr)) {
		eth_random_addr(dev->mt76.macaddr);
		dev_info(dev->mt76.dev,
			 "Invalid MAC address, using random address %pM\n",
			 dev->mt76.macaddr);
	}

	mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr));
	/* DW1 carries the last two address bytes plus the unicast-to-me
	 * mask in the upper byte
	 */
	mt76_wr(dev, MT_MAC_ADDR_DW1,
		get_unaligned_le16(dev->mt76.macaddr + 4) |
		FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
}
240
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c
new file mode 100644
index 000000000000..7367ba111119
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_main.c
@@ -0,0 +1,185 @@
1/*
2 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "mt76x2u.h"
18
19static int mt76x2u_start(struct ieee80211_hw *hw)
20{
21 struct mt76x2_dev *dev = hw->priv;
22 int ret;
23
24 mutex_lock(&dev->mutex);
25
26 ret = mt76x2u_mac_start(dev);
27 if (ret)
28 goto out;
29
30 set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
31
32out:
33 mutex_unlock(&dev->mutex);
34 return ret;
35}
36
/* mac80211 .stop callback: clear the running flag before quiescing the
 * hardware so concurrent paths see the device as down.
 */
static void mt76x2u_stop(struct ieee80211_hw *hw)
{
	struct mt76x2_dev *dev = hw->priv;

	mutex_lock(&dev->mutex);
	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
	mt76x2u_stop_hw(dev);
	mutex_unlock(&dev->mutex);
}
46
47static int mt76x2u_add_interface(struct ieee80211_hw *hw,
48 struct ieee80211_vif *vif)
49{
50 struct mt76x2_dev *dev = hw->priv;
51 struct mt76x2_vif *mvif = (struct mt76x2_vif *)vif->drv_priv;
52 unsigned int idx = 0;
53
54 if (!ether_addr_equal(dev->mt76.macaddr, vif->addr))
55 mt76x2u_mac_setaddr(dev, vif->addr);
56
57 mvif->idx = idx;
58 mvif->group_wcid.idx = MT_VIF_WCID(idx);
59 mvif->group_wcid.hw_key_idx = -1;
60 mt76x2_txq_init(dev, vif->txq);
61
62 return 0;
63}
64
/* Switch the device to a new channel: stop calibration and the MAC,
 * perform the PHY channel programming, then resume. MT76_RESET gates
 * the tx path while the switch is in progress; any queued frames are
 * rescheduled afterwards.
 */
static int
mt76x2u_set_channel(struct mt76x2_dev *dev,
		    struct cfg80211_chan_def *chandef)
{
	int err;

	cancel_delayed_work_sync(&dev->cal_work);
	set_bit(MT76_RESET, &dev->mt76.state);

	mt76_set_channel(&dev->mt76);

	/* clear TXOP bits before stopping; mt76x2u_mac_resume sets them back */
	mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20));
	mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1));
	mt76x2_mac_stop(dev, false);

	err = mt76x2u_phy_set_channel(dev, chandef);

	/* resume even if the PHY switch failed; err is still reported */
	mt76x2u_mac_resume(dev);

	clear_bit(MT76_RESET, &dev->mt76.state);
	mt76_txq_schedule_all(&dev->mt76);

	return err;
}
89
/* mac80211 .bss_info_changed callback: run a channel calibration pass
 * on (dis)association and program the BSSID registers when it changes.
 */
static void
mt76x2u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			 struct ieee80211_bss_conf *info, u32 changed)
{
	struct mt76x2_dev *dev = hw->priv;

	mutex_lock(&dev->mutex);

	if (changed & BSS_CHANGED_ASSOC) {
		mt76x2u_phy_channel_calibrate(dev);
		mt76x2_apply_gain_adj(dev);
	}

	if (changed & BSS_CHANGED_BSSID) {
		/* bssid is a byte array; use unaligned accessors */
		mt76_wr(dev, MT_MAC_BSSID_DW0,
			get_unaligned_le32(info->bssid));
		mt76_wr(dev, MT_MAC_BSSID_DW1,
			get_unaligned_le16(info->bssid + 4));
	}

	mutex_unlock(&dev->mutex);
}
112
/* mac80211 .config callback: handles monitor mode, channel and tx
 * power changes.
 */
static int
mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
{
	struct mt76x2_dev *dev = hw->priv;
	int err = 0;

	mutex_lock(&dev->mutex);

	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
		/* the PROMISC filter bit, when set, appears to drop
		 * not-to-me frames, hence it is set when NOT in monitor
		 * mode — NOTE(review): confirm bit polarity vs datasheet
		 */
		if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
			dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
		else
			dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
		mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		ieee80211_stop_queues(hw);
		err = mt76x2u_set_channel(dev, &hw->conf.chandef);
		ieee80211_wake_queues(hw);
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		/* power_level is in dBm; hw uses 0.5 dB units */
		dev->txpower_conf = hw->conf.power_level * 2;

		/* convert to per-chain power for 2x2 devices */
		dev->txpower_conf -= 6;

		if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
			mt76x2_phy_set_txpower(dev);
	}

	mutex_unlock(&dev->mutex);

	return err;
}
149
/* mac80211 .sw_scan_start callback: flag the scan so other paths
 * (e.g. channel switching) can skip calibration while it runs.
 */
static void
mt76x2u_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		const u8 *mac)
{
	struct mt76x2_dev *dev = hw->priv;

	set_bit(MT76_SCANNING, &dev->mt76.state);
}
158
/* mac80211 .sw_scan_complete callback: clear the scanning flag. */
static void
mt76x2u_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76x2_dev *dev = hw->priv;

	clear_bit(MT76_SCANNING, &dev->mt76.state);
}
166
/* mac80211 callback table for the USB variant; ops without USB-specific
 * requirements are shared with the mt76x2 PCIe driver.
 */
const struct ieee80211_ops mt76x2u_ops = {
	.tx = mt76x2_tx,
	.start = mt76x2u_start,
	.stop = mt76x2u_stop,
	.add_interface = mt76x2u_add_interface,
	.remove_interface = mt76x2_remove_interface,
	.sta_add = mt76x2_sta_add,
	.sta_remove = mt76x2_sta_remove,
	.set_key = mt76x2_set_key,
	.ampdu_action = mt76x2_ampdu_action,
	.config = mt76x2u_config,
	.wake_tx_queue = mt76_wake_tx_queue,
	.bss_info_changed = mt76x2u_bss_info_changed,
	.configure_filter = mt76x2_configure_filter,
	.conf_tx = mt76x2_conf_tx,
	.sw_scan_start = mt76x2u_sw_scan,
	.sw_scan_complete = mt76x2u_sw_scan_complete,
	.sta_rate_tbl_update = mt76x2_sta_rate_tbl_update,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c
new file mode 100644
index 000000000000..22c16d638baa
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_mcu.c
@@ -0,0 +1,463 @@
1/*
2 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/firmware.h>
18
19#include "mt76x2u.h"
20#include "mt76x2_eeprom.h"
21
22#define MT_CMD_HDR_LEN 4
23#define MT_INBAND_PACKET_MAX_LEN 192
24#define MT_MCU_MEMMAP_WLAN 0x410000
25
26#define MCU_FW_URB_MAX_PAYLOAD 0x3900
27#define MCU_ROM_PATCH_MAX_PAYLOAD 2048
28
29#define MT76U_MCU_ILM_OFFSET 0x80000
30#define MT76U_MCU_DLM_OFFSET 0x110000
31#define MT76U_MCU_ROM_PATCH_OFFSET 0x90000
32
/* Send a CMD_FUN_SET_OP message to the MCU selecting a firmware
 * function. Returns 0 or a negative errno. A response is waited for
 * except for Q_SELECT, which gets no reply.
 */
static int
mt76x2u_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
			    u32 val)
{
	struct {
		__le32 id;
		__le32 value;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(func),
		.value = cpu_to_le32(val),
	};
	struct sk_buff *skb;

	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_FUN_SET_OP,
				  func != Q_SELECT);
}
52
/* Turn the radio on/off via a CMD_POWER_SAVING_OP message (level 0,
 * no response expected). Returns 0 or a negative errno.
 */
int mt76x2u_mcu_set_radio_state(struct mt76x2_dev *dev, bool val)
{
	struct {
		__le32 mode;
		__le32 level;
	} __packed __aligned(4) msg = {
		.mode = cpu_to_le32(val ? RADIO_ON : RADIO_OFF),
		.level = cpu_to_le32(0),
	};
	struct sk_buff *skb;

	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_POWER_SAVING_OP,
				  false);
}
70
/* Ask the MCU to load a calibration register (CR) table of the given
 * type for the given temperature level and channel. The cfg word
 * carries PA/LNA configuration bits read from the EEPROM NIC_CONF
 * words. Returns 0 or a negative errno.
 */
int mt76x2u_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
			u8 channel)
{
	struct {
		u8 cr_mode;
		u8 temp;
		u8 ch;
		u8 _pad0;
		__le32 cfg;
	} __packed __aligned(4) msg = {
		.cr_mode = type,
		.temp = temp_level,
		.ch = channel,
	};
	struct sk_buff *skb;
	u32 val;

	val = BIT(31);
	val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
	val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
	msg.cfg = cpu_to_le32(val);

	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_LOAD_CR, true);
}
99
/* Program the channel via the MCU in two steps: first without the
 * extension channel info, then again with it filled in.
 * NOTE(review): the status of the first message is deliberately not
 * checked — presumably best-effort; confirm against vendor reference.
 * Returns the status of the second message, 0 on success.
 */
int mt76x2u_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
			    u8 bw_index, bool scan)
{
	struct {
		u8 idx;
		u8 scan;
		u8 bw;
		u8 _pad0;

		__le16 chainmask;
		u8 ext_chan;
		u8 _pad1;

	} __packed __aligned(4) msg = {
		.idx = channel,
		.scan = scan,
		.bw = bw,
		.chainmask = cpu_to_le16(dev->chainmask),
	};
	struct sk_buff *skb;

	/* first set the channel without the extension channel info */
	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;

	mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true);

	usleep_range(5000, 10000);

	/* then resend with the extension channel encoded as 0xe0 + index */
	msg.ext_chan = 0xe0 + bw_index;
	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;

	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_SWITCH_CHANNEL_OP, true);
}
137
/* Trigger a firmware calibration of the given type with a type-specific
 * argument, waiting for completion. Returns 0 or a negative errno.
 */
int mt76x2u_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
			  u32 val)
{
	struct {
		__le32 id;
		__le32 value;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(type),
		.value = cpu_to_le32(val),
	};
	struct sk_buff *skb;

	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true);
}
155
/* Set the initial AGC gain for a channel. BIT(31) in the channel word
 * forces the firmware to apply the value unconditionally.
 * Returns 0 or a negative errno.
 */
int mt76x2u_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
			  bool force)
{
	struct {
		__le32 channel;
		__le32 gain_val;
	} __packed __aligned(4) msg = {
		.channel = cpu_to_le32(channel),
		.gain_val = cpu_to_le32(gain),
	};
	struct sk_buff *skb;

	if (force)
		msg.channel |= cpu_to_le32(BIT(31));

	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_INIT_GAIN_OP, true);
}
176
/* Feed the firmware's dynamic VGA (gain) tuning with the current rssi
 * and false-CCA statistics. BIT(31)/BIT(30) of the channel word flag
 * AP mode and extension channel respectively.
 * Returns 0 or a negative errno.
 */
int mt76x2u_mcu_set_dynamic_vga(struct mt76x2_dev *dev, u8 channel, bool ap,
				bool ext, int rssi, u32 false_cca)
{
	struct {
		__le32 channel;
		__le32 rssi_val;
		__le32 false_cca_val;
	} __packed __aligned(4) msg = {
		.rssi_val = cpu_to_le32(rssi),
		.false_cca_val = cpu_to_le32(false_cca),
	};
	struct sk_buff *skb;
	u32 val = channel;

	if (ap)
		val |= BIT(31);
	if (ext)
		val |= BIT(30);
	msg.channel = cpu_to_le32(val);

	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_DYNC_VGA_OP, true);
}
202
/* Send a TSSI compensation request (trigger or apply, per tssi_data's
 * cal_mode) as an MCU_CAL_TSSI_COMP calibration op.
 * Returns 0 or a negative errno.
 */
int mt76x2u_mcu_tssi_comp(struct mt76x2_dev *dev,
			  struct mt76x2_tssi_comp *tssi_data)
{
	struct {
		__le32 id;
		struct mt76x2_tssi_comp data;
	} __packed __aligned(4) msg = {
		.id = cpu_to_le32(MCU_CAL_TSSI_COMP),
		.data = *tssi_data,
	};
	struct sk_buff *skb;

	skb = mt76u_mcu_msg_alloc(&msg, sizeof(msg));
	if (!skb)
		return -ENOMEM;
	return mt76u_mcu_send_msg(&dev->mt76, skb, CMD_CALIBRATION_OP, true);
}
220
/* Kick the MCU's initial vector block (IVB) via a zero-length vendor
 * control request; this starts firmware execution after download.
 */
static void mt76x2u_mcu_load_ivb(struct mt76x2_dev *dev)
{
	mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
			     USB_DIR_OUT | USB_TYPE_VENDOR,
			     0x12, 0, NULL, 0);
}
227
228static void mt76x2u_mcu_enable_patch(struct mt76x2_dev *dev)
229{
230 struct mt76_usb *usb = &dev->mt76.usb;
231 const u8 data[] = {
232 0x6f, 0xfc, 0x08, 0x01,
233 0x20, 0x04, 0x00, 0x00,
234 0x00, 0x09, 0x00,
235 };
236
237 memcpy(usb->data, data, sizeof(data));
238 mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
239 USB_DIR_OUT | USB_TYPE_CLASS,
240 0x12, 0, usb->data, sizeof(data));
241}
242
243static void mt76x2u_mcu_reset_wmt(struct mt76x2_dev *dev)
244{
245 struct mt76_usb *usb = &dev->mt76.usb;
246 u8 data[] = {
247 0x6f, 0xfc, 0x05, 0x01,
248 0x07, 0x01, 0x00, 0x04
249 };
250
251 memcpy(usb->data, data, sizeof(data));
252 mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
253 USB_DIR_OUT | USB_TYPE_CLASS,
254 0x12, 0, usb->data, sizeof(data));
255}
256
/* Download and activate the ROM patch firmware image. On chips other
 * than MT7612 the patch region is protected by a hardware semaphore
 * which must be taken first and is always released on exit. Skips the
 * download when the patch-applied bit is already set.
 * Returns 0 on success or a negative errno.
 */
static int mt76x2u_mcu_load_rom_patch(struct mt76x2_dev *dev)
{
	bool rom_protect = !is_mt7612(dev);
	struct mt76x2_patch_header *hdr;
	u32 val, patch_mask, patch_reg;
	const struct firmware *fw;
	int err;

	if (rom_protect &&
	    !mt76_poll_msec(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
		dev_err(dev->mt76.dev,
			"could not get hardware semaphore for ROM PATCH\n");
		return -ETIMEDOUT;
	}

	/* the "patch applied" flag lives in a different register on E3+ */
	if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
		patch_mask = BIT(0);
		patch_reg = MT_MCU_CLOCK_CTL;
	} else {
		patch_mask = BIT(1);
		patch_reg = MT_MCU_COM_REG0;
	}

	if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
		dev_info(dev->mt76.dev, "ROM patch already applied\n");
		return 0;
	}

	err = request_firmware(&fw, MT7662U_ROM_PATCH, dev->mt76.dev);
	if (err < 0)
		return err;

	/* sanity-check the image before touching the hardware */
	if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
		dev_err(dev->mt76.dev, "failed to load firmware\n");
		err = -EIO;
		goto out;
	}

	hdr = (struct mt76x2_patch_header *)fw->data;
	dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);

	/* enable USB_DMA_CFG */
	val = MT_USB_DMA_CFG_RX_BULK_EN |
	      MT_USB_DMA_CFG_TX_BULK_EN |
	      FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
	mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);

	/* vendor reset */
	mt76u_mcu_fw_reset(&dev->mt76);
	usleep_range(5000, 10000);

	/* enable FCE to send in-band cmd */
	mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
	/* FCE tx_fs_base_ptr */
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
	/* FCE tx_fs_max_cnt */
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
	/* FCE pdma enable */
	mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
	/* FCE skip_fs_en */
	mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);

	/* push the image payload (header stripped) to the patch region */
	err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
				     fw->size - sizeof(*hdr),
				     MCU_ROM_PATCH_MAX_PAYLOAD,
				     MT76U_MCU_ROM_PATCH_OFFSET);
	if (err < 0) {
		err = -EIO;
		goto out;
	}

	mt76x2u_mcu_enable_patch(dev);
	mt76x2u_mcu_reset_wmt(dev);
	mdelay(20);

	/* confirm the patch-applied bit came up */
	if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 100)) {
		dev_err(dev->mt76.dev, "failed to load ROM patch\n");
		err = -ETIMEDOUT;
	}

out:
	/* always release the semaphore, even on failure */
	if (rom_protect)
		mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
	release_firmware(fw);
	return err;
}
343
/* Download the main MCU firmware: validate the image header, reset the
 * MCU, set up USB DMA and the FCE in-band command path, upload the ILM
 * and DLM sections and start execution via the IVB request.
 * Returns 0 on success or a negative errno.
 */
static int mt76x2u_mcu_load_firmware(struct mt76x2_dev *dev)
{
	u32 val, dlm_offset = MT76U_MCU_DLM_OFFSET;
	const struct mt76x2_fw_header *hdr;
	int err, len, ilm_len, dlm_len;
	const struct firmware *fw;

	err = request_firmware(&fw, MT7662U_FIRMWARE, dev->mt76.dev);
	if (err < 0)
		return err;

	if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
		err = -EINVAL;
		goto out;
	}

	/* the image must be exactly header + ILM + DLM */
	hdr = (const struct mt76x2_fw_header *)fw->data;
	ilm_len = le32_to_cpu(hdr->ilm_len);
	dlm_len = le32_to_cpu(hdr->dlm_len);
	len = sizeof(*hdr) + ilm_len + dlm_len;
	if (fw->size != len) {
		err = -EINVAL;
		goto out;
	}

	val = le16_to_cpu(hdr->fw_ver);
	dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
		 (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);

	val = le16_to_cpu(hdr->build_ver);
	dev_info(dev->mt76.dev, "Build: %x\n", val);
	dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);

	/* vendor reset */
	mt76u_mcu_fw_reset(&dev->mt76);
	usleep_range(5000, 10000);

	/* enable USB_DMA_CFG */
	val = MT_USB_DMA_CFG_RX_BULK_EN |
	      MT_USB_DMA_CFG_TX_BULK_EN |
	      FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20);
	mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
	/* enable FCE to send in-band cmd */
	mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
	/* FCE tx_fs_base_ptr */
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
	/* FCE tx_fs_max_cnt */
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 0x1);
	/* FCE pdma enable */
	mt76_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
	/* FCE skip_fs_en */
	mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);

	/* load ILM */
	err = mt76u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
				     ilm_len, MCU_FW_URB_MAX_PAYLOAD,
				     MT76U_MCU_ILM_OFFSET);
	if (err < 0) {
		err = -EIO;
		goto out;
	}

	/* load DLM; the DLM region moved up by 0x800 on E3+ silicon */
	if (mt76xx_rev(dev) >= MT76XX_REV_E3)
		dlm_offset += 0x800;
	err = mt76u_mcu_fw_send_data(&dev->mt76,
				     fw->data + sizeof(*hdr) + ilm_len,
				     dlm_len, MCU_FW_URB_MAX_PAYLOAD,
				     dlm_offset);
	if (err < 0) {
		err = -EIO;
		goto out;
	}

	/* start the firmware and wait for its ready flag */
	mt76x2u_mcu_load_ivb(dev);
	if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 100)) {
		dev_err(dev->mt76.dev, "firmware failed to start\n");
		err = -ETIMEDOUT;
		goto out;
	}

	mt76_set(dev, MT_MCU_COM_REG0, BIT(1));
	/* enable FCE to send in-band cmd */
	mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
	dev_dbg(dev->mt76.dev, "firmware running\n");

out:
	release_firmware(fw);
	return err;
}
434
/* Load all MCU firmware: the ROM patch has to be in place before the
 * main image is downloaded and started.
 * Returns 0 on success or a negative errno.
 */
int mt76x2u_mcu_fw_init(struct mt76x2_dev *dev)
{
	int ret = mt76x2u_mcu_load_rom_patch(dev);

	return ret < 0 ? ret : mt76x2u_mcu_load_firmware(dev);
}
445
446int mt76x2u_mcu_init(struct mt76x2_dev *dev)
447{
448 int err;
449
450 err = mt76x2u_mcu_function_select(dev, Q_SELECT, 1);
451 if (err < 0)
452 return err;
453
454 return mt76x2u_mcu_set_radio_state(dev, true);
455}
456
/* Release MCU USB resources: kill the response URB and free its buffer. */
void mt76x2u_mcu_deinit(struct mt76x2_dev *dev)
{
	struct mt76_usb *usb = &dev->mt76.usb;

	usb_kill_urb(usb->mcu.res.urb);
	mt76u_buf_free(&usb->mcu.res);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c
new file mode 100644
index 000000000000..5158063d0c2e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2u_phy.c
@@ -0,0 +1,303 @@
1/*
2 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "mt76x2u.h"
18#include "mt76x2_eeprom.h"
19
20void mt76x2u_phy_set_rxpath(struct mt76x2_dev *dev)
21{
22 u32 val;
23
24 val = mt76_rr(dev, MT_BBP(AGC, 0));
25 val &= ~BIT(4);
26
27 switch (dev->chainmask & 0xf) {
28 case 2:
29 val |= BIT(3);
30 break;
31 default:
32 val &= ~BIT(3);
33 break;
34 }
35 mt76_wr(dev, MT_BBP(AGC, 0), val);
36}
37
38void mt76x2u_phy_set_txdac(struct mt76x2_dev *dev)
39{
40 int txpath;
41
42 txpath = (dev->chainmask >> 8) & 0xf;
43 switch (txpath) {
44 case 2:
45 mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
46 break;
47 default:
48 mt76_clear(dev, MT_BBP(TXBE, 5), 0x3);
49 break;
50 }
51}
52
/* Run the per-channel calibration sequence with the MAC stopped.
 * Skipped entirely on "silent" (no-tx) channels. LC calibration is
 * only needed on 5 GHz.
 */
void mt76x2u_phy_channel_calibrate(struct mt76x2_dev *dev)
{
	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
	bool is_5ghz = chan->band == NL80211_BAND_5GHZ;

	if (mt76x2_channel_silent(dev))
		return;

	/* MAC must be idle while calibration runs */
	mt76x2u_mac_stop(dev);

	if (is_5ghz)
		mt76x2u_mcu_calibrate(dev, MCU_CAL_LC, 0);

	mt76x2u_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
	mt76x2u_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
	mt76x2u_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
	mt76x2u_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);

	mt76x2u_mac_resume(dev);
}
73
/* Two-phase TSSI compensation state machine, driven from the periodic
 * calibration work: first invocation triggers a TSSI measurement,
 * a later invocation (once the measurement bit clears) applies the
 * per-chain slope/offset compensation and, for internal-PA boards,
 * runs the one-off DPD calibration.
 */
static void
mt76x2u_phy_tssi_compensate(struct mt76x2_dev *dev)
{
	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
	struct mt76x2_tx_power_info txp;
	struct mt76x2_tssi_comp t = {};

	if (!dev->cal.tssi_cal_done)
		return;

	if (!dev->cal.tssi_comp_pending) {
		/* TSSI trigger */
		t.cal_mode = BIT(0);
		mt76x2u_mcu_tssi_comp(dev, &t);
		dev->cal.tssi_comp_pending = true;
	} else {
		/* measurement still in progress; try again next cycle */
		if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4))
			return;

		dev->cal.tssi_comp_pending = false;
		mt76x2_get_power_info(dev, &txp, chan);

		if (mt76x2_ext_pa_enabled(dev, chan->band))
			t.pa_mode = 1;

		/* apply compensation with EEPROM slope/offset per chain */
		t.cal_mode = BIT(1);
		t.slope0 = txp.chain[0].tssi_slope;
		t.offset0 = txp.chain[0].tssi_offset;
		t.slope1 = txp.chain[1].tssi_slope;
		t.offset1 = txp.chain[1].tssi_offset;
		mt76x2u_mcu_tssi_comp(dev, &t);

		/* DPD only for internal PA, and only once per channel */
		if (t.pa_mode || dev->cal.dpd_cal_done)
			return;

		usleep_range(10000, 20000);
		mt76x2u_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value);
		dev->cal.dpd_cal_done = true;
	}
}
114
/* Periodic AGC/VGA update: compute the control channel number for the
 * current bandwidth, gather min average rssi and false-CCA statistics
 * and hand them to the firmware's dynamic VGA tuning.
 */
static void
mt76x2u_phy_update_channel_gain(struct mt76x2_dev *dev)
{
	u8 channel = dev->mt76.chandef.chan->hw_value;
	int freq, freq1;
	u32 false_cca;

	freq = dev->mt76.chandef.chan->center_freq;
	freq1 = dev->mt76.chandef.center_freq1;

	/* adjust the channel number to the primary 20 MHz sub-channel */
	switch (dev->mt76.chandef.width) {
	case NL80211_CHAN_WIDTH_80: {
		int ch_group_index;

		ch_group_index = (freq - freq1 + 30) / 20;
		if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
			ch_group_index = 0;
		channel += 6 - ch_group_index * 4;
		break;
	}
	case NL80211_CHAN_WIDTH_40:
		if (freq1 > freq)
			channel += 2;
		else
			channel -= 2;
		break;
	default:
		break;
	}

	dev->cal.avg_rssi_all = mt76x2_phy_get_min_avg_rssi(dev);
	false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS,
			      mt76_rr(dev, MT_RX_STAT_1));

	mt76x2u_mcu_set_dynamic_vga(dev, channel, false, false,
				    dev->cal.avg_rssi_all, false_cca);
}
152
/* Delayed-work handler: run TSSI compensation and channel gain tuning,
 * then re-arm itself at MT_CALIBRATE_INTERVAL.
 */
void mt76x2u_phy_calibrate(struct work_struct *work)
{
	struct mt76x2_dev *dev;

	dev = container_of(work, struct mt76x2_dev, cal_work.work);
	mt76x2u_phy_tssi_compensate(dev);
	mt76x2u_phy_update_channel_gain(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
				     MT_CALIBRATE_INTERVAL);
}
164
/* Program the PHY for a new channel: derive bandwidth parameters and
 * the primary sub-channel, configure gain/power/band registers, set
 * the channel via the MCU and run the required calibrations. During a
 * scan only the minimum (pre-calibration) work is done.
 * Returns 0 on success or a negative errno from the MCU.
 */
int mt76x2u_phy_set_channel(struct mt76x2_dev *dev,
			    struct cfg80211_chan_def *chandef)
{
	/* per-sub-channel extended CCA register templates, indexed by
	 * the primary channel's position in the 80 MHz group
	 */
	u32 ext_cca_chan[4] = {
		[0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
		[1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
		[2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
		[3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
	};
	bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
	struct ieee80211_channel *chan = chandef->chan;
	u8 channel = chan->hw_value, bw, bw_index;
	int ch_group_index, freq, freq1, ret;

	dev->cal.channel_cal_done = false;
	freq = chandef->chan->center_freq;
	freq1 = chandef->center_freq1;

	/* map chandef width to the hw bw code and primary sub-channel */
	switch (chandef->width) {
	case NL80211_CHAN_WIDTH_40:
		bw = 1;
		if (freq1 > freq) {
			bw_index = 1;
			ch_group_index = 0;
		} else {
			bw_index = 3;
			ch_group_index = 1;
		}
		channel += 2 - ch_group_index * 4;
		break;
	case NL80211_CHAN_WIDTH_80:
		ch_group_index = (freq - freq1 + 30) / 20;
		if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
			ch_group_index = 0;
		bw = 2;
		bw_index = ch_group_index;
		channel += 6 - ch_group_index * 4;
		break;
	default:
		bw = 0;
		bw_index = 0;
		ch_group_index = 0;
		break;
	}

	mt76x2_read_rx_gain(dev);
	mt76x2_phy_set_txpower_regs(dev, chan->band);
	mt76x2_configure_tx_delay(dev, chan->band, bw);
	mt76x2_phy_set_txpower(dev);

	mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
	mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);

	mt76_rmw(dev, MT_EXT_CCA_CFG,
		 (MT_EXT_CCA_CFG_CCA0 |
		  MT_EXT_CCA_CFG_CCA1 |
		  MT_EXT_CCA_CFG_CCA2 |
		  MT_EXT_CCA_CFG_CCA3 |
		  MT_EXT_CCA_CFG_CCA_MASK),
		 ext_cca_chan[ch_group_index]);

	ret = mt76x2u_mcu_set_channel(dev, channel, bw, bw_index, scan);
	if (ret)
		return ret;

	mt76x2u_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);

	/* Enable LDPC Rx */
	if (mt76xx_rev(dev) >= MT76XX_REV_E3)
		mt76_set(dev, MT_BBP(RXO, 13), BIT(10));

	/* one-off R calibration, only if the EEPROM has no stored result */
	if (!dev->cal.init_cal_done) {
		u8 val = mt76x2_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);

		if (val != 0xff)
			mt76x2u_mcu_calibrate(dev, MCU_CAL_R, 0);
	}

	mt76x2u_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);

	/* Rx LPF calibration */
	if (!dev->cal.init_cal_done)
		mt76x2u_mcu_calibrate(dev, MCU_CAL_RC, 0);
	dev->cal.init_cal_done = true;

	/* vendor-provided AGC/TXOP defaults */
	mt76_wr(dev, MT_BBP(AGC, 61), 0xff64a4e2);
	mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010);
	mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404);
	mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
	mt76_wr(dev, MT_TXOP_CTRL_CFG, 0X04101b3f);

	mt76_set(dev, MT_BBP(TXO, 4), BIT(25));
	mt76_set(dev, MT_BBP(RXO, 13), BIT(8));

	/* while scanning, skip TSSI init and periodic calibration */
	if (scan)
		return 0;

	if (mt76x2_tssi_enabled(dev)) {
		/* init default values for temp compensation */
		mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
			       0x38);
		mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
			       0x38);

		/* init tssi calibration */
		if (!mt76x2_channel_silent(dev)) {
			struct ieee80211_channel *chan;
			u32 flag = 0;

			chan = dev->mt76.chandef.chan;
			if (chan->band == NL80211_BAND_5GHZ)
				flag |= BIT(0);
			if (mt76x2_ext_pa_enabled(dev, chan->band))
				flag |= BIT(8);
			mt76x2u_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
			dev->cal.tssi_cal_done = true;
		}
	}

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
				     MT_CALIBRATE_INTERVAL);
	return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index e96956710fb2..af48d43bb7dc 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -51,7 +51,7 @@ __mt76_get_txwi(struct mt76_dev *dev)
51 return t; 51 return t;
52} 52}
53 53
54static struct mt76_txwi_cache * 54struct mt76_txwi_cache *
55mt76_get_txwi(struct mt76_dev *dev) 55mt76_get_txwi(struct mt76_dev *dev)
56{ 56{
57 struct mt76_txwi_cache *t = __mt76_get_txwi(dev); 57 struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
@@ -91,80 +91,6 @@ mt76_txq_get_qid(struct ieee80211_txq *txq)
91 return txq->ac; 91 return txq->ac;
92} 92}
93 93
94int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
95 struct sk_buff *skb, struct mt76_wcid *wcid,
96 struct ieee80211_sta *sta)
97{
98 struct mt76_queue_entry e;
99 struct mt76_txwi_cache *t;
100 struct mt76_queue_buf buf[32];
101 struct sk_buff *iter;
102 dma_addr_t addr;
103 int len;
104 u32 tx_info = 0;
105 int n, ret;
106
107 t = mt76_get_txwi(dev);
108 if (!t) {
109 ieee80211_free_txskb(dev->hw, skb);
110 return -ENOMEM;
111 }
112
113 dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
114 DMA_TO_DEVICE);
115 ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
116 &tx_info);
117 dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
118 DMA_TO_DEVICE);
119 if (ret < 0)
120 goto free;
121
122 len = skb->len - skb->data_len;
123 addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
124 if (dma_mapping_error(dev->dev, addr)) {
125 ret = -ENOMEM;
126 goto free;
127 }
128
129 n = 0;
130 buf[n].addr = t->dma_addr;
131 buf[n++].len = dev->drv->txwi_size;
132 buf[n].addr = addr;
133 buf[n++].len = len;
134
135 skb_walk_frags(skb, iter) {
136 if (n == ARRAY_SIZE(buf))
137 goto unmap;
138
139 addr = dma_map_single(dev->dev, iter->data, iter->len,
140 DMA_TO_DEVICE);
141 if (dma_mapping_error(dev->dev, addr))
142 goto unmap;
143
144 buf[n].addr = addr;
145 buf[n++].len = iter->len;
146 }
147
148 if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
149 goto unmap;
150
151 return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);
152
153unmap:
154 ret = -ENOMEM;
155 for (n--; n > 0; n--)
156 dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
157 DMA_TO_DEVICE);
158
159free:
160 e.skb = skb;
161 e.txwi = t;
162 dev->drv->tx_complete_skb(dev, q, &e, true);
163 mt76_put_txwi(dev, t);
164 return ret;
165}
166EXPORT_SYMBOL_GPL(mt76_tx_queue_skb);
167
168void 94void
169mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta, 95mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
170 struct mt76_wcid *wcid, struct sk_buff *skb) 96 struct mt76_wcid *wcid, struct sk_buff *skb)
@@ -185,7 +111,7 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
185 q = &dev->q_tx[qid]; 111 q = &dev->q_tx[qid];
186 112
187 spin_lock_bh(&q->lock); 113 spin_lock_bh(&q->lock);
188 mt76_tx_queue_skb(dev, q, skb, wcid, sta); 114 dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
189 dev->queue_ops->kick(dev, q); 115 dev->queue_ops->kick(dev, q);
190 116
191 if (q->queued > q->ndesc - 8) 117 if (q->queued > q->ndesc - 8)
@@ -241,7 +167,7 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
241 info->flags |= IEEE80211_TX_STATUS_EOSP; 167 info->flags |= IEEE80211_TX_STATUS_EOSP;
242 168
243 mt76_skb_set_moredata(skb, !last); 169 mt76_skb_set_moredata(skb, !last);
244 mt76_tx_queue_skb(dev, hwq, skb, wcid, sta); 170 dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta);
245} 171}
246 172
247void 173void
@@ -321,7 +247,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
321 if (ampdu) 247 if (ampdu)
322 mt76_check_agg_ssn(mtxq, skb); 248 mt76_check_agg_ssn(mtxq, skb);
323 249
324 idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta); 250 idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
325 251
326 if (idx < 0) 252 if (idx < 0)
327 return idx; 253 return idx;
@@ -356,7 +282,8 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
356 if (cur_ampdu) 282 if (cur_ampdu)
357 mt76_check_agg_ssn(mtxq, skb); 283 mt76_check_agg_ssn(mtxq, skb);
358 284
359 idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta); 285 idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid,
286 txq->sta);
360 if (idx < 0) 287 if (idx < 0)
361 return idx; 288 return idx;
362 289
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
new file mode 100644
index 000000000000..7780b07543bb
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -0,0 +1,845 @@
1/*
2 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "mt76.h"
18#include "usb_trace.h"
19#include "dma.h"
20
21#define MT_VEND_REQ_MAX_RETRY 10
22#define MT_VEND_REQ_TOUT_MS 300
23
24/* should be called with usb_ctrl_mtx locked */
25static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
26 u8 req_type, u16 val, u16 offset,
27 void *buf, size_t len)
28{
29 struct usb_interface *intf = to_usb_interface(dev->dev);
30 struct usb_device *udev = interface_to_usbdev(intf);
31 unsigned int pipe;
32 int i, ret;
33
34 pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
35 : usb_sndctrlpipe(udev, 0);
36 for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
37 if (test_bit(MT76_REMOVED, &dev->state))
38 return -EIO;
39
40 ret = usb_control_msg(udev, pipe, req, req_type, val,
41 offset, buf, len, MT_VEND_REQ_TOUT_MS);
42 if (ret == -ENODEV)
43 set_bit(MT76_REMOVED, &dev->state);
44 if (ret >= 0 || ret == -ENODEV)
45 return ret;
46 usleep_range(5000, 10000);
47 }
48
49 dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
50 req, offset, ret);
51 return ret;
52}
53
54int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
55 u8 req_type, u16 val, u16 offset,
56 void *buf, size_t len)
57{
58 int ret;
59
60 mutex_lock(&dev->usb.usb_ctrl_mtx);
61 ret = __mt76u_vendor_request(dev, req, req_type,
62 val, offset, buf, len);
63 trace_usb_reg_wr(dev, offset, val);
64 mutex_unlock(&dev->usb.usb_ctrl_mtx);
65
66 return ret;
67}
68EXPORT_SYMBOL_GPL(mt76u_vendor_request);
69
70/* should be called with usb_ctrl_mtx locked */
71static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
72{
73 struct mt76_usb *usb = &dev->usb;
74 u32 data = ~0;
75 u16 offset;
76 int ret;
77 u8 req;
78
79 switch (addr & MT_VEND_TYPE_MASK) {
80 case MT_VEND_TYPE_EEPROM:
81 req = MT_VEND_READ_EEPROM;
82 break;
83 case MT_VEND_TYPE_CFG:
84 req = MT_VEND_READ_CFG;
85 break;
86 default:
87 req = MT_VEND_MULTI_READ;
88 break;
89 }
90 offset = addr & ~MT_VEND_TYPE_MASK;
91
92 ret = __mt76u_vendor_request(dev, req,
93 USB_DIR_IN | USB_TYPE_VENDOR,
94 0, offset, usb->data, sizeof(__le32));
95 if (ret == sizeof(__le32))
96 data = get_unaligned_le32(usb->data);
97 trace_usb_reg_rr(dev, addr, data);
98
99 return data;
100}
101
102u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
103{
104 u32 ret;
105
106 mutex_lock(&dev->usb.usb_ctrl_mtx);
107 ret = __mt76u_rr(dev, addr);
108 mutex_unlock(&dev->usb.usb_ctrl_mtx);
109
110 return ret;
111}
112
113/* should be called with usb_ctrl_mtx locked */
114static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
115{
116 struct mt76_usb *usb = &dev->usb;
117 u16 offset;
118 u8 req;
119
120 switch (addr & MT_VEND_TYPE_MASK) {
121 case MT_VEND_TYPE_CFG:
122 req = MT_VEND_WRITE_CFG;
123 break;
124 default:
125 req = MT_VEND_MULTI_WRITE;
126 break;
127 }
128 offset = addr & ~MT_VEND_TYPE_MASK;
129
130 put_unaligned_le32(val, usb->data);
131 __mt76u_vendor_request(dev, req,
132 USB_DIR_OUT | USB_TYPE_VENDOR, 0,
133 offset, usb->data, sizeof(__le32));
134 trace_usb_reg_wr(dev, addr, val);
135}
136
137void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
138{
139 mutex_lock(&dev->usb.usb_ctrl_mtx);
140 __mt76u_wr(dev, addr, val);
141 mutex_unlock(&dev->usb.usb_ctrl_mtx);
142}
143
144static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
145 u32 mask, u32 val)
146{
147 mutex_lock(&dev->usb.usb_ctrl_mtx);
148 val |= __mt76u_rr(dev, addr) & ~mask;
149 __mt76u_wr(dev, addr, val);
150 mutex_unlock(&dev->usb.usb_ctrl_mtx);
151
152 return val;
153}
154
155static void mt76u_copy(struct mt76_dev *dev, u32 offset,
156 const void *data, int len)
157{
158 struct mt76_usb *usb = &dev->usb;
159 const u32 *val = data;
160 int i, ret;
161
162 mutex_lock(&usb->usb_ctrl_mtx);
163 for (i = 0; i < (len / 4); i++) {
164 put_unaligned_le32(val[i], usb->data);
165 ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
166 USB_DIR_OUT | USB_TYPE_VENDOR,
167 0, offset + i * 4, usb->data,
168 sizeof(__le32));
169 if (ret < 0)
170 break;
171 }
172 mutex_unlock(&usb->usb_ctrl_mtx);
173}
174
175void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
176 const u16 offset, const u32 val)
177{
178 mutex_lock(&dev->usb.usb_ctrl_mtx);
179 __mt76u_vendor_request(dev, req,
180 USB_DIR_OUT | USB_TYPE_VENDOR,
181 val & 0xffff, offset, NULL, 0);
182 __mt76u_vendor_request(dev, req,
183 USB_DIR_OUT | USB_TYPE_VENDOR,
184 val >> 16, offset + 2, NULL, 0);
185 mutex_unlock(&dev->usb.usb_ctrl_mtx);
186}
187EXPORT_SYMBOL_GPL(mt76u_single_wr);
188
189static int
190mt76u_set_endpoints(struct usb_interface *intf,
191 struct mt76_usb *usb)
192{
193 struct usb_host_interface *intf_desc = intf->cur_altsetting;
194 struct usb_endpoint_descriptor *ep_desc;
195 int i, in_ep = 0, out_ep = 0;
196
197 for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
198 ep_desc = &intf_desc->endpoint[i].desc;
199
200 if (usb_endpoint_is_bulk_in(ep_desc) &&
201 in_ep < __MT_EP_IN_MAX) {
202 usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
203 usb->in_max_packet = usb_endpoint_maxp(ep_desc);
204 in_ep++;
205 } else if (usb_endpoint_is_bulk_out(ep_desc) &&
206 out_ep < __MT_EP_OUT_MAX) {
207 usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
208 usb->out_max_packet = usb_endpoint_maxp(ep_desc);
209 out_ep++;
210 }
211 }
212
213 if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
214 return -EINVAL;
215 return 0;
216}
217
218static int
219mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
220 int nsgs, int len, int sglen)
221{
222 struct urb *urb = buf->urb;
223 int i;
224
225 for (i = 0; i < nsgs; i++) {
226 struct page *page;
227 void *data;
228 int offset;
229
230 data = netdev_alloc_frag(len);
231 if (!data)
232 break;
233
234 page = virt_to_head_page(data);
235 offset = data - page_address(page);
236 sg_set_page(&urb->sg[i], page, sglen, offset);
237 }
238
239 if (i < nsgs) {
240 int j;
241
242 for (j = nsgs; j < urb->num_sgs; j++)
243 skb_free_frag(sg_virt(&urb->sg[j]));
244 urb->num_sgs = i;
245 }
246
247 urb->num_sgs = max_t(int, i, urb->num_sgs);
248 buf->len = urb->num_sgs * sglen,
249 sg_init_marker(urb->sg, urb->num_sgs);
250
251 return i ? : -ENOMEM;
252}
253
254int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
255 int nsgs, int len, int sglen, gfp_t gfp)
256{
257 buf->urb = usb_alloc_urb(0, gfp);
258 if (!buf->urb)
259 return -ENOMEM;
260
261 buf->urb->sg = devm_kzalloc(dev->dev, nsgs * sizeof(*buf->urb->sg),
262 gfp);
263 if (!buf->urb->sg)
264 return -ENOMEM;
265
266 sg_init_table(buf->urb->sg, nsgs);
267 buf->dev = dev;
268
269 return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen);
270}
271EXPORT_SYMBOL_GPL(mt76u_buf_alloc);
272
273void mt76u_buf_free(struct mt76u_buf *buf)
274{
275 struct urb *urb = buf->urb;
276 int i;
277
278 for (i = 0; i < urb->num_sgs; i++)
279 skb_free_frag(sg_virt(&urb->sg[i]));
280 usb_free_urb(buf->urb);
281}
282EXPORT_SYMBOL_GPL(mt76u_buf_free);
283
284int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
285 struct mt76u_buf *buf, gfp_t gfp,
286 usb_complete_t complete_fn, void *context)
287{
288 struct usb_interface *intf = to_usb_interface(dev->dev);
289 struct usb_device *udev = interface_to_usbdev(intf);
290 unsigned int pipe;
291
292 if (dir == USB_DIR_IN)
293 pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
294 else
295 pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);
296
297 usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, buf->len,
298 complete_fn, context);
299
300 return usb_submit_urb(buf->urb, gfp);
301}
302EXPORT_SYMBOL_GPL(mt76u_submit_buf);
303
304static inline struct mt76u_buf
305*mt76u_get_next_rx_entry(struct mt76_queue *q)
306{
307 struct mt76u_buf *buf = NULL;
308 unsigned long flags;
309
310 spin_lock_irqsave(&q->lock, flags);
311 if (q->queued > 0) {
312 buf = &q->entry[q->head].ubuf;
313 q->head = (q->head + 1) % q->ndesc;
314 q->queued--;
315 }
316 spin_unlock_irqrestore(&q->lock, flags);
317
318 return buf;
319}
320
321static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
322{
323 u16 dma_len, min_len;
324
325 dma_len = get_unaligned_le16(data);
326 min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
327 MT_FCE_INFO_LEN;
328
329 if (data_len < min_len || WARN_ON(!dma_len) ||
330 WARN_ON(dma_len + MT_DMA_HDR_LEN > data_len) ||
331 WARN_ON(dma_len & 0x3))
332 return -EINVAL;
333 return dma_len;
334}
335
336static int
337mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
338{
339 struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
340 u8 *data = sg_virt(&urb->sg[0]);
341 int data_len, len, nsgs = 1;
342 struct sk_buff *skb;
343
344 if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
345 return 0;
346
347 len = mt76u_get_rx_entry_len(data, urb->actual_length);
348 if (len < 0)
349 return 0;
350
351 skb = build_skb(data, q->buf_size);
352 if (!skb)
353 return 0;
354
355 data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN);
356 skb_reserve(skb, MT_DMA_HDR_LEN);
357 if (skb->tail + data_len > skb->end) {
358 dev_kfree_skb(skb);
359 return 1;
360 }
361
362 __skb_put(skb, data_len);
363 len -= data_len;
364
365 while (len > 0) {
366 data_len = min_t(int, len, urb->sg[nsgs].length);
367 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
368 sg_page(&urb->sg[nsgs]),
369 urb->sg[nsgs].offset,
370 data_len, q->buf_size);
371 len -= data_len;
372 nsgs++;
373 }
374 dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);
375
376 return nsgs;
377}
378
379static void mt76u_complete_rx(struct urb *urb)
380{
381 struct mt76_dev *dev = urb->context;
382 struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
383 unsigned long flags;
384
385 switch (urb->status) {
386 case -ECONNRESET:
387 case -ESHUTDOWN:
388 case -ENOENT:
389 return;
390 default:
391 dev_err(dev->dev, "rx urb failed: %d\n", urb->status);
392 /* fall through */
393 case 0:
394 break;
395 }
396
397 spin_lock_irqsave(&q->lock, flags);
398 if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
399 goto out;
400
401 q->tail = (q->tail + 1) % q->ndesc;
402 q->queued++;
403 tasklet_schedule(&dev->usb.rx_tasklet);
404out:
405 spin_unlock_irqrestore(&q->lock, flags);
406}
407
408static void mt76u_rx_tasklet(unsigned long data)
409{
410 struct mt76_dev *dev = (struct mt76_dev *)data;
411 struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
412 int err, nsgs, buf_len = q->buf_size;
413 struct mt76u_buf *buf;
414
415 rcu_read_lock();
416
417 while (true) {
418 buf = mt76u_get_next_rx_entry(q);
419 if (!buf)
420 break;
421
422 nsgs = mt76u_process_rx_entry(dev, buf->urb);
423 if (nsgs > 0) {
424 err = mt76u_fill_rx_sg(dev, buf, nsgs,
425 buf_len,
426 SKB_WITH_OVERHEAD(buf_len));
427 if (err < 0)
428 break;
429 }
430 mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
431 buf, GFP_ATOMIC,
432 mt76u_complete_rx, dev);
433 }
434 mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
435
436 rcu_read_unlock();
437}
438
439int mt76u_submit_rx_buffers(struct mt76_dev *dev)
440{
441 struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
442 unsigned long flags;
443 int i, err = 0;
444
445 spin_lock_irqsave(&q->lock, flags);
446 for (i = 0; i < q->ndesc; i++) {
447 err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
448 &q->entry[i].ubuf, GFP_ATOMIC,
449 mt76u_complete_rx, dev);
450 if (err < 0)
451 break;
452 }
453 q->head = q->tail = 0;
454 q->queued = 0;
455 spin_unlock_irqrestore(&q->lock, flags);
456
457 return err;
458}
459EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);
460
461static int mt76u_alloc_rx(struct mt76_dev *dev)
462{
463 struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
464 int i, err, nsgs;
465
466 spin_lock_init(&q->lock);
467 q->entry = devm_kzalloc(dev->dev,
468 MT_NUM_RX_ENTRIES * sizeof(*q->entry),
469 GFP_KERNEL);
470 if (!q->entry)
471 return -ENOMEM;
472
473 if (mt76u_check_sg(dev)) {
474 q->buf_size = MT_RX_BUF_SIZE;
475 nsgs = MT_SG_MAX_SIZE;
476 } else {
477 q->buf_size = PAGE_SIZE;
478 nsgs = 1;
479 }
480
481 for (i = 0; i < MT_NUM_RX_ENTRIES; i++) {
482 err = mt76u_buf_alloc(dev, &q->entry[i].ubuf,
483 nsgs, q->buf_size,
484 SKB_WITH_OVERHEAD(q->buf_size),
485 GFP_KERNEL);
486 if (err < 0)
487 return err;
488 }
489 q->ndesc = MT_NUM_RX_ENTRIES;
490
491 return mt76u_submit_rx_buffers(dev);
492}
493
494static void mt76u_free_rx(struct mt76_dev *dev)
495{
496 struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
497 int i;
498
499 for (i = 0; i < q->ndesc; i++)
500 mt76u_buf_free(&q->entry[i].ubuf);
501}
502
503static void mt76u_stop_rx(struct mt76_dev *dev)
504{
505 struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
506 int i;
507
508 for (i = 0; i < q->ndesc; i++)
509 usb_kill_urb(q->entry[i].ubuf.urb);
510}
511
512int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
513{
514 struct sk_buff *iter, *last = skb;
515 u32 info, pad;
516
517 /* Buffer layout:
518 * | 4B | xfer len | pad | 4B |
519 * | TXINFO | pkt/cmd | zero pad to 4B | zero |
520 *
521 * length field of TXINFO should be set to 'xfer len'.
522 */
523 info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
524 FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
525 put_unaligned_le32(info, skb_push(skb, sizeof(info)));
526
527 pad = round_up(skb->len, 4) + 4 - skb->len;
528 skb_walk_frags(skb, iter) {
529 last = iter;
530 if (!iter->next) {
531 skb->data_len += pad;
532 skb->len += pad;
533 break;
534 }
535 }
536
537 if (unlikely(pad)) {
538 if (__skb_pad(last, pad, true))
539 return -ENOMEM;
540 __skb_put(last, pad);
541 }
542 return 0;
543}
544EXPORT_SYMBOL_GPL(mt76u_skb_dma_info);
545
546static void mt76u_tx_tasklet(unsigned long data)
547{
548 struct mt76_dev *dev = (struct mt76_dev *)data;
549 struct mt76u_buf *buf;
550 struct mt76_queue *q;
551 bool wake;
552 int i;
553
554 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
555 q = &dev->q_tx[i];
556
557 spin_lock_bh(&q->lock);
558 while (true) {
559 buf = &q->entry[q->head].ubuf;
560 if (!buf->done || !q->queued)
561 break;
562
563 dev->drv->tx_complete_skb(dev, q,
564 &q->entry[q->head],
565 false);
566
567 if (q->entry[q->head].schedule) {
568 q->entry[q->head].schedule = false;
569 q->swq_queued--;
570 }
571
572 q->head = (q->head + 1) % q->ndesc;
573 q->queued--;
574 }
575 mt76_txq_schedule(dev, q);
576 wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
577 if (!q->queued)
578 wake_up(&dev->tx_wait);
579
580 spin_unlock_bh(&q->lock);
581
582 if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
583 ieee80211_queue_delayed_work(dev->hw,
584 &dev->usb.stat_work,
585 msecs_to_jiffies(10));
586
587 if (wake)
588 ieee80211_wake_queue(dev->hw, i);
589 }
590}
591
592static void mt76u_tx_status_data(struct work_struct *work)
593{
594 struct mt76_usb *usb;
595 struct mt76_dev *dev;
596 u8 update = 1;
597 u16 count = 0;
598
599 usb = container_of(work, struct mt76_usb, stat_work.work);
600 dev = container_of(usb, struct mt76_dev, usb);
601
602 while (true) {
603 if (test_bit(MT76_REMOVED, &dev->state))
604 break;
605
606 if (!dev->drv->tx_status_data(dev, &update))
607 break;
608 count++;
609 }
610
611 if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
612 ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
613 msecs_to_jiffies(10));
614 else
615 clear_bit(MT76_READING_STATS, &dev->state);
616}
617
618static void mt76u_complete_tx(struct urb *urb)
619{
620 struct mt76u_buf *buf = urb->context;
621 struct mt76_dev *dev = buf->dev;
622
623 if (mt76u_urb_error(urb))
624 dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
625 buf->done = true;
626
627 tasklet_schedule(&dev->usb.tx_tasklet);
628}
629
630static int
631mt76u_tx_build_sg(struct sk_buff *skb, struct urb *urb)
632{
633 int nsgs = 1 + skb_shinfo(skb)->nr_frags;
634 struct sk_buff *iter;
635
636 skb_walk_frags(skb, iter)
637 nsgs += 1 + skb_shinfo(iter)->nr_frags;
638
639 memset(urb->sg, 0, sizeof(*urb->sg) * MT_SG_MAX_SIZE);
640
641 nsgs = min_t(int, MT_SG_MAX_SIZE, nsgs);
642 sg_init_marker(urb->sg, nsgs);
643 urb->num_sgs = nsgs;
644
645 return skb_to_sgvec_nomark(skb, urb->sg, 0, skb->len);
646}
647
648static int
649mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
650 struct sk_buff *skb, struct mt76_wcid *wcid,
651 struct ieee80211_sta *sta)
652{
653 struct usb_interface *intf = to_usb_interface(dev->dev);
654 struct usb_device *udev = interface_to_usbdev(intf);
655 u8 ep = q2ep(q->hw_idx);
656 struct mt76u_buf *buf;
657 u16 idx = q->tail;
658 unsigned int pipe;
659 int err;
660
661 if (q->queued == q->ndesc)
662 return -ENOSPC;
663
664 err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
665 if (err < 0)
666 return err;
667
668 buf = &q->entry[idx].ubuf;
669 buf->done = false;
670
671 err = mt76u_tx_build_sg(skb, buf->urb);
672 if (err < 0)
673 return err;
674
675 pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]);
676 usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, skb->len,
677 mt76u_complete_tx, buf);
678
679 q->tail = (q->tail + 1) % q->ndesc;
680 q->entry[idx].skb = skb;
681 q->queued++;
682
683 return idx;
684}
685
686static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
687{
688 struct mt76u_buf *buf;
689 int err;
690
691 while (q->first != q->tail) {
692 buf = &q->entry[q->first].ubuf;
693 err = usb_submit_urb(buf->urb, GFP_ATOMIC);
694 if (err < 0) {
695 if (err == -ENODEV)
696 set_bit(MT76_REMOVED, &dev->state);
697 else
698 dev_err(dev->dev, "tx urb submit failed:%d\n",
699 err);
700 break;
701 }
702 q->first = (q->first + 1) % q->ndesc;
703 }
704}
705
706static int mt76u_alloc_tx(struct mt76_dev *dev)
707{
708 struct mt76u_buf *buf;
709 struct mt76_queue *q;
710 size_t size;
711 int i, j;
712
713 size = MT_SG_MAX_SIZE * sizeof(struct scatterlist);
714 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
715 q = &dev->q_tx[i];
716 spin_lock_init(&q->lock);
717 INIT_LIST_HEAD(&q->swq);
718 q->hw_idx = q2hwq(i);
719
720 q->entry = devm_kzalloc(dev->dev,
721 MT_NUM_TX_ENTRIES * sizeof(*q->entry),
722 GFP_KERNEL);
723 if (!q->entry)
724 return -ENOMEM;
725
726 q->ndesc = MT_NUM_TX_ENTRIES;
727 for (j = 0; j < q->ndesc; j++) {
728 buf = &q->entry[j].ubuf;
729 buf->dev = dev;
730
731 buf->urb = usb_alloc_urb(0, GFP_KERNEL);
732 if (!buf->urb)
733 return -ENOMEM;
734
735 buf->urb->sg = devm_kzalloc(dev->dev, size, GFP_KERNEL);
736 if (!buf->urb->sg)
737 return -ENOMEM;
738 }
739 }
740 return 0;
741}
742
743static void mt76u_free_tx(struct mt76_dev *dev)
744{
745 struct mt76_queue *q;
746 int i, j;
747
748 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
749 q = &dev->q_tx[i];
750 for (j = 0; j < q->ndesc; j++)
751 usb_free_urb(q->entry[j].ubuf.urb);
752 }
753}
754
755static void mt76u_stop_tx(struct mt76_dev *dev)
756{
757 struct mt76_queue *q;
758 int i, j;
759
760 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
761 q = &dev->q_tx[i];
762 for (j = 0; j < q->ndesc; j++)
763 usb_kill_urb(q->entry[j].ubuf.urb);
764 }
765}
766
767void mt76u_stop_queues(struct mt76_dev *dev)
768{
769 tasklet_disable(&dev->usb.rx_tasklet);
770 tasklet_disable(&dev->usb.tx_tasklet);
771
772 mt76u_stop_rx(dev);
773 mt76u_stop_tx(dev);
774}
775EXPORT_SYMBOL_GPL(mt76u_stop_queues);
776
777void mt76u_stop_stat_wk(struct mt76_dev *dev)
778{
779 cancel_delayed_work_sync(&dev->usb.stat_work);
780 clear_bit(MT76_READING_STATS, &dev->state);
781}
782EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);
783
784void mt76u_queues_deinit(struct mt76_dev *dev)
785{
786 mt76u_stop_queues(dev);
787
788 mt76u_free_rx(dev);
789 mt76u_free_tx(dev);
790}
791EXPORT_SYMBOL_GPL(mt76u_queues_deinit);
792
793int mt76u_alloc_queues(struct mt76_dev *dev)
794{
795 int err;
796
797 err = mt76u_alloc_rx(dev);
798 if (err < 0)
799 goto err;
800
801 err = mt76u_alloc_tx(dev);
802 if (err < 0)
803 goto err;
804
805 return 0;
806err:
807 mt76u_queues_deinit(dev);
808 return err;
809}
810EXPORT_SYMBOL_GPL(mt76u_alloc_queues);
811
812static const struct mt76_queue_ops usb_queue_ops = {
813 .tx_queue_skb = mt76u_tx_queue_skb,
814 .kick = mt76u_tx_kick,
815};
816
817int mt76u_init(struct mt76_dev *dev,
818 struct usb_interface *intf)
819{
820 static const struct mt76_bus_ops mt76u_ops = {
821 .rr = mt76u_rr,
822 .wr = mt76u_wr,
823 .rmw = mt76u_rmw,
824 .copy = mt76u_copy,
825 };
826 struct mt76_usb *usb = &dev->usb;
827
828 tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
829 tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
830 INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
831 skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);
832
833 init_completion(&usb->mcu.cmpl);
834 mutex_init(&usb->mcu.mutex);
835
836 mutex_init(&usb->usb_ctrl_mtx);
837 dev->bus = &mt76u_ops;
838 dev->queue_ops = &usb_queue_ops;
839
840 return mt76u_set_endpoints(intf, usb);
841}
842EXPORT_SYMBOL_GPL(mt76u_init);
843
844MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
845MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/usb_mcu.c
new file mode 100644
index 000000000000..070be803d463
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb_mcu.c
@@ -0,0 +1,242 @@
1/*
2 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/firmware.h>
18
19#include "mt76.h"
20#include "dma.h"
21
22#define MT_CMD_HDR_LEN 4
23
24#define MT_FCE_DMA_ADDR 0x0230
25#define MT_FCE_DMA_LEN 0x0234
26
27#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8
28
29struct sk_buff *mt76u_mcu_msg_alloc(const void *data, int len)
30{
31 struct sk_buff *skb;
32
33 skb = alloc_skb(MT_CMD_HDR_LEN + len + 8, GFP_KERNEL);
34 if (!skb)
35 return NULL;
36
37 skb_reserve(skb, MT_CMD_HDR_LEN);
38 skb_put_data(skb, data, len);
39
40 return skb;
41}
42EXPORT_SYMBOL_GPL(mt76u_mcu_msg_alloc);
43
44void mt76u_mcu_complete_urb(struct urb *urb)
45{
46 struct completion *cmpl = urb->context;
47
48 complete(cmpl);
49}
50EXPORT_SYMBOL_GPL(mt76u_mcu_complete_urb);
51
52static int mt76u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
53{
54 struct mt76_usb *usb = &dev->usb;
55 struct mt76u_buf *buf = &usb->mcu.res;
56 int i, ret;
57 u32 rxfce;
58
59 for (i = 0; i < 5; i++) {
60 if (!wait_for_completion_timeout(&usb->mcu.cmpl,
61 msecs_to_jiffies(300)))
62 continue;
63
64 if (buf->urb->status)
65 return -EIO;
66
67 rxfce = get_unaligned_le32(sg_virt(&buf->urb->sg[0]));
68 ret = mt76u_submit_buf(dev, USB_DIR_IN,
69 MT_EP_IN_CMD_RESP,
70 buf, GFP_KERNEL,
71 mt76u_mcu_complete_urb,
72 &usb->mcu.cmpl);
73 if (ret)
74 return ret;
75
76 if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce))
77 return 0;
78
79 dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n",
80 FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
81 seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
82 }
83
84 dev_err(dev->dev, "error: %s timed out\n", __func__);
85 return -ETIMEDOUT;
86}
87
88int mt76u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
89 int cmd, bool wait_resp)
90{
91 struct usb_interface *intf = to_usb_interface(dev->dev);
92 struct usb_device *udev = interface_to_usbdev(intf);
93 struct mt76_usb *usb = &dev->usb;
94 unsigned int pipe;
95 int ret, sent;
96 u8 seq = 0;
97 u32 info;
98
99 if (test_bit(MT76_REMOVED, &dev->state))
100 return 0;
101
102 mutex_lock(&usb->mcu.mutex);
103
104 pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
105 if (wait_resp) {
106 seq = ++usb->mcu.msg_seq & 0xf;
107 if (!seq)
108 seq = ++usb->mcu.msg_seq & 0xf;
109 }
110
111 info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
112 FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
113 MT_MCU_MSG_TYPE_CMD;
114 ret = mt76u_skb_dma_info(skb, CPU_TX_PORT, info);
115 if (ret)
116 goto out;
117
118 ret = usb_bulk_msg(udev, pipe, skb->data, skb->len, &sent, 500);
119 if (ret)
120 goto out;
121
122 if (wait_resp)
123 ret = mt76u_mcu_wait_resp(dev, seq);
124
125out:
126 mutex_unlock(&usb->mcu.mutex);
127
128 consume_skb(skb);
129
130 return ret;
131}
132EXPORT_SYMBOL_GPL(mt76u_mcu_send_msg);
133
134void mt76u_mcu_fw_reset(struct mt76_dev *dev)
135{
136 mt76u_vendor_request(dev, MT_VEND_DEV_MODE,
137 USB_DIR_OUT | USB_TYPE_VENDOR,
138 0x1, 0, NULL, 0);
139}
140EXPORT_SYMBOL_GPL(mt76u_mcu_fw_reset);
141
142static int
143__mt76u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf,
144 const void *fw_data, int len, u32 dst_addr)
145{
146 u8 *data = sg_virt(&buf->urb->sg[0]);
147 DECLARE_COMPLETION_ONSTACK(cmpl);
148 __le32 info;
149 u32 val;
150 int err;
151
152 info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
153 FIELD_PREP(MT_MCU_MSG_LEN, len) |
154 MT_MCU_MSG_TYPE_CMD);
155
156 memcpy(data, &info, sizeof(info));
157 memcpy(data + sizeof(info), fw_data, len);
158 memset(data + sizeof(info) + len, 0, 4);
159
160 mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
161 MT_FCE_DMA_ADDR, dst_addr);
162 len = roundup(len, 4);
163 mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
164 MT_FCE_DMA_LEN, len << 16);
165
166 buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
167 err = mt76u_submit_buf(dev, USB_DIR_OUT,
168 MT_EP_OUT_INBAND_CMD,
169 buf, GFP_KERNEL,
170 mt76u_mcu_complete_urb, &cmpl);
171 if (err < 0)
172 return err;
173
174 if (!wait_for_completion_timeout(&cmpl,
175 msecs_to_jiffies(1000))) {
176 dev_err(dev->dev, "firmware upload timed out\n");
177 usb_kill_urb(buf->urb);
178 return -ETIMEDOUT;
179 }
180
181 if (mt76u_urb_error(buf->urb)) {
182 dev_err(dev->dev, "firmware upload failed: %d\n",
183 buf->urb->status);
184 return buf->urb->status;
185 }
186
187 val = mt76u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
188 val++;
189 mt76u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
190
191 return 0;
192}
193
194int mt76u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
195 int data_len, u32 max_payload, u32 offset)
196{
197 int err, len, pos = 0, max_len = max_payload - 8;
198 struct mt76u_buf buf;
199
200 err = mt76u_buf_alloc(dev, &buf, 1, max_payload, max_payload,
201 GFP_KERNEL);
202 if (err < 0)
203 return err;
204
205 while (data_len > 0) {
206 len = min_t(int, data_len, max_len);
207 err = __mt76u_mcu_fw_send_data(dev, &buf, data + pos,
208 len, offset + pos);
209 if (err < 0)
210 break;
211
212 data_len -= len;
213 pos += len;
214 usleep_range(5000, 10000);
215 }
216 mt76u_buf_free(&buf);
217
218 return err;
219}
220EXPORT_SYMBOL_GPL(mt76u_mcu_fw_send_data);
221
222int mt76u_mcu_init_rx(struct mt76_dev *dev)
223{
224 struct mt76_usb *usb = &dev->usb;
225 int err;
226
227 err = mt76u_buf_alloc(dev, &usb->mcu.res, 1,
228 MCU_RESP_URB_SIZE, MCU_RESP_URB_SIZE,
229 GFP_KERNEL);
230 if (err < 0)
231 return err;
232
233 err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
234 &usb->mcu.res, GFP_KERNEL,
235 mt76u_mcu_complete_urb,
236 &usb->mcu.cmpl);
237 if (err < 0)
238 mt76u_buf_free(&usb->mcu.res);
239
240 return err;
241}
242EXPORT_SYMBOL_GPL(mt76u_mcu_init_rx);
diff --git a/drivers/net/wireless/mediatek/mt76/usb_trace.c b/drivers/net/wireless/mediatek/mt76/usb_trace.c
new file mode 100644
index 000000000000..7e1f540f0b7a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb_trace.c
@@ -0,0 +1,23 @@
1/*
2 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/module.h>
18
19#ifndef __CHECKER__
20#define CREATE_TRACE_POINTS
21#include "usb_trace.h"
22
23#endif
diff --git a/drivers/net/wireless/mediatek/mt76/usb_trace.h b/drivers/net/wireless/mediatek/mt76/usb_trace.h
new file mode 100644
index 000000000000..52db7012304a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/usb_trace.h
@@ -0,0 +1,71 @@
1/*
2 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#if !defined(__MT76_USB_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
18#define __MT76_USB_TRACE_H
19
20#include <linux/tracepoint.h>
21#include "mt76.h"
22
23#undef TRACE_SYSTEM
24#define TRACE_SYSTEM mt76_usb
25
26#define MAXNAME 32
27#define DEV_ENTRY __array(char, wiphy_name, 32)
28#define DEV_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(dev->hw->wiphy), MAXNAME)
29#define DEV_PR_FMT "%s"
30#define DEV_PR_ARG __entry->wiphy_name
31
32#define REG_ENTRY __field(u32, reg) __field(u32, val)
33#define REG_ASSIGN __entry->reg = reg; __entry->val = val
34#define REG_PR_FMT " %04x=%08x"
35#define REG_PR_ARG __entry->reg, __entry->val
36
37DECLARE_EVENT_CLASS(dev_reg_evt,
38 TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
39 TP_ARGS(dev, reg, val),
40 TP_STRUCT__entry(
41 DEV_ENTRY
42 REG_ENTRY
43 ),
44 TP_fast_assign(
45 DEV_ASSIGN;
46 REG_ASSIGN;
47 ),
48 TP_printk(
49 DEV_PR_FMT REG_PR_FMT,
50 DEV_PR_ARG, REG_PR_ARG
51 )
52);
53
54DEFINE_EVENT(dev_reg_evt, usb_reg_rr,
55 TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
56 TP_ARGS(dev, reg, val)
57);
58
59DEFINE_EVENT(dev_reg_evt, usb_reg_wr,
60 TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
61 TP_ARGS(dev, reg, val)
62);
63
64#endif
65
66#undef TRACE_INCLUDE_PATH
67#define TRACE_INCLUDE_PATH .
68#undef TRACE_INCLUDE_FILE
69#define TRACE_INCLUDE_FILE usb_trace
70
71#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
index d3b611aaf061..faea99b7a445 100644
--- a/drivers/net/wireless/mediatek/mt7601u/init.c
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -603,6 +603,7 @@ int mt7601u_register_device(struct mt7601u_dev *dev)
603 ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES); 603 ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
604 ieee80211_hw_set(hw, AMPDU_AGGREGATION); 604 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
605 ieee80211_hw_set(hw, SUPPORTS_RC_TABLE); 605 ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
606 ieee80211_hw_set(hw, MFP_CAPABLE);
606 hw->max_rates = 1; 607 hw->max_rates = 1;
607 hw->max_report_rates = 7; 608 hw->max_report_rates = 7;
608 hw->max_rate_tries = 1; 609 hw->max_rate_tries = 1;
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index 7b21016012c3..0f1789020960 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -308,6 +308,17 @@ mt7601u_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
308 int idx = key->keyidx; 308 int idx = key->keyidx;
309 int ret; 309 int ret;
310 310
311 /* fall back to sw encryption for unsupported ciphers */
312 switch (key->cipher) {
313 case WLAN_CIPHER_SUITE_WEP40:
314 case WLAN_CIPHER_SUITE_WEP104:
315 case WLAN_CIPHER_SUITE_TKIP:
316 case WLAN_CIPHER_SUITE_CCMP:
317 break;
318 default:
319 return -EOPNOTSUPP;
320 }
321
311 if (cmd == SET_KEY) { 322 if (cmd == SET_KEY) {
312 key->hw_key_idx = wcid->idx; 323 key->hw_key_idx = wcid->idx;
313 wcid->hw_key_idx = idx; 324 wcid->hw_key_idx = idx;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 656ddc659218..4aa332f4646b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -843,6 +843,88 @@ static int qtnf_set_mac_acl(struct wiphy *wiphy,
843 return ret; 843 return ret;
844} 844}
845 845
846static int qtnf_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
847 bool enabled, int timeout)
848{
849 struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
850 int ret;
851
852 ret = qtnf_cmd_send_pm_set(vif, enabled ? QLINK_PM_AUTO_STANDBY :
853 QLINK_PM_OFF, timeout);
854 if (ret) {
855 pr_err("%s: failed to set PM mode ret=%d\n", dev->name, ret);
856 return ret;
857 }
858
859 return ret;
860}
861
862#ifdef CONFIG_PM
863static int qtnf_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wowlan)
864{
865 struct qtnf_wmac *mac = wiphy_priv(wiphy);
866 struct qtnf_vif *vif;
867 int ret = 0;
868
869 vif = qtnf_mac_get_base_vif(mac);
870 if (!vif) {
871 pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
872 ret = -EFAULT;
873 goto exit;
874 }
875
876 if (!wowlan) {
877 pr_debug("WoWLAN triggers are not enabled\n");
878 qtnf_virtual_intf_cleanup(vif->netdev);
879 goto exit;
880 }
881
882 qtnf_scan_done(vif->mac, true);
883
884 ret = qtnf_cmd_send_wowlan_set(vif, wowlan);
885 if (ret) {
886 pr_err("MAC%u: failed to set WoWLAN triggers\n",
887 mac->macid);
888 goto exit;
889 }
890
891exit:
892 return ret;
893}
894
895static int qtnf_resume(struct wiphy *wiphy)
896{
897 struct qtnf_wmac *mac = wiphy_priv(wiphy);
898 struct qtnf_vif *vif;
899 int ret = 0;
900
901 vif = qtnf_mac_get_base_vif(mac);
902 if (!vif) {
903 pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
904 ret = -EFAULT;
905 goto exit;
906 }
907
908 ret = qtnf_cmd_send_wowlan_set(vif, NULL);
909 if (ret) {
910 pr_err("MAC%u: failed to reset WoWLAN triggers\n",
911 mac->macid);
912 goto exit;
913 }
914
915exit:
916 return ret;
917}
918
919static void qtnf_set_wakeup(struct wiphy *wiphy, bool enabled)
920{
921 struct qtnf_wmac *mac = wiphy_priv(wiphy);
922 struct qtnf_bus *bus = mac->bus;
923
924 device_set_wakeup_enable(bus->dev, enabled);
925}
926#endif
927
846static struct cfg80211_ops qtn_cfg80211_ops = { 928static struct cfg80211_ops qtn_cfg80211_ops = {
847 .add_virtual_intf = qtnf_add_virtual_intf, 929 .add_virtual_intf = qtnf_add_virtual_intf,
848 .change_virtual_intf = qtnf_change_virtual_intf, 930 .change_virtual_intf = qtnf_change_virtual_intf,
@@ -869,6 +951,12 @@ static struct cfg80211_ops qtn_cfg80211_ops = {
869 .channel_switch = qtnf_channel_switch, 951 .channel_switch = qtnf_channel_switch,
870 .start_radar_detection = qtnf_start_radar_detection, 952 .start_radar_detection = qtnf_start_radar_detection,
871 .set_mac_acl = qtnf_set_mac_acl, 953 .set_mac_acl = qtnf_set_mac_acl,
954 .set_power_mgmt = qtnf_set_power_mgmt,
955#ifdef CONFIG_PM
956 .suspend = qtnf_suspend,
957 .resume = qtnf_resume,
958 .set_wakeup = qtnf_set_wakeup,
959#endif
872}; 960};
873 961
874static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in, 962static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in,
@@ -921,6 +1009,9 @@ struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus)
921 if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD) 1009 if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
922 qtn_cfg80211_ops.start_radar_detection = NULL; 1010 qtn_cfg80211_ops.start_radar_detection = NULL;
923 1011
1012 if (!(bus->hw_info.hw_capab & QLINK_HW_CAPAB_PWR_MGMT))
1013 qtn_cfg80211_ops.set_power_mgmt = NULL;
1014
924 wiphy = wiphy_new(&qtn_cfg80211_ops, sizeof(struct qtnf_wmac)); 1015 wiphy = wiphy_new(&qtn_cfg80211_ops, sizeof(struct qtnf_wmac));
925 if (!wiphy) 1016 if (!wiphy)
926 return NULL; 1017 return NULL;
@@ -975,7 +1066,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
975 wiphy->retry_long = macinfo->lretry_limit; 1066 wiphy->retry_long = macinfo->lretry_limit;
976 wiphy->coverage_class = macinfo->coverage_class; 1067 wiphy->coverage_class = macinfo->coverage_class;
977 1068
978 wiphy->max_scan_ssids = QTNF_MAX_SSID_LIST_LENGTH; 1069 wiphy->max_scan_ssids =
1070 (hw_info->max_scan_ssids) ? hw_info->max_scan_ssids : 1;
979 wiphy->max_scan_ie_len = QTNF_MAX_VSIE_LEN; 1071 wiphy->max_scan_ie_len = QTNF_MAX_VSIE_LEN;
980 wiphy->mgmt_stypes = qtnf_mgmt_stypes; 1072 wiphy->mgmt_stypes = qtnf_mgmt_stypes;
981 wiphy->max_remain_on_channel_duration = 5000; 1073 wiphy->max_remain_on_channel_duration = 5000;
@@ -994,6 +1086,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
994 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | 1086 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
995 WIPHY_FLAG_AP_UAPSD | 1087 WIPHY_FLAG_AP_UAPSD |
996 WIPHY_FLAG_HAS_CHANNEL_SWITCH; 1088 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
1089 wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
997 1090
998 if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD) 1091 if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
999 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD); 1092 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD);
@@ -1016,6 +1109,11 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
1016 if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR) 1109 if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR)
1017 wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; 1110 wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
1018 1111
1112#ifdef CONFIG_PM
1113 if (macinfo->wowlan)
1114 wiphy->wowlan = macinfo->wowlan;
1115#endif
1116
1019 if (hw_info->hw_capab & QLINK_HW_CAPAB_REG_UPDATE) { 1117 if (hw_info->hw_capab & QLINK_HW_CAPAB_REG_UPDATE) {
1020 wiphy->regulatory_flags |= REGULATORY_STRICT_REG | 1118 wiphy->regulatory_flags |= REGULATORY_STRICT_REG |
1021 REGULATORY_CUSTOM_REG; 1119 REGULATORY_CUSTOM_REG;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index 42a598f92539..ae9e77300533 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -1092,6 +1092,9 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
1092 case QTN_TLV_ID_UBOOT_VER: 1092 case QTN_TLV_ID_UBOOT_VER:
1093 uboot_ver = (const void *)tlv->val; 1093 uboot_ver = (const void *)tlv->val;
1094 break; 1094 break;
1095 case QTN_TLV_ID_MAX_SCAN_SSIDS:
1096 hwinfo->max_scan_ssids = *tlv->val;
1097 break;
1095 default: 1098 default:
1096 break; 1099 break;
1097 } 1100 }
@@ -1135,6 +1138,37 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
1135 return 0; 1138 return 0;
1136} 1139}
1137 1140
1141static void
1142qtnf_parse_wowlan_info(struct qtnf_wmac *mac,
1143 const struct qlink_wowlan_capab_data *wowlan)
1144{
1145 struct qtnf_mac_info *mac_info = &mac->macinfo;
1146 const struct qlink_wowlan_support *data1;
1147 struct wiphy_wowlan_support *supp;
1148
1149 supp = kzalloc(sizeof(*supp), GFP_KERNEL);
1150 if (!supp)
1151 return;
1152
1153 switch (le16_to_cpu(wowlan->version)) {
1154 case 0x1:
1155 data1 = (struct qlink_wowlan_support *)wowlan->data;
1156
1157 supp->flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT;
1158 supp->n_patterns = le32_to_cpu(data1->n_patterns);
1159 supp->pattern_max_len = le32_to_cpu(data1->pattern_max_len);
1160 supp->pattern_min_len = le32_to_cpu(data1->pattern_min_len);
1161
1162 mac_info->wowlan = supp;
1163 break;
1164 default:
1165 pr_warn("MAC%u: unsupported WoWLAN version 0x%x\n",
1166 mac->macid, le16_to_cpu(wowlan->version));
1167 kfree(supp);
1168 break;
1169 }
1170}
1171
1138static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, 1172static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
1139 const u8 *tlv_buf, size_t tlv_buf_size) 1173 const u8 *tlv_buf, size_t tlv_buf_size)
1140{ 1174{
@@ -1144,6 +1178,7 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
1144 const struct qlink_iface_comb_num *comb_num; 1178 const struct qlink_iface_comb_num *comb_num;
1145 const struct qlink_iface_limit_record *rec; 1179 const struct qlink_iface_limit_record *rec;
1146 const struct qlink_iface_limit *lim; 1180 const struct qlink_iface_limit *lim;
1181 const struct qlink_wowlan_capab_data *wowlan;
1147 u16 rec_len; 1182 u16 rec_len;
1148 u16 tlv_type; 1183 u16 tlv_type;
1149 u16 tlv_value_len; 1184 u16 tlv_value_len;
@@ -1252,7 +1287,31 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
1252 ext_capa_mask = (u8 *)tlv->val; 1287 ext_capa_mask = (u8 *)tlv->val;
1253 ext_capa_mask_len = tlv_value_len; 1288 ext_capa_mask_len = tlv_value_len;
1254 break; 1289 break;
1290 case QTN_TLV_ID_WOWLAN_CAPAB:
1291 if (tlv_value_len < sizeof(*wowlan))
1292 return -EINVAL;
1293
1294 wowlan = (void *)tlv->val;
1295 if (!le16_to_cpu(wowlan->len)) {
1296 pr_warn("MAC%u: skip empty WoWLAN data\n",
1297 mac->macid);
1298 break;
1299 }
1300
1301 rec_len = sizeof(*wowlan) + le16_to_cpu(wowlan->len);
1302 if (unlikely(tlv_value_len != rec_len)) {
1303 pr_warn("MAC%u: WoWLAN data size mismatch\n",
1304 mac->macid);
1305 return -EINVAL;
1306 }
1307
1308 kfree(mac->macinfo.wowlan);
1309 mac->macinfo.wowlan = NULL;
1310 qtnf_parse_wowlan_info(mac, wowlan);
1311 break;
1255 default: 1312 default:
1313 pr_warn("MAC%u: unknown TLV type %u\n",
1314 mac->macid, tlv_type);
1256 break; 1315 break;
1257 } 1316 }
1258 1317
@@ -2260,11 +2319,6 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
2260 int count = 0; 2319 int count = 0;
2261 int ret; 2320 int ret;
2262 2321
2263 if (scan_req->n_ssids > QTNF_MAX_SSID_LIST_LENGTH) {
2264 pr_err("MAC%u: too many SSIDs in scan request\n", mac->macid);
2265 return -EINVAL;
2266 }
2267
2268 cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD, 2322 cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
2269 QLINK_CMD_SCAN, 2323 QLINK_CMD_SCAN,
2270 sizeof(struct qlink_cmd)); 2324 sizeof(struct qlink_cmd));
@@ -2799,3 +2853,93 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
2799 2853
2800 return ret; 2854 return ret;
2801} 2855}
2856
2857int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout)
2858{
2859 struct qtnf_bus *bus = vif->mac->bus;
2860 struct sk_buff *cmd_skb;
2861 u16 res_code = QLINK_CMD_RESULT_OK;
2862 struct qlink_cmd_pm_set *cmd;
2863 int ret = 0;
2864
2865 cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
2866 QLINK_CMD_PM_SET, sizeof(*cmd));
2867 if (!cmd_skb)
2868 return -ENOMEM;
2869
2870 cmd = (struct qlink_cmd_pm_set *)cmd_skb->data;
2871 cmd->pm_mode = pm_mode;
2872 cmd->pm_standby_timer = cpu_to_le32(timeout);
2873
2874 qtnf_bus_lock(bus);
2875
2876 ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
2877
2878 if (unlikely(ret))
2879 goto out;
2880
2881 if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
2882 pr_err("cmd exec failed: 0x%.4X\n", res_code);
2883 ret = -EFAULT;
2884 }
2885
2886out:
2887 qtnf_bus_unlock(bus);
2888 return ret;
2889}
2890
2891int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
2892 const struct cfg80211_wowlan *wowl)
2893{
2894 struct qtnf_bus *bus = vif->mac->bus;
2895 struct sk_buff *cmd_skb;
2896 u16 res_code = QLINK_CMD_RESULT_OK;
2897 struct qlink_cmd_wowlan_set *cmd;
2898 u32 triggers = 0;
2899 int count = 0;
2900 int ret = 0;
2901
2902 cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
2903 QLINK_CMD_WOWLAN_SET, sizeof(*cmd));
2904 if (!cmd_skb)
2905 return -ENOMEM;
2906
2907 qtnf_bus_lock(bus);
2908
2909 cmd = (struct qlink_cmd_wowlan_set *)cmd_skb->data;
2910
2911 if (wowl) {
2912 if (wowl->disconnect)
2913 triggers |= QLINK_WOWLAN_TRIG_DISCONNECT;
2914
2915 if (wowl->magic_pkt)
2916 triggers |= QLINK_WOWLAN_TRIG_MAGIC_PKT;
2917
2918 if (wowl->n_patterns && wowl->patterns) {
2919 triggers |= QLINK_WOWLAN_TRIG_PATTERN_PKT;
2920 while (count < wowl->n_patterns) {
2921 qtnf_cmd_skb_put_tlv_arr(cmd_skb,
2922 QTN_TLV_ID_WOWLAN_PATTERN,
2923 wowl->patterns[count].pattern,
2924 wowl->patterns[count].pattern_len);
2925 count++;
2926 }
2927 }
2928 }
2929
2930 cmd->triggers = cpu_to_le32(triggers);
2931
2932 ret = qtnf_cmd_send(bus, cmd_skb, &res_code);
2933
2934 if (unlikely(ret))
2935 goto out;
2936
2937 if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
2938 pr_err("cmd exec failed: 0x%.4X\n", res_code);
2939 ret = -EFAULT;
2940 }
2941
2942out:
2943 qtnf_bus_unlock(bus);
2944 return ret;
2945}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index cf9274add26d..1ac41156c192 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -76,5 +76,8 @@ int qtnf_cmd_start_cac(const struct qtnf_vif *vif,
76 u32 cac_time_ms); 76 u32 cac_time_ms);
77int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif, 77int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
78 const struct cfg80211_acl_data *params); 78 const struct cfg80211_acl_data *params);
79int qtnf_cmd_send_pm_set(const struct qtnf_vif *vif, u8 pm_mode, int timeout);
80int qtnf_cmd_send_wowlan_set(const struct qtnf_vif *vif,
81 const struct cfg80211_wowlan *wowl);
79 82
80#endif /* QLINK_COMMANDS_H_ */ 83#endif /* QLINK_COMMANDS_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index c318340e1bd5..19abbc4e23e0 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -495,6 +495,7 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
495 qtnf_mac_iface_comb_free(mac); 495 qtnf_mac_iface_comb_free(mac);
496 kfree(mac->macinfo.extended_capabilities); 496 kfree(mac->macinfo.extended_capabilities);
497 kfree(mac->macinfo.extended_capabilities_mask); 497 kfree(mac->macinfo.extended_capabilities_mask);
498 kfree(mac->macinfo.wowlan);
498 wiphy_free(wiphy); 499 wiphy_free(wiphy);
499 bus->mac[macid] = NULL; 500 bus->mac[macid] = NULL;
500} 501}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index 214435448335..a1e338a1f055 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -40,7 +40,6 @@
40#undef pr_fmt 40#undef pr_fmt
41#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ 41#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
42 42
43#define QTNF_MAX_SSID_LIST_LENGTH 2
44#define QTNF_MAX_VSIE_LEN 255 43#define QTNF_MAX_VSIE_LEN 255
45#define QTNF_MAX_INTF 8 44#define QTNF_MAX_INTF 8
46#define QTNF_MAX_EVENT_QUEUE_LEN 255 45#define QTNF_MAX_EVENT_QUEUE_LEN 255
@@ -111,6 +110,7 @@ struct qtnf_mac_info {
111 u8 *extended_capabilities; 110 u8 *extended_capabilities;
112 u8 *extended_capabilities_mask; 111 u8 *extended_capabilities_mask;
113 u8 extended_capabilities_len; 112 u8 extended_capabilities_len;
113 struct wiphy_wowlan_support *wowlan;
114}; 114};
115 115
116struct qtnf_chan_stats { 116struct qtnf_chan_stats {
@@ -145,6 +145,7 @@ struct qtnf_hw_info {
145 u8 total_rx_chain; 145 u8 total_rx_chain;
146 char fw_version[ETHTOOL_FWVERS_LEN]; 146 char fw_version[ETHTOOL_FWVERS_LEN];
147 u32 hw_version; 147 u32 hw_version;
148 u8 max_scan_ssids;
148}; 149};
149 150
150struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac); 151struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index 4a32967d0479..99d37e3efba6 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -77,6 +77,7 @@ enum qlink_hw_capab {
77 QLINK_HW_CAPAB_STA_INACT_TIMEOUT = BIT(1), 77 QLINK_HW_CAPAB_STA_INACT_TIMEOUT = BIT(1),
78 QLINK_HW_CAPAB_DFS_OFFLOAD = BIT(2), 78 QLINK_HW_CAPAB_DFS_OFFLOAD = BIT(2),
79 QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR = BIT(3), 79 QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR = BIT(3),
80 QLINK_HW_CAPAB_PWR_MGMT = BIT(4),
80}; 81};
81 82
82enum qlink_iface_type { 83enum qlink_iface_type {
@@ -256,6 +257,8 @@ enum qlink_cmd_type {
256 QLINK_CMD_CHAN_STATS = 0x0054, 257 QLINK_CMD_CHAN_STATS = 0x0054,
257 QLINK_CMD_CONNECT = 0x0060, 258 QLINK_CMD_CONNECT = 0x0060,
258 QLINK_CMD_DISCONNECT = 0x0061, 259 QLINK_CMD_DISCONNECT = 0x0061,
260 QLINK_CMD_PM_SET = 0x0062,
261 QLINK_CMD_WOWLAN_SET = 0x0063,
259}; 262};
260 263
261/** 264/**
@@ -668,6 +671,54 @@ struct qlink_acl_data {
668 struct qlink_mac_address mac_addrs[0]; 671 struct qlink_mac_address mac_addrs[0];
669} __packed; 672} __packed;
670 673
674/**
675 * enum qlink_pm_mode - Power Management mode
676 *
677 * @QLINK_PM_OFF: normal mode, no power saving enabled
678 * @QLINK_PM_AUTO_STANDBY: enable auto power save mode
679 */
680enum qlink_pm_mode {
681 QLINK_PM_OFF = 0,
682 QLINK_PM_AUTO_STANDBY = 1,
683};
684
685/**
686 * struct qlink_cmd_pm_set - data for QLINK_CMD_PM_SET command
687 *
688 * @pm_standby timer: period of network inactivity in seconds before
689 * putting a radio in power save mode
690 * @pm_mode: power management mode
691 */
692struct qlink_cmd_pm_set {
693 struct qlink_cmd chdr;
694 __le32 pm_standby_timer;
695 u8 pm_mode;
696} __packed;
697
698/**
699 * enum qlink_wowlan_trigger
700 *
701 * @QLINK_WOWLAN_TRIG_DISCONNECT: wakeup on disconnect
702 * @QLINK_WOWLAN_TRIG_MAGIC_PKT: wakeup on magic packet
703 * @QLINK_WOWLAN_TRIG_PATTERN_PKT: wakeup on user-defined packet
704 */
705enum qlink_wowlan_trigger {
706 QLINK_WOWLAN_TRIG_DISCONNECT = BIT(0),
707 QLINK_WOWLAN_TRIG_MAGIC_PKT = BIT(1),
708 QLINK_WOWLAN_TRIG_PATTERN_PKT = BIT(2),
709};
710
711/**
712 * struct qlink_cmd_wowlan_set - data for QLINK_CMD_WOWLAN_SET command
713 *
714 * @triggers: requested bitmask of WoWLAN triggers
715 */
716struct qlink_cmd_wowlan_set {
717 struct qlink_cmd chdr;
718 __le32 triggers;
719 u8 data[0];
720} __packed;
721
671/* QLINK Command Responses messages related definitions 722/* QLINK Command Responses messages related definitions
672 */ 723 */
673 724
@@ -1065,6 +1116,8 @@ struct qlink_event_radar {
1065 * @QTN_TLV_ID_STA_STATS: per-STA statistics as defined by 1116 * @QTN_TLV_ID_STA_STATS: per-STA statistics as defined by
1066 * &struct qlink_sta_stats. Valid values are marked as such in a bitmap 1117 * &struct qlink_sta_stats. Valid values are marked as such in a bitmap
1067 * carried by QTN_TLV_ID_STA_STATS_MAP. 1118 * carried by QTN_TLV_ID_STA_STATS_MAP.
1119 * @QTN_TLV_ID_MAX_SCAN_SSIDS: maximum number of SSIDs the device can scan
1120 * for in any given scan.
1068 */ 1121 */
1069enum qlink_tlv_id { 1122enum qlink_tlv_id {
1070 QTN_TLV_ID_FRAG_THRESH = 0x0201, 1123 QTN_TLV_ID_FRAG_THRESH = 0x0201,
@@ -1093,6 +1146,9 @@ enum qlink_tlv_id {
1093 QTN_TLV_ID_CALIBRATION_VER = 0x0406, 1146 QTN_TLV_ID_CALIBRATION_VER = 0x0406,
1094 QTN_TLV_ID_UBOOT_VER = 0x0407, 1147 QTN_TLV_ID_UBOOT_VER = 0x0407,
1095 QTN_TLV_ID_RANDOM_MAC_ADDR = 0x0408, 1148 QTN_TLV_ID_RANDOM_MAC_ADDR = 0x0408,
1149 QTN_TLV_ID_MAX_SCAN_SSIDS = 0x0409,
1150 QTN_TLV_ID_WOWLAN_CAPAB = 0x0410,
1151 QTN_TLV_ID_WOWLAN_PATTERN = 0x0411,
1096}; 1152};
1097 1153
1098struct qlink_tlv_hdr { 1154struct qlink_tlv_hdr {
@@ -1380,4 +1436,33 @@ struct qlink_random_mac_addr {
1380 u8 mac_addr_mask[ETH_ALEN]; 1436 u8 mac_addr_mask[ETH_ALEN];
1381} __packed; 1437} __packed;
1382 1438
1439/**
1440 * struct qlink_wowlan_capab_data - data for QTN_TLV_ID_WOWLAN_CAPAB TLV
1441 *
1442 * WoWLAN capabilities supported by cards.
1443 *
1444 * @version: version of WoWLAN data structure, to ensure backward
1445 * compatibility for firmwares with limited WoWLAN support
1446 * @len: Total length of WoWLAN data
1447 * @data: supported WoWLAN features
1448 */
1449struct qlink_wowlan_capab_data {
1450 __le16 version;
1451 __le16 len;
1452 u8 data[0];
1453} __packed;
1454
1455/**
1456 * struct qlink_wowlan_support - supported WoWLAN capabilities
1457 *
1458 * @n_patterns: number of supported wakeup patterns
1459 * @pattern_max_len: maximum length of each pattern
1460 * @pattern_min_len: minimum length of each pattern
1461 */
1462struct qlink_wowlan_support {
1463 __le32 n_patterns;
1464 __le32 pattern_max_len;
1465 __le32 pattern_min_len;
1466} __packed;
1467
1383#endif /* _QTN_QLINK_H_ */ 1468#endif /* _QTN_QLINK_H_ */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index c380c1f56ba6..fa2fd64084ac 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -527,24 +527,6 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
527EXPORT_SYMBOL_GPL(rt2x00mac_set_key); 527EXPORT_SYMBOL_GPL(rt2x00mac_set_key);
528#endif /* CONFIG_RT2X00_LIB_CRYPTO */ 528#endif /* CONFIG_RT2X00_LIB_CRYPTO */
529 529
530int rt2x00mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
531 struct ieee80211_sta *sta)
532{
533 struct rt2x00_dev *rt2x00dev = hw->priv;
534
535 return rt2x00dev->ops->lib->sta_add(rt2x00dev, vif, sta);
536}
537EXPORT_SYMBOL_GPL(rt2x00mac_sta_add);
538
539int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
540 struct ieee80211_sta *sta)
541{
542 struct rt2x00_dev *rt2x00dev = hw->priv;
543
544 return rt2x00dev->ops->lib->sta_remove(rt2x00dev, sta);
545}
546EXPORT_SYMBOL_GPL(rt2x00mac_sta_remove);
547
548void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw, 530void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw,
549 struct ieee80211_vif *vif, 531 struct ieee80211_vif *vif,
550 const u8 *mac_addr) 532 const u8 *mac_addr)
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index a7e0a17aa7e8..08c607c031bc 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -470,7 +470,6 @@ static inline struct rcs __iomem *rcs_base(ray_dev_t *dev)
470static int ray_init(struct net_device *dev) 470static int ray_init(struct net_device *dev)
471{ 471{
472 int i; 472 int i;
473 UCHAR *p;
474 struct ccs __iomem *pccs; 473 struct ccs __iomem *pccs;
475 ray_dev_t *local = netdev_priv(dev); 474 ray_dev_t *local = netdev_priv(dev);
476 struct pcmcia_device *link = local->finder; 475 struct pcmcia_device *link = local->finder;
@@ -513,12 +512,9 @@ static int ray_init(struct net_device *dev)
513 init_startup_params(local); 512 init_startup_params(local);
514 513
515 /* copy mac address to startup parameters */ 514 /* copy mac address to startup parameters */
516 if (parse_addr(phy_addr, local->sparm.b4.a_mac_addr)) { 515 if (!parse_addr(phy_addr, local->sparm.b4.a_mac_addr)) {
517 p = local->sparm.b4.a_mac_addr;
518 } else {
519 memcpy(&local->sparm.b4.a_mac_addr, 516 memcpy(&local->sparm.b4.a_mac_addr,
520 &local->startup_res.station_addr, ADDRLEN); 517 &local->startup_res.station_addr, ADDRLEN);
521 p = local->sparm.b4.a_mac_addr;
522 } 518 }
523 519
524 clear_interrupt(local); /* Clear any interrupt from the card */ 520 clear_interrupt(local); /* Clear any interrupt from the card */
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
index fde89866fa8d..51e32df6120b 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
@@ -363,7 +363,7 @@ void rtl8225se_rf_init(struct ieee80211_hw *dev)
363 rtl8187se_rf_writereg(dev, 0x00, 0x0037); mdelay(11); 363 rtl8187se_rf_writereg(dev, 0x00, 0x0037); mdelay(11);
364 rtl8187se_rf_writereg(dev, 0x04, 0x0160); mdelay(11); 364 rtl8187se_rf_writereg(dev, 0x04, 0x0160); mdelay(11);
365 rtl8187se_rf_writereg(dev, 0x07, 0x0080); mdelay(11); 365 rtl8187se_rf_writereg(dev, 0x07, 0x0080); mdelay(11);
366 rtl8187se_rf_writereg(dev, 0x02, 0x088D); mdelay(221); 366 rtl8187se_rf_writereg(dev, 0x02, 0x088D); msleep(221);
367 rtl8187se_rf_writereg(dev, 0x00, 0x0137); mdelay(11); 367 rtl8187se_rf_writereg(dev, 0x00, 0x0137); mdelay(11);
368 rtl8187se_rf_writereg(dev, 0x07, 0x0000); mdelay(1); 368 rtl8187se_rf_writereg(dev, 0x07, 0x0000); mdelay(1);
369 rtl8187se_rf_writereg(dev, 0x07, 0x0180); mdelay(1); 369 rtl8187se_rf_writereg(dev, 0x07, 0x0180); mdelay(1);
@@ -386,7 +386,7 @@ void rtl8225se_rf_init(struct ieee80211_hw *dev)
386 rtl8187se_rf_writereg(dev, 0x00, 0x00BF); mdelay(1); 386 rtl8187se_rf_writereg(dev, 0x00, 0x00BF); mdelay(1);
387 rtl8187se_rf_writereg(dev, 0x0D, 0x08DF); mdelay(1); 387 rtl8187se_rf_writereg(dev, 0x0D, 0x08DF); mdelay(1);
388 rtl8187se_rf_writereg(dev, 0x02, 0x004D); mdelay(1); 388 rtl8187se_rf_writereg(dev, 0x02, 0x004D); mdelay(1);
389 rtl8187se_rf_writereg(dev, 0x04, 0x0975); mdelay(31); 389 rtl8187se_rf_writereg(dev, 0x04, 0x0975); msleep(31);
390 rtl8187se_rf_writereg(dev, 0x00, 0x0197); mdelay(1); 390 rtl8187se_rf_writereg(dev, 0x00, 0x0197); mdelay(1);
391 rtl8187se_rf_writereg(dev, 0x05, 0x05AB); mdelay(1); 391 rtl8187se_rf_writereg(dev, 0x05, 0x05AB); mdelay(1);
392 392
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index 0761e61591bd..27e6baf12e90 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -26,6 +26,9 @@ static struct ta_metadata metadata_flash_content[] = {
26 {"flash_content", 0x00010000}, 26 {"flash_content", 0x00010000},
27 {"rsi/rs9113_wlan_qspi.rps", 0x00010000}, 27 {"rsi/rs9113_wlan_qspi.rps", 0x00010000},
28 {"rsi/rs9113_wlan_bt_dual_mode.rps", 0x00010000}, 28 {"rsi/rs9113_wlan_bt_dual_mode.rps", 0x00010000},
29 {"flash_content", 0x00010000},
30 {"rsi/rs9113_ap_bt_dual_mode.rps", 0x00010000},
31
29}; 32};
30 33
31int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb) 34int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb)
@@ -246,7 +249,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
246 } 249 }
247 } 250 }
248 251
249 data_desc->mac_flags = cpu_to_le16(seq_num & 0xfff); 252 data_desc->mac_flags |= cpu_to_le16(seq_num & 0xfff);
250 data_desc->qid_tid = ((skb->priority & 0xf) | 253 data_desc->qid_tid = ((skb->priority & 0xf) |
251 ((tx_params->tid & 0xf) << 4)); 254 ((tx_params->tid & 0xf) << 4));
252 data_desc->sta_id = tx_params->sta_id; 255 data_desc->sta_id = tx_params->sta_id;
@@ -842,7 +845,6 @@ static int rsi_load_firmware(struct rsi_hw *adapter)
842 const struct firmware *fw_entry = NULL; 845 const struct firmware *fw_entry = NULL;
843 u32 regout_val = 0, content_size; 846 u32 regout_val = 0, content_size;
844 u16 tmp_regout_val = 0; 847 u16 tmp_regout_val = 0;
845 u8 *flash_content = NULL;
846 struct ta_metadata *metadata_p; 848 struct ta_metadata *metadata_p;
847 int status; 849 int status;
848 850
@@ -904,28 +906,22 @@ static int rsi_load_firmware(struct rsi_hw *adapter)
904 __func__, metadata_p->name); 906 __func__, metadata_p->name);
905 return status; 907 return status;
906 } 908 }
907 flash_content = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
908 if (!flash_content) {
909 rsi_dbg(ERR_ZONE, "%s: Failed to copy firmware\n", __func__);
910 status = -EIO;
911 goto fail;
912 }
913 content_size = fw_entry->size; 909 content_size = fw_entry->size;
914 rsi_dbg(INFO_ZONE, "FW Length = %d bytes\n", content_size); 910 rsi_dbg(INFO_ZONE, "FW Length = %d bytes\n", content_size);
915 911
916 /* Get the firmware version */ 912 /* Get the firmware version */
917 common->lmac_ver.ver.info.fw_ver[0] = 913 common->lmac_ver.ver.info.fw_ver[0] =
918 flash_content[LMAC_VER_OFFSET] & 0xFF; 914 fw_entry->data[LMAC_VER_OFFSET] & 0xFF;
919 common->lmac_ver.ver.info.fw_ver[1] = 915 common->lmac_ver.ver.info.fw_ver[1] =
920 flash_content[LMAC_VER_OFFSET + 1] & 0xFF; 916 fw_entry->data[LMAC_VER_OFFSET + 1] & 0xFF;
921 common->lmac_ver.major = flash_content[LMAC_VER_OFFSET + 2] & 0xFF; 917 common->lmac_ver.major = fw_entry->data[LMAC_VER_OFFSET + 2] & 0xFF;
922 common->lmac_ver.release_num = 918 common->lmac_ver.release_num =
923 flash_content[LMAC_VER_OFFSET + 3] & 0xFF; 919 fw_entry->data[LMAC_VER_OFFSET + 3] & 0xFF;
924 common->lmac_ver.minor = flash_content[LMAC_VER_OFFSET + 4] & 0xFF; 920 common->lmac_ver.minor = fw_entry->data[LMAC_VER_OFFSET + 4] & 0xFF;
925 common->lmac_ver.patch_num = 0; 921 common->lmac_ver.patch_num = 0;
926 rsi_print_version(common); 922 rsi_print_version(common);
927 923
928 status = bl_write_header(adapter, flash_content, content_size); 924 status = bl_write_header(adapter, (u8 *)fw_entry->data, content_size);
929 if (status) { 925 if (status) {
930 rsi_dbg(ERR_ZONE, 926 rsi_dbg(ERR_ZONE,
931 "%s: RPS Image header loading failed\n", 927 "%s: RPS Image header loading failed\n",
@@ -967,7 +963,7 @@ fw_upgrade:
967 963
968 rsi_dbg(INFO_ZONE, "Burn Command Pass.. Upgrading the firmware\n"); 964 rsi_dbg(INFO_ZONE, "Burn Command Pass.. Upgrading the firmware\n");
969 965
970 status = auto_fw_upgrade(adapter, flash_content, content_size); 966 status = auto_fw_upgrade(adapter, (u8 *)fw_entry->data, content_size);
971 if (status == 0) { 967 if (status == 0) {
972 rsi_dbg(ERR_ZONE, "Firmware upgradation Done\n"); 968 rsi_dbg(ERR_ZONE, "Firmware upgradation Done\n");
973 goto load_image_cmd; 969 goto load_image_cmd;
@@ -981,13 +977,11 @@ fw_upgrade:
981 977
982success: 978success:
983 rsi_dbg(ERR_ZONE, "***** Firmware Loading successful *****\n"); 979 rsi_dbg(ERR_ZONE, "***** Firmware Loading successful *****\n");
984 kfree(flash_content);
985 release_firmware(fw_entry); 980 release_firmware(fw_entry);
986 return 0; 981 return 0;
987 982
988fail: 983fail:
989 rsi_dbg(ERR_ZONE, "##### Firmware loading failed #####\n"); 984 rsi_dbg(ERR_ZONE, "##### Firmware loading failed #####\n");
990 kfree(flash_content);
991 release_firmware(fw_entry); 985 release_firmware(fw_entry);
992 return status; 986 return status;
993} 987}
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 2ca7464b7fa3..4e510cbe0a89 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -416,7 +416,8 @@ static int rsi_mac80211_add_interface(struct ieee80211_hw *hw,
416 416
417 /* Get free vap index */ 417 /* Get free vap index */
418 for (i = 0; i < RSI_MAX_VIFS; i++) { 418 for (i = 0; i < RSI_MAX_VIFS; i++) {
419 if (!adapter->vifs[i]) { 419 if (!adapter->vifs[i] ||
420 !memcmp(vif->addr, adapter->vifs[i]->addr, ETH_ALEN)) {
420 vap_idx = i; 421 vap_idx = i;
421 break; 422 break;
422 } 423 }
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
index 1485a0c89df2..01d99ed985ee 100644
--- a/drivers/net/wireless/rsi/rsi_91x_main.c
+++ b/drivers/net/wireless/rsi/rsi_91x_main.c
@@ -122,7 +122,6 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common,
122 u8 extended_desc) 122 u8 extended_desc)
123{ 123{
124 struct ieee80211_tx_info *info; 124 struct ieee80211_tx_info *info;
125 struct skb_info *rx_params;
126 struct sk_buff *skb = NULL; 125 struct sk_buff *skb = NULL;
127 u8 payload_offset; 126 u8 payload_offset;
128 struct ieee80211_vif *vif; 127 struct ieee80211_vif *vif;
@@ -149,10 +148,6 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common,
149 vif = rsi_get_vif(common->priv, wh->addr1); 148 vif = rsi_get_vif(common->priv, wh->addr1);
150 149
151 info = IEEE80211_SKB_CB(skb); 150 info = IEEE80211_SKB_CB(skb);
152 rx_params = (struct skb_info *)info->driver_data;
153 rx_params->rssi = rsi_get_rssi(buffer);
154 rx_params->channel = rsi_get_connected_channel(vif);
155
156 return skb; 151 return skb;
157} 152}
158 153
@@ -336,7 +331,6 @@ struct rsi_hw *rsi_91x_init(u16 oper_mode)
336 spin_lock_init(&adapter->ps_lock); 331 spin_lock_init(&adapter->ps_lock);
337 timer_setup(&common->roc_timer, rsi_roc_timeout, 0); 332 timer_setup(&common->roc_timer, rsi_roc_timeout, 0);
338 init_completion(&common->wlan_init_completion); 333 init_completion(&common->wlan_init_completion);
339 common->init_done = true;
340 adapter->device_model = RSI_DEV_9113; 334 adapter->device_model = RSI_DEV_9113;
341 common->oper_mode = oper_mode; 335 common->oper_mode = oper_mode;
342 336
@@ -374,6 +368,7 @@ struct rsi_hw *rsi_91x_init(u16 oper_mode)
374 } 368 }
375#endif 369#endif
376 370
371 common->init_done = true;
377 return adapter; 372 return adapter;
378 373
379err: 374err:
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index d0e5937cad6d..1095df7d9573 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -334,20 +334,17 @@ static int rsi_load_radio_caps(struct rsi_common *common)
334 struct ieee80211_conf *conf = &hw->conf; 334 struct ieee80211_conf *conf = &hw->conf;
335 335
336 if (conf_is_ht40_plus(conf)) { 336 if (conf_is_ht40_plus(conf)) {
337 radio_caps->radio_cfg_info = 337 radio_caps->ppe_ack_rate =
338 RSI_CMDDESC_LOWER_20_ENABLE; 338 cpu_to_le16(LOWER_20_ENABLE |
339 radio_caps->radio_info = 339 (LOWER_20_ENABLE >> 12));
340 RSI_CMDDESC_LOWER_20_ENABLE;
341 } else if (conf_is_ht40_minus(conf)) { 340 } else if (conf_is_ht40_minus(conf)) {
342 radio_caps->radio_cfg_info = 341 radio_caps->ppe_ack_rate =
343 RSI_CMDDESC_UPPER_20_ENABLE; 342 cpu_to_le16(UPPER_20_ENABLE |
344 radio_caps->radio_info = 343 (UPPER_20_ENABLE >> 12));
345 RSI_CMDDESC_UPPER_20_ENABLE;
346 } else { 344 } else {
347 radio_caps->radio_cfg_info = 345 radio_caps->ppe_ack_rate =
348 RSI_CMDDESC_40MHZ; 346 cpu_to_le16((BW_40MHZ << 12) |
349 radio_caps->radio_info = 347 FULL40M_ENABLE);
350 RSI_CMDDESC_FULL_40_ENABLE;
351 } 348 }
352 } 349 }
353 } 350 }
@@ -749,7 +746,7 @@ int rsi_hal_load_key(struct rsi_common *common,
749 key_descriptor |= RSI_CIPHER_TKIP; 746 key_descriptor |= RSI_CIPHER_TKIP;
750 } 747 }
751 key_descriptor |= RSI_PROTECT_DATA_FRAMES; 748 key_descriptor |= RSI_PROTECT_DATA_FRAMES;
752 key_descriptor |= ((key_id << RSI_KEY_ID_OFFSET) & RSI_KEY_ID_MASK); 749 key_descriptor |= (key_id << RSI_KEY_ID_OFFSET);
753 750
754 rsi_set_len_qno(&set_key->desc_dword0.len_qno, 751 rsi_set_len_qno(&set_key->desc_dword0.len_qno,
755 (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); 752 (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q);
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 416981d99229..5733e440ecaf 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -1394,10 +1394,7 @@ static const struct dev_pm_ops rsi_pm_ops = {
1394#endif 1394#endif
1395 1395
1396static const struct sdio_device_id rsi_dev_table[] = { 1396static const struct sdio_device_id rsi_dev_table[] = {
1397 { SDIO_DEVICE(0x303, 0x100) }, 1397 { SDIO_DEVICE(RSI_SDIO_VID_9113, RSI_SDIO_PID_9113) },
1398 { SDIO_DEVICE(0x041B, 0x0301) },
1399 { SDIO_DEVICE(0x041B, 0x0201) },
1400 { SDIO_DEVICE(0x041B, 0x9330) },
1401 { /* Blank */}, 1398 { /* Blank */},
1402}; 1399};
1403 1400
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 6ce6b754df12..c0a163e40402 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -835,11 +835,7 @@ static int rsi_resume(struct usb_interface *intf)
835#endif 835#endif
836 836
837static const struct usb_device_id rsi_dev_table[] = { 837static const struct usb_device_id rsi_dev_table[] = {
838 { USB_DEVICE(0x0303, 0x0100) }, 838 { USB_DEVICE(RSI_USB_VID_9113, RSI_USB_PID_9113) },
839 { USB_DEVICE(0x041B, 0x0301) },
840 { USB_DEVICE(0x041B, 0x0201) },
841 { USB_DEVICE(0x041B, 0x9330) },
842 { USB_DEVICE(0x1618, 0x9113) },
843 { /* Blank */}, 839 { /* Blank */},
844}; 840};
845 841
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
index 14620935c925..359fbdf85739 100644
--- a/drivers/net/wireless/rsi/rsi_mgmt.h
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -22,7 +22,7 @@
22#include "rsi_main.h" 22#include "rsi_main.h"
23 23
24#define MAX_MGMT_PKT_SIZE 512 24#define MAX_MGMT_PKT_SIZE 512
25#define RSI_NEEDED_HEADROOM 80 25#define RSI_NEEDED_HEADROOM 84
26#define RSI_RCV_BUFFER_LEN 2000 26#define RSI_RCV_BUFFER_LEN 2000
27 27
28#define RSI_11B_MODE 0 28#define RSI_11B_MODE 0
diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h
index 353dbdf31e75..66dcd2ec9051 100644
--- a/drivers/net/wireless/rsi/rsi_sdio.h
+++ b/drivers/net/wireless/rsi/rsi_sdio.h
@@ -28,6 +28,9 @@
28#include <linux/mmc/sdio_ids.h> 28#include <linux/mmc/sdio_ids.h>
29#include "rsi_main.h" 29#include "rsi_main.h"
30 30
31#define RSI_SDIO_VID_9113 0x041B
32#define RSI_SDIO_PID_9113 0x9330
33
31enum sdio_interrupt_type { 34enum sdio_interrupt_type {
32 BUFFER_FULL = 0x0, 35 BUFFER_FULL = 0x0,
33 BUFFER_AVAILABLE = 0x2, 36 BUFFER_AVAILABLE = 0x2,
diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h
index b6fe79f0a513..5b2eddd1a2ee 100644
--- a/drivers/net/wireless/rsi/rsi_usb.h
+++ b/drivers/net/wireless/rsi/rsi_usb.h
@@ -22,6 +22,9 @@
22#include "rsi_main.h" 22#include "rsi_main.h"
23#include "rsi_common.h" 23#include "rsi_common.h"
24 24
25#define RSI_USB_VID_9113 0x1618
26#define RSI_USB_PID_9113 0x9113
27
25#define USB_INTERNAL_REG_1 0x25000 28#define USB_INTERNAL_REG_1 0x25000
26#define RSI_USB_READY_MAGIC_NUM 0xab 29#define RSI_USB_READY_MAGIC_NUM 0xab
27#define FW_STATUS_REG 0x41050012 30#define FW_STATUS_REG 0x41050012
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 37f785f601c1..89b0d0fade9f 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -6160,16 +6160,16 @@ static int wl1271_register_hw(struct wl1271 *wl)
6160 } 6160 }
6161 6161
6162 if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) { 6162 if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6163 wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.\n"); 6163 wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
6164 if (!strcmp(pdev_data->family->name, "wl18xx")) { 6164 if (!strcmp(pdev_data->family->name, "wl18xx")) {
6165 wl1271_warning("This default nvs file can be removed from the file system\n"); 6165 wl1271_warning("This default nvs file can be removed from the file system");
6166 } else { 6166 } else {
6167 wl1271_warning("Your device performance is not optimized.\n"); 6167 wl1271_warning("Your device performance is not optimized.");
6168 wl1271_warning("Please use the calibrator tool to configure your device.\n"); 6168 wl1271_warning("Please use the calibrator tool to configure your device.");
6169 } 6169 }
6170 6170
6171 if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) { 6171 if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6172 wl1271_warning("Fuse mac address is zero. using random mac\n"); 6172 wl1271_warning("Fuse mac address is zero. using random mac");
6173 /* Use TI oui and a random nic */ 6173 /* Use TI oui and a random nic */
6174 oui_addr = WLCORE_TI_OUI_ADDRESS; 6174 oui_addr = WLCORE_TI_OUI_ADDRESS;
6175 nic_addr = get_random_int(); 6175 nic_addr = get_random_int();
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 0f15696195f8..078a4940bc5c 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -59,7 +59,7 @@ static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len)
59static void wl1271_rx_status(struct wl1271 *wl, 59static void wl1271_rx_status(struct wl1271 *wl,
60 struct wl1271_rx_descriptor *desc, 60 struct wl1271_rx_descriptor *desc,
61 struct ieee80211_rx_status *status, 61 struct ieee80211_rx_status *status,
62 u8 beacon) 62 u8 beacon, u8 probe_rsp)
63{ 63{
64 memset(status, 0, sizeof(struct ieee80211_rx_status)); 64 memset(status, 0, sizeof(struct ieee80211_rx_status));
65 65
@@ -106,6 +106,9 @@ static void wl1271_rx_status(struct wl1271 *wl,
106 } 106 }
107 } 107 }
108 108
109 if (beacon || probe_rsp)
110 status->boottime_ns = ktime_get_boot_ns();
111
109 if (beacon) 112 if (beacon)
110 wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel, 113 wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,
111 status->band); 114 status->band);
@@ -191,7 +194,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
191 if (ieee80211_is_data_present(hdr->frame_control)) 194 if (ieee80211_is_data_present(hdr->frame_control))
192 is_data = 1; 195 is_data = 1;
193 196
194 wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon); 197 wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon,
198 ieee80211_is_probe_resp(hdr->frame_control));
195 wlcore_hw_set_rx_csum(wl, desc, skb); 199 wlcore_hw_set_rx_csum(wl, desc, skb);
196 200
197 seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; 201 seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;