aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJohn W. Linville <linville@tuxdriver.com>2011-11-28 14:11:18 -0500
committerJohn W. Linville <linville@tuxdriver.com>2011-11-28 14:11:18 -0500
commit39338b56382ac640614851a80e0bd71994cc664d (patch)
treec319e23181be286b1d320c29f755a81e5ef61172
parent8b7ff200010600ef7cd9d002f9f8f97edfc7578e (diff)
parenteb1852b10593dc3ca73e02bf9ac4753a5a464905 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next into for-davem
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath6kl/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.c7
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c1428
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h30
-rw-r--r--drivers/net/wireless/ath/ath6kl/common.h16
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h202
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c849
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h34
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif-ops.h49
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c (renamed from drivers/net/wireless/ath/ath6kl/htc_hif.c)150
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.h61
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.c725
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.h18
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_hif.h92
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c676
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c668
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c335
-rw-r--r--drivers/net/wireless/ath/ath6kl/target.h3
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c221
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c688
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h243
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c107
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h15
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.c14
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.c145
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c3
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c4
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c7
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c7
-rw-r--r--drivers/net/wireless/iwlegacy/3945-debug.c505
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c3977
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c995
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c2751
-rw-r--r--drivers/net/wireless/iwlegacy/3945.h626
-rw-r--r--drivers/net/wireless/iwlegacy/4965-calib.c (renamed from drivers/net/wireless/iwlegacy/iwl-4965-calib.c)613
-rw-r--r--drivers/net/wireless/iwlegacy/4965-debug.c746
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c6536
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c2860
-rw-r--r--drivers/net/wireless/iwlegacy/4965.c2421
-rw-r--r--drivers/net/wireless/iwlegacy/4965.h1309
-rw-r--r--drivers/net/wireless/iwlegacy/Kconfig43
-rw-r--r--drivers/net/wireless/iwlegacy/Makefile24
-rw-r--r--drivers/net/wireless/iwlegacy/commands.h (renamed from drivers/net/wireless/iwlegacy/iwl-commands.h)1134
-rw-r--r--drivers/net/wireless/iwlegacy/common.c5707
-rw-r--r--drivers/net/wireless/iwlegacy/common.h3424
-rw-r--r--drivers/net/wireless/iwlegacy/csr.h (renamed from drivers/net/wireless/iwlegacy/iwl-csr.h)93
-rw-r--r--drivers/net/wireless/iwlegacy/debug.c1411
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c523
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h60
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-fh.h187
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-hw.h291
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.c63
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.h32
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-rs.c996
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.c2741
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.h308
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-calib.h75
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c774
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h59
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c154
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-hw.h811
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.c73
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.h33
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-lib.c1194
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rs.c2871
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rx.c215
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-sta.c721
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-tx.c1378
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-ucode.c166
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.c2183
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.h282
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c2661
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.h636
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debug.h198
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debugfs.c1314
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-dev.h1364
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.c42
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.h210
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.c553
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.h344
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-fh.h513
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-hcmd.c271
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-helpers.h196
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-io.h545
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.c205
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.h56
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-legacy-rs.h456
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-power.c165
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-power.h55
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-rx.c282
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-scan.c550
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-spectrum.h4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.c540
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.h148
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-tx.c659
-rw-r--r--drivers/net/wireless/iwlegacy/iwl3945-base.c4016
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c3281
-rw-r--r--drivers/net/wireless/iwlegacy/prph.h (renamed from drivers/net/wireless/iwlegacy/iwl-prph.h)133
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h37
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie.c12
-rw-r--r--drivers/net/wireless/libertas/ethtool.c7
-rw-r--r--drivers/net/wireless/mwifiex/Kconfig4
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c38
-rw-r--r--drivers/net/wireless/mwifiex/fw.h1
-rw-r--r--drivers/net/wireless/mwifiex/main.h3
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c17
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h1
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c2
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c4
-rw-r--r--drivers/net/wireless/p54/p54spi.c6
-rw-r--r--drivers/net/wireless/p54/txrx.c4
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c4
-rw-r--r--drivers/net/wireless/rndis_wlan.c86
-rw-r--r--drivers/net/wireless/rtlwifi/base.c6
-rw-r--r--drivers/net/wireless/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c60
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h23
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c9
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c55
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h7
-rw-r--r--include/linux/nl80211.h35
-rw-r--r--include/net/cfg80211.h94
-rw-r--r--include/net/mac80211.h22
-rw-r--r--include/net/nfc/nci.h84
-rw-r--r--include/net/regulatory.h6
-rw-r--r--net/mac80211/cfg.c2
-rw-r--r--net/mac80211/debugfs_sta.c4
-rw-r--r--net/mac80211/driver-ops.h8
-rw-r--r--net/mac80211/ht.c83
-rw-r--r--net/mac80211/ieee80211_i.h75
-rw-r--r--net/mac80211/iface.c2
-rw-r--r--net/mac80211/main.c20
-rw-r--r--net/mac80211/mesh.c2
-rw-r--r--net/mac80211/mesh_hwmp.c7
-rw-r--r--net/mac80211/mesh_pathtbl.c27
-rw-r--r--net/mac80211/mesh_plink.c3
-rw-r--r--net/mac80211/mlme.c30
-rw-r--r--net/mac80211/offchannel.c4
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c62
-rw-r--r--net/mac80211/sta_info.h2
-rw-r--r--net/mac80211/tx.c222
-rw-r--r--net/mac80211/util.c186
-rw-r--r--net/mac80211/wep.c5
-rw-r--r--net/mac80211/work.c16
-rw-r--r--net/mac80211/wpa.c25
-rw-r--r--net/wireless/core.h10
-rw-r--r--net/wireless/mlme.c37
-rw-r--r--net/wireless/nl80211.c133
-rw-r--r--net/wireless/reg.c39
-rw-r--r--net/wireless/reg.h1
-rw-r--r--net/wireless/sme.c7
-rw-r--r--net/wireless/util.c186
167 files changed, 40219 insertions, 39230 deletions
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index c1c0678b1fb6..98db76196b59 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -42,7 +42,7 @@ obj-$(CONFIG_ADM8211) += adm8211.o
42obj-$(CONFIG_MWL8K) += mwl8k.o 42obj-$(CONFIG_MWL8K) += mwl8k.o
43 43
44obj-$(CONFIG_IWLWIFI) += iwlwifi/ 44obj-$(CONFIG_IWLWIFI) += iwlwifi/
45obj-$(CONFIG_IWLWIFI_LEGACY) += iwlegacy/ 45obj-$(CONFIG_IWLEGACY) += iwlegacy/
46obj-$(CONFIG_RT2X00) += rt2x00/ 46obj-$(CONFIG_RT2X00) += rt2x00/
47 47
48obj-$(CONFIG_P54_COMMON) += p54/ 48obj-$(CONFIG_P54_COMMON) += p54/
diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
index 8f7a0d1c290c..707069303550 100644
--- a/drivers/net/wireless/ath/ath6kl/Makefile
+++ b/drivers/net/wireless/ath/ath6kl/Makefile
@@ -23,7 +23,7 @@
23 23
24obj-$(CONFIG_ATH6KL) := ath6kl.o 24obj-$(CONFIG_ATH6KL) := ath6kl.o
25ath6kl-y += debug.o 25ath6kl-y += debug.o
26ath6kl-y += htc_hif.o 26ath6kl-y += hif.o
27ath6kl-y += htc.o 27ath6kl-y += htc.o
28ath6kl-y += bmi.o 28ath6kl-y += bmi.o
29ath6kl-y += cfg80211.o 29ath6kl-y += cfg80211.o
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c
index c5d11cc536e0..a962fe4c6b7e 100644
--- a/drivers/net/wireless/ath/ath6kl/bmi.c
+++ b/drivers/net/wireless/ath/ath6kl/bmi.c
@@ -196,8 +196,6 @@ int ath6kl_bmi_done(struct ath6kl *ar)
196 return ret; 196 return ret;
197 } 197 }
198 198
199 ath6kl_bmi_cleanup(ar);
200
201 return 0; 199 return 0;
202} 200}
203 201
@@ -672,6 +670,11 @@ int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
672 return ret; 670 return ret;
673} 671}
674 672
673void ath6kl_bmi_reset(struct ath6kl *ar)
674{
675 ar->bmi.done_sent = false;
676}
677
675int ath6kl_bmi_init(struct ath6kl *ar) 678int ath6kl_bmi_init(struct ath6kl *ar)
676{ 679{
677 ar->bmi.cmd_buf = kzalloc(MAX_BMI_CMDBUF_SZ, GFP_ATOMIC); 680 ar->bmi.cmd_buf = kzalloc(MAX_BMI_CMDBUF_SZ, GFP_ATOMIC);
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.h b/drivers/net/wireless/ath/ath6kl/bmi.h
index 96851d5df24b..009e8f650ab1 100644
--- a/drivers/net/wireless/ath/ath6kl/bmi.h
+++ b/drivers/net/wireless/ath/ath6kl/bmi.h
@@ -230,6 +230,8 @@ struct ath6kl_bmi_target_info {
230 230
231int ath6kl_bmi_init(struct ath6kl *ar); 231int ath6kl_bmi_init(struct ath6kl *ar);
232void ath6kl_bmi_cleanup(struct ath6kl *ar); 232void ath6kl_bmi_cleanup(struct ath6kl *ar);
233void ath6kl_bmi_reset(struct ath6kl *ar);
234
233int ath6kl_bmi_done(struct ath6kl *ar); 235int ath6kl_bmi_done(struct ath6kl *ar);
234int ath6kl_bmi_get_target_info(struct ath6kl *ar, 236int ath6kl_bmi_get_target_info(struct ath6kl *ar,
235 struct ath6kl_bmi_target_info *targ_info); 237 struct ath6kl_bmi_target_info *targ_info);
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 5ac2bc2ebee6..02526044acb9 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -23,8 +23,10 @@
23#include "testmode.h" 23#include "testmode.h"
24 24
25static unsigned int ath6kl_p2p; 25static unsigned int ath6kl_p2p;
26static unsigned int multi_norm_if_support;
26 27
27module_param(ath6kl_p2p, uint, 0644); 28module_param(ath6kl_p2p, uint, 0644);
29module_param(multi_norm_if_support, uint, 0644);
28 30
29#define RATETAB_ENT(_rate, _rateid, _flags) { \ 31#define RATETAB_ENT(_rate, _rateid, _flags) { \
30 .bitrate = (_rate), \ 32 .bitrate = (_rate), \
@@ -123,17 +125,19 @@ static struct ieee80211_supported_band ath6kl_band_5ghz = {
123 .bitrates = ath6kl_a_rates, 125 .bitrates = ath6kl_a_rates,
124}; 126};
125 127
126static int ath6kl_set_wpa_version(struct ath6kl *ar, 128#define CCKM_KRK_CIPHER_SUITE 0x004096ff /* use for KRK */
129
130static int ath6kl_set_wpa_version(struct ath6kl_vif *vif,
127 enum nl80211_wpa_versions wpa_version) 131 enum nl80211_wpa_versions wpa_version)
128{ 132{
129 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: %u\n", __func__, wpa_version); 133 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: %u\n", __func__, wpa_version);
130 134
131 if (!wpa_version) { 135 if (!wpa_version) {
132 ar->auth_mode = NONE_AUTH; 136 vif->auth_mode = NONE_AUTH;
133 } else if (wpa_version & NL80211_WPA_VERSION_2) { 137 } else if (wpa_version & NL80211_WPA_VERSION_2) {
134 ar->auth_mode = WPA2_AUTH; 138 vif->auth_mode = WPA2_AUTH;
135 } else if (wpa_version & NL80211_WPA_VERSION_1) { 139 } else if (wpa_version & NL80211_WPA_VERSION_1) {
136 ar->auth_mode = WPA_AUTH; 140 vif->auth_mode = WPA_AUTH;
137 } else { 141 } else {
138 ath6kl_err("%s: %u not supported\n", __func__, wpa_version); 142 ath6kl_err("%s: %u not supported\n", __func__, wpa_version);
139 return -ENOTSUPP; 143 return -ENOTSUPP;
@@ -142,25 +146,24 @@ static int ath6kl_set_wpa_version(struct ath6kl *ar,
142 return 0; 146 return 0;
143} 147}
144 148
145static int ath6kl_set_auth_type(struct ath6kl *ar, 149static int ath6kl_set_auth_type(struct ath6kl_vif *vif,
146 enum nl80211_auth_type auth_type) 150 enum nl80211_auth_type auth_type)
147{ 151{
148
149 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, auth_type); 152 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, auth_type);
150 153
151 switch (auth_type) { 154 switch (auth_type) {
152 case NL80211_AUTHTYPE_OPEN_SYSTEM: 155 case NL80211_AUTHTYPE_OPEN_SYSTEM:
153 ar->dot11_auth_mode = OPEN_AUTH; 156 vif->dot11_auth_mode = OPEN_AUTH;
154 break; 157 break;
155 case NL80211_AUTHTYPE_SHARED_KEY: 158 case NL80211_AUTHTYPE_SHARED_KEY:
156 ar->dot11_auth_mode = SHARED_AUTH; 159 vif->dot11_auth_mode = SHARED_AUTH;
157 break; 160 break;
158 case NL80211_AUTHTYPE_NETWORK_EAP: 161 case NL80211_AUTHTYPE_NETWORK_EAP:
159 ar->dot11_auth_mode = LEAP_AUTH; 162 vif->dot11_auth_mode = LEAP_AUTH;
160 break; 163 break;
161 164
162 case NL80211_AUTHTYPE_AUTOMATIC: 165 case NL80211_AUTHTYPE_AUTOMATIC:
163 ar->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH; 166 vif->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH;
164 break; 167 break;
165 168
166 default: 169 default:
@@ -171,11 +174,11 @@ static int ath6kl_set_auth_type(struct ath6kl *ar,
171 return 0; 174 return 0;
172} 175}
173 176
174static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast) 177static int ath6kl_set_cipher(struct ath6kl_vif *vif, u32 cipher, bool ucast)
175{ 178{
176 u8 *ar_cipher = ucast ? &ar->prwise_crypto : &ar->grp_crypto; 179 u8 *ar_cipher = ucast ? &vif->prwise_crypto : &vif->grp_crypto;
177 u8 *ar_cipher_len = ucast ? &ar->prwise_crypto_len : 180 u8 *ar_cipher_len = ucast ? &vif->prwise_crypto_len :
178 &ar->grp_crypto_len; 181 &vif->grp_crypto_len;
179 182
180 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: cipher 0x%x, ucast %u\n", 183 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: cipher 0x%x, ucast %u\n",
181 __func__, cipher, ucast); 184 __func__, cipher, ucast);
@@ -210,28 +213,35 @@ static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast)
210 return 0; 213 return 0;
211} 214}
212 215
213static void ath6kl_set_key_mgmt(struct ath6kl *ar, u32 key_mgmt) 216static void ath6kl_set_key_mgmt(struct ath6kl_vif *vif, u32 key_mgmt)
214{ 217{
215 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, key_mgmt); 218 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, key_mgmt);
216 219
217 if (key_mgmt == WLAN_AKM_SUITE_PSK) { 220 if (key_mgmt == WLAN_AKM_SUITE_PSK) {
218 if (ar->auth_mode == WPA_AUTH) 221 if (vif->auth_mode == WPA_AUTH)
219 ar->auth_mode = WPA_PSK_AUTH; 222 vif->auth_mode = WPA_PSK_AUTH;
220 else if (ar->auth_mode == WPA2_AUTH) 223 else if (vif->auth_mode == WPA2_AUTH)
221 ar->auth_mode = WPA2_PSK_AUTH; 224 vif->auth_mode = WPA2_PSK_AUTH;
225 } else if (key_mgmt == 0x00409600) {
226 if (vif->auth_mode == WPA_AUTH)
227 vif->auth_mode = WPA_AUTH_CCKM;
228 else if (vif->auth_mode == WPA2_AUTH)
229 vif->auth_mode = WPA2_AUTH_CCKM;
222 } else if (key_mgmt != WLAN_AKM_SUITE_8021X) { 230 } else if (key_mgmt != WLAN_AKM_SUITE_8021X) {
223 ar->auth_mode = NONE_AUTH; 231 vif->auth_mode = NONE_AUTH;
224 } 232 }
225} 233}
226 234
227static bool ath6kl_cfg80211_ready(struct ath6kl *ar) 235static bool ath6kl_cfg80211_ready(struct ath6kl_vif *vif)
228{ 236{
237 struct ath6kl *ar = vif->ar;
238
229 if (!test_bit(WMI_READY, &ar->flag)) { 239 if (!test_bit(WMI_READY, &ar->flag)) {
230 ath6kl_err("wmi is not ready\n"); 240 ath6kl_err("wmi is not ready\n");
231 return false; 241 return false;
232 } 242 }
233 243
234 if (!test_bit(WLAN_ENABLED, &ar->flag)) { 244 if (!test_bit(WLAN_ENABLED, &vif->flags)) {
235 ath6kl_err("wlan disabled\n"); 245 ath6kl_err("wlan disabled\n");
236 return false; 246 return false;
237 } 247 }
@@ -239,15 +249,143 @@ static bool ath6kl_cfg80211_ready(struct ath6kl *ar)
239 return true; 249 return true;
240} 250}
241 251
252static bool ath6kl_is_wpa_ie(const u8 *pos)
253{
254 return pos[0] == WLAN_EID_WPA && pos[1] >= 4 &&
255 pos[2] == 0x00 && pos[3] == 0x50 &&
256 pos[4] == 0xf2 && pos[5] == 0x01;
257}
258
259static bool ath6kl_is_rsn_ie(const u8 *pos)
260{
261 return pos[0] == WLAN_EID_RSN;
262}
263
264static bool ath6kl_is_wps_ie(const u8 *pos)
265{
266 return (pos[0] == WLAN_EID_VENDOR_SPECIFIC &&
267 pos[1] >= 4 &&
268 pos[2] == 0x00 && pos[3] == 0x50 && pos[4] == 0xf2 &&
269 pos[5] == 0x04);
270}
271
272static int ath6kl_set_assoc_req_ies(struct ath6kl_vif *vif, const u8 *ies,
273 size_t ies_len)
274{
275 struct ath6kl *ar = vif->ar;
276 const u8 *pos;
277 u8 *buf = NULL;
278 size_t len = 0;
279 int ret;
280
281 /*
282 * Clear previously set flag
283 */
284
285 ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG;
286
287 /*
288 * Filter out RSN/WPA IE(s)
289 */
290
291 if (ies && ies_len) {
292 buf = kmalloc(ies_len, GFP_KERNEL);
293 if (buf == NULL)
294 return -ENOMEM;
295 pos = ies;
296
297 while (pos + 1 < ies + ies_len) {
298 if (pos + 2 + pos[1] > ies + ies_len)
299 break;
300 if (!(ath6kl_is_wpa_ie(pos) || ath6kl_is_rsn_ie(pos))) {
301 memcpy(buf + len, pos, 2 + pos[1]);
302 len += 2 + pos[1];
303 }
304
305 if (ath6kl_is_wps_ie(pos))
306 ar->connect_ctrl_flags |= CONNECT_WPS_FLAG;
307
308 pos += 2 + pos[1];
309 }
310 }
311
312 ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
313 WMI_FRAME_ASSOC_REQ, buf, len);
314 kfree(buf);
315 return ret;
316}
317
318static int ath6kl_nliftype_to_drv_iftype(enum nl80211_iftype type, u8 *nw_type)
319{
320 switch (type) {
321 case NL80211_IFTYPE_STATION:
322 *nw_type = INFRA_NETWORK;
323 break;
324 case NL80211_IFTYPE_ADHOC:
325 *nw_type = ADHOC_NETWORK;
326 break;
327 case NL80211_IFTYPE_AP:
328 *nw_type = AP_NETWORK;
329 break;
330 case NL80211_IFTYPE_P2P_CLIENT:
331 *nw_type = INFRA_NETWORK;
332 break;
333 case NL80211_IFTYPE_P2P_GO:
334 *nw_type = AP_NETWORK;
335 break;
336 default:
337 ath6kl_err("invalid interface type %u\n", type);
338 return -ENOTSUPP;
339 }
340
341 return 0;
342}
343
344static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type,
345 u8 *if_idx, u8 *nw_type)
346{
347 int i;
348
349 if (ath6kl_nliftype_to_drv_iftype(type, nw_type))
350 return false;
351
352 if (ar->ibss_if_active || ((type == NL80211_IFTYPE_ADHOC) &&
353 ar->num_vif))
354 return false;
355
356 if (type == NL80211_IFTYPE_STATION ||
357 type == NL80211_IFTYPE_AP || type == NL80211_IFTYPE_ADHOC) {
358 for (i = 0; i < MAX_NUM_VIF; i++) {
359 if ((ar->avail_idx_map >> i) & BIT(0)) {
360 *if_idx = i;
361 return true;
362 }
363 }
364 }
365
366 if (type == NL80211_IFTYPE_P2P_CLIENT ||
367 type == NL80211_IFTYPE_P2P_GO) {
368 for (i = ar->max_norm_iface; i < MAX_NUM_VIF; i++) {
369 if ((ar->avail_idx_map >> i) & BIT(0)) {
370 *if_idx = i;
371 return true;
372 }
373 }
374 }
375
376 return false;
377}
378
242static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, 379static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
243 struct cfg80211_connect_params *sme) 380 struct cfg80211_connect_params *sme)
244{ 381{
245 struct ath6kl *ar = ath6kl_priv(dev); 382 struct ath6kl *ar = ath6kl_priv(dev);
383 struct ath6kl_vif *vif = netdev_priv(dev);
246 int status; 384 int status;
247 385
248 ar->sme_state = SME_CONNECTING; 386 vif->sme_state = SME_CONNECTING;
249 387
250 if (!ath6kl_cfg80211_ready(ar)) 388 if (!ath6kl_cfg80211_ready(vif))
251 return -EIO; 389 return -EIO;
252 390
253 if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) { 391 if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
@@ -287,12 +425,19 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
287 } 425 }
288 } 426 }
289 427
290 if (test_bit(CONNECTED, &ar->flag) && 428 if (sme->ie && (sme->ie_len > 0)) {
291 ar->ssid_len == sme->ssid_len && 429 status = ath6kl_set_assoc_req_ies(vif, sme->ie, sme->ie_len);
292 !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) { 430 if (status)
293 ar->reconnect_flag = true; 431 return status;
294 status = ath6kl_wmi_reconnect_cmd(ar->wmi, ar->req_bssid, 432 }
295 ar->ch_hint); 433
434 if (test_bit(CONNECTED, &vif->flags) &&
435 vif->ssid_len == sme->ssid_len &&
436 !memcmp(vif->ssid, sme->ssid, vif->ssid_len)) {
437 vif->reconnect_flag = true;
438 status = ath6kl_wmi_reconnect_cmd(ar->wmi, vif->fw_vif_idx,
439 vif->req_bssid,
440 vif->ch_hint);
296 441
297 up(&ar->sem); 442 up(&ar->sem);
298 if (status) { 443 if (status) {
@@ -300,42 +445,43 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
300 return -EIO; 445 return -EIO;
301 } 446 }
302 return 0; 447 return 0;
303 } else if (ar->ssid_len == sme->ssid_len && 448 } else if (vif->ssid_len == sme->ssid_len &&
304 !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) { 449 !memcmp(vif->ssid, sme->ssid, vif->ssid_len)) {
305 ath6kl_disconnect(ar); 450 ath6kl_disconnect(vif);
306 } 451 }
307 452
308 memset(ar->ssid, 0, sizeof(ar->ssid)); 453 memset(vif->ssid, 0, sizeof(vif->ssid));
309 ar->ssid_len = sme->ssid_len; 454 vif->ssid_len = sme->ssid_len;
310 memcpy(ar->ssid, sme->ssid, sme->ssid_len); 455 memcpy(vif->ssid, sme->ssid, sme->ssid_len);
311 456
312 if (sme->channel) 457 if (sme->channel)
313 ar->ch_hint = sme->channel->center_freq; 458 vif->ch_hint = sme->channel->center_freq;
314 459
315 memset(ar->req_bssid, 0, sizeof(ar->req_bssid)); 460 memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
316 if (sme->bssid && !is_broadcast_ether_addr(sme->bssid)) 461 if (sme->bssid && !is_broadcast_ether_addr(sme->bssid))
317 memcpy(ar->req_bssid, sme->bssid, sizeof(ar->req_bssid)); 462 memcpy(vif->req_bssid, sme->bssid, sizeof(vif->req_bssid));
318 463
319 ath6kl_set_wpa_version(ar, sme->crypto.wpa_versions); 464 ath6kl_set_wpa_version(vif, sme->crypto.wpa_versions);
320 465
321 status = ath6kl_set_auth_type(ar, sme->auth_type); 466 status = ath6kl_set_auth_type(vif, sme->auth_type);
322 if (status) { 467 if (status) {
323 up(&ar->sem); 468 up(&ar->sem);
324 return status; 469 return status;
325 } 470 }
326 471
327 if (sme->crypto.n_ciphers_pairwise) 472 if (sme->crypto.n_ciphers_pairwise)
328 ath6kl_set_cipher(ar, sme->crypto.ciphers_pairwise[0], true); 473 ath6kl_set_cipher(vif, sme->crypto.ciphers_pairwise[0], true);
329 else 474 else
330 ath6kl_set_cipher(ar, 0, true); 475 ath6kl_set_cipher(vif, 0, true);
331 476
332 ath6kl_set_cipher(ar, sme->crypto.cipher_group, false); 477 ath6kl_set_cipher(vif, sme->crypto.cipher_group, false);
333 478
334 if (sme->crypto.n_akm_suites) 479 if (sme->crypto.n_akm_suites)
335 ath6kl_set_key_mgmt(ar, sme->crypto.akm_suites[0]); 480 ath6kl_set_key_mgmt(vif, sme->crypto.akm_suites[0]);
336 481
337 if ((sme->key_len) && 482 if ((sme->key_len) &&
338 (ar->auth_mode == NONE_AUTH) && (ar->prwise_crypto == WEP_CRYPT)) { 483 (vif->auth_mode == NONE_AUTH) &&
484 (vif->prwise_crypto == WEP_CRYPT)) {
339 struct ath6kl_key *key = NULL; 485 struct ath6kl_key *key = NULL;
340 486
341 if (sme->key_idx < WMI_MIN_KEY_INDEX || 487 if (sme->key_idx < WMI_MIN_KEY_INDEX ||
@@ -346,56 +492,57 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
346 return -ENOENT; 492 return -ENOENT;
347 } 493 }
348 494
349 key = &ar->keys[sme->key_idx]; 495 key = &vif->keys[sme->key_idx];
350 key->key_len = sme->key_len; 496 key->key_len = sme->key_len;
351 memcpy(key->key, sme->key, key->key_len); 497 memcpy(key->key, sme->key, key->key_len);
352 key->cipher = ar->prwise_crypto; 498 key->cipher = vif->prwise_crypto;
353 ar->def_txkey_index = sme->key_idx; 499 vif->def_txkey_index = sme->key_idx;
354 500
355 ath6kl_wmi_addkey_cmd(ar->wmi, sme->key_idx, 501 ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, sme->key_idx,
356 ar->prwise_crypto, 502 vif->prwise_crypto,
357 GROUP_USAGE | TX_USAGE, 503 GROUP_USAGE | TX_USAGE,
358 key->key_len, 504 key->key_len,
359 NULL, 505 NULL, 0,
360 key->key, KEY_OP_INIT_VAL, NULL, 506 key->key, KEY_OP_INIT_VAL, NULL,
361 NO_SYNC_WMIFLAG); 507 NO_SYNC_WMIFLAG);
362 } 508 }
363 509
364 if (!ar->usr_bss_filter) { 510 if (!ar->usr_bss_filter) {
365 clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag); 511 clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
366 if (ath6kl_wmi_bssfilter_cmd(ar->wmi, ALL_BSS_FILTER, 0) != 0) { 512 if (ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
513 ALL_BSS_FILTER, 0) != 0) {
367 ath6kl_err("couldn't set bss filtering\n"); 514 ath6kl_err("couldn't set bss filtering\n");
368 up(&ar->sem); 515 up(&ar->sem);
369 return -EIO; 516 return -EIO;
370 } 517 }
371 } 518 }
372 519
373 ar->nw_type = ar->next_mode; 520 vif->nw_type = vif->next_mode;
374 521
375 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, 522 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
376 "%s: connect called with authmode %d dot11 auth %d" 523 "%s: connect called with authmode %d dot11 auth %d"
377 " PW crypto %d PW crypto len %d GRP crypto %d" 524 " PW crypto %d PW crypto len %d GRP crypto %d"
378 " GRP crypto len %d channel hint %u\n", 525 " GRP crypto len %d channel hint %u\n",
379 __func__, 526 __func__,
380 ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto, 527 vif->auth_mode, vif->dot11_auth_mode, vif->prwise_crypto,
381 ar->prwise_crypto_len, ar->grp_crypto, 528 vif->prwise_crypto_len, vif->grp_crypto,
382 ar->grp_crypto_len, ar->ch_hint); 529 vif->grp_crypto_len, vif->ch_hint);
383 530
384 ar->reconnect_flag = 0; 531 vif->reconnect_flag = 0;
385 status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type, 532 status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type,
386 ar->dot11_auth_mode, ar->auth_mode, 533 vif->dot11_auth_mode, vif->auth_mode,
387 ar->prwise_crypto, 534 vif->prwise_crypto,
388 ar->prwise_crypto_len, 535 vif->prwise_crypto_len,
389 ar->grp_crypto, ar->grp_crypto_len, 536 vif->grp_crypto, vif->grp_crypto_len,
390 ar->ssid_len, ar->ssid, 537 vif->ssid_len, vif->ssid,
391 ar->req_bssid, ar->ch_hint, 538 vif->req_bssid, vif->ch_hint,
392 ar->connect_ctrl_flags); 539 ar->connect_ctrl_flags);
393 540
394 up(&ar->sem); 541 up(&ar->sem);
395 542
396 if (status == -EINVAL) { 543 if (status == -EINVAL) {
397 memset(ar->ssid, 0, sizeof(ar->ssid)); 544 memset(vif->ssid, 0, sizeof(vif->ssid));
398 ar->ssid_len = 0; 545 vif->ssid_len = 0;
399 ath6kl_err("invalid request\n"); 546 ath6kl_err("invalid request\n");
400 return -ENOENT; 547 return -ENOENT;
401 } else if (status) { 548 } else if (status) {
@@ -404,27 +551,28 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
404 } 551 }
405 552
406 if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) && 553 if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) &&
407 ((ar->auth_mode == WPA_PSK_AUTH) 554 ((vif->auth_mode == WPA_PSK_AUTH)
408 || (ar->auth_mode == WPA2_PSK_AUTH))) { 555 || (vif->auth_mode == WPA2_PSK_AUTH))) {
409 mod_timer(&ar->disconnect_timer, 556 mod_timer(&vif->disconnect_timer,
410 jiffies + msecs_to_jiffies(DISCON_TIMER_INTVAL)); 557 jiffies + msecs_to_jiffies(DISCON_TIMER_INTVAL));
411 } 558 }
412 559
413 ar->connect_ctrl_flags &= ~CONNECT_DO_WPA_OFFLOAD; 560 ar->connect_ctrl_flags &= ~CONNECT_DO_WPA_OFFLOAD;
414 set_bit(CONNECT_PEND, &ar->flag); 561 set_bit(CONNECT_PEND, &vif->flags);
415 562
416 return 0; 563 return 0;
417} 564}
418 565
419static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid, 566static int ath6kl_add_bss_if_needed(struct ath6kl_vif *vif, const u8 *bssid,
420 struct ieee80211_channel *chan, 567 struct ieee80211_channel *chan,
421 const u8 *beacon_ie, size_t beacon_ie_len) 568 const u8 *beacon_ie, size_t beacon_ie_len)
422{ 569{
570 struct ath6kl *ar = vif->ar;
423 struct cfg80211_bss *bss; 571 struct cfg80211_bss *bss;
424 u8 *ie; 572 u8 *ie;
425 573
426 bss = cfg80211_get_bss(ar->wdev->wiphy, chan, bssid, 574 bss = cfg80211_get_bss(ar->wiphy, chan, bssid,
427 ar->ssid, ar->ssid_len, WLAN_CAPABILITY_ESS, 575 vif->ssid, vif->ssid_len, WLAN_CAPABILITY_ESS,
428 WLAN_CAPABILITY_ESS); 576 WLAN_CAPABILITY_ESS);
429 if (bss == NULL) { 577 if (bss == NULL) {
430 /* 578 /*
@@ -435,16 +583,16 @@ static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid,
435 * Prepend SSID element since it is not included in the Beacon 583 * Prepend SSID element since it is not included in the Beacon
436 * IEs from the target. 584 * IEs from the target.
437 */ 585 */
438 ie = kmalloc(2 + ar->ssid_len + beacon_ie_len, GFP_KERNEL); 586 ie = kmalloc(2 + vif->ssid_len + beacon_ie_len, GFP_KERNEL);
439 if (ie == NULL) 587 if (ie == NULL)
440 return -ENOMEM; 588 return -ENOMEM;
441 ie[0] = WLAN_EID_SSID; 589 ie[0] = WLAN_EID_SSID;
442 ie[1] = ar->ssid_len; 590 ie[1] = vif->ssid_len;
443 memcpy(ie + 2, ar->ssid, ar->ssid_len); 591 memcpy(ie + 2, vif->ssid, vif->ssid_len);
444 memcpy(ie + 2 + ar->ssid_len, beacon_ie, beacon_ie_len); 592 memcpy(ie + 2 + vif->ssid_len, beacon_ie, beacon_ie_len);
445 bss = cfg80211_inform_bss(ar->wdev->wiphy, chan, 593 bss = cfg80211_inform_bss(ar->wiphy, chan,
446 bssid, 0, WLAN_CAPABILITY_ESS, 100, 594 bssid, 0, WLAN_CAPABILITY_ESS, 100,
447 ie, 2 + ar->ssid_len + beacon_ie_len, 595 ie, 2 + vif->ssid_len + beacon_ie_len,
448 0, GFP_KERNEL); 596 0, GFP_KERNEL);
449 if (bss) 597 if (bss)
450 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "added dummy bss for " 598 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "added dummy bss for "
@@ -463,7 +611,7 @@ static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid,
463 return 0; 611 return 0;
464} 612}
465 613
466void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel, 614void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
467 u8 *bssid, u16 listen_intvl, 615 u8 *bssid, u16 listen_intvl,
468 u16 beacon_intvl, 616 u16 beacon_intvl,
469 enum network_type nw_type, 617 enum network_type nw_type,
@@ -471,6 +619,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
471 u8 assoc_resp_len, u8 *assoc_info) 619 u8 assoc_resp_len, u8 *assoc_info)
472{ 620{
473 struct ieee80211_channel *chan; 621 struct ieee80211_channel *chan;
622 struct ath6kl *ar = vif->ar;
474 623
475 /* capinfo + listen interval */ 624 /* capinfo + listen interval */
476 u8 assoc_req_ie_offset = sizeof(u16) + sizeof(u16); 625 u8 assoc_req_ie_offset = sizeof(u16) + sizeof(u16);
@@ -489,11 +638,11 @@ void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
489 * Store Beacon interval here; DTIM period will be available only once 638 * Store Beacon interval here; DTIM period will be available only once
490 * a Beacon frame from the AP is seen. 639 * a Beacon frame from the AP is seen.
491 */ 640 */
492 ar->assoc_bss_beacon_int = beacon_intvl; 641 vif->assoc_bss_beacon_int = beacon_intvl;
493 clear_bit(DTIM_PERIOD_AVAIL, &ar->flag); 642 clear_bit(DTIM_PERIOD_AVAIL, &vif->flags);
494 643
495 if (nw_type & ADHOC_NETWORK) { 644 if (nw_type & ADHOC_NETWORK) {
496 if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) { 645 if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
497 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, 646 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
498 "%s: ath6k not in ibss mode\n", __func__); 647 "%s: ath6k not in ibss mode\n", __func__);
499 return; 648 return;
@@ -501,39 +650,39 @@ void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
501 } 650 }
502 651
503 if (nw_type & INFRA_NETWORK) { 652 if (nw_type & INFRA_NETWORK) {
504 if (ar->wdev->iftype != NL80211_IFTYPE_STATION && 653 if (vif->wdev.iftype != NL80211_IFTYPE_STATION &&
505 ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) { 654 vif->wdev.iftype != NL80211_IFTYPE_P2P_CLIENT) {
506 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, 655 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
507 "%s: ath6k not in station mode\n", __func__); 656 "%s: ath6k not in station mode\n", __func__);
508 return; 657 return;
509 } 658 }
510 } 659 }
511 660
512 chan = ieee80211_get_channel(ar->wdev->wiphy, (int) channel); 661 chan = ieee80211_get_channel(ar->wiphy, (int) channel);
513 662
514 663
515 if (nw_type & ADHOC_NETWORK) { 664 if (nw_type & ADHOC_NETWORK) {
516 cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL); 665 cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
517 return; 666 return;
518 } 667 }
519 668
520 if (ath6kl_add_bss_if_needed(ar, bssid, chan, assoc_info, 669 if (ath6kl_add_bss_if_needed(vif, bssid, chan, assoc_info,
521 beacon_ie_len) < 0) { 670 beacon_ie_len) < 0) {
522 ath6kl_err("could not add cfg80211 bss entry for " 671 ath6kl_err("could not add cfg80211 bss entry for "
523 "connect/roamed notification\n"); 672 "connect/roamed notification\n");
524 return; 673 return;
525 } 674 }
526 675
527 if (ar->sme_state == SME_CONNECTING) { 676 if (vif->sme_state == SME_CONNECTING) {
528 /* inform connect result to cfg80211 */ 677 /* inform connect result to cfg80211 */
529 ar->sme_state = SME_CONNECTED; 678 vif->sme_state = SME_CONNECTED;
530 cfg80211_connect_result(ar->net_dev, bssid, 679 cfg80211_connect_result(vif->ndev, bssid,
531 assoc_req_ie, assoc_req_len, 680 assoc_req_ie, assoc_req_len,
532 assoc_resp_ie, assoc_resp_len, 681 assoc_resp_ie, assoc_resp_len,
533 WLAN_STATUS_SUCCESS, GFP_KERNEL); 682 WLAN_STATUS_SUCCESS, GFP_KERNEL);
534 } else if (ar->sme_state == SME_CONNECTED) { 683 } else if (vif->sme_state == SME_CONNECTED) {
535 /* inform roam event to cfg80211 */ 684 /* inform roam event to cfg80211 */
536 cfg80211_roamed(ar->net_dev, chan, bssid, 685 cfg80211_roamed(vif->ndev, chan, bssid,
537 assoc_req_ie, assoc_req_len, 686 assoc_req_ie, assoc_req_len,
538 assoc_resp_ie, assoc_resp_len, GFP_KERNEL); 687 assoc_resp_ie, assoc_resp_len, GFP_KERNEL);
539 } 688 }
@@ -543,11 +692,12 @@ static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy,
543 struct net_device *dev, u16 reason_code) 692 struct net_device *dev, u16 reason_code)
544{ 693{
545 struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev); 694 struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
695 struct ath6kl_vif *vif = netdev_priv(dev);
546 696
547 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: reason=%u\n", __func__, 697 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: reason=%u\n", __func__,
548 reason_code); 698 reason_code);
549 699
550 if (!ath6kl_cfg80211_ready(ar)) 700 if (!ath6kl_cfg80211_ready(vif))
551 return -EIO; 701 return -EIO;
552 702
553 if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) { 703 if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
@@ -560,44 +710,46 @@ static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy,
560 return -ERESTARTSYS; 710 return -ERESTARTSYS;
561 } 711 }
562 712
563 ar->reconnect_flag = 0; 713 vif->reconnect_flag = 0;
564 ath6kl_disconnect(ar); 714 ath6kl_disconnect(vif);
565 memset(ar->ssid, 0, sizeof(ar->ssid)); 715 memset(vif->ssid, 0, sizeof(vif->ssid));
566 ar->ssid_len = 0; 716 vif->ssid_len = 0;
567 717
568 if (!test_bit(SKIP_SCAN, &ar->flag)) 718 if (!test_bit(SKIP_SCAN, &ar->flag))
569 memset(ar->req_bssid, 0, sizeof(ar->req_bssid)); 719 memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
570 720
571 up(&ar->sem); 721 up(&ar->sem);
572 722
573 ar->sme_state = SME_DISCONNECTED; 723 vif->sme_state = SME_DISCONNECTED;
574 724
575 return 0; 725 return 0;
576} 726}
577 727
578void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason, 728void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
579 u8 *bssid, u8 assoc_resp_len, 729 u8 *bssid, u8 assoc_resp_len,
580 u8 *assoc_info, u16 proto_reason) 730 u8 *assoc_info, u16 proto_reason)
581{ 731{
582 if (ar->scan_req) { 732 struct ath6kl *ar = vif->ar;
583 cfg80211_scan_done(ar->scan_req, true); 733
584 ar->scan_req = NULL; 734 if (vif->scan_req) {
735 cfg80211_scan_done(vif->scan_req, true);
736 vif->scan_req = NULL;
585 } 737 }
586 738
587 if (ar->nw_type & ADHOC_NETWORK) { 739 if (vif->nw_type & ADHOC_NETWORK) {
588 if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) { 740 if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
589 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, 741 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
590 "%s: ath6k not in ibss mode\n", __func__); 742 "%s: ath6k not in ibss mode\n", __func__);
591 return; 743 return;
592 } 744 }
593 memset(bssid, 0, ETH_ALEN); 745 memset(bssid, 0, ETH_ALEN);
594 cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL); 746 cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
595 return; 747 return;
596 } 748 }
597 749
598 if (ar->nw_type & INFRA_NETWORK) { 750 if (vif->nw_type & INFRA_NETWORK) {
599 if (ar->wdev->iftype != NL80211_IFTYPE_STATION && 751 if (vif->wdev.iftype != NL80211_IFTYPE_STATION &&
600 ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) { 752 vif->wdev.iftype != NL80211_IFTYPE_P2P_CLIENT) {
601 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, 753 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
602 "%s: ath6k not in station mode\n", __func__); 754 "%s: ath6k not in station mode\n", __func__);
603 return; 755 return;
@@ -614,42 +766,44 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
614 */ 766 */
615 767
616 if (reason != DISCONNECT_CMD) { 768 if (reason != DISCONNECT_CMD) {
617 ath6kl_wmi_disconnect_cmd(ar->wmi); 769 ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
618 return; 770 return;
619 } 771 }
620 772
621 clear_bit(CONNECT_PEND, &ar->flag); 773 clear_bit(CONNECT_PEND, &vif->flags);
622 774
623 if (ar->sme_state == SME_CONNECTING) { 775 if (vif->sme_state == SME_CONNECTING) {
624 cfg80211_connect_result(ar->net_dev, 776 cfg80211_connect_result(vif->ndev,
625 bssid, NULL, 0, 777 bssid, NULL, 0,
626 NULL, 0, 778 NULL, 0,
627 WLAN_STATUS_UNSPECIFIED_FAILURE, 779 WLAN_STATUS_UNSPECIFIED_FAILURE,
628 GFP_KERNEL); 780 GFP_KERNEL);
629 } else if (ar->sme_state == SME_CONNECTED) { 781 } else if (vif->sme_state == SME_CONNECTED) {
630 cfg80211_disconnected(ar->net_dev, reason, 782 cfg80211_disconnected(vif->ndev, reason,
631 NULL, 0, GFP_KERNEL); 783 NULL, 0, GFP_KERNEL);
632 } 784 }
633 785
634 ar->sme_state = SME_DISCONNECTED; 786 vif->sme_state = SME_DISCONNECTED;
635} 787}
636 788
637static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, 789static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
638 struct cfg80211_scan_request *request) 790 struct cfg80211_scan_request *request)
639{ 791{
640 struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev); 792 struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
793 struct ath6kl_vif *vif = netdev_priv(ndev);
641 s8 n_channels = 0; 794 s8 n_channels = 0;
642 u16 *channels = NULL; 795 u16 *channels = NULL;
643 int ret = 0; 796 int ret = 0;
797 u32 force_fg_scan = 0;
644 798
645 if (!ath6kl_cfg80211_ready(ar)) 799 if (!ath6kl_cfg80211_ready(vif))
646 return -EIO; 800 return -EIO;
647 801
648 if (!ar->usr_bss_filter) { 802 if (!ar->usr_bss_filter) {
649 clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag); 803 clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
650 ret = ath6kl_wmi_bssfilter_cmd( 804 ret = ath6kl_wmi_bssfilter_cmd(
651 ar->wmi, 805 ar->wmi, vif->fw_vif_idx,
652 (test_bit(CONNECTED, &ar->flag) ? 806 (test_bit(CONNECTED, &vif->flags) ?
653 ALL_BUT_BSS_FILTER : ALL_BSS_FILTER), 0); 807 ALL_BUT_BSS_FILTER : ALL_BSS_FILTER), 0);
654 if (ret) { 808 if (ret) {
655 ath6kl_err("couldn't set bss filtering\n"); 809 ath6kl_err("couldn't set bss filtering\n");
@@ -664,14 +818,15 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
664 request->n_ssids = MAX_PROBED_SSID_INDEX - 1; 818 request->n_ssids = MAX_PROBED_SSID_INDEX - 1;
665 819
666 for (i = 0; i < request->n_ssids; i++) 820 for (i = 0; i < request->n_ssids; i++)
667 ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1, 821 ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
668 SPECIFIC_SSID_FLAG, 822 i + 1, SPECIFIC_SSID_FLAG,
669 request->ssids[i].ssid_len, 823 request->ssids[i].ssid_len,
670 request->ssids[i].ssid); 824 request->ssids[i].ssid);
671 } 825 }
672 826
673 if (request->ie) { 827 if (request->ie) {
674 ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_REQ, 828 ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
829 WMI_FRAME_PROBE_REQ,
675 request->ie, request->ie_len); 830 request->ie, request->ie_len);
676 if (ret) { 831 if (ret) {
677 ath6kl_err("failed to set Probe Request appie for " 832 ath6kl_err("failed to set Probe Request appie for "
@@ -702,44 +857,47 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
702 channels[i] = request->channels[i]->center_freq; 857 channels[i] = request->channels[i]->center_freq;
703 } 858 }
704 859
705 ret = ath6kl_wmi_startscan_cmd(ar->wmi, WMI_LONG_SCAN, 0, 860 if (test_bit(CONNECTED, &vif->flags))
706 false, 0, 0, n_channels, channels); 861 force_fg_scan = 1;
862
863 ret = ath6kl_wmi_startscan_cmd(ar->wmi, vif->fw_vif_idx, WMI_LONG_SCAN,
864 force_fg_scan, false, 0, 0, n_channels,
865 channels);
707 if (ret) 866 if (ret)
708 ath6kl_err("wmi_startscan_cmd failed\n"); 867 ath6kl_err("wmi_startscan_cmd failed\n");
709 else 868 else
710 ar->scan_req = request; 869 vif->scan_req = request;
711 870
712 kfree(channels); 871 kfree(channels);
713 872
714 return ret; 873 return ret;
715} 874}
716 875
717void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status) 876void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted)
718{ 877{
878 struct ath6kl *ar = vif->ar;
719 int i; 879 int i;
720 880
721 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status %d\n", __func__, status); 881 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status%s\n", __func__,
882 aborted ? " aborted" : "");
722 883
723 if (!ar->scan_req) 884 if (!vif->scan_req)
724 return; 885 return;
725 886
726 if ((status == -ECANCELED) || (status == -EBUSY)) { 887 if (aborted)
727 cfg80211_scan_done(ar->scan_req, true);
728 goto out; 888 goto out;
729 }
730
731 cfg80211_scan_done(ar->scan_req, false);
732 889
733 if (ar->scan_req->n_ssids && ar->scan_req->ssids[0].ssid_len) { 890 if (vif->scan_req->n_ssids && vif->scan_req->ssids[0].ssid_len) {
734 for (i = 0; i < ar->scan_req->n_ssids; i++) { 891 for (i = 0; i < vif->scan_req->n_ssids; i++) {
735 ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1, 892 ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
736 DISABLE_SSID_FLAG, 893 i + 1, DISABLE_SSID_FLAG,
737 0, NULL); 894 0, NULL);
738 } 895 }
739 } 896 }
740 897
741out: 898out:
742 ar->scan_req = NULL; 899 cfg80211_scan_done(vif->scan_req, aborted);
900 vif->scan_req = NULL;
743} 901}
744 902
745static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, 903static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
@@ -748,14 +906,21 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
748 struct key_params *params) 906 struct key_params *params)
749{ 907{
750 struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev); 908 struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
909 struct ath6kl_vif *vif = netdev_priv(ndev);
751 struct ath6kl_key *key = NULL; 910 struct ath6kl_key *key = NULL;
752 u8 key_usage; 911 u8 key_usage;
753 u8 key_type; 912 u8 key_type;
754 int status = 0;
755 913
756 if (!ath6kl_cfg80211_ready(ar)) 914 if (!ath6kl_cfg80211_ready(vif))
757 return -EIO; 915 return -EIO;
758 916
917 if (params->cipher == CCKM_KRK_CIPHER_SUITE) {
918 if (params->key_len != WMI_KRK_LEN)
919 return -EINVAL;
920 return ath6kl_wmi_add_krk_cmd(ar->wmi, vif->fw_vif_idx,
921 params->key);
922 }
923
759 if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) { 924 if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
760 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, 925 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
761 "%s: key index %d out of bounds\n", __func__, 926 "%s: key index %d out of bounds\n", __func__,
@@ -763,7 +928,7 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
763 return -ENOENT; 928 return -ENOENT;
764 } 929 }
765 930
766 key = &ar->keys[key_index]; 931 key = &vif->keys[key_index];
767 memset(key, 0, sizeof(struct ath6kl_key)); 932 memset(key, 0, sizeof(struct ath6kl_key));
768 933
769 if (pairwise) 934 if (pairwise)
@@ -801,26 +966,26 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
801 return -ENOTSUPP; 966 return -ENOTSUPP;
802 } 967 }
803 968
804 if (((ar->auth_mode == WPA_PSK_AUTH) 969 if (((vif->auth_mode == WPA_PSK_AUTH)
805 || (ar->auth_mode == WPA2_PSK_AUTH)) 970 || (vif->auth_mode == WPA2_PSK_AUTH))
806 && (key_usage & GROUP_USAGE)) 971 && (key_usage & GROUP_USAGE))
807 del_timer(&ar->disconnect_timer); 972 del_timer(&vif->disconnect_timer);
808 973
809 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, 974 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
810 "%s: index %d, key_len %d, key_type 0x%x, key_usage 0x%x, seq_len %d\n", 975 "%s: index %d, key_len %d, key_type 0x%x, key_usage 0x%x, seq_len %d\n",
811 __func__, key_index, key->key_len, key_type, 976 __func__, key_index, key->key_len, key_type,
812 key_usage, key->seq_len); 977 key_usage, key->seq_len);
813 978
814 ar->def_txkey_index = key_index; 979 vif->def_txkey_index = key_index;
815 980
816 if (ar->nw_type == AP_NETWORK && !pairwise && 981 if (vif->nw_type == AP_NETWORK && !pairwise &&
817 (key_type == TKIP_CRYPT || key_type == AES_CRYPT) && params) { 982 (key_type == TKIP_CRYPT || key_type == AES_CRYPT) && params) {
818 ar->ap_mode_bkey.valid = true; 983 ar->ap_mode_bkey.valid = true;
819 ar->ap_mode_bkey.key_index = key_index; 984 ar->ap_mode_bkey.key_index = key_index;
820 ar->ap_mode_bkey.key_type = key_type; 985 ar->ap_mode_bkey.key_type = key_type;
821 ar->ap_mode_bkey.key_len = key->key_len; 986 ar->ap_mode_bkey.key_len = key->key_len;
822 memcpy(ar->ap_mode_bkey.key, key->key, key->key_len); 987 memcpy(ar->ap_mode_bkey.key, key->key, key->key_len);
823 if (!test_bit(CONNECTED, &ar->flag)) { 988 if (!test_bit(CONNECTED, &vif->flags)) {
824 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay initial group " 989 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay initial group "
825 "key configuration until AP mode has been " 990 "key configuration until AP mode has been "
826 "started\n"); 991 "started\n");
@@ -832,8 +997,8 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
832 } 997 }
833 } 998 }
834 999
835 if (ar->next_mode == AP_NETWORK && key_type == WEP_CRYPT && 1000 if (vif->next_mode == AP_NETWORK && key_type == WEP_CRYPT &&
836 !test_bit(CONNECTED, &ar->flag)) { 1001 !test_bit(CONNECTED, &vif->flags)) {
837 /* 1002 /*
838 * Store the key locally so that it can be re-configured after 1003 * Store the key locally so that it can be re-configured after
839 * the AP mode has properly started 1004 * the AP mode has properly started
@@ -841,20 +1006,18 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
841 */ 1006 */
842 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay WEP key configuration " 1007 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay WEP key configuration "
843 "until AP mode has been started\n"); 1008 "until AP mode has been started\n");
844 ar->wep_key_list[key_index].key_len = key->key_len; 1009 vif->wep_key_list[key_index].key_len = key->key_len;
845 memcpy(ar->wep_key_list[key_index].key, key->key, key->key_len); 1010 memcpy(vif->wep_key_list[key_index].key, key->key,
1011 key->key_len);
846 return 0; 1012 return 0;
847 } 1013 }
848 1014
849 status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index, 1015 return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx,
850 key_type, key_usage, key->key_len, 1016 vif->def_txkey_index,
851 key->seq, key->key, KEY_OP_INIT_VAL, 1017 key_type, key_usage, key->key_len,
852 (u8 *) mac_addr, SYNC_BOTH_WMIFLAG); 1018 key->seq, key->seq_len, key->key,
853 1019 KEY_OP_INIT_VAL,
854 if (status) 1020 (u8 *) mac_addr, SYNC_BOTH_WMIFLAG);
855 return -EIO;
856
857 return 0;
858} 1021}
859 1022
860static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, 1023static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
@@ -862,10 +1025,11 @@ static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
862 const u8 *mac_addr) 1025 const u8 *mac_addr)
863{ 1026{
864 struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev); 1027 struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
1028 struct ath6kl_vif *vif = netdev_priv(ndev);
865 1029
866 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index); 1030 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
867 1031
868 if (!ath6kl_cfg80211_ready(ar)) 1032 if (!ath6kl_cfg80211_ready(vif))
869 return -EIO; 1033 return -EIO;
870 1034
871 if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) { 1035 if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
@@ -875,15 +1039,15 @@ static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
875 return -ENOENT; 1039 return -ENOENT;
876 } 1040 }
877 1041
878 if (!ar->keys[key_index].key_len) { 1042 if (!vif->keys[key_index].key_len) {
879 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, 1043 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
880 "%s: index %d is empty\n", __func__, key_index); 1044 "%s: index %d is empty\n", __func__, key_index);
881 return 0; 1045 return 0;
882 } 1046 }
883 1047
884 ar->keys[key_index].key_len = 0; 1048 vif->keys[key_index].key_len = 0;
885 1049
886 return ath6kl_wmi_deletekey_cmd(ar->wmi, key_index); 1050 return ath6kl_wmi_deletekey_cmd(ar->wmi, vif->fw_vif_idx, key_index);
887} 1051}
888 1052
889static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, 1053static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
@@ -892,13 +1056,13 @@ static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
892 void (*callback) (void *cookie, 1056 void (*callback) (void *cookie,
893 struct key_params *)) 1057 struct key_params *))
894{ 1058{
895 struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev); 1059 struct ath6kl_vif *vif = netdev_priv(ndev);
896 struct ath6kl_key *key = NULL; 1060 struct ath6kl_key *key = NULL;
897 struct key_params params; 1061 struct key_params params;
898 1062
899 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index); 1063 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
900 1064
901 if (!ath6kl_cfg80211_ready(ar)) 1065 if (!ath6kl_cfg80211_ready(vif))
902 return -EIO; 1066 return -EIO;
903 1067
904 if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) { 1068 if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
@@ -908,7 +1072,7 @@ static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
908 return -ENOENT; 1072 return -ENOENT;
909 } 1073 }
910 1074
911 key = &ar->keys[key_index]; 1075 key = &vif->keys[key_index];
912 memset(&params, 0, sizeof(params)); 1076 memset(&params, 0, sizeof(params));
913 params.cipher = key->cipher; 1077 params.cipher = key->cipher;
914 params.key_len = key->key_len; 1078 params.key_len = key->key_len;
@@ -927,14 +1091,14 @@ static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
927 bool multicast) 1091 bool multicast)
928{ 1092{
929 struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev); 1093 struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
1094 struct ath6kl_vif *vif = netdev_priv(ndev);
930 struct ath6kl_key *key = NULL; 1095 struct ath6kl_key *key = NULL;
931 int status = 0;
932 u8 key_usage; 1096 u8 key_usage;
933 enum crypto_type key_type = NONE_CRYPT; 1097 enum crypto_type key_type = NONE_CRYPT;
934 1098
935 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index); 1099 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
936 1100
937 if (!ath6kl_cfg80211_ready(ar)) 1101 if (!ath6kl_cfg80211_ready(vif))
938 return -EIO; 1102 return -EIO;
939 1103
940 if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) { 1104 if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
@@ -944,43 +1108,41 @@ static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
944 return -ENOENT; 1108 return -ENOENT;
945 } 1109 }
946 1110
947 if (!ar->keys[key_index].key_len) { 1111 if (!vif->keys[key_index].key_len) {
948 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: invalid key index %d\n", 1112 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: invalid key index %d\n",
949 __func__, key_index); 1113 __func__, key_index);
950 return -EINVAL; 1114 return -EINVAL;
951 } 1115 }
952 1116
953 ar->def_txkey_index = key_index; 1117 vif->def_txkey_index = key_index;
954 key = &ar->keys[ar->def_txkey_index]; 1118 key = &vif->keys[vif->def_txkey_index];
955 key_usage = GROUP_USAGE; 1119 key_usage = GROUP_USAGE;
956 if (ar->prwise_crypto == WEP_CRYPT) 1120 if (vif->prwise_crypto == WEP_CRYPT)
957 key_usage |= TX_USAGE; 1121 key_usage |= TX_USAGE;
958 if (unicast) 1122 if (unicast)
959 key_type = ar->prwise_crypto; 1123 key_type = vif->prwise_crypto;
960 if (multicast) 1124 if (multicast)
961 key_type = ar->grp_crypto; 1125 key_type = vif->grp_crypto;
962 1126
963 if (ar->next_mode == AP_NETWORK && !test_bit(CONNECTED, &ar->flag)) 1127 if (vif->next_mode == AP_NETWORK && !test_bit(CONNECTED, &vif->flags))
964 return 0; /* Delay until AP mode has been started */ 1128 return 0; /* Delay until AP mode has been started */
965 1129
966 status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index, 1130 return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx,
967 key_type, key_usage, 1131 vif->def_txkey_index,
968 key->key_len, key->seq, key->key, 1132 key_type, key_usage,
969 KEY_OP_INIT_VAL, NULL, 1133 key->key_len, key->seq, key->seq_len,
970 SYNC_BOTH_WMIFLAG); 1134 key->key,
971 if (status) 1135 KEY_OP_INIT_VAL, NULL,
972 return -EIO; 1136 SYNC_BOTH_WMIFLAG);
973
974 return 0;
975} 1137}
976 1138
977void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid, 1139void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid,
978 bool ismcast) 1140 bool ismcast)
979{ 1141{
980 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, 1142 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
981 "%s: keyid %d, ismcast %d\n", __func__, keyid, ismcast); 1143 "%s: keyid %d, ismcast %d\n", __func__, keyid, ismcast);
982 1144
983 cfg80211_michael_mic_failure(ar->net_dev, ar->bssid, 1145 cfg80211_michael_mic_failure(vif->ndev, vif->bssid,
984 (ismcast ? NL80211_KEYTYPE_GROUP : 1146 (ismcast ? NL80211_KEYTYPE_GROUP :
985 NL80211_KEYTYPE_PAIRWISE), keyid, NULL, 1147 NL80211_KEYTYPE_PAIRWISE), keyid, NULL,
986 GFP_KERNEL); 1148 GFP_KERNEL);
@@ -989,12 +1151,17 @@ void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
989static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) 1151static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
990{ 1152{
991 struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy); 1153 struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
1154 struct ath6kl_vif *vif;
992 int ret; 1155 int ret;
993 1156
994 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: changed 0x%x\n", __func__, 1157 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: changed 0x%x\n", __func__,
995 changed); 1158 changed);
996 1159
997 if (!ath6kl_cfg80211_ready(ar)) 1160 vif = ath6kl_vif_first(ar);
1161 if (!vif)
1162 return -EIO;
1163
1164 if (!ath6kl_cfg80211_ready(vif))
998 return -EIO; 1165 return -EIO;
999 1166
1000 if (changed & WIPHY_PARAM_RTS_THRESHOLD) { 1167 if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
@@ -1017,12 +1184,17 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
1017 int dbm) 1184 int dbm)
1018{ 1185{
1019 struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy); 1186 struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
1187 struct ath6kl_vif *vif;
1020 u8 ath6kl_dbm; 1188 u8 ath6kl_dbm;
1021 1189
1022 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x, dbm %d\n", __func__, 1190 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x, dbm %d\n", __func__,
1023 type, dbm); 1191 type, dbm);
1024 1192
1025 if (!ath6kl_cfg80211_ready(ar)) 1193 vif = ath6kl_vif_first(ar);
1194 if (!vif)
1195 return -EIO;
1196
1197 if (!ath6kl_cfg80211_ready(vif))
1026 return -EIO; 1198 return -EIO;
1027 1199
1028 switch (type) { 1200 switch (type) {
@@ -1037,7 +1209,7 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
1037 return -EOPNOTSUPP; 1209 return -EOPNOTSUPP;
1038 } 1210 }
1039 1211
1040 ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, ath6kl_dbm); 1212 ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_dbm);
1041 1213
1042 return 0; 1214 return 0;
1043} 1215}
@@ -1045,14 +1217,19 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
1045static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm) 1217static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
1046{ 1218{
1047 struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy); 1219 struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
1220 struct ath6kl_vif *vif;
1048 1221
1049 if (!ath6kl_cfg80211_ready(ar)) 1222 vif = ath6kl_vif_first(ar);
1223 if (!vif)
1050 return -EIO; 1224 return -EIO;
1051 1225
1052 if (test_bit(CONNECTED, &ar->flag)) { 1226 if (!ath6kl_cfg80211_ready(vif))
1227 return -EIO;
1228
1229 if (test_bit(CONNECTED, &vif->flags)) {
1053 ar->tx_pwr = 0; 1230 ar->tx_pwr = 0;
1054 1231
1055 if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi) != 0) { 1232 if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx) != 0) {
1056 ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n"); 1233 ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n");
1057 return -EIO; 1234 return -EIO;
1058 } 1235 }
@@ -1076,11 +1253,12 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
1076{ 1253{
1077 struct ath6kl *ar = ath6kl_priv(dev); 1254 struct ath6kl *ar = ath6kl_priv(dev);
1078 struct wmi_power_mode_cmd mode; 1255 struct wmi_power_mode_cmd mode;
1256 struct ath6kl_vif *vif = netdev_priv(dev);
1079 1257
1080 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: pmgmt %d, timeout %d\n", 1258 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: pmgmt %d, timeout %d\n",
1081 __func__, pmgmt, timeout); 1259 __func__, pmgmt, timeout);
1082 1260
1083 if (!ath6kl_cfg80211_ready(ar)) 1261 if (!ath6kl_cfg80211_ready(vif))
1084 return -EIO; 1262 return -EIO;
1085 1263
1086 if (pmgmt) { 1264 if (pmgmt) {
@@ -1091,7 +1269,8 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
1091 mode.pwr_mode = MAX_PERF_POWER; 1269 mode.pwr_mode = MAX_PERF_POWER;
1092 } 1270 }
1093 1271
1094 if (ath6kl_wmi_powermode_cmd(ar->wmi, mode.pwr_mode) != 0) { 1272 if (ath6kl_wmi_powermode_cmd(ar->wmi, vif->fw_vif_idx,
1273 mode.pwr_mode) != 0) {
1095 ath6kl_err("wmi_powermode_cmd failed\n"); 1274 ath6kl_err("wmi_powermode_cmd failed\n");
1096 return -EIO; 1275 return -EIO;
1097 } 1276 }
@@ -1099,41 +1278,86 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
1099 return 0; 1278 return 0;
1100} 1279}
1101 1280
1281static struct net_device *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
1282 char *name,
1283 enum nl80211_iftype type,
1284 u32 *flags,
1285 struct vif_params *params)
1286{
1287 struct ath6kl *ar = wiphy_priv(wiphy);
1288 struct net_device *ndev;
1289 u8 if_idx, nw_type;
1290
1291 if (ar->num_vif == MAX_NUM_VIF) {
1292 ath6kl_err("Reached maximum number of supported vif\n");
1293 return ERR_PTR(-EINVAL);
1294 }
1295
1296 if (!ath6kl_is_valid_iftype(ar, type, &if_idx, &nw_type)) {
1297 ath6kl_err("Not a supported interface type\n");
1298 return ERR_PTR(-EINVAL);
1299 }
1300
1301 ndev = ath6kl_interface_add(ar, name, type, if_idx, nw_type);
1302 if (!ndev)
1303 return ERR_PTR(-ENOMEM);
1304
1305 ar->num_vif++;
1306
1307 return ndev;
1308}
1309
1310static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy,
1311 struct net_device *ndev)
1312{
1313 struct ath6kl *ar = wiphy_priv(wiphy);
1314 struct ath6kl_vif *vif = netdev_priv(ndev);
1315
1316 spin_lock_bh(&ar->list_lock);
1317 list_del(&vif->list);
1318 spin_unlock_bh(&ar->list_lock);
1319
1320 ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
1321
1322 ath6kl_deinit_if_data(vif);
1323
1324 return 0;
1325}
1326
1102static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy, 1327static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
1103 struct net_device *ndev, 1328 struct net_device *ndev,
1104 enum nl80211_iftype type, u32 *flags, 1329 enum nl80211_iftype type, u32 *flags,
1105 struct vif_params *params) 1330 struct vif_params *params)
1106{ 1331{
1107 struct ath6kl *ar = ath6kl_priv(ndev); 1332 struct ath6kl_vif *vif = netdev_priv(ndev);
1108 struct wireless_dev *wdev = ar->wdev;
1109 1333
1110 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type); 1334 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type);
1111 1335
1112 if (!ath6kl_cfg80211_ready(ar)) 1336 if (!ath6kl_cfg80211_ready(vif))
1113 return -EIO; 1337 return -EIO;
1114 1338
1115 switch (type) { 1339 switch (type) {
1116 case NL80211_IFTYPE_STATION: 1340 case NL80211_IFTYPE_STATION:
1117 ar->next_mode = INFRA_NETWORK; 1341 vif->next_mode = INFRA_NETWORK;
1118 break; 1342 break;
1119 case NL80211_IFTYPE_ADHOC: 1343 case NL80211_IFTYPE_ADHOC:
1120 ar->next_mode = ADHOC_NETWORK; 1344 vif->next_mode = ADHOC_NETWORK;
1121 break; 1345 break;
1122 case NL80211_IFTYPE_AP: 1346 case NL80211_IFTYPE_AP:
1123 ar->next_mode = AP_NETWORK; 1347 vif->next_mode = AP_NETWORK;
1124 break; 1348 break;
1125 case NL80211_IFTYPE_P2P_CLIENT: 1349 case NL80211_IFTYPE_P2P_CLIENT:
1126 ar->next_mode = INFRA_NETWORK; 1350 vif->next_mode = INFRA_NETWORK;
1127 break; 1351 break;
1128 case NL80211_IFTYPE_P2P_GO: 1352 case NL80211_IFTYPE_P2P_GO:
1129 ar->next_mode = AP_NETWORK; 1353 vif->next_mode = AP_NETWORK;
1130 break; 1354 break;
1131 default: 1355 default:
1132 ath6kl_err("invalid interface type %u\n", type); 1356 ath6kl_err("invalid interface type %u\n", type);
1133 return -EOPNOTSUPP; 1357 return -EOPNOTSUPP;
1134 } 1358 }
1135 1359
1136 wdev->iftype = type; 1360 vif->wdev.iftype = type;
1137 1361
1138 return 0; 1362 return 0;
1139} 1363}
@@ -1143,16 +1367,17 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
1143 struct cfg80211_ibss_params *ibss_param) 1367 struct cfg80211_ibss_params *ibss_param)
1144{ 1368{
1145 struct ath6kl *ar = ath6kl_priv(dev); 1369 struct ath6kl *ar = ath6kl_priv(dev);
1370 struct ath6kl_vif *vif = netdev_priv(dev);
1146 int status; 1371 int status;
1147 1372
1148 if (!ath6kl_cfg80211_ready(ar)) 1373 if (!ath6kl_cfg80211_ready(vif))
1149 return -EIO; 1374 return -EIO;
1150 1375
1151 ar->ssid_len = ibss_param->ssid_len; 1376 vif->ssid_len = ibss_param->ssid_len;
1152 memcpy(ar->ssid, ibss_param->ssid, ar->ssid_len); 1377 memcpy(vif->ssid, ibss_param->ssid, vif->ssid_len);
1153 1378
1154 if (ibss_param->channel) 1379 if (ibss_param->channel)
1155 ar->ch_hint = ibss_param->channel->center_freq; 1380 vif->ch_hint = ibss_param->channel->center_freq;
1156 1381
1157 if (ibss_param->channel_fixed) { 1382 if (ibss_param->channel_fixed) {
1158 /* 1383 /*
@@ -1164,44 +1389,45 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
1164 return -EOPNOTSUPP; 1389 return -EOPNOTSUPP;
1165 } 1390 }
1166 1391
1167 memset(ar->req_bssid, 0, sizeof(ar->req_bssid)); 1392 memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
1168 if (ibss_param->bssid && !is_broadcast_ether_addr(ibss_param->bssid)) 1393 if (ibss_param->bssid && !is_broadcast_ether_addr(ibss_param->bssid))
1169 memcpy(ar->req_bssid, ibss_param->bssid, sizeof(ar->req_bssid)); 1394 memcpy(vif->req_bssid, ibss_param->bssid,
1395 sizeof(vif->req_bssid));
1170 1396
1171 ath6kl_set_wpa_version(ar, 0); 1397 ath6kl_set_wpa_version(vif, 0);
1172 1398
1173 status = ath6kl_set_auth_type(ar, NL80211_AUTHTYPE_OPEN_SYSTEM); 1399 status = ath6kl_set_auth_type(vif, NL80211_AUTHTYPE_OPEN_SYSTEM);
1174 if (status) 1400 if (status)
1175 return status; 1401 return status;
1176 1402
1177 if (ibss_param->privacy) { 1403 if (ibss_param->privacy) {
1178 ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, true); 1404 ath6kl_set_cipher(vif, WLAN_CIPHER_SUITE_WEP40, true);
1179 ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, false); 1405 ath6kl_set_cipher(vif, WLAN_CIPHER_SUITE_WEP40, false);
1180 } else { 1406 } else {
1181 ath6kl_set_cipher(ar, 0, true); 1407 ath6kl_set_cipher(vif, 0, true);
1182 ath6kl_set_cipher(ar, 0, false); 1408 ath6kl_set_cipher(vif, 0, false);
1183 } 1409 }
1184 1410
1185 ar->nw_type = ar->next_mode; 1411 vif->nw_type = vif->next_mode;
1186 1412
1187 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, 1413 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
1188 "%s: connect called with authmode %d dot11 auth %d" 1414 "%s: connect called with authmode %d dot11 auth %d"
1189 " PW crypto %d PW crypto len %d GRP crypto %d" 1415 " PW crypto %d PW crypto len %d GRP crypto %d"
1190 " GRP crypto len %d channel hint %u\n", 1416 " GRP crypto len %d channel hint %u\n",
1191 __func__, 1417 __func__,
1192 ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto, 1418 vif->auth_mode, vif->dot11_auth_mode, vif->prwise_crypto,
1193 ar->prwise_crypto_len, ar->grp_crypto, 1419 vif->prwise_crypto_len, vif->grp_crypto,
1194 ar->grp_crypto_len, ar->ch_hint); 1420 vif->grp_crypto_len, vif->ch_hint);
1195 1421
1196 status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type, 1422 status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type,
1197 ar->dot11_auth_mode, ar->auth_mode, 1423 vif->dot11_auth_mode, vif->auth_mode,
1198 ar->prwise_crypto, 1424 vif->prwise_crypto,
1199 ar->prwise_crypto_len, 1425 vif->prwise_crypto_len,
1200 ar->grp_crypto, ar->grp_crypto_len, 1426 vif->grp_crypto, vif->grp_crypto_len,
1201 ar->ssid_len, ar->ssid, 1427 vif->ssid_len, vif->ssid,
1202 ar->req_bssid, ar->ch_hint, 1428 vif->req_bssid, vif->ch_hint,
1203 ar->connect_ctrl_flags); 1429 ar->connect_ctrl_flags);
1204 set_bit(CONNECT_PEND, &ar->flag); 1430 set_bit(CONNECT_PEND, &vif->flags);
1205 1431
1206 return 0; 1432 return 0;
1207} 1433}
@@ -1209,14 +1435,14 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
1209static int ath6kl_cfg80211_leave_ibss(struct wiphy *wiphy, 1435static int ath6kl_cfg80211_leave_ibss(struct wiphy *wiphy,
1210 struct net_device *dev) 1436 struct net_device *dev)
1211{ 1437{
1212 struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev); 1438 struct ath6kl_vif *vif = netdev_priv(dev);
1213 1439
1214 if (!ath6kl_cfg80211_ready(ar)) 1440 if (!ath6kl_cfg80211_ready(vif))
1215 return -EIO; 1441 return -EIO;
1216 1442
1217 ath6kl_disconnect(ar); 1443 ath6kl_disconnect(vif);
1218 memset(ar->ssid, 0, sizeof(ar->ssid)); 1444 memset(vif->ssid, 0, sizeof(vif->ssid));
1219 ar->ssid_len = 0; 1445 vif->ssid_len = 0;
1220 1446
1221 return 0; 1447 return 0;
1222} 1448}
@@ -1226,6 +1452,7 @@ static const u32 cipher_suites[] = {
1226 WLAN_CIPHER_SUITE_WEP104, 1452 WLAN_CIPHER_SUITE_WEP104,
1227 WLAN_CIPHER_SUITE_TKIP, 1453 WLAN_CIPHER_SUITE_TKIP,
1228 WLAN_CIPHER_SUITE_CCMP, 1454 WLAN_CIPHER_SUITE_CCMP,
1455 CCKM_KRK_CIPHER_SUITE,
1229}; 1456};
1230 1457
1231static bool is_rate_legacy(s32 rate) 1458static bool is_rate_legacy(s32 rate)
@@ -1293,21 +1520,22 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
1293 u8 *mac, struct station_info *sinfo) 1520 u8 *mac, struct station_info *sinfo)
1294{ 1521{
1295 struct ath6kl *ar = ath6kl_priv(dev); 1522 struct ath6kl *ar = ath6kl_priv(dev);
1523 struct ath6kl_vif *vif = netdev_priv(dev);
1296 long left; 1524 long left;
1297 bool sgi; 1525 bool sgi;
1298 s32 rate; 1526 s32 rate;
1299 int ret; 1527 int ret;
1300 u8 mcs; 1528 u8 mcs;
1301 1529
1302 if (memcmp(mac, ar->bssid, ETH_ALEN) != 0) 1530 if (memcmp(mac, vif->bssid, ETH_ALEN) != 0)
1303 return -ENOENT; 1531 return -ENOENT;
1304 1532
1305 if (down_interruptible(&ar->sem)) 1533 if (down_interruptible(&ar->sem))
1306 return -EBUSY; 1534 return -EBUSY;
1307 1535
1308 set_bit(STATS_UPDATE_PEND, &ar->flag); 1536 set_bit(STATS_UPDATE_PEND, &vif->flags);
1309 1537
1310 ret = ath6kl_wmi_get_stats_cmd(ar->wmi); 1538 ret = ath6kl_wmi_get_stats_cmd(ar->wmi, vif->fw_vif_idx);
1311 1539
1312 if (ret != 0) { 1540 if (ret != 0) {
1313 up(&ar->sem); 1541 up(&ar->sem);
@@ -1316,7 +1544,7 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
1316 1544
1317 left = wait_event_interruptible_timeout(ar->event_wq, 1545 left = wait_event_interruptible_timeout(ar->event_wq,
1318 !test_bit(STATS_UPDATE_PEND, 1546 !test_bit(STATS_UPDATE_PEND,
1319 &ar->flag), 1547 &vif->flags),
1320 WMI_TIMEOUT); 1548 WMI_TIMEOUT);
1321 1549
1322 up(&ar->sem); 1550 up(&ar->sem);
@@ -1326,24 +1554,24 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
1326 else if (left < 0) 1554 else if (left < 0)
1327 return left; 1555 return left;
1328 1556
1329 if (ar->target_stats.rx_byte) { 1557 if (vif->target_stats.rx_byte) {
1330 sinfo->rx_bytes = ar->target_stats.rx_byte; 1558 sinfo->rx_bytes = vif->target_stats.rx_byte;
1331 sinfo->filled |= STATION_INFO_RX_BYTES; 1559 sinfo->filled |= STATION_INFO_RX_BYTES;
1332 sinfo->rx_packets = ar->target_stats.rx_pkt; 1560 sinfo->rx_packets = vif->target_stats.rx_pkt;
1333 sinfo->filled |= STATION_INFO_RX_PACKETS; 1561 sinfo->filled |= STATION_INFO_RX_PACKETS;
1334 } 1562 }
1335 1563
1336 if (ar->target_stats.tx_byte) { 1564 if (vif->target_stats.tx_byte) {
1337 sinfo->tx_bytes = ar->target_stats.tx_byte; 1565 sinfo->tx_bytes = vif->target_stats.tx_byte;
1338 sinfo->filled |= STATION_INFO_TX_BYTES; 1566 sinfo->filled |= STATION_INFO_TX_BYTES;
1339 sinfo->tx_packets = ar->target_stats.tx_pkt; 1567 sinfo->tx_packets = vif->target_stats.tx_pkt;
1340 sinfo->filled |= STATION_INFO_TX_PACKETS; 1568 sinfo->filled |= STATION_INFO_TX_PACKETS;
1341 } 1569 }
1342 1570
1343 sinfo->signal = ar->target_stats.cs_rssi; 1571 sinfo->signal = vif->target_stats.cs_rssi;
1344 sinfo->filled |= STATION_INFO_SIGNAL; 1572 sinfo->filled |= STATION_INFO_SIGNAL;
1345 1573
1346 rate = ar->target_stats.tx_ucast_rate; 1574 rate = vif->target_stats.tx_ucast_rate;
1347 1575
1348 if (is_rate_legacy(rate)) { 1576 if (is_rate_legacy(rate)) {
1349 sinfo->txrate.legacy = rate / 100; 1577 sinfo->txrate.legacy = rate / 100;
@@ -1375,13 +1603,13 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
1375 1603
1376 sinfo->filled |= STATION_INFO_TX_BITRATE; 1604 sinfo->filled |= STATION_INFO_TX_BITRATE;
1377 1605
1378 if (test_bit(CONNECTED, &ar->flag) && 1606 if (test_bit(CONNECTED, &vif->flags) &&
1379 test_bit(DTIM_PERIOD_AVAIL, &ar->flag) && 1607 test_bit(DTIM_PERIOD_AVAIL, &vif->flags) &&
1380 ar->nw_type == INFRA_NETWORK) { 1608 vif->nw_type == INFRA_NETWORK) {
1381 sinfo->filled |= STATION_INFO_BSS_PARAM; 1609 sinfo->filled |= STATION_INFO_BSS_PARAM;
1382 sinfo->bss_param.flags = 0; 1610 sinfo->bss_param.flags = 0;
1383 sinfo->bss_param.dtim_period = ar->assoc_bss_dtim_period; 1611 sinfo->bss_param.dtim_period = vif->assoc_bss_dtim_period;
1384 sinfo->bss_param.beacon_interval = ar->assoc_bss_beacon_int; 1612 sinfo->bss_param.beacon_interval = vif->assoc_bss_beacon_int;
1385 } 1613 }
1386 1614
1387 return 0; 1615 return 0;
@@ -1391,7 +1619,9 @@ static int ath6kl_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
1391 struct cfg80211_pmksa *pmksa) 1619 struct cfg80211_pmksa *pmksa)
1392{ 1620{
1393 struct ath6kl *ar = ath6kl_priv(netdev); 1621 struct ath6kl *ar = ath6kl_priv(netdev);
1394 return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid, 1622 struct ath6kl_vif *vif = netdev_priv(netdev);
1623
1624 return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid,
1395 pmksa->pmkid, true); 1625 pmksa->pmkid, true);
1396} 1626}
1397 1627
@@ -1399,25 +1629,292 @@ static int ath6kl_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
1399 struct cfg80211_pmksa *pmksa) 1629 struct cfg80211_pmksa *pmksa)
1400{ 1630{
1401 struct ath6kl *ar = ath6kl_priv(netdev); 1631 struct ath6kl *ar = ath6kl_priv(netdev);
1402 return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid, 1632 struct ath6kl_vif *vif = netdev_priv(netdev);
1633
1634 return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid,
1403 pmksa->pmkid, false); 1635 pmksa->pmkid, false);
1404} 1636}
1405 1637
1406static int ath6kl_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev) 1638static int ath6kl_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
1407{ 1639{
1408 struct ath6kl *ar = ath6kl_priv(netdev); 1640 struct ath6kl *ar = ath6kl_priv(netdev);
1409 if (test_bit(CONNECTED, &ar->flag)) 1641 struct ath6kl_vif *vif = netdev_priv(netdev);
1410 return ath6kl_wmi_setpmkid_cmd(ar->wmi, ar->bssid, NULL, false); 1642
1643 if (test_bit(CONNECTED, &vif->flags))
1644 return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx,
1645 vif->bssid, NULL, false);
1646 return 0;
1647}
1648
1649static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
1650{
1651 struct ath6kl_vif *vif;
1652 int ret, pos, left;
1653 u32 filter = 0;
1654 u16 i;
1655 u8 mask[WOW_MASK_SIZE];
1656
1657 vif = ath6kl_vif_first(ar);
1658 if (!vif)
1659 return -EIO;
1660
1661 if (!ath6kl_cfg80211_ready(vif))
1662 return -EIO;
1663
1664 if (!test_bit(CONNECTED, &vif->flags))
1665 return -EINVAL;
1666
1667 /* Clear existing WOW patterns */
1668 for (i = 0; i < WOW_MAX_FILTERS_PER_LIST; i++)
1669 ath6kl_wmi_del_wow_pattern_cmd(ar->wmi, vif->fw_vif_idx,
1670 WOW_LIST_ID, i);
1671 /* Configure new WOW patterns */
1672 for (i = 0; i < wow->n_patterns; i++) {
1673
1674 /*
1675 * Convert given nl80211 specific mask value to equivalent
1676 * driver specific mask value and send it to the chip along
1677 * with patterns. For example, If the mask value defined in
1678 * struct cfg80211_wowlan is 0xA (equivalent binary is 1010),
1679 * then equivalent driver specific mask value is
1680 * "0xFF 0x00 0xFF 0x00".
1681 */
1682 memset(&mask, 0, sizeof(mask));
1683 for (pos = 0; pos < wow->patterns[i].pattern_len; pos++) {
1684 if (wow->patterns[i].mask[pos / 8] & (0x1 << (pos % 8)))
1685 mask[pos] = 0xFF;
1686 }
1687 /*
1688 * Note: Pattern's offset is not passed as part of wowlan
1689 * parameter from CFG layer. So it's always passed as ZERO
1690 * to the firmware. It means, given WOW patterns are always
1691 * matched from the first byte of received pkt in the firmware.
1692 */
1693 ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
1694 vif->fw_vif_idx, WOW_LIST_ID,
1695 wow->patterns[i].pattern_len,
1696 0 /* pattern offset */,
1697 wow->patterns[i].pattern, mask);
1698 if (ret)
1699 return ret;
1700 }
1701
1702 if (wow->disconnect)
1703 filter |= WOW_FILTER_OPTION_NWK_DISASSOC;
1704
1705 if (wow->magic_pkt)
1706 filter |= WOW_FILTER_OPTION_MAGIC_PACKET;
1707
1708 if (wow->gtk_rekey_failure)
1709 filter |= WOW_FILTER_OPTION_GTK_ERROR;
1710
1711 if (wow->eap_identity_req)
1712 filter |= WOW_FILTER_OPTION_EAP_REQ;
1713
1714 if (wow->four_way_handshake)
1715 filter |= WOW_FILTER_OPTION_8021X_4WAYHS;
1716
1717 ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
1718 ATH6KL_WOW_MODE_ENABLE,
1719 filter,
1720 WOW_HOST_REQ_DELAY);
1721 if (ret)
1722 return ret;
1723
1724 ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
1725 ATH6KL_HOST_MODE_ASLEEP);
1726 if (ret)
1727 return ret;
1728
1729 if (ar->tx_pending[ar->ctrl_ep]) {
1730 left = wait_event_interruptible_timeout(ar->event_wq,
1731 ar->tx_pending[ar->ctrl_ep] == 0, WMI_TIMEOUT);
1732 if (left == 0) {
1733 ath6kl_warn("clear wmi ctrl data timeout\n");
1734 ret = -ETIMEDOUT;
1735 } else if (left < 0) {
1736 ath6kl_warn("clear wmi ctrl data failed: %d\n", left);
1737 ret = left;
1738 }
1739 }
1740
1741 return ret;
1742}
1743
1744static int ath6kl_wow_resume(struct ath6kl *ar)
1745{
1746 struct ath6kl_vif *vif;
1747 int ret;
1748
1749 vif = ath6kl_vif_first(ar);
1750 if (!vif)
1751 return -EIO;
1752
1753 ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
1754 ATH6KL_HOST_MODE_AWAKE);
1755 return ret;
1756}
1757
1758int ath6kl_cfg80211_suspend(struct ath6kl *ar,
1759 enum ath6kl_cfg_suspend_mode mode,
1760 struct cfg80211_wowlan *wow)
1761{
1762 int ret;
1763
1764 switch (mode) {
1765 case ATH6KL_CFG_SUSPEND_WOW:
1766
1767 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "wow mode suspend\n");
1768
1769 /* Flush all non control pkts in TX path */
1770 ath6kl_tx_data_cleanup(ar);
1771
1772 ret = ath6kl_wow_suspend(ar, wow);
1773 if (ret) {
1774 ath6kl_err("wow suspend failed: %d\n", ret);
1775 return ret;
1776 }
1777 ar->state = ATH6KL_STATE_WOW;
1778 break;
1779
1780 case ATH6KL_CFG_SUSPEND_DEEPSLEEP:
1781
1782 ath6kl_cfg80211_stop(ar);
1783
1784 /* save the current power mode before enabling power save */
1785 ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
1786
1787 ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER);
1788 if (ret) {
1789 ath6kl_warn("wmi powermode command failed during suspend: %d\n",
1790 ret);
1791 }
1792
1793 ar->state = ATH6KL_STATE_DEEPSLEEP;
1794
1795 break;
1796
1797 case ATH6KL_CFG_SUSPEND_CUTPOWER:
1798
1799 ath6kl_cfg80211_stop(ar);
1800
1801 if (ar->state == ATH6KL_STATE_OFF) {
1802 ath6kl_dbg(ATH6KL_DBG_SUSPEND,
1803 "suspend hw off, no action for cutpower\n");
1804 break;
1805 }
1806
1807 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "suspend cutting power\n");
1808
1809 ret = ath6kl_init_hw_stop(ar);
1810 if (ret) {
1811 ath6kl_warn("failed to stop hw during suspend: %d\n",
1812 ret);
1813 }
1814
1815 ar->state = ATH6KL_STATE_CUTPOWER;
1816
1817 break;
1818
1819 default:
1820 break;
1821 }
1822
1823 return 0;
1824}
1825
1826int ath6kl_cfg80211_resume(struct ath6kl *ar)
1827{
1828 int ret;
1829
1830 switch (ar->state) {
1831 case ATH6KL_STATE_WOW:
1832 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "wow mode resume\n");
1833
1834 ret = ath6kl_wow_resume(ar);
1835 if (ret) {
1836 ath6kl_warn("wow mode resume failed: %d\n", ret);
1837 return ret;
1838 }
1839
1840 ar->state = ATH6KL_STATE_ON;
1841 break;
1842
1843 case ATH6KL_STATE_DEEPSLEEP:
1844 if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) {
1845 ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0,
1846 ar->wmi->saved_pwr_mode);
1847 if (ret) {
1848 ath6kl_warn("wmi powermode command failed during resume: %d\n",
1849 ret);
1850 }
1851 }
1852
1853 ar->state = ATH6KL_STATE_ON;
1854
1855 break;
1856
1857 case ATH6KL_STATE_CUTPOWER:
1858 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "resume restoring power\n");
1859
1860 ret = ath6kl_init_hw_start(ar);
1861 if (ret) {
1862 ath6kl_warn("Failed to boot hw in resume: %d\n", ret);
1863 return ret;
1864 }
1865 break;
1866
1867 default:
1868 break;
1869 }
1870
1411 return 0; 1871 return 0;
1412} 1872}
1413 1873
1414#ifdef CONFIG_PM 1874#ifdef CONFIG_PM
1415static int ar6k_cfg80211_suspend(struct wiphy *wiphy, 1875
1876/* hif layer decides what suspend mode to use */
1877static int __ath6kl_cfg80211_suspend(struct wiphy *wiphy,
1416 struct cfg80211_wowlan *wow) 1878 struct cfg80211_wowlan *wow)
1417{ 1879{
1418 struct ath6kl *ar = wiphy_priv(wiphy); 1880 struct ath6kl *ar = wiphy_priv(wiphy);
1419 1881
1420 return ath6kl_hif_suspend(ar); 1882 return ath6kl_hif_suspend(ar, wow);
1883}
1884
1885static int __ath6kl_cfg80211_resume(struct wiphy *wiphy)
1886{
1887 struct ath6kl *ar = wiphy_priv(wiphy);
1888
1889 return ath6kl_hif_resume(ar);
1890}
1891
1892/*
1893 * FIXME: WOW suspend mode is selected if the host sdio controller supports
1894 * both sdio irq wake up and keep power. The target pulls sdio data line to
1895 * wake up the host when WOW pattern matches. This causes sdio irq handler
1896 * is being called in the host side which internally hits ath6kl's RX path.
1897 *
1898 * Since sdio interrupt is not disabled, RX path executes even before
1899 * the host executes the actual resume operation from PM module.
1900 *
1901 * In the current scenario, WOW resume should happen before start processing
1902 * any data from the target. So It's required to perform WOW resume in RX path.
1903 * Ideally we should perform WOW resume only in the actual platform
1904 * resume path. This area needs bit rework to avoid WOW resume in RX path.
1905 *
1906 * ath6kl_check_wow_status() is called from ath6kl_rx().
1907 */
1908void ath6kl_check_wow_status(struct ath6kl *ar)
1909{
1910 if (ar->state == ATH6KL_STATE_WOW)
1911 ath6kl_cfg80211_resume(ar);
1912}
1913
1914#else
1915
1916void ath6kl_check_wow_status(struct ath6kl *ar)
1917{
1421} 1918}
1422#endif 1919#endif
1423 1920
@@ -1425,14 +1922,14 @@ static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev,
1425 struct ieee80211_channel *chan, 1922 struct ieee80211_channel *chan,
1426 enum nl80211_channel_type channel_type) 1923 enum nl80211_channel_type channel_type)
1427{ 1924{
1428 struct ath6kl *ar = ath6kl_priv(dev); 1925 struct ath6kl_vif *vif = netdev_priv(dev);
1429 1926
1430 if (!ath6kl_cfg80211_ready(ar)) 1927 if (!ath6kl_cfg80211_ready(vif))
1431 return -EIO; 1928 return -EIO;
1432 1929
1433 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n", 1930 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n",
1434 __func__, chan->center_freq, chan->hw_value); 1931 __func__, chan->center_freq, chan->hw_value);
1435 ar->next_chan = chan->center_freq; 1932 vif->next_chan = chan->center_freq;
1436 1933
1437 return 0; 1934 return 0;
1438} 1935}
@@ -1444,9 +1941,10 @@ static bool ath6kl_is_p2p_ie(const u8 *pos)
1444 pos[4] == 0x9a && pos[5] == 0x09; 1941 pos[4] == 0x9a && pos[5] == 0x09;
1445} 1942}
1446 1943
1447static int ath6kl_set_ap_probe_resp_ies(struct ath6kl *ar, const u8 *ies, 1944static int ath6kl_set_ap_probe_resp_ies(struct ath6kl_vif *vif,
1448 size_t ies_len) 1945 const u8 *ies, size_t ies_len)
1449{ 1946{
1947 struct ath6kl *ar = vif->ar;
1450 const u8 *pos; 1948 const u8 *pos;
1451 u8 *buf = NULL; 1949 u8 *buf = NULL;
1452 size_t len = 0; 1950 size_t len = 0;
@@ -1473,8 +1971,8 @@ static int ath6kl_set_ap_probe_resp_ies(struct ath6kl *ar, const u8 *ies,
1473 } 1971 }
1474 } 1972 }
1475 1973
1476 ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_RESP, 1974 ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
1477 buf, len); 1975 WMI_FRAME_PROBE_RESP, buf, len);
1478 kfree(buf); 1976 kfree(buf);
1479 return ret; 1977 return ret;
1480} 1978}
@@ -1483,6 +1981,7 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
1483 struct beacon_parameters *info, bool add) 1981 struct beacon_parameters *info, bool add)
1484{ 1982{
1485 struct ath6kl *ar = ath6kl_priv(dev); 1983 struct ath6kl *ar = ath6kl_priv(dev);
1984 struct ath6kl_vif *vif = netdev_priv(dev);
1486 struct ieee80211_mgmt *mgmt; 1985 struct ieee80211_mgmt *mgmt;
1487 u8 *ies; 1986 u8 *ies;
1488 int ies_len; 1987 int ies_len;
@@ -1492,27 +1991,29 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
1492 1991
1493 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: add=%d\n", __func__, add); 1992 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: add=%d\n", __func__, add);
1494 1993
1495 if (!ath6kl_cfg80211_ready(ar)) 1994 if (!ath6kl_cfg80211_ready(vif))
1496 return -EIO; 1995 return -EIO;
1497 1996
1498 if (ar->next_mode != AP_NETWORK) 1997 if (vif->next_mode != AP_NETWORK)
1499 return -EOPNOTSUPP; 1998 return -EOPNOTSUPP;
1500 1999
1501 if (info->beacon_ies) { 2000 if (info->beacon_ies) {
1502 res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_BEACON, 2001 res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
2002 WMI_FRAME_BEACON,
1503 info->beacon_ies, 2003 info->beacon_ies,
1504 info->beacon_ies_len); 2004 info->beacon_ies_len);
1505 if (res) 2005 if (res)
1506 return res; 2006 return res;
1507 } 2007 }
1508 if (info->proberesp_ies) { 2008 if (info->proberesp_ies) {
1509 res = ath6kl_set_ap_probe_resp_ies(ar, info->proberesp_ies, 2009 res = ath6kl_set_ap_probe_resp_ies(vif, info->proberesp_ies,
1510 info->proberesp_ies_len); 2010 info->proberesp_ies_len);
1511 if (res) 2011 if (res)
1512 return res; 2012 return res;
1513 } 2013 }
1514 if (info->assocresp_ies) { 2014 if (info->assocresp_ies) {
1515 res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_ASSOC_RESP, 2015 res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
2016 WMI_FRAME_ASSOC_RESP,
1516 info->assocresp_ies, 2017 info->assocresp_ies,
1517 info->assocresp_ies_len); 2018 info->assocresp_ies_len);
1518 if (res) 2019 if (res)
@@ -1539,12 +2040,12 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
1539 2040
1540 if (info->ssid == NULL) 2041 if (info->ssid == NULL)
1541 return -EINVAL; 2042 return -EINVAL;
1542 memcpy(ar->ssid, info->ssid, info->ssid_len); 2043 memcpy(vif->ssid, info->ssid, info->ssid_len);
1543 ar->ssid_len = info->ssid_len; 2044 vif->ssid_len = info->ssid_len;
1544 if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE) 2045 if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE)
1545 return -EOPNOTSUPP; /* TODO */ 2046 return -EOPNOTSUPP; /* TODO */
1546 2047
1547 ar->dot11_auth_mode = OPEN_AUTH; 2048 vif->dot11_auth_mode = OPEN_AUTH;
1548 2049
1549 memset(&p, 0, sizeof(p)); 2050 memset(&p, 0, sizeof(p));
1550 2051
@@ -1566,7 +2067,7 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
1566 } 2067 }
1567 if (p.auth_mode == 0) 2068 if (p.auth_mode == 0)
1568 p.auth_mode = NONE_AUTH; 2069 p.auth_mode = NONE_AUTH;
1569 ar->auth_mode = p.auth_mode; 2070 vif->auth_mode = p.auth_mode;
1570 2071
1571 for (i = 0; i < info->crypto.n_ciphers_pairwise; i++) { 2072 for (i = 0; i < info->crypto.n_ciphers_pairwise; i++) {
1572 switch (info->crypto.ciphers_pairwise[i]) { 2073 switch (info->crypto.ciphers_pairwise[i]) {
@@ -1584,9 +2085,9 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
1584 } 2085 }
1585 if (p.prwise_crypto_type == 0) { 2086 if (p.prwise_crypto_type == 0) {
1586 p.prwise_crypto_type = NONE_CRYPT; 2087 p.prwise_crypto_type = NONE_CRYPT;
1587 ath6kl_set_cipher(ar, 0, true); 2088 ath6kl_set_cipher(vif, 0, true);
1588 } else if (info->crypto.n_ciphers_pairwise == 1) 2089 } else if (info->crypto.n_ciphers_pairwise == 1)
1589 ath6kl_set_cipher(ar, info->crypto.ciphers_pairwise[0], true); 2090 ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true);
1590 2091
1591 switch (info->crypto.cipher_group) { 2092 switch (info->crypto.cipher_group) {
1592 case WLAN_CIPHER_SUITE_WEP40: 2093 case WLAN_CIPHER_SUITE_WEP40:
@@ -1603,17 +2104,17 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
1603 p.grp_crypto_type = NONE_CRYPT; 2104 p.grp_crypto_type = NONE_CRYPT;
1604 break; 2105 break;
1605 } 2106 }
1606 ath6kl_set_cipher(ar, info->crypto.cipher_group, false); 2107 ath6kl_set_cipher(vif, info->crypto.cipher_group, false);
1607 2108
1608 p.nw_type = AP_NETWORK; 2109 p.nw_type = AP_NETWORK;
1609 ar->nw_type = ar->next_mode; 2110 vif->nw_type = vif->next_mode;
1610 2111
1611 p.ssid_len = ar->ssid_len; 2112 p.ssid_len = vif->ssid_len;
1612 memcpy(p.ssid, ar->ssid, ar->ssid_len); 2113 memcpy(p.ssid, vif->ssid, vif->ssid_len);
1613 p.dot11_auth_mode = ar->dot11_auth_mode; 2114 p.dot11_auth_mode = vif->dot11_auth_mode;
1614 p.ch = cpu_to_le16(ar->next_chan); 2115 p.ch = cpu_to_le16(vif->next_chan);
1615 2116
1616 res = ath6kl_wmi_ap_profile_commit(ar->wmi, &p); 2117 res = ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, &p);
1617 if (res < 0) 2118 if (res < 0)
1618 return res; 2119 return res;
1619 2120
@@ -1635,14 +2136,15 @@ static int ath6kl_set_beacon(struct wiphy *wiphy, struct net_device *dev,
1635static int ath6kl_del_beacon(struct wiphy *wiphy, struct net_device *dev) 2136static int ath6kl_del_beacon(struct wiphy *wiphy, struct net_device *dev)
1636{ 2137{
1637 struct ath6kl *ar = ath6kl_priv(dev); 2138 struct ath6kl *ar = ath6kl_priv(dev);
2139 struct ath6kl_vif *vif = netdev_priv(dev);
1638 2140
1639 if (ar->nw_type != AP_NETWORK) 2141 if (vif->nw_type != AP_NETWORK)
1640 return -EOPNOTSUPP; 2142 return -EOPNOTSUPP;
1641 if (!test_bit(CONNECTED, &ar->flag)) 2143 if (!test_bit(CONNECTED, &vif->flags))
1642 return -ENOTCONN; 2144 return -ENOTCONN;
1643 2145
1644 ath6kl_wmi_disconnect_cmd(ar->wmi); 2146 ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
1645 clear_bit(CONNECTED, &ar->flag); 2147 clear_bit(CONNECTED, &vif->flags);
1646 2148
1647 return 0; 2149 return 0;
1648} 2150}
@@ -1651,8 +2153,9 @@ static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
1651 u8 *mac, struct station_parameters *params) 2153 u8 *mac, struct station_parameters *params)
1652{ 2154{
1653 struct ath6kl *ar = ath6kl_priv(dev); 2155 struct ath6kl *ar = ath6kl_priv(dev);
2156 struct ath6kl_vif *vif = netdev_priv(dev);
1654 2157
1655 if (ar->nw_type != AP_NETWORK) 2158 if (vif->nw_type != AP_NETWORK)
1656 return -EOPNOTSUPP; 2159 return -EOPNOTSUPP;
1657 2160
1658 /* Use this only for authorizing/unauthorizing a station */ 2161 /* Use this only for authorizing/unauthorizing a station */
@@ -1660,10 +2163,10 @@ static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
1660 return -EOPNOTSUPP; 2163 return -EOPNOTSUPP;
1661 2164
1662 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED)) 2165 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))
1663 return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_AUTHORIZE, 2166 return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx,
1664 mac, 0); 2167 WMI_AP_MLME_AUTHORIZE, mac, 0);
1665 return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_UNAUTHORIZE, mac, 2168 return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx,
1666 0); 2169 WMI_AP_MLME_UNAUTHORIZE, mac, 0);
1667} 2170}
1668 2171
1669static int ath6kl_remain_on_channel(struct wiphy *wiphy, 2172static int ath6kl_remain_on_channel(struct wiphy *wiphy,
@@ -1674,13 +2177,20 @@ static int ath6kl_remain_on_channel(struct wiphy *wiphy,
1674 u64 *cookie) 2177 u64 *cookie)
1675{ 2178{
1676 struct ath6kl *ar = ath6kl_priv(dev); 2179 struct ath6kl *ar = ath6kl_priv(dev);
2180 struct ath6kl_vif *vif = netdev_priv(dev);
2181 u32 id;
1677 2182
1678 /* TODO: if already pending or ongoing remain-on-channel, 2183 /* TODO: if already pending or ongoing remain-on-channel,
1679 * return -EBUSY */ 2184 * return -EBUSY */
1680 *cookie = 1; /* only a single pending request is supported */ 2185 id = ++vif->last_roc_id;
2186 if (id == 0) {
2187 /* Do not use 0 as the cookie value */
2188 id = ++vif->last_roc_id;
2189 }
2190 *cookie = id;
1681 2191
1682 return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, chan->center_freq, 2192 return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx,
1683 duration); 2193 chan->center_freq, duration);
1684} 2194}
1685 2195
1686static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy, 2196static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy,
@@ -1688,16 +2198,20 @@ static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy,
1688 u64 cookie) 2198 u64 cookie)
1689{ 2199{
1690 struct ath6kl *ar = ath6kl_priv(dev); 2200 struct ath6kl *ar = ath6kl_priv(dev);
2201 struct ath6kl_vif *vif = netdev_priv(dev);
1691 2202
1692 if (cookie != 1) 2203 if (cookie != vif->last_roc_id)
1693 return -ENOENT; 2204 return -ENOENT;
2205 vif->last_cancel_roc_id = cookie;
1694 2206
1695 return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi); 2207 return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx);
1696} 2208}
1697 2209
1698static int ath6kl_send_go_probe_resp(struct ath6kl *ar, const u8 *buf, 2210static int ath6kl_send_go_probe_resp(struct ath6kl_vif *vif,
1699 size_t len, unsigned int freq) 2211 const u8 *buf, size_t len,
2212 unsigned int freq)
1700{ 2213{
2214 struct ath6kl *ar = vif->ar;
1701 const u8 *pos; 2215 const u8 *pos;
1702 u8 *p2p; 2216 u8 *p2p;
1703 int p2p_len; 2217 int p2p_len;
@@ -1724,8 +2238,8 @@ static int ath6kl_send_go_probe_resp(struct ath6kl *ar, const u8 *buf,
1724 pos += 2 + pos[1]; 2238 pos += 2 + pos[1];
1725 } 2239 }
1726 2240
1727 ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, freq, mgmt->da, 2241 ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, vif->fw_vif_idx, freq,
1728 p2p, p2p_len); 2242 mgmt->da, p2p, p2p_len);
1729 kfree(p2p); 2243 kfree(p2p);
1730 return ret; 2244 return ret;
1731} 2245}
@@ -1738,33 +2252,35 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
1738 bool dont_wait_for_ack, u64 *cookie) 2252 bool dont_wait_for_ack, u64 *cookie)
1739{ 2253{
1740 struct ath6kl *ar = ath6kl_priv(dev); 2254 struct ath6kl *ar = ath6kl_priv(dev);
2255 struct ath6kl_vif *vif = netdev_priv(dev);
1741 u32 id; 2256 u32 id;
1742 const struct ieee80211_mgmt *mgmt; 2257 const struct ieee80211_mgmt *mgmt;
1743 2258
1744 mgmt = (const struct ieee80211_mgmt *) buf; 2259 mgmt = (const struct ieee80211_mgmt *) buf;
1745 if (buf + len >= mgmt->u.probe_resp.variable && 2260 if (buf + len >= mgmt->u.probe_resp.variable &&
1746 ar->nw_type == AP_NETWORK && test_bit(CONNECTED, &ar->flag) && 2261 vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) &&
1747 ieee80211_is_probe_resp(mgmt->frame_control)) { 2262 ieee80211_is_probe_resp(mgmt->frame_control)) {
1748 /* 2263 /*
1749 * Send Probe Response frame in AP mode using a separate WMI 2264 * Send Probe Response frame in AP mode using a separate WMI
1750 * command to allow the target to fill in the generic IEs. 2265 * command to allow the target to fill in the generic IEs.
1751 */ 2266 */
1752 *cookie = 0; /* TX status not supported */ 2267 *cookie = 0; /* TX status not supported */
1753 return ath6kl_send_go_probe_resp(ar, buf, len, 2268 return ath6kl_send_go_probe_resp(vif, buf, len,
1754 chan->center_freq); 2269 chan->center_freq);
1755 } 2270 }
1756 2271
1757 id = ar->send_action_id++; 2272 id = vif->send_action_id++;
1758 if (id == 0) { 2273 if (id == 0) {
1759 /* 2274 /*
1760 * 0 is a reserved value in the WMI command and shall not be 2275 * 0 is a reserved value in the WMI command and shall not be
1761 * used for the command. 2276 * used for the command.
1762 */ 2277 */
1763 id = ar->send_action_id++; 2278 id = vif->send_action_id++;
1764 } 2279 }
1765 2280
1766 *cookie = id; 2281 *cookie = id;
1767 return ath6kl_wmi_send_action_cmd(ar->wmi, id, chan->center_freq, wait, 2282 return ath6kl_wmi_send_action_cmd(ar->wmi, vif->fw_vif_idx, id,
2283 chan->center_freq, wait,
1768 buf, len); 2284 buf, len);
1769} 2285}
1770 2286
@@ -1772,7 +2288,7 @@ static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
1772 struct net_device *dev, 2288 struct net_device *dev,
1773 u16 frame_type, bool reg) 2289 u16 frame_type, bool reg)
1774{ 2290{
1775 struct ath6kl *ar = ath6kl_priv(dev); 2291 struct ath6kl_vif *vif = netdev_priv(dev);
1776 2292
1777 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: frame_type=0x%x reg=%d\n", 2293 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: frame_type=0x%x reg=%d\n",
1778 __func__, frame_type, reg); 2294 __func__, frame_type, reg);
@@ -1782,7 +2298,7 @@ static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
1782 * we cannot send WMI_PROBE_REQ_REPORT_CMD here. Instead, we 2298 * we cannot send WMI_PROBE_REQ_REPORT_CMD here. Instead, we
1783 * hardcode target to report Probe Request frames all the time. 2299 * hardcode target to report Probe Request frames all the time.
1784 */ 2300 */
1785 ar->probe_req_report = reg; 2301 vif->probe_req_report = reg;
1786 } 2302 }
1787} 2303}
1788 2304
@@ -1809,6 +2325,8 @@ ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = {
1809}; 2325};
1810 2326
1811static struct cfg80211_ops ath6kl_cfg80211_ops = { 2327static struct cfg80211_ops ath6kl_cfg80211_ops = {
2328 .add_virtual_intf = ath6kl_cfg80211_add_iface,
2329 .del_virtual_intf = ath6kl_cfg80211_del_iface,
1812 .change_virtual_intf = ath6kl_cfg80211_change_iface, 2330 .change_virtual_intf = ath6kl_cfg80211_change_iface,
1813 .scan = ath6kl_cfg80211_scan, 2331 .scan = ath6kl_cfg80211_scan,
1814 .connect = ath6kl_cfg80211_connect, 2332 .connect = ath6kl_cfg80211_connect,
@@ -1829,7 +2347,8 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
1829 .flush_pmksa = ath6kl_flush_pmksa, 2347 .flush_pmksa = ath6kl_flush_pmksa,
1830 CFG80211_TESTMODE_CMD(ath6kl_tm_cmd) 2348 CFG80211_TESTMODE_CMD(ath6kl_tm_cmd)
1831#ifdef CONFIG_PM 2349#ifdef CONFIG_PM
1832 .suspend = ar6k_cfg80211_suspend, 2350 .suspend = __ath6kl_cfg80211_suspend,
2351 .resume = __ath6kl_cfg80211_resume,
1833#endif 2352#endif
1834 .set_channel = ath6kl_set_channel, 2353 .set_channel = ath6kl_set_channel,
1835 .add_beacon = ath6kl_add_beacon, 2354 .add_beacon = ath6kl_add_beacon,
@@ -1842,76 +2361,269 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
1842 .mgmt_frame_register = ath6kl_mgmt_frame_register, 2361 .mgmt_frame_register = ath6kl_mgmt_frame_register,
1843}; 2362};
1844 2363
1845struct wireless_dev *ath6kl_cfg80211_init(struct device *dev) 2364void ath6kl_cfg80211_stop(struct ath6kl *ar)
1846{ 2365{
1847 int ret = 0; 2366 struct ath6kl_vif *vif;
1848 struct wireless_dev *wdev;
1849 struct ath6kl *ar;
1850 2367
1851 wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); 2368 /* FIXME: for multi vif */
1852 if (!wdev) { 2369 vif = ath6kl_vif_first(ar);
1853 ath6kl_err("couldn't allocate wireless device\n"); 2370 if (!vif) {
1854 return NULL; 2371 /* save the current power mode before enabling power save */
2372 ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
2373
2374 if (ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER) != 0)
2375 ath6kl_warn("ath6kl_deep_sleep_enable: "
2376 "wmi_powermode_cmd failed\n");
2377 return;
1855 } 2378 }
1856 2379
2380 switch (vif->sme_state) {
2381 case SME_CONNECTING:
2382 cfg80211_connect_result(vif->ndev, vif->bssid, NULL, 0,
2383 NULL, 0,
2384 WLAN_STATUS_UNSPECIFIED_FAILURE,
2385 GFP_KERNEL);
2386 break;
2387 case SME_CONNECTED:
2388 default:
2389 /*
2390 * FIXME: oddly enough smeState is in DISCONNECTED during
2391 * suspend, why? Need to send disconnected event in that
2392 * state.
2393 */
2394 cfg80211_disconnected(vif->ndev, 0, NULL, 0, GFP_KERNEL);
2395 break;
2396 }
2397
2398 if (test_bit(CONNECTED, &vif->flags) ||
2399 test_bit(CONNECT_PEND, &vif->flags))
2400 ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
2401
2402 vif->sme_state = SME_DISCONNECTED;
2403 clear_bit(CONNECTED, &vif->flags);
2404 clear_bit(CONNECT_PEND, &vif->flags);
2405
2406 /* disable scanning */
2407 if (ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, 0xFFFF, 0, 0,
2408 0, 0, 0, 0, 0, 0, 0) != 0)
2409 printk(KERN_WARNING "ath6kl: failed to disable scan "
2410 "during suspend\n");
2411
2412 ath6kl_cfg80211_scan_complete_event(vif, true);
2413}
2414
2415struct ath6kl *ath6kl_core_alloc(struct device *dev)
2416{
2417 struct ath6kl *ar;
2418 struct wiphy *wiphy;
2419 u8 ctr;
2420
1857 /* create a new wiphy for use with cfg80211 */ 2421 /* create a new wiphy for use with cfg80211 */
1858 wdev->wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl)); 2422 wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
1859 if (!wdev->wiphy) { 2423
2424 if (!wiphy) {
1860 ath6kl_err("couldn't allocate wiphy device\n"); 2425 ath6kl_err("couldn't allocate wiphy device\n");
1861 kfree(wdev);
1862 return NULL; 2426 return NULL;
1863 } 2427 }
1864 2428
1865 ar = wiphy_priv(wdev->wiphy); 2429 ar = wiphy_priv(wiphy);
1866 ar->p2p = !!ath6kl_p2p; 2430 if (!multi_norm_if_support)
2431 ar->p2p = !!ath6kl_p2p;
2432 ar->wiphy = wiphy;
2433 ar->dev = dev;
2434
2435 if (multi_norm_if_support)
2436 ar->max_norm_iface = 2;
2437 else
2438 ar->max_norm_iface = 1;
2439
2440 /* FIXME: Remove this once the multivif support is enabled */
2441 ar->max_norm_iface = 1;
2442
2443 spin_lock_init(&ar->lock);
2444 spin_lock_init(&ar->mcastpsq_lock);
2445 spin_lock_init(&ar->list_lock);
1867 2446
1868 wdev->wiphy->mgmt_stypes = ath6kl_mgmt_stypes; 2447 init_waitqueue_head(&ar->event_wq);
2448 sema_init(&ar->sem, 1);
1869 2449
1870 wdev->wiphy->max_remain_on_channel_duration = 5000; 2450 INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue);
2451 INIT_LIST_HEAD(&ar->vif_list);
2452
2453 clear_bit(WMI_ENABLED, &ar->flag);
2454 clear_bit(SKIP_SCAN, &ar->flag);
2455 clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
2456
2457 ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL;
2458 ar->listen_intvl_b = 0;
2459 ar->tx_pwr = 0;
2460
2461 ar->intra_bss = 1;
2462 memset(&ar->sc_params, 0, sizeof(ar->sc_params));
2463 ar->sc_params.short_scan_ratio = WMI_SHORTSCANRATIO_DEFAULT;
2464 ar->sc_params.scan_ctrl_flags = DEFAULT_SCAN_CTRL_FLAGS;
2465 ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD;
2466
2467 ar->state = ATH6KL_STATE_OFF;
2468
2469 memset((u8 *)ar->sta_list, 0,
2470 AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));
2471
2472 /* Init the PS queues */
2473 for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
2474 spin_lock_init(&ar->sta_list[ctr].psq_lock);
2475 skb_queue_head_init(&ar->sta_list[ctr].psq);
2476 }
2477
2478 skb_queue_head_init(&ar->mcastpsq);
2479
2480 memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
2481
2482 return ar;
2483}
2484
2485int ath6kl_register_ieee80211_hw(struct ath6kl *ar)
2486{
2487 struct wiphy *wiphy = ar->wiphy;
2488 int ret;
2489
2490 wiphy->mgmt_stypes = ath6kl_mgmt_stypes;
2491
2492 wiphy->max_remain_on_channel_duration = 5000;
1871 2493
1872 /* set device pointer for wiphy */ 2494 /* set device pointer for wiphy */
1873 set_wiphy_dev(wdev->wiphy, dev); 2495 set_wiphy_dev(wiphy, ar->dev);
1874 2496
1875 wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 2497 wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
1876 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP); 2498 BIT(NL80211_IFTYPE_ADHOC) |
2499 BIT(NL80211_IFTYPE_AP);
1877 if (ar->p2p) { 2500 if (ar->p2p) {
1878 wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) | 2501 wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) |
1879 BIT(NL80211_IFTYPE_P2P_CLIENT); 2502 BIT(NL80211_IFTYPE_P2P_CLIENT);
1880 } 2503 }
1881 /* max num of ssids that can be probed during scanning */
1882 wdev->wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
1883 wdev->wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
1884 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
1885 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
1886 wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
1887
1888 wdev->wiphy->cipher_suites = cipher_suites;
1889 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
1890 2504
1891 ret = wiphy_register(wdev->wiphy); 2505 /* max num of ssids that can be probed during scanning */
2506 wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
2507 wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
2508 wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
2509 wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
2510 wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
2511
2512 wiphy->cipher_suites = cipher_suites;
2513 wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
2514
2515 wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
2516 WIPHY_WOWLAN_DISCONNECT |
2517 WIPHY_WOWLAN_GTK_REKEY_FAILURE |
2518 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
2519 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
2520 WIPHY_WOWLAN_4WAY_HANDSHAKE;
2521 wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST;
2522 wiphy->wowlan.pattern_min_len = 1;
2523 wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE;
2524
2525 ret = wiphy_register(wiphy);
1892 if (ret < 0) { 2526 if (ret < 0) {
1893 ath6kl_err("couldn't register wiphy device\n"); 2527 ath6kl_err("couldn't register wiphy device\n");
1894 wiphy_free(wdev->wiphy); 2528 return ret;
1895 kfree(wdev);
1896 return NULL;
1897 } 2529 }
1898 2530
1899 return wdev; 2531 return 0;
1900} 2532}
1901 2533
1902void ath6kl_cfg80211_deinit(struct ath6kl *ar) 2534static int ath6kl_init_if_data(struct ath6kl_vif *vif)
1903{ 2535{
1904 struct wireless_dev *wdev = ar->wdev; 2536 vif->aggr_cntxt = aggr_init(vif->ndev);
1905 2537 if (!vif->aggr_cntxt) {
1906 if (ar->scan_req) { 2538 ath6kl_err("failed to initialize aggr\n");
1907 cfg80211_scan_done(ar->scan_req, true); 2539 return -ENOMEM;
1908 ar->scan_req = NULL;
1909 } 2540 }
1910 2541
1911 if (!wdev) 2542 setup_timer(&vif->disconnect_timer, disconnect_timer_handler,
1912 return; 2543 (unsigned long) vif->ndev);
2544 set_bit(WMM_ENABLED, &vif->flags);
2545 spin_lock_init(&vif->if_lock);
2546
2547 return 0;
2548}
2549
2550void ath6kl_deinit_if_data(struct ath6kl_vif *vif)
2551{
2552 struct ath6kl *ar = vif->ar;
2553
2554 aggr_module_destroy(vif->aggr_cntxt);
2555
2556 ar->avail_idx_map |= BIT(vif->fw_vif_idx);
1913 2557
1914 wiphy_unregister(wdev->wiphy); 2558 if (vif->nw_type == ADHOC_NETWORK)
1915 wiphy_free(wdev->wiphy); 2559 ar->ibss_if_active = false;
1916 kfree(wdev); 2560
2561 unregister_netdevice(vif->ndev);
2562
2563 ar->num_vif--;
2564}
2565
2566struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
2567 enum nl80211_iftype type, u8 fw_vif_idx,
2568 u8 nw_type)
2569{
2570 struct net_device *ndev;
2571 struct ath6kl_vif *vif;
2572
2573 ndev = alloc_netdev(sizeof(*vif), name, ether_setup);
2574 if (!ndev)
2575 return NULL;
2576
2577 vif = netdev_priv(ndev);
2578 ndev->ieee80211_ptr = &vif->wdev;
2579 vif->wdev.wiphy = ar->wiphy;
2580 vif->ar = ar;
2581 vif->ndev = ndev;
2582 SET_NETDEV_DEV(ndev, wiphy_dev(vif->wdev.wiphy));
2583 vif->wdev.netdev = ndev;
2584 vif->wdev.iftype = type;
2585 vif->fw_vif_idx = fw_vif_idx;
2586 vif->nw_type = vif->next_mode = nw_type;
2587
2588 memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
2589 if (fw_vif_idx != 0)
2590 ndev->dev_addr[0] = (ndev->dev_addr[0] ^ (1 << fw_vif_idx)) |
2591 0x2;
2592
2593 init_netdev(ndev);
2594
2595 ath6kl_init_control_info(vif);
2596
2597 /* TODO: Pass interface specific pointer instead of ar */
2598 if (ath6kl_init_if_data(vif))
2599 goto err;
2600
2601 if (register_netdevice(ndev))
2602 goto err;
2603
2604 ar->avail_idx_map &= ~BIT(fw_vif_idx);
2605 vif->sme_state = SME_DISCONNECTED;
2606 set_bit(WLAN_ENABLED, &vif->flags);
2607 ar->wlan_pwr_state = WLAN_POWER_STATE_ON;
2608 set_bit(NETDEV_REGISTERED, &vif->flags);
2609
2610 if (type == NL80211_IFTYPE_ADHOC)
2611 ar->ibss_if_active = true;
2612
2613 spin_lock_bh(&ar->list_lock);
2614 list_add_tail(&vif->list, &ar->vif_list);
2615 spin_unlock_bh(&ar->list_lock);
2616
2617 return ndev;
2618
2619err:
2620 aggr_module_destroy(vif->aggr_cntxt);
2621 free_netdev(ndev);
2622 return NULL;
2623}
2624
2625void ath6kl_deinit_ieee80211_hw(struct ath6kl *ar)
2626{
2627 wiphy_unregister(ar->wiphy);
2628 wiphy_free(ar->wiphy);
1917} 2629}
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
index a84adc249c61..59fa9d859def 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.h
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -17,23 +17,41 @@
17#ifndef ATH6KL_CFG80211_H 17#ifndef ATH6KL_CFG80211_H
18#define ATH6KL_CFG80211_H 18#define ATH6KL_CFG80211_H
19 19
20struct wireless_dev *ath6kl_cfg80211_init(struct device *dev); 20enum ath6kl_cfg_suspend_mode {
21void ath6kl_cfg80211_deinit(struct ath6kl *ar); 21 ATH6KL_CFG_SUSPEND_DEEPSLEEP,
22 ATH6KL_CFG_SUSPEND_CUTPOWER,
23 ATH6KL_CFG_SUSPEND_WOW
24};
22 25
23void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status); 26struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
27 enum nl80211_iftype type,
28 u8 fw_vif_idx, u8 nw_type);
29int ath6kl_register_ieee80211_hw(struct ath6kl *ar);
30struct ath6kl *ath6kl_core_alloc(struct device *dev);
31void ath6kl_deinit_ieee80211_hw(struct ath6kl *ar);
24 32
25void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel, 33void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted);
34
35void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
26 u8 *bssid, u16 listen_intvl, 36 u8 *bssid, u16 listen_intvl,
27 u16 beacon_intvl, 37 u16 beacon_intvl,
28 enum network_type nw_type, 38 enum network_type nw_type,
29 u8 beacon_ie_len, u8 assoc_req_len, 39 u8 beacon_ie_len, u8 assoc_req_len,
30 u8 assoc_resp_len, u8 *assoc_info); 40 u8 assoc_resp_len, u8 *assoc_info);
31 41
32void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason, 42void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
33 u8 *bssid, u8 assoc_resp_len, 43 u8 *bssid, u8 assoc_resp_len,
34 u8 *assoc_info, u16 proto_reason); 44 u8 *assoc_info, u16 proto_reason);
35 45
36void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid, 46void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid,
37 bool ismcast); 47 bool ismcast);
38 48
49int ath6kl_cfg80211_suspend(struct ath6kl *ar,
50 enum ath6kl_cfg_suspend_mode mode,
51 struct cfg80211_wowlan *wow);
52
53int ath6kl_cfg80211_resume(struct ath6kl *ar);
54
55void ath6kl_cfg80211_stop(struct ath6kl *ar);
56
39#endif /* ATH6KL_CFG80211_H */ 57#endif /* ATH6KL_CFG80211_H */
diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h
index b92f0e5d2336..41e465f29e63 100644
--- a/drivers/net/wireless/ath/ath6kl/common.h
+++ b/drivers/net/wireless/ath/ath6kl/common.h
@@ -23,8 +23,6 @@
23 23
24extern int ath6kl_printk(const char *level, const char *fmt, ...); 24extern int ath6kl_printk(const char *level, const char *fmt, ...);
25 25
26#define A_CACHE_LINE_PAD 128
27
28/* 26/*
29 * Reflects the version of binary interface exposed by ATH6KL target 27 * Reflects the version of binary interface exposed by ATH6KL target
30 * firmware. Needs to be incremented by 1 for any change in the firmware 28 * firmware. Needs to be incremented by 1 for any change in the firmware
@@ -78,20 +76,10 @@ enum crypto_type {
78struct htc_endpoint_credit_dist; 76struct htc_endpoint_credit_dist;
79struct ath6kl; 77struct ath6kl;
80enum htc_credit_dist_reason; 78enum htc_credit_dist_reason;
81struct htc_credit_state_info; 79struct ath6kl_htc_credit_info;
82 80
83int ath6k_setup_credit_dist(void *htc_handle,
84 struct htc_credit_state_info *cred_info);
85void ath6k_credit_distribute(struct htc_credit_state_info *cred_inf,
86 struct list_head *epdist_list,
87 enum htc_credit_dist_reason reason);
88void ath6k_credit_init(struct htc_credit_state_info *cred_inf,
89 struct list_head *ep_list,
90 int tot_credits);
91void ath6k_seek_credits(struct htc_credit_state_info *cred_inf,
92 struct htc_endpoint_credit_dist *ep_dist);
93struct ath6kl *ath6kl_core_alloc(struct device *sdev); 81struct ath6kl *ath6kl_core_alloc(struct device *sdev);
94int ath6kl_core_init(struct ath6kl *ar); 82int ath6kl_core_init(struct ath6kl *ar);
95int ath6kl_unavail_ev(struct ath6kl *ar); 83void ath6kl_core_cleanup(struct ath6kl *ar);
96struct sk_buff *ath6kl_buf_alloc(int size); 84struct sk_buff *ath6kl_buf_alloc(int size);
97#endif /* COMMON_H */ 85#endif /* COMMON_H */
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 6d8a4845baaf..e7e095e536a7 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -166,6 +166,7 @@ struct ath6kl_fw_ie {
166#define ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN BIT(1) 166#define ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN BIT(1)
167#define ATH6KL_CONF_ENABLE_11N BIT(2) 167#define ATH6KL_CONF_ENABLE_11N BIT(2)
168#define ATH6KL_CONF_ENABLE_TX_BURST BIT(3) 168#define ATH6KL_CONF_ENABLE_TX_BURST BIT(3)
169#define ATH6KL_CONF_SUSPEND_CUTPOWER BIT(4)
169 170
170enum wlan_low_pwr_state { 171enum wlan_low_pwr_state {
171 WLAN_POWER_STATE_ON, 172 WLAN_POWER_STATE_ON,
@@ -380,40 +381,33 @@ struct ath6kl_req_key {
380 u8 key_len; 381 u8 key_len;
381}; 382};
382 383
383/* Flag info */ 384#define MAX_NUM_VIF 1
384#define WMI_ENABLED 0 385
385#define WMI_READY 1 386/* vif flags info */
386#define CONNECTED 2 387enum ath6kl_vif_state {
387#define STATS_UPDATE_PEND 3 388 CONNECTED,
388#define CONNECT_PEND 4 389 CONNECT_PEND,
389#define WMM_ENABLED 5 390 WMM_ENABLED,
390#define NETQ_STOPPED 6 391 NETQ_STOPPED,
391#define WMI_CTRL_EP_FULL 7 392 DTIM_EXPIRED,
392#define DTIM_EXPIRED 8 393 NETDEV_REGISTERED,
393#define DESTROY_IN_PROGRESS 9 394 CLEAR_BSSFILTER_ON_BEACON,
394#define NETDEV_REGISTERED 10 395 DTIM_PERIOD_AVAIL,
395#define SKIP_SCAN 11 396 WLAN_ENABLED,
396#define WLAN_ENABLED 12 397 STATS_UPDATE_PEND,
397#define TESTMODE 13 398};
398#define CLEAR_BSSFILTER_ON_BEACON 14
399#define DTIM_PERIOD_AVAIL 15
400 399
401struct ath6kl { 400struct ath6kl_vif {
402 struct device *dev; 401 struct list_head list;
403 struct net_device *net_dev; 402 struct wireless_dev wdev;
404 struct ath6kl_bmi bmi; 403 struct net_device *ndev;
405 const struct ath6kl_hif_ops *hif_ops; 404 struct ath6kl *ar;
406 struct wmi *wmi; 405 /* Lock to protect vif specific net_stats and flags */
407 int tx_pending[ENDPOINT_MAX]; 406 spinlock_t if_lock;
408 int total_tx_data_pend; 407 u8 fw_vif_idx;
409 struct htc_target *htc_target; 408 unsigned long flags;
410 void *hif_priv;
411 spinlock_t lock;
412 struct semaphore sem;
413 int ssid_len; 409 int ssid_len;
414 u8 ssid[IEEE80211_MAX_SSID_LEN]; 410 u8 ssid[IEEE80211_MAX_SSID_LEN];
415 u8 next_mode;
416 u8 nw_type;
417 u8 dot11_auth_mode; 411 u8 dot11_auth_mode;
418 u8 auth_mode; 412 u8 auth_mode;
419 u8 prwise_crypto; 413 u8 prwise_crypto;
@@ -421,21 +415,83 @@ struct ath6kl {
421 u8 grp_crypto; 415 u8 grp_crypto;
422 u8 grp_crypto_len; 416 u8 grp_crypto_len;
423 u8 def_txkey_index; 417 u8 def_txkey_index;
424 struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1]; 418 u8 next_mode;
419 u8 nw_type;
425 u8 bssid[ETH_ALEN]; 420 u8 bssid[ETH_ALEN];
426 u8 req_bssid[ETH_ALEN]; 421 u8 req_bssid[ETH_ALEN];
427 u16 ch_hint; 422 u16 ch_hint;
428 u16 bss_ch; 423 u16 bss_ch;
424 struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
425 struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
426 struct aggr_info *aggr_cntxt;
427 struct timer_list disconnect_timer;
428 struct cfg80211_scan_request *scan_req;
429 enum sme_state sme_state;
430 int reconnect_flag;
431 u32 last_roc_id;
432 u32 last_cancel_roc_id;
433 u32 send_action_id;
434 bool probe_req_report;
435 u16 next_chan;
436 u16 assoc_bss_beacon_int;
437 u8 assoc_bss_dtim_period;
438 struct net_device_stats net_stats;
439 struct target_stats target_stats;
440};
441
442#define WOW_LIST_ID 0
443#define WOW_HOST_REQ_DELAY 500 /* ms */
444
445/* Flag info */
446enum ath6kl_dev_state {
447 WMI_ENABLED,
448 WMI_READY,
449 WMI_CTRL_EP_FULL,
450 TESTMODE,
451 DESTROY_IN_PROGRESS,
452 SKIP_SCAN,
453 ROAM_TBL_PEND,
454 FIRST_BOOT,
455};
456
457enum ath6kl_state {
458 ATH6KL_STATE_OFF,
459 ATH6KL_STATE_ON,
460 ATH6KL_STATE_DEEPSLEEP,
461 ATH6KL_STATE_CUTPOWER,
462 ATH6KL_STATE_WOW,
463};
464
465struct ath6kl {
466 struct device *dev;
467 struct wiphy *wiphy;
468
469 enum ath6kl_state state;
470
471 struct ath6kl_bmi bmi;
472 const struct ath6kl_hif_ops *hif_ops;
473 struct wmi *wmi;
474 int tx_pending[ENDPOINT_MAX];
475 int total_tx_data_pend;
476 struct htc_target *htc_target;
477 void *hif_priv;
478 struct list_head vif_list;
479 /* Lock to avoid race in vif_list entries among add/del/traverse */
480 spinlock_t list_lock;
481 u8 num_vif;
482 u8 max_norm_iface;
483 u8 avail_idx_map;
484 spinlock_t lock;
485 struct semaphore sem;
429 u16 listen_intvl_b; 486 u16 listen_intvl_b;
430 u16 listen_intvl_t; 487 u16 listen_intvl_t;
431 u8 lrssi_roam_threshold; 488 u8 lrssi_roam_threshold;
432 struct ath6kl_version version; 489 struct ath6kl_version version;
433 u32 target_type; 490 u32 target_type;
434 u8 tx_pwr; 491 u8 tx_pwr;
435 struct net_device_stats net_stats;
436 struct target_stats target_stats;
437 struct ath6kl_node_mapping node_map[MAX_NODE_NUM]; 492 struct ath6kl_node_mapping node_map[MAX_NODE_NUM];
438 u8 ibss_ps_enable; 493 u8 ibss_ps_enable;
494 bool ibss_if_active;
439 u8 node_num; 495 u8 node_num;
440 u8 next_ep_id; 496 u8 next_ep_id;
441 struct ath6kl_cookie *cookie_list; 497 struct ath6kl_cookie *cookie_list;
@@ -446,7 +502,7 @@ struct ath6kl {
446 u8 hiac_stream_active_pri; 502 u8 hiac_stream_active_pri;
447 u8 ep2ac_map[ENDPOINT_MAX]; 503 u8 ep2ac_map[ENDPOINT_MAX];
448 enum htc_endpoint_id ctrl_ep; 504 enum htc_endpoint_id ctrl_ep;
449 struct htc_credit_state_info credit_state_info; 505 struct ath6kl_htc_credit_info credit_state_info;
450 u32 connect_ctrl_flags; 506 u32 connect_ctrl_flags;
451 u32 user_key_ctrl; 507 u32 user_key_ctrl;
452 u8 usr_bss_filter; 508 u8 usr_bss_filter;
@@ -456,18 +512,13 @@ struct ath6kl {
456 struct sk_buff_head mcastpsq; 512 struct sk_buff_head mcastpsq;
457 spinlock_t mcastpsq_lock; 513 spinlock_t mcastpsq_lock;
458 u8 intra_bss; 514 u8 intra_bss;
459 struct aggr_info *aggr_cntxt;
460 struct wmi_ap_mode_stat ap_stats; 515 struct wmi_ap_mode_stat ap_stats;
461 u8 ap_country_code[3]; 516 u8 ap_country_code[3];
462 struct list_head amsdu_rx_buffer_queue; 517 struct list_head amsdu_rx_buffer_queue;
463 struct timer_list disconnect_timer;
464 u8 rx_meta_ver; 518 u8 rx_meta_ver;
465 struct wireless_dev *wdev;
466 struct cfg80211_scan_request *scan_req;
467 struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
468 enum sme_state sme_state;
469 enum wlan_low_pwr_state wlan_pwr_state; 519 enum wlan_low_pwr_state wlan_pwr_state;
470 struct wmi_scan_params_cmd sc_params; 520 struct wmi_scan_params_cmd sc_params;
521 u8 mac_addr[ETH_ALEN];
471#define AR_MCAST_FILTER_MAC_ADDR_SIZE 4 522#define AR_MCAST_FILTER_MAC_ADDR_SIZE 4
472 struct { 523 struct {
473 void *rx_report; 524 void *rx_report;
@@ -487,7 +538,6 @@ struct ath6kl {
487 struct ath6kl_mbox_info mbox_info; 538 struct ath6kl_mbox_info mbox_info;
488 539
489 struct ath6kl_cookie cookie_mem[MAX_COOKIE_NUM]; 540 struct ath6kl_cookie cookie_mem[MAX_COOKIE_NUM];
490 int reconnect_flag;
491 unsigned long flag; 541 unsigned long flag;
492 542
493 u8 *fw_board; 543 u8 *fw_board;
@@ -508,13 +558,7 @@ struct ath6kl {
508 558
509 struct dentry *debugfs_phy; 559 struct dentry *debugfs_phy;
510 560
511 u32 send_action_id;
512 bool probe_req_report;
513 u16 next_chan;
514
515 bool p2p; 561 bool p2p;
516 u16 assoc_bss_beacon_int;
517 u8 assoc_bss_dtim_period;
518 562
519#ifdef CONFIG_ATH6KL_DEBUG 563#ifdef CONFIG_ATH6KL_DEBUG
520 struct { 564 struct {
@@ -529,23 +573,19 @@ struct ath6kl {
529 struct { 573 struct {
530 unsigned int invalid_rate; 574 unsigned int invalid_rate;
531 } war_stats; 575 } war_stats;
576
577 u8 *roam_tbl;
578 unsigned int roam_tbl_len;
579
580 u8 keepalive;
581 u8 disc_timeout;
532 } debug; 582 } debug;
533#endif /* CONFIG_ATH6KL_DEBUG */ 583#endif /* CONFIG_ATH6KL_DEBUG */
534}; 584};
535 585
536static inline void *ath6kl_priv(struct net_device *dev) 586static inline void *ath6kl_priv(struct net_device *dev)
537{ 587{
538 return wdev_priv(dev->ieee80211_ptr); 588 return ((struct ath6kl_vif *) netdev_priv(dev))->ar;
539}
540
541static inline void ath6kl_deposit_credit_to_ep(struct htc_credit_state_info
542 *cred_info,
543 struct htc_endpoint_credit_dist
544 *ep_dist, int credits)
545{
546 ep_dist->credits += credits;
547 ep_dist->cred_assngd += credits;
548 cred_info->cur_free_credits -= credits;
549} 589}
550 590
551static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar, 591static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar,
@@ -561,7 +601,6 @@ static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar,
561 return addr; 601 return addr;
562} 602}
563 603
564void ath6kl_destroy(struct net_device *dev, unsigned int unregister);
565int ath6kl_configure_target(struct ath6kl *ar); 604int ath6kl_configure_target(struct ath6kl *ar);
566void ath6kl_detect_error(unsigned long ptr); 605void ath6kl_detect_error(unsigned long ptr);
567void disconnect_timer_handler(unsigned long ptr); 606void disconnect_timer_handler(unsigned long ptr);
@@ -579,10 +618,8 @@ int ath6kl_diag_write(struct ath6kl *ar, u32 address, void *data, u32 length);
579int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value); 618int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value);
580int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length); 619int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length);
581int ath6kl_read_fwlogs(struct ath6kl *ar); 620int ath6kl_read_fwlogs(struct ath6kl *ar);
582void ath6kl_init_profile_info(struct ath6kl *ar); 621void ath6kl_init_profile_info(struct ath6kl_vif *vif);
583void ath6kl_tx_data_cleanup(struct ath6kl *ar); 622void ath6kl_tx_data_cleanup(struct ath6kl *ar);
584void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile,
585 bool get_dbglogs);
586 623
587struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar); 624struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar);
588void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie); 625void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie);
@@ -598,40 +635,49 @@ struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
598void aggr_module_destroy(struct aggr_info *aggr_info); 635void aggr_module_destroy(struct aggr_info *aggr_info);
599void aggr_reset_state(struct aggr_info *aggr_info); 636void aggr_reset_state(struct aggr_info *aggr_info);
600 637
601struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 * node_addr); 638struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 * node_addr);
602struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid); 639struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid);
603 640
604void ath6kl_ready_event(void *devt, u8 * datap, u32 sw_ver, u32 abi_ver); 641void ath6kl_ready_event(void *devt, u8 * datap, u32 sw_ver, u32 abi_ver);
605int ath6kl_control_tx(void *devt, struct sk_buff *skb, 642int ath6kl_control_tx(void *devt, struct sk_buff *skb,
606 enum htc_endpoint_id eid); 643 enum htc_endpoint_id eid);
607void ath6kl_connect_event(struct ath6kl *ar, u16 channel, 644void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel,
608 u8 *bssid, u16 listen_int, 645 u8 *bssid, u16 listen_int,
609 u16 beacon_int, enum network_type net_type, 646 u16 beacon_int, enum network_type net_type,
610 u8 beacon_ie_len, u8 assoc_req_len, 647 u8 beacon_ie_len, u8 assoc_req_len,
611 u8 assoc_resp_len, u8 *assoc_info); 648 u8 assoc_resp_len, u8 *assoc_info);
612void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel); 649void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel);
613void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr, 650void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
614 u8 keymgmt, u8 ucipher, u8 auth, 651 u8 keymgmt, u8 ucipher, u8 auth,
615 u8 assoc_req_len, u8 *assoc_info); 652 u8 assoc_req_len, u8 *assoc_info);
616void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, 653void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason,
617 u8 *bssid, u8 assoc_resp_len, 654 u8 *bssid, u8 assoc_resp_len,
618 u8 *assoc_info, u16 prot_reason_status); 655 u8 *assoc_info, u16 prot_reason_status);
619void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast); 656void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast);
620void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr); 657void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr);
621void ath6kl_scan_complete_evt(struct ath6kl *ar, int status); 658void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status);
622void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len); 659void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len);
623void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active); 660void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active);
624enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac); 661enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac);
625 662
626void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid); 663void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid);
627 664
628void ath6kl_dtimexpiry_event(struct ath6kl *ar); 665void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif);
629void ath6kl_disconnect(struct ath6kl *ar); 666void ath6kl_disconnect(struct ath6kl_vif *vif);
630void ath6kl_deep_sleep_enable(struct ath6kl *ar); 667void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid);
631void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid); 668void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
632void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no,
633 u8 win_sz); 669 u8 win_sz);
634void ath6kl_wakeup_event(void *dev); 670void ath6kl_wakeup_event(void *dev);
635void ath6kl_target_failure(struct ath6kl *ar); 671
672void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
673 bool wait_fot_compltn, bool cold_reset);
674void ath6kl_init_control_info(struct ath6kl_vif *vif);
675void ath6kl_deinit_if_data(struct ath6kl_vif *vif);
676void ath6kl_core_free(struct ath6kl *ar);
677struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar);
678void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready);
679int ath6kl_init_hw_start(struct ath6kl *ar);
680int ath6kl_init_hw_stop(struct ath6kl *ar);
681void ath6kl_check_wow_status(struct ath6kl *ar);
636 682
637#endif /* CORE_H */ 683#endif /* CORE_H */
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index 7879b5314285..9eff0d010bb1 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -143,49 +143,48 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
143 143
144static void dump_cred_dist(struct htc_endpoint_credit_dist *ep_dist) 144static void dump_cred_dist(struct htc_endpoint_credit_dist *ep_dist)
145{ 145{
146 ath6kl_dbg(ATH6KL_DBG_ANY, 146 ath6kl_dbg(ATH6KL_DBG_CREDIT,
147 "--- endpoint: %d svc_id: 0x%X ---\n", 147 "--- endpoint: %d svc_id: 0x%X ---\n",
148 ep_dist->endpoint, ep_dist->svc_id); 148 ep_dist->endpoint, ep_dist->svc_id);
149 ath6kl_dbg(ATH6KL_DBG_ANY, " dist_flags : 0x%X\n", 149 ath6kl_dbg(ATH6KL_DBG_CREDIT, " dist_flags : 0x%X\n",
150 ep_dist->dist_flags); 150 ep_dist->dist_flags);
151 ath6kl_dbg(ATH6KL_DBG_ANY, " cred_norm : %d\n", 151 ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_norm : %d\n",
152 ep_dist->cred_norm); 152 ep_dist->cred_norm);
153 ath6kl_dbg(ATH6KL_DBG_ANY, " cred_min : %d\n", 153 ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_min : %d\n",
154 ep_dist->cred_min); 154 ep_dist->cred_min);
155 ath6kl_dbg(ATH6KL_DBG_ANY, " credits : %d\n", 155 ath6kl_dbg(ATH6KL_DBG_CREDIT, " credits : %d\n",
156 ep_dist->credits); 156 ep_dist->credits);
157 ath6kl_dbg(ATH6KL_DBG_ANY, " cred_assngd : %d\n", 157 ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_assngd : %d\n",
158 ep_dist->cred_assngd); 158 ep_dist->cred_assngd);
159 ath6kl_dbg(ATH6KL_DBG_ANY, " seek_cred : %d\n", 159 ath6kl_dbg(ATH6KL_DBG_CREDIT, " seek_cred : %d\n",
160 ep_dist->seek_cred); 160 ep_dist->seek_cred);
161 ath6kl_dbg(ATH6KL_DBG_ANY, " cred_sz : %d\n", 161 ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_sz : %d\n",
162 ep_dist->cred_sz); 162 ep_dist->cred_sz);
163 ath6kl_dbg(ATH6KL_DBG_ANY, " cred_per_msg : %d\n", 163 ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_per_msg : %d\n",
164 ep_dist->cred_per_msg); 164 ep_dist->cred_per_msg);
165 ath6kl_dbg(ATH6KL_DBG_ANY, " cred_to_dist : %d\n", 165 ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_to_dist : %d\n",
166 ep_dist->cred_to_dist); 166 ep_dist->cred_to_dist);
167 ath6kl_dbg(ATH6KL_DBG_ANY, " txq_depth : %d\n", 167 ath6kl_dbg(ATH6KL_DBG_CREDIT, " txq_depth : %d\n",
168 get_queue_depth(&((struct htc_endpoint *) 168 get_queue_depth(&ep_dist->htc_ep->txq));
169 ep_dist->htc_rsvd)->txq)); 169 ath6kl_dbg(ATH6KL_DBG_CREDIT,
170 ath6kl_dbg(ATH6KL_DBG_ANY,
171 "----------------------------------\n"); 170 "----------------------------------\n");
172} 171}
173 172
173/* FIXME: move to htc.c */
174void dump_cred_dist_stats(struct htc_target *target) 174void dump_cred_dist_stats(struct htc_target *target)
175{ 175{
176 struct htc_endpoint_credit_dist *ep_list; 176 struct htc_endpoint_credit_dist *ep_list;
177 177
178 if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_TRC)) 178 if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_CREDIT))
179 return; 179 return;
180 180
181 list_for_each_entry(ep_list, &target->cred_dist_list, list) 181 list_for_each_entry(ep_list, &target->cred_dist_list, list)
182 dump_cred_dist(ep_list); 182 dump_cred_dist(ep_list);
183 183
184 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:%p dist:%p\n", 184 ath6kl_dbg(ATH6KL_DBG_CREDIT,
185 target->cred_dist_cntxt, NULL); 185 "credit distribution total %d free %d\n",
186 ath6kl_dbg(ATH6KL_DBG_TRC, "credit distribution, total : %d, free : %d\n", 186 target->credit_info->total_avail_credits,
187 target->cred_dist_cntxt->total_avail_credits, 187 target->credit_info->cur_free_credits);
188 target->cred_dist_cntxt->cur_free_credits);
189} 188}
190 189
191static int ath6kl_debugfs_open(struct inode *inode, struct file *file) 190static int ath6kl_debugfs_open(struct inode *inode, struct file *file)
@@ -397,13 +396,20 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
397 size_t count, loff_t *ppos) 396 size_t count, loff_t *ppos)
398{ 397{
399 struct ath6kl *ar = file->private_data; 398 struct ath6kl *ar = file->private_data;
400 struct target_stats *tgt_stats = &ar->target_stats; 399 struct ath6kl_vif *vif;
400 struct target_stats *tgt_stats;
401 char *buf; 401 char *buf;
402 unsigned int len = 0, buf_len = 1500; 402 unsigned int len = 0, buf_len = 1500;
403 int i; 403 int i;
404 long left; 404 long left;
405 ssize_t ret_cnt; 405 ssize_t ret_cnt;
406 406
407 vif = ath6kl_vif_first(ar);
408 if (!vif)
409 return -EIO;
410
411 tgt_stats = &vif->target_stats;
412
407 buf = kzalloc(buf_len, GFP_KERNEL); 413 buf = kzalloc(buf_len, GFP_KERNEL);
408 if (!buf) 414 if (!buf)
409 return -ENOMEM; 415 return -ENOMEM;
@@ -413,9 +419,9 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
413 return -EBUSY; 419 return -EBUSY;
414 } 420 }
415 421
416 set_bit(STATS_UPDATE_PEND, &ar->flag); 422 set_bit(STATS_UPDATE_PEND, &vif->flags);
417 423
418 if (ath6kl_wmi_get_stats_cmd(ar->wmi)) { 424 if (ath6kl_wmi_get_stats_cmd(ar->wmi, 0)) {
419 up(&ar->sem); 425 up(&ar->sem);
420 kfree(buf); 426 kfree(buf);
421 return -EIO; 427 return -EIO;
@@ -423,7 +429,7 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
423 429
424 left = wait_event_interruptible_timeout(ar->event_wq, 430 left = wait_event_interruptible_timeout(ar->event_wq,
425 !test_bit(STATS_UPDATE_PEND, 431 !test_bit(STATS_UPDATE_PEND,
426 &ar->flag), WMI_TIMEOUT); 432 &vif->flags), WMI_TIMEOUT);
427 433
428 up(&ar->sem); 434 up(&ar->sem);
429 435
@@ -555,10 +561,10 @@ static ssize_t read_file_credit_dist_stats(struct file *file,
555 561
556 len += scnprintf(buf + len, buf_len - len, "%25s%5d\n", 562 len += scnprintf(buf + len, buf_len - len, "%25s%5d\n",
557 "Total Avail Credits: ", 563 "Total Avail Credits: ",
558 target->cred_dist_cntxt->total_avail_credits); 564 target->credit_info->total_avail_credits);
559 len += scnprintf(buf + len, buf_len - len, "%25s%5d\n", 565 len += scnprintf(buf + len, buf_len - len, "%25s%5d\n",
560 "Free credits :", 566 "Free credits :",
561 target->cred_dist_cntxt->cur_free_credits); 567 target->credit_info->cur_free_credits);
562 568
563 len += scnprintf(buf + len, buf_len - len, 569 len += scnprintf(buf + len, buf_len - len,
564 " Epid Flags Cred_norm Cred_min Credits Cred_assngd" 570 " Epid Flags Cred_norm Cred_min Credits Cred_assngd"
@@ -577,8 +583,7 @@ static ssize_t read_file_credit_dist_stats(struct file *file,
577 print_credit_info("%9d", cred_per_msg); 583 print_credit_info("%9d", cred_per_msg);
578 print_credit_info("%14d", cred_to_dist); 584 print_credit_info("%14d", cred_to_dist);
579 len += scnprintf(buf + len, buf_len - len, "%12d\n", 585 len += scnprintf(buf + len, buf_len - len, "%12d\n",
580 get_queue_depth(&((struct htc_endpoint *) 586 get_queue_depth(&ep_list->htc_ep->txq));
581 ep_list->htc_rsvd)->txq));
582 } 587 }
583 588
584 if (len > buf_len) 589 if (len > buf_len)
@@ -596,6 +601,107 @@ static const struct file_operations fops_credit_dist_stats = {
596 .llseek = default_llseek, 601 .llseek = default_llseek,
597}; 602};
598 603
604static unsigned int print_endpoint_stat(struct htc_target *target, char *buf,
605 unsigned int buf_len, unsigned int len,
606 int offset, const char *name)
607{
608 int i;
609 struct htc_endpoint_stats *ep_st;
610 u32 *counter;
611
612 len += scnprintf(buf + len, buf_len - len, "%s:", name);
613 for (i = 0; i < ENDPOINT_MAX; i++) {
614 ep_st = &target->endpoint[i].ep_st;
615 counter = ((u32 *) ep_st) + (offset / 4);
616 len += scnprintf(buf + len, buf_len - len, " %u", *counter);
617 }
618 len += scnprintf(buf + len, buf_len - len, "\n");
619
620 return len;
621}
622
623static ssize_t ath6kl_endpoint_stats_read(struct file *file,
624 char __user *user_buf,
625 size_t count, loff_t *ppos)
626{
627 struct ath6kl *ar = file->private_data;
628 struct htc_target *target = ar->htc_target;
629 char *buf;
630 unsigned int buf_len, len = 0;
631 ssize_t ret_cnt;
632
633 buf_len = sizeof(struct htc_endpoint_stats) / sizeof(u32) *
634 (25 + ENDPOINT_MAX * 11);
635 buf = kmalloc(buf_len, GFP_KERNEL);
636 if (!buf)
637 return -ENOMEM;
638
639#define EPSTAT(name) \
640 len = print_endpoint_stat(target, buf, buf_len, len, \
641 offsetof(struct htc_endpoint_stats, name), \
642 #name)
643 EPSTAT(cred_low_indicate);
644 EPSTAT(tx_issued);
645 EPSTAT(tx_pkt_bundled);
646 EPSTAT(tx_bundles);
647 EPSTAT(tx_dropped);
648 EPSTAT(tx_cred_rpt);
649 EPSTAT(cred_rpt_from_rx);
650 EPSTAT(cred_rpt_from_other);
651 EPSTAT(cred_rpt_ep0);
652 EPSTAT(cred_from_rx);
653 EPSTAT(cred_from_other);
654 EPSTAT(cred_from_ep0);
655 EPSTAT(cred_cosumd);
656 EPSTAT(cred_retnd);
657 EPSTAT(rx_pkts);
658 EPSTAT(rx_lkahds);
659 EPSTAT(rx_bundl);
660 EPSTAT(rx_bundle_lkahd);
661 EPSTAT(rx_bundle_from_hdr);
662 EPSTAT(rx_alloc_thresh_hit);
663 EPSTAT(rxalloc_thresh_byte);
664#undef EPSTAT
665
666 if (len > buf_len)
667 len = buf_len;
668
669 ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
670 kfree(buf);
671 return ret_cnt;
672}
673
674static ssize_t ath6kl_endpoint_stats_write(struct file *file,
675 const char __user *user_buf,
676 size_t count, loff_t *ppos)
677{
678 struct ath6kl *ar = file->private_data;
679 struct htc_target *target = ar->htc_target;
680 int ret, i;
681 u32 val;
682 struct htc_endpoint_stats *ep_st;
683
684 ret = kstrtou32_from_user(user_buf, count, 0, &val);
685 if (ret)
686 return ret;
687 if (val == 0) {
688 for (i = 0; i < ENDPOINT_MAX; i++) {
689 ep_st = &target->endpoint[i].ep_st;
690 memset(ep_st, 0, sizeof(*ep_st));
691 }
692 }
693
694 return count;
695}
696
697static const struct file_operations fops_endpoint_stats = {
698 .open = ath6kl_debugfs_open,
699 .read = ath6kl_endpoint_stats_read,
700 .write = ath6kl_endpoint_stats_write,
701 .owner = THIS_MODULE,
702 .llseek = default_llseek,
703};
704
599static unsigned long ath6kl_get_num_reg(void) 705static unsigned long ath6kl_get_num_reg(void)
600{ 706{
601 int i; 707 int i;
@@ -868,6 +974,660 @@ static const struct file_operations fops_diag_reg_write = {
868 .llseek = default_llseek, 974 .llseek = default_llseek,
869}; 975};
870 976
977int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf,
978 size_t len)
979{
980 const struct wmi_target_roam_tbl *tbl;
981 u16 num_entries;
982
983 if (len < sizeof(*tbl))
984 return -EINVAL;
985
986 tbl = (const struct wmi_target_roam_tbl *) buf;
987 num_entries = le16_to_cpu(tbl->num_entries);
988 if (sizeof(*tbl) + num_entries * sizeof(struct wmi_bss_roam_info) >
989 len)
990 return -EINVAL;
991
992 if (ar->debug.roam_tbl == NULL ||
993 ar->debug.roam_tbl_len < (unsigned int) len) {
994 kfree(ar->debug.roam_tbl);
995 ar->debug.roam_tbl = kmalloc(len, GFP_ATOMIC);
996 if (ar->debug.roam_tbl == NULL)
997 return -ENOMEM;
998 }
999
1000 memcpy(ar->debug.roam_tbl, buf, len);
1001 ar->debug.roam_tbl_len = len;
1002
1003 if (test_bit(ROAM_TBL_PEND, &ar->flag)) {
1004 clear_bit(ROAM_TBL_PEND, &ar->flag);
1005 wake_up(&ar->event_wq);
1006 }
1007
1008 return 0;
1009}
1010
1011static ssize_t ath6kl_roam_table_read(struct file *file, char __user *user_buf,
1012 size_t count, loff_t *ppos)
1013{
1014 struct ath6kl *ar = file->private_data;
1015 int ret;
1016 long left;
1017 struct wmi_target_roam_tbl *tbl;
1018 u16 num_entries, i;
1019 char *buf;
1020 unsigned int len, buf_len;
1021 ssize_t ret_cnt;
1022
1023 if (down_interruptible(&ar->sem))
1024 return -EBUSY;
1025
1026 set_bit(ROAM_TBL_PEND, &ar->flag);
1027
1028 ret = ath6kl_wmi_get_roam_tbl_cmd(ar->wmi);
1029 if (ret) {
1030 up(&ar->sem);
1031 return ret;
1032 }
1033
1034 left = wait_event_interruptible_timeout(
1035 ar->event_wq, !test_bit(ROAM_TBL_PEND, &ar->flag), WMI_TIMEOUT);
1036 up(&ar->sem);
1037
1038 if (left <= 0)
1039 return -ETIMEDOUT;
1040
1041 if (ar->debug.roam_tbl == NULL)
1042 return -ENOMEM;
1043
1044 tbl = (struct wmi_target_roam_tbl *) ar->debug.roam_tbl;
1045 num_entries = le16_to_cpu(tbl->num_entries);
1046
1047 buf_len = 100 + num_entries * 100;
1048 buf = kzalloc(buf_len, GFP_KERNEL);
1049 if (buf == NULL)
1050 return -ENOMEM;
1051 len = 0;
1052 len += scnprintf(buf + len, buf_len - len,
1053 "roam_mode=%u\n\n"
1054 "# roam_util bssid rssi rssidt last_rssi util bias\n",
1055 le16_to_cpu(tbl->roam_mode));
1056
1057 for (i = 0; i < num_entries; i++) {
1058 struct wmi_bss_roam_info *info = &tbl->info[i];
1059 len += scnprintf(buf + len, buf_len - len,
1060 "%d %pM %d %d %d %d %d\n",
1061 a_sle32_to_cpu(info->roam_util), info->bssid,
1062 info->rssi, info->rssidt, info->last_rssi,
1063 info->util, info->bias);
1064 }
1065
1066 if (len > buf_len)
1067 len = buf_len;
1068
1069 ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
1070
1071 kfree(buf);
1072 return ret_cnt;
1073}
1074
1075static const struct file_operations fops_roam_table = {
1076 .read = ath6kl_roam_table_read,
1077 .open = ath6kl_debugfs_open,
1078 .owner = THIS_MODULE,
1079 .llseek = default_llseek,
1080};
1081
1082static ssize_t ath6kl_force_roam_write(struct file *file,
1083 const char __user *user_buf,
1084 size_t count, loff_t *ppos)
1085{
1086 struct ath6kl *ar = file->private_data;
1087 int ret;
1088 char buf[20];
1089 size_t len;
1090 u8 bssid[ETH_ALEN];
1091 int i;
1092 int addr[ETH_ALEN];
1093
1094 len = min(count, sizeof(buf) - 1);
1095 if (copy_from_user(buf, user_buf, len))
1096 return -EFAULT;
1097 buf[len] = '\0';
1098
1099 if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x",
1100 &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
1101 != ETH_ALEN)
1102 return -EINVAL;
1103 for (i = 0; i < ETH_ALEN; i++)
1104 bssid[i] = addr[i];
1105
1106 ret = ath6kl_wmi_force_roam_cmd(ar->wmi, bssid);
1107 if (ret)
1108 return ret;
1109
1110 return count;
1111}
1112
1113static const struct file_operations fops_force_roam = {
1114 .write = ath6kl_force_roam_write,
1115 .open = ath6kl_debugfs_open,
1116 .owner = THIS_MODULE,
1117 .llseek = default_llseek,
1118};
1119
1120static ssize_t ath6kl_roam_mode_write(struct file *file,
1121 const char __user *user_buf,
1122 size_t count, loff_t *ppos)
1123{
1124 struct ath6kl *ar = file->private_data;
1125 int ret;
1126 char buf[20];
1127 size_t len;
1128 enum wmi_roam_mode mode;
1129
1130 len = min(count, sizeof(buf) - 1);
1131 if (copy_from_user(buf, user_buf, len))
1132 return -EFAULT;
1133 buf[len] = '\0';
1134 if (len > 0 && buf[len - 1] == '\n')
1135 buf[len - 1] = '\0';
1136
1137 if (strcasecmp(buf, "default") == 0)
1138 mode = WMI_DEFAULT_ROAM_MODE;
1139 else if (strcasecmp(buf, "bssbias") == 0)
1140 mode = WMI_HOST_BIAS_ROAM_MODE;
1141 else if (strcasecmp(buf, "lock") == 0)
1142 mode = WMI_LOCK_BSS_MODE;
1143 else
1144 return -EINVAL;
1145
1146 ret = ath6kl_wmi_set_roam_mode_cmd(ar->wmi, mode);
1147 if (ret)
1148 return ret;
1149
1150 return count;
1151}
1152
1153static const struct file_operations fops_roam_mode = {
1154 .write = ath6kl_roam_mode_write,
1155 .open = ath6kl_debugfs_open,
1156 .owner = THIS_MODULE,
1157 .llseek = default_llseek,
1158};
1159
1160void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive)
1161{
1162 ar->debug.keepalive = keepalive;
1163}
1164
1165static ssize_t ath6kl_keepalive_read(struct file *file, char __user *user_buf,
1166 size_t count, loff_t *ppos)
1167{
1168 struct ath6kl *ar = file->private_data;
1169 char buf[16];
1170 int len;
1171
1172 len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.keepalive);
1173
1174 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1175}
1176
1177static ssize_t ath6kl_keepalive_write(struct file *file,
1178 const char __user *user_buf,
1179 size_t count, loff_t *ppos)
1180{
1181 struct ath6kl *ar = file->private_data;
1182 int ret;
1183 u8 val;
1184
1185 ret = kstrtou8_from_user(user_buf, count, 0, &val);
1186 if (ret)
1187 return ret;
1188
1189 ret = ath6kl_wmi_set_keepalive_cmd(ar->wmi, 0, val);
1190 if (ret)
1191 return ret;
1192
1193 return count;
1194}
1195
1196static const struct file_operations fops_keepalive = {
1197 .open = ath6kl_debugfs_open,
1198 .read = ath6kl_keepalive_read,
1199 .write = ath6kl_keepalive_write,
1200 .owner = THIS_MODULE,
1201 .llseek = default_llseek,
1202};
1203
1204void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout)
1205{
1206 ar->debug.disc_timeout = timeout;
1207}
1208
1209static ssize_t ath6kl_disconnect_timeout_read(struct file *file,
1210 char __user *user_buf,
1211 size_t count, loff_t *ppos)
1212{
1213 struct ath6kl *ar = file->private_data;
1214 char buf[16];
1215 int len;
1216
1217 len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.disc_timeout);
1218
1219 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1220}
1221
1222static ssize_t ath6kl_disconnect_timeout_write(struct file *file,
1223 const char __user *user_buf,
1224 size_t count, loff_t *ppos)
1225{
1226 struct ath6kl *ar = file->private_data;
1227 int ret;
1228 u8 val;
1229
1230 ret = kstrtou8_from_user(user_buf, count, 0, &val);
1231 if (ret)
1232 return ret;
1233
1234 ret = ath6kl_wmi_disctimeout_cmd(ar->wmi, 0, val);
1235 if (ret)
1236 return ret;
1237
1238 return count;
1239}
1240
1241static const struct file_operations fops_disconnect_timeout = {
1242 .open = ath6kl_debugfs_open,
1243 .read = ath6kl_disconnect_timeout_read,
1244 .write = ath6kl_disconnect_timeout_write,
1245 .owner = THIS_MODULE,
1246 .llseek = default_llseek,
1247};
1248
1249static ssize_t ath6kl_create_qos_write(struct file *file,
1250 const char __user *user_buf,
1251 size_t count, loff_t *ppos)
1252{
1253
1254 struct ath6kl *ar = file->private_data;
1255 struct ath6kl_vif *vif;
1256 char buf[200];
1257 ssize_t len;
1258 char *sptr, *token;
1259 struct wmi_create_pstream_cmd pstream;
1260 u32 val32;
1261 u16 val16;
1262
1263 vif = ath6kl_vif_first(ar);
1264 if (!vif)
1265 return -EIO;
1266
1267 len = min(count, sizeof(buf) - 1);
1268 if (copy_from_user(buf, user_buf, len))
1269 return -EFAULT;
1270 buf[len] = '\0';
1271 sptr = buf;
1272
1273 token = strsep(&sptr, " ");
1274 if (!token)
1275 return -EINVAL;
1276 if (kstrtou8(token, 0, &pstream.user_pri))
1277 return -EINVAL;
1278
1279 token = strsep(&sptr, " ");
1280 if (!token)
1281 return -EINVAL;
1282 if (kstrtou8(token, 0, &pstream.traffic_direc))
1283 return -EINVAL;
1284
1285 token = strsep(&sptr, " ");
1286 if (!token)
1287 return -EINVAL;
1288 if (kstrtou8(token, 0, &pstream.traffic_class))
1289 return -EINVAL;
1290
1291 token = strsep(&sptr, " ");
1292 if (!token)
1293 return -EINVAL;
1294 if (kstrtou8(token, 0, &pstream.traffic_type))
1295 return -EINVAL;
1296
1297 token = strsep(&sptr, " ");
1298 if (!token)
1299 return -EINVAL;
1300 if (kstrtou8(token, 0, &pstream.voice_psc_cap))
1301 return -EINVAL;
1302
1303 token = strsep(&sptr, " ");
1304 if (!token)
1305 return -EINVAL;
1306 if (kstrtou32(token, 0, &val32))
1307 return -EINVAL;
1308 pstream.min_service_int = cpu_to_le32(val32);
1309
1310 token = strsep(&sptr, " ");
1311 if (!token)
1312 return -EINVAL;
1313 if (kstrtou32(token, 0, &val32))
1314 return -EINVAL;
1315 pstream.max_service_int = cpu_to_le32(val32);
1316
1317 token = strsep(&sptr, " ");
1318 if (!token)
1319 return -EINVAL;
1320 if (kstrtou32(token, 0, &val32))
1321 return -EINVAL;
1322 pstream.inactivity_int = cpu_to_le32(val32);
1323
1324 token = strsep(&sptr, " ");
1325 if (!token)
1326 return -EINVAL;
1327 if (kstrtou32(token, 0, &val32))
1328 return -EINVAL;
1329 pstream.suspension_int = cpu_to_le32(val32);
1330
1331 token = strsep(&sptr, " ");
1332 if (!token)
1333 return -EINVAL;
1334 if (kstrtou32(token, 0, &val32))
1335 return -EINVAL;
1336 pstream.service_start_time = cpu_to_le32(val32);
1337
1338 token = strsep(&sptr, " ");
1339 if (!token)
1340 return -EINVAL;
1341 if (kstrtou8(token, 0, &pstream.tsid))
1342 return -EINVAL;
1343
1344 token = strsep(&sptr, " ");
1345 if (!token)
1346 return -EINVAL;
1347 if (kstrtou16(token, 0, &val16))
1348 return -EINVAL;
1349 pstream.nominal_msdu = cpu_to_le16(val16);
1350
1351 token = strsep(&sptr, " ");
1352 if (!token)
1353 return -EINVAL;
1354 if (kstrtou16(token, 0, &val16))
1355 return -EINVAL;
1356 pstream.max_msdu = cpu_to_le16(val16);
1357
1358 token = strsep(&sptr, " ");
1359 if (!token)
1360 return -EINVAL;
1361 if (kstrtou32(token, 0, &val32))
1362 return -EINVAL;
1363 pstream.min_data_rate = cpu_to_le32(val32);
1364
1365 token = strsep(&sptr, " ");
1366 if (!token)
1367 return -EINVAL;
1368 if (kstrtou32(token, 0, &val32))
1369 return -EINVAL;
1370 pstream.mean_data_rate = cpu_to_le32(val32);
1371
1372 token = strsep(&sptr, " ");
1373 if (!token)
1374 return -EINVAL;
1375 if (kstrtou32(token, 0, &val32))
1376 return -EINVAL;
1377 pstream.peak_data_rate = cpu_to_le32(val32);
1378
1379 token = strsep(&sptr, " ");
1380 if (!token)
1381 return -EINVAL;
1382 if (kstrtou32(token, 0, &val32))
1383 return -EINVAL;
1384 pstream.max_burst_size = cpu_to_le32(val32);
1385
1386 token = strsep(&sptr, " ");
1387 if (!token)
1388 return -EINVAL;
1389 if (kstrtou32(token, 0, &val32))
1390 return -EINVAL;
1391 pstream.delay_bound = cpu_to_le32(val32);
1392
1393 token = strsep(&sptr, " ");
1394 if (!token)
1395 return -EINVAL;
1396 if (kstrtou32(token, 0, &val32))
1397 return -EINVAL;
1398 pstream.min_phy_rate = cpu_to_le32(val32);
1399
1400 token = strsep(&sptr, " ");
1401 if (!token)
1402 return -EINVAL;
1403 if (kstrtou32(token, 0, &val32))
1404 return -EINVAL;
1405 pstream.sba = cpu_to_le32(val32);
1406
1407 token = strsep(&sptr, " ");
1408 if (!token)
1409 return -EINVAL;
1410 if (kstrtou32(token, 0, &val32))
1411 return -EINVAL;
1412 pstream.medium_time = cpu_to_le32(val32);
1413
1414 ath6kl_wmi_create_pstream_cmd(ar->wmi, vif->fw_vif_idx, &pstream);
1415
1416 return count;
1417}
1418
1419static const struct file_operations fops_create_qos = {
1420 .write = ath6kl_create_qos_write,
1421 .open = ath6kl_debugfs_open,
1422 .owner = THIS_MODULE,
1423 .llseek = default_llseek,
1424};
1425
1426static ssize_t ath6kl_delete_qos_write(struct file *file,
1427 const char __user *user_buf,
1428 size_t count, loff_t *ppos)
1429{
1430
1431 struct ath6kl *ar = file->private_data;
1432 struct ath6kl_vif *vif;
1433 char buf[100];
1434 ssize_t len;
1435 char *sptr, *token;
1436 u8 traffic_class;
1437 u8 tsid;
1438
1439 vif = ath6kl_vif_first(ar);
1440 if (!vif)
1441 return -EIO;
1442
1443 len = min(count, sizeof(buf) - 1);
1444 if (copy_from_user(buf, user_buf, len))
1445 return -EFAULT;
1446 buf[len] = '\0';
1447 sptr = buf;
1448
1449 token = strsep(&sptr, " ");
1450 if (!token)
1451 return -EINVAL;
1452 if (kstrtou8(token, 0, &traffic_class))
1453 return -EINVAL;
1454
1455 token = strsep(&sptr, " ");
1456 if (!token)
1457 return -EINVAL;
1458 if (kstrtou8(token, 0, &tsid))
1459 return -EINVAL;
1460
1461 ath6kl_wmi_delete_pstream_cmd(ar->wmi, vif->fw_vif_idx,
1462 traffic_class, tsid);
1463
1464 return count;
1465}
1466
1467static const struct file_operations fops_delete_qos = {
1468 .write = ath6kl_delete_qos_write,
1469 .open = ath6kl_debugfs_open,
1470 .owner = THIS_MODULE,
1471 .llseek = default_llseek,
1472};
1473
1474static ssize_t ath6kl_bgscan_int_write(struct file *file,
1475 const char __user *user_buf,
1476 size_t count, loff_t *ppos)
1477{
1478 struct ath6kl *ar = file->private_data;
1479 u16 bgscan_int;
1480 char buf[32];
1481 ssize_t len;
1482
1483 len = min(count, sizeof(buf) - 1);
1484 if (copy_from_user(buf, user_buf, len))
1485 return -EFAULT;
1486
1487 buf[len] = '\0';
1488 if (kstrtou16(buf, 0, &bgscan_int))
1489 return -EINVAL;
1490
1491 if (bgscan_int == 0)
1492 bgscan_int = 0xffff;
1493
1494 ath6kl_wmi_scanparams_cmd(ar->wmi, 0, 0, 0, bgscan_int, 0, 0, 0, 3,
1495 0, 0, 0);
1496
1497 return count;
1498}
1499
1500static const struct file_operations fops_bgscan_int = {
1501 .write = ath6kl_bgscan_int_write,
1502 .open = ath6kl_debugfs_open,
1503 .owner = THIS_MODULE,
1504 .llseek = default_llseek,
1505};
1506
1507static ssize_t ath6kl_listen_int_write(struct file *file,
1508 const char __user *user_buf,
1509 size_t count, loff_t *ppos)
1510{
1511 struct ath6kl *ar = file->private_data;
1512 u16 listen_int_t, listen_int_b;
1513 char buf[32];
1514 char *sptr, *token;
1515 ssize_t len;
1516
1517 len = min(count, sizeof(buf) - 1);
1518 if (copy_from_user(buf, user_buf, len))
1519 return -EFAULT;
1520
1521 buf[len] = '\0';
1522 sptr = buf;
1523
1524 token = strsep(&sptr, " ");
1525 if (!token)
1526 return -EINVAL;
1527
1528 if (kstrtou16(token, 0, &listen_int_t))
1529 return -EINVAL;
1530
1531 if (kstrtou16(sptr, 0, &listen_int_b))
1532 return -EINVAL;
1533
1534 if ((listen_int_t < 15) || (listen_int_t > 5000))
1535 return -EINVAL;
1536
1537 if ((listen_int_b < 1) || (listen_int_b > 50))
1538 return -EINVAL;
1539
1540 ar->listen_intvl_t = listen_int_t;
1541 ar->listen_intvl_b = listen_int_b;
1542
1543 ath6kl_wmi_listeninterval_cmd(ar->wmi, 0, ar->listen_intvl_t,
1544 ar->listen_intvl_b);
1545
1546 return count;
1547}
1548
1549static ssize_t ath6kl_listen_int_read(struct file *file,
1550 char __user *user_buf,
1551 size_t count, loff_t *ppos)
1552{
1553 struct ath6kl *ar = file->private_data;
1554 char buf[16];
1555 int len;
1556
1557 len = snprintf(buf, sizeof(buf), "%u %u\n", ar->listen_intvl_t,
1558 ar->listen_intvl_b);
1559
1560 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1561}
1562
1563static const struct file_operations fops_listen_int = {
1564 .read = ath6kl_listen_int_read,
1565 .write = ath6kl_listen_int_write,
1566 .open = ath6kl_debugfs_open,
1567 .owner = THIS_MODULE,
1568 .llseek = default_llseek,
1569};
1570
1571static ssize_t ath6kl_power_params_write(struct file *file,
1572 const char __user *user_buf,
1573 size_t count, loff_t *ppos)
1574{
1575 struct ath6kl *ar = file->private_data;
1576 u8 buf[100];
1577 unsigned int len = 0;
1578 char *sptr, *token;
1579 u16 idle_period, ps_poll_num, dtim,
1580 tx_wakeup, num_tx;
1581
1582 len = min(count, sizeof(buf) - 1);
1583 if (copy_from_user(buf, user_buf, len))
1584 return -EFAULT;
1585 buf[len] = '\0';
1586 sptr = buf;
1587
1588 token = strsep(&sptr, " ");
1589 if (!token)
1590 return -EINVAL;
1591 if (kstrtou16(token, 0, &idle_period))
1592 return -EINVAL;
1593
1594 token = strsep(&sptr, " ");
1595 if (!token)
1596 return -EINVAL;
1597 if (kstrtou16(token, 0, &ps_poll_num))
1598 return -EINVAL;
1599
1600 token = strsep(&sptr, " ");
1601 if (!token)
1602 return -EINVAL;
1603 if (kstrtou16(token, 0, &dtim))
1604 return -EINVAL;
1605
1606 token = strsep(&sptr, " ");
1607 if (!token)
1608 return -EINVAL;
1609 if (kstrtou16(token, 0, &tx_wakeup))
1610 return -EINVAL;
1611
1612 token = strsep(&sptr, " ");
1613 if (!token)
1614 return -EINVAL;
1615 if (kstrtou16(token, 0, &num_tx))
1616 return -EINVAL;
1617
1618 ath6kl_wmi_pmparams_cmd(ar->wmi, 0, idle_period, ps_poll_num,
1619 dtim, tx_wakeup, num_tx, 0);
1620
1621 return count;
1622}
1623
1624static const struct file_operations fops_power_params = {
1625 .write = ath6kl_power_params_write,
1626 .open = ath6kl_debugfs_open,
1627 .owner = THIS_MODULE,
1628 .llseek = default_llseek,
1629};
1630
871int ath6kl_debug_init(struct ath6kl *ar) 1631int ath6kl_debug_init(struct ath6kl *ar)
872{ 1632{
873 ar->debug.fwlog_buf.buf = vmalloc(ATH6KL_FWLOG_SIZE); 1633 ar->debug.fwlog_buf.buf = vmalloc(ATH6KL_FWLOG_SIZE);
@@ -889,7 +1649,7 @@ int ath6kl_debug_init(struct ath6kl *ar)
889 ar->debug.fwlog_mask = 0; 1649 ar->debug.fwlog_mask = 0;
890 1650
891 ar->debugfs_phy = debugfs_create_dir("ath6kl", 1651 ar->debugfs_phy = debugfs_create_dir("ath6kl",
892 ar->wdev->wiphy->debugfsdir); 1652 ar->wiphy->debugfsdir);
893 if (!ar->debugfs_phy) { 1653 if (!ar->debugfs_phy) {
894 vfree(ar->debug.fwlog_buf.buf); 1654 vfree(ar->debug.fwlog_buf.buf);
895 kfree(ar->debug.fwlog_tmp); 1655 kfree(ar->debug.fwlog_tmp);
@@ -902,6 +1662,9 @@ int ath6kl_debug_init(struct ath6kl *ar)
902 debugfs_create_file("credit_dist_stats", S_IRUSR, ar->debugfs_phy, ar, 1662 debugfs_create_file("credit_dist_stats", S_IRUSR, ar->debugfs_phy, ar,
903 &fops_credit_dist_stats); 1663 &fops_credit_dist_stats);
904 1664
1665 debugfs_create_file("endpoint_stats", S_IRUSR | S_IWUSR,
1666 ar->debugfs_phy, ar, &fops_endpoint_stats);
1667
905 debugfs_create_file("fwlog", S_IRUSR, ar->debugfs_phy, ar, 1668 debugfs_create_file("fwlog", S_IRUSR, ar->debugfs_phy, ar,
906 &fops_fwlog); 1669 &fops_fwlog);
907 1670
@@ -923,6 +1686,33 @@ int ath6kl_debug_init(struct ath6kl *ar)
923 debugfs_create_file("war_stats", S_IRUSR, ar->debugfs_phy, ar, 1686 debugfs_create_file("war_stats", S_IRUSR, ar->debugfs_phy, ar,
924 &fops_war_stats); 1687 &fops_war_stats);
925 1688
1689 debugfs_create_file("roam_table", S_IRUSR, ar->debugfs_phy, ar,
1690 &fops_roam_table);
1691
1692 debugfs_create_file("force_roam", S_IWUSR, ar->debugfs_phy, ar,
1693 &fops_force_roam);
1694
1695 debugfs_create_file("roam_mode", S_IWUSR, ar->debugfs_phy, ar,
1696 &fops_roam_mode);
1697
1698 debugfs_create_file("keepalive", S_IRUSR | S_IWUSR, ar->debugfs_phy, ar,
1699 &fops_keepalive);
1700
1701 debugfs_create_file("disconnect_timeout", S_IRUSR | S_IWUSR,
1702 ar->debugfs_phy, ar, &fops_disconnect_timeout);
1703
1704 debugfs_create_file("create_qos", S_IWUSR, ar->debugfs_phy, ar,
1705 &fops_create_qos);
1706
1707 debugfs_create_file("delete_qos", S_IWUSR, ar->debugfs_phy, ar,
1708 &fops_delete_qos);
1709
1710 debugfs_create_file("bgscan_interval", S_IWUSR,
1711 ar->debugfs_phy, ar, &fops_bgscan_int);
1712
1713 debugfs_create_file("power_params", S_IWUSR, ar->debugfs_phy, ar,
1714 &fops_power_params);
1715
926 return 0; 1716 return 0;
927} 1717}
928 1718
@@ -930,6 +1720,7 @@ void ath6kl_debug_cleanup(struct ath6kl *ar)
930{ 1720{
931 vfree(ar->debug.fwlog_buf.buf); 1721 vfree(ar->debug.fwlog_buf.buf);
932 kfree(ar->debug.fwlog_tmp); 1722 kfree(ar->debug.fwlog_tmp);
1723 kfree(ar->debug.roam_tbl);
933} 1724}
934 1725
935#endif 1726#endif
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index 7b7675f70a10..9853c9c125c1 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -17,19 +17,19 @@
17#ifndef DEBUG_H 17#ifndef DEBUG_H
18#define DEBUG_H 18#define DEBUG_H
19 19
20#include "htc_hif.h" 20#include "hif.h"
21 21
22enum ATH6K_DEBUG_MASK { 22enum ATH6K_DEBUG_MASK {
23 ATH6KL_DBG_WLAN_CONNECT = BIT(0), /* wlan connect */ 23 ATH6KL_DBG_CREDIT = BIT(0),
24 ATH6KL_DBG_WLAN_SCAN = BIT(1), /* wlan scan */ 24 /* hole */
25 ATH6KL_DBG_WLAN_TX = BIT(2), /* wlan tx */ 25 ATH6KL_DBG_WLAN_TX = BIT(2), /* wlan tx */
26 ATH6KL_DBG_WLAN_RX = BIT(3), /* wlan rx */ 26 ATH6KL_DBG_WLAN_RX = BIT(3), /* wlan rx */
27 ATH6KL_DBG_BMI = BIT(4), /* bmi tracing */ 27 ATH6KL_DBG_BMI = BIT(4), /* bmi tracing */
28 ATH6KL_DBG_HTC_SEND = BIT(5), /* htc send */ 28 ATH6KL_DBG_HTC = BIT(5),
29 ATH6KL_DBG_HTC_RECV = BIT(6), /* htc recv */ 29 ATH6KL_DBG_HIF = BIT(6),
30 ATH6KL_DBG_IRQ = BIT(7), /* interrupt processing */ 30 ATH6KL_DBG_IRQ = BIT(7), /* interrupt processing */
31 ATH6KL_DBG_PM = BIT(8), /* power management */ 31 /* hole */
32 ATH6KL_DBG_WLAN_NODE = BIT(9), /* general wlan node tracing */ 32 /* hole */
33 ATH6KL_DBG_WMI = BIT(10), /* wmi tracing */ 33 ATH6KL_DBG_WMI = BIT(10), /* wmi tracing */
34 ATH6KL_DBG_TRC = BIT(11), /* generic func tracing */ 34 ATH6KL_DBG_TRC = BIT(11), /* generic func tracing */
35 ATH6KL_DBG_SCATTER = BIT(12), /* hif scatter tracing */ 35 ATH6KL_DBG_SCATTER = BIT(12), /* hif scatter tracing */
@@ -40,6 +40,7 @@ enum ATH6K_DEBUG_MASK {
40 ATH6KL_DBG_SDIO_DUMP = BIT(17), 40 ATH6KL_DBG_SDIO_DUMP = BIT(17),
41 ATH6KL_DBG_BOOT = BIT(18), /* driver init and fw boot */ 41 ATH6KL_DBG_BOOT = BIT(18), /* driver init and fw boot */
42 ATH6KL_DBG_WMI_DUMP = BIT(19), 42 ATH6KL_DBG_WMI_DUMP = BIT(19),
43 ATH6KL_DBG_SUSPEND = BIT(20),
43 ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */ 44 ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */
44}; 45};
45 46
@@ -90,6 +91,10 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
90void dump_cred_dist_stats(struct htc_target *target); 91void dump_cred_dist_stats(struct htc_target *target);
91void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len); 92void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len);
92void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war); 93void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war);
94int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf,
95 size_t len);
96void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive);
97void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout);
93int ath6kl_debug_init(struct ath6kl *ar); 98int ath6kl_debug_init(struct ath6kl *ar);
94void ath6kl_debug_cleanup(struct ath6kl *ar); 99void ath6kl_debug_cleanup(struct ath6kl *ar);
95 100
@@ -125,6 +130,21 @@ static inline void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war)
125{ 130{
126} 131}
127 132
133static inline int ath6kl_debug_roam_tbl_event(struct ath6kl *ar,
134 const void *buf, size_t len)
135{
136 return 0;
137}
138
139static inline void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive)
140{
141}
142
143static inline void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar,
144 u8 timeout)
145{
146}
147
128static inline int ath6kl_debug_init(struct ath6kl *ar) 148static inline int ath6kl_debug_init(struct ath6kl *ar)
129{ 149{
130 return 0; 150 return 0;
diff --git a/drivers/net/wireless/ath/ath6kl/hif-ops.h b/drivers/net/wireless/ath/ath6kl/hif-ops.h
index d6c898f3d0b3..eed22870448b 100644
--- a/drivers/net/wireless/ath/ath6kl/hif-ops.h
+++ b/drivers/net/wireless/ath/ath6kl/hif-ops.h
@@ -18,10 +18,16 @@
18#define HIF_OPS_H 18#define HIF_OPS_H
19 19
20#include "hif.h" 20#include "hif.h"
21#include "debug.h"
21 22
22static inline int hif_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf, 23static inline int hif_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
23 u32 len, u32 request) 24 u32 len, u32 request)
24{ 25{
26 ath6kl_dbg(ATH6KL_DBG_HIF,
27 "hif %s sync addr 0x%x buf 0x%p len %d request 0x%x\n",
28 (request & HIF_WRITE) ? "write" : "read",
29 addr, buf, len, request);
30
25 return ar->hif_ops->read_write_sync(ar, addr, buf, len, request); 31 return ar->hif_ops->read_write_sync(ar, addr, buf, len, request);
26} 32}
27 33
@@ -29,16 +35,24 @@ static inline int hif_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
29 u32 length, u32 request, 35 u32 length, u32 request,
30 struct htc_packet *packet) 36 struct htc_packet *packet)
31{ 37{
38 ath6kl_dbg(ATH6KL_DBG_HIF,
39 "hif write async addr 0x%x buf 0x%p len %d request 0x%x\n",
40 address, buffer, length, request);
41
32 return ar->hif_ops->write_async(ar, address, buffer, length, 42 return ar->hif_ops->write_async(ar, address, buffer, length,
33 request, packet); 43 request, packet);
34} 44}
35static inline void ath6kl_hif_irq_enable(struct ath6kl *ar) 45static inline void ath6kl_hif_irq_enable(struct ath6kl *ar)
36{ 46{
47 ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq enable\n");
48
37 return ar->hif_ops->irq_enable(ar); 49 return ar->hif_ops->irq_enable(ar);
38} 50}
39 51
40static inline void ath6kl_hif_irq_disable(struct ath6kl *ar) 52static inline void ath6kl_hif_irq_disable(struct ath6kl *ar)
41{ 53{
54 ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq disable\n");
55
42 return ar->hif_ops->irq_disable(ar); 56 return ar->hif_ops->irq_disable(ar);
43} 57}
44 58
@@ -69,9 +83,40 @@ static inline void ath6kl_hif_cleanup_scatter(struct ath6kl *ar)
69 return ar->hif_ops->cleanup_scatter(ar); 83 return ar->hif_ops->cleanup_scatter(ar);
70} 84}
71 85
72static inline int ath6kl_hif_suspend(struct ath6kl *ar) 86static inline int ath6kl_hif_suspend(struct ath6kl *ar,
87 struct cfg80211_wowlan *wow)
73{ 88{
74 return ar->hif_ops->suspend(ar); 89 ath6kl_dbg(ATH6KL_DBG_HIF, "hif suspend\n");
90
91 return ar->hif_ops->suspend(ar, wow);
92}
93
94static inline int ath6kl_hif_resume(struct ath6kl *ar)
95{
96 ath6kl_dbg(ATH6KL_DBG_HIF, "hif resume\n");
97
98 return ar->hif_ops->resume(ar);
99}
100
101static inline int ath6kl_hif_power_on(struct ath6kl *ar)
102{
103 ath6kl_dbg(ATH6KL_DBG_HIF, "hif power on\n");
104
105 return ar->hif_ops->power_on(ar);
106}
107
108static inline int ath6kl_hif_power_off(struct ath6kl *ar)
109{
110 ath6kl_dbg(ATH6KL_DBG_HIF, "hif power off\n");
111
112 return ar->hif_ops->power_off(ar);
113}
114
115static inline void ath6kl_hif_stop(struct ath6kl *ar)
116{
117 ath6kl_dbg(ATH6KL_DBG_HIF, "hif stop\n");
118
119 ar->hif_ops->stop(ar);
75} 120}
76 121
77#endif 122#endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc_hif.c b/drivers/net/wireless/ath/ath6kl/hif.c
index 86b1cc7409c2..e57da35e59fa 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_hif.c
+++ b/drivers/net/wireless/ath/ath6kl/hif.c
@@ -13,18 +13,19 @@
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16#include "hif.h"
16 17
17#include "core.h" 18#include "core.h"
18#include "target.h" 19#include "target.h"
19#include "hif-ops.h" 20#include "hif-ops.h"
20#include "htc_hif.h"
21#include "debug.h" 21#include "debug.h"
22 22
23#define MAILBOX_FOR_BLOCK_SIZE 1 23#define MAILBOX_FOR_BLOCK_SIZE 1
24 24
25#define ATH6KL_TIME_QUANTUM 10 /* in ms */ 25#define ATH6KL_TIME_QUANTUM 10 /* in ms */
26 26
27static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma) 27static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req,
28 bool from_dma)
28{ 29{
29 u8 *buf; 30 u8 *buf;
30 int i; 31 int i;
@@ -46,12 +47,11 @@ static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma)
46 return 0; 47 return 0;
47} 48}
48 49
49int ath6kldev_rw_comp_handler(void *context, int status) 50int ath6kl_hif_rw_comp_handler(void *context, int status)
50{ 51{
51 struct htc_packet *packet = context; 52 struct htc_packet *packet = context;
52 53
53 ath6kl_dbg(ATH6KL_DBG_HTC_RECV, 54 ath6kl_dbg(ATH6KL_DBG_HIF, "hif rw completion pkt 0x%p status %d\n",
54 "ath6kldev_rw_comp_handler (pkt:0x%p , status: %d\n",
55 packet, status); 55 packet, status);
56 56
57 packet->status = status; 57 packet->status = status;
@@ -59,30 +59,83 @@ int ath6kldev_rw_comp_handler(void *context, int status)
59 59
60 return 0; 60 return 0;
61} 61}
62#define REG_DUMP_COUNT_AR6003 60
63#define REGISTER_DUMP_LEN_MAX 60
62 64
63static int ath6kldev_proc_dbg_intr(struct ath6kl_device *dev) 65static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
64{ 66{
65 u32 dummy; 67 __le32 regdump_val[REGISTER_DUMP_LEN_MAX];
66 int status; 68 u32 i, address, regdump_addr = 0;
69 int ret;
70
71 if (ar->target_type != TARGET_TYPE_AR6003)
72 return;
73
74 /* the reg dump pointer is copied to the host interest area */
75 address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
76 address = TARG_VTOP(ar->target_type, address);
77
78 /* read RAM location through diagnostic window */
79 ret = ath6kl_diag_read32(ar, address, &regdump_addr);
80
81 if (ret || !regdump_addr) {
82 ath6kl_warn("failed to get ptr to register dump area: %d\n",
83 ret);
84 return;
85 }
86
87 ath6kl_dbg(ATH6KL_DBG_IRQ, "register dump data address 0x%x\n",
88 regdump_addr);
89 regdump_addr = TARG_VTOP(ar->target_type, regdump_addr);
90
91 /* fetch register dump data */
92 ret = ath6kl_diag_read(ar, regdump_addr, (u8 *)&regdump_val[0],
93 REG_DUMP_COUNT_AR6003 * (sizeof(u32)));
94 if (ret) {
95 ath6kl_warn("failed to get register dump: %d\n", ret);
96 return;
97 }
98
99 ath6kl_info("crash dump:\n");
100 ath6kl_info("hw 0x%x fw %s\n", ar->wiphy->hw_version,
101 ar->wiphy->fw_version);
102
103 BUILD_BUG_ON(REG_DUMP_COUNT_AR6003 % 4);
67 104
68 ath6kl_err("target debug interrupt\n"); 105 for (i = 0; i < REG_DUMP_COUNT_AR6003 / 4; i++) {
106 ath6kl_info("%d: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n",
107 4 * i,
108 le32_to_cpu(regdump_val[i]),
109 le32_to_cpu(regdump_val[i + 1]),
110 le32_to_cpu(regdump_val[i + 2]),
111 le32_to_cpu(regdump_val[i + 3]));
112 }
113
114}
115
116static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
117{
118 u32 dummy;
119 int ret;
69 120
70 ath6kl_target_failure(dev->ar); 121 ath6kl_warn("firmware crashed\n");
71 122
72 /* 123 /*
73 * read counter to clear the interrupt, the debug error interrupt is 124 * read counter to clear the interrupt, the debug error interrupt is
74 * counter 0. 125 * counter 0.
75 */ 126 */
76 status = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS, 127 ret = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
77 (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC); 128 (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC);
78 if (status) 129 if (ret)
79 WARN_ON(1); 130 ath6kl_warn("Failed to clear debug interrupt: %d\n", ret);
80 131
81 return status; 132 ath6kl_hif_dump_fw_crash(dev->ar);
133
134 return ret;
82} 135}
83 136
84/* mailbox recv message polling */ 137/* mailbox recv message polling */
85int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd, 138int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
86 int timeout) 139 int timeout)
87{ 140{
88 struct ath6kl_irq_proc_registers *rg; 141 struct ath6kl_irq_proc_registers *rg;
@@ -118,7 +171,7 @@ int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
118 171
119 /* delay a little */ 172 /* delay a little */
120 mdelay(ATH6KL_TIME_QUANTUM); 173 mdelay(ATH6KL_TIME_QUANTUM);
121 ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "retry mbox poll : %d\n", i); 174 ath6kl_dbg(ATH6KL_DBG_HIF, "hif retry mbox poll try %d\n", i);
122 } 175 }
123 176
124 if (i == 0) { 177 if (i == 0) {
@@ -131,7 +184,7 @@ int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
131 * Target failure handler will be called in case of 184 * Target failure handler will be called in case of
132 * an assert. 185 * an assert.
133 */ 186 */
134 ath6kldev_proc_dbg_intr(dev); 187 ath6kl_hif_proc_dbg_intr(dev);
135 } 188 }
136 189
137 return status; 190 return status;
@@ -141,11 +194,14 @@ int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
141 * Disable packet reception (used in case the host runs out of buffers) 194 * Disable packet reception (used in case the host runs out of buffers)
142 * using the interrupt enable registers through the host I/F 195 * using the interrupt enable registers through the host I/F
143 */ 196 */
144int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx) 197int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx)
145{ 198{
146 struct ath6kl_irq_enable_reg regs; 199 struct ath6kl_irq_enable_reg regs;
147 int status = 0; 200 int status = 0;
148 201
202 ath6kl_dbg(ATH6KL_DBG_HIF, "hif rx %s\n",
203 enable_rx ? "enable" : "disable");
204
149 /* take the lock to protect interrupt enable shadows */ 205 /* take the lock to protect interrupt enable shadows */
150 spin_lock_bh(&dev->lock); 206 spin_lock_bh(&dev->lock);
151 207
@@ -168,7 +224,7 @@ int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx)
168 return status; 224 return status;
169} 225}
170 226
171int ath6kldev_submit_scat_req(struct ath6kl_device *dev, 227int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev,
172 struct hif_scatter_req *scat_req, bool read) 228 struct hif_scatter_req *scat_req, bool read)
173{ 229{
174 int status = 0; 230 int status = 0;
@@ -185,14 +241,14 @@ int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
185 dev->ar->mbox_info.htc_addr; 241 dev->ar->mbox_info.htc_addr;
186 } 242 }
187 243
188 ath6kl_dbg((ATH6KL_DBG_HTC_RECV | ATH6KL_DBG_HTC_SEND), 244 ath6kl_dbg(ATH6KL_DBG_HIF,
189 "ath6kldev_submit_scat_req, entries: %d, total len: %d mbox:0x%X (mode: %s : %s)\n", 245 "hif submit scatter request entries %d len %d mbox 0x%x %s %s\n",
190 scat_req->scat_entries, scat_req->len, 246 scat_req->scat_entries, scat_req->len,
191 scat_req->addr, !read ? "async" : "sync", 247 scat_req->addr, !read ? "async" : "sync",
192 (read) ? "rd" : "wr"); 248 (read) ? "rd" : "wr");
193 249
194 if (!read && scat_req->virt_scat) { 250 if (!read && scat_req->virt_scat) {
195 status = ath6kldev_cp_scat_dma_buf(scat_req, false); 251 status = ath6kl_hif_cp_scat_dma_buf(scat_req, false);
196 if (status) { 252 if (status) {
197 scat_req->status = status; 253 scat_req->status = status;
198 scat_req->complete(dev->ar->htc_target, scat_req); 254 scat_req->complete(dev->ar->htc_target, scat_req);
@@ -207,13 +263,13 @@ int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
207 scat_req->status = status; 263 scat_req->status = status;
208 if (!status && scat_req->virt_scat) 264 if (!status && scat_req->virt_scat)
209 scat_req->status = 265 scat_req->status =
210 ath6kldev_cp_scat_dma_buf(scat_req, true); 266 ath6kl_hif_cp_scat_dma_buf(scat_req, true);
211 } 267 }
212 268
213 return status; 269 return status;
214} 270}
215 271
216static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev) 272static int ath6kl_hif_proc_counter_intr(struct ath6kl_device *dev)
217{ 273{
218 u8 counter_int_status; 274 u8 counter_int_status;
219 275
@@ -232,12 +288,12 @@ static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev)
232 * the debug assertion counter interrupt. 288 * the debug assertion counter interrupt.
233 */ 289 */
234 if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK) 290 if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK)
235 return ath6kldev_proc_dbg_intr(dev); 291 return ath6kl_hif_proc_dbg_intr(dev);
236 292
237 return 0; 293 return 0;
238} 294}
239 295
240static int ath6kldev_proc_err_intr(struct ath6kl_device *dev) 296static int ath6kl_hif_proc_err_intr(struct ath6kl_device *dev)
241{ 297{
242 int status; 298 int status;
243 u8 error_int_status; 299 u8 error_int_status;
@@ -282,7 +338,7 @@ static int ath6kldev_proc_err_intr(struct ath6kl_device *dev)
282 return status; 338 return status;
283} 339}
284 340
285static int ath6kldev_proc_cpu_intr(struct ath6kl_device *dev) 341static int ath6kl_hif_proc_cpu_intr(struct ath6kl_device *dev)
286{ 342{
287 int status; 343 int status;
288 u8 cpu_int_status; 344 u8 cpu_int_status;
@@ -417,7 +473,7 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
417 * we rapidly pull packets. 473 * we rapidly pull packets.
418 */ 474 */
419 status = ath6kl_htc_rxmsg_pending_handler(dev->htc_cnxt, 475 status = ath6kl_htc_rxmsg_pending_handler(dev->htc_cnxt,
420 &lk_ahd, &fetched); 476 lk_ahd, &fetched);
421 if (status) 477 if (status)
422 goto out; 478 goto out;
423 479
@@ -436,21 +492,21 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
436 492
437 if (MS(HOST_INT_STATUS_CPU, host_int_status)) { 493 if (MS(HOST_INT_STATUS_CPU, host_int_status)) {
438 /* CPU Interrupt */ 494 /* CPU Interrupt */
439 status = ath6kldev_proc_cpu_intr(dev); 495 status = ath6kl_hif_proc_cpu_intr(dev);
440 if (status) 496 if (status)
441 goto out; 497 goto out;
442 } 498 }
443 499
444 if (MS(HOST_INT_STATUS_ERROR, host_int_status)) { 500 if (MS(HOST_INT_STATUS_ERROR, host_int_status)) {
445 /* Error Interrupt */ 501 /* Error Interrupt */
446 status = ath6kldev_proc_err_intr(dev); 502 status = ath6kl_hif_proc_err_intr(dev);
447 if (status) 503 if (status)
448 goto out; 504 goto out;
449 } 505 }
450 506
451 if (MS(HOST_INT_STATUS_COUNTER, host_int_status)) 507 if (MS(HOST_INT_STATUS_COUNTER, host_int_status))
452 /* Counter Interrupt */ 508 /* Counter Interrupt */
453 status = ath6kldev_proc_counter_intr(dev); 509 status = ath6kl_hif_proc_counter_intr(dev);
454 510
455out: 511out:
456 /* 512 /*
@@ -479,9 +535,10 @@ out:
479} 535}
480 536
481/* interrupt handler, kicks off all interrupt processing */ 537/* interrupt handler, kicks off all interrupt processing */
482int ath6kldev_intr_bh_handler(struct ath6kl *ar) 538int ath6kl_hif_intr_bh_handler(struct ath6kl *ar)
483{ 539{
484 struct ath6kl_device *dev = ar->htc_target->dev; 540 struct ath6kl_device *dev = ar->htc_target->dev;
541 unsigned long timeout;
485 int status = 0; 542 int status = 0;
486 bool done = false; 543 bool done = false;
487 544
@@ -495,7 +552,8 @@ int ath6kldev_intr_bh_handler(struct ath6kl *ar)
495 * IRQ processing is synchronous, interrupt status registers can be 552 * IRQ processing is synchronous, interrupt status registers can be
496 * re-read. 553 * re-read.
497 */ 554 */
498 while (!done) { 555 timeout = jiffies + msecs_to_jiffies(ATH6KL_HIF_COMMUNICATION_TIMEOUT);
556 while (time_before(jiffies, timeout) && !done) {
499 status = proc_pending_irqs(dev, &done); 557 status = proc_pending_irqs(dev, &done);
500 if (status) 558 if (status)
501 break; 559 break;
@@ -504,7 +562,7 @@ int ath6kldev_intr_bh_handler(struct ath6kl *ar)
504 return status; 562 return status;
505} 563}
506 564
507static int ath6kldev_enable_intrs(struct ath6kl_device *dev) 565static int ath6kl_hif_enable_intrs(struct ath6kl_device *dev)
508{ 566{
509 struct ath6kl_irq_enable_reg regs; 567 struct ath6kl_irq_enable_reg regs;
510 int status; 568 int status;
@@ -552,7 +610,7 @@ static int ath6kldev_enable_intrs(struct ath6kl_device *dev)
552 return status; 610 return status;
553} 611}
554 612
555int ath6kldev_disable_intrs(struct ath6kl_device *dev) 613int ath6kl_hif_disable_intrs(struct ath6kl_device *dev)
556{ 614{
557 struct ath6kl_irq_enable_reg regs; 615 struct ath6kl_irq_enable_reg regs;
558 616
@@ -571,7 +629,7 @@ int ath6kldev_disable_intrs(struct ath6kl_device *dev)
571} 629}
572 630
573/* enable device interrupts */ 631/* enable device interrupts */
574int ath6kldev_unmask_intrs(struct ath6kl_device *dev) 632int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev)
575{ 633{
576 int status = 0; 634 int status = 0;
577 635
@@ -583,29 +641,29 @@ int ath6kldev_unmask_intrs(struct ath6kl_device *dev)
583 * target "soft" resets. The ATH6KL interrupt enables reset back to an 641 * target "soft" resets. The ATH6KL interrupt enables reset back to an
584 * "enabled" state when this happens. 642 * "enabled" state when this happens.
585 */ 643 */
586 ath6kldev_disable_intrs(dev); 644 ath6kl_hif_disable_intrs(dev);
587 645
588 /* unmask the host controller interrupts */ 646 /* unmask the host controller interrupts */
589 ath6kl_hif_irq_enable(dev->ar); 647 ath6kl_hif_irq_enable(dev->ar);
590 status = ath6kldev_enable_intrs(dev); 648 status = ath6kl_hif_enable_intrs(dev);
591 649
592 return status; 650 return status;
593} 651}
594 652
595/* disable all device interrupts */ 653/* disable all device interrupts */
596int ath6kldev_mask_intrs(struct ath6kl_device *dev) 654int ath6kl_hif_mask_intrs(struct ath6kl_device *dev)
597{ 655{
598 /* 656 /*
599 * Mask the interrupt at the HIF layer to avoid any stray interrupt 657 * Mask the interrupt at the HIF layer to avoid any stray interrupt
600 * taken while we zero out our shadow registers in 658 * taken while we zero out our shadow registers in
601 * ath6kldev_disable_intrs(). 659 * ath6kl_hif_disable_intrs().
602 */ 660 */
603 ath6kl_hif_irq_disable(dev->ar); 661 ath6kl_hif_irq_disable(dev->ar);
604 662
605 return ath6kldev_disable_intrs(dev); 663 return ath6kl_hif_disable_intrs(dev);
606} 664}
607 665
608int ath6kldev_setup(struct ath6kl_device *dev) 666int ath6kl_hif_setup(struct ath6kl_device *dev)
609{ 667{
610 int status = 0; 668 int status = 0;
611 669
@@ -621,19 +679,17 @@ int ath6kldev_setup(struct ath6kl_device *dev)
621 /* must be a power of 2 */ 679 /* must be a power of 2 */
622 if ((dev->htc_cnxt->block_sz & (dev->htc_cnxt->block_sz - 1)) != 0) { 680 if ((dev->htc_cnxt->block_sz & (dev->htc_cnxt->block_sz - 1)) != 0) {
623 WARN_ON(1); 681 WARN_ON(1);
682 status = -EINVAL;
624 goto fail_setup; 683 goto fail_setup;
625 } 684 }
626 685
627 /* assemble mask, used for padding to a block */ 686 /* assemble mask, used for padding to a block */
628 dev->htc_cnxt->block_mask = dev->htc_cnxt->block_sz - 1; 687 dev->htc_cnxt->block_mask = dev->htc_cnxt->block_sz - 1;
629 688
630 ath6kl_dbg(ATH6KL_DBG_TRC, "block size: %d, mbox addr:0x%X\n", 689 ath6kl_dbg(ATH6KL_DBG_HIF, "hif block size %d mbox addr 0x%x\n",
631 dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr); 690 dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr);
632 691
633 ath6kl_dbg(ATH6KL_DBG_TRC, 692 status = ath6kl_hif_disable_intrs(dev);
634 "hif interrupt processing is sync only\n");
635
636 status = ath6kldev_disable_intrs(dev);
637 693
638fail_setup: 694fail_setup:
639 return status; 695 return status;
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
index 797e2d1d9bf9..f2dc3bcdae4a 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.h
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -59,6 +59,18 @@
59/* mode to enable special 4-bit interrupt assertion without clock */ 59/* mode to enable special 4-bit interrupt assertion without clock */
60#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ (1 << 0) 60#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ (1 << 0)
61 61
62/* HTC runs over mailbox 0 */
63#define HTC_MAILBOX 0
64
65#define ATH6KL_TARGET_DEBUG_INTR_MASK 0x01
66
67/* FIXME: are these duplicates with MAX_SCATTER_ values in hif.h? */
68#define ATH6KL_SCATTER_ENTRIES_PER_REQ 16
69#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER (16 * 1024)
70#define ATH6KL_SCATTER_REQS 4
71
72#define ATH6KL_HIF_COMMUNICATION_TIMEOUT 1000
73
62struct bus_request { 74struct bus_request {
63 struct list_head list; 75 struct list_head list;
64 76
@@ -186,6 +198,34 @@ struct hif_scatter_req {
186 struct hif_scatter_item scat_list[1]; 198 struct hif_scatter_item scat_list[1];
187}; 199};
188 200
201struct ath6kl_irq_proc_registers {
202 u8 host_int_status;
203 u8 cpu_int_status;
204 u8 error_int_status;
205 u8 counter_int_status;
206 u8 mbox_frame;
207 u8 rx_lkahd_valid;
208 u8 host_int_status2;
209 u8 gmbox_rx_avail;
210 __le32 rx_lkahd[2];
211 __le32 rx_gmbox_lkahd_alias[2];
212} __packed;
213
214struct ath6kl_irq_enable_reg {
215 u8 int_status_en;
216 u8 cpu_int_status_en;
217 u8 err_int_status_en;
218 u8 cntr_int_status_en;
219} __packed;
220
221struct ath6kl_device {
222 spinlock_t lock;
223 struct ath6kl_irq_proc_registers irq_proc_reg;
224 struct ath6kl_irq_enable_reg irq_en_reg;
225 struct htc_target *htc_cnxt;
226 struct ath6kl *ar;
227};
228
189struct ath6kl_hif_ops { 229struct ath6kl_hif_ops {
190 int (*read_write_sync)(struct ath6kl *ar, u32 addr, u8 *buf, 230 int (*read_write_sync)(struct ath6kl *ar, u32 addr, u8 *buf,
191 u32 len, u32 request); 231 u32 len, u32 request);
@@ -202,7 +242,26 @@ struct ath6kl_hif_ops {
202 int (*scat_req_rw) (struct ath6kl *ar, 242 int (*scat_req_rw) (struct ath6kl *ar,
203 struct hif_scatter_req *scat_req); 243 struct hif_scatter_req *scat_req);
204 void (*cleanup_scatter)(struct ath6kl *ar); 244 void (*cleanup_scatter)(struct ath6kl *ar);
205 int (*suspend)(struct ath6kl *ar); 245 int (*suspend)(struct ath6kl *ar, struct cfg80211_wowlan *wow);
246 int (*resume)(struct ath6kl *ar);
247 int (*power_on)(struct ath6kl *ar);
248 int (*power_off)(struct ath6kl *ar);
249 void (*stop)(struct ath6kl *ar);
206}; 250};
207 251
252int ath6kl_hif_setup(struct ath6kl_device *dev);
253int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev);
254int ath6kl_hif_mask_intrs(struct ath6kl_device *dev);
255int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev,
256 u32 *lk_ahd, int timeout);
257int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx);
258int ath6kl_hif_disable_intrs(struct ath6kl_device *dev);
259
260int ath6kl_hif_rw_comp_handler(void *context, int status);
261int ath6kl_hif_intr_bh_handler(struct ath6kl *ar);
262
263/* Scatter Function and Definitions */
264int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev,
265 struct hif_scatter_req *scat_req, bool read);
266
208#endif 267#endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc.c b/drivers/net/wireless/ath/ath6kl/htc.c
index f88a7c9e4148..f3b63ca25c7e 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.c
+++ b/drivers/net/wireless/ath/ath6kl/htc.c
@@ -15,13 +15,321 @@
15 */ 15 */
16 16
17#include "core.h" 17#include "core.h"
18#include "htc_hif.h" 18#include "hif.h"
19#include "debug.h" 19#include "debug.h"
20#include "hif-ops.h" 20#include "hif-ops.h"
21#include <asm/unaligned.h> 21#include <asm/unaligned.h>
22 22
23#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask)) 23#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
24 24
25/* Functions for Tx credit handling */
26static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
27 struct htc_endpoint_credit_dist *ep_dist,
28 int credits)
29{
30 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n",
31 ep_dist->endpoint, credits);
32
33 ep_dist->credits += credits;
34 ep_dist->cred_assngd += credits;
35 cred_info->cur_free_credits -= credits;
36}
37
38static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
39 struct list_head *ep_list,
40 int tot_credits)
41{
42 struct htc_endpoint_credit_dist *cur_ep_dist;
43 int count;
44
45 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits);
46
47 cred_info->cur_free_credits = tot_credits;
48 cred_info->total_avail_credits = tot_credits;
49
50 list_for_each_entry(cur_ep_dist, ep_list, list) {
51 if (cur_ep_dist->endpoint == ENDPOINT_0)
52 continue;
53
54 cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
55
56 if (tot_credits > 4) {
57 if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
58 (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
59 ath6kl_credit_deposit(cred_info,
60 cur_ep_dist,
61 cur_ep_dist->cred_min);
62 cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
63 }
64 }
65
66 if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
67 ath6kl_credit_deposit(cred_info, cur_ep_dist,
68 cur_ep_dist->cred_min);
69 /*
70 * Control service is always marked active, it
71 * never goes inactive EVER.
72 */
73 cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
74 } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
75 /* this is the lowest priority data endpoint */
76 /* FIXME: this looks fishy, check */
77 cred_info->lowestpri_ep_dist = cur_ep_dist->list;
78
79 /*
80 * Streams have to be created (explicit | implicit) for all
81 * kinds of traffic. BE endpoints are also inactive in the
82 * beginning. When BE traffic starts it creates implicit
83 * streams that redistributes credits.
84 *
85 * Note: all other endpoints have minimums set but are
86 * initially given NO credits. credits will be distributed
87 * as traffic activity demands
88 */
89 }
90
91 WARN_ON(cred_info->cur_free_credits <= 0);
92
93 list_for_each_entry(cur_ep_dist, ep_list, list) {
94 if (cur_ep_dist->endpoint == ENDPOINT_0)
95 continue;
96
97 if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
98 cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
99 else {
100 /*
101 * For the remaining data endpoints, we assume that
102 * each cred_per_msg are the same. We use a simple
103 * calculation here, we take the remaining credits
104 * and determine how many max messages this can
105 * cover and then set each endpoint's normal value
106 * equal to 3/4 this amount.
107 */
108 count = (cred_info->cur_free_credits /
109 cur_ep_dist->cred_per_msg)
110 * cur_ep_dist->cred_per_msg;
111 count = (count * 3) >> 2;
112 count = max(count, cur_ep_dist->cred_per_msg);
113 cur_ep_dist->cred_norm = count;
114
115 }
116
117 ath6kl_dbg(ATH6KL_DBG_CREDIT,
118 "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n",
119 cur_ep_dist->endpoint,
120 cur_ep_dist->svc_id,
121 cur_ep_dist->credits,
122 cur_ep_dist->cred_per_msg,
123 cur_ep_dist->cred_norm,
124 cur_ep_dist->cred_min);
125 }
126}
127
128/* initialize and setup credit distribution */
129int ath6kl_credit_setup(void *htc_handle,
130 struct ath6kl_htc_credit_info *cred_info)
131{
132 u16 servicepriority[5];
133
134 memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info));
135
136 servicepriority[0] = WMI_CONTROL_SVC; /* highest */
137 servicepriority[1] = WMI_DATA_VO_SVC;
138 servicepriority[2] = WMI_DATA_VI_SVC;
139 servicepriority[3] = WMI_DATA_BE_SVC;
140 servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
141
142 /* set priority list */
143 ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
144
145 return 0;
146}
147
148/* reduce an ep's credits back to a set limit */
149static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
150 struct htc_endpoint_credit_dist *ep_dist,
151 int limit)
152{
153 int credits;
154
155 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n",
156 ep_dist->endpoint, limit);
157
158 ep_dist->cred_assngd = limit;
159
160 if (ep_dist->credits <= limit)
161 return;
162
163 credits = ep_dist->credits - limit;
164 ep_dist->credits -= credits;
165 cred_info->cur_free_credits += credits;
166}
167
168static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
169 struct list_head *epdist_list)
170{
171 struct htc_endpoint_credit_dist *cur_dist_list;
172
173 list_for_each_entry(cur_dist_list, epdist_list, list) {
174 if (cur_dist_list->endpoint == ENDPOINT_0)
175 continue;
176
177 if (cur_dist_list->cred_to_dist > 0) {
178 cur_dist_list->credits +=
179 cur_dist_list->cred_to_dist;
180 cur_dist_list->cred_to_dist = 0;
181 if (cur_dist_list->credits >
182 cur_dist_list->cred_assngd)
183 ath6kl_credit_reduce(cred_info,
184 cur_dist_list,
185 cur_dist_list->cred_assngd);
186
187 if (cur_dist_list->credits >
188 cur_dist_list->cred_norm)
189 ath6kl_credit_reduce(cred_info, cur_dist_list,
190 cur_dist_list->cred_norm);
191
192 if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
193 if (cur_dist_list->txq_depth == 0)
194 ath6kl_credit_reduce(cred_info,
195 cur_dist_list, 0);
196 }
197 }
198 }
199}
200
201/*
202 * HTC has an endpoint that needs credits, ep_dist is the endpoint in
203 * question.
204 */
205static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info,
206 struct htc_endpoint_credit_dist *ep_dist)
207{
208 struct htc_endpoint_credit_dist *curdist_list;
209 int credits = 0;
210 int need;
211
212 if (ep_dist->svc_id == WMI_CONTROL_SVC)
213 goto out;
214
215 if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
216 (ep_dist->svc_id == WMI_DATA_VO_SVC))
217 if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
218 goto out;
219
220 /*
221 * For all other services, we follow a simple algorithm of:
222 *
223 * 1. checking the free pool for credits
224 * 2. checking lower priority endpoints for credits to take
225 */
226
227 credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
228
229 if (credits >= ep_dist->seek_cred)
230 goto out;
231
232 /*
233 * We don't have enough in the free pool, try taking away from
234 * lower priority services The rule for taking away credits:
235 *
236 * 1. Only take from lower priority endpoints
237 * 2. Only take what is allocated above the minimum (never
238 * starve an endpoint completely)
239 * 3. Only take what you need.
240 */
241
242 list_for_each_entry_reverse(curdist_list,
243 &cred_info->lowestpri_ep_dist,
244 list) {
245 if (curdist_list == ep_dist)
246 break;
247
248 need = ep_dist->seek_cred - cred_info->cur_free_credits;
249
250 if ((curdist_list->cred_assngd - need) >=
251 curdist_list->cred_min) {
252 /*
253 * The current one has been allocated more than
254 * it's minimum and it has enough credits assigned
255 * above it's minimum to fulfill our need try to
256 * take away just enough to fulfill our need.
257 */
258 ath6kl_credit_reduce(cred_info, curdist_list,
259 curdist_list->cred_assngd - need);
260
261 if (cred_info->cur_free_credits >=
262 ep_dist->seek_cred)
263 break;
264 }
265
266 if (curdist_list->endpoint == ENDPOINT_0)
267 break;
268 }
269
270 credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
271
272out:
273 /* did we find some credits? */
274 if (credits)
275 ath6kl_credit_deposit(cred_info, ep_dist, credits);
276
277 ep_dist->seek_cred = 0;
278}
279
280/* redistribute credits based on activity change */
281static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info,
282 struct list_head *ep_dist_list)
283{
284 struct htc_endpoint_credit_dist *curdist_list;
285
286 list_for_each_entry(curdist_list, ep_dist_list, list) {
287 if (curdist_list->endpoint == ENDPOINT_0)
288 continue;
289
290 if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
291 (curdist_list->svc_id == WMI_DATA_BE_SVC))
292 curdist_list->dist_flags |= HTC_EP_ACTIVE;
293
294 if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
295 !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
296 if (curdist_list->txq_depth == 0)
297 ath6kl_credit_reduce(info, curdist_list, 0);
298 else
299 ath6kl_credit_reduce(info,
300 curdist_list,
301 curdist_list->cred_min);
302 }
303 }
304}
305
306/*
307 *
308 * This function is invoked whenever endpoints require credit
309 * distributions. A lock is held while this function is invoked, this
310 * function shall NOT block. The ep_dist_list is a list of distribution
311 * structures in prioritized order as defined by the call to the
312 * htc_set_credit_dist() api.
313 */
314static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info,
315 struct list_head *ep_dist_list,
316 enum htc_credit_dist_reason reason)
317{
318 switch (reason) {
319 case HTC_CREDIT_DIST_SEND_COMPLETE:
320 ath6kl_credit_update(cred_info, ep_dist_list);
321 break;
322 case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
323 ath6kl_credit_redistribute(cred_info, ep_dist_list);
324 break;
325 default:
326 break;
327 }
328
329 WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
330 WARN_ON(cred_info->cur_free_credits < 0);
331}
332
25static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len) 333static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
26{ 334{
27 u8 *align_addr; 335 u8 *align_addr;
@@ -102,12 +410,12 @@ static void htc_tx_comp_update(struct htc_target *target,
102 packet->info.tx.cred_used; 410 packet->info.tx.cred_used;
103 endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq); 411 endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);
104 412
105 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n", 413 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n",
106 target->cred_dist_cntxt, &target->cred_dist_list); 414 target->credit_info, &target->cred_dist_list);
107 415
108 ath6k_credit_distribute(target->cred_dist_cntxt, 416 ath6kl_credit_distribute(target->credit_info,
109 &target->cred_dist_list, 417 &target->cred_dist_list,
110 HTC_CREDIT_DIST_SEND_COMPLETE); 418 HTC_CREDIT_DIST_SEND_COMPLETE);
111 419
112 spin_unlock_bh(&target->tx_lock); 420 spin_unlock_bh(&target->tx_lock);
113} 421}
@@ -118,8 +426,8 @@ static void htc_tx_complete(struct htc_endpoint *endpoint,
118 if (list_empty(txq)) 426 if (list_empty(txq))
119 return; 427 return;
120 428
121 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, 429 ath6kl_dbg(ATH6KL_DBG_HTC,
122 "send complete ep %d, (%d pkts)\n", 430 "htc tx complete ep %d pkts %d\n",
123 endpoint->eid, get_queue_depth(txq)); 431 endpoint->eid, get_queue_depth(txq));
124 432
125 ath6kl_tx_complete(endpoint->target->dev->ar, txq); 433 ath6kl_tx_complete(endpoint->target->dev->ar, txq);
@@ -131,6 +439,9 @@ static void htc_tx_comp_handler(struct htc_target *target,
131 struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint]; 439 struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
132 struct list_head container; 440 struct list_head container;
133 441
442 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n",
443 packet->info.tx.seqno);
444
134 htc_tx_comp_update(target, endpoint, packet); 445 htc_tx_comp_update(target, endpoint, packet);
135 INIT_LIST_HEAD(&container); 446 INIT_LIST_HEAD(&container);
136 list_add_tail(&packet->list, &container); 447 list_add_tail(&packet->list, &container);
@@ -148,8 +459,8 @@ static void htc_async_tx_scat_complete(struct htc_target *target,
148 459
149 INIT_LIST_HEAD(&tx_compq); 460 INIT_LIST_HEAD(&tx_compq);
150 461
151 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, 462 ath6kl_dbg(ATH6KL_DBG_HTC,
152 "htc_async_tx_scat_complete total len: %d entries: %d\n", 463 "htc tx scat complete len %d entries %d\n",
153 scat_req->len, scat_req->scat_entries); 464 scat_req->len, scat_req->scat_entries);
154 465
155 if (scat_req->status) 466 if (scat_req->status)
@@ -190,16 +501,13 @@ static int ath6kl_htc_tx_issue(struct htc_target *target,
190 501
191 send_len = packet->act_len + HTC_HDR_LENGTH; 502 send_len = packet->act_len + HTC_HDR_LENGTH;
192 503
193 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s: transmit len : %d (%s)\n",
194 __func__, send_len, sync ? "sync" : "async");
195
196 padded_len = CALC_TXRX_PADDED_LEN(target, send_len); 504 padded_len = CALC_TXRX_PADDED_LEN(target, send_len);
197 505
198 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, 506 ath6kl_dbg(ATH6KL_DBG_HTC,
199 "DevSendPacket, padded len: %d mbox:0x%X (mode:%s)\n", 507 "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n",
200 padded_len, 508 send_len, packet->info.tx.seqno, padded_len,
201 target->dev->ar->mbox_info.htc_addr, 509 target->dev->ar->mbox_info.htc_addr,
202 sync ? "sync" : "async"); 510 sync ? "sync" : "async");
203 511
204 if (sync) { 512 if (sync) {
205 status = hif_read_write_sync(target->dev->ar, 513 status = hif_read_write_sync(target->dev->ar,
@@ -227,7 +535,7 @@ static int htc_check_credits(struct htc_target *target,
227 *req_cred = (len > target->tgt_cred_sz) ? 535 *req_cred = (len > target->tgt_cred_sz) ?
228 DIV_ROUND_UP(len, target->tgt_cred_sz) : 1; 536 DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
229 537
230 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "creds required:%d got:%d\n", 538 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n",
231 *req_cred, ep->cred_dist.credits); 539 *req_cred, ep->cred_dist.credits);
232 540
233 if (ep->cred_dist.credits < *req_cred) { 541 if (ep->cred_dist.credits < *req_cred) {
@@ -237,16 +545,13 @@ static int htc_check_credits(struct htc_target *target,
237 /* Seek more credits */ 545 /* Seek more credits */
238 ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits; 546 ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;
239 547
240 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n", 548 ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
241 target->cred_dist_cntxt, &ep->cred_dist);
242
243 ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
244 549
245 ep->cred_dist.seek_cred = 0; 550 ep->cred_dist.seek_cred = 0;
246 551
247 if (ep->cred_dist.credits < *req_cred) { 552 if (ep->cred_dist.credits < *req_cred) {
248 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, 553 ath6kl_dbg(ATH6KL_DBG_CREDIT,
249 "not enough credits for ep %d - leaving packet in queue\n", 554 "credit not found for ep %d\n",
250 eid); 555 eid);
251 return -EINVAL; 556 return -EINVAL;
252 } 557 }
@@ -260,17 +565,15 @@ static int htc_check_credits(struct htc_target *target,
260 ep->cred_dist.seek_cred = 565 ep->cred_dist.seek_cred =
261 ep->cred_dist.cred_per_msg - ep->cred_dist.credits; 566 ep->cred_dist.cred_per_msg - ep->cred_dist.credits;
262 567
263 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n", 568 ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
264 target->cred_dist_cntxt, &ep->cred_dist);
265
266 ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
267 569
268 /* see if we were successful in getting more */ 570 /* see if we were successful in getting more */
269 if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) { 571 if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
270 /* tell the target we need credits ASAP! */ 572 /* tell the target we need credits ASAP! */
271 *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE; 573 *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
272 ep->ep_st.cred_low_indicate += 1; 574 ep->ep_st.cred_low_indicate += 1;
273 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "host needs credits\n"); 575 ath6kl_dbg(ATH6KL_DBG_CREDIT,
576 "credit we need credits asap\n");
274 } 577 }
275 } 578 }
276 579
@@ -295,8 +598,8 @@ static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
295 packet = list_first_entry(&endpoint->txq, struct htc_packet, 598 packet = list_first_entry(&endpoint->txq, struct htc_packet,
296 list); 599 list);
297 600
298 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, 601 ath6kl_dbg(ATH6KL_DBG_HTC,
299 "got head pkt:0x%p , queue depth: %d\n", 602 "htc tx got packet 0x%p queue depth %d\n",
300 packet, get_queue_depth(&endpoint->txq)); 603 packet, get_queue_depth(&endpoint->txq));
301 604
302 len = CALC_TXRX_PADDED_LEN(target, 605 len = CALC_TXRX_PADDED_LEN(target,
@@ -404,9 +707,9 @@ static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
404 707
405 scat_req->len += len; 708 scat_req->len += len;
406 scat_req->scat_entries++; 709 scat_req->scat_entries++;
407 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, 710 ath6kl_dbg(ATH6KL_DBG_HTC,
408 "%d, adding pkt : 0x%p len:%d (remaining space:%d)\n", 711 "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n",
409 i, packet, len, rem_scat); 712 i, packet, packet->info.tx.seqno, len, rem_scat);
410 } 713 }
411 714
412 /* Roll back scatter setup in case of any failure */ 715 /* Roll back scatter setup in case of any failure */
@@ -455,12 +758,12 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
455 758
456 if (!scat_req) { 759 if (!scat_req) {
457 /* no scatter resources */ 760 /* no scatter resources */
458 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, 761 ath6kl_dbg(ATH6KL_DBG_HTC,
459 "no more scatter resources\n"); 762 "htc tx no more scatter resources\n");
460 break; 763 break;
461 } 764 }
462 765
463 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "pkts to scatter: %d\n", 766 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
464 n_scat); 767 n_scat);
465 768
466 scat_req->len = 0; 769 scat_req->len = 0;
@@ -479,10 +782,10 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
479 n_sent_bundle++; 782 n_sent_bundle++;
480 tot_pkts_bundle += scat_req->scat_entries; 783 tot_pkts_bundle += scat_req->scat_entries;
481 784
482 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, 785 ath6kl_dbg(ATH6KL_DBG_HTC,
483 "send scatter total bytes: %d , entries: %d\n", 786 "htc tx scatter bytes %d entries %d\n",
484 scat_req->len, scat_req->scat_entries); 787 scat_req->len, scat_req->scat_entries);
485 ath6kldev_submit_scat_req(target->dev, scat_req, false); 788 ath6kl_hif_submit_scat_req(target->dev, scat_req, false);
486 789
487 if (status) 790 if (status)
488 break; 791 break;
@@ -490,8 +793,8 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
490 793
491 *sent_bundle = n_sent_bundle; 794 *sent_bundle = n_sent_bundle;
492 *n_bundle_pkts = tot_pkts_bundle; 795 *n_bundle_pkts = tot_pkts_bundle;
493 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s (sent:%d)\n", 796 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n",
494 __func__, n_sent_bundle); 797 n_sent_bundle);
495 798
496 return; 799 return;
497} 800}
@@ -510,7 +813,7 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
510 if (endpoint->tx_proc_cnt > 1) { 813 if (endpoint->tx_proc_cnt > 1) {
511 endpoint->tx_proc_cnt--; 814 endpoint->tx_proc_cnt--;
512 spin_unlock_bh(&target->tx_lock); 815 spin_unlock_bh(&target->tx_lock);
513 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_try_send (busy)\n"); 816 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
514 return; 817 return;
515 } 818 }
516 819
@@ -588,15 +891,12 @@ static bool ath6kl_htc_tx_try(struct htc_target *target,
588 overflow = true; 891 overflow = true;
589 892
590 if (overflow) 893 if (overflow)
591 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, 894 ath6kl_dbg(ATH6KL_DBG_HTC,
592 "ep %d, tx queue will overflow :%d , tx depth:%d, max:%d\n", 895 "htc tx overflow ep %d depth %d max %d\n",
593 endpoint->eid, overflow, txq_depth, 896 endpoint->eid, txq_depth,
594 endpoint->max_txq_depth); 897 endpoint->max_txq_depth);
595 898
596 if (overflow && ep_cb.tx_full) { 899 if (overflow && ep_cb.tx_full) {
597 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
598 "indicating overflowed tx packet: 0x%p\n", tx_pkt);
599
600 if (ep_cb.tx_full(endpoint->target, tx_pkt) == 900 if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
601 HTC_SEND_FULL_DROP) { 901 HTC_SEND_FULL_DROP) {
602 endpoint->ep_st.tx_dropped += 1; 902 endpoint->ep_st.tx_dropped += 1;
@@ -625,12 +925,12 @@ static void htc_chk_ep_txq(struct htc_target *target)
625 * are not modifying any state. 925 * are not modifying any state.
626 */ 926 */
627 list_for_each_entry(cred_dist, &target->cred_dist_list, list) { 927 list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
628 endpoint = (struct htc_endpoint *)cred_dist->htc_rsvd; 928 endpoint = cred_dist->htc_ep;
629 929
630 spin_lock_bh(&target->tx_lock); 930 spin_lock_bh(&target->tx_lock);
631 if (!list_empty(&endpoint->txq)) { 931 if (!list_empty(&endpoint->txq)) {
632 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, 932 ath6kl_dbg(ATH6KL_DBG_HTC,
633 "ep %d has %d credits and %d packets in tx queue\n", 933 "htc creds ep %d credits %d pkts %d\n",
634 cred_dist->endpoint, 934 cred_dist->endpoint,
635 endpoint->cred_dist.credits, 935 endpoint->cred_dist.credits,
636 get_queue_depth(&endpoint->txq)); 936 get_queue_depth(&endpoint->txq));
@@ -704,13 +1004,13 @@ static int htc_setup_tx_complete(struct htc_target *target)
704} 1004}
705 1005
706void ath6kl_htc_set_credit_dist(struct htc_target *target, 1006void ath6kl_htc_set_credit_dist(struct htc_target *target,
707 struct htc_credit_state_info *cred_dist_cntxt, 1007 struct ath6kl_htc_credit_info *credit_info,
708 u16 srvc_pri_order[], int list_len) 1008 u16 srvc_pri_order[], int list_len)
709{ 1009{
710 struct htc_endpoint *endpoint; 1010 struct htc_endpoint *endpoint;
711 int i, ep; 1011 int i, ep;
712 1012
713 target->cred_dist_cntxt = cred_dist_cntxt; 1013 target->credit_info = credit_info;
714 1014
715 list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list, 1015 list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
716 &target->cred_dist_list); 1016 &target->cred_dist_list);
@@ -736,8 +1036,8 @@ int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
736 struct htc_endpoint *endpoint; 1036 struct htc_endpoint *endpoint;
737 struct list_head queue; 1037 struct list_head queue;
738 1038
739 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, 1039 ath6kl_dbg(ATH6KL_DBG_HTC,
740 "htc_tx: ep id: %d, buf: 0x%p, len: %d\n", 1040 "htc tx ep id %d buf 0x%p len %d\n",
741 packet->endpoint, packet->buf, packet->act_len); 1041 packet->endpoint, packet->buf, packet->act_len);
742 1042
743 if (packet->endpoint >= ENDPOINT_MAX) { 1043 if (packet->endpoint >= ENDPOINT_MAX) {
@@ -787,8 +1087,8 @@ void ath6kl_htc_flush_txep(struct htc_target *target,
787 list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) { 1087 list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
788 packet->status = -ECANCELED; 1088 packet->status = -ECANCELED;
789 list_del(&packet->list); 1089 list_del(&packet->list);
790 ath6kl_dbg(ATH6KL_DBG_TRC, 1090 ath6kl_dbg(ATH6KL_DBG_HTC,
791 "flushing tx pkt:0x%p, len:%d, ep:%d tag:0x%X\n", 1091 "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n",
792 packet, packet->act_len, 1092 packet, packet->act_len,
793 packet->endpoint, packet->info.tx.tag); 1093 packet->endpoint, packet->info.tx.tag);
794 1094
@@ -844,12 +1144,13 @@ void ath6kl_htc_indicate_activity_change(struct htc_target *target,
844 endpoint->cred_dist.txq_depth = 1144 endpoint->cred_dist.txq_depth =
845 get_queue_depth(&endpoint->txq); 1145 get_queue_depth(&endpoint->txq);
846 1146
847 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n", 1147 ath6kl_dbg(ATH6KL_DBG_HTC,
848 target->cred_dist_cntxt, &target->cred_dist_list); 1148 "htc tx activity ctxt 0x%p dist 0x%p\n",
1149 target->credit_info, &target->cred_dist_list);
849 1150
850 ath6k_credit_distribute(target->cred_dist_cntxt, 1151 ath6kl_credit_distribute(target->credit_info,
851 &target->cred_dist_list, 1152 &target->cred_dist_list,
852 HTC_CREDIT_DIST_ACTIVITY_CHANGE); 1153 HTC_CREDIT_DIST_ACTIVITY_CHANGE);
853 } 1154 }
854 1155
855 spin_unlock_bh(&target->tx_lock); 1156 spin_unlock_bh(&target->tx_lock);
@@ -919,15 +1220,15 @@ static int ath6kl_htc_rx_packet(struct htc_target *target,
919 padded_len = CALC_TXRX_PADDED_LEN(target, rx_len); 1220 padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);
920 1221
921 if (padded_len > packet->buf_len) { 1222 if (padded_len > packet->buf_len) {
922 ath6kl_err("not enough receive space for packet - padlen:%d recvlen:%d bufferlen:%d\n", 1223 ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n",
923 padded_len, rx_len, packet->buf_len); 1224 padded_len, rx_len, packet->buf_len);
924 return -ENOMEM; 1225 return -ENOMEM;
925 } 1226 }
926 1227
927 ath6kl_dbg(ATH6KL_DBG_HTC_RECV, 1228 ath6kl_dbg(ATH6KL_DBG_HTC,
928 "dev_rx_pkt (0x%p : hdr:0x%X) padded len: %d mbox:0x%X (mode:%s)\n", 1229 "htc rx 0x%p hdr x%x len %d mbox 0x%x\n",
929 packet, packet->info.rx.exp_hdr, 1230 packet, packet->info.rx.exp_hdr,
930 padded_len, dev->ar->mbox_info.htc_addr, "sync"); 1231 padded_len, dev->ar->mbox_info.htc_addr);
931 1232
932 status = hif_read_write_sync(dev->ar, 1233 status = hif_read_write_sync(dev->ar,
933 dev->ar->mbox_info.htc_addr, 1234 dev->ar->mbox_info.htc_addr,
@@ -1137,8 +1438,8 @@ static int ath6kl_htc_rx_alloc(struct htc_target *target,
1137 } 1438 }
1138 1439
1139 endpoint->ep_st.rx_bundle_from_hdr += 1; 1440 endpoint->ep_st.rx_bundle_from_hdr += 1;
1140 ath6kl_dbg(ATH6KL_DBG_HTC_RECV, 1441 ath6kl_dbg(ATH6KL_DBG_HTC,
1141 "htc hdr indicates :%d msg can be fetched as a bundle\n", 1442 "htc rx bundle pkts %d\n",
1142 n_msg); 1443 n_msg);
1143 } else 1444 } else
1144 /* HTC header only indicates 1 message to fetch */ 1445 /* HTC header only indicates 1 message to fetch */
@@ -1191,8 +1492,8 @@ static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
1191 ath6kl_err("htc_ctrl_rx, got message with len:%zu\n", 1492 ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
1192 packets->act_len + HTC_HDR_LENGTH); 1493 packets->act_len + HTC_HDR_LENGTH);
1193 1494
1194 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, 1495 ath6kl_dbg_dump(ATH6KL_DBG_HTC,
1195 "Unexpected ENDPOINT 0 Message", "", 1496 "htc rx unexpected endpoint 0 message", "",
1196 packets->buf - HTC_HDR_LENGTH, 1497 packets->buf - HTC_HDR_LENGTH,
1197 packets->act_len + HTC_HDR_LENGTH); 1498 packets->act_len + HTC_HDR_LENGTH);
1198 } 1499 }
@@ -1209,9 +1510,6 @@ static void htc_proc_cred_rpt(struct htc_target *target,
1209 int tot_credits = 0, i; 1510 int tot_credits = 0, i;
1210 bool dist = false; 1511 bool dist = false;
1211 1512
1212 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
1213 "htc_proc_cred_rpt, credit report entries:%d\n", n_entries);
1214
1215 spin_lock_bh(&target->tx_lock); 1513 spin_lock_bh(&target->tx_lock);
1216 1514
1217 for (i = 0; i < n_entries; i++, rpt++) { 1515 for (i = 0; i < n_entries; i++, rpt++) {
@@ -1223,8 +1521,9 @@ static void htc_proc_cred_rpt(struct htc_target *target,
1223 1521
1224 endpoint = &target->endpoint[rpt->eid]; 1522 endpoint = &target->endpoint[rpt->eid];
1225 1523
1226 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, " ep %d got %d credits\n", 1524 ath6kl_dbg(ATH6KL_DBG_CREDIT,
1227 rpt->eid, rpt->credits); 1525 "credit report ep %d credits %d\n",
1526 rpt->eid, rpt->credits);
1228 1527
1229 endpoint->ep_st.tx_cred_rpt += 1; 1528 endpoint->ep_st.tx_cred_rpt += 1;
1230 endpoint->ep_st.cred_retnd += rpt->credits; 1529 endpoint->ep_st.cred_retnd += rpt->credits;
@@ -1264,21 +1563,14 @@ static void htc_proc_cred_rpt(struct htc_target *target,
1264 tot_credits += rpt->credits; 1563 tot_credits += rpt->credits;
1265 } 1564 }
1266 1565
1267 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
1268 "report indicated %d credits to distribute\n",
1269 tot_credits);
1270
1271 if (dist) { 1566 if (dist) {
1272 /* 1567 /*
1273 * This was a credit return based on a completed send 1568 * This was a credit return based on a completed send
1274 * operations note, this is done with the lock held 1569 * operations note, this is done with the lock held
1275 */ 1570 */
1276 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n", 1571 ath6kl_credit_distribute(target->credit_info,
1277 target->cred_dist_cntxt, &target->cred_dist_list); 1572 &target->cred_dist_list,
1278 1573 HTC_CREDIT_DIST_SEND_COMPLETE);
1279 ath6k_credit_distribute(target->cred_dist_cntxt,
1280 &target->cred_dist_list,
1281 HTC_CREDIT_DIST_SEND_COMPLETE);
1282 } 1574 }
1283 1575
1284 spin_unlock_bh(&target->tx_lock); 1576 spin_unlock_bh(&target->tx_lock);
@@ -1320,14 +1612,15 @@ static int htc_parse_trailer(struct htc_target *target,
1320 if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) 1612 if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
1321 && next_lk_ahds) { 1613 && next_lk_ahds) {
1322 1614
1323 ath6kl_dbg(ATH6KL_DBG_HTC_RECV, 1615 ath6kl_dbg(ATH6KL_DBG_HTC,
1324 "lk_ahd report found (pre valid:0x%X, post valid:0x%X)\n", 1616 "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
1325 lk_ahd->pre_valid, lk_ahd->post_valid); 1617 lk_ahd->pre_valid, lk_ahd->post_valid);
1326 1618
1327 /* look ahead bytes are valid, copy them over */ 1619 /* look ahead bytes are valid, copy them over */
1328 memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4); 1620 memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);
1329 1621
1330 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Next Look Ahead", 1622 ath6kl_dbg_dump(ATH6KL_DBG_HTC,
1623 "htc rx next look ahead",
1331 "", next_lk_ahds, 4); 1624 "", next_lk_ahds, 4);
1332 1625
1333 *n_lk_ahds = 1; 1626 *n_lk_ahds = 1;
@@ -1346,7 +1639,7 @@ static int htc_parse_trailer(struct htc_target *target,
1346 bundle_lkahd_rpt = 1639 bundle_lkahd_rpt =
1347 (struct htc_bundle_lkahd_rpt *) record_buf; 1640 (struct htc_bundle_lkahd_rpt *) record_buf;
1348 1641
1349 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Bundle lk_ahd", 1642 ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd",
1350 "", record_buf, record->len); 1643 "", record_buf, record->len);
1351 1644
1352 for (i = 0; i < len; i++) { 1645 for (i = 0; i < len; i++) {
@@ -1378,10 +1671,8 @@ static int htc_proc_trailer(struct htc_target *target,
1378 u8 *record_buf; 1671 u8 *record_buf;
1379 u8 *orig_buf; 1672 u8 *orig_buf;
1380 1673
1381 ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "+htc_proc_trailer (len:%d)\n", len); 1674 ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len);
1382 1675 ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len);
1383 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Recv Trailer", "",
1384 buf, len);
1385 1676
1386 orig_buf = buf; 1677 orig_buf = buf;
1387 orig_len = len; 1678 orig_len = len;
@@ -1418,7 +1709,7 @@ static int htc_proc_trailer(struct htc_target *target,
1418 } 1709 }
1419 1710
1420 if (status) 1711 if (status)
1421 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD Recv Trailer", 1712 ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer",
1422 "", orig_buf, orig_len); 1713 "", orig_buf, orig_len);
1423 1714
1424 return status; 1715 return status;
@@ -1436,9 +1727,6 @@ static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
1436 if (n_lkahds != NULL) 1727 if (n_lkahds != NULL)
1437 *n_lkahds = 0; 1728 *n_lkahds = 0;
1438 1729
1439 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "HTC Recv PKT", "htc ",
1440 packet->buf, packet->act_len);
1441
1442 /* 1730 /*
1443 * NOTE: we cannot assume the alignment of buf, so we use the safe 1731 * NOTE: we cannot assume the alignment of buf, so we use the safe
1444 * macros to retrieve 16 bit fields. 1732 * macros to retrieve 16 bit fields.
@@ -1480,9 +1768,9 @@ static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
1480 if (lk_ahd != packet->info.rx.exp_hdr) { 1768 if (lk_ahd != packet->info.rx.exp_hdr) {
1481 ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n", 1769 ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
1482 __func__, packet, packet->info.rx.rx_flags); 1770 __func__, packet, packet->info.rx.rx_flags);
1483 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Expected Message lk_ahd", 1771 ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd",
1484 "", &packet->info.rx.exp_hdr, 4); 1772 "", &packet->info.rx.exp_hdr, 4);
1485 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Current Frame Header", 1773 ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header",
1486 "", (u8 *)&lk_ahd, sizeof(lk_ahd)); 1774 "", (u8 *)&lk_ahd, sizeof(lk_ahd));
1487 status = -ENOMEM; 1775 status = -ENOMEM;
1488 goto fail_rx; 1776 goto fail_rx;
@@ -1518,15 +1806,8 @@ static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
1518 1806
1519fail_rx: 1807fail_rx:
1520 if (status) 1808 if (status)
1521 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD HTC Recv PKT", 1809 ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet",
1522 "", packet->buf, 1810 "", packet->buf, packet->act_len);
1523 packet->act_len < 256 ? packet->act_len : 256);
1524 else {
1525 if (packet->act_len > 0)
1526 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
1527 "HTC - Application Msg", "",
1528 packet->buf, packet->act_len);
1529 }
1530 1811
1531 return status; 1812 return status;
1532} 1813}
@@ -1534,8 +1815,8 @@ fail_rx:
1534static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint, 1815static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
1535 struct htc_packet *packet) 1816 struct htc_packet *packet)
1536{ 1817{
1537 ath6kl_dbg(ATH6KL_DBG_HTC_RECV, 1818 ath6kl_dbg(ATH6KL_DBG_HTC,
1538 "htc calling ep %d recv callback on packet 0x%p\n", 1819 "htc rx complete ep %d packet 0x%p\n",
1539 endpoint->eid, packet); 1820 endpoint->eid, packet);
1540 endpoint->ep_cb.rx(endpoint->target, packet); 1821 endpoint->ep_cb.rx(endpoint->target, packet);
1541} 1822}
@@ -1571,9 +1852,9 @@ static int ath6kl_htc_rx_bundle(struct htc_target *target,
1571 1852
1572 len = 0; 1853 len = 0;
1573 1854
1574 ath6kl_dbg(ATH6KL_DBG_HTC_RECV, 1855 ath6kl_dbg(ATH6KL_DBG_HTC,
1575 "%s(): (numpackets: %d , actual : %d)\n", 1856 "htc rx bundle depth %d pkts %d\n",
1576 __func__, get_queue_depth(rxq), n_scat_pkt); 1857 get_queue_depth(rxq), n_scat_pkt);
1577 1858
1578 scat_req = hif_scatter_req_get(target->dev->ar); 1859 scat_req = hif_scatter_req_get(target->dev->ar);
1579 1860
@@ -1620,7 +1901,7 @@ static int ath6kl_htc_rx_bundle(struct htc_target *target,
1620 scat_req->len = len; 1901 scat_req->len = len;
1621 scat_req->scat_entries = i; 1902 scat_req->scat_entries = i;
1622 1903
1623 status = ath6kldev_submit_scat_req(target->dev, scat_req, true); 1904 status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);
1624 1905
1625 if (!status) 1906 if (!status)
1626 *n_pkt_fetched = i; 1907 *n_pkt_fetched = i;
@@ -1643,7 +1924,6 @@ static int ath6kl_htc_rx_process_packets(struct htc_target *target,
1643 int status = 0; 1924 int status = 0;
1644 1925
1645 list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) { 1926 list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
1646 list_del(&packet->list);
1647 ep = &target->endpoint[packet->endpoint]; 1927 ep = &target->endpoint[packet->endpoint];
1648 1928
1649 /* process header for each of the recv packet */ 1929 /* process header for each of the recv packet */
@@ -1652,6 +1932,8 @@ static int ath6kl_htc_rx_process_packets(struct htc_target *target,
1652 if (status) 1932 if (status)
1653 return status; 1933 return status;
1654 1934
1935 list_del(&packet->list);
1936
1655 if (list_empty(comp_pktq)) { 1937 if (list_empty(comp_pktq)) {
1656 /* 1938 /*
1657 * Last packet's more packet flag is set 1939 * Last packet's more packet flag is set
@@ -1686,11 +1968,15 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
1686 int fetched_pkts; 1968 int fetched_pkts;
1687 bool part_bundle = false; 1969 bool part_bundle = false;
1688 int status = 0; 1970 int status = 0;
1971 struct list_head tmp_rxq;
1972 struct htc_packet *packet, *tmp_pkt;
1689 1973
1690 /* now go fetch the list of HTC packets */ 1974 /* now go fetch the list of HTC packets */
1691 while (!list_empty(rx_pktq)) { 1975 while (!list_empty(rx_pktq)) {
1692 fetched_pkts = 0; 1976 fetched_pkts = 0;
1693 1977
1978 INIT_LIST_HEAD(&tmp_rxq);
1979
1694 if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) { 1980 if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
1695 /* 1981 /*
1696 * There are enough packets to attempt a 1982 * There are enough packets to attempt a
@@ -1698,28 +1984,27 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
1698 * allowed. 1984 * allowed.
1699 */ 1985 */
1700 status = ath6kl_htc_rx_bundle(target, rx_pktq, 1986 status = ath6kl_htc_rx_bundle(target, rx_pktq,
1701 comp_pktq, 1987 &tmp_rxq,
1702 &fetched_pkts, 1988 &fetched_pkts,
1703 part_bundle); 1989 part_bundle);
1704 if (status) 1990 if (status)
1705 return status; 1991 goto fail_rx;
1706 1992
1707 if (!list_empty(rx_pktq)) 1993 if (!list_empty(rx_pktq))
1708 part_bundle = true; 1994 part_bundle = true;
1995
1996 list_splice_tail_init(&tmp_rxq, comp_pktq);
1709 } 1997 }
1710 1998
1711 if (!fetched_pkts) { 1999 if (!fetched_pkts) {
1712 struct htc_packet *packet;
1713 2000
1714 packet = list_first_entry(rx_pktq, struct htc_packet, 2001 packet = list_first_entry(rx_pktq, struct htc_packet,
1715 list); 2002 list);
1716 2003
1717 list_del(&packet->list);
1718
1719 /* fully synchronous */ 2004 /* fully synchronous */
1720 packet->completion = NULL; 2005 packet->completion = NULL;
1721 2006
1722 if (!list_empty(rx_pktq)) 2007 if (!list_is_singular(rx_pktq))
1723 /* 2008 /*
1724 * look_aheads in all packet 2009 * look_aheads in all packet
1725 * except the last one in the 2010 * except the last one in the
@@ -1731,18 +2016,42 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
1731 /* go fetch the packet */ 2016 /* go fetch the packet */
1732 status = ath6kl_htc_rx_packet(target, packet, 2017 status = ath6kl_htc_rx_packet(target, packet,
1733 packet->act_len); 2018 packet->act_len);
2019
2020 list_move_tail(&packet->list, &tmp_rxq);
2021
1734 if (status) 2022 if (status)
1735 return status; 2023 goto fail_rx;
1736 2024
1737 list_add_tail(&packet->list, comp_pktq); 2025 list_splice_tail_init(&tmp_rxq, comp_pktq);
1738 } 2026 }
1739 } 2027 }
1740 2028
2029 return 0;
2030
2031fail_rx:
2032
2033 /*
2034 * Cleanup any packets we allocated but didn't use to
2035 * actually fetch any packets.
2036 */
2037
2038 list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
2039 list_del(&packet->list);
2040 htc_reclaim_rxbuf(target, packet,
2041 &target->endpoint[packet->endpoint]);
2042 }
2043
2044 list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
2045 list_del(&packet->list);
2046 htc_reclaim_rxbuf(target, packet,
2047 &target->endpoint[packet->endpoint]);
2048 }
2049
1741 return status; 2050 return status;
1742} 2051}
1743 2052
1744int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, 2053int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
1745 u32 msg_look_ahead[], int *num_pkts) 2054 u32 msg_look_ahead, int *num_pkts)
1746{ 2055{
1747 struct htc_packet *packets, *tmp_pkt; 2056 struct htc_packet *packets, *tmp_pkt;
1748 struct htc_endpoint *endpoint; 2057 struct htc_endpoint *endpoint;
@@ -1759,7 +2068,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
1759 * On first entry copy the look_aheads into our temp array for 2068 * On first entry copy the look_aheads into our temp array for
1760 * processing 2069 * processing
1761 */ 2070 */
1762 memcpy(look_aheads, msg_look_ahead, sizeof(look_aheads)); 2071 look_aheads[0] = msg_look_ahead;
1763 2072
1764 while (true) { 2073 while (true) {
1765 2074
@@ -1827,15 +2136,6 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
1827 if (status) { 2136 if (status) {
1828 ath6kl_err("failed to get pending recv messages: %d\n", 2137 ath6kl_err("failed to get pending recv messages: %d\n",
1829 status); 2138 status);
1830 /*
1831 * Cleanup any packets we allocated but didn't use to
1832 * actually fetch any packets.
1833 */
1834 list_for_each_entry_safe(packets, tmp_pkt, &rx_pktq, list) {
1835 list_del(&packets->list);
1836 htc_reclaim_rxbuf(target, packets,
1837 &target->endpoint[packets->endpoint]);
1838 }
1839 2139
1840 /* cleanup any packets in sync completion queue */ 2140 /* cleanup any packets in sync completion queue */
1841 list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) { 2141 list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
@@ -1846,7 +2146,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
1846 2146
1847 if (target->htc_flags & HTC_OP_STATE_STOPPING) { 2147 if (target->htc_flags & HTC_OP_STATE_STOPPING) {
1848 ath6kl_warn("host is going to stop blocking receiver for htc_stop\n"); 2148 ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
1849 ath6kldev_rx_control(target->dev, false); 2149 ath6kl_hif_rx_control(target->dev, false);
1850 } 2150 }
1851 } 2151 }
1852 2152
@@ -1856,7 +2156,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
1856 */ 2156 */
1857 if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) { 2157 if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
1858 ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n"); 2158 ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
1859 ath6kldev_rx_control(target->dev, false); 2159 ath6kl_hif_rx_control(target->dev, false);
1860 } 2160 }
1861 *num_pkts = n_fetched; 2161 *num_pkts = n_fetched;
1862 2162
@@ -1874,12 +2174,12 @@ static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
1874 struct htc_frame_hdr *htc_hdr; 2174 struct htc_frame_hdr *htc_hdr;
1875 u32 look_ahead; 2175 u32 look_ahead;
1876 2176
1877 if (ath6kldev_poll_mboxmsg_rx(target->dev, &look_ahead, 2177 if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
1878 HTC_TARGET_RESPONSE_TIMEOUT)) 2178 HTC_TARGET_RESPONSE_TIMEOUT))
1879 return NULL; 2179 return NULL;
1880 2180
1881 ath6kl_dbg(ATH6KL_DBG_HTC_RECV, 2181 ath6kl_dbg(ATH6KL_DBG_HTC,
1882 "htc_wait_for_ctrl_msg: look_ahead : 0x%X\n", look_ahead); 2182 "htc rx wait ctrl look_ahead 0x%X\n", look_ahead);
1883 2183
1884 htc_hdr = (struct htc_frame_hdr *)&look_ahead; 2184 htc_hdr = (struct htc_frame_hdr *)&look_ahead;
1885 2185
@@ -1943,8 +2243,8 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
1943 2243
1944 depth = get_queue_depth(pkt_queue); 2244 depth = get_queue_depth(pkt_queue);
1945 2245
1946 ath6kl_dbg(ATH6KL_DBG_HTC_RECV, 2246 ath6kl_dbg(ATH6KL_DBG_HTC,
1947 "htc_add_rxbuf_multiple: ep id: %d, cnt:%d, len: %d\n", 2247 "htc rx add multiple ep id %d cnt %d len %d\n",
1948 first_pkt->endpoint, depth, first_pkt->buf_len); 2248 first_pkt->endpoint, depth, first_pkt->buf_len);
1949 2249
1950 endpoint = &target->endpoint[first_pkt->endpoint]; 2250 endpoint = &target->endpoint[first_pkt->endpoint];
@@ -1969,8 +2269,8 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
1969 /* check if we are blocked waiting for a new buffer */ 2269 /* check if we are blocked waiting for a new buffer */
1970 if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) { 2270 if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
1971 if (target->ep_waiting == first_pkt->endpoint) { 2271 if (target->ep_waiting == first_pkt->endpoint) {
1972 ath6kl_dbg(ATH6KL_DBG_HTC_RECV, 2272 ath6kl_dbg(ATH6KL_DBG_HTC,
1973 "receiver was blocked on ep:%d, unblocking.\n", 2273 "htc rx blocked on ep %d, unblocking\n",
1974 target->ep_waiting); 2274 target->ep_waiting);
1975 target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS; 2275 target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
1976 target->ep_waiting = ENDPOINT_MAX; 2276 target->ep_waiting = ENDPOINT_MAX;
@@ -1982,7 +2282,7 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
1982 2282
1983 if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING)) 2283 if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
1984 /* TODO : implement a buffer threshold count? */ 2284 /* TODO : implement a buffer threshold count? */
1985 ath6kldev_rx_control(target->dev, true); 2285 ath6kl_hif_rx_control(target->dev, true);
1986 2286
1987 return status; 2287 return status;
1988} 2288}
@@ -2004,8 +2304,8 @@ void ath6kl_htc_flush_rx_buf(struct htc_target *target)
2004 &endpoint->rx_bufq, list) { 2304 &endpoint->rx_bufq, list) {
2005 list_del(&packet->list); 2305 list_del(&packet->list);
2006 spin_unlock_bh(&target->rx_lock); 2306 spin_unlock_bh(&target->rx_lock);
2007 ath6kl_dbg(ATH6KL_DBG_HTC_RECV, 2307 ath6kl_dbg(ATH6KL_DBG_HTC,
2008 "flushing rx pkt:0x%p, len:%d, ep:%d\n", 2308 "htc rx flush pkt 0x%p len %d ep %d\n",
2009 packet, packet->buf_len, 2309 packet, packet->buf_len,
2010 packet->endpoint); 2310 packet->endpoint);
2011 dev_kfree_skb(packet->pkt_cntxt); 2311 dev_kfree_skb(packet->pkt_cntxt);
@@ -2028,8 +2328,8 @@ int ath6kl_htc_conn_service(struct htc_target *target,
2028 unsigned int max_msg_sz = 0; 2328 unsigned int max_msg_sz = 0;
2029 int status = 0; 2329 int status = 0;
2030 2330
2031 ath6kl_dbg(ATH6KL_DBG_TRC, 2331 ath6kl_dbg(ATH6KL_DBG_HTC,
2032 "htc_conn_service, target:0x%p service id:0x%X\n", 2332 "htc connect service target 0x%p service id 0x%x\n",
2033 target, conn_req->svc_id); 2333 target, conn_req->svc_id);
2034 2334
2035 if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) { 2335 if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
@@ -2115,7 +2415,7 @@ int ath6kl_htc_conn_service(struct htc_target *target,
2115 endpoint->len_max = max_msg_sz; 2415 endpoint->len_max = max_msg_sz;
2116 endpoint->ep_cb = conn_req->ep_cb; 2416 endpoint->ep_cb = conn_req->ep_cb;
2117 endpoint->cred_dist.svc_id = conn_req->svc_id; 2417 endpoint->cred_dist.svc_id = conn_req->svc_id;
2118 endpoint->cred_dist.htc_rsvd = endpoint; 2418 endpoint->cred_dist.htc_ep = endpoint;
2119 endpoint->cred_dist.endpoint = assigned_ep; 2419 endpoint->cred_dist.endpoint = assigned_ep;
2120 endpoint->cred_dist.cred_sz = target->tgt_cred_sz; 2420 endpoint->cred_dist.cred_sz = target->tgt_cred_sz;
2121 2421
@@ -2172,6 +2472,7 @@ static void reset_ep_state(struct htc_target *target)
2172 } 2472 }
2173 2473
2174 /* reset distribution list */ 2474 /* reset distribution list */
2475 /* FIXME: free existing entries */
2175 INIT_LIST_HEAD(&target->cred_dist_list); 2476 INIT_LIST_HEAD(&target->cred_dist_list);
2176} 2477}
2177 2478
@@ -2201,8 +2502,8 @@ static void htc_setup_msg_bndl(struct htc_target *target)
2201 target->msg_per_bndl_max = min(target->max_scat_entries, 2502 target->msg_per_bndl_max = min(target->max_scat_entries,
2202 target->msg_per_bndl_max); 2503 target->msg_per_bndl_max);
2203 2504
2204 ath6kl_dbg(ATH6KL_DBG_TRC, 2505 ath6kl_dbg(ATH6KL_DBG_BOOT,
2205 "htc bundling allowed. max msg per htc bundle: %d\n", 2506 "htc bundling allowed msg_per_bndl_max %d\n",
2206 target->msg_per_bndl_max); 2507 target->msg_per_bndl_max);
2207 2508
2208 /* Max rx bundle size is limited by the max tx bundle size */ 2509 /* Max rx bundle size is limited by the max tx bundle size */
@@ -2211,7 +2512,7 @@ static void htc_setup_msg_bndl(struct htc_target *target)
2211 target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH, 2512 target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
2212 target->max_xfer_szper_scatreq); 2513 target->max_xfer_szper_scatreq);
2213 2514
2214 ath6kl_dbg(ATH6KL_DBG_ANY, "max recv: %d max send: %d\n", 2515 ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n",
2215 target->max_rx_bndl_sz, target->max_tx_bndl_sz); 2516 target->max_rx_bndl_sz, target->max_tx_bndl_sz);
2216 2517
2217 if (target->max_tx_bndl_sz) 2518 if (target->max_tx_bndl_sz)
@@ -2265,8 +2566,8 @@ int ath6kl_htc_wait_target(struct htc_target *target)
2265 target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt); 2566 target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
2266 target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz); 2567 target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);
2267 2568
2268 ath6kl_dbg(ATH6KL_DBG_HTC_RECV, 2569 ath6kl_dbg(ATH6KL_DBG_BOOT,
2269 "target ready: credits: %d credit size: %d\n", 2570 "htc target ready credits %d size %d\n",
2270 target->tgt_creds, target->tgt_cred_sz); 2571 target->tgt_creds, target->tgt_cred_sz);
2271 2572
2272 /* check if this is an extended ready message */ 2573 /* check if this is an extended ready message */
@@ -2280,7 +2581,7 @@ int ath6kl_htc_wait_target(struct htc_target *target)
2280 target->msg_per_bndl_max = 0; 2581 target->msg_per_bndl_max = 0;
2281 } 2582 }
2282 2583
2283 ath6kl_dbg(ATH6KL_DBG_TRC, "using htc protocol version : %s (%d)\n", 2584 ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
2284 (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1", 2585 (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
2285 target->htc_tgt_ver); 2586 target->htc_tgt_ver);
2286 2587
@@ -2300,6 +2601,10 @@ int ath6kl_htc_wait_target(struct htc_target *target)
2300 status = ath6kl_htc_conn_service((void *)target, &connect, &resp); 2601 status = ath6kl_htc_conn_service((void *)target, &connect, &resp);
2301 2602
2302 if (status) 2603 if (status)
2604 /*
2605 * FIXME: this call doesn't make sense, the caller should
2606 * call ath6kl_htc_cleanup() when it wants remove htc
2607 */
2303 ath6kl_hif_cleanup_scatter(target->dev->ar); 2608 ath6kl_hif_cleanup_scatter(target->dev->ar);
2304 2609
2305fail_wait_target: 2610fail_wait_target:
@@ -2320,8 +2625,11 @@ int ath6kl_htc_start(struct htc_target *target)
2320 struct htc_packet *packet; 2625 struct htc_packet *packet;
2321 int status; 2626 int status;
2322 2627
2628 memset(&target->dev->irq_proc_reg, 0,
2629 sizeof(target->dev->irq_proc_reg));
2630
2323 /* Disable interrupts at the chip level */ 2631 /* Disable interrupts at the chip level */
2324 ath6kldev_disable_intrs(target->dev); 2632 ath6kl_hif_disable_intrs(target->dev);
2325 2633
2326 target->htc_flags = 0; 2634 target->htc_flags = 0;
2327 target->rx_st_flags = 0; 2635 target->rx_st_flags = 0;
@@ -2334,8 +2642,8 @@ int ath6kl_htc_start(struct htc_target *target)
2334 } 2642 }
2335 2643
2336 /* NOTE: the first entry in the distribution list is ENDPOINT_0 */ 2644 /* NOTE: the first entry in the distribution list is ENDPOINT_0 */
2337 ath6k_credit_init(target->cred_dist_cntxt, &target->cred_dist_list, 2645 ath6kl_credit_init(target->credit_info, &target->cred_dist_list,
2338 target->tgt_creds); 2646 target->tgt_creds);
2339 2647
2340 dump_cred_dist_stats(target); 2648 dump_cred_dist_stats(target);
2341 2649
@@ -2346,7 +2654,7 @@ int ath6kl_htc_start(struct htc_target *target)
2346 return status; 2654 return status;
2347 2655
2348 /* unmask interrupts */ 2656 /* unmask interrupts */
2349 status = ath6kldev_unmask_intrs(target->dev); 2657 status = ath6kl_hif_unmask_intrs(target->dev);
2350 2658
2351 if (status) 2659 if (status)
2352 ath6kl_htc_stop(target); 2660 ath6kl_htc_stop(target);
@@ -2354,6 +2662,44 @@ int ath6kl_htc_start(struct htc_target *target)
2354 return status; 2662 return status;
2355} 2663}
2356 2664
2665static int ath6kl_htc_reset(struct htc_target *target)
2666{
2667 u32 block_size, ctrl_bufsz;
2668 struct htc_packet *packet;
2669 int i;
2670
2671 reset_ep_state(target);
2672
2673 block_size = target->dev->ar->mbox_info.block_size;
2674
2675 ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
2676 (block_size + HTC_HDR_LENGTH) :
2677 (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
2678
2679 for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
2680 packet = kzalloc(sizeof(*packet), GFP_KERNEL);
2681 if (!packet)
2682 return -ENOMEM;
2683
2684 packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
2685 if (!packet->buf_start) {
2686 kfree(packet);
2687 return -ENOMEM;
2688 }
2689
2690 packet->buf_len = ctrl_bufsz;
2691 if (i < NUM_CONTROL_RX_BUFFERS) {
2692 packet->act_len = 0;
2693 packet->buf = packet->buf_start;
2694 packet->endpoint = ENDPOINT_0;
2695 list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
2696 } else
2697 list_add_tail(&packet->list, &target->free_ctrl_txbuf);
2698 }
2699
2700 return 0;
2701}
2702
2357/* htc_stop: stop interrupt reception, and flush all queued buffers */ 2703/* htc_stop: stop interrupt reception, and flush all queued buffers */
2358void ath6kl_htc_stop(struct htc_target *target) 2704void ath6kl_htc_stop(struct htc_target *target)
2359{ 2705{
@@ -2366,21 +2712,19 @@ void ath6kl_htc_stop(struct htc_target *target)
2366 * function returns all pending HIF I/O has completed, we can 2712 * function returns all pending HIF I/O has completed, we can
2367 * safely flush the queues. 2713 * safely flush the queues.
2368 */ 2714 */
2369 ath6kldev_mask_intrs(target->dev); 2715 ath6kl_hif_mask_intrs(target->dev);
2370 2716
2371 ath6kl_htc_flush_txep_all(target); 2717 ath6kl_htc_flush_txep_all(target);
2372 2718
2373 ath6kl_htc_flush_rx_buf(target); 2719 ath6kl_htc_flush_rx_buf(target);
2374 2720
2375 reset_ep_state(target); 2721 ath6kl_htc_reset(target);
2376} 2722}
2377 2723
2378void *ath6kl_htc_create(struct ath6kl *ar) 2724void *ath6kl_htc_create(struct ath6kl *ar)
2379{ 2725{
2380 struct htc_target *target = NULL; 2726 struct htc_target *target = NULL;
2381 struct htc_packet *packet; 2727 int status = 0;
2382 int status = 0, i = 0;
2383 u32 block_size, ctrl_bufsz;
2384 2728
2385 target = kzalloc(sizeof(*target), GFP_KERNEL); 2729 target = kzalloc(sizeof(*target), GFP_KERNEL);
2386 if (!target) { 2730 if (!target) {
@@ -2392,7 +2736,7 @@ void *ath6kl_htc_create(struct ath6kl *ar)
2392 if (!target->dev) { 2736 if (!target->dev) {
2393 ath6kl_err("unable to allocate memory\n"); 2737 ath6kl_err("unable to allocate memory\n");
2394 status = -ENOMEM; 2738 status = -ENOMEM;
2395 goto fail_create_htc; 2739 goto err_htc_cleanup;
2396 } 2740 }
2397 2741
2398 spin_lock_init(&target->htc_lock); 2742 spin_lock_init(&target->htc_lock);
@@ -2407,49 +2751,20 @@ void *ath6kl_htc_create(struct ath6kl *ar)
2407 target->dev->htc_cnxt = target; 2751 target->dev->htc_cnxt = target;
2408 target->ep_waiting = ENDPOINT_MAX; 2752 target->ep_waiting = ENDPOINT_MAX;
2409 2753
2410 reset_ep_state(target); 2754 status = ath6kl_hif_setup(target->dev);
2411
2412 status = ath6kldev_setup(target->dev);
2413
2414 if (status) 2755 if (status)
2415 goto fail_create_htc; 2756 goto err_htc_cleanup;
2416
2417 block_size = ar->mbox_info.block_size;
2418
2419 ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
2420 (block_size + HTC_HDR_LENGTH) :
2421 (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
2422
2423 for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
2424 packet = kzalloc(sizeof(*packet), GFP_KERNEL);
2425 if (!packet)
2426 break;
2427 2757
2428 packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL); 2758 status = ath6kl_htc_reset(target);
2429 if (!packet->buf_start) { 2759 if (status)
2430 kfree(packet); 2760 goto err_htc_cleanup;
2431 break;
2432 }
2433 2761
2434 packet->buf_len = ctrl_bufsz; 2762 return target;
2435 if (i < NUM_CONTROL_RX_BUFFERS) {
2436 packet->act_len = 0;
2437 packet->buf = packet->buf_start;
2438 packet->endpoint = ENDPOINT_0;
2439 list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
2440 } else
2441 list_add_tail(&packet->list, &target->free_ctrl_txbuf);
2442 }
2443 2763
2444fail_create_htc: 2764err_htc_cleanup:
2445 if (i != NUM_CONTROL_BUFFERS || status) { 2765 ath6kl_htc_cleanup(target);
2446 if (target) {
2447 ath6kl_htc_cleanup(target);
2448 target = NULL;
2449 }
2450 }
2451 2766
2452 return target; 2767 return NULL;
2453} 2768}
2454 2769
2455/* cleanup the HTC instance */ 2770/* cleanup the HTC instance */
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
index 8ce0c2c07ded..57672e1ed1a6 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.h
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -393,7 +393,7 @@ struct htc_endpoint_credit_dist {
393 int cred_per_msg; 393 int cred_per_msg;
394 394
395 /* reserved for HTC use */ 395 /* reserved for HTC use */
396 void *htc_rsvd; 396 struct htc_endpoint *htc_ep;
397 397
398 /* 398 /*
399 * current depth of TX queue , i.e. messages waiting for credits 399 * current depth of TX queue , i.e. messages waiting for credits
@@ -414,9 +414,11 @@ enum htc_credit_dist_reason {
414 HTC_CREDIT_DIST_SEEK_CREDITS, 414 HTC_CREDIT_DIST_SEEK_CREDITS,
415}; 415};
416 416
417struct htc_credit_state_info { 417struct ath6kl_htc_credit_info {
418 int total_avail_credits; 418 int total_avail_credits;
419 int cur_free_credits; 419 int cur_free_credits;
420
421 /* list of lowest priority endpoints */
420 struct list_head lowestpri_ep_dist; 422 struct list_head lowestpri_ep_dist;
421}; 423};
422 424
@@ -508,10 +510,13 @@ struct ath6kl_device;
508/* our HTC target state */ 510/* our HTC target state */
509struct htc_target { 511struct htc_target {
510 struct htc_endpoint endpoint[ENDPOINT_MAX]; 512 struct htc_endpoint endpoint[ENDPOINT_MAX];
513
514 /* contains struct htc_endpoint_credit_dist */
511 struct list_head cred_dist_list; 515 struct list_head cred_dist_list;
516
512 struct list_head free_ctrl_txbuf; 517 struct list_head free_ctrl_txbuf;
513 struct list_head free_ctrl_rxbuf; 518 struct list_head free_ctrl_rxbuf;
514 struct htc_credit_state_info *cred_dist_cntxt; 519 struct ath6kl_htc_credit_info *credit_info;
515 int tgt_creds; 520 int tgt_creds;
516 unsigned int tgt_cred_sz; 521 unsigned int tgt_cred_sz;
517 spinlock_t htc_lock; 522 spinlock_t htc_lock;
@@ -542,7 +547,7 @@ struct htc_target {
542 547
543void *ath6kl_htc_create(struct ath6kl *ar); 548void *ath6kl_htc_create(struct ath6kl *ar);
544void ath6kl_htc_set_credit_dist(struct htc_target *target, 549void ath6kl_htc_set_credit_dist(struct htc_target *target,
545 struct htc_credit_state_info *cred_info, 550 struct ath6kl_htc_credit_info *cred_info,
546 u16 svc_pri_order[], int len); 551 u16 svc_pri_order[], int len);
547int ath6kl_htc_wait_target(struct htc_target *target); 552int ath6kl_htc_wait_target(struct htc_target *target);
548int ath6kl_htc_start(struct htc_target *target); 553int ath6kl_htc_start(struct htc_target *target);
@@ -563,7 +568,10 @@ int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
563int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target, 568int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
564 struct list_head *pktq); 569 struct list_head *pktq);
565int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, 570int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
566 u32 msg_look_ahead[], int *n_pkts); 571 u32 msg_look_ahead, int *n_pkts);
572
573int ath6kl_credit_setup(void *htc_handle,
574 struct ath6kl_htc_credit_info *cred_info);
567 575
568static inline void set_htc_pkt_info(struct htc_packet *packet, void *context, 576static inline void set_htc_pkt_info(struct htc_packet *packet, void *context,
569 u8 *buf, unsigned int len, 577 u8 *buf, unsigned int len,
diff --git a/drivers/net/wireless/ath/ath6kl/htc_hif.h b/drivers/net/wireless/ath/ath6kl/htc_hif.h
deleted file mode 100644
index 171ad63d89b0..000000000000
--- a/drivers/net/wireless/ath/ath6kl/htc_hif.h
+++ /dev/null
@@ -1,92 +0,0 @@
1/*
2 * Copyright (c) 2007-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef HTC_HIF_H
18#define HTC_HIF_H
19
20#include "htc.h"
21#include "hif.h"
22
23#define ATH6KL_MAILBOXES 4
24
25/* HTC runs over mailbox 0 */
26#define HTC_MAILBOX 0
27
28#define ATH6KL_TARGET_DEBUG_INTR_MASK 0x01
29
30#define OTHER_INTS_ENABLED (INT_STATUS_ENABLE_ERROR_MASK | \
31 INT_STATUS_ENABLE_CPU_MASK | \
32 INT_STATUS_ENABLE_COUNTER_MASK)
33
34#define ATH6KL_REG_IO_BUFFER_SIZE 32
35#define ATH6KL_MAX_REG_IO_BUFFERS 8
36#define ATH6KL_SCATTER_ENTRIES_PER_REQ 16
37#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER (16 * 1024)
38#define ATH6KL_SCATTER_REQS 4
39
40#ifndef A_CACHE_LINE_PAD
41#define A_CACHE_LINE_PAD 128
42#endif
43#define ATH6KL_MIN_SCATTER_ENTRIES_PER_REQ 2
44#define ATH6KL_MIN_TRANSFER_SIZE_PER_SCATTER (4 * 1024)
45
46struct ath6kl_irq_proc_registers {
47 u8 host_int_status;
48 u8 cpu_int_status;
49 u8 error_int_status;
50 u8 counter_int_status;
51 u8 mbox_frame;
52 u8 rx_lkahd_valid;
53 u8 host_int_status2;
54 u8 gmbox_rx_avail;
55 __le32 rx_lkahd[2];
56 __le32 rx_gmbox_lkahd_alias[2];
57} __packed;
58
59struct ath6kl_irq_enable_reg {
60 u8 int_status_en;
61 u8 cpu_int_status_en;
62 u8 err_int_status_en;
63 u8 cntr_int_status_en;
64} __packed;
65
66struct ath6kl_device {
67 spinlock_t lock;
68 u8 pad1[A_CACHE_LINE_PAD];
69 struct ath6kl_irq_proc_registers irq_proc_reg;
70 u8 pad2[A_CACHE_LINE_PAD];
71 struct ath6kl_irq_enable_reg irq_en_reg;
72 u8 pad3[A_CACHE_LINE_PAD];
73 struct htc_target *htc_cnxt;
74 struct ath6kl *ar;
75};
76
77int ath6kldev_setup(struct ath6kl_device *dev);
78int ath6kldev_unmask_intrs(struct ath6kl_device *dev);
79int ath6kldev_mask_intrs(struct ath6kl_device *dev);
80int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev,
81 u32 *lk_ahd, int timeout);
82int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx);
83int ath6kldev_disable_intrs(struct ath6kl_device *dev);
84
85int ath6kldev_rw_comp_handler(void *context, int status);
86int ath6kldev_intr_bh_handler(struct ath6kl *ar);
87
88/* Scatter Function and Definitions */
89int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
90 struct hif_scatter_req *scat_req, bool read);
91
92#endif /*ATH6KL_H_ */
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 81e0031012ca..30050af9d4c6 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -16,6 +16,7 @@
16 */ 16 */
17 17
18#include <linux/moduleparam.h> 18#include <linux/moduleparam.h>
19#include <linux/errno.h>
19#include <linux/of.h> 20#include <linux/of.h>
20#include <linux/mmc/sdio_func.h> 21#include <linux/mmc/sdio_func.h>
21#include "core.h" 22#include "core.h"
@@ -26,9 +27,11 @@
26 27
27unsigned int debug_mask; 28unsigned int debug_mask;
28static unsigned int testmode; 29static unsigned int testmode;
30static bool suspend_cutpower;
29 31
30module_param(debug_mask, uint, 0644); 32module_param(debug_mask, uint, 0644);
31module_param(testmode, uint, 0644); 33module_param(testmode, uint, 0644);
34module_param(suspend_cutpower, bool, 0444);
32 35
33/* 36/*
34 * Include definitions here that can be used to tune the WLAN module 37 * Include definitions here that can be used to tune the WLAN module
@@ -73,37 +76,21 @@ struct sk_buff *ath6kl_buf_alloc(int size)
73 return skb; 76 return skb;
74} 77}
75 78
76void ath6kl_init_profile_info(struct ath6kl *ar) 79void ath6kl_init_profile_info(struct ath6kl_vif *vif)
77{ 80{
78 ar->ssid_len = 0; 81 vif->ssid_len = 0;
79 memset(ar->ssid, 0, sizeof(ar->ssid)); 82 memset(vif->ssid, 0, sizeof(vif->ssid));
80 83
81 ar->dot11_auth_mode = OPEN_AUTH; 84 vif->dot11_auth_mode = OPEN_AUTH;
82 ar->auth_mode = NONE_AUTH; 85 vif->auth_mode = NONE_AUTH;
83 ar->prwise_crypto = NONE_CRYPT; 86 vif->prwise_crypto = NONE_CRYPT;
84 ar->prwise_crypto_len = 0; 87 vif->prwise_crypto_len = 0;
85 ar->grp_crypto = NONE_CRYPT; 88 vif->grp_crypto = NONE_CRYPT;
86 ar->grp_crypto_len = 0; 89 vif->grp_crypto_len = 0;
87 memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list)); 90 memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
88 memset(ar->req_bssid, 0, sizeof(ar->req_bssid)); 91 memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
89 memset(ar->bssid, 0, sizeof(ar->bssid)); 92 memset(vif->bssid, 0, sizeof(vif->bssid));
90 ar->bss_ch = 0; 93 vif->bss_ch = 0;
91 ar->nw_type = ar->next_mode = INFRA_NETWORK;
92}
93
94static u8 ath6kl_get_fw_iftype(struct ath6kl *ar)
95{
96 switch (ar->nw_type) {
97 case INFRA_NETWORK:
98 return HI_OPTION_FW_MODE_BSS_STA;
99 case ADHOC_NETWORK:
100 return HI_OPTION_FW_MODE_IBSS;
101 case AP_NETWORK:
102 return HI_OPTION_FW_MODE_AP;
103 default:
104 ath6kl_err("Unsupported interface type :%d\n", ar->nw_type);
105 return 0xff;
106 }
107} 94}
108 95
109static int ath6kl_set_host_app_area(struct ath6kl *ar) 96static int ath6kl_set_host_app_area(struct ath6kl *ar)
@@ -120,7 +107,7 @@ static int ath6kl_set_host_app_area(struct ath6kl *ar)
120 return -EIO; 107 return -EIO;
121 108
122 address = TARG_VTOP(ar->target_type, data); 109 address = TARG_VTOP(ar->target_type, data);
123 host_app_area.wmi_protocol_ver = WMI_PROTOCOL_VERSION; 110 host_app_area.wmi_protocol_ver = cpu_to_le32(WMI_PROTOCOL_VERSION);
124 if (ath6kl_diag_write(ar, address, (u8 *) &host_app_area, 111 if (ath6kl_diag_write(ar, address, (u8 *) &host_app_area,
125 sizeof(struct host_app_area))) 112 sizeof(struct host_app_area)))
126 return -EIO; 113 return -EIO;
@@ -258,40 +245,12 @@ static int ath6kl_init_service_ep(struct ath6kl *ar)
258 return 0; 245 return 0;
259} 246}
260 247
261static void ath6kl_init_control_info(struct ath6kl *ar) 248void ath6kl_init_control_info(struct ath6kl_vif *vif)
262{ 249{
263 u8 ctr; 250 ath6kl_init_profile_info(vif);
264 251 vif->def_txkey_index = 0;
265 clear_bit(WMI_ENABLED, &ar->flag); 252 memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
266 ath6kl_init_profile_info(ar); 253 vif->ch_hint = 0;
267 ar->def_txkey_index = 0;
268 memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
269 ar->ch_hint = 0;
270 ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL;
271 ar->listen_intvl_b = 0;
272 ar->tx_pwr = 0;
273 clear_bit(SKIP_SCAN, &ar->flag);
274 set_bit(WMM_ENABLED, &ar->flag);
275 ar->intra_bss = 1;
276 memset(&ar->sc_params, 0, sizeof(ar->sc_params));
277 ar->sc_params.short_scan_ratio = WMI_SHORTSCANRATIO_DEFAULT;
278 ar->sc_params.scan_ctrl_flags = DEFAULT_SCAN_CTRL_FLAGS;
279 ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD;
280
281 memset((u8 *)ar->sta_list, 0,
282 AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));
283
284 spin_lock_init(&ar->mcastpsq_lock);
285
286 /* Init the PS queues */
287 for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
288 spin_lock_init(&ar->sta_list[ctr].psq_lock);
289 skb_queue_head_init(&ar->sta_list[ctr].psq);
290 }
291
292 skb_queue_head_init(&ar->mcastpsq);
293
294 memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
295} 254}
296 255
297/* 256/*
@@ -341,62 +300,7 @@ out:
341 return status; 300 return status;
342} 301}
343 302
344#define REG_DUMP_COUNT_AR6003 60 303static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx)
345#define REGISTER_DUMP_LEN_MAX 60
346
347static void ath6kl_dump_target_assert_info(struct ath6kl *ar)
348{
349 u32 address;
350 u32 regdump_loc = 0;
351 int status;
352 u32 regdump_val[REGISTER_DUMP_LEN_MAX];
353 u32 i;
354
355 if (ar->target_type != TARGET_TYPE_AR6003)
356 return;
357
358 /* the reg dump pointer is copied to the host interest area */
359 address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
360 address = TARG_VTOP(ar->target_type, address);
361
362 /* read RAM location through diagnostic window */
363 status = ath6kl_diag_read32(ar, address, &regdump_loc);
364
365 if (status || !regdump_loc) {
366 ath6kl_err("failed to get ptr to register dump area\n");
367 return;
368 }
369
370 ath6kl_dbg(ATH6KL_DBG_TRC, "location of register dump data: 0x%X\n",
371 regdump_loc);
372 regdump_loc = TARG_VTOP(ar->target_type, regdump_loc);
373
374 /* fetch register dump data */
375 status = ath6kl_diag_read(ar, regdump_loc, (u8 *)&regdump_val[0],
376 REG_DUMP_COUNT_AR6003 * (sizeof(u32)));
377
378 if (status) {
379 ath6kl_err("failed to get register dump\n");
380 return;
381 }
382 ath6kl_dbg(ATH6KL_DBG_TRC, "Register Dump:\n");
383
384 for (i = 0; i < REG_DUMP_COUNT_AR6003; i++)
385 ath6kl_dbg(ATH6KL_DBG_TRC, " %d : 0x%8.8X\n",
386 i, regdump_val[i]);
387
388}
389
390void ath6kl_target_failure(struct ath6kl *ar)
391{
392 ath6kl_err("target asserted\n");
393
394 /* try dumping target assertion information (if any) */
395 ath6kl_dump_target_assert_info(ar);
396
397}
398
399static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
400{ 304{
401 int status = 0; 305 int status = 0;
402 int ret; 306 int ret;
@@ -406,46 +310,50 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
406 * default values. Required if checksum offload is needed. Set 310 * default values. Required if checksum offload is needed. Set
407 * RxMetaVersion to 2. 311 * RxMetaVersion to 2.
408 */ 312 */
409 if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, 313 if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, idx,
410 ar->rx_meta_ver, 0, 0)) { 314 ar->rx_meta_ver, 0, 0)) {
411 ath6kl_err("unable to set the rx frame format\n"); 315 ath6kl_err("unable to set the rx frame format\n");
412 status = -EIO; 316 status = -EIO;
413 } 317 }
414 318
415 if (ar->conf_flags & ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN) 319 if (ar->conf_flags & ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN)
416 if ((ath6kl_wmi_pmparams_cmd(ar->wmi, 0, 1, 0, 0, 1, 320 if ((ath6kl_wmi_pmparams_cmd(ar->wmi, idx, 0, 1, 0, 0, 1,
417 IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN)) != 0) { 321 IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN)) != 0) {
418 ath6kl_err("unable to set power save fail event policy\n"); 322 ath6kl_err("unable to set power save fail event policy\n");
419 status = -EIO; 323 status = -EIO;
420 } 324 }
421 325
422 if (!(ar->conf_flags & ATH6KL_CONF_IGNORE_ERP_BARKER)) 326 if (!(ar->conf_flags & ATH6KL_CONF_IGNORE_ERP_BARKER))
423 if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, 0, 327 if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, idx, 0,
424 WMI_DONOT_IGNORE_BARKER_IN_ERP)) != 0) { 328 WMI_DONOT_IGNORE_BARKER_IN_ERP)) != 0) {
425 ath6kl_err("unable to set barker preamble policy\n"); 329 ath6kl_err("unable to set barker preamble policy\n");
426 status = -EIO; 330 status = -EIO;
427 } 331 }
428 332
429 if (ath6kl_wmi_set_keepalive_cmd(ar->wmi, 333 if (ath6kl_wmi_set_keepalive_cmd(ar->wmi, idx,
430 WLAN_CONFIG_KEEP_ALIVE_INTERVAL)) { 334 WLAN_CONFIG_KEEP_ALIVE_INTERVAL)) {
431 ath6kl_err("unable to set keep alive interval\n"); 335 ath6kl_err("unable to set keep alive interval\n");
432 status = -EIO; 336 status = -EIO;
433 } 337 }
434 338
435 if (ath6kl_wmi_disctimeout_cmd(ar->wmi, 339 if (ath6kl_wmi_disctimeout_cmd(ar->wmi, idx,
436 WLAN_CONFIG_DISCONNECT_TIMEOUT)) { 340 WLAN_CONFIG_DISCONNECT_TIMEOUT)) {
437 ath6kl_err("unable to set disconnect timeout\n"); 341 ath6kl_err("unable to set disconnect timeout\n");
438 status = -EIO; 342 status = -EIO;
439 } 343 }
440 344
441 if (!(ar->conf_flags & ATH6KL_CONF_ENABLE_TX_BURST)) 345 if (!(ar->conf_flags & ATH6KL_CONF_ENABLE_TX_BURST))
442 if (ath6kl_wmi_set_wmm_txop(ar->wmi, WMI_TXOP_DISABLED)) { 346 if (ath6kl_wmi_set_wmm_txop(ar->wmi, idx, WMI_TXOP_DISABLED)) {
443 ath6kl_err("unable to set txop bursting\n"); 347 ath6kl_err("unable to set txop bursting\n");
444 status = -EIO; 348 status = -EIO;
445 } 349 }
446 350
351 /*
352 * FIXME: Make sure p2p configurations are not applied to
353 * non-p2p capable interfaces when multivif support is enabled.
354 */
447 if (ar->p2p) { 355 if (ar->p2p) {
448 ret = ath6kl_wmi_info_req_cmd(ar->wmi, 356 ret = ath6kl_wmi_info_req_cmd(ar->wmi, idx,
449 P2P_FLAG_CAPABILITIES_REQ | 357 P2P_FLAG_CAPABILITIES_REQ |
450 P2P_FLAG_MACADDR_REQ | 358 P2P_FLAG_MACADDR_REQ |
451 P2P_FLAG_HMODEL_REQ); 359 P2P_FLAG_HMODEL_REQ);
@@ -457,9 +365,13 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
457 } 365 }
458 } 366 }
459 367
368 /*
369 * FIXME: Make sure p2p configurations are not applied to
370 * non-p2p capable interfaces when multivif support is enabled.
371 */
460 if (ar->p2p) { 372 if (ar->p2p) {
461 /* Enable Probe Request reporting for P2P */ 373 /* Enable Probe Request reporting for P2P */
462 ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, true); 374 ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, idx, true);
463 if (ret) { 375 if (ret) {
464 ath6kl_dbg(ATH6KL_DBG_TRC, "failed to enable Probe " 376 ath6kl_dbg(ATH6KL_DBG_TRC, "failed to enable Probe "
465 "Request reporting (%d)\n", ret); 377 "Request reporting (%d)\n", ret);
@@ -472,13 +384,44 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
472int ath6kl_configure_target(struct ath6kl *ar) 384int ath6kl_configure_target(struct ath6kl *ar)
473{ 385{
474 u32 param, ram_reserved_size; 386 u32 param, ram_reserved_size;
475 u8 fw_iftype; 387 u8 fw_iftype, fw_mode = 0, fw_submode = 0;
388 int i;
476 389
477 fw_iftype = ath6kl_get_fw_iftype(ar); 390 /*
478 if (fw_iftype == 0xff) 391 * Note: Even though the firmware interface type is
479 return -EINVAL; 392 * chosen as BSS_STA for all three interfaces, can
393 * be configured to IBSS/AP as long as the fw submode
394 * remains normal mode (0 - AP, STA and IBSS). But
395 * due to an target assert in firmware only one interface is
396 * configured for now.
397 */
398 fw_iftype = HI_OPTION_FW_MODE_BSS_STA;
399
400 for (i = 0; i < MAX_NUM_VIF; i++)
401 fw_mode |= fw_iftype << (i * HI_OPTION_FW_MODE_BITS);
402
403 /*
404 * By default, submodes :
405 * vif[0] - AP/STA/IBSS
406 * vif[1] - "P2P dev"/"P2P GO"/"P2P Client"
407 * vif[2] - "P2P dev"/"P2P GO"/"P2P Client"
408 */
409
410 for (i = 0; i < ar->max_norm_iface; i++)
411 fw_submode |= HI_OPTION_FW_SUBMODE_NONE <<
412 (i * HI_OPTION_FW_SUBMODE_BITS);
413
414 for (i = ar->max_norm_iface; i < MAX_NUM_VIF; i++)
415 fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV <<
416 (i * HI_OPTION_FW_SUBMODE_BITS);
417
418 /*
419 * FIXME: This needs to be removed once the multivif
420 * support is enabled.
421 */
422 if (ar->p2p)
423 fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV;
480 424
481 /* Tell target which HTC version it is used*/
482 param = HTC_PROTOCOL_VERSION; 425 param = HTC_PROTOCOL_VERSION;
483 if (ath6kl_bmi_write(ar, 426 if (ath6kl_bmi_write(ar,
484 ath6kl_get_hi_item_addr(ar, 427 ath6kl_get_hi_item_addr(ar,
@@ -499,12 +442,10 @@ int ath6kl_configure_target(struct ath6kl *ar)
499 return -EIO; 442 return -EIO;
500 } 443 }
501 444
502 param |= (1 << HI_OPTION_NUM_DEV_SHIFT); 445 param |= (MAX_NUM_VIF << HI_OPTION_NUM_DEV_SHIFT);
503 param |= (fw_iftype << HI_OPTION_FW_MODE_SHIFT); 446 param |= fw_mode << HI_OPTION_FW_MODE_SHIFT;
504 if (ar->p2p && fw_iftype == HI_OPTION_FW_MODE_BSS_STA) { 447 param |= fw_submode << HI_OPTION_FW_SUBMODE_SHIFT;
505 param |= HI_OPTION_FW_SUBMODE_P2PDEV << 448
506 HI_OPTION_FW_SUBMODE_SHIFT;
507 }
508 param |= (0 << HI_OPTION_MAC_ADDR_METHOD_SHIFT); 449 param |= (0 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
509 param |= (0 << HI_OPTION_FW_BRIDGE_SHIFT); 450 param |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
510 451
@@ -553,68 +494,34 @@ int ath6kl_configure_target(struct ath6kl *ar)
553 return 0; 494 return 0;
554} 495}
555 496
556struct ath6kl *ath6kl_core_alloc(struct device *sdev) 497void ath6kl_core_free(struct ath6kl *ar)
557{ 498{
558 struct net_device *dev; 499 wiphy_free(ar->wiphy);
559 struct ath6kl *ar; 500}
560 struct wireless_dev *wdev;
561
562 wdev = ath6kl_cfg80211_init(sdev);
563 if (!wdev) {
564 ath6kl_err("ath6kl_cfg80211_init failed\n");
565 return NULL;
566 }
567
568 ar = wdev_priv(wdev);
569 ar->dev = sdev;
570 ar->wdev = wdev;
571 wdev->iftype = NL80211_IFTYPE_STATION;
572
573 if (ath6kl_debug_init(ar)) {
574 ath6kl_err("Failed to initialize debugfs\n");
575 ath6kl_cfg80211_deinit(ar);
576 return NULL;
577 }
578
579 dev = alloc_netdev(0, "wlan%d", ether_setup);
580 if (!dev) {
581 ath6kl_err("no memory for network device instance\n");
582 ath6kl_cfg80211_deinit(ar);
583 return NULL;
584 }
585
586 dev->ieee80211_ptr = wdev;
587 SET_NETDEV_DEV(dev, wiphy_dev(wdev->wiphy));
588 wdev->netdev = dev;
589 ar->sme_state = SME_DISCONNECTED;
590
591 init_netdev(dev);
592 501
593 ar->net_dev = dev; 502void ath6kl_core_cleanup(struct ath6kl *ar)
594 set_bit(WLAN_ENABLED, &ar->flag); 503{
504 ath6kl_hif_power_off(ar);
595 505
596 ar->wlan_pwr_state = WLAN_POWER_STATE_ON; 506 destroy_workqueue(ar->ath6kl_wq);
597 507
598 spin_lock_init(&ar->lock); 508 if (ar->htc_target)
509 ath6kl_htc_cleanup(ar->htc_target);
599 510
600 ath6kl_init_control_info(ar); 511 ath6kl_cookie_cleanup(ar);
601 init_waitqueue_head(&ar->event_wq);
602 sema_init(&ar->sem, 1);
603 clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
604 512
605 INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue); 513 ath6kl_cleanup_amsdu_rxbufs(ar);
606 514
607 setup_timer(&ar->disconnect_timer, disconnect_timer_handler, 515 ath6kl_bmi_cleanup(ar);
608 (unsigned long) dev);
609 516
610 return ar; 517 ath6kl_debug_cleanup(ar);
611}
612 518
613int ath6kl_unavail_ev(struct ath6kl *ar) 519 kfree(ar->fw_board);
614{ 520 kfree(ar->fw_otp);
615 ath6kl_destroy(ar->net_dev, 1); 521 kfree(ar->fw);
522 kfree(ar->fw_patch);
616 523
617 return 0; 524 ath6kl_deinit_ieee80211_hw(ar);
618} 525}
619 526
620/* firmware upload */ 527/* firmware upload */
@@ -1182,6 +1089,7 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
1182static int ath6kl_upload_otp(struct ath6kl *ar) 1089static int ath6kl_upload_otp(struct ath6kl *ar)
1183{ 1090{
1184 u32 address, param; 1091 u32 address, param;
1092 bool from_hw = false;
1185 int ret; 1093 int ret;
1186 1094
1187 if (WARN_ON(ar->fw_otp == NULL)) 1095 if (WARN_ON(ar->fw_otp == NULL))
@@ -1210,15 +1118,20 @@ static int ath6kl_upload_otp(struct ath6kl *ar)
1210 return ret; 1118 return ret;
1211 } 1119 }
1212 1120
1213 ar->hw.app_start_override_addr = address; 1121 if (ar->hw.app_start_override_addr == 0) {
1122 ar->hw.app_start_override_addr = address;
1123 from_hw = true;
1124 }
1214 1125
1215 ath6kl_dbg(ATH6KL_DBG_BOOT, "app_start_override_addr 0x%x\n", 1126 ath6kl_dbg(ATH6KL_DBG_BOOT, "app_start_override_addr%s 0x%x\n",
1127 from_hw ? " (from hw)" : "",
1216 ar->hw.app_start_override_addr); 1128 ar->hw.app_start_override_addr);
1217 1129
1218 /* execute the OTP code */ 1130 /* execute the OTP code */
1219 ath6kl_dbg(ATH6KL_DBG_BOOT, "executing OTP at 0x%x\n", address); 1131 ath6kl_dbg(ATH6KL_DBG_BOOT, "executing OTP at 0x%x\n",
1132 ar->hw.app_start_override_addr);
1220 param = 0; 1133 param = 0;
1221 ath6kl_bmi_execute(ar, address, &param); 1134 ath6kl_bmi_execute(ar, ar->hw.app_start_override_addr, &param);
1222 1135
1223 return ret; 1136 return ret;
1224} 1137}
@@ -1420,6 +1333,10 @@ static int ath6kl_init_hw_params(struct ath6kl *ar)
1420 ar->hw.app_load_addr = AR6003_REV2_APP_LOAD_ADDRESS; 1333 ar->hw.app_load_addr = AR6003_REV2_APP_LOAD_ADDRESS;
1421 ar->hw.board_ext_data_addr = AR6003_REV2_BOARD_EXT_DATA_ADDRESS; 1334 ar->hw.board_ext_data_addr = AR6003_REV2_BOARD_EXT_DATA_ADDRESS;
1422 ar->hw.reserved_ram_size = AR6003_REV2_RAM_RESERVE_SIZE; 1335 ar->hw.reserved_ram_size = AR6003_REV2_RAM_RESERVE_SIZE;
1336
1337 /* hw2.0 needs override address hardcoded */
1338 ar->hw.app_start_override_addr = 0x944C00;
1339
1423 break; 1340 break;
1424 case AR6003_REV3_VERSION: 1341 case AR6003_REV3_VERSION:
1425 ar->hw.dataset_patch_addr = AR6003_REV3_DATASET_PATCH_ADDRESS; 1342 ar->hw.dataset_patch_addr = AR6003_REV3_DATASET_PATCH_ADDRESS;
@@ -1451,71 +1368,56 @@ static int ath6kl_init_hw_params(struct ath6kl *ar)
1451 return 0; 1368 return 0;
1452} 1369}
1453 1370
1454static int ath6kl_init(struct net_device *dev) 1371int ath6kl_init_hw_start(struct ath6kl *ar)
1455{ 1372{
1456 struct ath6kl *ar = ath6kl_priv(dev); 1373 long timeleft;
1457 int status = 0; 1374 int ret, i;
1458 s32 timeleft;
1459 1375
1460 if (!ar) 1376 ath6kl_dbg(ATH6KL_DBG_BOOT, "hw start\n");
1461 return -EIO; 1377
1378 ret = ath6kl_hif_power_on(ar);
1379 if (ret)
1380 return ret;
1381
1382 ret = ath6kl_configure_target(ar);
1383 if (ret)
1384 goto err_power_off;
1385
1386 ret = ath6kl_init_upload(ar);
1387 if (ret)
1388 goto err_power_off;
1462 1389
1463 /* Do we need to finish the BMI phase */ 1390 /* Do we need to finish the BMI phase */
1391 /* FIXME: return error from ath6kl_bmi_done() */
1464 if (ath6kl_bmi_done(ar)) { 1392 if (ath6kl_bmi_done(ar)) {
1465 status = -EIO; 1393 ret = -EIO;
1466 goto ath6kl_init_done; 1394 goto err_power_off;
1467 } 1395 }
1468 1396
1469 /* Indicate that WMI is enabled (although not ready yet) */
1470 set_bit(WMI_ENABLED, &ar->flag);
1471 ar->wmi = ath6kl_wmi_init(ar);
1472 if (!ar->wmi) {
1473 ath6kl_err("failed to initialize wmi\n");
1474 status = -EIO;
1475 goto ath6kl_init_done;
1476 }
1477
1478 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
1479
1480 /* 1397 /*
1481 * The reason we have to wait for the target here is that the 1398 * The reason we have to wait for the target here is that the
1482 * driver layer has to init BMI in order to set the host block 1399 * driver layer has to init BMI in order to set the host block
1483 * size. 1400 * size.
1484 */ 1401 */
1485 if (ath6kl_htc_wait_target(ar->htc_target)) { 1402 if (ath6kl_htc_wait_target(ar->htc_target)) {
1486 status = -EIO; 1403 ret = -EIO;
1487 goto err_node_cleanup; 1404 goto err_power_off;
1488 } 1405 }
1489 1406
1490 if (ath6kl_init_service_ep(ar)) { 1407 if (ath6kl_init_service_ep(ar)) {
1491 status = -EIO; 1408 ret = -EIO;
1492 goto err_cleanup_scatter; 1409 goto err_cleanup_scatter;
1493 } 1410 }
1494 1411
1495 /* setup access class priority mappings */
1496 ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */
1497 ar->ac_stream_pri_map[WMM_AC_BE] = 1;
1498 ar->ac_stream_pri_map[WMM_AC_VI] = 2;
1499 ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
1500
1501 /* give our connected endpoints some buffers */
1502 ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
1503 ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
1504
1505 /* allocate some buffers that handle larger AMSDU frames */
1506 ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
1507
1508 /* setup credit distribution */ 1412 /* setup credit distribution */
1509 ath6k_setup_credit_dist(ar->htc_target, &ar->credit_state_info); 1413 ath6kl_credit_setup(ar->htc_target, &ar->credit_state_info);
1510
1511 ath6kl_cookie_init(ar);
1512 1414
1513 /* start HTC */ 1415 /* start HTC */
1514 status = ath6kl_htc_start(ar->htc_target); 1416 ret = ath6kl_htc_start(ar->htc_target);
1515 1417 if (ret) {
1516 if (status) { 1418 /* FIXME: call this */
1517 ath6kl_cookie_cleanup(ar); 1419 ath6kl_cookie_cleanup(ar);
1518 goto err_rxbuf_cleanup; 1420 goto err_cleanup_scatter;
1519 } 1421 }
1520 1422
1521 /* Wait for Wmi event to be ready */ 1423 /* Wait for Wmi event to be ready */
@@ -1529,52 +1431,69 @@ static int ath6kl_init(struct net_device *dev)
1529 if (ar->version.abi_ver != ATH6KL_ABI_VERSION) { 1431 if (ar->version.abi_ver != ATH6KL_ABI_VERSION) {
1530 ath6kl_err("abi version mismatch: host(0x%x), target(0x%x)\n", 1432 ath6kl_err("abi version mismatch: host(0x%x), target(0x%x)\n",
1531 ATH6KL_ABI_VERSION, ar->version.abi_ver); 1433 ATH6KL_ABI_VERSION, ar->version.abi_ver);
1532 status = -EIO; 1434 ret = -EIO;
1533 goto err_htc_stop; 1435 goto err_htc_stop;
1534 } 1436 }
1535 1437
1536 if (!timeleft || signal_pending(current)) { 1438 if (!timeleft || signal_pending(current)) {
1537 ath6kl_err("wmi is not ready or wait was interrupted\n"); 1439 ath6kl_err("wmi is not ready or wait was interrupted\n");
1538 status = -EIO; 1440 ret = -EIO;
1539 goto err_htc_stop; 1441 goto err_htc_stop;
1540 } 1442 }
1541 1443
1542 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__); 1444 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__);
1543 1445
1544 /* communicate the wmi protocol verision to the target */ 1446 /* communicate the wmi protocol verision to the target */
1447 /* FIXME: return error */
1545 if ((ath6kl_set_host_app_area(ar)) != 0) 1448 if ((ath6kl_set_host_app_area(ar)) != 0)
1546 ath6kl_err("unable to set the host app area\n"); 1449 ath6kl_err("unable to set the host app area\n");
1547 1450
1548 ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER | 1451 for (i = 0; i < MAX_NUM_VIF; i++) {
1549 ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST; 1452 ret = ath6kl_target_config_wlan_params(ar, i);
1453 if (ret)
1454 goto err_htc_stop;
1455 }
1550 1456
1551 ar->wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM | 1457 ar->state = ATH6KL_STATE_ON;
1552 WIPHY_FLAG_HAVE_AP_SME;
1553 1458
1554 status = ath6kl_target_config_wlan_params(ar); 1459 return 0;
1555 if (!status)
1556 goto ath6kl_init_done;
1557 1460
1558err_htc_stop: 1461err_htc_stop:
1559 ath6kl_htc_stop(ar->htc_target); 1462 ath6kl_htc_stop(ar->htc_target);
1560err_rxbuf_cleanup:
1561 ath6kl_htc_flush_rx_buf(ar->htc_target);
1562 ath6kl_cleanup_amsdu_rxbufs(ar);
1563err_cleanup_scatter: 1463err_cleanup_scatter:
1564 ath6kl_hif_cleanup_scatter(ar); 1464 ath6kl_hif_cleanup_scatter(ar);
1565err_node_cleanup: 1465err_power_off:
1566 ath6kl_wmi_shutdown(ar->wmi); 1466 ath6kl_hif_power_off(ar);
1567 clear_bit(WMI_ENABLED, &ar->flag);
1568 ar->wmi = NULL;
1569 1467
1570ath6kl_init_done: 1468 return ret;
1571 return status; 1469}
1470
1471int ath6kl_init_hw_stop(struct ath6kl *ar)
1472{
1473 int ret;
1474
1475 ath6kl_dbg(ATH6KL_DBG_BOOT, "hw stop\n");
1476
1477 ath6kl_htc_stop(ar->htc_target);
1478
1479 ath6kl_hif_stop(ar);
1480
1481 ath6kl_bmi_reset(ar);
1482
1483 ret = ath6kl_hif_power_off(ar);
1484 if (ret)
1485 ath6kl_warn("failed to power off hif: %d\n", ret);
1486
1487 ar->state = ATH6KL_STATE_OFF;
1488
1489 return 0;
1572} 1490}
1573 1491
1574int ath6kl_core_init(struct ath6kl *ar) 1492int ath6kl_core_init(struct ath6kl *ar)
1575{ 1493{
1576 int ret = 0;
1577 struct ath6kl_bmi_target_info targ_info; 1494 struct ath6kl_bmi_target_info targ_info;
1495 struct net_device *ndev;
1496 int ret = 0, i;
1578 1497
1579 ar->ath6kl_wq = create_singlethread_workqueue("ath6kl"); 1498 ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
1580 if (!ar->ath6kl_wq) 1499 if (!ar->ath6kl_wq)
@@ -1584,145 +1503,226 @@ int ath6kl_core_init(struct ath6kl *ar)
1584 if (ret) 1503 if (ret)
1585 goto err_wq; 1504 goto err_wq;
1586 1505
1587 ret = ath6kl_bmi_get_target_info(ar, &targ_info); 1506 /*
1507 * Turn on power to get hardware (target) version and leave power
1508 * on delibrately as we will boot the hardware anyway within few
1509 * seconds.
1510 */
1511 ret = ath6kl_hif_power_on(ar);
1588 if (ret) 1512 if (ret)
1589 goto err_bmi_cleanup; 1513 goto err_bmi_cleanup;
1590 1514
1515 ret = ath6kl_bmi_get_target_info(ar, &targ_info);
1516 if (ret)
1517 goto err_power_off;
1518
1591 ar->version.target_ver = le32_to_cpu(targ_info.version); 1519 ar->version.target_ver = le32_to_cpu(targ_info.version);
1592 ar->target_type = le32_to_cpu(targ_info.type); 1520 ar->target_type = le32_to_cpu(targ_info.type);
1593 ar->wdev->wiphy->hw_version = le32_to_cpu(targ_info.version); 1521 ar->wiphy->hw_version = le32_to_cpu(targ_info.version);
1594 1522
1595 ret = ath6kl_init_hw_params(ar); 1523 ret = ath6kl_init_hw_params(ar);
1596 if (ret) 1524 if (ret)
1597 goto err_bmi_cleanup; 1525 goto err_power_off;
1598
1599 ret = ath6kl_configure_target(ar);
1600 if (ret)
1601 goto err_bmi_cleanup;
1602 1526
1603 ar->htc_target = ath6kl_htc_create(ar); 1527 ar->htc_target = ath6kl_htc_create(ar);
1604 1528
1605 if (!ar->htc_target) { 1529 if (!ar->htc_target) {
1606 ret = -ENOMEM; 1530 ret = -ENOMEM;
1607 goto err_bmi_cleanup; 1531 goto err_power_off;
1608 }
1609
1610 ar->aggr_cntxt = aggr_init(ar->net_dev);
1611 if (!ar->aggr_cntxt) {
1612 ath6kl_err("failed to initialize aggr\n");
1613 ret = -ENOMEM;
1614 goto err_htc_cleanup;
1615 } 1532 }
1616 1533
1617 ret = ath6kl_fetch_firmwares(ar); 1534 ret = ath6kl_fetch_firmwares(ar);
1618 if (ret) 1535 if (ret)
1619 goto err_htc_cleanup; 1536 goto err_htc_cleanup;
1620 1537
1621 ret = ath6kl_init_upload(ar); 1538 /* FIXME: we should free all firmwares in the error cases below */
1622 if (ret) 1539
1540 /* Indicate that WMI is enabled (although not ready yet) */
1541 set_bit(WMI_ENABLED, &ar->flag);
1542 ar->wmi = ath6kl_wmi_init(ar);
1543 if (!ar->wmi) {
1544 ath6kl_err("failed to initialize wmi\n");
1545 ret = -EIO;
1623 goto err_htc_cleanup; 1546 goto err_htc_cleanup;
1547 }
1624 1548
1625 ret = ath6kl_init(ar->net_dev); 1549 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
1550
1551 ret = ath6kl_register_ieee80211_hw(ar);
1626 if (ret) 1552 if (ret)
1627 goto err_htc_cleanup; 1553 goto err_node_cleanup;
1628 1554
1629 /* This runs the init function if registered */ 1555 ret = ath6kl_debug_init(ar);
1630 ret = register_netdev(ar->net_dev);
1631 if (ret) { 1556 if (ret) {
1632 ath6kl_err("register_netdev failed\n"); 1557 wiphy_unregister(ar->wiphy);
1633 ath6kl_destroy(ar->net_dev, 0); 1558 goto err_node_cleanup;
1634 return ret; 1559 }
1560
1561 for (i = 0; i < MAX_NUM_VIF; i++)
1562 ar->avail_idx_map |= BIT(i);
1563
1564 rtnl_lock();
1565
1566 /* Add an initial station interface */
1567 ndev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION, 0,
1568 INFRA_NETWORK);
1569
1570 rtnl_unlock();
1571
1572 if (!ndev) {
1573 ath6kl_err("Failed to instantiate a network device\n");
1574 ret = -ENOMEM;
1575 wiphy_unregister(ar->wiphy);
1576 goto err_debug_init;
1635 } 1577 }
1636 1578
1637 set_bit(NETDEV_REGISTERED, &ar->flag);
1638 1579
1639 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n", 1580 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
1640 __func__, ar->net_dev->name, ar->net_dev, ar); 1581 __func__, ndev->name, ndev, ar);
1582
1583 /* setup access class priority mappings */
1584 ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */
1585 ar->ac_stream_pri_map[WMM_AC_BE] = 1;
1586 ar->ac_stream_pri_map[WMM_AC_VI] = 2;
1587 ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
1588
1589 /* give our connected endpoints some buffers */
1590 ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
1591 ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
1592
1593 /* allocate some buffers that handle larger AMSDU frames */
1594 ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
1595
1596 ath6kl_cookie_init(ar);
1597
1598 ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
1599 ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
1600
1601 if (suspend_cutpower)
1602 ar->conf_flags |= ATH6KL_CONF_SUSPEND_CUTPOWER;
1603
1604 ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
1605 WIPHY_FLAG_HAVE_AP_SME |
1606 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
1607
1608 set_bit(FIRST_BOOT, &ar->flag);
1609
1610 ret = ath6kl_init_hw_start(ar);
1611 if (ret) {
1612 ath6kl_err("Failed to start hardware: %d\n", ret);
1613 goto err_rxbuf_cleanup;
1614 }
1615
1616 /*
1617 * Set mac address which is received in ready event
1618 * FIXME: Move to ath6kl_interface_add()
1619 */
1620 memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
1641 1621
1642 return ret; 1622 return ret;
1643 1623
1624err_rxbuf_cleanup:
1625 ath6kl_htc_flush_rx_buf(ar->htc_target);
1626 ath6kl_cleanup_amsdu_rxbufs(ar);
1627 rtnl_lock();
1628 ath6kl_deinit_if_data(netdev_priv(ndev));
1629 rtnl_unlock();
1630 wiphy_unregister(ar->wiphy);
1631err_debug_init:
1632 ath6kl_debug_cleanup(ar);
1633err_node_cleanup:
1634 ath6kl_wmi_shutdown(ar->wmi);
1635 clear_bit(WMI_ENABLED, &ar->flag);
1636 ar->wmi = NULL;
1644err_htc_cleanup: 1637err_htc_cleanup:
1645 ath6kl_htc_cleanup(ar->htc_target); 1638 ath6kl_htc_cleanup(ar->htc_target);
1639err_power_off:
1640 ath6kl_hif_power_off(ar);
1646err_bmi_cleanup: 1641err_bmi_cleanup:
1647 ath6kl_bmi_cleanup(ar); 1642 ath6kl_bmi_cleanup(ar);
1648err_wq: 1643err_wq:
1649 destroy_workqueue(ar->ath6kl_wq); 1644 destroy_workqueue(ar->ath6kl_wq);
1645
1650 return ret; 1646 return ret;
1651} 1647}
1652 1648
1653void ath6kl_stop_txrx(struct ath6kl *ar) 1649void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready)
1654{ 1650{
1655 struct net_device *ndev = ar->net_dev; 1651 static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1652 bool discon_issued;
1656 1653
1657 if (!ndev) 1654 netif_stop_queue(vif->ndev);
1658 return;
1659 1655
1660 set_bit(DESTROY_IN_PROGRESS, &ar->flag); 1656 clear_bit(WLAN_ENABLED, &vif->flags);
1661 1657
1662 if (down_interruptible(&ar->sem)) { 1658 if (wmi_ready) {
1663 ath6kl_err("down_interruptible failed\n"); 1659 discon_issued = test_bit(CONNECTED, &vif->flags) ||
1664 return; 1660 test_bit(CONNECT_PEND, &vif->flags);
1665 } 1661 ath6kl_disconnect(vif);
1662 del_timer(&vif->disconnect_timer);
1666 1663
1667 if (ar->wlan_pwr_state != WLAN_POWER_STATE_CUT_PWR) 1664 if (discon_issued)
1668 ath6kl_stop_endpoint(ndev, false, true); 1665 ath6kl_disconnect_event(vif, DISCONNECT_CMD,
1666 (vif->nw_type & AP_NETWORK) ?
1667 bcast_mac : vif->bssid,
1668 0, NULL, 0);
1669 }
1669 1670
1670 clear_bit(WLAN_ENABLED, &ar->flag); 1671 if (vif->scan_req) {
1672 cfg80211_scan_done(vif->scan_req, true);
1673 vif->scan_req = NULL;
1674 }
1671} 1675}
1672 1676
1673/* 1677void ath6kl_stop_txrx(struct ath6kl *ar)
1674 * We need to differentiate between the surprise and planned removal of the
1675 * device because of the following consideration:
1676 *
1677 * - In case of surprise removal, the hcd already frees up the pending
1678 * for the device and hence there is no need to unregister the function
1679 * driver inorder to get these requests. For planned removal, the function
1680 * driver has to explicitly unregister itself to have the hcd return all the
1681 * pending requests before the data structures for the devices are freed up.
1682 * Note that as per the current implementation, the function driver will
1683 * end up releasing all the devices since there is no API to selectively
1684 * release a particular device.
1685 *
1686 * - Certain commands issued to the target can be skipped for surprise
1687 * removal since they will anyway not go through.
1688 */
1689void ath6kl_destroy(struct net_device *dev, unsigned int unregister)
1690{ 1678{
1691 struct ath6kl *ar; 1679 struct ath6kl_vif *vif, *tmp_vif;
1692 1680
1693 if (!dev || !ath6kl_priv(dev)) { 1681 set_bit(DESTROY_IN_PROGRESS, &ar->flag);
1694 ath6kl_err("failed to get device structure\n"); 1682
1683 if (down_interruptible(&ar->sem)) {
1684 ath6kl_err("down_interruptible failed\n");
1695 return; 1685 return;
1696 } 1686 }
1697 1687
1698 ar = ath6kl_priv(dev); 1688 spin_lock_bh(&ar->list_lock);
1699 1689 list_for_each_entry_safe(vif, tmp_vif, &ar->vif_list, list) {
1700 destroy_workqueue(ar->ath6kl_wq); 1690 list_del(&vif->list);
1701 1691 spin_unlock_bh(&ar->list_lock);
1702 if (ar->htc_target) 1692 ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
1703 ath6kl_htc_cleanup(ar->htc_target); 1693 rtnl_lock();
1704 1694 ath6kl_deinit_if_data(vif);
1705 aggr_module_destroy(ar->aggr_cntxt); 1695 rtnl_unlock();
1706 1696 spin_lock_bh(&ar->list_lock);
1707 ath6kl_cookie_cleanup(ar); 1697 }
1708 1698 spin_unlock_bh(&ar->list_lock);
1709 ath6kl_cleanup_amsdu_rxbufs(ar);
1710 1699
1711 ath6kl_bmi_cleanup(ar); 1700 clear_bit(WMI_READY, &ar->flag);
1712 1701
1713 ath6kl_debug_cleanup(ar); 1702 /*
1703 * After wmi_shudown all WMI events will be dropped. We
1704 * need to cleanup the buffers allocated in AP mode and
1705 * give disconnect notification to stack, which usually
1706 * happens in the disconnect_event. Simulate the disconnect
1707 * event by calling the function directly. Sometimes
1708 * disconnect_event will be received when the debug logs
1709 * are collected.
1710 */
1711 ath6kl_wmi_shutdown(ar->wmi);
1714 1712
1715 if (unregister && test_bit(NETDEV_REGISTERED, &ar->flag)) { 1713 clear_bit(WMI_ENABLED, &ar->flag);
1716 unregister_netdev(dev); 1714 if (ar->htc_target) {
1717 clear_bit(NETDEV_REGISTERED, &ar->flag); 1715 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__);
1716 ath6kl_htc_stop(ar->htc_target);
1718 } 1717 }
1719 1718
1720 free_netdev(dev); 1719 /*
1721 1720 * Try to reset the device if we can. The driver may have been
1722 kfree(ar->fw_board); 1721 * configure NOT to reset the target during a debug session.
1723 kfree(ar->fw_otp); 1722 */
1724 kfree(ar->fw); 1723 ath6kl_dbg(ATH6KL_DBG_TRC,
1725 kfree(ar->fw_patch); 1724 "attempting to reset target on instance destroy\n");
1725 ath6kl_reset_device(ar, ar->target_type, true, true);
1726 1726
1727 ath6kl_cfg80211_deinit(ar); 1727 clear_bit(WLAN_ENABLED, &ar->flag);
1728} 1728}
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 30b5a53db9ed..5e5f4ca8f3f0 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -20,12 +20,13 @@
20#include "target.h" 20#include "target.h"
21#include "debug.h" 21#include "debug.h"
22 22
23struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 *node_addr) 23struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr)
24{ 24{
25 struct ath6kl *ar = vif->ar;
25 struct ath6kl_sta *conn = NULL; 26 struct ath6kl_sta *conn = NULL;
26 u8 i, max_conn; 27 u8 i, max_conn;
27 28
28 max_conn = (ar->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0; 29 max_conn = (vif->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
29 30
30 for (i = 0; i < max_conn; i++) { 31 for (i = 0; i < max_conn; i++) {
31 if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) { 32 if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) {
@@ -393,8 +394,8 @@ out:
393#define AR6003_RESET_CONTROL_ADDRESS 0x00004000 394#define AR6003_RESET_CONTROL_ADDRESS 0x00004000
394#define AR6004_RESET_CONTROL_ADDRESS 0x00004000 395#define AR6004_RESET_CONTROL_ADDRESS 0x00004000
395 396
396static void ath6kl_reset_device(struct ath6kl *ar, u32 target_type, 397void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
397 bool wait_fot_compltn, bool cold_reset) 398 bool wait_fot_compltn, bool cold_reset)
398{ 399{
399 int status = 0; 400 int status = 0;
400 u32 address; 401 u32 address;
@@ -425,102 +426,33 @@ static void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
425 ath6kl_err("failed to reset target\n"); 426 ath6kl_err("failed to reset target\n");
426} 427}
427 428
428void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile, 429static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif)
429 bool get_dbglogs)
430{
431 struct ath6kl *ar = ath6kl_priv(dev);
432 static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
433 bool discon_issued;
434
435 netif_stop_queue(dev);
436
437 /* disable the target and the interrupts associated with it */
438 if (test_bit(WMI_READY, &ar->flag)) {
439 discon_issued = (test_bit(CONNECTED, &ar->flag) ||
440 test_bit(CONNECT_PEND, &ar->flag));
441 ath6kl_disconnect(ar);
442 if (!keep_profile)
443 ath6kl_init_profile_info(ar);
444
445 del_timer(&ar->disconnect_timer);
446
447 clear_bit(WMI_READY, &ar->flag);
448 ath6kl_wmi_shutdown(ar->wmi);
449 clear_bit(WMI_ENABLED, &ar->flag);
450 ar->wmi = NULL;
451
452 /*
453 * After wmi_shudown all WMI events will be dropped. We
454 * need to cleanup the buffers allocated in AP mode and
455 * give disconnect notification to stack, which usually
456 * happens in the disconnect_event. Simulate the disconnect
457 * event by calling the function directly. Sometimes
458 * disconnect_event will be received when the debug logs
459 * are collected.
460 */
461 if (discon_issued)
462 ath6kl_disconnect_event(ar, DISCONNECT_CMD,
463 (ar->nw_type & AP_NETWORK) ?
464 bcast_mac : ar->bssid,
465 0, NULL, 0);
466
467 ar->user_key_ctrl = 0;
468
469 } else {
470 ath6kl_dbg(ATH6KL_DBG_TRC,
471 "%s: wmi is not ready 0x%p 0x%p\n",
472 __func__, ar, ar->wmi);
473
474 /* Shut down WMI if we have started it */
475 if (test_bit(WMI_ENABLED, &ar->flag)) {
476 ath6kl_dbg(ATH6KL_DBG_TRC,
477 "%s: shut down wmi\n", __func__);
478 ath6kl_wmi_shutdown(ar->wmi);
479 clear_bit(WMI_ENABLED, &ar->flag);
480 ar->wmi = NULL;
481 }
482 }
483
484 if (ar->htc_target) {
485 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__);
486 ath6kl_htc_stop(ar->htc_target);
487 }
488
489 /*
490 * Try to reset the device if we can. The driver may have been
491 * configure NOT to reset the target during a debug session.
492 */
493 ath6kl_dbg(ATH6KL_DBG_TRC,
494 "attempting to reset target on instance destroy\n");
495 ath6kl_reset_device(ar, ar->target_type, true, true);
496}
497
498static void ath6kl_install_static_wep_keys(struct ath6kl *ar)
499{ 430{
500 u8 index; 431 u8 index;
501 u8 keyusage; 432 u8 keyusage;
502 433
503 for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) { 434 for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) {
504 if (ar->wep_key_list[index].key_len) { 435 if (vif->wep_key_list[index].key_len) {
505 keyusage = GROUP_USAGE; 436 keyusage = GROUP_USAGE;
506 if (index == ar->def_txkey_index) 437 if (index == vif->def_txkey_index)
507 keyusage |= TX_USAGE; 438 keyusage |= TX_USAGE;
508 439
509 ath6kl_wmi_addkey_cmd(ar->wmi, 440 ath6kl_wmi_addkey_cmd(vif->ar->wmi, vif->fw_vif_idx,
510 index, 441 index,
511 WEP_CRYPT, 442 WEP_CRYPT,
512 keyusage, 443 keyusage,
513 ar->wep_key_list[index].key_len, 444 vif->wep_key_list[index].key_len,
514 NULL, 445 NULL, 0,
515 ar->wep_key_list[index].key, 446 vif->wep_key_list[index].key,
516 KEY_OP_INIT_VAL, NULL, 447 KEY_OP_INIT_VAL, NULL,
517 NO_SYNC_WMIFLAG); 448 NO_SYNC_WMIFLAG);
518 } 449 }
519 } 450 }
520} 451}
521 452
522void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel) 453void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
523{ 454{
455 struct ath6kl *ar = vif->ar;
524 struct ath6kl_req_key *ik; 456 struct ath6kl_req_key *ik;
525 int res; 457 int res;
526 u8 key_rsc[ATH6KL_KEY_SEQ_LEN]; 458 u8 key_rsc[ATH6KL_KEY_SEQ_LEN];
@@ -529,10 +461,10 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
529 461
530 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "AP mode started on %u MHz\n", channel); 462 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "AP mode started on %u MHz\n", channel);
531 463
532 switch (ar->auth_mode) { 464 switch (vif->auth_mode) {
533 case NONE_AUTH: 465 case NONE_AUTH:
534 if (ar->prwise_crypto == WEP_CRYPT) 466 if (vif->prwise_crypto == WEP_CRYPT)
535 ath6kl_install_static_wep_keys(ar); 467 ath6kl_install_static_wep_keys(vif);
536 break; 468 break;
537 case WPA_PSK_AUTH: 469 case WPA_PSK_AUTH:
538 case WPA2_PSK_AUTH: 470 case WPA2_PSK_AUTH:
@@ -544,8 +476,9 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
544 "the initial group key for AP mode\n"); 476 "the initial group key for AP mode\n");
545 memset(key_rsc, 0, sizeof(key_rsc)); 477 memset(key_rsc, 0, sizeof(key_rsc));
546 res = ath6kl_wmi_addkey_cmd( 478 res = ath6kl_wmi_addkey_cmd(
547 ar->wmi, ik->key_index, ik->key_type, 479 ar->wmi, vif->fw_vif_idx, ik->key_index, ik->key_type,
548 GROUP_USAGE, ik->key_len, key_rsc, ik->key, 480 GROUP_USAGE, ik->key_len, key_rsc, ATH6KL_KEY_SEQ_LEN,
481 ik->key,
549 KEY_OP_INIT_VAL, NULL, SYNC_BOTH_WMIFLAG); 482 KEY_OP_INIT_VAL, NULL, SYNC_BOTH_WMIFLAG);
550 if (res) { 483 if (res) {
551 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delayed " 484 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delayed "
@@ -554,15 +487,16 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
554 break; 487 break;
555 } 488 }
556 489
557 ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0); 490 ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0);
558 set_bit(CONNECTED, &ar->flag); 491 set_bit(CONNECTED, &vif->flags);
559 netif_carrier_on(ar->net_dev); 492 netif_carrier_on(vif->ndev);
560} 493}
561 494
562void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr, 495void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
563 u8 keymgmt, u8 ucipher, u8 auth, 496 u8 keymgmt, u8 ucipher, u8 auth,
564 u8 assoc_req_len, u8 *assoc_info) 497 u8 assoc_req_len, u8 *assoc_info)
565{ 498{
499 struct ath6kl *ar = vif->ar;
566 u8 *ies = NULL, *wpa_ie = NULL, *pos; 500 u8 *ies = NULL, *wpa_ie = NULL, *pos;
567 size_t ies_len = 0; 501 size_t ies_len = 0;
568 struct station_info sinfo; 502 struct station_info sinfo;
@@ -617,348 +551,32 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr,
617 sinfo.assoc_req_ies_len = ies_len; 551 sinfo.assoc_req_ies_len = ies_len;
618 sinfo.filled |= STATION_INFO_ASSOC_REQ_IES; 552 sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
619 553
620 cfg80211_new_sta(ar->net_dev, mac_addr, &sinfo, GFP_KERNEL); 554 cfg80211_new_sta(vif->ndev, mac_addr, &sinfo, GFP_KERNEL);
621 555
622 netif_wake_queue(ar->net_dev); 556 netif_wake_queue(vif->ndev);
623}
624
625/* Functions for Tx credit handling */
626void ath6k_credit_init(struct htc_credit_state_info *cred_info,
627 struct list_head *ep_list,
628 int tot_credits)
629{
630 struct htc_endpoint_credit_dist *cur_ep_dist;
631 int count;
632
633 cred_info->cur_free_credits = tot_credits;
634 cred_info->total_avail_credits = tot_credits;
635
636 list_for_each_entry(cur_ep_dist, ep_list, list) {
637 if (cur_ep_dist->endpoint == ENDPOINT_0)
638 continue;
639
640 cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
641
642 if (tot_credits > 4)
643 if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
644 (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
645 ath6kl_deposit_credit_to_ep(cred_info,
646 cur_ep_dist,
647 cur_ep_dist->cred_min);
648 cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
649 }
650
651 if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
652 ath6kl_deposit_credit_to_ep(cred_info, cur_ep_dist,
653 cur_ep_dist->cred_min);
654 /*
655 * Control service is always marked active, it
656 * never goes inactive EVER.
657 */
658 cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
659 } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
660 /* this is the lowest priority data endpoint */
661 cred_info->lowestpri_ep_dist = cur_ep_dist->list;
662
663 /*
664 * Streams have to be created (explicit | implicit) for all
665 * kinds of traffic. BE endpoints are also inactive in the
666 * beginning. When BE traffic starts it creates implicit
667 * streams that redistributes credits.
668 *
669 * Note: all other endpoints have minimums set but are
670 * initially given NO credits. credits will be distributed
671 * as traffic activity demands
672 */
673 }
674
675 WARN_ON(cred_info->cur_free_credits <= 0);
676
677 list_for_each_entry(cur_ep_dist, ep_list, list) {
678 if (cur_ep_dist->endpoint == ENDPOINT_0)
679 continue;
680
681 if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
682 cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
683 else {
684 /*
685 * For the remaining data endpoints, we assume that
686 * each cred_per_msg are the same. We use a simple
687 * calculation here, we take the remaining credits
688 * and determine how many max messages this can
689 * cover and then set each endpoint's normal value
690 * equal to 3/4 this amount.
691 */
692 count = (cred_info->cur_free_credits /
693 cur_ep_dist->cred_per_msg)
694 * cur_ep_dist->cred_per_msg;
695 count = (count * 3) >> 2;
696 count = max(count, cur_ep_dist->cred_per_msg);
697 cur_ep_dist->cred_norm = count;
698
699 }
700 }
701}
702
703/* initialize and setup credit distribution */
704int ath6k_setup_credit_dist(void *htc_handle,
705 struct htc_credit_state_info *cred_info)
706{
707 u16 servicepriority[5];
708
709 memset(cred_info, 0, sizeof(struct htc_credit_state_info));
710
711 servicepriority[0] = WMI_CONTROL_SVC; /* highest */
712 servicepriority[1] = WMI_DATA_VO_SVC;
713 servicepriority[2] = WMI_DATA_VI_SVC;
714 servicepriority[3] = WMI_DATA_BE_SVC;
715 servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
716
717 /* set priority list */
718 ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
719
720 return 0;
721}
722
723/* reduce an ep's credits back to a set limit */
724static void ath6k_reduce_credits(struct htc_credit_state_info *cred_info,
725 struct htc_endpoint_credit_dist *ep_dist,
726 int limit)
727{
728 int credits;
729
730 ep_dist->cred_assngd = limit;
731
732 if (ep_dist->credits <= limit)
733 return;
734
735 credits = ep_dist->credits - limit;
736 ep_dist->credits -= credits;
737 cred_info->cur_free_credits += credits;
738}
739
740static void ath6k_credit_update(struct htc_credit_state_info *cred_info,
741 struct list_head *epdist_list)
742{
743 struct htc_endpoint_credit_dist *cur_dist_list;
744
745 list_for_each_entry(cur_dist_list, epdist_list, list) {
746 if (cur_dist_list->endpoint == ENDPOINT_0)
747 continue;
748
749 if (cur_dist_list->cred_to_dist > 0) {
750 cur_dist_list->credits +=
751 cur_dist_list->cred_to_dist;
752 cur_dist_list->cred_to_dist = 0;
753 if (cur_dist_list->credits >
754 cur_dist_list->cred_assngd)
755 ath6k_reduce_credits(cred_info,
756 cur_dist_list,
757 cur_dist_list->cred_assngd);
758
759 if (cur_dist_list->credits >
760 cur_dist_list->cred_norm)
761 ath6k_reduce_credits(cred_info, cur_dist_list,
762 cur_dist_list->cred_norm);
763
764 if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
765 if (cur_dist_list->txq_depth == 0)
766 ath6k_reduce_credits(cred_info,
767 cur_dist_list, 0);
768 }
769 }
770 }
771}
772
773/*
774 * HTC has an endpoint that needs credits, ep_dist is the endpoint in
775 * question.
776 */
777void ath6k_seek_credits(struct htc_credit_state_info *cred_info,
778 struct htc_endpoint_credit_dist *ep_dist)
779{
780 struct htc_endpoint_credit_dist *curdist_list;
781 int credits = 0;
782 int need;
783
784 if (ep_dist->svc_id == WMI_CONTROL_SVC)
785 goto out;
786
787 if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
788 (ep_dist->svc_id == WMI_DATA_VO_SVC))
789 if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
790 goto out;
791
792 /*
793 * For all other services, we follow a simple algorithm of:
794 *
795 * 1. checking the free pool for credits
796 * 2. checking lower priority endpoints for credits to take
797 */
798
799 credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
800
801 if (credits >= ep_dist->seek_cred)
802 goto out;
803
804 /*
805 * We don't have enough in the free pool, try taking away from
806 * lower priority services The rule for taking away credits:
807 *
808 * 1. Only take from lower priority endpoints
809 * 2. Only take what is allocated above the minimum (never
810 * starve an endpoint completely)
811 * 3. Only take what you need.
812 */
813
814 list_for_each_entry_reverse(curdist_list,
815 &cred_info->lowestpri_ep_dist,
816 list) {
817 if (curdist_list == ep_dist)
818 break;
819
820 need = ep_dist->seek_cred - cred_info->cur_free_credits;
821
822 if ((curdist_list->cred_assngd - need) >=
823 curdist_list->cred_min) {
824 /*
825 * The current one has been allocated more than
826 * it's minimum and it has enough credits assigned
827 * above it's minimum to fulfill our need try to
828 * take away just enough to fulfill our need.
829 */
830 ath6k_reduce_credits(cred_info, curdist_list,
831 curdist_list->cred_assngd - need);
832
833 if (cred_info->cur_free_credits >=
834 ep_dist->seek_cred)
835 break;
836 }
837
838 if (curdist_list->endpoint == ENDPOINT_0)
839 break;
840 }
841
842 credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
843
844out:
845 /* did we find some credits? */
846 if (credits)
847 ath6kl_deposit_credit_to_ep(cred_info, ep_dist, credits);
848
849 ep_dist->seek_cred = 0;
850}
851
852/* redistribute credits based on activity change */
853static void ath6k_redistribute_credits(struct htc_credit_state_info *info,
854 struct list_head *ep_dist_list)
855{
856 struct htc_endpoint_credit_dist *curdist_list;
857
858 list_for_each_entry(curdist_list, ep_dist_list, list) {
859 if (curdist_list->endpoint == ENDPOINT_0)
860 continue;
861
862 if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
863 (curdist_list->svc_id == WMI_DATA_BE_SVC))
864 curdist_list->dist_flags |= HTC_EP_ACTIVE;
865
866 if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
867 !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
868 if (curdist_list->txq_depth == 0)
869 ath6k_reduce_credits(info,
870 curdist_list, 0);
871 else
872 ath6k_reduce_credits(info,
873 curdist_list,
874 curdist_list->cred_min);
875 }
876 }
877}
878
879/*
880 *
881 * This function is invoked whenever endpoints require credit
882 * distributions. A lock is held while this function is invoked, this
883 * function shall NOT block. The ep_dist_list is a list of distribution
884 * structures in prioritized order as defined by the call to the
885 * htc_set_credit_dist() api.
886 */
887void ath6k_credit_distribute(struct htc_credit_state_info *cred_info,
888 struct list_head *ep_dist_list,
889 enum htc_credit_dist_reason reason)
890{
891 switch (reason) {
892 case HTC_CREDIT_DIST_SEND_COMPLETE:
893 ath6k_credit_update(cred_info, ep_dist_list);
894 break;
895 case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
896 ath6k_redistribute_credits(cred_info, ep_dist_list);
897 break;
898 default:
899 break;
900 }
901
902 WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
903 WARN_ON(cred_info->cur_free_credits < 0);
904} 557}
905 558
906void disconnect_timer_handler(unsigned long ptr) 559void disconnect_timer_handler(unsigned long ptr)
907{ 560{
908 struct net_device *dev = (struct net_device *)ptr; 561 struct net_device *dev = (struct net_device *)ptr;
909 struct ath6kl *ar = ath6kl_priv(dev); 562 struct ath6kl_vif *vif = netdev_priv(dev);
910 563
911 ath6kl_init_profile_info(ar); 564 ath6kl_init_profile_info(vif);
912 ath6kl_disconnect(ar); 565 ath6kl_disconnect(vif);
913} 566}
914 567
915void ath6kl_disconnect(struct ath6kl *ar) 568void ath6kl_disconnect(struct ath6kl_vif *vif)
916{ 569{
917 if (test_bit(CONNECTED, &ar->flag) || 570 if (test_bit(CONNECTED, &vif->flags) ||
918 test_bit(CONNECT_PEND, &ar->flag)) { 571 test_bit(CONNECT_PEND, &vif->flags)) {
919 ath6kl_wmi_disconnect_cmd(ar->wmi); 572 ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx);
920 /* 573 /*
921 * Disconnect command is issued, clear the connect pending 574 * Disconnect command is issued, clear the connect pending
922 * flag. The connected flag will be cleared in 575 * flag. The connected flag will be cleared in
923 * disconnect event notification. 576 * disconnect event notification.
924 */ 577 */
925 clear_bit(CONNECT_PEND, &ar->flag); 578 clear_bit(CONNECT_PEND, &vif->flags);
926 }
927}
928
929void ath6kl_deep_sleep_enable(struct ath6kl *ar)
930{
931 switch (ar->sme_state) {
932 case SME_CONNECTING:
933 cfg80211_connect_result(ar->net_dev, ar->bssid, NULL, 0,
934 NULL, 0,
935 WLAN_STATUS_UNSPECIFIED_FAILURE,
936 GFP_KERNEL);
937 break;
938 case SME_CONNECTED:
939 default:
940 /*
941 * FIXME: oddly enough smeState is in DISCONNECTED during
942 * suspend, why? Need to send disconnected event in that
943 * state.
944 */
945 cfg80211_disconnected(ar->net_dev, 0, NULL, 0, GFP_KERNEL);
946 break;
947 } 579 }
948
949 if (test_bit(CONNECTED, &ar->flag) ||
950 test_bit(CONNECT_PEND, &ar->flag))
951 ath6kl_wmi_disconnect_cmd(ar->wmi);
952
953 ar->sme_state = SME_DISCONNECTED;
954
955 /* disable scanning */
956 if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0, 0,
957 0, 0) != 0)
958 printk(KERN_WARNING "ath6kl: failed to disable scan "
959 "during suspend\n");
960
961 ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED);
962} 580}
963 581
964/* WMI Event handlers */ 582/* WMI Event handlers */
@@ -980,17 +598,16 @@ static const char *get_hw_id_string(u32 id)
980void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver) 598void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
981{ 599{
982 struct ath6kl *ar = devt; 600 struct ath6kl *ar = devt;
983 struct net_device *dev = ar->net_dev;
984 601
985 memcpy(dev->dev_addr, datap, ETH_ALEN); 602 memcpy(ar->mac_addr, datap, ETH_ALEN);
986 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n", 603 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n",
987 __func__, dev->dev_addr); 604 __func__, ar->mac_addr);
988 605
989 ar->version.wlan_ver = sw_ver; 606 ar->version.wlan_ver = sw_ver;
990 ar->version.abi_ver = abi_ver; 607 ar->version.abi_ver = abi_ver;
991 608
992 snprintf(ar->wdev->wiphy->fw_version, 609 snprintf(ar->wiphy->fw_version,
993 sizeof(ar->wdev->wiphy->fw_version), 610 sizeof(ar->wiphy->fw_version),
994 "%u.%u.%u.%u", 611 "%u.%u.%u.%u",
995 (ar->version.wlan_ver & 0xf0000000) >> 28, 612 (ar->version.wlan_ver & 0xf0000000) >> 28,
996 (ar->version.wlan_ver & 0x0f000000) >> 24, 613 (ar->version.wlan_ver & 0x0f000000) >> 24,
@@ -1001,78 +618,91 @@ void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
1001 set_bit(WMI_READY, &ar->flag); 618 set_bit(WMI_READY, &ar->flag);
1002 wake_up(&ar->event_wq); 619 wake_up(&ar->event_wq);
1003 620
1004 ath6kl_info("hw %s fw %s%s\n", 621 if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) {
1005 get_hw_id_string(ar->wdev->wiphy->hw_version), 622 ath6kl_info("hw %s fw %s%s\n",
1006 ar->wdev->wiphy->fw_version, 623 get_hw_id_string(ar->wiphy->hw_version),
1007 test_bit(TESTMODE, &ar->flag) ? " testmode" : ""); 624 ar->wiphy->fw_version,
625 test_bit(TESTMODE, &ar->flag) ? " testmode" : "");
626 }
1008} 627}
1009 628
1010void ath6kl_scan_complete_evt(struct ath6kl *ar, int status) 629void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status)
1011{ 630{
1012 ath6kl_cfg80211_scan_complete_event(ar, status); 631 struct ath6kl *ar = vif->ar;
632 bool aborted = false;
633
634 if (status != WMI_SCAN_STATUS_SUCCESS)
635 aborted = true;
636
637 ath6kl_cfg80211_scan_complete_event(vif, aborted);
1013 638
1014 if (!ar->usr_bss_filter) { 639 if (!ar->usr_bss_filter) {
1015 clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag); 640 clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
1016 ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0); 641 ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
642 NONE_BSS_FILTER, 0);
1017 } 643 }
1018 644
1019 ath6kl_dbg(ATH6KL_DBG_WLAN_SCAN, "scan complete: %d\n", status); 645 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "scan complete: %d\n", status);
1020} 646}
1021 647
1022void ath6kl_connect_event(struct ath6kl *ar, u16 channel, u8 *bssid, 648void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
1023 u16 listen_int, u16 beacon_int, 649 u16 listen_int, u16 beacon_int,
1024 enum network_type net_type, u8 beacon_ie_len, 650 enum network_type net_type, u8 beacon_ie_len,
1025 u8 assoc_req_len, u8 assoc_resp_len, 651 u8 assoc_req_len, u8 assoc_resp_len,
1026 u8 *assoc_info) 652 u8 *assoc_info)
1027{ 653{
1028 unsigned long flags; 654 struct ath6kl *ar = vif->ar;
1029 655
1030 ath6kl_cfg80211_connect_event(ar, channel, bssid, 656 ath6kl_cfg80211_connect_event(vif, channel, bssid,
1031 listen_int, beacon_int, 657 listen_int, beacon_int,
1032 net_type, beacon_ie_len, 658 net_type, beacon_ie_len,
1033 assoc_req_len, assoc_resp_len, 659 assoc_req_len, assoc_resp_len,
1034 assoc_info); 660 assoc_info);
1035 661
1036 memcpy(ar->bssid, bssid, sizeof(ar->bssid)); 662 memcpy(vif->bssid, bssid, sizeof(vif->bssid));
1037 ar->bss_ch = channel; 663 vif->bss_ch = channel;
1038 664
1039 if ((ar->nw_type == INFRA_NETWORK)) 665 if ((vif->nw_type == INFRA_NETWORK))
1040 ath6kl_wmi_listeninterval_cmd(ar->wmi, ar->listen_intvl_t, 666 ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
667 ar->listen_intvl_t,
1041 ar->listen_intvl_b); 668 ar->listen_intvl_b);
1042 669
1043 netif_wake_queue(ar->net_dev); 670 netif_wake_queue(vif->ndev);
1044 671
1045 /* Update connect & link status atomically */ 672 /* Update connect & link status atomically */
1046 spin_lock_irqsave(&ar->lock, flags); 673 spin_lock_bh(&vif->if_lock);
1047 set_bit(CONNECTED, &ar->flag); 674 set_bit(CONNECTED, &vif->flags);
1048 clear_bit(CONNECT_PEND, &ar->flag); 675 clear_bit(CONNECT_PEND, &vif->flags);
1049 netif_carrier_on(ar->net_dev); 676 netif_carrier_on(vif->ndev);
1050 spin_unlock_irqrestore(&ar->lock, flags); 677 spin_unlock_bh(&vif->if_lock);
1051 678
1052 aggr_reset_state(ar->aggr_cntxt); 679 aggr_reset_state(vif->aggr_cntxt);
1053 ar->reconnect_flag = 0; 680 vif->reconnect_flag = 0;
1054 681
1055 if ((ar->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) { 682 if ((vif->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
1056 memset(ar->node_map, 0, sizeof(ar->node_map)); 683 memset(ar->node_map, 0, sizeof(ar->node_map));
1057 ar->node_num = 0; 684 ar->node_num = 0;
1058 ar->next_ep_id = ENDPOINT_2; 685 ar->next_ep_id = ENDPOINT_2;
1059 } 686 }
1060 687
1061 if (!ar->usr_bss_filter) { 688 if (!ar->usr_bss_filter) {
1062 set_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag); 689 set_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
1063 ath6kl_wmi_bssfilter_cmd(ar->wmi, CURRENT_BSS_FILTER, 0); 690 ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
691 CURRENT_BSS_FILTER, 0);
1064 } 692 }
1065} 693}
1066 694
1067void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast) 695void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast)
1068{ 696{
1069 struct ath6kl_sta *sta; 697 struct ath6kl_sta *sta;
698 struct ath6kl *ar = vif->ar;
1070 u8 tsc[6]; 699 u8 tsc[6];
700
1071 /* 701 /*
1072 * For AP case, keyid will have aid of STA which sent pkt with 702 * For AP case, keyid will have aid of STA which sent pkt with
1073 * MIC error. Use this aid to get MAC & send it to hostapd. 703 * MIC error. Use this aid to get MAC & send it to hostapd.
1074 */ 704 */
1075 if (ar->nw_type == AP_NETWORK) { 705 if (vif->nw_type == AP_NETWORK) {
1076 sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2)); 706 sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2));
1077 if (!sta) 707 if (!sta)
1078 return; 708 return;
@@ -1081,19 +711,20 @@ void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast)
1081 "ap tkip mic error received from aid=%d\n", keyid); 711 "ap tkip mic error received from aid=%d\n", keyid);
1082 712
1083 memset(tsc, 0, sizeof(tsc)); /* FIX: get correct TSC */ 713 memset(tsc, 0, sizeof(tsc)); /* FIX: get correct TSC */
1084 cfg80211_michael_mic_failure(ar->net_dev, sta->mac, 714 cfg80211_michael_mic_failure(vif->ndev, sta->mac,
1085 NL80211_KEYTYPE_PAIRWISE, keyid, 715 NL80211_KEYTYPE_PAIRWISE, keyid,
1086 tsc, GFP_KERNEL); 716 tsc, GFP_KERNEL);
1087 } else 717 } else
1088 ath6kl_cfg80211_tkip_micerr_event(ar, keyid, ismcast); 718 ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast);
1089 719
1090} 720}
1091 721
1092static void ath6kl_update_target_stats(struct ath6kl *ar, u8 *ptr, u32 len) 722static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
1093{ 723{
1094 struct wmi_target_stats *tgt_stats = 724 struct wmi_target_stats *tgt_stats =
1095 (struct wmi_target_stats *) ptr; 725 (struct wmi_target_stats *) ptr;
1096 struct target_stats *stats = &ar->target_stats; 726 struct ath6kl *ar = vif->ar;
727 struct target_stats *stats = &vif->target_stats;
1097 struct tkip_ccmp_stats *ccmp_stats; 728 struct tkip_ccmp_stats *ccmp_stats;
1098 u8 ac; 729 u8 ac;
1099 730
@@ -1189,8 +820,8 @@ static void ath6kl_update_target_stats(struct ath6kl *ar, u8 *ptr, u32 len)
1189 stats->wow_evt_discarded += 820 stats->wow_evt_discarded +=
1190 le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded); 821 le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded);
1191 822
1192 if (test_bit(STATS_UPDATE_PEND, &ar->flag)) { 823 if (test_bit(STATS_UPDATE_PEND, &vif->flags)) {
1193 clear_bit(STATS_UPDATE_PEND, &ar->flag); 824 clear_bit(STATS_UPDATE_PEND, &vif->flags);
1194 wake_up(&ar->event_wq); 825 wake_up(&ar->event_wq);
1195 } 826 }
1196} 827}
@@ -1200,14 +831,15 @@ static void ath6kl_add_le32(__le32 *var, __le32 val)
1200 *var = cpu_to_le32(le32_to_cpu(*var) + le32_to_cpu(val)); 831 *var = cpu_to_le32(le32_to_cpu(*var) + le32_to_cpu(val));
1201} 832}
1202 833
1203void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len) 834void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len)
1204{ 835{
1205 struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr; 836 struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr;
837 struct ath6kl *ar = vif->ar;
1206 struct wmi_ap_mode_stat *ap = &ar->ap_stats; 838 struct wmi_ap_mode_stat *ap = &ar->ap_stats;
1207 struct wmi_per_sta_stat *st_ap, *st_p; 839 struct wmi_per_sta_stat *st_ap, *st_p;
1208 u8 ac; 840 u8 ac;
1209 841
1210 if (ar->nw_type == AP_NETWORK) { 842 if (vif->nw_type == AP_NETWORK) {
1211 if (len < sizeof(*p)) 843 if (len < sizeof(*p))
1212 return; 844 return;
1213 845
@@ -1226,7 +858,7 @@ void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len)
1226 } 858 }
1227 859
1228 } else { 860 } else {
1229 ath6kl_update_target_stats(ar, ptr, len); 861 ath6kl_update_target_stats(vif, ptr, len);
1230 } 862 }
1231} 863}
1232 864
@@ -1245,11 +877,12 @@ void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr)
1245 wake_up(&ar->event_wq); 877 wake_up(&ar->event_wq);
1246} 878}
1247 879
1248void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid) 880void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid)
1249{ 881{
1250 struct ath6kl_sta *conn; 882 struct ath6kl_sta *conn;
1251 struct sk_buff *skb; 883 struct sk_buff *skb;
1252 bool psq_empty = false; 884 bool psq_empty = false;
885 struct ath6kl *ar = vif->ar;
1253 886
1254 conn = ath6kl_find_sta_by_aid(ar, aid); 887 conn = ath6kl_find_sta_by_aid(ar, aid);
1255 888
@@ -1272,7 +905,7 @@ void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid)
1272 spin_unlock_bh(&conn->psq_lock); 905 spin_unlock_bh(&conn->psq_lock);
1273 906
1274 conn->sta_flags |= STA_PS_POLLED; 907 conn->sta_flags |= STA_PS_POLLED;
1275 ath6kl_data_tx(skb, ar->net_dev); 908 ath6kl_data_tx(skb, vif->ndev);
1276 conn->sta_flags &= ~STA_PS_POLLED; 909 conn->sta_flags &= ~STA_PS_POLLED;
1277 910
1278 spin_lock_bh(&conn->psq_lock); 911 spin_lock_bh(&conn->psq_lock);
@@ -1280,13 +913,14 @@ void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid)
1280 spin_unlock_bh(&conn->psq_lock); 913 spin_unlock_bh(&conn->psq_lock);
1281 914
1282 if (psq_empty) 915 if (psq_empty)
1283 ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0); 916 ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, conn->aid, 0);
1284} 917}
1285 918
1286void ath6kl_dtimexpiry_event(struct ath6kl *ar) 919void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif)
1287{ 920{
1288 bool mcastq_empty = false; 921 bool mcastq_empty = false;
1289 struct sk_buff *skb; 922 struct sk_buff *skb;
923 struct ath6kl *ar = vif->ar;
1290 924
1291 /* 925 /*
1292 * If there are no associated STAs, ignore the DTIM expiry event. 926 * If there are no associated STAs, ignore the DTIM expiry event.
@@ -1308,31 +942,31 @@ void ath6kl_dtimexpiry_event(struct ath6kl *ar)
1308 return; 942 return;
1309 943
1310 /* set the STA flag to dtim_expired for the frame to go out */ 944 /* set the STA flag to dtim_expired for the frame to go out */
1311 set_bit(DTIM_EXPIRED, &ar->flag); 945 set_bit(DTIM_EXPIRED, &vif->flags);
1312 946
1313 spin_lock_bh(&ar->mcastpsq_lock); 947 spin_lock_bh(&ar->mcastpsq_lock);
1314 while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) { 948 while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) {
1315 spin_unlock_bh(&ar->mcastpsq_lock); 949 spin_unlock_bh(&ar->mcastpsq_lock);
1316 950
1317 ath6kl_data_tx(skb, ar->net_dev); 951 ath6kl_data_tx(skb, vif->ndev);
1318 952
1319 spin_lock_bh(&ar->mcastpsq_lock); 953 spin_lock_bh(&ar->mcastpsq_lock);
1320 } 954 }
1321 spin_unlock_bh(&ar->mcastpsq_lock); 955 spin_unlock_bh(&ar->mcastpsq_lock);
1322 956
1323 clear_bit(DTIM_EXPIRED, &ar->flag); 957 clear_bit(DTIM_EXPIRED, &vif->flags);
1324 958
1325 /* clear the LSB of the BitMapCtl field of the TIM IE */ 959 /* clear the LSB of the BitMapCtl field of the TIM IE */
1326 ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0); 960 ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, MCAST_AID, 0);
1327} 961}
1328 962
1329void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid, 963void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
1330 u8 assoc_resp_len, u8 *assoc_info, 964 u8 assoc_resp_len, u8 *assoc_info,
1331 u16 prot_reason_status) 965 u16 prot_reason_status)
1332{ 966{
1333 unsigned long flags; 967 struct ath6kl *ar = vif->ar;
1334 968
1335 if (ar->nw_type == AP_NETWORK) { 969 if (vif->nw_type == AP_NETWORK) {
1336 if (!ath6kl_remove_sta(ar, bssid, prot_reason_status)) 970 if (!ath6kl_remove_sta(ar, bssid, prot_reason_status))
1337 return; 971 return;
1338 972
@@ -1344,31 +978,31 @@ void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
1344 978
1345 /* clear the LSB of the TIM IE's BitMapCtl field */ 979 /* clear the LSB of the TIM IE's BitMapCtl field */
1346 if (test_bit(WMI_READY, &ar->flag)) 980 if (test_bit(WMI_READY, &ar->flag))
1347 ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0); 981 ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
982 MCAST_AID, 0);
1348 } 983 }
1349 984
1350 if (!is_broadcast_ether_addr(bssid)) { 985 if (!is_broadcast_ether_addr(bssid)) {
1351 /* send event to application */ 986 /* send event to application */
1352 cfg80211_del_sta(ar->net_dev, bssid, GFP_KERNEL); 987 cfg80211_del_sta(vif->ndev, bssid, GFP_KERNEL);
1353 } 988 }
1354 989
1355 if (memcmp(ar->net_dev->dev_addr, bssid, ETH_ALEN) == 0) { 990 if (memcmp(vif->ndev->dev_addr, bssid, ETH_ALEN) == 0) {
1356 memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list)); 991 memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
1357 clear_bit(CONNECTED, &ar->flag); 992 clear_bit(CONNECTED, &vif->flags);
1358 } 993 }
1359 return; 994 return;
1360 } 995 }
1361 996
1362 ath6kl_cfg80211_disconnect_event(ar, reason, bssid, 997 ath6kl_cfg80211_disconnect_event(vif, reason, bssid,
1363 assoc_resp_len, assoc_info, 998 assoc_resp_len, assoc_info,
1364 prot_reason_status); 999 prot_reason_status);
1365 1000
1366 aggr_reset_state(ar->aggr_cntxt); 1001 aggr_reset_state(vif->aggr_cntxt);
1367 1002
1368 del_timer(&ar->disconnect_timer); 1003 del_timer(&vif->disconnect_timer);
1369 1004
1370 ath6kl_dbg(ATH6KL_DBG_WLAN_CONNECT, 1005 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "disconnect reason is %d\n", reason);
1371 "disconnect reason is %d\n", reason);
1372 1006
1373 /* 1007 /*
1374 * If the event is due to disconnect cmd from the host, only they 1008 * If the event is due to disconnect cmd from the host, only they
@@ -1377,83 +1011,98 @@ void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
1377 */ 1011 */
1378 if (reason == DISCONNECT_CMD) { 1012 if (reason == DISCONNECT_CMD) {
1379 if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag)) 1013 if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag))
1380 ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0); 1014 ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
1015 NONE_BSS_FILTER, 0);
1381 } else { 1016 } else {
1382 set_bit(CONNECT_PEND, &ar->flag); 1017 set_bit(CONNECT_PEND, &vif->flags);
1383 if (((reason == ASSOC_FAILED) && 1018 if (((reason == ASSOC_FAILED) &&
1384 (prot_reason_status == 0x11)) || 1019 (prot_reason_status == 0x11)) ||
1385 ((reason == ASSOC_FAILED) && (prot_reason_status == 0x0) 1020 ((reason == ASSOC_FAILED) && (prot_reason_status == 0x0)
1386 && (ar->reconnect_flag == 1))) { 1021 && (vif->reconnect_flag == 1))) {
1387 set_bit(CONNECTED, &ar->flag); 1022 set_bit(CONNECTED, &vif->flags);
1388 return; 1023 return;
1389 } 1024 }
1390 } 1025 }
1391 1026
1392 /* update connect & link status atomically */ 1027 /* update connect & link status atomically */
1393 spin_lock_irqsave(&ar->lock, flags); 1028 spin_lock_bh(&vif->if_lock);
1394 clear_bit(CONNECTED, &ar->flag); 1029 clear_bit(CONNECTED, &vif->flags);
1395 netif_carrier_off(ar->net_dev); 1030 netif_carrier_off(vif->ndev);
1396 spin_unlock_irqrestore(&ar->lock, flags); 1031 spin_unlock_bh(&vif->if_lock);
1397 1032
1398 if ((reason != CSERV_DISCONNECT) || (ar->reconnect_flag != 1)) 1033 if ((reason != CSERV_DISCONNECT) || (vif->reconnect_flag != 1))
1399 ar->reconnect_flag = 0; 1034 vif->reconnect_flag = 0;
1400 1035
1401 if (reason != CSERV_DISCONNECT) 1036 if (reason != CSERV_DISCONNECT)
1402 ar->user_key_ctrl = 0; 1037 ar->user_key_ctrl = 0;
1403 1038
1404 netif_stop_queue(ar->net_dev); 1039 netif_stop_queue(vif->ndev);
1405 memset(ar->bssid, 0, sizeof(ar->bssid)); 1040 memset(vif->bssid, 0, sizeof(vif->bssid));
1406 ar->bss_ch = 0; 1041 vif->bss_ch = 0;
1407 1042
1408 ath6kl_tx_data_cleanup(ar); 1043 ath6kl_tx_data_cleanup(ar);
1409} 1044}
1410 1045
1411static int ath6kl_open(struct net_device *dev) 1046struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar)
1412{ 1047{
1413 struct ath6kl *ar = ath6kl_priv(dev); 1048 struct ath6kl_vif *vif;
1414 unsigned long flags; 1049
1050 spin_lock_bh(&ar->list_lock);
1051 if (list_empty(&ar->vif_list)) {
1052 spin_unlock_bh(&ar->list_lock);
1053 return NULL;
1054 }
1415 1055
1416 spin_lock_irqsave(&ar->lock, flags); 1056 vif = list_first_entry(&ar->vif_list, struct ath6kl_vif, list);
1417 1057
1418 set_bit(WLAN_ENABLED, &ar->flag); 1058 spin_unlock_bh(&ar->list_lock);
1419 1059
1420 if (test_bit(CONNECTED, &ar->flag)) { 1060 return vif;
1061}
1062
1063static int ath6kl_open(struct net_device *dev)
1064{
1065 struct ath6kl_vif *vif = netdev_priv(dev);
1066
1067 set_bit(WLAN_ENABLED, &vif->flags);
1068
1069 if (test_bit(CONNECTED, &vif->flags)) {
1421 netif_carrier_on(dev); 1070 netif_carrier_on(dev);
1422 netif_wake_queue(dev); 1071 netif_wake_queue(dev);
1423 } else 1072 } else
1424 netif_carrier_off(dev); 1073 netif_carrier_off(dev);
1425 1074
1426 spin_unlock_irqrestore(&ar->lock, flags);
1427
1428 return 0; 1075 return 0;
1429} 1076}
1430 1077
1431static int ath6kl_close(struct net_device *dev) 1078static int ath6kl_close(struct net_device *dev)
1432{ 1079{
1433 struct ath6kl *ar = ath6kl_priv(dev); 1080 struct ath6kl *ar = ath6kl_priv(dev);
1081 struct ath6kl_vif *vif = netdev_priv(dev);
1434 1082
1435 netif_stop_queue(dev); 1083 netif_stop_queue(dev);
1436 1084
1437 ath6kl_disconnect(ar); 1085 ath6kl_disconnect(vif);
1438 1086
1439 if (test_bit(WMI_READY, &ar->flag)) { 1087 if (test_bit(WMI_READY, &ar->flag)) {
1440 if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0, 1088 if (ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, 0xFFFF,
1441 0, 0, 0)) 1089 0, 0, 0, 0, 0, 0, 0, 0, 0))
1442 return -EIO; 1090 return -EIO;
1443 1091
1444 clear_bit(WLAN_ENABLED, &ar->flag);
1445 } 1092 }
1446 1093
1447 ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED); 1094 ath6kl_cfg80211_scan_complete_event(vif, true);
1095
1096 clear_bit(WLAN_ENABLED, &vif->flags);
1448 1097
1449 return 0; 1098 return 0;
1450} 1099}
1451 1100
1452static struct net_device_stats *ath6kl_get_stats(struct net_device *dev) 1101static struct net_device_stats *ath6kl_get_stats(struct net_device *dev)
1453{ 1102{
1454 struct ath6kl *ar = ath6kl_priv(dev); 1103 struct ath6kl_vif *vif = netdev_priv(dev);
1455 1104
1456 return &ar->net_stats; 1105 return &vif->net_stats;
1457} 1106}
1458 1107
1459static struct net_device_ops ath6kl_netdev_ops = { 1108static struct net_device_ops ath6kl_netdev_ops = {
@@ -1466,6 +1115,7 @@ static struct net_device_ops ath6kl_netdev_ops = {
1466void init_netdev(struct net_device *dev) 1115void init_netdev(struct net_device *dev)
1467{ 1116{
1468 dev->netdev_ops = &ath6kl_netdev_ops; 1117 dev->netdev_ops = &ath6kl_netdev_ops;
1118 dev->destructor = free_netdev;
1469 dev->watchdog_timeo = ATH6KL_TX_TIMEOUT; 1119 dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
1470 1120
1471 dev->needed_headroom = ETH_HLEN; 1121 dev->needed_headroom = ETH_HLEN;
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 066d4f88807f..e69ca5ee5bb1 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -22,7 +22,7 @@
22#include <linux/mmc/sdio_ids.h> 22#include <linux/mmc/sdio_ids.h>
23#include <linux/mmc/sdio.h> 23#include <linux/mmc/sdio.h>
24#include <linux/mmc/sd.h> 24#include <linux/mmc/sd.h>
25#include "htc_hif.h" 25#include "hif.h"
26#include "hif-ops.h" 26#include "hif-ops.h"
27#include "target.h" 27#include "target.h"
28#include "debug.h" 28#include "debug.h"
@@ -46,6 +46,8 @@ struct ath6kl_sdio {
46 struct list_head scat_req; 46 struct list_head scat_req;
47 47
48 spinlock_t scat_lock; 48 spinlock_t scat_lock;
49 bool scatter_enabled;
50
49 bool is_disabled; 51 bool is_disabled;
50 atomic_t irq_handling; 52 atomic_t irq_handling;
51 const struct sdio_device_id *id; 53 const struct sdio_device_id *id;
@@ -135,6 +137,8 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
135{ 137{
136 int ret = 0; 138 int ret = 0;
137 139
140 sdio_claim_host(func);
141
138 if (request & HIF_WRITE) { 142 if (request & HIF_WRITE) {
139 /* FIXME: looks like ugly workaround for something */ 143 /* FIXME: looks like ugly workaround for something */
140 if (addr >= HIF_MBOX_BASE_ADDR && 144 if (addr >= HIF_MBOX_BASE_ADDR &&
@@ -156,6 +160,8 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
156 ret = sdio_memcpy_fromio(func, buf, addr, len); 160 ret = sdio_memcpy_fromio(func, buf, addr, len);
157 } 161 }
158 162
163 sdio_release_host(func);
164
159 ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n", 165 ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
160 request & HIF_WRITE ? "wr" : "rd", addr, 166 request & HIF_WRITE ? "wr" : "rd", addr,
161 request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len); 167 request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
@@ -167,12 +173,11 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
167static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio) 173static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
168{ 174{
169 struct bus_request *bus_req; 175 struct bus_request *bus_req;
170 unsigned long flag;
171 176
172 spin_lock_irqsave(&ar_sdio->lock, flag); 177 spin_lock_bh(&ar_sdio->lock);
173 178
174 if (list_empty(&ar_sdio->bus_req_freeq)) { 179 if (list_empty(&ar_sdio->bus_req_freeq)) {
175 spin_unlock_irqrestore(&ar_sdio->lock, flag); 180 spin_unlock_bh(&ar_sdio->lock);
176 return NULL; 181 return NULL;
177 } 182 }
178 183
@@ -180,7 +185,7 @@ static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
180 struct bus_request, list); 185 struct bus_request, list);
181 list_del(&bus_req->list); 186 list_del(&bus_req->list);
182 187
183 spin_unlock_irqrestore(&ar_sdio->lock, flag); 188 spin_unlock_bh(&ar_sdio->lock);
184 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n", 189 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
185 __func__, bus_req); 190 __func__, bus_req);
186 191
@@ -190,14 +195,12 @@ static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
190static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio, 195static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
191 struct bus_request *bus_req) 196 struct bus_request *bus_req)
192{ 197{
193 unsigned long flag;
194
195 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n", 198 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
196 __func__, bus_req); 199 __func__, bus_req);
197 200
198 spin_lock_irqsave(&ar_sdio->lock, flag); 201 spin_lock_bh(&ar_sdio->lock);
199 list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq); 202 list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
200 spin_unlock_irqrestore(&ar_sdio->lock, flag); 203 spin_unlock_bh(&ar_sdio->lock);
201} 204}
202 205
203static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req, 206static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
@@ -291,10 +294,14 @@ static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
291 mmc_req.cmd = &cmd; 294 mmc_req.cmd = &cmd;
292 mmc_req.data = &data; 295 mmc_req.data = &data;
293 296
297 sdio_claim_host(ar_sdio->func);
298
294 mmc_set_data_timeout(&data, ar_sdio->func->card); 299 mmc_set_data_timeout(&data, ar_sdio->func->card);
295 /* synchronous call to process request */ 300 /* synchronous call to process request */
296 mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req); 301 mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);
297 302
303 sdio_release_host(ar_sdio->func);
304
298 status = cmd.error ? cmd.error : data.error; 305 status = cmd.error ? cmd.error : data.error;
299 306
300scat_complete: 307scat_complete:
@@ -395,11 +402,9 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
395 } else 402 } else
396 tbuf = buf; 403 tbuf = buf;
397 404
398 sdio_claim_host(ar_sdio->func);
399 ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len); 405 ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
400 if ((request & HIF_READ) && bounced) 406 if ((request & HIF_READ) && bounced)
401 memcpy(buf, tbuf, len); 407 memcpy(buf, tbuf, len);
402 sdio_release_host(ar_sdio->func);
403 408
404 return ret; 409 return ret;
405} 410}
@@ -418,29 +423,25 @@ static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
418 req->request); 423 req->request);
419 context = req->packet; 424 context = req->packet;
420 ath6kl_sdio_free_bus_req(ar_sdio, req); 425 ath6kl_sdio_free_bus_req(ar_sdio, req);
421 ath6kldev_rw_comp_handler(context, status); 426 ath6kl_hif_rw_comp_handler(context, status);
422 } 427 }
423} 428}
424 429
425static void ath6kl_sdio_write_async_work(struct work_struct *work) 430static void ath6kl_sdio_write_async_work(struct work_struct *work)
426{ 431{
427 struct ath6kl_sdio *ar_sdio; 432 struct ath6kl_sdio *ar_sdio;
428 unsigned long flags;
429 struct bus_request *req, *tmp_req; 433 struct bus_request *req, *tmp_req;
430 434
431 ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work); 435 ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
432 sdio_claim_host(ar_sdio->func);
433 436
434 spin_lock_irqsave(&ar_sdio->wr_async_lock, flags); 437 spin_lock_bh(&ar_sdio->wr_async_lock);
435 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) { 438 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
436 list_del(&req->list); 439 list_del(&req->list);
437 spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags); 440 spin_unlock_bh(&ar_sdio->wr_async_lock);
438 __ath6kl_sdio_write_async(ar_sdio, req); 441 __ath6kl_sdio_write_async(ar_sdio, req);
439 spin_lock_irqsave(&ar_sdio->wr_async_lock, flags); 442 spin_lock_bh(&ar_sdio->wr_async_lock);
440 } 443 }
441 spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags); 444 spin_unlock_bh(&ar_sdio->wr_async_lock);
442
443 sdio_release_host(ar_sdio->func);
444} 445}
445 446
446static void ath6kl_sdio_irq_handler(struct sdio_func *func) 447static void ath6kl_sdio_irq_handler(struct sdio_func *func)
@@ -459,20 +460,23 @@ static void ath6kl_sdio_irq_handler(struct sdio_func *func)
459 */ 460 */
460 sdio_release_host(ar_sdio->func); 461 sdio_release_host(ar_sdio->func);
461 462
462 status = ath6kldev_intr_bh_handler(ar_sdio->ar); 463 status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
463 sdio_claim_host(ar_sdio->func); 464 sdio_claim_host(ar_sdio->func);
464 atomic_set(&ar_sdio->irq_handling, 0); 465 atomic_set(&ar_sdio->irq_handling, 0);
465 WARN_ON(status && status != -ECANCELED); 466 WARN_ON(status && status != -ECANCELED);
466} 467}
467 468
468static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio) 469static int ath6kl_sdio_power_on(struct ath6kl *ar)
469{ 470{
471 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
470 struct sdio_func *func = ar_sdio->func; 472 struct sdio_func *func = ar_sdio->func;
471 int ret = 0; 473 int ret = 0;
472 474
473 if (!ar_sdio->is_disabled) 475 if (!ar_sdio->is_disabled)
474 return 0; 476 return 0;
475 477
478 ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n");
479
476 sdio_claim_host(func); 480 sdio_claim_host(func);
477 481
478 ret = sdio_enable_func(func); 482 ret = sdio_enable_func(func);
@@ -495,13 +499,16 @@ static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
495 return ret; 499 return ret;
496} 500}
497 501
498static int ath6kl_sdio_power_off(struct ath6kl_sdio *ar_sdio) 502static int ath6kl_sdio_power_off(struct ath6kl *ar)
499{ 503{
504 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
500 int ret; 505 int ret;
501 506
502 if (ar_sdio->is_disabled) 507 if (ar_sdio->is_disabled)
503 return 0; 508 return 0;
504 509
510 ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n");
511
505 /* Disable the card */ 512 /* Disable the card */
506 sdio_claim_host(ar_sdio->func); 513 sdio_claim_host(ar_sdio->func);
507 ret = sdio_disable_func(ar_sdio->func); 514 ret = sdio_disable_func(ar_sdio->func);
@@ -521,7 +528,6 @@ static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
521{ 528{
522 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 529 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
523 struct bus_request *bus_req; 530 struct bus_request *bus_req;
524 unsigned long flags;
525 531
526 bus_req = ath6kl_sdio_alloc_busreq(ar_sdio); 532 bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
527 533
@@ -534,9 +540,9 @@ static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
534 bus_req->request = request; 540 bus_req->request = request;
535 bus_req->packet = packet; 541 bus_req->packet = packet;
536 542
537 spin_lock_irqsave(&ar_sdio->wr_async_lock, flags); 543 spin_lock_bh(&ar_sdio->wr_async_lock);
538 list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq); 544 list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
539 spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags); 545 spin_unlock_bh(&ar_sdio->wr_async_lock);
540 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); 546 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
541 547
542 return 0; 548 return 0;
@@ -582,9 +588,8 @@ static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
582{ 588{
583 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 589 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
584 struct hif_scatter_req *node = NULL; 590 struct hif_scatter_req *node = NULL;
585 unsigned long flag;
586 591
587 spin_lock_irqsave(&ar_sdio->scat_lock, flag); 592 spin_lock_bh(&ar_sdio->scat_lock);
588 593
589 if (!list_empty(&ar_sdio->scat_req)) { 594 if (!list_empty(&ar_sdio->scat_req)) {
590 node = list_first_entry(&ar_sdio->scat_req, 595 node = list_first_entry(&ar_sdio->scat_req,
@@ -592,7 +597,7 @@ static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
592 list_del(&node->list); 597 list_del(&node->list);
593 } 598 }
594 599
595 spin_unlock_irqrestore(&ar_sdio->scat_lock, flag); 600 spin_unlock_bh(&ar_sdio->scat_lock);
596 601
597 return node; 602 return node;
598} 603}
@@ -601,13 +606,12 @@ static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
601 struct hif_scatter_req *s_req) 606 struct hif_scatter_req *s_req)
602{ 607{
603 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 608 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
604 unsigned long flag;
605 609
606 spin_lock_irqsave(&ar_sdio->scat_lock, flag); 610 spin_lock_bh(&ar_sdio->scat_lock);
607 611
608 list_add_tail(&s_req->list, &ar_sdio->scat_req); 612 list_add_tail(&s_req->list, &ar_sdio->scat_req);
609 613
610 spin_unlock_irqrestore(&ar_sdio->scat_lock, flag); 614 spin_unlock_bh(&ar_sdio->scat_lock);
611 615
612} 616}
613 617
@@ -618,7 +622,6 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
618 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 622 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
619 u32 request = scat_req->req; 623 u32 request = scat_req->req;
620 int status = 0; 624 int status = 0;
621 unsigned long flags;
622 625
623 if (!scat_req->len) 626 if (!scat_req->len)
624 return -EINVAL; 627 return -EINVAL;
@@ -627,14 +630,12 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
627 "hif-scatter: total len: %d scatter entries: %d\n", 630 "hif-scatter: total len: %d scatter entries: %d\n",
628 scat_req->len, scat_req->scat_entries); 631 scat_req->len, scat_req->scat_entries);
629 632
630 if (request & HIF_SYNCHRONOUS) { 633 if (request & HIF_SYNCHRONOUS)
631 sdio_claim_host(ar_sdio->func);
632 status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest); 634 status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
633 sdio_release_host(ar_sdio->func); 635 else {
634 } else { 636 spin_lock_bh(&ar_sdio->wr_async_lock);
635 spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
636 list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq); 637 list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
637 spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags); 638 spin_unlock_bh(&ar_sdio->wr_async_lock);
638 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); 639 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
639 } 640 }
640 641
@@ -646,23 +647,27 @@ static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
646{ 647{
647 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 648 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
648 struct hif_scatter_req *s_req, *tmp_req; 649 struct hif_scatter_req *s_req, *tmp_req;
649 unsigned long flag;
650 650
651 /* empty the free list */ 651 /* empty the free list */
652 spin_lock_irqsave(&ar_sdio->scat_lock, flag); 652 spin_lock_bh(&ar_sdio->scat_lock);
653 list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) { 653 list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
654 list_del(&s_req->list); 654 list_del(&s_req->list);
655 spin_unlock_irqrestore(&ar_sdio->scat_lock, flag); 655 spin_unlock_bh(&ar_sdio->scat_lock);
656 656
657 /*
658 * FIXME: should we also call completion handler with
659 * ath6kl_hif_rw_comp_handler() with status -ECANCELED so
660 * that the packet is properly freed?
661 */
657 if (s_req->busrequest) 662 if (s_req->busrequest)
658 ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest); 663 ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
659 kfree(s_req->virt_dma_buf); 664 kfree(s_req->virt_dma_buf);
660 kfree(s_req->sgentries); 665 kfree(s_req->sgentries);
661 kfree(s_req); 666 kfree(s_req);
662 667
663 spin_lock_irqsave(&ar_sdio->scat_lock, flag); 668 spin_lock_bh(&ar_sdio->scat_lock);
664 } 669 }
665 spin_unlock_irqrestore(&ar_sdio->scat_lock, flag); 670 spin_unlock_bh(&ar_sdio->scat_lock);
666} 671}
667 672
668/* setup of HIF scatter resources */ 673/* setup of HIF scatter resources */
@@ -673,6 +678,11 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
673 int ret; 678 int ret;
674 bool virt_scat = false; 679 bool virt_scat = false;
675 680
681 if (ar_sdio->scatter_enabled)
682 return 0;
683
684 ar_sdio->scatter_enabled = true;
685
676 /* check if host supports scatter and it meets our requirements */ 686 /* check if host supports scatter and it meets our requirements */
677 if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) { 687 if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
678 ath6kl_err("host only supports scatter of :%d entries, need: %d\n", 688 ath6kl_err("host only supports scatter of :%d entries, need: %d\n",
@@ -687,8 +697,8 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
687 MAX_SCATTER_REQUESTS, virt_scat); 697 MAX_SCATTER_REQUESTS, virt_scat);
688 698
689 if (!ret) { 699 if (!ret) {
690 ath6kl_dbg(ATH6KL_DBG_SCATTER, 700 ath6kl_dbg(ATH6KL_DBG_BOOT,
691 "hif-scatter enabled: max scatter req : %d entries: %d\n", 701 "hif-scatter enabled requests %d entries %d\n",
692 MAX_SCATTER_REQUESTS, 702 MAX_SCATTER_REQUESTS,
693 MAX_SCATTER_ENTRIES_PER_REQ); 703 MAX_SCATTER_ENTRIES_PER_REQ);
694 704
@@ -712,8 +722,8 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
712 return ret; 722 return ret;
713 } 723 }
714 724
715 ath6kl_dbg(ATH6KL_DBG_SCATTER, 725 ath6kl_dbg(ATH6KL_DBG_BOOT,
716 "Vitual scatter enabled, max_scat_req:%d, entries:%d\n", 726 "virtual scatter enabled requests %d entries %d\n",
717 ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ); 727 ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);
718 728
719 target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ; 729 target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
@@ -724,7 +734,47 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
724 return 0; 734 return 0;
725} 735}
726 736
727static int ath6kl_sdio_suspend(struct ath6kl *ar) 737static int ath6kl_sdio_config(struct ath6kl *ar)
738{
739 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
740 struct sdio_func *func = ar_sdio->func;
741 int ret;
742
743 sdio_claim_host(func);
744
745 if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
746 MANUFACTURER_ID_AR6003_BASE) {
747 /* enable 4-bit ASYNC interrupt on AR6003 or later */
748 ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
749 CCCR_SDIO_IRQ_MODE_REG,
750 SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
751 if (ret) {
752 ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
753 ret);
754 goto out;
755 }
756
757 ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n");
758 }
759
760 /* give us some time to enable, in ms */
761 func->enable_timeout = 100;
762
763 ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
764 if (ret) {
765 ath6kl_err("Set sdio block size %d failed: %d)\n",
766 HIF_MBOX_BLOCK_SIZE, ret);
767 sdio_release_host(func);
768 goto out;
769 }
770
771out:
772 sdio_release_host(func);
773
774 return ret;
775}
776
777static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
728{ 778{
729 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 779 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
730 struct sdio_func *func = ar_sdio->func; 780 struct sdio_func *func = ar_sdio->func;
@@ -733,12 +783,14 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar)
733 783
734 flags = sdio_get_host_pm_caps(func); 784 flags = sdio_get_host_pm_caps(func);
735 785
736 if (!(flags & MMC_PM_KEEP_POWER)) 786 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags);
737 /* as host doesn't support keep power we need to bail out */ 787
738 ath6kl_dbg(ATH6KL_DBG_SDIO, 788 if (!(flags & MMC_PM_KEEP_POWER) ||
739 "func %d doesn't support MMC_PM_KEEP_POWER\n", 789 (ar->conf_flags & ATH6KL_CONF_SUSPEND_CUTPOWER)) {
740 func->num); 790 /* as host doesn't support keep power we need to cut power */
741 return -EINVAL; 791 return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER,
792 NULL);
793 }
742 794
743 ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); 795 ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
744 if (ret) { 796 if (ret) {
@@ -747,11 +799,85 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar)
747 return ret; 799 return ret;
748 } 800 }
749 801
750 ath6kl_deep_sleep_enable(ar); 802 if ((flags & MMC_PM_WAKE_SDIO_IRQ) && wow) {
803 /*
804 * The host sdio controller is capable of keep power and
805 * sdio irq wake up at this point. It's fine to continue
806 * wow suspend operation.
807 */
808 ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
809 if (ret)
810 return ret;
811
812 ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
813 if (ret)
814 ath6kl_err("set sdio wake irq flag failed: %d\n", ret);
815
816 return ret;
817 }
818
819 return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP, NULL);
820}
821
822static int ath6kl_sdio_resume(struct ath6kl *ar)
823{
824 switch (ar->state) {
825 case ATH6KL_STATE_OFF:
826 case ATH6KL_STATE_CUTPOWER:
827 ath6kl_dbg(ATH6KL_DBG_SUSPEND,
828 "sdio resume configuring sdio\n");
829
830 /* need to set sdio settings after power is cut from sdio */
831 ath6kl_sdio_config(ar);
832 break;
833
834 case ATH6KL_STATE_ON:
835 break;
836
837 case ATH6KL_STATE_DEEPSLEEP:
838 break;
839
840 case ATH6KL_STATE_WOW:
841 break;
842 }
843
844 ath6kl_cfg80211_resume(ar);
751 845
752 return 0; 846 return 0;
753} 847}
754 848
849static void ath6kl_sdio_stop(struct ath6kl *ar)
850{
851 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
852 struct bus_request *req, *tmp_req;
853 void *context;
854
855 /* FIXME: make sure that wq is not queued again */
856
857 cancel_work_sync(&ar_sdio->wr_async_work);
858
859 spin_lock_bh(&ar_sdio->wr_async_lock);
860
861 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
862 list_del(&req->list);
863
864 if (req->scat_req) {
865 /* this is a scatter gather request */
866 req->scat_req->status = -ECANCELED;
867 req->scat_req->complete(ar_sdio->ar->htc_target,
868 req->scat_req);
869 } else {
870 context = req->packet;
871 ath6kl_sdio_free_bus_req(ar_sdio, req);
872 ath6kl_hif_rw_comp_handler(context, -ECANCELED);
873 }
874 }
875
876 spin_unlock_bh(&ar_sdio->wr_async_lock);
877
878 WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4);
879}
880
755static const struct ath6kl_hif_ops ath6kl_sdio_ops = { 881static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
756 .read_write_sync = ath6kl_sdio_read_write_sync, 882 .read_write_sync = ath6kl_sdio_read_write_sync,
757 .write_async = ath6kl_sdio_write_async, 883 .write_async = ath6kl_sdio_write_async,
@@ -763,8 +889,43 @@ static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
763 .scat_req_rw = ath6kl_sdio_async_rw_scatter, 889 .scat_req_rw = ath6kl_sdio_async_rw_scatter,
764 .cleanup_scatter = ath6kl_sdio_cleanup_scatter, 890 .cleanup_scatter = ath6kl_sdio_cleanup_scatter,
765 .suspend = ath6kl_sdio_suspend, 891 .suspend = ath6kl_sdio_suspend,
892 .resume = ath6kl_sdio_resume,
893 .power_on = ath6kl_sdio_power_on,
894 .power_off = ath6kl_sdio_power_off,
895 .stop = ath6kl_sdio_stop,
766}; 896};
767 897
898#ifdef CONFIG_PM_SLEEP
899
900/*
901 * Empty handlers so that mmc subsystem doesn't remove us entirely during
902 * suspend. We instead follow cfg80211 suspend/resume handlers.
903 */
904static int ath6kl_sdio_pm_suspend(struct device *device)
905{
906 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n");
907
908 return 0;
909}
910
911static int ath6kl_sdio_pm_resume(struct device *device)
912{
913 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n");
914
915 return 0;
916}
917
918static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend,
919 ath6kl_sdio_pm_resume);
920
921#define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops)
922
923#else
924
925#define ATH6KL_SDIO_PM_OPS NULL
926
927#endif /* CONFIG_PM_SLEEP */
928
768static int ath6kl_sdio_probe(struct sdio_func *func, 929static int ath6kl_sdio_probe(struct sdio_func *func,
769 const struct sdio_device_id *id) 930 const struct sdio_device_id *id)
770{ 931{
@@ -773,8 +934,8 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
773 struct ath6kl *ar; 934 struct ath6kl *ar;
774 int count; 935 int count;
775 936
776 ath6kl_dbg(ATH6KL_DBG_SDIO, 937 ath6kl_dbg(ATH6KL_DBG_BOOT,
777 "new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n", 938 "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
778 func->num, func->vendor, func->device, 939 func->num, func->vendor, func->device,
779 func->max_blksize, func->cur_blksize); 940 func->max_blksize, func->cur_blksize);
780 941
@@ -820,57 +981,22 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
820 981
821 ath6kl_sdio_set_mbox_info(ar); 982 ath6kl_sdio_set_mbox_info(ar);
822 983
823 sdio_claim_host(func); 984 ret = ath6kl_sdio_config(ar);
824
825 if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
826 MANUFACTURER_ID_AR6003_BASE) {
827 /* enable 4-bit ASYNC interrupt on AR6003 or later */
828 ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
829 CCCR_SDIO_IRQ_MODE_REG,
830 SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
831 if (ret) {
832 ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
833 ret);
834 sdio_release_host(func);
835 goto err_cfg80211;
836 }
837
838 ath6kl_dbg(ATH6KL_DBG_SDIO, "4-bit async irq mode enabled\n");
839 }
840
841 /* give us some time to enable, in ms */
842 func->enable_timeout = 100;
843
844 sdio_release_host(func);
845
846 ret = ath6kl_sdio_power_on(ar_sdio);
847 if (ret)
848 goto err_cfg80211;
849
850 sdio_claim_host(func);
851
852 ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
853 if (ret) { 985 if (ret) {
854 ath6kl_err("Set sdio block size %d failed: %d)\n", 986 ath6kl_err("Failed to config sdio: %d\n", ret);
855 HIF_MBOX_BLOCK_SIZE, ret); 987 goto err_core_alloc;
856 sdio_release_host(func);
857 goto err_off;
858 } 988 }
859 989
860 sdio_release_host(func);
861
862 ret = ath6kl_core_init(ar); 990 ret = ath6kl_core_init(ar);
863 if (ret) { 991 if (ret) {
864 ath6kl_err("Failed to init ath6kl core\n"); 992 ath6kl_err("Failed to init ath6kl core\n");
865 goto err_off; 993 goto err_core_alloc;
866 } 994 }
867 995
868 return ret; 996 return ret;
869 997
870err_off: 998err_core_alloc:
871 ath6kl_sdio_power_off(ar_sdio); 999 ath6kl_core_free(ar_sdio->ar);
872err_cfg80211:
873 ath6kl_cfg80211_deinit(ar_sdio->ar);
874err_dma: 1000err_dma:
875 kfree(ar_sdio->dma_buffer); 1001 kfree(ar_sdio->dma_buffer);
876err_hif: 1002err_hif:
@@ -883,8 +1009,8 @@ static void ath6kl_sdio_remove(struct sdio_func *func)
883{ 1009{
884 struct ath6kl_sdio *ar_sdio; 1010 struct ath6kl_sdio *ar_sdio;
885 1011
886 ath6kl_dbg(ATH6KL_DBG_SDIO, 1012 ath6kl_dbg(ATH6KL_DBG_BOOT,
887 "removed func %d vendor 0x%x device 0x%x\n", 1013 "sdio removed func %d vendor 0x%x device 0x%x\n",
888 func->num, func->vendor, func->device); 1014 func->num, func->vendor, func->device);
889 1015
890 ar_sdio = sdio_get_drvdata(func); 1016 ar_sdio = sdio_get_drvdata(func);
@@ -892,9 +1018,7 @@ static void ath6kl_sdio_remove(struct sdio_func *func)
892 ath6kl_stop_txrx(ar_sdio->ar); 1018 ath6kl_stop_txrx(ar_sdio->ar);
893 cancel_work_sync(&ar_sdio->wr_async_work); 1019 cancel_work_sync(&ar_sdio->wr_async_work);
894 1020
895 ath6kl_unavail_ev(ar_sdio->ar); 1021 ath6kl_core_cleanup(ar_sdio->ar);
896
897 ath6kl_sdio_power_off(ar_sdio);
898 1022
899 kfree(ar_sdio->dma_buffer); 1023 kfree(ar_sdio->dma_buffer);
900 kfree(ar_sdio); 1024 kfree(ar_sdio);
@@ -909,10 +1033,11 @@ static const struct sdio_device_id ath6kl_sdio_devices[] = {
909MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices); 1033MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);
910 1034
911static struct sdio_driver ath6kl_sdio_driver = { 1035static struct sdio_driver ath6kl_sdio_driver = {
912 .name = "ath6kl_sdio", 1036 .name = "ath6kl",
913 .id_table = ath6kl_sdio_devices, 1037 .id_table = ath6kl_sdio_devices,
914 .probe = ath6kl_sdio_probe, 1038 .probe = ath6kl_sdio_probe,
915 .remove = ath6kl_sdio_remove, 1039 .remove = ath6kl_sdio_remove,
1040 .drv.pm = ATH6KL_SDIO_PM_OPS,
916}; 1041};
917 1042
918static int __init ath6kl_sdio_init(void) 1043static int __init ath6kl_sdio_init(void)
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
index c9a76051f042..687e2b350e8f 100644
--- a/drivers/net/wireless/ath/ath6kl/target.h
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -320,7 +320,10 @@ struct host_interest {
320| (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2) 320| (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2)
321|------------------------------------------------------------------------------| 321|------------------------------------------------------------------------------|
322*/ 322*/
323#define HI_OPTION_FW_MODE_BITS 0x2
323#define HI_OPTION_FW_MODE_SHIFT 0xC 324#define HI_OPTION_FW_MODE_SHIFT 0xC
325
326#define HI_OPTION_FW_SUBMODE_BITS 0x2
324#define HI_OPTION_FW_SUBMODE_SHIFT 0x14 327#define HI_OPTION_FW_SUBMODE_SHIFT 0x14
325 328
326/* Convert a Target virtual address into a Target physical address */ 329/* Convert a Target virtual address into a Target physical address */
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index a7117074f81c..d9cff2b950b1 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -77,12 +77,13 @@ static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
77 return ar->node_map[ep_map].ep_id; 77 return ar->node_map[ep_map].ep_id;
78} 78}
79 79
80static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb, 80static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
81 bool *more_data) 81 bool *more_data)
82{ 82{
83 struct ethhdr *datap = (struct ethhdr *) skb->data; 83 struct ethhdr *datap = (struct ethhdr *) skb->data;
84 struct ath6kl_sta *conn = NULL; 84 struct ath6kl_sta *conn = NULL;
85 bool ps_queued = false, is_psq_empty = false; 85 bool ps_queued = false, is_psq_empty = false;
86 struct ath6kl *ar = vif->ar;
86 87
87 if (is_multicast_ether_addr(datap->h_dest)) { 88 if (is_multicast_ether_addr(datap->h_dest)) {
88 u8 ctr = 0; 89 u8 ctr = 0;
@@ -100,7 +101,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
100 * If this transmit is not because of a Dtim Expiry 101 * If this transmit is not because of a Dtim Expiry
101 * q it. 102 * q it.
102 */ 103 */
103 if (!test_bit(DTIM_EXPIRED, &ar->flag)) { 104 if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
104 bool is_mcastq_empty = false; 105 bool is_mcastq_empty = false;
105 106
106 spin_lock_bh(&ar->mcastpsq_lock); 107 spin_lock_bh(&ar->mcastpsq_lock);
@@ -116,6 +117,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
116 */ 117 */
117 if (is_mcastq_empty) 118 if (is_mcastq_empty)
118 ath6kl_wmi_set_pvb_cmd(ar->wmi, 119 ath6kl_wmi_set_pvb_cmd(ar->wmi,
120 vif->fw_vif_idx,
119 MCAST_AID, 1); 121 MCAST_AID, 1);
120 122
121 ps_queued = true; 123 ps_queued = true;
@@ -131,7 +133,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
131 } 133 }
132 } 134 }
133 } else { 135 } else {
134 conn = ath6kl_find_sta(ar, datap->h_dest); 136 conn = ath6kl_find_sta(vif, datap->h_dest);
135 if (!conn) { 137 if (!conn) {
136 dev_kfree_skb(skb); 138 dev_kfree_skb(skb);
137 139
@@ -154,6 +156,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
154 */ 156 */
155 if (is_psq_empty) 157 if (is_psq_empty)
156 ath6kl_wmi_set_pvb_cmd(ar->wmi, 158 ath6kl_wmi_set_pvb_cmd(ar->wmi,
159 vif->fw_vif_idx,
157 conn->aid, 1); 160 conn->aid, 1);
158 161
159 ps_queued = true; 162 ps_queued = true;
@@ -235,6 +238,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
235 struct ath6kl *ar = ath6kl_priv(dev); 238 struct ath6kl *ar = ath6kl_priv(dev);
236 struct ath6kl_cookie *cookie = NULL; 239 struct ath6kl_cookie *cookie = NULL;
237 enum htc_endpoint_id eid = ENDPOINT_UNUSED; 240 enum htc_endpoint_id eid = ENDPOINT_UNUSED;
241 struct ath6kl_vif *vif = netdev_priv(dev);
238 u32 map_no = 0; 242 u32 map_no = 0;
239 u16 htc_tag = ATH6KL_DATA_PKT_TAG; 243 u16 htc_tag = ATH6KL_DATA_PKT_TAG;
240 u8 ac = 99 ; /* initialize to unmapped ac */ 244 u8 ac = 99 ; /* initialize to unmapped ac */
@@ -246,7 +250,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
246 skb, skb->data, skb->len); 250 skb, skb->data, skb->len);
247 251
248 /* If target is not associated */ 252 /* If target is not associated */
249 if (!test_bit(CONNECTED, &ar->flag)) { 253 if (!test_bit(CONNECTED, &vif->flags)) {
250 dev_kfree_skb(skb); 254 dev_kfree_skb(skb);
251 return 0; 255 return 0;
252 } 256 }
@@ -255,15 +259,21 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
255 goto fail_tx; 259 goto fail_tx;
256 260
257 /* AP mode Power saving processing */ 261 /* AP mode Power saving processing */
258 if (ar->nw_type == AP_NETWORK) { 262 if (vif->nw_type == AP_NETWORK) {
259 if (ath6kl_powersave_ap(ar, skb, &more_data)) 263 if (ath6kl_powersave_ap(vif, skb, &more_data))
260 return 0; 264 return 0;
261 } 265 }
262 266
263 if (test_bit(WMI_ENABLED, &ar->flag)) { 267 if (test_bit(WMI_ENABLED, &ar->flag)) {
264 if (skb_headroom(skb) < dev->needed_headroom) { 268 if (skb_headroom(skb) < dev->needed_headroom) {
265 WARN_ON(1); 269 struct sk_buff *tmp_skb = skb;
266 goto fail_tx; 270
271 skb = skb_realloc_headroom(skb, dev->needed_headroom);
272 kfree_skb(tmp_skb);
273 if (skb == NULL) {
274 vif->net_stats.tx_dropped++;
275 return 0;
276 }
267 } 277 }
268 278
269 if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) { 279 if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
@@ -272,18 +282,20 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
272 } 282 }
273 283
274 if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE, 284 if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE,
275 more_data, 0, 0, NULL)) { 285 more_data, 0, 0, NULL,
286 vif->fw_vif_idx)) {
276 ath6kl_err("wmi_data_hdr_add failed\n"); 287 ath6kl_err("wmi_data_hdr_add failed\n");
277 goto fail_tx; 288 goto fail_tx;
278 } 289 }
279 290
280 if ((ar->nw_type == ADHOC_NETWORK) && 291 if ((vif->nw_type == ADHOC_NETWORK) &&
281 ar->ibss_ps_enable && test_bit(CONNECTED, &ar->flag)) 292 ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
282 chk_adhoc_ps_mapping = true; 293 chk_adhoc_ps_mapping = true;
283 else { 294 else {
284 /* get the stream mapping */ 295 /* get the stream mapping */
285 ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, skb, 296 ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
286 0, test_bit(WMM_ENABLED, &ar->flag), &ac); 297 vif->fw_vif_idx, skb,
298 0, test_bit(WMM_ENABLED, &vif->flags), &ac);
287 if (ret) 299 if (ret)
288 goto fail_tx; 300 goto fail_tx;
289 } 301 }
@@ -354,8 +366,8 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
354fail_tx: 366fail_tx:
355 dev_kfree_skb(skb); 367 dev_kfree_skb(skb);
356 368
357 ar->net_stats.tx_dropped++; 369 vif->net_stats.tx_dropped++;
358 ar->net_stats.tx_aborted_errors++; 370 vif->net_stats.tx_aborted_errors++;
359 371
360 return 0; 372 return 0;
361} 373}
@@ -426,7 +438,9 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
426 struct htc_packet *packet) 438 struct htc_packet *packet)
427{ 439{
428 struct ath6kl *ar = target->dev->ar; 440 struct ath6kl *ar = target->dev->ar;
441 struct ath6kl_vif *vif;
429 enum htc_endpoint_id endpoint = packet->endpoint; 442 enum htc_endpoint_id endpoint = packet->endpoint;
443 enum htc_send_full_action action = HTC_SEND_FULL_KEEP;
430 444
431 if (endpoint == ar->ctrl_ep) { 445 if (endpoint == ar->ctrl_ep) {
432 /* 446 /*
@@ -439,19 +453,11 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
439 set_bit(WMI_CTRL_EP_FULL, &ar->flag); 453 set_bit(WMI_CTRL_EP_FULL, &ar->flag);
440 spin_unlock_bh(&ar->lock); 454 spin_unlock_bh(&ar->lock);
441 ath6kl_err("wmi ctrl ep is full\n"); 455 ath6kl_err("wmi ctrl ep is full\n");
442 return HTC_SEND_FULL_KEEP; 456 goto stop_adhoc_netq;
443 } 457 }
444 458
445 if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG) 459 if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
446 return HTC_SEND_FULL_KEEP; 460 goto stop_adhoc_netq;
447
448 if (ar->nw_type == ADHOC_NETWORK)
449 /*
450 * In adhoc mode, we cannot differentiate traffic
451 * priorities so there is no need to continue, however we
452 * should stop the network.
453 */
454 goto stop_net_queues;
455 461
456 /* 462 /*
457 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for 463 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
@@ -459,29 +465,43 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
459 */ 465 */
460 if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] < 466 if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
461 ar->hiac_stream_active_pri && 467 ar->hiac_stream_active_pri &&
462 ar->cookie_count <= MAX_HI_COOKIE_NUM) 468 ar->cookie_count <= MAX_HI_COOKIE_NUM) {
463 /* 469 /*
464 * Give preference to the highest priority stream by 470 * Give preference to the highest priority stream by
465 * dropping the packets which overflowed. 471 * dropping the packets which overflowed.
466 */ 472 */
467 return HTC_SEND_FULL_DROP; 473 action = HTC_SEND_FULL_DROP;
474 goto stop_adhoc_netq;
475 }
468 476
469stop_net_queues: 477stop_adhoc_netq:
470 spin_lock_bh(&ar->lock); 478 /* FIXME: Locking */
471 set_bit(NETQ_STOPPED, &ar->flag); 479 spin_lock_bh(&ar->list_lock);
472 spin_unlock_bh(&ar->lock); 480 list_for_each_entry(vif, &ar->vif_list, list) {
473 netif_stop_queue(ar->net_dev); 481 if (vif->nw_type == ADHOC_NETWORK) {
482 spin_unlock_bh(&ar->list_lock);
474 483
475 return HTC_SEND_FULL_KEEP; 484 spin_lock_bh(&vif->if_lock);
485 set_bit(NETQ_STOPPED, &vif->flags);
486 spin_unlock_bh(&vif->if_lock);
487 netif_stop_queue(vif->ndev);
488
489 return action;
490 }
491 }
492 spin_unlock_bh(&ar->list_lock);
493
494 return action;
476} 495}
477 496
478/* TODO this needs to be looked at */ 497/* TODO this needs to be looked at */
479static void ath6kl_tx_clear_node_map(struct ath6kl *ar, 498static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
480 enum htc_endpoint_id eid, u32 map_no) 499 enum htc_endpoint_id eid, u32 map_no)
481{ 500{
501 struct ath6kl *ar = vif->ar;
482 u32 i; 502 u32 i;
483 503
484 if (ar->nw_type != ADHOC_NETWORK) 504 if (vif->nw_type != ADHOC_NETWORK)
485 return; 505 return;
486 506
487 if (!ar->ibss_ps_enable) 507 if (!ar->ibss_ps_enable)
@@ -523,7 +543,9 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
523 int status; 543 int status;
524 enum htc_endpoint_id eid; 544 enum htc_endpoint_id eid;
525 bool wake_event = false; 545 bool wake_event = false;
526 bool flushing = false; 546 bool flushing[MAX_NUM_VIF] = {false};
547 u8 if_idx;
548 struct ath6kl_vif *vif;
527 549
528 skb_queue_head_init(&skb_queue); 550 skb_queue_head_init(&skb_queue);
529 551
@@ -569,15 +591,30 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
569 wake_event = true; 591 wake_event = true;
570 } 592 }
571 593
594 if (eid == ar->ctrl_ep) {
595 if_idx = wmi_cmd_hdr_get_if_idx(
596 (struct wmi_cmd_hdr *) skb->data);
597 } else {
598 if_idx = wmi_data_hdr_get_if_idx(
599 (struct wmi_data_hdr *) skb->data);
600 }
601
602 vif = ath6kl_get_vif_by_index(ar, if_idx);
603 if (!vif) {
604 ath6kl_free_cookie(ar, ath6kl_cookie);
605 continue;
606 }
607
572 if (status) { 608 if (status) {
573 if (status == -ECANCELED) 609 if (status == -ECANCELED)
574 /* a packet was flushed */ 610 /* a packet was flushed */
575 flushing = true; 611 flushing[if_idx] = true;
612
613 vif->net_stats.tx_errors++;
576 614
577 ar->net_stats.tx_errors++; 615 if (status != -ENOSPC && status != -ECANCELED)
616 ath6kl_warn("tx complete error: %d\n", status);
578 617
579 if (status != -ENOSPC)
580 ath6kl_err("tx error, status: 0x%x\n", status);
581 ath6kl_dbg(ATH6KL_DBG_WLAN_TX, 618 ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
582 "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n", 619 "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
583 __func__, skb, packet->buf, packet->act_len, 620 __func__, skb, packet->buf, packet->act_len,
@@ -588,27 +625,34 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
588 __func__, skb, packet->buf, packet->act_len, 625 __func__, skb, packet->buf, packet->act_len,
589 eid, "OK"); 626 eid, "OK");
590 627
591 flushing = false; 628 flushing[if_idx] = false;
592 ar->net_stats.tx_packets++; 629 vif->net_stats.tx_packets++;
593 ar->net_stats.tx_bytes += skb->len; 630 vif->net_stats.tx_bytes += skb->len;
594 } 631 }
595 632
596 ath6kl_tx_clear_node_map(ar, eid, map_no); 633 ath6kl_tx_clear_node_map(vif, eid, map_no);
597 634
598 ath6kl_free_cookie(ar, ath6kl_cookie); 635 ath6kl_free_cookie(ar, ath6kl_cookie);
599 636
600 if (test_bit(NETQ_STOPPED, &ar->flag)) 637 if (test_bit(NETQ_STOPPED, &vif->flags))
601 clear_bit(NETQ_STOPPED, &ar->flag); 638 clear_bit(NETQ_STOPPED, &vif->flags);
602 } 639 }
603 640
604 spin_unlock_bh(&ar->lock); 641 spin_unlock_bh(&ar->lock);
605 642
606 __skb_queue_purge(&skb_queue); 643 __skb_queue_purge(&skb_queue);
607 644
608 if (test_bit(CONNECTED, &ar->flag)) { 645 /* FIXME: Locking */
609 if (!flushing) 646 spin_lock_bh(&ar->list_lock);
610 netif_wake_queue(ar->net_dev); 647 list_for_each_entry(vif, &ar->vif_list, list) {
648 if (test_bit(CONNECTED, &vif->flags) &&
649 !flushing[vif->fw_vif_idx]) {
650 spin_unlock_bh(&ar->list_lock);
651 netif_wake_queue(vif->ndev);
652 spin_lock_bh(&ar->list_lock);
653 }
611 } 654 }
655 spin_unlock_bh(&ar->list_lock);
612 656
613 if (wake_event) 657 if (wake_event)
614 wake_up(&ar->event_wq); 658 wake_up(&ar->event_wq);
@@ -1041,8 +1085,9 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1041 struct ath6kl_sta *conn = NULL; 1085 struct ath6kl_sta *conn = NULL;
1042 struct sk_buff *skb1 = NULL; 1086 struct sk_buff *skb1 = NULL;
1043 struct ethhdr *datap = NULL; 1087 struct ethhdr *datap = NULL;
1088 struct ath6kl_vif *vif;
1044 u16 seq_no, offset; 1089 u16 seq_no, offset;
1045 u8 tid; 1090 u8 tid, if_idx;
1046 1091
1047 ath6kl_dbg(ATH6KL_DBG_WLAN_RX, 1092 ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
1048 "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d", 1093 "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
@@ -1050,7 +1095,23 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1050 packet->act_len, status); 1095 packet->act_len, status);
1051 1096
1052 if (status || !(skb->data + HTC_HDR_LENGTH)) { 1097 if (status || !(skb->data + HTC_HDR_LENGTH)) {
1053 ar->net_stats.rx_errors++; 1098 dev_kfree_skb(skb);
1099 return;
1100 }
1101
1102 skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
1103 skb_pull(skb, HTC_HDR_LENGTH);
1104
1105 if (ept == ar->ctrl_ep) {
1106 if_idx =
1107 wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
1108 } else {
1109 if_idx =
1110 wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
1111 }
1112
1113 vif = ath6kl_get_vif_by_index(ar, if_idx);
1114 if (!vif) {
1054 dev_kfree_skb(skb); 1115 dev_kfree_skb(skb);
1055 return; 1116 return;
1056 } 1117 }
@@ -1059,28 +1120,28 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1059 * Take lock to protect buffer counts and adaptive power throughput 1120 * Take lock to protect buffer counts and adaptive power throughput
1060 * state. 1121 * state.
1061 */ 1122 */
1062 spin_lock_bh(&ar->lock); 1123 spin_lock_bh(&vif->if_lock);
1063 1124
1064 ar->net_stats.rx_packets++; 1125 vif->net_stats.rx_packets++;
1065 ar->net_stats.rx_bytes += packet->act_len; 1126 vif->net_stats.rx_bytes += packet->act_len;
1066 1127
1067 spin_unlock_bh(&ar->lock); 1128 spin_unlock_bh(&vif->if_lock);
1068 1129
1069 skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
1070 skb_pull(skb, HTC_HDR_LENGTH);
1071 1130
1072 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ", 1131 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
1073 skb->data, skb->len); 1132 skb->data, skb->len);
1074 1133
1075 skb->dev = ar->net_dev; 1134 skb->dev = vif->ndev;
1076 1135
1077 if (!test_bit(WMI_ENABLED, &ar->flag)) { 1136 if (!test_bit(WMI_ENABLED, &ar->flag)) {
1078 if (EPPING_ALIGNMENT_PAD > 0) 1137 if (EPPING_ALIGNMENT_PAD > 0)
1079 skb_pull(skb, EPPING_ALIGNMENT_PAD); 1138 skb_pull(skb, EPPING_ALIGNMENT_PAD);
1080 ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb); 1139 ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
1081 return; 1140 return;
1082 } 1141 }
1083 1142
1143 ath6kl_check_wow_status(ar);
1144
1084 if (ept == ar->ctrl_ep) { 1145 if (ept == ar->ctrl_ep) {
1085 ath6kl_wmi_control_rx(ar->wmi, skb); 1146 ath6kl_wmi_control_rx(ar->wmi, skb);
1086 return; 1147 return;
@@ -1096,18 +1157,18 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1096 * that do not have LLC hdr. They are 16 bytes in size. 1157 * that do not have LLC hdr. They are 16 bytes in size.
1097 * Allow these frames in the AP mode. 1158 * Allow these frames in the AP mode.
1098 */ 1159 */
1099 if (ar->nw_type != AP_NETWORK && 1160 if (vif->nw_type != AP_NETWORK &&
1100 ((packet->act_len < min_hdr_len) || 1161 ((packet->act_len < min_hdr_len) ||
1101 (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) { 1162 (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
1102 ath6kl_info("frame len is too short or too long\n"); 1163 ath6kl_info("frame len is too short or too long\n");
1103 ar->net_stats.rx_errors++; 1164 vif->net_stats.rx_errors++;
1104 ar->net_stats.rx_length_errors++; 1165 vif->net_stats.rx_length_errors++;
1105 dev_kfree_skb(skb); 1166 dev_kfree_skb(skb);
1106 return; 1167 return;
1107 } 1168 }
1108 1169
1109 /* Get the Power save state of the STA */ 1170 /* Get the Power save state of the STA */
1110 if (ar->nw_type == AP_NETWORK) { 1171 if (vif->nw_type == AP_NETWORK) {
1111 meta_type = wmi_data_hdr_get_meta(dhdr); 1172 meta_type = wmi_data_hdr_get_meta(dhdr);
1112 1173
1113 ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) & 1174 ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
@@ -1129,7 +1190,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1129 } 1190 }
1130 1191
1131 datap = (struct ethhdr *) (skb->data + offset); 1192 datap = (struct ethhdr *) (skb->data + offset);
1132 conn = ath6kl_find_sta(ar, datap->h_source); 1193 conn = ath6kl_find_sta(vif, datap->h_source);
1133 1194
1134 if (!conn) { 1195 if (!conn) {
1135 dev_kfree_skb(skb); 1196 dev_kfree_skb(skb);
@@ -1160,12 +1221,13 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1160 while ((skbuff = skb_dequeue(&conn->psq)) 1221 while ((skbuff = skb_dequeue(&conn->psq))
1161 != NULL) { 1222 != NULL) {
1162 spin_unlock_bh(&conn->psq_lock); 1223 spin_unlock_bh(&conn->psq_lock);
1163 ath6kl_data_tx(skbuff, ar->net_dev); 1224 ath6kl_data_tx(skbuff, vif->ndev);
1164 spin_lock_bh(&conn->psq_lock); 1225 spin_lock_bh(&conn->psq_lock);
1165 } 1226 }
1166 spin_unlock_bh(&conn->psq_lock); 1227 spin_unlock_bh(&conn->psq_lock);
1167 /* Clear the PVB for this STA */ 1228 /* Clear the PVB for this STA */
1168 ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0); 1229 ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
1230 conn->aid, 0);
1169 } 1231 }
1170 } 1232 }
1171 1233
@@ -1215,12 +1277,12 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1215 return; 1277 return;
1216 } 1278 }
1217 1279
1218 if (!(ar->net_dev->flags & IFF_UP)) { 1280 if (!(vif->ndev->flags & IFF_UP)) {
1219 dev_kfree_skb(skb); 1281 dev_kfree_skb(skb);
1220 return; 1282 return;
1221 } 1283 }
1222 1284
1223 if (ar->nw_type == AP_NETWORK) { 1285 if (vif->nw_type == AP_NETWORK) {
1224 datap = (struct ethhdr *) skb->data; 1286 datap = (struct ethhdr *) skb->data;
1225 if (is_multicast_ether_addr(datap->h_dest)) 1287 if (is_multicast_ether_addr(datap->h_dest))
1226 /* 1288 /*
@@ -1235,8 +1297,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1235 * frame to it on the air else send the 1297 * frame to it on the air else send the
1236 * frame up the stack. 1298 * frame up the stack.
1237 */ 1299 */
1238 struct ath6kl_sta *conn = NULL; 1300 conn = ath6kl_find_sta(vif, datap->h_dest);
1239 conn = ath6kl_find_sta(ar, datap->h_dest);
1240 1301
1241 if (conn && ar->intra_bss) { 1302 if (conn && ar->intra_bss) {
1242 skb1 = skb; 1303 skb1 = skb;
@@ -1247,18 +1308,23 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1247 } 1308 }
1248 } 1309 }
1249 if (skb1) 1310 if (skb1)
1250 ath6kl_data_tx(skb1, ar->net_dev); 1311 ath6kl_data_tx(skb1, vif->ndev);
1312
1313 if (skb == NULL) {
1314 /* nothing to deliver up the stack */
1315 return;
1316 }
1251 } 1317 }
1252 1318
1253 datap = (struct ethhdr *) skb->data; 1319 datap = (struct ethhdr *) skb->data;
1254 1320
1255 if (is_unicast_ether_addr(datap->h_dest) && 1321 if (is_unicast_ether_addr(datap->h_dest) &&
1256 aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no, 1322 aggr_process_recv_frm(vif->aggr_cntxt, tid, seq_no,
1257 is_amsdu, skb)) 1323 is_amsdu, skb))
1258 /* aggregation code will handle the skb */ 1324 /* aggregation code will handle the skb */
1259 return; 1325 return;
1260 1326
1261 ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb); 1327 ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
1262} 1328}
1263 1329
1264static void aggr_timeout(unsigned long arg) 1330static void aggr_timeout(unsigned long arg)
@@ -1336,9 +1402,10 @@ static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
1336 memset(stats, 0, sizeof(struct rxtid_stats)); 1402 memset(stats, 0, sizeof(struct rxtid_stats));
1337} 1403}
1338 1404
1339void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no, u8 win_sz) 1405void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
1406 u8 win_sz)
1340{ 1407{
1341 struct aggr_info *p_aggr = ar->aggr_cntxt; 1408 struct aggr_info *p_aggr = vif->aggr_cntxt;
1342 struct rxtid *rxtid; 1409 struct rxtid *rxtid;
1343 struct rxtid_stats *stats; 1410 struct rxtid_stats *stats;
1344 u16 hold_q_size; 1411 u16 hold_q_size;
@@ -1405,9 +1472,9 @@ struct aggr_info *aggr_init(struct net_device *dev)
1405 return p_aggr; 1472 return p_aggr;
1406} 1473}
1407 1474
1408void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid) 1475void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid)
1409{ 1476{
1410 struct aggr_info *p_aggr = ar->aggr_cntxt; 1477 struct aggr_info *p_aggr = vif->aggr_cntxt;
1411 struct rxtid *rxtid; 1478 struct rxtid *rxtid;
1412 1479
1413 if (!p_aggr) 1480 if (!p_aggr)
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index a7de23cbd2c7..922344d3b262 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -21,7 +21,7 @@
21#include "../regd.h" 21#include "../regd.h"
22#include "../regd_common.h" 22#include "../regd_common.h"
23 23
24static int ath6kl_wmi_sync_point(struct wmi *wmi); 24static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx);
25 25
26static const s32 wmi_rate_tbl[][2] = { 26static const s32 wmi_rate_tbl[][2] = {
27 /* {W/O SGI, with SGI} */ 27 /* {W/O SGI, with SGI} */
@@ -81,6 +81,26 @@ enum htc_endpoint_id ath6kl_wmi_get_control_ep(struct wmi *wmi)
81 return wmi->ep_id; 81 return wmi->ep_id;
82} 82}
83 83
84struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx)
85{
86 struct ath6kl_vif *vif, *found = NULL;
87
88 if (WARN_ON(if_idx > (MAX_NUM_VIF - 1)))
89 return NULL;
90
91 /* FIXME: Locking */
92 spin_lock_bh(&ar->list_lock);
93 list_for_each_entry(vif, &ar->vif_list, list) {
94 if (vif->fw_vif_idx == if_idx) {
95 found = vif;
96 break;
97 }
98 }
99 spin_unlock_bh(&ar->list_lock);
100
101 return found;
102}
103
84/* Performs DIX to 802.3 encapsulation for transmit packets. 104/* Performs DIX to 802.3 encapsulation for transmit packets.
85 * Assumes the entire DIX header is contigous and that there is 105 * Assumes the entire DIX header is contigous and that there is
86 * enough room in the buffer for a 802.3 mac header and LLC+SNAP headers. 106 * enough room in the buffer for a 802.3 mac header and LLC+SNAP headers.
@@ -162,12 +182,12 @@ static int ath6kl_wmi_meta_add(struct wmi *wmi, struct sk_buff *skb,
162int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb, 182int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
163 u8 msg_type, bool more_data, 183 u8 msg_type, bool more_data,
164 enum wmi_data_hdr_data_type data_type, 184 enum wmi_data_hdr_data_type data_type,
165 u8 meta_ver, void *tx_meta_info) 185 u8 meta_ver, void *tx_meta_info, u8 if_idx)
166{ 186{
167 struct wmi_data_hdr *data_hdr; 187 struct wmi_data_hdr *data_hdr;
168 int ret; 188 int ret;
169 189
170 if (WARN_ON(skb == NULL)) 190 if (WARN_ON(skb == NULL || (if_idx > MAX_NUM_VIF - 1)))
171 return -EINVAL; 191 return -EINVAL;
172 192
173 if (tx_meta_info) { 193 if (tx_meta_info) {
@@ -189,7 +209,7 @@ int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
189 WMI_DATA_HDR_MORE_MASK << WMI_DATA_HDR_MORE_SHIFT; 209 WMI_DATA_HDR_MORE_MASK << WMI_DATA_HDR_MORE_SHIFT;
190 210
191 data_hdr->info2 = cpu_to_le16(meta_ver << WMI_DATA_HDR_META_SHIFT); 211 data_hdr->info2 = cpu_to_le16(meta_ver << WMI_DATA_HDR_META_SHIFT);
192 data_hdr->info3 = 0; 212 data_hdr->info3 = cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK);
193 213
194 return 0; 214 return 0;
195} 215}
@@ -216,7 +236,8 @@ static u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri)
216 return ip_pri; 236 return ip_pri;
217} 237}
218 238
219int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb, 239int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
240 struct sk_buff *skb,
220 u32 layer2_priority, bool wmm_enabled, 241 u32 layer2_priority, bool wmm_enabled,
221 u8 *ac) 242 u8 *ac)
222{ 243{
@@ -262,7 +283,12 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
262 usr_pri = layer2_priority & 0x7; 283 usr_pri = layer2_priority & 0x7;
263 } 284 }
264 285
265 /* workaround for WMM S5 */ 286 /*
287 * workaround for WMM S5
288 *
289 * FIXME: wmi->traffic_class is always 100 so this test doesn't
290 * make sense
291 */
266 if ((wmi->traffic_class == WMM_AC_VI) && 292 if ((wmi->traffic_class == WMM_AC_VI) &&
267 ((usr_pri == 5) || (usr_pri == 4))) 293 ((usr_pri == 5) || (usr_pri == 4)))
268 usr_pri = 1; 294 usr_pri = 1;
@@ -284,7 +310,7 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
284 cpu_to_le32(WMI_IMPLICIT_PSTREAM_INACTIVITY_INT); 310 cpu_to_le32(WMI_IMPLICIT_PSTREAM_INACTIVITY_INT);
285 /* Implicit streams are created with TSID 0xFF */ 311 /* Implicit streams are created with TSID 0xFF */
286 cmd.tsid = WMI_IMPLICIT_PSTREAM; 312 cmd.tsid = WMI_IMPLICIT_PSTREAM;
287 ath6kl_wmi_create_pstream_cmd(wmi, &cmd); 313 ath6kl_wmi_create_pstream_cmd(wmi, if_idx, &cmd);
288 } 314 }
289 315
290 *ac = traffic_class; 316 *ac = traffic_class;
@@ -410,13 +436,14 @@ static int ath6kl_wmi_tx_complete_event_rx(u8 *datap, int len)
410} 436}
411 437
412static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap, 438static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap,
413 int len) 439 int len, struct ath6kl_vif *vif)
414{ 440{
415 struct wmi_remain_on_chnl_event *ev; 441 struct wmi_remain_on_chnl_event *ev;
416 u32 freq; 442 u32 freq;
417 u32 dur; 443 u32 dur;
418 struct ieee80211_channel *chan; 444 struct ieee80211_channel *chan;
419 struct ath6kl *ar = wmi->parent_dev; 445 struct ath6kl *ar = wmi->parent_dev;
446 u32 id;
420 447
421 if (len < sizeof(*ev)) 448 if (len < sizeof(*ev))
422 return -EINVAL; 449 return -EINVAL;
@@ -426,26 +453,29 @@ static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap,
426 dur = le32_to_cpu(ev->duration); 453 dur = le32_to_cpu(ev->duration);
427 ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: freq=%u dur=%u\n", 454 ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: freq=%u dur=%u\n",
428 freq, dur); 455 freq, dur);
429 chan = ieee80211_get_channel(ar->wdev->wiphy, freq); 456 chan = ieee80211_get_channel(ar->wiphy, freq);
430 if (!chan) { 457 if (!chan) {
431 ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: Unknown channel " 458 ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: Unknown channel "
432 "(freq=%u)\n", freq); 459 "(freq=%u)\n", freq);
433 return -EINVAL; 460 return -EINVAL;
434 } 461 }
435 cfg80211_ready_on_channel(ar->net_dev, 1, chan, NL80211_CHAN_NO_HT, 462 id = vif->last_roc_id;
463 cfg80211_ready_on_channel(vif->ndev, id, chan, NL80211_CHAN_NO_HT,
436 dur, GFP_ATOMIC); 464 dur, GFP_ATOMIC);
437 465
438 return 0; 466 return 0;
439} 467}
440 468
441static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi, 469static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi,
442 u8 *datap, int len) 470 u8 *datap, int len,
471 struct ath6kl_vif *vif)
443{ 472{
444 struct wmi_cancel_remain_on_chnl_event *ev; 473 struct wmi_cancel_remain_on_chnl_event *ev;
445 u32 freq; 474 u32 freq;
446 u32 dur; 475 u32 dur;
447 struct ieee80211_channel *chan; 476 struct ieee80211_channel *chan;
448 struct ath6kl *ar = wmi->parent_dev; 477 struct ath6kl *ar = wmi->parent_dev;
478 u32 id;
449 479
450 if (len < sizeof(*ev)) 480 if (len < sizeof(*ev))
451 return -EINVAL; 481 return -EINVAL;
@@ -455,23 +485,29 @@ static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi,
455 dur = le32_to_cpu(ev->duration); 485 dur = le32_to_cpu(ev->duration);
456 ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: freq=%u dur=%u " 486 ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: freq=%u dur=%u "
457 "status=%u\n", freq, dur, ev->status); 487 "status=%u\n", freq, dur, ev->status);
458 chan = ieee80211_get_channel(ar->wdev->wiphy, freq); 488 chan = ieee80211_get_channel(ar->wiphy, freq);
459 if (!chan) { 489 if (!chan) {
460 ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: Unknown " 490 ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: Unknown "
461 "channel (freq=%u)\n", freq); 491 "channel (freq=%u)\n", freq);
462 return -EINVAL; 492 return -EINVAL;
463 } 493 }
464 cfg80211_remain_on_channel_expired(ar->net_dev, 1, chan, 494 if (vif->last_cancel_roc_id &&
495 vif->last_cancel_roc_id + 1 == vif->last_roc_id)
496 id = vif->last_cancel_roc_id; /* event for cancel command */
497 else
498 id = vif->last_roc_id; /* timeout on uncanceled r-o-c */
499 vif->last_cancel_roc_id = 0;
500 cfg80211_remain_on_channel_expired(vif->ndev, id, chan,
465 NL80211_CHAN_NO_HT, GFP_ATOMIC); 501 NL80211_CHAN_NO_HT, GFP_ATOMIC);
466 502
467 return 0; 503 return 0;
468} 504}
469 505
470static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len) 506static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len,
507 struct ath6kl_vif *vif)
471{ 508{
472 struct wmi_tx_status_event *ev; 509 struct wmi_tx_status_event *ev;
473 u32 id; 510 u32 id;
474 struct ath6kl *ar = wmi->parent_dev;
475 511
476 if (len < sizeof(*ev)) 512 if (len < sizeof(*ev))
477 return -EINVAL; 513 return -EINVAL;
@@ -481,7 +517,7 @@ static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len)
481 ath6kl_dbg(ATH6KL_DBG_WMI, "tx_status: id=%x ack_status=%u\n", 517 ath6kl_dbg(ATH6KL_DBG_WMI, "tx_status: id=%x ack_status=%u\n",
482 id, ev->ack_status); 518 id, ev->ack_status);
483 if (wmi->last_mgmt_tx_frame) { 519 if (wmi->last_mgmt_tx_frame) {
484 cfg80211_mgmt_tx_status(ar->net_dev, id, 520 cfg80211_mgmt_tx_status(vif->ndev, id,
485 wmi->last_mgmt_tx_frame, 521 wmi->last_mgmt_tx_frame,
486 wmi->last_mgmt_tx_frame_len, 522 wmi->last_mgmt_tx_frame_len,
487 !!ev->ack_status, GFP_ATOMIC); 523 !!ev->ack_status, GFP_ATOMIC);
@@ -493,12 +529,12 @@ static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len)
493 return 0; 529 return 0;
494} 530}
495 531
496static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len) 532static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len,
533 struct ath6kl_vif *vif)
497{ 534{
498 struct wmi_p2p_rx_probe_req_event *ev; 535 struct wmi_p2p_rx_probe_req_event *ev;
499 u32 freq; 536 u32 freq;
500 u16 dlen; 537 u16 dlen;
501 struct ath6kl *ar = wmi->parent_dev;
502 538
503 if (len < sizeof(*ev)) 539 if (len < sizeof(*ev))
504 return -EINVAL; 540 return -EINVAL;
@@ -513,10 +549,10 @@ static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len)
513 } 549 }
514 ath6kl_dbg(ATH6KL_DBG_WMI, "rx_probe_req: len=%u freq=%u " 550 ath6kl_dbg(ATH6KL_DBG_WMI, "rx_probe_req: len=%u freq=%u "
515 "probe_req_report=%d\n", 551 "probe_req_report=%d\n",
516 dlen, freq, ar->probe_req_report); 552 dlen, freq, vif->probe_req_report);
517 553
518 if (ar->probe_req_report || ar->nw_type == AP_NETWORK) 554 if (vif->probe_req_report || vif->nw_type == AP_NETWORK)
519 cfg80211_rx_mgmt(ar->net_dev, freq, ev->data, dlen, GFP_ATOMIC); 555 cfg80211_rx_mgmt(vif->ndev, freq, ev->data, dlen, GFP_ATOMIC);
520 556
521 return 0; 557 return 0;
522} 558}
@@ -536,12 +572,12 @@ static int ath6kl_wmi_p2p_capabilities_event_rx(u8 *datap, int len)
536 return 0; 572 return 0;
537} 573}
538 574
539static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len) 575static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len,
576 struct ath6kl_vif *vif)
540{ 577{
541 struct wmi_rx_action_event *ev; 578 struct wmi_rx_action_event *ev;
542 u32 freq; 579 u32 freq;
543 u16 dlen; 580 u16 dlen;
544 struct ath6kl *ar = wmi->parent_dev;
545 581
546 if (len < sizeof(*ev)) 582 if (len < sizeof(*ev))
547 return -EINVAL; 583 return -EINVAL;
@@ -555,7 +591,7 @@ static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len)
555 return -EINVAL; 591 return -EINVAL;
556 } 592 }
557 ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq); 593 ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq);
558 cfg80211_rx_mgmt(ar->net_dev, freq, ev->data, dlen, GFP_ATOMIC); 594 cfg80211_rx_mgmt(vif->ndev, freq, ev->data, dlen, GFP_ATOMIC);
559 595
560 return 0; 596 return 0;
561} 597}
@@ -620,7 +656,8 @@ static inline struct sk_buff *ath6kl_wmi_get_new_buf(u32 size)
620} 656}
621 657
622/* Send a "simple" wmi command -- one with no arguments */ 658/* Send a "simple" wmi command -- one with no arguments */
623static int ath6kl_wmi_simple_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id) 659static int ath6kl_wmi_simple_cmd(struct wmi *wmi, u8 if_idx,
660 enum wmi_cmd_id cmd_id)
624{ 661{
625 struct sk_buff *skb; 662 struct sk_buff *skb;
626 int ret; 663 int ret;
@@ -629,7 +666,7 @@ static int ath6kl_wmi_simple_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id)
629 if (!skb) 666 if (!skb)
630 return -ENOMEM; 667 return -ENOMEM;
631 668
632 ret = ath6kl_wmi_cmd_send(wmi, skb, cmd_id, NO_SYNC_WMIFLAG); 669 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, cmd_id, NO_SYNC_WMIFLAG);
633 670
634 return ret; 671 return ret;
635} 672}
@@ -641,7 +678,6 @@ static int ath6kl_wmi_ready_event_rx(struct wmi *wmi, u8 *datap, int len)
641 if (len < sizeof(struct wmi_ready_event_2)) 678 if (len < sizeof(struct wmi_ready_event_2))
642 return -EINVAL; 679 return -EINVAL;
643 680
644 wmi->ready = true;
645 ath6kl_ready_event(wmi->parent_dev, ev->mac_addr, 681 ath6kl_ready_event(wmi->parent_dev, ev->mac_addr,
646 le32_to_cpu(ev->sw_version), 682 le32_to_cpu(ev->sw_version),
647 le32_to_cpu(ev->abi_version)); 683 le32_to_cpu(ev->abi_version));
@@ -673,32 +709,73 @@ int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi)
673 cmd->info.params.roam_rssi_floor = DEF_LRSSI_ROAM_FLOOR; 709 cmd->info.params.roam_rssi_floor = DEF_LRSSI_ROAM_FLOOR;
674 cmd->roam_ctrl = WMI_SET_LRSSI_SCAN_PARAMS; 710 cmd->roam_ctrl = WMI_SET_LRSSI_SCAN_PARAMS;
675 711
676 ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_ROAM_CTRL_CMDID, NO_SYNC_WMIFLAG); 712 ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
713 NO_SYNC_WMIFLAG);
677 714
678 return 0; 715 return 0;
679} 716}
680 717
681static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len) 718int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid)
719{
720 struct sk_buff *skb;
721 struct roam_ctrl_cmd *cmd;
722
723 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
724 if (!skb)
725 return -ENOMEM;
726
727 cmd = (struct roam_ctrl_cmd *) skb->data;
728 memset(cmd, 0, sizeof(*cmd));
729
730 memcpy(cmd->info.bssid, bssid, ETH_ALEN);
731 cmd->roam_ctrl = WMI_FORCE_ROAM;
732
733 ath6kl_dbg(ATH6KL_DBG_WMI, "force roam to %pM\n", bssid);
734 return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
735 NO_SYNC_WMIFLAG);
736}
737
738int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode)
739{
740 struct sk_buff *skb;
741 struct roam_ctrl_cmd *cmd;
742
743 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
744 if (!skb)
745 return -ENOMEM;
746
747 cmd = (struct roam_ctrl_cmd *) skb->data;
748 memset(cmd, 0, sizeof(*cmd));
749
750 cmd->info.roam_mode = mode;
751 cmd->roam_ctrl = WMI_SET_ROAM_MODE;
752
753 ath6kl_dbg(ATH6KL_DBG_WMI, "set roam mode %d\n", mode);
754 return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
755 NO_SYNC_WMIFLAG);
756}
757
758static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len,
759 struct ath6kl_vif *vif)
682{ 760{
683 struct wmi_connect_event *ev; 761 struct wmi_connect_event *ev;
684 u8 *pie, *peie; 762 u8 *pie, *peie;
685 struct ath6kl *ar = wmi->parent_dev;
686 763
687 if (len < sizeof(struct wmi_connect_event)) 764 if (len < sizeof(struct wmi_connect_event))
688 return -EINVAL; 765 return -EINVAL;
689 766
690 ev = (struct wmi_connect_event *) datap; 767 ev = (struct wmi_connect_event *) datap;
691 768
692 if (ar->nw_type == AP_NETWORK) { 769 if (vif->nw_type == AP_NETWORK) {
693 /* AP mode start/STA connected event */ 770 /* AP mode start/STA connected event */
694 struct net_device *dev = ar->net_dev; 771 struct net_device *dev = vif->ndev;
695 if (memcmp(dev->dev_addr, ev->u.ap_bss.bssid, ETH_ALEN) == 0) { 772 if (memcmp(dev->dev_addr, ev->u.ap_bss.bssid, ETH_ALEN) == 0) {
696 ath6kl_dbg(ATH6KL_DBG_WMI, "%s: freq %d bssid %pM " 773 ath6kl_dbg(ATH6KL_DBG_WMI, "%s: freq %d bssid %pM "
697 "(AP started)\n", 774 "(AP started)\n",
698 __func__, le16_to_cpu(ev->u.ap_bss.ch), 775 __func__, le16_to_cpu(ev->u.ap_bss.ch),
699 ev->u.ap_bss.bssid); 776 ev->u.ap_bss.bssid);
700 ath6kl_connect_ap_mode_bss( 777 ath6kl_connect_ap_mode_bss(
701 ar, le16_to_cpu(ev->u.ap_bss.ch)); 778 vif, le16_to_cpu(ev->u.ap_bss.ch));
702 } else { 779 } else {
703 ath6kl_dbg(ATH6KL_DBG_WMI, "%s: aid %u mac_addr %pM " 780 ath6kl_dbg(ATH6KL_DBG_WMI, "%s: aid %u mac_addr %pM "
704 "auth=%u keymgmt=%u cipher=%u apsd_info=%u " 781 "auth=%u keymgmt=%u cipher=%u apsd_info=%u "
@@ -710,7 +787,7 @@ static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len)
710 le16_to_cpu(ev->u.ap_sta.cipher), 787 le16_to_cpu(ev->u.ap_sta.cipher),
711 ev->u.ap_sta.apsd_info); 788 ev->u.ap_sta.apsd_info);
712 ath6kl_connect_ap_mode_sta( 789 ath6kl_connect_ap_mode_sta(
713 ar, ev->u.ap_sta.aid, ev->u.ap_sta.mac_addr, 790 vif, ev->u.ap_sta.aid, ev->u.ap_sta.mac_addr,
714 ev->u.ap_sta.keymgmt, 791 ev->u.ap_sta.keymgmt,
715 le16_to_cpu(ev->u.ap_sta.cipher), 792 le16_to_cpu(ev->u.ap_sta.cipher),
716 ev->u.ap_sta.auth, ev->assoc_req_len, 793 ev->u.ap_sta.auth, ev->assoc_req_len,
@@ -755,7 +832,7 @@ static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len)
755 pie += pie[1] + 2; 832 pie += pie[1] + 2;
756 } 833 }
757 834
758 ath6kl_connect_event(wmi->parent_dev, le16_to_cpu(ev->u.sta.ch), 835 ath6kl_connect_event(vif, le16_to_cpu(ev->u.sta.ch),
759 ev->u.sta.bssid, 836 ev->u.sta.bssid,
760 le16_to_cpu(ev->u.sta.listen_intvl), 837 le16_to_cpu(ev->u.sta.listen_intvl),
761 le16_to_cpu(ev->u.sta.beacon_intvl), 838 le16_to_cpu(ev->u.sta.beacon_intvl),
@@ -834,14 +911,15 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
834 alpha2[0] = country->isoName[0]; 911 alpha2[0] = country->isoName[0];
835 alpha2[1] = country->isoName[1]; 912 alpha2[1] = country->isoName[1];
836 913
837 regulatory_hint(wmi->parent_dev->wdev->wiphy, alpha2); 914 regulatory_hint(wmi->parent_dev->wiphy, alpha2);
838 915
839 ath6kl_dbg(ATH6KL_DBG_WMI, "Country alpha2 being used: %c%c\n", 916 ath6kl_dbg(ATH6KL_DBG_WMI, "Country alpha2 being used: %c%c\n",
840 alpha2[0], alpha2[1]); 917 alpha2[0], alpha2[1]);
841 } 918 }
842} 919}
843 920
844static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len) 921static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len,
922 struct ath6kl_vif *vif)
845{ 923{
846 struct wmi_disconnect_event *ev; 924 struct wmi_disconnect_event *ev;
847 wmi->traffic_class = 100; 925 wmi->traffic_class = 100;
@@ -857,10 +935,8 @@ static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len)
857 ev->disconn_reason, ev->assoc_resp_len); 935 ev->disconn_reason, ev->assoc_resp_len);
858 936
859 wmi->is_wmm_enabled = false; 937 wmi->is_wmm_enabled = false;
860 wmi->pair_crypto_type = NONE_CRYPT;
861 wmi->grp_crypto_type = NONE_CRYPT;
862 938
863 ath6kl_disconnect_event(wmi->parent_dev, ev->disconn_reason, 939 ath6kl_disconnect_event(vif, ev->disconn_reason,
864 ev->bssid, ev->assoc_resp_len, ev->assoc_info, 940 ev->bssid, ev->assoc_resp_len, ev->assoc_info,
865 le16_to_cpu(ev->proto_reason_status)); 941 le16_to_cpu(ev->proto_reason_status));
866 942
@@ -886,7 +962,8 @@ static int ath6kl_wmi_peer_node_event_rx(struct wmi *wmi, u8 *datap, int len)
886 return 0; 962 return 0;
887} 963}
888 964
889static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len) 965static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len,
966 struct ath6kl_vif *vif)
890{ 967{
891 struct wmi_tkip_micerr_event *ev; 968 struct wmi_tkip_micerr_event *ev;
892 969
@@ -895,12 +972,13 @@ static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len)
895 972
896 ev = (struct wmi_tkip_micerr_event *) datap; 973 ev = (struct wmi_tkip_micerr_event *) datap;
897 974
898 ath6kl_tkip_micerr_event(wmi->parent_dev, ev->key_id, ev->is_mcast); 975 ath6kl_tkip_micerr_event(vif, ev->key_id, ev->is_mcast);
899 976
900 return 0; 977 return 0;
901} 978}
902 979
903static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len) 980static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len,
981 struct ath6kl_vif *vif)
904{ 982{
905 struct wmi_bss_info_hdr2 *bih; 983 struct wmi_bss_info_hdr2 *bih;
906 u8 *buf; 984 u8 *buf;
@@ -927,26 +1005,27 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
927 return 0; /* Only update BSS table for now */ 1005 return 0; /* Only update BSS table for now */
928 1006
929 if (bih->frame_type == BEACON_FTYPE && 1007 if (bih->frame_type == BEACON_FTYPE &&
930 test_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag)) { 1008 test_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags)) {
931 clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag); 1009 clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
932 ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0); 1010 ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
1011 NONE_BSS_FILTER, 0);
933 } 1012 }
934 1013
935 channel = ieee80211_get_channel(ar->wdev->wiphy, le16_to_cpu(bih->ch)); 1014 channel = ieee80211_get_channel(ar->wiphy, le16_to_cpu(bih->ch));
936 if (channel == NULL) 1015 if (channel == NULL)
937 return -EINVAL; 1016 return -EINVAL;
938 1017
939 if (len < 8 + 2 + 2) 1018 if (len < 8 + 2 + 2)
940 return -EINVAL; 1019 return -EINVAL;
941 1020
942 if (bih->frame_type == BEACON_FTYPE && test_bit(CONNECTED, &ar->flag) && 1021 if (bih->frame_type == BEACON_FTYPE && test_bit(CONNECTED, &vif->flags)
943 memcmp(bih->bssid, ar->bssid, ETH_ALEN) == 0) { 1022 && memcmp(bih->bssid, vif->bssid, ETH_ALEN) == 0) {
944 const u8 *tim; 1023 const u8 *tim;
945 tim = cfg80211_find_ie(WLAN_EID_TIM, buf + 8 + 2 + 2, 1024 tim = cfg80211_find_ie(WLAN_EID_TIM, buf + 8 + 2 + 2,
946 len - 8 - 2 - 2); 1025 len - 8 - 2 - 2);
947 if (tim && tim[1] >= 2) { 1026 if (tim && tim[1] >= 2) {
948 ar->assoc_bss_dtim_period = tim[3]; 1027 vif->assoc_bss_dtim_period = tim[3];
949 set_bit(DTIM_PERIOD_AVAIL, &ar->flag); 1028 set_bit(DTIM_PERIOD_AVAIL, &vif->flags);
950 } 1029 }
951 } 1030 }
952 1031
@@ -966,7 +1045,7 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
966 IEEE80211_STYPE_BEACON); 1045 IEEE80211_STYPE_BEACON);
967 memset(mgmt->da, 0xff, ETH_ALEN); 1046 memset(mgmt->da, 0xff, ETH_ALEN);
968 } else { 1047 } else {
969 struct net_device *dev = ar->net_dev; 1048 struct net_device *dev = vif->ndev;
970 1049
971 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 1050 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
972 IEEE80211_STYPE_PROBE_RESP); 1051 IEEE80211_STYPE_PROBE_RESP);
@@ -979,7 +1058,7 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
979 1058
980 memcpy(&mgmt->u.beacon, buf, len); 1059 memcpy(&mgmt->u.beacon, buf, len);
981 1060
982 bss = cfg80211_inform_bss_frame(ar->wdev->wiphy, channel, mgmt, 1061 bss = cfg80211_inform_bss_frame(ar->wiphy, channel, mgmt,
983 24 + len, (bih->snr - 95) * 100, 1062 24 + len, (bih->snr - 95) * 100,
984 GFP_ATOMIC); 1063 GFP_ATOMIC);
985 kfree(mgmt); 1064 kfree(mgmt);
@@ -1094,20 +1173,21 @@ static int ath6kl_wmi_keepalive_reply_rx(struct wmi *wmi, u8 *datap, int len)
1094 return 0; 1173 return 0;
1095} 1174}
1096 1175
1097static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len) 1176static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len,
1177 struct ath6kl_vif *vif)
1098{ 1178{
1099 struct wmi_scan_complete_event *ev; 1179 struct wmi_scan_complete_event *ev;
1100 1180
1101 ev = (struct wmi_scan_complete_event *) datap; 1181 ev = (struct wmi_scan_complete_event *) datap;
1102 1182
1103 ath6kl_scan_complete_evt(wmi->parent_dev, a_sle32_to_cpu(ev->status)); 1183 ath6kl_scan_complete_evt(vif, a_sle32_to_cpu(ev->status));
1104 wmi->is_probe_ssid = false; 1184 wmi->is_probe_ssid = false;
1105 1185
1106 return 0; 1186 return 0;
1107} 1187}
1108 1188
1109static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap, 1189static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap,
1110 int len) 1190 int len, struct ath6kl_vif *vif)
1111{ 1191{
1112 struct wmi_neighbor_report_event *ev; 1192 struct wmi_neighbor_report_event *ev;
1113 u8 i; 1193 u8 i;
@@ -1125,7 +1205,7 @@ static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap,
1125 ath6kl_dbg(ATH6KL_DBG_WMI, "neighbor %d/%d - %pM 0x%x\n", 1205 ath6kl_dbg(ATH6KL_DBG_WMI, "neighbor %d/%d - %pM 0x%x\n",
1126 i + 1, ev->num_neighbors, ev->neighbor[i].bssid, 1206 i + 1, ev->num_neighbors, ev->neighbor[i].bssid,
1127 ev->neighbor[i].bss_flags); 1207 ev->neighbor[i].bss_flags);
1128 cfg80211_pmksa_candidate_notify(wmi->parent_dev->net_dev, i, 1208 cfg80211_pmksa_candidate_notify(vif->ndev, i,
1129 ev->neighbor[i].bssid, 1209 ev->neighbor[i].bssid,
1130 !!(ev->neighbor[i].bss_flags & 1210 !!(ev->neighbor[i].bss_flags &
1131 WMI_PREAUTH_CAPABLE_BSS), 1211 WMI_PREAUTH_CAPABLE_BSS),
@@ -1166,9 +1246,10 @@ static int ath6kl_wmi_error_event_rx(struct wmi *wmi, u8 *datap, int len)
1166 return 0; 1246 return 0;
1167} 1247}
1168 1248
1169static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len) 1249static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len,
1250 struct ath6kl_vif *vif)
1170{ 1251{
1171 ath6kl_tgt_stats_event(wmi->parent_dev, datap, len); 1252 ath6kl_tgt_stats_event(vif, datap, len);
1172 1253
1173 return 0; 1254 return 0;
1174} 1255}
@@ -1222,7 +1303,7 @@ static int ath6kl_wmi_send_rssi_threshold_params(struct wmi *wmi,
1222 cmd = (struct wmi_rssi_threshold_params_cmd *) skb->data; 1303 cmd = (struct wmi_rssi_threshold_params_cmd *) skb->data;
1223 memcpy(cmd, rssi_cmd, sizeof(struct wmi_rssi_threshold_params_cmd)); 1304 memcpy(cmd, rssi_cmd, sizeof(struct wmi_rssi_threshold_params_cmd));
1224 1305
1225 return ath6kl_wmi_cmd_send(wmi, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID, 1306 return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID,
1226 NO_SYNC_WMIFLAG); 1307 NO_SYNC_WMIFLAG);
1227} 1308}
1228 1309
@@ -1322,7 +1403,8 @@ static int ath6kl_wmi_rssi_threshold_event_rx(struct wmi *wmi, u8 *datap,
1322 return 0; 1403 return 0;
1323} 1404}
1324 1405
1325static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len) 1406static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
1407 struct ath6kl_vif *vif)
1326{ 1408{
1327 struct wmi_cac_event *reply; 1409 struct wmi_cac_event *reply;
1328 struct ieee80211_tspec_ie *ts; 1410 struct ieee80211_tspec_ie *ts;
@@ -1343,7 +1425,8 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len)
1343 tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) & 1425 tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
1344 IEEE80211_WMM_IE_TSPEC_TID_MASK; 1426 IEEE80211_WMM_IE_TSPEC_TID_MASK;
1345 1427
1346 ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, tsid); 1428 ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx,
1429 reply->ac, tsid);
1347 } else if (reply->cac_indication == CAC_INDICATION_NO_RESP) { 1430 } else if (reply->cac_indication == CAC_INDICATION_NO_RESP) {
1348 /* 1431 /*
1349 * Following assumes that there is only one outstanding 1432 * Following assumes that there is only one outstanding
@@ -1358,7 +1441,8 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len)
1358 break; 1441 break;
1359 } 1442 }
1360 if (index < (sizeof(active_tsids) * 8)) 1443 if (index < (sizeof(active_tsids) * 8))
1361 ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, index); 1444 ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx,
1445 reply->ac, index);
1362 } 1446 }
1363 1447
1364 /* 1448 /*
@@ -1403,7 +1487,7 @@ static int ath6kl_wmi_send_snr_threshold_params(struct wmi *wmi,
1403 cmd = (struct wmi_snr_threshold_params_cmd *) skb->data; 1487 cmd = (struct wmi_snr_threshold_params_cmd *) skb->data;
1404 memcpy(cmd, snr_cmd, sizeof(struct wmi_snr_threshold_params_cmd)); 1488 memcpy(cmd, snr_cmd, sizeof(struct wmi_snr_threshold_params_cmd));
1405 1489
1406 return ath6kl_wmi_cmd_send(wmi, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID, 1490 return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID,
1407 NO_SYNC_WMIFLAG); 1491 NO_SYNC_WMIFLAG);
1408} 1492}
1409 1493
@@ -1528,14 +1612,15 @@ static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len)
1528 return 0; 1612 return 0;
1529} 1613}
1530 1614
1531int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb, 1615int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb,
1532 enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag) 1616 enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag)
1533{ 1617{
1534 struct wmi_cmd_hdr *cmd_hdr; 1618 struct wmi_cmd_hdr *cmd_hdr;
1535 enum htc_endpoint_id ep_id = wmi->ep_id; 1619 enum htc_endpoint_id ep_id = wmi->ep_id;
1536 int ret; 1620 int ret;
1621 u16 info1;
1537 1622
1538 if (WARN_ON(skb == NULL)) 1623 if (WARN_ON(skb == NULL || (if_idx > (MAX_NUM_VIF - 1))))
1539 return -EINVAL; 1624 return -EINVAL;
1540 1625
1541 ath6kl_dbg(ATH6KL_DBG_WMI, "wmi tx id %d len %d flag %d\n", 1626 ath6kl_dbg(ATH6KL_DBG_WMI, "wmi tx id %d len %d flag %d\n",
@@ -1554,19 +1639,20 @@ int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
1554 * Make sure all data currently queued is transmitted before 1639 * Make sure all data currently queued is transmitted before
1555 * the cmd execution. Establish a new sync point. 1640 * the cmd execution. Establish a new sync point.
1556 */ 1641 */
1557 ath6kl_wmi_sync_point(wmi); 1642 ath6kl_wmi_sync_point(wmi, if_idx);
1558 } 1643 }
1559 1644
1560 skb_push(skb, sizeof(struct wmi_cmd_hdr)); 1645 skb_push(skb, sizeof(struct wmi_cmd_hdr));
1561 1646
1562 cmd_hdr = (struct wmi_cmd_hdr *) skb->data; 1647 cmd_hdr = (struct wmi_cmd_hdr *) skb->data;
1563 cmd_hdr->cmd_id = cpu_to_le16(cmd_id); 1648 cmd_hdr->cmd_id = cpu_to_le16(cmd_id);
1564 cmd_hdr->info1 = 0; /* added for virtual interface */ 1649 info1 = if_idx & WMI_CMD_HDR_IF_ID_MASK;
1650 cmd_hdr->info1 = cpu_to_le16(info1);
1565 1651
1566 /* Only for OPT_TX_CMD, use BE endpoint. */ 1652 /* Only for OPT_TX_CMD, use BE endpoint. */
1567 if (cmd_id == WMI_OPT_TX_FRAME_CMDID) { 1653 if (cmd_id == WMI_OPT_TX_FRAME_CMDID) {
1568 ret = ath6kl_wmi_data_hdr_add(wmi, skb, OPT_MSGTYPE, 1654 ret = ath6kl_wmi_data_hdr_add(wmi, skb, OPT_MSGTYPE,
1569 false, false, 0, NULL); 1655 false, false, 0, NULL, if_idx);
1570 if (ret) { 1656 if (ret) {
1571 dev_kfree_skb(skb); 1657 dev_kfree_skb(skb);
1572 return ret; 1658 return ret;
@@ -1582,13 +1668,14 @@ int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
1582 * Make sure all new data queued waits for the command to 1668 * Make sure all new data queued waits for the command to
1583 * execute. Establish a new sync point. 1669 * execute. Establish a new sync point.
1584 */ 1670 */
1585 ath6kl_wmi_sync_point(wmi); 1671 ath6kl_wmi_sync_point(wmi, if_idx);
1586 } 1672 }
1587 1673
1588 return 0; 1674 return 0;
1589} 1675}
1590 1676
1591int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type, 1677int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
1678 enum network_type nw_type,
1592 enum dot11_auth_mode dot11_auth_mode, 1679 enum dot11_auth_mode dot11_auth_mode,
1593 enum auth_mode auth_mode, 1680 enum auth_mode auth_mode,
1594 enum crypto_type pairwise_crypto, 1681 enum crypto_type pairwise_crypto,
@@ -1639,15 +1726,14 @@ int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
1639 if (bssid != NULL) 1726 if (bssid != NULL)
1640 memcpy(cc->bssid, bssid, ETH_ALEN); 1727 memcpy(cc->bssid, bssid, ETH_ALEN);
1641 1728
1642 wmi->pair_crypto_type = pairwise_crypto; 1729 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CONNECT_CMDID,
1643 wmi->grp_crypto_type = group_crypto; 1730 NO_SYNC_WMIFLAG);
1644
1645 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CONNECT_CMDID, NO_SYNC_WMIFLAG);
1646 1731
1647 return ret; 1732 return ret;
1648} 1733}
1649 1734
1650int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel) 1735int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid,
1736 u16 channel)
1651{ 1737{
1652 struct sk_buff *skb; 1738 struct sk_buff *skb;
1653 struct wmi_reconnect_cmd *cc; 1739 struct wmi_reconnect_cmd *cc;
@@ -1668,13 +1754,13 @@ int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel)
1668 if (bssid != NULL) 1754 if (bssid != NULL)
1669 memcpy(cc->bssid, bssid, ETH_ALEN); 1755 memcpy(cc->bssid, bssid, ETH_ALEN);
1670 1756
1671 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RECONNECT_CMDID, 1757 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_RECONNECT_CMDID,
1672 NO_SYNC_WMIFLAG); 1758 NO_SYNC_WMIFLAG);
1673 1759
1674 return ret; 1760 return ret;
1675} 1761}
1676 1762
1677int ath6kl_wmi_disconnect_cmd(struct wmi *wmi) 1763int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx)
1678{ 1764{
1679 int ret; 1765 int ret;
1680 1766
@@ -1683,12 +1769,13 @@ int ath6kl_wmi_disconnect_cmd(struct wmi *wmi)
1683 wmi->traffic_class = 100; 1769 wmi->traffic_class = 100;
1684 1770
1685 /* Disconnect command does not need to do a SYNC before. */ 1771 /* Disconnect command does not need to do a SYNC before. */
1686 ret = ath6kl_wmi_simple_cmd(wmi, WMI_DISCONNECT_CMDID); 1772 ret = ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_DISCONNECT_CMDID);
1687 1773
1688 return ret; 1774 return ret;
1689} 1775}
1690 1776
1691int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type, 1777int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
1778 enum wmi_scan_type scan_type,
1692 u32 force_fgscan, u32 is_legacy, 1779 u32 force_fgscan, u32 is_legacy,
1693 u32 home_dwell_time, u32 force_scan_interval, 1780 u32 home_dwell_time, u32 force_scan_interval,
1694 s8 num_chan, u16 *ch_list) 1781 s8 num_chan, u16 *ch_list)
@@ -1724,13 +1811,14 @@ int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
1724 for (i = 0; i < num_chan; i++) 1811 for (i = 0; i < num_chan; i++)
1725 sc->ch_list[i] = cpu_to_le16(ch_list[i]); 1812 sc->ch_list[i] = cpu_to_le16(ch_list[i]);
1726 1813
1727 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_START_SCAN_CMDID, 1814 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_START_SCAN_CMDID,
1728 NO_SYNC_WMIFLAG); 1815 NO_SYNC_WMIFLAG);
1729 1816
1730 return ret; 1817 return ret;
1731} 1818}
1732 1819
1733int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec, 1820int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx,
1821 u16 fg_start_sec,
1734 u16 fg_end_sec, u16 bg_sec, 1822 u16 fg_end_sec, u16 bg_sec,
1735 u16 minact_chdw_msec, u16 maxact_chdw_msec, 1823 u16 minact_chdw_msec, u16 maxact_chdw_msec,
1736 u16 pas_chdw_msec, u8 short_scan_ratio, 1824 u16 pas_chdw_msec, u8 short_scan_ratio,
@@ -1757,12 +1845,12 @@ int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
1757 sc->max_dfsch_act_time = cpu_to_le32(max_dfsch_act_time); 1845 sc->max_dfsch_act_time = cpu_to_le32(max_dfsch_act_time);
1758 sc->maxact_scan_per_ssid = cpu_to_le16(maxact_scan_per_ssid); 1846 sc->maxact_scan_per_ssid = cpu_to_le16(maxact_scan_per_ssid);
1759 1847
1760 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_SCAN_PARAMS_CMDID, 1848 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_SCAN_PARAMS_CMDID,
1761 NO_SYNC_WMIFLAG); 1849 NO_SYNC_WMIFLAG);
1762 return ret; 1850 return ret;
1763} 1851}
1764 1852
1765int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask) 1853int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 if_idx, u8 filter, u32 ie_mask)
1766{ 1854{
1767 struct sk_buff *skb; 1855 struct sk_buff *skb;
1768 struct wmi_bss_filter_cmd *cmd; 1856 struct wmi_bss_filter_cmd *cmd;
@@ -1779,12 +1867,12 @@ int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask)
1779 cmd->bss_filter = filter; 1867 cmd->bss_filter = filter;
1780 cmd->ie_mask = cpu_to_le32(ie_mask); 1868 cmd->ie_mask = cpu_to_le32(ie_mask);
1781 1869
1782 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_BSS_FILTER_CMDID, 1870 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_BSS_FILTER_CMDID,
1783 NO_SYNC_WMIFLAG); 1871 NO_SYNC_WMIFLAG);
1784 return ret; 1872 return ret;
1785} 1873}
1786 1874
1787int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag, 1875int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag,
1788 u8 ssid_len, u8 *ssid) 1876 u8 ssid_len, u8 *ssid)
1789{ 1877{
1790 struct sk_buff *skb; 1878 struct sk_buff *skb;
@@ -1816,12 +1904,13 @@ int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
1816 cmd->ssid_len = ssid_len; 1904 cmd->ssid_len = ssid_len;
1817 memcpy(cmd->ssid, ssid, ssid_len); 1905 memcpy(cmd->ssid, ssid, ssid_len);
1818 1906
1819 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PROBED_SSID_CMDID, 1907 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_PROBED_SSID_CMDID,
1820 NO_SYNC_WMIFLAG); 1908 NO_SYNC_WMIFLAG);
1821 return ret; 1909 return ret;
1822} 1910}
1823 1911
1824int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval, 1912int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx,
1913 u16 listen_interval,
1825 u16 listen_beacons) 1914 u16 listen_beacons)
1826{ 1915{
1827 struct sk_buff *skb; 1916 struct sk_buff *skb;
@@ -1836,12 +1925,12 @@ int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
1836 cmd->listen_intvl = cpu_to_le16(listen_interval); 1925 cmd->listen_intvl = cpu_to_le16(listen_interval);
1837 cmd->num_beacons = cpu_to_le16(listen_beacons); 1926 cmd->num_beacons = cpu_to_le16(listen_beacons);
1838 1927
1839 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LISTEN_INT_CMDID, 1928 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LISTEN_INT_CMDID,
1840 NO_SYNC_WMIFLAG); 1929 NO_SYNC_WMIFLAG);
1841 return ret; 1930 return ret;
1842} 1931}
1843 1932
1844int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode) 1933int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode)
1845{ 1934{
1846 struct sk_buff *skb; 1935 struct sk_buff *skb;
1847 struct wmi_power_mode_cmd *cmd; 1936 struct wmi_power_mode_cmd *cmd;
@@ -1855,12 +1944,12 @@ int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode)
1855 cmd->pwr_mode = pwr_mode; 1944 cmd->pwr_mode = pwr_mode;
1856 wmi->pwr_mode = pwr_mode; 1945 wmi->pwr_mode = pwr_mode;
1857 1946
1858 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_MODE_CMDID, 1947 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_POWER_MODE_CMDID,
1859 NO_SYNC_WMIFLAG); 1948 NO_SYNC_WMIFLAG);
1860 return ret; 1949 return ret;
1861} 1950}
1862 1951
1863int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period, 1952int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period,
1864 u16 ps_poll_num, u16 dtim_policy, 1953 u16 ps_poll_num, u16 dtim_policy,
1865 u16 tx_wakeup_policy, u16 num_tx_to_wakeup, 1954 u16 tx_wakeup_policy, u16 num_tx_to_wakeup,
1866 u16 ps_fail_event_policy) 1955 u16 ps_fail_event_policy)
@@ -1881,12 +1970,12 @@ int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
1881 pm->num_tx_to_wakeup = cpu_to_le16(num_tx_to_wakeup); 1970 pm->num_tx_to_wakeup = cpu_to_le16(num_tx_to_wakeup);
1882 pm->ps_fail_event_policy = cpu_to_le16(ps_fail_event_policy); 1971 pm->ps_fail_event_policy = cpu_to_le16(ps_fail_event_policy);
1883 1972
1884 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_PARAMS_CMDID, 1973 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_POWER_PARAMS_CMDID,
1885 NO_SYNC_WMIFLAG); 1974 NO_SYNC_WMIFLAG);
1886 return ret; 1975 return ret;
1887} 1976}
1888 1977
1889int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout) 1978int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout)
1890{ 1979{
1891 struct sk_buff *skb; 1980 struct sk_buff *skb;
1892 struct wmi_disc_timeout_cmd *cmd; 1981 struct wmi_disc_timeout_cmd *cmd;
@@ -1899,15 +1988,20 @@ int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout)
1899 cmd = (struct wmi_disc_timeout_cmd *) skb->data; 1988 cmd = (struct wmi_disc_timeout_cmd *) skb->data;
1900 cmd->discon_timeout = timeout; 1989 cmd->discon_timeout = timeout;
1901 1990
1902 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_DISC_TIMEOUT_CMDID, 1991 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_DISC_TIMEOUT_CMDID,
1903 NO_SYNC_WMIFLAG); 1992 NO_SYNC_WMIFLAG);
1993
1994 if (ret == 0)
1995 ath6kl_debug_set_disconnect_timeout(wmi->parent_dev, timeout);
1996
1904 return ret; 1997 return ret;
1905} 1998}
1906 1999
1907int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index, 2000int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
1908 enum crypto_type key_type, 2001 enum crypto_type key_type,
1909 u8 key_usage, u8 key_len, 2002 u8 key_usage, u8 key_len,
1910 u8 *key_rsc, u8 *key_material, 2003 u8 *key_rsc, unsigned int key_rsc_len,
2004 u8 *key_material,
1911 u8 key_op_ctrl, u8 *mac_addr, 2005 u8 key_op_ctrl, u8 *mac_addr,
1912 enum wmi_sync_flag sync_flag) 2006 enum wmi_sync_flag sync_flag)
1913{ 2007{
@@ -1920,7 +2014,7 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
1920 key_index, key_type, key_usage, key_len, key_op_ctrl); 2014 key_index, key_type, key_usage, key_len, key_op_ctrl);
1921 2015
1922 if ((key_index > WMI_MAX_KEY_INDEX) || (key_len > WMI_MAX_KEY_LEN) || 2016 if ((key_index > WMI_MAX_KEY_INDEX) || (key_len > WMI_MAX_KEY_LEN) ||
1923 (key_material == NULL)) 2017 (key_material == NULL) || key_rsc_len > 8)
1924 return -EINVAL; 2018 return -EINVAL;
1925 2019
1926 if ((WEP_CRYPT != key_type) && (NULL == key_rsc)) 2020 if ((WEP_CRYPT != key_type) && (NULL == key_rsc))
@@ -1938,20 +2032,20 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
1938 memcpy(cmd->key, key_material, key_len); 2032 memcpy(cmd->key, key_material, key_len);
1939 2033
1940 if (key_rsc != NULL) 2034 if (key_rsc != NULL)
1941 memcpy(cmd->key_rsc, key_rsc, sizeof(cmd->key_rsc)); 2035 memcpy(cmd->key_rsc, key_rsc, key_rsc_len);
1942 2036
1943 cmd->key_op_ctrl = key_op_ctrl; 2037 cmd->key_op_ctrl = key_op_ctrl;
1944 2038
1945 if (mac_addr) 2039 if (mac_addr)
1946 memcpy(cmd->key_mac_addr, mac_addr, ETH_ALEN); 2040 memcpy(cmd->key_mac_addr, mac_addr, ETH_ALEN);
1947 2041
1948 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_CIPHER_KEY_CMDID, 2042 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_CIPHER_KEY_CMDID,
1949 sync_flag); 2043 sync_flag);
1950 2044
1951 return ret; 2045 return ret;
1952} 2046}
1953 2047
1954int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk) 2048int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk)
1955{ 2049{
1956 struct sk_buff *skb; 2050 struct sk_buff *skb;
1957 struct wmi_add_krk_cmd *cmd; 2051 struct wmi_add_krk_cmd *cmd;
@@ -1964,12 +2058,13 @@ int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk)
1964 cmd = (struct wmi_add_krk_cmd *) skb->data; 2058 cmd = (struct wmi_add_krk_cmd *) skb->data;
1965 memcpy(cmd->krk, krk, WMI_KRK_LEN); 2059 memcpy(cmd->krk, krk, WMI_KRK_LEN);
1966 2060
1967 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_KRK_CMDID, NO_SYNC_WMIFLAG); 2061 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_KRK_CMDID,
2062 NO_SYNC_WMIFLAG);
1968 2063
1969 return ret; 2064 return ret;
1970} 2065}
1971 2066
1972int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index) 2067int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index)
1973{ 2068{
1974 struct sk_buff *skb; 2069 struct sk_buff *skb;
1975 struct wmi_delete_cipher_key_cmd *cmd; 2070 struct wmi_delete_cipher_key_cmd *cmd;
@@ -1985,13 +2080,13 @@ int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index)
1985 cmd = (struct wmi_delete_cipher_key_cmd *) skb->data; 2080 cmd = (struct wmi_delete_cipher_key_cmd *) skb->data;
1986 cmd->key_index = key_index; 2081 cmd->key_index = key_index;
1987 2082
1988 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_CIPHER_KEY_CMDID, 2083 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DELETE_CIPHER_KEY_CMDID,
1989 NO_SYNC_WMIFLAG); 2084 NO_SYNC_WMIFLAG);
1990 2085
1991 return ret; 2086 return ret;
1992} 2087}
1993 2088
1994int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid, 2089int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid,
1995 const u8 *pmkid, bool set) 2090 const u8 *pmkid, bool set)
1996{ 2091{
1997 struct sk_buff *skb; 2092 struct sk_buff *skb;
@@ -2018,14 +2113,14 @@ int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
2018 cmd->enable = PMKID_DISABLE; 2113 cmd->enable = PMKID_DISABLE;
2019 } 2114 }
2020 2115
2021 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PMKID_CMDID, 2116 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_PMKID_CMDID,
2022 NO_SYNC_WMIFLAG); 2117 NO_SYNC_WMIFLAG);
2023 2118
2024 return ret; 2119 return ret;
2025} 2120}
2026 2121
2027static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb, 2122static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb,
2028 enum htc_endpoint_id ep_id) 2123 enum htc_endpoint_id ep_id, u8 if_idx)
2029{ 2124{
2030 struct wmi_data_hdr *data_hdr; 2125 struct wmi_data_hdr *data_hdr;
2031 int ret; 2126 int ret;
@@ -2037,14 +2132,14 @@ static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb,
2037 2132
2038 data_hdr = (struct wmi_data_hdr *) skb->data; 2133 data_hdr = (struct wmi_data_hdr *) skb->data;
2039 data_hdr->info = SYNC_MSGTYPE << WMI_DATA_HDR_MSG_TYPE_SHIFT; 2134 data_hdr->info = SYNC_MSGTYPE << WMI_DATA_HDR_MSG_TYPE_SHIFT;
2040 data_hdr->info3 = 0; 2135 data_hdr->info3 = cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK);
2041 2136
2042 ret = ath6kl_control_tx(wmi->parent_dev, skb, ep_id); 2137 ret = ath6kl_control_tx(wmi->parent_dev, skb, ep_id);
2043 2138
2044 return ret; 2139 return ret;
2045} 2140}
2046 2141
2047static int ath6kl_wmi_sync_point(struct wmi *wmi) 2142static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
2048{ 2143{
2049 struct sk_buff *skb; 2144 struct sk_buff *skb;
2050 struct wmi_sync_cmd *cmd; 2145 struct wmi_sync_cmd *cmd;
@@ -2100,7 +2195,7 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi)
2100 * Send sync cmd followed by sync data messages on all 2195 * Send sync cmd followed by sync data messages on all
2101 * endpoints being used 2196 * endpoints being used
2102 */ 2197 */
2103 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SYNCHRONIZE_CMDID, 2198 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SYNCHRONIZE_CMDID,
2104 NO_SYNC_WMIFLAG); 2199 NO_SYNC_WMIFLAG);
2105 2200
2106 if (ret) 2201 if (ret)
@@ -2119,7 +2214,7 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi)
2119 traffic_class); 2214 traffic_class);
2120 ret = 2215 ret =
2121 ath6kl_wmi_data_sync_send(wmi, data_sync_bufs[index].skb, 2216 ath6kl_wmi_data_sync_send(wmi, data_sync_bufs[index].skb,
2122 ep_id); 2217 ep_id, if_idx);
2123 2218
2124 if (ret) 2219 if (ret)
2125 break; 2220 break;
@@ -2142,7 +2237,7 @@ free_skb:
2142 return ret; 2237 return ret;
2143} 2238}
2144 2239
2145int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, 2240int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx,
2146 struct wmi_create_pstream_cmd *params) 2241 struct wmi_create_pstream_cmd *params)
2147{ 2242{
2148 struct sk_buff *skb; 2243 struct sk_buff *skb;
@@ -2231,12 +2326,13 @@ int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
2231 ath6kl_indicate_tx_activity(wmi->parent_dev, 2326 ath6kl_indicate_tx_activity(wmi->parent_dev,
2232 params->traffic_class, true); 2327 params->traffic_class, true);
2233 2328
2234 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CREATE_PSTREAM_CMDID, 2329 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CREATE_PSTREAM_CMDID,
2235 NO_SYNC_WMIFLAG); 2330 NO_SYNC_WMIFLAG);
2236 return ret; 2331 return ret;
2237} 2332}
2238 2333
2239int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid) 2334int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
2335 u8 tsid)
2240{ 2336{
2241 struct sk_buff *skb; 2337 struct sk_buff *skb;
2242 struct wmi_delete_pstream_cmd *cmd; 2338 struct wmi_delete_pstream_cmd *cmd;
@@ -2272,7 +2368,7 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid)
2272 "sending delete_pstream_cmd: traffic class: %d tsid=%d\n", 2368 "sending delete_pstream_cmd: traffic class: %d tsid=%d\n",
2273 traffic_class, tsid); 2369 traffic_class, tsid);
2274 2370
2275 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_PSTREAM_CMDID, 2371 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DELETE_PSTREAM_CMDID,
2276 SYNC_BEFORE_WMIFLAG); 2372 SYNC_BEFORE_WMIFLAG);
2277 2373
2278 spin_lock_bh(&wmi->lock); 2374 spin_lock_bh(&wmi->lock);
@@ -2311,17 +2407,173 @@ int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd)
2311 cmd = (struct wmi_set_ip_cmd *) skb->data; 2407 cmd = (struct wmi_set_ip_cmd *) skb->data;
2312 memcpy(cmd, ip_cmd, sizeof(struct wmi_set_ip_cmd)); 2408 memcpy(cmd, ip_cmd, sizeof(struct wmi_set_ip_cmd));
2313 2409
2314 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_IP_CMDID, NO_SYNC_WMIFLAG); 2410 ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_IP_CMDID,
2411 NO_SYNC_WMIFLAG);
2412 return ret;
2413}
2414
2415static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi)
2416{
2417 u16 active_tsids;
2418 u8 stream_exist;
2419 int i;
2420
2421 /*
2422 * Relinquish credits from all implicitly created pstreams
2423 * since when we go to sleep. If user created explicit
2424 * thinstreams exists with in a fatpipe leave them intact
2425 * for the user to delete.
2426 */
2427 spin_lock_bh(&wmi->lock);
2428 stream_exist = wmi->fat_pipe_exist;
2429 spin_unlock_bh(&wmi->lock);
2430
2431 for (i = 0; i < WMM_NUM_AC; i++) {
2432 if (stream_exist & (1 << i)) {
2433
2434 /*
2435 * FIXME: Is this lock & unlock inside
2436 * for loop correct? may need rework.
2437 */
2438 spin_lock_bh(&wmi->lock);
2439 active_tsids = wmi->stream_exist_for_ac[i];
2440 spin_unlock_bh(&wmi->lock);
2441
2442 /*
2443 * If there are no user created thin streams
2444 * delete the fatpipe
2445 */
2446 if (!active_tsids) {
2447 stream_exist &= ~(1 << i);
2448 /*
2449 * Indicate inactivity to driver layer for
2450 * this fatpipe (pstream)
2451 */
2452 ath6kl_indicate_tx_activity(wmi->parent_dev,
2453 i, false);
2454 }
2455 }
2456 }
2457
2458 /* FIXME: Can we do this assignment without locking ? */
2459 spin_lock_bh(&wmi->lock);
2460 wmi->fat_pipe_exist = stream_exist;
2461 spin_unlock_bh(&wmi->lock);
2462}
2463
2464int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
2465 enum ath6kl_host_mode host_mode)
2466{
2467 struct sk_buff *skb;
2468 struct wmi_set_host_sleep_mode_cmd *cmd;
2469 int ret;
2470
2471 if ((host_mode != ATH6KL_HOST_MODE_ASLEEP) &&
2472 (host_mode != ATH6KL_HOST_MODE_AWAKE)) {
2473 ath6kl_err("invalid host sleep mode: %d\n", host_mode);
2474 return -EINVAL;
2475 }
2476
2477 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
2478 if (!skb)
2479 return -ENOMEM;
2480
2481 cmd = (struct wmi_set_host_sleep_mode_cmd *) skb->data;
2482
2483 if (host_mode == ATH6KL_HOST_MODE_ASLEEP) {
2484 ath6kl_wmi_relinquish_implicit_pstream_credits(wmi);
2485 cmd->asleep = cpu_to_le32(1);
2486 } else
2487 cmd->awake = cpu_to_le32(1);
2488
2489 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
2490 WMI_SET_HOST_SLEEP_MODE_CMDID,
2491 NO_SYNC_WMIFLAG);
2315 return ret; 2492 return ret;
2316} 2493}
2317 2494
2318static int ath6kl_wmi_get_wow_list_event_rx(struct wmi *wmi, u8 * datap, 2495int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
2319 int len) 2496 enum ath6kl_wow_mode wow_mode,
2497 u32 filter, u16 host_req_delay)
2320{ 2498{
2321 if (len < sizeof(struct wmi_get_wow_list_reply)) 2499 struct sk_buff *skb;
2500 struct wmi_set_wow_mode_cmd *cmd;
2501 int ret;
2502
2503 if ((wow_mode != ATH6KL_WOW_MODE_ENABLE) &&
2504 wow_mode != ATH6KL_WOW_MODE_DISABLE) {
2505 ath6kl_err("invalid wow mode: %d\n", wow_mode);
2322 return -EINVAL; 2506 return -EINVAL;
2507 }
2323 2508
2324 return 0; 2509 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
2510 if (!skb)
2511 return -ENOMEM;
2512
2513 cmd = (struct wmi_set_wow_mode_cmd *) skb->data;
2514 cmd->enable_wow = cpu_to_le32(wow_mode);
2515 cmd->filter = cpu_to_le32(filter);
2516 cmd->host_req_delay = cpu_to_le16(host_req_delay);
2517
2518 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_WOW_MODE_CMDID,
2519 NO_SYNC_WMIFLAG);
2520 return ret;
2521}
2522
2523int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
2524 u8 list_id, u8 filter_size,
2525 u8 filter_offset, u8 *filter, u8 *mask)
2526{
2527 struct sk_buff *skb;
2528 struct wmi_add_wow_pattern_cmd *cmd;
2529 u16 size;
2530 u8 *filter_mask;
2531 int ret;
2532
2533 /*
2534 * Allocate additional memory in the buffer to hold
2535 * filter and mask value, which is twice of filter_size.
2536 */
2537 size = sizeof(*cmd) + (2 * filter_size);
2538
2539 skb = ath6kl_wmi_get_new_buf(size);
2540 if (!skb)
2541 return -ENOMEM;
2542
2543 cmd = (struct wmi_add_wow_pattern_cmd *) skb->data;
2544 cmd->filter_list_id = list_id;
2545 cmd->filter_size = filter_size;
2546 cmd->filter_offset = filter_offset;
2547
2548 memcpy(cmd->filter, filter, filter_size);
2549
2550 filter_mask = (u8 *) (cmd->filter + filter_size);
2551 memcpy(filter_mask, mask, filter_size);
2552
2553 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_WOW_PATTERN_CMDID,
2554 NO_SYNC_WMIFLAG);
2555
2556 return ret;
2557}
2558
2559int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
2560 u16 list_id, u16 filter_id)
2561{
2562 struct sk_buff *skb;
2563 struct wmi_del_wow_pattern_cmd *cmd;
2564 int ret;
2565
2566 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
2567 if (!skb)
2568 return -ENOMEM;
2569
2570 cmd = (struct wmi_del_wow_pattern_cmd *) skb->data;
2571 cmd->filter_list_id = cpu_to_le16(list_id);
2572 cmd->filter_id = cpu_to_le16(filter_id);
2573
2574 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DEL_WOW_PATTERN_CMDID,
2575 NO_SYNC_WMIFLAG);
2576 return ret;
2325} 2577}
2326 2578
2327static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb, 2579static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb,
@@ -2336,7 +2588,7 @@ static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb,
2336 cmd_hdr = (struct wmix_cmd_hdr *) skb->data; 2588 cmd_hdr = (struct wmix_cmd_hdr *) skb->data;
2337 cmd_hdr->cmd_id = cpu_to_le32(cmd_id); 2589 cmd_hdr->cmd_id = cpu_to_le32(cmd_id);
2338 2590
2339 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_EXTENSION_CMDID, sync_flag); 2591 ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_EXTENSION_CMDID, sync_flag);
2340 2592
2341 return ret; 2593 return ret;
2342} 2594}
@@ -2379,12 +2631,12 @@ int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config)
2379 return ret; 2631 return ret;
2380} 2632}
2381 2633
2382int ath6kl_wmi_get_stats_cmd(struct wmi *wmi) 2634int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx)
2383{ 2635{
2384 return ath6kl_wmi_simple_cmd(wmi, WMI_GET_STATISTICS_CMDID); 2636 return ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_GET_STATISTICS_CMDID);
2385} 2637}
2386 2638
2387int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM) 2639int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 if_idx, u8 dbM)
2388{ 2640{
2389 struct sk_buff *skb; 2641 struct sk_buff *skb;
2390 struct wmi_set_tx_pwr_cmd *cmd; 2642 struct wmi_set_tx_pwr_cmd *cmd;
@@ -2397,18 +2649,24 @@ int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM)
2397 cmd = (struct wmi_set_tx_pwr_cmd *) skb->data; 2649 cmd = (struct wmi_set_tx_pwr_cmd *) skb->data;
2398 cmd->dbM = dbM; 2650 cmd->dbM = dbM;
2399 2651
2400 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_TX_PWR_CMDID, 2652 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_TX_PWR_CMDID,
2401 NO_SYNC_WMIFLAG); 2653 NO_SYNC_WMIFLAG);
2402 2654
2403 return ret; 2655 return ret;
2404} 2656}
2405 2657
2406int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi) 2658int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi, u8 if_idx)
2407{ 2659{
2408 return ath6kl_wmi_simple_cmd(wmi, WMI_GET_TX_PWR_CMDID); 2660 return ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_GET_TX_PWR_CMDID);
2409} 2661}
2410 2662
2411int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status, u8 preamble_policy) 2663int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi)
2664{
2665 return ath6kl_wmi_simple_cmd(wmi, 0, WMI_GET_ROAM_TBL_CMDID);
2666}
2667
2668int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 if_idx, u8 status,
2669 u8 preamble_policy)
2412{ 2670{
2413 struct sk_buff *skb; 2671 struct sk_buff *skb;
2414 struct wmi_set_lpreamble_cmd *cmd; 2672 struct wmi_set_lpreamble_cmd *cmd;
@@ -2422,7 +2680,7 @@ int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status, u8 preamble_policy)
2422 cmd->status = status; 2680 cmd->status = status;
2423 cmd->preamble_policy = preamble_policy; 2681 cmd->preamble_policy = preamble_policy;
2424 2682
2425 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LPREAMBLE_CMDID, 2683 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LPREAMBLE_CMDID,
2426 NO_SYNC_WMIFLAG); 2684 NO_SYNC_WMIFLAG);
2427 return ret; 2685 return ret;
2428} 2686}
@@ -2440,11 +2698,12 @@ int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold)
2440 cmd = (struct wmi_set_rts_cmd *) skb->data; 2698 cmd = (struct wmi_set_rts_cmd *) skb->data;
2441 cmd->threshold = cpu_to_le16(threshold); 2699 cmd->threshold = cpu_to_le16(threshold);
2442 2700
2443 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_RTS_CMDID, NO_SYNC_WMIFLAG); 2701 ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_RTS_CMDID,
2702 NO_SYNC_WMIFLAG);
2444 return ret; 2703 return ret;
2445} 2704}
2446 2705
2447int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg) 2706int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg)
2448{ 2707{
2449 struct sk_buff *skb; 2708 struct sk_buff *skb;
2450 struct wmi_set_wmm_txop_cmd *cmd; 2709 struct wmi_set_wmm_txop_cmd *cmd;
@@ -2460,12 +2719,13 @@ int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg)
2460 cmd = (struct wmi_set_wmm_txop_cmd *) skb->data; 2719 cmd = (struct wmi_set_wmm_txop_cmd *) skb->data;
2461 cmd->txop_enable = cfg; 2720 cmd->txop_enable = cfg;
2462 2721
2463 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_WMM_TXOP_CMDID, 2722 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_WMM_TXOP_CMDID,
2464 NO_SYNC_WMIFLAG); 2723 NO_SYNC_WMIFLAG);
2465 return ret; 2724 return ret;
2466} 2725}
2467 2726
2468int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl) 2727int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
2728 u8 keep_alive_intvl)
2469{ 2729{
2470 struct sk_buff *skb; 2730 struct sk_buff *skb;
2471 struct wmi_set_keepalive_cmd *cmd; 2731 struct wmi_set_keepalive_cmd *cmd;
@@ -2477,10 +2737,13 @@ int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl)
2477 2737
2478 cmd = (struct wmi_set_keepalive_cmd *) skb->data; 2738 cmd = (struct wmi_set_keepalive_cmd *) skb->data;
2479 cmd->keep_alive_intvl = keep_alive_intvl; 2739 cmd->keep_alive_intvl = keep_alive_intvl;
2480 wmi->keep_alive_intvl = keep_alive_intvl;
2481 2740
2482 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_KEEPALIVE_CMDID, 2741 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_KEEPALIVE_CMDID,
2483 NO_SYNC_WMIFLAG); 2742 NO_SYNC_WMIFLAG);
2743
2744 if (ret == 0)
2745 ath6kl_debug_set_keepalive(wmi->parent_dev, keep_alive_intvl);
2746
2484 return ret; 2747 return ret;
2485} 2748}
2486 2749
@@ -2495,7 +2758,7 @@ int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len)
2495 2758
2496 memcpy(skb->data, buf, len); 2759 memcpy(skb->data, buf, len);
2497 2760
2498 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_TEST_CMDID, NO_SYNC_WMIFLAG); 2761 ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_TEST_CMDID, NO_SYNC_WMIFLAG);
2499 2762
2500 return ret; 2763 return ret;
2501} 2764}
@@ -2528,28 +2791,31 @@ static int ath6kl_wmi_get_pmkid_list_event_rx(struct wmi *wmi, u8 *datap,
2528 return 0; 2791 return 0;
2529} 2792}
2530 2793
2531static int ath6kl_wmi_addba_req_event_rx(struct wmi *wmi, u8 *datap, int len) 2794static int ath6kl_wmi_addba_req_event_rx(struct wmi *wmi, u8 *datap, int len,
2795 struct ath6kl_vif *vif)
2532{ 2796{
2533 struct wmi_addba_req_event *cmd = (struct wmi_addba_req_event *) datap; 2797 struct wmi_addba_req_event *cmd = (struct wmi_addba_req_event *) datap;
2534 2798
2535 aggr_recv_addba_req_evt(wmi->parent_dev, cmd->tid, 2799 aggr_recv_addba_req_evt(vif, cmd->tid,
2536 le16_to_cpu(cmd->st_seq_no), cmd->win_sz); 2800 le16_to_cpu(cmd->st_seq_no), cmd->win_sz);
2537 2801
2538 return 0; 2802 return 0;
2539} 2803}
2540 2804
2541static int ath6kl_wmi_delba_req_event_rx(struct wmi *wmi, u8 *datap, int len) 2805static int ath6kl_wmi_delba_req_event_rx(struct wmi *wmi, u8 *datap, int len,
2806 struct ath6kl_vif *vif)
2542{ 2807{
2543 struct wmi_delba_event *cmd = (struct wmi_delba_event *) datap; 2808 struct wmi_delba_event *cmd = (struct wmi_delba_event *) datap;
2544 2809
2545 aggr_recv_delba_req_evt(wmi->parent_dev, cmd->tid); 2810 aggr_recv_delba_req_evt(vif, cmd->tid);
2546 2811
2547 return 0; 2812 return 0;
2548} 2813}
2549 2814
2550/* AP mode functions */ 2815/* AP mode functions */
2551 2816
2552int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p) 2817int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx,
2818 struct wmi_connect_cmd *p)
2553{ 2819{
2554 struct sk_buff *skb; 2820 struct sk_buff *skb;
2555 struct wmi_connect_cmd *cm; 2821 struct wmi_connect_cmd *cm;
@@ -2562,7 +2828,7 @@ int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p)
2562 cm = (struct wmi_connect_cmd *) skb->data; 2828 cm = (struct wmi_connect_cmd *) skb->data;
2563 memcpy(cm, p, sizeof(*cm)); 2829 memcpy(cm, p, sizeof(*cm));
2564 2830
2565 res = ath6kl_wmi_cmd_send(wmip, skb, WMI_AP_CONFIG_COMMIT_CMDID, 2831 res = ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_CONFIG_COMMIT_CMDID,
2566 NO_SYNC_WMIFLAG); 2832 NO_SYNC_WMIFLAG);
2567 ath6kl_dbg(ATH6KL_DBG_WMI, "%s: nw_type=%u auth_mode=%u ch=%u " 2833 ath6kl_dbg(ATH6KL_DBG_WMI, "%s: nw_type=%u auth_mode=%u ch=%u "
2568 "ctrl_flags=0x%x-> res=%d\n", 2834 "ctrl_flags=0x%x-> res=%d\n",
@@ -2571,7 +2837,8 @@ int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p)
2571 return res; 2837 return res;
2572} 2838}
2573 2839
2574int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason) 2840int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd, const u8 *mac,
2841 u16 reason)
2575{ 2842{
2576 struct sk_buff *skb; 2843 struct sk_buff *skb;
2577 struct wmi_ap_set_mlme_cmd *cm; 2844 struct wmi_ap_set_mlme_cmd *cm;
@@ -2585,11 +2852,12 @@ int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason)
2585 cm->reason = cpu_to_le16(reason); 2852 cm->reason = cpu_to_le16(reason);
2586 cm->cmd = cmd; 2853 cm->cmd = cmd;
2587 2854
2588 return ath6kl_wmi_cmd_send(wmip, skb, WMI_AP_SET_MLME_CMDID, 2855 return ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_SET_MLME_CMDID,
2589 NO_SYNC_WMIFLAG); 2856 NO_SYNC_WMIFLAG);
2590} 2857}
2591 2858
2592static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len) 2859static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len,
2860 struct ath6kl_vif *vif)
2593{ 2861{
2594 struct wmi_pspoll_event *ev; 2862 struct wmi_pspoll_event *ev;
2595 2863
@@ -2598,19 +2866,21 @@ static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len)
2598 2866
2599 ev = (struct wmi_pspoll_event *) datap; 2867 ev = (struct wmi_pspoll_event *) datap;
2600 2868
2601 ath6kl_pspoll_event(wmi->parent_dev, le16_to_cpu(ev->aid)); 2869 ath6kl_pspoll_event(vif, le16_to_cpu(ev->aid));
2602 2870
2603 return 0; 2871 return 0;
2604} 2872}
2605 2873
2606static int ath6kl_wmi_dtimexpiry_event_rx(struct wmi *wmi, u8 *datap, int len) 2874static int ath6kl_wmi_dtimexpiry_event_rx(struct wmi *wmi, u8 *datap, int len,
2875 struct ath6kl_vif *vif)
2607{ 2876{
2608 ath6kl_dtimexpiry_event(wmi->parent_dev); 2877 ath6kl_dtimexpiry_event(vif);
2609 2878
2610 return 0; 2879 return 0;
2611} 2880}
2612 2881
2613int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag) 2882int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid,
2883 bool flag)
2614{ 2884{
2615 struct sk_buff *skb; 2885 struct sk_buff *skb;
2616 struct wmi_ap_set_pvb_cmd *cmd; 2886 struct wmi_ap_set_pvb_cmd *cmd;
@@ -2625,13 +2895,14 @@ int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag)
2625 cmd->rsvd = cpu_to_le16(0); 2895 cmd->rsvd = cpu_to_le16(0);
2626 cmd->flag = cpu_to_le32(flag); 2896 cmd->flag = cpu_to_le32(flag);
2627 2897
2628 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_AP_SET_PVB_CMDID, 2898 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_SET_PVB_CMDID,
2629 NO_SYNC_WMIFLAG); 2899 NO_SYNC_WMIFLAG);
2630 2900
2631 return 0; 2901 return 0;
2632} 2902}
2633 2903
2634int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_ver, 2904int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx,
2905 u8 rx_meta_ver,
2635 bool rx_dot11_hdr, bool defrag_on_host) 2906 bool rx_dot11_hdr, bool defrag_on_host)
2636{ 2907{
2637 struct sk_buff *skb; 2908 struct sk_buff *skb;
@@ -2648,14 +2919,14 @@ int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_ver,
2648 cmd->meta_ver = rx_meta_ver; 2919 cmd->meta_ver = rx_meta_ver;
2649 2920
2650 /* Delete the local aggr state, on host */ 2921 /* Delete the local aggr state, on host */
2651 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RX_FRAME_FORMAT_CMDID, 2922 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_RX_FRAME_FORMAT_CMDID,
2652 NO_SYNC_WMIFLAG); 2923 NO_SYNC_WMIFLAG);
2653 2924
2654 return ret; 2925 return ret;
2655} 2926}
2656 2927
2657int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie, 2928int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
2658 u8 ie_len) 2929 const u8 *ie, u8 ie_len)
2659{ 2930{
2660 struct sk_buff *skb; 2931 struct sk_buff *skb;
2661 struct wmi_set_appie_cmd *p; 2932 struct wmi_set_appie_cmd *p;
@@ -2670,7 +2941,7 @@ int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
2670 p->mgmt_frm_type = mgmt_frm_type; 2941 p->mgmt_frm_type = mgmt_frm_type;
2671 p->ie_len = ie_len; 2942 p->ie_len = ie_len;
2672 memcpy(p->ie_info, ie, ie_len); 2943 memcpy(p->ie_info, ie, ie_len);
2673 return ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_APPIE_CMDID, 2944 return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_APPIE_CMDID,
2674 NO_SYNC_WMIFLAG); 2945 NO_SYNC_WMIFLAG);
2675} 2946}
2676 2947
@@ -2688,11 +2959,11 @@ int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable)
2688 cmd = (struct wmi_disable_11b_rates_cmd *) skb->data; 2959 cmd = (struct wmi_disable_11b_rates_cmd *) skb->data;
2689 cmd->disable = disable ? 1 : 0; 2960 cmd->disable = disable ? 1 : 0;
2690 2961
2691 return ath6kl_wmi_cmd_send(wmi, skb, WMI_DISABLE_11B_RATES_CMDID, 2962 return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_DISABLE_11B_RATES_CMDID,
2692 NO_SYNC_WMIFLAG); 2963 NO_SYNC_WMIFLAG);
2693} 2964}
2694 2965
2695int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur) 2966int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq, u32 dur)
2696{ 2967{
2697 struct sk_buff *skb; 2968 struct sk_buff *skb;
2698 struct wmi_remain_on_chnl_cmd *p; 2969 struct wmi_remain_on_chnl_cmd *p;
@@ -2706,12 +2977,12 @@ int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur)
2706 p = (struct wmi_remain_on_chnl_cmd *) skb->data; 2977 p = (struct wmi_remain_on_chnl_cmd *) skb->data;
2707 p->freq = cpu_to_le32(freq); 2978 p->freq = cpu_to_le32(freq);
2708 p->duration = cpu_to_le32(dur); 2979 p->duration = cpu_to_le32(dur);
2709 return ath6kl_wmi_cmd_send(wmi, skb, WMI_REMAIN_ON_CHNL_CMDID, 2980 return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_REMAIN_ON_CHNL_CMDID,
2710 NO_SYNC_WMIFLAG); 2981 NO_SYNC_WMIFLAG);
2711} 2982}
2712 2983
2713int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait, 2984int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
2714 const u8 *data, u16 data_len) 2985 u32 wait, const u8 *data, u16 data_len)
2715{ 2986{
2716 struct sk_buff *skb; 2987 struct sk_buff *skb;
2717 struct wmi_send_action_cmd *p; 2988 struct wmi_send_action_cmd *p;
@@ -2731,6 +3002,7 @@ int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait,
2731 } 3002 }
2732 3003
2733 kfree(wmi->last_mgmt_tx_frame); 3004 kfree(wmi->last_mgmt_tx_frame);
3005 memcpy(buf, data, data_len);
2734 wmi->last_mgmt_tx_frame = buf; 3006 wmi->last_mgmt_tx_frame = buf;
2735 wmi->last_mgmt_tx_frame_len = data_len; 3007 wmi->last_mgmt_tx_frame_len = data_len;
2736 3008
@@ -2742,13 +3014,13 @@ int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait,
2742 p->wait = cpu_to_le32(wait); 3014 p->wait = cpu_to_le32(wait);
2743 p->len = cpu_to_le16(data_len); 3015 p->len = cpu_to_le16(data_len);
2744 memcpy(p->data, data, data_len); 3016 memcpy(p->data, data, data_len);
2745 return ath6kl_wmi_cmd_send(wmi, skb, WMI_SEND_ACTION_CMDID, 3017 return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SEND_ACTION_CMDID,
2746 NO_SYNC_WMIFLAG); 3018 NO_SYNC_WMIFLAG);
2747} 3019}
2748 3020
2749int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq, 3021int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
2750 const u8 *dst, 3022 const u8 *dst, const u8 *data,
2751 const u8 *data, u16 data_len) 3023 u16 data_len)
2752{ 3024{
2753 struct sk_buff *skb; 3025 struct sk_buff *skb;
2754 struct wmi_p2p_probe_response_cmd *p; 3026 struct wmi_p2p_probe_response_cmd *p;
@@ -2764,11 +3036,12 @@ int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq,
2764 memcpy(p->destination_addr, dst, ETH_ALEN); 3036 memcpy(p->destination_addr, dst, ETH_ALEN);
2765 p->len = cpu_to_le16(data_len); 3037 p->len = cpu_to_le16(data_len);
2766 memcpy(p->data, data, data_len); 3038 memcpy(p->data, data, data_len);
2767 return ath6kl_wmi_cmd_send(wmi, skb, WMI_SEND_PROBE_RESPONSE_CMDID, 3039 return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
3040 WMI_SEND_PROBE_RESPONSE_CMDID,
2768 NO_SYNC_WMIFLAG); 3041 NO_SYNC_WMIFLAG);
2769} 3042}
2770 3043
2771int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable) 3044int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, u8 if_idx, bool enable)
2772{ 3045{
2773 struct sk_buff *skb; 3046 struct sk_buff *skb;
2774 struct wmi_probe_req_report_cmd *p; 3047 struct wmi_probe_req_report_cmd *p;
@@ -2781,11 +3054,11 @@ int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable)
2781 enable); 3054 enable);
2782 p = (struct wmi_probe_req_report_cmd *) skb->data; 3055 p = (struct wmi_probe_req_report_cmd *) skb->data;
2783 p->enable = enable ? 1 : 0; 3056 p->enable = enable ? 1 : 0;
2784 return ath6kl_wmi_cmd_send(wmi, skb, WMI_PROBE_REQ_REPORT_CMDID, 3057 return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_PROBE_REQ_REPORT_CMDID,
2785 NO_SYNC_WMIFLAG); 3058 NO_SYNC_WMIFLAG);
2786} 3059}
2787 3060
2788int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags) 3061int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u8 if_idx, u32 info_req_flags)
2789{ 3062{
2790 struct sk_buff *skb; 3063 struct sk_buff *skb;
2791 struct wmi_get_p2p_info *p; 3064 struct wmi_get_p2p_info *p;
@@ -2798,14 +3071,15 @@ int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags)
2798 info_req_flags); 3071 info_req_flags);
2799 p = (struct wmi_get_p2p_info *) skb->data; 3072 p = (struct wmi_get_p2p_info *) skb->data;
2800 p->info_req_flags = cpu_to_le32(info_req_flags); 3073 p->info_req_flags = cpu_to_le32(info_req_flags);
2801 return ath6kl_wmi_cmd_send(wmi, skb, WMI_GET_P2P_INFO_CMDID, 3074 return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_GET_P2P_INFO_CMDID,
2802 NO_SYNC_WMIFLAG); 3075 NO_SYNC_WMIFLAG);
2803} 3076}
2804 3077
2805int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi) 3078int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx)
2806{ 3079{
2807 ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl_cmd\n"); 3080 ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl_cmd\n");
2808 return ath6kl_wmi_simple_cmd(wmi, WMI_CANCEL_REMAIN_ON_CHNL_CMDID); 3081 return ath6kl_wmi_simple_cmd(wmi, if_idx,
3082 WMI_CANCEL_REMAIN_ON_CHNL_CMDID);
2809} 3083}
2810 3084
2811static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb) 3085static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
@@ -2818,7 +3092,6 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
2818 3092
2819 if (skb->len < sizeof(struct wmix_cmd_hdr)) { 3093 if (skb->len < sizeof(struct wmix_cmd_hdr)) {
2820 ath6kl_err("bad packet 1\n"); 3094 ath6kl_err("bad packet 1\n");
2821 wmi->stat.cmd_len_err++;
2822 return -EINVAL; 3095 return -EINVAL;
2823 } 3096 }
2824 3097
@@ -2840,7 +3113,6 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
2840 break; 3113 break;
2841 default: 3114 default:
2842 ath6kl_warn("unknown cmd id 0x%x\n", id); 3115 ath6kl_warn("unknown cmd id 0x%x\n", id);
2843 wmi->stat.cmd_id_err++;
2844 ret = -EINVAL; 3116 ret = -EINVAL;
2845 break; 3117 break;
2846 } 3118 }
@@ -2848,12 +3120,19 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
2848 return ret; 3120 return ret;
2849} 3121}
2850 3122
3123static int ath6kl_wmi_roam_tbl_event_rx(struct wmi *wmi, u8 *datap, int len)
3124{
3125 return ath6kl_debug_roam_tbl_event(wmi->parent_dev, datap, len);
3126}
3127
2851/* Control Path */ 3128/* Control Path */
2852int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb) 3129int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
2853{ 3130{
2854 struct wmi_cmd_hdr *cmd; 3131 struct wmi_cmd_hdr *cmd;
3132 struct ath6kl_vif *vif;
2855 u32 len; 3133 u32 len;
2856 u16 id; 3134 u16 id;
3135 u8 if_idx;
2857 u8 *datap; 3136 u8 *datap;
2858 int ret = 0; 3137 int ret = 0;
2859 3138
@@ -2863,12 +3142,12 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
2863 if (skb->len < sizeof(struct wmi_cmd_hdr)) { 3142 if (skb->len < sizeof(struct wmi_cmd_hdr)) {
2864 ath6kl_err("bad packet 1\n"); 3143 ath6kl_err("bad packet 1\n");
2865 dev_kfree_skb(skb); 3144 dev_kfree_skb(skb);
2866 wmi->stat.cmd_len_err++;
2867 return -EINVAL; 3145 return -EINVAL;
2868 } 3146 }
2869 3147
2870 cmd = (struct wmi_cmd_hdr *) skb->data; 3148 cmd = (struct wmi_cmd_hdr *) skb->data;
2871 id = le16_to_cpu(cmd->cmd_id); 3149 id = le16_to_cpu(cmd->cmd_id);
3150 if_idx = le16_to_cpu(cmd->info1) & WMI_CMD_HDR_IF_ID_MASK;
2872 3151
2873 skb_pull(skb, sizeof(struct wmi_cmd_hdr)); 3152 skb_pull(skb, sizeof(struct wmi_cmd_hdr));
2874 3153
@@ -2879,6 +3158,15 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
2879 ath6kl_dbg_dump(ATH6KL_DBG_WMI_DUMP, NULL, "wmi rx ", 3158 ath6kl_dbg_dump(ATH6KL_DBG_WMI_DUMP, NULL, "wmi rx ",
2880 datap, len); 3159 datap, len);
2881 3160
3161 vif = ath6kl_get_vif_by_index(wmi->parent_dev, if_idx);
3162 if (!vif) {
3163 ath6kl_dbg(ATH6KL_DBG_WMI,
3164 "Wmi event for unavailable vif, vif_index:%d\n",
3165 if_idx);
3166 dev_kfree_skb(skb);
3167 return -EINVAL;
3168 }
3169
2882 switch (id) { 3170 switch (id) {
2883 case WMI_GET_BITRATE_CMDID: 3171 case WMI_GET_BITRATE_CMDID:
2884 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_BITRATE_CMDID\n"); 3172 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_BITRATE_CMDID\n");
@@ -2898,11 +3186,11 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
2898 break; 3186 break;
2899 case WMI_CONNECT_EVENTID: 3187 case WMI_CONNECT_EVENTID:
2900 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CONNECT_EVENTID\n"); 3188 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CONNECT_EVENTID\n");
2901 ret = ath6kl_wmi_connect_event_rx(wmi, datap, len); 3189 ret = ath6kl_wmi_connect_event_rx(wmi, datap, len, vif);
2902 break; 3190 break;
2903 case WMI_DISCONNECT_EVENTID: 3191 case WMI_DISCONNECT_EVENTID:
2904 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DISCONNECT_EVENTID\n"); 3192 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DISCONNECT_EVENTID\n");
2905 ret = ath6kl_wmi_disconnect_event_rx(wmi, datap, len); 3193 ret = ath6kl_wmi_disconnect_event_rx(wmi, datap, len, vif);
2906 break; 3194 break;
2907 case WMI_PEER_NODE_EVENTID: 3195 case WMI_PEER_NODE_EVENTID:
2908 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PEER_NODE_EVENTID\n"); 3196 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PEER_NODE_EVENTID\n");
@@ -2910,11 +3198,11 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
2910 break; 3198 break;
2911 case WMI_TKIP_MICERR_EVENTID: 3199 case WMI_TKIP_MICERR_EVENTID:
2912 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TKIP_MICERR_EVENTID\n"); 3200 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TKIP_MICERR_EVENTID\n");
2913 ret = ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len); 3201 ret = ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len, vif);
2914 break; 3202 break;
2915 case WMI_BSSINFO_EVENTID: 3203 case WMI_BSSINFO_EVENTID:
2916 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_BSSINFO_EVENTID\n"); 3204 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_BSSINFO_EVENTID\n");
2917 ret = ath6kl_wmi_bssinfo_event_rx(wmi, datap, len); 3205 ret = ath6kl_wmi_bssinfo_event_rx(wmi, datap, len, vif);
2918 break; 3206 break;
2919 case WMI_REGDOMAIN_EVENTID: 3207 case WMI_REGDOMAIN_EVENTID:
2920 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REGDOMAIN_EVENTID\n"); 3208 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REGDOMAIN_EVENTID\n");
@@ -2926,11 +3214,12 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
2926 break; 3214 break;
2927 case WMI_NEIGHBOR_REPORT_EVENTID: 3215 case WMI_NEIGHBOR_REPORT_EVENTID:
2928 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_NEIGHBOR_REPORT_EVENTID\n"); 3216 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_NEIGHBOR_REPORT_EVENTID\n");
2929 ret = ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len); 3217 ret = ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len,
3218 vif);
2930 break; 3219 break;
2931 case WMI_SCAN_COMPLETE_EVENTID: 3220 case WMI_SCAN_COMPLETE_EVENTID:
2932 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SCAN_COMPLETE_EVENTID\n"); 3221 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SCAN_COMPLETE_EVENTID\n");
2933 ret = ath6kl_wmi_scan_complete_rx(wmi, datap, len); 3222 ret = ath6kl_wmi_scan_complete_rx(wmi, datap, len, vif);
2934 break; 3223 break;
2935 case WMI_CMDERROR_EVENTID: 3224 case WMI_CMDERROR_EVENTID:
2936 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CMDERROR_EVENTID\n"); 3225 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CMDERROR_EVENTID\n");
@@ -2938,7 +3227,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
2938 break; 3227 break;
2939 case WMI_REPORT_STATISTICS_EVENTID: 3228 case WMI_REPORT_STATISTICS_EVENTID:
2940 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_STATISTICS_EVENTID\n"); 3229 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_STATISTICS_EVENTID\n");
2941 ret = ath6kl_wmi_stats_event_rx(wmi, datap, len); 3230 ret = ath6kl_wmi_stats_event_rx(wmi, datap, len, vif);
2942 break; 3231 break;
2943 case WMI_RSSI_THRESHOLD_EVENTID: 3232 case WMI_RSSI_THRESHOLD_EVENTID:
2944 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RSSI_THRESHOLD_EVENTID\n"); 3233 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RSSI_THRESHOLD_EVENTID\n");
@@ -2953,6 +3242,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
2953 break; 3242 break;
2954 case WMI_REPORT_ROAM_TBL_EVENTID: 3243 case WMI_REPORT_ROAM_TBL_EVENTID:
2955 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_ROAM_TBL_EVENTID\n"); 3244 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_ROAM_TBL_EVENTID\n");
3245 ret = ath6kl_wmi_roam_tbl_event_rx(wmi, datap, len);
2956 break; 3246 break;
2957 case WMI_EXTENSION_EVENTID: 3247 case WMI_EXTENSION_EVENTID:
2958 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_EXTENSION_EVENTID\n"); 3248 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_EXTENSION_EVENTID\n");
@@ -2960,7 +3250,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
2960 break; 3250 break;
2961 case WMI_CAC_EVENTID: 3251 case WMI_CAC_EVENTID:
2962 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CAC_EVENTID\n"); 3252 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CAC_EVENTID\n");
2963 ret = ath6kl_wmi_cac_event_rx(wmi, datap, len); 3253 ret = ath6kl_wmi_cac_event_rx(wmi, datap, len, vif);
2964 break; 3254 break;
2965 case WMI_CHANNEL_CHANGE_EVENTID: 3255 case WMI_CHANNEL_CHANGE_EVENTID:
2966 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CHANNEL_CHANGE_EVENTID\n"); 3256 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CHANNEL_CHANGE_EVENTID\n");
@@ -2996,7 +3286,6 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
2996 break; 3286 break;
2997 case WMI_GET_WOW_LIST_EVENTID: 3287 case WMI_GET_WOW_LIST_EVENTID:
2998 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_WOW_LIST_EVENTID\n"); 3288 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_WOW_LIST_EVENTID\n");
2999 ret = ath6kl_wmi_get_wow_list_event_rx(wmi, datap, len);
3000 break; 3289 break;
3001 case WMI_GET_PMKID_LIST_EVENTID: 3290 case WMI_GET_PMKID_LIST_EVENTID:
3002 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_PMKID_LIST_EVENTID\n"); 3291 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_PMKID_LIST_EVENTID\n");
@@ -3004,25 +3293,25 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
3004 break; 3293 break;
3005 case WMI_PSPOLL_EVENTID: 3294 case WMI_PSPOLL_EVENTID:
3006 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSPOLL_EVENTID\n"); 3295 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSPOLL_EVENTID\n");
3007 ret = ath6kl_wmi_pspoll_event_rx(wmi, datap, len); 3296 ret = ath6kl_wmi_pspoll_event_rx(wmi, datap, len, vif);
3008 break; 3297 break;
3009 case WMI_DTIMEXPIRY_EVENTID: 3298 case WMI_DTIMEXPIRY_EVENTID:
3010 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DTIMEXPIRY_EVENTID\n"); 3299 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DTIMEXPIRY_EVENTID\n");
3011 ret = ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len); 3300 ret = ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len, vif);
3012 break; 3301 break;
3013 case WMI_SET_PARAMS_REPLY_EVENTID: 3302 case WMI_SET_PARAMS_REPLY_EVENTID:
3014 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SET_PARAMS_REPLY_EVENTID\n"); 3303 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SET_PARAMS_REPLY_EVENTID\n");
3015 break; 3304 break;
3016 case WMI_ADDBA_REQ_EVENTID: 3305 case WMI_ADDBA_REQ_EVENTID:
3017 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_REQ_EVENTID\n"); 3306 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_REQ_EVENTID\n");
3018 ret = ath6kl_wmi_addba_req_event_rx(wmi, datap, len); 3307 ret = ath6kl_wmi_addba_req_event_rx(wmi, datap, len, vif);
3019 break; 3308 break;
3020 case WMI_ADDBA_RESP_EVENTID: 3309 case WMI_ADDBA_RESP_EVENTID:
3021 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_RESP_EVENTID\n"); 3310 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_RESP_EVENTID\n");
3022 break; 3311 break;
3023 case WMI_DELBA_REQ_EVENTID: 3312 case WMI_DELBA_REQ_EVENTID:
3024 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DELBA_REQ_EVENTID\n"); 3313 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DELBA_REQ_EVENTID\n");
3025 ret = ath6kl_wmi_delba_req_event_rx(wmi, datap, len); 3314 ret = ath6kl_wmi_delba_req_event_rx(wmi, datap, len, vif);
3026 break; 3315 break;
3027 case WMI_REPORT_BTCOEX_CONFIG_EVENTID: 3316 case WMI_REPORT_BTCOEX_CONFIG_EVENTID:
3028 ath6kl_dbg(ATH6KL_DBG_WMI, 3317 ath6kl_dbg(ATH6KL_DBG_WMI,
@@ -3038,21 +3327,21 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
3038 break; 3327 break;
3039 case WMI_REMAIN_ON_CHNL_EVENTID: 3328 case WMI_REMAIN_ON_CHNL_EVENTID:
3040 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REMAIN_ON_CHNL_EVENTID\n"); 3329 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REMAIN_ON_CHNL_EVENTID\n");
3041 ret = ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len); 3330 ret = ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len, vif);
3042 break; 3331 break;
3043 case WMI_CANCEL_REMAIN_ON_CHNL_EVENTID: 3332 case WMI_CANCEL_REMAIN_ON_CHNL_EVENTID:
3044 ath6kl_dbg(ATH6KL_DBG_WMI, 3333 ath6kl_dbg(ATH6KL_DBG_WMI,
3045 "WMI_CANCEL_REMAIN_ON_CHNL_EVENTID\n"); 3334 "WMI_CANCEL_REMAIN_ON_CHNL_EVENTID\n");
3046 ret = ath6kl_wmi_cancel_remain_on_chnl_event_rx(wmi, datap, 3335 ret = ath6kl_wmi_cancel_remain_on_chnl_event_rx(wmi, datap,
3047 len); 3336 len, vif);
3048 break; 3337 break;
3049 case WMI_TX_STATUS_EVENTID: 3338 case WMI_TX_STATUS_EVENTID:
3050 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_STATUS_EVENTID\n"); 3339 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_STATUS_EVENTID\n");
3051 ret = ath6kl_wmi_tx_status_event_rx(wmi, datap, len); 3340 ret = ath6kl_wmi_tx_status_event_rx(wmi, datap, len, vif);
3052 break; 3341 break;
3053 case WMI_RX_PROBE_REQ_EVENTID: 3342 case WMI_RX_PROBE_REQ_EVENTID:
3054 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_PROBE_REQ_EVENTID\n"); 3343 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_PROBE_REQ_EVENTID\n");
3055 ret = ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len); 3344 ret = ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len, vif);
3056 break; 3345 break;
3057 case WMI_P2P_CAPABILITIES_EVENTID: 3346 case WMI_P2P_CAPABILITIES_EVENTID:
3058 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_CAPABILITIES_EVENTID\n"); 3347 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_CAPABILITIES_EVENTID\n");
@@ -3060,7 +3349,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
3060 break; 3349 break;
3061 case WMI_RX_ACTION_EVENTID: 3350 case WMI_RX_ACTION_EVENTID:
3062 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_ACTION_EVENTID\n"); 3351 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_ACTION_EVENTID\n");
3063 ret = ath6kl_wmi_rx_action_event_rx(wmi, datap, len); 3352 ret = ath6kl_wmi_rx_action_event_rx(wmi, datap, len, vif);
3064 break; 3353 break;
3065 case WMI_P2P_INFO_EVENTID: 3354 case WMI_P2P_INFO_EVENTID:
3066 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_INFO_EVENTID\n"); 3355 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_INFO_EVENTID\n");
@@ -3068,7 +3357,6 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
3068 break; 3357 break;
3069 default: 3358 default:
3070 ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", id); 3359 ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", id);
3071 wmi->stat.cmd_id_err++;
3072 ret = -EINVAL; 3360 ret = -EINVAL;
3073 break; 3361 break;
3074 } 3362 }
@@ -3078,11 +3366,8 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
3078 return ret; 3366 return ret;
3079} 3367}
3080 3368
3081static void ath6kl_wmi_qos_state_init(struct wmi *wmi) 3369void ath6kl_wmi_reset(struct wmi *wmi)
3082{ 3370{
3083 if (!wmi)
3084 return;
3085
3086 spin_lock_bh(&wmi->lock); 3371 spin_lock_bh(&wmi->lock);
3087 3372
3088 wmi->fat_pipe_exist = 0; 3373 wmi->fat_pipe_exist = 0;
@@ -3103,16 +3388,9 @@ void *ath6kl_wmi_init(struct ath6kl *dev)
3103 3388
3104 wmi->parent_dev = dev; 3389 wmi->parent_dev = dev;
3105 3390
3106 ath6kl_wmi_qos_state_init(wmi);
3107
3108 wmi->pwr_mode = REC_POWER; 3391 wmi->pwr_mode = REC_POWER;
3109 wmi->phy_mode = WMI_11G_MODE;
3110
3111 wmi->pair_crypto_type = NONE_CRYPT;
3112 wmi->grp_crypto_type = NONE_CRYPT;
3113 3392
3114 wmi->ht_allowed[A_BAND_24GHZ] = 1; 3393 ath6kl_wmi_reset(wmi);
3115 wmi->ht_allowed[A_BAND_5GHZ] = 1;
3116 3394
3117 return wmi; 3395 return wmi;
3118} 3396}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index f8e644d54aa7..76342d5a1906 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -93,11 +93,6 @@ struct sq_threshold_params {
93 u8 last_rssi_poll_event; 93 u8 last_rssi_poll_event;
94}; 94};
95 95
96struct wmi_stats {
97 u32 cmd_len_err;
98 u32 cmd_id_err;
99};
100
101struct wmi_data_sync_bufs { 96struct wmi_data_sync_bufs {
102 u8 traffic_class; 97 u8 traffic_class;
103 struct sk_buff *skb; 98 struct sk_buff *skb;
@@ -111,32 +106,26 @@ struct wmi_data_sync_bufs {
111#define WMM_AC_VO 3 /* voice */ 106#define WMM_AC_VO 3 /* voice */
112 107
113struct wmi { 108struct wmi {
114 bool ready;
115 u16 stream_exist_for_ac[WMM_NUM_AC]; 109 u16 stream_exist_for_ac[WMM_NUM_AC];
116 u8 fat_pipe_exist; 110 u8 fat_pipe_exist;
117 struct ath6kl *parent_dev; 111 struct ath6kl *parent_dev;
118 struct wmi_stats stat;
119 u8 pwr_mode; 112 u8 pwr_mode;
120 u8 phy_mode;
121 u8 keep_alive_intvl;
122 spinlock_t lock; 113 spinlock_t lock;
123 enum htc_endpoint_id ep_id; 114 enum htc_endpoint_id ep_id;
124 struct sq_threshold_params 115 struct sq_threshold_params
125 sq_threshld[SIGNAL_QUALITY_METRICS_NUM_MAX]; 116 sq_threshld[SIGNAL_QUALITY_METRICS_NUM_MAX];
126 enum crypto_type pair_crypto_type;
127 enum crypto_type grp_crypto_type;
128 bool is_wmm_enabled; 117 bool is_wmm_enabled;
129 u8 ht_allowed[A_NUM_BANDS];
130 u8 traffic_class; 118 u8 traffic_class;
131 bool is_probe_ssid; 119 bool is_probe_ssid;
132 120
133 u8 *last_mgmt_tx_frame; 121 u8 *last_mgmt_tx_frame;
134 size_t last_mgmt_tx_frame_len; 122 size_t last_mgmt_tx_frame_len;
123 u8 saved_pwr_mode;
135}; 124};
136 125
137struct host_app_area { 126struct host_app_area {
138 u32 wmi_protocol_ver; 127 __le32 wmi_protocol_ver;
139}; 128} __packed;
140 129
141enum wmi_msg_type { 130enum wmi_msg_type {
142 DATA_MSGTYPE = 0x0, 131 DATA_MSGTYPE = 0x0,
@@ -184,6 +173,8 @@ enum wmi_data_hdr_data_type {
184#define WMI_DATA_HDR_META_MASK 0x7 173#define WMI_DATA_HDR_META_MASK 0x7
185#define WMI_DATA_HDR_META_SHIFT 13 174#define WMI_DATA_HDR_META_SHIFT 13
186 175
176#define WMI_DATA_HDR_IF_IDX_MASK 0xF
177
187struct wmi_data_hdr { 178struct wmi_data_hdr {
188 s8 rssi; 179 s8 rssi;
189 180
@@ -208,6 +199,12 @@ struct wmi_data_hdr {
208 * b15:b13 - META_DATA_VERSION 0 - 7 199 * b15:b13 - META_DATA_VERSION 0 - 7
209 */ 200 */
210 __le16 info2; 201 __le16 info2;
202
203 /*
204 * usage of info3, 16-bit:
205 * b3:b0 - Interface index
206 * b15:b4 - Reserved
207 */
211 __le16 info3; 208 __le16 info3;
212} __packed; 209} __packed;
213 210
@@ -250,6 +247,11 @@ static inline u8 wmi_data_hdr_get_meta(struct wmi_data_hdr *dhdr)
250 WMI_DATA_HDR_META_MASK; 247 WMI_DATA_HDR_META_MASK;
251} 248}
252 249
250static inline u8 wmi_data_hdr_get_if_idx(struct wmi_data_hdr *dhdr)
251{
252 return le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_IF_IDX_MASK;
253}
254
253/* Tx meta version definitions */ 255/* Tx meta version definitions */
254#define WMI_MAX_TX_META_SZ 12 256#define WMI_MAX_TX_META_SZ 12
255#define WMI_META_VERSION_1 0x01 257#define WMI_META_VERSION_1 0x01
@@ -299,6 +301,8 @@ struct wmi_rx_meta_v2 {
299 u8 csum_flags; 301 u8 csum_flags;
300} __packed; 302} __packed;
301 303
304#define WMI_CMD_HDR_IF_ID_MASK 0xF
305
302/* Control Path */ 306/* Control Path */
303struct wmi_cmd_hdr { 307struct wmi_cmd_hdr {
304 __le16 cmd_id; 308 __le16 cmd_id;
@@ -312,6 +316,11 @@ struct wmi_cmd_hdr {
312 __le16 reserved; 316 __le16 reserved;
313} __packed; 317} __packed;
314 318
319static inline u8 wmi_cmd_hdr_get_if_idx(struct wmi_cmd_hdr *chdr)
320{
321 return le16_to_cpu(chdr->info1) & WMI_CMD_HDR_IF_ID_MASK;
322}
323
315/* List of WMI commands */ 324/* List of WMI commands */
316enum wmi_cmd_id { 325enum wmi_cmd_id {
317 WMI_CONNECT_CMDID = 0x0001, 326 WMI_CONNECT_CMDID = 0x0001,
@@ -576,9 +585,6 @@ enum auth_mode {
576 WPA2_AUTH_CCKM = 0x40, 585 WPA2_AUTH_CCKM = 0x40,
577}; 586};
578 587
579#define WMI_MIN_CRYPTO_TYPE NONE_CRYPT
580#define WMI_MAX_CRYPTO_TYPE (AES_CRYPT + 1)
581
582#define WMI_MIN_KEY_INDEX 0 588#define WMI_MIN_KEY_INDEX 0
583#define WMI_MAX_KEY_INDEX 3 589#define WMI_MAX_KEY_INDEX 3
584 590
@@ -617,6 +623,7 @@ enum wmi_connect_ctrl_flags_bits {
617 CONNECT_CSA_FOLLOW_BSS = 0x0020, 623 CONNECT_CSA_FOLLOW_BSS = 0x0020,
618 CONNECT_DO_WPA_OFFLOAD = 0x0040, 624 CONNECT_DO_WPA_OFFLOAD = 0x0040,
619 CONNECT_DO_NOT_DEAUTH = 0x0080, 625 CONNECT_DO_NOT_DEAUTH = 0x0080,
626 CONNECT_WPS_FLAG = 0x0100,
620}; 627};
621 628
622struct wmi_connect_cmd { 629struct wmi_connect_cmd {
@@ -1365,14 +1372,20 @@ enum wmi_roam_ctrl {
1365 WMI_SET_LRSSI_SCAN_PARAMS, 1372 WMI_SET_LRSSI_SCAN_PARAMS,
1366}; 1373};
1367 1374
1375enum wmi_roam_mode {
1376 WMI_DEFAULT_ROAM_MODE = 1, /* RSSI based roam */
1377 WMI_HOST_BIAS_ROAM_MODE = 2, /* Host bias based roam */
1378 WMI_LOCK_BSS_MODE = 3, /* Lock to the current BSS */
1379};
1380
1368struct bss_bias { 1381struct bss_bias {
1369 u8 bssid[ETH_ALEN]; 1382 u8 bssid[ETH_ALEN];
1370 u8 bias; 1383 s8 bias;
1371} __packed; 1384} __packed;
1372 1385
1373struct bss_bias_info { 1386struct bss_bias_info {
1374 u8 num_bss; 1387 u8 num_bss;
1375 struct bss_bias bss_bias[1]; 1388 struct bss_bias bss_bias[0];
1376} __packed; 1389} __packed;
1377 1390
1378struct low_rssi_scan_params { 1391struct low_rssi_scan_params {
@@ -1385,10 +1398,11 @@ struct low_rssi_scan_params {
1385 1398
1386struct roam_ctrl_cmd { 1399struct roam_ctrl_cmd {
1387 union { 1400 union {
1388 u8 bssid[ETH_ALEN]; 1401 u8 bssid[ETH_ALEN]; /* WMI_FORCE_ROAM */
1389 u8 roam_mode; 1402 u8 roam_mode; /* WMI_SET_ROAM_MODE */
1390 struct bss_bias_info bss; 1403 struct bss_bias_info bss; /* WMI_SET_HOST_BIAS */
1391 struct low_rssi_scan_params params; 1404 struct low_rssi_scan_params params; /* WMI_SET_LRSSI_SCAN_PARAMS
1405 */
1392 } __packed info; 1406 } __packed info;
1393 u8 roam_ctrl; 1407 u8 roam_ctrl;
1394} __packed; 1408} __packed;
@@ -1455,6 +1469,10 @@ struct wmi_tkip_micerr_event {
1455 u8 is_mcast; 1469 u8 is_mcast;
1456} __packed; 1470} __packed;
1457 1471
1472enum wmi_scan_status {
1473 WMI_SCAN_STATUS_SUCCESS = 0,
1474};
1475
1458/* WMI_SCAN_COMPLETE_EVENTID */ 1476/* WMI_SCAN_COMPLETE_EVENTID */
1459struct wmi_scan_complete_event { 1477struct wmi_scan_complete_event {
1460 a_sle32 status; 1478 a_sle32 status;
@@ -1635,6 +1653,12 @@ struct wmi_bss_roam_info {
1635 u8 reserved; 1653 u8 reserved;
1636} __packed; 1654} __packed;
1637 1655
1656struct wmi_target_roam_tbl {
1657 __le16 roam_mode;
1658 __le16 num_entries;
1659 struct wmi_bss_roam_info info[];
1660} __packed;
1661
1638/* WMI_CAC_EVENTID */ 1662/* WMI_CAC_EVENTID */
1639enum cac_indication { 1663enum cac_indication {
1640 CAC_INDICATION_ADMISSION = 0x00, 1664 CAC_INDICATION_ADMISSION = 0x00,
@@ -1771,7 +1795,6 @@ struct wmi_set_appie_cmd {
1771#define WSC_REG_ACTIVE 1 1795#define WSC_REG_ACTIVE 1
1772#define WSC_REG_INACTIVE 0 1796#define WSC_REG_INACTIVE 0
1773 1797
1774#define WOW_MAX_FILTER_LISTS 1
1775#define WOW_MAX_FILTERS_PER_LIST 4 1798#define WOW_MAX_FILTERS_PER_LIST 4
1776#define WOW_PATTERN_SIZE 64 1799#define WOW_PATTERN_SIZE 64
1777#define WOW_MASK_SIZE 64 1800#define WOW_MASK_SIZE 64
@@ -1794,17 +1817,52 @@ struct wmi_set_ip_cmd {
1794 __le32 ips[MAX_IP_ADDRS]; 1817 __le32 ips[MAX_IP_ADDRS];
1795} __packed; 1818} __packed;
1796 1819
1797/* WMI_GET_WOW_LIST_CMD reply */ 1820enum ath6kl_wow_filters {
1798struct wmi_get_wow_list_reply { 1821 WOW_FILTER_SSID = BIT(0),
1799 /* number of patterns in reply */ 1822 WOW_FILTER_OPTION_MAGIC_PACKET = BIT(2),
1800 u8 num_filters; 1823 WOW_FILTER_OPTION_EAP_REQ = BIT(3),
1824 WOW_FILTER_OPTION_PATTERNS = BIT(4),
1825 WOW_FILTER_OPTION_OFFLOAD_ARP = BIT(5),
1826 WOW_FILTER_OPTION_OFFLOAD_NS = BIT(6),
1827 WOW_FILTER_OPTION_OFFLOAD_GTK = BIT(7),
1828 WOW_FILTER_OPTION_8021X_4WAYHS = BIT(8),
1829 WOW_FILTER_OPTION_NLO_DISCVRY = BIT(9),
1830 WOW_FILTER_OPTION_NWK_DISASSOC = BIT(10),
1831 WOW_FILTER_OPTION_GTK_ERROR = BIT(11),
1832 WOW_FILTER_OPTION_TEST_MODE = BIT(15),
1833};
1801 1834
1802 /* this is filter # x of total num_filters */ 1835enum ath6kl_host_mode {
1803 u8 this_filter_num; 1836 ATH6KL_HOST_MODE_AWAKE,
1837 ATH6KL_HOST_MODE_ASLEEP,
1838};
1839
1840struct wmi_set_host_sleep_mode_cmd {
1841 __le32 awake;
1842 __le32 asleep;
1843} __packed;
1844
1845enum ath6kl_wow_mode {
1846 ATH6KL_WOW_MODE_DISABLE,
1847 ATH6KL_WOW_MODE_ENABLE,
1848};
1849
1850struct wmi_set_wow_mode_cmd {
1851 __le32 enable_wow;
1852 __le32 filter;
1853 __le16 host_req_delay;
1854} __packed;
1855
1856struct wmi_add_wow_pattern_cmd {
1857 u8 filter_list_id;
1858 u8 filter_size;
1859 u8 filter_offset;
1860 u8 filter[0];
1861} __packed;
1804 1862
1805 u8 wow_mode; 1863struct wmi_del_wow_pattern_cmd {
1806 u8 host_mode; 1864 __le16 filter_list_id;
1807 struct wow_filter wow_filters[1]; 1865 __le16 filter_id;
1808} __packed; 1866} __packed;
1809 1867
1810/* WMI_SET_AKMP_PARAMS_CMD */ 1868/* WMI_SET_AKMP_PARAMS_CMD */
@@ -2163,20 +2221,21 @@ int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb);
2163int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb, 2221int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
2164 u8 msg_type, bool more_data, 2222 u8 msg_type, bool more_data,
2165 enum wmi_data_hdr_data_type data_type, 2223 enum wmi_data_hdr_data_type data_type,
2166 u8 meta_ver, void *tx_meta_info); 2224 u8 meta_ver, void *tx_meta_info, u8 if_idx);
2167 2225
2168int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb); 2226int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb);
2169int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb); 2227int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb);
2170int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb, 2228int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
2171 u32 layer2_priority, bool wmm_enabled, 2229 struct sk_buff *skb, u32 layer2_priority,
2172 u8 *ac); 2230 bool wmm_enabled, u8 *ac);
2173 2231
2174int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb); 2232int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb);
2175 2233
2176int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb, 2234int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb,
2177 enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag); 2235 enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag);
2178 2236
2179int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type, 2237int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
2238 enum network_type nw_type,
2180 enum dot11_auth_mode dot11_auth_mode, 2239 enum dot11_auth_mode dot11_auth_mode,
2181 enum auth_mode auth_mode, 2240 enum auth_mode auth_mode,
2182 enum crypto_type pairwise_crypto, 2241 enum crypto_type pairwise_crypto,
@@ -2185,98 +2244,124 @@ int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
2185 u8 group_crypto_len, int ssid_len, u8 *ssid, 2244 u8 group_crypto_len, int ssid_len, u8 *ssid,
2186 u8 *bssid, u16 channel, u32 ctrl_flags); 2245 u8 *bssid, u16 channel, u32 ctrl_flags);
2187 2246
2188int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel); 2247int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid,
2189int ath6kl_wmi_disconnect_cmd(struct wmi *wmi); 2248 u16 channel);
2190int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type, 2249int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx);
2250int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
2251 enum wmi_scan_type scan_type,
2191 u32 force_fgscan, u32 is_legacy, 2252 u32 force_fgscan, u32 is_legacy,
2192 u32 home_dwell_time, u32 force_scan_interval, 2253 u32 home_dwell_time, u32 force_scan_interval,
2193 s8 num_chan, u16 *ch_list); 2254 s8 num_chan, u16 *ch_list);
2194int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec, 2255int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx, u16 fg_start_sec,
2195 u16 fg_end_sec, u16 bg_sec, 2256 u16 fg_end_sec, u16 bg_sec,
2196 u16 minact_chdw_msec, u16 maxact_chdw_msec, 2257 u16 minact_chdw_msec, u16 maxact_chdw_msec,
2197 u16 pas_chdw_msec, u8 short_scan_ratio, 2258 u16 pas_chdw_msec, u8 short_scan_ratio,
2198 u8 scan_ctrl_flag, u32 max_dfsch_act_time, 2259 u8 scan_ctrl_flag, u32 max_dfsch_act_time,
2199 u16 maxact_scan_per_ssid); 2260 u16 maxact_scan_per_ssid);
2200int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask); 2261int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 if_idx, u8 filter,
2201int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag, 2262 u32 ie_mask);
2263int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag,
2202 u8 ssid_len, u8 *ssid); 2264 u8 ssid_len, u8 *ssid);
2203int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval, 2265int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx,
2266 u16 listen_interval,
2204 u16 listen_beacons); 2267 u16 listen_beacons);
2205int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode); 2268int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode);
2206int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period, 2269int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period,
2207 u16 ps_poll_num, u16 dtim_policy, 2270 u16 ps_poll_num, u16 dtim_policy,
2208 u16 tx_wakup_policy, u16 num_tx_to_wakeup, 2271 u16 tx_wakup_policy, u16 num_tx_to_wakeup,
2209 u16 ps_fail_event_policy); 2272 u16 ps_fail_event_policy);
2210int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout); 2273int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx,
2211int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
2212 struct wmi_create_pstream_cmd *pstream); 2274 struct wmi_create_pstream_cmd *pstream);
2213int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid); 2275int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
2276 u8 tsid);
2277int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout);
2214 2278
2215int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold); 2279int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold);
2216int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status, 2280int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 if_idx, u8 status,
2217 u8 preamble_policy); 2281 u8 preamble_policy);
2218 2282
2219int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source); 2283int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source);
2220int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config); 2284int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config);
2221 2285
2222int ath6kl_wmi_get_stats_cmd(struct wmi *wmi); 2286int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx);
2223int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index, 2287int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
2224 enum crypto_type key_type, 2288 enum crypto_type key_type,
2225 u8 key_usage, u8 key_len, 2289 u8 key_usage, u8 key_len,
2226 u8 *key_rsc, u8 *key_material, 2290 u8 *key_rsc, unsigned int key_rsc_len,
2291 u8 *key_material,
2227 u8 key_op_ctrl, u8 *mac_addr, 2292 u8 key_op_ctrl, u8 *mac_addr,
2228 enum wmi_sync_flag sync_flag); 2293 enum wmi_sync_flag sync_flag);
2229int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk); 2294int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk);
2230int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index); 2295int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index);
2231int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid, 2296int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid,
2232 const u8 *pmkid, bool set); 2297 const u8 *pmkid, bool set);
2233int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM); 2298int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 if_idx, u8 dbM);
2234int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi); 2299int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi, u8 if_idx);
2300int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi);
2235 2301
2236int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg); 2302int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg);
2237int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl); 2303int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
2304 u8 keep_alive_intvl);
2238int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len); 2305int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len);
2239 2306
2240s32 ath6kl_wmi_get_rate(s8 rate_index); 2307s32 ath6kl_wmi_get_rate(s8 rate_index);
2241 2308
2242int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd); 2309int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd);
2310int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
2311 enum ath6kl_host_mode host_mode);
2312int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
2313 enum ath6kl_wow_mode wow_mode,
2314 u32 filter, u16 host_req_delay);
2315int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
2316 u8 list_id, u8 filter_size,
2317 u8 filter_offset, u8 *filter, u8 *mask);
2318int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
2319 u16 list_id, u16 filter_id);
2243int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi); 2320int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi);
2321int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid);
2322int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode);
2244 2323
2245/* AP mode */ 2324/* AP mode */
2246int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p); 2325int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx,
2326 struct wmi_connect_cmd *p);
2247 2327
2248int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason); 2328int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd,
2329 const u8 *mac, u16 reason);
2249 2330
2250int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag); 2331int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid, bool flag);
2251 2332
2252int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_version, 2333int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx,
2334 u8 rx_meta_version,
2253 bool rx_dot11_hdr, bool defrag_on_host); 2335 bool rx_dot11_hdr, bool defrag_on_host);
2254 2336
2255int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie, 2337int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
2256 u8 ie_len); 2338 const u8 *ie, u8 ie_len);
2257 2339
2258/* P2P */ 2340/* P2P */
2259int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable); 2341int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable);
2260 2342
2261int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur); 2343int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
2344 u32 dur);
2262 2345
2263int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait, 2346int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
2264 const u8 *data, u16 data_len); 2347 u32 wait, const u8 *data, u16 data_len);
2265 2348
2266int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq, 2349int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
2267 const u8 *dst, 2350 const u8 *dst, const u8 *data,
2268 const u8 *data, u16 data_len); 2351 u16 data_len);
2269 2352
2270int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable); 2353int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, u8 if_idx, bool enable);
2271 2354
2272int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags); 2355int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u8 if_idx, u32 info_req_flags);
2273 2356
2274int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi); 2357int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx);
2275 2358
2276int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie, 2359int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
2277 u8 ie_len); 2360 const u8 *ie, u8 ie_len);
2278 2361
2362struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx);
2279void *ath6kl_wmi_init(struct ath6kl *devt); 2363void *ath6kl_wmi_init(struct ath6kl *devt);
2280void ath6kl_wmi_shutdown(struct wmi *wmi); 2364void ath6kl_wmi_shutdown(struct wmi *wmi);
2365void ath6kl_wmi_reset(struct wmi *wmi);
2281 2366
2282#endif /* WMI_H */ 2367#endif /* WMI_H */
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 012263968d64..9ac28d9de597 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -36,6 +36,20 @@ struct ath_btcoex_config {
36 bool bt_hold_rx_clear; 36 bool bt_hold_rx_clear;
37}; 37};
38 38
39static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX]
40 [AR9300_NUM_WLAN_WEIGHTS] = {
41 { 0xfffffff0, 0xfffffff0, 0xfffffff0, 0xfffffff0 }, /* STOMP_ALL */
42 { 0x88888880, 0x88888880, 0x88888880, 0x88888880 }, /* STOMP_LOW */
43 { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* STOMP_NONE */
44};
45
46static const u32 ar9462_wlan_weights[ATH_BTCOEX_STOMP_MAX]
47 [AR9300_NUM_WLAN_WEIGHTS] = {
48 { 0x01017d01, 0x41414101, 0x41414101, 0x41414141 }, /* STOMP_ALL */
49 { 0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b }, /* STOMP_LOW */
50 { 0x01017d01, 0x01010101, 0x01010101, 0x01010101 }, /* STOMP_NONE */
51 { 0x01017d01, 0x013b0101, 0x3b3b0101, 0x3b3b013b }, /* STOMP_LOW_FTP */
52};
39 53
40void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum) 54void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
41{ 55{
@@ -152,27 +166,26 @@ EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
152 166
153static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah) 167static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
154{ 168{
155 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; 169 struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
156 u32 val; 170 u32 val;
171 int i;
157 172
158 /* 173 /*
159 * Program coex mode and weight registers to 174 * Program coex mode and weight registers to
160 * enable coex 3-wire 175 * enable coex 3-wire
161 */ 176 */
162 REG_WRITE(ah, AR_BT_COEX_MODE, btcoex_hw->bt_coex_mode); 177 REG_WRITE(ah, AR_BT_COEX_MODE, btcoex->bt_coex_mode);
163 REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_hw->bt_coex_mode2); 178 REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
164 179
165 180
166 if (AR_SREV_9300_20_OR_LATER(ah)) { 181 if (AR_SREV_9300_20_OR_LATER(ah)) {
167 REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, ah->bt_coex_wlan_weight[0]); 182 REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, btcoex->wlan_weight[0]);
168 REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, ah->bt_coex_wlan_weight[1]); 183 REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, btcoex->wlan_weight[1]);
169 REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS0, ah->bt_coex_bt_weight[0]); 184 for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
170 REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS1, ah->bt_coex_bt_weight[1]); 185 REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS(i),
171 REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS2, ah->bt_coex_bt_weight[2]); 186 btcoex->bt_weight[i]);
172 REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS3, ah->bt_coex_bt_weight[3]);
173
174 } else 187 } else
175 REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_hw->bt_coex_weights); 188 REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex->bt_coex_weights);
176 189
177 190
178 191
@@ -185,10 +198,23 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
185 REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1); 198 REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
186 REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0); 199 REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
187 200
188 ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio, 201 ath9k_hw_cfg_output(ah, btcoex->wlanactive_gpio,
189 AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL); 202 AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
190} 203}
191 204
205static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah)
206{
207 struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
208 int i;
209
210 for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
211 REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
212 btcoex->wlan_weight[i]);
213
214 REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
215 btcoex->enabled = true;
216}
217
192void ath9k_hw_btcoex_enable(struct ath_hw *ah) 218void ath9k_hw_btcoex_enable(struct ath_hw *ah)
193{ 219{
194 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; 220 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
@@ -202,6 +228,9 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah)
202 case ATH_BTCOEX_CFG_3WIRE: 228 case ATH_BTCOEX_CFG_3WIRE:
203 ath9k_hw_btcoex_enable_3wire(ah); 229 ath9k_hw_btcoex_enable_3wire(ah);
204 break; 230 break;
231 case ATH_BTCOEX_CFG_MCI:
232 ath9k_hw_btcoex_enable_mci(ah);
233 return;
205 } 234 }
206 235
207 REG_RMW(ah, AR_GPIO_PDPU, 236 REG_RMW(ah, AR_GPIO_PDPU,
@@ -215,7 +244,15 @@ EXPORT_SYMBOL(ath9k_hw_btcoex_enable);
215void ath9k_hw_btcoex_disable(struct ath_hw *ah) 244void ath9k_hw_btcoex_disable(struct ath_hw *ah)
216{ 245{
217 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; 246 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
218 247 int i;
248
249 btcoex_hw->enabled = false;
250 if (btcoex_hw->scheme == ATH_BTCOEX_CFG_MCI) {
251 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
252 for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
253 REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
254 btcoex_hw->wlan_weight[i]);
255 }
219 ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0); 256 ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
220 257
221 ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio, 258 ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
@@ -228,49 +265,27 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
228 if (AR_SREV_9300_20_OR_LATER(ah)) { 265 if (AR_SREV_9300_20_OR_LATER(ah)) {
229 REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, 0); 266 REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, 0);
230 REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, 0); 267 REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, 0);
231 REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS0, 0); 268 for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
232 REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS1, 0); 269 REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS(i), 0);
233 REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS2, 0);
234 REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS3, 0);
235 } else 270 } else
236 REG_WRITE(ah, AR_BT_COEX_WEIGHT, 0); 271 REG_WRITE(ah, AR_BT_COEX_WEIGHT, 0);
237 272
238 } 273 }
239
240 ah->btcoex_hw.enabled = false;
241} 274}
242EXPORT_SYMBOL(ath9k_hw_btcoex_disable); 275EXPORT_SYMBOL(ath9k_hw_btcoex_disable);
243 276
244static void ar9003_btcoex_bt_stomp(struct ath_hw *ah, 277static void ar9003_btcoex_bt_stomp(struct ath_hw *ah,
245 enum ath_stomp_type stomp_type) 278 enum ath_stomp_type stomp_type)
246{ 279{
247 ah->bt_coex_bt_weight[0] = AR9300_BT_WGHT; 280 struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
248 ah->bt_coex_bt_weight[1] = AR9300_BT_WGHT; 281 const u32 *weight = AR_SREV_9462(ah) ? ar9003_wlan_weights[stomp_type] :
249 ah->bt_coex_bt_weight[2] = AR9300_BT_WGHT; 282 ar9462_wlan_weights[stomp_type];
250 ah->bt_coex_bt_weight[3] = AR9300_BT_WGHT; 283 int i;
251 284
252 285 for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
253 switch (stomp_type) { 286 btcoex->bt_weight[i] = AR9300_BT_WGHT;
254 case ATH_BTCOEX_STOMP_ALL: 287 btcoex->wlan_weight[i] = weight[i];
255 ah->bt_coex_wlan_weight[0] = AR9300_STOMP_ALL_WLAN_WGHT0;
256 ah->bt_coex_wlan_weight[1] = AR9300_STOMP_ALL_WLAN_WGHT1;
257 break;
258 case ATH_BTCOEX_STOMP_LOW:
259 ah->bt_coex_wlan_weight[0] = AR9300_STOMP_LOW_WLAN_WGHT0;
260 ah->bt_coex_wlan_weight[1] = AR9300_STOMP_LOW_WLAN_WGHT1;
261 break;
262 case ATH_BTCOEX_STOMP_NONE:
263 ah->bt_coex_wlan_weight[0] = AR9300_STOMP_NONE_WLAN_WGHT0;
264 ah->bt_coex_wlan_weight[1] = AR9300_STOMP_NONE_WLAN_WGHT1;
265 break;
266
267 default:
268 ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
269 "Invalid Stomptype\n");
270 break;
271 } 288 }
272
273 ath9k_hw_btcoex_enable(ah);
274} 289}
275 290
276/* 291/*
@@ -302,7 +317,5 @@ void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
302 "Invalid Stomptype\n"); 317 "Invalid Stomptype\n");
303 break; 318 break;
304 } 319 }
305
306 ath9k_hw_btcoex_enable(ah);
307} 320}
308EXPORT_SYMBOL(ath9k_hw_btcoex_bt_stomp); 321EXPORT_SYMBOL(ath9k_hw_btcoex_bt_stomp);
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 234f77689b14..d5e5db1faad9 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -36,18 +36,22 @@
36#define ATH_BT_CNT_THRESHOLD 3 36#define ATH_BT_CNT_THRESHOLD 3
37#define ATH_BT_CNT_SCAN_THRESHOLD 15 37#define ATH_BT_CNT_SCAN_THRESHOLD 15
38 38
39#define AR9300_NUM_BT_WEIGHTS 4
40#define AR9300_NUM_WLAN_WEIGHTS 4
39/* Defines the BT AR_BT_COEX_WGHT used */ 41/* Defines the BT AR_BT_COEX_WGHT used */
40enum ath_stomp_type { 42enum ath_stomp_type {
41 ATH_BTCOEX_NO_STOMP,
42 ATH_BTCOEX_STOMP_ALL, 43 ATH_BTCOEX_STOMP_ALL,
43 ATH_BTCOEX_STOMP_LOW, 44 ATH_BTCOEX_STOMP_LOW,
44 ATH_BTCOEX_STOMP_NONE 45 ATH_BTCOEX_STOMP_NONE,
46 ATH_BTCOEX_STOMP_LOW_FTP,
47 ATH_BTCOEX_STOMP_MAX
45}; 48};
46 49
47enum ath_btcoex_scheme { 50enum ath_btcoex_scheme {
48 ATH_BTCOEX_CFG_NONE, 51 ATH_BTCOEX_CFG_NONE,
49 ATH_BTCOEX_CFG_2WIRE, 52 ATH_BTCOEX_CFG_2WIRE,
50 ATH_BTCOEX_CFG_3WIRE, 53 ATH_BTCOEX_CFG_3WIRE,
54 ATH_BTCOEX_CFG_MCI,
51}; 55};
52 56
53struct ath_btcoex_hw { 57struct ath_btcoex_hw {
@@ -59,6 +63,8 @@ struct ath_btcoex_hw {
59 u32 bt_coex_mode; /* Register setting for AR_BT_COEX_MODE */ 63 u32 bt_coex_mode; /* Register setting for AR_BT_COEX_MODE */
60 u32 bt_coex_weights; /* Register setting for AR_BT_COEX_WEIGHT */ 64 u32 bt_coex_weights; /* Register setting for AR_BT_COEX_WEIGHT */
61 u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */ 65 u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */
66 u32 bt_weight[AR9300_NUM_BT_WEIGHTS];
67 u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
62}; 68};
63 69
64void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah); 70void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 2c279dcaf4ba..e4ae08e07719 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -198,6 +198,7 @@ static void ath_btcoex_period_timer(unsigned long data)
198 ath9k_hw_btcoex_bt_stomp(ah, is_btscan ? ATH_BTCOEX_STOMP_ALL : 198 ath9k_hw_btcoex_bt_stomp(ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
199 btcoex->bt_stomp_type); 199 btcoex->bt_stomp_type);
200 200
201 ath9k_hw_btcoex_enable(ah);
201 spin_unlock_bh(&btcoex->btcoex_lock); 202 spin_unlock_bh(&btcoex->btcoex_lock);
202 203
203 if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) { 204 if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
@@ -240,6 +241,7 @@ static void ath_btcoex_no_stomp_timer(void *arg)
240 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL) 241 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
241 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW); 242 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
242 243
244 ath9k_hw_btcoex_enable(ah);
243 spin_unlock_bh(&btcoex->btcoex_lock); 245 spin_unlock_bh(&btcoex->btcoex_lock);
244 ath9k_ps_restore(sc); 246 ath9k_ps_restore(sc);
245} 247}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index e3a02eb8e0cc..ce606b618e0b 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -80,6 +80,7 @@ static void ath_btcoex_period_work(struct work_struct *work)
80 ath9k_hw_btcoex_bt_stomp(priv->ah, is_btscan ? ATH_BTCOEX_STOMP_ALL : 80 ath9k_hw_btcoex_bt_stomp(priv->ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
81 btcoex->bt_stomp_type); 81 btcoex->bt_stomp_type);
82 82
83 ath9k_hw_btcoex_enable(priv->ah);
83 timer_period = is_btscan ? btcoex->btscan_no_stomp : 84 timer_period = is_btscan ? btcoex->btscan_no_stomp :
84 btcoex->btcoex_no_stomp; 85 btcoex->btcoex_no_stomp;
85 ieee80211_queue_delayed_work(priv->hw, &priv->duty_cycle_work, 86 ieee80211_queue_delayed_work(priv->hw, &priv->duty_cycle_work,
@@ -108,6 +109,7 @@ static void ath_btcoex_duty_cycle_work(struct work_struct *work)
108 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE); 109 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
109 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL) 110 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
110 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW); 111 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
112 ath9k_hw_btcoex_enable(priv->ah);
111} 113}
112 114
113void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv) 115void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv)
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 33e8f2f9d425..3cb878c28ccf 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -59,9 +59,6 @@
59#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa 59#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa
60#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab 60#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab
61 61
62#define AR9300_NUM_BT_WEIGHTS 4
63#define AR9300_NUM_WLAN_WEIGHTS 4
64
65#define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1) 62#define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1)
66 63
67#define ATH_DEFAULT_NOISE_FLOOR -95 64#define ATH_DEFAULT_NOISE_FLOOR -95
@@ -802,8 +799,6 @@ struct ath_hw {
802 799
803 /* Bluetooth coexistance */ 800 /* Bluetooth coexistance */
804 struct ath_btcoex_hw btcoex_hw; 801 struct ath_btcoex_hw btcoex_hw;
805 u32 bt_coex_bt_weight[AR9300_NUM_BT_WEIGHTS];
806 u32 bt_coex_wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
807 802
808 u32 intr_txqs; 803 u32 intr_txqs;
809 u8 txchainmask; 804 u8 txchainmask;
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 67b862cdae6d..4c8e296f663b 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -1838,11 +1838,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1838 if (sc->sc_flags & SC_OP_RXFLUSH) 1838 if (sc->sc_flags & SC_OP_RXFLUSH)
1839 goto requeue_drop_frag; 1839 goto requeue_drop_frag;
1840 1840
1841 retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
1842 rxs, &decrypt_error);
1843 if (retval)
1844 goto requeue_drop_frag;
1845
1846 rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; 1841 rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
1847 if (rs.rs_tstamp > tsf_lower && 1842 if (rs.rs_tstamp > tsf_lower &&
1848 unlikely(rs.rs_tstamp - tsf_lower > 0x10000000)) 1843 unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
@@ -1852,6 +1847,11 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1852 unlikely(tsf_lower - rs.rs_tstamp > 0x10000000)) 1847 unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
1853 rxs->mactime += 0x100000000ULL; 1848 rxs->mactime += 0x100000000ULL;
1854 1849
1850 retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
1851 rxs, &decrypt_error);
1852 if (retval)
1853 goto requeue_drop_frag;
1854
1855 /* Ensure we always have an skb to requeue once we are done 1855 /* Ensure we always have an skb to requeue once we are done
1856 * processing the current buffer's skb */ 1856 * processing the current buffer's skb */
1857 requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC); 1857 requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 8fcb7e9e8399..45910975d853 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -1752,19 +1752,10 @@ enum {
1752 1752
1753#define AR_BT_COEX_WL_WEIGHTS0 0x8174 1753#define AR_BT_COEX_WL_WEIGHTS0 0x8174
1754#define AR_BT_COEX_WL_WEIGHTS1 0x81c4 1754#define AR_BT_COEX_WL_WEIGHTS1 0x81c4
1755#define AR_MCI_COEX_WL_WEIGHTS(_i) (0x18b0 + (_i << 2))
1756#define AR_BT_COEX_BT_WEIGHTS(_i) (0x83ac + (_i << 2))
1755 1757
1756#define AR_BT_COEX_BT_WEIGHTS0 0x83ac 1758#define AR9300_BT_WGHT 0xcccc4444
1757#define AR_BT_COEX_BT_WEIGHTS1 0x83b0
1758#define AR_BT_COEX_BT_WEIGHTS2 0x83b4
1759#define AR_BT_COEX_BT_WEIGHTS3 0x83b8
1760
1761#define AR9300_BT_WGHT 0xcccc4444
1762#define AR9300_STOMP_ALL_WLAN_WGHT0 0xfffffff0
1763#define AR9300_STOMP_ALL_WLAN_WGHT1 0xfffffff0
1764#define AR9300_STOMP_LOW_WLAN_WGHT0 0x88888880
1765#define AR9300_STOMP_LOW_WLAN_WGHT1 0x88888880
1766#define AR9300_STOMP_NONE_WLAN_WGHT0 0x00000000
1767#define AR9300_STOMP_NONE_WLAN_WGHT1 0x00000000
1768 1759
1769#define AR_BT_COEX_MODE2 0x817c 1760#define AR_BT_COEX_MODE2 0x817c
1770#define AR_BT_BCN_MISS_THRESH 0x000000ff 1761#define AR_BT_BCN_MISS_THRESH 0x000000ff
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 59472e1605cd..d19a9ee9d057 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -314,7 +314,7 @@ static void carl9170_tx_release(struct kref *ref)
314 * feedback either [CTL_REQ_TX_STATUS not set] 314 * feedback either [CTL_REQ_TX_STATUS not set]
315 */ 315 */
316 316
317 dev_kfree_skb_any(skb); 317 ieee80211_free_txskb(ar->hw, skb);
318 return; 318 return;
319 } else { 319 } else {
320 /* 320 /*
@@ -1432,7 +1432,7 @@ void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1432 1432
1433err_free: 1433err_free:
1434 ar->tx_dropped++; 1434 ar->tx_dropped++;
1435 dev_kfree_skb_any(skb); 1435 ieee80211_free_txskb(ar->hw, skb);
1436} 1436}
1437 1437
1438void carl9170_tx_scheduler(struct ar9170 *ar) 1438void carl9170_tx_scheduler(struct ar9170 *ar)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index 025fa0eb6f47..39e305443d7e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -16,6 +16,8 @@
16 * File contents: support functions for PCI/PCIe 16 * File contents: support functions for PCI/PCIe
17 */ 17 */
18 18
19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
19#include <linux/delay.h> 21#include <linux/delay.h>
20#include <linux/pci.h> 22#include <linux/pci.h>
21 23
@@ -349,9 +351,9 @@
349#define PCI_FORCEHT(si) (PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID)) 351#define PCI_FORCEHT(si) (PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID))
350 352
351#ifdef BCMDBG 353#ifdef BCMDBG
352#define SI_MSG(args) printk args 354#define SI_MSG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
353#else 355#else
354#define SI_MSG(args) 356#define SI_MSG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
355#endif /* BCMDBG */ 357#endif /* BCMDBG */
356 358
357#define GOODCOREADDR(x, b) \ 359#define GOODCOREADDR(x, b) \
@@ -1073,7 +1075,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
1073 1075
1074 /* scan for cores */ 1076 /* scan for cores */
1075 if (socitype == SOCI_AI) { 1077 if (socitype == SOCI_AI) {
1076 SI_MSG(("Found chip type AI (0x%08x)\n", w)); 1078 SI_MSG("Found chip type AI (0x%08x)\n", w);
1077 /* pass chipc address instead of original core base */ 1079 /* pass chipc address instead of original core base */
1078 ai_scan(&sii->pub, cc); 1080 ai_scan(&sii->pub, cc);
1079 } else { 1081 } else {
@@ -1129,7 +1131,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
1129 * set chipControl register bit 15 1131 * set chipControl register bit 15
1130 */ 1132 */
1131 if (sih->chiprev == 0) { 1133 if (sih->chiprev == 0) {
1132 SI_MSG(("Applying 43224A0 WARs\n")); 1134 SI_MSG("Applying 43224A0 WARs\n");
1133 ai_corereg(sih, SI_CC_IDX, 1135 ai_corereg(sih, SI_CC_IDX,
1134 offsetof(struct chipcregs, chipcontrol), 1136 offsetof(struct chipcregs, chipcontrol),
1135 CCTRL43224_GPIO_TOGGLE, 1137 CCTRL43224_GPIO_TOGGLE,
@@ -1138,7 +1140,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
1138 CCTRL_43224A0_12MA_LED_DRIVE); 1140 CCTRL_43224A0_12MA_LED_DRIVE);
1139 } 1141 }
1140 if (sih->chiprev >= 1) { 1142 if (sih->chiprev >= 1) {
1141 SI_MSG(("Applying 43224B0+ WARs\n")); 1143 SI_MSG("Applying 43224B0+ WARs\n");
1142 si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE, 1144 si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
1143 CCTRL_43224B0_12MA_LED_DRIVE); 1145 CCTRL_43224B0_12MA_LED_DRIVE);
1144 } 1146 }
@@ -1149,7 +1151,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
1149 * enable 12 mA drive strenth for 4313 and 1151 * enable 12 mA drive strenth for 4313 and
1150 * set chipControl register bit 1 1152 * set chipControl register bit 1
1151 */ 1153 */
1152 SI_MSG(("Applying 4313 WARs\n")); 1154 SI_MSG("Applying 4313 WARs\n");
1153 si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE, 1155 si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE,
1154 CCTRL_4313_12MA_LED_DRIVE); 1156 CCTRL_4313_12MA_LED_DRIVE);
1155 } 1157 }
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index e286fb4d4813..0bb8c37e979e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -13,6 +13,9 @@
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN 13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
16#include <linux/slab.h> 19#include <linux/slab.h>
17#include <linux/delay.h> 20#include <linux/delay.h>
18#include <linux/pci.h> 21#include <linux/pci.h>
@@ -168,26 +171,25 @@
168 171
169/* debug/trace */ 172/* debug/trace */
170#ifdef BCMDBG 173#ifdef BCMDBG
171#define DMA_ERROR(args) \ 174#define DMA_ERROR(fmt, ...) \
172 do { \ 175do { \
173 if (!(*di->msg_level & 1)) \ 176 if (*di->msg_level & 1) \
174 ; \ 177 pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
175 else \ 178} while (0)
176 printk args; \ 179#define DMA_TRACE(fmt, ...) \
177 } while (0) 180do { \
178#define DMA_TRACE(args) \ 181 if (*di->msg_level & 2) \
179 do { \ 182 pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
180 if (!(*di->msg_level & 2)) \ 183} while (0)
181 ; \
182 else \
183 printk args; \
184 } while (0)
185#else 184#else
186#define DMA_ERROR(args) 185#define DMA_ERROR(fmt, ...) \
187#define DMA_TRACE(args) 186 no_printk(fmt, ##__VA_ARGS__)
187#define DMA_TRACE(fmt, ...) \
188 no_printk(fmt, ##__VA_ARGS__)
188#endif /* BCMDBG */ 189#endif /* BCMDBG */
189 190
190#define DMA_NONE(args) 191#define DMA_NONE(fmt, ...) \
192 no_printk(fmt, ##__VA_ARGS__)
191 193
192#define MAXNAMEL 8 /* 8 char names */ 194#define MAXNAMEL 8 /* 8 char names */
193 195
@@ -361,7 +363,7 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
361 uint dmactrlflags; 363 uint dmactrlflags;
362 364
363 if (di == NULL) { 365 if (di == NULL) {
364 DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n")); 366 DMA_ERROR("NULL dma handle\n");
365 return 0; 367 return 0;
366 } 368 }
367 369
@@ -412,13 +414,13 @@ static bool _dma_isaddrext(struct dma_info *di)
412 /* not all tx or rx channel are available */ 414 /* not all tx or rx channel are available */
413 if (di->d64txregs != NULL) { 415 if (di->d64txregs != NULL) {
414 if (!_dma64_addrext(di->d64txregs)) 416 if (!_dma64_addrext(di->d64txregs))
415 DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have " 417 DMA_ERROR("%s: DMA64 tx doesn't have AE set\n",
416 "AE set\n", di->name)); 418 di->name);
417 return true; 419 return true;
418 } else if (di->d64rxregs != NULL) { 420 } else if (di->d64rxregs != NULL) {
419 if (!_dma64_addrext(di->d64rxregs)) 421 if (!_dma64_addrext(di->d64rxregs))
420 DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have " 422 DMA_ERROR("%s: DMA64 rx doesn't have AE set\n",
421 "AE set\n", di->name)); 423 di->name);
422 return true; 424 return true;
423 } 425 }
424 426
@@ -519,8 +521,8 @@ static bool dma64_alloc(struct dma_info *di, uint direction)
519 va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits, 521 va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
520 &alloced, &di->txdpaorig); 522 &alloced, &di->txdpaorig);
521 if (va == NULL) { 523 if (va == NULL) {
522 DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd)" 524 DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
523 " failed\n", di->name)); 525 di->name);
524 return false; 526 return false;
525 } 527 }
526 align = (1 << align_bits); 528 align = (1 << align_bits);
@@ -533,8 +535,8 @@ static bool dma64_alloc(struct dma_info *di, uint direction)
533 va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits, 535 va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
534 &alloced, &di->rxdpaorig); 536 &alloced, &di->rxdpaorig);
535 if (va == NULL) { 537 if (va == NULL) {
536 DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd)" 538 DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
537 " failed\n", di->name)); 539 di->name);
538 return false; 540 return false;
539 } 541 }
540 align = (1 << align_bits); 542 align = (1 << align_bits);
@@ -583,11 +585,10 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
583 */ 585 */
584 _dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0); 586 _dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
585 587
586 DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d " 588 DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n",
587 "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d " 589 name, "DMA64",
588 "dmaregstx %p dmaregsrx %p\n", name, "DMA64", 590 di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
589 di->dma.dmactrlflags, ntxd, nrxd, rxbufsize, 591 rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx);
590 rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
591 592
592 /* make a private copy of our callers name */ 593 /* make a private copy of our callers name */
593 strncpy(di->name, name, MAXNAMEL); 594 strncpy(di->name, name, MAXNAMEL);
@@ -645,8 +646,8 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
645 di->dmadesc_align = 4; /* 16 byte alignment */ 646 di->dmadesc_align = 4; /* 16 byte alignment */
646 } 647 }
647 648
648 DMA_NONE(("DMA descriptor align_needed %d, align %d\n", 649 DMA_NONE("DMA descriptor align_needed %d, align %d\n",
649 di->aligndesc_4k, di->dmadesc_align)); 650 di->aligndesc_4k, di->dmadesc_align);
650 651
651 /* allocate tx packet pointer vector */ 652 /* allocate tx packet pointer vector */
652 if (ntxd) { 653 if (ntxd) {
@@ -684,21 +685,21 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
684 685
685 if ((di->ddoffsetlow != 0) && !di->addrext) { 686 if ((di->ddoffsetlow != 0) && !di->addrext) {
686 if (di->txdpa > SI_PCI_DMA_SZ) { 687 if (di->txdpa > SI_PCI_DMA_SZ) {
687 DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not " 688 DMA_ERROR("%s: txdpa 0x%x: addrext not supported\n",
688 "supported\n", di->name, (u32)di->txdpa)); 689 di->name, (u32)di->txdpa);
689 goto fail; 690 goto fail;
690 } 691 }
691 if (di->rxdpa > SI_PCI_DMA_SZ) { 692 if (di->rxdpa > SI_PCI_DMA_SZ) {
692 DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not " 693 DMA_ERROR("%s: rxdpa 0x%x: addrext not supported\n",
693 "supported\n", di->name, (u32)di->rxdpa)); 694 di->name, (u32)di->rxdpa);
694 goto fail; 695 goto fail;
695 } 696 }
696 } 697 }
697 698
698 DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x " 699 DMA_TRACE("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
699 "dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow, 700 di->ddoffsetlow, di->ddoffsethigh,
700 di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh, 701 di->dataoffsetlow, di->dataoffsethigh,
701 di->addrext)); 702 di->addrext);
702 703
703 return (struct dma_pub *) di; 704 return (struct dma_pub *) di;
704 705
@@ -744,7 +745,7 @@ void dma_detach(struct dma_pub *pub)
744{ 745{
745 struct dma_info *di = (struct dma_info *)pub; 746 struct dma_info *di = (struct dma_info *)pub;
746 747
747 DMA_TRACE(("%s: dma_detach\n", di->name)); 748 DMA_TRACE("%s:\n", di->name);
748 749
749 /* free dma descriptor rings */ 750 /* free dma descriptor rings */
750 if (di->txd64) 751 if (di->txd64)
@@ -812,7 +813,7 @@ static void _dma_rxenable(struct dma_info *di)
812 uint dmactrlflags = di->dma.dmactrlflags; 813 uint dmactrlflags = di->dma.dmactrlflags;
813 u32 control; 814 u32 control;
814 815
815 DMA_TRACE(("%s: dma_rxenable\n", di->name)); 816 DMA_TRACE("%s:\n", di->name);
816 817
817 control = 818 control =
818 (R_REG(&di->d64rxregs->control) & D64_RC_AE) | 819 (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
@@ -832,7 +833,7 @@ void dma_rxinit(struct dma_pub *pub)
832{ 833{
833 struct dma_info *di = (struct dma_info *)pub; 834 struct dma_info *di = (struct dma_info *)pub;
834 835
835 DMA_TRACE(("%s: dma_rxinit\n", di->name)); 836 DMA_TRACE("%s:\n", di->name);
836 837
837 if (di->nrxd == 0) 838 if (di->nrxd == 0)
838 return; 839 return;
@@ -926,7 +927,7 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
926 return 0; 927 return 0;
927 928
928 len = le16_to_cpu(*(__le16 *) (p->data)); 929 len = le16_to_cpu(*(__le16 *) (p->data));
929 DMA_TRACE(("%s: dma_rx len %d\n", di->name, len)); 930 DMA_TRACE("%s: dma_rx len %d\n", di->name, len);
930 dma_spin_for_len(len, p); 931 dma_spin_for_len(len, p);
931 932
932 /* set actual length */ 933 /* set actual length */
@@ -953,14 +954,14 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
953 D64_RS0_CD_MASK) - 954 D64_RS0_CD_MASK) -
954 di->rcvptrbase) & D64_RS0_CD_MASK, 955 di->rcvptrbase) & D64_RS0_CD_MASK,
955 struct dma64desc); 956 struct dma64desc);
956 DMA_ERROR(("dma_rx, rxin %d rxout %d, hw_curr %d\n", 957 DMA_ERROR("rxin %d rxout %d, hw_curr %d\n",
957 di->rxin, di->rxout, cur)); 958 di->rxin, di->rxout, cur);
958 } 959 }
959#endif /* BCMDBG */ 960#endif /* BCMDBG */
960 961
961 if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) { 962 if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
962 DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", 963 DMA_ERROR("%s: bad frame length (%d)\n",
963 di->name, len)); 964 di->name, len);
964 skb_queue_walk_safe(&dma_frames, p, next) { 965 skb_queue_walk_safe(&dma_frames, p, next) {
965 skb_unlink(p, &dma_frames); 966 skb_unlink(p, &dma_frames);
966 brcmu_pkt_buf_free_skb(p); 967 brcmu_pkt_buf_free_skb(p);
@@ -977,7 +978,7 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
977 978
978static bool dma64_rxidle(struct dma_info *di) 979static bool dma64_rxidle(struct dma_info *di)
979{ 980{
980 DMA_TRACE(("%s: dma_rxidle\n", di->name)); 981 DMA_TRACE("%s:\n", di->name);
981 982
982 if (di->nrxd == 0) 983 if (di->nrxd == 0)
983 return true; 984 return true;
@@ -1017,7 +1018,7 @@ bool dma_rxfill(struct dma_pub *pub)
1017 1018
1018 n = di->nrxpost - nrxdactive(di, rxin, rxout); 1019 n = di->nrxpost - nrxdactive(di, rxin, rxout);
1019 1020
1020 DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n)); 1021 DMA_TRACE("%s: post %d\n", di->name, n);
1021 1022
1022 if (di->rxbufsize > BCMEXTRAHDROOM) 1023 if (di->rxbufsize > BCMEXTRAHDROOM)
1023 extra_offset = di->rxextrahdrroom; 1024 extra_offset = di->rxextrahdrroom;
@@ -1030,11 +1031,9 @@ bool dma_rxfill(struct dma_pub *pub)
1030 p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset); 1031 p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
1031 1032
1032 if (p == NULL) { 1033 if (p == NULL) {
1033 DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", 1034 DMA_ERROR("%s: out of rxbufs\n", di->name);
1034 di->name));
1035 if (i == 0 && dma64_rxidle(di)) { 1035 if (i == 0 && dma64_rxidle(di)) {
1036 DMA_ERROR(("%s: rxfill64: ring is empty !\n", 1036 DMA_ERROR("%s: ring is empty !\n", di->name);
1037 di->name));
1038 ring_empty = true; 1037 ring_empty = true;
1039 } 1038 }
1040 di->dma.rxnobuf++; 1039 di->dma.rxnobuf++;
@@ -1079,7 +1078,7 @@ void dma_rxreclaim(struct dma_pub *pub)
1079 struct dma_info *di = (struct dma_info *)pub; 1078 struct dma_info *di = (struct dma_info *)pub;
1080 struct sk_buff *p; 1079 struct sk_buff *p;
1081 1080
1082 DMA_TRACE(("%s: dma_rxreclaim\n", di->name)); 1081 DMA_TRACE("%s:\n", di->name);
1083 1082
1084 while ((p = _dma_getnextrxp(di, true))) 1083 while ((p = _dma_getnextrxp(di, true)))
1085 brcmu_pkt_buf_free_skb(p); 1084 brcmu_pkt_buf_free_skb(p);
@@ -1110,7 +1109,7 @@ void dma_txinit(struct dma_pub *pub)
1110 struct dma_info *di = (struct dma_info *)pub; 1109 struct dma_info *di = (struct dma_info *)pub;
1111 u32 control = D64_XC_XE; 1110 u32 control = D64_XC_XE;
1112 1111
1113 DMA_TRACE(("%s: dma_txinit\n", di->name)); 1112 DMA_TRACE("%s:\n", di->name);
1114 1113
1115 if (di->ntxd == 0) 1114 if (di->ntxd == 0)
1116 return; 1115 return;
@@ -1142,7 +1141,7 @@ void dma_txsuspend(struct dma_pub *pub)
1142{ 1141{
1143 struct dma_info *di = (struct dma_info *)pub; 1142 struct dma_info *di = (struct dma_info *)pub;
1144 1143
1145 DMA_TRACE(("%s: dma_txsuspend\n", di->name)); 1144 DMA_TRACE("%s:\n", di->name);
1146 1145
1147 if (di->ntxd == 0) 1146 if (di->ntxd == 0)
1148 return; 1147 return;
@@ -1154,7 +1153,7 @@ void dma_txresume(struct dma_pub *pub)
1154{ 1153{
1155 struct dma_info *di = (struct dma_info *)pub; 1154 struct dma_info *di = (struct dma_info *)pub;
1156 1155
1157 DMA_TRACE(("%s: dma_txresume\n", di->name)); 1156 DMA_TRACE("%s:\n", di->name);
1158 1157
1159 if (di->ntxd == 0) 1158 if (di->ntxd == 0)
1160 return; 1159 return;
@@ -1176,11 +1175,11 @@ void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
1176 struct dma_info *di = (struct dma_info *)pub; 1175 struct dma_info *di = (struct dma_info *)pub;
1177 struct sk_buff *p; 1176 struct sk_buff *p;
1178 1177
1179 DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, 1178 DMA_TRACE("%s: %s\n",
1180 (range == DMA_RANGE_ALL) ? "all" : 1179 di->name,
1181 ((range == 1180 range == DMA_RANGE_ALL ? "all" :
1182 DMA_RANGE_TRANSMITTED) ? "transmitted" : 1181 range == DMA_RANGE_TRANSMITTED ? "transmitted" :
1183 "transferred"))); 1182 "transferred");
1184 1183
1185 if (di->txin == di->txout) 1184 if (di->txin == di->txout)
1186 return; 1185 return;
@@ -1250,7 +1249,7 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
1250 u32 flags = 0; 1249 u32 flags = 0;
1251 dma_addr_t pa; 1250 dma_addr_t pa;
1252 1251
1253 DMA_TRACE(("%s: dma_txfast\n", di->name)); 1252 DMA_TRACE("%s:\n", di->name);
1254 1253
1255 txout = di->txout; 1254 txout = di->txout;
1256 1255
@@ -1314,7 +1313,7 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
1314 return 0; 1313 return 0;
1315 1314
1316 outoftxd: 1315 outoftxd:
1317 DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name)); 1316 DMA_ERROR("%s: out of txds !!!\n", di->name);
1318 brcmu_pkt_buf_free_skb(p0); 1317 brcmu_pkt_buf_free_skb(p0);
1319 di->dma.txavail = 0; 1318 di->dma.txavail = 0;
1320 di->dma.txnobuf++; 1319 di->dma.txnobuf++;
@@ -1338,11 +1337,11 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
1338 u16 active_desc; 1337 u16 active_desc;
1339 struct sk_buff *txp; 1338 struct sk_buff *txp;
1340 1339
1341 DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, 1340 DMA_TRACE("%s: %s\n",
1342 (range == DMA_RANGE_ALL) ? "all" : 1341 di->name,
1343 ((range == 1342 range == DMA_RANGE_ALL ? "all" :
1344 DMA_RANGE_TRANSMITTED) ? "transmitted" : 1343 range == DMA_RANGE_TRANSMITTED ? "transmitted" :
1345 "transferred"))); 1344 "transferred");
1346 1345
1347 if (di->ntxd == 0) 1346 if (di->ntxd == 0)
1348 return NULL; 1347 return NULL;
@@ -1402,8 +1401,8 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
1402 return txp; 1401 return txp;
1403 1402
1404 bogus: 1403 bogus:
1405 DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d " 1404 DMA_NONE("bogus curr: start %d end %d txout %d\n",
1406 "force %d\n", start, end, di->txout, forceall)); 1405 start, end, di->txout);
1407 return NULL; 1406 return NULL;
1408} 1407}
1409 1408
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 8457e969eb4f..ba3e4b5cba71 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -1550,11 +1550,10 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx)
1550 if (le32_to_cpu(hdr->idx) == idx) { 1550 if (le32_to_cpu(hdr->idx) == idx) {
1551 pdata = wl->fw.fw_bin[i]->data + 1551 pdata = wl->fw.fw_bin[i]->data +
1552 le32_to_cpu(hdr->offset); 1552 le32_to_cpu(hdr->offset);
1553 *pbuf = kmalloc(len, GFP_ATOMIC); 1553 *pbuf = kmemdup(pdata, len, GFP_ATOMIC);
1554 if (*pbuf == NULL) 1554 if (*pbuf == NULL)
1555 goto fail; 1555 goto fail;
1556 1556
1557 memcpy(*pbuf, pdata, len);
1558 return 0; 1557 return 0;
1559 } 1558 }
1560 } 1559 }
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 045a93645a3d..18054d9c6688 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -3872,8 +3872,8 @@ static void prism2_get_drvinfo(struct net_device *dev,
3872 iface = netdev_priv(dev); 3872 iface = netdev_priv(dev);
3873 local = iface->local; 3873 local = iface->local;
3874 3874
3875 strncpy(info->driver, "hostap", sizeof(info->driver) - 1); 3875 strlcpy(info->driver, "hostap", sizeof(info->driver));
3876 snprintf(info->fw_version, sizeof(info->fw_version) - 1, 3876 snprintf(info->fw_version, sizeof(info->fw_version),
3877 "%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff, 3877 "%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff,
3878 (local->sta_fw_ver >> 8) & 0xff, 3878 (local->sta_fw_ver >> 8) & 0xff,
3879 local->sta_fw_ver & 0xff); 3879 local->sta_fw_ver & 0xff);
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 127e9c63beaf..a0e5c21d3657 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -5981,8 +5981,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
5981 struct ipw2100_priv *priv = libipw_priv(dev); 5981 struct ipw2100_priv *priv = libipw_priv(dev);
5982 char fw_ver[64], ucode_ver[64]; 5982 char fw_ver[64], ucode_ver[64];
5983 5983
5984 strcpy(info->driver, DRV_NAME); 5984 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
5985 strcpy(info->version, DRV_VERSION); 5985 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
5986 5986
5987 ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver)); 5987 ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver));
5988 ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver)); 5988 ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver));
@@ -5990,7 +5990,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
5990 snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s", 5990 snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s",
5991 fw_ver, priv->eeprom_version, ucode_ver); 5991 fw_ver, priv->eeprom_version, ucode_ver);
5992 5992
5993 strcpy(info->bus_info, pci_name(priv->pci_dev)); 5993 strlcpy(info->bus_info, pci_name(priv->pci_dev),
5994 sizeof(info->bus_info));
5994} 5995}
5995 5996
5996static u32 ipw2100_ethtool_get_link(struct net_device *dev) 5997static u32 ipw2100_ethtool_get_link(struct net_device *dev)
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 99575884ff52..018a8deb88a8 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -10548,8 +10548,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10548 char date[32]; 10548 char date[32];
10549 u32 len; 10549 u32 len;
10550 10550
10551 strcpy(info->driver, DRV_NAME); 10551 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
10552 strcpy(info->version, DRV_VERSION); 10552 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
10553 10553
10554 len = sizeof(vers); 10554 len = sizeof(vers);
10555 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len); 10555 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
@@ -10558,7 +10558,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10558 10558
10559 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)", 10559 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10560 vers, date); 10560 vers, date);
10561 strcpy(info->bus_info, pci_name(p->pci_dev)); 10561 strlcpy(info->bus_info, pci_name(p->pci_dev),
10562 sizeof(info->bus_info));
10562 info->eedump_len = IPW_EEPROM_IMAGE_SIZE; 10563 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10563} 10564}
10564 10565
diff --git a/drivers/net/wireless/iwlegacy/3945-debug.c b/drivers/net/wireless/iwlegacy/3945-debug.c
new file mode 100644
index 000000000000..5e1a19fd354d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-debug.c
@@ -0,0 +1,505 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "common.h"
30#include "3945.h"
31
32static int
33il3945_stats_flag(struct il_priv *il, char *buf, int bufsz)
34{
35 int p = 0;
36
37 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
38 le32_to_cpu(il->_3945.stats.flag));
39 if (le32_to_cpu(il->_3945.stats.flag) & UCODE_STATS_CLEAR_MSK)
40 p += scnprintf(buf + p, bufsz - p,
41 "\tStatistics have been cleared\n");
42 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
43 (le32_to_cpu(il->_3945.stats.flag) &
44 UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" : "5.2 GHz");
45 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
46 (le32_to_cpu(il->_3945.stats.flag) &
47 UCODE_STATS_NARROW_BAND_MSK) ? "enabled" : "disabled");
48 return p;
49}
50
51ssize_t
52il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
53 size_t count, loff_t *ppos)
54{
55 struct il_priv *il = file->private_data;
56 int pos = 0;
57 char *buf;
58 int bufsz =
59 sizeof(struct iwl39_stats_rx_phy) * 40 +
60 sizeof(struct iwl39_stats_rx_non_phy) * 40 + 400;
61 ssize_t ret;
62 struct iwl39_stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
63 struct iwl39_stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
64 struct iwl39_stats_rx_non_phy *general, *accum_general;
65 struct iwl39_stats_rx_non_phy *delta_general, *max_general;
66
67 if (!il_is_alive(il))
68 return -EAGAIN;
69
70 buf = kzalloc(bufsz, GFP_KERNEL);
71 if (!buf) {
72 IL_ERR("Can not allocate Buffer\n");
73 return -ENOMEM;
74 }
75
76 /*
77 * The statistic information display here is based on
78 * the last stats notification from uCode
79 * might not reflect the current uCode activity
80 */
81 ofdm = &il->_3945.stats.rx.ofdm;
82 cck = &il->_3945.stats.rx.cck;
83 general = &il->_3945.stats.rx.general;
84 accum_ofdm = &il->_3945.accum_stats.rx.ofdm;
85 accum_cck = &il->_3945.accum_stats.rx.cck;
86 accum_general = &il->_3945.accum_stats.rx.general;
87 delta_ofdm = &il->_3945.delta_stats.rx.ofdm;
88 delta_cck = &il->_3945.delta_stats.rx.cck;
89 delta_general = &il->_3945.delta_stats.rx.general;
90 max_ofdm = &il->_3945.max_delta.rx.ofdm;
91 max_cck = &il->_3945.max_delta.rx.cck;
92 max_general = &il->_3945.max_delta.rx.general;
93
94 pos += il3945_stats_flag(il, buf, bufsz);
95 pos +=
96 scnprintf(buf + pos, bufsz - pos,
97 "%-32s current"
98 "acumulative delta max\n",
99 "Statistics_Rx - OFDM:");
100 pos +=
101 scnprintf(buf + pos, bufsz - pos,
102 " %-30s %10u %10u %10u %10u\n", "ina_cnt:",
103 le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
104 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
105 pos +=
106 scnprintf(buf + pos, bufsz - pos,
107 " %-30s %10u %10u %10u %10u\n", "fina_cnt:",
108 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
109 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
110 pos +=
111 scnprintf(buf + pos, bufsz - pos,
112 " %-30s %10u %10u %10u %10u\n", "plcp_err:",
113 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
114 delta_ofdm->plcp_err, max_ofdm->plcp_err);
115 pos +=
116 scnprintf(buf + pos, bufsz - pos,
117 " %-30s %10u %10u %10u %10u\n", "crc32_err:",
118 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
119 delta_ofdm->crc32_err, max_ofdm->crc32_err);
120 pos +=
121 scnprintf(buf + pos, bufsz - pos,
122 " %-30s %10u %10u %10u %10u\n", "overrun_err:",
123 le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
124 delta_ofdm->overrun_err, max_ofdm->overrun_err);
125 pos +=
126 scnprintf(buf + pos, bufsz - pos,
127 " %-30s %10u %10u %10u %10u\n", "early_overrun_err:",
128 le32_to_cpu(ofdm->early_overrun_err),
129 accum_ofdm->early_overrun_err,
130 delta_ofdm->early_overrun_err,
131 max_ofdm->early_overrun_err);
132 pos +=
133 scnprintf(buf + pos, bufsz - pos,
134 " %-30s %10u %10u %10u %10u\n", "crc32_good:",
135 le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
136 delta_ofdm->crc32_good, max_ofdm->crc32_good);
137 pos +=
138 scnprintf(buf + pos, bufsz - pos,
139 " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
140 le32_to_cpu(ofdm->false_alarm_cnt),
141 accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
142 max_ofdm->false_alarm_cnt);
143 pos +=
144 scnprintf(buf + pos, bufsz - pos,
145 " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:",
146 le32_to_cpu(ofdm->fina_sync_err_cnt),
147 accum_ofdm->fina_sync_err_cnt,
148 delta_ofdm->fina_sync_err_cnt,
149 max_ofdm->fina_sync_err_cnt);
150 pos +=
151 scnprintf(buf + pos, bufsz - pos,
152 " %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
153 le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
154 delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
155 pos +=
156 scnprintf(buf + pos, bufsz - pos,
157 " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
158 le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
159 delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
160 pos +=
161 scnprintf(buf + pos, bufsz - pos,
162 " %-30s %10u %10u %10u %10u\n", "unresponded_rts:",
163 le32_to_cpu(ofdm->unresponded_rts),
164 accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
165 max_ofdm->unresponded_rts);
166 pos +=
167 scnprintf(buf + pos, bufsz - pos,
168 " %-30s %10u %10u %10u %10u\n",
169 "rxe_frame_lmt_ovrun:",
170 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
171 accum_ofdm->rxe_frame_limit_overrun,
172 delta_ofdm->rxe_frame_limit_overrun,
173 max_ofdm->rxe_frame_limit_overrun);
174 pos +=
175 scnprintf(buf + pos, bufsz - pos,
176 " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
177 le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
178 delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
179 pos +=
180 scnprintf(buf + pos, bufsz - pos,
181 " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
182 le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
183 delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
184
185 pos +=
186 scnprintf(buf + pos, bufsz - pos,
187 "%-32s current"
188 "acumulative delta max\n",
189 "Statistics_Rx - CCK:");
190 pos +=
191 scnprintf(buf + pos, bufsz - pos,
192 " %-30s %10u %10u %10u %10u\n", "ina_cnt:",
193 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
194 delta_cck->ina_cnt, max_cck->ina_cnt);
195 pos +=
196 scnprintf(buf + pos, bufsz - pos,
197 " %-30s %10u %10u %10u %10u\n", "fina_cnt:",
198 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
199 delta_cck->fina_cnt, max_cck->fina_cnt);
200 pos +=
201 scnprintf(buf + pos, bufsz - pos,
202 " %-30s %10u %10u %10u %10u\n", "plcp_err:",
203 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
204 delta_cck->plcp_err, max_cck->plcp_err);
205 pos +=
206 scnprintf(buf + pos, bufsz - pos,
207 " %-30s %10u %10u %10u %10u\n", "crc32_err:",
208 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
209 delta_cck->crc32_err, max_cck->crc32_err);
210 pos +=
211 scnprintf(buf + pos, bufsz - pos,
212 " %-30s %10u %10u %10u %10u\n", "overrun_err:",
213 le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
214 delta_cck->overrun_err, max_cck->overrun_err);
215 pos +=
216 scnprintf(buf + pos, bufsz - pos,
217 " %-30s %10u %10u %10u %10u\n", "early_overrun_err:",
218 le32_to_cpu(cck->early_overrun_err),
219 accum_cck->early_overrun_err,
220 delta_cck->early_overrun_err, max_cck->early_overrun_err);
221 pos +=
222 scnprintf(buf + pos, bufsz - pos,
223 " %-30s %10u %10u %10u %10u\n", "crc32_good:",
224 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
225 delta_cck->crc32_good, max_cck->crc32_good);
226 pos +=
227 scnprintf(buf + pos, bufsz - pos,
228 " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
229 le32_to_cpu(cck->false_alarm_cnt),
230 accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
231 max_cck->false_alarm_cnt);
232 pos +=
233 scnprintf(buf + pos, bufsz - pos,
234 " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:",
235 le32_to_cpu(cck->fina_sync_err_cnt),
236 accum_cck->fina_sync_err_cnt,
237 delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
238 pos +=
239 scnprintf(buf + pos, bufsz - pos,
240 " %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
241 le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
242 delta_cck->sfd_timeout, max_cck->sfd_timeout);
243 pos +=
244 scnprintf(buf + pos, bufsz - pos,
245 " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
246 le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
247 delta_cck->fina_timeout, max_cck->fina_timeout);
248 pos +=
249 scnprintf(buf + pos, bufsz - pos,
250 " %-30s %10u %10u %10u %10u\n", "unresponded_rts:",
251 le32_to_cpu(cck->unresponded_rts),
252 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
253 max_cck->unresponded_rts);
254 pos +=
255 scnprintf(buf + pos, bufsz - pos,
256 " %-30s %10u %10u %10u %10u\n",
257 "rxe_frame_lmt_ovrun:",
258 le32_to_cpu(cck->rxe_frame_limit_overrun),
259 accum_cck->rxe_frame_limit_overrun,
260 delta_cck->rxe_frame_limit_overrun,
261 max_cck->rxe_frame_limit_overrun);
262 pos +=
263 scnprintf(buf + pos, bufsz - pos,
264 " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
265 le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
266 delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
267 pos +=
268 scnprintf(buf + pos, bufsz - pos,
269 " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
270 le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
271 delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
272
273 pos +=
274 scnprintf(buf + pos, bufsz - pos,
275 "%-32s current"
276 "acumulative delta max\n",
277 "Statistics_Rx - GENERAL:");
278 pos +=
279 scnprintf(buf + pos, bufsz - pos,
280 " %-30s %10u %10u %10u %10u\n", "bogus_cts:",
281 le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
282 delta_general->bogus_cts, max_general->bogus_cts);
283 pos +=
284 scnprintf(buf + pos, bufsz - pos,
285 " %-30s %10u %10u %10u %10u\n", "bogus_ack:",
286 le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
287 delta_general->bogus_ack, max_general->bogus_ack);
288 pos +=
289 scnprintf(buf + pos, bufsz - pos,
290 " %-30s %10u %10u %10u %10u\n", "non_bssid_frames:",
291 le32_to_cpu(general->non_bssid_frames),
292 accum_general->non_bssid_frames,
293 delta_general->non_bssid_frames,
294 max_general->non_bssid_frames);
295 pos +=
296 scnprintf(buf + pos, bufsz - pos,
297 " %-30s %10u %10u %10u %10u\n", "filtered_frames:",
298 le32_to_cpu(general->filtered_frames),
299 accum_general->filtered_frames,
300 delta_general->filtered_frames,
301 max_general->filtered_frames);
302 pos +=
303 scnprintf(buf + pos, bufsz - pos,
304 " %-30s %10u %10u %10u %10u\n",
305 "non_channel_beacons:",
306 le32_to_cpu(general->non_channel_beacons),
307 accum_general->non_channel_beacons,
308 delta_general->non_channel_beacons,
309 max_general->non_channel_beacons);
310
311 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
312 kfree(buf);
313 return ret;
314}
315
316ssize_t
317il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
318 size_t count, loff_t *ppos)
319{
320 struct il_priv *il = file->private_data;
321 int pos = 0;
322 char *buf;
323 int bufsz = (sizeof(struct iwl39_stats_tx) * 48) + 250;
324 ssize_t ret;
325 struct iwl39_stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
326
327 if (!il_is_alive(il))
328 return -EAGAIN;
329
330 buf = kzalloc(bufsz, GFP_KERNEL);
331 if (!buf) {
332 IL_ERR("Can not allocate Buffer\n");
333 return -ENOMEM;
334 }
335
336 /*
337 * The statistic information display here is based on
338 * the last stats notification from uCode
339 * might not reflect the current uCode activity
340 */
341 tx = &il->_3945.stats.tx;
342 accum_tx = &il->_3945.accum_stats.tx;
343 delta_tx = &il->_3945.delta_stats.tx;
344 max_tx = &il->_3945.max_delta.tx;
345 pos += il3945_stats_flag(il, buf, bufsz);
346 pos +=
347 scnprintf(buf + pos, bufsz - pos,
348 "%-32s current"
349 "acumulative delta max\n",
350 "Statistics_Tx:");
351 pos +=
352 scnprintf(buf + pos, bufsz - pos,
353 " %-30s %10u %10u %10u %10u\n", "preamble:",
354 le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
355 delta_tx->preamble_cnt, max_tx->preamble_cnt);
356 pos +=
357 scnprintf(buf + pos, bufsz - pos,
358 " %-30s %10u %10u %10u %10u\n", "rx_detected_cnt:",
359 le32_to_cpu(tx->rx_detected_cnt),
360 accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
361 max_tx->rx_detected_cnt);
362 pos +=
363 scnprintf(buf + pos, bufsz - pos,
364 " %-30s %10u %10u %10u %10u\n", "bt_prio_defer_cnt:",
365 le32_to_cpu(tx->bt_prio_defer_cnt),
366 accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
367 max_tx->bt_prio_defer_cnt);
368 pos +=
369 scnprintf(buf + pos, bufsz - pos,
370 " %-30s %10u %10u %10u %10u\n", "bt_prio_kill_cnt:",
371 le32_to_cpu(tx->bt_prio_kill_cnt),
372 accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
373 max_tx->bt_prio_kill_cnt);
374 pos +=
375 scnprintf(buf + pos, bufsz - pos,
376 " %-30s %10u %10u %10u %10u\n", "few_bytes_cnt:",
377 le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
378 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
379 pos +=
380 scnprintf(buf + pos, bufsz - pos,
381 " %-30s %10u %10u %10u %10u\n", "cts_timeout:",
382 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
383 delta_tx->cts_timeout, max_tx->cts_timeout);
384 pos +=
385 scnprintf(buf + pos, bufsz - pos,
386 " %-30s %10u %10u %10u %10u\n", "ack_timeout:",
387 le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
388 delta_tx->ack_timeout, max_tx->ack_timeout);
389 pos +=
390 scnprintf(buf + pos, bufsz - pos,
391 " %-30s %10u %10u %10u %10u\n", "expected_ack_cnt:",
392 le32_to_cpu(tx->expected_ack_cnt),
393 accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
394 max_tx->expected_ack_cnt);
395 pos +=
396 scnprintf(buf + pos, bufsz - pos,
397 " %-30s %10u %10u %10u %10u\n", "actual_ack_cnt:",
398 le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
399 delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
400
401 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
402 kfree(buf);
403 return ret;
404}
405
406ssize_t
407il3945_ucode_general_stats_read(struct file *file, char __user *user_buf,
408 size_t count, loff_t *ppos)
409{
410 struct il_priv *il = file->private_data;
411 int pos = 0;
412 char *buf;
413 int bufsz = sizeof(struct iwl39_stats_general) * 10 + 300;
414 ssize_t ret;
415 struct iwl39_stats_general *general, *accum_general;
416 struct iwl39_stats_general *delta_general, *max_general;
417 struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
418 struct iwl39_stats_div *div, *accum_div, *delta_div, *max_div;
419
420 if (!il_is_alive(il))
421 return -EAGAIN;
422
423 buf = kzalloc(bufsz, GFP_KERNEL);
424 if (!buf) {
425 IL_ERR("Can not allocate Buffer\n");
426 return -ENOMEM;
427 }
428
429 /*
430 * The statistic information display here is based on
431 * the last stats notification from uCode
432 * might not reflect the current uCode activity
433 */
434 general = &il->_3945.stats.general;
435 dbg = &il->_3945.stats.general.dbg;
436 div = &il->_3945.stats.general.div;
437 accum_general = &il->_3945.accum_stats.general;
438 delta_general = &il->_3945.delta_stats.general;
439 max_general = &il->_3945.max_delta.general;
440 accum_dbg = &il->_3945.accum_stats.general.dbg;
441 delta_dbg = &il->_3945.delta_stats.general.dbg;
442 max_dbg = &il->_3945.max_delta.general.dbg;
443 accum_div = &il->_3945.accum_stats.general.div;
444 delta_div = &il->_3945.delta_stats.general.div;
445 max_div = &il->_3945.max_delta.general.div;
446 pos += il3945_stats_flag(il, buf, bufsz);
447 pos +=
448 scnprintf(buf + pos, bufsz - pos,
449 "%-32s current"
450 "acumulative delta max\n",
451 "Statistics_General:");
452 pos +=
453 scnprintf(buf + pos, bufsz - pos,
454 " %-30s %10u %10u %10u %10u\n", "burst_check:",
455 le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
456 delta_dbg->burst_check, max_dbg->burst_check);
457 pos +=
458 scnprintf(buf + pos, bufsz - pos,
459 " %-30s %10u %10u %10u %10u\n", "burst_count:",
460 le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
461 delta_dbg->burst_count, max_dbg->burst_count);
462 pos +=
463 scnprintf(buf + pos, bufsz - pos,
464 " %-30s %10u %10u %10u %10u\n", "sleep_time:",
465 le32_to_cpu(general->sleep_time),
466 accum_general->sleep_time, delta_general->sleep_time,
467 max_general->sleep_time);
468 pos +=
469 scnprintf(buf + pos, bufsz - pos,
470 " %-30s %10u %10u %10u %10u\n", "slots_out:",
471 le32_to_cpu(general->slots_out), accum_general->slots_out,
472 delta_general->slots_out, max_general->slots_out);
473 pos +=
474 scnprintf(buf + pos, bufsz - pos,
475 " %-30s %10u %10u %10u %10u\n", "slots_idle:",
476 le32_to_cpu(general->slots_idle),
477 accum_general->slots_idle, delta_general->slots_idle,
478 max_general->slots_idle);
479 pos +=
480 scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
481 le32_to_cpu(general->ttl_timestamp));
482 pos +=
483 scnprintf(buf + pos, bufsz - pos,
484 " %-30s %10u %10u %10u %10u\n", "tx_on_a:",
485 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
486 delta_div->tx_on_a, max_div->tx_on_a);
487 pos +=
488 scnprintf(buf + pos, bufsz - pos,
489 " %-30s %10u %10u %10u %10u\n", "tx_on_b:",
490 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
491 delta_div->tx_on_b, max_div->tx_on_b);
492 pos +=
493 scnprintf(buf + pos, bufsz - pos,
494 " %-30s %10u %10u %10u %10u\n", "exec_time:",
495 le32_to_cpu(div->exec_time), accum_div->exec_time,
496 delta_div->exec_time, max_div->exec_time);
497 pos +=
498 scnprintf(buf + pos, bufsz - pos,
499 " %-30s %10u %10u %10u %10u\n", "probe_time:",
500 le32_to_cpu(div->probe_time), accum_div->probe_time,
501 delta_div->probe_time, max_div->probe_time);
502 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
503 kfree(buf);
504 return ret;
505}
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
new file mode 100644
index 000000000000..daef6b58f6cc
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -0,0 +1,3977 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/firmware.h>
44#include <linux/etherdevice.h>
45#include <linux/if_arp.h>
46
47#include <net/ieee80211_radiotap.h>
48#include <net/mac80211.h>
49
50#include <asm/div64.h>
51
52#define DRV_NAME "iwl3945"
53
54#include "commands.h"
55#include "common.h"
56#include "3945.h"
57#include "iwl-spectrum.h"
58
59/*
60 * module name, copyright, version, etc.
61 */
62
63#define DRV_DESCRIPTION \
64"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
65
66#ifdef CONFIG_IWLEGACY_DEBUG
67#define VD "d"
68#else
69#define VD
70#endif
71
72/*
73 * add "s" to indicate spectrum measurement included.
74 * we add it here to be consistent with previous releases in which
75 * this was configurable.
76 */
77#define DRV_VERSION IWLWIFI_VERSION VD "s"
78#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
79#define DRV_AUTHOR "<ilw@linux.intel.com>"
80
81MODULE_DESCRIPTION(DRV_DESCRIPTION);
82MODULE_VERSION(DRV_VERSION);
83MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
84MODULE_LICENSE("GPL");
85
86 /* module parameters */
87struct il_mod_params il3945_mod_params = {
88 .sw_crypto = 1,
89 .restart_fw = 1,
90 .disable_hw_scan = 1,
91 /* the rest are 0 by default */
92};
93
94/**
95 * il3945_get_antenna_flags - Get antenna flags for RXON command
96 * @il: eeprom and antenna fields are used to determine antenna flags
97 *
98 * il->eeprom39 is used to determine if antenna AUX/MAIN are reversed
99 * il3945_mod_params.antenna specifies the antenna diversity mode:
100 *
101 * IL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
102 * IL_ANTENNA_MAIN - Force MAIN antenna
103 * IL_ANTENNA_AUX - Force AUX antenna
104 */
105__le32
106il3945_get_antenna_flags(const struct il_priv *il)
107{
108 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
109
110 switch (il3945_mod_params.antenna) {
111 case IL_ANTENNA_DIVERSITY:
112 return 0;
113
114 case IL_ANTENNA_MAIN:
115 if (eeprom->antenna_switch_type)
116 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
117 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
118
119 case IL_ANTENNA_AUX:
120 if (eeprom->antenna_switch_type)
121 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
122 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
123 }
124
125 /* bad antenna selector value */
126 IL_ERR("Bad antenna selector value (0x%x)\n",
127 il3945_mod_params.antenna);
128
129 return 0; /* "diversity" is default if error */
130}
131
132static int
133il3945_set_ccmp_dynamic_key_info(struct il_priv *il,
134 struct ieee80211_key_conf *keyconf, u8 sta_id)
135{
136 unsigned long flags;
137 __le16 key_flags = 0;
138 int ret;
139
140 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
141 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
142
143 if (sta_id == il->ctx.bcast_sta_id)
144 key_flags |= STA_KEY_MULTICAST_MSK;
145
146 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
147 keyconf->hw_key_idx = keyconf->keyidx;
148 key_flags &= ~STA_KEY_FLG_INVALID;
149
150 spin_lock_irqsave(&il->sta_lock, flags);
151 il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
152 il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
153 memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
154
155 memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);
156
157 if ((il->stations[sta_id].sta.key.
158 key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
159 il->stations[sta_id].sta.key.key_offset =
160 il_get_free_ucode_key_idx(il);
161 /* else, we are overriding an existing key => no need to allocated room
162 * in uCode. */
163
164 WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
165 "no space for a new key");
166
167 il->stations[sta_id].sta.key.key_flags = key_flags;
168 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
169 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
170
171 D_INFO("hwcrypto: modify ucode station key info\n");
172
173 ret = il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
174
175 spin_unlock_irqrestore(&il->sta_lock, flags);
176
177 return ret;
178}
179
180static int
181il3945_set_tkip_dynamic_key_info(struct il_priv *il,
182 struct ieee80211_key_conf *keyconf, u8 sta_id)
183{
184 return -EOPNOTSUPP;
185}
186
187static int
188il3945_set_wep_dynamic_key_info(struct il_priv *il,
189 struct ieee80211_key_conf *keyconf, u8 sta_id)
190{
191 return -EOPNOTSUPP;
192}
193
194static int
195il3945_clear_sta_key_info(struct il_priv *il, u8 sta_id)
196{
197 unsigned long flags;
198 struct il_addsta_cmd sta_cmd;
199
200 spin_lock_irqsave(&il->sta_lock, flags);
201 memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
202 memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
203 il->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
204 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
205 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
206 memcpy(&sta_cmd, &il->stations[sta_id].sta,
207 sizeof(struct il_addsta_cmd));
208 spin_unlock_irqrestore(&il->sta_lock, flags);
209
210 D_INFO("hwcrypto: clear ucode station key info\n");
211 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
212}
213
214static int
215il3945_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
216 u8 sta_id)
217{
218 int ret = 0;
219
220 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
221
222 switch (keyconf->cipher) {
223 case WLAN_CIPHER_SUITE_CCMP:
224 ret = il3945_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
225 break;
226 case WLAN_CIPHER_SUITE_TKIP:
227 ret = il3945_set_tkip_dynamic_key_info(il, keyconf, sta_id);
228 break;
229 case WLAN_CIPHER_SUITE_WEP40:
230 case WLAN_CIPHER_SUITE_WEP104:
231 ret = il3945_set_wep_dynamic_key_info(il, keyconf, sta_id);
232 break;
233 default:
234 IL_ERR("Unknown alg: %s alg=%x\n", __func__, keyconf->cipher);
235 ret = -EINVAL;
236 }
237
238 D_WEP("Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
239 keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
240
241 return ret;
242}
243
244static int
245il3945_remove_static_key(struct il_priv *il)
246{
247 int ret = -EOPNOTSUPP;
248
249 return ret;
250}
251
252static int
253il3945_set_static_key(struct il_priv *il, struct ieee80211_key_conf *key)
254{
255 if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
256 key->cipher == WLAN_CIPHER_SUITE_WEP104)
257 return -EOPNOTSUPP;
258
259 IL_ERR("Static key invalid: cipher %x\n", key->cipher);
260 return -EINVAL;
261}
262
263static void
264il3945_clear_free_frames(struct il_priv *il)
265{
266 struct list_head *element;
267
268 D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
269
270 while (!list_empty(&il->free_frames)) {
271 element = il->free_frames.next;
272 list_del(element);
273 kfree(list_entry(element, struct il3945_frame, list));
274 il->frames_count--;
275 }
276
277 if (il->frames_count) {
278 IL_WARN("%d frames still in use. Did we lose one?\n",
279 il->frames_count);
280 il->frames_count = 0;
281 }
282}
283
284static struct il3945_frame *
285il3945_get_free_frame(struct il_priv *il)
286{
287 struct il3945_frame *frame;
288 struct list_head *element;
289 if (list_empty(&il->free_frames)) {
290 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
291 if (!frame) {
292 IL_ERR("Could not allocate frame!\n");
293 return NULL;
294 }
295
296 il->frames_count++;
297 return frame;
298 }
299
300 element = il->free_frames.next;
301 list_del(element);
302 return list_entry(element, struct il3945_frame, list);
303}
304
305static void
306il3945_free_frame(struct il_priv *il, struct il3945_frame *frame)
307{
308 memset(frame, 0, sizeof(*frame));
309 list_add(&frame->list, &il->free_frames);
310}
311
312unsigned int
313il3945_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
314 int left)
315{
316
317 if (!il_is_associated(il) || !il->beacon_skb)
318 return 0;
319
320 if (il->beacon_skb->len > left)
321 return 0;
322
323 memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
324
325 return il->beacon_skb->len;
326}
327
328static int
329il3945_send_beacon_cmd(struct il_priv *il)
330{
331 struct il3945_frame *frame;
332 unsigned int frame_size;
333 int rc;
334 u8 rate;
335
336 frame = il3945_get_free_frame(il);
337
338 if (!frame) {
339 IL_ERR("Could not obtain free frame buffer for beacon "
340 "command.\n");
341 return -ENOMEM;
342 }
343
344 rate = il_get_lowest_plcp(il, &il->ctx);
345
346 frame_size = il3945_hw_get_beacon_cmd(il, frame, rate);
347
348 rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
349
350 il3945_free_frame(il, frame);
351
352 return rc;
353}
354
355static void
356il3945_unset_hw_params(struct il_priv *il)
357{
358 if (il->_3945.shared_virt)
359 dma_free_coherent(&il->pci_dev->dev,
360 sizeof(struct il3945_shared),
361 il->_3945.shared_virt, il->_3945.shared_phys);
362}
363
/*
 * il3945_build_tx_cmd_hwcrypto - fill the security fields of a Tx command
 *
 * Copies the destination station's cipher selection and key material into
 * the Tx command so the uCode can encrypt the frame in hardware.
 */
static void
il3945_build_tx_cmd_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
			     struct il_device_cmd *cmd,
			     struct sk_buff *skb_frag, int sta_id)
{
	struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
	struct il_hw_key *keyinfo = &il->stations[sta_id].keyinfo;

	tx_cmd->sec_ctl = 0;

	switch (keyinfo->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
		D_TX("tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		/* NOTE(review): no hardware setup for TKIP here — presumably
		 * TKIP is handled elsewhere (e.g. in software); confirm. */
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		/* Encode the hw key index into the WEP security control. */
		tx_cmd->sec_ctl |=
		    TX_CMD_SEC_WEP | (info->control.hw_key->
				      hw_key_idx & TX_CMD_SEC_MSK) <<
		    TX_CMD_SEC_SHIFT;

		/* WEP key bytes start at offset 3 in the command key array. */
		memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);

		D_TX("Configuring packet for WEP encryption " "with key %d\n",
		     info->control.hw_key->hw_key_idx);
		break;

	default:
		IL_ERR("Unknown encode cipher %x\n", keyinfo->cipher);
		break;
	}
}
404
405/*
406 * handle build C_TX command notification.
407 */
408static void
409il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd,
410 struct ieee80211_tx_info *info,
411 struct ieee80211_hdr *hdr, u8 std_id)
412{
413 struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
414 __le32 tx_flags = tx_cmd->tx_flags;
415 __le16 fc = hdr->frame_control;
416
417 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
418 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
419 tx_flags |= TX_CMD_FLG_ACK_MSK;
420 if (ieee80211_is_mgmt(fc))
421 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
422 if (ieee80211_is_probe_resp(fc) &&
423 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
424 tx_flags |= TX_CMD_FLG_TSF_MSK;
425 } else {
426 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
427 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
428 }
429
430 tx_cmd->sta_id = std_id;
431 if (ieee80211_has_morefrags(fc))
432 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
433
434 if (ieee80211_is_data_qos(fc)) {
435 u8 *qc = ieee80211_get_qos_ctl(hdr);
436 tx_cmd->tid_tspec = qc[0] & 0xf;
437 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
438 } else {
439 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
440 }
441
442 il_tx_cmd_protection(il, info, fc, &tx_flags);
443
444 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
445 if (ieee80211_is_mgmt(fc)) {
446 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
447 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
448 else
449 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
450 } else {
451 tx_cmd->timeout.pm_frame_timeout = 0;
452 }
453
454 tx_cmd->driver_txop = 0;
455 tx_cmd->tx_flags = tx_flags;
456 tx_cmd->next_frame_len = 0;
457}
458
459/*
460 * start C_TX command process
461 */
462static int
463il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
464{
465 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
466 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
467 struct il3945_tx_cmd *tx_cmd;
468 struct il_tx_queue *txq = NULL;
469 struct il_queue *q = NULL;
470 struct il_device_cmd *out_cmd;
471 struct il_cmd_meta *out_meta;
472 dma_addr_t phys_addr;
473 dma_addr_t txcmd_phys;
474 int txq_id = skb_get_queue_mapping(skb);
475 u16 len, idx, hdr_len;
476 u8 id;
477 u8 unicast;
478 u8 sta_id;
479 u8 tid = 0;
480 __le16 fc;
481 u8 wait_write_ptr = 0;
482 unsigned long flags;
483
484 spin_lock_irqsave(&il->lock, flags);
485 if (il_is_rfkill(il)) {
486 D_DROP("Dropping - RF KILL\n");
487 goto drop_unlock;
488 }
489
490 if ((ieee80211_get_tx_rate(il->hw, info)->hw_value & 0xFF) ==
491 IL_INVALID_RATE) {
492 IL_ERR("ERROR: No TX rate available.\n");
493 goto drop_unlock;
494 }
495
496 unicast = !is_multicast_ether_addr(hdr->addr1);
497 id = 0;
498
499 fc = hdr->frame_control;
500
501#ifdef CONFIG_IWLEGACY_DEBUG
502 if (ieee80211_is_auth(fc))
503 D_TX("Sending AUTH frame\n");
504 else if (ieee80211_is_assoc_req(fc))
505 D_TX("Sending ASSOC frame\n");
506 else if (ieee80211_is_reassoc_req(fc))
507 D_TX("Sending REASSOC frame\n");
508#endif
509
510 spin_unlock_irqrestore(&il->lock, flags);
511
512 hdr_len = ieee80211_hdrlen(fc);
513
514 /* Find idx into station table for destination station */
515 sta_id = il_sta_id_or_broadcast(il, &il->ctx, info->control.sta);
516 if (sta_id == IL_INVALID_STATION) {
517 D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
518 goto drop;
519 }
520
521 D_RATE("station Id %d\n", sta_id);
522
523 if (ieee80211_is_data_qos(fc)) {
524 u8 *qc = ieee80211_get_qos_ctl(hdr);
525 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
526 if (unlikely(tid >= MAX_TID_COUNT))
527 goto drop;
528 }
529
530 /* Descriptor for chosen Tx queue */
531 txq = &il->txq[txq_id];
532 q = &txq->q;
533
534 if ((il_queue_space(q) < q->high_mark))
535 goto drop;
536
537 spin_lock_irqsave(&il->lock, flags);
538
539 idx = il_get_cmd_idx(q, q->write_ptr, 0);
540
541 /* Set up driver data for this TFD */
542 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
543 txq->txb[q->write_ptr].skb = skb;
544 txq->txb[q->write_ptr].ctx = &il->ctx;
545
546 /* Init first empty entry in queue's array of Tx/cmd buffers */
547 out_cmd = txq->cmd[idx];
548 out_meta = &txq->meta[idx];
549 tx_cmd = (struct il3945_tx_cmd *)out_cmd->cmd.payload;
550 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
551 memset(tx_cmd, 0, sizeof(*tx_cmd));
552
553 /*
554 * Set up the Tx-command (not MAC!) header.
555 * Store the chosen Tx queue and TFD idx within the sequence field;
556 * after Tx, uCode's Tx response will return this value so driver can
557 * locate the frame within the tx queue and do post-tx processing.
558 */
559 out_cmd->hdr.cmd = C_TX;
560 out_cmd->hdr.sequence =
561 cpu_to_le16((u16)
562 (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
563
564 /* Copy MAC header from skb into command buffer */
565 memcpy(tx_cmd->hdr, hdr, hdr_len);
566
567 if (info->control.hw_key)
568 il3945_build_tx_cmd_hwcrypto(il, info, out_cmd, skb, sta_id);
569
570 /* TODO need this for burst mode later on */
571 il3945_build_tx_cmd_basic(il, out_cmd, info, hdr, sta_id);
572
573 /* set is_hcca to 0; it probably will never be implemented */
574 il3945_hw_build_tx_cmd_rate(il, out_cmd, info, hdr, sta_id, 0);
575
576 /* Total # bytes to be transmitted */
577 len = (u16) skb->len;
578 tx_cmd->len = cpu_to_le16(len);
579
580 il_dbg_log_tx_data_frame(il, len, hdr);
581 il_update_stats(il, true, fc, len);
582 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
583 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
584
585 if (!ieee80211_has_morefrags(hdr->frame_control)) {
586 txq->need_update = 1;
587 } else {
588 wait_write_ptr = 1;
589 txq->need_update = 0;
590 }
591
592 D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
593 D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
594 il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd));
595 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr,
596 ieee80211_hdrlen(fc));
597
598 /*
599 * Use the first empty entry in this queue's command buffer array
600 * to contain the Tx command and MAC header concatenated together
601 * (payload data will be in another buffer).
602 * Size of this varies, due to varying MAC header length.
603 * If end is not dword aligned, we'll have 2 extra bytes at the end
604 * of the MAC header (device reads on dword boundaries).
605 * We'll tell device about this padding later.
606 */
607 len =
608 sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) +
609 hdr_len;
610 len = (len + 3) & ~3;
611
612 /* Physical address of this Tx command's header (not MAC header!),
613 * within command buffer array. */
614 txcmd_phys =
615 pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE);
616 /* we do not map meta data ... so we can safely access address to
617 * provide to unmap command*/
618 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
619 dma_unmap_len_set(out_meta, len, len);
620
621 /* Add buffer containing Tx command and MAC(!) header to TFD's
622 * first entry */
623 il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1,
624 0);
625
626 /* Set up TFD's 2nd entry to point directly to remainder of skb,
627 * if any (802.11 null frames have no payload). */
628 len = skb->len - hdr_len;
629 if (len) {
630 phys_addr =
631 pci_map_single(il->pci_dev, skb->data + hdr_len, len,
632 PCI_DMA_TODEVICE);
633 il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr,
634 len, 0, U32_PAD(len));
635 }
636
637 /* Tell device the write idx *just past* this latest filled TFD */
638 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
639 il_txq_update_write_ptr(il, txq);
640 spin_unlock_irqrestore(&il->lock, flags);
641
642 if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
643 if (wait_write_ptr) {
644 spin_lock_irqsave(&il->lock, flags);
645 txq->need_update = 1;
646 il_txq_update_write_ptr(il, txq);
647 spin_unlock_irqrestore(&il->lock, flags);
648 }
649
650 il_stop_queue(il, txq);
651 }
652
653 return 0;
654
655drop_unlock:
656 spin_unlock_irqrestore(&il->lock, flags);
657drop:
658 return -1;
659}
660
/*
 * il3945_get_measurement - issue a C_SPECTRUM_MEASUREMENT to the uCode
 *
 * Builds a one-channel spectrum measurement command from @params and
 * sends it synchronously.  When associated, the requested start time is
 * converted from a TSF-relative value into a beacon-time value.
 *
 * Returns 0 when the uCode accepts the measurement, -EAGAIN when it
 * refuses, -EIO on a failed command, or the error from
 * il_send_cmd_sync().
 */
static int
il3945_get_measurement(struct il_priv *il,
		       struct ieee80211_measurement_params *params, u8 type)
{
	struct il_spectrum_cmd spectrum;
	struct il_rx_pkt *pkt;
	struct il_host_cmd cmd = {
		.id = C_SPECTRUM_MEASUREMENT,
		.data = (void *)&spectrum,
		.flags = CMD_WANT_SKB,
	};
	u32 add_time = le64_to_cpu(params->start_time);
	int rc;
	int spectrum_resp_status;
	int duration = le16_to_cpu(params->duration);
	struct il_rxon_context *ctx = &il->ctx;

	/* Translate the absolute TSF start time into a delta expressed in
	 * beacon intervals relative to the last received TSF. */
	if (il_is_associated(il))
		add_time =
		    il_usecs_to_beacons(il,
					le64_to_cpu(params->start_time) -
					il->_3945.last_tsf,
					le16_to_cpu(ctx->timing.
						    beacon_interval));

	memset(&spectrum, 0, sizeof(spectrum));

	spectrum.channel_count = cpu_to_le16(1);
	spectrum.flags =
	    RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
	spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
	cmd.len = sizeof(spectrum);
	spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));

	/* Anchor the start time to beacon timing when associated;
	 * otherwise start immediately. */
	if (il_is_associated(il))
		spectrum.start_time =
		    il_add_beacon_time(il, il->_3945.last_beacon_time, add_time,
				       le16_to_cpu(ctx->timing.
						   beacon_interval));
	else
		spectrum.start_time = 0;

	spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
	spectrum.channels[0].channel = params->channel;
	spectrum.channels[0].type = type;
	if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
		spectrum.flags |=
		    RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
		    RXON_FLG_TGG_PROTECT_MSK;

	rc = il_send_cmd_sync(il, &cmd);
	if (rc)
		return rc;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from N_RX_ON_ASSOC command\n");
		rc = -EIO;
	}

	/* Only statuses 0 and 1 are handled; any other value leaves rc
	 * as set above. */
	spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
	switch (spectrum_resp_status) {
	case 0:		/* Command will be handled */
		if (pkt->u.spectrum.id != 0xff) {
			D_INFO("Replaced existing measurement: %d\n",
			       pkt->u.spectrum.id);
			il->measurement_status &= ~MEASUREMENT_READY;
		}
		il->measurement_status |= MEASUREMENT_ACTIVE;
		rc = 0;
		break;

	case 1:		/* Command will not be handled */
		rc = -EAGAIN;
		break;
	}

	il_free_pages(il, cmd.reply_page);

	return rc;
}
742
/*
 * il3945_hdl_alive - handle the uCode ALIVE notification
 *
 * Caches the alive response (either the initialization or the runtime
 * variant, distinguished by ver_subtype) and schedules the matching
 * "alive start" work.  Runtime alive additionally disables uCode event
 * logging via il3945_disable_events().
 */
static void
il3945_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_alive_resp *palive;
	struct delayed_work *pwork;

	palive = &pkt->u.alive_frame;

	D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
	       palive->is_valid, palive->ver_type, palive->ver_subtype);

	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
		D_INFO("Initialization Alive received.\n");
		memcpy(&il->card_alive_init, &pkt->u.alive_frame,
		       sizeof(struct il_alive_resp));
		pwork = &il->init_alive_start;
	} else {
		D_INFO("Runtime Alive received.\n");
		memcpy(&il->card_alive, &pkt->u.alive_frame,
		       sizeof(struct il_alive_resp));
		pwork = &il->alive_start;
		il3945_disable_events(il);
	}

	/* We delay the ALIVE response by 5ms to
	 * give the HW RF Kill time to activate... */
	if (palive->is_valid == UCODE_VALID_OK)
		queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
	else
		IL_WARN("uCode did not respond OK.\n");
}
775
/* Debug-only trace of the C_ADD_STA response status.
 * 'pkt' exists only in debug builds; this compiles because D_RX is a
 * no-op macro that does not evaluate its arguments when
 * CONFIG_IWLEGACY_DEBUG is unset. */
static void
il3945_hdl_add_sta(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
#endif

	D_RX("Received C_ADD_STA: 0x%02X\n", pkt->u.status);
}
785
/* Handle the uCode beacon-status notification: record whether this
 * station is currently acting as the IBSS manager (beacon sender). */
static void
il3945_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il3945_beacon_notif *beacon = &(pkt->u.beacon_status);
#ifdef CONFIG_IWLEGACY_DEBUG
	u8 rate = beacon->beacon_notify_hdr.rate;

	D_RX("beacon status %x retries %d iss %d " "tsf %d %d rate %d\n",
	     le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
	     beacon->beacon_notify_hdr.failure_frame,
	     le32_to_cpu(beacon->ibss_mgr_status),
	     le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
#endif

	il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);

}
804
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void
il3945_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* Snapshot the status bits so we can detect whether the HW rfkill
	 * state actually changed below. */
	unsigned long status = il->status;

	IL_WARN("Card state received: HW:%s SW:%s\n",
		(flags & HW_CARD_DISABLED) ? "Kill" : "On",
		(flags & SW_CARD_DISABLED) ? "Kill" : "On");

	/* Block further host commands while the card state changes. */
	_il_wr(il, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	if (flags & HW_CARD_DISABLED)
		set_bit(S_RF_KILL_HW, &il->status);
	else
		clear_bit(S_RF_KILL_HW, &il->status);

	il_scan_cancel(il);

	/* Propagate a changed HW rfkill state to the wiphy layer;
	 * otherwise wake anyone waiting on command completion. */
	if ((test_bit(S_RF_KILL_HW, &status) !=
	     test_bit(S_RF_KILL_HW, &il->status)))
		wiphy_rfkill_set_hw_state(il->hw->wiphy,
					  test_bit(S_RF_KILL_HW, &il->status));
	else
		wake_up(&il->wait_command_queue);
}
834
835/**
836 * il3945_setup_handlers - Initialize Rx handler callbacks
837 *
838 * Setup the RX handlers for each of the reply types sent from the uCode
839 * to the host.
840 *
841 * This function chains into the hardware specific files for them to setup
842 * any hardware specific handlers as well.
843 */
844static void
845il3945_setup_handlers(struct il_priv *il)
846{
847 il->handlers[N_ALIVE] = il3945_hdl_alive;
848 il->handlers[C_ADD_STA] = il3945_hdl_add_sta;
849 il->handlers[N_ERROR] = il_hdl_error;
850 il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
851 il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
852 il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
853 il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
854 il->handlers[N_BEACON] = il3945_hdl_beacon;
855
856 /*
857 * The same handler is used for both the REPLY to a discrete
858 * stats request from the host as well as for the periodic
859 * stats notifications (after received beacons) from the uCode.
860 */
861 il->handlers[C_STATS] = il3945_hdl_c_stats;
862 il->handlers[N_STATS] = il3945_hdl_stats;
863
864 il_setup_rx_scan_handlers(il);
865 il->handlers[N_CARD_STATE] = il3945_hdl_card_state;
866
867 /* Set up hardware specific Rx handlers */
868 il3945_hw_handler_setup(il);
869}
870
871/************************** RX-FUNCTIONS ****************************/
872/*
873 * Rx theory of operation
874 *
875 * The host allocates 32 DMA target addresses and passes the host address
876 * to the firmware at register IL_RFDS_TBL_LOWER + N * RFD_SIZE where N is
877 * 0 to 31
878 *
879 * Rx Queue Indexes
880 * The host/firmware share two idx registers for managing the Rx buffers.
881 *
882 * The READ idx maps to the first position that the firmware may be writing
883 * to -- the driver can read up to (but not including) this position and get
884 * good data.
885 * The READ idx is managed by the firmware once the card is enabled.
886 *
887 * The WRITE idx maps to the last position the driver has read from -- the
888 * position preceding WRITE is the last slot the firmware can place a packet.
889 *
890 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
891 * WRITE = READ.
892 *
893 * During initialization, the host sets up the READ queue position to the first
894 * IDX position, and WRITE to the last (READ - 1 wrapped)
895 *
896 * When the firmware places a packet in a buffer, it will advance the READ idx
897 * and fire the RX interrupt. The driver can then query the READ idx and
898 * process as many packets as possible, moving the WRITE idx forward as it
899 * resets the Rx queue buffers with new memory.
900 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in il->rxq->rx_free. When
 *   il->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish il->rxq->rx_free.
 * + In il3945_rx_replenish (scheduled) if 'processed' != 'read' then
 *   il->rxq is replenished and the READ IDX is updated (updating the
 *   'processed' and 'read' driver indexes as well)
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from il->rxq. The driver 'processed' index is updated.
 * + The Host/Firmware il->rxq is replenished at tasklet time from the rx_free
 *   list. If there are no allocated buffers in il->rxq->rx_free, the READ
 *   IDX is not incremented and il->status(RX_STALLED) is set. If there
 *   were enough free buffers and RX_STALLED is set it is cleared.
914 *
915 *
916 * Driver sequence:
917 *
918 * il3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
919 * il3945_rx_queue_restock
920 * il3945_rx_queue_restock() Moves available buffers from rx_free into Rx
921 * queue, updates firmware pointers, and updates
922 * the WRITE idx. If insufficient rx_free buffers
923 * are available, schedules il3945_rx_replenish
924 *
925 * -- enable interrupts --
926 * ISR - il3945_rx() Detach il_rx_bufs from pool up to the
927 * READ IDX, detaching the SKB from the pool.
928 * Moves the packet buffer from queue to rx_used.
929 * Calls il3945_rx_queue_restock to refill any empty
930 * slots.
931 * ...
932 *
933 */
934
935/**
936 * il3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
937 */
938static inline __le32
939il3945_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
940{
941 return cpu_to_le32((u32) dma_addr);
942}
943
944/**
945 * il3945_rx_queue_restock - refill RX queue from pre-allocated pool
946 *
947 * If there are slots in the RX queue that need to be restocked,
948 * and we have free pre-allocated buffers, fill the ranks as much
949 * as we can, pulling from rx_free.
950 *
951 * This moves the 'write' idx forward to catch up with 'processed', and
952 * also updates the memory address in the firmware to reference the new
953 * target buffer.
954 */
955static void
956il3945_rx_queue_restock(struct il_priv *il)
957{
958 struct il_rx_queue *rxq = &il->rxq;
959 struct list_head *element;
960 struct il_rx_buf *rxb;
961 unsigned long flags;
962 int write;
963
964 spin_lock_irqsave(&rxq->lock, flags);
965 write = rxq->write & ~0x7;
966 while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
967 /* Get next free Rx buffer, remove from free list */
968 element = rxq->rx_free.next;
969 rxb = list_entry(element, struct il_rx_buf, list);
970 list_del(element);
971
972 /* Point to Rx buffer via next RBD in circular buffer */
973 rxq->bd[rxq->write] =
974 il3945_dma_addr2rbd_ptr(il, rxb->page_dma);
975 rxq->queue[rxq->write] = rxb;
976 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
977 rxq->free_count--;
978 }
979 spin_unlock_irqrestore(&rxq->lock, flags);
980 /* If the pre-allocated buffer pool is dropping low, schedule to
981 * refill it */
982 if (rxq->free_count <= RX_LOW_WATERMARK)
983 queue_work(il->workqueue, &il->rx_replenish);
984
985 /* If we've added more space for the firmware to place data, tell it.
986 * Increment device's write pointer in multiples of 8. */
987 if (rxq->write_actual != (rxq->write & ~0x7) ||
988 abs(rxq->write - rxq->read) > 7) {
989 spin_lock_irqsave(&rxq->lock, flags);
990 rxq->need_update = 1;
991 spin_unlock_irqrestore(&rxq->lock, flags);
992 il_rx_queue_update_write_ptr(il, rxq);
993 }
994}
995
996/**
997 * il3945_rx_replenish - Move all used packet from rx_used to rx_free
998 *
999 * When moving to rx_free an SKB is allocated for the slot.
1000 *
1001 * Also restock the Rx queue via il3945_rx_queue_restock.
1002 * This is called as a scheduled work item (except for during initialization)
1003 */
1004static void
1005il3945_rx_allocate(struct il_priv *il, gfp_t priority)
1006{
1007 struct il_rx_queue *rxq = &il->rxq;
1008 struct list_head *element;
1009 struct il_rx_buf *rxb;
1010 struct page *page;
1011 unsigned long flags;
1012 gfp_t gfp_mask = priority;
1013
1014 while (1) {
1015 spin_lock_irqsave(&rxq->lock, flags);
1016
1017 if (list_empty(&rxq->rx_used)) {
1018 spin_unlock_irqrestore(&rxq->lock, flags);
1019 return;
1020 }
1021 spin_unlock_irqrestore(&rxq->lock, flags);
1022
1023 if (rxq->free_count > RX_LOW_WATERMARK)
1024 gfp_mask |= __GFP_NOWARN;
1025
1026 if (il->hw_params.rx_page_order > 0)
1027 gfp_mask |= __GFP_COMP;
1028
1029 /* Alloc a new receive buffer */
1030 page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
1031 if (!page) {
1032 if (net_ratelimit())
1033 D_INFO("Failed to allocate SKB buffer.\n");
1034 if (rxq->free_count <= RX_LOW_WATERMARK &&
1035 net_ratelimit())
1036 IL_ERR("Failed to allocate SKB buffer with %0x."
1037 "Only %u free buffers remaining.\n",
1038 priority, rxq->free_count);
1039 /* We don't reschedule replenish work here -- we will
1040 * call the restock method and if it still needs
1041 * more buffers it will schedule replenish */
1042 break;
1043 }
1044
1045 spin_lock_irqsave(&rxq->lock, flags);
1046 if (list_empty(&rxq->rx_used)) {
1047 spin_unlock_irqrestore(&rxq->lock, flags);
1048 __free_pages(page, il->hw_params.rx_page_order);
1049 return;
1050 }
1051 element = rxq->rx_used.next;
1052 rxb = list_entry(element, struct il_rx_buf, list);
1053 list_del(element);
1054 spin_unlock_irqrestore(&rxq->lock, flags);
1055
1056 rxb->page = page;
1057 /* Get physical address of RB/SKB */
1058 rxb->page_dma =
1059 pci_map_page(il->pci_dev, page, 0,
1060 PAGE_SIZE << il->hw_params.rx_page_order,
1061 PCI_DMA_FROMDEVICE);
1062
1063 spin_lock_irqsave(&rxq->lock, flags);
1064
1065 list_add_tail(&rxb->list, &rxq->rx_free);
1066 rxq->free_count++;
1067 il->alloc_rxb_page++;
1068
1069 spin_unlock_irqrestore(&rxq->lock, flags);
1070 }
1071}
1072
/*
 * il3945_rx_queue_reset - return every Rx buffer to the rx_used list
 *
 * Unmaps and frees any page still attached to a pool entry, empties
 * rx_free/rx_used and queues the whole pool on rx_used, then resets the
 * read/write indexes so the queue looks fully consumed but not yet
 * restocked.
 */
void
il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
1102
/*
 * il3945_rx_replenish - work-queue callback to refill the Rx queue
 *
 * Runs in process context, so buffers are allocated with GFP_KERNEL;
 * the device RBD ring is then restocked under il->lock.
 */
void
il3945_rx_replenish(void *data)
{
	struct il_priv *il = data;
	unsigned long flags;

	il3945_rx_allocate(il, GFP_KERNEL);

	spin_lock_irqsave(&il->lock, flags);
	il3945_rx_queue_restock(il);
	spin_unlock_irqrestore(&il->lock, flags);
}
1115
/* Atomic-context variant of il3945_rx_replenish: called from the Rx
 * path (tasklet), so allocation uses GFP_ATOMIC and no extra locking
 * is taken here. */
static void
il3945_rx_replenish_now(struct il_priv *il)
{
	il3945_rx_allocate(il, GFP_ATOMIC);

	il3945_rx_queue_restock(il);
}
1123
/* Free the whole Rx queue.  Assumes the page field of each pool entry is
 * kept accurate: a buffer whose page has been detached must have had its
 * page pointer set to NULL.  Walks the pool, unmapping and freeing every
 * still-attached page, then releases the RBD ring and the device status
 * area. */
static void
il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	/* The RBD ring is 4 bytes (one __le32 bus address) per slot. */
	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
	dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}
1150
1151/* Convert linear signal-to-noise ratio into dB */
1152static u8 ratio2dB[100] = {
1153/* 0 1 2 3 4 5 6 7 8 9 */
1154 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
1155 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
1156 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
1157 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
1158 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
1159 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
1160 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
1161 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
1162 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
1163 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
1164};
1165
1166/* Calculates a relative dB value from a ratio of linear
1167 * (i.e. not dB) signal levels.
1168 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
1169int
1170il3945_calc_db_from_ratio(int sig_ratio)
1171{
1172 /* 1000:1 or higher just report as 60 dB */
1173 if (sig_ratio >= 1000)
1174 return 60;
1175
1176 /* 100:1 or higher, divide by 10 and use table,
1177 * add 20 dB to make up for divide by 10 */
1178 if (sig_ratio >= 100)
1179 return 20 + (int)ratio2dB[sig_ratio / 10];
1180
1181 /* We shouldn't see this */
1182 if (sig_ratio < 1)
1183 return 0;
1184
1185 /* Use table for ratios 1:1 - 99:1 */
1186 return (int)ratio2dB[sig_ratio];
1187}
1188
1189/**
1190 * il3945_rx_handle - Main entry function for receiving responses from uCode
1191 *
1192 * Uses the il->handlers callback function array to invoke
1193 * the appropriate handlers, including command responses,
1194 * frame-received notifications, and other notifications.
1195 */
1196static void
1197il3945_rx_handle(struct il_priv *il)
1198{
1199 struct il_rx_buf *rxb;
1200 struct il_rx_pkt *pkt;
1201 struct il_rx_queue *rxq = &il->rxq;
1202 u32 r, i;
1203 int reclaim;
1204 unsigned long flags;
1205 u8 fill_rx = 0;
1206 u32 count = 8;
1207 int total_empty = 0;
1208
1209 /* uCode's read idx (stored in shared DRAM) indicates the last Rx
1210 * buffer that the driver may process (last buffer filled by ucode). */
1211 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
1212 i = rxq->read;
1213
1214 /* calculate total frames need to be restock after handling RX */
1215 total_empty = r - rxq->write_actual;
1216 if (total_empty < 0)
1217 total_empty += RX_QUEUE_SIZE;
1218
1219 if (total_empty > (RX_QUEUE_SIZE / 2))
1220 fill_rx = 1;
1221 /* Rx interrupt, but nothing sent from uCode */
1222 if (i == r)
1223 D_RX("r = %d, i = %d\n", r, i);
1224
1225 while (i != r) {
1226 int len;
1227
1228 rxb = rxq->queue[i];
1229
1230 /* If an RXB doesn't have a Rx queue slot associated with it,
1231 * then a bug has been introduced in the queue refilling
1232 * routines -- catch it here */
1233 BUG_ON(rxb == NULL);
1234
1235 rxq->queue[i] = NULL;
1236
1237 pci_unmap_page(il->pci_dev, rxb->page_dma,
1238 PAGE_SIZE << il->hw_params.rx_page_order,
1239 PCI_DMA_FROMDEVICE);
1240 pkt = rxb_addr(rxb);
1241
1242 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
1243 len += sizeof(u32); /* account for status word */
1244
1245 /* Reclaim a command buffer only if this packet is a response
1246 * to a (driver-originated) command.
1247 * If the packet (e.g. Rx frame) originated from uCode,
1248 * there is no command buffer to reclaim.
1249 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
1250 * but apparently a few don't get set; catch them here. */
1251 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
1252 pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX;
1253
1254 /* Based on type of command response or notification,
1255 * handle those that need handling via function in
1256 * handlers table. See il3945_setup_handlers() */
1257 if (il->handlers[pkt->hdr.cmd]) {
1258 D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
1259 il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1260 il->isr_stats.handlers[pkt->hdr.cmd]++;
1261 il->handlers[pkt->hdr.cmd] (il, rxb);
1262 } else {
1263 /* No handling needed */
1264 D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
1265 i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1266 }
1267
1268 /*
1269 * XXX: After here, we should always check rxb->page
1270 * against NULL before touching it or its virtual
1271 * memory (pkt). Because some handler might have
1272 * already taken or freed the pages.
1273 */
1274
1275 if (reclaim) {
1276 /* Invoke any callbacks, transfer the buffer to caller,
1277 * and fire off the (possibly) blocking il_send_cmd()
1278 * as we reclaim the driver command queue */
1279 if (rxb->page)
1280 il_tx_cmd_complete(il, rxb);
1281 else
1282 IL_WARN("Claim null rxb?\n");
1283 }
1284
1285 /* Reuse the page if possible. For notification packets and
1286 * SKBs that fail to Rx correctly, add them back into the
1287 * rx_free list for reuse later. */
1288 spin_lock_irqsave(&rxq->lock, flags);
1289 if (rxb->page != NULL) {
1290 rxb->page_dma =
1291 pci_map_page(il->pci_dev, rxb->page, 0,
1292 PAGE_SIZE << il->hw_params.
1293 rx_page_order, PCI_DMA_FROMDEVICE);
1294 list_add_tail(&rxb->list, &rxq->rx_free);
1295 rxq->free_count++;
1296 } else
1297 list_add_tail(&rxb->list, &rxq->rx_used);
1298
1299 spin_unlock_irqrestore(&rxq->lock, flags);
1300
1301 i = (i + 1) & RX_QUEUE_MASK;
1302 /* If there are a lot of unused frames,
1303 * restock the Rx queue so ucode won't assert. */
1304 if (fill_rx) {
1305 count++;
1306 if (count >= 8) {
1307 rxq->read = i;
1308 il3945_rx_replenish_now(il);
1309 count = 0;
1310 }
1311 }
1312 }
1313
1314 /* Backtrack one entry */
1315 rxq->read = i;
1316 if (fill_rx)
1317 il3945_rx_replenish_now(il);
1318 else
1319 il3945_rx_queue_restock(il);
1320}
1321
/* call this function to flush any scheduled tasklet */
static inline void
il3945_synchronize_irq(struct il_priv *il)
{
	/* wait to make sure we flush pending tasklet */
	synchronize_irq(il->pci_dev->irq);
	/* then kill the tasklet the ISR may have scheduled */
	tasklet_kill(&il->irq_tasklet);
}
1330
/*
 * Map a uCode error descriptor number (1..6) to a human-readable name;
 * any other value yields "UNKNOWN".
 */
static const char *
il3945_desc_lookup(int i)
{
	static const char *const desc[] = {
		"UNKNOWN",	/* 0 - not a real code */
		"FAIL",
		"BAD_PARAM",
		"BAD_CHECKSUM",
		"NMI_INTERRUPT",
		"SYSASSERT",
		"FATAL_ERROR",
	};

	if (i >= 1 && i <= 6)
		return desc[i];

	return "UNKNOWN";
}
1351
1352#define ERROR_START_OFFSET (1 * sizeof(u32))
1353#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1354
/*
 * il3945_dump_nic_error_log - dump the uCode error event table to the log
 *
 * The table's SRAM address comes from the "alive" response
 * (card_alive.error_event_table_ptr).  The first word of the table is the
 * entry count; each subsequent entry is ERROR_ELEM_SIZE (7 u32s) read one
 * word at a time via il_read_targ_mem().
 */
void
il3945_dump_nic_error_log(struct il_priv *il)
{
	u32 i;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;

	base = le32_to_cpu(il->card_alive.error_event_table_ptr);

	if (!il3945_hw_valid_rtc_data_addr(base)) {
		IL_ERR("Not valid error log pointer 0x%08X\n", base);
		return;
	}

	/* first word of the table is the number of entries */
	count = il_read_targ_mem(il, base);

	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IL_ERR("Start IWL Error Log Dump:\n");
		IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
	}

	IL_ERR("Desc Time asrtPC blink2 "
	       "ilink1 nmiPC Line\n");
	for (i = ERROR_START_OFFSET;
	     i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
	     i += ERROR_ELEM_SIZE) {
		/* read the 7 words of one error-table entry */
		desc = il_read_targ_mem(il, base + i);
		time = il_read_targ_mem(il, base + i + 1 * sizeof(u32));
		blink1 = il_read_targ_mem(il, base + i + 2 * sizeof(u32));
		blink2 = il_read_targ_mem(il, base + i + 3 * sizeof(u32));
		ilink1 = il_read_targ_mem(il, base + i + 4 * sizeof(u32));
		ilink2 = il_read_targ_mem(il, base + i + 5 * sizeof(u32));
		data1 = il_read_targ_mem(il, base + i + 6 * sizeof(u32));

		IL_ERR("%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
		       il3945_desc_lookup(desc), desc, time, blink1, blink2,
		       ilink1, ilink2, data1);
	}
}
1394
/*
 * il3945_irq_tasklet - interrupt bottom half
 *
 * Reads and acks the pending causes from CSR_INT and the flow-handler
 * register CSR_FH_INT_STATUS (under il->lock, since reads/writes of the
 * two registers are not atomic with respect to each other), then services
 * each discovered bit: HW error, SW (uCode) error, wakeup, Rx and Tx.
 * Interrupts are re-enabled on exit unless a hardware error path already
 * disabled them for the restart.
 */
static void
il3945_irq_tasklet(struct il_priv *il)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
#ifdef CONFIG_IWLEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&il->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = _il_rd(il, CSR_INT);
	_il_wr(il, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
	_il_wr(il, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_ISR) {
		/* just for debug */
		inta_mask = _il_rd(il, CSR_INT_MASK);
		D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
		      inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&il->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR39_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR39_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IL_ERR("Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		il_disable_interrupts(il);

		il->isr_stats.hw++;
		il_irq_handle_error(il);

		handled |= CSR_INT_BIT_HW_ERR;

		/* interrupts stay disabled; the error/restart path
		 * will bring the device back up */
		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			D_ISR("Scheduler finished to transmit "
			      "the frame/frames.\n");
			il->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			D_ISR("Alive interrupt\n");
			il->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IL_ERR("Microcode SW error detected. " "Restarting 0x%X.\n",
		       inta);
		il->isr_stats.sw++;
		il_irq_handle_error(il);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		D_ISR("Wakeup interrupt\n");
		il_rx_queue_update_write_ptr(il, &il->rxq);
		/* resync write pointers for all six tx queues */
		il_txq_update_write_ptr(il, &il->txq[0]);
		il_txq_update_write_ptr(il, &il->txq[1]);
		il_txq_update_write_ptr(il, &il->txq[2]);
		il_txq_update_write_ptr(il, &il->txq[3]);
		il_txq_update_write_ptr(il, &il->txq[4]);
		il_txq_update_write_ptr(il, &il->txq[5]);

		il->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		il3945_rx_handle(il);
		il->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	if (inta & CSR_INT_BIT_FH_TX) {
		D_ISR("Tx interrupt\n");
		il->isr_stats.tx++;

		_il_wr(il, CSR_FH_INT_STATUS, (1 << 6));
		il_wr(il, FH39_TCSR_CREDIT(FH39_SRVC_CHNL), 0x0);
		handled |= CSR_INT_BIT_FH_TX;
	}

	if (inta & ~handled) {
		IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
		il->isr_stats.unhandled++;
	}

	if (inta & ~il->inta_mask) {
		IL_WARN("Disabled INTA bits 0x%08x were pending\n",
			inta & ~il->inta_mask);
		IL_WARN("   with inta_fh = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		inta = _il_rd(il, CSR_INT);
		inta_mask = _il_rd(il, CSR_INT_MASK);
		inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
		D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
		      "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
1540
/*
 * il3945_get_channels_for_scan - fill the scan command's channel array
 *
 * Walks il->scan_request->channels, and for every valid channel on @band
 * writes one il3945_scan_channel entry into @scan_ch: channel number,
 * active/passive dwell times, scan type (active/passive plus direct-probe
 * bits) and default txpower.  Returns the number of entries written.
 * Note the uCode API version changes both the passive-dwell encoding and
 * where the probe bits may be set.
 */
static int
il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band,
			     u8 is_active, u8 n_probes,
			     struct il3945_scan_channel *scan_ch,
			     struct ieee80211_vif *vif)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct il_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;

	sband = il_get_hw_mode(il, band);
	if (!sband)
		return 0;

	active_dwell = il_get_active_dwell_time(il, band, n_probes);
	passive_dwell = il_get_passive_dwell_time(il, band, vif);

	/* passive dwell must always be strictly longer than active dwell */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
		chan = il->scan_request->channels[i];

		if (chan->band != band)
			continue;

		scan_ch->channel = chan->hw_value;

		ch_info = il_get_channel_info(il, band, scan_ch->channel);
		if (!il_is_channel_valid(ch_info)) {
			D_SCAN("Channel %d is INVALID for this band.\n",
			       scan_ch->channel);
			continue;
		}

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
		/* If passive , set up for auto-switch
		 * and use long active_dwell time.
		 */
		if (!is_active || il_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
			scan_ch->type = 0;	/* passive */
			/* uCode v1 encodes a passive channel by giving it an
			 * active dwell one less than the passive dwell */
			if (IL_UCODE_API(il->ucode_ver) == 1)
				scan_ch->active_dwell =
				    cpu_to_le16(passive_dwell - 1);
		} else {
			scan_ch->type = 1;	/* active */
		}

		/* Set direct probe bits. These may be used both for active
		 * scan channels (probes gets sent right away),
		 * or for passive channels (probes get se sent only after
		 * hearing clear Rx packet).*/
		if (IL_UCODE_API(il->ucode_ver) >= 2) {
			if (n_probes)
				scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
		} else {
			/* uCode v1 does not allow setting direct probe bits on
			 * passive channel. */
			if ((scan_ch->type & 1) && n_probes)
				scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
		}

		/* Set txpower levels to defaults */
		scan_ch->tpc.dsp_atten = 110;
		/* scan_pwr_info->tpc.dsp_atten; */

		/*scan_pwr_info->tpc.tx_gain; */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else {
			scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
			/* NOTE: if we were doing 6Mb OFDM for scans we'd use
			 * power level:
			 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
			 */
		}

		D_SCAN("Scanning %d [%s %d]\n", scan_ch->channel,
		       (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
		       (scan_ch->type & 1) ? active_dwell : passive_dwell);

		scan_ch++;
		added++;
	}

	D_SCAN("total channels to scan %d\n", added);
	return added;
}
1634
1635static void
1636il3945_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
1637{
1638 int i;
1639
1640 for (i = 0; i < RATE_COUNT_LEGACY; i++) {
1641 rates[i].bitrate = il3945_rates[i].ieee * 5;
1642 rates[i].hw_value = i; /* Rate scaling will work on idxes */
1643 rates[i].hw_value_short = i;
1644 rates[i].flags = 0;
1645 if (i > IL39_LAST_OFDM_RATE || i < IL_FIRST_OFDM_RATE) {
1646 /*
1647 * If CCK != 1M then set short preamble rate flag.
1648 */
1649 rates[i].flags |=
1650 (il3945_rates[i].plcp ==
1651 10) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
1652 }
1653 }
1654}
1655
1656/******************************************************************************
1657 *
1658 * uCode download functions
1659 *
1660 ******************************************************************************/
1661
/*
 * il3945_dealloc_ucode_pci - free every DMA buffer holding a uCode image
 *
 * Releases all six firmware descriptors (runtime inst/data plus backup,
 * init inst/data, bootstrap).  NOTE(review): assumed safe for descriptors
 * that were never allocated — confirm il_free_fw_desc() handles that.
 */
static void
il3945_dealloc_ucode_pci(struct il_priv *il)
{
	il_free_fw_desc(il->pci_dev, &il->ucode_code);
	il_free_fw_desc(il->pci_dev, &il->ucode_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
	il_free_fw_desc(il->pci_dev, &il->ucode_init);
	il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_boot);
}
1672
1673/**
1674 * il3945_verify_inst_full - verify runtime uCode image in card vs. host,
1675 * looking at all data.
1676 */
1677static int
1678il3945_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
1679{
1680 u32 val;
1681 u32 save_len = len;
1682 int rc = 0;
1683 u32 errcnt;
1684
1685 D_INFO("ucode inst image size is %u\n", len);
1686
1687 il_wr(il, HBUS_TARG_MEM_RADDR, IL39_RTC_INST_LOWER_BOUND);
1688
1689 errcnt = 0;
1690 for (; len > 0; len -= sizeof(u32), image++) {
1691 /* read data comes through single port, auto-incr addr */
1692 /* NOTE: Use the debugless read so we don't flood kernel log
1693 * if IL_DL_IO is set */
1694 val = _il_rd(il, HBUS_TARG_MEM_RDAT);
1695 if (val != le32_to_cpu(*image)) {
1696 IL_ERR("uCode INST section is invalid at "
1697 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1698 save_len - len, val, le32_to_cpu(*image));
1699 rc = -EIO;
1700 errcnt++;
1701 if (errcnt >= 20)
1702 break;
1703 }
1704 }
1705
1706 if (!errcnt)
1707 D_INFO("ucode image in INSTRUCTION memory is good\n");
1708
1709 return rc;
1710}
1711
1712/**
1713 * il3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
1714 * using sample data 100 bytes apart. If these sample points are good,
1715 * it's a pretty good bet that everything between them is good, too.
1716 */
1717static int
1718il3945_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
1719{
1720 u32 val;
1721 int rc = 0;
1722 u32 errcnt = 0;
1723 u32 i;
1724
1725 D_INFO("ucode inst image size is %u\n", len);
1726
1727 for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
1728 /* read data comes through single port, auto-incr addr */
1729 /* NOTE: Use the debugless read so we don't flood kernel log
1730 * if IL_DL_IO is set */
1731 il_wr(il, HBUS_TARG_MEM_RADDR, i + IL39_RTC_INST_LOWER_BOUND);
1732 val = _il_rd(il, HBUS_TARG_MEM_RDAT);
1733 if (val != le32_to_cpu(*image)) {
1734#if 0 /* Enable this if you want to see details */
1735 IL_ERR("uCode INST section is invalid at "
1736 "offset 0x%x, is 0x%x, s/b 0x%x\n", i, val,
1737 *image);
1738#endif
1739 rc = -EIO;
1740 errcnt++;
1741 if (errcnt >= 3)
1742 break;
1743 }
1744 }
1745
1746 return rc;
1747}
1748
1749/**
1750 * il3945_verify_ucode - determine which instruction image is in SRAM,
1751 * and verify its contents
1752 */
1753static int
1754il3945_verify_ucode(struct il_priv *il)
1755{
1756 __le32 *image;
1757 u32 len;
1758 int rc = 0;
1759
1760 /* Try bootstrap */
1761 image = (__le32 *) il->ucode_boot.v_addr;
1762 len = il->ucode_boot.len;
1763 rc = il3945_verify_inst_sparse(il, image, len);
1764 if (rc == 0) {
1765 D_INFO("Bootstrap uCode is good in inst SRAM\n");
1766 return 0;
1767 }
1768
1769 /* Try initialize */
1770 image = (__le32 *) il->ucode_init.v_addr;
1771 len = il->ucode_init.len;
1772 rc = il3945_verify_inst_sparse(il, image, len);
1773 if (rc == 0) {
1774 D_INFO("Initialize uCode is good in inst SRAM\n");
1775 return 0;
1776 }
1777
1778 /* Try runtime/protocol */
1779 image = (__le32 *) il->ucode_code.v_addr;
1780 len = il->ucode_code.len;
1781 rc = il3945_verify_inst_sparse(il, image, len);
1782 if (rc == 0) {
1783 D_INFO("Runtime uCode is good in inst SRAM\n");
1784 return 0;
1785 }
1786
1787 IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
1788
1789 /* Since nothing seems to match, show first several data entries in
1790 * instruction SRAM, so maybe visual inspection will give a clue.
1791 * Selection of bootstrap image (vs. other images) is arbitrary. */
1792 image = (__le32 *) il->ucode_boot.v_addr;
1793 len = il->ucode_boot.len;
1794 rc = il3945_verify_inst_full(il, image, len);
1795
1796 return rc;
1797}
1798
/*
 * il3945_nic_start - release the on-board processor from reset
 */
static void
il3945_nic_start(struct il_priv *il)
{
	/* Remove all resets to allow NIC to operate */
	_il_wr(il, CSR_RESET, 0);
}
1805
/* Generate a trivial accessor for one size field of the v1 uCode header;
 * the 3945 only ever uses the v1 header layout. */
#define IL3945_UCODE_GET(item) \
static u32 il3945_ucode_get_##item(const struct il_ucode_header *ucode)\
{ \
	return le32_to_cpu(ucode->v1.item); \
}
1811
static u32
il3945_ucode_get_header_size(u32 api_ver)
{
	/* 3945 firmware always uses the v1 header, which is 24 bytes,
	 * regardless of api_ver */
	return 24;
}
1817
static u8 *
il3945_ucode_get_data(const struct il_ucode_header *ucode)
{
	/* image payload starts immediately after the v1 header */
	return (u8 *) ucode->v1.data;
}
1823
/* Accessors for the five section-size fields of the v1 uCode header */
IL3945_UCODE_GET(inst_size);
IL3945_UCODE_GET(data_size);
IL3945_UCODE_GET(init_size);
IL3945_UCODE_GET(init_data_size);
IL3945_UCODE_GET(boot_size);
1829
1830/**
1831 * il3945_read_ucode - Read uCode images from disk file.
1832 *
1833 * Copy into buffers for card to fetch via bus-mastering
1834 */
1835static int
1836il3945_read_ucode(struct il_priv *il)
1837{
1838 const struct il_ucode_header *ucode;
1839 int ret = -EINVAL, idx;
1840 const struct firmware *ucode_raw;
1841 /* firmware file name contains uCode/driver compatibility version */
1842 const char *name_pre = il->cfg->fw_name_pre;
1843 const unsigned int api_max = il->cfg->ucode_api_max;
1844 const unsigned int api_min = il->cfg->ucode_api_min;
1845 char buf[25];
1846 u8 *src;
1847 size_t len;
1848 u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
1849
1850 /* Ask kernel firmware_class module to get the boot firmware off disk.
1851 * request_firmware() is synchronous, file is in memory on return. */
1852 for (idx = api_max; idx >= api_min; idx--) {
1853 sprintf(buf, "%s%u%s", name_pre, idx, ".ucode");
1854 ret = request_firmware(&ucode_raw, buf, &il->pci_dev->dev);
1855 if (ret < 0) {
1856 IL_ERR("%s firmware file req failed: %d\n", buf, ret);
1857 if (ret == -ENOENT)
1858 continue;
1859 else
1860 goto error;
1861 } else {
1862 if (idx < api_max)
1863 IL_ERR("Loaded firmware %s, "
1864 "which is deprecated. "
1865 " Please use API v%u instead.\n", buf,
1866 api_max);
1867 D_INFO("Got firmware '%s' file "
1868 "(%zd bytes) from disk\n", buf, ucode_raw->size);
1869 break;
1870 }
1871 }
1872
1873 if (ret < 0)
1874 goto error;
1875
1876 /* Make sure that we got at least our header! */
1877 if (ucode_raw->size < il3945_ucode_get_header_size(1)) {
1878 IL_ERR("File size way too small!\n");
1879 ret = -EINVAL;
1880 goto err_release;
1881 }
1882
1883 /* Data from ucode file: header followed by uCode images */
1884 ucode = (struct il_ucode_header *)ucode_raw->data;
1885
1886 il->ucode_ver = le32_to_cpu(ucode->ver);
1887 api_ver = IL_UCODE_API(il->ucode_ver);
1888 inst_size = il3945_ucode_get_inst_size(ucode);
1889 data_size = il3945_ucode_get_data_size(ucode);
1890 init_size = il3945_ucode_get_init_size(ucode);
1891 init_data_size = il3945_ucode_get_init_data_size(ucode);
1892 boot_size = il3945_ucode_get_boot_size(ucode);
1893 src = il3945_ucode_get_data(ucode);
1894
1895 /* api_ver should match the api version forming part of the
1896 * firmware filename ... but we don't check for that and only rely
1897 * on the API version read from firmware header from here on forward */
1898
1899 if (api_ver < api_min || api_ver > api_max) {
1900 IL_ERR("Driver unable to support your firmware API. "
1901 "Driver supports v%u, firmware is v%u.\n", api_max,
1902 api_ver);
1903 il->ucode_ver = 0;
1904 ret = -EINVAL;
1905 goto err_release;
1906 }
1907 if (api_ver != api_max)
1908 IL_ERR("Firmware has old API version. Expected %u, "
1909 "got %u. New firmware can be obtained "
1910 "from http://www.intellinuxwireless.org.\n", api_max,
1911 api_ver);
1912
1913 IL_INFO("loaded firmware version %u.%u.%u.%u\n",
1914 IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
1915 IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
1916
1917 snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
1918 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
1919 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
1920 IL_UCODE_SERIAL(il->ucode_ver));
1921
1922 D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
1923 D_INFO("f/w package hdr runtime inst size = %u\n", inst_size);
1924 D_INFO("f/w package hdr runtime data size = %u\n", data_size);
1925 D_INFO("f/w package hdr init inst size = %u\n", init_size);
1926 D_INFO("f/w package hdr init data size = %u\n", init_data_size);
1927 D_INFO("f/w package hdr boot inst size = %u\n", boot_size);
1928
1929 /* Verify size of file vs. image size info in file's header */
1930 if (ucode_raw->size !=
1931 il3945_ucode_get_header_size(api_ver) + inst_size + data_size +
1932 init_size + init_data_size + boot_size) {
1933
1934 D_INFO("uCode file size %zd does not match expected size\n",
1935 ucode_raw->size);
1936 ret = -EINVAL;
1937 goto err_release;
1938 }
1939
1940 /* Verify that uCode images will fit in card's SRAM */
1941 if (inst_size > IL39_MAX_INST_SIZE) {
1942 D_INFO("uCode instr len %d too large to fit in\n", inst_size);
1943 ret = -EINVAL;
1944 goto err_release;
1945 }
1946
1947 if (data_size > IL39_MAX_DATA_SIZE) {
1948 D_INFO("uCode data len %d too large to fit in\n", data_size);
1949 ret = -EINVAL;
1950 goto err_release;
1951 }
1952 if (init_size > IL39_MAX_INST_SIZE) {
1953 D_INFO("uCode init instr len %d too large to fit in\n",
1954 init_size);
1955 ret = -EINVAL;
1956 goto err_release;
1957 }
1958 if (init_data_size > IL39_MAX_DATA_SIZE) {
1959 D_INFO("uCode init data len %d too large to fit in\n",
1960 init_data_size);
1961 ret = -EINVAL;
1962 goto err_release;
1963 }
1964 if (boot_size > IL39_MAX_BSM_SIZE) {
1965 D_INFO("uCode boot instr len %d too large to fit in\n",
1966 boot_size);
1967 ret = -EINVAL;
1968 goto err_release;
1969 }
1970
1971 /* Allocate ucode buffers for card's bus-master loading ... */
1972
1973 /* Runtime instructions and 2 copies of data:
1974 * 1) unmodified from disk
1975 * 2) backup cache for save/restore during power-downs */
1976 il->ucode_code.len = inst_size;
1977 il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
1978
1979 il->ucode_data.len = data_size;
1980 il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
1981
1982 il->ucode_data_backup.len = data_size;
1983 il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
1984
1985 if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
1986 !il->ucode_data_backup.v_addr)
1987 goto err_pci_alloc;
1988
1989 /* Initialization instructions and data */
1990 if (init_size && init_data_size) {
1991 il->ucode_init.len = init_size;
1992 il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
1993
1994 il->ucode_init_data.len = init_data_size;
1995 il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
1996
1997 if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
1998 goto err_pci_alloc;
1999 }
2000
2001 /* Bootstrap (instructions only, no data) */
2002 if (boot_size) {
2003 il->ucode_boot.len = boot_size;
2004 il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
2005
2006 if (!il->ucode_boot.v_addr)
2007 goto err_pci_alloc;
2008 }
2009
2010 /* Copy images into buffers for card's bus-master reads ... */
2011
2012 /* Runtime instructions (first block of data in file) */
2013 len = inst_size;
2014 D_INFO("Copying (but not loading) uCode instr len %zd\n", len);
2015 memcpy(il->ucode_code.v_addr, src, len);
2016 src += len;
2017
2018 D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
2019 il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
2020
2021 /* Runtime data (2nd block)
2022 * NOTE: Copy into backup buffer will be done in il3945_up() */
2023 len = data_size;
2024 D_INFO("Copying (but not loading) uCode data len %zd\n", len);
2025 memcpy(il->ucode_data.v_addr, src, len);
2026 memcpy(il->ucode_data_backup.v_addr, src, len);
2027 src += len;
2028
2029 /* Initialization instructions (3rd block) */
2030 if (init_size) {
2031 len = init_size;
2032 D_INFO("Copying (but not loading) init instr len %zd\n", len);
2033 memcpy(il->ucode_init.v_addr, src, len);
2034 src += len;
2035 }
2036
2037 /* Initialization data (4th block) */
2038 if (init_data_size) {
2039 len = init_data_size;
2040 D_INFO("Copying (but not loading) init data len %zd\n", len);
2041 memcpy(il->ucode_init_data.v_addr, src, len);
2042 src += len;
2043 }
2044
2045 /* Bootstrap instructions (5th block) */
2046 len = boot_size;
2047 D_INFO("Copying (but not loading) boot instr len %zd\n", len);
2048 memcpy(il->ucode_boot.v_addr, src, len);
2049
2050 /* We have our copies now, allow OS release its copies */
2051 release_firmware(ucode_raw);
2052 return 0;
2053
2054err_pci_alloc:
2055 IL_ERR("failed to allocate pci memory\n");
2056 ret = -ENOMEM;
2057 il3945_dealloc_ucode_pci(il);
2058
2059err_release:
2060 release_firmware(ucode_raw);
2061
2062error:
2063 return ret;
2064}
2065
2066/**
2067 * il3945_set_ucode_ptrs - Set uCode address location
2068 *
2069 * Tell initialization uCode where to find runtime uCode.
2070 *
2071 * BSM registers initially contain pointers to initialization uCode.
2072 * We need to replace them to load runtime uCode inst and data,
2073 * and to save runtime data when powering down.
2074 */
2075static int
2076il3945_set_ucode_ptrs(struct il_priv *il)
2077{
2078 dma_addr_t pinst;
2079 dma_addr_t pdata;
2080
2081 /* bits 31:0 for 3945 */
2082 pinst = il->ucode_code.p_addr;
2083 pdata = il->ucode_data_backup.p_addr;
2084
2085 /* Tell bootstrap uCode where to find image to load */
2086 il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
2087 il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
2088 il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
2089
2090 /* Inst byte count must be last to set up, bit 31 signals uCode
2091 * that all new ptr/size info is in place */
2092 il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
2093 il->ucode_code.len | BSM_DRAM_INST_LOAD);
2094
2095 D_INFO("Runtime uCode pointers are set.\n");
2096
2097 return 0;
2098}
2099
2100/**
2101 * il3945_init_alive_start - Called after N_ALIVE notification received
2102 *
2103 * Called after N_ALIVE notification received from "initialize" uCode.
2104 *
2105 * Tell "initialize" uCode to go ahead and load the runtime uCode.
2106 */
2107static void
2108il3945_init_alive_start(struct il_priv *il)
2109{
2110 /* Check alive response for "valid" sign from uCode */
2111 if (il->card_alive_init.is_valid != UCODE_VALID_OK) {
2112 /* We had an error bringing up the hardware, so take it
2113 * all the way back down so we can try again */
2114 D_INFO("Initialize Alive failed.\n");
2115 goto restart;
2116 }
2117
2118 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
2119 * This is a paranoid check, because we would not have gotten the
2120 * "initialize" alive if code weren't properly loaded. */
2121 if (il3945_verify_ucode(il)) {
2122 /* Runtime instruction load was bad;
2123 * take it all the way back down so we can try again */
2124 D_INFO("Bad \"initialize\" uCode load.\n");
2125 goto restart;
2126 }
2127
2128 /* Send pointers to protocol/runtime uCode image ... init code will
2129 * load and launch runtime uCode, which will send us another "Alive"
2130 * notification. */
2131 D_INFO("Initialization Alive received.\n");
2132 if (il3945_set_ucode_ptrs(il)) {
2133 /* Runtime instruction load won't happen;
2134 * take it all the way back down so we can try again */
2135 D_INFO("Couldn't set up uCode pointers.\n");
2136 goto restart;
2137 }
2138 return;
2139
2140restart:
2141 queue_work(il->workqueue, &il->restart);
2142}
2143
2144/**
2145 * il3945_alive_start - called after N_ALIVE notification received
2146 * from protocol/runtime uCode (initialization uCode's
2147 * Alive gets handled by il3945_init_alive_start()).
2148 */
2149static void
2150il3945_alive_start(struct il_priv *il)
2151{
2152 int thermal_spin = 0;
2153 u32 rfkill;
2154 struct il_rxon_context *ctx = &il->ctx;
2155
2156 D_INFO("Runtime Alive received.\n");
2157
2158 if (il->card_alive.is_valid != UCODE_VALID_OK) {
2159 /* We had an error bringing up the hardware, so take it
2160 * all the way back down so we can try again */
2161 D_INFO("Alive failed.\n");
2162 goto restart;
2163 }
2164
2165 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
2166 * This is a paranoid check, because we would not have gotten the
2167 * "runtime" alive if code weren't properly loaded. */
2168 if (il3945_verify_ucode(il)) {
2169 /* Runtime instruction load was bad;
2170 * take it all the way back down so we can try again */
2171 D_INFO("Bad runtime uCode load.\n");
2172 goto restart;
2173 }
2174
2175 rfkill = il_rd_prph(il, APMG_RFKILL_REG);
2176 D_INFO("RFKILL status: 0x%x\n", rfkill);
2177
2178 if (rfkill & 0x1) {
2179 clear_bit(S_RF_KILL_HW, &il->status);
2180 /* if RFKILL is not on, then wait for thermal
2181 * sensor in adapter to kick in */
2182 while (il3945_hw_get_temperature(il) == 0) {
2183 thermal_spin++;
2184 udelay(10);
2185 }
2186
2187 if (thermal_spin)
2188 D_INFO("Thermal calibration took %dus\n",
2189 thermal_spin * 10);
2190 } else
2191 set_bit(S_RF_KILL_HW, &il->status);
2192
2193 /* After the ALIVE response, we can send commands to 3945 uCode */
2194 set_bit(S_ALIVE, &il->status);
2195
2196 /* Enable watchdog to monitor the driver tx queues */
2197 il_setup_watchdog(il);
2198
2199 if (il_is_rfkill(il))
2200 return;
2201
2202 ieee80211_wake_queues(il->hw);
2203
2204 il->active_rate = RATES_MASK_3945;
2205
2206 il_power_update_mode(il, true);
2207
2208 if (il_is_associated(il)) {
2209 struct il3945_rxon_cmd *active_rxon =
2210 (struct il3945_rxon_cmd *)(&ctx->active);
2211
2212 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2213 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2214 } else {
2215 /* Initialize our rx_config data */
2216 il_connection_init_rx_config(il, ctx);
2217 }
2218
2219 /* Configure Bluetooth device coexistence support */
2220 il_send_bt_config(il);
2221
2222 set_bit(S_READY, &il->status);
2223
2224 /* Configure the adapter for unassociated operation */
2225 il3945_commit_rxon(il, ctx);
2226
2227 il3945_reg_txpower_periodic(il);
2228
2229 D_INFO("ALIVE processing complete.\n");
2230 wake_up(&il->wait_command_queue);
2231
2232 return;
2233
2234restart:
2235 queue_work(il->workqueue, &il->restart);
2236}
2237
2238static void il3945_cancel_deferred_work(struct il_priv *il);
2239
/*
 * __il3945_down - take the device down (caller holds il->mutex)
 *
 * Cancels scans and the watchdog, clears station state, disables
 * interrupts, stops the Tx/Rx machinery and puts the NIC in low power.
 * Only the RF-kill (and, once initialized, GEO/FW-error/exit-pending)
 * status bits survive across the shutdown; everything else is cleared.
 */
static void
__il3945_down(struct il_priv *il)
{
	unsigned long flags;
	int exit_pending;

	D_INFO(DRV_NAME " is going down\n");

	il_scan_cancel_timeout(il, 200);

	exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);

	/* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set
	 * to prevent rearm timer */
	del_timer_sync(&il->watchdog);

	/* Station information will now be cleared in device */
	il_clear_ucode_stations(il, NULL);
	il_dealloc_bcast_stations(il);
	il_clear_driver_stations(il);

	/* Unblock any waiting calls */
	wake_up_all(&il->wait_command_queue);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(S_EXIT_PENDING, &il->status);

	/* stop and reset the on-board processor */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	il3945_synchronize_irq(il);

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	/* If we have not previously called il3945_init() then
	 * clear all bits but the RF Kill bits and return */
	if (!il_is_init(il)) {
		il->status =
		    test_bit(S_RF_KILL_HW,
			     &il->
			     status) << S_RF_KILL_HW |
		    test_bit(S_GEO_CONFIGURED,
			     &il->
			     status) << S_GEO_CONFIGURED |
		    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
		goto exit;
	}

	/* ...otherwise clear out all the status bits but the RF Kill
	 * bit and continue taking the NIC down. */
	il->status &=
	    test_bit(S_RF_KILL_HW,
		     &il->status) << S_RF_KILL_HW | test_bit(S_GEO_CONFIGURED,
							     &il->
							     status) <<
	    S_GEO_CONFIGURED | test_bit(S_FW_ERROR,
					&il->
					status) << S_FW_ERROR |
	    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;

	il3945_hw_txq_ctx_stop(il);
	il3945_hw_rxq_stop(il);

	/* Power-down device's busmaster DMA clocks */
	il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);

	/* Stop the device, and put it in low power state */
	il_apm_stop(il);

exit:
	memset(&il->card_alive, 0, sizeof(struct il_alive_resp));

	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);
	il->beacon_skb = NULL;

	/* clear out any free frames */
	il3945_clear_free_frames(il);
}
2327
/*
 * il3945_down - take the device down, then cancel deferred work
 *
 * The deferred-work cancellation happens after il->mutex is dropped;
 * NOTE(review): presumably because some work items take the mutex
 * themselves — confirm against the work handlers.
 */
static void
il3945_down(struct il_priv *il)
{
	mutex_lock(&il->mutex);
	__il3945_down(il);
	mutex_unlock(&il->mutex);

	il3945_cancel_deferred_work(il);
}
2337
2338#define MAX_HW_RESTARTS 5
2339
2340static int
2341il3945_alloc_bcast_station(struct il_priv *il)
2342{
2343 struct il_rxon_context *ctx = &il->ctx;
2344 unsigned long flags;
2345 u8 sta_id;
2346
2347 spin_lock_irqsave(&il->sta_lock, flags);
2348 sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL);
2349 if (sta_id == IL_INVALID_STATION) {
2350 IL_ERR("Unable to prepare broadcast station\n");
2351 spin_unlock_irqrestore(&il->sta_lock, flags);
2352
2353 return -EINVAL;
2354 }
2355
2356 il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
2357 il->stations[sta_id].used |= IL_STA_BCAST;
2358 spin_unlock_irqrestore(&il->sta_lock, flags);
2359
2360 return 0;
2361}
2362
/*
 * Bring the NIC up: allocate the broadcast station, sanity-check driver
 * state, initialize the HW, and retry the uCode bootstrap load up to
 * MAX_HW_RESTARTS times.
 *
 * Caller must hold il->mutex (see il3945_mac_start / il3945_bg_restart).
 * Returns 0 on success - including the resume-with-rfkill-asserted case -
 * or a negative errno.
 */
static int
__il3945_up(struct il_priv *il)
{
	int rc, i;

	rc = il3945_alloc_bcast_station(il);
	if (rc)
		return rc;

	if (test_bit(S_EXIT_PENDING, &il->status)) {
		IL_WARN("Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
		IL_ERR("ucode not available for device bring up\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(S_RF_KILL_HW, &il->status);
	else {
		set_bit(S_RF_KILL_HW, &il->status);
		IL_WARN("Radio disabled by HW RF Kill switch\n");
		return -ENODEV;
	}

	/* ack/clear all pending interrupts before HW init */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);

	rc = il3945_hw_nic_init(il);
	if (rc) {
		IL_ERR("Unable to int nic\n");
		return rc;
	}

	/* make sure rfkill handshake bits are cleared */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_interrupts(il);

	/* really make sure rfkill handshake bits are cleared */
	/* NOTE(review): the same bit is written twice here, presumably
	 * deliberate belt-and-braces - confirm against HW errata */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Copy original ucode data image from disk into backup cache.
	 * This will be used to initialize the on-board processor's
	 * data SRAM for a clean start when the runtime program first loads. */
	memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
	       il->ucode_data.len);

	/* We return success when we resume from suspend and rf_kill is on. */
	if (test_bit(S_RF_KILL_HW, &il->status))
		return 0;

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		/* load bootstrap state machine,
		 * load bootstrap program into processor's memory,
		 * prepare to load the "initialize" uCode */
		rc = il->cfg->ops->lib->load_ucode(il);

		if (rc) {
			IL_ERR("Unable to set up bootstrap uCode: %d\n", rc);
			continue;
		}

		/* start card; "initialize" will load runtime ucode */
		il3945_nic_start(il);

		D_INFO(DRV_NAME " is coming up\n");

		return 0;
	}

	/* all attempts failed: tear the device back down */
	set_bit(S_EXIT_PENDING, &il->status);
	__il3945_down(il);
	clear_bit(S_EXIT_PENDING, &il->status);

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IL_ERR("Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
2450
2451/*****************************************************************************
2452 *
2453 * Workqueue callbacks
2454 *
2455 *****************************************************************************/
2456
2457static void
2458il3945_bg_init_alive_start(struct work_struct *data)
2459{
2460 struct il_priv *il =
2461 container_of(data, struct il_priv, init_alive_start.work);
2462
2463 mutex_lock(&il->mutex);
2464 if (test_bit(S_EXIT_PENDING, &il->status))
2465 goto out;
2466
2467 il3945_init_alive_start(il);
2468out:
2469 mutex_unlock(&il->mutex);
2470}
2471
2472static void
2473il3945_bg_alive_start(struct work_struct *data)
2474{
2475 struct il_priv *il =
2476 container_of(data, struct il_priv, alive_start.work);
2477
2478 mutex_lock(&il->mutex);
2479 if (test_bit(S_EXIT_PENDING, &il->status))
2480 goto out;
2481
2482 il3945_alive_start(il);
2483out:
2484 mutex_unlock(&il->mutex);
2485}
2486
2487/*
2488 * 3945 cannot interrupt driver when hardware rf kill switch toggles;
2489 * driver must poll CSR_GP_CNTRL_REG register for change. This register
2490 * *is* readable even when device has been SW_RESET into low power mode
2491 * (e.g. during RF KILL).
2492 */
2493static void
2494il3945_rfkill_poll(struct work_struct *data)
2495{
2496 struct il_priv *il =
2497 container_of(data, struct il_priv, _3945.rfkill_poll.work);
2498 bool old_rfkill = test_bit(S_RF_KILL_HW, &il->status);
2499 bool new_rfkill =
2500 !(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
2501
2502 if (new_rfkill != old_rfkill) {
2503 if (new_rfkill)
2504 set_bit(S_RF_KILL_HW, &il->status);
2505 else
2506 clear_bit(S_RF_KILL_HW, &il->status);
2507
2508 wiphy_rfkill_set_hw_state(il->hw->wiphy, new_rfkill);
2509
2510 D_RF_KILL("RF_KILL bit toggled to %s.\n",
2511 new_rfkill ? "disable radio" : "enable radio");
2512 }
2513
2514 /* Keep this running, even if radio now enabled. This will be
2515 * cancelled in mac_start() if system decides to start again */
2516 queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
2517 round_jiffies_relative(2 * HZ));
2518
2519}
2520
/*
 * Build and send a C_SCAN host command to the uCode.
 *
 * Fills in quiet/suspend timing, direct-scan SSIDs, the probe-request
 * TX command and the per-channel list, then issues the (huge) command
 * synchronously. Caller must hold il->mutex (lockdep-asserted) and
 * must have a valid il->scan_request.
 *
 * Returns 0 on success, -ENOMEM / -EIO on failure.
 */
int
il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
{
	struct il_host_cmd cmd = {
		.id = C_SCAN,
		.len = sizeof(struct il3945_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct il3945_scan_cmd *scan;
	u8 n_probes = 0;
	enum ieee80211_band band;
	bool is_active = false;
	int ret;
	u16 len;

	lockdep_assert_held(&il->mutex);

	/* scan buffer is allocated lazily and reused for later scans */
	if (!il->scan_cmd) {
		il->scan_cmd =
		    kmalloc(sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE,
			    GFP_KERNEL);
		if (!il->scan_cmd) {
			D_SCAN("Fail to allocate scan memory\n");
			return -ENOMEM;
		}
	}
	scan = il->scan_cmd;
	memset(scan, 0, sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
	scan->quiet_time = IL_ACTIVE_QUIET_TIME;

	if (il_is_associated(il)) {
		u16 interval;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		D_INFO("Scanning while associated...\n");

		interval = vif->bss_conf.beacon_int;

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;
		/*
		 * suspend time format:
		 *  0-19: beacon interval in usec (time before exec.)
		 * 20-23: 0
		 * 24-31: number of beacons (suspend between channels)
		 */

		extra = (suspend_time / interval) << 24;
		scan_suspend_time =
		    0xFF0FFFFF & (extra | ((suspend_time % interval) * 1024));

		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		D_SCAN("suspend_time 0x%X beacon interval %d\n",
		       scan_suspend_time, interval);
	}

	if (il->scan_request->n_ssids) {
		int i, p = 0;
		D_SCAN("Kicking off active scan\n");
		for (i = 0; i < il->scan_request->n_ssids; i++) {
			/* always does wildcard anyway */
			if (!il->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
			    il->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       il->scan_request->ssids[i].ssid,
			       il->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		D_SCAN("Kicking off passive scan.\n");

	/* We don't build a direct scan probe request; the uCode will do
	 * that based on the direct_mask added to each channel entry */
	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = il->ctx.bcast_sta_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* flags + rate selection */

	switch (il->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		scan->tx_cmd.rate = RATE_1M_PLCP;
		band = IEEE80211_BAND_2GHZ;
		break;
	case IEEE80211_BAND_5GHZ:
		scan->tx_cmd.rate = RATE_6M_PLCP;
		band = IEEE80211_BAND_5GHZ;
		break;
	default:
		IL_WARN("Invalid scan band\n");
		return -EIO;
	}

	/*
	 * If active scanning is requested but a certain channel
	 * is marked passive, we can do active scanning if we
	 * detect transmissions.
	 */
	scan->good_CRC_th =
	    is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_DISABLED;

	len =
	    il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
			      vif->addr, il->scan_request->ie,
			      il->scan_request->ie_len,
			      IL_MAX_SCAN_SIZE - sizeof(*scan));
	scan->tx_cmd.len = cpu_to_le16(len);

	/* select Rx antennas */
	scan->flags |= il3945_get_antenna_flags(il);

	/* channel entries follow the probe request in scan->data */
	scan->channel_count =
	    il3945_get_channels_for_scan(il, band, is_active, n_probes,
					 (void *)&scan->data[len], vif);
	if (scan->channel_count == 0) {
		D_SCAN("channel count %d\n", scan->channel_count);
		return -EIO;
	}

	cmd.len +=
	    le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct il3945_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	set_bit(S_SCAN_HW, &il->status);
	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		clear_bit(S_SCAN_HW, &il->status);
	return ret;
}
2664
2665void
2666il3945_post_scan(struct il_priv *il)
2667{
2668 struct il_rxon_context *ctx = &il->ctx;
2669
2670 /*
2671 * Since setting the RXON may have been deferred while
2672 * performing the scan, fire one off if needed
2673 */
2674 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
2675 il3945_commit_rxon(il, ctx);
2676}
2677
/*
 * Workqueue callback: restart the adapter.
 *
 * On firmware error (S_FW_ERROR set) the vif binding is dropped, the
 * NIC is taken down, and mac80211 is asked to rebuild/restart the hw.
 * Otherwise the NIC is simply cycled down and back up in place.
 */
static void
il3945_bg_restart(struct work_struct *data)
{
	struct il_priv *il = container_of(data, struct il_priv, restart);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
		/* detach from mac80211 state under the mutex, then let
		 * mac80211 drive the restart via its callbacks */
		mutex_lock(&il->mutex);
		il->ctx.vif = NULL;
		il->is_open = 0;
		mutex_unlock(&il->mutex);
		il3945_down(il);
		ieee80211_restart_hw(il->hw);
	} else {
		il3945_down(il);

		/* re-check for shutdown racing with the down above */
		mutex_lock(&il->mutex);
		if (test_bit(S_EXIT_PENDING, &il->status)) {
			mutex_unlock(&il->mutex);
			return;
		}

		__il3945_up(il);
		mutex_unlock(&il->mutex);
	}
}
2706
2707static void
2708il3945_bg_rx_replenish(struct work_struct *data)
2709{
2710 struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
2711
2712 mutex_lock(&il->mutex);
2713 if (test_bit(S_EXIT_PENDING, &il->status))
2714 goto out;
2715
2716 il3945_rx_replenish(il);
2717out:
2718 mutex_unlock(&il->mutex);
2719}
2720
/*
 * Reprogram RXON after mac80211 reports an association.
 *
 * Commits an unassociated RXON first (timing can only be sent while
 * unassociated - see il3945_config_ap), sends the RXON timing command,
 * then commits the associated RXON and starts rate scaling (STA mode)
 * or beaconing (IBSS mode).
 */
void
il3945_post_associate(struct il_priv *il)
{
	int rc = 0;
	struct ieee80211_conf *conf = NULL;
	struct il_rxon_context *ctx = &il->ctx;

	if (!ctx->vif || !il->is_open)
		return;

	D_ASSOC("Associated as %d to: %pM\n", ctx->vif->bss_conf.aid,
		ctx->active.bssid_addr);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	/* scanning and RXON commits must not overlap */
	il_scan_cancel_timeout(il, 200);

	conf = &il->hw->conf;

	/* RXON - unassoc, so the timing command below is accepted */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il3945_commit_rxon(il, ctx);

	rc = il_send_rxon_timing(il, ctx);
	if (rc)
		IL_WARN("C_RXON_TIMING failed - " "Attempting to continue.\n");

	ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;

	ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);

	D_ASSOC("assoc id %d beacon interval %d\n", ctx->vif->bss_conf.aid,
		ctx->vif->bss_conf.beacon_int);

	if (ctx->vif->bss_conf.use_short_preamble)
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

	/* short slot is only adjusted on the 2.4 GHz band */
	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
		if (ctx->vif->bss_conf.use_short_slot)
			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
	}

	il3945_commit_rxon(il, ctx);

	switch (ctx->vif->type) {
	case NL80211_IFTYPE_STATION:
		il3945_rate_scale_init(il->hw, IL_AP_ID);
		break;
	case NL80211_IFTYPE_ADHOC:
		il3945_send_beacon_cmd(il);
		break;
	default:
		IL_ERR("%s Should not be called in %d mode\n", __func__,
		       ctx->vif->type);
		break;
	}
}
2782
2783/*****************************************************************************
2784 *
2785 * mac80211 entry point functions
2786 *
2787 *****************************************************************************/
2788
2789#define UCODE_READY_TIMEOUT (2 * HZ)
2790
/*
 * mac80211 start callback: read the ucode from disk (first open only),
 * bring the NIC up, then wait for the START_ALIVE notification.
 *
 * Returns 0 when the device is ready, negative errno otherwise.
 */
static int
il3945_mac_start(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;
	int ret;

	D_MAC80211("enter\n");

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&il->mutex);

	/* fetch ucode file from disk, alloc and copy to bus-master buffers ...
	 * ucode filename and max sizes are card-specific. */

	if (!il->ucode_code.len) {
		ret = il3945_read_ucode(il);
		if (ret) {
			IL_ERR("Could not read microcode: %d\n", ret);
			mutex_unlock(&il->mutex);
			goto out_release_irq;
		}
	}

	ret = __il3945_up(il);

	mutex_unlock(&il->mutex);

	if (ret)
		goto out_release_irq;

	D_INFO("Start UP work.\n");

	/* Wait for START_ALIVE from ucode. Otherwise callbacks from
	 * mac80211 will not be run successfully. */
	ret = wait_event_timeout(il->wait_command_queue,
				 test_bit(S_READY, &il->status),
				 UCODE_READY_TIMEOUT);
	if (!ret) {
		if (!test_bit(S_READY, &il->status)) {
			IL_ERR("Wait for START_ALIVE timeout after %dms.\n",
			       jiffies_to_msecs(UCODE_READY_TIMEOUT));
			ret = -ETIMEDOUT;
			goto out_release_irq;
		}
	}

	/* ucode is running and will send rfkill notifications,
	 * no need to poll the killswitch state anymore */
	cancel_delayed_work(&il->_3945.rfkill_poll);

	il->is_open = 1;
	D_MAC80211("leave\n");
	return 0;

out_release_irq:
	/* NOTE(review): label name is historical - nothing is released
	 * here, we only mark the interface closed. */
	il->is_open = 0;
	D_MAC80211("leave - failed\n");
	return ret;
}
2850
2851static void
2852il3945_mac_stop(struct ieee80211_hw *hw)
2853{
2854 struct il_priv *il = hw->priv;
2855
2856 D_MAC80211("enter\n");
2857
2858 if (!il->is_open) {
2859 D_MAC80211("leave - skip\n");
2860 return;
2861 }
2862
2863 il->is_open = 0;
2864
2865 il3945_down(il);
2866
2867 flush_workqueue(il->workqueue);
2868
2869 /* start polling the killswitch state again */
2870 queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
2871 round_jiffies_relative(2 * HZ));
2872
2873 D_MAC80211("leave\n");
2874}
2875
2876static void
2877il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2878{
2879 struct il_priv *il = hw->priv;
2880
2881 D_MAC80211("enter\n");
2882
2883 D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
2884 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
2885
2886 if (il3945_tx_skb(il, skb))
2887 dev_kfree_skb_any(skb);
2888
2889 D_MAC80211("leave\n");
2890}
2891
/*
 * Configure the device for AP/IBSS operation and (re)send the beacon
 * command. The unassoc-RXON / timing / assoc-RXON dance is only needed
 * once, at AP bring up.
 */
void
il3945_config_ap(struct il_priv *il)
{
	struct il_rxon_context *ctx = &il->ctx;
	struct ieee80211_vif *vif = ctx->vif;
	int rc = 0;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	/* The following should be done only at AP bring up */
	if (!(il_is_associated(il))) {

		/* RXON - unassoc (to set timing command) */
		ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		il3945_commit_rxon(il, ctx);

		/* RXON Timing */
		rc = il_send_rxon_timing(il, ctx);
		if (rc)
			IL_WARN("C_RXON_TIMING failed - "
				"Attempting to continue.\n");

		ctx->staging.assoc_id = 0;

		if (vif->bss_conf.use_short_preamble)
			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

		/* short slot is only adjusted on the 2.4 GHz band */
		if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
			if (vif->bss_conf.use_short_slot)
				ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
			else
				ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
		}
		/* restore RXON assoc */
		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		il3945_commit_rxon(il, ctx);
	}
	il3945_send_beacon_cmd(il);
}
2934
/*
 * mac80211 set_key callback: program or remove a HW crypto key.
 *
 * "Static" (global) keys are used while unassociated; once associated,
 * keys are programmed per-station. Returns -EOPNOTSUPP when HW crypto
 * is disabled or for IBSS group keys, -EINVAL for unknown stations.
 */
static int
il3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key)
{
	struct il_priv *il = hw->priv;
	int ret = 0;
	u8 sta_id = IL_INVALID_STATION;
	u8 static_key;

	D_MAC80211("enter\n");

	if (il3945_mod_params.sw_crypto) {
		D_MAC80211("leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/*
	 * To support IBSS RSN, don't program group keys in IBSS, the
	 * hardware will then not attempt to decrypt the frames.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return -EOPNOTSUPP;

	static_key = !il_is_associated(il);

	/* NOTE(review): sta_id is resolved before il->mutex is taken
	 * below - looks racy vs. station-table updates; confirm that the
	 * callers serialize this. */
	if (!static_key) {
		sta_id = il_sta_id_or_broadcast(il, &il->ctx, sta);
		if (sta_id == IL_INVALID_STATION)
			return -EINVAL;
	}

	mutex_lock(&il->mutex);
	il_scan_cancel_timeout(il, 100);

	switch (cmd) {
	case SET_KEY:
		if (static_key)
			ret = il3945_set_static_key(il, key);
		else
			ret = il3945_set_dynamic_key(il, key, sta_id);
		D_MAC80211("enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (static_key)
			ret = il3945_remove_static_key(il);
		else
			ret = il3945_clear_sta_key_info(il, sta_id);
		D_MAC80211("disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");

	return ret;
}
2995
2996static int
2997il3945_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2998 struct ieee80211_sta *sta)
2999{
3000 struct il_priv *il = hw->priv;
3001 struct il3945_sta_priv *sta_priv = (void *)sta->drv_priv;
3002 int ret;
3003 bool is_ap = vif->type == NL80211_IFTYPE_STATION;
3004 u8 sta_id;
3005
3006 D_INFO("received request to add station %pM\n", sta->addr);
3007 mutex_lock(&il->mutex);
3008 D_INFO("proceeding to add station %pM\n", sta->addr);
3009 sta_priv->common.sta_id = IL_INVALID_STATION;
3010
3011 ret =
3012 il_add_station_common(il, &il->ctx, sta->addr, is_ap, sta, &sta_id);
3013 if (ret) {
3014 IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
3015 /* Should we return success if return code is EEXIST ? */
3016 mutex_unlock(&il->mutex);
3017 return ret;
3018 }
3019
3020 sta_priv->common.sta_id = sta_id;
3021
3022 /* Initialize rate scaling */
3023 D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
3024 il3945_rs_rate_init(il, sta, sta_id);
3025 mutex_unlock(&il->mutex);
3026
3027 return 0;
3028}
3029
/*
 * mac80211 configure_filter callback.
 *
 * Translates the requested FIF_* flags into RXON filter_flags bits on
 * the staging RXON. The CHK macro accumulates bits to set (filter_or)
 * and bits to clear (filter_nand). The change is deliberately not
 * committed here (see comment below); it is sent with the next RXON
 * commit.
 */
static void
il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
			unsigned int *total_flags, u64 multicast)
{
	struct il_priv *il = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;
	struct il_rxon_context *ctx = &il->ctx;

#define CHK(test, flag)	do { \
	if (*total_flags & (test))		\
		filter_or |= (flag);		\
	else					\
		filter_nand |= (flag);		\
	} while (0)

	D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
		   *total_flags);

	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&il->mutex);

	ctx->staging.filter_flags &= ~filter_nand;
	ctx->staging.filter_flags |= filter_or;

	/*
	 * Not committing directly because hardware can perform a scan,
	 * but even if hw is ready, committing here breaks for some reason,
	 * we'll eventually commit the filter flags change anyway.
	 */

	mutex_unlock(&il->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in il_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &=
	    FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
	    FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
3077
3078/*****************************************************************************
3079 *
3080 * sysfs attributes
3081 *
3082 *****************************************************************************/
3083
3084#ifdef CONFIG_IWLEGACY_DEBUG
3085
3086/*
3087 * The following adds a new attribute to the sysfs representation
3088 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
3089 * used for controlling the debug level.
3090 *
3091 * See the level definitions in iwl for details.
3092 *
3093 * The debug_level being managed using sysfs below is a per device debug
3094 * level that is used instead of the global debug level if it (the per
3095 * device debug level) is set.
3096 */
3097static ssize_t
3098il3945_show_debug_level(struct device *d, struct device_attribute *attr,
3099 char *buf)
3100{
3101 struct il_priv *il = dev_get_drvdata(d);
3102 return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
3103}
3104
3105static ssize_t
3106il3945_store_debug_level(struct device *d, struct device_attribute *attr,
3107 const char *buf, size_t count)
3108{
3109 struct il_priv *il = dev_get_drvdata(d);
3110 unsigned long val;
3111 int ret;
3112
3113 ret = strict_strtoul(buf, 0, &val);
3114 if (ret)
3115 IL_INFO("%s is not in hex or decimal form.\n", buf);
3116 else {
3117 il->debug_level = val;
3118 if (il_alloc_traffic_mem(il))
3119 IL_ERR("Not enough memory to generate traffic log\n");
3120 }
3121 return strnlen(buf, count);
3122}
3123
3124static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il3945_show_debug_level,
3125 il3945_store_debug_level);
3126
3127#endif /* CONFIG_IWLEGACY_DEBUG */
3128
3129static ssize_t
3130il3945_show_temperature(struct device *d, struct device_attribute *attr,
3131 char *buf)
3132{
3133 struct il_priv *il = dev_get_drvdata(d);
3134
3135 if (!il_is_alive(il))
3136 return -EAGAIN;
3137
3138 return sprintf(buf, "%d\n", il3945_hw_get_temperature(il));
3139}
3140
3141static DEVICE_ATTR(temperature, S_IRUGO, il3945_show_temperature, NULL);
3142
3143static ssize_t
3144il3945_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
3145{
3146 struct il_priv *il = dev_get_drvdata(d);
3147 return sprintf(buf, "%d\n", il->tx_power_user_lmt);
3148}
3149
3150static ssize_t
3151il3945_store_tx_power(struct device *d, struct device_attribute *attr,
3152 const char *buf, size_t count)
3153{
3154 struct il_priv *il = dev_get_drvdata(d);
3155 char *p = (char *)buf;
3156 u32 val;
3157
3158 val = simple_strtoul(p, &p, 10);
3159 if (p == buf)
3160 IL_INFO(": %s is not in decimal form.\n", buf);
3161 else
3162 il3945_hw_reg_set_txpower(il, val);
3163
3164 return count;
3165}
3166
3167static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il3945_show_tx_power,
3168 il3945_store_tx_power);
3169
3170static ssize_t
3171il3945_show_flags(struct device *d, struct device_attribute *attr, char *buf)
3172{
3173 struct il_priv *il = dev_get_drvdata(d);
3174 struct il_rxon_context *ctx = &il->ctx;
3175
3176 return sprintf(buf, "0x%04X\n", ctx->active.flags);
3177}
3178
3179static ssize_t
3180il3945_store_flags(struct device *d, struct device_attribute *attr,
3181 const char *buf, size_t count)
3182{
3183 struct il_priv *il = dev_get_drvdata(d);
3184 u32 flags = simple_strtoul(buf, NULL, 0);
3185 struct il_rxon_context *ctx = &il->ctx;
3186
3187 mutex_lock(&il->mutex);
3188 if (le32_to_cpu(ctx->staging.flags) != flags) {
3189 /* Cancel any currently running scans... */
3190 if (il_scan_cancel_timeout(il, 100))
3191 IL_WARN("Could not cancel scan.\n");
3192 else {
3193 D_INFO("Committing rxon.flags = 0x%04X\n", flags);
3194 ctx->staging.flags = cpu_to_le32(flags);
3195 il3945_commit_rxon(il, ctx);
3196 }
3197 }
3198 mutex_unlock(&il->mutex);
3199
3200 return count;
3201}
3202
3203static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, il3945_show_flags,
3204 il3945_store_flags);
3205
3206static ssize_t
3207il3945_show_filter_flags(struct device *d, struct device_attribute *attr,
3208 char *buf)
3209{
3210 struct il_priv *il = dev_get_drvdata(d);
3211 struct il_rxon_context *ctx = &il->ctx;
3212
3213 return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.filter_flags));
3214}
3215
3216static ssize_t
3217il3945_store_filter_flags(struct device *d, struct device_attribute *attr,
3218 const char *buf, size_t count)
3219{
3220 struct il_priv *il = dev_get_drvdata(d);
3221 struct il_rxon_context *ctx = &il->ctx;
3222 u32 filter_flags = simple_strtoul(buf, NULL, 0);
3223
3224 mutex_lock(&il->mutex);
3225 if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
3226 /* Cancel any currently running scans... */
3227 if (il_scan_cancel_timeout(il, 100))
3228 IL_WARN("Could not cancel scan.\n");
3229 else {
3230 D_INFO("Committing rxon.filter_flags = " "0x%04X\n",
3231 filter_flags);
3232 ctx->staging.filter_flags = cpu_to_le32(filter_flags);
3233 il3945_commit_rxon(il, ctx);
3234 }
3235 }
3236 mutex_unlock(&il->mutex);
3237
3238 return count;
3239}
3240
3241static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, il3945_show_filter_flags,
3242 il3945_store_filter_flags);
3243
/*
 * sysfs: dump the most recent spectrum-measurement report as a hex dump.
 *
 * The report is snapshotted under il->lock and the ready flag cleared,
 * so each report is delivered to user space at most once. Returns 0
 * when no report is pending.
 */
static ssize_t
il3945_show_measurement(struct device *d, struct device_attribute *attr,
			char *buf)
{
	struct il_priv *il = dev_get_drvdata(d);
	struct il_spectrum_notification measure_report;
	u32 size = sizeof(measure_report), len = 0, ofs = 0;
	u8 *data = (u8 *) &measure_report;
	unsigned long flags;

	spin_lock_irqsave(&il->lock, flags);
	if (!(il->measurement_status & MEASUREMENT_READY)) {
		spin_unlock_irqrestore(&il->lock, flags);
		return 0;
	}
	memcpy(&measure_report, &il->measure_report, size);
	il->measurement_status = 0;
	spin_unlock_irqrestore(&il->lock, flags);

	/* emit 16 bytes per line until the report or the page runs out */
	while (size && PAGE_SIZE - len) {
		hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
				   PAGE_SIZE - len, 1);
		len = strlen(buf);
		if (PAGE_SIZE - len)
			buf[len++] = '\n';

		ofs += 16;
		size -= min(size, 16U);
	}

	return len;
}
3276
3277static ssize_t
3278il3945_store_measurement(struct device *d, struct device_attribute *attr,
3279 const char *buf, size_t count)
3280{
3281 struct il_priv *il = dev_get_drvdata(d);
3282 struct il_rxon_context *ctx = &il->ctx;
3283 struct ieee80211_measurement_params params = {
3284 .channel = le16_to_cpu(ctx->active.channel),
3285 .start_time = cpu_to_le64(il->_3945.last_tsf),
3286 .duration = cpu_to_le16(1),
3287 };
3288 u8 type = IL_MEASURE_BASIC;
3289 u8 buffer[32];
3290 u8 channel;
3291
3292 if (count) {
3293 char *p = buffer;
3294 strncpy(buffer, buf, min(sizeof(buffer), count));
3295 channel = simple_strtoul(p, NULL, 0);
3296 if (channel)
3297 params.channel = channel;
3298
3299 p = buffer;
3300 while (*p && *p != ' ')
3301 p++;
3302 if (*p)
3303 type = simple_strtoul(p + 1, NULL, 0);
3304 }
3305
3306 D_INFO("Invoking measurement of type %d on " "channel %d (for '%s')\n",
3307 type, params.channel, buf);
3308 il3945_get_measurement(il, &params, type);
3309
3310 return count;
3311}
3312
3313static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, il3945_show_measurement,
3314 il3945_store_measurement);
3315
3316static ssize_t
3317il3945_store_retry_rate(struct device *d, struct device_attribute *attr,
3318 const char *buf, size_t count)
3319{
3320 struct il_priv *il = dev_get_drvdata(d);
3321
3322 il->retry_rate = simple_strtoul(buf, NULL, 0);
3323 if (il->retry_rate <= 0)
3324 il->retry_rate = 1;
3325
3326 return count;
3327}
3328
3329static ssize_t
3330il3945_show_retry_rate(struct device *d, struct device_attribute *attr,
3331 char *buf)
3332{
3333 struct il_priv *il = dev_get_drvdata(d);
3334 return sprintf(buf, "%d", il->retry_rate);
3335}
3336
3337static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, il3945_show_retry_rate,
3338 il3945_store_retry_rate);
3339
3340static ssize_t
3341il3945_show_channels(struct device *d, struct device_attribute *attr, char *buf)
3342{
3343 /* all this shit doesn't belong into sysfs anyway */
3344 return 0;
3345}
3346
3347static DEVICE_ATTR(channels, S_IRUSR, il3945_show_channels, NULL);
3348
3349static ssize_t
3350il3945_show_antenna(struct device *d, struct device_attribute *attr, char *buf)
3351{
3352 struct il_priv *il = dev_get_drvdata(d);
3353
3354 if (!il_is_alive(il))
3355 return -EAGAIN;
3356
3357 return sprintf(buf, "%d\n", il3945_mod_params.antenna);
3358}
3359
3360static ssize_t
3361il3945_store_antenna(struct device *d, struct device_attribute *attr,
3362 const char *buf, size_t count)
3363{
3364 struct il_priv *il __maybe_unused = dev_get_drvdata(d);
3365 int ant;
3366
3367 if (count == 0)
3368 return 0;
3369
3370 if (sscanf(buf, "%1i", &ant) != 1) {
3371 D_INFO("not in hex or decimal form.\n");
3372 return count;
3373 }
3374
3375 if (ant >= 0 && ant <= 2) {
3376 D_INFO("Setting antenna select to %d.\n", ant);
3377 il3945_mod_params.antenna = (enum il3945_antenna)ant;
3378 } else
3379 D_INFO("Bad antenna select value %d.\n", ant);
3380
3381 return count;
3382}
3383
3384static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, il3945_show_antenna,
3385 il3945_store_antenna);
3386
3387static ssize_t
3388il3945_show_status(struct device *d, struct device_attribute *attr, char *buf)
3389{
3390 struct il_priv *il = dev_get_drvdata(d);
3391 if (!il_is_alive(il))
3392 return -EAGAIN;
3393 return sprintf(buf, "0x%08x\n", (int)il->status);
3394}
3395
3396static DEVICE_ATTR(status, S_IRUGO, il3945_show_status, NULL);
3397
3398static ssize_t
3399il3945_dump_error_log(struct device *d, struct device_attribute *attr,
3400 const char *buf, size_t count)
3401{
3402 struct il_priv *il = dev_get_drvdata(d);
3403 char *p = (char *)buf;
3404
3405 if (p[0] == '1')
3406 il3945_dump_nic_error_log(il);
3407
3408 return strnlen(buf, count);
3409}
3410
3411static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, il3945_dump_error_log);
3412
3413/*****************************************************************************
3414 *
3415 * driver setup and tear down
3416 *
3417 *****************************************************************************/
3418
/*
 * Create the driver workqueue and register all work items, the watchdog
 * timer and the interrupt tasklet. Called once during device setup;
 * il3945_cancel_deferred_work() is the teardown counterpart.
 */
static void
il3945_setup_deferred_work(struct il_priv *il)
{
	il->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&il->wait_command_queue);

	INIT_WORK(&il->restart, il3945_bg_restart);
	INIT_WORK(&il->rx_replenish, il3945_bg_rx_replenish);
	INIT_DELAYED_WORK(&il->init_alive_start, il3945_bg_init_alive_start);
	INIT_DELAYED_WORK(&il->alive_start, il3945_bg_alive_start);
	INIT_DELAYED_WORK(&il->_3945.rfkill_poll, il3945_rfkill_poll);

	il_setup_scan_deferred_work(il);

	il3945_hw_setup_deferred_work(il);

	init_timer(&il->watchdog);
	il->watchdog.data = (unsigned long)il;
	il->watchdog.function = il_bg_watchdog;

	/* tasklet entry takes unsigned long; cast adapts the handler */
	tasklet_init(&il->irq_tasklet,
		     (void (*)(unsigned long))il3945_irq_tasklet,
		     (unsigned long)il);
}
3444
/*
 * Cancel all deferred work registered in il3945_setup_deferred_work().
 */
static void
il3945_cancel_deferred_work(struct il_priv *il)
{
	il3945_hw_cancel_deferred_work(il);

	cancel_delayed_work_sync(&il->init_alive_start);
	/* NOTE(review): alive_start uses the non-_sync cancel, so it may
	 * still be running when this returns - confirm this asymmetry
	 * with init_alive_start above is intentional */
	cancel_delayed_work(&il->alive_start);

	il_cancel_scan_deferred_work(il);
}
3455
/* Attributes exported under the PCI device's sysfs directory. */
static struct attribute *il3945_sysfs_entries[] = {
	&dev_attr_antenna.attr,
	&dev_attr_channels.attr,
	&dev_attr_dump_errors.attr,
	&dev_attr_flags.attr,
	&dev_attr_filter_flags.attr,
	&dev_attr_measurement.attr,
	&dev_attr_retry_rate.attr,
	&dev_attr_status.attr,
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL
};

/* Registered as one attribute group directly in the device directory. */
static struct attribute_group il3945_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = il3945_sysfs_entries,
};
3477
/*
 * mac80211 callbacks for the 3945.  Deliberately NOT const:
 * il3945_pci_probe() clears .hw_scan at runtime when the
 * disable_hw_scan module parameter is set.
 */
struct ieee80211_ops il3945_hw_ops = {
	.tx = il3945_mac_tx,
	.start = il3945_mac_start,
	.stop = il3945_mac_stop,
	.add_interface = il_mac_add_interface,
	.remove_interface = il_mac_remove_interface,
	.change_interface = il_mac_change_interface,
	.config = il_mac_config,
	.configure_filter = il3945_configure_filter,
	.set_key = il3945_mac_set_key,
	.conf_tx = il_mac_conf_tx,
	.reset_tsf = il_mac_reset_tsf,
	.bss_info_changed = il_mac_bss_info_changed,
	.hw_scan = il_mac_hw_scan,
	.sta_add = il3945_mac_sta_add,
	.sta_remove = il_mac_sta_remove,
	.tx_last_beacon = il_mac_tx_last_beacon,
};
3496
/*
 * il3945_init_drv - one-time initialization of driver-private state.
 *
 * Initializes locks, lists and defaults, validates the EEPROM version,
 * builds the regulatory channel map, loads per-channel txpower from
 * EEPROM and sets up the band/geo structures.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated here has been released again (goto-cleanup below).
 */
static int
il3945_init_drv(struct il_priv *il)
{
	int ret;
	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;

	il->retry_rate = 1;
	il->beacon_skb = NULL;

	spin_lock_init(&il->sta_lock);
	spin_lock_init(&il->hcmd_lock);

	INIT_LIST_HEAD(&il->free_frames);

	mutex_init(&il->mutex);

	il->ieee_channels = NULL;
	il->ieee_rates = NULL;
	il->band = IEEE80211_BAND_2GHZ;

	il->iw_mode = NL80211_IFTYPE_STATION;
	il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;

	/* initialize force reset */
	il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;

	if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
		IL_WARN("Unsupported EEPROM version: 0x%04X\n",
			eeprom->version);
		ret = -EINVAL;
		goto err;
	}
	ret = il_init_channel_map(il);
	if (ret) {
		IL_ERR("initializing regulatory failed: %d\n", ret);
		goto err;
	}

	/* Set up txpower settings in driver for all channels */
	if (il3945_txpower_set_from_eeprom(il)) {
		ret = -EIO;
		goto err_free_channel_map;
	}

	ret = il_init_geos(il);
	if (ret) {
		IL_ERR("initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	il3945_init_hw_rates(il, il->ieee_rates);

	return 0;

err_free_channel_map:
	il_free_channel_map(il);
err:
	return ret;
}
3555
/* Longest probe-request frame we build, in bytes (see max_scan_ie_len below). */
#define IL3945_MAX_PROBE_REQUEST 200

/*
 * il3945_setup_mac - describe our capabilities to mac80211 and register.
 *
 * Fills in the ieee80211_hw flags, wiphy limits and the supported bands,
 * initializes LEDs, then calls ieee80211_register_hw().
 * Returns 0 or the negative errno from registration.
 */
static int
il3945_setup_mac(struct il_priv *il)
{
	int ret;
	struct ieee80211_hw *hw = il->hw;

	hw->rate_control_algorithm = "iwl-3945-rs";
	hw->sta_data_size = sizeof(struct il3945_sta_priv);
	hw->vif_data_size = sizeof(struct il_vif_priv);

	/* Tell mac80211 our characteristics */
	hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT;

	hw->wiphy->interface_modes = il->ctx.interface_modes;

	hw->wiphy->flags |=
	    WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
	    WIPHY_FLAG_IBSS_RSN;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
	/* we create the 802.11 header and a zero-length SSID element */
	hw->wiphy->max_scan_ie_len = IL3945_MAX_PROBE_REQUEST - 24 - 2;

	/* Default value; 4 EDCA QOS priorities */
	hw->queues = 4;

	/* Only advertise bands that actually have channels after the
	 * channel map / geo setup in il3945_init_drv(). */
	if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
		il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
		    &il->bands[IEEE80211_BAND_2GHZ];

	if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
		il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
		    &il->bands[IEEE80211_BAND_5GHZ];

	il_leds_init(il);

	ret = ieee80211_register_hw(il->hw);
	if (ret) {
		IL_ERR("Failed to register hw (error %d)\n", ret);
		return ret;
	}
	il->mac80211_registered = 1;

	return 0;
}
3603
/*
 * il3945_pci_probe - PCI probe: bring up one 3945 device end to end.
 *
 * Allocates the mac80211 hw + driver private area, initializes the PCI
 * bus and BAR mapping, reads the EEPROM, initializes driver state,
 * requests the IRQ, creates sysfs attributes and finally registers with
 * mac80211.  On any failure the labels at the bottom unwind exactly what
 * was set up so far, in reverse order.
 */
static int
il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0;
	struct il_priv *il;
	struct ieee80211_hw *hw;
	struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
	struct il3945_eeprom *eeprom;
	unsigned long flags;

	/***********************
	 * 1. Allocating HW data
	 * ********************/

	/* mac80211 allocates memory for this device instance, including
	 * space for this driver's ilate structure */
	hw = il_alloc_all(cfg);
	if (hw == NULL) {
		pr_err("Can not allocate network device\n");
		err = -ENOMEM;
		goto out;
	}
	il = hw->priv;
	SET_IEEE80211_DEV(hw, &pdev->dev);

	il->cmd_queue = IL39_CMD_QUEUE_NUM;

	/* Single RXON context on 3945 hardware. */
	il->ctx.ctxid = 0;

	il->ctx.rxon_cmd = C_RXON;
	il->ctx.rxon_timing_cmd = C_RXON_TIMING;
	il->ctx.rxon_assoc_cmd = C_RXON_ASSOC;
	il->ctx.qos_cmd = C_QOS_PARAM;
	il->ctx.ap_sta_id = IL_AP_ID;
	il->ctx.wep_key_cmd = C_WEPKEY;
	il->ctx.interface_modes =
	    BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
	il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
	il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
	il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;

	/*
	 * Disabling hardware scan means that mac80211 will perform scans
	 * "the hard way", rather than using device's scan.
	 */
	if (il3945_mod_params.disable_hw_scan) {
		D_INFO("Disabling hw_scan\n");
		il3945_hw_ops.hw_scan = NULL;
	}

	D_INFO("*** LOAD DRIVER ***\n");
	il->cfg = cfg;
	il->pci_dev = pdev;
	il->inta_mask = CSR_INI_SET_MASK;

	/* best-effort: a missing traffic log is not fatal */
	if (il_alloc_traffic_mem(il))
		IL_ERR("Not enough memory to generate traffic log\n");

	/***************************
	 * 2. Initializing PCI bus
	 * *************************/
	pci_disable_link_state(pdev,
			       PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_ieee80211_free_hw;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		IL_WARN("No suitable DMA available.\n");
		goto out_pci_disable_device;
	}

	pci_set_drvdata(pdev, il);
	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	/***********************
	 * 3. Read REV Register
	 * ********************/
	il->hw_base = pci_iomap(pdev, 0, 0);
	if (!il->hw_base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	D_INFO("pci_resource_len = 0x%08llx\n",
	       (unsigned long long)pci_resource_len(pdev, 0));
	D_INFO("pci_resource_base = %p\n", il->hw_base);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, 0x41, 0x00);

	/* these spin locks will be used in apm_ops.init and EEPROM access
	 * we should init now
	 */
	spin_lock_init(&il->reg_lock);
	spin_lock_init(&il->lock);

	/*
	 * stop and reset the on-board processor just in case it is in a
	 * strange state ... like being left stranded by a primary kernel
	 * and this is now the kdump kernel trying to start up
	 */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/***********************
	 * 4. Read EEPROM
	 * ********************/

	/* Read the EEPROM */
	err = il_eeprom_init(il);
	if (err) {
		IL_ERR("Unable to init EEPROM\n");
		goto out_iounmap;
	}
	/* MAC Address location in EEPROM same for 3945/4965 */
	eeprom = (struct il3945_eeprom *)il->eeprom;
	D_INFO("MAC address: %pM\n", eeprom->mac_address);
	SET_IEEE80211_PERM_ADDR(il->hw, eeprom->mac_address);

	/***********************
	 * 5. Setup HW Constants
	 * ********************/
	/* Device-specific setup */
	if (il3945_hw_set_hw_params(il)) {
		IL_ERR("failed to set hw settings\n");
		goto out_eeprom_free;
	}

	/***********************
	 * 6. Setup il
	 * ********************/

	err = il3945_init_drv(il);
	if (err) {
		IL_ERR("initializing driver failed\n");
		goto out_unset_hw_params;
	}

	IL_INFO("Detected Intel Wireless WiFi Link %s\n", il->cfg->name);

	/***********************
	 * 7. Setup Services
	 * ********************/

	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	pci_enable_msi(il->pci_dev);

	err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
	if (err) {
		IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
		goto out_disable_msi;
	}

	err = sysfs_create_group(&pdev->dev.kobj, &il3945_attribute_group);
	if (err) {
		IL_ERR("failed to create sysfs device attributes\n");
		goto out_release_irq;
	}

	il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5],
			    &il->ctx);
	il3945_setup_deferred_work(il);
	il3945_setup_handlers(il);
	il_power_initialize(il);

	/*********************************
	 * 8. Setup and Register mac80211
	 * *******************************/

	il_enable_interrupts(il);

	err = il3945_setup_mac(il);
	if (err)
		goto out_remove_sysfs;

	/* debugfs is optional; a failure here is only logged */
	err = il_dbgfs_register(il, DRV_NAME);
	if (err)
		IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
		       err);

	/* Start monitoring the killswitch */
	queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2 * HZ);

	return 0;

out_remove_sysfs:
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;
	sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
out_release_irq:
	free_irq(il->pci_dev->irq, il);
out_disable_msi:
	pci_disable_msi(il->pci_dev);
	il_free_geos(il);
	il_free_channel_map(il);
out_unset_hw_params:
	il3945_unset_hw_params(il);
out_eeprom_free:
	il_eeprom_free(il);
out_iounmap:
	pci_iounmap(pdev, il->hw_base);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
out_ieee80211_free_hw:
	il_free_traffic_mem(il);
	ieee80211_free_hw(il->hw);
out:
	return err;
}
3830
/*
 * il3945_pci_remove - PCI remove: tear down one device.
 *
 * Reverse of il3945_pci_probe(): unregister from mac80211 (or force the
 * device down), reset the hardware, quiesce IRQs/tasklets, then release
 * every resource in roughly the opposite order of allocation.  The
 * teardown order below is load-bearing — see the inline comments.
 */
static void __devexit
il3945_pci_remove(struct pci_dev *pdev)
{
	struct il_priv *il = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!il)
		return;

	D_INFO("*** UNLOAD DRIVER ***\n");

	il_dbgfs_unregister(il);

	/* Stop new work/commands from being issued from here on. */
	set_bit(S_EXIT_PENDING, &il->status);

	il_leds_exit(il);

	if (il->mac80211_registered) {
		ieee80211_unregister_hw(il->hw);
		il->mac80211_registered = 0;
	} else {
		il3945_down(il);
	}

	/*
	 * Make sure device is reset to low power before unloading driver.
	 * This may be redundant with il_down(), but there are paths to
	 * run il_down() without calling apm_ops.stop(), and there are
	 * paths to avoid running il_down() at all before leaving driver.
	 * This (inexpensive) call *makes sure* device is reset.
	 */
	il_apm_stop(il);

	/* make sure we flush any pending irq or
	 * tasklet for the driver
	 */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	il3945_synchronize_irq(il);

	sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);

	cancel_delayed_work_sync(&il->_3945.rfkill_poll);

	il3945_dealloc_ucode_pci(il);

	if (il->rxq.bd)
		il3945_rx_queue_free(il, &il->rxq);
	il3945_hw_txq_ctx_free(il);

	il3945_unset_hw_params(il);

	/*netif_stop_queue(dev); */
	flush_workqueue(il->workqueue);

	/* ieee80211_unregister_hw calls il3945_mac_stop, which flushes
	 * il->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;
	il_free_traffic_mem(il);

	free_irq(pdev->irq, il);
	pci_disable_msi(pdev);

	pci_iounmap(pdev, il->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	il_free_channel_map(il);
	il_free_geos(il);
	kfree(il->scan_cmd);
	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);

	ieee80211_free_hw(il->hw);
}
3911
3912/*****************************************************************************
3913 *
3914 * driver and module entry point
3915 *
3916 *****************************************************************************/
3917
/* PCI driver glue; suspend/resume ops come from the shared iwlegacy core. */
static struct pci_driver il3945_driver = {
	.name = DRV_NAME,
	.id_table = il3945_hw_card_ids,
	.probe = il3945_pci_probe,
	.remove = __devexit_p(il3945_pci_remove),
	.driver.pm = IL_LEGACY_PM_OPS,
};
3925
3926static int __init
3927il3945_init(void)
3928{
3929
3930 int ret;
3931 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
3932 pr_info(DRV_COPYRIGHT "\n");
3933
3934 ret = il3945_rate_control_register();
3935 if (ret) {
3936 pr_err("Unable to register rate control algorithm: %d\n", ret);
3937 return ret;
3938 }
3939
3940 ret = pci_register_driver(&il3945_driver);
3941 if (ret) {
3942 pr_err("Unable to initialize PCI module\n");
3943 goto error_register;
3944 }
3945
3946 return ret;
3947
3948error_register:
3949 il3945_rate_control_unregister();
3950 return ret;
3951}
3952
/*
 * il3945_exit - module exit: unregister the PCI driver first so no new
 * devices bind, then drop the rate-control algorithm.
 */
static void __exit
il3945_exit(void)
{
	pci_unregister_driver(&il3945_driver);
	il3945_rate_control_unregister();
}
3959
MODULE_FIRMWARE(IL3945_MODULE_FIRMWARE(IL3945_UCODE_API_MAX));

/* Module parameters (visible under /sys/module/.../parameters). */
module_param_named(antenna, il3945_mod_params.antenna, int, S_IRUGO);
MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
module_param_named(swcrypto, il3945_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using software crypto (default 1 [software])");
module_param_named(disable_hw_scan, il3945_mod_params.disable_hw_scan, int,
		   S_IRUGO);
MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
#ifdef CONFIG_IWLEGACY_DEBUG
/* debug is writable at runtime (S_IWUSR), unlike the others. */
module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif
module_param_named(fw_restart, il3945_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");

module_exit(il3945_exit);
module_init(il3945_init);
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
new file mode 100644
index 000000000000..30ad404f8df7
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -0,0 +1,995 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/init.h>
29#include <linux/skbuff.h>
30#include <linux/slab.h>
31#include <net/mac80211.h>
32
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/delay.h>
36
37#include <linux/workqueue.h>
38
39#include "commands.h"
40#include "3945.h"
41
42#define RS_NAME "iwl-3945-rs"
43
/*
 * Expected-throughput tables, one entry per rate index
 * (RATE_COUNT_3945 entries); 0 means "rate not usable in this mode".
 * Suffixes: _g = 2.4 GHz mixed, _g_prot = 2.4 GHz with protection,
 * _a = 5 GHz (OFDM only), _b = CCK only (the default at rate init).
 */
static s32 il3945_expected_tpt_g[RATE_COUNT_3945] = {
	7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
};

static s32 il3945_expected_tpt_g_prot[RATE_COUNT_3945] = {
	7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
};

static s32 il3945_expected_tpt_a[RATE_COUNT_3945] = {
	0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
};

static s32 il3945_expected_tpt_b[RATE_COUNT_3945] = {
	7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
};
59
/* Maps a minimum RSSI threshold to a starting rate index. */
struct il3945_tpt_entry {
	s8 min_rssi;
	u8 idx;
};

/*
 * RSSI -> initial-rate tables, sorted by descending min_rssi; the lookup
 * in il3945_get_rate_idx_by_rssi() walks down to the first threshold met.
 */
static struct il3945_tpt_entry il3945_tpt_table_a[] = {
	{-60, RATE_54M_IDX},
	{-64, RATE_48M_IDX},
	{-72, RATE_36M_IDX},
	{-80, RATE_24M_IDX},
	{-84, RATE_18M_IDX},
	{-85, RATE_12M_IDX},
	{-87, RATE_9M_IDX},
	{-89, RATE_6M_IDX}
};

static struct il3945_tpt_entry il3945_tpt_table_g[] = {
	{-60, RATE_54M_IDX},
	{-64, RATE_48M_IDX},
	{-68, RATE_36M_IDX},
	{-80, RATE_24M_IDX},
	{-84, RATE_18M_IDX},
	{-85, RATE_12M_IDX},
	{-86, RATE_11M_IDX},
	{-88, RATE_5M_IDX},
	{-90, RATE_2M_IDX},
	{-92, RATE_1M_IDX}
};
88
/* Rate-scaling tuning constants (thresholds used by the scaling logic). */
#define RATE_MAX_WINDOW		62	/* Tx-history window depth, in frames */
#define RATE_FLUSH		(3*HZ)	/* default history flush period */
#define RATE_WIN_FLUSH		(HZ/2)	/* per-rate window staleness limit */
#define IL39_RATE_HIGH_TH	11520	/* success_ratio thresholds (128*percent) */
#define IL_SUCCESS_UP_TH	8960
#define IL_SUCCESS_DOWN_TH	10880
#define RATE_MIN_FAILURE_TH	6	/* min samples before trusting stats */
#define RATE_MIN_SUCCESS_TH	8
#define RATE_DECREASE_TH	1920
#define RATE_RETRY_TH		15	/* clamp on reported retry count */
99
100static u8
101il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band)
102{
103 u32 idx = 0;
104 u32 table_size = 0;
105 struct il3945_tpt_entry *tpt_table = NULL;
106
107 if (rssi < IL_MIN_RSSI_VAL || rssi > IL_MAX_RSSI_VAL)
108 rssi = IL_MIN_RSSI_VAL;
109
110 switch (band) {
111 case IEEE80211_BAND_2GHZ:
112 tpt_table = il3945_tpt_table_g;
113 table_size = ARRAY_SIZE(il3945_tpt_table_g);
114 break;
115
116 case IEEE80211_BAND_5GHZ:
117 tpt_table = il3945_tpt_table_a;
118 table_size = ARRAY_SIZE(il3945_tpt_table_a);
119 break;
120
121 default:
122 BUG();
123 break;
124 }
125
126 while (idx < table_size && rssi < tpt_table[idx].min_rssi)
127 idx++;
128
129 idx = min(idx, (table_size - 1));
130
131 return tpt_table[idx].idx;
132}
133
134static void
135il3945_clear_win(struct il3945_rate_scale_data *win)
136{
137 win->data = 0;
138 win->success_counter = 0;
139 win->success_ratio = -1;
140 win->counter = 0;
141 win->average_tpt = IL_INVALID_VALUE;
142 win->stamp = 0;
143}
144
145/**
146 * il3945_rate_scale_flush_wins - flush out the rate scale wins
147 *
148 * Returns the number of wins that have gathered data but were
149 * not flushed. If there were any that were not flushed, then
150 * reschedule the rate flushing routine.
151 */
152static int
153il3945_rate_scale_flush_wins(struct il3945_rs_sta *rs_sta)
154{
155 int unflushed = 0;
156 int i;
157 unsigned long flags;
158 struct il_priv *il __maybe_unused = rs_sta->il;
159
160 /*
161 * For each rate, if we have collected data on that rate
162 * and it has been more than RATE_WIN_FLUSH
163 * since we flushed, clear out the gathered stats
164 */
165 for (i = 0; i < RATE_COUNT_3945; i++) {
166 if (!rs_sta->win[i].counter)
167 continue;
168
169 spin_lock_irqsave(&rs_sta->lock, flags);
170 if (time_after(jiffies, rs_sta->win[i].stamp + RATE_WIN_FLUSH)) {
171 D_RATE("flushing %d samples of rate " "idx %d\n",
172 rs_sta->win[i].counter, i);
173 il3945_clear_win(&rs_sta->win[i]);
174 } else
175 unflushed++;
176 spin_unlock_irqrestore(&rs_sta->lock, flags);
177 }
178
179 return unflushed;
180}
181
#define RATE_FLUSH_MAX		5000	/* msec */
#define RATE_FLUSH_MIN		50	/* msec */
#define IL_AVERAGE_PACKETS             1500

/*
 * il3945_bg_rate_scale_flush - flush-timer callback for one station.
 *
 * Flushes stale per-rate windows, then adapts the flush period to the
 * observed packet rate (bounded to [RATE_FLUSH_MIN, RATE_FLUSH_MAX] ms)
 * and re-arms the timer — but only while unflushed data remains.
 */
static void
il3945_bg_rate_scale_flush(unsigned long data)
{
	struct il3945_rs_sta *rs_sta = (void *)data;
	struct il_priv *il __maybe_unused = rs_sta->il;
	int unflushed = 0;
	unsigned long flags;
	u32 packet_count, duration, pps;

	D_RATE("enter\n");

	unflushed = il3945_rate_scale_flush_wins(rs_sta);

	spin_lock_irqsave(&rs_sta->lock, flags);

	/* Number of packets Rx'd since last time this timer ran */
	packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;

	rs_sta->last_tx_packets = rs_sta->tx_packets + 1;

	if (unflushed) {
		duration =
		    jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);

		D_RATE("Tx'd %d packets in %dms\n", packet_count, duration);

		/* Determine packets per second */
		if (duration)
			pps = (packet_count * 1000) / duration;
		else
			pps = 0;

		/* Scale the flush period so roughly IL_AVERAGE_PACKETS
		 * frames are sent per period, within the min/max bounds. */
		if (pps) {
			duration = (IL_AVERAGE_PACKETS * 1000) / pps;
			if (duration < RATE_FLUSH_MIN)
				duration = RATE_FLUSH_MIN;
			else if (duration > RATE_FLUSH_MAX)
				duration = RATE_FLUSH_MAX;
		} else
			duration = RATE_FLUSH_MAX;

		rs_sta->flush_time = msecs_to_jiffies(duration);

		D_RATE("new flush period: %d msec ave %d\n", duration,
		       packet_count);

		mod_timer(&rs_sta->rate_scale_flush,
			  jiffies + rs_sta->flush_time);

		rs_sta->last_partial_flush = jiffies;
	} else {
		rs_sta->flush_time = RATE_FLUSH;
		rs_sta->flush_pending = 0;
	}
	/* If there weren't any unflushed entries, we don't schedule the timer
	 * to run again */

	rs_sta->last_flush = jiffies;

	spin_unlock_irqrestore(&rs_sta->lock, flags);

	D_RATE("leave\n");
}
249
250/**
251 * il3945_collect_tx_data - Update the success/failure sliding win
252 *
253 * We keep a sliding win of the last 64 packets transmitted
254 * at this rate. win->data contains the bitmask of successful
255 * packets.
256 */
257static void
258il3945_collect_tx_data(struct il3945_rs_sta *rs_sta,
259 struct il3945_rate_scale_data *win, int success,
260 int retries, int idx)
261{
262 unsigned long flags;
263 s32 fail_count;
264 struct il_priv *il __maybe_unused = rs_sta->il;
265
266 if (!retries) {
267 D_RATE("leave: retries == 0 -- should be at least 1\n");
268 return;
269 }
270
271 spin_lock_irqsave(&rs_sta->lock, flags);
272
273 /*
274 * Keep track of only the latest 62 tx frame attempts in this rate's
275 * history win; anything older isn't really relevant any more.
276 * If we have filled up the sliding win, drop the oldest attempt;
277 * if the oldest attempt (highest bit in bitmap) shows "success",
278 * subtract "1" from the success counter (this is the main reason
279 * we keep these bitmaps!).
280 * */
281 while (retries > 0) {
282 if (win->counter >= RATE_MAX_WINDOW) {
283
284 /* remove earliest */
285 win->counter = RATE_MAX_WINDOW - 1;
286
287 if (win->data & (1ULL << (RATE_MAX_WINDOW - 1))) {
288 win->data &= ~(1ULL << (RATE_MAX_WINDOW - 1));
289 win->success_counter--;
290 }
291 }
292
293 /* Increment frames-attempted counter */
294 win->counter++;
295
296 /* Shift bitmap by one frame (throw away oldest history),
297 * OR in "1", and increment "success" if this
298 * frame was successful. */
299 win->data <<= 1;
300 if (success > 0) {
301 win->success_counter++;
302 win->data |= 0x1;
303 success--;
304 }
305
306 retries--;
307 }
308
309 /* Calculate current success ratio, avoid divide-by-0! */
310 if (win->counter > 0)
311 win->success_ratio =
312 128 * (100 * win->success_counter) / win->counter;
313 else
314 win->success_ratio = IL_INVALID_VALUE;
315
316 fail_count = win->counter - win->success_counter;
317
318 /* Calculate average throughput, if we have enough history. */
319 if (fail_count >= RATE_MIN_FAILURE_TH ||
320 win->success_counter >= RATE_MIN_SUCCESS_TH)
321 win->average_tpt =
322 ((win->success_ratio * rs_sta->expected_tpt[idx] +
323 64) / 128);
324 else
325 win->average_tpt = IL_INVALID_VALUE;
326
327 /* Tag this win as having been updated */
328 win->stamp = jiffies;
329
330 spin_unlock_irqrestore(&rs_sta->lock, flags);
331
332}
333
334/*
335 * Called after adding a new station to initialize rate scaling
336 */
337void
338il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
339{
340 struct ieee80211_hw *hw = il->hw;
341 struct ieee80211_conf *conf = &il->hw->conf;
342 struct il3945_sta_priv *psta;
343 struct il3945_rs_sta *rs_sta;
344 struct ieee80211_supported_band *sband;
345 int i;
346
347 D_INFO("enter\n");
348 if (sta_id == il->ctx.bcast_sta_id)
349 goto out;
350
351 psta = (struct il3945_sta_priv *)sta->drv_priv;
352 rs_sta = &psta->rs_sta;
353 sband = hw->wiphy->bands[conf->channel->band];
354
355 rs_sta->il = il;
356
357 rs_sta->start_rate = RATE_INVALID;
358
359 /* default to just 802.11b */
360 rs_sta->expected_tpt = il3945_expected_tpt_b;
361
362 rs_sta->last_partial_flush = jiffies;
363 rs_sta->last_flush = jiffies;
364 rs_sta->flush_time = RATE_FLUSH;
365 rs_sta->last_tx_packets = 0;
366
367 rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
368 rs_sta->rate_scale_flush.function = il3945_bg_rate_scale_flush;
369
370 for (i = 0; i < RATE_COUNT_3945; i++)
371 il3945_clear_win(&rs_sta->win[i]);
372
373 /* TODO: what is a good starting rate for STA? About middle? Maybe not
374 * the lowest or the highest rate.. Could consider using RSSI from
375 * previous packets? Need to have IEEE 802.1X auth succeed immediately
376 * after assoc.. */
377
378 for (i = sband->n_bitrates - 1; i >= 0; i--) {
379 if (sta->supp_rates[sband->band] & (1 << i)) {
380 rs_sta->last_txrate_idx = i;
381 break;
382 }
383 }
384
385 il->_3945.sta_supp_rates = sta->supp_rates[sband->band];
386 /* For 5 GHz band it start at IL_FIRST_OFDM_RATE */
387 if (sband->band == IEEE80211_BAND_5GHZ) {
388 rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
389 il->_3945.sta_supp_rates =
390 il->_3945.sta_supp_rates << IL_FIRST_OFDM_RATE;
391 }
392
393out:
394 il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
395
396 D_INFO("leave\n");
397}
398
399static void *
400il3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
401{
402 return hw->priv;
403}
404
/*
 * Rate-control "free" hook.  mac80211 requires one, but our per-hw
 * context is the driver priv (see il3945_rs_alloc()), which is owned
 * and freed elsewhere — so there is nothing to do here.
 */
static void
il3945_rs_free(void *il)
{
}
411
412static void *
413il3945_rs_alloc_sta(void *il_priv, struct ieee80211_sta *sta, gfp_t gfp)
414{
415 struct il3945_rs_sta *rs_sta;
416 struct il3945_sta_priv *psta = (void *)sta->drv_priv;
417 struct il_priv *il __maybe_unused = il_priv;
418
419 D_RATE("enter\n");
420
421 rs_sta = &psta->rs_sta;
422
423 spin_lock_init(&rs_sta->lock);
424 init_timer(&rs_sta->rate_scale_flush);
425
426 D_RATE("leave\n");
427
428 return rs_sta;
429}
430
431static void
432il3945_rs_free_sta(void *il_priv, struct ieee80211_sta *sta, void *il_sta)
433{
434 struct il3945_rs_sta *rs_sta = il_sta;
435
436 /*
437 * Be careful not to use any members of il3945_rs_sta (like trying
438 * to use il_priv to print out debugging) since it may not be fully
439 * initialized at this point.
440 */
441 del_timer_sync(&rs_sta->rate_scale_flush);
442}
443
444/**
445 * il3945_rs_tx_status - Update rate control values based on Tx results
446 *
447 * NOTE: Uses il_priv->retry_rate for the # of retries attempted by
448 * the hardware for each rate.
449 */
450static void
451il3945_rs_tx_status(void *il_rate, struct ieee80211_supported_band *sband,
452 struct ieee80211_sta *sta, void *il_sta,
453 struct sk_buff *skb)
454{
455 s8 retries = 0, current_count;
456 int scale_rate_idx, first_idx, last_idx;
457 unsigned long flags;
458 struct il_priv *il = (struct il_priv *)il_rate;
459 struct il3945_rs_sta *rs_sta = il_sta;
460 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
461
462 D_RATE("enter\n");
463
464 retries = info->status.rates[0].count;
465 /* Sanity Check for retries */
466 if (retries > RATE_RETRY_TH)
467 retries = RATE_RETRY_TH;
468
469 first_idx = sband->bitrates[info->status.rates[0].idx].hw_value;
470 if (first_idx < 0 || first_idx >= RATE_COUNT_3945) {
471 D_RATE("leave: Rate out of bounds: %d\n", first_idx);
472 return;
473 }
474
475 if (!il_sta) {
476 D_RATE("leave: No STA il data to update!\n");
477 return;
478 }
479
480 /* Treat uninitialized rate scaling data same as non-existing. */
481 if (!rs_sta->il) {
482 D_RATE("leave: STA il data uninitialized!\n");
483 return;
484 }
485
486 rs_sta->tx_packets++;
487
488 scale_rate_idx = first_idx;
489 last_idx = first_idx;
490
491 /*
492 * Update the win for each rate. We determine which rates
493 * were Tx'd based on the total number of retries vs. the number
494 * of retries configured for each rate -- currently set to the
495 * il value 'retry_rate' vs. rate specific
496 *
497 * On exit from this while loop last_idx indicates the rate
498 * at which the frame was finally transmitted (or failed if no
499 * ACK)
500 */
501 while (retries > 1) {
502 if ((retries - 1) < il->retry_rate) {
503 current_count = (retries - 1);
504 last_idx = scale_rate_idx;
505 } else {
506 current_count = il->retry_rate;
507 last_idx = il3945_rs_next_rate(il, scale_rate_idx);
508 }
509
510 /* Update this rate accounting for as many retries
511 * as was used for it (per current_count) */
512 il3945_collect_tx_data(rs_sta, &rs_sta->win[scale_rate_idx], 0,
513 current_count, scale_rate_idx);
514 D_RATE("Update rate %d for %d retries.\n", scale_rate_idx,
515 current_count);
516
517 retries -= current_count;
518
519 scale_rate_idx = last_idx;
520 }
521
522 /* Update the last idx win with success/failure based on ACK */
523 D_RATE("Update rate %d with %s.\n", last_idx,
524 (info->flags & IEEE80211_TX_STAT_ACK) ? "success" : "failure");
525 il3945_collect_tx_data(rs_sta, &rs_sta->win[last_idx],
526 info->flags & IEEE80211_TX_STAT_ACK, 1,
527 last_idx);
528
529 /* We updated the rate scale win -- if its been more than
530 * flush_time since the last run, schedule the flush
531 * again */
532 spin_lock_irqsave(&rs_sta->lock, flags);
533
534 if (!rs_sta->flush_pending &&
535 time_after(jiffies, rs_sta->last_flush + rs_sta->flush_time)) {
536
537 rs_sta->last_partial_flush = jiffies;
538 rs_sta->flush_pending = 1;
539 mod_timer(&rs_sta->rate_scale_flush,
540 jiffies + rs_sta->flush_time);
541 }
542
543 spin_unlock_irqrestore(&rs_sta->lock, flags);
544
545 D_RATE("leave\n");
546}
547
548static u16
549il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
550 enum ieee80211_band band)
551{
552 u8 high = RATE_INVALID;
553 u8 low = RATE_INVALID;
554 struct il_priv *il __maybe_unused = rs_sta->il;
555
556 /* 802.11A walks to the next literal adjacent rate in
557 * the rate table */
558 if (unlikely(band == IEEE80211_BAND_5GHZ)) {
559 int i;
560 u32 mask;
561
562 /* Find the previous rate that is in the rate mask */
563 i = idx - 1;
564 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
565 if (rate_mask & mask) {
566 low = i;
567 break;
568 }
569 }
570
571 /* Find the next rate that is in the rate mask */
572 i = idx + 1;
573 for (mask = (1 << i); i < RATE_COUNT_3945; i++, mask <<= 1) {
574 if (rate_mask & mask) {
575 high = i;
576 break;
577 }
578 }
579
580 return (high << 8) | low;
581 }
582
583 low = idx;
584 while (low != RATE_INVALID) {
585 if (rs_sta->tgg)
586 low = il3945_rates[low].prev_rs_tgg;
587 else
588 low = il3945_rates[low].prev_rs;
589 if (low == RATE_INVALID)
590 break;
591 if (rate_mask & (1 << low))
592 break;
593 D_RATE("Skipping masked lower rate: %d\n", low);
594 }
595
596 high = idx;
597 while (high != RATE_INVALID) {
598 if (rs_sta->tgg)
599 high = il3945_rates[high].next_rs_tgg;
600 else
601 high = il3945_rates[high].next_rs;
602 if (high == RATE_INVALID)
603 break;
604 if (rate_mask & (1 << high))
605 break;
606 D_RATE("Skipping masked higher rate: %d\n", high);
607 }
608
609 return (high << 8) | low;
610}
611
612/**
613 * il3945_rs_get_rate - find the rate for the requested packet
614 *
615 * Returns the ieee80211_rate structure allocated by the driver.
616 *
617 * The rate control algorithm has no internal mapping between hw_mode's
618 * rate ordering and the rate ordering used by the rate control algorithm.
619 *
620 * The rate control algorithm uses a single table of rates that goes across
621 * the entire A/B/G spectrum vs. being limited to just one particular
622 * hw_mode.
623 *
624 * As such, we can't convert the idx obtained below into the hw_mode's
625 * rate table and must reference the driver allocated rate table
626 *
627 */
628static void
629il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
630 struct ieee80211_tx_rate_control *txrc)
631{
632 struct ieee80211_supported_band *sband = txrc->sband;
633 struct sk_buff *skb = txrc->skb;
634 u8 low = RATE_INVALID;
635 u8 high = RATE_INVALID;
636 u16 high_low;
637 int idx;
638 struct il3945_rs_sta *rs_sta = il_sta;
639 struct il3945_rate_scale_data *win = NULL;
640 int current_tpt = IL_INVALID_VALUE;
641 int low_tpt = IL_INVALID_VALUE;
642 int high_tpt = IL_INVALID_VALUE;
643 u32 fail_count;
644 s8 scale_action = 0;
645 unsigned long flags;
646 u16 rate_mask;
647 s8 max_rate_idx = -1;
648 struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
649 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
650
651 D_RATE("enter\n");
652
653 /* Treat uninitialized rate scaling data same as non-existing. */
654 if (rs_sta && !rs_sta->il) {
655 D_RATE("Rate scaling information not initialized yet.\n");
656 il_sta = NULL;
657 }
658
659 if (rate_control_send_low(sta, il_sta, txrc))
660 return;
661
662 rate_mask = sta->supp_rates[sband->band];
663
664 /* get user max rate if set */
665 max_rate_idx = txrc->max_rate_idx;
666 if (sband->band == IEEE80211_BAND_5GHZ && max_rate_idx != -1)
667 max_rate_idx += IL_FIRST_OFDM_RATE;
668 if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT)
669 max_rate_idx = -1;
670
671 idx = min(rs_sta->last_txrate_idx & 0xffff, RATE_COUNT_3945 - 1);
672
673 if (sband->band == IEEE80211_BAND_5GHZ)
674 rate_mask = rate_mask << IL_FIRST_OFDM_RATE;
675
676 spin_lock_irqsave(&rs_sta->lock, flags);
677
678 /* for recent assoc, choose best rate regarding
679 * to rssi value
680 */
681 if (rs_sta->start_rate != RATE_INVALID) {
682 if (rs_sta->start_rate < idx &&
683 (rate_mask & (1 << rs_sta->start_rate)))
684 idx = rs_sta->start_rate;
685 rs_sta->start_rate = RATE_INVALID;
686 }
687
688 /* force user max rate if set by user */
689 if (max_rate_idx != -1 && max_rate_idx < idx) {
690 if (rate_mask & (1 << max_rate_idx))
691 idx = max_rate_idx;
692 }
693
694 win = &(rs_sta->win[idx]);
695
696 fail_count = win->counter - win->success_counter;
697
698 if (fail_count < RATE_MIN_FAILURE_TH &&
699 win->success_counter < RATE_MIN_SUCCESS_TH) {
700 spin_unlock_irqrestore(&rs_sta->lock, flags);
701
702 D_RATE("Invalid average_tpt on rate %d: "
703 "counter: %d, success_counter: %d, "
704 "expected_tpt is %sNULL\n", idx, win->counter,
705 win->success_counter,
706 rs_sta->expected_tpt ? "not " : "");
707
708 /* Can't calculate this yet; not enough history */
709 win->average_tpt = IL_INVALID_VALUE;
710 goto out;
711
712 }
713
714 current_tpt = win->average_tpt;
715
716 high_low =
717 il3945_get_adjacent_rate(rs_sta, idx, rate_mask, sband->band);
718 low = high_low & 0xff;
719 high = (high_low >> 8) & 0xff;
720
721 /* If user set max rate, dont allow higher than user constrain */
722 if (max_rate_idx != -1 && max_rate_idx < high)
723 high = RATE_INVALID;
724
725 /* Collect Measured throughputs of adjacent rates */
726 if (low != RATE_INVALID)
727 low_tpt = rs_sta->win[low].average_tpt;
728
729 if (high != RATE_INVALID)
730 high_tpt = rs_sta->win[high].average_tpt;
731
732 spin_unlock_irqrestore(&rs_sta->lock, flags);
733
734 scale_action = 0;
735
736 /* Low success ratio , need to drop the rate */
737 if (win->success_ratio < RATE_DECREASE_TH || !current_tpt) {
738 D_RATE("decrease rate because of low success_ratio\n");
739 scale_action = -1;
740 /* No throughput measured yet for adjacent rates,
741 * try increase */
742 } else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {
743
744 if (high != RATE_INVALID &&
745 win->success_ratio >= RATE_INCREASE_TH)
746 scale_action = 1;
747 else if (low != RATE_INVALID)
748 scale_action = 0;
749
750 /* Both adjacent throughputs are measured, but neither one has
751 * better throughput; we're using the best rate, don't change
752 * it! */
753 } else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE
754 && low_tpt < current_tpt && high_tpt < current_tpt) {
755
756 D_RATE("No action -- low [%d] & high [%d] < "
757 "current_tpt [%d]\n", low_tpt, high_tpt, current_tpt);
758 scale_action = 0;
759
760 /* At least one of the rates has better throughput */
761 } else {
762 if (high_tpt != IL_INVALID_VALUE) {
763
764 /* High rate has better throughput, Increase
765 * rate */
766 if (high_tpt > current_tpt &&
767 win->success_ratio >= RATE_INCREASE_TH)
768 scale_action = 1;
769 else {
770 D_RATE("decrease rate because of high tpt\n");
771 scale_action = 0;
772 }
773 } else if (low_tpt != IL_INVALID_VALUE) {
774 if (low_tpt > current_tpt) {
775 D_RATE("decrease rate because of low tpt\n");
776 scale_action = -1;
777 } else if (win->success_ratio >= RATE_INCREASE_TH) {
778 /* Lower rate has better
779 * throughput,decrease rate */
780 scale_action = 1;
781 }
782 }
783 }
784
785 /* Sanity check; asked for decrease, but success rate or throughput
786 * has been good at old rate. Don't change it. */
787 if (scale_action == -1 && low != RATE_INVALID &&
788 (win->success_ratio > RATE_HIGH_TH ||
789 current_tpt > 100 * rs_sta->expected_tpt[low]))
790 scale_action = 0;
791
792 switch (scale_action) {
793 case -1:
794
795 /* Decrese rate */
796 if (low != RATE_INVALID)
797 idx = low;
798 break;
799
800 case 1:
801 /* Increase rate */
802 if (high != RATE_INVALID)
803 idx = high;
804
805 break;
806
807 case 0:
808 default:
809 /* No change */
810 break;
811 }
812
813 D_RATE("Selected %d (action %d) - low %d high %d\n", idx, scale_action,
814 low, high);
815
816out:
817
818 if (sband->band == IEEE80211_BAND_5GHZ) {
819 if (WARN_ON_ONCE(idx < IL_FIRST_OFDM_RATE))
820 idx = IL_FIRST_OFDM_RATE;
821 rs_sta->last_txrate_idx = idx;
822 info->control.rates[0].idx = idx - IL_FIRST_OFDM_RATE;
823 } else {
824 rs_sta->last_txrate_idx = idx;
825 info->control.rates[0].idx = rs_sta->last_txrate_idx;
826 }
827
828 D_RATE("leave: %d\n", idx);
829}
830
831#ifdef CONFIG_MAC80211_DEBUGFS
/* Standard debugfs open: hand the inode's private data (the per-station
 * rate-scale state set at file creation) to the read handler. */
static int
il3945_open_file_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
838
839static ssize_t
840il3945_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
841 size_t count, loff_t *ppos)
842{
843 char *buff;
844 int desc = 0;
845 int j;
846 ssize_t ret;
847 struct il3945_rs_sta *lq_sta = file->private_data;
848
849 buff = kmalloc(1024, GFP_KERNEL);
850 if (!buff)
851 return -ENOMEM;
852
853 desc +=
854 sprintf(buff + desc,
855 "tx packets=%d last rate idx=%d\n"
856 "rate=0x%X flush time %d\n", lq_sta->tx_packets,
857 lq_sta->last_txrate_idx, lq_sta->start_rate,
858 jiffies_to_msecs(lq_sta->flush_time));
859 for (j = 0; j < RATE_COUNT_3945; j++) {
860 desc +=
861 sprintf(buff + desc, "counter=%d success=%d %%=%d\n",
862 lq_sta->win[j].counter,
863 lq_sta->win[j].success_counter,
864 lq_sta->win[j].success_ratio);
865 }
866 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
867 kfree(buff);
868 return ret;
869}
870
/* debugfs file operations for the per-station "rate_stats_table" entry */
static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
	.read = il3945_sta_dbgfs_stats_table_read,
	.open = il3945_open_file_generic,
	.llseek = default_llseek,
};
876
/* mac80211 rate-control hook: create this station's debugfs statistics
 * file under @dir (the per-station rate-control directory). */
static void
il3945_add_debugfs(void *il, void *il_sta, struct dentry *dir)
{
	struct il3945_rs_sta *lq_sta = il_sta;

	lq_sta->rs_sta_dbgfs_stats_table_file =
	    debugfs_create_file("rate_stats_table", 0600, dir, lq_sta,
				&rs_sta_dbgfs_stats_table_ops);

}
887
/* mac80211 rate-control hook: tear down the debugfs file created by
 * il3945_add_debugfs() when the station goes away. */
static void
il3945_remove_debugfs(void *il, void *il_sta)
{
	struct il3945_rs_sta *lq_sta = il_sta;
	debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
}
894#endif
895
896/*
897 * Initialization of rate scaling information is done by driver after
898 * the station is added. Since mac80211 calls this function before a
899 * station is added we ignore it.
900 */
901static void
902il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
903 struct ieee80211_sta *sta, void *il_sta)
904{
905}
906
/* mac80211 rate-control algorithm registration table for the 3945 */
static struct rate_control_ops rs_ops = {
	.module = NULL,
	.name = RS_NAME,
	.tx_status = il3945_rs_tx_status,
	.get_rate = il3945_rs_get_rate,
	.rate_init = il3945_rs_rate_init_stub,
	.alloc = il3945_rs_alloc,
	.free = il3945_rs_free,
	.alloc_sta = il3945_rs_alloc_sta,
	.free_sta = il3945_rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = il3945_add_debugfs,
	.remove_sta_debugfs = il3945_remove_debugfs,
#endif

};
923
/**
 * il3945_rate_scale_init - seed rate scaling for a newly added station
 *
 * Looks up the mac80211 station for @sta_id, selects the expected-
 * throughput table matching the current band, and derives a starting
 * rate from the last measured RX RSSI.
 */
void
il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
{
	struct il_priv *il = hw->priv;
	s32 rssi = 0;
	unsigned long flags;
	struct il3945_rs_sta *rs_sta;
	struct ieee80211_sta *sta;
	struct il3945_sta_priv *psta;

	D_RATE("enter\n");

	/* RCU protects the mac80211 station lookup and use below */
	rcu_read_lock();

	sta =
	    ieee80211_find_sta(il->ctx.vif, il->stations[sta_id].sta.sta.addr);
	if (!sta) {
		D_RATE("Unable to find station to initialize rate scaling.\n");
		rcu_read_unlock();
		return;
	}

	psta = (void *)sta->drv_priv;
	rs_sta = &psta->rs_sta;

	spin_lock_irqsave(&rs_sta->lock, flags);

	/* Pick the expected-throughput table for the current band */
	rs_sta->tgg = 0;
	switch (il->band) {
	case IEEE80211_BAND_2GHZ:
		/* TODO: this always does G, not a regression */
		if (il->ctx.active.flags & RXON_FLG_TGG_PROTECT_MSK) {
			rs_sta->tgg = 1;
			rs_sta->expected_tpt = il3945_expected_tpt_g_prot;
		} else
			rs_sta->expected_tpt = il3945_expected_tpt_g;
		break;

	case IEEE80211_BAND_5GHZ:
		rs_sta->expected_tpt = il3945_expected_tpt_a;
		break;
	case IEEE80211_NUM_BANDS:
		/* not a real band; reaching here is a driver bug */
		BUG();
		break;
	}

	spin_unlock_irqrestore(&rs_sta->lock, flags);

	/* Fall back to a floor value if no RX RSSI was measured yet */
	rssi = il->_3945.last_rx_rssi;
	if (rssi == 0)
		rssi = IL_MIN_RSSI_VAL;

	D_RATE("Network RSSI: %d\n", rssi);

	rs_sta->start_rate = il3945_get_rate_idx_by_rssi(rssi, il->band);

	D_RATE("leave: rssi %d assign rate idx: " "%d (plcp 0x%x)\n", rssi,
	       rs_sta->start_rate, il3945_rates[rs_sta->start_rate].plcp);
	rcu_read_unlock();
}
984
/* Register the 3945 rate-control algorithm with mac80211 (module init). */
int
il3945_rate_control_register(void)
{
	return ieee80211_rate_control_register(&rs_ops);
}
990
/* Unregister the 3945 rate-control algorithm from mac80211 (module exit). */
void
il3945_rate_control_unregister(void)
{
	ieee80211_rate_control_unregister(&rs_ops);
}
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
new file mode 100644
index 000000000000..863664f9ba8b
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -0,0 +1,2751 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/slab.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/sched.h>
35#include <linux/skbuff.h>
36#include <linux/netdevice.h>
37#include <linux/firmware.h>
38#include <linux/etherdevice.h>
39#include <asm/unaligned.h>
40#include <net/mac80211.h>
41
42#include "common.h"
43#include "3945.h"
44
45/* Send led command */
46static int
47il3945_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
48{
49 struct il_host_cmd cmd = {
50 .id = C_LEDS,
51 .len = sizeof(struct il_led_cmd),
52 .data = led_cmd,
53 .flags = CMD_ASYNC,
54 .callback = NULL,
55 };
56
57 return il_send_cmd(il, &cmd);
58}
59
/* LED operations exported to the common iwlegacy LED code */
const struct il_led_ops il3945_led_ops = {
	.cmd = il3945_send_led_cmd,
};
63
/* Build one il3945_rate_info entry: PLCP and IEEE values for rate r,
 * then prev/next links for IEEE order (ip/in), plain rate-scale order
 * (rp/rn), TGG-protected order (pp/np), and the table-idx pair. */
#define IL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np)    \
	[RATE_##r##M_IDX] = { RATE_##r##M_PLCP, \
			      RATE_##r##M_IEEE, \
			      RATE_##ip##M_IDX, \
			      RATE_##in##M_IDX, \
			      RATE_##rp##M_IDX, \
			      RATE_##rn##M_IDX, \
			      RATE_##pp##M_IDX, \
			      RATE_##np##M_IDX, \
			      RATE_##r##M_IDX_TBL, \
			      RATE_##ip##M_IDX_TBL }
75
76/*
77 * Parameter order:
78 * rate, prev rate, next rate, prev tgg rate, next tgg rate
79 *
80 * If there isn't a valid next or previous rate then INV is used which
81 * maps to RATE_INVALID
82 *
83 */
84const struct il3945_rate_info il3945_rates[RATE_COUNT_3945] = {
85 IL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */
86 IL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */
87 IL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /*5.5mbps */
88 IL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */
89 IL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */
90 IL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */
91 IL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */
92 IL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */
93 IL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */
94 IL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */
95 IL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */
96 IL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV), /* 54mbps */
97};
98
99static inline u8
100il3945_get_prev_ieee_rate(u8 rate_idx)
101{
102 u8 rate = il3945_rates[rate_idx].prev_ieee;
103
104 if (rate == RATE_INVALID)
105 rate = rate_idx;
106 return rate;
107}
108
/* 1 = enable the il3945_disable_events() function */
#define IL_EVT_DISABLE (0)
#define IL_EVT_DISABLE_SIZE (1532/32)

/**
 * il3945_disable_events - Disable selected events in uCode event log
 *
 * Disable an event by writing "1"s into "disable"
 * bitmap in SRAM. Bit position corresponds to Event # (id/type).
 * Default values of 0 enable uCode events to be logged.
 * Use for only special debugging. This function is just a placeholder as-is,
 * you'll need to provide the special bits! ...
 * ... and set IL_EVT_DISABLE to 1. */
void
il3945_disable_events(struct il_priv *il)
{
	int i;
	u32 base;		/* SRAM address of event log header */
	u32 disable_ptr;	/* SRAM address of event-disable bitmap array */
	u32 array_size;		/* # of u32 entries in array */
	/* all-zero placeholder bitmap: one bit per event id, 1 = disable */
	static const u32 evt_disable[IL_EVT_DISABLE_SIZE] = {
		0x00000000,	/* 31 - 0 Event id numbers */
		0x00000000,	/* 63 - 32 */
		0x00000000,	/* 95 - 64 */
		0x00000000,	/* 127 - 96 */
		0x00000000,	/* 159 - 128 */
		0x00000000,	/* 191 - 160 */
		0x00000000,	/* 223 - 192 */
		0x00000000,	/* 255 - 224 */
		0x00000000,	/* 287 - 256 */
		0x00000000,	/* 319 - 288 */
		0x00000000,	/* 351 - 320 */
		0x00000000,	/* 383 - 352 */
		0x00000000,	/* 415 - 384 */
		0x00000000,	/* 447 - 416 */
		0x00000000,	/* 479 - 448 */
		0x00000000,	/* 511 - 480 */
		0x00000000,	/* 543 - 512 */
		0x00000000,	/* 575 - 544 */
		0x00000000,	/* 607 - 576 */
		0x00000000,	/* 639 - 608 */
		0x00000000,	/* 671 - 640 */
		0x00000000,	/* 703 - 672 */
		0x00000000,	/* 735 - 704 */
		0x00000000,	/* 767 - 736 */
		0x00000000,	/* 799 - 768 */
		0x00000000,	/* 831 - 800 */
		0x00000000,	/* 863 - 832 */
		0x00000000,	/* 895 - 864 */
		0x00000000,	/* 927 - 896 */
		0x00000000,	/* 959 - 928 */
		0x00000000,	/* 991 - 960 */
		0x00000000,	/* 1023 - 992 */
		0x00000000,	/* 1055 - 1024 */
		0x00000000,	/* 1087 - 1056 */
		0x00000000,	/* 1119 - 1088 */
		0x00000000,	/* 1151 - 1120 */
		0x00000000,	/* 1183 - 1152 */
		0x00000000,	/* 1215 - 1184 */
		0x00000000,	/* 1247 - 1216 */
		0x00000000,	/* 1279 - 1248 */
		0x00000000,	/* 1311 - 1280 */
		0x00000000,	/* 1343 - 1312 */
		0x00000000,	/* 1375 - 1344 */
		0x00000000,	/* 1407 - 1376 */
		0x00000000,	/* 1439 - 1408 */
		0x00000000,	/* 1471 - 1440 */
		0x00000000,	/* 1503 - 1472 */
	};

	base = le32_to_cpu(il->card_alive.log_event_table_ptr);
	if (!il3945_hw_valid_rtc_data_addr(base)) {
		IL_ERR("Invalid event log pointer 0x%08X\n", base);
		return;
	}

	/* bitmap address and size live in the event log header in SRAM */
	disable_ptr = il_read_targ_mem(il, base + (4 * sizeof(u32)));
	array_size = il_read_targ_mem(il, base + (5 * sizeof(u32)));

	if (IL_EVT_DISABLE && array_size == IL_EVT_DISABLE_SIZE) {
		D_INFO("Disabling selected uCode log events at 0x%x\n",
		       disable_ptr);
		for (i = 0; i < IL_EVT_DISABLE_SIZE; i++)
			il_write_targ_mem(il, disable_ptr + (i * sizeof(u32)),
					  evt_disable[i]);

	} else {
		D_INFO("Selected uCode log events may be disabled\n");
		D_INFO("  by writing \"1\"s into disable bitmap\n");
		D_INFO("  in SRAM at 0x%x, size %d u32s\n", disable_ptr,
		       array_size);
	}

}
203
204static int
205il3945_hwrate_to_plcp_idx(u8 plcp)
206{
207 int idx;
208
209 for (idx = 0; idx < RATE_COUNT_3945; idx++)
210 if (il3945_rates[idx].plcp == plcp)
211 return idx;
212 return -1;
213}
214
#ifdef CONFIG_IWLEGACY_DEBUG
/* expand one TX_3945_STATUS_FAIL_* case into its name string */
#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x

/* Translate a TX response status word into a human-readable string
 * for debug output; compiled out (empty string) without debugging. */
static const char *
il3945_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_3945_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_ENTRY(SHORT_LIMIT);
		TX_STATUS_ENTRY(LONG_LIMIT);
		TX_STATUS_ENTRY(FIFO_UNDERRUN);
		TX_STATUS_ENTRY(MGMNT_ABORT);
		TX_STATUS_ENTRY(NEXT_FRAG);
		TX_STATUS_ENTRY(LIFE_EXPIRE);
		TX_STATUS_ENTRY(DEST_PS);
		TX_STATUS_ENTRY(ABORTED);
		TX_STATUS_ENTRY(BT_RETRY);
		TX_STATUS_ENTRY(STA_INVALID);
		TX_STATUS_ENTRY(FRAG_DROPPED);
		TX_STATUS_ENTRY(TID_DISABLE);
		TX_STATUS_ENTRY(FRAME_FLUSHED);
		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
		TX_STATUS_ENTRY(TX_LOCKED);
		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";
}
#else
static inline const char *
il3945_get_tx_fail_reason(u32 status)
{
	return "";
}
#endif
251
252/*
253 * get ieee prev rate from rate scale table.
254 * for A and B mode we need to overright prev
255 * value
256 */
257int
258il3945_rs_next_rate(struct il_priv *il, int rate)
259{
260 int next_rate = il3945_get_prev_ieee_rate(rate);
261
262 switch (il->band) {
263 case IEEE80211_BAND_5GHZ:
264 if (rate == RATE_12M_IDX)
265 next_rate = RATE_9M_IDX;
266 else if (rate == RATE_6M_IDX)
267 next_rate = RATE_6M_IDX;
268 break;
269 case IEEE80211_BAND_2GHZ:
270 if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
271 il_is_associated(il)) {
272 if (rate == RATE_11M_IDX)
273 next_rate = RATE_5M_IDX;
274 }
275 break;
276
277 default:
278 break;
279 }
280
281 return next_rate;
282}
283
284/**
285 * il3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
286 *
287 * When FW advances 'R' idx, all entries between old and new 'R' idx
288 * need to be reclaimed. As result, some free space forms. If there is
289 * enough free space (> low mark), wake the stack that feeds us.
290 */
291static void
292il3945_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
293{
294 struct il_tx_queue *txq = &il->txq[txq_id];
295 struct il_queue *q = &txq->q;
296 struct il_tx_info *tx_info;
297
298 BUG_ON(txq_id == IL39_CMD_QUEUE_NUM);
299
300 for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
301 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
302
303 tx_info = &txq->txb[txq->q.read_ptr];
304 ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
305 tx_info->skb = NULL;
306 il->cfg->ops->lib->txq_free_tfd(il, txq);
307 }
308
309 if (il_queue_space(q) > q->low_mark && txq_id >= 0 &&
310 txq_id != IL39_CMD_QUEUE_NUM && il->mac80211_registered)
311 il_wake_queue(il, txq);
312}
313
314/**
315 * il3945_hdl_tx - Handle Tx response
316 */
317static void
318il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
319{
320 struct il_rx_pkt *pkt = rxb_addr(rxb);
321 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
322 int txq_id = SEQ_TO_QUEUE(sequence);
323 int idx = SEQ_TO_IDX(sequence);
324 struct il_tx_queue *txq = &il->txq[txq_id];
325 struct ieee80211_tx_info *info;
326 struct il3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
327 u32 status = le32_to_cpu(tx_resp->status);
328 int rate_idx;
329 int fail;
330
331 if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
332 IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
333 "is out of range [0-%d] %d %d\n", txq_id, idx,
334 txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
335 return;
336 }
337
338 txq->time_stamp = jiffies;
339 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
340 ieee80211_tx_info_clear_status(info);
341
342 /* Fill the MRR chain with some info about on-chip retransmissions */
343 rate_idx = il3945_hwrate_to_plcp_idx(tx_resp->rate);
344 if (info->band == IEEE80211_BAND_5GHZ)
345 rate_idx -= IL_FIRST_OFDM_RATE;
346
347 fail = tx_resp->failure_frame;
348
349 info->status.rates[0].idx = rate_idx;
350 info->status.rates[0].count = fail + 1; /* add final attempt */
351
352 /* tx_status->rts_retry_count = tx_resp->failure_rts; */
353 info->flags |=
354 ((status & TX_STATUS_MSK) ==
355 TX_STATUS_SUCCESS) ? IEEE80211_TX_STAT_ACK : 0;
356
357 D_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", txq_id,
358 il3945_get_tx_fail_reason(status), status, tx_resp->rate,
359 tx_resp->failure_frame);
360
361 D_TX_REPLY("Tx queue reclaim %d\n", idx);
362 il3945_tx_queue_reclaim(il, txq_id, idx);
363
364 if (status & TX_ABORT_REQUIRED_MSK)
365 IL_ERR("TODO: Implement Tx ABORT REQUIRED!!!\n");
366}
367
368/*****************************************************************************
369 *
370 * Intel PRO/Wireless 3945ABG/BG Network Connection
371 *
372 * RX handler implementations
373 *
374 *****************************************************************************/
#ifdef CONFIG_IWLEGACY_DEBUGFS
/* Fold a fresh firmware statistics notification into the running
 * accumulators: for each u32 counter that increased, record the delta,
 * add it to the accumulated total, and track the maximum delta seen.
 * Walks the stats structures as parallel u32 arrays, skipping the
 * leading flags word. */
static void
il3945_accumulative_stats(struct il_priv *il, __le32 * stats)
{
	int i;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;

	prev_stats = (__le32 *) &il->_3945.stats;
	accum_stats = (u32 *) &il->_3945.accum_stats;
	delta = (u32 *) &il->_3945.delta_stats;
	max_delta = (u32 *) &il->_3945.max_delta;

	for (i = sizeof(__le32); i < sizeof(struct il3945_notif_stats);
	     i +=
	     sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
	     accum_stats++) {
		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
			*delta =
			    (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
			*accum_stats += *delta;
			if (*delta > *max_delta)
				*max_delta = *delta;
		}
	}

	/* reset accumulative stats for "no-counter" type stats */
	il->_3945.accum_stats.general.temperature =
	    il->_3945.stats.general.temperature;
	il->_3945.accum_stats.general.ttl_timestamp =
	    il->_3945.stats.general.ttl_timestamp;
}
#endif
409
/* Handle a firmware statistics notification: update the debugfs
 * accumulators (when enabled) and store the latest snapshot. */
void
il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	D_RX("Statistics notification received (%d vs %d).\n",
	     (int)sizeof(struct il3945_notif_stats),
	     le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
#ifdef CONFIG_IWLEGACY_DEBUGFS
	il3945_accumulative_stats(il, (__le32 *) &pkt->u.raw);
#endif

	memcpy(&il->_3945.stats, pkt->u.raw, sizeof(il->_3945.stats));
}
424
/* Handle the reply to a statistics-request command: if the firmware
 * indicates its counters were cleared, zero our accumulators too, then
 * process the payload as a normal statistics notification. */
void
il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	__le32 *flag = (__le32 *) &pkt->u.raw;

	if (le32_to_cpu(*flag) & UCODE_STATS_CLEAR_MSK) {
#ifdef CONFIG_IWLEGACY_DEBUGFS
		memset(&il->_3945.accum_stats, 0,
		       sizeof(struct il3945_notif_stats));
		memset(&il->_3945.delta_stats, 0,
		       sizeof(struct il3945_notif_stats));
		memset(&il->_3945.max_delta, 0,
		       sizeof(struct il3945_notif_stats));
#endif
		D_RX("Statistics have been cleared\n");
	}
	il3945_hdl_stats(il, rxb);
}
444
445/******************************************************************************
446 *
447 * Misc. internal state and helper functions
448 *
449 ******************************************************************************/
450
/* This is necessary only for a number of stats, see the caller. */
static int
il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header)
{
	/* Filter incoming packets to determine if they are targeted toward
	 * this network, discarding packets coming from ourselves */
	switch (il->iw_mode) {
	case NL80211_IFTYPE_ADHOC:	/* Header: Dest. | Source    | BSSID */
		/* packets to our IBSS update information */
		return !compare_ether_addr(header->addr3, il->bssid);
	case NL80211_IFTYPE_STATION:	/* Header: Dest. | AP{BSSID} | Source */
		/* frames transmitted by our AP (addr2) update information */
		return !compare_ether_addr(header->addr2, il->bssid);
	default:
		return 1;
	}
}
468
/* Hand a received frame up to mac80211: validate its claimed length,
 * optionally set HW-decryption flags, attach the RX page as a fragment
 * and deliver the skb.  On success the RX page's ownership moves to the
 * skb (rxb->page is cleared). */
static void
il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
			       struct ieee80211_rx_status *stats)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
	struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
	struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
	u16 len = le16_to_cpu(rx_hdr->len);
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* Drop frames whose claimed length exceeds the RX buffer */
	if (unlikely
	    (len + IL39_RX_FRAME_SIZE >
	     PAGE_SIZE << il->hw_params.rx_page_order)) {
		D_DROP("Corruption detected!\n");
		return;
	}

	/* We only process data packets if the interface is open */
	if (unlikely(!il->is_open)) {
		D_DROP("Dropping packet while interface is not open.\n");
		return;
	}

	/* small head; the payload is attached below as a page fragment */
	skb = dev_alloc_skb(128);
	if (!skb) {
		IL_ERR("dev_alloc_skb failed\n");
		return;
	}

	if (!il3945_mod_params.sw_crypto)
		il_set_decrypted_flag(il, (struct ieee80211_hdr *)rxb_addr(rxb),
				      le32_to_cpu(rx_end->status), stats);

	skb_add_rx_frag(skb, 0, rxb->page,
			(void *)rx_hdr->payload - (void *)pkt, len);

	il_update_stats(il, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(il->hw, skb);
	il->alloc_rxb_page--;
	rxb->page = NULL;
}
515
516#define IL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
517
518static void
519il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
520{
521 struct ieee80211_hdr *header;
522 struct ieee80211_rx_status rx_status;
523 struct il_rx_pkt *pkt = rxb_addr(rxb);
524 struct il3945_rx_frame_stats *rx_stats = IL_RX_STATS(pkt);
525 struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
526 struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
527 u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
528 u16 rx_stats_noise_diff __maybe_unused =
529 le16_to_cpu(rx_stats->noise_diff);
530 u8 network_packet;
531
532 rx_status.flag = 0;
533 rx_status.mactime = le64_to_cpu(rx_end->timestamp);
534 rx_status.band =
535 (rx_hdr->
536 phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
537 IEEE80211_BAND_5GHZ;
538 rx_status.freq =
539 ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
540 rx_status.band);
541
542 rx_status.rate_idx = il3945_hwrate_to_plcp_idx(rx_hdr->rate);
543 if (rx_status.band == IEEE80211_BAND_5GHZ)
544 rx_status.rate_idx -= IL_FIRST_OFDM_RATE;
545
546 rx_status.antenna =
547 (le16_to_cpu(rx_hdr->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
548 4;
549
550 /* set the preamble flag if appropriate */
551 if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
552 rx_status.flag |= RX_FLAG_SHORTPRE;
553
554 if ((unlikely(rx_stats->phy_count > 20))) {
555 D_DROP("dsp size out of range [0,20]: %d/n",
556 rx_stats->phy_count);
557 return;
558 }
559
560 if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) ||
561 !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
562 D_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
563 return;
564 }
565
566 /* Convert 3945's rssi indicator to dBm */
567 rx_status.signal = rx_stats->rssi - IL39_RSSI_OFFSET;
568
569 D_STATS("Rssi %d sig_avg %d noise_diff %d\n", rx_status.signal,
570 rx_stats_sig_avg, rx_stats_noise_diff);
571
572 header = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
573
574 network_packet = il3945_is_network_packet(il, header);
575
576 D_STATS("[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
577 network_packet ? '*' : ' ', le16_to_cpu(rx_hdr->channel),
578 rx_status.signal, rx_status.signal, rx_status.rate_idx);
579
580 il_dbg_log_rx_data_frame(il, le16_to_cpu(rx_hdr->len), header);
581
582 if (network_packet) {
583 il->_3945.last_beacon_time =
584 le32_to_cpu(rx_end->beacon_timestamp);
585 il->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
586 il->_3945.last_rx_rssi = rx_status.signal;
587 }
588
589 il3945_pass_packet_to_mac80211(il, rxb, &rx_status);
590}
591
/*
 * il3945_hw_txq_attach_buf_to_tfd - add a DMA chunk to the current TFD
 *
 * Appends the (addr, len) buffer to the TFD at the queue's write
 * pointer, optionally clearing the TFD first (@reset), and updates the
 * chunk count and pad in the control word.  Returns 0 on success or
 * -EINVAL when the TFD already holds NUM_TFD_CHUNKS buffers.
 */
int
il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
				dma_addr_t addr, u16 len, u8 reset, u8 pad)
{
	int count;
	struct il_queue *q;
	struct il3945_tfd *tfd, *tfd_tmp;

	q = &txq->q;
	tfd_tmp = (struct il3945_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));

	/* count < 0 cannot happen for a sane control word; checked anyway */
	if (count >= NUM_TFD_CHUNKS || count < 0) {
		IL_ERR("Error can not send more than %d chunks\n",
		       NUM_TFD_CHUNKS);
		return -EINVAL;
	}

	tfd->tbs[count].addr = cpu_to_le32(addr);
	tfd->tbs[count].len = cpu_to_le32(len);

	count++;

	tfd->control_flags =
	    cpu_to_le32(TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad));

	return 0;
}
625
626/**
627 * il3945_hw_txq_free_tfd - Free one TFD, those at idx [txq->q.read_ptr]
628 *
629 * Does NOT advance any idxes
630 */
631void
632il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
633{
634 struct il3945_tfd *tfd_tmp = (struct il3945_tfd *)txq->tfds;
635 int idx = txq->q.read_ptr;
636 struct il3945_tfd *tfd = &tfd_tmp[idx];
637 struct pci_dev *dev = il->pci_dev;
638 int i;
639 int counter;
640
641 /* sanity check */
642 counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
643 if (counter > NUM_TFD_CHUNKS) {
644 IL_ERR("Too many chunks: %i\n", counter);
645 /* @todo issue fatal error, it is quite serious situation */
646 return;
647 }
648
649 /* Unmap tx_cmd */
650 if (counter)
651 pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
652 dma_unmap_len(&txq->meta[idx], len),
653 PCI_DMA_TODEVICE);
654
655 /* unmap chunks if any */
656
657 for (i = 1; i < counter; i++)
658 pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
659 le32_to_cpu(tfd->tbs[i].len),
660 PCI_DMA_TODEVICE);
661
662 /* free SKB */
663 if (txq->txb) {
664 struct sk_buff *skb;
665
666 skb = txq->txb[txq->q.read_ptr].skb;
667
668 /* can be called from irqs-disabled context */
669 if (skb) {
670 dev_kfree_skb_any(skb);
671 txq->txb[txq->q.read_ptr].skb = NULL;
672 }
673 }
674}
675
676/**
677 * il3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD:
678 *
679*/
680void
681il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
682 struct ieee80211_tx_info *info,
683 struct ieee80211_hdr *hdr, int sta_id, int tx_id)
684{
685 u16 hw_value = ieee80211_get_tx_rate(il->hw, info)->hw_value;
686 u16 rate_idx = min(hw_value & 0xffff, RATE_COUNT_3945);
687 u16 rate_mask;
688 int rate;
689 u8 rts_retry_limit;
690 u8 data_retry_limit;
691 __le32 tx_flags;
692 __le16 fc = hdr->frame_control;
693 struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
694
695 rate = il3945_rates[rate_idx].plcp;
696 tx_flags = tx_cmd->tx_flags;
697
698 /* We need to figure out how to get the sta->supp_rates while
699 * in this running context */
700 rate_mask = RATES_MASK_3945;
701
702 /* Set retry limit on DATA packets and Probe Responses */
703 if (ieee80211_is_probe_resp(fc))
704 data_retry_limit = 3;
705 else
706 data_retry_limit = IL_DEFAULT_TX_RETRY;
707 tx_cmd->data_retry_limit = data_retry_limit;
708
709 if (tx_id >= IL39_CMD_QUEUE_NUM)
710 rts_retry_limit = 3;
711 else
712 rts_retry_limit = 7;
713
714 if (data_retry_limit < rts_retry_limit)
715 rts_retry_limit = data_retry_limit;
716 tx_cmd->rts_retry_limit = rts_retry_limit;
717
718 tx_cmd->rate = rate;
719 tx_cmd->tx_flags = tx_flags;
720
721 /* OFDM */
722 tx_cmd->supp_rates[0] =
723 ((rate_mask & IL_OFDM_RATES_MASK) >> IL_FIRST_OFDM_RATE) & 0xFF;
724
725 /* CCK */
726 tx_cmd->supp_rates[1] = (rate_mask & 0xF);
727
728 D_RATE("Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
729 "cck/ofdm mask: 0x%x/0x%x\n", sta_id, tx_cmd->rate,
730 le32_to_cpu(tx_cmd->tx_flags), tx_cmd->supp_rates[1],
731 tx_cmd->supp_rates[0]);
732}
733
734static u8
735il3945_sync_sta(struct il_priv *il, int sta_id, u16 tx_rate)
736{
737 unsigned long flags_spin;
738 struct il_station_entry *station;
739
740 if (sta_id == IL_INVALID_STATION)
741 return IL_INVALID_STATION;
742
743 spin_lock_irqsave(&il->sta_lock, flags_spin);
744 station = &il->stations[sta_id];
745
746 station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
747 station->sta.rate_n_flags = cpu_to_le16(tx_rate);
748 station->sta.mode = STA_CONTROL_MODIFY_MSK;
749 il_send_add_sta(il, &station->sta, CMD_ASYNC);
750 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
751
752 D_RATE("SCALE sync station %d to rate %d\n", sta_id, tx_rate);
753 return sta_id;
754}
755
/* Select V_MAIN as the NIC's power source and wait (up to 5 ms) for the
 * GPIO line to confirm the switch. */
static void
il3945_set_pwr_vmain(struct il_priv *il)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do

	if (pci_pme_capable(il->pci_dev, PCI_D3cold)) {
		il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
				      APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				      ~APMG_PS_CTRL_MSK_PWR_SRC);

		_il_poll_bit(il, CSR_GPIO_IN,
			     CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
			     CSR_GPIO_IN_BIT_AUX_POWER, 5000);
	}
 */

	il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
			      APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			      ~APMG_PS_CTRL_MSK_PWR_SRC);

	_il_poll_bit(il, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
		     CSR_GPIO_IN_BIT_AUX_POWER, 5000);
}
781
782static int
783il3945_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
784{
785 il_wr(il, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
786 il_wr(il, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
787 il_wr(il, FH39_RCSR_WPTR(0), 0);
788 il_wr(il, FH39_RCSR_CONFIG(0),
789 FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
790 FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
791 FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
792 FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 | (RX_QUEUE_SIZE_LOG
793 <<
794 FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE)
795 | FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST | (1 <<
796 FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH)
797 | FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
798
799 /* fake read to flush all prev I/O */
800 il_rd(il, FH39_RSSR_CTRL);
801
802 return 0;
803}
804
/* Program the Tx scheduler (SCD) and Tx DMA (TSSR) registers to their
 * initial operating values.  Always returns 0. */
static int
il3945_tx_reset(struct il_priv *il)
{

	/* bypass mode */
	il_wr_prph(il, ALM_SCD_MODE_REG, 0x2);

	/* RA 0 is active */
	il_wr_prph(il, ALM_SCD_ARASTAT_REG, 0x01);

	/* all 6 fifo are active */
	il_wr_prph(il, ALM_SCD_TXFACT_REG, 0x3f);

	/* scheduler bypass / FIFO mapping setup */
	il_wr_prph(il, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
	il_wr_prph(il, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
	il_wr_prph(il, ALM_SCD_TXF4MF_REG, 0x000004);
	il_wr_prph(il, ALM_SCD_TXF5MF_REG, 0x000005);

	/* Tell the Tx DMA where the host's shared area lives */
	il_wr(il, FH39_TSSR_CBB_BASE, il->_3945.shared_phys);

	/* Tx DMA message/snoop configuration */
	il_wr(il, FH39_TSSR_MSG_CONFIG,
	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);

	return 0;
}
836
837/**
838 * il3945_txq_ctx_reset - Reset TX queue context
839 *
840 * Destroys all DMA structures and initialize them again
841 */
842static int
843il3945_txq_ctx_reset(struct il_priv *il)
844{
845 int rc;
846 int txq_id, slots_num;
847
848 il3945_hw_txq_ctx_free(il);
849
850 /* allocate tx queue structure */
851 rc = il_alloc_txq_mem(il);
852 if (rc)
853 return rc;
854
855 /* Tx CMD queue */
856 rc = il3945_tx_reset(il);
857 if (rc)
858 goto error;
859
860 /* Tx queue(s) */
861 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
862 slots_num =
863 (txq_id ==
864 IL39_CMD_QUEUE_NUM) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
865 rc = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id);
866 if (rc) {
867 IL_ERR("Tx %d queue init failed\n", txq_id);
868 goto error;
869 }
870 }
871
872 return rc;
873
874error:
875 il3945_hw_txq_ctx_free(il);
876 return rc;
877}
878
879/*
880 * Start up 3945's basic functionality after it has been reset
881 * (e.g. after platform boot, or shutdown via il_apm_stop())
882 * NOTE: This does not load uCode nor start the embedded processor
883 */
884static int
885il3945_apm_init(struct il_priv *il)
886{
887 int ret = il_apm_init(il);
888
889 /* Clear APMG (NIC's internal power management) interrupts */
890 il_wr_prph(il, APMG_RTC_INT_MSK_REG, 0x0);
891 il_wr_prph(il, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
892
893 /* Reset radio chip */
894 il_set_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
895 udelay(5);
896 il_clear_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
897
898 return ret;
899}
900
/* Configure CSR_HW_IF_CONFIG_REG from the PCI revision id and EEPROM
 * data: radio board type, SKU MRC capability, board revision and silicon
 * ("M") type.  Also logs whether SW/HW RF-kill is supported. */
static void
il3945_nic_config(struct il_priv *il)
{
	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
	unsigned long flags;
	u8 rev_id = il->pci_dev->revision;

	spin_lock_irqsave(&il->lock, flags);

	/* Determine HW type */
	D_INFO("HW Revision ID = 0x%X\n", rev_id);

	if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
		D_INFO("RTP type\n");
	else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
		D_INFO("3945 RADIO-MB type\n");
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
	} else {
		D_INFO("3945 RADIO-MM type\n");
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
	}

	/* SKU capability: MRC (multiple receive chain) vs basic */
	if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
		D_INFO("SKU OP mode is mrc\n");
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
	} else
		D_INFO("SKU OP mode is basic\n");

	/* Board revision 0xD0 selects the alternate board-type bit */
	if ((eeprom->board_revision & 0xF0) == 0xD0) {
		D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
	} else {
		D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
		il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
			     CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
	}

	/* Silicon type A (almgor version <= 1) or B */
	if (eeprom->almgor_m_version <= 1) {
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
		D_INFO("Card M type A version is 0x%X\n",
		       eeprom->almgor_m_version);
	} else {
		D_INFO("Card M type B version is 0x%X\n",
		       eeprom->almgor_m_version);
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
	}
	spin_unlock_irqrestore(&il->lock, flags);

	if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
		D_RF_KILL("SW RF KILL supported in EEPROM.\n");

	if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
		D_RF_KILL("HW RF KILL supported in EEPROM.\n");
}
961
962int
963il3945_hw_nic_init(struct il_priv *il)
964{
965 int rc;
966 unsigned long flags;
967 struct il_rx_queue *rxq = &il->rxq;
968
969 spin_lock_irqsave(&il->lock, flags);
970 il->cfg->ops->lib->apm_ops.init(il);
971 spin_unlock_irqrestore(&il->lock, flags);
972
973 il3945_set_pwr_vmain(il);
974
975 il->cfg->ops->lib->apm_ops.config(il);
976
977 /* Allocate the RX queue, or reset if it is already allocated */
978 if (!rxq->bd) {
979 rc = il_rx_queue_alloc(il);
980 if (rc) {
981 IL_ERR("Unable to initialize Rx queue\n");
982 return -ENOMEM;
983 }
984 } else
985 il3945_rx_queue_reset(il, rxq);
986
987 il3945_rx_replenish(il);
988
989 il3945_rx_init(il, rxq);
990
991 /* Look at using this instead:
992 rxq->need_update = 1;
993 il_rx_queue_update_write_ptr(il, rxq);
994 */
995
996 il_wr(il, FH39_RCSR_WPTR(0), rxq->write & ~7);
997
998 rc = il3945_txq_ctx_reset(il);
999 if (rc)
1000 return rc;
1001
1002 set_bit(S_INIT, &il->status);
1003
1004 return 0;
1005}
1006
1007/**
1008 * il3945_hw_txq_ctx_free - Free TXQ Context
1009 *
1010 * Destroy all TX DMA queues and structures
1011 */
1012void
1013il3945_hw_txq_ctx_free(struct il_priv *il)
1014{
1015 int txq_id;
1016
1017 /* Tx queues */
1018 if (il->txq)
1019 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
1020 if (txq_id == IL39_CMD_QUEUE_NUM)
1021 il_cmd_queue_free(il);
1022 else
1023 il_tx_queue_free(il, txq_id);
1024
1025 /* free tx queue structure */
1026 il_txq_mem(il);
1027}
1028
/* Stop the Tx scheduler, reset all TFD DMA channels (waiting up to 1 ms
 * for each to go idle), then free the whole Tx queue context. */
void
il3945_hw_txq_ctx_stop(struct il_priv *il)
{
	int txq_id;

	/* stop SCD */
	il_wr_prph(il, ALM_SCD_MODE_REG, 0);
	il_wr_prph(il, ALM_SCD_TXFACT_REG, 0);

	/* reset TFD queues */
	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
		il_wr(il, FH39_TCSR_CONFIG(txq_id), 0x0);
		il_poll_bit(il, FH39_TSSR_TX_STATUS,
			    FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
			    1000);
	}

	il3945_hw_txq_ctx_free(il);
}
1048
1049/**
1050 * il3945_hw_reg_adjust_power_by_temp
1051 * return idx delta into power gain settings table
1052*/
1053static int
1054il3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
1055{
1056 return (new_reading - old_reading) * (-11) / 100;
1057}
1058
1059/**
1060 * il3945_hw_reg_temp_out_of_range - Keep temperature in sane range
1061 */
1062static inline int
1063il3945_hw_reg_temp_out_of_range(int temperature)
1064{
1065 return (temperature < -260 || temperature > 25) ? 1 : 0;
1066}
1067
/* Read the raw temperature word from the CSR_UCODE_DRV_GP2 register. */
int
il3945_hw_get_temperature(struct il_priv *il)
{
	return _il_rd(il, CSR_UCODE_DRV_GP2);
}
1073
1074/**
1075 * il3945_hw_reg_txpower_get_temperature
1076 * get the current temperature by reading from NIC
1077*/
1078static int
1079il3945_hw_reg_txpower_get_temperature(struct il_priv *il)
1080{
1081 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1082 int temperature;
1083
1084 temperature = il3945_hw_get_temperature(il);
1085
1086 /* driver's okay range is -260 to +25.
1087 * human readable okay range is 0 to +285 */
1088 D_INFO("Temperature: %d\n", temperature + IL_TEMP_CONVERT);
1089
1090 /* handle insane temp reading */
1091 if (il3945_hw_reg_temp_out_of_range(temperature)) {
1092 IL_ERR("Error bad temperature value %d\n", temperature);
1093
1094 /* if really really hot(?),
1095 * substitute the 3rd band/group's temp measured at factory */
1096 if (il->last_temperature > 100)
1097 temperature = eeprom->groups[2].temperature;
1098 else /* else use most recent "sane" value from driver */
1099 temperature = il->last_temperature;
1100 }
1101
1102 return temperature; /* raw, not "human readable" */
1103}
1104
1105/* Adjust Txpower only if temperature variance is greater than threshold.
1106 *
1107 * Both are lower than older versions' 9 degrees */
1108#define IL_TEMPERATURE_LIMIT_TIMER 6
1109
1110/**
1111 * il3945_is_temp_calib_needed - determines if new calibration is needed
1112 *
1113 * records new temperature in tx_mgr->temperature.
1114 * replaces tx_mgr->last_temperature *only* if calib needed
1115 * (assumes caller will actually do the calibration!). */
1116static int
1117il3945_is_temp_calib_needed(struct il_priv *il)
1118{
1119 int temp_diff;
1120
1121 il->temperature = il3945_hw_reg_txpower_get_temperature(il);
1122 temp_diff = il->temperature - il->last_temperature;
1123
1124 /* get absolute value */
1125 if (temp_diff < 0) {
1126 D_POWER("Getting cooler, delta %d,\n", temp_diff);
1127 temp_diff = -temp_diff;
1128 } else if (temp_diff == 0)
1129 D_POWER("Same temp,\n");
1130 else
1131 D_POWER("Getting warmer, delta %d,\n", temp_diff);
1132
1133 /* if we don't need calibration, *don't* update last_temperature */
1134 if (temp_diff < IL_TEMPERATURE_LIMIT_TIMER) {
1135 D_POWER("Timed thermal calib not needed\n");
1136 return 0;
1137 }
1138
1139 D_POWER("Timed thermal calib needed\n");
1140
1141 /* assume that caller will actually do calib ...
1142 * update the "last temperature" value */
1143 il->last_temperature = il->temperature;
1144 return 1;
1145}
1146
#define IL_MAX_GAIN_ENTRIES 78
#define IL_CCK_FROM_OFDM_POWER_DIFF -5
#define IL_CCK_FROM_OFDM_IDX_DIFF (10)

/* radio and DSP power table, each step is 1/2 dB.
 * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain.
 * Indexed as [band][idx]: band 0 is 2.4 GHz, band 1 is 5.x GHz;
 * idx 0 is highest power, idx IL_MAX_GAIN_ENTRIES-1 is lowest. */
static struct il3945_tx_power power_gain_table[2][IL_MAX_GAIN_ENTRIES] = {
	{
	 {251, 127},		/* 2.4 GHz, highest power */
	 {251, 127},
	 {251, 127},
	 {251, 127},
	 {251, 125},
	 {251, 110},
	 {251, 105},
	 {251, 98},
	 {187, 125},
	 {187, 115},
	 {187, 108},
	 {187, 99},
	 {243, 119},
	 {243, 111},
	 {243, 105},
	 {243, 97},
	 {243, 92},
	 {211, 106},
	 {211, 100},
	 {179, 120},
	 {179, 113},
	 {179, 107},
	 {147, 125},
	 {147, 119},
	 {147, 112},
	 {147, 106},
	 {147, 101},
	 {147, 97},
	 {147, 91},
	 {115, 107},
	 {235, 121},
	 {235, 115},
	 {235, 109},
	 {203, 127},
	 {203, 121},
	 {203, 115},
	 {203, 108},
	 {203, 102},
	 {203, 96},
	 {203, 92},
	 {171, 110},
	 {171, 104},
	 {171, 98},
	 {139, 116},
	 {227, 125},
	 {227, 119},
	 {227, 113},
	 {227, 107},
	 {227, 101},
	 {227, 96},
	 {195, 113},
	 {195, 106},
	 {195, 102},
	 {195, 95},
	 {163, 113},
	 {163, 106},
	 {163, 102},
	 {163, 95},
	 {131, 113},
	 {131, 106},
	 {131, 102},
	 {131, 95},
	 {99, 113},
	 {99, 106},
	 {99, 102},
	 {99, 95},
	 {67, 113},
	 {67, 106},
	 {67, 102},
	 {67, 95},
	 {35, 113},
	 {35, 106},
	 {35, 102},
	 {35, 95},
	 {3, 113},
	 {3, 106},
	 {3, 102},
	 {3, 95}		/* 2.4 GHz, lowest power */
	 },
	{
	 {251, 127},		/* 5.x GHz, highest power */
	 {251, 120},
	 {251, 114},
	 {219, 119},
	 {219, 101},
	 {187, 113},
	 {187, 102},
	 {155, 114},
	 {155, 103},
	 {123, 117},
	 {123, 107},
	 {123, 99},
	 {123, 92},
	 {91, 108},
	 {59, 125},
	 {59, 118},
	 {59, 109},
	 {59, 102},
	 {59, 96},
	 {59, 90},
	 {27, 104},
	 {27, 98},
	 {27, 92},
	 {115, 118},
	 {115, 111},
	 {115, 104},
	 {83, 126},
	 {83, 121},
	 {83, 113},
	 {83, 105},
	 {83, 99},
	 {51, 118},
	 {51, 111},
	 {51, 104},
	 {51, 98},
	 {19, 116},
	 {19, 109},
	 {19, 102},
	 {19, 98},
	 {19, 93},
	 {171, 113},
	 {171, 107},
	 {171, 99},
	 {139, 120},
	 {139, 113},
	 {139, 107},
	 {139, 99},
	 {107, 120},
	 {107, 113},
	 {107, 107},
	 {107, 99},
	 {75, 120},
	 {75, 113},
	 {75, 107},
	 {75, 99},
	 {43, 120},
	 {43, 113},
	 {43, 107},
	 {43, 99},
	 {11, 120},
	 {11, 113},
	 {11, 107},
	 {11, 99},
	 {131, 107},
	 {131, 99},
	 {99, 120},
	 {99, 113},
	 {99, 107},
	 {99, 99},
	 {67, 120},
	 {67, 113},
	 {67, 107},
	 {67, 99},
	 {35, 120},
	 {35, 113},
	 {35, 107},
	 {35, 99},
	 {3, 120}		/* 5.x GHz, lowest power */
	 }
};
1315
1316static inline u8
1317il3945_hw_reg_fix_power_idx(int idx)
1318{
1319 if (idx < 0)
1320 return 0;
1321 if (idx >= IL_MAX_GAIN_ENTRIES)
1322 return IL_MAX_GAIN_ENTRIES - 1;
1323 return (u8) idx;
1324}
1325
1326/* Kick off thermal recalibration check every 60 seconds */
1327#define REG_RECALIB_PERIOD (60)
1328
1329/**
1330 * il3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
1331 *
1332 * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
1333 * or 6 Mbit (OFDM) rates.
1334 */
1335static void
1336il3945_hw_reg_set_scan_power(struct il_priv *il, u32 scan_tbl_idx, s32 rate_idx,
1337 const s8 *clip_pwrs,
1338 struct il_channel_info *ch_info, int band_idx)
1339{
1340 struct il3945_scan_power_info *scan_power_info;
1341 s8 power;
1342 u8 power_idx;
1343
1344 scan_power_info = &ch_info->scan_pwr_info[scan_tbl_idx];
1345
1346 /* use this channel group's 6Mbit clipping/saturation pwr,
1347 * but cap at regulatory scan power restriction (set during init
1348 * based on eeprom channel data) for this channel. */
1349 power = min(ch_info->scan_power, clip_pwrs[RATE_6M_IDX_TBL]);
1350
1351 power = min(power, il->tx_power_user_lmt);
1352 scan_power_info->requested_power = power;
1353
1354 /* find difference between new scan *power* and current "normal"
1355 * Tx *power* for 6Mb. Use this difference (x2) to adjust the
1356 * current "normal" temperature-compensated Tx power *idx* for
1357 * this rate (1Mb or 6Mb) to yield new temp-compensated scan power
1358 * *idx*. */
1359 power_idx =
1360 ch_info->power_info[rate_idx].power_table_idx - (power -
1361 ch_info->
1362 power_info
1363 [RATE_6M_IDX_TBL].
1364 requested_power) *
1365 2;
1366
1367 /* store reference idx that we use when adjusting *all* scan
1368 * powers. So we can accommodate user (all channel) or spectrum
1369 * management (single channel) power changes "between" temperature
1370 * feedback compensation procedures.
1371 * don't force fit this reference idx into gain table; it may be a
1372 * negative number. This will help avoid errors when we're at
1373 * the lower bounds (highest gains, for warmest temperatures)
1374 * of the table. */
1375
1376 /* don't exceed table bounds for "real" setting */
1377 power_idx = il3945_hw_reg_fix_power_idx(power_idx);
1378
1379 scan_power_info->power_table_idx = power_idx;
1380 scan_power_info->tpc.tx_gain =
1381 power_gain_table[band_idx][power_idx].tx_gain;
1382 scan_power_info->tpc.dsp_atten =
1383 power_gain_table[band_idx][power_idx].dsp_atten;
1384}
1385
1386/**
1387 * il3945_send_tx_power - fill in Tx Power command with gain settings
1388 *
1389 * Configures power settings for all rates for the current channel,
1390 * using values from channel info struct, and send to NIC
1391 */
1392static int
1393il3945_send_tx_power(struct il_priv *il)
1394{
1395 int rate_idx, i;
1396 const struct il_channel_info *ch_info = NULL;
1397 struct il3945_txpowertable_cmd txpower = {
1398 .channel = il->ctx.active.channel,
1399 };
1400 u16 chan;
1401
1402 if (WARN_ONCE
1403 (test_bit(S_SCAN_HW, &il->status),
1404 "TX Power requested while scanning!\n"))
1405 return -EAGAIN;
1406
1407 chan = le16_to_cpu(il->ctx.active.channel);
1408
1409 txpower.band = (il->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
1410 ch_info = il_get_channel_info(il, il->band, chan);
1411 if (!ch_info) {
1412 IL_ERR("Failed to get channel info for channel %d [%d]\n", chan,
1413 il->band);
1414 return -EINVAL;
1415 }
1416
1417 if (!il_is_channel_valid(ch_info)) {
1418 D_POWER("Not calling TX_PWR_TBL_CMD on " "non-Tx channel.\n");
1419 return 0;
1420 }
1421
1422 /* fill cmd with power settings for all rates for current channel */
1423 /* Fill OFDM rate */
1424 for (rate_idx = IL_FIRST_OFDM_RATE, i = 0;
1425 rate_idx <= IL39_LAST_OFDM_RATE; rate_idx++, i++) {
1426
1427 txpower.power[i].tpc = ch_info->power_info[i].tpc;
1428 txpower.power[i].rate = il3945_rates[rate_idx].plcp;
1429
1430 D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
1431 le16_to_cpu(txpower.channel), txpower.band,
1432 txpower.power[i].tpc.tx_gain,
1433 txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
1434 }
1435 /* Fill CCK rates */
1436 for (rate_idx = IL_FIRST_CCK_RATE; rate_idx <= IL_LAST_CCK_RATE;
1437 rate_idx++, i++) {
1438 txpower.power[i].tpc = ch_info->power_info[i].tpc;
1439 txpower.power[i].rate = il3945_rates[rate_idx].plcp;
1440
1441 D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
1442 le16_to_cpu(txpower.channel), txpower.band,
1443 txpower.power[i].tpc.tx_gain,
1444 txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
1445 }
1446
1447 return il_send_cmd_pdu(il, C_TX_PWR_TBL,
1448 sizeof(struct il3945_txpowertable_cmd),
1449 &txpower);
1450
1451}
1452
1453/**
1454 * il3945_hw_reg_set_new_power - Configures power tables at new levels
1455 * @ch_info: Channel to update. Uses power_info.requested_power.
1456 *
1457 * Replace requested_power and base_power_idx ch_info fields for
1458 * one channel.
1459 *
1460 * Called if user or spectrum management changes power preferences.
1461 * Takes into account h/w and modulation limitations (clip power).
1462 *
1463 * This does *not* send anything to NIC, just sets up ch_info for one channel.
1464 *
1465 * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to
1466 * properly fill out the scan powers, and actual h/w gain settings,
1467 * and send changes to NIC
1468 */
1469static int
1470il3945_hw_reg_set_new_power(struct il_priv *il, struct il_channel_info *ch_info)
1471{
1472 struct il3945_channel_power_info *power_info;
1473 int power_changed = 0;
1474 int i;
1475 const s8 *clip_pwrs;
1476 int power;
1477
1478 /* Get this chnlgrp's rate-to-max/clip-powers table */
1479 clip_pwrs = il->_3945.clip_groups[ch_info->group_idx].clip_powers;
1480
1481 /* Get this channel's rate-to-current-power settings table */
1482 power_info = ch_info->power_info;
1483
1484 /* update OFDM Txpower settings */
1485 for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++, ++power_info) {
1486 int delta_idx;
1487
1488 /* limit new power to be no more than h/w capability */
1489 power = min(ch_info->curr_txpow, clip_pwrs[i]);
1490 if (power == power_info->requested_power)
1491 continue;
1492
1493 /* find difference between old and new requested powers,
1494 * update base (non-temp-compensated) power idx */
1495 delta_idx = (power - power_info->requested_power) * 2;
1496 power_info->base_power_idx -= delta_idx;
1497
1498 /* save new requested power value */
1499 power_info->requested_power = power;
1500
1501 power_changed = 1;
1502 }
1503
1504 /* update CCK Txpower settings, based on OFDM 12M setting ...
1505 * ... all CCK power settings for a given channel are the *same*. */
1506 if (power_changed) {
1507 power =
1508 ch_info->power_info[RATE_12M_IDX_TBL].requested_power +
1509 IL_CCK_FROM_OFDM_POWER_DIFF;
1510
1511 /* do all CCK rates' il3945_channel_power_info structures */
1512 for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++) {
1513 power_info->requested_power = power;
1514 power_info->base_power_idx =
1515 ch_info->power_info[RATE_12M_IDX_TBL].
1516 base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
1517 ++power_info;
1518 }
1519 }
1520
1521 return 0;
1522}
1523
1524/**
1525 * il3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
1526 *
1527 * NOTE: Returned power limit may be less (but not more) than requested,
1528 * based strictly on regulatory (eeprom and spectrum mgt) limitations
1529 * (no consideration for h/w clipping limitations).
1530 */
1531static int
1532il3945_hw_reg_get_ch_txpower_limit(struct il_channel_info *ch_info)
1533{
1534 s8 max_power;
1535
1536#if 0
1537 /* if we're using TGd limits, use lower of TGd or EEPROM */
1538 if (ch_info->tgd_data.max_power != 0)
1539 max_power =
1540 min(ch_info->tgd_data.max_power,
1541 ch_info->eeprom.max_power_avg);
1542
1543 /* else just use EEPROM limits */
1544 else
1545#endif
1546 max_power = ch_info->eeprom.max_power_avg;
1547
1548 return min(max_power, ch_info->max_power_avg);
1549}
1550
1551/**
1552 * il3945_hw_reg_comp_txpower_temp - Compensate for temperature
1553 *
1554 * Compensate txpower settings of *all* channels for temperature.
1555 * This only accounts for the difference between current temperature
1556 * and the factory calibration temperatures, and bases the new settings
1557 * on the channel's base_power_idx.
1558 *
1559 * If RxOn is "associated", this sends the new Txpower to NIC!
1560 */
1561static int
1562il3945_hw_reg_comp_txpower_temp(struct il_priv *il)
1563{
1564 struct il_channel_info *ch_info = NULL;
1565 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1566 int delta_idx;
1567 const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
1568 u8 a_band;
1569 u8 rate_idx;
1570 u8 scan_tbl_idx;
1571 u8 i;
1572 int ref_temp;
1573 int temperature = il->temperature;
1574
1575 if (il->disable_tx_power_cal || test_bit(S_SCANNING, &il->status)) {
1576 /* do not perform tx power calibration */
1577 return 0;
1578 }
1579 /* set up new Tx power info for each and every channel, 2.4 and 5.x */
1580 for (i = 0; i < il->channel_count; i++) {
1581 ch_info = &il->channel_info[i];
1582 a_band = il_is_channel_a_band(ch_info);
1583
1584 /* Get this chnlgrp's factory calibration temperature */
1585 ref_temp = (s16) eeprom->groups[ch_info->group_idx].temperature;
1586
1587 /* get power idx adjustment based on current and factory
1588 * temps */
1589 delta_idx =
1590 il3945_hw_reg_adjust_power_by_temp(temperature, ref_temp);
1591
1592 /* set tx power value for all rates, OFDM and CCK */
1593 for (rate_idx = 0; rate_idx < RATE_COUNT_3945; rate_idx++) {
1594 int power_idx =
1595 ch_info->power_info[rate_idx].base_power_idx;
1596
1597 /* temperature compensate */
1598 power_idx += delta_idx;
1599
1600 /* stay within table range */
1601 power_idx = il3945_hw_reg_fix_power_idx(power_idx);
1602 ch_info->power_info[rate_idx].power_table_idx =
1603 (u8) power_idx;
1604 ch_info->power_info[rate_idx].tpc =
1605 power_gain_table[a_band][power_idx];
1606 }
1607
1608 /* Get this chnlgrp's rate-to-max/clip-powers table */
1609 clip_pwrs =
1610 il->_3945.clip_groups[ch_info->group_idx].clip_powers;
1611
1612 /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
1613 for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
1614 scan_tbl_idx++) {
1615 s32 actual_idx =
1616 (scan_tbl_idx ==
1617 0) ? RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
1618 il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
1619 actual_idx, clip_pwrs,
1620 ch_info, a_band);
1621 }
1622 }
1623
1624 /* send Txpower command for current channel to ucode */
1625 return il->cfg->ops->lib->send_tx_power(il);
1626}
1627
1628int
1629il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
1630{
1631 struct il_channel_info *ch_info;
1632 s8 max_power;
1633 u8 a_band;
1634 u8 i;
1635
1636 if (il->tx_power_user_lmt == power) {
1637 D_POWER("Requested Tx power same as current " "limit: %ddBm.\n",
1638 power);
1639 return 0;
1640 }
1641
1642 D_POWER("Setting upper limit clamp to %ddBm.\n", power);
1643 il->tx_power_user_lmt = power;
1644
1645 /* set up new Tx powers for each and every channel, 2.4 and 5.x */
1646
1647 for (i = 0; i < il->channel_count; i++) {
1648 ch_info = &il->channel_info[i];
1649 a_band = il_is_channel_a_band(ch_info);
1650
1651 /* find minimum power of all user and regulatory constraints
1652 * (does not consider h/w clipping limitations) */
1653 max_power = il3945_hw_reg_get_ch_txpower_limit(ch_info);
1654 max_power = min(power, max_power);
1655 if (max_power != ch_info->curr_txpow) {
1656 ch_info->curr_txpow = max_power;
1657
1658 /* this considers the h/w clipping limitations */
1659 il3945_hw_reg_set_new_power(il, ch_info);
1660 }
1661 }
1662
1663 /* update txpower settings for all channels,
1664 * send to NIC if associated. */
1665 il3945_is_temp_calib_needed(il);
1666 il3945_hw_reg_comp_txpower_temp(il);
1667
1668 return 0;
1669}
1670
/* Send a C_RXON_ASSOC command carrying the staging flags/rates, but only
 * when they differ from the active configuration; otherwise do nothing.
 * Returns 0 on success, negative errno on failure (-EIO if the ucode
 * rejected the command). */
static int
il3945_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
{
	int rc = 0;
	struct il_rx_pkt *pkt;
	struct il3945_rxon_assoc_cmd rxon_assoc;
	struct il_host_cmd cmd = {
		.id = C_RXON_ASSOC,
		.len = sizeof(rxon_assoc),
		.flags = CMD_WANT_SKB,
		.data = &rxon_assoc,
	};
	const struct il_rxon_cmd *rxon1 = &ctx->staging;
	const struct il_rxon_cmd *rxon2 = &ctx->active;

	/* skip the command when staging == active for all assoc fields */
	if (rxon1->flags == rxon2->flags &&
	    rxon1->filter_flags == rxon2->filter_flags &&
	    rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
	    rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
		D_INFO("Using current RXON_ASSOC.  Not resending.\n");
		return 0;
	}

	rxon_assoc.flags = ctx->staging.flags;
	rxon_assoc.filter_flags = ctx->staging.filter_flags;
	rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
	rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
	rxon_assoc.reserved = 0;

	rc = il_send_cmd_sync(il, &cmd);
	if (rc)
		return rc;

	/* CMD_WANT_SKB: inspect the ucode's reply for a failure flag */
	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_RXON_ASSOC command\n");
		rc = -EIO;
	}

	il_free_pages(il, cmd.reply_page);

	return rc;
}
1714
1715/**
1716 * il3945_commit_rxon - commit staging_rxon to hardware
1717 *
1718 * The RXON command in staging_rxon is committed to the hardware and
1719 * the active_rxon structure is updated with the new data. This
1720 * function correctly transitions out of the RXON_ASSOC_MSK state if
1721 * a HW tune is required based on the RXON structure changes.
1722 */
int
il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
{
	/* cast away the const for active_rxon in this function */
	struct il3945_rxon_cmd *active_rxon = (void *)&ctx->active;
	struct il3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
	int rc = 0;
	/* true if the staged config asks to be marked "associated" */
	bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return -EINVAL;

	/* NOTE(review): returns bare -1 (not an -E* code) when uCode is
	 * not alive; callers appear to treat any non-zero as failure */
	if (!il_is_alive(il))
		return -1;

	/* always get timestamp with Rx frame */
	staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;

	/* select antenna */
	staging_rxon->flags &= ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
	staging_rxon->flags |= il3945_get_antenna_flags(il);

	rc = il_check_rxon_cmd(il, ctx);
	if (rc) {
		IL_ERR("Invalid RXON configuration. Not committing.\n");
		return -EINVAL;
	}

	/* If we don't need to send a full RXON, we can use
	 * il3945_rxon_assoc_cmd which is used to reconfigure filter
	 * and other flags for the current radio configuration. */
	if (!il_full_rxon_required(il, &il->ctx)) {
		rc = il_send_rxon_assoc(il, &il->ctx);
		if (rc) {
			IL_ERR("Error setting RXON_ASSOC "
			       "configuration (%d).\n", rc);
			return rc;
		}

		memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
		/*
		 * We do not commit tx power settings while channel changing,
		 * do it now if tx power changed.
		 */
		il_set_tx_power(il, il->tx_power_next, false);
		return 0;
	}

	/* If we are currently associated and the new config requires
	 * an RXON_ASSOC and the new config wants the associated mask enabled,
	 * we must clear the associated from the active configuration
	 * before we apply the new config */
	if (il_is_associated(il) && new_assoc) {
		D_INFO("Toggling associated bit on current RXON\n");
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;

		/*
		 * reserved4 and 5 could have been filled by the iwlcore code.
		 * Let's clear them before pushing to the 3945.
		 */
		active_rxon->reserved4 = 0;
		active_rxon->reserved5 = 0;
		rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
				     &il->ctx.active);

		/* If the mask clearing failed then we set
		 * active_rxon back to what it was previously */
		if (rc) {
			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
			IL_ERR("Error clearing ASSOC_MSK on current "
			       "configuration (%d).\n", rc);
			return rc;
		}
		/* uCode just dropped its station table with the RXON;
		 * rebuild driver-known stations in the uCode */
		il_clear_ucode_stations(il, &il->ctx);
		il_restore_stations(il, &il->ctx);
	}

	D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
	       "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
	       le16_to_cpu(staging_rxon->channel), staging_rxon->bssid_addr);

	/*
	 * reserved4 and 5 could have been filled by the iwlcore code.
	 * Let's clear them before pushing to the 3945.
	 */
	staging_rxon->reserved4 = 0;
	staging_rxon->reserved5 = 0;

	il_set_rxon_hwcrypto(il, ctx, !il3945_mod_params.sw_crypto);

	/* Apply the new configuration */
	rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
			     staging_rxon);
	if (rc) {
		IL_ERR("Error setting new configuration (%d).\n", rc);
		return rc;
	}

	/* commit: staging becomes the active configuration */
	memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));

	if (!new_assoc) {
		il_clear_ucode_stations(il, &il->ctx);
		il_restore_stations(il, &il->ctx);
	}

	/* If we issue a new RXON command which required a tune then we must
	 * send a new TXPOWER command or we won't be able to Tx any frames */
	rc = il_set_tx_power(il, il->tx_power_next, true);
	if (rc) {
		IL_ERR("Error setting Tx power (%d).\n", rc);
		return rc;
	}

	/* Init the hardware's rate fallback order based on the band */
	rc = il3945_init_hw_rate_table(il);
	if (rc) {
		IL_ERR("Error setting HW rate table: %02X\n", rc);
		return -EIO;
	}

	return 0;
}
1845
1846/**
1847 * il3945_reg_txpower_periodic - called when time to check our temperature.
1848 *
1849 * -- reset periodic timer
1850 * -- see if temp has changed enough to warrant re-calibration ... if so:
1851 * -- correct coeffs for temp (can reset temp timer)
1852 * -- save this temp as "last",
1853 * -- send new set of gain settings to NIC
1854 * NOTE: This should continue working, even when we're not associated,
1855 * so we can keep our internal table of scan powers current. */
1856void
1857il3945_reg_txpower_periodic(struct il_priv *il)
1858{
1859 /* This will kick in the "brute force"
1860 * il3945_hw_reg_comp_txpower_temp() below */
1861 if (!il3945_is_temp_calib_needed(il))
1862 goto reschedule;
1863
1864 /* Set up a new set of temp-adjusted TxPowers, send to NIC.
1865 * This is based *only* on current temperature,
1866 * ignoring any previous power measurements */
1867 il3945_hw_reg_comp_txpower_temp(il);
1868
1869reschedule:
1870 queue_delayed_work(il->workqueue, &il->_3945.thermal_periodic,
1871 REG_RECALIB_PERIOD * HZ);
1872}
1873
1874static void
1875il3945_bg_reg_txpower_periodic(struct work_struct *work)
1876{
1877 struct il_priv *il = container_of(work, struct il_priv,
1878 _3945.thermal_periodic.work);
1879
1880 if (test_bit(S_EXIT_PENDING, &il->status))
1881 return;
1882
1883 mutex_lock(&il->mutex);
1884 il3945_reg_txpower_periodic(il);
1885 mutex_unlock(&il->mutex);
1886}
1887
1888/**
1889 * il3945_hw_reg_get_ch_grp_idx - find the channel-group idx (0-4) for channel.
1890 *
1891 * This function is used when initializing channel-info structs.
1892 *
1893 * NOTE: These channel groups do *NOT* match the bands above!
1894 * These channel groups are based on factory-tested channels;
1895 * on A-band, EEPROM's "group frequency" entries represent the top
1896 * channel in each group 1-4. Group 5 All B/G channels are in group 0.
1897 */
1898static u16
1899il3945_hw_reg_get_ch_grp_idx(struct il_priv *il,
1900 const struct il_channel_info *ch_info)
1901{
1902 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1903 struct il3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
1904 u8 group;
1905 u16 group_idx = 0; /* based on factory calib frequencies */
1906 u8 grp_channel;
1907
1908 /* Find the group idx for the channel ... don't use idx 1(?) */
1909 if (il_is_channel_a_band(ch_info)) {
1910 for (group = 1; group < 5; group++) {
1911 grp_channel = ch_grp[group].group_channel;
1912 if (ch_info->channel <= grp_channel) {
1913 group_idx = group;
1914 break;
1915 }
1916 }
1917 /* group 4 has a few channels *above* its factory cal freq */
1918 if (group == 5)
1919 group_idx = 4;
1920 } else
1921 group_idx = 0; /* 2.4 GHz, group 0 */
1922
1923 D_POWER("Chnl %d mapped to grp %d\n", ch_info->channel, group_idx);
1924 return group_idx;
1925}
1926
1927/**
1928 * il3945_hw_reg_get_matched_power_idx - Interpolate to get nominal idx
1929 *
1930 * Interpolate to get nominal (i.e. at factory calibration temperature) idx
1931 * into radio/DSP gain settings table for requested power.
1932 */
1933static int
1934il3945_hw_reg_get_matched_power_idx(struct il_priv *il, s8 requested_power,
1935 s32 setting_idx, s32 *new_idx)
1936{
1937 const struct il3945_eeprom_txpower_group *chnl_grp = NULL;
1938 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1939 s32 idx0, idx1;
1940 s32 power = 2 * requested_power;
1941 s32 i;
1942 const struct il3945_eeprom_txpower_sample *samples;
1943 s32 gains0, gains1;
1944 s32 res;
1945 s32 denominator;
1946
1947 chnl_grp = &eeprom->groups[setting_idx];
1948 samples = chnl_grp->samples;
1949 for (i = 0; i < 5; i++) {
1950 if (power == samples[i].power) {
1951 *new_idx = samples[i].gain_idx;
1952 return 0;
1953 }
1954 }
1955
1956 if (power > samples[1].power) {
1957 idx0 = 0;
1958 idx1 = 1;
1959 } else if (power > samples[2].power) {
1960 idx0 = 1;
1961 idx1 = 2;
1962 } else if (power > samples[3].power) {
1963 idx0 = 2;
1964 idx1 = 3;
1965 } else {
1966 idx0 = 3;
1967 idx1 = 4;
1968 }
1969
1970 denominator = (s32) samples[idx1].power - (s32) samples[idx0].power;
1971 if (denominator == 0)
1972 return -EINVAL;
1973 gains0 = (s32) samples[idx0].gain_idx * (1 << 19);
1974 gains1 = (s32) samples[idx1].gain_idx * (1 << 19);
1975 res =
1976 gains0 + (gains1 - gains0) * ((s32) power -
1977 (s32) samples[idx0].power) /
1978 denominator + (1 << 18);
1979 *new_idx = res >> 19;
1980 return 0;
1981}
1982
1983static void
1984il3945_hw_reg_init_channel_groups(struct il_priv *il)
1985{
1986 u32 i;
1987 s32 rate_idx;
1988 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1989 const struct il3945_eeprom_txpower_group *group;
1990
1991 D_POWER("Initializing factory calib info from EEPROM\n");
1992
1993 for (i = 0; i < IL_NUM_TX_CALIB_GROUPS; i++) {
1994 s8 *clip_pwrs; /* table of power levels for each rate */
1995 s8 satur_pwr; /* saturation power for each chnl group */
1996 group = &eeprom->groups[i];
1997
1998 /* sanity check on factory saturation power value */
1999 if (group->saturation_power < 40) {
2000 IL_WARN("Error: saturation power is %d, "
2001 "less than minimum expected 40\n",
2002 group->saturation_power);
2003 return;
2004 }
2005
2006 /*
2007 * Derive requested power levels for each rate, based on
2008 * hardware capabilities (saturation power for band).
2009 * Basic value is 3dB down from saturation, with further
2010 * power reductions for highest 3 data rates. These
2011 * backoffs provide headroom for high rate modulation
2012 * power peaks, without too much distortion (clipping).
2013 */
2014 /* we'll fill in this array with h/w max power levels */
2015 clip_pwrs = (s8 *) il->_3945.clip_groups[i].clip_powers;
2016
2017 /* divide factory saturation power by 2 to find -3dB level */
2018 satur_pwr = (s8) (group->saturation_power >> 1);
2019
2020 /* fill in channel group's nominal powers for each rate */
2021 for (rate_idx = 0; rate_idx < RATE_COUNT_3945;
2022 rate_idx++, clip_pwrs++) {
2023 switch (rate_idx) {
2024 case RATE_36M_IDX_TBL:
2025 if (i == 0) /* B/G */
2026 *clip_pwrs = satur_pwr;
2027 else /* A */
2028 *clip_pwrs = satur_pwr - 5;
2029 break;
2030 case RATE_48M_IDX_TBL:
2031 if (i == 0)
2032 *clip_pwrs = satur_pwr - 7;
2033 else
2034 *clip_pwrs = satur_pwr - 10;
2035 break;
2036 case RATE_54M_IDX_TBL:
2037 if (i == 0)
2038 *clip_pwrs = satur_pwr - 9;
2039 else
2040 *clip_pwrs = satur_pwr - 12;
2041 break;
2042 default:
2043 *clip_pwrs = satur_pwr;
2044 break;
2045 }
2046 }
2047 }
2048}
2049
2050/**
2051 * il3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
2052 *
2053 * Second pass (during init) to set up il->channel_info
2054 *
2055 * Set up Tx-power settings in our channel info database for each VALID
2056 * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
2057 * and current temperature.
2058 *
2059 * Since this is based on current temperature (at init time), these values may
2060 * not be valid for very long, but it gives us a starting/default point,
2061 * and allows us to active (i.e. using Tx) scan.
2062 *
2063 * This does *not* write values to NIC, just sets up our internal table.
2064 */
2065int
2066il3945_txpower_set_from_eeprom(struct il_priv *il)
2067{
2068 struct il_channel_info *ch_info = NULL;
2069 struct il3945_channel_power_info *pwr_info;
2070 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
2071 int delta_idx;
2072 u8 rate_idx;
2073 u8 scan_tbl_idx;
2074 const s8 *clip_pwrs; /* array of power levels for each rate */
2075 u8 gain, dsp_atten;
2076 s8 power;
2077 u8 pwr_idx, base_pwr_idx, a_band;
2078 u8 i;
2079 int temperature;
2080
2081 /* save temperature reference,
2082 * so we can determine next time to calibrate */
2083 temperature = il3945_hw_reg_txpower_get_temperature(il);
2084 il->last_temperature = temperature;
2085
2086 il3945_hw_reg_init_channel_groups(il);
2087
2088 /* initialize Tx power info for each and every channel, 2.4 and 5.x */
2089 for (i = 0, ch_info = il->channel_info; i < il->channel_count;
2090 i++, ch_info++) {
2091 a_band = il_is_channel_a_band(ch_info);
2092 if (!il_is_channel_valid(ch_info))
2093 continue;
2094
2095 /* find this channel's channel group (*not* "band") idx */
2096 ch_info->group_idx = il3945_hw_reg_get_ch_grp_idx(il, ch_info);
2097
2098 /* Get this chnlgrp's rate->max/clip-powers table */
2099 clip_pwrs =
2100 il->_3945.clip_groups[ch_info->group_idx].clip_powers;
2101
2102 /* calculate power idx *adjustment* value according to
2103 * diff between current temperature and factory temperature */
2104 delta_idx =
2105 il3945_hw_reg_adjust_power_by_temp(temperature,
2106 eeprom->groups[ch_info->
2107 group_idx].
2108 temperature);
2109
2110 D_POWER("Delta idx for channel %d: %d [%d]\n", ch_info->channel,
2111 delta_idx, temperature + IL_TEMP_CONVERT);
2112
2113 /* set tx power value for all OFDM rates */
2114 for (rate_idx = 0; rate_idx < IL_OFDM_RATES; rate_idx++) {
2115 s32 uninitialized_var(power_idx);
2116 int rc;
2117
2118 /* use channel group's clip-power table,
2119 * but don't exceed channel's max power */
2120 s8 pwr = min(ch_info->max_power_avg,
2121 clip_pwrs[rate_idx]);
2122
2123 pwr_info = &ch_info->power_info[rate_idx];
2124
2125 /* get base (i.e. at factory-measured temperature)
2126 * power table idx for this rate's power */
2127 rc = il3945_hw_reg_get_matched_power_idx(il, pwr,
2128 ch_info->
2129 group_idx,
2130 &power_idx);
2131 if (rc) {
2132 IL_ERR("Invalid power idx\n");
2133 return rc;
2134 }
2135 pwr_info->base_power_idx = (u8) power_idx;
2136
2137 /* temperature compensate */
2138 power_idx += delta_idx;
2139
2140 /* stay within range of gain table */
2141 power_idx = il3945_hw_reg_fix_power_idx(power_idx);
2142
2143 /* fill 1 OFDM rate's il3945_channel_power_info struct */
2144 pwr_info->requested_power = pwr;
2145 pwr_info->power_table_idx = (u8) power_idx;
2146 pwr_info->tpc.tx_gain =
2147 power_gain_table[a_band][power_idx].tx_gain;
2148 pwr_info->tpc.dsp_atten =
2149 power_gain_table[a_band][power_idx].dsp_atten;
2150 }
2151
2152 /* set tx power for CCK rates, based on OFDM 12 Mbit settings */
2153 pwr_info = &ch_info->power_info[RATE_12M_IDX_TBL];
2154 power = pwr_info->requested_power + IL_CCK_FROM_OFDM_POWER_DIFF;
2155 pwr_idx = pwr_info->power_table_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
2156 base_pwr_idx =
2157 pwr_info->base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
2158
2159 /* stay within table range */
2160 pwr_idx = il3945_hw_reg_fix_power_idx(pwr_idx);
2161 gain = power_gain_table[a_band][pwr_idx].tx_gain;
2162 dsp_atten = power_gain_table[a_band][pwr_idx].dsp_atten;
2163
2164 /* fill each CCK rate's il3945_channel_power_info structure
2165 * NOTE: All CCK-rate Txpwrs are the same for a given chnl!
2166 * NOTE: CCK rates start at end of OFDM rates! */
2167 for (rate_idx = 0; rate_idx < IL_CCK_RATES; rate_idx++) {
2168 pwr_info =
2169 &ch_info->power_info[rate_idx + IL_OFDM_RATES];
2170 pwr_info->requested_power = power;
2171 pwr_info->power_table_idx = pwr_idx;
2172 pwr_info->base_power_idx = base_pwr_idx;
2173 pwr_info->tpc.tx_gain = gain;
2174 pwr_info->tpc.dsp_atten = dsp_atten;
2175 }
2176
2177 /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
2178 for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
2179 scan_tbl_idx++) {
2180 s32 actual_idx =
2181 (scan_tbl_idx ==
2182 0) ? RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
2183 il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
2184 actual_idx, clip_pwrs,
2185 ch_info, a_band);
2186 }
2187 }
2188
2189 return 0;
2190}
2191
2192int
2193il3945_hw_rxq_stop(struct il_priv *il)
2194{
2195 int rc;
2196
2197 il_wr(il, FH39_RCSR_CONFIG(0), 0);
2198 rc = il_poll_bit(il, FH39_RSSR_STATUS,
2199 FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
2200 if (rc < 0)
2201 IL_ERR("Can't stop Rx DMA.\n");
2202
2203 return 0;
2204}
2205
2206int
2207il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
2208{
2209 int txq_id = txq->q.id;
2210
2211 struct il3945_shared *shared_data = il->_3945.shared_virt;
2212
2213 shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32) txq->q.dma_addr);
2214
2215 il_wr(il, FH39_CBCC_CTRL(txq_id), 0);
2216 il_wr(il, FH39_CBCC_BASE(txq_id), 0);
2217
2218 il_wr(il, FH39_TCSR_CONFIG(txq_id),
2219 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
2220 FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
2221 FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
2222 FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
2223 FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
2224
2225 /* fake read to flush all prev. writes */
2226 _il_rd(il, FH39_TSSR_CBB_BASE);
2227
2228 return 0;
2229}
2230
2231/*
2232 * HCMD utils
2233 */
2234static u16
2235il3945_get_hcmd_size(u8 cmd_id, u16 len)
2236{
2237 switch (cmd_id) {
2238 case C_RXON:
2239 return sizeof(struct il3945_rxon_cmd);
2240 case C_POWER_TBL:
2241 return sizeof(struct il3945_powertable_cmd);
2242 default:
2243 return len;
2244 }
2245}
2246
2247static u16
2248il3945_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
2249{
2250 struct il3945_addsta_cmd *addsta = (struct il3945_addsta_cmd *)data;
2251 addsta->mode = cmd->mode;
2252 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
2253 memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
2254 addsta->station_flags = cmd->station_flags;
2255 addsta->station_flags_msk = cmd->station_flags_msk;
2256 addsta->tid_disable_tx = cpu_to_le16(0);
2257 addsta->rate_n_flags = cmd->rate_n_flags;
2258 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
2259 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
2260 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
2261
2262 return (u16) sizeof(struct il3945_addsta_cmd);
2263}
2264
2265static int
2266il3945_add_bssid_station(struct il_priv *il, const u8 * addr, u8 * sta_id_r)
2267{
2268 struct il_rxon_context *ctx = &il->ctx;
2269 int ret;
2270 u8 sta_id;
2271 unsigned long flags;
2272
2273 if (sta_id_r)
2274 *sta_id_r = IL_INVALID_STATION;
2275
2276 ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
2277 if (ret) {
2278 IL_ERR("Unable to add station %pM\n", addr);
2279 return ret;
2280 }
2281
2282 if (sta_id_r)
2283 *sta_id_r = sta_id;
2284
2285 spin_lock_irqsave(&il->sta_lock, flags);
2286 il->stations[sta_id].used |= IL_STA_LOCAL;
2287 spin_unlock_irqrestore(&il->sta_lock, flags);
2288
2289 return 0;
2290}
2291
2292static int
2293il3945_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
2294 bool add)
2295{
2296 struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
2297 int ret;
2298
2299 if (add) {
2300 ret =
2301 il3945_add_bssid_station(il, vif->bss_conf.bssid,
2302 &vif_priv->ibss_bssid_sta_id);
2303 if (ret)
2304 return ret;
2305
2306 il3945_sync_sta(il, vif_priv->ibss_bssid_sta_id,
2307 (il->band ==
2308 IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP :
2309 RATE_1M_PLCP);
2310 il3945_rate_scale_init(il->hw, vif_priv->ibss_bssid_sta_id);
2311
2312 return 0;
2313 }
2314
2315 return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
2316 vif->bss_conf.bssid);
2317}
2318
2319/**
2320 * il3945_init_hw_rate_table - Initialize the hardware rate fallback table
2321 */
2322int
2323il3945_init_hw_rate_table(struct il_priv *il)
2324{
2325 int rc, i, idx, prev_idx;
2326 struct il3945_rate_scaling_cmd rate_cmd = {
2327 .reserved = {0, 0, 0},
2328 };
2329 struct il3945_rate_scaling_info *table = rate_cmd.table;
2330
2331 for (i = 0; i < ARRAY_SIZE(il3945_rates); i++) {
2332 idx = il3945_rates[i].table_rs_idx;
2333
2334 table[idx].rate_n_flags =
2335 il3945_hw_set_rate_n_flags(il3945_rates[i].plcp, 0);
2336 table[idx].try_cnt = il->retry_rate;
2337 prev_idx = il3945_get_prev_ieee_rate(i);
2338 table[idx].next_rate_idx = il3945_rates[prev_idx].table_rs_idx;
2339 }
2340
2341 switch (il->band) {
2342 case IEEE80211_BAND_5GHZ:
2343 D_RATE("Select A mode rate scale\n");
2344 /* If one of the following CCK rates is used,
2345 * have it fall back to the 6M OFDM rate */
2346 for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++)
2347 table[i].next_rate_idx =
2348 il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
2349
2350 /* Don't fall back to CCK rates */
2351 table[RATE_12M_IDX_TBL].next_rate_idx = RATE_9M_IDX_TBL;
2352
2353 /* Don't drop out of OFDM rates */
2354 table[RATE_6M_IDX_TBL].next_rate_idx =
2355 il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
2356 break;
2357
2358 case IEEE80211_BAND_2GHZ:
2359 D_RATE("Select B/G mode rate scale\n");
2360 /* If an OFDM rate is used, have it fall back to the
2361 * 1M CCK rates */
2362
2363 if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
2364 il_is_associated(il)) {
2365
2366 idx = IL_FIRST_CCK_RATE;
2367 for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++)
2368 table[i].next_rate_idx =
2369 il3945_rates[idx].table_rs_idx;
2370
2371 idx = RATE_11M_IDX_TBL;
2372 /* CCK shouldn't fall back to OFDM... */
2373 table[idx].next_rate_idx = RATE_5M_IDX_TBL;
2374 }
2375 break;
2376
2377 default:
2378 WARN_ON(1);
2379 break;
2380 }
2381
2382 /* Update the rate scaling for control frame Tx */
2383 rate_cmd.table_id = 0;
2384 rc = il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
2385 if (rc)
2386 return rc;
2387
2388 /* Update the rate scaling for data frame Tx */
2389 rate_cmd.table_id = 1;
2390 return il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
2391}
2392
2393/* Called when initializing driver */
2394int
2395il3945_hw_set_hw_params(struct il_priv *il)
2396{
2397 memset((void *)&il->hw_params, 0, sizeof(struct il_hw_params));
2398
2399 il->_3945.shared_virt =
2400 dma_alloc_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared),
2401 &il->_3945.shared_phys, GFP_KERNEL);
2402 if (!il->_3945.shared_virt) {
2403 IL_ERR("failed to allocate pci memory\n");
2404 return -ENOMEM;
2405 }
2406
2407 /* Assign number of Usable TX queues */
2408 il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues;
2409
2410 il->hw_params.tfd_size = sizeof(struct il3945_tfd);
2411 il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_3K);
2412 il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2413 il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
2414 il->hw_params.max_stations = IL3945_STATION_COUNT;
2415 il->ctx.bcast_sta_id = IL3945_BROADCAST_ID;
2416
2417 il->sta_key_max_num = STA_KEY_MAX_NUM;
2418
2419 il->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
2420 il->hw_params.max_beacon_itrvl = IL39_MAX_UCODE_BEACON_INTERVAL;
2421 il->hw_params.beacon_time_tsf_bits = IL3945_EXT_BEACON_TIME_POS;
2422
2423 return 0;
2424}
2425
2426unsigned int
2427il3945_hw_get_beacon_cmd(struct il_priv *il, struct il3945_frame *frame,
2428 u8 rate)
2429{
2430 struct il3945_tx_beacon_cmd *tx_beacon_cmd;
2431 unsigned int frame_size;
2432
2433 tx_beacon_cmd = (struct il3945_tx_beacon_cmd *)&frame->u;
2434 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
2435
2436 tx_beacon_cmd->tx.sta_id = il->ctx.bcast_sta_id;
2437 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2438
2439 frame_size =
2440 il3945_fill_beacon_frame(il, tx_beacon_cmd->frame,
2441 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
2442
2443 BUG_ON(frame_size > MAX_MPDU_SIZE);
2444 tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
2445
2446 tx_beacon_cmd->tx.rate = rate;
2447 tx_beacon_cmd->tx.tx_flags =
2448 (TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK);
2449
2450 /* supp_rates[0] == OFDM start at IL_FIRST_OFDM_RATE */
2451 tx_beacon_cmd->tx.supp_rates[0] =
2452 (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
2453
2454 tx_beacon_cmd->tx.supp_rates[1] = (IL_CCK_BASIC_RATES_MASK & 0xF);
2455
2456 return sizeof(struct il3945_tx_beacon_cmd) + frame_size;
2457}
2458
/* Register the 3945-specific Rx/Tx response handlers */
void
il3945_hw_handler_setup(struct il_priv *il)
{
	il->handlers[C_TX] = il3945_hdl_tx;
	il->handlers[N_3945_RX] = il3945_hdl_rx;
}
2465
/* Set up the periodic thermal-recalibration delayed work */
void
il3945_hw_setup_deferred_work(struct il_priv *il)
{
	INIT_DELAYED_WORK(&il->_3945.thermal_periodic,
			  il3945_bg_reg_txpower_periodic);
}
2472
/* Cancel the periodic thermal-recalibration delayed work */
void
il3945_hw_cancel_deferred_work(struct il_priv *il)
{
	cancel_delayed_work(&il->_3945.thermal_periodic);
}
2478
/* check contents of special bootstrap uCode SRAM */
static int
il3945_verify_bsm(struct il_priv *il)
{
	__le32 *image = il->ucode_boot.v_addr;
	u32 len = il->ucode_boot.len;
	u32 reg;
	u32 val;

	D_INFO("Begin verify bsm\n");

	/* verify BSM SRAM contents */
	/* NOTE(review): this first read's result is overwritten by the
	 * loop below before use — possibly a leftover; verify whether
	 * the read itself is needed before removing */
	val = il_rd_prph(il, BSM_WR_DWCOUNT_REG);
	/* compare each SRAM word against the bootstrap image in host memory */
	for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
	     reg += sizeof(u32), image++) {
		val = il_rd_prph(il, reg);
		if (val != le32_to_cpu(*image)) {
			IL_ERR("BSM uCode verification failed at "
			       "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
			       BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
			       len, val, le32_to_cpu(*image));
			return -EIO;
		}
	}

	D_INFO("BSM bootstrap uCode image OK\n");

	return 0;
}
2508
2509/******************************************************************************
2510 *
2511 * EEPROM related functions
2512 *
2513 ******************************************************************************/
2514
2515/*
2516 * Clear the OWNER_MSK, to establish driver (instead of uCode running on
2517 * embedded controller) as EEPROM reader; each read is a series of pulses
2518 * to/from the EEPROM chip, not a single event, so even reads could conflict
2519 * if they weren't arbitrated by some ownership mechanism. Here, the driver
2520 * simply claims ownership, which should be safe when this function is called
2521 * (i.e. before loading uCode!).
2522 */
/* Claim EEPROM ownership for the driver by clearing the owner mask;
 * see the block comment above for why this is safe pre-uCode-load. */
static int
il3945_eeprom_acquire_semaphore(struct il_priv *il)
{
	_il_clear_bit(il, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
	return 0;
}
2529
/*
 * No-op: the 3945 acquire path just clears the owner mask and nothing
 * needs to be undone; this stub exists to satisfy the eeprom_ops
 * interface (see il3945_lib below).  The redundant bare `return;` of
 * the original is dropped.
 */
static void
il3945_eeprom_release_semaphore(struct il_priv *il)
{
}
2535
2536 /**
2537 * il3945_load_bsm - Load bootstrap instructions
2538 *
2539 * BSM operation:
2540 *
2541 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
2542 * in special SRAM that does not power down during RFKILL. When powering back
2543 * up after power-saving sleeps (or during initial uCode load), the BSM loads
2544 * the bootstrap program into the on-board processor, and starts it.
2545 *
2546 * The bootstrap program loads (via DMA) instructions and data for a new
2547 * program from host DRAM locations indicated by the host driver in the
2548 * BSM_DRAM_* registers. Once the new program is loaded, it starts
2549 * automatically.
2550 *
2551 * When initializing the NIC, the host driver points the BSM to the
2552 * "initialize" uCode image. This uCode sets up some internal data, then
2553 * notifies host via "initialize alive" that it is complete.
2554 *
2555 * The host then replaces the BSM_DRAM_* pointer values to point to the
2556 * normal runtime uCode instructions and a backup uCode data cache buffer
2557 * (filled initially with starting data values for the on-board processor),
2558 * then triggers the "initialize" uCode to load and launch the runtime uCode,
2559 * which begins normal operation.
2560 *
2561 * When doing a power-save shutdown, runtime uCode saves data SRAM into
2562 * the backup data cache in DRAM before SRAM is powered down.
2563 *
2564 * When powering back up, the BSM loads the bootstrap program. This reloads
2565 * the runtime uCode instructions and the backup data cache into SRAM,
2566 * and re-launches the runtime uCode from where it left off.
2567 */
2568static int
2569il3945_load_bsm(struct il_priv *il)
2570{
2571 __le32 *image = il->ucode_boot.v_addr;
2572 u32 len = il->ucode_boot.len;
2573 dma_addr_t pinst;
2574 dma_addr_t pdata;
2575 u32 inst_len;
2576 u32 data_len;
2577 int rc;
2578 int i;
2579 u32 done;
2580 u32 reg_offset;
2581
2582 D_INFO("Begin load bsm\n");
2583
2584 /* make sure bootstrap program is no larger than BSM's SRAM size */
2585 if (len > IL39_MAX_BSM_SIZE)
2586 return -EINVAL;
2587
2588 /* Tell bootstrap uCode where to find the "Initialize" uCode
2589 * in host DRAM ... host DRAM physical address bits 31:0 for 3945.
2590 * NOTE: il3945_initialize_alive_start() will replace these values,
2591 * after the "initialize" uCode has run, to point to
2592 * runtime/protocol instructions and backup data cache. */
2593 pinst = il->ucode_init.p_addr;
2594 pdata = il->ucode_init_data.p_addr;
2595 inst_len = il->ucode_init.len;
2596 data_len = il->ucode_init_data.len;
2597
2598 il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
2599 il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
2600 il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
2601 il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
2602
2603 /* Fill BSM memory with bootstrap instructions */
2604 for (reg_offset = BSM_SRAM_LOWER_BOUND;
2605 reg_offset < BSM_SRAM_LOWER_BOUND + len;
2606 reg_offset += sizeof(u32), image++)
2607 _il_wr_prph(il, reg_offset, le32_to_cpu(*image));
2608
2609 rc = il3945_verify_bsm(il);
2610 if (rc)
2611 return rc;
2612
2613 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
2614 il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
2615 il_wr_prph(il, BSM_WR_MEM_DST_REG, IL39_RTC_INST_LOWER_BOUND);
2616 il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
2617
2618 /* Load bootstrap code into instruction SRAM now,
2619 * to prepare to load "initialize" uCode */
2620 il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
2621
2622 /* Wait for load of bootstrap uCode to finish */
2623 for (i = 0; i < 100; i++) {
2624 done = il_rd_prph(il, BSM_WR_CTRL_REG);
2625 if (!(done & BSM_WR_CTRL_REG_BIT_START))
2626 break;
2627 udelay(10);
2628 }
2629 if (i < 100)
2630 D_INFO("BSM write complete, poll %d iterations\n", i);
2631 else {
2632 IL_ERR("BSM write did not complete!\n");
2633 return -EIO;
2634 }
2635
2636 /* Enable future boot loads whenever power management unit triggers it
2637 * (e.g. when powering back up after power-save shutdown) */
2638 il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
2639
2640 return 0;
2641}
2642
/* Host-command ops: 3945-specific RXON handling */
static struct il_hcmd_ops il3945_hcmd = {
	.rxon_assoc = il3945_send_rxon_assoc,
	.commit_rxon = il3945_commit_rxon,
};
2647
/* Core library ops: Tx queue handling, uCode load, APM and EEPROM access */
static struct il_lib_ops il3945_lib = {
	.txq_attach_buf_to_tfd = il3945_hw_txq_attach_buf_to_tfd,
	.txq_free_tfd = il3945_hw_txq_free_tfd,
	.txq_init = il3945_hw_tx_queue_init,
	.load_ucode = il3945_load_bsm,
	.dump_nic_error_log = il3945_dump_nic_error_log,
	.apm_ops = {
		.init = il3945_apm_init,
		.config = il3945_nic_config,
	},
	.eeprom_ops = {
		/* 3945 EEPROM has no HT40 channels; last two slots
		 * deliberately point at the NO_HT40 placeholder */
		.regulatory_bands = {
			EEPROM_REGULATORY_BAND_1_CHANNELS,
			EEPROM_REGULATORY_BAND_2_CHANNELS,
			EEPROM_REGULATORY_BAND_3_CHANNELS,
			EEPROM_REGULATORY_BAND_4_CHANNELS,
			EEPROM_REGULATORY_BAND_5_CHANNELS,
			EEPROM_REGULATORY_BAND_NO_HT40,
			EEPROM_REGULATORY_BAND_NO_HT40,
		},
		.acquire_semaphore = il3945_eeprom_acquire_semaphore,
		.release_semaphore = il3945_eeprom_release_semaphore,
	},
	.send_tx_power = il3945_send_tx_power,
	.is_valid_rtc_data_addr = il3945_hw_valid_rtc_data_addr,

#ifdef CONFIG_IWLEGACY_DEBUGFS
	.debugfs_ops = {
		.rx_stats_read = il3945_ucode_rx_stats_read,
		.tx_stats_read = il3945_ucode_tx_stats_read,
		.general_stats_read = il3945_ucode_general_stats_read,
	},
#endif
};
2682
/* mac80211 glue callbacks specific to the 3945 */
static const struct il_legacy_ops il3945_legacy_ops = {
	.post_associate = il3945_post_associate,
	.config_ap = il3945_config_ap,
	.manage_ibss_station = il3945_manage_ibss_station,
};
2688
/* Host-command helpers: 3945 command sizing, ADD_STA packing, scanning */
static struct il_hcmd_utils_ops il3945_hcmd_utils = {
	.get_hcmd_size = il3945_get_hcmd_size,
	.build_addsta_hcmd = il3945_build_addsta_hcmd,
	.request_scan = il3945_request_scan,
	.post_scan = il3945_post_scan,
};
2695
/* Top-level ops bundle referenced from the device configs below */
static const struct il_ops il3945_ops = {
	.lib = &il3945_lib,
	.hcmd = &il3945_hcmd,
	.utils = &il3945_hcmd_utils,
	.led = &il3945_led_ops,
	.legacy = &il3945_legacy_ops,
	.ieee80211_ops = &il3945_hw_ops,
};
2704
/* Hardware parameters shared by all 3945 SKUs */
static struct il_base_params il3945_base_params = {
	.eeprom_size = IL3945_EEPROM_IMG_SIZE,
	.num_of_queues = IL39_NUM_QUEUES,
	.pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
	.set_l0s = false,
	.use_bsm = true,	/* 3945 loads uCode via the BSM */
	.led_compensation = 64,
	.wd_timeout = IL_DEF_WD_TIMEOUT,
};
2714
/* 3945BG: 2.4 GHz-only SKU */
static struct il_cfg il3945_bg_cfg = {
	.name = "3945BG",
	.fw_name_pre = IL3945_FW_PRE,
	.ucode_api_max = IL3945_UCODE_API_MAX,
	.ucode_api_min = IL3945_UCODE_API_MIN,
	.sku = IL_SKU_G,
	.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
	.ops = &il3945_ops,
	.mod_params = &il3945_mod_params,
	.base_params = &il3945_base_params,
	.led_mode = IL_LED_BLINK,
};
2727
/* 3945ABG: dual-band (2.4 + 5 GHz) SKU */
static struct il_cfg il3945_abg_cfg = {
	.name = "3945ABG",
	.fw_name_pre = IL3945_FW_PRE,
	.ucode_api_max = IL3945_UCODE_API_MAX,
	.ucode_api_min = IL3945_UCODE_API_MIN,
	.sku = IL_SKU_A | IL_SKU_G,
	.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
	.ops = &il3945_ops,
	.mod_params = &il3945_mod_params,
	.base_params = &il3945_base_params,
	.led_mode = IL_LED_BLINK,
};
2740
/* PCI device table: specific BG subsystem IDs first, then catch-all
 * entries that treat remaining 0x4222/0x4227 devices as ABG */
DEFINE_PCI_DEVICE_TABLE(il3945_hw_card_ids) = {
	{IL_PCI_DEVICE(0x4222, 0x1005, il3945_bg_cfg)},
	{IL_PCI_DEVICE(0x4222, 0x1034, il3945_bg_cfg)},
	{IL_PCI_DEVICE(0x4222, 0x1044, il3945_bg_cfg)},
	{IL_PCI_DEVICE(0x4227, 0x1014, il3945_bg_cfg)},
	{IL_PCI_DEVICE(0x4222, PCI_ANY_ID, il3945_abg_cfg)},
	{IL_PCI_DEVICE(0x4227, PCI_ANY_ID, il3945_abg_cfg)},
	{0}
};

MODULE_DEVICE_TABLE(pci, il3945_hw_card_ids);
diff --git a/drivers/net/wireless/iwlegacy/3945.h b/drivers/net/wireless/iwlegacy/3945.h
new file mode 100644
index 000000000000..2b2895c544d7
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945.h
@@ -0,0 +1,626 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __il_3945_h__
28#define __il_3945_h__
29
30#include <linux/pci.h> /* for struct pci_device_id */
31#include <linux/kernel.h>
32#include <net/ieee80211_radiotap.h>
33
34/* Hardware specific file defines the PCI IDs table for that hardware module */
35extern const struct pci_device_id il3945_hw_card_ids[];
36
37#include "common.h"
38
39/* Highest firmware API version supported */
40#define IL3945_UCODE_API_MAX 2
41
42/* Lowest firmware API version supported */
43#define IL3945_UCODE_API_MIN 1
44
45#define IL3945_FW_PRE "iwlwifi-3945-"
46#define _IL3945_MODULE_FIRMWARE(api) IL3945_FW_PRE #api ".ucode"
47#define IL3945_MODULE_FIRMWARE(api) _IL3945_MODULE_FIRMWARE(api)
48
49/* Default noise level to report when noise measurement is not available.
50 * This may be because we're:
51 * 1) Not associated (4965, no beacon stats being sent to driver)
52 * 2) Scanning (noise measurement does not apply to associated channel)
53 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
54 * Use default noise value of -127 ... this is below the range of measurable
55 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
56 * Also, -127 works better than 0 when averaging frames with/without
57 * noise info (e.g. averaging might be done in app); measured dBm values are
58 * always negative ... using a negative value as the default keeps all
59 * averages within an s8's (used in some apps) range of negative values. */
60#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
61
62/* Module parameters accessible from iwl-*.c */
63extern struct il_mod_params il3945_mod_params;
64
/* Per-rate history window used by the 3945 rate-scaling algorithm. */
struct il3945_rate_scale_data {
	u64 data;		/* NOTE(review): presumably a success/failure bitmask of recent attempts — confirm in rs code */
	s32 success_counter;	/* successful tx attempts in window */
	s32 success_ratio;
	s32 counter;		/* total attempts recorded in window */
	s32 average_tpt;	/* measured average throughput at this rate */
	unsigned long stamp;	/* timestamp of last update — presumably jiffies, TODO confirm */
};
73
/* Per-station rate-scaling state (embedded in il3945_sta_priv below). */
struct il3945_rs_sta {
	spinlock_t lock;	/* serializes rate-scale state updates — TODO confirm exact scope */
	struct il_priv *il;	/* back-pointer to owning driver context */
	s32 *expected_tpt;	/* expected-throughput table currently in use */
	unsigned long last_partial_flush;
	unsigned long last_flush;
	u32 flush_time;		/* interval driving rate_scale_flush */
	u32 last_tx_packets;
	u32 tx_packets;
	u8 tgg;			/* NOTE(review): presumably 11g-protection ("TGg") flag — confirm */
	u8 flush_pending;
	u8 start_rate;		/* initial rate idx to try */
	struct timer_list rate_scale_flush;	/* periodic history-flush timer */
	struct il3945_rate_scale_data win[RATE_COUNT_3945];	/* one window per rate */
#ifdef CONFIG_MAC80211_DEBUGFS
	struct dentry *rs_sta_dbgfs_stats_table_file;
#endif

	/* used to be in sta_info */
	int last_txrate_idx;
};
95
96/*
97 * The common struct MUST be first because it is shared between
98 * 3945 and 4965!
99 */
struct il3945_sta_priv {
	struct il_station_priv_common common;	/* must remain the first member (shared with 4965) */
	struct il3945_rs_sta rs_sta;		/* 3945 rate-scaling state for this station */
};
104
/* Antenna selection modes: hardware diversity, or a fixed antenna. */
enum il3945_antenna {
	IL_ANTENNA_DIVERSITY,
	IL_ANTENNA_MAIN,
	IL_ANTENNA_AUX
};
110
111/*
112 * RTS threshold here is total size [2347] minus 4 FCS bytes
113 * Per spec:
114 * a value of 0 means RTS on all data/management packets
115 * a value > max MSDU size means no RTS
116 * else RTS for data/management frames where MPDU is larger
117 * than RTS value.
118 */
119#define DEFAULT_RTS_THRESHOLD 2347U
120#define MIN_RTS_THRESHOLD 0U
121#define MAX_RTS_THRESHOLD 2347U
122#define MAX_MSDU_SIZE 2304U
123#define MAX_MPDU_SIZE 2346U
124#define DEFAULT_BEACON_INTERVAL 100U
125#define DEFAULT_SHORT_RETRY_LIMIT 7U
126#define DEFAULT_LONG_RETRY_LIMIT 4U
127
128#define IL_TX_FIFO_AC0 0
129#define IL_TX_FIFO_AC1 1
130#define IL_TX_FIFO_AC2 2
131#define IL_TX_FIFO_AC3 3
132#define IL_TX_FIFO_HCCA_1 5
133#define IL_TX_FIFO_HCCA_2 6
134#define IL_TX_FIFO_NONE 7
135
136#define IEEE80211_DATA_LEN 2304
137#define IEEE80211_4ADDR_LEN 30
138#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
139#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
140
/*
 * Scratch buffer for building a frame (e.g. a beacon command) before
 * it is handed to the hardware.  The union is sized by the largest
 * member; 'list' links free/used frames — TODO confirm list usage in
 * iwl3945-base.c.
 */
struct il3945_frame {
	union {
		struct ieee80211_hdr frame;
		struct il3945_tx_beacon_cmd beacon;
		u8 raw[IEEE80211_FRAME_LEN];
		u8 cmd[360];
	} u;
	struct list_head list;
};
150
/* Convert between the raw 802.11 sequence-control field and the 12-bit
 * sequence number it carries (the low 4 bits hold the fragment number). */
#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
154
155#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
156#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
157#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
158
159#define IL_SUPPORTED_RATES_IE_LEN 8
160
161#define SCAN_INTERVAL 100
162
163#define MAX_TID_COUNT 9
164
165#define IL_INVALID_RATE 0xFF
166#define IL_INVALID_VALUE -1
167
168#define STA_PS_STATUS_WAKE 0
169#define STA_PS_STATUS_SLEEP 1
170
/* Per-peer IBSS sequence/fragment tracking entry, keyed by MAC address. */
struct il3945_ibss_seq {
	u8 mac[ETH_ALEN];	/* peer station address */
	u16 seq_num;		/* last sequence number seen from this peer */
	u16 frag_num;		/* last fragment number seen */
	unsigned long packet_time;
	struct list_head list;
};
178
/*
 * Accessors into a received 3945 packet: the Rx frame header follows a
 * variable number of PHY-stats bytes (phy_count), and the frame-end
 * record follows the variable-length payload.
 *
 * Fix: the macro argument is now fully parenthesized, so callers may
 * pass arbitrary pointer expressions (e.g. casts) safely.  Note that
 * IL_RX_END() evaluates its argument more than once via IL_RX_HDR();
 * do not pass an expression with side effects.
 */
#define IL_RX_HDR(x) ((struct il3945_rx_frame_hdr *)(\
		      (x)->u.rx_frame.stats.payload + \
		      (x)->u.rx_frame.stats.phy_count))
#define IL_RX_END(x) ((struct il3945_rx_frame_end *)(\
		      IL_RX_HDR(x)->payload + \
		      le16_to_cpu(IL_RX_HDR(x)->len)))
#define IL_RX_STATS(x) (&(x)->u.rx_frame.stats)
#define IL_RX_DATA(x) (IL_RX_HDR(x)->payload)
187
188/******************************************************************************
189 *
190 * Functions implemented in iwl3945-base.c which are forward declared here
191 * for use by iwl-*.c
192 *
193 *****************************************************************************/
194extern int il3945_calc_db_from_ratio(int sig_ratio);
195extern void il3945_rx_replenish(void *data);
196extern void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
197extern unsigned int il3945_fill_beacon_frame(struct il_priv *il,
198 struct ieee80211_hdr *hdr,
199 int left);
200extern int il3945_dump_nic_event_log(struct il_priv *il, bool full_log,
201 char **buf, bool display);
202extern void il3945_dump_nic_error_log(struct il_priv *il);
203
204/******************************************************************************
205 *
206 * Functions implemented in iwl-[34]*.c which are forward declared here
207 * for use by iwl3945-base.c
208 *
209 * NOTE: The implementation of these functions are hardware specific
210 * which is why they are in the hardware specific files (vs. iwl-base.c)
211 *
212 * Naming convention --
213 * il3945_ <-- Its part of iwlwifi (should be changed to il3945_)
214 * il3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
215 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
216 * il3945_bg_ <-- Called from work queue context
217 * il3945_mac_ <-- mac80211 callback
218 *
219 ****************************************************************************/
220extern void il3945_hw_handler_setup(struct il_priv *il);
221extern void il3945_hw_setup_deferred_work(struct il_priv *il);
222extern void il3945_hw_cancel_deferred_work(struct il_priv *il);
223extern int il3945_hw_rxq_stop(struct il_priv *il);
224extern int il3945_hw_set_hw_params(struct il_priv *il);
225extern int il3945_hw_nic_init(struct il_priv *il);
226extern int il3945_hw_nic_stop_master(struct il_priv *il);
227extern void il3945_hw_txq_ctx_free(struct il_priv *il);
228extern void il3945_hw_txq_ctx_stop(struct il_priv *il);
229extern int il3945_hw_nic_reset(struct il_priv *il);
230extern int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il,
231 struct il_tx_queue *txq,
232 dma_addr_t addr, u16 len, u8 reset,
233 u8 pad);
234extern void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
235extern int il3945_hw_get_temperature(struct il_priv *il);
236extern int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
237extern unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
238 struct il3945_frame *frame,
239 u8 rate);
240void il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
241 struct ieee80211_tx_info *info,
242 struct ieee80211_hdr *hdr, int sta_id,
243 int tx_id);
244extern int il3945_hw_reg_send_txpower(struct il_priv *il);
245extern int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
246extern void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
247void il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
248extern void il3945_disable_events(struct il_priv *il);
249extern int il4965_get_temperature(const struct il_priv *il);
250extern void il3945_post_associate(struct il_priv *il);
251extern void il3945_config_ap(struct il_priv *il);
252
253extern int il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx);
254
255/**
256 * il3945_hw_find_station - Find station id for a given BSSID
257 * @bssid: MAC address of station ID to find
258 *
259 * NOTE: This should not be hardware specific but the code has
260 * not yet been merged into a single common layer for managing the
261 * station tables.
262 */
263extern u8 il3945_hw_find_station(struct il_priv *il, const u8 * bssid);
264
265extern struct ieee80211_ops il3945_hw_ops;
266
267extern __le32 il3945_get_antenna_flags(const struct il_priv *il);
268extern int il3945_init_hw_rate_table(struct il_priv *il);
269extern void il3945_reg_txpower_periodic(struct il_priv *il);
270extern int il3945_txpower_set_from_eeprom(struct il_priv *il);
271
272extern int il3945_rs_next_rate(struct il_priv *il, int rate);
273
274/* scanning */
275int il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
276void il3945_post_scan(struct il_priv *il);
277
278/* rates */
279extern const struct il3945_rate_info il3945_rates[RATE_COUNT_3945];
280
281/* RSSI to dBm */
282#define IL39_RSSI_OFFSET 95
283
284/*
285 * EEPROM related constants, enums, and structures.
286 */
287#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7)
288
289/*
290 * Mapping of a Tx power level, at factory calibration temperature,
291 * to a radio/DSP gain table idx.
292 * One for each of 5 "sample" power levels in each band.
293 * v_det is measured at the factory, using the 3945's built-in power amplifier
294 * (PA) output voltage detector. This same detector is used during Tx of
295 * long packets in normal operation to provide feedback as to proper output
296 * level.
297 * Data copied from EEPROM.
298 * DO NOT ALTER THIS STRUCTURE!!!
299 */
/* One of the 5 calibration samples per channel group (see comment above).
 * Layout is fixed by the EEPROM image — do not reorder or resize fields. */
struct il3945_eeprom_txpower_sample {
	u8 gain_idx;		/* idx into power (gain) setup table ... */
	s8 power;		/* ... for this pwr level for this chnl group */
	u16 v_det;		/* PA output voltage */
} __packed;
305
306/*
307 * Mappings of Tx power levels -> nominal radio/DSP gain table idxes.
308 * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
309 * Tx power setup code interpolates between the 5 "sample" power levels
310 * to determine the nominal setup for a requested power level.
311 * Data copied from EEPROM.
312 * DO NOT ALTER THIS STRUCTURE!!!
313 */
/* Per-band calibration group (layout fixed by the EEPROM image). */
struct il3945_eeprom_txpower_group {
	struct il3945_eeprom_txpower_sample samples[5];	/* 5 power levels */
	s32 a, b, c, d, e;	/* coefficients for voltage->power
				 * formula (signed) */
	s32 Fa, Fb, Fc, Fd, Fe;	/* these modify coeffs based on
				 * frequency (signed) */
	s8 saturation_power;	/* highest power possible by h/w in this
				 * band */
	u8 group_channel;	/* "representative" channel # in this band */
	s16 temperature;	/* h/w temperature at factory calib this band
				 * (signed) */
} __packed;
326
327/*
328 * Temperature-based Tx-power compensation data, not band-specific.
329 * These coefficients are use to modify a/b/c/d/e coeffs based on
330 * difference between current temperature and factory calib temperature.
331 * Data copied from EEPROM.
332 */
/* Ta..Te adjust the per-group a..e coefficients for the difference
 * between current and factory-calibration temperature (see above). */
struct il3945_eeprom_temperature_corr {
	u32 Ta;
	u32 Tb;
	u32 Tc;
	u32 Td;
	u32 Te;
} __packed;
340
341/*
342 * EEPROM map
343 */
/* Full 1024-byte EEPROM image as read from the device.  Field offsets
 * mirror the on-chip layout exactly — do not reorder or resize. */
struct il3945_eeprom {
	u8 reserved0[16];
	u16 device_id;		/* abs.ofs: 16 */
	u8 reserved1[2];
	u16 pmc;		/* abs.ofs: 20 */
	u8 reserved2[20];
	u8 mac_address[6];	/* abs.ofs: 42 */
	u8 reserved3[58];
	u16 board_revision;	/* abs.ofs: 106 */
	u8 reserved4[11];
	u8 board_pba_number[9];	/* abs.ofs: 119 */
	u8 reserved5[8];
	u16 version;		/* abs.ofs: 136 */
	u8 sku_cap;		/* abs.ofs: 138 */
	u8 leds_mode;		/* abs.ofs: 139 */
	u16 oem_mode;
	u16 wowlan_mode;	/* abs.ofs: 142 */
	u16 leds_time_interval;	/* abs.ofs: 144 */
	u8 leds_off_time;	/* abs.ofs: 146 */
	u8 leds_on_time;	/* abs.ofs: 147 */
	u8 almgor_m_version;	/* abs.ofs: 148 */
	u8 antenna_switch_type;	/* abs.ofs: 149 */
	u8 reserved6[42];
	u8 sku_id[4];		/* abs.ofs: 192 */

/*
 * Per-channel regulatory data.
 *
 * Each channel that *might* be supported by 3945 has a fixed location
 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
 * txpower (MSB).
 *
 * Entries immediately below are for 20 MHz channel width.
 *
 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
 */
	u16 band_1_count;	/* abs.ofs: 196 */
	struct il_eeprom_channel band_1_channels[14];	/* abs.ofs: 198 */

/*
 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
 * 5.0 GHz channels 7, 8, 11, 12, 16
 * (4915-5080MHz) (none of these is ever supported)
 */
	u16 band_2_count;	/* abs.ofs: 226 */
	struct il_eeprom_channel band_2_channels[13];	/* abs.ofs: 228 */

/*
 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
 * (5170-5320MHz)
 */
	u16 band_3_count;	/* abs.ofs: 254 */
	struct il_eeprom_channel band_3_channels[12];	/* abs.ofs: 256 */

/*
 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
 * (5500-5700MHz)
 */
	u16 band_4_count;	/* abs.ofs: 280 */
	struct il_eeprom_channel band_4_channels[11];	/* abs.ofs: 282 */

/*
 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
 * (5725-5825MHz)
 */
	u16 band_5_count;	/* abs.ofs: 304 */
	struct il_eeprom_channel band_5_channels[6];	/* abs.ofs: 306 */

	u8 reserved9[194];

/*
 * 3945 Txpower calibration data.
 */
#define IL_NUM_TX_CALIB_GROUPS 5
	struct il3945_eeprom_txpower_group groups[IL_NUM_TX_CALIB_GROUPS];
/* abs.ofs: 512 */
	struct il3945_eeprom_temperature_corr corrections;	/* abs.ofs: 832 */
	u8 reserved16[172];	/* fill out to full 1024 byte block */
} __packed;
423
424#define IL3945_EEPROM_IMG_SIZE 1024
425
426/* End of EEPROM */
427
428#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */
429#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
430
431/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
432#define IL39_NUM_QUEUES 5
433#define IL39_CMD_QUEUE_NUM 4
434
435#define IL_DEFAULT_TX_RETRY 15
436
437/*********************************************/
438
439#define RFD_SIZE 4
440#define NUM_TFD_CHUNKS 4
441
/*
 * Pack/unpack fields of the TFD control word:
 *  - COUNT (3 bits at bit 24): number of valid buffer chunks in the TFD
 *  - PAD   (bits 28..31): pad value stored in the high nibble
 *
 * Fix: macro arguments are now parenthesized.  Without the parentheses,
 * an argument such as "n & 7" expanded to "n & 7 << 24", which groups
 * as "n & (7 << 24)" because << binds tighter than & — silently wrong.
 */
#define TFD_CTL_COUNT_SET(n)	((n) << 24)
#define TFD_CTL_COUNT_GET(ctl)	(((ctl) >> 24) & 7)
#define TFD_CTL_PAD_SET(n)	((n) << 28)
#define TFD_CTL_PAD_GET(ctl)	((ctl) >> 28)
446
447/* Sizes and addresses for instruction and data memory (SRAM) in
448 * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
449#define IL39_RTC_INST_LOWER_BOUND (0x000000)
450#define IL39_RTC_INST_UPPER_BOUND (0x014000)
451
452#define IL39_RTC_DATA_LOWER_BOUND (0x800000)
453#define IL39_RTC_DATA_UPPER_BOUND (0x808000)
454
455#define IL39_RTC_INST_SIZE (IL39_RTC_INST_UPPER_BOUND - \
456 IL39_RTC_INST_LOWER_BOUND)
457#define IL39_RTC_DATA_SIZE (IL39_RTC_DATA_UPPER_BOUND - \
458 IL39_RTC_DATA_LOWER_BOUND)
459
460#define IL39_MAX_INST_SIZE IL39_RTC_INST_SIZE
461#define IL39_MAX_DATA_SIZE IL39_RTC_DATA_SIZE
462
463/* Size of uCode instruction memory in bootstrap state machine */
464#define IL39_MAX_BSM_SIZE IL39_RTC_INST_SIZE
465
466static inline int
467il3945_hw_valid_rtc_data_addr(u32 addr)
468{
469 return (addr >= IL39_RTC_DATA_LOWER_BOUND &&
470 addr < IL39_RTC_DATA_UPPER_BOUND);
471}
472
473/* Base physical address of il3945_shared is provided to FH39_TSSR_CBB_BASE
474 * and &il3945_shared.rx_read_ptr[0] is provided to FH39_RCSR_RPTR_ADDR(0) */
struct il3945_shared {
	__le32 tx_base_ptr[8];	/* NOTE(review): presumably per-Tx-channel TFD base addresses, little-endian — see FH39_TSSR_CBB_BASE comment above */
} __packed;
478
479static inline u8
480il3945_hw_get_rate(__le16 rate_n_flags)
481{
482 return le16_to_cpu(rate_n_flags) & 0xFF;
483}
484
485static inline u16
486il3945_hw_get_rate_n_flags(__le16 rate_n_flags)
487{
488 return le16_to_cpu(rate_n_flags);
489}
490
491static inline __le16
492il3945_hw_set_rate_n_flags(u8 rate, u16 flags)
493{
494 return cpu_to_le16((u16) rate | flags);
495}
496
497/************************************/
498/* iwl3945 Flow Handler Definitions */
499/************************************/
500
501/**
502 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
503 * Addresses are offsets from device's PCI hardware base address.
504 */
505#define FH39_MEM_LOWER_BOUND (0x0800)
506#define FH39_MEM_UPPER_BOUND (0x1000)
507
508#define FH39_CBCC_TBL (FH39_MEM_LOWER_BOUND + 0x140)
509#define FH39_TFDB_TBL (FH39_MEM_LOWER_BOUND + 0x180)
510#define FH39_RCSR_TBL (FH39_MEM_LOWER_BOUND + 0x400)
511#define FH39_RSSR_TBL (FH39_MEM_LOWER_BOUND + 0x4c0)
512#define FH39_TCSR_TBL (FH39_MEM_LOWER_BOUND + 0x500)
513#define FH39_TSSR_TBL (FH39_MEM_LOWER_BOUND + 0x680)
514
515/* TFDB (Transmit Frame Buffer Descriptor) */
516#define FH39_TFDB(_ch, buf) (FH39_TFDB_TBL + \
517 ((_ch) * 2 + (buf)) * 0x28)
518#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TBL + 0x50 * (_ch))
519
520/* CBCC channel is [0,2] */
521#define FH39_CBCC(_ch) (FH39_CBCC_TBL + (_ch) * 0x8)
522#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00)
523#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04)
524
525/* RCSR channel is [0,2] */
526#define FH39_RCSR(_ch) (FH39_RCSR_TBL + (_ch) * 0x40)
527#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00)
528#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04)
529#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20)
530#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24)
531
532#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0))
533
534/* RSSR */
535#define FH39_RSSR_CTRL (FH39_RSSR_TBL + 0x000)
536#define FH39_RSSR_STATUS (FH39_RSSR_TBL + 0x004)
537
538/* TCSR */
539#define FH39_TCSR(_ch) (FH39_TCSR_TBL + (_ch) * 0x20)
540#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00)
541#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04)
542#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08)
543
544/* TSSR */
545#define FH39_TSSR_CBB_BASE (FH39_TSSR_TBL + 0x000)
546#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TBL + 0x008)
547#define FH39_TSSR_TX_STATUS (FH39_TSSR_TBL + 0x010)
548
549/* DBM */
550
551#define FH39_SRVC_CHNL (6)
552
553#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20)
554#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4)
555
556#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000)
557
558#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000)
559
560#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000)
561
562#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000)
563
564#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000)
565
566#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000)
567
568#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
569#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)
570
571#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
572#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
573
574#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
575
576#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
577
578#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
579#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
580
581#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000)
582
583#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)
584
585#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
586#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)
587
588#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)
589
590#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
591#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)
592
593#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
594#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005)
595
596#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24)
597#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16)
598
599#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
600 (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
601 FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
602
603#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
604
/* One transmit buffer descriptor (DMA address + length) within a TFD. */
struct il3945_tfd_tb {
	__le32 addr;	/* DMA address of the buffer chunk */
	__le32 len;	/* length of the chunk in bytes */
} __packed;
609
/* Transmit Frame Descriptor: control word (see TFD_CTL_* macros) plus
 * up to NUM_TFD_CHUNKS (4) buffer chunks, padded to a fixed size. */
struct il3945_tfd {
	__le32 control_flags;		/* chunk count and pad, packed via TFD_CTL_* */
	struct il3945_tfd_tb tbs[4];	/* buffer chunks; valid count is in control_flags */
	u8 __pad[28];			/* pad descriptor to its hardware-defined size */
} __packed;
615
616#ifdef CONFIG_IWLEGACY_DEBUGFS
617ssize_t il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
618 size_t count, loff_t *ppos);
619ssize_t il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
620 size_t count, loff_t *ppos);
621ssize_t il3945_ucode_general_stats_read(struct file *file,
622 char __user *user_buf, size_t count,
623 loff_t *ppos);
624#endif
625
626#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c b/drivers/net/wireless/iwlegacy/4965-calib.c
index 162d877e6869..d3248e3ef23b 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
+++ b/drivers/net/wireless/iwlegacy/4965-calib.c
@@ -63,15 +63,14 @@
63#include <linux/slab.h> 63#include <linux/slab.h>
64#include <net/mac80211.h> 64#include <net/mac80211.h>
65 65
66#include "iwl-dev.h" 66#include "common.h"
67#include "iwl-core.h" 67#include "4965.h"
68#include "iwl-4965-calib.h"
69 68
70/***************************************************************************** 69/*****************************************************************************
71 * INIT calibrations framework 70 * INIT calibrations framework
72 *****************************************************************************/ 71 *****************************************************************************/
73 72
74struct statistics_general_data { 73struct stats_general_data {
75 u32 beacon_silence_rssi_a; 74 u32 beacon_silence_rssi_a;
76 u32 beacon_silence_rssi_b; 75 u32 beacon_silence_rssi_b;
77 u32 beacon_silence_rssi_c; 76 u32 beacon_silence_rssi_c;
@@ -80,14 +79,15 @@ struct statistics_general_data {
80 u32 beacon_energy_c; 79 u32 beacon_energy_c;
81}; 80};
82 81
83void iwl4965_calib_free_results(struct iwl_priv *priv) 82void
83il4965_calib_free_results(struct il_priv *il)
84{ 84{
85 int i; 85 int i;
86 86
87 for (i = 0; i < IWL_CALIB_MAX; i++) { 87 for (i = 0; i < IL_CALIB_MAX; i++) {
88 kfree(priv->calib_results[i].buf); 88 kfree(il->calib_results[i].buf);
89 priv->calib_results[i].buf = NULL; 89 il->calib_results[i].buf = NULL;
90 priv->calib_results[i].buf_len = 0; 90 il->calib_results[i].buf_len = 0;
91 } 91 }
92} 92}
93 93
@@ -103,10 +103,9 @@ void iwl4965_calib_free_results(struct iwl_priv *priv)
103 * enough to receive all of our own network traffic, but not so 103 * enough to receive all of our own network traffic, but not so
104 * high that our DSP gets too busy trying to lock onto non-network 104 * high that our DSP gets too busy trying to lock onto non-network
105 * activity/noise. */ 105 * activity/noise. */
106static int iwl4965_sens_energy_cck(struct iwl_priv *priv, 106static int
107 u32 norm_fa, 107il4965_sens_energy_cck(struct il_priv *il, u32 norm_fa, u32 rx_enable_time,
108 u32 rx_enable_time, 108 struct stats_general_data *rx_info)
109 struct statistics_general_data *rx_info)
110{ 109{
111 u32 max_nrg_cck = 0; 110 u32 max_nrg_cck = 0;
112 int i = 0; 111 int i = 0;
@@ -129,22 +128,22 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
129 u32 false_alarms = norm_fa * 200 * 1024; 128 u32 false_alarms = norm_fa * 200 * 1024;
130 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time; 129 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
131 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time; 130 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
132 struct iwl_sensitivity_data *data = NULL; 131 struct il_sensitivity_data *data = NULL;
133 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; 132 const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
134 133
135 data = &(priv->sensitivity_data); 134 data = &(il->sensitivity_data);
136 135
137 data->nrg_auto_corr_silence_diff = 0; 136 data->nrg_auto_corr_silence_diff = 0;
138 137
139 /* Find max silence rssi among all 3 receivers. 138 /* Find max silence rssi among all 3 receivers.
140 * This is background noise, which may include transmissions from other 139 * This is background noise, which may include transmissions from other
141 * networks, measured during silence before our network's beacon */ 140 * networks, measured during silence before our network's beacon */
142 silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a & 141 silence_rssi_a =
143 ALL_BAND_FILTER) >> 8); 142 (u8) ((rx_info->beacon_silence_rssi_a & ALL_BAND_FILTER) >> 8);
144 silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b & 143 silence_rssi_b =
145 ALL_BAND_FILTER) >> 8); 144 (u8) ((rx_info->beacon_silence_rssi_b & ALL_BAND_FILTER) >> 8);
146 silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c & 145 silence_rssi_c =
147 ALL_BAND_FILTER) >> 8); 146 (u8) ((rx_info->beacon_silence_rssi_c & ALL_BAND_FILTER) >> 8);
148 147
149 val = max(silence_rssi_b, silence_rssi_c); 148 val = max(silence_rssi_b, silence_rssi_c);
150 max_silence_rssi = max(silence_rssi_a, (u8) val); 149 max_silence_rssi = max(silence_rssi_a, (u8) val);
@@ -160,9 +159,8 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
160 val = data->nrg_silence_rssi[i]; 159 val = data->nrg_silence_rssi[i];
161 silence_ref = max(silence_ref, val); 160 silence_ref = max(silence_ref, val);
162 } 161 }
163 IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n", 162 D_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n", silence_rssi_a,
164 silence_rssi_a, silence_rssi_b, silence_rssi_c, 163 silence_rssi_b, silence_rssi_c, silence_ref);
165 silence_ref);
166 164
167 /* Find max rx energy (min value!) among all 3 receivers, 165 /* Find max rx energy (min value!) among all 3 receivers,
168 * measured during beacon frame. 166 * measured during beacon frame.
@@ -184,9 +182,9 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
184 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i])); 182 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
185 max_nrg_cck += 6; 183 max_nrg_cck += 6;
186 184
187 IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n", 185 D_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
188 rx_info->beacon_energy_a, rx_info->beacon_energy_b, 186 rx_info->beacon_energy_a, rx_info->beacon_energy_b,
189 rx_info->beacon_energy_c, max_nrg_cck - 6); 187 rx_info->beacon_energy_c, max_nrg_cck - 6);
190 188
191 /* Count number of consecutive beacons with fewer-than-desired 189 /* Count number of consecutive beacons with fewer-than-desired
192 * false alarms. */ 190 * false alarms. */
@@ -194,35 +192,34 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
194 data->num_in_cck_no_fa++; 192 data->num_in_cck_no_fa++;
195 else 193 else
196 data->num_in_cck_no_fa = 0; 194 data->num_in_cck_no_fa = 0;
197 IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n", 195 D_CALIB("consecutive bcns with few false alarms = %u\n",
198 data->num_in_cck_no_fa); 196 data->num_in_cck_no_fa);
199 197
200 /* If we got too many false alarms this time, reduce sensitivity */ 198 /* If we got too many false alarms this time, reduce sensitivity */
201 if ((false_alarms > max_false_alarms) && 199 if (false_alarms > max_false_alarms &&
202 (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) { 200 data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) {
203 IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n", 201 D_CALIB("norm FA %u > max FA %u\n", false_alarms,
204 false_alarms, max_false_alarms); 202 max_false_alarms);
205 IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n"); 203 D_CALIB("... reducing sensitivity\n");
206 data->nrg_curr_state = IWL_FA_TOO_MANY; 204 data->nrg_curr_state = IL_FA_TOO_MANY;
207 /* Store for "fewer than desired" on later beacon */ 205 /* Store for "fewer than desired" on later beacon */
208 data->nrg_silence_ref = silence_ref; 206 data->nrg_silence_ref = silence_ref;
209 207
210 /* increase energy threshold (reduce nrg value) 208 /* increase energy threshold (reduce nrg value)
211 * to decrease sensitivity */ 209 * to decrease sensitivity */
212 data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK; 210 data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
213 /* Else if we got fewer than desired, increase sensitivity */ 211 /* Else if we got fewer than desired, increase sensitivity */
214 } else if (false_alarms < min_false_alarms) { 212 } else if (false_alarms < min_false_alarms) {
215 data->nrg_curr_state = IWL_FA_TOO_FEW; 213 data->nrg_curr_state = IL_FA_TOO_FEW;
216 214
217 /* Compare silence level with silence level for most recent 215 /* Compare silence level with silence level for most recent
218 * healthy number or too many false alarms */ 216 * healthy number or too many false alarms */
219 data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref - 217 data->nrg_auto_corr_silence_diff =
220 (s32)silence_ref; 218 (s32) data->nrg_silence_ref - (s32) silence_ref;
221 219
222 IWL_DEBUG_CALIB(priv, 220 D_CALIB("norm FA %u < min FA %u, silence diff %d\n",
223 "norm FA %u < min FA %u, silence diff %d\n", 221 false_alarms, min_false_alarms,
224 false_alarms, min_false_alarms, 222 data->nrg_auto_corr_silence_diff);
225 data->nrg_auto_corr_silence_diff);
226 223
227 /* Increase value to increase sensitivity, but only if: 224 /* Increase value to increase sensitivity, but only if:
228 * 1a) previous beacon did *not* have *too many* false alarms 225 * 1a) previous beacon did *not* have *too many* false alarms
@@ -230,23 +227,22 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
230 * from a previous beacon with too many, or healthy # FAs 227 * from a previous beacon with too many, or healthy # FAs
231 * OR 2) We've seen a lot of beacons (100) with too few 228 * OR 2) We've seen a lot of beacons (100) with too few
232 * false alarms */ 229 * false alarms */
233 if ((data->nrg_prev_state != IWL_FA_TOO_MANY) && 230 if (data->nrg_prev_state != IL_FA_TOO_MANY &&
234 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || 231 (data->nrg_auto_corr_silence_diff > NRG_DIFF ||
235 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { 232 data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) {
236 233
237 IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n"); 234 D_CALIB("... increasing sensitivity\n");
238 /* Increase nrg value to increase sensitivity */ 235 /* Increase nrg value to increase sensitivity */
239 val = data->nrg_th_cck + NRG_STEP_CCK; 236 val = data->nrg_th_cck + NRG_STEP_CCK;
240 data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val); 237 data->nrg_th_cck = min((u32) ranges->min_nrg_cck, val);
241 } else { 238 } else {
242 IWL_DEBUG_CALIB(priv, 239 D_CALIB("... but not changing sensitivity\n");
243 "... but not changing sensitivity\n");
244 } 240 }
245 241
246 /* Else we got a healthy number of false alarms, keep status quo */ 242 /* Else we got a healthy number of false alarms, keep status quo */
247 } else { 243 } else {
248 IWL_DEBUG_CALIB(priv, " FA in safe zone\n"); 244 D_CALIB(" FA in safe zone\n");
249 data->nrg_curr_state = IWL_FA_GOOD_RANGE; 245 data->nrg_curr_state = IL_FA_GOOD_RANGE;
250 246
251 /* Store for use in "fewer than desired" with later beacon */ 247 /* Store for use in "fewer than desired" with later beacon */
252 data->nrg_silence_ref = silence_ref; 248 data->nrg_silence_ref = silence_ref;
@@ -254,8 +250,8 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
254 /* If previous beacon had too many false alarms, 250 /* If previous beacon had too many false alarms,
255 * give it some extra margin by reducing sensitivity again 251 * give it some extra margin by reducing sensitivity again
256 * (but don't go below measured energy of desired Rx) */ 252 * (but don't go below measured energy of desired Rx) */
257 if (IWL_FA_TOO_MANY == data->nrg_prev_state) { 253 if (IL_FA_TOO_MANY == data->nrg_prev_state) {
258 IWL_DEBUG_CALIB(priv, "... increasing margin\n"); 254 D_CALIB("... increasing margin\n");
259 if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN)) 255 if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
260 data->nrg_th_cck -= NRG_MARGIN; 256 data->nrg_th_cck -= NRG_MARGIN;
261 else 257 else
@@ -269,7 +265,7 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
269 * Lower value is higher energy, so we use max()! 265 * Lower value is higher energy, so we use max()!
270 */ 266 */
271 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck); 267 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
272 IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck); 268 D_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);
273 269
274 data->nrg_prev_state = data->nrg_curr_state; 270 data->nrg_prev_state = data->nrg_curr_state;
275 271
@@ -284,190 +280,187 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
284 else { 280 else {
285 val = data->auto_corr_cck + AUTO_CORR_STEP_CCK; 281 val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
286 data->auto_corr_cck = 282 data->auto_corr_cck =
287 min((u32)ranges->auto_corr_max_cck, val); 283 min((u32) ranges->auto_corr_max_cck, val);
288 } 284 }
289 val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK; 285 val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
290 data->auto_corr_cck_mrc = 286 data->auto_corr_cck_mrc =
291 min((u32)ranges->auto_corr_max_cck_mrc, val); 287 min((u32) ranges->auto_corr_max_cck_mrc, val);
292 } else if ((false_alarms < min_false_alarms) && 288 } else if (false_alarms < min_false_alarms &&
293 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || 289 (data->nrg_auto_corr_silence_diff > NRG_DIFF ||
294 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { 290 data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) {
295 291
296 /* Decrease auto_corr values to increase sensitivity */ 292 /* Decrease auto_corr values to increase sensitivity */
297 val = data->auto_corr_cck - AUTO_CORR_STEP_CCK; 293 val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
298 data->auto_corr_cck = 294 data->auto_corr_cck = max((u32) ranges->auto_corr_min_cck, val);
299 max((u32)ranges->auto_corr_min_cck, val);
300 val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK; 295 val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
301 data->auto_corr_cck_mrc = 296 data->auto_corr_cck_mrc =
302 max((u32)ranges->auto_corr_min_cck_mrc, val); 297 max((u32) ranges->auto_corr_min_cck_mrc, val);
303 } 298 }
304 299
305 return 0; 300 return 0;
306} 301}
307 302
308 303static int
309static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv, 304il4965_sens_auto_corr_ofdm(struct il_priv *il, u32 norm_fa, u32 rx_enable_time)
310 u32 norm_fa,
311 u32 rx_enable_time)
312{ 305{
313 u32 val; 306 u32 val;
314 u32 false_alarms = norm_fa * 200 * 1024; 307 u32 false_alarms = norm_fa * 200 * 1024;
315 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time; 308 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
316 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time; 309 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
317 struct iwl_sensitivity_data *data = NULL; 310 struct il_sensitivity_data *data = NULL;
318 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; 311 const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
319 312
320 data = &(priv->sensitivity_data); 313 data = &(il->sensitivity_data);
321 314
322 /* If we got too many false alarms this time, reduce sensitivity */ 315 /* If we got too many false alarms this time, reduce sensitivity */
323 if (false_alarms > max_false_alarms) { 316 if (false_alarms > max_false_alarms) {
324 317
325 IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n", 318 D_CALIB("norm FA %u > max FA %u)\n", false_alarms,
326 false_alarms, max_false_alarms); 319 max_false_alarms);
327 320
328 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM; 321 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
329 data->auto_corr_ofdm = 322 data->auto_corr_ofdm =
330 min((u32)ranges->auto_corr_max_ofdm, val); 323 min((u32) ranges->auto_corr_max_ofdm, val);
331 324
332 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM; 325 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
333 data->auto_corr_ofdm_mrc = 326 data->auto_corr_ofdm_mrc =
334 min((u32)ranges->auto_corr_max_ofdm_mrc, val); 327 min((u32) ranges->auto_corr_max_ofdm_mrc, val);
335 328
336 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM; 329 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
337 data->auto_corr_ofdm_x1 = 330 data->auto_corr_ofdm_x1 =
338 min((u32)ranges->auto_corr_max_ofdm_x1, val); 331 min((u32) ranges->auto_corr_max_ofdm_x1, val);
339 332
340 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM; 333 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
341 data->auto_corr_ofdm_mrc_x1 = 334 data->auto_corr_ofdm_mrc_x1 =
342 min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val); 335 min((u32) ranges->auto_corr_max_ofdm_mrc_x1, val);
343 } 336 }
344 337
345 /* Else if we got fewer than desired, increase sensitivity */ 338 /* Else if we got fewer than desired, increase sensitivity */
346 else if (false_alarms < min_false_alarms) { 339 else if (false_alarms < min_false_alarms) {
347 340
348 IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n", 341 D_CALIB("norm FA %u < min FA %u\n", false_alarms,
349 false_alarms, min_false_alarms); 342 min_false_alarms);
350 343
351 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM; 344 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
352 data->auto_corr_ofdm = 345 data->auto_corr_ofdm =
353 max((u32)ranges->auto_corr_min_ofdm, val); 346 max((u32) ranges->auto_corr_min_ofdm, val);
354 347
355 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM; 348 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
356 data->auto_corr_ofdm_mrc = 349 data->auto_corr_ofdm_mrc =
357 max((u32)ranges->auto_corr_min_ofdm_mrc, val); 350 max((u32) ranges->auto_corr_min_ofdm_mrc, val);
358 351
359 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM; 352 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
360 data->auto_corr_ofdm_x1 = 353 data->auto_corr_ofdm_x1 =
361 max((u32)ranges->auto_corr_min_ofdm_x1, val); 354 max((u32) ranges->auto_corr_min_ofdm_x1, val);
362 355
363 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM; 356 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
364 data->auto_corr_ofdm_mrc_x1 = 357 data->auto_corr_ofdm_mrc_x1 =
365 max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val); 358 max((u32) ranges->auto_corr_min_ofdm_mrc_x1, val);
366 } else { 359 } else {
367 IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n", 360 D_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
368 min_false_alarms, false_alarms, max_false_alarms); 361 min_false_alarms, false_alarms, max_false_alarms);
369 } 362 }
370 return 0; 363 return 0;
371} 364}
372 365
373static void iwl4965_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv, 366static void
374 struct iwl_sensitivity_data *data, 367il4965_prepare_legacy_sensitivity_tbl(struct il_priv *il,
375 __le16 *tbl) 368 struct il_sensitivity_data *data,
369 __le16 *tbl)
376{ 370{
377 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] = 371 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
378 cpu_to_le16((u16)data->auto_corr_ofdm); 372 cpu_to_le16((u16) data->auto_corr_ofdm);
379 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] = 373 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
380 cpu_to_le16((u16)data->auto_corr_ofdm_mrc); 374 cpu_to_le16((u16) data->auto_corr_ofdm_mrc);
381 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] = 375 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
382 cpu_to_le16((u16)data->auto_corr_ofdm_x1); 376 cpu_to_le16((u16) data->auto_corr_ofdm_x1);
383 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] = 377 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
384 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1); 378 cpu_to_le16((u16) data->auto_corr_ofdm_mrc_x1);
385 379
386 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] = 380 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
387 cpu_to_le16((u16)data->auto_corr_cck); 381 cpu_to_le16((u16) data->auto_corr_cck);
388 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] = 382 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
389 cpu_to_le16((u16)data->auto_corr_cck_mrc); 383 cpu_to_le16((u16) data->auto_corr_cck_mrc);
390 384
391 tbl[HD_MIN_ENERGY_CCK_DET_INDEX] = 385 tbl[HD_MIN_ENERGY_CCK_DET_IDX] = cpu_to_le16((u16) data->nrg_th_cck);
392 cpu_to_le16((u16)data->nrg_th_cck); 386 tbl[HD_MIN_ENERGY_OFDM_DET_IDX] = cpu_to_le16((u16) data->nrg_th_ofdm);
393 tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] = 387
394 cpu_to_le16((u16)data->nrg_th_ofdm); 388 tbl[HD_BARKER_CORR_TH_ADD_MIN_IDX] =
395 389 cpu_to_le16(data->barker_corr_th_min);
396 tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] = 390 tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX] =
397 cpu_to_le16(data->barker_corr_th_min); 391 cpu_to_le16(data->barker_corr_th_min_mrc);
398 tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] = 392 tbl[HD_OFDM_ENERGY_TH_IN_IDX] = cpu_to_le16(data->nrg_th_cca);
399 cpu_to_le16(data->barker_corr_th_min_mrc); 393
400 tbl[HD_OFDM_ENERGY_TH_IN_INDEX] = 394 D_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
401 cpu_to_le16(data->nrg_th_cca); 395 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
402 396 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
403 IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n", 397 data->nrg_th_ofdm);
404 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc, 398
405 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1, 399 D_CALIB("cck: ac %u mrc %u thresh %u\n", data->auto_corr_cck,
406 data->nrg_th_ofdm); 400 data->auto_corr_cck_mrc, data->nrg_th_cck);
407
408 IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
409 data->auto_corr_cck, data->auto_corr_cck_mrc,
410 data->nrg_th_cck);
411} 401}
412 402
413/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ 403/* Prepare a C_SENSITIVITY, send to uCode if values have changed */
414static int iwl4965_sensitivity_write(struct iwl_priv *priv) 404static int
405il4965_sensitivity_write(struct il_priv *il)
415{ 406{
416 struct iwl_sensitivity_cmd cmd; 407 struct il_sensitivity_cmd cmd;
417 struct iwl_sensitivity_data *data = NULL; 408 struct il_sensitivity_data *data = NULL;
418 struct iwl_host_cmd cmd_out = { 409 struct il_host_cmd cmd_out = {
419 .id = SENSITIVITY_CMD, 410 .id = C_SENSITIVITY,
420 .len = sizeof(struct iwl_sensitivity_cmd), 411 .len = sizeof(struct il_sensitivity_cmd),
421 .flags = CMD_ASYNC, 412 .flags = CMD_ASYNC,
422 .data = &cmd, 413 .data = &cmd,
423 }; 414 };
424 415
425 data = &(priv->sensitivity_data); 416 data = &(il->sensitivity_data);
426 417
427 memset(&cmd, 0, sizeof(cmd)); 418 memset(&cmd, 0, sizeof(cmd));
428 419
429 iwl4965_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]); 420 il4965_prepare_legacy_sensitivity_tbl(il, data, &cmd.table[0]);
430 421
431 /* Update uCode's "work" table, and copy it to DSP */ 422 /* Update uCode's "work" table, and copy it to DSP */
432 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE; 423 cmd.control = C_SENSITIVITY_CONTROL_WORK_TBL;
433 424
434 /* Don't send command to uCode if nothing has changed */ 425 /* Don't send command to uCode if nothing has changed */
435 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]), 426 if (!memcmp
436 sizeof(u16)*HD_TABLE_SIZE)) { 427 (&cmd.table[0], &(il->sensitivity_tbl[0]),
437 IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n"); 428 sizeof(u16) * HD_TBL_SIZE)) {
429 D_CALIB("No change in C_SENSITIVITY\n");
438 return 0; 430 return 0;
439 } 431 }
440 432
441 /* Copy table for comparison next time */ 433 /* Copy table for comparison next time */
442 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]), 434 memcpy(&(il->sensitivity_tbl[0]), &(cmd.table[0]),
443 sizeof(u16)*HD_TABLE_SIZE); 435 sizeof(u16) * HD_TBL_SIZE);
444 436
445 return iwl_legacy_send_cmd(priv, &cmd_out); 437 return il_send_cmd(il, &cmd_out);
446} 438}
447 439
448void iwl4965_init_sensitivity(struct iwl_priv *priv) 440void
441il4965_init_sensitivity(struct il_priv *il)
449{ 442{
450 int ret = 0; 443 int ret = 0;
451 int i; 444 int i;
452 struct iwl_sensitivity_data *data = NULL; 445 struct il_sensitivity_data *data = NULL;
453 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; 446 const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
454 447
455 if (priv->disable_sens_cal) 448 if (il->disable_sens_cal)
456 return; 449 return;
457 450
458 IWL_DEBUG_CALIB(priv, "Start iwl4965_init_sensitivity\n"); 451 D_CALIB("Start il4965_init_sensitivity\n");
459 452
460 /* Clear driver's sensitivity algo data */ 453 /* Clear driver's sensitivity algo data */
461 data = &(priv->sensitivity_data); 454 data = &(il->sensitivity_data);
462 455
463 if (ranges == NULL) 456 if (ranges == NULL)
464 return; 457 return;
465 458
466 memset(data, 0, sizeof(struct iwl_sensitivity_data)); 459 memset(data, 0, sizeof(struct il_sensitivity_data));
467 460
468 data->num_in_cck_no_fa = 0; 461 data->num_in_cck_no_fa = 0;
469 data->nrg_curr_state = IWL_FA_TOO_MANY; 462 data->nrg_curr_state = IL_FA_TOO_MANY;
470 data->nrg_prev_state = IWL_FA_TOO_MANY; 463 data->nrg_prev_state = IL_FA_TOO_MANY;
471 data->nrg_silence_ref = 0; 464 data->nrg_silence_ref = 0;
472 data->nrg_silence_idx = 0; 465 data->nrg_silence_idx = 0;
473 data->nrg_energy_idx = 0; 466 data->nrg_energy_idx = 0;
@@ -478,9 +471,9 @@ void iwl4965_init_sensitivity(struct iwl_priv *priv)
478 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) 471 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
479 data->nrg_silence_rssi[i] = 0; 472 data->nrg_silence_rssi[i] = 0;
480 473
481 data->auto_corr_ofdm = ranges->auto_corr_min_ofdm; 474 data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
482 data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc; 475 data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
483 data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1; 476 data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
484 data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1; 477 data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
485 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF; 478 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
486 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc; 479 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
@@ -495,11 +488,12 @@ void iwl4965_init_sensitivity(struct iwl_priv *priv)
495 data->last_bad_plcp_cnt_cck = 0; 488 data->last_bad_plcp_cnt_cck = 0;
496 data->last_fa_cnt_cck = 0; 489 data->last_fa_cnt_cck = 0;
497 490
498 ret |= iwl4965_sensitivity_write(priv); 491 ret |= il4965_sensitivity_write(il);
499 IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret); 492 D_CALIB("<<return 0x%X\n", ret);
500} 493}
501 494
502void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp) 495void
496il4965_sensitivity_calibration(struct il_priv *il, void *resp)
503{ 497{
504 u32 rx_enable_time; 498 u32 rx_enable_time;
505 u32 fa_cck; 499 u32 fa_cck;
@@ -508,31 +502,31 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
508 u32 bad_plcp_ofdm; 502 u32 bad_plcp_ofdm;
509 u32 norm_fa_ofdm; 503 u32 norm_fa_ofdm;
510 u32 norm_fa_cck; 504 u32 norm_fa_cck;
511 struct iwl_sensitivity_data *data = NULL; 505 struct il_sensitivity_data *data = NULL;
512 struct statistics_rx_non_phy *rx_info; 506 struct stats_rx_non_phy *rx_info;
513 struct statistics_rx_phy *ofdm, *cck; 507 struct stats_rx_phy *ofdm, *cck;
514 unsigned long flags; 508 unsigned long flags;
515 struct statistics_general_data statis; 509 struct stats_general_data statis;
516 510
517 if (priv->disable_sens_cal) 511 if (il->disable_sens_cal)
518 return; 512 return;
519 513
520 data = &(priv->sensitivity_data); 514 data = &(il->sensitivity_data);
521 515
522 if (!iwl_legacy_is_any_associated(priv)) { 516 if (!il_is_any_associated(il)) {
523 IWL_DEBUG_CALIB(priv, "<< - not associated\n"); 517 D_CALIB("<< - not associated\n");
524 return; 518 return;
525 } 519 }
526 520
527 spin_lock_irqsave(&priv->lock, flags); 521 spin_lock_irqsave(&il->lock, flags);
528 522
529 rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general); 523 rx_info = &(((struct il_notif_stats *)resp)->rx.general);
530 ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm); 524 ofdm = &(((struct il_notif_stats *)resp)->rx.ofdm);
531 cck = &(((struct iwl_notif_statistics *)resp)->rx.cck); 525 cck = &(((struct il_notif_stats *)resp)->rx.cck);
532 526
533 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { 527 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
534 IWL_DEBUG_CALIB(priv, "<< invalid data.\n"); 528 D_CALIB("<< invalid data.\n");
535 spin_unlock_irqrestore(&priv->lock, flags); 529 spin_unlock_irqrestore(&il->lock, flags);
536 return; 530 return;
537 } 531 }
538 532
@@ -544,30 +538,27 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
544 bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err); 538 bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
545 539
546 statis.beacon_silence_rssi_a = 540 statis.beacon_silence_rssi_a =
547 le32_to_cpu(rx_info->beacon_silence_rssi_a); 541 le32_to_cpu(rx_info->beacon_silence_rssi_a);
548 statis.beacon_silence_rssi_b = 542 statis.beacon_silence_rssi_b =
549 le32_to_cpu(rx_info->beacon_silence_rssi_b); 543 le32_to_cpu(rx_info->beacon_silence_rssi_b);
550 statis.beacon_silence_rssi_c = 544 statis.beacon_silence_rssi_c =
551 le32_to_cpu(rx_info->beacon_silence_rssi_c); 545 le32_to_cpu(rx_info->beacon_silence_rssi_c);
552 statis.beacon_energy_a = 546 statis.beacon_energy_a = le32_to_cpu(rx_info->beacon_energy_a);
553 le32_to_cpu(rx_info->beacon_energy_a); 547 statis.beacon_energy_b = le32_to_cpu(rx_info->beacon_energy_b);
554 statis.beacon_energy_b = 548 statis.beacon_energy_c = le32_to_cpu(rx_info->beacon_energy_c);
555 le32_to_cpu(rx_info->beacon_energy_b);
556 statis.beacon_energy_c =
557 le32_to_cpu(rx_info->beacon_energy_c);
558 549
559 spin_unlock_irqrestore(&priv->lock, flags); 550 spin_unlock_irqrestore(&il->lock, flags);
560 551
561 IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time); 552 D_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);
562 553
563 if (!rx_enable_time) { 554 if (!rx_enable_time) {
564 IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n"); 555 D_CALIB("<< RX Enable Time == 0!\n");
565 return; 556 return;
566 } 557 }
567 558
568 /* These statistics increase monotonically, and do not reset 559 /* These stats increase monotonically, and do not reset
569 * at each beacon. Calculate difference from last value, or just 560 * at each beacon. Calculate difference from last value, or just
570 * use the new statistics value if it has reset or wrapped around. */ 561 * use the new stats value if it has reset or wrapped around. */
571 if (data->last_bad_plcp_cnt_cck > bad_plcp_cck) 562 if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
572 data->last_bad_plcp_cnt_cck = bad_plcp_cck; 563 data->last_bad_plcp_cnt_cck = bad_plcp_cck;
573 else { 564 else {
@@ -600,17 +591,17 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
600 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm; 591 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
601 norm_fa_cck = fa_cck + bad_plcp_cck; 592 norm_fa_cck = fa_cck + bad_plcp_cck;
602 593
603 IWL_DEBUG_CALIB(priv, 594 D_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
604 "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck, 595 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
605 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
606 596
607 iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time); 597 il4965_sens_auto_corr_ofdm(il, norm_fa_ofdm, rx_enable_time);
608 iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis); 598 il4965_sens_energy_cck(il, norm_fa_cck, rx_enable_time, &statis);
609 599
610 iwl4965_sensitivity_write(priv); 600 il4965_sensitivity_write(il);
611} 601}
612 602
613static inline u8 iwl4965_find_first_chain(u8 mask) 603static inline u8
604il4965_find_first_chain(u8 mask)
614{ 605{
615 if (mask & ANT_A) 606 if (mask & ANT_A)
616 return CHAIN_A; 607 return CHAIN_A;
@@ -624,8 +615,8 @@ static inline u8 iwl4965_find_first_chain(u8 mask)
624 * disconnected. 615 * disconnected.
625 */ 616 */
626static void 617static void
627iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig, 618il4965_find_disconn_antenna(struct il_priv *il, u32 * average_sig,
628 struct iwl_chain_noise_data *data) 619 struct il_chain_noise_data *data)
629{ 620{
630 u32 active_chains = 0; 621 u32 active_chains = 0;
631 u32 max_average_sig; 622 u32 max_average_sig;
@@ -634,12 +625,15 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
634 u8 first_chain; 625 u8 first_chain;
635 u16 i = 0; 626 u16 i = 0;
636 627
637 average_sig[0] = data->chain_signal_a / 628 average_sig[0] =
638 priv->cfg->base_params->chain_noise_num_beacons; 629 data->chain_signal_a /
639 average_sig[1] = data->chain_signal_b / 630 il->cfg->base_params->chain_noise_num_beacons;
640 priv->cfg->base_params->chain_noise_num_beacons; 631 average_sig[1] =
641 average_sig[2] = data->chain_signal_c / 632 data->chain_signal_b /
642 priv->cfg->base_params->chain_noise_num_beacons; 633 il->cfg->base_params->chain_noise_num_beacons;
634 average_sig[2] =
635 data->chain_signal_c /
636 il->cfg->base_params->chain_noise_num_beacons;
643 637
644 if (average_sig[0] >= average_sig[1]) { 638 if (average_sig[0] >= average_sig[1]) {
645 max_average_sig = average_sig[0]; 639 max_average_sig = average_sig[0];
@@ -657,10 +651,10 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
657 active_chains = (1 << max_average_sig_antenna_i); 651 active_chains = (1 << max_average_sig_antenna_i);
658 } 652 }
659 653
660 IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n", 654 D_CALIB("average_sig: a %d b %d c %d\n", average_sig[0], average_sig[1],
661 average_sig[0], average_sig[1], average_sig[2]); 655 average_sig[2]);
662 IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n", 656 D_CALIB("max_average_sig = %d, antenna %d\n", max_average_sig,
663 max_average_sig, max_average_sig_antenna_i); 657 max_average_sig_antenna_i);
664 658
665 /* Compare signal strengths for all 3 receivers. */ 659 /* Compare signal strengths for all 3 receivers. */
666 for (i = 0; i < NUM_RX_CHAINS; i++) { 660 for (i = 0; i < NUM_RX_CHAINS; i++) {
@@ -673,9 +667,9 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
673 data->disconn_array[i] = 1; 667 data->disconn_array[i] = 1;
674 else 668 else
675 active_chains |= (1 << i); 669 active_chains |= (1 << i);
676 IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d " 670 D_CALIB("i = %d rssiDelta = %d "
677 "disconn_array[i] = %d\n", 671 "disconn_array[i] = %d\n", i, rssi_delta,
678 i, rssi_delta, data->disconn_array[i]); 672 data->disconn_array[i]);
679 } 673 }
680 } 674 }
681 675
@@ -689,119 +683,110 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
689 * To be safe, simply mask out any chains that we know 683 * To be safe, simply mask out any chains that we know
690 * are not on the device. 684 * are not on the device.
691 */ 685 */
692 active_chains &= priv->hw_params.valid_rx_ant; 686 active_chains &= il->hw_params.valid_rx_ant;
693 687
694 num_tx_chains = 0; 688 num_tx_chains = 0;
695 for (i = 0; i < NUM_RX_CHAINS; i++) { 689 for (i = 0; i < NUM_RX_CHAINS; i++) {
696 /* loops on all the bits of 690 /* loops on all the bits of
697 * priv->hw_setting.valid_tx_ant */ 691 * il->hw_setting.valid_tx_ant */
698 u8 ant_msk = (1 << i); 692 u8 ant_msk = (1 << i);
699 if (!(priv->hw_params.valid_tx_ant & ant_msk)) 693 if (!(il->hw_params.valid_tx_ant & ant_msk))
700 continue; 694 continue;
701 695
702 num_tx_chains++; 696 num_tx_chains++;
703 if (data->disconn_array[i] == 0) 697 if (data->disconn_array[i] == 0)
704 /* there is a Tx antenna connected */ 698 /* there is a Tx antenna connected */
705 break; 699 break;
706 if (num_tx_chains == priv->hw_params.tx_chains_num && 700 if (num_tx_chains == il->hw_params.tx_chains_num &&
707 data->disconn_array[i]) { 701 data->disconn_array[i]) {
708 /* 702 /*
709 * If all chains are disconnected 703 * If all chains are disconnected
710 * connect the first valid tx chain 704 * connect the first valid tx chain
711 */ 705 */
712 first_chain = 706 first_chain =
713 iwl4965_find_first_chain(priv->cfg->valid_tx_ant); 707 il4965_find_first_chain(il->cfg->valid_tx_ant);
714 data->disconn_array[first_chain] = 0; 708 data->disconn_array[first_chain] = 0;
715 active_chains |= BIT(first_chain); 709 active_chains |= BIT(first_chain);
716 IWL_DEBUG_CALIB(priv, 710 D_CALIB("All Tx chains are disconnected"
717 "All Tx chains are disconnected W/A - declare %d as connected\n", 711 "- declare %d as connected\n", first_chain);
718 first_chain);
719 break; 712 break;
720 } 713 }
721 } 714 }
722 715
723 if (active_chains != priv->hw_params.valid_rx_ant && 716 if (active_chains != il->hw_params.valid_rx_ant &&
724 active_chains != priv->chain_noise_data.active_chains) 717 active_chains != il->chain_noise_data.active_chains)
725 IWL_DEBUG_CALIB(priv, 718 D_CALIB("Detected that not all antennas are connected! "
726 "Detected that not all antennas are connected! " 719 "Connected: %#x, valid: %#x.\n", active_chains,
727 "Connected: %#x, valid: %#x.\n", 720 il->hw_params.valid_rx_ant);
728 active_chains, priv->hw_params.valid_rx_ant);
729 721
730 /* Save for use within RXON, TX, SCAN commands, etc. */ 722 /* Save for use within RXON, TX, SCAN commands, etc. */
731 data->active_chains = active_chains; 723 data->active_chains = active_chains;
732 IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n", 724 D_CALIB("active_chains (bitwise) = 0x%x\n", active_chains);
733 active_chains);
734} 725}
735 726
736static void iwl4965_gain_computation(struct iwl_priv *priv, 727static void
737 u32 *average_noise, 728il4965_gain_computation(struct il_priv *il, u32 * average_noise,
738 u16 min_average_noise_antenna_i, 729 u16 min_average_noise_antenna_i, u32 min_average_noise,
739 u32 min_average_noise, 730 u8 default_chain)
740 u8 default_chain)
741{ 731{
742 int i, ret; 732 int i, ret;
743 struct iwl_chain_noise_data *data = &priv->chain_noise_data; 733 struct il_chain_noise_data *data = &il->chain_noise_data;
744 734
745 data->delta_gain_code[min_average_noise_antenna_i] = 0; 735 data->delta_gain_code[min_average_noise_antenna_i] = 0;
746 736
747 for (i = default_chain; i < NUM_RX_CHAINS; i++) { 737 for (i = default_chain; i < NUM_RX_CHAINS; i++) {
748 s32 delta_g = 0; 738 s32 delta_g = 0;
749 739
750 if (!(data->disconn_array[i]) && 740 if (!data->disconn_array[i] &&
751 (data->delta_gain_code[i] == 741 data->delta_gain_code[i] ==
752 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) { 742 CHAIN_NOISE_DELTA_GAIN_INIT_VAL) {
753 delta_g = average_noise[i] - min_average_noise; 743 delta_g = average_noise[i] - min_average_noise;
754 data->delta_gain_code[i] = (u8)((delta_g * 10) / 15); 744 data->delta_gain_code[i] = (u8) ((delta_g * 10) / 15);
755 data->delta_gain_code[i] = 745 data->delta_gain_code[i] =
756 min(data->delta_gain_code[i], 746 min(data->delta_gain_code[i],
757 (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); 747 (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
758 748
759 data->delta_gain_code[i] = 749 data->delta_gain_code[i] =
760 (data->delta_gain_code[i] | (1 << 2)); 750 (data->delta_gain_code[i] | (1 << 2));
761 } else { 751 } else {
762 data->delta_gain_code[i] = 0; 752 data->delta_gain_code[i] = 0;
763 } 753 }
764 } 754 }
765 IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n", 755 D_CALIB("delta_gain_codes: a %d b %d c %d\n", data->delta_gain_code[0],
766 data->delta_gain_code[0], 756 data->delta_gain_code[1], data->delta_gain_code[2]);
767 data->delta_gain_code[1],
768 data->delta_gain_code[2]);
769 757
770 /* Differential gain gets sent to uCode only once */ 758 /* Differential gain gets sent to uCode only once */
771 if (!data->radio_write) { 759 if (!data->radio_write) {
772 struct iwl_calib_diff_gain_cmd cmd; 760 struct il_calib_diff_gain_cmd cmd;
773 data->radio_write = 1; 761 data->radio_write = 1;
774 762
775 memset(&cmd, 0, sizeof(cmd)); 763 memset(&cmd, 0, sizeof(cmd));
776 cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD; 764 cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
777 cmd.diff_gain_a = data->delta_gain_code[0]; 765 cmd.diff_gain_a = data->delta_gain_code[0];
778 cmd.diff_gain_b = data->delta_gain_code[1]; 766 cmd.diff_gain_b = data->delta_gain_code[1];
779 cmd.diff_gain_c = data->delta_gain_code[2]; 767 cmd.diff_gain_c = data->delta_gain_code[2];
780 ret = iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, 768 ret = il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd);
781 sizeof(cmd), &cmd);
782 if (ret) 769 if (ret)
783 IWL_DEBUG_CALIB(priv, "fail sending cmd " 770 D_CALIB("fail sending cmd " "C_PHY_CALIBRATION\n");
784 "REPLY_PHY_CALIBRATION_CMD\n");
785 771
786 /* TODO we might want recalculate 772 /* TODO we might want recalculate
787 * rx_chain in rxon cmd */ 773 * rx_chain in rxon cmd */
788 774
789 /* Mark so we run this algo only once! */ 775 /* Mark so we run this algo only once! */
790 data->state = IWL_CHAIN_NOISE_CALIBRATED; 776 data->state = IL_CHAIN_NOISE_CALIBRATED;
791 } 777 }
792} 778}
793 779
794
795
796/* 780/*
797 * Accumulate 16 beacons of signal and noise statistics for each of 781 * Accumulate 16 beacons of signal and noise stats for each of
798 * 3 receivers/antennas/rx-chains, then figure out: 782 * 3 receivers/antennas/rx-chains, then figure out:
799 * 1) Which antennas are connected. 783 * 1) Which antennas are connected.
800 * 2) Differential rx gain settings to balance the 3 receivers. 784 * 2) Differential rx gain settings to balance the 3 receivers.
801 */ 785 */
802void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp) 786void
787il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp)
803{ 788{
804 struct iwl_chain_noise_data *data = NULL; 789 struct il_chain_noise_data *data = NULL;
805 790
806 u32 chain_noise_a; 791 u32 chain_noise_a;
807 u32 chain_noise_b; 792 u32 chain_noise_b;
@@ -809,8 +794,8 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
809 u32 chain_sig_a; 794 u32 chain_sig_a;
810 u32 chain_sig_b; 795 u32 chain_sig_b;
811 u32 chain_sig_c; 796 u32 chain_sig_c;
812 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; 797 u32 average_sig[NUM_RX_CHAINS] = { INITIALIZATION_VALUE };
813 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; 798 u32 average_noise[NUM_RX_CHAINS] = { INITIALIZATION_VALUE };
814 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE; 799 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
815 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE; 800 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
816 u16 i = 0; 801 u16 i = 0;
@@ -819,70 +804,69 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
819 u8 rxon_band24; 804 u8 rxon_band24;
820 u8 stat_band24; 805 u8 stat_band24;
821 unsigned long flags; 806 unsigned long flags;
822 struct statistics_rx_non_phy *rx_info; 807 struct stats_rx_non_phy *rx_info;
823 808
824 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 809 struct il_rxon_context *ctx = &il->ctx;
825 810
826 if (priv->disable_chain_noise_cal) 811 if (il->disable_chain_noise_cal)
827 return; 812 return;
828 813
829 data = &(priv->chain_noise_data); 814 data = &(il->chain_noise_data);
830 815
831 /* 816 /*
832 * Accumulate just the first "chain_noise_num_beacons" after 817 * Accumulate just the first "chain_noise_num_beacons" after
833 * the first association, then we're done forever. 818 * the first association, then we're done forever.
834 */ 819 */
835 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) { 820 if (data->state != IL_CHAIN_NOISE_ACCUMULATE) {
836 if (data->state == IWL_CHAIN_NOISE_ALIVE) 821 if (data->state == IL_CHAIN_NOISE_ALIVE)
837 IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n"); 822 D_CALIB("Wait for noise calib reset\n");
838 return; 823 return;
839 } 824 }
840 825
841 spin_lock_irqsave(&priv->lock, flags); 826 spin_lock_irqsave(&il->lock, flags);
842 827
843 rx_info = &(((struct iwl_notif_statistics *)stat_resp)-> 828 rx_info = &(((struct il_notif_stats *)stat_resp)->rx.general);
844 rx.general);
845 829
846 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { 830 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
847 IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n"); 831 D_CALIB(" << Interference data unavailable\n");
848 spin_unlock_irqrestore(&priv->lock, flags); 832 spin_unlock_irqrestore(&il->lock, flags);
849 return; 833 return;
850 } 834 }
851 835
852 rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK); 836 rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
853 rxon_chnum = le16_to_cpu(ctx->staging.channel); 837 rxon_chnum = le16_to_cpu(ctx->staging.channel);
854 838
855 stat_band24 = !!(((struct iwl_notif_statistics *) 839 stat_band24 =
856 stat_resp)->flag & 840 !!(((struct il_notif_stats *)stat_resp)->
857 STATISTICS_REPLY_FLG_BAND_24G_MSK); 841 flag & STATS_REPLY_FLG_BAND_24G_MSK);
858 stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *) 842 stat_chnum =
859 stat_resp)->flag) >> 16; 843 le32_to_cpu(((struct il_notif_stats *)stat_resp)->flag) >> 16;
860 844
861 /* Make sure we accumulate data for just the associated channel 845 /* Make sure we accumulate data for just the associated channel
862 * (even if scanning). */ 846 * (even if scanning). */
863 if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) { 847 if (rxon_chnum != stat_chnum || rxon_band24 != stat_band24) {
864 IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n", 848 D_CALIB("Stats not from chan=%d, band24=%d\n", rxon_chnum,
865 rxon_chnum, rxon_band24); 849 rxon_band24);
866 spin_unlock_irqrestore(&priv->lock, flags); 850 spin_unlock_irqrestore(&il->lock, flags);
867 return; 851 return;
868 } 852 }
869 853
870 /* 854 /*
871 * Accumulate beacon statistics values across 855 * Accumulate beacon stats values across
872 * "chain_noise_num_beacons" 856 * "chain_noise_num_beacons"
873 */ 857 */
874 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) & 858 chain_noise_a =
875 IN_BAND_FILTER; 859 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
876 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) & 860 chain_noise_b =
877 IN_BAND_FILTER; 861 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
878 chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) & 862 chain_noise_c =
879 IN_BAND_FILTER; 863 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
880 864
881 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER; 865 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
882 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER; 866 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
883 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER; 867 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
884 868
885 spin_unlock_irqrestore(&priv->lock, flags); 869 spin_unlock_irqrestore(&il->lock, flags);
886 870
887 data->beacon_count++; 871 data->beacon_count++;
888 872
@@ -894,34 +878,33 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
894 data->chain_signal_b = (chain_sig_b + data->chain_signal_b); 878 data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
895 data->chain_signal_c = (chain_sig_c + data->chain_signal_c); 879 data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
896 880
897 IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n", 881 D_CALIB("chan=%d, band24=%d, beacon=%d\n", rxon_chnum, rxon_band24,
898 rxon_chnum, rxon_band24, data->beacon_count); 882 data->beacon_count);
899 IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n", 883 D_CALIB("chain_sig: a %d b %d c %d\n", chain_sig_a, chain_sig_b,
900 chain_sig_a, chain_sig_b, chain_sig_c); 884 chain_sig_c);
901 IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n", 885 D_CALIB("chain_noise: a %d b %d c %d\n", chain_noise_a, chain_noise_b,
902 chain_noise_a, chain_noise_b, chain_noise_c); 886 chain_noise_c);
903 887
904 /* If this is the "chain_noise_num_beacons", determine: 888 /* If this is the "chain_noise_num_beacons", determine:
905 * 1) Disconnected antennas (using signal strengths) 889 * 1) Disconnected antennas (using signal strengths)
906 * 2) Differential gain (using silence noise) to balance receivers */ 890 * 2) Differential gain (using silence noise) to balance receivers */
907 if (data->beacon_count != 891 if (data->beacon_count != il->cfg->base_params->chain_noise_num_beacons)
908 priv->cfg->base_params->chain_noise_num_beacons)
909 return; 892 return;
910 893
911 /* Analyze signal for disconnected antenna */ 894 /* Analyze signal for disconnected antenna */
912 iwl4965_find_disconn_antenna(priv, average_sig, data); 895 il4965_find_disconn_antenna(il, average_sig, data);
913 896
914 /* Analyze noise for rx balance */ 897 /* Analyze noise for rx balance */
915 average_noise[0] = data->chain_noise_a / 898 average_noise[0] =
916 priv->cfg->base_params->chain_noise_num_beacons; 899 data->chain_noise_a / il->cfg->base_params->chain_noise_num_beacons;
917 average_noise[1] = data->chain_noise_b / 900 average_noise[1] =
918 priv->cfg->base_params->chain_noise_num_beacons; 901 data->chain_noise_b / il->cfg->base_params->chain_noise_num_beacons;
919 average_noise[2] = data->chain_noise_c / 902 average_noise[2] =
920 priv->cfg->base_params->chain_noise_num_beacons; 903 data->chain_noise_c / il->cfg->base_params->chain_noise_num_beacons;
921 904
922 for (i = 0; i < NUM_RX_CHAINS; i++) { 905 for (i = 0; i < NUM_RX_CHAINS; i++) {
923 if (!(data->disconn_array[i]) && 906 if (!data->disconn_array[i] &&
924 (average_noise[i] <= min_average_noise)) { 907 average_noise[i] <= min_average_noise) {
925 /* This means that chain i is active and has 908 /* This means that chain i is active and has
926 * lower noise values so far: */ 909 * lower noise values so far: */
927 min_average_noise = average_noise[i]; 910 min_average_noise = average_noise[i];
@@ -929,39 +912,37 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
929 } 912 }
930 } 913 }
931 914
932 IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n", 915 D_CALIB("average_noise: a %d b %d c %d\n", average_noise[0],
933 average_noise[0], average_noise[1], 916 average_noise[1], average_noise[2]);
934 average_noise[2]);
935 917
936 IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n", 918 D_CALIB("min_average_noise = %d, antenna %d\n", min_average_noise,
937 min_average_noise, min_average_noise_antenna_i); 919 min_average_noise_antenna_i);
938 920
939 iwl4965_gain_computation(priv, average_noise, 921 il4965_gain_computation(il, average_noise, min_average_noise_antenna_i,
940 min_average_noise_antenna_i, min_average_noise, 922 min_average_noise,
941 iwl4965_find_first_chain(priv->cfg->valid_rx_ant)); 923 il4965_find_first_chain(il->cfg->valid_rx_ant));
942 924
943 /* Some power changes may have been made during the calibration. 925 /* Some power changes may have been made during the calibration.
944 * Update and commit the RXON 926 * Update and commit the RXON
945 */ 927 */
946 if (priv->cfg->ops->lib->update_chain_flags) 928 if (il->cfg->ops->lib->update_chain_flags)
947 priv->cfg->ops->lib->update_chain_flags(priv); 929 il->cfg->ops->lib->update_chain_flags(il);
948 930
949 data->state = IWL_CHAIN_NOISE_DONE; 931 data->state = IL_CHAIN_NOISE_DONE;
950 iwl_legacy_power_update_mode(priv, false); 932 il_power_update_mode(il, false);
951} 933}
952 934
953void iwl4965_reset_run_time_calib(struct iwl_priv *priv) 935void
936il4965_reset_run_time_calib(struct il_priv *il)
954{ 937{
955 int i; 938 int i;
956 memset(&(priv->sensitivity_data), 0, 939 memset(&(il->sensitivity_data), 0, sizeof(struct il_sensitivity_data));
957 sizeof(struct iwl_sensitivity_data)); 940 memset(&(il->chain_noise_data), 0, sizeof(struct il_chain_noise_data));
958 memset(&(priv->chain_noise_data), 0,
959 sizeof(struct iwl_chain_noise_data));
960 for (i = 0; i < NUM_RX_CHAINS; i++) 941 for (i = 0; i < NUM_RX_CHAINS; i++)
961 priv->chain_noise_data.delta_gain_code[i] = 942 il->chain_noise_data.delta_gain_code[i] =
962 CHAIN_NOISE_DELTA_GAIN_INIT_VAL; 943 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
963 944
964 /* Ask for statistics now, the uCode will send notification 945 /* Ask for stats now, the uCode will send notification
965 * periodically after association */ 946 * periodically after association */
966 iwl_legacy_send_statistics_request(priv, CMD_ASYNC, true); 947 il_send_stats_request(il, CMD_ASYNC, true);
967} 948}
diff --git a/drivers/net/wireless/iwlegacy/4965-debug.c b/drivers/net/wireless/iwlegacy/4965-debug.c
new file mode 100644
index 000000000000..98ec39f56ba3
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-debug.c
@@ -0,0 +1,746 @@
1/******************************************************************************
2*
3* GPL LICENSE SUMMARY
4*
5* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6*
7* This program is free software; you can redistribute it and/or modify
8* it under the terms of version 2 of the GNU General Public License as
9* published by the Free Software Foundation.
10*
11* This program is distributed in the hope that it will be useful, but
12* WITHOUT ANY WARRANTY; without even the implied warranty of
13* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14* General Public License for more details.
15*
16* You should have received a copy of the GNU General Public License
17* along with this program; if not, write to the Free Software
18* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19* USA
20*
21* The full GNU General Public License is included in this distribution
22* in the file called LICENSE.GPL.
23*
24* Contact Information:
25* Intel Linux Wireless <ilw@linux.intel.com>
26* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27*****************************************************************************/
28#include "common.h"
29#include "4965.h"
30
31static const char *fmt_value = " %-30s %10u\n";
32static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
33static const char *fmt_header =
34 "%-32s current cumulative delta max\n";
35
36static int
37il4965_stats_flag(struct il_priv *il, char *buf, int bufsz)
38{
39 int p = 0;
40 u32 flag;
41
42 flag = le32_to_cpu(il->_4965.stats.flag);
43
44 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
45 if (flag & UCODE_STATS_CLEAR_MSK)
46 p += scnprintf(buf + p, bufsz - p,
47 "\tStatistics have been cleared\n");
48 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
49 (flag & UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" :
50 "5.2 GHz");
51 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
52 (flag & UCODE_STATS_NARROW_BAND_MSK) ? "enabled" :
53 "disabled");
54
55 return p;
56}
57
58ssize_t
59il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
60 size_t count, loff_t *ppos)
61{
62 struct il_priv *il = file->private_data;
63 int pos = 0;
64 char *buf;
65 int bufsz =
66 sizeof(struct stats_rx_phy) * 40 +
67 sizeof(struct stats_rx_non_phy) * 40 +
68 sizeof(struct stats_rx_ht_phy) * 40 + 400;
69 ssize_t ret;
70 struct stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
71 struct stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
72 struct stats_rx_non_phy *general, *accum_general;
73 struct stats_rx_non_phy *delta_general, *max_general;
74 struct stats_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
75
76 if (!il_is_alive(il))
77 return -EAGAIN;
78
79 buf = kzalloc(bufsz, GFP_KERNEL);
80 if (!buf) {
81 IL_ERR("Can not allocate Buffer\n");
82 return -ENOMEM;
83 }
84
85 /*
86 * the statistic information display here is based on
87 * the last stats notification from uCode
88 * might not reflect the current uCode activity
89 */
90 ofdm = &il->_4965.stats.rx.ofdm;
91 cck = &il->_4965.stats.rx.cck;
92 general = &il->_4965.stats.rx.general;
93 ht = &il->_4965.stats.rx.ofdm_ht;
94 accum_ofdm = &il->_4965.accum_stats.rx.ofdm;
95 accum_cck = &il->_4965.accum_stats.rx.cck;
96 accum_general = &il->_4965.accum_stats.rx.general;
97 accum_ht = &il->_4965.accum_stats.rx.ofdm_ht;
98 delta_ofdm = &il->_4965.delta_stats.rx.ofdm;
99 delta_cck = &il->_4965.delta_stats.rx.cck;
100 delta_general = &il->_4965.delta_stats.rx.general;
101 delta_ht = &il->_4965.delta_stats.rx.ofdm_ht;
102 max_ofdm = &il->_4965.max_delta.rx.ofdm;
103 max_cck = &il->_4965.max_delta.rx.cck;
104 max_general = &il->_4965.max_delta.rx.general;
105 max_ht = &il->_4965.max_delta.rx.ofdm_ht;
106
107 pos += il4965_stats_flag(il, buf, bufsz);
108 pos +=
109 scnprintf(buf + pos, bufsz - pos, fmt_header,
110 "Statistics_Rx - OFDM:");
111 pos +=
112 scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
113 le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
114 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
115 pos +=
116 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
117 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
118 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
119 pos +=
120 scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
121 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
122 delta_ofdm->plcp_err, max_ofdm->plcp_err);
123 pos +=
124 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
125 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
126 delta_ofdm->crc32_err, max_ofdm->crc32_err);
127 pos +=
128 scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
129 le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
130 delta_ofdm->overrun_err, max_ofdm->overrun_err);
131 pos +=
132 scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
133 le32_to_cpu(ofdm->early_overrun_err),
134 accum_ofdm->early_overrun_err,
135 delta_ofdm->early_overrun_err,
136 max_ofdm->early_overrun_err);
137 pos +=
138 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
139 le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
140 delta_ofdm->crc32_good, max_ofdm->crc32_good);
141 pos +=
142 scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
143 le32_to_cpu(ofdm->false_alarm_cnt),
144 accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
145 max_ofdm->false_alarm_cnt);
146 pos +=
147 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
148 le32_to_cpu(ofdm->fina_sync_err_cnt),
149 accum_ofdm->fina_sync_err_cnt,
150 delta_ofdm->fina_sync_err_cnt,
151 max_ofdm->fina_sync_err_cnt);
152 pos +=
153 scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
154 le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
155 delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
156 pos +=
157 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
158 le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
159 delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
160 pos +=
161 scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
162 le32_to_cpu(ofdm->unresponded_rts),
163 accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
164 max_ofdm->unresponded_rts);
165 pos +=
166 scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
167 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
168 accum_ofdm->rxe_frame_limit_overrun,
169 delta_ofdm->rxe_frame_limit_overrun,
170 max_ofdm->rxe_frame_limit_overrun);
171 pos +=
172 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
173 le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
174 delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
175 pos +=
176 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
177 le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
178 delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
179 pos +=
180 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
181 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
182 accum_ofdm->sent_ba_rsp_cnt, delta_ofdm->sent_ba_rsp_cnt,
183 max_ofdm->sent_ba_rsp_cnt);
184 pos +=
185 scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
186 le32_to_cpu(ofdm->dsp_self_kill),
187 accum_ofdm->dsp_self_kill, delta_ofdm->dsp_self_kill,
188 max_ofdm->dsp_self_kill);
189 pos +=
190 scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
191 le32_to_cpu(ofdm->mh_format_err),
192 accum_ofdm->mh_format_err, delta_ofdm->mh_format_err,
193 max_ofdm->mh_format_err);
194 pos +=
195 scnprintf(buf + pos, bufsz - pos, fmt_table,
196 "re_acq_main_rssi_sum:",
197 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
198 accum_ofdm->re_acq_main_rssi_sum,
199 delta_ofdm->re_acq_main_rssi_sum,
200 max_ofdm->re_acq_main_rssi_sum);
201
202 pos +=
203 scnprintf(buf + pos, bufsz - pos, fmt_header,
204 "Statistics_Rx - CCK:");
205 pos +=
206 scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
207 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
208 delta_cck->ina_cnt, max_cck->ina_cnt);
209 pos +=
210 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
211 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
212 delta_cck->fina_cnt, max_cck->fina_cnt);
213 pos +=
214 scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
215 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
216 delta_cck->plcp_err, max_cck->plcp_err);
217 pos +=
218 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
219 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
220 delta_cck->crc32_err, max_cck->crc32_err);
221 pos +=
222 scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
223 le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
224 delta_cck->overrun_err, max_cck->overrun_err);
225 pos +=
226 scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
227 le32_to_cpu(cck->early_overrun_err),
228 accum_cck->early_overrun_err,
229 delta_cck->early_overrun_err, max_cck->early_overrun_err);
230 pos +=
231 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
232 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
233 delta_cck->crc32_good, max_cck->crc32_good);
234 pos +=
235 scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
236 le32_to_cpu(cck->false_alarm_cnt),
237 accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
238 max_cck->false_alarm_cnt);
239 pos +=
240 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
241 le32_to_cpu(cck->fina_sync_err_cnt),
242 accum_cck->fina_sync_err_cnt,
243 delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
244 pos +=
245 scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
246 le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
247 delta_cck->sfd_timeout, max_cck->sfd_timeout);
248 pos +=
249 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
250 le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
251 delta_cck->fina_timeout, max_cck->fina_timeout);
252 pos +=
253 scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
254 le32_to_cpu(cck->unresponded_rts),
255 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
256 max_cck->unresponded_rts);
257 pos +=
258 scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
259 le32_to_cpu(cck->rxe_frame_limit_overrun),
260 accum_cck->rxe_frame_limit_overrun,
261 delta_cck->rxe_frame_limit_overrun,
262 max_cck->rxe_frame_limit_overrun);
263 pos +=
264 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
265 le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
266 delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
267 pos +=
268 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
269 le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
270 delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
271 pos +=
272 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
273 le32_to_cpu(cck->sent_ba_rsp_cnt),
274 accum_cck->sent_ba_rsp_cnt, delta_cck->sent_ba_rsp_cnt,
275 max_cck->sent_ba_rsp_cnt);
276 pos +=
277 scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
278 le32_to_cpu(cck->dsp_self_kill), accum_cck->dsp_self_kill,
279 delta_cck->dsp_self_kill, max_cck->dsp_self_kill);
280 pos +=
281 scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
282 le32_to_cpu(cck->mh_format_err), accum_cck->mh_format_err,
283 delta_cck->mh_format_err, max_cck->mh_format_err);
284 pos +=
285 scnprintf(buf + pos, bufsz - pos, fmt_table,
286 "re_acq_main_rssi_sum:",
287 le32_to_cpu(cck->re_acq_main_rssi_sum),
288 accum_cck->re_acq_main_rssi_sum,
289 delta_cck->re_acq_main_rssi_sum,
290 max_cck->re_acq_main_rssi_sum);
291
292 pos +=
293 scnprintf(buf + pos, bufsz - pos, fmt_header,
294 "Statistics_Rx - GENERAL:");
295 pos +=
296 scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_cts:",
297 le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
298 delta_general->bogus_cts, max_general->bogus_cts);
299 pos +=
300 scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_ack:",
301 le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
302 delta_general->bogus_ack, max_general->bogus_ack);
303 pos +=
304 scnprintf(buf + pos, bufsz - pos, fmt_table, "non_bssid_frames:",
305 le32_to_cpu(general->non_bssid_frames),
306 accum_general->non_bssid_frames,
307 delta_general->non_bssid_frames,
308 max_general->non_bssid_frames);
309 pos +=
310 scnprintf(buf + pos, bufsz - pos, fmt_table, "filtered_frames:",
311 le32_to_cpu(general->filtered_frames),
312 accum_general->filtered_frames,
313 delta_general->filtered_frames,
314 max_general->filtered_frames);
315 pos +=
316 scnprintf(buf + pos, bufsz - pos, fmt_table, "non_channel_beacons:",
317 le32_to_cpu(general->non_channel_beacons),
318 accum_general->non_channel_beacons,
319 delta_general->non_channel_beacons,
320 max_general->non_channel_beacons);
321 pos +=
322 scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_beacons:",
323 le32_to_cpu(general->channel_beacons),
324 accum_general->channel_beacons,
325 delta_general->channel_beacons,
326 max_general->channel_beacons);
327 pos +=
328 scnprintf(buf + pos, bufsz - pos, fmt_table, "num_missed_bcon:",
329 le32_to_cpu(general->num_missed_bcon),
330 accum_general->num_missed_bcon,
331 delta_general->num_missed_bcon,
332 max_general->num_missed_bcon);
333 pos +=
334 scnprintf(buf + pos, bufsz - pos, fmt_table,
335 "adc_rx_saturation_time:",
336 le32_to_cpu(general->adc_rx_saturation_time),
337 accum_general->adc_rx_saturation_time,
338 delta_general->adc_rx_saturation_time,
339 max_general->adc_rx_saturation_time);
340 pos +=
341 scnprintf(buf + pos, bufsz - pos, fmt_table,
342 "ina_detect_search_tm:",
343 le32_to_cpu(general->ina_detection_search_time),
344 accum_general->ina_detection_search_time,
345 delta_general->ina_detection_search_time,
346 max_general->ina_detection_search_time);
347 pos +=
348 scnprintf(buf + pos, bufsz - pos, fmt_table,
349 "beacon_silence_rssi_a:",
350 le32_to_cpu(general->beacon_silence_rssi_a),
351 accum_general->beacon_silence_rssi_a,
352 delta_general->beacon_silence_rssi_a,
353 max_general->beacon_silence_rssi_a);
354 pos +=
355 scnprintf(buf + pos, bufsz - pos, fmt_table,
356 "beacon_silence_rssi_b:",
357 le32_to_cpu(general->beacon_silence_rssi_b),
358 accum_general->beacon_silence_rssi_b,
359 delta_general->beacon_silence_rssi_b,
360 max_general->beacon_silence_rssi_b);
361 pos +=
362 scnprintf(buf + pos, bufsz - pos, fmt_table,
363 "beacon_silence_rssi_c:",
364 le32_to_cpu(general->beacon_silence_rssi_c),
365 accum_general->beacon_silence_rssi_c,
366 delta_general->beacon_silence_rssi_c,
367 max_general->beacon_silence_rssi_c);
368 pos +=
369 scnprintf(buf + pos, bufsz - pos, fmt_table,
370 "interference_data_flag:",
371 le32_to_cpu(general->interference_data_flag),
372 accum_general->interference_data_flag,
373 delta_general->interference_data_flag,
374 max_general->interference_data_flag);
375 pos +=
376 scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_load:",
377 le32_to_cpu(general->channel_load),
378 accum_general->channel_load, delta_general->channel_load,
379 max_general->channel_load);
380 pos +=
381 scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_false_alarms:",
382 le32_to_cpu(general->dsp_false_alarms),
383 accum_general->dsp_false_alarms,
384 delta_general->dsp_false_alarms,
385 max_general->dsp_false_alarms);
386 pos +=
387 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_a:",
388 le32_to_cpu(general->beacon_rssi_a),
389 accum_general->beacon_rssi_a,
390 delta_general->beacon_rssi_a, max_general->beacon_rssi_a);
391 pos +=
392 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_b:",
393 le32_to_cpu(general->beacon_rssi_b),
394 accum_general->beacon_rssi_b,
395 delta_general->beacon_rssi_b, max_general->beacon_rssi_b);
396 pos +=
397 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_c:",
398 le32_to_cpu(general->beacon_rssi_c),
399 accum_general->beacon_rssi_c,
400 delta_general->beacon_rssi_c, max_general->beacon_rssi_c);
401 pos +=
402 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_a:",
403 le32_to_cpu(general->beacon_energy_a),
404 accum_general->beacon_energy_a,
405 delta_general->beacon_energy_a,
406 max_general->beacon_energy_a);
407 pos +=
408 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_b:",
409 le32_to_cpu(general->beacon_energy_b),
410 accum_general->beacon_energy_b,
411 delta_general->beacon_energy_b,
412 max_general->beacon_energy_b);
413 pos +=
414 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_c:",
415 le32_to_cpu(general->beacon_energy_c),
416 accum_general->beacon_energy_c,
417 delta_general->beacon_energy_c,
418 max_general->beacon_energy_c);
419
420 pos +=
421 scnprintf(buf + pos, bufsz - pos, fmt_header,
422 "Statistics_Rx - OFDM_HT:");
423 pos +=
424 scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
425 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
426 delta_ht->plcp_err, max_ht->plcp_err);
427 pos +=
428 scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
429 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
430 delta_ht->overrun_err, max_ht->overrun_err);
431 pos +=
432 scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
433 le32_to_cpu(ht->early_overrun_err),
434 accum_ht->early_overrun_err, delta_ht->early_overrun_err,
435 max_ht->early_overrun_err);
436 pos +=
437 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
438 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
439 delta_ht->crc32_good, max_ht->crc32_good);
440 pos +=
441 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
442 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
443 delta_ht->crc32_err, max_ht->crc32_err);
444 pos +=
445 scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
446 le32_to_cpu(ht->mh_format_err), accum_ht->mh_format_err,
447 delta_ht->mh_format_err, max_ht->mh_format_err);
448 pos +=
449 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_crc32_good:",
450 le32_to_cpu(ht->agg_crc32_good), accum_ht->agg_crc32_good,
451 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
452 pos +=
453 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_mpdu_cnt:",
454 le32_to_cpu(ht->agg_mpdu_cnt), accum_ht->agg_mpdu_cnt,
455 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
456 pos +=
457 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_cnt:",
458 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
459 delta_ht->agg_cnt, max_ht->agg_cnt);
460 pos +=
461 scnprintf(buf + pos, bufsz - pos, fmt_table, "unsupport_mcs:",
462 le32_to_cpu(ht->unsupport_mcs), accum_ht->unsupport_mcs,
463 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
464
465 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
466 kfree(buf);
467 return ret;
468}
469
470ssize_t
471il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
472 size_t count, loff_t *ppos)
473{
474 struct il_priv *il = file->private_data;
475 int pos = 0;
476 char *buf;
477 int bufsz = (sizeof(struct stats_tx) * 48) + 250;
478 ssize_t ret;
479 struct stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
480
481 if (!il_is_alive(il))
482 return -EAGAIN;
483
484 buf = kzalloc(bufsz, GFP_KERNEL);
485 if (!buf) {
486 IL_ERR("Can not allocate Buffer\n");
487 return -ENOMEM;
488 }
489
490 /* the statistic information display here is based on
491 * the last stats notification from uCode
492 * might not reflect the current uCode activity
493 */
494 tx = &il->_4965.stats.tx;
495 accum_tx = &il->_4965.accum_stats.tx;
496 delta_tx = &il->_4965.delta_stats.tx;
497 max_tx = &il->_4965.max_delta.tx;
498
499 pos += il4965_stats_flag(il, buf, bufsz);
500 pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Tx:");
501 pos +=
502 scnprintf(buf + pos, bufsz - pos, fmt_table, "preamble:",
503 le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
504 delta_tx->preamble_cnt, max_tx->preamble_cnt);
505 pos +=
506 scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_detected_cnt:",
507 le32_to_cpu(tx->rx_detected_cnt),
508 accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
509 max_tx->rx_detected_cnt);
510 pos +=
511 scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_defer_cnt:",
512 le32_to_cpu(tx->bt_prio_defer_cnt),
513 accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
514 max_tx->bt_prio_defer_cnt);
515 pos +=
516 scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_kill_cnt:",
517 le32_to_cpu(tx->bt_prio_kill_cnt),
518 accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
519 max_tx->bt_prio_kill_cnt);
520 pos +=
521 scnprintf(buf + pos, bufsz - pos, fmt_table, "few_bytes_cnt:",
522 le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
523 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
524 pos +=
525 scnprintf(buf + pos, bufsz - pos, fmt_table, "cts_timeout:",
526 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
527 delta_tx->cts_timeout, max_tx->cts_timeout);
528 pos +=
529 scnprintf(buf + pos, bufsz - pos, fmt_table, "ack_timeout:",
530 le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
531 delta_tx->ack_timeout, max_tx->ack_timeout);
532 pos +=
533 scnprintf(buf + pos, bufsz - pos, fmt_table, "expected_ack_cnt:",
534 le32_to_cpu(tx->expected_ack_cnt),
535 accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
536 max_tx->expected_ack_cnt);
537 pos +=
538 scnprintf(buf + pos, bufsz - pos, fmt_table, "actual_ack_cnt:",
539 le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
540 delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
541 pos +=
542 scnprintf(buf + pos, bufsz - pos, fmt_table, "dump_msdu_cnt:",
543 le32_to_cpu(tx->dump_msdu_cnt), accum_tx->dump_msdu_cnt,
544 delta_tx->dump_msdu_cnt, max_tx->dump_msdu_cnt);
545 pos +=
546 scnprintf(buf + pos, bufsz - pos, fmt_table,
547 "abort_nxt_frame_mismatch:",
548 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
549 accum_tx->burst_abort_next_frame_mismatch_cnt,
550 delta_tx->burst_abort_next_frame_mismatch_cnt,
551 max_tx->burst_abort_next_frame_mismatch_cnt);
552 pos +=
553 scnprintf(buf + pos, bufsz - pos, fmt_table,
554 "abort_missing_nxt_frame:",
555 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
556 accum_tx->burst_abort_missing_next_frame_cnt,
557 delta_tx->burst_abort_missing_next_frame_cnt,
558 max_tx->burst_abort_missing_next_frame_cnt);
559 pos +=
560 scnprintf(buf + pos, bufsz - pos, fmt_table,
561 "cts_timeout_collision:",
562 le32_to_cpu(tx->cts_timeout_collision),
563 accum_tx->cts_timeout_collision,
564 delta_tx->cts_timeout_collision,
565 max_tx->cts_timeout_collision);
566 pos +=
567 scnprintf(buf + pos, bufsz - pos, fmt_table,
568 "ack_ba_timeout_collision:",
569 le32_to_cpu(tx->ack_or_ba_timeout_collision),
570 accum_tx->ack_or_ba_timeout_collision,
571 delta_tx->ack_or_ba_timeout_collision,
572 max_tx->ack_or_ba_timeout_collision);
573 pos +=
574 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg ba_timeout:",
575 le32_to_cpu(tx->agg.ba_timeout), accum_tx->agg.ba_timeout,
576 delta_tx->agg.ba_timeout, max_tx->agg.ba_timeout);
577 pos +=
578 scnprintf(buf + pos, bufsz - pos, fmt_table,
579 "agg ba_resched_frames:",
580 le32_to_cpu(tx->agg.ba_reschedule_frames),
581 accum_tx->agg.ba_reschedule_frames,
582 delta_tx->agg.ba_reschedule_frames,
583 max_tx->agg.ba_reschedule_frames);
584 pos +=
585 scnprintf(buf + pos, bufsz - pos, fmt_table,
586 "agg scd_query_agg_frame:",
587 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
588 accum_tx->agg.scd_query_agg_frame_cnt,
589 delta_tx->agg.scd_query_agg_frame_cnt,
590 max_tx->agg.scd_query_agg_frame_cnt);
591 pos +=
592 scnprintf(buf + pos, bufsz - pos, fmt_table,
593 "agg scd_query_no_agg:",
594 le32_to_cpu(tx->agg.scd_query_no_agg),
595 accum_tx->agg.scd_query_no_agg,
596 delta_tx->agg.scd_query_no_agg,
597 max_tx->agg.scd_query_no_agg);
598 pos +=
599 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg scd_query_agg:",
600 le32_to_cpu(tx->agg.scd_query_agg),
601 accum_tx->agg.scd_query_agg, delta_tx->agg.scd_query_agg,
602 max_tx->agg.scd_query_agg);
603 pos +=
604 scnprintf(buf + pos, bufsz - pos, fmt_table,
605 "agg scd_query_mismatch:",
606 le32_to_cpu(tx->agg.scd_query_mismatch),
607 accum_tx->agg.scd_query_mismatch,
608 delta_tx->agg.scd_query_mismatch,
609 max_tx->agg.scd_query_mismatch);
610 pos +=
611 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg frame_not_ready:",
612 le32_to_cpu(tx->agg.frame_not_ready),
613 accum_tx->agg.frame_not_ready,
614 delta_tx->agg.frame_not_ready,
615 max_tx->agg.frame_not_ready);
616 pos +=
617 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg underrun:",
618 le32_to_cpu(tx->agg.underrun), accum_tx->agg.underrun,
619 delta_tx->agg.underrun, max_tx->agg.underrun);
620 pos +=
621 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg bt_prio_kill:",
622 le32_to_cpu(tx->agg.bt_prio_kill),
623 accum_tx->agg.bt_prio_kill, delta_tx->agg.bt_prio_kill,
624 max_tx->agg.bt_prio_kill);
625 pos +=
626 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg rx_ba_rsp_cnt:",
627 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
628 accum_tx->agg.rx_ba_rsp_cnt, delta_tx->agg.rx_ba_rsp_cnt,
629 max_tx->agg.rx_ba_rsp_cnt);
630
631 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
632 kfree(buf);
633 return ret;
634}
635
/*
 * il4965_ucode_general_stats_read - debugfs read op for "general" uCode stats.
 *
 * Formats temperature, debug counters, timing and antenna-diversity
 * statistics (current / accumulated / delta / max values) into a text
 * buffer and copies the requested window of it to user space.
 *
 * Returns number of bytes copied, -EAGAIN if the device is not alive,
 * or -ENOMEM if the text buffer cannot be allocated.
 */
ssize_t
il4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct il_priv *il = file->private_data;
	int pos = 0;
	char *buf;
	/* Worst-case text size heuristic based on the binary struct size */
	int bufsz = sizeof(struct stats_general) * 10 + 300;
	ssize_t ret;
	struct stats_general_common *general, *accum_general;
	struct stats_general_common *delta_general, *max_general;
	struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
	struct stats_div *div, *accum_div, *delta_div, *max_div;

	if (!il_is_alive(il))
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IL_ERR("Can not allocate Buffer\n");
		return -ENOMEM;
	}

	/* the statistic information display here is based on
	 * the last stats notification from uCode
	 * might not reflect the current uCode activity
	 */
	general = &il->_4965.stats.general.common;
	dbg = &il->_4965.stats.general.common.dbg;
	div = &il->_4965.stats.general.common.div;
	accum_general = &il->_4965.accum_stats.general.common;
	accum_dbg = &il->_4965.accum_stats.general.common.dbg;
	accum_div = &il->_4965.accum_stats.general.common.div;
	delta_general = &il->_4965.delta_stats.general.common;
	max_general = &il->_4965.max_delta.general.common;
	delta_dbg = &il->_4965.delta_stats.general.common.dbg;
	max_dbg = &il->_4965.max_delta.general.common.dbg;
	delta_div = &il->_4965.delta_stats.general.common.div;
	max_div = &il->_4965.max_delta.general.common.div;

	/* Header line plus one fmt_table row (cur/accum/delta/max) per field;
	 * scnprintf never writes past bufsz, so the chain is overflow-safe. */
	pos += il4965_stats_flag(il, buf, bufsz);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_header,
		      "Statistics_General:");
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_value, "temperature:",
		      le32_to_cpu(general->temperature));
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_value, "ttl_timestamp:",
		      le32_to_cpu(general->ttl_timestamp));
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_check:",
		      le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
		      delta_dbg->burst_check, max_dbg->burst_check);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_count:",
		      le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
		      delta_dbg->burst_count, max_dbg->burst_count);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table,
		      "wait_for_silence_timeout_count:",
		      le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
		      accum_dbg->wait_for_silence_timeout_cnt,
		      delta_dbg->wait_for_silence_timeout_cnt,
		      max_dbg->wait_for_silence_timeout_cnt);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "sleep_time:",
		      le32_to_cpu(general->sleep_time),
		      accum_general->sleep_time, delta_general->sleep_time,
		      max_general->sleep_time);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_out:",
		      le32_to_cpu(general->slots_out), accum_general->slots_out,
		      delta_general->slots_out, max_general->slots_out);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_idle:",
		      le32_to_cpu(general->slots_idle),
		      accum_general->slots_idle, delta_general->slots_idle,
		      max_general->slots_idle);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_a:",
		      le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
		      delta_div->tx_on_a, max_div->tx_on_a);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_b:",
		      le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
		      delta_div->tx_on_b, max_div->tx_on_b);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "exec_time:",
		      le32_to_cpu(div->exec_time), accum_div->exec_time,
		      delta_div->exec_time, max_div->exec_time);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "probe_time:",
		      le32_to_cpu(div->probe_time), accum_div->probe_time,
		      delta_div->probe_time, max_div->probe_time);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_enable_counter:",
		      le32_to_cpu(general->rx_enable_counter),
		      accum_general->rx_enable_counter,
		      delta_general->rx_enable_counter,
		      max_general->rx_enable_counter);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "num_of_sos_states:",
		      le32_to_cpu(general->num_of_sos_states),
		      accum_general->num_of_sos_states,
		      delta_general->num_of_sos_states,
		      max_general->num_of_sos_states);
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
new file mode 100644
index 000000000000..4aaef4135564
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -0,0 +1,6536 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/firmware.h>
44#include <linux/etherdevice.h>
45#include <linux/if_arp.h>
46
47#include <net/mac80211.h>
48
49#include <asm/div64.h>
50
51#define DRV_NAME "iwl4965"
52
53#include "common.h"
54#include "4965.h"
55
56/******************************************************************************
57 *
58 * module boiler plate
59 *
60 ******************************************************************************/
61
62/*
63 * module name, copyright, version, etc.
64 */
65#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
66
67#ifdef CONFIG_IWLEGACY_DEBUG
68#define VD "d"
69#else
70#define VD
71#endif
72
73#define DRV_VERSION IWLWIFI_VERSION VD
74
75MODULE_DESCRIPTION(DRV_DESCRIPTION);
76MODULE_VERSION(DRV_VERSION);
77MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
78MODULE_LICENSE("GPL");
79MODULE_ALIAS("iwl4965");
80
81void
82il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
83{
84 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
85 IL_ERR("Tx flush command to flush out all frames\n");
86 if (!test_bit(S_EXIT_PENDING, &il->status))
87 queue_work(il->workqueue, &il->tx_flush);
88 }
89}
90
91/*
92 * EEPROM
93 */
/* Default module load parameters; overridden via module_param hooks. */
struct il_mod_params il4965_mod_params = {
	.amsdu_size_8K = 1,	/* accept 8K Rx buffers (A-MSDU) by default */
	.restart_fw = 1,	/* restart firmware after a uCode error */
	/* the rest are 0 by default */
};
99
/*
 * il4965_rx_queue_reset - return an allocated Rx queue to its pristine state.
 *
 * Moves every buffer in the pool back onto rx_used (unmapping and freeing
 * any page still attached), clears the RBD queue slots, and resets the
 * read/write indices. Runs entirely under rxq->lock.
 */
void
il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* No RBD slot references a buffer any more */
	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
132
/*
 * il4965_rx_init - program the device's Rx DMA channel from rxq.
 *
 * Stops Rx DMA, points the device at the RBD circular buffer and the
 * status write-back area, then re-enables DMA with the configured
 * buffer size. Always returns 0.
 */
int
il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG;	/* 256 RBDs */
	u32 rb_timeout = 0;

	if (il->cfg->mod_params->amsdu_size_8K)
		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write idx */
	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM;
	 * the register takes the 256-byte-aligned address >> 8 */
	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0 (rb_timeout above -- an older note here said
	 *   "0x10", which did not match the value actually written)
	 * 256 RBDs
	 */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
	      FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	      FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	      FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
	      rb_size |
	      (rb_timeout << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	      (rfdnlog << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF);

	return 0;
}
176
/* Select V_MAIN as the device power source via the APMG power-control
 * register. The commented-out block shows the equivalent V_AUX setup. */
static void
il4965_set_pwr_vmain(struct il_priv *il)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

	if (pci_pme_capable(il->pci_dev, PCI_D3cold))
		il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
				      APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				      ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
			      APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			      ~APMG_PS_CTRL_MSK_PWR_SRC);
}
194
/*
 * il4965_hw_nic_init - bring the NIC hardware to an initialized state.
 *
 * Runs APM init/config, selects V_MAIN power, then allocates (or resets)
 * and programs the Rx queue and the Tx/command queues. Sets S_INIT on
 * success.
 *
 * Returns 0 on success or a negative errno if queue allocation fails.
 * NOTE(review): on Tx queue allocation failure the already-initialized
 * Rx queue is left allocated -- presumably freed by the caller's error
 * path; confirm.
 */
int
il4965_hw_nic_init(struct il_priv *il)
{
	unsigned long flags;
	struct il_rx_queue *rxq = &il->rxq;
	int ret;

	/* nic_init */
	spin_lock_irqsave(&il->lock, flags);
	il->cfg->ops->lib->apm_ops.init(il);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&il->lock, flags);

	il4965_set_pwr_vmain(il);

	il->cfg->ops->lib->apm_ops.config(il);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		ret = il_rx_queue_alloc(il);
		if (ret) {
			IL_ERR("Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		il4965_rx_queue_reset(il, rxq);

	/* Fill the free list and hand buffers to the device */
	il4965_rx_replenish(il);

	il4965_rx_init(il, rxq);

	spin_lock_irqsave(&il->lock, flags);

	rxq->need_update = 1;
	il_rx_queue_update_write_ptr(il, rxq);

	spin_unlock_irqrestore(&il->lock, flags);

	/* Allocate or reset and init all Tx and Command queues */
	if (!il->txq) {
		ret = il4965_txq_ctx_alloc(il);
		if (ret)
			return ret;
	} else
		il4965_txq_ctx_reset(il);

	set_bit(S_INIT, &il->status);

	return 0;
}
248
249/**
250 * il4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
251 */
252static inline __le32
253il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
254{
255 return cpu_to_le32((u32) (dma_addr >> 8));
256}
257
/**
 * il4965_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' idx forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
void
il4965_rx_queue_restock(struct il_priv *il)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] =
		    il4965_dma_addr2rbd_ptr(il, rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(il->workqueue, &il->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		il_rx_queue_update_write_ptr(il, rxq);
	}
}
310
/**
 * il4965_rx_allocate - move used buffers from rx_used to rx_free
 *
 * For each entry moved to rx_free a fresh page is allocated and DMA-mapped
 * for the device. Loops until rx_used is empty or allocation fails.
 * Called from process context (GFP_KERNEL) or atomic context (GFP_ATOMIC);
 * the lock is dropped around the allocation itself.
 *
 * (An earlier comment here described il4965_rx_replenish() -- the wrapper
 * below -- rather than this function.)
 */
static void
il4965_rx_allocate(struct il_priv *il, gfp_t priority)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		/* Stop as soon as there is nothing left to replenish */
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		/* Suppress allocation warnings unless we are running low */
		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (il->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				D_INFO("alloc_pages failed, " "order: %d\n",
				       il->hw_params.rx_page_order);

			if (rxq->free_count <= RX_LOW_WATERMARK &&
			    net_ratelimit())
				IL_ERR("Failed to alloc_pages with %s. "
				       "Only %u free buffers remaining.\n",
				       priority ==
				       GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
				       rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		/* Re-check: rx_used may have drained while we allocated */
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, il->hw_params.rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
		    pci_map_page(il->pci_dev, page, 0,
				 PAGE_SIZE << il->hw_params.rx_page_order,
				 PCI_DMA_FROMDEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
		il->alloc_rxb_page++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
397
398void
399il4965_rx_replenish(struct il_priv *il)
400{
401 unsigned long flags;
402
403 il4965_rx_allocate(il, GFP_KERNEL);
404
405 spin_lock_irqsave(&il->lock, flags);
406 il4965_rx_queue_restock(il);
407 spin_unlock_irqrestore(&il->lock, flags);
408}
409
/* Atomic-context replenish: GFP_ATOMIC allocation, then restock.
 * NOTE(review): unlike il4965_rx_replenish(), the restock here is not
 * wrapped in il->lock -- confirm callers provide the needed exclusion. */
void
il4965_rx_replenish_now(struct il_priv *il)
{
	il4965_rx_allocate(il, GFP_ATOMIC);

	il4965_rx_queue_restock(il);
}
417
/* Free the whole Rx buffer pool plus the RBD ring and status area.
 * The pool tracks pages (the old comment said SKBs): any entry with a
 * non-NULL page is still DMA-mapped for the device, so it is unmapped
 * and released here; detached pages must have been set to NULL.
 */
void
il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	/* Release the RBD circular buffer (4 bytes per RBD) and the
	 * device status write-back area */
	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
	dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}
444
445int
446il4965_rxq_stop(struct il_priv *il)
447{
448
449 /* stop Rx DMA */
450 il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
451 il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
452 FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
453
454 return 0;
455}
456
457int
458il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
459{
460 int idx = 0;
461 int band_offset = 0;
462
463 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
464 if (rate_n_flags & RATE_MCS_HT_MSK) {
465 idx = (rate_n_flags & 0xff);
466 return idx;
467 /* Legacy rate format, search for match in table */
468 } else {
469 if (band == IEEE80211_BAND_5GHZ)
470 band_offset = IL_FIRST_OFDM_RATE;
471 for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
472 if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
473 return idx - band_offset;
474 }
475
476 return -1;
477}
478
479static int
480il4965_calc_rssi(struct il_priv *il, struct il_rx_phy_res *rx_resp)
481{
482 /* data from PHY/DSP regarding signal strength, etc.,
483 * contents are always there, not configurable by host. */
484 struct il4965_rx_non_cfg_phy *ncphy =
485 (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
486 u32 agc =
487 (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >>
488 IL49_AGC_DB_POS;
489
490 u32 valid_antennae =
491 (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK)
492 >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
493 u8 max_rssi = 0;
494 u32 i;
495
496 /* Find max rssi among 3 possible receivers.
497 * These values are measured by the digital signal processor (DSP).
498 * They should stay fairly constant even as the signal strength varies,
499 * if the radio's automatic gain control (AGC) is working right.
500 * AGC value (see below) will provide the "interesting" info. */
501 for (i = 0; i < 3; i++)
502 if (valid_antennae & (1 << i))
503 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
504
505 D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
506 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
507 max_rssi, agc);
508
509 /* dBm = max_rssi dB - agc dB - constant.
510 * Higher AGC (higher radio gain) means lower signal. */
511 return max_rssi - agc - IL4965_RSSI_OFFSET;
512}
513
/*
 * il4965_translate_rx_status - rebuild an N_RX-style decrypt status word
 * from the N_RX_MPDU status bits, so both paths can share
 * il_set_decrypted_flag().
 */
static u32
il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
{
	u32 decrypt_out = 0;

	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
	    RX_RES_STATUS_STATION_FOUND)
		decrypt_out |=
		    (RX_RES_STATUS_STATION_FOUND |
		     RX_RES_STATUS_NO_STATION_INFO_MISMATCH);

	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);

	/* packet was not encrypted */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_NONE)
		return decrypt_out;

	/* packet was encrypted with unknown alg */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_ERR)
		return decrypt_out;

	/* decryption was not done in HW */
	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
	    RX_MPDU_RES_STATUS_DEC_DONE_MSK)
		return decrypt_out;

	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {

	case RX_RES_STATUS_SEC_TYPE_CCMP:
		/* alg is CCM: check MIC only */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
			/* Bad MIC */
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;

		break;

	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
			/* Bad TTAK */
			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
			break;
		}
		/* fall through if TTAK OK: TKIP with a good TTAK is checked
		 * by the same ICV test as WEP/other algorithms below */
	default:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	}

	D_RX("decrypt_in:0x%x decrypt_out = 0x%x\n", decrypt_in, decrypt_out);

	return decrypt_out;
}
573
/*
 * il4965_pass_packet_to_mac80211 - hand one received frame up the stack.
 *
 * Builds an skb whose payload is the Rx page attached as a fragment
 * (zero-copy), copies the rx_status into the skb control block, and
 * passes it to mac80211. On success the page's ownership moves to the
 * skb; rxb->page is cleared so the buffer pool won't free it again.
 */
static void
il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
			       u16 len, u32 ampdu_status, struct il_rx_buf *rxb,
			       struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* We only process data packets if the interface is open */
	if (unlikely(!il->is_open)) {
		D_DROP("Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!il->cfg->mod_params->sw_crypto &&
	    il_set_decrypted_flag(il, hdr, ampdu_status, stats))
		return;

	/* Small header-only skb; the frame body rides in the page frag */
	skb = dev_alloc_skb(128);
	if (!skb) {
		IL_ERR("dev_alloc_skb failed\n");
		return;
	}

	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);

	il_update_stats(il, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(il->hw, skb);
	/* page now belongs to the skb; drop our accounting reference */
	il->alloc_rxb_page--;
	rxb->page = NULL;
}
608
/* Called for N_RX (legacy ABG frames), or
 * N_RX_MPDU (HT high-throughput N frames). */
void
il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct ieee80211_hdr *header;
	/* NOTE(review): rx_status is stack-allocated and only the fields
	 * below are assigned (flag is explicitly zeroed) -- other fields
	 * are passed to mac80211 uninitialized; confirm that is intended. */
	struct ieee80211_rx_status rx_status;
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_rx_phy_res *phy_res;
	__le32 rx_pkt_status;
	struct il_rx_mpdu_res_start *amsdu;
	u32 len;
	u32 ampdu_status;
	u32 rate_n_flags;

	/**
	 * N_RX and N_RX_MPDU are handled differently.
	 * N_RX: physical layer info is in this buffer
	 * N_RX_MPDU: physical layer info was sent in separate
	 * command and cached in il->last_phy_res
	 *
	 * Here we set up local variables depending on which command is
	 * received.
	 */
	if (pkt->hdr.cmd == N_RX) {
		phy_res = (struct il_rx_phy_res *)pkt->u.raw;
		/* Frame header follows the PHY result and the variable-size
		 * config PHY data */
		header =
		    (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) +
					     phy_res->cfg_phy_cnt);

		len = le16_to_cpu(phy_res->byte_count);
		/* Status word sits immediately after the frame body */
		rx_pkt_status =
		    *(__le32 *) (pkt->u.raw + sizeof(*phy_res) +
				 phy_res->cfg_phy_cnt + len);
		ampdu_status = le32_to_cpu(rx_pkt_status);
	} else {
		if (!il->_4965.last_phy_res_valid) {
			IL_ERR("MPDU frame without cached PHY data\n");
			return;
		}
		phy_res = &il->_4965.last_phy_res;
		amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
		len = le16_to_cpu(amsdu->byte_count);
		rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len);
		/* MPDU status bits use a different layout; translate them */
		ampdu_status =
		    il4965_translate_rx_status(il, le32_to_cpu(rx_pkt_status));
	}

	if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
		D_DROP("dsp size out of range [0,20]: %d/n",
		       phy_res->cfg_phy_cnt);
		return;
	}

	/* Drop frames the device flagged as CRC or FIFO-overflow errors */
	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
		return;
	}

	/* This will be used in several places later */
	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);

	/* rx_status carries information about the packet to mac80211 */
	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
	rx_status.band =
	    (phy_res->
	     phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
	    IEEE80211_BAND_5GHZ;
	rx_status.freq =
	    ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
					   rx_status.band);
	rx_status.rate_idx =
	    il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
	rx_status.flag = 0;

	/* TSF isn't reliable. In order to allow smooth user experience,
	 * this W/A doesn't propagate it to the mac80211 */
	/*rx_status.flag |= RX_FLAG_MACTIME_MPDU; */

	il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);

	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
	rx_status.signal = il4965_calc_rssi(il, phy_res);

	il_dbg_log_rx_data_frame(il, len, header);
	D_STATS("Rssi %d, TSF %llu\n", rx_status.signal,
		(unsigned long long)rx_status.mactime);

	/*
	 * "antenna number"
	 *
	 * It seems that the antenna field in the phy flags value
	 * is actually a bit field. This is undefined by radiotap,
	 * it wants an actual antenna number but I always get "7"
	 * for most legacy frames I receive indicating that the
	 * same frame was received on all three RX chains.
	 *
	 * I think this field should be removed in favor of a
	 * new 802.11n radiotap field "RX chains" that is defined
	 * as a bitmask.
	 */
	rx_status.antenna =
	    (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
	    RX_RES_PHY_FLAGS_ANTENNA_POS;

	/* set the preamble flag if appropriate */
	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		rx_status.flag |= RX_FLAG_SHORTPRE;

	/* Set up the HT phy flags */
	if (rate_n_flags & RATE_MCS_HT_MSK)
		rx_status.flag |= RX_FLAG_HT;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		rx_status.flag |= RX_FLAG_40MHZ;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rx_status.flag |= RX_FLAG_SHORT_GI;

	il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
				       &rx_status);
}
731
732/* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY).
733 * This will be used later in il_hdl_rx() for N_RX_MPDU. */
734void
735il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
736{
737 struct il_rx_pkt *pkt = rxb_addr(rxb);
738 il->_4965.last_phy_res_valid = true;
739 memcpy(&il->_4965.last_phy_res, pkt->u.raw,
740 sizeof(struct il_rx_phy_res));
741}
742
/*
 * il4965_get_channels_for_scan - fill the scan command's channel array.
 *
 * Walks il->scan_request->channels, keeps only channels of @band that are
 * valid, and writes one il_scan_channel entry per kept channel into
 * @scan_ch: active or passive type, dwell times, and default tx gain.
 *
 * Returns the number of entries written.
 */
static int
il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
			     enum ieee80211_band band, u8 is_active,
			     u8 n_probes, struct il_scan_channel *scan_ch)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct il_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;
	u16 channel;

	sband = il_get_hw_mode(il, band);
	if (!sband)
		return 0;

	active_dwell = il_get_active_dwell_time(il, band, n_probes);
	passive_dwell = il_get_passive_dwell_time(il, band, vif);

	/* Passive dwell must exceed active dwell */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
		chan = il->scan_request->channels[i];

		if (chan->band != band)
			continue;

		channel = chan->hw_value;
		scan_ch->channel = cpu_to_le16(channel);

		ch_info = il_get_channel_info(il, band, channel);
		if (!il_is_channel_valid(ch_info)) {
			D_SCAN("Channel %d is INVALID for this band.\n",
			       channel);
			continue;
		}

		/* Passive scan when not probing, or when the channel/regdom
		 * forbids active scanning */
		if (!is_active || il_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		else
			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;

		if (n_probes)
			scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes);

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);

		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;

		/* NOTE: if we were doing 6Mb OFDM for scans we'd use
		 * power level:
		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
		 */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));

		D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", channel,
		       le32_to_cpu(scan_ch->type),
		       (scan_ch->
			type & SCAN_CHANNEL_TYPE_ACTIVE) ? "ACTIVE" : "PASSIVE",
		       (scan_ch->
			type & SCAN_CHANNEL_TYPE_ACTIVE) ? active_dwell :
		       passive_dwell);

		scan_ch++;
		added++;
	}

	D_SCAN("total channels to scan %d\n", added);
	return added;
}
821
822static inline u32
823il4965_ant_idx_to_flags(u8 ant_idx)
824{
825 return BIT(ant_idx) << RATE_MCS_ANT_POS;
826}
827
/*
 * il4965_request_scan - build and send the C_SCAN host command
 *
 * Assembles the full scan command (direct-probe SSIDs, TX command for
 * probe requests, RX chain configuration, probe-request IEs, and the
 * per-channel list) into il->scan_cmd and submits it synchronously.
 *
 * Caller must hold il->mutex.  Returns 0 on success, -ENOMEM if the scan
 * command buffer cannot be allocated, -EIO on an invalid scan band or an
 * empty channel list, or the error from il_send_cmd_sync().
 */
int
il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
{
	struct il_host_cmd cmd = {
		.id = C_SCAN,
		.len = sizeof(struct il_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct il_scan_cmd *scan;
	struct il_rxon_context *ctx = &il->ctx;
	u32 rate_flags = 0;
	u16 cmd_len;
	u16 rx_chain = 0;
	enum ieee80211_band band;
	u8 n_probes = 0;
	u8 rx_ant = il->hw_params.valid_rx_ant;
	u8 rate;
	bool is_active = false;
	int chan_mod;
	u8 active_chains;
	u8 scan_tx_antennas = il->hw_params.valid_tx_ant;
	int ret;

	lockdep_assert_held(&il->mutex);

	ctx = il_rxon_ctx_from_vif(vif);

	/* Lazily allocate the (huge) scan command buffer; it is kept for
	 * reuse across scans and freed elsewhere on teardown. */
	if (!il->scan_cmd) {
		il->scan_cmd =
		    kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE,
			    GFP_KERNEL);
		if (!il->scan_cmd) {
			D_SCAN("fail to allocate memory for scan\n");
			return -ENOMEM;
		}
	}
	scan = il->scan_cmd;
	memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
	scan->quiet_time = IL_ACTIVE_QUIET_TIME;

	if (il_is_any_associated(il)) {
		u16 interval;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		/* While associated, limit time off-channel and encode the
		 * suspend time in the firmware's beacon-relative format
		 * (beacon count in the top bits, remainder in TU below). */
		D_INFO("Scanning while associated...\n");
		interval = vif->bss_conf.beacon_int;

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;

		extra = (suspend_time / interval) << 22;
		scan_suspend_time =
		    (extra | ((suspend_time % interval) * 1024));
		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		D_SCAN("suspend_time 0x%X beacon interval %d\n",
		       scan_suspend_time, interval);
	}

	if (il->scan_request->n_ssids) {
		int i, p = 0;
		D_SCAN("Kicking off active scan\n");
		/* Copy each non-empty requested SSID into the command's
		 * direct-probe list; n_probes counts the copied entries. */
		for (i = 0; i < il->scan_request->n_ssids; i++) {
			/* always does wildcard anyway */
			if (!il->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
			    il->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       il->scan_request->ssids[i].ssid,
			       il->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		D_SCAN("Start passive scan.\n");

	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = ctx->bcast_sta_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* Pick the probe-request TX rate for the band; on 2.4 GHz CCK 1M
	 * is used unless the active RXON is in pure-HT40 channel mode. */
	switch (il->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		chan_mod =
		    le32_to_cpu(il->ctx.active.
				flags & RXON_FLG_CHANNEL_MODE_MSK) >>
		    RXON_FLG_CHANNEL_MODE_POS;
		if (chan_mod == CHANNEL_MODE_PURE_40) {
			rate = RATE_6M_PLCP;
		} else {
			rate = RATE_1M_PLCP;
			rate_flags = RATE_MCS_CCK_MSK;
		}
		break;
	case IEEE80211_BAND_5GHZ:
		rate = RATE_6M_PLCP;
		break;
	default:
		IL_WARN("Invalid scan band\n");
		return -EIO;
	}

	/*
	 * If active scanning is requested but a certain channel is
	 * marked passive, we can do active scanning if we detect
	 * transmissions.
	 *
	 * There is an issue with some firmware versions that triggers
	 * a sysassert on a "good CRC threshold" of zero (== disabled),
	 * on a radar channel even though this means that we should NOT
	 * send probes.
	 *
	 * The "good CRC threshold" is the number of frames that we
	 * need to receive during our dwell time on a channel before
	 * sending out probes -- setting this to a huge value will
	 * mean we never reach it, but at the same time work around
	 * the aforementioned issue. Thus use IL_GOOD_CRC_TH_NEVER
	 * here instead of IL_GOOD_CRC_TH_DISABLED.
	 */
	scan->good_CRC_th =
	    is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;

	band = il->scan_band;

	/* allow the per-device config to override the RX antenna set */
	if (il->cfg->scan_rx_antennas[band])
		rx_ant = il->cfg->scan_rx_antennas[band];

	/* rotate the TX antenna used for scanning across the valid set */
	il->scan_tx_ant[band] =
	    il4965_toggle_tx_ant(il, il->scan_tx_ant[band], scan_tx_antennas);
	rate_flags |= il4965_ant_idx_to_flags(il->scan_tx_ant[band]);
	scan->tx_cmd.rate_n_flags =
	    il4965_hw_set_rate_n_flags(rate, rate_flags);

	/* In power save mode use one chain, otherwise use all chains */
	if (test_bit(S_POWER_PMI, &il->status)) {
		/* rx_ant has been set to all valid chains previously */
		active_chains =
		    rx_ant & ((u8) (il->chain_noise_data.active_chains));
		if (!active_chains)
			active_chains = rx_ant;

		D_SCAN("chain_noise_data.active_chains: %u\n",
		       il->chain_noise_data.active_chains);

		rx_ant = il4965_first_antenna(active_chains);
	}

	/* MIMO is not used here, but value is required */
	rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
	scan->rx_chain = cpu_to_le16(rx_chain);

	/* Build the probe request (with the requested IEs appended) into
	 * the variable-length data area of the scan command. */
	cmd_len =
	    il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
			      vif->addr, il->scan_request->ie,
			      il->scan_request->ie_len,
			      IL_MAX_SCAN_SIZE - sizeof(*scan));
	scan->tx_cmd.len = cpu_to_le16(cmd_len);

	scan->filter_flags |=
	    (RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK);

	/* Channel entries follow the probe request in scan->data */
	scan->channel_count =
	    il4965_get_channels_for_scan(il, vif, band, is_active, n_probes,
					 (void *)&scan->data[cmd_len]);
	if (scan->channel_count == 0) {
		D_SCAN("channel count %d\n", scan->channel_count);
		return -EIO;
	}

	/* Final command length: fixed header + probe request + channels */
	cmd.len +=
	    le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct il_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	set_bit(S_SCAN_HW, &il->status);

	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		clear_bit(S_SCAN_HW, &il->status);

	return ret;
}
1022
1023int
1024il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
1025 bool add)
1026{
1027 struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
1028
1029 if (add)
1030 return il4965_add_bssid_station(il, vif_priv->ctx,
1031 vif->bss_conf.bssid,
1032 &vif_priv->ibss_bssid_sta_id);
1033 return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
1034 vif->bss_conf.bssid);
1035}
1036
1037void
1038il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, int freed)
1039{
1040 lockdep_assert_held(&il->sta_lock);
1041
1042 if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed)
1043 il->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1044 else {
1045 D_TX("free more than tfds_in_queue (%u:%d)\n",
1046 il->stations[sta_id].tid[tid].tfds_in_queue, freed);
1047 il->stations[sta_id].tid[tid].tfds_in_queue = 0;
1048 }
1049}
1050
1051#define IL_TX_QUEUE_MSK 0xfffff
1052
1053static bool
1054il4965_is_single_rx_stream(struct il_priv *il)
1055{
1056 return il->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
1057 il->current_ht_config.single_chain_sufficient;
1058}
1059
1060#define IL_NUM_RX_CHAINS_MULTIPLE 3
1061#define IL_NUM_RX_CHAINS_SINGLE 2
1062#define IL_NUM_IDLE_CHAINS_DUAL 2
1063#define IL_NUM_IDLE_CHAINS_SINGLE 1
1064
1065/*
1066 * Determine how many receiver/antenna chains to use.
1067 *
1068 * More provides better reception via diversity. Fewer saves power
1069 * at the expense of throughput, but only when not in powersave to
1070 * start with.
1071 *
1072 * MIMO (dual stream) requires at least 2, but works better with 3.
1073 * This does not determine *which* chains to use, just how many.
1074 */
1075static int
1076il4965_get_active_rx_chain_count(struct il_priv *il)
1077{
1078 /* # of Rx chains to use when expecting MIMO. */
1079 if (il4965_is_single_rx_stream(il))
1080 return IL_NUM_RX_CHAINS_SINGLE;
1081 else
1082 return IL_NUM_RX_CHAINS_MULTIPLE;
1083}
1084
1085/*
1086 * When we are in power saving mode, unless device support spatial
1087 * multiplexing power save, use the active count for rx chain count.
1088 */
1089static int
1090il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt)
1091{
1092 /* # Rx chains when idling, depending on SMPS mode */
1093 switch (il->current_ht_config.smps) {
1094 case IEEE80211_SMPS_STATIC:
1095 case IEEE80211_SMPS_DYNAMIC:
1096 return IL_NUM_IDLE_CHAINS_SINGLE;
1097 case IEEE80211_SMPS_OFF:
1098 return active_cnt;
1099 default:
1100 WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps);
1101 return active_cnt;
1102 }
1103}
1104
1105/* up to 4 chains */
1106static u8
1107il4965_count_chain_bitmap(u32 chain_bitmap)
1108{
1109 u8 res;
1110 res = (chain_bitmap & BIT(0)) >> 0;
1111 res += (chain_bitmap & BIT(1)) >> 1;
1112 res += (chain_bitmap & BIT(2)) >> 2;
1113 res += (chain_bitmap & BIT(3)) >> 3;
1114 return res;
1115}
1116
1117/**
1118 * il4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
1119 *
1120 * Selects how many and which Rx receivers/antennas/chains to use.
1121 * This should not be used for scan command ... it puts data in wrong place.
1122 */
1123void
1124il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx)
1125{
1126 bool is_single = il4965_is_single_rx_stream(il);
1127 bool is_cam = !test_bit(S_POWER_PMI, &il->status);
1128 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
1129 u32 active_chains;
1130 u16 rx_chain;
1131
1132 /* Tell uCode which antennas are actually connected.
1133 * Before first association, we assume all antennas are connected.
1134 * Just after first association, il4965_chain_noise_calibration()
1135 * checks which antennas actually *are* connected. */
1136 if (il->chain_noise_data.active_chains)
1137 active_chains = il->chain_noise_data.active_chains;
1138 else
1139 active_chains = il->hw_params.valid_rx_ant;
1140
1141 rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
1142
1143 /* How many receivers should we use? */
1144 active_rx_cnt = il4965_get_active_rx_chain_count(il);
1145 idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt);
1146
1147 /* correct rx chain count according hw settings
1148 * and chain noise calibration
1149 */
1150 valid_rx_cnt = il4965_count_chain_bitmap(active_chains);
1151 if (valid_rx_cnt < active_rx_cnt)
1152 active_rx_cnt = valid_rx_cnt;
1153
1154 if (valid_rx_cnt < idle_rx_cnt)
1155 idle_rx_cnt = valid_rx_cnt;
1156
1157 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
1158 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
1159
1160 ctx->staging.rx_chain = cpu_to_le16(rx_chain);
1161
1162 if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam)
1163 ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
1164 else
1165 ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
1166
1167 D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", ctx->staging.rx_chain,
1168 active_rx_cnt, idle_rx_cnt);
1169
1170 WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
1171 active_rx_cnt < idle_rx_cnt);
1172}
1173
1174u8
1175il4965_toggle_tx_ant(struct il_priv *il, u8 ant, u8 valid)
1176{
1177 int i;
1178 u8 ind = ant;
1179
1180 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
1181 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
1182 if (valid & BIT(ind))
1183 return ind;
1184 }
1185 return ant;
1186}
1187
/*
 * il4965_get_fh_string - map an FH register address to its name
 *
 * Each IL_CMD() entry expands to "case <reg>: return "<reg>";", so this
 * returns the register's symbolic name for debug output, or "UNKNOWN"
 * for any address not in the table.
 */
static const char *
il4965_get_fh_string(int cmd)
{
	switch (cmd) {
	IL_CMD(FH49_RSCSR_CHNL0_STTS_WPTR_REG);
	IL_CMD(FH49_RSCSR_CHNL0_RBDCB_BASE_REG);
	IL_CMD(FH49_RSCSR_CHNL0_WPTR);
	IL_CMD(FH49_MEM_RCSR_CHNL0_CONFIG_REG);
	IL_CMD(FH49_MEM_RSSR_SHARED_CTRL_REG);
	IL_CMD(FH49_MEM_RSSR_RX_STATUS_REG);
	IL_CMD(FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IL_CMD(FH49_TSSR_TX_STATUS_REG);
	IL_CMD(FH49_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}
1205
/*
 * il4965_dump_fh - dump Flow Handler (DMA engine) register values
 *
 * With CONFIG_IWLEGACY_DEBUG and @display set, allocates a buffer into
 * *@buf (ownership passes to the caller, who must kfree it), formats all
 * FH registers into it and returns the number of bytes written.
 * Otherwise the registers are written to the error log and 0 is
 * returned.  Returns -ENOMEM if the buffer allocation fails.
 */
int
il4965_dump_fh(struct il_priv *il, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLEGACY_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	/* registers to dump, in display order */
	static const u32 fh_tbl[] = {
		FH49_RSCSR_CHNL0_STTS_WPTR_REG,
		FH49_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH49_RSCSR_CHNL0_WPTR,
		FH49_MEM_RCSR_CHNL0_CONFIG_REG,
		FH49_MEM_RSSR_SHARED_CTRL_REG,
		FH49_MEM_RSSR_RX_STATUS_REG,
		FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH49_TSSR_TX_STATUS_REG,
		FH49_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLEGACY_DEBUG
	if (display) {
		/* room for one ~48-char line per register plus a header */
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos +=
		    scnprintf(*buf + pos, bufsz - pos, "FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos +=
			    scnprintf(*buf + pos, bufsz - pos,
				      " %34s: 0X%08x\n",
				      il4965_get_fh_string(fh_tbl[i]),
				      il_rd(il, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IL_ERR("FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IL_ERR(" %34s: 0X%08x\n", il4965_get_fh_string(fh_tbl[i]),
		       il_rd(il, fh_tbl[i]));
	}
	return 0;
}
1250
1251void
1252il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
1253{
1254 struct il_rx_pkt *pkt = rxb_addr(rxb);
1255 struct il_missed_beacon_notif *missed_beacon;
1256
1257 missed_beacon = &pkt->u.missed_beacon;
1258 if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
1259 il->missed_beacon_threshold) {
1260 D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
1261 le32_to_cpu(missed_beacon->consecutive_missed_beacons),
1262 le32_to_cpu(missed_beacon->total_missed_becons),
1263 le32_to_cpu(missed_beacon->num_recvd_beacons),
1264 le32_to_cpu(missed_beacon->num_expected_beacons));
1265 if (!test_bit(S_SCANNING, &il->status))
1266 il4965_init_sensitivity(il);
1267 }
1268}
1269
1270/* Calculate noise level, based on measurements during network silence just
1271 * before arriving beacon. This measurement can be done only if we know
1272 * exactly when to expect beacons, therefore only when we're associated. */
1273static void
1274il4965_rx_calc_noise(struct il_priv *il)
1275{
1276 struct stats_rx_non_phy *rx_info;
1277 int num_active_rx = 0;
1278 int total_silence = 0;
1279 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
1280 int last_rx_noise;
1281
1282 rx_info = &(il->_4965.stats.rx.general);
1283 bcn_silence_a =
1284 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
1285 bcn_silence_b =
1286 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
1287 bcn_silence_c =
1288 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
1289
1290 if (bcn_silence_a) {
1291 total_silence += bcn_silence_a;
1292 num_active_rx++;
1293 }
1294 if (bcn_silence_b) {
1295 total_silence += bcn_silence_b;
1296 num_active_rx++;
1297 }
1298 if (bcn_silence_c) {
1299 total_silence += bcn_silence_c;
1300 num_active_rx++;
1301 }
1302
1303 /* Average among active antennas */
1304 if (num_active_rx)
1305 last_rx_noise = (total_silence / num_active_rx) - 107;
1306 else
1307 last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE;
1308
1309 D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", bcn_silence_a,
1310 bcn_silence_b, bcn_silence_c, last_rx_noise);
1311}
1312
1313#ifdef CONFIG_IWLEGACY_DEBUGFS
1314/*
1315 * based on the assumption of all stats counter are in DWORD
1316 * FIXME: This function is for debugging, do not deal with
1317 * the case of counters roll-over.
1318 */
/*
 * il4965_accumulative_stats - fold a fresh stats notification into the
 * debugfs accumulators
 *
 * Treats the whole stats structure as an array of __le32 counters and,
 * for every counter that increased since the previous notification, adds
 * the increment to accum_stats and tracks the largest single-step delta
 * in max_delta.  The first word (the flags field) is skipped.
 *
 * @stats: the raw __le32 counters from the new notification.
 */
static void
il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
{
	int i, size;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;
	struct stats_general_common *general, *accum_general;
	struct stats_tx *tx, *accum_tx;

	/* walk the previous snapshot and the accumulators in lockstep
	 * with the incoming counters */
	prev_stats = (__le32 *) &il->_4965.stats;
	accum_stats = (u32 *) &il->_4965.accum_stats;
	size = sizeof(struct il_notif_stats);
	general = &il->_4965.stats.general.common;
	accum_general = &il->_4965.accum_stats.general.common;
	tx = &il->_4965.stats.tx;
	accum_tx = &il->_4965.accum_stats.tx;
	delta = (u32 *) &il->_4965.delta_stats;
	max_delta = (u32 *) &il->_4965.max_delta;

	/* i counts bytes; start past the first word to skip the flags */
	for (i = sizeof(__le32); i < size;
	     i +=
	     sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
	     accum_stats++) {
		/* only counters that moved forward are accumulated; a
		 * decrease is treated as a firmware counter reset */
		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
			*delta =
			    (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
			*accum_stats += *delta;
			if (*delta > *max_delta)
				*max_delta = *delta;
		}
	}

	/* reset accumulative stats for "no-counter" type stats */
	accum_general->temperature = general->temperature;
	accum_general->ttl_timestamp = general->ttl_timestamp;
}
1356#endif
1357
1358#define REG_RECALIB_PERIOD (60)
1359
/*
 * il4965_hdl_stats - handle a periodic statistics notification (N_STATS)
 *
 * Copies the new statistics into il->_4965.stats, accumulates debugfs
 * counters, re-arms the stats watchdog timer, and kicks off noise
 * calculation / run-time calibration when appropriate.
 */
void
il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	int change;
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	D_RX("Statistics notification received (%d vs %d).\n",
	     (int)sizeof(struct il_notif_stats),
	     le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);

	/* detect a temperature change or an HT40-mode flag change, which
	 * requires a temperature recalibration below */
	change =
	    ((il->_4965.stats.general.common.temperature !=
	      pkt->u.stats.general.common.temperature) ||
	     ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) !=
	      (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)));
#ifdef CONFIG_IWLEGACY_DEBUGFS
	il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats);
#endif

	/* TODO: reading some of stats is unneeded */
	memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats));

	set_bit(S_STATS, &il->status);

	/* Reschedule the stats timer to occur in
	 * REG_RECALIB_PERIOD seconds to ensure we get a
	 * thermal update even if the uCode doesn't give
	 * us one */
	mod_timer(&il->stats_periodic,
		  jiffies + msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));

	/* Noise calculation relies on beacon timing, so skip it while
	 * scanning; also only for unsolicited (periodic) notifications. */
	if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
	    (pkt->hdr.cmd == N_STATS)) {
		il4965_rx_calc_noise(il);
		queue_work(il->workqueue, &il->run_time_calib_work);
	}
	if (il->cfg->ops->lib->temp_ops.temperature && change)
		il->cfg->ops->lib->temp_ops.temperature(il);
}
1399
/*
 * il4965_hdl_c_stats - handle the reply to a statistics request (C_STATS)
 *
 * If the uCode indicates its counters were cleared, reset the debugfs
 * accumulators to match, then process the payload like a regular
 * statistics notification.
 */
void
il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) {
#ifdef CONFIG_IWLEGACY_DEBUGFS
		memset(&il->_4965.accum_stats, 0,
		       sizeof(struct il_notif_stats));
		memset(&il->_4965.delta_stats, 0,
		       sizeof(struct il_notif_stats));
		memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats));
#endif
		D_RX("Statistics have been cleared\n");
	}
	il4965_hdl_stats(il, rxb);
}
1417
1418
1419/*
1420 * mac80211 queues, ACs, hardware queues, FIFOs.
1421 *
1422 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
1423 *
1424 * Mac80211 uses the following numbers, which we get as from it
1425 * by way of skb_get_queue_mapping(skb):
1426 *
1427 * VO 0
1428 * VI 1
1429 * BE 2
1430 * BK 3
1431 *
1432 *
1433 * Regular (not A-MPDU) frames are put into hardware queues corresponding
1434 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
1435 * own queue per aggregation session (RA/TID combination), such queues are
1436 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
1437 * order to map frames to the right queue, we also need an AC->hw queue
1438 * mapping. This is implemented here.
1439 *
1440 * Due to the way hw queues are set up (by the hw specific modules like
1441 * 4965.c), the AC->hw queue mapping is the identity
1442 * mapping.
1443 */
1444
/* Map each TID (array index, 0-7) to its mac80211 access category. */
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,	/* TID 0 */
	IEEE80211_AC_BK,	/* TID 1 */
	IEEE80211_AC_BK,	/* TID 2 */
	IEEE80211_AC_BE,	/* TID 3 */
	IEEE80211_AC_VI,	/* TID 4 */
	IEEE80211_AC_VI,	/* TID 5 */
	IEEE80211_AC_VO,	/* TID 6 */
	IEEE80211_AC_VO		/* TID 7 */
};
1455
1456static inline int
1457il4965_get_ac_from_tid(u16 tid)
1458{
1459 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
1460 return tid_to_ac[tid];
1461
1462 /* no support for TIDs 8-15 yet */
1463 return -EINVAL;
1464}
1465
1466static inline int
1467il4965_get_fifo_from_tid(struct il_rxon_context *ctx, u16 tid)
1468{
1469 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
1470 return ctx->ac_to_fifo[tid_to_ac[tid]];
1471
1472 /* no support for TIDs 8-15 yet */
1473 return -EINVAL;
1474}
1475
1476/*
1477 * handle build C_TX command notification.
1478 */
1479static void
1480il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
1481 struct il_tx_cmd *tx_cmd,
1482 struct ieee80211_tx_info *info,
1483 struct ieee80211_hdr *hdr, u8 std_id)
1484{
1485 __le16 fc = hdr->frame_control;
1486 __le32 tx_flags = tx_cmd->tx_flags;
1487
1488 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
1489 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
1490 tx_flags |= TX_CMD_FLG_ACK_MSK;
1491 if (ieee80211_is_mgmt(fc))
1492 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
1493 if (ieee80211_is_probe_resp(fc) &&
1494 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
1495 tx_flags |= TX_CMD_FLG_TSF_MSK;
1496 } else {
1497 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
1498 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
1499 }
1500
1501 if (ieee80211_is_back_req(fc))
1502 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
1503
1504 tx_cmd->sta_id = std_id;
1505 if (ieee80211_has_morefrags(fc))
1506 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
1507
1508 if (ieee80211_is_data_qos(fc)) {
1509 u8 *qc = ieee80211_get_qos_ctl(hdr);
1510 tx_cmd->tid_tspec = qc[0] & 0xf;
1511 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
1512 } else {
1513 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
1514 }
1515
1516 il_tx_cmd_protection(il, info, fc, &tx_flags);
1517
1518 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
1519 if (ieee80211_is_mgmt(fc)) {
1520 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
1521 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
1522 else
1523 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
1524 } else {
1525 tx_cmd->timeout.pm_frame_timeout = 0;
1526 }
1527
1528 tx_cmd->driver_txop = 0;
1529 tx_cmd->tx_flags = tx_flags;
1530 tx_cmd->next_frame_len = 0;
1531}
1532
1533#define RTS_DFAULT_RETRY_LIMIT 60
1534
1535static void
1536il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
1537 struct ieee80211_tx_info *info, __le16 fc)
1538{
1539 u32 rate_flags;
1540 int rate_idx;
1541 u8 rts_retry_limit;
1542 u8 data_retry_limit;
1543 u8 rate_plcp;
1544
1545 /* Set retry limit on DATA packets and Probe Responses */
1546 if (ieee80211_is_probe_resp(fc))
1547 data_retry_limit = 3;
1548 else
1549 data_retry_limit = IL4965_DEFAULT_TX_RETRY;
1550 tx_cmd->data_retry_limit = data_retry_limit;
1551
1552 /* Set retry limit on RTS packets */
1553 rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
1554 if (data_retry_limit < rts_retry_limit)
1555 rts_retry_limit = data_retry_limit;
1556 tx_cmd->rts_retry_limit = rts_retry_limit;
1557
1558 /* DATA packets will use the uCode station table for rate/antenna
1559 * selection */
1560 if (ieee80211_is_data(fc)) {
1561 tx_cmd->initial_rate_idx = 0;
1562 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
1563 return;
1564 }
1565
1566 /**
1567 * If the current TX rate stored in mac80211 has the MCS bit set, it's
1568 * not really a TX rate. Thus, we use the lowest supported rate for
1569 * this band. Also use the lowest supported rate if the stored rate
1570 * idx is invalid.
1571 */
1572 rate_idx = info->control.rates[0].idx;
1573 if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
1574 || rate_idx > RATE_COUNT_LEGACY)
1575 rate_idx =
1576 rate_lowest_index(&il->bands[info->band],
1577 info->control.sta);
1578 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
1579 if (info->band == IEEE80211_BAND_5GHZ)
1580 rate_idx += IL_FIRST_OFDM_RATE;
1581 /* Get PLCP rate for tx_cmd->rate_n_flags */
1582 rate_plcp = il_rates[rate_idx].plcp;
1583 /* Zero out flags for this packet */
1584 rate_flags = 0;
1585
1586 /* Set CCK flag as needed */
1587 if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE)
1588 rate_flags |= RATE_MCS_CCK_MSK;
1589
1590 /* Set up antennas */
1591 il->mgmt_tx_ant =
1592 il4965_toggle_tx_ant(il, il->mgmt_tx_ant,
1593 il->hw_params.valid_tx_ant);
1594
1595 rate_flags |= il4965_ant_idx_to_flags(il->mgmt_tx_ant);
1596
1597 /* Set the rate in the TX cmd */
1598 tx_cmd->rate_n_flags =
1599 il4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
1600}
1601
/*
 * il4965_tx_cmd_build_hwcrypto - fill the TX command's security fields
 *
 * Programs sec_ctl and the key material in @tx_cmd according to the
 * hardware key attached to this frame (info->control.hw_key), so the
 * device can encrypt the frame on transmit.
 */
static void
il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
			     struct il_tx_cmd *tx_cmd, struct sk_buff *skb_frag,
			     int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		/* aggregated CCMP frames need special uCode handling */
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		D_TX("tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		/* derive the per-packet phase-2 key for this fragment */
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		D_TX("tx_cmd with tkip hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |=
		    (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) <<
		     TX_CMD_SEC_SHIFT);

		/* WEP key material starts at offset 3 in the key array */
		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		D_TX("Configuring packet for WEP encryption " "with key %d\n",
		     keyconf->keyidx);
		break;

	default:
		IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}
1643
1644/*
1645 * start C_TX command process
1646 */
1647int
1648il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
1649{
1650 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1651 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1652 struct ieee80211_sta *sta = info->control.sta;
1653 struct il_station_priv *sta_priv = NULL;
1654 struct il_tx_queue *txq;
1655 struct il_queue *q;
1656 struct il_device_cmd *out_cmd;
1657 struct il_cmd_meta *out_meta;
1658 struct il_tx_cmd *tx_cmd;
1659 struct il_rxon_context *ctx = &il->ctx;
1660 int txq_id;
1661 dma_addr_t phys_addr;
1662 dma_addr_t txcmd_phys;
1663 dma_addr_t scratch_phys;
1664 u16 len, firstlen, secondlen;
1665 u16 seq_number = 0;
1666 __le16 fc;
1667 u8 hdr_len;
1668 u8 sta_id;
1669 u8 wait_write_ptr = 0;
1670 u8 tid = 0;
1671 u8 *qc = NULL;
1672 unsigned long flags;
1673 bool is_agg = false;
1674
1675 if (info->control.vif)
1676 ctx = il_rxon_ctx_from_vif(info->control.vif);
1677
1678 spin_lock_irqsave(&il->lock, flags);
1679 if (il_is_rfkill(il)) {
1680 D_DROP("Dropping - RF KILL\n");
1681 goto drop_unlock;
1682 }
1683
1684 fc = hdr->frame_control;
1685
1686#ifdef CONFIG_IWLEGACY_DEBUG
1687 if (ieee80211_is_auth(fc))
1688 D_TX("Sending AUTH frame\n");
1689 else if (ieee80211_is_assoc_req(fc))
1690 D_TX("Sending ASSOC frame\n");
1691 else if (ieee80211_is_reassoc_req(fc))
1692 D_TX("Sending REASSOC frame\n");
1693#endif
1694
1695 hdr_len = ieee80211_hdrlen(fc);
1696
1697 /* For management frames use broadcast id to do not break aggregation */
1698 if (!ieee80211_is_data(fc))
1699 sta_id = ctx->bcast_sta_id;
1700 else {
1701 /* Find idx into station table for destination station */
1702 sta_id = il_sta_id_or_broadcast(il, ctx, info->control.sta);
1703
1704 if (sta_id == IL_INVALID_STATION) {
1705 D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
1706 goto drop_unlock;
1707 }
1708 }
1709
1710 D_TX("station Id %d\n", sta_id);
1711
1712 if (sta)
1713 sta_priv = (void *)sta->drv_priv;
1714
1715 if (sta_priv && sta_priv->asleep &&
1716 (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
1717 /*
1718 * This sends an asynchronous command to the device,
1719 * but we can rely on it being processed before the
1720 * next frame is processed -- and the next frame to
1721 * this station is the one that will consume this
1722 * counter.
1723 * For now set the counter to just 1 since we do not
1724 * support uAPSD yet.
1725 */
1726 il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
1727 }
1728
1729 /*
1730 * Send this frame after DTIM -- there's a special queue
1731 * reserved for this for contexts that support AP mode.
1732 */
1733 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
1734 txq_id = ctx->mcast_queue;
1735 /*
1736 * The microcode will clear the more data
1737 * bit in the last frame it transmits.
1738 */
1739 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1740 } else
1741 txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
1742
1743 /* irqs already disabled/saved above when locking il->lock */
1744 spin_lock(&il->sta_lock);
1745
1746 if (ieee80211_is_data_qos(fc)) {
1747 qc = ieee80211_get_qos_ctl(hdr);
1748 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
1749 if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
1750 spin_unlock(&il->sta_lock);
1751 goto drop_unlock;
1752 }
1753 seq_number = il->stations[sta_id].tid[tid].seq_number;
1754 seq_number &= IEEE80211_SCTL_SEQ;
1755 hdr->seq_ctrl =
1756 hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
1757 hdr->seq_ctrl |= cpu_to_le16(seq_number);
1758 seq_number += 0x10;
1759 /* aggregation is on for this <sta,tid> */
1760 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
1761 il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
1762 txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
1763 is_agg = true;
1764 }
1765 }
1766
1767 txq = &il->txq[txq_id];
1768 q = &txq->q;
1769
1770 if (unlikely(il_queue_space(q) < q->high_mark)) {
1771 spin_unlock(&il->sta_lock);
1772 goto drop_unlock;
1773 }
1774
1775 if (ieee80211_is_data_qos(fc)) {
1776 il->stations[sta_id].tid[tid].tfds_in_queue++;
1777 if (!ieee80211_has_morefrags(fc))
1778 il->stations[sta_id].tid[tid].seq_number = seq_number;
1779 }
1780
1781 spin_unlock(&il->sta_lock);
1782
1783 /* Set up driver data for this TFD */
1784 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
1785 txq->txb[q->write_ptr].skb = skb;
1786 txq->txb[q->write_ptr].ctx = ctx;
1787
1788 /* Set up first empty entry in queue's array of Tx/cmd buffers */
1789 out_cmd = txq->cmd[q->write_ptr];
1790 out_meta = &txq->meta[q->write_ptr];
1791 tx_cmd = &out_cmd->cmd.tx;
1792 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
1793 memset(tx_cmd, 0, sizeof(struct il_tx_cmd));
1794
1795 /*
1796 * Set up the Tx-command (not MAC!) header.
1797 * Store the chosen Tx queue and TFD idx within the sequence field;
1798 * after Tx, uCode's Tx response will return this value so driver can
1799 * locate the frame within the tx queue and do post-tx processing.
1800 */
1801 out_cmd->hdr.cmd = C_TX;
1802 out_cmd->hdr.sequence =
1803 cpu_to_le16((u16)
1804 (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
1805
1806 /* Copy MAC header from skb into command buffer */
1807 memcpy(tx_cmd->hdr, hdr, hdr_len);
1808
1809 /* Total # bytes to be transmitted */
1810 len = (u16) skb->len;
1811 tx_cmd->len = cpu_to_le16(len);
1812
1813 if (info->control.hw_key)
1814 il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
1815
1816 /* TODO need this for burst mode later on */
1817 il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
1818 il_dbg_log_tx_data_frame(il, len, hdr);
1819
1820 il4965_tx_cmd_build_rate(il, tx_cmd, info, fc);
1821
1822 il_update_stats(il, true, fc, len);
1823 /*
1824 * Use the first empty entry in this queue's command buffer array
1825 * to contain the Tx command and MAC header concatenated together
1826 * (payload data will be in another buffer).
1827 * Size of this varies, due to varying MAC header length.
1828 * If end is not dword aligned, we'll have 2 extra bytes at the end
1829 * of the MAC header (device reads on dword boundaries).
1830 * We'll tell device about this padding later.
1831 */
1832 len = sizeof(struct il_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len;
1833 firstlen = (len + 3) & ~3;
1834
1835 /* Tell NIC about any 2-byte padding after MAC header */
1836 if (firstlen != len)
1837 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1838
1839 /* Physical address of this Tx command's header (not MAC header!),
1840 * within command buffer array. */
1841 txcmd_phys =
1842 pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
1843 PCI_DMA_BIDIRECTIONAL);
1844 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1845 dma_unmap_len_set(out_meta, len, firstlen);
1846 /* Add buffer containing Tx command and MAC(!) header to TFD's
1847 * first entry */
1848 il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen,
1849 1, 0);
1850
1851 if (!ieee80211_has_morefrags(hdr->frame_control)) {
1852 txq->need_update = 1;
1853 } else {
1854 wait_write_ptr = 1;
1855 txq->need_update = 0;
1856 }
1857
1858 /* Set up TFD's 2nd entry to point directly to remainder of skb,
1859 * if any (802.11 null frames have no payload). */
1860 secondlen = skb->len - hdr_len;
1861 if (secondlen > 0) {
1862 phys_addr =
1863 pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
1864 PCI_DMA_TODEVICE);
1865 il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr,
1866 secondlen, 0, 0);
1867 }
1868
1869 scratch_phys =
1870 txcmd_phys + sizeof(struct il_cmd_header) +
1871 offsetof(struct il_tx_cmd, scratch);
1872
1873 /* take back ownership of DMA buffer to enable update */
1874 pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen,
1875 PCI_DMA_BIDIRECTIONAL);
1876 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1877 tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
1878
1879 D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
1880 D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
1881 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
1882 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len);
1883
1884 /* Set up entry for this TFD in Tx byte-count array */
1885 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1886 il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq,
1887 le16_to_cpu(tx_cmd->
1888 len));
1889
1890 pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen,
1891 PCI_DMA_BIDIRECTIONAL);
1892
1893 /* Tell device the write idx *just past* this latest filled TFD */
1894 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
1895 il_txq_update_write_ptr(il, txq);
1896 spin_unlock_irqrestore(&il->lock, flags);
1897
1898 /*
1899 * At this point the frame is "transmitted" successfully
1900 * and we will get a TX status notification eventually,
1901 * regardless of the value of ret. "ret" only indicates
1902 * whether or not we should update the write pointer.
1903 */
1904
1905 /*
1906 * Avoid atomic ops if it isn't an associated client.
1907 * Also, if this is a packet for aggregation, don't
1908 * increase the counter because the ucode will stop
1909 * aggregation queues when their respective station
1910 * goes to sleep.
1911 */
1912 if (sta_priv && sta_priv->client && !is_agg)
1913 atomic_inc(&sta_priv->pending_frames);
1914
1915 if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
1916 if (wait_write_ptr) {
1917 spin_lock_irqsave(&il->lock, flags);
1918 txq->need_update = 1;
1919 il_txq_update_write_ptr(il, txq);
1920 spin_unlock_irqrestore(&il->lock, flags);
1921 } else {
1922 il_stop_queue(il, txq);
1923 }
1924 }
1925
1926 return 0;
1927
1928drop_unlock:
1929 spin_unlock_irqrestore(&il->lock, flags);
1930 return -1;
1931}
1932
1933static inline int
1934il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
1935{
1936 ptr->addr =
1937 dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma, GFP_KERNEL);
1938 if (!ptr->addr)
1939 return -ENOMEM;
1940 ptr->size = size;
1941 return 0;
1942}
1943
1944static inline void
1945il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr)
1946{
1947 if (unlikely(!ptr->addr))
1948 return;
1949
1950 dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
1951 memset(ptr, 0, sizeof(*ptr));
1952}
1953
1954/**
1955 * il4965_hw_txq_ctx_free - Free TXQ Context
1956 *
1957 * Destroy all TX DMA queues and structures
1958 */
1959void
1960il4965_hw_txq_ctx_free(struct il_priv *il)
1961{
1962 int txq_id;
1963
1964 /* Tx queues */
1965 if (il->txq) {
1966 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
1967 if (txq_id == il->cmd_queue)
1968 il_cmd_queue_free(il);
1969 else
1970 il_tx_queue_free(il, txq_id);
1971 }
1972 il4965_free_dma_ptr(il, &il->kw);
1973
1974 il4965_free_dma_ptr(il, &il->scd_bc_tbls);
1975
1976 /* free tx queue structure */
1977 il_txq_mem(il);
1978}
1979
1980/**
1981 * il4965_txq_ctx_alloc - allocate TX queue context
1982 * Allocate all Tx DMA structures and initialize them
1983 *
1984 * @param il
1985 * @return error code
1986 */
1987int
1988il4965_txq_ctx_alloc(struct il_priv *il)
1989{
1990 int ret;
1991 int txq_id, slots_num;
1992 unsigned long flags;
1993
1994 /* Free all tx/cmd queues and keep-warm buffer */
1995 il4965_hw_txq_ctx_free(il);
1996
1997 ret =
1998 il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
1999 il->hw_params.scd_bc_tbls_size);
2000 if (ret) {
2001 IL_ERR("Scheduler BC Table allocation failed\n");
2002 goto error_bc_tbls;
2003 }
2004 /* Alloc keep-warm buffer */
2005 ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
2006 if (ret) {
2007 IL_ERR("Keep Warm allocation failed\n");
2008 goto error_kw;
2009 }
2010
2011 /* allocate tx queue structure */
2012 ret = il_alloc_txq_mem(il);
2013 if (ret)
2014 goto error;
2015
2016 spin_lock_irqsave(&il->lock, flags);
2017
2018 /* Turn off all Tx DMA fifos */
2019 il4965_txq_set_sched(il, 0);
2020
2021 /* Tell NIC where to find the "keep warm" buffer */
2022 il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
2023
2024 spin_unlock_irqrestore(&il->lock, flags);
2025
2026 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
2027 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
2028 slots_num =
2029 (txq_id ==
2030 il->cmd_queue) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
2031 ret = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id);
2032 if (ret) {
2033 IL_ERR("Tx %d queue init failed\n", txq_id);
2034 goto error;
2035 }
2036 }
2037
2038 return ret;
2039
2040error:
2041 il4965_hw_txq_ctx_free(il);
2042 il4965_free_dma_ptr(il, &il->kw);
2043error_kw:
2044 il4965_free_dma_ptr(il, &il->scd_bc_tbls);
2045error_bc_tbls:
2046 return ret;
2047}
2048
2049void
2050il4965_txq_ctx_reset(struct il_priv *il)
2051{
2052 int txq_id, slots_num;
2053 unsigned long flags;
2054
2055 spin_lock_irqsave(&il->lock, flags);
2056
2057 /* Turn off all Tx DMA fifos */
2058 il4965_txq_set_sched(il, 0);
2059
2060 /* Tell NIC where to find the "keep warm" buffer */
2061 il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
2062
2063 spin_unlock_irqrestore(&il->lock, flags);
2064
2065 /* Alloc and init all Tx queues, including the command queue (#4) */
2066 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
2067 slots_num =
2068 txq_id == il->cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
2069 il_tx_queue_reset(il, &il->txq[txq_id], slots_num, txq_id);
2070 }
2071}
2072
2073/**
2074 * il4965_txq_ctx_stop - Stop all Tx DMA channels
2075 */
2076void
2077il4965_txq_ctx_stop(struct il_priv *il)
2078{
2079 int ch, txq_id;
2080 unsigned long flags;
2081
2082 /* Turn off all Tx DMA fifos */
2083 spin_lock_irqsave(&il->lock, flags);
2084
2085 il4965_txq_set_sched(il, 0);
2086
2087 /* Stop each Tx DMA channel, and wait for it to be idle */
2088 for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
2089 il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
2090 if (il_poll_bit
2091 (il, FH49_TSSR_TX_STATUS_REG,
2092 FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000))
2093 IL_ERR("Failing on timeout while stopping"
2094 " DMA channel %d [0x%08x]", ch,
2095 il_rd(il, FH49_TSSR_TX_STATUS_REG));
2096 }
2097 spin_unlock_irqrestore(&il->lock, flags);
2098
2099 if (!il->txq)
2100 return;
2101
2102 /* Unmap DMA from host system and free skb's */
2103 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2104 if (txq_id == il->cmd_queue)
2105 il_cmd_queue_unmap(il);
2106 else
2107 il_tx_queue_unmap(il, txq_id);
2108}
2109
2110/*
2111 * Find first available (lowest unused) Tx Queue, mark it "active".
2112 * Called only when finding queue for aggregation.
2113 * Should never return anything < 7, because they should already
2114 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
2115 */
2116static int
2117il4965_txq_ctx_activate_free(struct il_priv *il)
2118{
2119 int txq_id;
2120
2121 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2122 if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk))
2123 return txq_id;
2124 return -1;
2125}
2126
2127/**
2128 * il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
2129 */
2130static void
2131il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id)
2132{
2133 /* Simply stop the queue, but don't change any configuration;
2134 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
2135 il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
2136 (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2137 (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
2138}
2139
2140/**
2141 * il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
2142 */
2143static int
2144il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id)
2145{
2146 u32 tbl_dw_addr;
2147 u32 tbl_dw;
2148 u16 scd_q2ratid;
2149
2150 scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
2151
2152 tbl_dw_addr =
2153 il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
2154
2155 tbl_dw = il_read_targ_mem(il, tbl_dw_addr);
2156
2157 if (txq_id & 0x1)
2158 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
2159 else
2160 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
2161
2162 il_write_targ_mem(il, tbl_dw_addr, tbl_dw);
2163
2164 return 0;
2165}
2166
2167/**
2168 * il4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
2169 *
2170 * NOTE: txq_id must be greater than IL49_FIRST_AMPDU_QUEUE,
2171 * i.e. it must be one of the higher queues used for aggregation
2172 */
2173static int
2174il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id,
2175 int tid, u16 ssn_idx)
2176{
2177 unsigned long flags;
2178 u16 ra_tid;
2179 int ret;
2180
2181 if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
2182 (IL49_FIRST_AMPDU_QUEUE +
2183 il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
2184 IL_WARN("queue number out of range: %d, must be %d to %d\n",
2185 txq_id, IL49_FIRST_AMPDU_QUEUE,
2186 IL49_FIRST_AMPDU_QUEUE +
2187 il->cfg->base_params->num_of_ampdu_queues - 1);
2188 return -EINVAL;
2189 }
2190
2191 ra_tid = BUILD_RAxTID(sta_id, tid);
2192
2193 /* Modify device's station table to Tx this TID */
2194 ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
2195 if (ret)
2196 return ret;
2197
2198 spin_lock_irqsave(&il->lock, flags);
2199
2200 /* Stop this Tx queue before configuring it */
2201 il4965_tx_queue_stop_scheduler(il, txq_id);
2202
2203 /* Map receiver-address / traffic-ID to this queue */
2204 il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);
2205
2206 /* Set this queue as a chain-building queue */
2207 il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
2208
2209 /* Place first TFD at idx corresponding to start sequence number.
2210 * Assumes that ssn_idx is valid (!= 0xFFF) */
2211 il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
2212 il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
2213 il4965_set_wr_ptrs(il, txq_id, ssn_idx);
2214
2215 /* Set up Tx win size and frame limit for this queue */
2216 il_write_targ_mem(il,
2217 il->scd_base_addr +
2218 IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
2219 (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS)
2220 & IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
2221
2222 il_write_targ_mem(il,
2223 il->scd_base_addr +
2224 IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
2225 (SCD_FRAME_LIMIT <<
2226 IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2227 IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
2228
2229 il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
2230
2231 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
2232 il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);
2233
2234 spin_unlock_irqrestore(&il->lock, flags);
2235
2236 return 0;
2237}
2238
/**
 * il4965_tx_agg_start - mac80211 callback to start a Tx BA session
 * @il: device private data
 * @vif: interface the session belongs to
 * @sta: peer station
 * @tid: traffic identifier to aggregate
 * @ssn: out-param, starting sequence number for the session
 *
 * Picks a free aggregation queue, binds it to <sta, tid> and either
 * completes the ADDBA immediately (HW queue empty) or defers it until
 * the queue drains (state IL_EMPTYING_HW_QUEUE_ADDBA).
 */
int
il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta, u16 tid, u16 * ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct il_tid_data *tid_data;

	tx_fifo = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo < 0))
		return tx_fifo;

	D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	/* Only one session per <sta, tid> at a time */
	if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
		IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
		return -ENXIO;
	}

	/* Claim the lowest unused AMPDU queue */
	txq_id = il4965_txq_ctx_activate_free(il);
	if (txq_id == -1) {
		IL_ERR("No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	tid_data = &il->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	/* Program the scheduler/station table for this queue (sleeps) */
	ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, sta_id, tid, *ssn);
	if (ret)
		return ret;

	/* Re-take the lock: tid_data may have changed while unlocked */
	spin_lock_irqsave(&il->sta_lock, flags);
	tid_data = &il->stations[sta_id].tid[tid];
	if (tid_data->tfds_in_queue == 0) {
		D_HT("HW queue is empty\n");
		tid_data->agg.state = IL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		D_HT("HW queue is NOT empty: %d packets in HW queue\n",
		     tid_data->tfds_in_queue);
		tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return ret;
}
2300
2301/**
2302 * txq_id must be greater than IL49_FIRST_AMPDU_QUEUE
2303 * il->lock must be held by the caller
2304 */
2305static int
2306il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo)
2307{
2308 if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
2309 (IL49_FIRST_AMPDU_QUEUE +
2310 il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
2311 IL_WARN("queue number out of range: %d, must be %d to %d\n",
2312 txq_id, IL49_FIRST_AMPDU_QUEUE,
2313 IL49_FIRST_AMPDU_QUEUE +
2314 il->cfg->base_params->num_of_ampdu_queues - 1);
2315 return -EINVAL;
2316 }
2317
2318 il4965_tx_queue_stop_scheduler(il, txq_id);
2319
2320 il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
2321
2322 il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
2323 il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
2324 /* supposes that ssn_idx is valid (!= 0xFFF) */
2325 il4965_set_wr_ptrs(il, txq_id, ssn_idx);
2326
2327 il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
2328 il_txq_ctx_deactivate(il, txq_id);
2329 il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);
2330
2331 return 0;
2332}
2333
/**
 * il4965_tx_agg_stop - mac80211 callback to stop a Tx BA session
 * @il: device private data
 * @vif: interface the session belongs to
 * @sta: peer station
 * @tid: traffic identifier of the session
 *
 * If the HW queue is non-empty, only marks the session as
 * IL_EMPTYING_HW_QUEUE_DELBA; the actual teardown then happens from
 * il4965_txq_check_empty() once the last frame is reclaimed.
 */
int
il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn;
	struct il_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	tx_fifo_id = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = il_sta_id(sta);

	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);

	tid_data = &il->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;

	switch (il->stations[sta_id].tid[tid].agg.state) {
	case IL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		D_HT("AGG stop before setup done\n");
		goto turn_off;
	case IL_AGG_ON:
		break;
	default:
		IL_WARN("Stopping AGG while state not ON or starting\n");
	}

	write_ptr = il->txq[txq_id].q.write_ptr;
	read_ptr = il->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		D_HT("Stopping a non empty AGG HW QUEUE\n");
		/* Defer teardown to il4965_txq_check_empty() */
		il->stations[sta_id].tid[tid].agg.state =
		    IL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	D_HT("HW queue is empty\n");
turn_off:
	il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;

	/* do not restore/save irqs: irqs stay disabled across the handoff
	 * from sta_lock to lock; 'flags' from the irqsave above is reused
	 * by the final irqrestore below. */
	spin_unlock(&il->sta_lock);
	spin_lock(&il->lock);

	/*
	 * the only reason this call can fail is queue number out of range,
	 * which can happen if uCode is reloaded and all the station
	 * information are lost. if it is outside the range, there is no need
	 * to deactivate the uCode queue, just return "success" to allow
	 * mac80211 to clean up it own data.
	 */
	il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
	spin_unlock_irqrestore(&il->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
2410
/**
 * il4965_txq_check_empty - complete a deferred ADDBA/DELBA on queue drain
 * @il: device private data
 * @sta_id: device station-table index of the peer
 * @tid: traffic identifier of the session
 * @txq_id: Tx queue that was just reclaimed from
 *
 * Called after frames are reclaimed; when a session was left in an
 * EMPTYING state by agg start/stop, finish the transition once the
 * queue is actually empty.  Caller must hold il->sta_lock.
 */
int
il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
{
	struct il_queue *q = &il->txq[txq_id].q;
	u8 *addr = il->stations[sta_id].sta.sta.addr;
	struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];
	struct il_rxon_context *ctx;

	ctx = &il->ctx;

	lockdep_assert_held(&il->sta_lock);

	switch (il->stations[sta_id].tid[tid].agg.state) {
	case IL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if (txq_id == tid_data->agg.txq_id &&
		    q->read_ptr == q->write_ptr) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = il4965_get_fifo_from_tid(ctx, tid);
			D_HT("HW queue empty: continue DELBA flow\n");
			il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
			tid_data->agg.state = IL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
		}
		break;
	case IL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			D_HT("HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
		}
		break;
	}

	return 0;
}
2449
2450static void
2451il4965_non_agg_tx_status(struct il_priv *il, struct il_rxon_context *ctx,
2452 const u8 *addr1)
2453{
2454 struct ieee80211_sta *sta;
2455 struct il_station_priv *sta_priv;
2456
2457 rcu_read_lock();
2458 sta = ieee80211_find_sta(ctx->vif, addr1);
2459 if (sta) {
2460 sta_priv = (void *)sta->drv_priv;
2461 /* avoid atomic ops if this isn't a client */
2462 if (sta_priv->client &&
2463 atomic_dec_return(&sta_priv->pending_frames) == 0)
2464 ieee80211_sta_block_awake(il->hw, sta, false);
2465 }
2466 rcu_read_unlock();
2467}
2468
2469static void
2470il4965_tx_status(struct il_priv *il, struct il_tx_info *tx_info, bool is_agg)
2471{
2472 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
2473
2474 if (!is_agg)
2475 il4965_non_agg_tx_status(il, tx_info->ctx, hdr->addr1);
2476
2477 ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
2478}
2479
/**
 * il4965_tx_queue_reclaim - reclaim TFDs up to (not including) idx
 * @il: device private data
 * @txq_id: Tx queue to reclaim from
 * @idx: last TFD reported done by the device
 *
 * Walks the queue's read pointer forward, reporting each frame's Tx
 * status to mac80211 and freeing its TFD.  Returns the number of
 * QoS-data frames freed (used to update tfds_in_queue accounting).
 */
int
il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
{
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct il_queue *q = &txq->q;
	struct il_tx_info *tx_info;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;

	/* Guard against a bogus idx from the device */
	if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
		       "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
		       q->write_ptr, q->read_ptr);
		return 0;
	}

	/* Advance read_ptr until it reaches the entry after idx */
	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];

		if (WARN_ON_ONCE(tx_info->skb == NULL))
			continue;

		/* Only QoS-data frames count toward tfds_in_queue */
		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
		if (ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;

		il4965_tx_status(il, tx_info,
				 txq_id >= IL4965_FIRST_AMPDU_QUEUE);
		tx_info->skb = NULL;

		il->cfg->ops->lib->txq_free_tfd(il, txq);
	}
	return nfreed;
}
2516
2517/**
2518 * il4965_tx_status_reply_compressed_ba - Update tx status from block-ack
2519 *
2520 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
2521 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
2522 */
2523static int
2524il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg,
2525 struct il_compressed_ba_resp *ba_resp)
2526{
2527 int i, sh, ack;
2528 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
2529 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
2530 int successes = 0;
2531 struct ieee80211_tx_info *info;
2532 u64 bitmap, sent_bitmap;
2533
2534 if (unlikely(!agg->wait_for_ba)) {
2535 if (unlikely(ba_resp->bitmap))
2536 IL_ERR("Received BA when not expected\n");
2537 return -EINVAL;
2538 }
2539
2540 /* Mark that the expected block-ack response arrived */
2541 agg->wait_for_ba = 0;
2542 D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
2543
2544 /* Calculate shift to align block-ack bits with our Tx win bits */
2545 sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
2546 if (sh < 0) /* tbw something is wrong with indices */
2547 sh += 0x100;
2548
2549 if (agg->frame_count > (64 - sh)) {
2550 D_TX_REPLY("more frames than bitmap size");
2551 return -1;
2552 }
2553
2554 /* don't use 64-bit values for now */
2555 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
2556
2557 /* check for success or failure according to the
2558 * transmitted bitmap and block-ack bitmap */
2559 sent_bitmap = bitmap & agg->bitmap;
2560
2561 /* For each frame attempted in aggregation,
2562 * update driver's record of tx frame's status. */
2563 i = 0;
2564 while (sent_bitmap) {
2565 ack = sent_bitmap & 1ULL;
2566 successes += ack;
2567 D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK",
2568 i, (agg->start_idx + i) & 0xff, agg->start_idx + i);
2569 sent_bitmap >>= 1;
2570 ++i;
2571 }
2572
2573 D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
2574
2575 info = IEEE80211_SKB_CB(il->txq[scd_flow].txb[agg->start_idx].skb);
2576 memset(&info->status, 0, sizeof(info->status));
2577 info->flags |= IEEE80211_TX_STAT_ACK;
2578 info->flags |= IEEE80211_TX_STAT_AMPDU;
2579 info->status.ampdu_ack_len = successes;
2580 info->status.ampdu_len = agg->frame_count;
2581 il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);
2582
2583 return 0;
2584}
2585
2586/**
2587 * translate ucode response to mac80211 tx status control values
2588 */
2589void
2590il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
2591 struct ieee80211_tx_info *info)
2592{
2593 struct ieee80211_tx_rate *r = &info->control.rates[0];
2594
2595 info->antenna_sel_tx =
2596 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
2597 if (rate_n_flags & RATE_MCS_HT_MSK)
2598 r->flags |= IEEE80211_TX_RC_MCS;
2599 if (rate_n_flags & RATE_MCS_GF_MSK)
2600 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
2601 if (rate_n_flags & RATE_MCS_HT40_MSK)
2602 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
2603 if (rate_n_flags & RATE_MCS_DUP_MSK)
2604 r->flags |= IEEE80211_TX_RC_DUP_DATA;
2605 if (rate_n_flags & RATE_MCS_SGI_MSK)
2606 r->flags |= IEEE80211_TX_RC_SHORT_GI;
2607 r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
2608}
2609
2610/**
2611 * il4965_hdl_compressed_ba - Handler for N_COMPRESSED_BA
2612 *
2613 * Handles block-acknowledge notification from device, which reports success
2614 * of frames sent via aggregation.
2615 */
2616void
2617il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
2618{
2619 struct il_rx_pkt *pkt = rxb_addr(rxb);
2620 struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
2621 struct il_tx_queue *txq = NULL;
2622 struct il_ht_agg *agg;
2623 int idx;
2624 int sta_id;
2625 int tid;
2626 unsigned long flags;
2627
2628 /* "flow" corresponds to Tx queue */
2629 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
2630
2631 /* "ssn" is start of block-ack Tx win, corresponds to idx
2632 * (in Tx queue's circular buffer) of first TFD/frame in win */
2633 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
2634
2635 if (scd_flow >= il->hw_params.max_txq_num) {
2636 IL_ERR("BUG_ON scd_flow is bigger than number of queues\n");
2637 return;
2638 }
2639
2640 txq = &il->txq[scd_flow];
2641 sta_id = ba_resp->sta_id;
2642 tid = ba_resp->tid;
2643 agg = &il->stations[sta_id].tid[tid].agg;
2644 if (unlikely(agg->txq_id != scd_flow)) {
2645 /*
2646 * FIXME: this is a uCode bug which need to be addressed,
2647 * log the information and return for now!
2648 * since it is possible happen very often and in order
2649 * not to fill the syslog, don't enable the logging by default
2650 */
2651 D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n",
2652 scd_flow, agg->txq_id);
2653 return;
2654 }
2655
2656 /* Find idx just before block-ack win */
2657 idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
2658
2659 spin_lock_irqsave(&il->sta_lock, flags);
2660
2661 D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n",
2662 agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32,
2663 ba_resp->sta_id);
2664 D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx," "scd_flow = "
2665 "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl,
2666 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
2667 ba_resp->scd_flow, ba_resp->scd_ssn);
2668 D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx,
2669 (unsigned long long)agg->bitmap);
2670
2671 /* Update driver's record of ACK vs. not for each frame in win */
2672 il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);
2673
2674 /* Release all TFDs before the SSN, i.e. all TFDs in front of
2675 * block-ack win (we assume that they've been successfully
2676 * transmitted ... if not, it's too late anyway). */
2677 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
2678 /* calculate mac80211 ampdu sw queue to wake */
2679 int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
2680 il4965_free_tfds_in_queue(il, sta_id, tid, freed);
2681
2682 if (il_queue_space(&txq->q) > txq->q.low_mark &&
2683 il->mac80211_registered &&
2684 agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
2685 il_wake_queue(il, txq);
2686
2687 il4965_txq_check_empty(il, sta_id, tid, scd_flow);
2688 }
2689
2690 spin_unlock_irqrestore(&il->sta_lock, flags);
2691}
2692
#ifdef CONFIG_IWLEGACY_DEBUG
/**
 * il4965_get_tx_fail_reason - map a Tx status word to a printable name
 * @status: status field from the device's Tx response
 *
 * Returns a static string naming the failure/postpone reason encoded in
 * the low status bits, or "SUCCESS"/"UNKNOWN".  Debug builds only.
 */
const char *
il4965_get_tx_fail_reason(u32 status)
{
/* Each macro expands to a case label that stringifies the reason name */
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_POSTPONE(DELAY);
		TX_STATUS_POSTPONE(FEW_BYTES);
		TX_STATUS_POSTPONE(QUIET_PERIOD);
		TX_STATUS_POSTPONE(CALC_TTAK);
		TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
		TX_STATUS_FAIL(SHORT_LIMIT);
		TX_STATUS_FAIL(LONG_LIMIT);
		TX_STATUS_FAIL(FIFO_UNDERRUN);
		TX_STATUS_FAIL(DRAIN_FLOW);
		TX_STATUS_FAIL(RFKILL_FLUSH);
		TX_STATUS_FAIL(LIFE_EXPIRE);
		TX_STATUS_FAIL(DEST_PS);
		TX_STATUS_FAIL(HOST_ABORTED);
		TX_STATUS_FAIL(BT_RETRY);
		TX_STATUS_FAIL(STA_INVALID);
		TX_STATUS_FAIL(FRAG_DROPPED);
		TX_STATUS_FAIL(TID_DISABLE);
		TX_STATUS_FAIL(FIFO_FLUSHED);
		TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
		TX_STATUS_FAIL(PASSIVE_NO_RX);
		TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLEGACY_DEBUG */
2732
/*
 * il4965_sta_alloc_lq - allocate and fill a default link-quality command
 *
 * Builds an initial rate-scaling (LQ) command for @sta_id: every retry
 * slot starts at the band's lowest mandatory rate (6M on 5GHz, 1M
 * otherwise) on the first valid TX antenna.
 *
 * Returns a kzalloc'ed command (caller owns it / stores it in
 * il->stations[].lq) or NULL on allocation failure.  Sleeps (GFP_KERNEL).
 */
static struct il_link_quality_cmd *
il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
{
	int i, r;
	struct il_link_quality_cmd *link_cmd;
	u32 rate_flags = 0;
	__le32 rate_n_flags;

	link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL);
	if (!link_cmd) {
		IL_ERR("Unable to allocate memory for LQ cmd.\n");
		return NULL;
	}
	/* Set up the rate scaling to start at selected rate, fall back
	 * all the way down to 1M in IEEE order, and then spin on 1M */
	if (il->band == IEEE80211_BAND_5GHZ)
		r = RATE_6M_IDX;
	else
		r = RATE_1M_IDX;

	/* CCK rates need the CCK modulation flag set in rate_n_flags */
	if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE)
		rate_flags |= RATE_MCS_CCK_MSK;

	rate_flags |=
	    il4965_first_antenna(il->hw_params.
				 valid_tx_ant) << RATE_MCS_ANT_POS;
	rate_n_flags = il4965_hw_set_rate_n_flags(il_rates[r].plcp, rate_flags);
	/* Same starting rate in every retry slot of the rate table */
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		link_cmd->rs_table[i].rate_n_flags = rate_n_flags;

	link_cmd->general_params.single_stream_ant_msk =
	    il4965_first_antenna(il->hw_params.valid_tx_ant);

	/* Dual-stream mask: all valid antennas except the first one; if
	 * that leaves nothing, fall back to ANT_AB, and when exactly two
	 * antennas are valid use both for dual-stream. */
	link_cmd->general_params.dual_stream_ant_msk =
	    il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
							       valid_tx_ant);
	if (!link_cmd->general_params.dual_stream_ant_msk) {
		link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
	} else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
		link_cmd->general_params.dual_stream_ant_msk =
		    il->hw_params.valid_tx_ant;
	}

	link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
	link_cmd->agg_params.agg_time_limit =
	    cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);

	link_cmd->sta_id = sta_id;

	return link_cmd;
}
2784
2785/*
2786 * il4965_add_bssid_station - Add the special IBSS BSSID station
2787 *
2788 * Function sleeps.
2789 */
2790int
2791il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
2792 const u8 *addr, u8 *sta_id_r)
2793{
2794 int ret;
2795 u8 sta_id;
2796 struct il_link_quality_cmd *link_cmd;
2797 unsigned long flags;
2798
2799 if (sta_id_r)
2800 *sta_id_r = IL_INVALID_STATION;
2801
2802 ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
2803 if (ret) {
2804 IL_ERR("Unable to add station %pM\n", addr);
2805 return ret;
2806 }
2807
2808 if (sta_id_r)
2809 *sta_id_r = sta_id;
2810
2811 spin_lock_irqsave(&il->sta_lock, flags);
2812 il->stations[sta_id].used |= IL_STA_LOCAL;
2813 spin_unlock_irqrestore(&il->sta_lock, flags);
2814
2815 /* Set up default rate scaling table in device's station table */
2816 link_cmd = il4965_sta_alloc_lq(il, sta_id);
2817 if (!link_cmd) {
2818 IL_ERR("Unable to initialize rate scaling for station %pM.\n",
2819 addr);
2820 return -ENOMEM;
2821 }
2822
2823 ret = il_send_lq_cmd(il, ctx, link_cmd, CMD_SYNC, true);
2824 if (ret)
2825 IL_ERR("Link quality command failed (%d)\n", ret);
2826
2827 spin_lock_irqsave(&il->sta_lock, flags);
2828 il->stations[sta_id].lq = link_cmd;
2829 spin_unlock_irqrestore(&il->sta_lock, flags);
2830
2831 return 0;
2832}
2833
/*
 * il4965_static_wepkey_cmd - send all static (default) WEP keys to uCode
 *
 * Builds a C_WEPKEY command covering all WEP_KEYS_MAX slots from
 * ctx->wep_keys and sends it synchronously.  Empty slots are marked
 * WEP_INVALID_OFFSET.  If no key is configured the command is only sent
 * when @send_if_empty is set (used to clear keys on removal).
 *
 * Returns 0 on success or when nothing needed to be sent; otherwise the
 * il_send_cmd() error.  Sleeps.
 */
static int
il4965_static_wepkey_cmd(struct il_priv *il, struct il_rxon_context *ctx,
			 bool send_if_empty)
{
	int i, not_empty = 0;
	/* Command header plus one il_wep_key per slot, built on the stack */
	u8 buff[sizeof(struct il_wep_cmd) +
		sizeof(struct il_wep_key) * WEP_KEYS_MAX];
	struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
	size_t cmd_size = sizeof(struct il_wep_cmd);
	struct il_host_cmd cmd = {
		.id = ctx->wep_key_cmd,
		.data = wep_cmd,
		.flags = CMD_SYNC,
	};

	might_sleep();

	memset(wep_cmd, 0,
	       cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX));

	for (i = 0; i < WEP_KEYS_MAX; i++) {
		wep_cmd->key[i].key_idx = i;
		if (ctx->wep_keys[i].key_size) {
			wep_cmd->key[i].key_offset = i;
			not_empty = 1;
		} else {
			wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
		}

		wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
		/* Key material starts at byte offset 3 of the key array —
		 * matches the dynamic-WEP path; presumably the first 3
		 * bytes are reserved by the uCode key format. */
		memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
		       ctx->wep_keys[i].key_size);
	}

	wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
	wep_cmd->num_keys = WEP_KEYS_MAX;

	cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;

	cmd.len = cmd_size;

	if (not_empty || send_if_empty)
		return il_send_cmd(il, &cmd);
	else
		return 0;
}
2880
2881int
2882il4965_restore_default_wep_keys(struct il_priv *il, struct il_rxon_context *ctx)
2883{
2884 lockdep_assert_held(&il->mutex);
2885
2886 return il4965_static_wepkey_cmd(il, ctx, false);
2887}
2888
2889int
2890il4965_remove_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
2891 struct ieee80211_key_conf *keyconf)
2892{
2893 int ret;
2894
2895 lockdep_assert_held(&il->mutex);
2896
2897 D_WEP("Removing default WEP key: idx=%d\n", keyconf->keyidx);
2898
2899 memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
2900 if (il_is_rfkill(il)) {
2901 D_WEP("Not sending C_WEPKEY command due to RFKILL.\n");
2902 /* but keys in device are clear anyway so return success */
2903 return 0;
2904 }
2905 ret = il4965_static_wepkey_cmd(il, ctx, 1);
2906 D_WEP("Remove default WEP key: idx=%d ret=%d\n", keyconf->keyidx, ret);
2907
2908 return ret;
2909}
2910
/*
 * il4965_set_default_wep_key - install one static (default) WEP key
 *
 * Validates the key length (64- or 128-bit WEP only), stores the key in
 * ctx->wep_keys and pushes the whole static key table to the device.
 * Decryption IVs are handled by hardware, so GENERATE_IV is cleared.
 *
 * Caller must hold il->mutex; may sleep.  Returns 0 or -EINVAL /
 * command error.
 */
int
il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
			   struct ieee80211_key_conf *keyconf)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	if (keyconf->keylen != WEP_KEY_LEN_128 &&
	    keyconf->keylen != WEP_KEY_LEN_64) {
		D_WEP("Bad WEP key length %d\n", keyconf->keylen);
		return -EINVAL;
	}

	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->hw_key_idx = HW_KEY_DEFAULT;
	il->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;

	ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
	memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
	       keyconf->keylen);

	ret = il4965_static_wepkey_cmd(il, ctx, false);
	D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", keyconf->keylen,
	      keyconf->keyidx, ret);

	return ret;
}
2939
/*
 * il4965_set_wep_dynamic_key_info - install a pairwise (dynamic) WEP key
 *
 * Programs a per-station WEP key into the driver's station table and
 * sends the updated ADD_STA command synchronously.  A ucode key-table
 * slot is allocated only when the station has no key yet; otherwise the
 * existing slot is reused.
 *
 * Caller must hold il->mutex; may sleep.
 */
static int
il4965_set_wep_dynamic_key_info(struct il_priv *il, struct il_rxon_context *ctx,
				struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	/* Hardware handles the WEP IV, mac80211 must not generate it */
	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;

	key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (keyconf->keylen == WEP_KEY_LEN_128)
		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;

	if (sta_id == ctx->bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
	il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;

	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	/* Key material lives at byte offset 3 in the ADD_STA key field
	 * for WEP (same layout as the static WEP command). */
	memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key,
	       keyconf->keylen);

	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* Snapshot the command under the lock, send it after unlocking
	 * (il_send_add_sta with CMD_SYNC can sleep) */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
2993
/*
 * il4965_set_ccmp_dynamic_key_info - install a pairwise CCMP (AES) key
 *
 * Programs a per-station CCMP key into the driver's station table and
 * sends the updated ADD_STA command synchronously.  mac80211 generates
 * the IV (GENERATE_IV set); the hardware does the encryption.
 *
 * Caller must hold il->mutex; may sleep.
 */
static int
il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
				 struct il_rxon_context *ctx,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == ctx->bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;

	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);

	/* Allocate a ucode key slot only if the station has none yet */
	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* Snapshot under lock, send after unlock (CMD_SYNC may sleep) */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3042
/*
 * il4965_set_tkip_dynamic_key_info - install a pairwise TKIP key
 *
 * Records a per-station TKIP key in the driver's station table; unlike
 * the WEP/CCMP paths, no ADD_STA command is sent here — the device copy
 * is updated later (phase1 keys arrive via il4965_update_tkip_key).
 * mac80211 generates both the IV and the Michael MIC.
 *
 * Always returns 0.
 */
static int
il4965_set_tkip_dynamic_key_info(struct il_priv *il,
				 struct il_rxon_context *ctx,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	int ret = 0;
	__le16 key_flags = 0;

	key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == ctx->bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	/* Only the 16-byte temporal key is kept; MIC keys stay in mac80211 */
	il->stations[sta_id].keyinfo.keylen = 16;

	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;

	/* This copy is acutally not needed: we get the key with each TX */
	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16);

	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16);

	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}
3088
/*
 * il4965_update_tkip_key - push new TKIP phase-1 (TTAK) keys to the device
 *
 * Called by mac80211 when the TKIP IV32 rolls over.  Cancels any active
 * scan first (failure to cancel means we briefly rely on SW decryption),
 * then updates the station's rx TTAK and sends ADD_STA asynchronously.
 */
void
il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
		       struct ieee80211_key_conf *keyconf,
		       struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
{
	u8 sta_id;
	unsigned long flags;
	int i;

	if (il_scan_cancel(il)) {
		/* cancel scan failed, just live w/ bad key and rely
		   briefly on SW decryption */
		return;
	}

	/* NULL sta maps to the broadcast station */
	sta_id = il_sta_id_or_broadcast(il, ctx, sta);
	if (sta_id == IL_INVALID_STATION)
		return;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;

	/* 5 x 16-bit phase-1 key words */
	for (i = 0; i < 5; i++)
		il->stations[sta_id].sta.key.tkip_rx_ttak[i] =
		    cpu_to_le16(phase1key[i]);

	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* CMD_ASYNC: safe to send while holding the spinlock */
	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);

	spin_unlock_irqrestore(&il->sta_lock, flags);

}
3124
/*
 * il4965_remove_dynamic_key - remove a pairwise key from station table
 *
 * Clears the driver and device copies of the station's key, releases its
 * ucode key-table slot, and sends ADD_STA synchronously (unless RF-kill
 * is asserted — the device keys are already gone in that case).
 *
 * Returns 0 also when the key was already replaced by one with another
 * idx, or was never programmed; these are not errors.
 *
 * Caller must hold il->mutex; may sleep.
 */
int
il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
			  struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	u16 key_flags;
	u8 keyidx;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	ctx->key_mapping_keys--;

	spin_lock_irqsave(&il->sta_lock, flags);
	key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
	keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;

	D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id);

	if (keyconf->keyidx != keyidx) {
		/* We need to remove a key with idx different that the one
		 * in the uCode. This means that the key we need to remove has
		 * been replaced by another one with different idx.
		 * Don't do anything and return ok
		 */
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
		IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
			key_flags);
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	/* Release the slot in the driver's ucode key-usage bitmap */
	if (!test_and_clear_bit
	    (il->stations[sta_id].sta.key.key_offset, &il->ucode_key_table))
		IL_ERR("idx %d not used in uCode key table.\n",
		       il->stations[sta_id].sta.key.key_offset);
	memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
	il->stations[sta_id].sta.key.key_flags =
	    STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
	il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	if (il_is_rfkill(il)) {
		D_WEP
		    ("Not sending C_ADD_STA command because RFKILL enabled.\n");
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}
	/* Snapshot under lock, send after unlock (CMD_SYNC may sleep) */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3185
/*
 * il4965_set_dynamic_key - dispatch pairwise key install by cipher suite
 *
 * Marks the key as HW_KEY_DYNAMIC and routes to the cipher-specific
 * installer (CCMP, TKIP, or WEP40/104).  Unknown ciphers get -EINVAL.
 *
 * Caller must hold il->mutex; may sleep.
 */
int
il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
		       struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	ctx->key_mapping_keys++;
	keyconf->hw_key_idx = HW_KEY_DYNAMIC;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		ret =
		    il4965_set_ccmp_dynamic_key_info(il, ctx, keyconf, sta_id);
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		ret =
		    il4965_set_tkip_dynamic_key_info(il, ctx, keyconf, sta_id);
		break;
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		ret = il4965_set_wep_dynamic_key_info(il, ctx, keyconf, sta_id);
		break;
	default:
		/* %s prints the calling function's name here, not an alg */
		IL_ERR("Unknown alg: %s cipher = %x\n", __func__,
		       keyconf->cipher);
		ret = -EINVAL;
	}

	D_WEP("Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
	      keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);

	return ret;
}
3221
3222/**
3223 * il4965_alloc_bcast_station - add broadcast station into driver's station table.
3224 *
3225 * This adds the broadcast station into the driver's station table
3226 * and marks it driver active, so that it will be restored to the
3227 * device at the next best time.
3228 */
3229int
3230il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx)
3231{
3232 struct il_link_quality_cmd *link_cmd;
3233 unsigned long flags;
3234 u8 sta_id;
3235
3236 spin_lock_irqsave(&il->sta_lock, flags);
3237 sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL);
3238 if (sta_id == IL_INVALID_STATION) {
3239 IL_ERR("Unable to prepare broadcast station\n");
3240 spin_unlock_irqrestore(&il->sta_lock, flags);
3241
3242 return -EINVAL;
3243 }
3244
3245 il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
3246 il->stations[sta_id].used |= IL_STA_BCAST;
3247 spin_unlock_irqrestore(&il->sta_lock, flags);
3248
3249 link_cmd = il4965_sta_alloc_lq(il, sta_id);
3250 if (!link_cmd) {
3251 IL_ERR
3252 ("Unable to initialize rate scaling for bcast station.\n");
3253 return -ENOMEM;
3254 }
3255
3256 spin_lock_irqsave(&il->sta_lock, flags);
3257 il->stations[sta_id].lq = link_cmd;
3258 spin_unlock_irqrestore(&il->sta_lock, flags);
3259
3260 return 0;
3261}
3262
3263/**
3264 * il4965_update_bcast_station - update broadcast station's LQ command
3265 *
3266 * Only used by iwl4965. Placed here to have all bcast station management
3267 * code together.
3268 */
3269static int
3270il4965_update_bcast_station(struct il_priv *il, struct il_rxon_context *ctx)
3271{
3272 unsigned long flags;
3273 struct il_link_quality_cmd *link_cmd;
3274 u8 sta_id = ctx->bcast_sta_id;
3275
3276 link_cmd = il4965_sta_alloc_lq(il, sta_id);
3277 if (!link_cmd) {
3278 IL_ERR("Unable to initialize rate scaling for bcast sta.\n");
3279 return -ENOMEM;
3280 }
3281
3282 spin_lock_irqsave(&il->sta_lock, flags);
3283 if (il->stations[sta_id].lq)
3284 kfree(il->stations[sta_id].lq);
3285 else
3286 D_INFO("Bcast sta rate scaling has not been initialized.\n");
3287 il->stations[sta_id].lq = link_cmd;
3288 spin_unlock_irqrestore(&il->sta_lock, flags);
3289
3290 return 0;
3291}
3292
3293int
3294il4965_update_bcast_stations(struct il_priv *il)
3295{
3296 return il4965_update_bcast_station(il, &il->ctx);
3297}
3298
3299/**
3300 * il4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
3301 */
3302int
3303il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid)
3304{
3305 unsigned long flags;
3306 struct il_addsta_cmd sta_cmd;
3307
3308 lockdep_assert_held(&il->mutex);
3309
3310 /* Remove "disable" flag, to enable Tx for this TID */
3311 spin_lock_irqsave(&il->sta_lock, flags);
3312 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
3313 il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
3314 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3315 memcpy(&sta_cmd, &il->stations[sta_id].sta,
3316 sizeof(struct il_addsta_cmd));
3317 spin_unlock_irqrestore(&il->sta_lock, flags);
3318
3319 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
3320}
3321
/*
 * il4965_sta_rx_agg_start - tell the device to start RX aggregation
 *
 * Programs the immediate block-ack TID and starting sequence number for
 * @sta and sends ADD_STA synchronously.  Returns -ENXIO when the
 * station is unknown.  Caller must hold il->mutex; sleeps.
 */
int
il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, int tid,
			u16 ssn)
{
	unsigned long flags;
	int sta_id;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION)
		return -ENXIO;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags_msk = 0;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
	il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid;
	il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* Snapshot under lock, send after unlock (CMD_SYNC may sleep) */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3348
/*
 * il4965_sta_rx_agg_stop - tell the device to stop RX aggregation
 *
 * Programs the immediate block-ack removal for @tid on @sta and sends
 * ADD_STA synchronously.  Returns -ENXIO when the station is unknown.
 * Caller must hold il->mutex; sleeps.
 */
int
il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, int tid)
{
	unsigned long flags;
	int sta_id;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags_msk = 0;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
	il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* Snapshot under lock, send after unlock (CMD_SYNC may sleep) */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3375
/*
 * il4965_sta_modify_sleep_tx_count - set PS-Poll frame count for a station
 *
 * Marks the station as power-saving and tells the uCode how many frames
 * (@cnt) it may send to the sleeping station.  The ADD_STA command is
 * sent asynchronously while holding the station spinlock.
 */
void
il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
	il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
	il->stations[sta_id].sta.sta.modify_mask =
	    STA_MODIFY_SLEEP_TX_COUNT_MSK;
	il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* CMD_ASYNC: safe to send while holding the spinlock */
	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
	spin_unlock_irqrestore(&il->sta_lock, flags);

}
3392
3393void
3394il4965_update_chain_flags(struct il_priv *il)
3395{
3396 if (il->cfg->ops->hcmd->set_rxon_chain) {
3397 il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
3398 if (il->ctx.active.rx_chain != il->ctx.staging.rx_chain)
3399 il_commit_rxon(il, &il->ctx);
3400 }
3401}
3402
3403static void
3404il4965_clear_free_frames(struct il_priv *il)
3405{
3406 struct list_head *element;
3407
3408 D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
3409
3410 while (!list_empty(&il->free_frames)) {
3411 element = il->free_frames.next;
3412 list_del(element);
3413 kfree(list_entry(element, struct il_frame, list));
3414 il->frames_count--;
3415 }
3416
3417 if (il->frames_count) {
3418 IL_WARN("%d frames still in use. Did we lose one?\n",
3419 il->frames_count);
3420 il->frames_count = 0;
3421 }
3422}
3423
3424static struct il_frame *
3425il4965_get_free_frame(struct il_priv *il)
3426{
3427 struct il_frame *frame;
3428 struct list_head *element;
3429 if (list_empty(&il->free_frames)) {
3430 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
3431 if (!frame) {
3432 IL_ERR("Could not allocate frame!\n");
3433 return NULL;
3434 }
3435
3436 il->frames_count++;
3437 return frame;
3438 }
3439
3440 element = il->free_frames.next;
3441 list_del(element);
3442 return list_entry(element, struct il_frame, list);
3443}
3444
3445static void
3446il4965_free_frame(struct il_priv *il, struct il_frame *frame)
3447{
3448 memset(frame, 0, sizeof(*frame));
3449 list_add(&frame->list, &il->free_frames);
3450}
3451
3452static u32
3453il4965_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
3454 int left)
3455{
3456 lockdep_assert_held(&il->mutex);
3457
3458 if (!il->beacon_skb)
3459 return 0;
3460
3461 if (il->beacon_skb->len > left)
3462 return 0;
3463
3464 memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
3465
3466 return il->beacon_skb->len;
3467}
3468
/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
static void
il4965_set_beacon_tim(struct il_priv *il,
		      struct il_tx_beacon_cmd *tx_beacon_cmd, u8 * beacon,
		      u32 frame_size)
{
	u16 tim_idx;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;

	/*
	 * The idx is relative to frame start but we start looking at the
	 * variable-length part of the beacon.
	 */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Parse variable-length elements of beacon to find WLAN_EID_TIM.
	 * Each IE is [id, len, data...]; the "- 2" keeps the id/len read
	 * within bounds. */
	while ((tim_idx < (frame_size - 2)) &&
	       (beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx + 1] + 2;

	/* If TIM field was found, set variables */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
		tx_beacon_cmd->tim_size = beacon[tim_idx + 1];
	} else
		IL_WARN("Unable to find TIM Element in beacon\n");
}
3496
/*
 * il4965_hw_get_beacon_cmd - build a TX_BEACON command into @frame
 *
 * Fills in the TX command, the beacon-specific fields (TIM idx/size),
 * and the beacon contents from the cached template.  Transmit rate is
 * the lowest PLCP rate for the context, with the management TX antenna
 * toggled per-beacon.
 *
 * Returns the total command size in bytes, or 0 on error (no beacon
 * context, empty/oversized template).  Caller must hold il->mutex.
 */
static unsigned int
il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
{
	struct il_tx_beacon_cmd *tx_beacon_cmd;
	u32 frame_size;
	u32 rate_flags;
	u32 rate;
	/*
	 * We have to set up the TX command, the TX Beacon command, and the
	 * beacon contents.
	 */

	lockdep_assert_held(&il->mutex);

	if (!il->beacon_ctx) {
		IL_ERR("trying to build beacon w/o beacon context!\n");
		return 0;
	}

	/* Initialize memory */
	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Set up TX beacon contents */
	frame_size =
	    il4965_fill_beacon_frame(il, tx_beacon_cmd->frame,
				     sizeof(frame->u) - sizeof(*tx_beacon_cmd));
	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
		return 0;
	if (!frame_size)
		return 0;

	/* Set up TX command fields */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
	tx_beacon_cmd->tx.sta_id = il->beacon_ctx->bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags =
	    TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK |
	    TX_CMD_FLG_STA_RATE_MSK;

	/* Set up TX beacon command fields */
	il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame,
			      frame_size);

	/* Set up packet rate and flags */
	rate = il_get_lowest_plcp(il, il->beacon_ctx);
	/* Rotate the TX antenna used for management frames */
	il->mgmt_tx_ant =
	    il4965_toggle_tx_ant(il, il->mgmt_tx_ant,
				 il->hw_params.valid_tx_ant);
	rate_flags = il4965_ant_idx_to_flags(il->mgmt_tx_ant);
	if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags =
	    il4965_hw_set_rate_n_flags(rate, rate_flags);

	return sizeof(*tx_beacon_cmd) + frame_size;
}
3554
3555int
3556il4965_send_beacon_cmd(struct il_priv *il)
3557{
3558 struct il_frame *frame;
3559 unsigned int frame_size;
3560 int rc;
3561
3562 frame = il4965_get_free_frame(il);
3563 if (!frame) {
3564 IL_ERR("Could not obtain free frame buffer for beacon "
3565 "command.\n");
3566 return -ENOMEM;
3567 }
3568
3569 frame_size = il4965_hw_get_beacon_cmd(il, frame);
3570 if (!frame_size) {
3571 IL_ERR("Error configuring the beacon command\n");
3572 il4965_free_frame(il, frame);
3573 return -EINVAL;
3574 }
3575
3576 rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
3577
3578 il4965_free_frame(il, frame);
3579
3580 return rc;
3581}
3582
3583static inline dma_addr_t
3584il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx)
3585{
3586 struct il_tfd_tb *tb = &tfd->tbs[idx];
3587
3588 dma_addr_t addr = get_unaligned_le32(&tb->lo);
3589 if (sizeof(dma_addr_t) > sizeof(u32))
3590 addr |=
3591 ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) <<
3592 16;
3593
3594 return addr;
3595}
3596
3597static inline u16
3598il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx)
3599{
3600 struct il_tfd_tb *tb = &tfd->tbs[idx];
3601
3602 return le16_to_cpu(tb->hi_n_len) >> 4;
3603}
3604
3605static inline void
3606il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx, dma_addr_t addr, u16 len)
3607{
3608 struct il_tfd_tb *tb = &tfd->tbs[idx];
3609 u16 hi_n_len = len << 4;
3610
3611 put_unaligned_le32(addr, &tb->lo);
3612 if (sizeof(dma_addr_t) > sizeof(u32))
3613 hi_n_len |= ((addr >> 16) >> 16) & 0xF;
3614
3615 tb->hi_n_len = cpu_to_le16(hi_n_len);
3616
3617 tfd->num_tbs = idx + 1;
3618}
3619
3620static inline u8
3621il4965_tfd_get_num_tbs(struct il_tfd *tfd)
3622{
3623 return tfd->num_tbs & 0x1f;
3624}
3625
3626/**
3627 * il4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
3628 * @il - driver ilate data
3629 * @txq - tx queue
3630 *
3631 * Does NOT advance any TFD circular buffer read/write idxes
3632 * Does NOT free the TFD itself (which is within circular buffer)
3633 */
3634void
3635il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
3636{
3637 struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds;
3638 struct il_tfd *tfd;
3639 struct pci_dev *dev = il->pci_dev;
3640 int idx = txq->q.read_ptr;
3641 int i;
3642 int num_tbs;
3643
3644 tfd = &tfd_tmp[idx];
3645
3646 /* Sanity check on number of chunks */
3647 num_tbs = il4965_tfd_get_num_tbs(tfd);
3648
3649 if (num_tbs >= IL_NUM_OF_TBS) {
3650 IL_ERR("Too many chunks: %i\n", num_tbs);
3651 /* @todo issue fatal error, it is quite serious situation */
3652 return;
3653 }
3654
3655 /* Unmap tx_cmd */
3656 if (num_tbs)
3657 pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
3658 dma_unmap_len(&txq->meta[idx], len),
3659 PCI_DMA_BIDIRECTIONAL);
3660
3661 /* Unmap chunks, if any. */
3662 for (i = 1; i < num_tbs; i++)
3663 pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i),
3664 il4965_tfd_tb_get_len(tfd, i),
3665 PCI_DMA_TODEVICE);
3666
3667 /* free SKB */
3668 if (txq->txb) {
3669 struct sk_buff *skb;
3670
3671 skb = txq->txb[txq->q.read_ptr].skb;
3672
3673 /* can be called from irqs-disabled context */
3674 if (skb) {
3675 dev_kfree_skb_any(skb);
3676 txq->txb[txq->q.read_ptr].skb = NULL;
3677 }
3678 }
3679}
3680
/*
 * il4965_hw_txq_attach_buf_to_tfd - append a DMA buffer to the current TFD
 *
 * Attaches [@addr, @len] as the next transmit buffer of the TFD at the
 * queue's write pointer.  @reset clears the TFD first (used for the
 * first buffer of a frame); @pad is currently unused here.
 *
 * Returns 0 on success, -EINVAL when the TFD is already full.
 */
int
il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
				dma_addr_t addr, u16 len, u8 reset, u8 pad)
{
	struct il_queue *q;
	struct il_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = (struct il_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = il4965_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IL_NUM_OF_TBS) {
		IL_ERR("Error can not send more than %d chunks\n",
		       IL_NUM_OF_TBS);
		return -EINVAL;
	}

	/* Hardware addresses are limited to 36 bits on this device */
	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IL_TX_DMA_MASK))
		IL_ERR("Unaligned address = %llx\n", (unsigned long long)addr);

	il4965_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
3713
3714/*
3715 * Tell nic where to find circular buffer of Tx Frame Descriptors for
3716 * given Tx queue, and enable the DMA channel used for that queue.
3717 *
3718 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
3719 * channels supported in hardware.
3720 */
3721int
3722il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
3723{
3724 int txq_id = txq->q.id;
3725
3726 /* Circular buffer (TFD queue in DRAM) physical base address */
3727 il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8);
3728
3729 return 0;
3730}
3731
3732/******************************************************************************
3733 *
3734 * Generic RX handler implementations
3735 *
3736 ******************************************************************************/
/*
 * il4965_hdl_alive - handle the uCode ALIVE notification
 *
 * Distinguishes the init-firmware ALIVE from the runtime one, saves the
 * corresponding alive response, and schedules the matching start work
 * (delayed 5 ms) when the uCode reports a valid status.
 */
static void
il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_alive_resp *palive;
	struct delayed_work *pwork;

	palive = &pkt->u.alive_frame;

	D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
	       palive->is_valid, palive->ver_type, palive->ver_subtype);

	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
		D_INFO("Initialization Alive received.\n");
		memcpy(&il->card_alive_init, &pkt->u.alive_frame,
		       sizeof(struct il_init_alive_resp));
		pwork = &il->init_alive_start;
	} else {
		D_INFO("Runtime Alive received.\n");
		memcpy(&il->card_alive, &pkt->u.alive_frame,
		       sizeof(struct il_alive_resp));
		pwork = &il->alive_start;
	}

	/* We delay the ALIVE response by 5ms to
	 * give the HW RF Kill time to activate... */
	if (palive->is_valid == UCODE_VALID_OK)
		queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
	else
		IL_WARN("uCode did not respond OK.\n");
}
3768
3769/**
3770 * il4965_bg_stats_periodic - Timer callback to queue stats
3771 *
3772 * This callback is provided in order to send a stats request.
3773 *
3774 * This timer function is continually reset to execute within
3775 * REG_RECALIB_PERIOD seconds since the last N_STATS
3776 * was received. We need to ensure we receive the stats in order
3777 * to update the temperature used for calibrating the TXPOWER.
3778 */
3779static void
3780il4965_bg_stats_periodic(unsigned long data)
3781{
3782 struct il_priv *il = (struct il_priv *)data;
3783
3784 if (test_bit(S_EXIT_PENDING, &il->status))
3785 return;
3786
3787 /* dont send host command if rf-kill is on */
3788 if (!il_is_ready_rf(il))
3789 return;
3790
3791 il_send_stats_request(il, CMD_ASYNC, false);
3792}
3793
/*
 * il4965_hdl_beacon - handle a beacon-transmit notification from uCode
 *
 * Records whether this device is currently the IBSS manager (beacon
 * sender); the debug branch also logs the TX status of the beacon.
 */
static void
il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il4965_beacon_notif *beacon =
	    (struct il4965_beacon_notif *)pkt->u.raw;
#ifdef CONFIG_IWLEGACY_DEBUG
	u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	D_RX("beacon status %x retries %d iss %d " "tsf %d %d rate %d\n",
	     le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
	     beacon->beacon_notify_hdr.failure_frame,
	     le32_to_cpu(beacon->ibss_mgr_status),
	     le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
#endif

	il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
3812
/*
 * il4965_perform_ct_kill_task - react to critical-temperature shutdown
 *
 * Stops all mac80211 queues and asks the uCode to enter CT-kill; the
 * trailing grab/release of NIC access nudges the device toward its
 * low-power state.
 */
static void
il4965_perform_ct_kill_task(struct il_priv *il)
{
	unsigned long flags;

	D_POWER("Stop all queues\n");

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	_il_wr(il, CSR_UCODE_DRV_GP1_SET,
	       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	/* Read back to flush the posted write */
	_il_rd(il, CSR_UCODE_DRV_GP1);

	spin_lock_irqsave(&il->reg_lock, flags);
	if (!_il_grab_nic_access(il))
		_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, flags);
}
3832
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL.
 *
 * Blocks/unblocks host commands according to the disable flags, mirrors
 * the HW kill-switch state into S_RF_KILL_HW, and either reports the new
 * rfkill state to the wiphy layer or wakes command waiters. */
static void
il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* snapshot of il->status, used below to detect a HW-rfkill change */
	unsigned long status = il->status;

	D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n",
		  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & CT_CARD_DISABLED) ? "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) {

		/* any disable reason: block further host commands */
		_il_wr(il, CSR_UCODE_DRV_GP1_SET,
		       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		il_wr(il, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		if (!(flags & RXON_CARD_DISABLED)) {
			/* RXON still enabled: undo the block again */
			_il_wr(il, CSR_UCODE_DRV_GP1_CLR,
			       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			il_wr(il, HBUS_TARG_MBX_C,
			      HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
	}

	if (flags & CT_CARD_DISABLED)
		il4965_perform_ct_kill_task(il);

	if (flags & HW_CARD_DISABLED)
		set_bit(S_RF_KILL_HW, &il->status);
	else
		clear_bit(S_RF_KILL_HW, &il->status);

	if (!(flags & RXON_CARD_DISABLED))
		il_scan_cancel(il);

	/* only notify the wiphy layer when the HW kill state changed;
	 * otherwise wake anybody waiting on command completion */
	if ((test_bit(S_RF_KILL_HW, &status) !=
	     test_bit(S_RF_KILL_HW, &il->status)))
		wiphy_rfkill_set_hw_state(il->hw->wiphy,
					  test_bit(S_RF_KILL_HW, &il->status));
	else
		wake_up(&il->wait_command_queue);
}
3880
3881/**
3882 * il4965_setup_handlers - Initialize Rx handler callbacks
3883 *
3884 * Setup the RX handlers for each of the reply types sent from the uCode
3885 * to the host.
3886 *
3887 * This function chains into the hardware specific files for them to setup
3888 * any hardware specific handlers as well.
3889 */
3890static void
3891il4965_setup_handlers(struct il_priv *il)
3892{
3893 il->handlers[N_ALIVE] = il4965_hdl_alive;
3894 il->handlers[N_ERROR] = il_hdl_error;
3895 il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
3896 il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
3897 il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
3898 il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
3899 il->handlers[N_BEACON] = il4965_hdl_beacon;
3900
3901 /*
3902 * The same handler is used for both the REPLY to a discrete
3903 * stats request from the host as well as for the periodic
3904 * stats notifications (after received beacons) from the uCode.
3905 */
3906 il->handlers[C_STATS] = il4965_hdl_c_stats;
3907 il->handlers[N_STATS] = il4965_hdl_stats;
3908
3909 il_setup_rx_scan_handlers(il);
3910
3911 /* status change handler */
3912 il->handlers[N_CARD_STATE] = il4965_hdl_card_state;
3913
3914 il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon;
3915 /* Rx handlers */
3916 il->handlers[N_RX_PHY] = il4965_hdl_rx_phy;
3917 il->handlers[N_RX_MPDU] = il4965_hdl_rx;
3918 /* block ack */
3919 il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba;
3920 /* Set up hardware specific Rx handlers */
3921 il->cfg->ops->lib->handler_setup(il);
3922}
3923
3924/**
3925 * il4965_rx_handle - Main entry function for receiving responses from uCode
3926 *
3927 * Uses the il->handlers callback function array to invoke
3928 * the appropriate handlers, including command responses,
3929 * frame-received notifications, and other notifications.
3930 */
3931void
3932il4965_rx_handle(struct il_priv *il)
3933{
3934 struct il_rx_buf *rxb;
3935 struct il_rx_pkt *pkt;
3936 struct il_rx_queue *rxq = &il->rxq;
3937 u32 r, i;
3938 int reclaim;
3939 unsigned long flags;
3940 u8 fill_rx = 0;
3941 u32 count = 8;
3942 int total_empty;
3943
3944 /* uCode's read idx (stored in shared DRAM) indicates the last Rx
3945 * buffer that the driver may process (last buffer filled by ucode). */
3946 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
3947 i = rxq->read;
3948
3949 /* Rx interrupt, but nothing sent from uCode */
3950 if (i == r)
3951 D_RX("r = %d, i = %d\n", r, i);
3952
3953 /* calculate total frames need to be restock after handling RX */
3954 total_empty = r - rxq->write_actual;
3955 if (total_empty < 0)
3956 total_empty += RX_QUEUE_SIZE;
3957
3958 if (total_empty > (RX_QUEUE_SIZE / 2))
3959 fill_rx = 1;
3960
3961 while (i != r) {
3962 int len;
3963
3964 rxb = rxq->queue[i];
3965
3966 /* If an RXB doesn't have a Rx queue slot associated with it,
3967 * then a bug has been introduced in the queue refilling
3968 * routines -- catch it here */
3969 BUG_ON(rxb == NULL);
3970
3971 rxq->queue[i] = NULL;
3972
3973 pci_unmap_page(il->pci_dev, rxb->page_dma,
3974 PAGE_SIZE << il->hw_params.rx_page_order,
3975 PCI_DMA_FROMDEVICE);
3976 pkt = rxb_addr(rxb);
3977
3978 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
3979 len += sizeof(u32); /* account for status word */
3980
3981 /* Reclaim a command buffer only if this packet is a response
3982 * to a (driver-originated) command.
3983 * If the packet (e.g. Rx frame) originated from uCode,
3984 * there is no command buffer to reclaim.
3985 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
3986 * but apparently a few don't get set; catch them here. */
3987 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
3988 (pkt->hdr.cmd != N_RX_PHY) && (pkt->hdr.cmd != N_RX) &&
3989 (pkt->hdr.cmd != N_RX_MPDU) &&
3990 (pkt->hdr.cmd != N_COMPRESSED_BA) &&
3991 (pkt->hdr.cmd != N_STATS) && (pkt->hdr.cmd != C_TX);
3992
3993 /* Based on type of command response or notification,
3994 * handle those that need handling via function in
3995 * handlers table. See il4965_setup_handlers() */
3996 if (il->handlers[pkt->hdr.cmd]) {
3997 D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
3998 il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
3999 il->isr_stats.handlers[pkt->hdr.cmd]++;
4000 il->handlers[pkt->hdr.cmd] (il, rxb);
4001 } else {
4002 /* No handling needed */
4003 D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
4004 i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
4005 }
4006
4007 /*
4008 * XXX: After here, we should always check rxb->page
4009 * against NULL before touching it or its virtual
4010 * memory (pkt). Because some handler might have
4011 * already taken or freed the pages.
4012 */
4013
4014 if (reclaim) {
4015 /* Invoke any callbacks, transfer the buffer to caller,
4016 * and fire off the (possibly) blocking il_send_cmd()
4017 * as we reclaim the driver command queue */
4018 if (rxb->page)
4019 il_tx_cmd_complete(il, rxb);
4020 else
4021 IL_WARN("Claim null rxb?\n");
4022 }
4023
4024 /* Reuse the page if possible. For notification packets and
4025 * SKBs that fail to Rx correctly, add them back into the
4026 * rx_free list for reuse later. */
4027 spin_lock_irqsave(&rxq->lock, flags);
4028 if (rxb->page != NULL) {
4029 rxb->page_dma =
4030 pci_map_page(il->pci_dev, rxb->page, 0,
4031 PAGE_SIZE << il->hw_params.
4032 rx_page_order, PCI_DMA_FROMDEVICE);
4033 list_add_tail(&rxb->list, &rxq->rx_free);
4034 rxq->free_count++;
4035 } else
4036 list_add_tail(&rxb->list, &rxq->rx_used);
4037
4038 spin_unlock_irqrestore(&rxq->lock, flags);
4039
4040 i = (i + 1) & RX_QUEUE_MASK;
4041 /* If there are a lot of unused frames,
4042 * restock the Rx queue so ucode wont assert. */
4043 if (fill_rx) {
4044 count++;
4045 if (count >= 8) {
4046 rxq->read = i;
4047 il4965_rx_replenish_now(il);
4048 count = 0;
4049 }
4050 }
4051 }
4052
4053 /* Backtrack one entry */
4054 rxq->read = i;
4055 if (fill_rx)
4056 il4965_rx_replenish_now(il);
4057 else
4058 il4965_rx_queue_restock(il);
4059}
4060
/* call this function to flush any scheduled tasklet.
 * Order matters: synchronize_irq() waits for the hard-irq half to finish
 * (so no new tasklet can be scheduled), then tasklet_kill() waits for and
 * removes any tasklet already queued. */
static inline void
il4965_synchronize_irq(struct il_priv *il)
{
	/* wait to make sure we flush pending tasklet */
	synchronize_irq(il->pci_dev->irq);
	tasklet_kill(&il->irq_tasklet);
}
4069
/* Tasklet (bottom) half of the interrupt handler: ack all pending CSR/FH
 * interrupt causes, service each discovered bit in turn, then re-enable
 * interrupts unless they were deliberately left disabled. */
static void
il4965_irq_tasklet(struct il_priv *il)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&il->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = _il_rd(il, CSR_INT);
	_il_wr(il, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
	_il_wr(il, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_ISR) {
		/* just for debug */
		inta_mask = _il_rd(il, CSR_INT_MASK);
		D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
		      inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&il->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR49_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR49_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IL_ERR("Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		il_disable_interrupts(il);

		il->isr_stats.hw++;
		il_irq_handle_error(il);

		/* NOTE(review): dead store -- we return immediately below,
		 * so "handled" is never read after this point */
		handled |= CSR_INT_BIT_HW_ERR;

		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			D_ISR("Scheduler finished to transmit "
			      "the frame/frames.\n");
			il->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			D_ISR("Alive interrupt\n");
			il->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;
		/* GP_CNTRL flag clear means the kill switch is engaged */
		if (!
		    (_il_rd(il, CSR_GP_CNTRL) &
		     CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IL_WARN("RF_KILL bit toggled to %s.\n",
			hw_rf_kill ? "disable radio" : "enable radio");

		il->isr_stats.rfkill++;

		/* driver only loads ucode once setting the interface up.
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed.
		 */
		if (!test_bit(S_ALIVE, &il->status)) {
			if (hw_rf_kill)
				set_bit(S_RF_KILL_HW, &il->status);
			else
				clear_bit(S_RF_KILL_HW, &il->status);
			wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IL_ERR("Microcode CT kill error detected.\n");
		il->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IL_ERR("Microcode SW error detected. " " Restarting 0x%X.\n",
		       inta);
		il->isr_stats.sw++;
		il_irq_handle_error(il);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/*
	 * uCode wakes up after power-down sleep.
	 * Tell device about any new tx or host commands enqueued,
	 * and about any Rx buffers made available while asleep.
	 */
	if (inta & CSR_INT_BIT_WAKEUP) {
		D_ISR("Wakeup interrupt\n");
		il_rx_queue_update_write_ptr(il, &il->rxq);
		for (i = 0; i < il->hw_params.max_txq_num; i++)
			il_txq_update_write_ptr(il, &il->txq[i]);
		il->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		il4965_rx_handle(il);
		il->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		D_ISR("uCode load interrupt\n");
		il->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		il->ucode_write_complete = 1;
		wake_up(&il->wait_command_queue);
	}

	if (inta & ~handled) {
		IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
		il->isr_stats.unhandled++;
	}

	if (inta & ~(il->inta_mask)) {
		IL_WARN("Disabled INTA bits 0x%08x were pending\n",
			inta & ~il->inta_mask);
		IL_WARN("   with FH49_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		il_enable_rfkill_int(il);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		inta = _il_rd(il, CSR_INT);
		inta_mask = _il_rd(il, CSR_INT_MASK);
		inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
		D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
		      "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
4255
4256/*****************************************************************************
4257 *
4258 * sysfs attributes
4259 *
4260 *****************************************************************************/
4261
4262#ifdef CONFIG_IWLEGACY_DEBUG
4263
4264/*
4265 * The following adds a new attribute to the sysfs representation
4266 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
4267 * used for controlling the debug level.
4268 *
4269 * See the level definitions in iwl for details.
4270 *
4271 * The debug_level being managed using sysfs below is a per device debug
4272 * level that is used instead of the global debug level if it (the per
4273 * device debug level) is set.
4274 */
4275static ssize_t
4276il4965_show_debug_level(struct device *d, struct device_attribute *attr,
4277 char *buf)
4278{
4279 struct il_priv *il = dev_get_drvdata(d);
4280 return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
4281}
4282
4283static ssize_t
4284il4965_store_debug_level(struct device *d, struct device_attribute *attr,
4285 const char *buf, size_t count)
4286{
4287 struct il_priv *il = dev_get_drvdata(d);
4288 unsigned long val;
4289 int ret;
4290
4291 ret = strict_strtoul(buf, 0, &val);
4292 if (ret)
4293 IL_ERR("%s is not in hex or decimal form.\n", buf);
4294 else {
4295 il->debug_level = val;
4296 if (il_alloc_traffic_mem(il))
4297 IL_ERR("Not enough memory to generate traffic log\n");
4298 }
4299 return strnlen(buf, count);
4300}
4301
4302static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il4965_show_debug_level,
4303 il4965_store_debug_level);
4304
4305#endif /* CONFIG_IWLEGACY_DEBUG */
4306
4307static ssize_t
4308il4965_show_temperature(struct device *d, struct device_attribute *attr,
4309 char *buf)
4310{
4311 struct il_priv *il = dev_get_drvdata(d);
4312
4313 if (!il_is_alive(il))
4314 return -EAGAIN;
4315
4316 return sprintf(buf, "%d\n", il->temperature);
4317}
4318
4319static DEVICE_ATTR(temperature, S_IRUGO, il4965_show_temperature, NULL);
4320
4321static ssize_t
4322il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
4323{
4324 struct il_priv *il = dev_get_drvdata(d);
4325
4326 if (!il_is_ready_rf(il))
4327 return sprintf(buf, "off\n");
4328 else
4329 return sprintf(buf, "%d\n", il->tx_power_user_lmt);
4330}
4331
4332static ssize_t
4333il4965_store_tx_power(struct device *d, struct device_attribute *attr,
4334 const char *buf, size_t count)
4335{
4336 struct il_priv *il = dev_get_drvdata(d);
4337 unsigned long val;
4338 int ret;
4339
4340 ret = strict_strtoul(buf, 10, &val);
4341 if (ret)
4342 IL_INFO("%s is not in decimal form.\n", buf);
4343 else {
4344 ret = il_set_tx_power(il, val, false);
4345 if (ret)
4346 IL_ERR("failed setting tx power (0x%d).\n", ret);
4347 else
4348 ret = count;
4349 }
4350 return ret;
4351}
4352
4353static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il4965_show_tx_power,
4354 il4965_store_tx_power);
4355
/* Attributes exposed in the PCI device's sysfs directory; debug_level
 * appears only on CONFIG_IWLEGACY_DEBUG builds. */
static struct attribute *il_sysfs_entries[] = {
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL			/* sentinel */
};

static struct attribute_group il_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = il_sysfs_entries,
};
4369
4370/******************************************************************************
4371 *
4372 * uCode download functions
4373 *
4374 ******************************************************************************/
4375
/* Free the DMA-coherent buffers that held all ucode images (runtime
 * instructions/data plus backup, init instructions/data, bootstrap).
 * Counterpart of the il_alloc_fw_desc() calls in il4965_ucode_callback(). */
static void
il4965_dealloc_ucode_pci(struct il_priv *il)
{
	il_free_fw_desc(il->pci_dev, &il->ucode_code);
	il_free_fw_desc(il->pci_dev, &il->ucode_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
	il_free_fw_desc(il->pci_dev, &il->ucode_init);
	il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_boot);
}
4386
/* Clear CSR_RESET so the NIC comes out of reset and starts running. */
static void
il4965_nic_start(struct il_priv *il)
{
	/* Remove all resets to allow NIC to operate */
	_il_wr(il, CSR_RESET, 0);
}
4393
4394static void il4965_ucode_callback(const struct firmware *ucode_raw,
4395 void *context);
4396static int il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length);
4397
4398static int __must_check
4399il4965_request_firmware(struct il_priv *il, bool first)
4400{
4401 const char *name_pre = il->cfg->fw_name_pre;
4402 char tag[8];
4403
4404 if (first) {
4405 il->fw_idx = il->cfg->ucode_api_max;
4406 sprintf(tag, "%d", il->fw_idx);
4407 } else {
4408 il->fw_idx--;
4409 sprintf(tag, "%d", il->fw_idx);
4410 }
4411
4412 if (il->fw_idx < il->cfg->ucode_api_min) {
4413 IL_ERR("no suitable firmware found!\n");
4414 return -ENOENT;
4415 }
4416
4417 sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
4418
4419 D_INFO("attempting to load firmware '%s'\n", il->firmware_name);
4420
4421 return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name,
4422 &il->pci_dev->dev, GFP_KERNEL, il,
4423 il4965_ucode_callback);
4424}
4425
/* Pointers into (and sizes of) the five images carried in a raw ucode
 * file: runtime instructions/data, init instructions/data and bootstrap
 * code.  The pointers reference the firmware blob itself -- nothing here
 * is separately allocated or owned. */
struct il4965_firmware_pieces {
	const void *inst, *data, *init, *init_data, *boot;
	size_t inst_size, data_size, init_size, init_data_size, boot_size;
};
4430
/* Parse the raw firmware blob: read the v1/v2 header, validate the total
 * size against the per-image sizes it declares, and fill @pieces with
 * pointers into @ucode_raw->data.  Returns 0 or -EINVAL on a malformed
 * file.  @pieces borrows memory from @ucode_raw -- the caller must copy
 * the images out before releasing the firmware. */
static int
il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw,
		     struct il4965_firmware_pieces *pieces)
{
	struct il_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size;
	const u8 *src;

	il->ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IL_UCODE_API(il->ucode_ver);

	switch (api_ver) {
	default:
	case 0:
	case 1:
	case 2:
		/* v1/v2 header: ver + 5 image sizes = 24 bytes */
		hdr_size = 24;
		if (ucode_raw->size < hdr_size) {
			IL_ERR("File size too small!\n");
			return -EINVAL;
		}
		pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
		pieces->data_size = le32_to_cpu(ucode->v1.data_size);
		pieces->init_size = le32_to_cpu(ucode->v1.init_size);
		pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size);
		pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
		src = ucode->v1.data;
		break;
	}

	/* Verify size of file vs. image size info in file's header.
	 * NOTE(review): the sizes come from the (untrusted) file; on a
	 * 32-bit size_t this sum could in principle wrap -- TODO confirm
	 * whether an explicit overflow check is warranted here. */
	if (ucode_raw->size !=
	    hdr_size + pieces->inst_size + pieces->data_size +
	    pieces->init_size + pieces->init_data_size + pieces->boot_size) {

		IL_ERR("uCode file size %d does not match expected size\n",
		       (int)ucode_raw->size);
		return -EINVAL;
	}

	/* images are laid out back to back after the header */
	pieces->inst = src;
	src += pieces->inst_size;
	pieces->data = src;
	src += pieces->data_size;
	pieces->init = src;
	src += pieces->init_size;
	pieces->init_data = src;
	src += pieces->init_data_size;
	pieces->boot = src;
	src += pieces->boot_size;

	return 0;
}
4484
4485/**
4486 * il4965_ucode_callback - callback when firmware was loaded
4487 *
4488 * If loaded successfully, copies the firmware into buffers
4489 * for the card to fetch (via DMA).
4490 */
4491static void
4492il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
4493{
4494 struct il_priv *il = context;
4495 struct il_ucode_header *ucode;
4496 int err;
4497 struct il4965_firmware_pieces pieces;
4498 const unsigned int api_max = il->cfg->ucode_api_max;
4499 const unsigned int api_min = il->cfg->ucode_api_min;
4500 u32 api_ver;
4501
4502 u32 max_probe_length = 200;
4503 u32 standard_phy_calibration_size =
4504 IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
4505
4506 memset(&pieces, 0, sizeof(pieces));
4507
4508 if (!ucode_raw) {
4509 if (il->fw_idx <= il->cfg->ucode_api_max)
4510 IL_ERR("request for firmware file '%s' failed.\n",
4511 il->firmware_name);
4512 goto try_again;
4513 }
4514
4515 D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name,
4516 ucode_raw->size);
4517
4518 /* Make sure that we got at least the API version number */
4519 if (ucode_raw->size < 4) {
4520 IL_ERR("File size way too small!\n");
4521 goto try_again;
4522 }
4523
4524 /* Data from ucode file: header followed by uCode images */
4525 ucode = (struct il_ucode_header *)ucode_raw->data;
4526
4527 err = il4965_load_firmware(il, ucode_raw, &pieces);
4528
4529 if (err)
4530 goto try_again;
4531
4532 api_ver = IL_UCODE_API(il->ucode_ver);
4533
4534 /*
4535 * api_ver should match the api version forming part of the
4536 * firmware filename ... but we don't check for that and only rely
4537 * on the API version read from firmware header from here on forward
4538 */
4539 if (api_ver < api_min || api_ver > api_max) {
4540 IL_ERR("Driver unable to support your firmware API. "
4541 "Driver supports v%u, firmware is v%u.\n", api_max,
4542 api_ver);
4543 goto try_again;
4544 }
4545
4546 if (api_ver != api_max)
4547 IL_ERR("Firmware has old API version. Expected v%u, "
4548 "got v%u. New firmware can be obtained "
4549 "from http://www.intellinuxwireless.org.\n", api_max,
4550 api_ver);
4551
4552 IL_INFO("loaded firmware version %u.%u.%u.%u\n",
4553 IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
4554 IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
4555
4556 snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
4557 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
4558 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
4559 IL_UCODE_SERIAL(il->ucode_ver));
4560
4561 /*
4562 * For any of the failures below (before allocating pci memory)
4563 * we will try to load a version with a smaller API -- maybe the
4564 * user just got a corrupted version of the latest API.
4565 */
4566
4567 D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
4568 D_INFO("f/w package hdr runtime inst size = %Zd\n", pieces.inst_size);
4569 D_INFO("f/w package hdr runtime data size = %Zd\n", pieces.data_size);
4570 D_INFO("f/w package hdr init inst size = %Zd\n", pieces.init_size);
4571 D_INFO("f/w package hdr init data size = %Zd\n", pieces.init_data_size);
4572 D_INFO("f/w package hdr boot inst size = %Zd\n", pieces.boot_size);
4573
4574 /* Verify that uCode images will fit in card's SRAM */
4575 if (pieces.inst_size > il->hw_params.max_inst_size) {
4576 IL_ERR("uCode instr len %Zd too large to fit in\n",
4577 pieces.inst_size);
4578 goto try_again;
4579 }
4580
4581 if (pieces.data_size > il->hw_params.max_data_size) {
4582 IL_ERR("uCode data len %Zd too large to fit in\n",
4583 pieces.data_size);
4584 goto try_again;
4585 }
4586
4587 if (pieces.init_size > il->hw_params.max_inst_size) {
4588 IL_ERR("uCode init instr len %Zd too large to fit in\n",
4589 pieces.init_size);
4590 goto try_again;
4591 }
4592
4593 if (pieces.init_data_size > il->hw_params.max_data_size) {
4594 IL_ERR("uCode init data len %Zd too large to fit in\n",
4595 pieces.init_data_size);
4596 goto try_again;
4597 }
4598
4599 if (pieces.boot_size > il->hw_params.max_bsm_size) {
4600 IL_ERR("uCode boot instr len %Zd too large to fit in\n",
4601 pieces.boot_size);
4602 goto try_again;
4603 }
4604
4605 /* Allocate ucode buffers for card's bus-master loading ... */
4606
4607 /* Runtime instructions and 2 copies of data:
4608 * 1) unmodified from disk
4609 * 2) backup cache for save/restore during power-downs */
4610 il->ucode_code.len = pieces.inst_size;
4611 il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
4612
4613 il->ucode_data.len = pieces.data_size;
4614 il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
4615
4616 il->ucode_data_backup.len = pieces.data_size;
4617 il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
4618
4619 if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
4620 !il->ucode_data_backup.v_addr)
4621 goto err_pci_alloc;
4622
4623 /* Initialization instructions and data */
4624 if (pieces.init_size && pieces.init_data_size) {
4625 il->ucode_init.len = pieces.init_size;
4626 il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
4627
4628 il->ucode_init_data.len = pieces.init_data_size;
4629 il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
4630
4631 if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
4632 goto err_pci_alloc;
4633 }
4634
4635 /* Bootstrap (instructions only, no data) */
4636 if (pieces.boot_size) {
4637 il->ucode_boot.len = pieces.boot_size;
4638 il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
4639
4640 if (!il->ucode_boot.v_addr)
4641 goto err_pci_alloc;
4642 }
4643
4644 /* Now that we can no longer fail, copy information */
4645
4646 il->sta_key_max_num = STA_KEY_MAX_NUM;
4647
4648 /* Copy images into buffers for card's bus-master reads ... */
4649
4650 /* Runtime instructions (first block of data in file) */
4651 D_INFO("Copying (but not loading) uCode instr len %Zd\n",
4652 pieces.inst_size);
4653 memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size);
4654
4655 D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
4656 il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
4657
4658 /*
4659 * Runtime data
4660 * NOTE: Copy into backup buffer will be done in il_up()
4661 */
4662 D_INFO("Copying (but not loading) uCode data len %Zd\n",
4663 pieces.data_size);
4664 memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size);
4665 memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
4666
4667 /* Initialization instructions */
4668 if (pieces.init_size) {
4669 D_INFO("Copying (but not loading) init instr len %Zd\n",
4670 pieces.init_size);
4671 memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size);
4672 }
4673
4674 /* Initialization data */
4675 if (pieces.init_data_size) {
4676 D_INFO("Copying (but not loading) init data len %Zd\n",
4677 pieces.init_data_size);
4678 memcpy(il->ucode_init_data.v_addr, pieces.init_data,
4679 pieces.init_data_size);
4680 }
4681
4682 /* Bootstrap instructions */
4683 D_INFO("Copying (but not loading) boot instr len %Zd\n",
4684 pieces.boot_size);
4685 memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
4686
4687 /*
4688 * figure out the offset of chain noise reset and gain commands
4689 * base on the size of standard phy calibration commands table size
4690 */
4691 il->_4965.phy_calib_chain_noise_reset_cmd =
4692 standard_phy_calibration_size;
4693 il->_4965.phy_calib_chain_noise_gain_cmd =
4694 standard_phy_calibration_size + 1;
4695
4696 /**************************************************
4697 * This is still part of probe() in a sense...
4698 *
4699 * 9. Setup and register with mac80211 and debugfs
4700 **************************************************/
4701 err = il4965_mac_setup_register(il, max_probe_length);
4702 if (err)
4703 goto out_unbind;
4704
4705 err = il_dbgfs_register(il, DRV_NAME);
4706 if (err)
4707 IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
4708 err);
4709
4710 err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group);
4711 if (err) {
4712 IL_ERR("failed to create sysfs device attributes\n");
4713 goto out_unbind;
4714 }
4715
4716 /* We have our copies now, allow OS release its copies */
4717 release_firmware(ucode_raw);
4718 complete(&il->_4965.firmware_loading_complete);
4719 return;
4720
4721try_again:
4722 /* try next, if any */
4723 if (il4965_request_firmware(il, false))
4724 goto out_unbind;
4725 release_firmware(ucode_raw);
4726 return;
4727
4728err_pci_alloc:
4729 IL_ERR("failed to allocate pci memory\n");
4730 il4965_dealloc_ucode_pci(il);
4731out_unbind:
4732 complete(&il->_4965.firmware_loading_complete);
4733 device_release_driver(&il->pci_dev->dev);
4734 release_firmware(ucode_raw);
4735}
4736
/* uCode error descriptions, indexed directly by the error code.
 * Codes beyond ARRAY_SIZE(desc_lookup_text) are resolved through
 * advanced_lookup[] instead -- see il4965_desc_lookup(). */
static const char *const desc_lookup_text[] = {
	"OK",
	"FAIL",
	"BAD_PARAM",
	"BAD_CHECKSUM",
	"NMI_INTERRUPT_WDG",
	"SYSASSERT",
	"FATAL_ERROR",
	"BAD_COMMAND",
	"HW_ERROR_TUNE_LOCK",
	"HW_ERROR_TEMPERATURE",
	"ILLEGAL_CHAN_FREQ",
	"VCC_NOT_STBL",
	"FH49_ERROR",
	"NMI_INTERRUPT_HOST",
	"NMI_INTERRUPT_ACTION_PT",
	"NMI_INTERRUPT_UNKNOWN",
	"UCODE_VERSION_MISMATCH",
	"HW_ERROR_ABS_LOCK",
	"HW_ERROR_CAL_LOCK_FAIL",
	"NMI_INTERRUPT_INST_ACTION_PT",
	"NMI_INTERRUPT_DATA_ACTION_PT",
	"NMI_TRM_HW_ER",
	"NMI_INTERRUPT_TRM",
	"NMI_INTERRUPT_BREAK_POINT",
	"DEBUG_0",
	"DEBUG_1",
	"DEBUG_2",
	"DEBUG_3",
};
4767
4768static struct {
4769 char *name;
4770 u8 num;
4771} advanced_lookup[] = {
4772 {
4773 "NMI_INTERRUPT_WDG", 0x34}, {
4774 "SYSASSERT", 0x35}, {
4775 "UCODE_VERSION_MISMATCH", 0x37}, {
4776 "BAD_COMMAND", 0x38}, {
4777 "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C}, {
4778 "FATAL_ERROR", 0x3D}, {
4779 "NMI_TRM_HW_ERR", 0x46}, {
4780 "NMI_INTERRUPT_TRM", 0x4C}, {
4781 "NMI_INTERRUPT_BREAK_POINT", 0x54}, {
4782 "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C}, {
4783 "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64}, {
4784 "NMI_INTERRUPT_HOST", 0x66}, {
4785 "NMI_INTERRUPT_ACTION_PT", 0x7C}, {
4786 "NMI_INTERRUPT_UNKNOWN", 0x84}, {
4787 "NMI_INTERRUPT_INST_ACTION_PT", 0x86}, {
4788"ADVANCED_SYSASSERT", 0},};
4789
4790static const char *
4791il4965_desc_lookup(u32 num)
4792{
4793 int i;
4794 int max = ARRAY_SIZE(desc_lookup_text);
4795
4796 if (num < max)
4797 return desc_lookup_text[num];
4798
4799 max = ARRAY_SIZE(advanced_lookup) - 1;
4800 for (i = 0; i < max; i++) {
4801 if (advanced_lookup[i].num == num)
4802 break;
4803 }
4804 return advanced_lookup[i].name;
4805}
4806
/* Layout constants of the uCode error-event table in SRAM */
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))

/* Read the uCode error-event table out of device SRAM (location taken
 * from the relevant ALIVE response) and log its contents. */
void
il4965_dump_nic_error_log(struct il_priv *il)
{
	u32 data2, line;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;
	u32 pc, hcmd;

	/* init and runtime images keep separate error tables */
	if (il->ucode_type == UCODE_INIT)
		base = le32_to_cpu(il->card_alive_init.error_event_table_ptr);
	else
		base = le32_to_cpu(il->card_alive.error_event_table_ptr);

	if (!il->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		IL_ERR("Not valid error log pointer 0x%08X for %s uCode\n",
		       base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
		return;
	}

	/* first word of the table is the number of logged entries */
	count = il_read_targ_mem(il, base);

	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IL_ERR("Start IWL Error Log Dump:\n");
		IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
	}

	/* fixed word offsets within the error table */
	desc = il_read_targ_mem(il, base + 1 * sizeof(u32));
	il->isr_stats.err_code = desc;
	pc = il_read_targ_mem(il, base + 2 * sizeof(u32));
	blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32));
	blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32));
	ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32));
	ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32));
	data1 = il_read_targ_mem(il, base + 7 * sizeof(u32));
	data2 = il_read_targ_mem(il, base + 8 * sizeof(u32));
	line = il_read_targ_mem(il, base + 9 * sizeof(u32));
	time = il_read_targ_mem(il, base + 11 * sizeof(u32));
	hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32));

	IL_ERR("Desc                                  Time       "
	       "data1      data2      line\n");
	IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
	       il4965_desc_lookup(desc), desc, time, data1, data2, line);
	IL_ERR("pc      blink1  blink2  ilink1  ilink2  hcmd\n");
	IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1,
	       blink2, ilink1, ilink2, hcmd);
}
4857
4858static void
4859il4965_rf_kill_ct_config(struct il_priv *il)
4860{
4861 struct il_ct_kill_config cmd;
4862 unsigned long flags;
4863 int ret = 0;
4864
4865 spin_lock_irqsave(&il->lock, flags);
4866 _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
4867 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
4868 spin_unlock_irqrestore(&il->lock, flags);
4869
4870 cmd.critical_temperature_R =
4871 cpu_to_le32(il->hw_params.ct_kill_threshold);
4872
4873 ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG, sizeof(cmd), &cmd);
4874 if (ret)
4875 IL_ERR("C_CT_KILL_CONFIG failed\n");
4876 else
4877 D_INFO("C_CT_KILL_CONFIG " "succeeded, "
4878 "critical temperature is %d\n",
4879 il->hw_params.ct_kill_threshold);
4880}
4881
/*
 * Default queue-number -> Tx DMA/FIFO channel mapping for the seven
 * 4965 Tx queues: the four EDCA queues (VO/VI/BE/BK), the command
 * FIFO, and two unused slots.  Consumed by il4965_alive_notify().
 */
static const s8 default_queue_to_tx_fifo[] = {
	IL_TX_FIFO_VO,
	IL_TX_FIFO_VI,
	IL_TX_FIFO_BE,
	IL_TX_FIFO_BK,
	IL49_CMD_FIFO_NUM,
	IL_TX_FIFO_UNUSED,
	IL_TX_FIFO_UNUSED,
};
4891
4892#define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
4893
/*
 * il4965_alive_notify - set up the Tx scheduler once uCode is alive
 *
 * Under il->lock: wipes the scheduler context area in device SRAM,
 * points the scheduler at the byte-count tables, enables the Tx DMA
 * channels, configures every Tx queue (including the command queue)
 * and finally maps each queue to its FIFO according to
 * default_queue_to_tx_fifo[].
 *
 * Return: always 0.
 */
static int
il4965_alive_notify(struct il_priv *il)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&il->lock, flags);

	/* Clear 4965's internal Tx Scheduler data base */
	il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR);
	a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET;
	/* Zero context data, tx status bitmap and translate table areas
	 * in turn; 'a' walks forward 4 bytes at a time across all three */
	for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
		il_write_targ_mem(il, a, 0);
	for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
		il_write_targ_mem(il, a, 0);
	for (;
	     a <
	     il->scd_base_addr +
	     IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num);
	     a += 4)
		il_write_targ_mem(il, a, 0);

	/* Tell 4965 where to find Tx byte count tables */
	il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++)
		il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chan),
		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = il_rd(il, FH49_TX_CHICKEN_BITS_REG);
	il_wr(il, FH49_TX_CHICKEN_BITS_REG,
	      reg_val | FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Disable chain mode for all queues */
	il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0);

	/* Initialize each Tx queue (including the command queue) */
	for (i = 0; i < il->hw_params.max_txq_num; i++) {

		/* TFD circular buffer read/write idxes */
		il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0);
		il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8));

		/* Max Tx Window size for Scheduler-ACK mode */
		il_write_targ_mem(il,
				  il->scd_base_addr +
				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i),
				  (SCD_WIN_SIZE <<
				   IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				  IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

		/* Frame limit */
		il_write_targ_mem(il,
				  il->scd_base_addr +
				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
				  sizeof(u32),
				  (SCD_FRAME_LIMIT <<
				   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	}
	/* One interrupt-enable bit per Tx queue */
	il_wr_prph(il, IL49_SCD_INTERRUPT_MASK,
		   (1 << il->hw_params.max_txq_num) - 1);

	/* Activate all Tx DMA/FIFO channels */
	il4965_txq_set_sched(il, IL_MASK(0, 6));

	il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0);

	/* make sure all queue are not stopped */
	memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&il->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queue first */
	il->txq_ctx_active_msk = 0;
	/* Map each Tx/cmd queue to its corresponding fifo */
	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);

	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
		int ac = default_queue_to_tx_fifo[i];

		il_txq_ctx_activate(il, i);

		if (ac == IL_TX_FIFO_UNUSED)
			continue;

		il4965_tx_queue_set_status(il, &il->txq[i], ac, 0);
	}

	spin_unlock_irqrestore(&il->lock, flags);

	return 0;
}
4993
/**
 * il4965_alive_start - called after N_ALIVE notification received
 * from protocol/runtime uCode (initialization uCode's
 * Alive gets handled by il_init_alive_start()).
 *
 * Validates the alive response and the loaded runtime image, then
 * configures the Tx scheduler, restores/initializes the RXON
 * configuration, and finishes bring-up (BT config, calibration reset,
 * CT-kill threshold, power mode).  On any failure the restart work is
 * queued instead.
 */
static void
il4965_alive_start(struct il_priv *il)
{
	int ret = 0;
	struct il_rxon_context *ctx = &il->ctx;

	D_INFO("Runtime Alive received.\n");

	if (il->card_alive.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		D_INFO("Alive failed.\n");
		goto restart;
	}

	/* Initialize uCode has loaded Runtime uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "runtime" alive if code weren't properly loaded. */
	if (il4965_verify_ucode(il)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		D_INFO("Bad runtime uCode load.\n");
		goto restart;
	}

	ret = il4965_alive_notify(il);
	if (ret) {
		IL_WARN("Could not complete ALIVE transition [ntf]: %d\n", ret);
		goto restart;
	}

	/* After the ALIVE response, we can send host commands to the uCode */
	set_bit(S_ALIVE, &il->status);

	/* Enable watchdog to monitor the driver tx queues */
	il_setup_watchdog(il);

	/* With rfkill asserted, stop here; bring-up resumes when the
	 * radio is re-enabled */
	if (il_is_rfkill(il))
		return;

	ieee80211_wake_queues(il->hw);

	il->active_rate = RATES_MASK;

	if (il_is_associated_ctx(ctx)) {
		struct il_rxon_cmd *active_rxon =
		    (struct il_rxon_cmd *)&ctx->active;
		/* apply any changes in staging */
		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		/* Initialize our rx_config data */
		il_connection_init_rx_config(il, &il->ctx);

		if (il->cfg->ops->hcmd->set_rxon_chain)
			il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
	}

	/* Configure bluetooth coexistence if enabled */
	il_send_bt_config(il);

	il4965_reset_run_time_calib(il);

	set_bit(S_READY, &il->status);

	/* Configure the adapter for unassociated operation */
	il_commit_rxon(il, ctx);

	/* At this point, the NIC is initialized and operational */
	il4965_rf_kill_ct_config(il);

	D_INFO("ALIVE processing complete.\n");
	/* S_READY is now set; release anyone blocked in il4965_mac_start() */
	wake_up(&il->wait_command_queue);

	il_power_update_mode(il, true);
	D_INFO("Updated power mode\n");

	return;

restart:
	queue_work(il->workqueue, &il->restart);
}
5081
5082static void il4965_cancel_deferred_work(struct il_priv *il);
5083
5084static void
5085__il4965_down(struct il_priv *il)
5086{
5087 unsigned long flags;
5088 int exit_pending;
5089
5090 D_INFO(DRV_NAME " is going down\n");
5091
5092 il_scan_cancel_timeout(il, 200);
5093
5094 exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);
5095
5096 /* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set
5097 * to prevent rearm timer */
5098 del_timer_sync(&il->watchdog);
5099
5100 il_clear_ucode_stations(il, NULL);
5101 il_dealloc_bcast_stations(il);
5102 il_clear_driver_stations(il);
5103
5104 /* Unblock any waiting calls */
5105 wake_up_all(&il->wait_command_queue);
5106
5107 /* Wipe out the EXIT_PENDING status bit if we are not actually
5108 * exiting the module */
5109 if (!exit_pending)
5110 clear_bit(S_EXIT_PENDING, &il->status);
5111
5112 /* stop and reset the on-board processor */
5113 _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
5114
5115 /* tell the device to stop sending interrupts */
5116 spin_lock_irqsave(&il->lock, flags);
5117 il_disable_interrupts(il);
5118 spin_unlock_irqrestore(&il->lock, flags);
5119 il4965_synchronize_irq(il);
5120
5121 if (il->mac80211_registered)
5122 ieee80211_stop_queues(il->hw);
5123
5124 /* If we have not previously called il_init() then
5125 * clear all bits but the RF Kill bit and return */
5126 if (!il_is_init(il)) {
5127 il->status =
5128 test_bit(S_RF_KILL_HW,
5129 &il->
5130 status) << S_RF_KILL_HW |
5131 test_bit(S_GEO_CONFIGURED,
5132 &il->
5133 status) << S_GEO_CONFIGURED |
5134 test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
5135 goto exit;
5136 }
5137
5138 /* ...otherwise clear out all the status bits but the RF Kill
5139 * bit and continue taking the NIC down. */
5140 il->status &=
5141 test_bit(S_RF_KILL_HW,
5142 &il->status) << S_RF_KILL_HW | test_bit(S_GEO_CONFIGURED,
5143 &il->
5144 status) <<
5145 S_GEO_CONFIGURED | test_bit(S_FW_ERROR,
5146 &il->
5147 status) << S_FW_ERROR |
5148 test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
5149
5150 il4965_txq_ctx_stop(il);
5151 il4965_rxq_stop(il);
5152
5153 /* Power-down device's busmaster DMA clocks */
5154 il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
5155 udelay(5);
5156
5157 /* Make sure (redundant) we've released our request to stay awake */
5158 il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5159
5160 /* Stop the device, and put it in low power state */
5161 il_apm_stop(il);
5162
5163exit:
5164 memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
5165
5166 dev_kfree_skb(il->beacon_skb);
5167 il->beacon_skb = NULL;
5168
5169 /* clear out any free frames */
5170 il4965_clear_free_frames(il);
5171}
5172
/*
 * il4965_down - take the NIC down under il->mutex, then flush work.
 *
 * Deferred work is cancelled after dropping the mutex because the
 * work handlers (see il4965_bg_*) take il->mutex themselves.
 */
static void
il4965_down(struct il_priv *il)
{
	mutex_lock(&il->mutex);
	__il4965_down(il);
	mutex_unlock(&il->mutex);

	il4965_cancel_deferred_work(il);
}
5182
5183#define HW_READY_TIMEOUT (50)
5184
5185static int
5186il4965_set_hw_ready(struct il_priv *il)
5187{
5188 int ret = 0;
5189
5190 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
5191 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
5192
5193 /* See if we got it */
5194 ret =
5195 _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
5196 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
5197 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, HW_READY_TIMEOUT);
5198 if (ret != -ETIMEDOUT)
5199 il->hw_ready = true;
5200 else
5201 il->hw_ready = false;
5202
5203 D_INFO("hardware %s\n", (il->hw_ready == 1) ? "ready" : "not ready");
5204 return ret;
5205}
5206
5207static int
5208il4965_prepare_card_hw(struct il_priv *il)
5209{
5210 int ret = 0;
5211
5212 D_INFO("il4965_prepare_card_hw enter\n");
5213
5214 ret = il4965_set_hw_ready(il);
5215 if (il->hw_ready)
5216 return ret;
5217
5218 /* If HW is not ready, prepare the conditions to check again */
5219 il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);
5220
5221 ret =
5222 _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
5223 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
5224 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
5225
5226 /* HW should be ready by now, check again. */
5227 if (ret != -ETIMEDOUT)
5228 il4965_set_hw_ready(il);
5229
5230 return ret;
5231}
5232
5233#define MAX_HW_RESTARTS 5
5234
/*
 * __il4965_up - bring the NIC up; caller must hold il->mutex
 *
 * Allocates the broadcast station, checks HW readiness and the RF-kill
 * switch, initializes the NIC, then tries up to MAX_HW_RESTARTS times
 * to load the bootstrap uCode and start the card.  If all attempts
 * fail, the device is taken back down.
 *
 * Return: 0 on success (including the HW-rfkill case, where bring-up
 * stops early), negative errno otherwise.
 */
static int
__il4965_up(struct il_priv *il)
{
	int i;
	int ret;

	if (test_bit(S_EXIT_PENDING, &il->status)) {
		IL_WARN("Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
		IL_ERR("ucode not available for device bringup\n");
		return -EIO;
	}

	ret = il4965_alloc_bcast_station(il, &il->ctx);
	if (ret) {
		il_dealloc_bcast_stations(il);
		return ret;
	}

	il4965_prepare_card_hw(il);

	if (!il->hw_ready) {
		IL_WARN("Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(S_RF_KILL_HW, &il->status);
	else
		set_bit(S_RF_KILL_HW, &il->status);

	if (il_is_rfkill(il)) {
		wiphy_rfkill_set_hw_state(il->hw->wiphy, true);

		/* keep interrupts on so we notice when rfkill is lifted */
		il_enable_interrupts(il);
		IL_WARN("Radio disabled by HW RF Kill switch\n");
		return 0;
	}

	/* ack/clear all pending interrupt causes */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);

	/* must be initialised before il_hw_nic_init */
	il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM;

	ret = il4965_hw_nic_init(il);
	if (ret) {
		IL_ERR("Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_interrupts(il);

	/* really make sure rfkill handshake bits are cleared */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Copy original ucode data image from disk into backup cache.
	 * This will be used to initialize the on-board processor's
	 * data SRAM for a clean start when the runtime program first loads. */
	memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
	       il->ucode_data.len);

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		/* load bootstrap state machine,
		 * load bootstrap program into processor's memory,
		 * prepare to load the "initialize" uCode */
		ret = il->cfg->ops->lib->load_ucode(il);

		if (ret) {
			IL_ERR("Unable to set up bootstrap uCode: %d\n", ret);
			continue;
		}

		/* start card; "initialize" will load runtime ucode */
		il4965_nic_start(il);

		D_INFO(DRV_NAME " is coming up\n");

		return 0;
	}

	set_bit(S_EXIT_PENDING, &il->status);
	__il4965_down(il);
	clear_bit(S_EXIT_PENDING, &il->status);

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IL_ERR("Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
5336
5337/*****************************************************************************
5338 *
5339 * Workqueue callbacks
5340 *
5341 *****************************************************************************/
5342
5343static void
5344il4965_bg_init_alive_start(struct work_struct *data)
5345{
5346 struct il_priv *il =
5347 container_of(data, struct il_priv, init_alive_start.work);
5348
5349 mutex_lock(&il->mutex);
5350 if (test_bit(S_EXIT_PENDING, &il->status))
5351 goto out;
5352
5353 il->cfg->ops->lib->init_alive_start(il);
5354out:
5355 mutex_unlock(&il->mutex);
5356}
5357
5358static void
5359il4965_bg_alive_start(struct work_struct *data)
5360{
5361 struct il_priv *il =
5362 container_of(data, struct il_priv, alive_start.work);
5363
5364 mutex_lock(&il->mutex);
5365 if (test_bit(S_EXIT_PENDING, &il->status))
5366 goto out;
5367
5368 il4965_alive_start(il);
5369out:
5370 mutex_unlock(&il->mutex);
5371}
5372
5373static void
5374il4965_bg_run_time_calib_work(struct work_struct *work)
5375{
5376 struct il_priv *il = container_of(work, struct il_priv,
5377 run_time_calib_work);
5378
5379 mutex_lock(&il->mutex);
5380
5381 if (test_bit(S_EXIT_PENDING, &il->status) ||
5382 test_bit(S_SCANNING, &il->status)) {
5383 mutex_unlock(&il->mutex);
5384 return;
5385 }
5386
5387 if (il->start_calib) {
5388 il4965_chain_noise_calibration(il, (void *)&il->_4965.stats);
5389 il4965_sensitivity_calibration(il, (void *)&il->_4965.stats);
5390 }
5391
5392 mutex_unlock(&il->mutex);
5393}
5394
/*
 * il4965_bg_restart - restart work handler
 *
 * Two paths:
 *  - firmware error (S_FW_ERROR set): tear the device down ourselves
 *    and ask mac80211 to restart the hw, which will re-open us;
 *  - otherwise: full down/up cycle driven locally.
 * Note il4965_cancel_deferred_work()/il4965_down() are called outside
 * il->mutex, since the cancelled handlers take that mutex themselves.
 */
static void
il4965_bg_restart(struct work_struct *data)
{
	struct il_priv *il = container_of(data, struct il_priv, restart);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
		mutex_lock(&il->mutex);
		il->ctx.vif = NULL;
		il->is_open = 0;

		__il4965_down(il);

		mutex_unlock(&il->mutex);
		il4965_cancel_deferred_work(il);
		ieee80211_restart_hw(il->hw);
	} else {
		il4965_down(il);

		mutex_lock(&il->mutex);
		/* teardown may have raced with an exit request */
		if (test_bit(S_EXIT_PENDING, &il->status)) {
			mutex_unlock(&il->mutex);
			return;
		}

		__il4965_up(il);
		mutex_unlock(&il->mutex);
	}
}
5426
/*
 * il4965_bg_rx_replenish - refill Rx buffers from process context,
 * where allocation may sleep (unlike the interrupt path).
 */
static void
il4965_bg_rx_replenish(struct work_struct *data)
{
	struct il_priv *il = container_of(data, struct il_priv, rx_replenish);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	mutex_lock(&il->mutex);
	il4965_rx_replenish(il);
	mutex_unlock(&il->mutex);
}
5439
5440/*****************************************************************************
5441 *
5442 * mac80211 entry point functions
5443 *
5444 *****************************************************************************/
5445
5446#define UCODE_READY_TIMEOUT (4 * HZ)
5447
/*
 * Not a mac80211 entry point function, but it fits in with all the
 * other mac80211 functions grouped here.
 *
 * Fills in the ieee80211_hw capability flags, wiphy settings and band
 * data, initializes LEDs and registers the device with mac80211.
 * @max_probe_length bounds the probe request, from which the 802.11
 * header (24 bytes) and a zero-length SSID element (2 bytes) are
 * subtracted to obtain max_scan_ie_len.
 *
 * Return: 0 on success, or the ieee80211_register_hw() error.
 */
static int
il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
{
	int ret;
	struct ieee80211_hw *hw = il->hw;

	hw->rate_control_algorithm = "iwl-4965-rs";

	/* Tell mac80211 our characteristics */
	hw->flags =
	    IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
	    IEEE80211_HW_NEED_DTIM_PERIOD | IEEE80211_HW_SPECTRUM_MGMT |
	    IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	/* SMPS support only on 11n-capable SKUs */
	if (il->cfg->sku & IL_SKU_N)
		hw->flags |=
		    IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
		    IEEE80211_HW_SUPPORTS_STATIC_SMPS;

	hw->sta_data_size = sizeof(struct il_station_priv);
	hw->vif_data_size = sizeof(struct il_vif_priv);

	hw->wiphy->interface_modes |= il->ctx.interface_modes;
	hw->wiphy->interface_modes |= il->ctx.exclusive_interface_modes;

	hw->wiphy->flags |=
	    WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS;

	/*
	 * For now, disable PS by default because it affects
	 * RX performance significantly.
	 */
	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
	/* we create the 802.11 header and a zero-length SSID element */
	hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;

	/* Default value; 4 EDCA QOS priorities */
	hw->queues = 4;

	hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;

	/* Only advertise bands that actually have channels */
	if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
		il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
		    &il->bands[IEEE80211_BAND_2GHZ];
	if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
		il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
		    &il->bands[IEEE80211_BAND_5GHZ];

	il_leds_init(il);

	ret = ieee80211_register_hw(il->hw);
	if (ret) {
		IL_ERR("Failed to register hw (error %d)\n", ret);
		return ret;
	}
	il->mac80211_registered = 1;

	return 0;
}
5513
/*
 * il4965_mac_start - mac80211 start() callback
 *
 * Brings the NIC up, then (unless rfkill is asserted) waits up to
 * UCODE_READY_TIMEOUT for il4965_alive_start() to set S_READY and wake
 * il->wait_command_queue.  is_open is set even in the rfkill case so
 * the interface counts as opened.
 *
 * Return: 0 on success, __il4965_up() error, or -ETIMEDOUT.
 */
int
il4965_mac_start(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;
	int ret;

	D_MAC80211("enter\n");

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&il->mutex);
	ret = __il4965_up(il);
	mutex_unlock(&il->mutex);

	if (ret)
		return ret;

	if (il_is_rfkill(il))
		goto out;

	D_INFO("Start UP work done.\n");

	/* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
	 * mac80211 will not be run successfully. */
	ret = wait_event_timeout(il->wait_command_queue,
				 test_bit(S_READY, &il->status),
				 UCODE_READY_TIMEOUT);
	if (!ret) {
		/* re-check: the wakeup may have raced with the timeout */
		if (!test_bit(S_READY, &il->status)) {
			IL_ERR("START_ALIVE timeout after %dms.\n",
			       jiffies_to_msecs(UCODE_READY_TIMEOUT));
			return -ETIMEDOUT;
		}
	}

	il4965_led_enable(il);

out:
	il->is_open = 1;
	D_MAC80211("leave\n");
	return 0;
}
5555
/*
 * il4965_mac_stop - mac80211 stop() callback
 *
 * Takes the NIC down and flushes the driver workqueue, then re-enables
 * the rfkill interrupt so state changes are still reported while the
 * interface is closed.
 */
void
il4965_mac_stop(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;

	D_MAC80211("enter\n");

	if (!il->is_open)
		return;

	il->is_open = 0;

	il4965_down(il);

	flush_workqueue(il->workqueue);

	/* User space software may expect getting rfkill changes
	 * even if interface is down */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_rfkill_int(il);

	D_MAC80211("leave\n");
}
5579
/*
 * il4965_mac_tx - mac80211 tx() callback
 *
 * Hands the frame to il4965_tx_skb(); on failure the skb is freed
 * here, since mac80211 has already relinquished ownership.
 */
void
il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct il_priv *il = hw->priv;

	D_MACDUMP("enter\n");

	D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
	     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);

	if (il4965_tx_skb(il, skb))
		dev_kfree_skb_any(skb);

	D_MACDUMP("leave\n");
}
5595
/*
 * il4965_mac_update_tkip_key - mac80211 update_tkip_key() callback
 *
 * Forwards the new TKIP phase-1 key material for @iv32 to the device
 * via il4965_update_tkip_key().
 */
void
il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   struct ieee80211_key_conf *keyconf,
			   struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
{
	struct il_priv *il = hw->priv;
	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;

	D_MAC80211("enter\n");

	il4965_update_tkip_key(il, vif_priv->ctx, keyconf, sta, iv32,
			       phase1key);

	D_MAC80211("leave\n");
}
5611
/*
 * il4965_mac_set_key - mac80211 set_key() callback
 *
 * Installs or removes a hardware crypto key.  Group WEP keys with no
 * station and no prior key-mapping keys use the legacy "default WEP
 * key" host command; everything else goes through the dynamic key
 * path.  Rejected outright when software crypto is forced via the
 * sw_crypto module parameter.
 *
 * Return: 0 on success, -EOPNOTSUPP/-EINVAL or a helper's error code.
 */
int
il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key)
{
	struct il_priv *il = hw->priv;
	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
	struct il_rxon_context *ctx = vif_priv->ctx;
	int ret;
	u8 sta_id;
	bool is_default_wep_key = false;

	D_MAC80211("enter\n");

	if (il->cfg->mod_params->sw_crypto) {
		D_MAC80211("leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/* NULL sta maps to the broadcast station entry */
	sta_id = il_sta_id_or_broadcast(il, vif_priv->ctx, sta);
	if (sta_id == IL_INVALID_STATION)
		return -EINVAL;

	mutex_lock(&il->mutex);
	il_scan_cancel_timeout(il, 100);

	/*
	 * If we are getting WEP group key and we didn't receive any key mapping
	 * so far, we are in legacy wep mode (group key only), otherwise we are
	 * in 1X mode.
	 * In legacy wep mode, we use another host command to the uCode.
	 */
	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
		if (cmd == SET_KEY)
			is_default_wep_key = !ctx->key_mapping_keys;
		else
			is_default_wep_key =
			    (key->hw_key_idx == HW_KEY_DEFAULT);
	}

	switch (cmd) {
	case SET_KEY:
		if (is_default_wep_key)
			ret =
			    il4965_set_default_wep_key(il, vif_priv->ctx, key);
		else
			ret =
			    il4965_set_dynamic_key(il, vif_priv->ctx, key,
						   sta_id);

		D_MAC80211("enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (is_default_wep_key)
			ret = il4965_remove_default_wep_key(il, ctx, key);
		else
			ret = il4965_remove_dynamic_key(il, ctx, key, sta_id);

		D_MAC80211("disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");

	return ret;
}
5682
/*
 * il4965_mac_ampdu_action - mac80211 ampdu_action() callback
 *
 * Starts/stops Rx and Tx aggregation sessions for @sta/@tid.  Only
 * available on 11n SKUs.  Stop requests racing with a driver shutdown
 * (S_EXIT_PENDING) report success so mac80211 can finish its teardown.
 * Actions not handled by the switch fall through with the initial
 * -EINVAL.
 */
int
il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			enum ieee80211_ampdu_mlme_action action,
			struct ieee80211_sta *sta, u16 tid, u16 * ssn,
			u8 buf_size)
{
	struct il_priv *il = hw->priv;
	int ret = -EINVAL;

	D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);

	if (!(il->cfg->sku & IL_SKU_N))
		return -EACCES;

	mutex_lock(&il->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		D_HT("start Rx\n");
		ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		D_HT("stop Rx\n");
		ret = il4965_sta_rx_agg_stop(il, sta, tid);
		if (test_bit(S_EXIT_PENDING, &il->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_START:
		D_HT("start Tx\n");
		ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
		break;
	case IEEE80211_AMPDU_TX_STOP:
		D_HT("stop Tx\n");
		ret = il4965_tx_agg_stop(il, vif, sta, tid);
		if (test_bit(S_EXIT_PENDING, &il->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* nothing to program; buf_size is not used by this HW */
		ret = 0;
		break;
	}
	mutex_unlock(&il->mutex);

	return ret;
}
5728
/*
 * il4965_mac_sta_add - mac80211 sta_add() callback
 *
 * Adds @sta to the device's station table and initializes rate
 * scaling for it.
 *
 * NOTE(review): is_ap is true when *our* vif is a station — i.e. the
 * peer being added is presumably the AP we associate with; the naming
 * follows il_add_station_common()'s parameter.  Confirm against that
 * helper's definition.
 *
 * Return: 0 on success, or il_add_station_common()'s error.
 */
int
il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct il_priv *il = hw->priv;
	struct il_station_priv *sta_priv = (void *)sta->drv_priv;
	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	int ret;
	u8 sta_id;

	D_INFO("received request to add station %pM\n", sta->addr);
	mutex_lock(&il->mutex);
	D_INFO("proceeding to add station %pM\n", sta->addr);
	sta_priv->common.sta_id = IL_INVALID_STATION;

	atomic_set(&sta_priv->pending_frames, 0);

	ret =
	    il_add_station_common(il, vif_priv->ctx, sta->addr, is_ap, sta,
				  &sta_id);
	if (ret) {
		IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
		/* Should we return success if return code is EEXIST ? */
		mutex_unlock(&il->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Initialize rate scaling */
	D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
	il4965_rs_rate_init(il, sta, sta_id);
	mutex_unlock(&il->mutex);

	return 0;
}
5766
/*
 * il4965_mac_channel_switch - mac80211 channel_switch() callback
 *
 * Validates that a CSA-driven switch is currently possible (not in
 * rfkill/scan/shutdown, associated, target channel valid and
 * different), rebuilds the staging RXON/HT configuration for the new
 * channel under il->lock, then hands off to the lib's
 * set_channel_switch op with S_CHANNEL_SWITCH_PENDING set.  If that
 * op fails, the pending state is rolled back and mac80211 is told the
 * switch did not complete.
 */
void
il4965_mac_channel_switch(struct ieee80211_hw *hw,
			  struct ieee80211_channel_switch *ch_switch)
{
	struct il_priv *il = hw->priv;
	const struct il_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = ch_switch->channel;
	struct il_ht_config *ht_conf = &il->current_ht_config;

	struct il_rxon_context *ctx = &il->ctx;
	u16 ch;

	D_MAC80211("enter\n");

	mutex_lock(&il->mutex);

	if (il_is_rfkill(il))
		goto out;

	if (test_bit(S_EXIT_PENDING, &il->status) ||
	    test_bit(S_SCANNING, &il->status) ||
	    test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		goto out;

	if (!il_is_associated_ctx(ctx))
		goto out;

	if (!il->cfg->ops->lib->set_channel_switch)
		goto out;

	ch = channel->hw_value;
	/* already on the requested channel: nothing to do */
	if (le16_to_cpu(ctx->active.channel) == ch)
		goto out;

	ch_info = il_get_channel_info(il, channel->band, ch);
	if (!il_is_channel_valid(ch_info)) {
		D_MAC80211("invalid channel\n");
		goto out;
	}

	spin_lock_irq(&il->lock);

	il->current_ht_config.smps = conf->smps_mode;

	/* Configure HT40 channels */
	ctx->ht.enabled = conf_is_ht(conf);
	if (ctx->ht.enabled) {
		if (conf_is_ht40_minus(conf)) {
			ctx->ht.extension_chan_offset =
			    IEEE80211_HT_PARAM_CHA_SEC_BELOW;
			ctx->ht.is_40mhz = true;
		} else if (conf_is_ht40_plus(conf)) {
			ctx->ht.extension_chan_offset =
			    IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
			ctx->ht.is_40mhz = true;
		} else {
			ctx->ht.extension_chan_offset =
			    IEEE80211_HT_PARAM_CHA_SEC_NONE;
			ctx->ht.is_40mhz = false;
		}
	} else
		ctx->ht.is_40mhz = false;

	/* if we are switching from ht to 2.4 clear flags */
	if ((le16_to_cpu(ctx->staging.channel) != ch))
		ctx->staging.flags = 0;

	il_set_rxon_channel(il, channel, ctx);
	il_set_rxon_ht(il, ht_conf);
	il_set_flags_for_band(il, ctx, channel->band, ctx->vif);

	spin_unlock_irq(&il->lock);

	il_set_rate(il);
	/*
	 * at this point, staging_rxon has the
	 * configuration for channel switch
	 */
	set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
	il->switch_channel = cpu_to_le16(ch);
	if (il->cfg->ops->lib->set_channel_switch(il, ch_switch)) {
		/* switch command failed: undo pending state and tell
		 * mac80211 the switch did not happen */
		clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
		il->switch_channel = 0;
		ieee80211_chswitch_done(ctx->vif, false);
	}

out:
	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");
}
5857
/*
 * il4965_configure_filter - mac80211 configure_filter() callback
 *
 * Translates mac80211 FIF_* filter flags into RXON filter-flag bits to
 * set (filter_or) and clear (filter_nand) in the staging RXON, and
 * reports back (via *total_flags) which filter bits the driver
 * actually honors.  The staging config is not committed here.
 */
void
il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
			unsigned int *total_flags, u64 multicast)
{
	struct il_priv *il = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;

/* accumulate RXON bits to set or clear for a given FIF_* test */
#define CHK(test, flag) do { \
	if (*total_flags & (test)) \
		filter_or |= (flag); \
	else \
		filter_nand |= (flag); \
	} while (0)

	D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
		   *total_flags);

	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&il->mutex);

	il->ctx.staging.filter_flags &= ~filter_nand;
	il->ctx.staging.filter_flags |= filter_or;

	/*
	 * Not committing directly because hardware can perform a scan,
	 * but we'll eventually commit the filter flags change anyway.
	 */

	mutex_unlock(&il->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in il_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &=
	    FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
	    FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
5904
5905/*****************************************************************************
5906 *
5907 * driver setup and teardown
5908 *
5909 *****************************************************************************/
5910
/*
 * il4965_bg_txpower_work - deferred work to (re)program device Tx power.
 *
 * Runs from il->workqueue (queued as il->txpower_work).  Bails out early
 * while a scan is in progress or the driver is shutting down; the stats
 * notification path re-queues this work, so a skipped run is compensated
 * later.
 */
static void
il4965_bg_txpower_work(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv,
		txpower_work);

	mutex_lock(&il->mutex);

	/* If a scan happened to start before we got here
	 * then just return; the stats notification will
	 * kick off another scheduled work to compensate for
	 * any temperature delta we missed here. */
	if (test_bit(S_EXIT_PENDING, &il->status) ||
	    test_bit(S_SCANNING, &il->status))
		goto out;

	/* Regardless of if we are associated, we must reconfigure the
	 * TX power since frames can be sent on non-radar channels while
	 * not associated */
	il->cfg->ops->lib->send_tx_power(il);

	/* Update last_temperature to keep is_calib_needed from running
	 * when it isn't needed... */
	il->last_temperature = il->temperature;
out:
	mutex_unlock(&il->mutex);
}
5938
/*
 * il4965_setup_deferred_work - create the driver workqueue and register
 * all deferred-work items, timers and the IRQ tasklet.
 *
 * Called once from probe, before interrupts are requested.
 * NOTE(review): create_singlethread_workqueue() can fail and the result
 * is not checked here; a NULL il->workqueue would oops on first queueing.
 * Fixing that requires an int return and a caller-side check — flagged,
 * not changed, to keep this block's interface intact.
 */
static void
il4965_setup_deferred_work(struct il_priv *il)
{
	il->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&il->wait_command_queue);

	INIT_WORK(&il->restart, il4965_bg_restart);
	INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish);
	INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work);
	INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start);
	INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start);

	il_setup_scan_deferred_work(il);

	INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);

	/* Periodic statistics timer; handler runs in softirq context. */
	init_timer(&il->stats_periodic);
	il->stats_periodic.data = (unsigned long)il;
	il->stats_periodic.function = il4965_bg_stats_periodic;

	/* Firmware/queue stall watchdog. */
	init_timer(&il->watchdog);
	il->watchdog.data = (unsigned long)il;
	il->watchdog.function = il_bg_watchdog;

	/* Bottom half for interrupt processing. */
	tasklet_init(&il->irq_tasklet,
		     (void (*)(unsigned long))il4965_irq_tasklet,
		     (unsigned long)il);
}
5968
/*
 * il4965_cancel_deferred_work - cancel work items and timers registered by
 * il4965_setup_deferred_work().
 *
 * NOTE(review): alive_start is cancelled with the non-sync variant while
 * init_alive_start uses the _sync one — presumably intentional (alive_start
 * may be re-armed by the code path doing the cancel); confirm before
 * changing.
 */
static void
il4965_cancel_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->txpower_work);
	cancel_delayed_work_sync(&il->init_alive_start);
	cancel_delayed_work(&il->alive_start);
	cancel_work_sync(&il->run_time_calib_work);

	il_cancel_scan_deferred_work(il);

	del_timer_sync(&il->stats_periodic);
}
5981
5982static void
5983il4965_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
5984{
5985 int i;
5986
5987 for (i = 0; i < RATE_COUNT_LEGACY; i++) {
5988 rates[i].bitrate = il_rates[i].ieee * 5;
5989 rates[i].hw_value = i; /* Rate scaling will work on idxes */
5990 rates[i].hw_value_short = i;
5991 rates[i].flags = 0;
5992 if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) {
5993 /*
5994 * If CCK != 1M then set short preamble rate flag.
5995 */
5996 rates[i].flags |=
5997 (il_rates[i].plcp ==
5998 RATE_1M_PLCP) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
5999 }
6000 }
6001}
6002
6003/*
6004 * Acquire il->lock before calling this function !
6005 */
/*
 * il4965_set_wr_ptrs - point both the HBUS write pointer and the scheduler
 * read pointer of Tx queue @txq_id at @idx.
 *
 * Acquire il->lock before calling this function !
 */
void
il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx)
{
	il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8));
	il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx);
}
6012
6013void
6014il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
6015 int tx_fifo_id, int scd_retry)
6016{
6017 int txq_id = txq->q.id;
6018
6019 /* Find out whether to activate Tx queue */
6020 int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0;
6021
6022 /* Set up and activate */
6023 il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
6024 (active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
6025 (tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
6026 (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
6027 (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
6028 IL49_SCD_QUEUE_STTS_REG_MSK);
6029
6030 txq->sched_retry = scd_retry;
6031
6032 D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate",
6033 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
6034}
6035
/*
 * il4965_init_drv - initialize driver-private state: locks, defaults,
 * channel map and geo (band) data.
 *
 * Returns 0 on success or a negative errno; on error everything allocated
 * here is released via the goto-cleanup chain.  Counterpart of
 * il4965_uninit_drv().
 */
static int
il4965_init_drv(struct il_priv *il)
{
	int ret;

	spin_lock_init(&il->sta_lock);
	spin_lock_init(&il->hcmd_lock);

	INIT_LIST_HEAD(&il->free_frames);

	mutex_init(&il->mutex);

	/* Populated later by il_init_geos(). */
	il->ieee_channels = NULL;
	il->ieee_rates = NULL;
	il->band = IEEE80211_BAND_2GHZ;

	il->iw_mode = NL80211_IFTYPE_STATION;
	il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
	il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;

	/* initialize force reset */
	il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;

	/* Choose which receivers/antennas to use */
	if (il->cfg->ops->hcmd->set_rxon_chain)
		il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);

	il_init_scan_params(il);

	ret = il_init_channel_map(il);
	if (ret) {
		IL_ERR("initializing regulatory failed: %d\n", ret);
		goto err;
	}

	/* Geo data depends on the channel map being in place. */
	ret = il_init_geos(il);
	if (ret) {
		IL_ERR("initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	il4965_init_hw_rates(il, il->ieee_rates);

	return 0;

err_free_channel_map:
	il_free_channel_map(il);
err:
	return ret;
}
6085
/*
 * il4965_uninit_drv - release state set up by il4965_init_drv() plus the
 * calibration results and any cached scan command buffer.
 */
static void
il4965_uninit_drv(struct il_priv *il)
{
	il4965_calib_free_results(il);
	il_free_geos(il);
	il_free_channel_map(il);
	kfree(il->scan_cmd);
}
6094
/*
 * il4965_hw_detect - latch hardware revision identifiers: CSR HW revision,
 * its workaround register, and the PCI revision ID.
 */
static void
il4965_hw_detect(struct il_priv *il)
{
	il->hw_rev = _il_rd(il, CSR_HW_REV);
	il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG);
	il->rev_id = il->pci_dev->revision;
	D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
}
6103
6104static int
6105il4965_set_hw_params(struct il_priv *il)
6106{
6107 il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
6108 il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
6109 if (il->cfg->mod_params->amsdu_size_8K)
6110 il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K);
6111 else
6112 il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K);
6113
6114 il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL;
6115
6116 if (il->cfg->mod_params->disable_11n)
6117 il->cfg->sku &= ~IL_SKU_N;
6118
6119 /* Device-specific setup */
6120 return il->cfg->ops->lib->set_hw_params(il);
6121}
6122
/* Map mac80211 AC index (VO, VI, BE, BK) to the device Tx FIFO to use. */
static const u8 il4965_bss_ac_to_fifo[] = {
	IL_TX_FIFO_VO,
	IL_TX_FIFO_VI,
	IL_TX_FIFO_BE,
	IL_TX_FIFO_BK,
};
6129
/* Map mac80211 AC index to the driver Tx queue number (identity mapping). */
static const u8 il4965_bss_ac_to_queue[] = {
	0, 1, 2, 3,
};
6133
6134static int
6135il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6136{
6137 int err = 0;
6138 struct il_priv *il;
6139 struct ieee80211_hw *hw;
6140 struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
6141 unsigned long flags;
6142 u16 pci_cmd;
6143
6144 /************************
6145 * 1. Allocating HW data
6146 ************************/
6147
6148 hw = il_alloc_all(cfg);
6149 if (!hw) {
6150 err = -ENOMEM;
6151 goto out;
6152 }
6153 il = hw->priv;
6154 /* At this point both hw and il are allocated. */
6155
6156 il->ctx.ctxid = 0;
6157
6158 il->ctx.always_active = true;
6159 il->ctx.is_active = true;
6160 il->ctx.rxon_cmd = C_RXON;
6161 il->ctx.rxon_timing_cmd = C_RXON_TIMING;
6162 il->ctx.rxon_assoc_cmd = C_RXON_ASSOC;
6163 il->ctx.qos_cmd = C_QOS_PARAM;
6164 il->ctx.ap_sta_id = IL_AP_ID;
6165 il->ctx.wep_key_cmd = C_WEPKEY;
6166 il->ctx.ac_to_fifo = il4965_bss_ac_to_fifo;
6167 il->ctx.ac_to_queue = il4965_bss_ac_to_queue;
6168 il->ctx.exclusive_interface_modes = BIT(NL80211_IFTYPE_ADHOC);
6169 il->ctx.interface_modes = BIT(NL80211_IFTYPE_STATION);
6170 il->ctx.ap_devtype = RXON_DEV_TYPE_AP;
6171 il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
6172 il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
6173 il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;
6174
6175 SET_IEEE80211_DEV(hw, &pdev->dev);
6176
6177 D_INFO("*** LOAD DRIVER ***\n");
6178 il->cfg = cfg;
6179 il->pci_dev = pdev;
6180 il->inta_mask = CSR_INI_SET_MASK;
6181
6182 if (il_alloc_traffic_mem(il))
6183 IL_ERR("Not enough memory to generate traffic log\n");
6184
6185 /**************************
6186 * 2. Initializing PCI bus
6187 **************************/
6188 pci_disable_link_state(pdev,
6189 PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
6190 PCIE_LINK_STATE_CLKPM);
6191
6192 if (pci_enable_device(pdev)) {
6193 err = -ENODEV;
6194 goto out_ieee80211_free_hw;
6195 }
6196
6197 pci_set_master(pdev);
6198
6199 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
6200 if (!err)
6201 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
6202 if (err) {
6203 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6204 if (!err)
6205 err =
6206 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
6207 /* both attempts failed: */
6208 if (err) {
6209 IL_WARN("No suitable DMA available.\n");
6210 goto out_pci_disable_device;
6211 }
6212 }
6213
6214 err = pci_request_regions(pdev, DRV_NAME);
6215 if (err)
6216 goto out_pci_disable_device;
6217
6218 pci_set_drvdata(pdev, il);
6219
6220 /***********************
6221 * 3. Read REV register
6222 ***********************/
6223 il->hw_base = pci_iomap(pdev, 0, 0);
6224 if (!il->hw_base) {
6225 err = -ENODEV;
6226 goto out_pci_release_regions;
6227 }
6228
6229 D_INFO("pci_resource_len = 0x%08llx\n",
6230 (unsigned long long)pci_resource_len(pdev, 0));
6231 D_INFO("pci_resource_base = %p\n", il->hw_base);
6232
6233 /* these spin locks will be used in apm_ops.init and EEPROM access
6234 * we should init now
6235 */
6236 spin_lock_init(&il->reg_lock);
6237 spin_lock_init(&il->lock);
6238
6239 /*
6240 * stop and reset the on-board processor just in case it is in a
6241 * strange state ... like being left stranded by a primary kernel
6242 * and this is now the kdump kernel trying to start up
6243 */
6244 _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
6245
6246 il4965_hw_detect(il);
6247 IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev);
6248
6249 /* We disable the RETRY_TIMEOUT register (0x41) to keep
6250 * PCI Tx retries from interfering with C3 CPU state */
6251 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
6252
6253 il4965_prepare_card_hw(il);
6254 if (!il->hw_ready) {
6255 IL_WARN("Failed, HW not ready\n");
6256 goto out_iounmap;
6257 }
6258
6259 /*****************
6260 * 4. Read EEPROM
6261 *****************/
6262 /* Read the EEPROM */
6263 err = il_eeprom_init(il);
6264 if (err) {
6265 IL_ERR("Unable to init EEPROM\n");
6266 goto out_iounmap;
6267 }
6268 err = il4965_eeprom_check_version(il);
6269 if (err)
6270 goto out_free_eeprom;
6271
6272 if (err)
6273 goto out_free_eeprom;
6274
6275 /* extract MAC Address */
6276 il4965_eeprom_get_mac(il, il->addresses[0].addr);
6277 D_INFO("MAC address: %pM\n", il->addresses[0].addr);
6278 il->hw->wiphy->addresses = il->addresses;
6279 il->hw->wiphy->n_addresses = 1;
6280
6281 /************************
6282 * 5. Setup HW constants
6283 ************************/
6284 if (il4965_set_hw_params(il)) {
6285 IL_ERR("failed to set hw parameters\n");
6286 goto out_free_eeprom;
6287 }
6288
6289 /*******************
6290 * 6. Setup il
6291 *******************/
6292
6293 err = il4965_init_drv(il);
6294 if (err)
6295 goto out_free_eeprom;
6296 /* At this point both hw and il are initialized. */
6297
6298 /********************
6299 * 7. Setup services
6300 ********************/
6301 spin_lock_irqsave(&il->lock, flags);
6302 il_disable_interrupts(il);
6303 spin_unlock_irqrestore(&il->lock, flags);
6304
6305 pci_enable_msi(il->pci_dev);
6306
6307 err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
6308 if (err) {
6309 IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
6310 goto out_disable_msi;
6311 }
6312
6313 il4965_setup_deferred_work(il);
6314 il4965_setup_handlers(il);
6315
6316 /*********************************************
6317 * 8. Enable interrupts and read RFKILL state
6318 *********************************************/
6319
6320 /* enable rfkill interrupt: hw bug w/a */
6321 pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd);
6322 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
6323 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
6324 pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd);
6325 }
6326
6327 il_enable_rfkill_int(il);
6328
6329 /* If platform's RF_KILL switch is NOT set to KILL */
6330 if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
6331 clear_bit(S_RF_KILL_HW, &il->status);
6332 else
6333 set_bit(S_RF_KILL_HW, &il->status);
6334
6335 wiphy_rfkill_set_hw_state(il->hw->wiphy,
6336 test_bit(S_RF_KILL_HW, &il->status));
6337
6338 il_power_initialize(il);
6339
6340 init_completion(&il->_4965.firmware_loading_complete);
6341
6342 err = il4965_request_firmware(il, true);
6343 if (err)
6344 goto out_destroy_workqueue;
6345
6346 return 0;
6347
6348out_destroy_workqueue:
6349 destroy_workqueue(il->workqueue);
6350 il->workqueue = NULL;
6351 free_irq(il->pci_dev->irq, il);
6352out_disable_msi:
6353 pci_disable_msi(il->pci_dev);
6354 il4965_uninit_drv(il);
6355out_free_eeprom:
6356 il_eeprom_free(il);
6357out_iounmap:
6358 pci_iounmap(pdev, il->hw_base);
6359out_pci_release_regions:
6360 pci_set_drvdata(pdev, NULL);
6361 pci_release_regions(pdev);
6362out_pci_disable_device:
6363 pci_disable_device(pdev);
6364out_ieee80211_free_hw:
6365 il_free_traffic_mem(il);
6366 ieee80211_free_hw(il->hw);
6367out:
6368 return err;
6369}
6370
/*
 * il4965_pci_remove - unbind the driver from the device and undo everything
 * il4965_pci_probe() set up, in reverse order.  The teardown ordering below
 * is deliberate (e.g. the workqueue must outlive ieee80211_unregister_hw).
 */
static void __devexit
il4965_pci_remove(struct pci_dev *pdev)
{
	struct il_priv *il = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!il)
		return;

	/* Probe kicked off async firmware load; wait for it to finish. */
	wait_for_completion(&il->_4965.firmware_loading_complete);

	D_INFO("*** UNLOAD DRIVER ***\n");

	il_dbgfs_unregister(il);
	sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);

	/* ieee80211_unregister_hw call will cause il_mac_stop to
	 * to be called and il4965_down since we are removing the device
	 * we need to set S_EXIT_PENDING bit.
	 */
	set_bit(S_EXIT_PENDING, &il->status);

	il_leds_exit(il);

	if (il->mac80211_registered) {
		ieee80211_unregister_hw(il->hw);
		il->mac80211_registered = 0;
	} else {
		il4965_down(il);
	}

	/*
	 * Make sure device is reset to low power before unloading driver.
	 * This may be redundant with il4965_down(), but there are paths to
	 * run il4965_down() without calling apm_ops.stop(), and there are
	 * paths to avoid running il4965_down() at all before leaving driver.
	 * This (inexpensive) call *makes sure* device is reset.
	 */
	il_apm_stop(il);

	/* make sure we flush any pending irq or
	 * tasklet for the driver
	 */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	il4965_synchronize_irq(il);

	il4965_dealloc_ucode_pci(il);

	if (il->rxq.bd)
		il4965_rx_queue_free(il, &il->rxq);
	il4965_hw_txq_ctx_free(il);

	il_eeprom_free(il);

	/*netif_stop_queue(dev); */
	flush_workqueue(il->workqueue);

	/* ieee80211_unregister_hw calls il_mac_stop, which flushes
	 * il->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;
	il_free_traffic_mem(il);

	free_irq(il->pci_dev->irq, il);
	pci_disable_msi(il->pci_dev);
	pci_iounmap(pdev, il->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	il4965_uninit_drv(il);

	/* NULL-safe: dev_kfree_skb() ignores a NULL skb. */
	dev_kfree_skb(il->beacon_skb);

	ieee80211_free_hw(il->hw);
}
6451
/*
 * il4965_txq_set_sched - activate/deactivate Tx DMA/FIFO channels according
 * to the Tx FIFO mask; each bit in @mask enables one scheduler channel.
 * Must be called under il->lock and with MAC access held.
 */
void
il4965_txq_set_sched(struct il_priv *il, u32 mask)
{
	il_wr_prph(il, IL49_SCD_TXFACT, mask);
}
6461
6462/*****************************************************************************
6463 *
6464 * driver and module entry point
6465 *
6466 *****************************************************************************/
6467
6468/* Hardware specific file defines the PCI IDs table for that hardware module */
static DEFINE_PCI_DEVICE_TABLE(il4965_hw_card_ids) = {
	/* 4965 device IDs; PCI_ANY_ID matches every subsystem ID. */
	{IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
	{IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
	{0}	/* sentinel terminating the table */
};
6474MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids);
6475
/* PCI driver glue; registered from il4965_init(). */
static struct pci_driver il4965_driver = {
	.name = DRV_NAME,
	.id_table = il4965_hw_card_ids,
	.probe = il4965_pci_probe,
	.remove = __devexit_p(il4965_pci_remove),
	.driver.pm = IL_LEGACY_PM_OPS,
};
6483
6484static int __init
6485il4965_init(void)
6486{
6487
6488 int ret;
6489 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
6490 pr_info(DRV_COPYRIGHT "\n");
6491
6492 ret = il4965_rate_control_register();
6493 if (ret) {
6494 pr_err("Unable to register rate control algorithm: %d\n", ret);
6495 return ret;
6496 }
6497
6498 ret = pci_register_driver(&il4965_driver);
6499 if (ret) {
6500 pr_err("Unable to initialize PCI module\n");
6501 goto error_register;
6502 }
6503
6504 return ret;
6505
6506error_register:
6507 il4965_rate_control_unregister();
6508 return ret;
6509}
6510
/*
 * il4965_exit - module exit point; reverse order of il4965_init().
 */
static void __exit
il4965_exit(void)
{
	pci_unregister_driver(&il4965_driver);
	il4965_rate_control_unregister();
}
6517
/* Entry/exit registration (order of these two macros is irrelevant). */
module_exit(il4965_exit);
module_init(il4965_init);

#ifdef CONFIG_IWLEGACY_DEBUG
/* Debug output mask; runtime-writable via sysfs. */
module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif

/* Load-time module parameters (read-only after load). */
module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num, il4965_mod_params.num_of_queues, int, S_IRUGO);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int,
		   S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
new file mode 100644
index 000000000000..467d0cb14ecd
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -0,0 +1,2860 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26#include <linux/kernel.h>
27#include <linux/init.h>
28#include <linux/skbuff.h>
29#include <linux/slab.h>
30#include <net/mac80211.h>
31
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/delay.h>
35
36#include <linux/workqueue.h>
37
38#include "common.h"
39#include "4965.h"
40
#define IL4965_RS_NAME "iwl-4965-rs"

#define NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IL_NUMBER_TRY      1
#define IL_HT_NUMBER_TRY   3

#define RATE_MAX_WINDOW		62	/* # tx in history win */
#define RATE_MIN_FAILURE_TH	6	/* min failures to calc tpt */
#define RATE_MIN_SUCCESS_TH	8	/* min successes to calc tpt */

/* max allowed rate miss before sync LQ cmd */
#define IL_MISSED_RATE_MAX		15
/* max time to accum history, in jiffies (value is 3*HZ, i.e. 3 seconds,
 * despite older comments saying 2) */
#define RATE_SCALE_FLUSH_INTVL   (3*HZ)
55
/* Fallback legacy rate index for each HT rate index — presumably used when
 * dropping from an HT mode to a legacy mode; verify against callers. */
static u8 rs_ht_to_legacy[] = {
	RATE_6M_IDX, RATE_6M_IDX,
	RATE_6M_IDX, RATE_6M_IDX,
	RATE_6M_IDX,
	RATE_6M_IDX, RATE_9M_IDX,
	RATE_12M_IDX, RATE_18M_IDX,
	RATE_24M_IDX, RATE_36M_IDX,
	RATE_48M_IDX, RATE_54M_IDX
};
65
/* Next antenna configuration to try for each current configuration
 * (indexed by the ANT_* bitmask value). */
static const u8 ant_toggle_lookup[] = {
	/*ANT_NONE -> */ ANT_NONE,
	/*ANT_A    -> */ ANT_B,
	/*ANT_B    -> */ ANT_C,
	/*ANT_AB   -> */ ANT_BC,
	/*ANT_C    -> */ ANT_A,
	/*ANT_AC   -> */ ANT_AB,
	/*ANT_BC   -> */ ANT_AC,
	/*ANT_ABC  -> */ ANT_ABC,
};
76
/*
 * Helper to declare one il_rate_info entry for rate @r (Mbps, truncated):
 * legacy PLCP, SISO/MIMO2 HT PLCP codes for @s, IEEE rate code, then four
 * prev/next rate-index pairs (ip/in, rp/rn, pp/np) used by rate scaling.
 */
#define IL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
	[RATE_##r##M_IDX] = { RATE_##r##M_PLCP,      \
				    RATE_SISO_##s##M_PLCP, \
				    RATE_MIMO2_##s##M_PLCP,\
				    RATE_##r##M_IEEE,      \
				    RATE_##ip##M_IDX,    \
				    RATE_##in##M_IDX,    \
				    RATE_##rp##M_IDX,    \
				    RATE_##rn##M_IDX,    \
				    RATE_##pp##M_IDX,    \
				    RATE_##np##M_IDX }
88
/*
 * Parameter order (matches IL_DECLARE_RATE_INFO's 8 arguments):
 * rate, ht rate, then four prev/next index pairs used when stepping
 * the rate up or down (including the antenna-toggle variants).
 *
 * If there isn't a valid next or previous rate then INV is used which
 * maps to RATE_INVALID
 *
 */
const struct il_rate_info il_rates[RATE_COUNT] = {
	IL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),	/* 1mbps */
	IL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),		/* 2mbps */
	IL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),	/*5.5mbps */
	IL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),	/* 11mbps */
	IL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),		/* 6mbps */
	IL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),	/* 9mbps */
	IL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),	/* 12mbps */
	IL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),	/* 18mbps */
	IL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),	/* 24mbps */
	IL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),	/* 36mbps */
	IL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),	/* 48mbps */
	IL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
	IL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
};
112
/*
 * il4965_hwrate_to_plcp_idx - translate a uCode rate_n_flags word into an
 * index into il_rates[], or -1 if no entry matches.
 *
 * HT rates: the low byte is the MCS/PLCP code; MIMO2 codes are folded onto
 * the SISO range, the result is rebased at the first OFDM rate, and 9M is
 * skipped since it has no HT equivalent.
 * Legacy rates: linear search of il_rates[] by PLCP code.
 */
static int
il4965_hwrate_to_plcp_idx(u32 rate_n_flags)
{
	int idx = 0;

	if (rate_n_flags & RATE_MCS_HT_MSK) {
		/* HT rate format */
		idx = (rate_n_flags & 0xff);

		/* Fold MIMO2 PLCP codes onto the SISO code range. */
		if (idx >= RATE_MIMO2_6M_PLCP)
			idx = idx - RATE_MIMO2_6M_PLCP;

		idx += IL_FIRST_OFDM_RATE;
		/* skip 9M not supported in ht */
		if (idx >= RATE_9M_IDX)
			idx += 1;
		if (idx >= IL_FIRST_OFDM_RATE && idx <= IL_LAST_OFDM_RATE)
			return idx;

	} else {
		/* legacy rate format, search for match in table */
		for (idx = 0; idx < ARRAY_SIZE(il_rates); idx++)
			if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
				return idx;
	}

	return -1;
}
141
142static void il4965_rs_rate_scale_perform(struct il_priv *il,
143 struct sk_buff *skb,
144 struct ieee80211_sta *sta,
145 struct il_lq_sta *lq_sta);
146static void il4965_rs_fill_link_cmd(struct il_priv *il,
147 struct il_lq_sta *lq_sta, u32 rate_n_flags);
148static void il4965_rs_stay_in_table(struct il_lq_sta *lq_sta,
149 bool force_search);
150
151#ifdef CONFIG_MAC80211_DEBUGFS
152static void il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta,
153 u32 *rate_n_flags, int idx);
154#else
155static void
156il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
157{
158}
159#endif
160
161/**
162 * The following tables contain the expected throughput metrics for all rates
163 *
164 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
165 *
166 * where invalid entries are zeros.
167 *
168 * CCK rates are only valid in legacy table and will only be used in G
169 * (2.4 GHz) band.
170 */
171
/* Expected throughput for legacy (non-HT) rates; see table comment above. */
static s32 expected_tpt_legacy[RATE_COUNT] = {
	7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
};
175
/* HT expected-throughput tables: rows are Norm / SGI / AGG / AGG+SGI,
 * columns follow the rate order documented above (CCK columns are 0). */
static s32 expected_tpt_siso20MHz[4][RATE_COUNT] = {
	{0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202},	/* Norm */
	{0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210},	/* SGI */
	{0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381},	/* AGG */
	{0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413},	/* AGG+SGI */
};

static s32 expected_tpt_siso40MHz[4][RATE_COUNT] = {
	{0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257},	/* Norm */
	{0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264},	/* SGI */
	{0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640},	/* AGG */
	{0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683},	/* AGG+SGI */
};

static s32 expected_tpt_mimo2_20MHz[4][RATE_COUNT] = {
	{0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250},	/* Norm */
	{0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256},	/* SGI */
	{0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619},	/* AGG */
	{0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660},	/* AGG+SGI */
};

static s32 expected_tpt_mimo2_40MHz[4][RATE_COUNT] = {
	{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289},	/* Norm */
	{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293},	/* SGI */
	{0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922},	/* AGG */
	{0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966},	/* AGG+SGI */
};
203
/* Human-readable rate descriptions: {rate in mbps, modulation/coding}. */
static const struct il_rate_mcs_info il_rate_mcs[RATE_COUNT] = {
	{"1", "BPSK DSSS"},
	{"2", "QPSK DSSS"},
	{"5.5", "BPSK CCK"},
	{"11", "QPSK CCK"},
	{"6", "BPSK 1/2"},
	{"9", "BPSK 1/2"},
	{"12", "QPSK 1/2"},
	{"18", "QPSK 3/4"},
	{"24", "16QAM 1/2"},
	{"36", "16QAM 3/4"},
	{"48", "64QAM 2/3"},
	{"54", "64QAM 3/4"},
	{"60", "64QAM 5/6"},
};
220
221#define MCS_IDX_PER_STREAM (8)
222
223static inline u8
224il4965_rs_extract_rate(u32 rate_n_flags)
225{
226 return (u8) (rate_n_flags & 0xFF);
227}
228
/*
 * il4965_rs_rate_scale_clear_win - reset one per-rate success/failure
 * sliding window to "no history" (ratio and throughput marked invalid).
 */
static void
il4965_rs_rate_scale_clear_win(struct il_rate_scale_data *win)
{
	win->data = 0;
	win->success_counter = 0;
	win->success_ratio = IL_INVALID_VALUE;
	win->counter = 0;
	win->average_tpt = IL_INVALID_VALUE;
	win->stamp = 0;
}
239
240static inline u8
241il4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
242{
243 return (ant_type & valid_antenna) == ant_type;
244}
245
246/*
247 * removes the old data from the stats. All data that is older than
248 * TID_MAX_TIME_DIFF, will be deleted.
249 */
/*
 * removes the old data from the stats. All data that is older than
 * TID_MAX_TIME_DIFF, will be deleted.
 */
static void
il4965_rs_tl_rm_old_stats(struct il_traffic_load *tl, u32 curr_time)
{
	/* The oldest age we want to keep */
	u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;

	/* Pop expired cells off the head of the circular packet-count buffer,
	 * subtracting their contribution from the running total. */
	while (tl->queue_count && tl->time_stamp < oldest_time) {
		tl->total -= tl->packet_count[tl->head];
		tl->packet_count[tl->head] = 0;
		tl->time_stamp += TID_QUEUE_CELL_SPACING;
		tl->queue_count--;
		tl->head++;
		if (tl->head >= TID_QUEUE_MAX_SIZE)
			tl->head = 0;	/* wrap around the ring */
	}
}
266
267/*
268 * increment traffic load value for tid and also remove
269 * any old values if passed the certain time period
270 */
/*
 * increment traffic load value for tid and also remove
 * any old values if passed the certain time period
 *
 * Returns the packet's TID on success, or MAX_TID_COUNT for non-QoS
 * frames and out-of-range TIDs (which are not tracked).
 */
static u8
il4965_rs_tl_add_packet(struct il_lq_sta *lq_data, struct ieee80211_hdr *hdr)
{
	u32 curr_time = jiffies_to_msecs(jiffies);
	u32 time_diff;
	s32 idx;
	struct il_traffic_load *tl = NULL;
	u8 tid;

	/* Only QoS-data frames carry a TID. */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	} else
		return MAX_TID_COUNT;

	if (unlikely(tid >= TID_MAX_LOAD_COUNT))
		return MAX_TID_COUNT;

	tl = &lq_data->load[tid];

	/* Quantize time to TID_ROUND_VALUE buckets. */
	curr_time -= curr_time % TID_ROUND_VALUE;

	/* Happens only for the first packet. Initialize the data */
	if (!(tl->queue_count)) {
		tl->total = 1;
		tl->time_stamp = curr_time;
		tl->queue_count = 1;
		tl->head = 0;
		tl->packet_count[0] = 1;
		return MAX_TID_COUNT;
	}

	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
	idx = time_diff / TID_QUEUE_CELL_SPACING;

	/* The history is too long: remove data that is older than */
	/* TID_MAX_TIME_DIFF */
	if (idx >= TID_QUEUE_MAX_SIZE)
		il4965_rs_tl_rm_old_stats(tl, curr_time);

	/* Account this packet in its time cell of the circular buffer. */
	idx = (tl->head + idx) % TID_QUEUE_MAX_SIZE;
	tl->packet_count[idx] = tl->packet_count[idx] + 1;
	tl->total = tl->total + 1;

	if ((idx + 1) > tl->queue_count)
		tl->queue_count = idx + 1;

	return tid;
}
320
321/*
322 get the traffic load value for tid
323*/
/*
 * get the traffic load value for tid
 *
 * Expires stale history first, then returns the running packet total
 * (0 for untracked/out-of-range TIDs or when no history exists).
 */
static u32
il4965_rs_tl_get_load(struct il_lq_sta *lq_data, u8 tid)
{
	u32 curr_time = jiffies_to_msecs(jiffies);
	u32 time_diff;
	s32 idx;
	struct il_traffic_load *tl = NULL;

	if (tid >= TID_MAX_LOAD_COUNT)
		return 0;

	tl = &(lq_data->load[tid]);

	/* Quantize time the same way il4965_rs_tl_add_packet() does. */
	curr_time -= curr_time % TID_ROUND_VALUE;

	if (!(tl->queue_count))
		return 0;

	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
	idx = time_diff / TID_QUEUE_CELL_SPACING;

	/* The history is too long: remove data that is older than */
	/* TID_MAX_TIME_DIFF */
	if (idx >= TID_QUEUE_MAX_SIZE)
		il4965_rs_tl_rm_old_stats(tl, curr_time);

	return tl->total;
}
352
/*
 * il4965_rs_tl_turn_on_agg_for_tid - try to start a Tx BA (aggregation)
 * session for @tid if its traffic load exceeds IL_AGG_LOAD_THRESHOLD.
 *
 * Returns 0 if the session was started, -EAGAIN if the load was too low
 * or mac80211 rejected the session, or the mac80211 error otherwise.
 */
static int
il4965_rs_tl_turn_on_agg_for_tid(struct il_priv *il, struct il_lq_sta *lq_data,
				 u8 tid, struct ieee80211_sta *sta)
{
	int ret = -EAGAIN;
	u32 load;

	load = il4965_rs_tl_get_load(lq_data, tid);

	if (load > IL_AGG_LOAD_THRESHOLD) {
		D_HT("Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid);
		ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
		if (ret == -EAGAIN) {
			/*
			 * driver and mac80211 is out of sync
			 * this might be cause by reloading firmware
			 * stop the tx ba session here
			 */
			IL_ERR("Fail start Tx agg on tid: %d\n", tid);
			ieee80211_stop_tx_ba_session(sta, tid);
		}
	} else
		D_HT("Aggregation not enabled for tid %d because load = %u\n",
		     tid, load);

	return ret;
}
380
381static void
382il4965_rs_tl_turn_on_agg(struct il_priv *il, u8 tid, struct il_lq_sta *lq_data,
383 struct ieee80211_sta *sta)
384{
385 if (tid < TID_MAX_LOAD_COUNT)
386 il4965_rs_tl_turn_on_agg_for_tid(il, lq_data, tid, sta);
387 else
388 IL_ERR("tid exceeds max load count: %d/%d\n", tid,
389 TID_MAX_LOAD_COUNT);
390}
391
/* Count how many antennas (A/B/C) are selected in a rate_n_flags word. */
static inline int
il4965_get_il4965_num_of_ant_from_rate(u32 rate_n_flags)
{
	return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
	    !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
	    !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
}
399
400/*
401 * Static function to get the expected throughput from an il_scale_tbl_info
402 * that wraps a NULL pointer check
403 */
404static s32
405il4965_get_expected_tpt(struct il_scale_tbl_info *tbl, int rs_idx)
406{
407 if (tbl->expected_tpt)
408 return tbl->expected_tpt[rs_idx];
409 return 0;
410}
411
412/**
413 * il4965_rs_collect_tx_data - Update the success/failure sliding win
414 *
415 * We keep a sliding win of the last 62 packets transmitted
416 * at this rate. win->data contains the bitmask of successful
417 * packets.
418 */
/**
 * il4965_rs_collect_tx_data - Update the success/failure sliding win
 *
 * We keep a sliding win of the last 62 packets transmitted
 * at this rate. win->data contains the bitmask of successful
 * packets.
 *
 * @tbl: rate table whose window for @scale_idx is updated
 * @scale_idx: rate index (validated against [0, RATE_COUNT))
 * @attempts: number of frame attempts to record
 * @successes: how many of those attempts (the most recent ones) succeeded
 *
 * Returns 0, or -EINVAL for an out-of-range @scale_idx.
 */
static int
il4965_rs_collect_tx_data(struct il_scale_tbl_info *tbl, int scale_idx,
			  int attempts, int successes)
{
	struct il_rate_scale_data *win = NULL;
	/* Bit of the oldest slot in the 62-entry window bitmap. */
	static const u64 mask = (((u64) 1) << (RATE_MAX_WINDOW - 1));
	s32 fail_count, tpt;

	if (scale_idx < 0 || scale_idx >= RATE_COUNT)
		return -EINVAL;

	/* Select win for current tx bit rate */
	win = &(tbl->win[scale_idx]);

	/* Get expected throughput */
	tpt = il4965_get_expected_tpt(tbl, scale_idx);

	/*
	 * Keep track of only the latest 62 tx frame attempts in this rate's
	 * history win; anything older isn't really relevant any more.
	 * If we have filled up the sliding win, drop the oldest attempt;
	 * if the oldest attempt (highest bit in bitmap) shows "success",
	 * subtract "1" from the success counter (this is the main reason
	 * we keep these bitmaps!).
	 */
	while (attempts > 0) {
		if (win->counter >= RATE_MAX_WINDOW) {

			/* remove earliest */
			win->counter = RATE_MAX_WINDOW - 1;

			if (win->data & mask) {
				win->data &= ~mask;
				win->success_counter--;
			}
		}

		/* Increment frames-attempted counter */
		win->counter++;

		/* Shift bitmap by one frame to throw away oldest history */
		win->data <<= 1;

		/* Mark the most recent #successes attempts as successful */
		if (successes > 0) {
			win->success_counter++;
			win->data |= 0x1;
			successes--;
		}

		attempts--;
	}

	/* Calculate current success ratio, avoid divide-by-0!
	 * Ratio is scaled by 128 (i.e. 12800 == 100%). */
	if (win->counter > 0)
		win->success_ratio =
		    128 * (100 * win->success_counter) / win->counter;
	else
		win->success_ratio = IL_INVALID_VALUE;

	fail_count = win->counter - win->success_counter;

	/* Calculate average throughput, if we have enough history. */
	if (fail_count >= RATE_MIN_FAILURE_TH ||
	    win->success_counter >= RATE_MIN_SUCCESS_TH)
		win->average_tpt = (win->success_ratio * tpt + 64) / 128;
	else
		win->average_tpt = IL_INVALID_VALUE;

	/* Tag this win as having been updated */
	win->stamp = jiffies;

	return 0;
}
493
494/*
495 * Fill uCode API rate_n_flags field, based on "search" or "active" table.
496 */
497static u32
498il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl,
499 int idx, u8 use_green)
500{
501 u32 rate_n_flags = 0;
502
503 if (is_legacy(tbl->lq_type)) {
504 rate_n_flags = il_rates[idx].plcp;
505 if (idx >= IL_FIRST_CCK_RATE && idx <= IL_LAST_CCK_RATE)
506 rate_n_flags |= RATE_MCS_CCK_MSK;
507
508 } else if (is_Ht(tbl->lq_type)) {
509 if (idx > IL_LAST_OFDM_RATE) {
510 IL_ERR("Invalid HT rate idx %d\n", idx);
511 idx = IL_LAST_OFDM_RATE;
512 }
513 rate_n_flags = RATE_MCS_HT_MSK;
514
515 if (is_siso(tbl->lq_type))
516 rate_n_flags |= il_rates[idx].plcp_siso;
517 else
518 rate_n_flags |= il_rates[idx].plcp_mimo2;
519 } else {
520 IL_ERR("Invalid tbl->lq_type %d\n", tbl->lq_type);
521 }
522
523 rate_n_flags |=
524 ((tbl->ant_type << RATE_MCS_ANT_POS) & RATE_MCS_ANT_ABC_MSK);
525
526 if (is_Ht(tbl->lq_type)) {
527 if (tbl->is_ht40) {
528 if (tbl->is_dup)
529 rate_n_flags |= RATE_MCS_DUP_MSK;
530 else
531 rate_n_flags |= RATE_MCS_HT40_MSK;
532 }
533 if (tbl->is_SGI)
534 rate_n_flags |= RATE_MCS_SGI_MSK;
535
536 if (use_green) {
537 rate_n_flags |= RATE_MCS_GF_MSK;
538 if (is_siso(tbl->lq_type) && tbl->is_SGI) {
539 rate_n_flags &= ~RATE_MCS_SGI_MSK;
540 IL_ERR("GF was set with SGI:SISO\n");
541 }
542 }
543 }
544 return rate_n_flags;
545}
546
547/*
548 * Interpret uCode API's rate_n_flags format,
549 * fill "search" or "active" tx mode table.
550 */
551static int
552il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
553 enum ieee80211_band band,
554 struct il_scale_tbl_info *tbl, int *rate_idx)
555{
556 u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
557 u8 il4965_num_of_ant =
558 il4965_get_il4965_num_of_ant_from_rate(rate_n_flags);
559 u8 mcs;
560
561 memset(tbl, 0, sizeof(struct il_scale_tbl_info));
562 *rate_idx = il4965_hwrate_to_plcp_idx(rate_n_flags);
563
564 if (*rate_idx == RATE_INVALID) {
565 *rate_idx = -1;
566 return -EINVAL;
567 }
568 tbl->is_SGI = 0; /* default legacy setup */
569 tbl->is_ht40 = 0;
570 tbl->is_dup = 0;
571 tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
572 tbl->lq_type = LQ_NONE;
573 tbl->max_search = IL_MAX_SEARCH;
574
575 /* legacy rate format */
576 if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
577 if (il4965_num_of_ant == 1) {
578 if (band == IEEE80211_BAND_5GHZ)
579 tbl->lq_type = LQ_A;
580 else
581 tbl->lq_type = LQ_G;
582 }
583 /* HT rate format */
584 } else {
585 if (rate_n_flags & RATE_MCS_SGI_MSK)
586 tbl->is_SGI = 1;
587
588 if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
589 (rate_n_flags & RATE_MCS_DUP_MSK))
590 tbl->is_ht40 = 1;
591
592 if (rate_n_flags & RATE_MCS_DUP_MSK)
593 tbl->is_dup = 1;
594
595 mcs = il4965_rs_extract_rate(rate_n_flags);
596
597 /* SISO */
598 if (mcs <= RATE_SISO_60M_PLCP) {
599 if (il4965_num_of_ant == 1)
600 tbl->lq_type = LQ_SISO; /*else NONE */
601 /* MIMO2 */
602 } else {
603 if (il4965_num_of_ant == 2)
604 tbl->lq_type = LQ_MIMO2;
605 }
606 }
607 return 0;
608}
609
610/* switch to another antenna/antennas and return 1 */
611/* if no other valid antenna found, return 0 */
612static int
613il4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
614 struct il_scale_tbl_info *tbl)
615{
616 u8 new_ant_type;
617
618 if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
619 return 0;
620
621 if (!il4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
622 return 0;
623
624 new_ant_type = ant_toggle_lookup[tbl->ant_type];
625
626 while (new_ant_type != tbl->ant_type &&
627 !il4965_rs_is_valid_ant(valid_ant, new_ant_type))
628 new_ant_type = ant_toggle_lookup[new_ant_type];
629
630 if (new_ant_type == tbl->ant_type)
631 return 0;
632
633 tbl->ant_type = new_ant_type;
634 *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
635 *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
636 return 1;
637}
638
639/**
640 * Green-field mode is valid if the station supports it and
641 * there are no non-GF stations present in the BSS.
642 */
643static bool
644il4965_rs_use_green(struct ieee80211_sta *sta)
645{
646 struct il_station_priv *sta_priv = (void *)sta->drv_priv;
647 struct il_rxon_context *ctx = sta_priv->common.ctx;
648
649 return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
650 !(ctx->ht.non_gf_sta_present);
651}
652
653/**
654 * il4965_rs_get_supported_rates - get the available rates
655 *
656 * if management frame or broadcast frame only return
657 * basic available rates.
658 *
659 */
660static u16
661il4965_rs_get_supported_rates(struct il_lq_sta *lq_sta,
662 struct ieee80211_hdr *hdr,
663 enum il_table_type rate_type)
664{
665 if (is_legacy(rate_type)) {
666 return lq_sta->active_legacy_rate;
667 } else {
668 if (is_siso(rate_type))
669 return lq_sta->active_siso_rate;
670 else
671 return lq_sta->active_mimo2_rate;
672 }
673}
674
675static u16
676il4965_rs_get_adjacent_rate(struct il_priv *il, u8 idx, u16 rate_mask,
677 int rate_type)
678{
679 u8 high = RATE_INVALID;
680 u8 low = RATE_INVALID;
681
682 /* 802.11A or ht walks to the next literal adjacent rate in
683 * the rate table */
684 if (is_a_band(rate_type) || !is_legacy(rate_type)) {
685 int i;
686 u32 mask;
687
688 /* Find the previous rate that is in the rate mask */
689 i = idx - 1;
690 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
691 if (rate_mask & mask) {
692 low = i;
693 break;
694 }
695 }
696
697 /* Find the next rate that is in the rate mask */
698 i = idx + 1;
699 for (mask = (1 << i); i < RATE_COUNT; i++, mask <<= 1) {
700 if (rate_mask & mask) {
701 high = i;
702 break;
703 }
704 }
705
706 return (high << 8) | low;
707 }
708
709 low = idx;
710 while (low != RATE_INVALID) {
711 low = il_rates[low].prev_rs;
712 if (low == RATE_INVALID)
713 break;
714 if (rate_mask & (1 << low))
715 break;
716 D_RATE("Skipping masked lower rate: %d\n", low);
717 }
718
719 high = idx;
720 while (high != RATE_INVALID) {
721 high = il_rates[high].next_rs;
722 if (high == RATE_INVALID)
723 break;
724 if (rate_mask & (1 << high))
725 break;
726 D_RATE("Skipping masked higher rate: %d\n", high);
727 }
728
729 return (high << 8) | low;
730}
731
/*
 * Pick the next lower rate for @scale_idx (falling back to legacy mode
 * when HT is no longer possible) and return it encoded as a uCode
 * rate_n_flags word.  NOTE(review): this mutates the caller's @tbl
 * (lq_type, ant_type, is_ht40, is_SGI, max_search) when switching to
 * legacy.
 */
static u32
il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta,
			 struct il_scale_tbl_info *tbl, u8 scale_idx,
			 u8 ht_possible)
{
	s32 low;
	u16 rate_mask;
	u16 high_low;
	u8 switch_to_legacy = 0;
	u8 is_green = lq_sta->is_green;
	struct il_priv *il = lq_sta->drv;

	/* check if we need to switch from HT to legacy rates.
	 * assumption is that mandatory rates (1Mbps or 6Mbps)
	 * are always supported (spec demand) */
	if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_idx)) {
		switch_to_legacy = 1;
		scale_idx = rs_ht_to_legacy[scale_idx];
		if (lq_sta->band == IEEE80211_BAND_5GHZ)
			tbl->lq_type = LQ_A;
		else
			tbl->lq_type = LQ_G;

		/* Drop down to a single antenna for legacy operation */
		if (il4965_num_of_ant(tbl->ant_type) > 1)
			tbl->ant_type =
			    il4965_first_antenna(il->hw_params.valid_tx_ant);

		tbl->is_ht40 = 0;
		tbl->is_SGI = 0;
		tbl->max_search = IL_MAX_SEARCH;
	}

	rate_mask = il4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);

	/* Mask with station rate restriction */
	if (is_legacy(tbl->lq_type)) {
		/* supp_rates has no CCK bits in A mode */
		if (lq_sta->band == IEEE80211_BAND_5GHZ)
			rate_mask =
			    (u16) (rate_mask &
				   (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
		else
			rate_mask = (u16) (rate_mask & lq_sta->supp_rates);
	}

	/* If we switched from HT to legacy, check current rate */
	if (switch_to_legacy && (rate_mask & (1 << scale_idx))) {
		low = scale_idx;
		goto out;
	}

	high_low =
	    il4965_rs_get_adjacent_rate(lq_sta->drv, scale_idx, rate_mask,
					tbl->lq_type);
	/* Low rate is packed into the lower byte of the result */
	low = high_low & 0xff;

	/* No lower rate available: stay at the current one */
	if (low == RATE_INVALID)
		low = scale_idx;

out:
	return il4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
}
794
795/*
796 * Simple function to compare two rate scale table types
797 */
798static bool
799il4965_table_type_matches(struct il_scale_tbl_info *a,
800 struct il_scale_tbl_info *b)
801{
802 return (a->lq_type == b->lq_type && a->ant_type == b->ant_type &&
803 a->is_SGI == b->is_SGI);
804}
805
806/*
807 * mac80211 sends us Tx status
808 */
809static void
810il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
811 struct ieee80211_sta *sta, void *il_sta,
812 struct sk_buff *skb)
813{
814 int legacy_success;
815 int retries;
816 int rs_idx, mac_idx, i;
817 struct il_lq_sta *lq_sta = il_sta;
818 struct il_link_quality_cmd *table;
819 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
820 struct il_priv *il = (struct il_priv *)il_r;
821 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
822 enum mac80211_rate_control_flags mac_flags;
823 u32 tx_rate;
824 struct il_scale_tbl_info tbl_type;
825 struct il_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
826 struct il_station_priv *sta_priv = (void *)sta->drv_priv;
827 struct il_rxon_context *ctx = sta_priv->common.ctx;
828
829 D_RATE("get frame ack response, update rate scale win\n");
830
831 /* Treat uninitialized rate scaling data same as non-existing. */
832 if (!lq_sta) {
833 D_RATE("Station rate scaling not created yet.\n");
834 return;
835 } else if (!lq_sta->drv) {
836 D_RATE("Rate scaling not initialized yet.\n");
837 return;
838 }
839
840 if (!ieee80211_is_data(hdr->frame_control) ||
841 (info->flags & IEEE80211_TX_CTL_NO_ACK))
842 return;
843
844 /* This packet was aggregated but doesn't carry status info */
845 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
846 !(info->flags & IEEE80211_TX_STAT_AMPDU))
847 return;
848
849 /*
850 * Ignore this Tx frame response if its initial rate doesn't match
851 * that of latest Link Quality command. There may be stragglers
852 * from a previous Link Quality command, but we're no longer interested
853 * in those; they're either from the "active" mode while we're trying
854 * to check "search" mode, or a prior "search" mode after we've moved
855 * to a new "search" mode (which might become the new "active" mode).
856 */
857 table = &lq_sta->lq;
858 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
859 il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type, &rs_idx);
860 if (il->band == IEEE80211_BAND_5GHZ)
861 rs_idx -= IL_FIRST_OFDM_RATE;
862 mac_flags = info->status.rates[0].flags;
863 mac_idx = info->status.rates[0].idx;
864 /* For HT packets, map MCS to PLCP */
865 if (mac_flags & IEEE80211_TX_RC_MCS) {
866 mac_idx &= RATE_MCS_CODE_MSK; /* Remove # of streams */
867 if (mac_idx >= (RATE_9M_IDX - IL_FIRST_OFDM_RATE))
868 mac_idx++;
869 /*
870 * mac80211 HT idx is always zero-idxed; we need to move
871 * HT OFDM rates after CCK rates in 2.4 GHz band
872 */
873 if (il->band == IEEE80211_BAND_2GHZ)
874 mac_idx += IL_FIRST_OFDM_RATE;
875 }
876 /* Here we actually compare this rate to the latest LQ command */
877 if (mac_idx < 0 ||
878 tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI) ||
879 tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ||
880 tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA) ||
881 tbl_type.ant_type != info->antenna_sel_tx ||
882 !!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)
883 || !!(tx_rate & RATE_MCS_GF_MSK) !=
884 !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD) || rs_idx != mac_idx) {
885 D_RATE("initial rate %d does not match %d (0x%x)\n", mac_idx,
886 rs_idx, tx_rate);
887 /*
888 * Since rates mis-match, the last LQ command may have failed.
889 * After IL_MISSED_RATE_MAX mis-matches, resync the uCode with
890 * ... driver.
891 */
892 lq_sta->missed_rate_counter++;
893 if (lq_sta->missed_rate_counter > IL_MISSED_RATE_MAX) {
894 lq_sta->missed_rate_counter = 0;
895 il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
896 }
897 /* Regardless, ignore this status info for outdated rate */
898 return;
899 } else
900 /* Rate did match, so reset the missed_rate_counter */
901 lq_sta->missed_rate_counter = 0;
902
903 /* Figure out if rate scale algorithm is in active or search table */
904 if (il4965_table_type_matches
905 (&tbl_type, &(lq_sta->lq_info[lq_sta->active_tbl]))) {
906 curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
907 other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
908 } else
909 if (il4965_table_type_matches
910 (&tbl_type, &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
911 curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
912 other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
913 } else {
914 D_RATE("Neither active nor search matches tx rate\n");
915 tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
916 D_RATE("active- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type,
917 tmp_tbl->ant_type, tmp_tbl->is_SGI);
918 tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
919 D_RATE("search- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type,
920 tmp_tbl->ant_type, tmp_tbl->is_SGI);
921 D_RATE("actual- lq:%x, ant:%x, SGI:%d\n", tbl_type.lq_type,
922 tbl_type.ant_type, tbl_type.is_SGI);
923 /*
924 * no matching table found, let's by-pass the data collection
925 * and continue to perform rate scale to find the rate table
926 */
927 il4965_rs_stay_in_table(lq_sta, true);
928 goto done;
929 }
930
931 /*
932 * Updating the frame history depends on whether packets were
933 * aggregated.
934 *
935 * For aggregation, all packets were transmitted at the same rate, the
936 * first idx into rate scale table.
937 */
938 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
939 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
940 il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type,
941 &rs_idx);
942 il4965_rs_collect_tx_data(curr_tbl, rs_idx,
943 info->status.ampdu_len,
944 info->status.ampdu_ack_len);
945
946 /* Update success/fail counts if not searching for new mode */
947 if (lq_sta->stay_in_tbl) {
948 lq_sta->total_success += info->status.ampdu_ack_len;
949 lq_sta->total_failed +=
950 (info->status.ampdu_len -
951 info->status.ampdu_ack_len);
952 }
953 } else {
954 /*
955 * For legacy, update frame history with for each Tx retry.
956 */
957 retries = info->status.rates[0].count - 1;
958 /* HW doesn't send more than 15 retries */
959 retries = min(retries, 15);
960
961 /* The last transmission may have been successful */
962 legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
963 /* Collect data for each rate used during failed TX attempts */
964 for (i = 0; i <= retries; ++i) {
965 tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
966 il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band,
967 &tbl_type, &rs_idx);
968 /*
969 * Only collect stats if retried rate is in the same RS
970 * table as active/search.
971 */
972 if (il4965_table_type_matches(&tbl_type, curr_tbl))
973 tmp_tbl = curr_tbl;
974 else if (il4965_table_type_matches
975 (&tbl_type, other_tbl))
976 tmp_tbl = other_tbl;
977 else
978 continue;
979 il4965_rs_collect_tx_data(tmp_tbl, rs_idx, 1,
980 i <
981 retries ? 0 : legacy_success);
982 }
983
984 /* Update success/fail counts if not searching for new mode */
985 if (lq_sta->stay_in_tbl) {
986 lq_sta->total_success += legacy_success;
987 lq_sta->total_failed += retries + (1 - legacy_success);
988 }
989 }
990 /* The last TX rate is cached in lq_sta; it's set in if/else above */
991 lq_sta->last_rate_n_flags = tx_rate;
992done:
993 /* See if there's a better rate or modulation mode to try. */
994 if (sta->supp_rates[sband->band])
995 il4965_rs_rate_scale_perform(il, skb, sta, lq_sta);
996}
997
998/*
999 * Begin a period of staying with a selected modulation mode.
1000 * Set "stay_in_tbl" flag to prevent any mode switches.
1001 * Set frame tx success limits according to legacy vs. high-throughput,
1002 * and reset overall (spanning all rates) tx success history stats.
1003 * These control how long we stay using same modulation mode before
1004 * searching for a new mode.
1005 */
1006static void
1007il4965_rs_set_stay_in_table(struct il_priv *il, u8 is_legacy,
1008 struct il_lq_sta *lq_sta)
1009{
1010 D_RATE("we are staying in the same table\n");
1011 lq_sta->stay_in_tbl = 1; /* only place this gets set */
1012 if (is_legacy) {
1013 lq_sta->table_count_limit = IL_LEGACY_TBL_COUNT;
1014 lq_sta->max_failure_limit = IL_LEGACY_FAILURE_LIMIT;
1015 lq_sta->max_success_limit = IL_LEGACY_SUCCESS_LIMIT;
1016 } else {
1017 lq_sta->table_count_limit = IL_NONE_LEGACY_TBL_COUNT;
1018 lq_sta->max_failure_limit = IL_NONE_LEGACY_FAILURE_LIMIT;
1019 lq_sta->max_success_limit = IL_NONE_LEGACY_SUCCESS_LIMIT;
1020 }
1021 lq_sta->table_count = 0;
1022 lq_sta->total_failed = 0;
1023 lq_sta->total_success = 0;
1024 lq_sta->flush_timer = jiffies;
1025 lq_sta->action_counter = 0;
1026}
1027
1028/*
1029 * Find correct throughput table for given mode of modulation
1030 */
1031static void
1032il4965_rs_set_expected_tpt_table(struct il_lq_sta *lq_sta,
1033 struct il_scale_tbl_info *tbl)
1034{
1035 /* Used to choose among HT tables */
1036 s32(*ht_tbl_pointer)[RATE_COUNT];
1037
1038 /* Check for invalid LQ type */
1039 if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
1040 tbl->expected_tpt = expected_tpt_legacy;
1041 return;
1042 }
1043
1044 /* Legacy rates have only one table */
1045 if (is_legacy(tbl->lq_type)) {
1046 tbl->expected_tpt = expected_tpt_legacy;
1047 return;
1048 }
1049
1050 /* Choose among many HT tables depending on number of streams
1051 * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
1052 * status */
1053 if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1054 ht_tbl_pointer = expected_tpt_siso20MHz;
1055 else if (is_siso(tbl->lq_type))
1056 ht_tbl_pointer = expected_tpt_siso40MHz;
1057 else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1058 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1059 else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
1060 ht_tbl_pointer = expected_tpt_mimo2_40MHz;
1061
1062 if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
1063 tbl->expected_tpt = ht_tbl_pointer[0];
1064 else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
1065 tbl->expected_tpt = ht_tbl_pointer[1];
1066 else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
1067 tbl->expected_tpt = ht_tbl_pointer[2];
1068 else /* AGG+SGI */
1069 tbl->expected_tpt = ht_tbl_pointer[3];
1070}
1071
1072/*
1073 * Find starting rate for new "search" high-throughput mode of modulation.
1074 * Goal is to find lowest expected rate (under perfect conditions) that is
1075 * above the current measured throughput of "active" mode, to give new mode
1076 * a fair chance to prove itself without too many challenges.
1077 *
1078 * This gets called when transitioning to more aggressive modulation
1079 * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
1080 * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
1081 * to decrease to match "active" throughput. When moving from MIMO to SISO,
1082 * bit rate will typically need to increase, but not if performance was bad.
1083 */
1084static s32
1085il4965_rs_get_best_rate(struct il_priv *il, struct il_lq_sta *lq_sta,
1086 struct il_scale_tbl_info *tbl, /* "search" */
1087 u16 rate_mask, s8 idx)
1088{
1089 /* "active" values */
1090 struct il_scale_tbl_info *active_tbl =
1091 &(lq_sta->lq_info[lq_sta->active_tbl]);
1092 s32 active_sr = active_tbl->win[idx].success_ratio;
1093 s32 active_tpt = active_tbl->expected_tpt[idx];
1094
1095 /* expected "search" throughput */
1096 s32 *tpt_tbl = tbl->expected_tpt;
1097
1098 s32 new_rate, high, low, start_hi;
1099 u16 high_low;
1100 s8 rate = idx;
1101
1102 new_rate = high = low = start_hi = RATE_INVALID;
1103
1104 for (;;) {
1105 high_low =
1106 il4965_rs_get_adjacent_rate(il, rate, rate_mask,
1107 tbl->lq_type);
1108
1109 low = high_low & 0xff;
1110 high = (high_low >> 8) & 0xff;
1111
1112 /*
1113 * Lower the "search" bit rate, to give new "search" mode
1114 * approximately the same throughput as "active" if:
1115 *
1116 * 1) "Active" mode has been working modestly well (but not
1117 * great), and expected "search" throughput (under perfect
1118 * conditions) at candidate rate is above the actual
1119 * measured "active" throughput (but less than expected
1120 * "active" throughput under perfect conditions).
1121 * OR
1122 * 2) "Active" mode has been working perfectly or very well
1123 * and expected "search" throughput (under perfect
1124 * conditions) at candidate rate is above expected
1125 * "active" throughput (under perfect conditions).
1126 */
1127 if ((100 * tpt_tbl[rate] > lq_sta->last_tpt &&
1128 (active_sr > RATE_DECREASE_TH && active_sr <= RATE_HIGH_TH
1129 && tpt_tbl[rate] <= active_tpt)) ||
1130 (active_sr >= RATE_SCALE_SWITCH &&
1131 tpt_tbl[rate] > active_tpt)) {
1132
1133 /* (2nd or later pass)
1134 * If we've already tried to raise the rate, and are
1135 * now trying to lower it, use the higher rate. */
1136 if (start_hi != RATE_INVALID) {
1137 new_rate = start_hi;
1138 break;
1139 }
1140
1141 new_rate = rate;
1142
1143 /* Loop again with lower rate */
1144 if (low != RATE_INVALID)
1145 rate = low;
1146
1147 /* Lower rate not available, use the original */
1148 else
1149 break;
1150
1151 /* Else try to raise the "search" rate to match "active" */
1152 } else {
1153 /* (2nd or later pass)
1154 * If we've already tried to lower the rate, and are
1155 * now trying to raise it, use the lower rate. */
1156 if (new_rate != RATE_INVALID)
1157 break;
1158
1159 /* Loop again with higher rate */
1160 else if (high != RATE_INVALID) {
1161 start_hi = high;
1162 rate = high;
1163
1164 /* Higher rate not available, use the original */
1165 } else {
1166 new_rate = rate;
1167 break;
1168 }
1169 }
1170 }
1171
1172 return new_rate;
1173}
1174
1175/*
1176 * Set up search table for MIMO2
1177 */
1178static int
1179il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
1180 struct ieee80211_conf *conf,
1181 struct ieee80211_sta *sta,
1182 struct il_scale_tbl_info *tbl, int idx)
1183{
1184 u16 rate_mask;
1185 s32 rate;
1186 s8 is_green = lq_sta->is_green;
1187 struct il_station_priv *sta_priv = (void *)sta->drv_priv;
1188 struct il_rxon_context *ctx = sta_priv->common.ctx;
1189
1190 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1191 return -1;
1192
1193 if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) ==
1194 WLAN_HT_CAP_SM_PS_STATIC)
1195 return -1;
1196
1197 /* Need both Tx chains/antennas to support MIMO */
1198 if (il->hw_params.tx_chains_num < 2)
1199 return -1;
1200
1201 D_RATE("LQ: try to switch to MIMO2\n");
1202
1203 tbl->lq_type = LQ_MIMO2;
1204 tbl->is_dup = lq_sta->is_dup;
1205 tbl->action = 0;
1206 tbl->max_search = IL_MAX_SEARCH;
1207 rate_mask = lq_sta->active_mimo2_rate;
1208
1209 if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
1210 tbl->is_ht40 = 1;
1211 else
1212 tbl->is_ht40 = 0;
1213
1214 il4965_rs_set_expected_tpt_table(lq_sta, tbl);
1215
1216 rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
1217
1218 D_RATE("LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask);
1219 if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) {
1220 D_RATE("Can't switch with idx %d rate mask %x\n", rate,
1221 rate_mask);
1222 return -1;
1223 }
1224 tbl->current_rate =
1225 il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
1226
1227 D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
1228 is_green);
1229 return 0;
1230}
1231
1232/*
1233 * Set up search table for SISO
1234 */
1235static int
1236il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta,
1237 struct ieee80211_conf *conf, struct ieee80211_sta *sta,
1238 struct il_scale_tbl_info *tbl, int idx)
1239{
1240 u16 rate_mask;
1241 u8 is_green = lq_sta->is_green;
1242 s32 rate;
1243 struct il_station_priv *sta_priv = (void *)sta->drv_priv;
1244 struct il_rxon_context *ctx = sta_priv->common.ctx;
1245
1246 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1247 return -1;
1248
1249 D_RATE("LQ: try to switch to SISO\n");
1250
1251 tbl->is_dup = lq_sta->is_dup;
1252 tbl->lq_type = LQ_SISO;
1253 tbl->action = 0;
1254 tbl->max_search = IL_MAX_SEARCH;
1255 rate_mask = lq_sta->active_siso_rate;
1256
1257 if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
1258 tbl->is_ht40 = 1;
1259 else
1260 tbl->is_ht40 = 0;
1261
1262 if (is_green)
1263 tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield */
1264
1265 il4965_rs_set_expected_tpt_table(lq_sta, tbl);
1266 rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
1267
1268 D_RATE("LQ: get best rate %d mask %X\n", rate, rate_mask);
1269 if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) {
1270 D_RATE("can not switch with idx %d rate mask %x\n", rate,
1271 rate_mask);
1272 return -1;
1273 }
1274 tbl->current_rate =
1275 il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
1276 D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
1277 is_green);
1278 return 0;
1279}
1280
1281/*
1282 * Try to switch to new modulation mode from legacy
1283 */
1284static int
1285il4965_rs_move_legacy_other(struct il_priv *il, struct il_lq_sta *lq_sta,
1286 struct ieee80211_conf *conf,
1287 struct ieee80211_sta *sta, int idx)
1288{
1289 struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1290 struct il_scale_tbl_info *search_tbl =
1291 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1292 struct il_rate_scale_data *win = &(tbl->win[idx]);
1293 u32 sz =
1294 (sizeof(struct il_scale_tbl_info) -
1295 (sizeof(struct il_rate_scale_data) * RATE_COUNT));
1296 u8 start_action;
1297 u8 valid_tx_ant = il->hw_params.valid_tx_ant;
1298 u8 tx_chains_num = il->hw_params.tx_chains_num;
1299 int ret = 0;
1300 u8 update_search_tbl_counter = 0;
1301
1302 tbl->action = IL_LEGACY_SWITCH_SISO;
1303
1304 start_action = tbl->action;
1305 for (;;) {
1306 lq_sta->action_counter++;
1307 switch (tbl->action) {
1308 case IL_LEGACY_SWITCH_ANTENNA1:
1309 case IL_LEGACY_SWITCH_ANTENNA2:
1310 D_RATE("LQ: Legacy toggle Antenna\n");
1311
1312 if ((tbl->action == IL_LEGACY_SWITCH_ANTENNA1 &&
1313 tx_chains_num <= 1) ||
1314 (tbl->action == IL_LEGACY_SWITCH_ANTENNA2 &&
1315 tx_chains_num <= 2))
1316 break;
1317
1318 /* Don't change antenna if success has been great */
1319 if (win->success_ratio >= IL_RS_GOOD_RATIO)
1320 break;
1321
1322 /* Set up search table to try other antenna */
1323 memcpy(search_tbl, tbl, sz);
1324
1325 if (il4965_rs_toggle_antenna
1326 (valid_tx_ant, &search_tbl->current_rate,
1327 search_tbl)) {
1328 update_search_tbl_counter = 1;
1329 il4965_rs_set_expected_tpt_table(lq_sta,
1330 search_tbl);
1331 goto out;
1332 }
1333 break;
1334 case IL_LEGACY_SWITCH_SISO:
1335 D_RATE("LQ: Legacy switch to SISO\n");
1336
1337 /* Set up search table to try SISO */
1338 memcpy(search_tbl, tbl, sz);
1339 search_tbl->is_SGI = 0;
1340 ret =
1341 il4965_rs_switch_to_siso(il, lq_sta, conf, sta,
1342 search_tbl, idx);
1343 if (!ret) {
1344 lq_sta->action_counter = 0;
1345 goto out;
1346 }
1347
1348 break;
1349 case IL_LEGACY_SWITCH_MIMO2_AB:
1350 case IL_LEGACY_SWITCH_MIMO2_AC:
1351 case IL_LEGACY_SWITCH_MIMO2_BC:
1352 D_RATE("LQ: Legacy switch to MIMO2\n");
1353
1354 /* Set up search table to try MIMO */
1355 memcpy(search_tbl, tbl, sz);
1356 search_tbl->is_SGI = 0;
1357
1358 if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AB)
1359 search_tbl->ant_type = ANT_AB;
1360 else if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AC)
1361 search_tbl->ant_type = ANT_AC;
1362 else
1363 search_tbl->ant_type = ANT_BC;
1364
1365 if (!il4965_rs_is_valid_ant
1366 (valid_tx_ant, search_tbl->ant_type))
1367 break;
1368
1369 ret =
1370 il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta,
1371 search_tbl, idx);
1372 if (!ret) {
1373 lq_sta->action_counter = 0;
1374 goto out;
1375 }
1376 break;
1377 }
1378 tbl->action++;
1379 if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
1380 tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
1381
1382 if (tbl->action == start_action)
1383 break;
1384
1385 }
1386 search_tbl->lq_type = LQ_NONE;
1387 return 0;
1388
1389out:
1390 lq_sta->search_better_tbl = 1;
1391 tbl->action++;
1392 if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
1393 tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
1394 if (update_search_tbl_counter)
1395 search_tbl->action = tbl->action;
1396 return 0;
1397
1398}
1399
1400/*
1401 * Try to switch to new modulation mode from SISO
1402 */
1403static int
1404il4965_rs_move_siso_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
1405 struct ieee80211_conf *conf,
1406 struct ieee80211_sta *sta, int idx)
1407{
1408 u8 is_green = lq_sta->is_green;
1409 struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1410 struct il_scale_tbl_info *search_tbl =
1411 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1412 struct il_rate_scale_data *win = &(tbl->win[idx]);
1413 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1414 u32 sz =
1415 (sizeof(struct il_scale_tbl_info) -
1416 (sizeof(struct il_rate_scale_data) * RATE_COUNT));
1417 u8 start_action;
1418 u8 valid_tx_ant = il->hw_params.valid_tx_ant;
1419 u8 tx_chains_num = il->hw_params.tx_chains_num;
1420 u8 update_search_tbl_counter = 0;
1421 int ret;
1422
1423 start_action = tbl->action;
1424
1425 for (;;) {
1426 lq_sta->action_counter++;
1427 switch (tbl->action) {
1428 case IL_SISO_SWITCH_ANTENNA1:
1429 case IL_SISO_SWITCH_ANTENNA2:
1430 D_RATE("LQ: SISO toggle Antenna\n");
1431 if ((tbl->action == IL_SISO_SWITCH_ANTENNA1 &&
1432 tx_chains_num <= 1) ||
1433 (tbl->action == IL_SISO_SWITCH_ANTENNA2 &&
1434 tx_chains_num <= 2))
1435 break;
1436
1437 if (win->success_ratio >= IL_RS_GOOD_RATIO)
1438 break;
1439
1440 memcpy(search_tbl, tbl, sz);
1441 if (il4965_rs_toggle_antenna
1442 (valid_tx_ant, &search_tbl->current_rate,
1443 search_tbl)) {
1444 update_search_tbl_counter = 1;
1445 goto out;
1446 }
1447 break;
1448 case IL_SISO_SWITCH_MIMO2_AB:
1449 case IL_SISO_SWITCH_MIMO2_AC:
1450 case IL_SISO_SWITCH_MIMO2_BC:
1451 D_RATE("LQ: SISO switch to MIMO2\n");
1452 memcpy(search_tbl, tbl, sz);
1453 search_tbl->is_SGI = 0;
1454
1455 if (tbl->action == IL_SISO_SWITCH_MIMO2_AB)
1456 search_tbl->ant_type = ANT_AB;
1457 else if (tbl->action == IL_SISO_SWITCH_MIMO2_AC)
1458 search_tbl->ant_type = ANT_AC;
1459 else
1460 search_tbl->ant_type = ANT_BC;
1461
1462 if (!il4965_rs_is_valid_ant
1463 (valid_tx_ant, search_tbl->ant_type))
1464 break;
1465
1466 ret =
1467 il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta,
1468 search_tbl, idx);
1469 if (!ret)
1470 goto out;
1471 break;
1472 case IL_SISO_SWITCH_GI:
1473 if (!tbl->is_ht40 &&
1474 !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20))
1475 break;
1476 if (tbl->is_ht40 &&
1477 !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40))
1478 break;
1479
1480 D_RATE("LQ: SISO toggle SGI/NGI\n");
1481
1482 memcpy(search_tbl, tbl, sz);
1483 if (is_green) {
1484 if (!tbl->is_SGI)
1485 break;
1486 else
1487 IL_ERR("SGI was set in GF+SISO\n");
1488 }
1489 search_tbl->is_SGI = !tbl->is_SGI;
1490 il4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
1491 if (tbl->is_SGI) {
1492 s32 tpt = lq_sta->last_tpt / 100;
1493 if (tpt >= search_tbl->expected_tpt[idx])
1494 break;
1495 }
1496 search_tbl->current_rate =
1497 il4965_rate_n_flags_from_tbl(il, search_tbl, idx,
1498 is_green);
1499 update_search_tbl_counter = 1;
1500 goto out;
1501 }
1502 tbl->action++;
1503 if (tbl->action > IL_SISO_SWITCH_GI)
1504 tbl->action = IL_SISO_SWITCH_ANTENNA1;
1505
1506 if (tbl->action == start_action)
1507 break;
1508 }
1509 search_tbl->lq_type = LQ_NONE;
1510 return 0;
1511
1512out:
1513 lq_sta->search_better_tbl = 1;
1514 tbl->action++;
1515 if (tbl->action > IL_SISO_SWITCH_GI)
1516 tbl->action = IL_SISO_SWITCH_ANTENNA1;
1517 if (update_search_tbl_counter)
1518 search_tbl->action = tbl->action;
1519
1520 return 0;
1521}
1522
1523/*
1524 * Try to switch to new modulation mode from MIMO2
1525 */
static int
il4965_rs_move_mimo2_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
			      struct ieee80211_conf *conf,
			      struct ieee80211_sta *sta, int idx)
{
	s8 is_green = lq_sta->is_green;
	struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
	struct il_scale_tbl_info *search_tbl =
	    &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
	struct il_rate_scale_data *win = &(tbl->win[idx]);
	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
	/* Copy size: everything in the table except the per-rate history
	 * windows at its tail, which must not be cloned into "search" */
	u32 sz =
	    (sizeof(struct il_scale_tbl_info) -
	     (sizeof(struct il_rate_scale_data) * RATE_COUNT));
	u8 start_action;
	u8 valid_tx_ant = il->hw_params.valid_tx_ant;
	u8 tx_chains_num = il->hw_params.tx_chains_num;
	u8 update_search_tbl_counter = 0;
	int ret;

	/* Cycle through the candidate actions at most once, starting from
	 * wherever the previous invocation left off */
	start_action = tbl->action;
	for (;;) {
		lq_sta->action_counter++;
		switch (tbl->action) {
		case IL_MIMO2_SWITCH_ANTENNA1:
		case IL_MIMO2_SWITCH_ANTENNA2:
			D_RATE("LQ: MIMO2 toggle Antennas\n");

			/* Toggling needs a third chain to move to */
			if (tx_chains_num <= 2)
				break;

			/* Current antenna pair already performs well */
			if (win->success_ratio >= IL_RS_GOOD_RATIO)
				break;

			memcpy(search_tbl, tbl, sz);
			if (il4965_rs_toggle_antenna
			    (valid_tx_ant, &search_tbl->current_rate,
			     search_tbl)) {
				update_search_tbl_counter = 1;
				goto out;
			}
			break;
		case IL_MIMO2_SWITCH_SISO_A:
		case IL_MIMO2_SWITCH_SISO_B:
		case IL_MIMO2_SWITCH_SISO_C:
			D_RATE("LQ: MIMO2 switch to SISO\n");

			/* Set up new search table for SISO */
			memcpy(search_tbl, tbl, sz);

			if (tbl->action == IL_MIMO2_SWITCH_SISO_A)
				search_tbl->ant_type = ANT_A;
			else if (tbl->action == IL_MIMO2_SWITCH_SISO_B)
				search_tbl->ant_type = ANT_B;
			else
				search_tbl->ant_type = ANT_C;

			if (!il4965_rs_is_valid_ant
			    (valid_tx_ant, search_tbl->ant_type))
				break;

			ret =
			    il4965_rs_switch_to_siso(il, lq_sta, conf, sta,
						     search_tbl, idx);
			if (!ret)
				goto out;

			break;

		case IL_MIMO2_SWITCH_GI:
			/* SGI must be supported for the current bandwidth */
			if (!tbl->is_ht40 &&
			    !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20))
				break;
			if (tbl->is_ht40 &&
			    !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40))
				break;

			D_RATE("LQ: MIMO2 toggle SGI/NGI\n");

			/* Set up new search table for MIMO2 */
			memcpy(search_tbl, tbl, sz);
			search_tbl->is_SGI = !tbl->is_SGI;
			il4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
			/*
			 * If active table already uses the fastest possible
			 * modulation (dual stream with short guard interval),
			 * and it's working well, there's no need to look
			 * for a better type of modulation!
			 */
			if (tbl->is_SGI) {
				s32 tpt = lq_sta->last_tpt / 100;
				if (tpt >= search_tbl->expected_tpt[idx])
					break;
			}
			search_tbl->current_rate =
			    il4965_rate_n_flags_from_tbl(il, search_tbl, idx,
							 is_green);
			update_search_tbl_counter = 1;
			goto out;

		}
		/* Advance to the next action, wrapping back to the first */
		tbl->action++;
		if (tbl->action > IL_MIMO2_SWITCH_GI)
			tbl->action = IL_MIMO2_SWITCH_ANTENNA1;

		/* Tried everything once; give up for this round */
		if (tbl->action == start_action)
			break;
	}
	/* No candidate found: invalidate the search table */
	search_tbl->lq_type = LQ_NONE;
	return 0;
out:
	/* Candidate set up: flag it for evaluation and pre-advance the
	 * action so the next search tries something different */
	lq_sta->search_better_tbl = 1;
	tbl->action++;
	if (tbl->action > IL_MIMO2_SWITCH_GI)
		tbl->action = IL_MIMO2_SWITCH_ANTENNA1;
	if (update_search_tbl_counter)
		search_tbl->action = tbl->action;

	return 0;

}
1647
1648/*
1649 * Check whether we should continue using same modulation mode, or
1650 * begin search for a new mode, based on:
1651 * 1) # tx successes or failures while using this mode
1652 * 2) # times calling this function
1653 * 3) elapsed time in this mode (not used, for now)
1654 */
static void
il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, bool force_search)
{
	struct il_scale_tbl_info *tbl;
	int i;
	int active_tbl;
	int flush_interval_passed = 0;
	struct il_priv *il;

	il = lq_sta->drv;
	active_tbl = lq_sta->active_tbl;

	tbl = &(lq_sta->lq_info[active_tbl]);

	/* If we've been disallowing search, see if we should now allow it */
	if (lq_sta->stay_in_tbl) {

		/* Elapsed time using current modulation mode */
		if (lq_sta->flush_timer)
			flush_interval_passed =
			    time_after(jiffies,
				       (unsigned long)(lq_sta->flush_timer +
						       RATE_SCALE_FLUSH_INTVL));

		/*
		 * Check if we should allow search for new modulation mode.
		 * If many frames have failed or succeeded, or we've used
		 * this same modulation for a long time, allow search, and
		 * reset history stats that keep track of whether we should
		 * allow a new search. Also (below) reset all bitmaps and
		 * stats in active history.
		 */
		if (force_search ||
		    lq_sta->total_failed > lq_sta->max_failure_limit ||
		    lq_sta->total_success > lq_sta->max_success_limit ||
		    (!lq_sta->search_better_tbl && lq_sta->flush_timer &&
		     flush_interval_passed)) {
			D_RATE("LQ: stay is expired %d %d %d\n:",
			       lq_sta->total_failed, lq_sta->total_success,
			       flush_interval_passed);

			/* Allow search for new mode */
			lq_sta->stay_in_tbl = 0;	/* only place reset */
			lq_sta->total_failed = 0;
			lq_sta->total_success = 0;
			lq_sta->flush_timer = 0;

			/*
			 * Else if we've used this modulation mode enough repetitions
			 * (regardless of elapsed time or success/failure), reset
			 * history bitmaps and rate-specific stats for all rates in
			 * active table.
			 */
		} else {
			lq_sta->table_count++;
			if (lq_sta->table_count >= lq_sta->table_count_limit) {
				lq_sta->table_count = 0;

				D_RATE("LQ: stay in table clear win\n");
				for (i = 0; i < RATE_COUNT; i++)
					il4965_rs_rate_scale_clear_win(&
								       (tbl->
									win
									[i]));
			}
		}

		/* If transitioning to allow "search", reset all history
		 * bitmaps and stats in active table (this will become the new
		 * "search" table). */
		if (!lq_sta->stay_in_tbl) {
			for (i = 0; i < RATE_COUNT; i++)
				il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
		}
	}
}
1731
1732/*
1733 * setup rate table in uCode
1734 */
1735static void
1736il4965_rs_update_rate_tbl(struct il_priv *il, struct il_rxon_context *ctx,
1737 struct il_lq_sta *lq_sta,
1738 struct il_scale_tbl_info *tbl, int idx, u8 is_green)
1739{
1740 u32 rate;
1741
1742 /* Update uCode's rate table. */
1743 rate = il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
1744 il4965_rs_fill_link_cmd(il, lq_sta, rate);
1745 il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
1746}
1747
1748/*
1749 * Do rate scaling and search for new modulation mode.
1750 */
static void
il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
			     struct ieee80211_sta *sta,
			     struct il_lq_sta *lq_sta)
{
	struct ieee80211_hw *hw = il->hw;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int low = RATE_INVALID;
	int high = RATE_INVALID;
	int idx;
	int i;
	struct il_rate_scale_data *win = NULL;
	int current_tpt = IL_INVALID_VALUE;
	int low_tpt = IL_INVALID_VALUE;
	int high_tpt = IL_INVALID_VALUE;
	u32 fail_count;
	s8 scale_action = 0;
	u16 rate_mask;
	u8 update_lq = 0;
	struct il_scale_tbl_info *tbl, *tbl1;
	u16 rate_scale_idx_msk = 0;
	u8 is_green = 0;
	u8 active_tbl = 0;
	u8 done_search = 0;
	u16 high_low;
	s32 sr;
	u8 tid = MAX_TID_COUNT;
	struct il_tid_data *tid_data;
	struct il_station_priv *sta_priv = (void *)sta->drv_priv;
	struct il_rxon_context *ctx = sta_priv->common.ctx;

	D_RATE("rate scale calculate new rate for skb\n");

	/* Send management frames and NO_ACK data using lowest rate. */
	/* TODO: this could probably be improved.. */
	if (!ieee80211_is_data(hdr->frame_control) ||
	    (info->flags & IEEE80211_TX_CTL_NO_ACK))
		return;

	lq_sta->supp_rates = sta->supp_rates[lq_sta->band];

	/* Track per-TID traffic and note whether an aggregation session
	 * is currently active for this packet's TID */
	tid = il4965_rs_tl_add_packet(lq_sta, hdr);
	if (tid != MAX_TID_COUNT && (lq_sta->tx_agg_tid_en & (1 << tid))) {
		tid_data = &il->stations[lq_sta->lq.sta_id].tid[tid];
		if (tid_data->agg.state == IL_AGG_OFF)
			lq_sta->is_agg = 0;
		else
			lq_sta->is_agg = 1;
	} else
		lq_sta->is_agg = 0;

	/*
	 * Select rate-scale / modulation-mode table to work with in
	 * the rest of this function: "search" if searching for better
	 * modulation mode, or "active" if doing rate scaling within a mode.
	 */
	if (!lq_sta->search_better_tbl)
		active_tbl = lq_sta->active_tbl;
	else
		active_tbl = 1 - lq_sta->active_tbl;

	tbl = &(lq_sta->lq_info[active_tbl]);
	if (is_legacy(tbl->lq_type))
		lq_sta->is_green = 0;
	else
		lq_sta->is_green = il4965_rs_use_green(sta);
	is_green = lq_sta->is_green;

	/* current tx rate */
	idx = lq_sta->last_txrate_idx;

	D_RATE("Rate scale idx %d for type %d\n", idx, tbl->lq_type);

	/* rates available for this association, and for modulation mode */
	rate_mask = il4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);

	D_RATE("mask 0x%04X\n", rate_mask);

	/* mask with station rate restriction */
	if (is_legacy(tbl->lq_type)) {
		if (lq_sta->band == IEEE80211_BAND_5GHZ)
			/* supp_rates has no CCK bits in A mode */
			rate_scale_idx_msk =
			    (u16) (rate_mask &
				   (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
		else
			rate_scale_idx_msk =
			    (u16) (rate_mask & lq_sta->supp_rates);

	} else
		rate_scale_idx_msk = rate_mask;

	/* If the intersection came up empty, fall back to the mode mask */
	if (!rate_scale_idx_msk)
		rate_scale_idx_msk = rate_mask;

	if (!((1 << idx) & rate_scale_idx_msk)) {
		IL_ERR("Current Rate is not valid\n");
		if (lq_sta->search_better_tbl) {
			/* revert to active table if search table is not valid */
			tbl->lq_type = LQ_NONE;
			lq_sta->search_better_tbl = 0;
			tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
			/* get "active" rate info */
			idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
			il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx,
						  is_green);
		}
		return;
	}

	/* Get expected throughput table and history win for current rate */
	if (!tbl->expected_tpt) {
		IL_ERR("tbl->expected_tpt is NULL\n");
		return;
	}

	/* force user max rate if set by user */
	if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < idx) {
		idx = lq_sta->max_rate_idx;
		update_lq = 1;
		win = &(tbl->win[idx]);
		goto lq_update;
	}

	win = &(tbl->win[idx]);

	/*
	 * If there is not enough history to calculate actual average
	 * throughput, keep analyzing results of more tx frames, without
	 * changing rate or mode (bypass most of the rest of this function).
	 * Set up new rate table in uCode only if old rate is not supported
	 * in current association (use new rate found above).
	 */
	fail_count = win->counter - win->success_counter;
	if (fail_count < RATE_MIN_FAILURE_TH &&
	    win->success_counter < RATE_MIN_SUCCESS_TH) {
		D_RATE("LQ: still below TH. succ=%d total=%d " "for idx %d\n",
		       win->success_counter, win->counter, idx);

		/* Can't calculate this yet; not enough history */
		win->average_tpt = IL_INVALID_VALUE;

		/* Should we stay with this modulation mode,
		 * or search for a new one? */
		il4965_rs_stay_in_table(lq_sta, false);

		goto out;
	}
	/* Else we have enough samples; calculate estimate of
	 * actual average throughput */
	if (win->average_tpt !=
	    ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128)) {
		IL_ERR("expected_tpt should have been calculated by now\n");
		win->average_tpt =
		    ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128);
	}

	/* If we are searching for better modulation mode, check success. */
	if (lq_sta->search_better_tbl) {
		/* If good success, continue using the "search" mode;
		 * no need to send new link quality command, since we're
		 * continuing to use the setup that we've been trying. */
		if (win->average_tpt > lq_sta->last_tpt) {

			D_RATE("LQ: SWITCHING TO NEW TBL "
			       "suc=%d cur-tpt=%d old-tpt=%d\n",
			       win->success_ratio, win->average_tpt,
			       lq_sta->last_tpt);

			if (!is_legacy(tbl->lq_type))
				lq_sta->enable_counter = 1;

			/* Swap tables; "search" becomes "active" */
			lq_sta->active_tbl = active_tbl;
			current_tpt = win->average_tpt;

			/* Else poor success; go back to mode in "active" table */
		} else {

			D_RATE("LQ: GOING BACK TO THE OLD TBL "
			       "suc=%d cur-tpt=%d old-tpt=%d\n",
			       win->success_ratio, win->average_tpt,
			       lq_sta->last_tpt);

			/* Nullify "search" table */
			tbl->lq_type = LQ_NONE;

			/* Revert to "active" table */
			active_tbl = lq_sta->active_tbl;
			tbl = &(lq_sta->lq_info[active_tbl]);

			/* Revert to "active" rate and throughput info */
			idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
			current_tpt = lq_sta->last_tpt;

			/* Need to set up a new rate table in uCode */
			update_lq = 1;
		}

		/* Either way, we've made a decision; modulation mode
		 * search is done, allow rate adjustment next time. */
		lq_sta->search_better_tbl = 0;
		done_search = 1;	/* Don't switch modes below! */
		goto lq_update;
	}

	/* (Else) not in search of better modulation mode, try for better
	 * starting rate, while staying in this mode. */
	high_low =
	    il4965_rs_get_adjacent_rate(il, idx, rate_scale_idx_msk,
					tbl->lq_type);
	low = high_low & 0xff;
	high = (high_low >> 8) & 0xff;

	/* If user set max rate, dont allow higher than user constrain */
	if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < high)
		high = RATE_INVALID;

	sr = win->success_ratio;

	/* Collect measured throughputs for current and adjacent rates */
	current_tpt = win->average_tpt;
	if (low != RATE_INVALID)
		low_tpt = tbl->win[low].average_tpt;
	if (high != RATE_INVALID)
		high_tpt = tbl->win[high].average_tpt;

	scale_action = 0;

	/* Too many failures, decrease rate */
	if (sr <= RATE_DECREASE_TH || current_tpt == 0) {
		D_RATE("decrease rate because of low success_ratio\n");
		scale_action = -1;

		/* No throughput measured yet for adjacent rates; try increase. */
	} else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {

		if (high != RATE_INVALID && sr >= RATE_INCREASE_TH)
			scale_action = 1;
		else if (low != RATE_INVALID)
			scale_action = 0;
	}

	/* Both adjacent throughputs are measured, but neither one has better
	 * throughput; we're using the best rate, don't change it! */
	else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE &&
		 low_tpt < current_tpt && high_tpt < current_tpt)
		scale_action = 0;

	/* At least one adjacent rate's throughput is measured,
	 * and may have better performance. */
	else {
		/* Higher adjacent rate's throughput is measured */
		if (high_tpt != IL_INVALID_VALUE) {
			/* Higher rate has better throughput */
			if (high_tpt > current_tpt && sr >= RATE_INCREASE_TH)
				scale_action = 1;
			else
				scale_action = 0;

			/* Lower adjacent rate's throughput is measured */
		} else if (low_tpt != IL_INVALID_VALUE) {
			/* Lower rate has better throughput */
			if (low_tpt > current_tpt) {
				D_RATE("decrease rate because of low tpt\n");
				scale_action = -1;
			} else if (sr >= RATE_INCREASE_TH) {
				scale_action = 1;
			}
		}
	}

	/* Sanity check; asked for decrease, but success rate or throughput
	 * has been good at old rate. Don't change it. */
	if (scale_action == -1 && low != RATE_INVALID &&
	    (sr > RATE_HIGH_TH || current_tpt > 100 * tbl->expected_tpt[low]))
		scale_action = 0;

	switch (scale_action) {
	case -1:
		/* Decrease starting rate, update uCode's rate table */
		if (low != RATE_INVALID) {
			update_lq = 1;
			idx = low;
		}

		break;
	case 1:
		/* Increase starting rate, update uCode's rate table */
		if (high != RATE_INVALID) {
			update_lq = 1;
			idx = high;
		}

		break;
	case 0:
		/* No change */
	default:
		break;
	}

	D_RATE("choose rate scale idx %d action %d low %d " "high %d type %d\n",
	       idx, scale_action, low, high, tbl->lq_type);

lq_update:
	/* Replace uCode's rate table for the destination station. */
	if (update_lq)
		il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx,
					  is_green);

	/* Should we stay with this modulation mode,
	 * or search for a new one? */
	il4965_rs_stay_in_table(lq_sta, false);

	/*
	 * Search for new modulation mode if we're:
	 * 1) Not changing rates right now
	 * 2) Not just finishing up a search
	 * 3) Allowing a new search
	 */
	if (!update_lq && !done_search && !lq_sta->stay_in_tbl && win->counter) {
		/* Save current throughput to compare with "search" throughput */
		lq_sta->last_tpt = current_tpt;

		/* Select a new "search" modulation mode to try.
		 * If one is found, set up the new "search" table. */
		if (is_legacy(tbl->lq_type))
			il4965_rs_move_legacy_other(il, lq_sta, conf, sta, idx);
		else if (is_siso(tbl->lq_type))
			il4965_rs_move_siso_to_other(il, lq_sta, conf, sta,
						     idx);
		else		/* (is_mimo2(tbl->lq_type)) */
			il4965_rs_move_mimo2_to_other(il, lq_sta, conf, sta,
						      idx);

		/* If new "search" mode was selected, set up in uCode table */
		if (lq_sta->search_better_tbl) {
			/* Access the "search" table, clear its history. */
			tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
			for (i = 0; i < RATE_COUNT; i++)
				il4965_rs_rate_scale_clear_win(&(tbl->win[i]));

			/* Use new "search" start rate */
			idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);

			D_RATE("Switch current mcs: %X idx: %d\n",
			       tbl->current_rate, idx);
			il4965_rs_fill_link_cmd(il, lq_sta, tbl->current_rate);
			il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
		} else
			done_search = 1;
	}

	if (done_search && !lq_sta->stay_in_tbl) {
		/* If the "active" (non-search) mode was legacy,
		 * and we've tried switching antennas,
		 * but we haven't been able to try HT modes (not available),
		 * stay with best antenna legacy modulation for a while
		 * before next round of mode comparisons. */
		tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
		if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
		    lq_sta->action_counter > tbl1->max_search) {
			D_RATE("LQ: STAY in legacy table\n");
			il4965_rs_set_stay_in_table(il, 1, lq_sta);
		}

		/* If we're in an HT mode, and all 3 mode switch actions
		 * have been tried and compared, stay in this best modulation
		 * mode for a while before next round of mode comparisons. */
		if (lq_sta->enable_counter &&
		    lq_sta->action_counter >= tbl1->max_search) {
			if (lq_sta->last_tpt > IL_AGG_TPT_THREHOLD &&
			    (lq_sta->tx_agg_tid_en & (1 << tid)) &&
			    tid != MAX_TID_COUNT) {
				tid_data =
				    &il->stations[lq_sta->lq.sta_id].tid[tid];
				if (tid_data->agg.state == IL_AGG_OFF) {
					D_RATE("try to aggregate tid %d\n",
					       tid);
					il4965_rs_tl_turn_on_agg(il, tid,
								 lq_sta, sta);
				}
			}
			il4965_rs_set_stay_in_table(il, 0, lq_sta);
		}
	}

out:
	/* Record the (possibly unchanged) rate chosen this round */
	tbl->current_rate =
	    il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
	i = idx;
	lq_sta->last_txrate_idx = i;
}
2146
2147/**
2148 * il4965_rs_initialize_lq - Initialize a station's hardware rate table
2149 *
2150 * The uCode's station table contains a table of fallback rates
2151 * for automatic fallback during transmission.
2152 *
2153 * NOTE: This sets up a default set of values. These will be replaced later
2154 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
2155 * rc80211_simple.
2156 *
2157 * NOTE: Run C_ADD_STA command to set up station table entry, before
2158 * calling this function (which runs C_TX_LINK_QUALITY_CMD,
2159 * which requires station table entry to exist).
2160 */
static void
il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
			struct ieee80211_sta *sta, struct il_lq_sta *lq_sta)
{
	struct il_scale_tbl_info *tbl;
	int rate_idx;
	int i;
	u32 rate;
	u8 use_green = il4965_rs_use_green(sta);
	u8 active_tbl = 0;
	u8 valid_tx_ant;
	struct il_station_priv *sta_priv;
	struct il_rxon_context *ctx;

	if (!sta || !lq_sta)
		return;

	sta_priv = (void *)sta->drv_priv;
	ctx = sta_priv->common.ctx;

	i = lq_sta->last_txrate_idx;

	valid_tx_ant = il->hw_params.valid_tx_ant;

	/* Work on the table the scaling code is not currently using */
	if (!lq_sta->search_better_tbl)
		active_tbl = lq_sta->active_tbl;
	else
		active_tbl = 1 - lq_sta->active_tbl;

	tbl = &(lq_sta->lq_info[active_tbl]);

	/* Clamp an out-of-range starting index to the lowest rate */
	if (i < 0 || i >= RATE_COUNT)
		i = 0;

	/* Build rate_n_flags: PLCP value plus antenna (and CCK) bits */
	rate = il_rates[i].plcp;
	tbl->ant_type = il4965_first_antenna(valid_tx_ant);
	rate |= tbl->ant_type << RATE_MCS_ANT_POS;

	if (i >= IL_FIRST_CCK_RATE && i <= IL_LAST_CCK_RATE)
		rate |= RATE_MCS_CCK_MSK;

	il4965_rs_get_tbl_info_from_mcs(rate, il->band, tbl, &rate_idx);
	if (!il4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
		il4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);

	rate = il4965_rate_n_flags_from_tbl(il, tbl, rate_idx, use_green);
	tbl->current_rate = rate;
	il4965_rs_set_expected_tpt_table(lq_sta, tbl);
	/* NOTE(review): il is deliberately NULL here — fill_link_cmd only
	 * uses it for antenna toggling, which is skipped at init time */
	il4965_rs_fill_link_cmd(NULL, lq_sta, rate);
	il->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
	il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_SYNC, true);
}
2213
/*
 * mac80211 rate_control_ops .get_rate hook: translate the rate chosen by
 * the scaling algorithm (lq_sta->last_txrate_idx / last_rate_n_flags)
 * into the ieee80211_tx_info control rate for this skb.
 */
static void
il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
		   struct ieee80211_tx_rate_control *txrc)
{

	struct sk_buff *skb = txrc->skb;
	struct ieee80211_supported_band *sband = txrc->sband;
	struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct il_lq_sta *lq_sta = il_sta;
	int rate_idx;

	D_RATE("rate scale calculate new rate for skb\n");

	/* Get max rate if user set max rate */
	if (lq_sta) {
		lq_sta->max_rate_idx = txrc->max_rate_idx;
		/* 5 GHz indices are offset past the CCK rates */
		if (sband->band == IEEE80211_BAND_5GHZ &&
		    lq_sta->max_rate_idx != -1)
			lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE;
		if (lq_sta->max_rate_idx < 0 ||
		    lq_sta->max_rate_idx >= RATE_COUNT)
			lq_sta->max_rate_idx = -1;
	}

	/* Treat uninitialized rate scaling data same as non-existing. */
	if (lq_sta && !lq_sta->drv) {
		D_RATE("Rate scaling not initialized yet.\n");
		il_sta = NULL;
	}

	/* Send management frames and NO_ACK data using lowest rate. */
	if (rate_control_send_low(sta, il_sta, txrc))
		return;

	if (!lq_sta)
		return;

	rate_idx = lq_sta->last_txrate_idx;

	if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
		/* Convert driver rate index to an HT MCS index */
		rate_idx -= IL_FIRST_OFDM_RATE;
		/* 6M and 9M shared same MCS idx */
		rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
		if (il4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
		    RATE_MIMO2_6M_PLCP)
			rate_idx = rate_idx + MCS_IDX_PER_STREAM;
		/* Mirror the hardware rate flags into mac80211 TX flags */
		info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
		if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
			info->control.rates[0].flags |=
			    IEEE80211_TX_RC_SHORT_GI;
		if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
			info->control.rates[0].flags |=
			    IEEE80211_TX_RC_DUP_DATA;
		if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
			info->control.rates[0].flags |=
			    IEEE80211_TX_RC_40_MHZ_WIDTH;
		if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
			info->control.rates[0].flags |=
			    IEEE80211_TX_RC_GREEN_FIELD;
	} else {
		/* Check for invalid rates */
		if (rate_idx < 0 || rate_idx >= RATE_COUNT_LEGACY ||
		    (sband->band == IEEE80211_BAND_5GHZ &&
		     rate_idx < IL_FIRST_OFDM_RATE))
			rate_idx = rate_lowest_index(sband, sta);
		/* On valid 5 GHz rate, adjust idx */
		else if (sband->band == IEEE80211_BAND_5GHZ)
			rate_idx -= IL_FIRST_OFDM_RATE;
		info->control.rates[0].flags = 0;
	}
	info->control.rates[0].idx = rate_idx;

}
2288
/*
 * mac80211 rate_control_ops .alloc_sta hook: per-station rate-scale state
 * lives inside struct il_station_priv, so no separate allocation is done;
 * just hand back a pointer into the station's driver-private area.
 */
static void *
il4965_rs_alloc_sta(void *il_rate, struct ieee80211_sta *sta, gfp_t gfp)
{
	struct il_station_priv *sta_priv =
	    (struct il_station_priv *)sta->drv_priv;
	struct il_priv *il;

	/* il is kept for the debug macro below */
	il = (struct il_priv *)il_rate;
	D_RATE("create station rate scale win\n");

	return &sta_priv->lq_sta;
}
2301
2302/*
2303 * Called after adding a new station to initialize rate scaling
2304 */
2305void
2306il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
2307{
2308 int i, j;
2309 struct ieee80211_hw *hw = il->hw;
2310 struct ieee80211_conf *conf = &il->hw->conf;
2311 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2312 struct il_station_priv *sta_priv;
2313 struct il_lq_sta *lq_sta;
2314 struct ieee80211_supported_band *sband;
2315
2316 sta_priv = (struct il_station_priv *)sta->drv_priv;
2317 lq_sta = &sta_priv->lq_sta;
2318 sband = hw->wiphy->bands[conf->channel->band];
2319
2320 lq_sta->lq.sta_id = sta_id;
2321
2322 for (j = 0; j < LQ_SIZE; j++)
2323 for (i = 0; i < RATE_COUNT; i++)
2324 il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j].
2325 win[i]);
2326
2327 lq_sta->flush_timer = 0;
2328 lq_sta->supp_rates = sta->supp_rates[sband->band];
2329 for (j = 0; j < LQ_SIZE; j++)
2330 for (i = 0; i < RATE_COUNT; i++)
2331 il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j].
2332 win[i]);
2333
2334 D_RATE("LQ:" "*** rate scale station global init for station %d ***\n",
2335 sta_id);
2336 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2337 * the lowest or the highest rate.. Could consider using RSSI from
2338 * previous packets? Need to have IEEE 802.1X auth succeed immediately
2339 * after assoc.. */
2340
2341 lq_sta->is_dup = 0;
2342 lq_sta->max_rate_idx = -1;
2343 lq_sta->missed_rate_counter = IL_MISSED_RATE_MAX;
2344 lq_sta->is_green = il4965_rs_use_green(sta);
2345 lq_sta->active_legacy_rate = il->active_rate & ~(0x1000);
2346 lq_sta->band = il->band;
2347 /*
2348 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
2349 * supp_rates[] does not; shift to convert format, force 9 MBits off.
2350 */
2351 lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
2352 lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
2353 lq_sta->active_siso_rate &= ~((u16) 0x2);
2354 lq_sta->active_siso_rate <<= IL_FIRST_OFDM_RATE;
2355
2356 /* Same here */
2357 lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
2358 lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
2359 lq_sta->active_mimo2_rate &= ~((u16) 0x2);
2360 lq_sta->active_mimo2_rate <<= IL_FIRST_OFDM_RATE;
2361
2362 /* These values will be overridden later */
2363 lq_sta->lq.general_params.single_stream_ant_msk =
2364 il4965_first_antenna(il->hw_params.valid_tx_ant);
2365 lq_sta->lq.general_params.dual_stream_ant_msk =
2366 il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
2367 valid_tx_ant);
2368 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2369 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2370 } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
2371 lq_sta->lq.general_params.dual_stream_ant_msk =
2372 il->hw_params.valid_tx_ant;
2373 }
2374
2375 /* as default allow aggregation for all tids */
2376 lq_sta->tx_agg_tid_en = IL_AGG_ALL_TID;
2377 lq_sta->drv = il;
2378
2379 /* Set last_txrate_idx to lowest rate */
2380 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
2381 if (sband->band == IEEE80211_BAND_5GHZ)
2382 lq_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
2383 lq_sta->is_agg = 0;
2384
2385#ifdef CONFIG_MAC80211_DEBUGFS
2386 lq_sta->dbg_fixed_rate = 0;
2387#endif
2388
2389 il4965_rs_initialize_lq(il, conf, sta, lq_sta);
2390}
2391
/*
 * Fill the link quality command's retry-rate table, starting from
 * new_rate at entry 0 and stepping down through fallback rates.
 * May be called with il == NULL (from il4965_rs_initialize_lq), in
 * which case antenna toggling is skipped.
 */
static void
il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
			u32 new_rate)
{
	struct il_scale_tbl_info tbl_type;
	int idx = 0;
	int rate_idx;
	int repeat_rate = 0;
	u8 ant_toggle_cnt = 0;
	u8 use_ht_possible = 1;
	u8 valid_tx_ant = 0;
	struct il_link_quality_cmd *lq_cmd = &lq_sta->lq;

	/* Override starting rate (idx 0) if needed for debug purposes */
	il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);

	/* Interpret new_rate (rate_n_flags) */
	il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
					&rate_idx);

	/* How many times should we repeat the initial rate? */
	if (is_legacy(tbl_type.lq_type)) {
		ant_toggle_cnt = 1;
		repeat_rate = IL_NUMBER_TRY;
	} else {
		repeat_rate = IL_HT_NUMBER_TRY;
	}

	lq_cmd->general_params.mimo_delimiter =
	    is_mimo(tbl_type.lq_type) ? 1 : 0;

	/* Fill 1st table entry (idx 0) */
	lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate);

	if (il4965_num_of_ant(tbl_type.ant_type) == 1) {
		lq_cmd->general_params.single_stream_ant_msk =
		    tbl_type.ant_type;
	} else if (il4965_num_of_ant(tbl_type.ant_type) == 2) {
		lq_cmd->general_params.dual_stream_ant_msk = tbl_type.ant_type;
	}
	/* otherwise we don't modify the existing value */
	idx++;
	repeat_rate--;
	if (il)
		valid_tx_ant = il->hw_params.valid_tx_ant;

	/* Fill rest of rate table */
	while (idx < LINK_QUAL_MAX_RETRY_NUM) {
		/* Repeat initial/next rate.
		 * For legacy IL_NUMBER_TRY == 1, this loop will not execute.
		 * For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
		while (repeat_rate > 0 && idx < LINK_QUAL_MAX_RETRY_NUM) {
			if (is_legacy(tbl_type.lq_type)) {
				/* Rotate antenna only after a few tries */
				if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
					ant_toggle_cnt++;
				else if (il &&
					 il4965_rs_toggle_antenna(valid_tx_ant,
								  &new_rate,
								  &tbl_type))
					ant_toggle_cnt = 1;
			}

			/* Override next rate if needed for debug purposes */
			il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);

			/* Fill next table entry */
			lq_cmd->rs_table[idx].rate_n_flags =
			    cpu_to_le32(new_rate);
			repeat_rate--;
			idx++;
		}

		il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
						&tbl_type, &rate_idx);

		/* Indicate to uCode which entries might be MIMO.
		 * If initial rate was MIMO, this will finally end up
		 * as (IL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
		if (is_mimo(tbl_type.lq_type))
			lq_cmd->general_params.mimo_delimiter = idx;

		/* Get next rate */
		new_rate =
		    il4965_rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
					     use_ht_possible);

		/* How many times should we repeat the next rate? */
		if (is_legacy(tbl_type.lq_type)) {
			if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
				ant_toggle_cnt++;
			else if (il &&
				 il4965_rs_toggle_antenna(valid_tx_ant,
							  &new_rate, &tbl_type))
				ant_toggle_cnt = 1;

			repeat_rate = IL_NUMBER_TRY;
		} else {
			repeat_rate = IL_HT_NUMBER_TRY;
		}

		/* Don't allow HT rates after next pass.
		 * il4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
		use_ht_possible = 0;

		/* Override next rate if needed for debug purposes */
		il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);

		/* Fill next table entry */
		lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate);

		idx++;
		repeat_rate--;
	}

	/* Aggregation limits sent along with the rate table */
	lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;

	lq_cmd->agg_params.agg_time_limit =
	    cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
}
2512
static void *
il4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
	/* mac80211 rate-control "alloc" hook: the driver's private area
	 * doubles as the rate-scaling context, nothing extra is allocated. */
	return hw->priv;
}
2518
/* rate scale requires free function to be implemented */
static void
il4965_rs_free(void *il_rate)
{
	/* Nothing to release: il4965_rs_alloc() hands out hw->priv, which
	 * is owned by the driver, not by mac80211 rate control. */
}
2525
static void
il4965_rs_free_sta(void *il_r, struct ieee80211_sta *sta, void *il_sta)
{
	/* Per-station rate data is embedded in the station private area,
	 * so there is nothing to free here; only trace entry/exit.
	 * NOTE: 'il' may be referenced by the D_RATE() debug macros even
	 * though it looks unused — hence __maybe_unused. */
	struct il_priv *il __maybe_unused = il_r;

	D_RATE("enter\n");
	D_RATE("leave\n");
}
2534
2535#ifdef CONFIG_MAC80211_DEBUGFS
static int
il4965_open_file_generic(struct inode *inode, struct file *file)
{
	/* Generic debugfs open: expose the entry's private data (here the
	 * per-station lq_sta) to the read/write handlers. */
	file->private_data = inode->i_private;
	return 0;
}
2542
2543static void
2544il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
2545{
2546 struct il_priv *il;
2547 u8 valid_tx_ant;
2548 u8 ant_sel_tx;
2549
2550 il = lq_sta->drv;
2551 valid_tx_ant = il->hw_params.valid_tx_ant;
2552 if (lq_sta->dbg_fixed_rate) {
2553 ant_sel_tx =
2554 ((lq_sta->
2555 dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
2556 RATE_MCS_ANT_POS);
2557 if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
2558 *rate_n_flags = lq_sta->dbg_fixed_rate;
2559 D_RATE("Fixed rate ON\n");
2560 } else {
2561 lq_sta->dbg_fixed_rate = 0;
2562 IL_ERR
2563 ("Invalid antenna selection 0x%X, Valid is 0x%X\n",
2564 ant_sel_tx, valid_tx_ant);
2565 D_RATE("Fixed rate OFF\n");
2566 }
2567 } else {
2568 D_RATE("Fixed rate OFF\n");
2569 }
2570}
2571
2572static ssize_t
2573il4965_rs_sta_dbgfs_scale_table_write(struct file *file,
2574 const char __user *user_buf,
2575 size_t count, loff_t *ppos)
2576{
2577 struct il_lq_sta *lq_sta = file->private_data;
2578 struct il_priv *il;
2579 char buf[64];
2580 size_t buf_size;
2581 u32 parsed_rate;
2582 struct il_station_priv *sta_priv =
2583 container_of(lq_sta, struct il_station_priv, lq_sta);
2584 struct il_rxon_context *ctx = sta_priv->common.ctx;
2585
2586 il = lq_sta->drv;
2587 memset(buf, 0, sizeof(buf));
2588 buf_size = min(count, sizeof(buf) - 1);
2589 if (copy_from_user(buf, user_buf, buf_size))
2590 return -EFAULT;
2591
2592 if (sscanf(buf, "%x", &parsed_rate) == 1)
2593 lq_sta->dbg_fixed_rate = parsed_rate;
2594 else
2595 lq_sta->dbg_fixed_rate = 0;
2596
2597 lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
2598 lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2599 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2600
2601 D_RATE("sta_id %d rate 0x%X\n", lq_sta->lq.sta_id,
2602 lq_sta->dbg_fixed_rate);
2603
2604 if (lq_sta->dbg_fixed_rate) {
2605 il4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
2606 il_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC, false);
2607 }
2608
2609 return count;
2610}
2611
static ssize_t
il4965_rs_sta_dbgfs_scale_table_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	/* debugfs read handler: dump the active scale table state (antennas,
	 * lq type, aggregation params, full retry rate table) into a heap
	 * buffer and copy the requested slice to userspace.
	 *
	 * NOTE(review): output is built with unbounded sprintf() into a
	 * fixed 1024-byte buffer; confirm the worst-case dump fits, or
	 * convert to scnprintf() with a running bound. */
	char *buff;
	int desc = 0;
	int i = 0;
	int idx = 0;
	ssize_t ret;

	struct il_lq_sta *lq_sta = file->private_data;
	struct il_priv *il;
	struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);

	il = lq_sta->drv;
	buff = kmalloc(1024, GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	desc += sprintf(buff + desc, "sta_id %d\n", lq_sta->lq.sta_id);
	desc +=
	    sprintf(buff + desc, "failed=%d success=%d rate=0%X\n",
		    lq_sta->total_failed, lq_sta->total_success,
		    lq_sta->active_legacy_rate);
	desc +=
	    sprintf(buff + desc, "fixed rate 0x%X\n", lq_sta->dbg_fixed_rate);
	desc +=
	    sprintf(buff + desc, "valid_tx_ant %s%s%s\n",
		    (il->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
		    (il->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
		    (il->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
	desc +=
	    sprintf(buff + desc, "lq type %s\n",
		    (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
	if (is_Ht(tbl->lq_type)) {
		/* Extra detail only meaningful for HT tables */
		desc +=
		    sprintf(buff + desc, " %s",
			    (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
		desc +=
		    sprintf(buff + desc, " %s",
			    (tbl->is_ht40) ? "40MHz" : "20MHz");
		desc +=
		    sprintf(buff + desc, " %s %s %s\n",
			    (tbl->is_SGI) ? "SGI" : "",
			    (lq_sta->is_green) ? "GF enabled" : "",
			    (lq_sta->is_agg) ? "AGG on" : "");
	}
	desc +=
	    sprintf(buff + desc, "last tx rate=0x%X\n",
		    lq_sta->last_rate_n_flags);
	desc +=
	    sprintf(buff + desc,
		    "general:" "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
		    lq_sta->lq.general_params.flags,
		    lq_sta->lq.general_params.mimo_delimiter,
		    lq_sta->lq.general_params.single_stream_ant_msk,
		    lq_sta->lq.general_params.dual_stream_ant_msk);

	desc +=
	    sprintf(buff + desc,
		    "agg:"
		    "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
		    le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
		    lq_sta->lq.agg_params.agg_dis_start_th,
		    lq_sta->lq.agg_params.agg_frame_cnt_limit);

	desc +=
	    sprintf(buff + desc,
		    "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
		    lq_sta->lq.general_params.start_rate_idx[0],
		    lq_sta->lq.general_params.start_rate_idx[1],
		    lq_sta->lq.general_params.start_rate_idx[2],
		    lq_sta->lq.general_params.start_rate_idx[3]);

	/* One line per retry-table entry; HT entries also show MCS */
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
		idx =
		    il4965_hwrate_to_plcp_idx(le32_to_cpu
					      (lq_sta->lq.rs_table[i].
					       rate_n_flags));
		if (is_legacy(tbl->lq_type)) {
			desc +=
			    sprintf(buff + desc, " rate[%d] 0x%X %smbps\n", i,
				    le32_to_cpu(lq_sta->lq.rs_table[i].
						rate_n_flags),
				    il_rate_mcs[idx].mbps);
		} else {
			desc +=
			    sprintf(buff + desc, " rate[%d] 0x%X %smbps (%s)\n",
				    i,
				    le32_to_cpu(lq_sta->lq.rs_table[i].
						rate_n_flags),
				    il_rate_mcs[idx].mbps,
				    il_rate_mcs[idx].mcs);
		}
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
	kfree(buff);
	return ret;
}
2712
/* debugfs "rate_scale_table": read dumps state, write sets a fixed rate */
static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
	.write = il4965_rs_sta_dbgfs_scale_table_write,
	.read = il4965_rs_sta_dbgfs_scale_table_read,
	.open = il4965_open_file_generic,
	.llseek = default_llseek,
};
2719
static ssize_t
il4965_rs_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	/* debugfs read handler: dump every scale table (active one marked
	 * '*', others 'x') plus the per-rate success windows.
	 *
	 * NOTE(review): unbounded sprintf() into a fixed 1024-byte buffer;
	 * verify LQ_SIZE * RATE_COUNT lines cannot overrun it. */
	char *buff;
	int desc = 0;
	int i, j;
	ssize_t ret;

	struct il_lq_sta *lq_sta = file->private_data;

	buff = kmalloc(1024, GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	for (i = 0; i < LQ_SIZE; i++) {
		desc +=
		    sprintf(buff + desc,
			    "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
			    "rate=0x%X\n", lq_sta->active_tbl == i ? "*" : "x",
			    lq_sta->lq_info[i].lq_type,
			    lq_sta->lq_info[i].is_SGI,
			    lq_sta->lq_info[i].is_ht40,
			    lq_sta->lq_info[i].is_dup, lq_sta->is_green,
			    lq_sta->lq_info[i].current_rate);
		for (j = 0; j < RATE_COUNT; j++) {
			desc +=
			    sprintf(buff + desc,
				    "counter=%d success=%d %%=%d\n",
				    lq_sta->lq_info[i].win[j].counter,
				    lq_sta->lq_info[i].win[j].success_counter,
				    lq_sta->lq_info[i].win[j].success_ratio);
		}
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
	kfree(buff);
	return ret;
}
2758
/* debugfs "rate_stats_table": read-only per-rate statistics dump */
static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
	.read = il4965_rs_sta_dbgfs_stats_table_read,
	.open = il4965_open_file_generic,
	.llseek = default_llseek,
};
2764
2765static ssize_t
2766il4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
2767 char __user *user_buf, size_t count,
2768 loff_t *ppos)
2769{
2770 char buff[120];
2771 int desc = 0;
2772 struct il_lq_sta *lq_sta = file->private_data;
2773 struct il_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
2774
2775 if (is_Ht(tbl->lq_type))
2776 desc +=
2777 sprintf(buff + desc, "Bit Rate= %d Mb/s\n",
2778 tbl->expected_tpt[lq_sta->last_txrate_idx]);
2779 else
2780 desc +=
2781 sprintf(buff + desc, "Bit Rate= %d Mb/s\n",
2782 il_rates[lq_sta->last_txrate_idx].ieee >> 1);
2783
2784 return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2785}
2786
/* debugfs "rate_scale_data": read-only current bit-rate report */
static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
	.read = il4965_rs_sta_dbgfs_rate_scale_data_read,
	.open = il4965_open_file_generic,
	.llseek = default_llseek,
};
2792
static void
il4965_rs_add_debugfs(void *il, void *il_sta, struct dentry *dir)
{
	/* Create the per-station rate-scaling debugfs entries; the dentries
	 * are stashed in lq_sta so il4965_rs_remove_debugfs() can delete
	 * them when the station goes away. */
	struct il_lq_sta *lq_sta = il_sta;
	lq_sta->rs_sta_dbgfs_scale_table_file =
	    debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
				lq_sta, &rs_sta_dbgfs_scale_table_ops);
	lq_sta->rs_sta_dbgfs_stats_table_file =
	    debugfs_create_file("rate_stats_table", S_IRUSR, dir, lq_sta,
				&rs_sta_dbgfs_stats_table_ops);
	lq_sta->rs_sta_dbgfs_rate_scale_data_file =
	    debugfs_create_file("rate_scale_data", S_IRUSR, dir, lq_sta,
				&rs_sta_dbgfs_rate_scale_data_ops);
	lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
	    debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
			      &lq_sta->tx_agg_tid_en);

}
2811
static void
il4965_rs_remove_debugfs(void *il, void *il_sta)
{
	/* Tear down exactly the four entries created by
	 * il4965_rs_add_debugfs(). */
	struct il_lq_sta *lq_sta = il_sta;
	debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
}
2821#endif
2822
/*
 * Initialization of rate scaling information is done by driver after
 * the station is added. Since mac80211 calls this function before a
 * station is added we ignore it.
 */
static void
il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
			 struct ieee80211_sta *sta, void *il_sta)
{
	/* Intentionally empty — see comment above. */
}
2833
/* mac80211 rate-control backend for the 4965, registered at module init */
static struct rate_control_ops rs_4965_ops = {
	.module = NULL,
	.name = IL4965_RS_NAME,
	.tx_status = il4965_rs_tx_status,
	.get_rate = il4965_rs_get_rate,
	.rate_init = il4965_rs_rate_init_stub,
	.alloc = il4965_rs_alloc,
	.free = il4965_rs_free,
	.alloc_sta = il4965_rs_alloc_sta,
	.free_sta = il4965_rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = il4965_rs_add_debugfs,
	.remove_sta_debugfs = il4965_rs_remove_debugfs,
#endif
};
2849
int
il4965_rate_control_register(void)
{
	/* Register the 4965 rate-scaling algorithm with mac80211.
	 * Returns 0 on success or a negative errno from mac80211. */
	return ieee80211_rate_control_register(&rs_4965_ops);
}
2855
void
il4965_rate_control_unregister(void)
{
	/* Counterpart of il4965_rate_control_register(). */
	ieee80211_rate_control_unregister(&rs_4965_ops);
}
diff --git a/drivers/net/wireless/iwlegacy/4965.c b/drivers/net/wireless/iwlegacy/4965.c
new file mode 100644
index 000000000000..84c54dccf195
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965.c
@@ -0,0 +1,2421 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/sched.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "common.h"
41#include "4965.h"
42
/**
 * il4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
 * using sample data 100 bytes apart. If these sample points are good,
 * it's a pretty good bet that everything between them is good, too.
 */
static int
il4965_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
{
	u32 val;
	int ret = 0;
	u32 errcnt = 0;
	u32 i;

	D_INFO("ucode inst image size is %u\n", len);

	for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IL_DL_IO is set */
		il_wr(il, HBUS_TARG_MEM_RADDR, i + IL4965_RTC_INST_LOWER_BOUND);
		val = _il_rd(il, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			ret = -EIO;
			/* Give up after a few mismatches; the caller then
			 * falls back to the verbose full verification. */
			errcnt++;
			if (errcnt >= 3)
				break;
		}
	}

	return ret;
}
74
75/**
76 * il4965_verify_inst_full - verify runtime uCode image in card vs. host,
77 * looking at all data.
78 */
79static int
80il4965_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
81{
82 u32 val;
83 u32 save_len = len;
84 int ret = 0;
85 u32 errcnt;
86
87 D_INFO("ucode inst image size is %u\n", len);
88
89 il_wr(il, HBUS_TARG_MEM_RADDR, IL4965_RTC_INST_LOWER_BOUND);
90
91 errcnt = 0;
92 for (; len > 0; len -= sizeof(u32), image++) {
93 /* read data comes through single port, auto-incr addr */
94 /* NOTE: Use the debugless read so we don't flood kernel log
95 * if IL_DL_IO is set */
96 val = _il_rd(il, HBUS_TARG_MEM_RDAT);
97 if (val != le32_to_cpu(*image)) {
98 IL_ERR("uCode INST section is invalid at "
99 "offset 0x%x, is 0x%x, s/b 0x%x\n",
100 save_len - len, val, le32_to_cpu(*image));
101 ret = -EIO;
102 errcnt++;
103 if (errcnt >= 20)
104 break;
105 }
106 }
107
108 if (!errcnt)
109 D_INFO("ucode image in INSTRUCTION memory is good\n");
110
111 return ret;
112}
113
114/**
115 * il4965_verify_ucode - determine which instruction image is in SRAM,
116 * and verify its contents
117 */
118int
119il4965_verify_ucode(struct il_priv *il)
120{
121 __le32 *image;
122 u32 len;
123 int ret;
124
125 /* Try bootstrap */
126 image = (__le32 *) il->ucode_boot.v_addr;
127 len = il->ucode_boot.len;
128 ret = il4965_verify_inst_sparse(il, image, len);
129 if (!ret) {
130 D_INFO("Bootstrap uCode is good in inst SRAM\n");
131 return 0;
132 }
133
134 /* Try initialize */
135 image = (__le32 *) il->ucode_init.v_addr;
136 len = il->ucode_init.len;
137 ret = il4965_verify_inst_sparse(il, image, len);
138 if (!ret) {
139 D_INFO("Initialize uCode is good in inst SRAM\n");
140 return 0;
141 }
142
143 /* Try runtime/protocol */
144 image = (__le32 *) il->ucode_code.v_addr;
145 len = il->ucode_code.len;
146 ret = il4965_verify_inst_sparse(il, image, len);
147 if (!ret) {
148 D_INFO("Runtime uCode is good in inst SRAM\n");
149 return 0;
150 }
151
152 IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
153
154 /* Since nothing seems to match, show first several data entries in
155 * instruction SRAM, so maybe visual inspection will give a clue.
156 * Selection of bootstrap image (vs. other images) is arbitrary. */
157 image = (__le32 *) il->ucode_boot.v_addr;
158 len = il->ucode_boot.len;
159 ret = il4965_verify_inst_full(il, image, len);
160
161 return ret;
162}
163
164/******************************************************************************
165 *
166 * EEPROM related functions
167 *
168******************************************************************************/
169
170/*
171 * The device's EEPROM semaphore prevents conflicts between driver and uCode
172 * when accessing the EEPROM; each access is a series of pulses to/from the
173 * EEPROM chip, not a single event, so even reads could conflict if they
174 * weren't arbitrated by the semaphore.
175 */
176int
177il4965_eeprom_acquire_semaphore(struct il_priv *il)
178{
179 u16 count;
180 int ret;
181
182 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
183 /* Request semaphore */
184 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
185 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
186
187 /* See if we got it */
188 ret =
189 _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
190 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
191 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
192 EEPROM_SEM_TIMEOUT);
193 if (ret >= 0)
194 return ret;
195 }
196
197 return ret;
198}
199
void
il4965_eeprom_release_semaphore(struct il_priv *il)
{
	/* Give the EEPROM semaphore back so uCode may access the chip. */
	il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
		     CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);

}
207
208int
209il4965_eeprom_check_version(struct il_priv *il)
210{
211 u16 eeprom_ver;
212 u16 calib_ver;
213
214 eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION);
215 calib_ver = il_eeprom_query16(il, EEPROM_4965_CALIB_VERSION_OFFSET);
216
217 if (eeprom_ver < il->cfg->eeprom_ver ||
218 calib_ver < il->cfg->eeprom_calib_ver)
219 goto err;
220
221 IL_INFO("device EEPROM VER=0x%x, CALIB=0x%x\n", eeprom_ver, calib_ver);
222
223 return 0;
224err:
225 IL_ERR("Unsupported (too old) EEPROM VER=0x%x < 0x%x "
226 "CALIB=0x%x < 0x%x\n", eeprom_ver, il->cfg->eeprom_ver,
227 calib_ver, il->cfg->eeprom_calib_ver);
228 return -EINVAL;
229
230}
231
232void
233il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac)
234{
235 const u8 *addr = il_eeprom_query_addr(il,
236 EEPROM_MAC_ADDRESS);
237 memcpy(mac, addr, ETH_ALEN);
238}
239
240/* Send led command */
241static int
242il4965_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
243{
244 struct il_host_cmd cmd = {
245 .id = C_LEDS,
246 .len = sizeof(struct il_led_cmd),
247 .data = led_cmd,
248 .flags = CMD_ASYNC,
249 .callback = NULL,
250 };
251 u32 reg;
252
253 reg = _il_rd(il, CSR_LED_REG);
254 if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
255 _il_wr(il, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
256
257 return il_send_cmd(il, &cmd);
258}
259
/* Turn the LED on by writing CSR_LED_REG_TRUN_ON to the LED register
 * (the previous "Set led register off" comment contradicted the code). */
void
il4965_led_enable(struct il_priv *il)
{
	_il_wr(il, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
}
266
/* LED callbacks exported to the common iwlegacy LED code */
const struct il_led_ops il4965_led_ops = {
	.cmd = il4965_send_led_cmd,
};
270
/* Forward declarations for routines defined later in this file */
static int il4965_send_tx_power(struct il_priv *il);
static int il4965_hw_get_temperature(struct il_priv *il);

/* Highest firmware API version supported */
#define IL4965_UCODE_API_MAX 2

/* Lowest firmware API version supported */
#define IL4965_UCODE_API_MIN 2

/* Build the firmware filename, e.g. "iwlwifi-4965-2.ucode" */
#define IL4965_FW_PRE "iwlwifi-4965-"
#define _IL4965_MODULE_FIRMWARE(api) IL4965_FW_PRE #api ".ucode"
#define IL4965_MODULE_FIRMWARE(api) _IL4965_MODULE_FIRMWARE(api)
283
/* check contents of special bootstrap uCode SRAM */
static int
il4965_verify_bsm(struct il_priv *il)
{
	__le32 *image = il->ucode_boot.v_addr;
	u32 len = il->ucode_boot.len;
	u32 reg;
	u32 val;

	D_INFO("Begin verify bsm\n");

	/* verify BSM SRAM contents */
	/* NOTE(review): this DWCOUNT read is discarded — 'val' is
	 * overwritten on the first loop iteration. Presumably kept for a
	 * register-read side effect; confirm before removing. */
	val = il_rd_prph(il, BSM_WR_DWCOUNT_REG);
	for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
	     reg += sizeof(u32), image++) {
		val = il_rd_prph(il, reg);
		if (val != le32_to_cpu(*image)) {
			IL_ERR("BSM uCode verification failed at "
			       "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
			       BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
			       len, val, le32_to_cpu(*image));
			return -EIO;
		}
	}

	D_INFO("BSM bootstrap uCode image OK\n");

	return 0;
}
313
/**
 * il4965_load_bsm - Load bootstrap instructions
 *
 * BSM operation:
 *
 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
 * in special SRAM that does not power down during RFKILL. When powering back
 * up after power-saving sleeps (or during initial uCode load), the BSM loads
 * the bootstrap program into the on-board processor, and starts it.
 *
 * The bootstrap program loads (via DMA) instructions and data for a new
 * program from host DRAM locations indicated by the host driver in the
 * BSM_DRAM_* registers. Once the new program is loaded, it starts
 * automatically.
 *
 * When initializing the NIC, the host driver points the BSM to the
 * "initialize" uCode image. This uCode sets up some internal data, then
 * notifies host via "initialize alive" that it is complete.
 *
 * The host then replaces the BSM_DRAM_* pointer values to point to the
 * normal runtime uCode instructions and a backup uCode data cache buffer
 * (filled initially with starting data values for the on-board processor),
 * then triggers the "initialize" uCode to load and launch the runtime uCode,
 * which begins normal operation.
 *
 * When doing a power-save shutdown, runtime uCode saves data SRAM into
 * the backup data cache in DRAM before SRAM is powered down.
 *
 * When powering back up, the BSM loads the bootstrap program. This reloads
 * the runtime uCode instructions and the backup data cache into SRAM,
 * and re-launches the runtime uCode from where it left off.
 *
 * Returns 0 on success, -EINVAL if the bootstrap image is oversized,
 * -EIO if the BSM never signals load completion, or the
 * il4965_verify_bsm() error code on verification failure.
 */
static int
il4965_load_bsm(struct il_priv *il)
{
	__le32 *image = il->ucode_boot.v_addr;
	u32 len = il->ucode_boot.len;
	dma_addr_t pinst;
	dma_addr_t pdata;
	u32 inst_len;
	u32 data_len;
	int i;
	u32 done;
	u32 reg_offset;
	int ret;

	D_INFO("Begin load bsm\n");

	il->ucode_type = UCODE_RT;

	/* make sure bootstrap program is no larger than BSM's SRAM size */
	if (len > IL49_MAX_BSM_SIZE)
		return -EINVAL;

	/* Tell bootstrap uCode where to find the "Initialize" uCode
	 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
	 * NOTE: il_init_alive_start() will replace these values,
	 * after the "initialize" uCode has run, to point to
	 * runtime/protocol instructions and backup data cache.
	 */
	pinst = il->ucode_init.p_addr >> 4;
	pdata = il->ucode_init_data.p_addr >> 4;
	inst_len = il->ucode_init.len;
	data_len = il->ucode_init_data.len;

	il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
	il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
	il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
	il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);

	/* Fill BSM memory with bootstrap instructions */
	for (reg_offset = BSM_SRAM_LOWER_BOUND;
	     reg_offset < BSM_SRAM_LOWER_BOUND + len;
	     reg_offset += sizeof(u32), image++)
		_il_wr_prph(il, reg_offset, le32_to_cpu(*image));

	/* Read back what was just written and compare against the image */
	ret = il4965_verify_bsm(il);
	if (ret)
		return ret;

	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
	il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
	il_wr_prph(il, BSM_WR_MEM_DST_REG, IL49_RTC_INST_LOWER_BOUND);
	il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));

	/* Load bootstrap code into instruction SRAM now,
	 * to prepare to load "initialize" uCode */
	il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	/* Wait for load of bootstrap uCode to finish (START bit clears);
	 * poll up to 100 times with a 10 us delay, i.e. ~1 ms total. */
	for (i = 0; i < 100; i++) {
		done = il_rd_prph(il, BSM_WR_CTRL_REG);
		if (!(done & BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i < 100)
		D_INFO("BSM write complete, poll %d iterations\n", i);
	else {
		IL_ERR("BSM write did not complete!\n");
		return -EIO;
	}

	/* Enable future boot loads whenever power management unit triggers it
	 * (e.g. when powering back up after power-save shutdown) */
	il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);

	return 0;
}
423
424/**
425 * il4965_set_ucode_ptrs - Set uCode address location
426 *
427 * Tell initialization uCode where to find runtime uCode.
428 *
429 * BSM registers initially contain pointers to initialization uCode.
430 * We need to replace them to load runtime uCode inst and data,
431 * and to save runtime data when powering down.
432 */
433static int
434il4965_set_ucode_ptrs(struct il_priv *il)
435{
436 dma_addr_t pinst;
437 dma_addr_t pdata;
438 int ret = 0;
439
440 /* bits 35:4 for 4965 */
441 pinst = il->ucode_code.p_addr >> 4;
442 pdata = il->ucode_data_backup.p_addr >> 4;
443
444 /* Tell bootstrap uCode where to find image to load */
445 il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
446 il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
447 il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
448
449 /* Inst byte count must be last to set up, bit 31 signals uCode
450 * that all new ptr/size info is in place */
451 il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
452 il->ucode_code.len | BSM_DRAM_INST_LOAD);
453 D_INFO("Runtime uCode pointers are set.\n");
454
455 return ret;
456}
457
/**
 * il4965_init_alive_start - Called after N_ALIVE notification received
 *
 * Called after N_ALIVE notification received from "initialize" uCode.
 *
 * The 4965 "initialize" ALIVE reply contains calibration data for:
 * Voltage, temperature, and MIMO tx gain correction, now stored in il
 * (3945 does not contain this data).
 *
 * Tell "initialize" uCode to go ahead and load the runtime uCode.
*/
static void
il4965_init_alive_start(struct il_priv *il)
{
	/* Bootstrap uCode has loaded initialize uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "initialize" alive if code weren't properly loaded. */
	if (il4965_verify_ucode(il)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		D_INFO("Bad \"initialize\" uCode load.\n");
		goto restart;
	}

	/* Calculate temperature */
	il->temperature = il4965_hw_get_temperature(il);

	/* Send pointers to protocol/runtime uCode image ... init code will
	 * load and launch runtime uCode, which will send us another "Alive"
	 * notification. */
	D_INFO("Initialization Alive received.\n");
	if (il4965_set_ucode_ptrs(il)) {
		/* Runtime instruction load won't happen;
		 * take it all the way back down so we can try again */
		D_INFO("Couldn't set up uCode pointers.\n");
		goto restart;
	}
	return;

restart:
	/* Schedule a full driver restart to retry firmware bring-up. */
	queue_work(il->workqueue, &il->restart);
}
500
501static bool
502iw4965_is_ht40_channel(__le32 rxon_flags)
503{
504 int chan_mod =
505 le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK) >>
506 RXON_FLG_CHANNEL_MODE_POS;
507 return (chan_mod == CHANNEL_MODE_PURE_40 ||
508 chan_mod == CHANNEL_MODE_MIXED);
509}
510
static void
il4965_nic_config(struct il_priv *il)
{
	/* Program radio configuration from EEPROM into the hardware
	 * interface config register and cache the tx-power calibration
	 * block; all done under the driver spinlock. */
	unsigned long flags;
	u16 radio_cfg;

	spin_lock_irqsave(&il->lock, flags);

	radio_cfg = il_eeprom_query16(il, EEPROM_RADIO_CONFIG);

	/* write radio config values to register */
	if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
			   EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
			   EEPROM_RF_CFG_DASH_MSK(radio_cfg));

	/* set CSR_HW_CONFIG_REG for uCode use */
	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
		   CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
		   CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);

	/* Cache a pointer to the EEPROM tx-power calibration data. */
	il->calib_info =
	    (struct il_eeprom_calib_info *)
	    il_eeprom_query_addr(il, EEPROM_4965_CALIB_TXPOWER_OFFSET);

	spin_unlock_irqrestore(&il->lock, flags);
}
539
540/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
541 * Called after every association, but this runs only once!
542 * ... once chain noise is calibrated the first time, it's good forever. */
543static void
544il4965_chain_noise_reset(struct il_priv *il)
545{
546 struct il_chain_noise_data *data = &(il->chain_noise_data);
547
548 if (data->state == IL_CHAIN_NOISE_ALIVE && il_is_any_associated(il)) {
549 struct il_calib_diff_gain_cmd cmd;
550
551 /* clear data for chain noise calibration algorithm */
552 data->chain_noise_a = 0;
553 data->chain_noise_b = 0;
554 data->chain_noise_c = 0;
555 data->chain_signal_a = 0;
556 data->chain_signal_b = 0;
557 data->chain_signal_c = 0;
558 data->beacon_count = 0;
559
560 memset(&cmd, 0, sizeof(cmd));
561 cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
562 cmd.diff_gain_a = 0;
563 cmd.diff_gain_b = 0;
564 cmd.diff_gain_c = 0;
565 if (il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd))
566 IL_ERR("Could not send C_PHY_CALIBRATION\n");
567 data->state = IL_CHAIN_NOISE_ACCUMULATE;
568 D_CALIB("Run chain_noise_calibrate\n");
569 }
570}
571
/* Default 4965 sensitivity-calibration thresholds handed to the common
 * sensitivity code via il->hw_params.sens */
static struct il_sensitivity_ranges il4965_sensitivity = {
	.min_nrg_cck = 97,
	.max_nrg_cck = 0,	/* not used, set to 0 */

	.auto_corr_min_ofdm = 85,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 140,
	.auto_corr_max_ofdm_mrc_x1 = 270,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,

	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};
598
static void
il4965_set_ct_threshold(struct il_priv *il)
{
	/* Critical-temperature kill threshold, converted to Kelvin
	 * because that is what the hardware/uCode works in. */
	/* want Kelvin */
	il->hw_params.ct_kill_threshold =
	    CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
}
606
/**
 * il4965_hw_set_hw_params
 *
 * Called when initializing driver
 *
 * Populates il->hw_params with 4965-specific limits (queue counts, SRAM
 * sizes, antenna configuration, sensitivity table). Always returns 0.
 *
 * NOTE(review): this writes the module-parameter queue count back into
 * il->cfg->base_params, mutating what looks like shared config — confirm
 * cfg is per-device before reusing it across devices.
 */
static int
il4965_hw_set_hw_params(struct il_priv *il)
{
	/* Honor a user-supplied queue count if it is within HW limits. */
	if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
	    il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
		il->cfg->base_params->num_of_queues =
		    il->cfg->mod_params->num_of_queues;

	il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues;
	il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
	il->hw_params.scd_bc_tbls_size =
	    il->cfg->base_params->num_of_queues *
	    sizeof(struct il4965_scd_bc_tbl);
	il->hw_params.tfd_size = sizeof(struct il_tfd);
	il->hw_params.max_stations = IL4965_STATION_COUNT;
	il->ctx.bcast_sta_id = IL4965_BROADCAST_ID;
	il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
	il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
	il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
	/* 4965 supports HT40 only on the 5 GHz band */
	il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);

	il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;

	il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
	il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
	il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
	il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;

	il4965_set_ct_threshold(il);

	il->hw_params.sens = &il4965_sensitivity;
	il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;

	return 0;
}
647
648static s32
649il4965_math_div_round(s32 num, s32 denom, s32 * res)
650{
651 s32 sign = 1;
652
653 if (num < 0) {
654 sign = -sign;
655 num = -num;
656 }
657 if (denom < 0) {
658 sign = -sign;
659 denom = -denom;
660 }
661 *res = 1;
662 *res = ((num * 2 + denom) / (denom * 2)) * sign;
663
664 return 1;
665}
666
667/**
668 * il4965_get_voltage_compensation - Power supply voltage comp for txpower
669 *
670 * Determines power supply voltage compensation for txpower calculations.
671 * Returns number of 1/2-dB steps to subtract from gain table idx,
672 * to compensate for difference between power supply voltage during
673 * factory measurements, vs. current power supply voltage.
674 *
675 * Voltage indication is higher for lower voltage.
676 * Lower voltage requires more gain (lower gain table idx).
677 */
678static s32
679il4965_get_voltage_compensation(s32 eeprom_voltage, s32 current_voltage)
680{
681 s32 comp = 0;
682
683 if (TX_POWER_IL_ILLEGAL_VOLTAGE == eeprom_voltage ||
684 TX_POWER_IL_ILLEGAL_VOLTAGE == current_voltage)
685 return 0;
686
687 il4965_math_div_round(current_voltage - eeprom_voltage,
688 TX_POWER_IL_VOLTAGE_CODES_PER_03V, &comp);
689
690 if (current_voltage > eeprom_voltage)
691 comp *= 2;
692 if ((comp < -2) || (comp > 2))
693 comp = 0;
694
695 return comp;
696}
697
698static s32
699il4965_get_tx_atten_grp(u16 channel)
700{
701 if (channel >= CALIB_IL_TX_ATTEN_GR5_FCH &&
702 channel <= CALIB_IL_TX_ATTEN_GR5_LCH)
703 return CALIB_CH_GROUP_5;
704
705 if (channel >= CALIB_IL_TX_ATTEN_GR1_FCH &&
706 channel <= CALIB_IL_TX_ATTEN_GR1_LCH)
707 return CALIB_CH_GROUP_1;
708
709 if (channel >= CALIB_IL_TX_ATTEN_GR2_FCH &&
710 channel <= CALIB_IL_TX_ATTEN_GR2_LCH)
711 return CALIB_CH_GROUP_2;
712
713 if (channel >= CALIB_IL_TX_ATTEN_GR3_FCH &&
714 channel <= CALIB_IL_TX_ATTEN_GR3_LCH)
715 return CALIB_CH_GROUP_3;
716
717 if (channel >= CALIB_IL_TX_ATTEN_GR4_FCH &&
718 channel <= CALIB_IL_TX_ATTEN_GR4_LCH)
719 return CALIB_CH_GROUP_4;
720
721 return -EINVAL;
722}
723
724static u32
725il4965_get_sub_band(const struct il_priv *il, u32 channel)
726{
727 s32 b = -1;
728
729 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
730 if (il->calib_info->band_info[b].ch_from == 0)
731 continue;
732
733 if (channel >= il->calib_info->band_info[b].ch_from &&
734 channel <= il->calib_info->band_info[b].ch_to)
735 break;
736 }
737
738 return b;
739}
740
741static s32
742il4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
743{
744 s32 val;
745
746 if (x2 == x1)
747 return y1;
748 else {
749 il4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
750 return val + y2;
751 }
752}
753
/**
 * il4965_interpolate_chan - Interpolate factory measurements for one channel
 *
 * Interpolates factory measurements from the two sample channels within a
 * sub-band, to apply to channel of interest.  Interpolation is proportional to
 * differences in channel frequencies, which is proportional to differences
 * in channel number.
 *
 * Fills @chan_info; returns 0 on success, -1 if @channel belongs to no
 * calibrated sub-band.
 */
static int
il4965_interpolate_chan(struct il_priv *il, u32 channel,
			struct il_eeprom_calib_ch_info *chan_info)
{
	s32 s = -1;
	u32 c;
	u32 m;
	const struct il_eeprom_calib_measure *m1;
	const struct il_eeprom_calib_measure *m2;
	struct il_eeprom_calib_measure *omeas;
	u32 ch_i1;
	u32 ch_i2;

	s = il4965_get_sub_band(il, channel);
	if (s >= EEPROM_TX_POWER_BANDS) {
		IL_ERR("Tx Power can not find channel %d\n", channel);
		return -1;
	}

	/* the two factory-measured sample channels bracketing @channel */
	ch_i1 = il->calib_info->band_info[s].ch1.ch_num;
	ch_i2 = il->calib_info->band_info[s].ch2.ch_num;
	chan_info->ch_num = (u8) channel;

	D_TXPOWER("channel %d subband %d factory cal ch %d & %d\n", channel, s,
		  ch_i1, ch_i2);

	/* interpolate each measurement for every chain/measurement slot */
	for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
		for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
			m1 = &(il->calib_info->band_info[s].ch1.
			       measurements[c][m]);
			m2 = &(il->calib_info->band_info[s].ch2.
			       measurements[c][m]);
			omeas = &(chan_info->measurements[c][m]);

			/* interpolate power, gain idx, temperature and
			 * power-amp detector between the sample channels */
			omeas->actual_pow =
			    (u8) il4965_interpolate_value(channel, ch_i1,
							  m1->actual_pow, ch_i2,
							  m2->actual_pow);
			omeas->gain_idx =
			    (u8) il4965_interpolate_value(channel, ch_i1,
							  m1->gain_idx, ch_i2,
							  m2->gain_idx);
			omeas->temperature =
			    (u8) il4965_interpolate_value(channel, ch_i1,
							  m1->temperature,
							  ch_i2,
							  m2->temperature);
			omeas->pa_det =
			    (s8) il4965_interpolate_value(channel, ch_i1,
							  m1->pa_det, ch_i2,
							  m2->pa_det);

			D_TXPOWER("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c,
				  m, m1->actual_pow, m2->actual_pow,
				  omeas->actual_pow);
			D_TXPOWER("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c,
				  m, m1->gain_idx, m2->gain_idx,
				  omeas->gain_idx);
			D_TXPOWER("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c,
				  m, m1->pa_det, m2->pa_det, omeas->pa_det);
			D_TXPOWER("chain %d meas %d  T1=%d  T2=%d  T=%d\n", c,
				  m, m1->temperature, m2->temperature,
				  omeas->temperature);
		}
	}

	return 0;
}
830
/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates.
 * Idxed by txpower-table rate entry; the value is subtracted from the
 * saturation power for that rate. */
static s32 back_off_table[] = {
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 40 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 40 MHz */
	10			/* CCK */
};
840
/* Thermal compensation values for txpower for various frequency ranges ...
 * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust.
 * Idxed by the CALIB_CH_GROUP_* value from il4965_get_tx_atten_grp();
 * each entry is a numerator/denominator pair. */
static struct il4965_txpower_comp_entry {
	s32 degrees_per_05db_a;		/* numerator */
	s32 degrees_per_05db_a_denom;	/* denominator */
} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
	{
	 9, 2},			/* group 0 5.2, ch  34-43 */
	{
	 4, 1},			/* group 1 5.2, ch  44-70 */
	{
	 4, 1},			/* group 2 5.2, ch  71-124 */
	{
	 4, 1},			/* group 3 5.2, ch 125-200 */
	{
	 3, 1}			/* group 4 2.4, ch   all */
};
858
859static s32
860get_min_power_idx(s32 rate_power_idx, u32 band)
861{
862 if (!band) {
863 if ((rate_power_idx & 7) <= 4)
864 return MIN_TX_GAIN_IDX_52GHZ_EXT;
865 }
866 return MIN_TX_GAIN_IDX;
867}
868
/* One txpower gain-table entry: DSP predistortion attenuation value
 * paired with a radio gain-control value. */
struct gain_entry {
	u8 dsp;
	u8 radio;
};
873
/* Gain settings idxed by [band][power_idx]: band 0 is 5.2 GHz,
 * band 1 is 2.4 GHz.  Idx 0 is the highest txpower; higher idxes
 * give progressively lower output power. */
static const struct gain_entry gain_table[2][108] = {
	/* 5.2GHz power gain idx table */
	{
	 {123, 0x3F},		/* highest txpower */
	 {117, 0x3F},
	 {110, 0x3F},
	 {104, 0x3F},
	 {98, 0x3F},
	 {110, 0x3E},
	 {104, 0x3E},
	 {98, 0x3E},
	 {110, 0x3D},
	 {104, 0x3D},
	 {98, 0x3D},
	 {110, 0x3C},
	 {104, 0x3C},
	 {98, 0x3C},
	 {110, 0x3B},
	 {104, 0x3B},
	 {98, 0x3B},
	 {110, 0x3A},
	 {104, 0x3A},
	 {98, 0x3A},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x25},
	 {104, 0x25},
	 {98, 0x25},
	 {110, 0x24},
	 {104, 0x24},
	 {98, 0x24},
	 {110, 0x23},
	 {104, 0x23},
	 {98, 0x23},
	 {110, 0x22},
	 {104, 0x18},
	 {98, 0x18},
	 {110, 0x17},
	 {104, 0x17},
	 {98, 0x17},
	 {110, 0x16},
	 {104, 0x16},
	 {98, 0x16},
	 {110, 0x15},
	 {104, 0x15},
	 {98, 0x15},
	 {110, 0x14},
	 {104, 0x14},
	 {98, 0x14},
	 {110, 0x13},
	 {104, 0x13},
	 {98, 0x13},
	 {110, 0x12},
	 {104, 0x08},
	 {98, 0x08},
	 {110, 0x07},
	 {104, 0x07},
	 {98, 0x07},
	 {110, 0x06},
	 {104, 0x06},
	 {98, 0x06},
	 {110, 0x05},
	 {104, 0x05},
	 {98, 0x05},
	 {110, 0x04},
	 {104, 0x04},
	 {98, 0x04},
	 {110, 0x03},
	 {104, 0x03},
	 {98, 0x03},
	 {110, 0x02},
	 {104, 0x02},
	 {98, 0x02},
	 {110, 0x01},
	 {104, 0x01},
	 {98, 0x01},
	 {110, 0x00},
	 {104, 0x00},
	 {98, 0x00},
	 {93, 0x00},
	 {88, 0x00},
	 {83, 0x00},
	 {78, 0x00},
	 },
	/* 2.4GHz power gain idx table */
	{
	 {110, 0x3f},		/* highest txpower */
	 {104, 0x3f},
	 {98, 0x3f},
	 {110, 0x3e},
	 {104, 0x3e},
	 {98, 0x3e},
	 {110, 0x3d},
	 {104, 0x3d},
	 {98, 0x3d},
	 {110, 0x3c},
	 {104, 0x3c},
	 {98, 0x3c},
	 {110, 0x3b},
	 {104, 0x3b},
	 {98, 0x3b},
	 {110, 0x3a},
	 {104, 0x3a},
	 {98, 0x3a},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x6},
	 {104, 0x6},
	 {98, 0x6},
	 {110, 0x5},
	 {104, 0x5},
	 {98, 0x5},
	 {110, 0x4},
	 {104, 0x4},
	 {98, 0x4},
	 {110, 0x3},
	 {104, 0x3},
	 {98, 0x3},
	 {110, 0x2},
	 {104, 0x2},
	 {98, 0x2},
	 {110, 0x1},
	 {104, 0x1},
	 {98, 0x1},
	 {110, 0x0},
	 {104, 0x0},
	 {98, 0x0},
	 {97, 0},
	 {96, 0},
	 {95, 0},
	 {94, 0},
	 {93, 0},
	 {92, 0},
	 {91, 0},
	 {90, 0},
	 {89, 0},
	 {88, 0},
	 {87, 0},
	 {86, 0},
	 {85, 0},
	 {84, 0},
	 {83, 0},
	 {82, 0},
	 {81, 0},
	 {80, 0},
	 {79, 0},
	 {78, 0},
	 {77, 0},
	 {76, 0},
	 {75, 0},
	 {74, 0},
	 {73, 0},
	 {72, 0},
	 {71, 0},
	 {70, 0},
	 {69, 0},
	 {68, 0},
	 {67, 0},
	 {66, 0},
	 {65, 0},
	 {64, 0},
	 {63, 0},
	 {62, 0},
	 {61, 0},
	 {60, 0},
	 {59, 0},
	 }
};
1098
1099static int
1100il4965_fill_txpower_tbl(struct il_priv *il, u8 band, u16 channel, u8 is_ht40,
1101 u8 ctrl_chan_high,
1102 struct il4965_tx_power_db *tx_power_tbl)
1103{
1104 u8 saturation_power;
1105 s32 target_power;
1106 s32 user_target_power;
1107 s32 power_limit;
1108 s32 current_temp;
1109 s32 reg_limit;
1110 s32 current_regulatory;
1111 s32 txatten_grp = CALIB_CH_GROUP_MAX;
1112 int i;
1113 int c;
1114 const struct il_channel_info *ch_info = NULL;
1115 struct il_eeprom_calib_ch_info ch_eeprom_info;
1116 const struct il_eeprom_calib_measure *measurement;
1117 s16 voltage;
1118 s32 init_voltage;
1119 s32 voltage_compensation;
1120 s32 degrees_per_05db_num;
1121 s32 degrees_per_05db_denom;
1122 s32 factory_temp;
1123 s32 temperature_comp[2];
1124 s32 factory_gain_idx[2];
1125 s32 factory_actual_pwr[2];
1126 s32 power_idx;
1127
1128 /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
1129 * are used for idxing into txpower table) */
1130 user_target_power = 2 * il->tx_power_user_lmt;
1131
1132 /* Get current (RXON) channel, band, width */
1133 D_TXPOWER("chan %d band %d is_ht40 %d\n", channel, band, is_ht40);
1134
1135 ch_info = il_get_channel_info(il, il->band, channel);
1136
1137 if (!il_is_channel_valid(ch_info))
1138 return -EINVAL;
1139
1140 /* get txatten group, used to select 1) thermal txpower adjustment
1141 * and 2) mimo txpower balance between Tx chains. */
1142 txatten_grp = il4965_get_tx_atten_grp(channel);
1143 if (txatten_grp < 0) {
1144 IL_ERR("Can't find txatten group for channel %d.\n", channel);
1145 return txatten_grp;
1146 }
1147
1148 D_TXPOWER("channel %d belongs to txatten group %d\n", channel,
1149 txatten_grp);
1150
1151 if (is_ht40) {
1152 if (ctrl_chan_high)
1153 channel -= 2;
1154 else
1155 channel += 2;
1156 }
1157
1158 /* hardware txpower limits ...
1159 * saturation (clipping distortion) txpowers are in half-dBm */
1160 if (band)
1161 saturation_power = il->calib_info->saturation_power24;
1162 else
1163 saturation_power = il->calib_info->saturation_power52;
1164
1165 if (saturation_power < IL_TX_POWER_SATURATION_MIN ||
1166 saturation_power > IL_TX_POWER_SATURATION_MAX) {
1167 if (band)
1168 saturation_power = IL_TX_POWER_DEFAULT_SATURATION_24;
1169 else
1170 saturation_power = IL_TX_POWER_DEFAULT_SATURATION_52;
1171 }
1172
1173 /* regulatory txpower limits ... reg_limit values are in half-dBm,
1174 * max_power_avg values are in dBm, convert * 2 */
1175 if (is_ht40)
1176 reg_limit = ch_info->ht40_max_power_avg * 2;
1177 else
1178 reg_limit = ch_info->max_power_avg * 2;
1179
1180 if ((reg_limit < IL_TX_POWER_REGULATORY_MIN) ||
1181 (reg_limit > IL_TX_POWER_REGULATORY_MAX)) {
1182 if (band)
1183 reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_24;
1184 else
1185 reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_52;
1186 }
1187
1188 /* Interpolate txpower calibration values for this channel,
1189 * based on factory calibration tests on spaced channels. */
1190 il4965_interpolate_chan(il, channel, &ch_eeprom_info);
1191
1192 /* calculate tx gain adjustment based on power supply voltage */
1193 voltage = le16_to_cpu(il->calib_info->voltage);
1194 init_voltage = (s32) le32_to_cpu(il->card_alive_init.voltage);
1195 voltage_compensation =
1196 il4965_get_voltage_compensation(voltage, init_voltage);
1197
1198 D_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n", init_voltage,
1199 voltage, voltage_compensation);
1200
1201 /* get current temperature (Celsius) */
1202 current_temp = max(il->temperature, IL_TX_POWER_TEMPERATURE_MIN);
1203 current_temp = min(il->temperature, IL_TX_POWER_TEMPERATURE_MAX);
1204 current_temp = KELVIN_TO_CELSIUS(current_temp);
1205
1206 /* select thermal txpower adjustment params, based on channel group
1207 * (same frequency group used for mimo txatten adjustment) */
1208 degrees_per_05db_num =
1209 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
1210 degrees_per_05db_denom =
1211 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
1212
1213 /* get per-chain txpower values from factory measurements */
1214 for (c = 0; c < 2; c++) {
1215 measurement = &ch_eeprom_info.measurements[c][1];
1216
1217 /* txgain adjustment (in half-dB steps) based on difference
1218 * between factory and current temperature */
1219 factory_temp = measurement->temperature;
1220 il4965_math_div_round((current_temp -
1221 factory_temp) * degrees_per_05db_denom,
1222 degrees_per_05db_num,
1223 &temperature_comp[c]);
1224
1225 factory_gain_idx[c] = measurement->gain_idx;
1226 factory_actual_pwr[c] = measurement->actual_pow;
1227
1228 D_TXPOWER("chain = %d\n", c);
1229 D_TXPOWER("fctry tmp %d, " "curr tmp %d, comp %d steps\n",
1230 factory_temp, current_temp, temperature_comp[c]);
1231
1232 D_TXPOWER("fctry idx %d, fctry pwr %d\n", factory_gain_idx[c],
1233 factory_actual_pwr[c]);
1234 }
1235
1236 /* for each of 33 bit-rates (including 1 for CCK) */
1237 for (i = 0; i < POWER_TBL_NUM_ENTRIES; i++) {
1238 u8 is_mimo_rate;
1239 union il4965_tx_power_dual_stream tx_power;
1240
1241 /* for mimo, reduce each chain's txpower by half
1242 * (3dB, 6 steps), so total output power is regulatory
1243 * compliant. */
1244 if (i & 0x8) {
1245 current_regulatory =
1246 reg_limit -
1247 IL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
1248 is_mimo_rate = 1;
1249 } else {
1250 current_regulatory = reg_limit;
1251 is_mimo_rate = 0;
1252 }
1253
1254 /* find txpower limit, either hardware or regulatory */
1255 power_limit = saturation_power - back_off_table[i];
1256 if (power_limit > current_regulatory)
1257 power_limit = current_regulatory;
1258
1259 /* reduce user's txpower request if necessary
1260 * for this rate on this channel */
1261 target_power = user_target_power;
1262 if (target_power > power_limit)
1263 target_power = power_limit;
1264
1265 D_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n", i,
1266 saturation_power - back_off_table[i],
1267 current_regulatory, user_target_power, target_power);
1268
1269 /* for each of 2 Tx chains (radio transmitters) */
1270 for (c = 0; c < 2; c++) {
1271 s32 atten_value;
1272
1273 if (is_mimo_rate)
1274 atten_value =
1275 (s32) le32_to_cpu(il->card_alive_init.
1276 tx_atten[txatten_grp][c]);
1277 else
1278 atten_value = 0;
1279
1280 /* calculate idx; higher idx means lower txpower */
1281 power_idx =
1282 (u8) (factory_gain_idx[c] -
1283 (target_power - factory_actual_pwr[c]) -
1284 temperature_comp[c] - voltage_compensation +
1285 atten_value);
1286
1287/* D_TXPOWER("calculated txpower idx %d\n",
1288 power_idx); */
1289
1290 if (power_idx < get_min_power_idx(i, band))
1291 power_idx = get_min_power_idx(i, band);
1292
1293 /* adjust 5 GHz idx to support negative idxes */
1294 if (!band)
1295 power_idx += 9;
1296
1297 /* CCK, rate 32, reduce txpower for CCK */
1298 if (i == POWER_TBL_CCK_ENTRY)
1299 power_idx +=
1300 IL_TX_POWER_CCK_COMPENSATION_C_STEP;
1301
1302 /* stay within the table! */
1303 if (power_idx > 107) {
1304 IL_WARN("txpower idx %d > 107\n", power_idx);
1305 power_idx = 107;
1306 }
1307 if (power_idx < 0) {
1308 IL_WARN("txpower idx %d < 0\n", power_idx);
1309 power_idx = 0;
1310 }
1311
1312 /* fill txpower command for this rate/chain */
1313 tx_power.s.radio_tx_gain[c] =
1314 gain_table[band][power_idx].radio;
1315 tx_power.s.dsp_predis_atten[c] =
1316 gain_table[band][power_idx].dsp;
1317
1318 D_TXPOWER("chain %d mimo %d idx %d "
1319 "gain 0x%02x dsp %d\n", c, atten_value,
1320 power_idx, tx_power.s.radio_tx_gain[c],
1321 tx_power.s.dsp_predis_atten[c]);
1322 } /* for each chain */
1323
1324 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
1325
1326 } /* for each rate */
1327
1328 return 0;
1329}
1330
/**
 * il4965_send_tx_power - Configure the TXPOWER level user limit
 *
 * Uses the active RXON for channel, band, and characteristics (ht40, high)
 * The power limit is taken from il->tx_power_user_lmt.
 *
 * Returns 0 on success, -EAGAIN if a scan is in progress, or the error
 * from building/sending the txpower table command.
 */
static int
il4965_send_tx_power(struct il_priv *il)
{
	struct il4965_txpowertable_cmd cmd = { 0 };
	int ret;
	u8 band = 0;
	bool is_ht40 = false;
	u8 ctrl_chan_high = 0;
	struct il_rxon_context *ctx = &il->ctx;

	/* txpower must not be reprogrammed while the hardware scans */
	if (WARN_ONCE
	    (test_bit(S_SCAN_HW, &il->status),
	     "TX Power requested while scanning!\n"))
		return -EAGAIN;

	/* command band flag: 1 = 2.4 GHz, 0 = 5.2 GHz */
	band = il->band == IEEE80211_BAND_2GHZ;

	is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);

	if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
		ctrl_chan_high = 1;

	cmd.band = band;
	cmd.channel = ctx->active.channel;

	ret =
	    il4965_fill_txpower_tbl(il, band, le16_to_cpu(ctx->active.channel),
				    is_ht40, ctrl_chan_high, &cmd.tx_power);
	if (ret)
		goto out;

	ret = il_send_cmd_pdu(il, C_TX_PWR_TBL, sizeof(cmd), &cmd);

out:
	return ret;
}
1373
/* Send RXON_ASSOC: a lightweight update of association-related RXON
 * fields (flags, filter, rates, rx chain) without a full RXON.
 * Skipped entirely when staging already matches active. */
static int
il4965_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
{
	int ret = 0;
	struct il4965_rxon_assoc_cmd rxon_assoc;
	const struct il_rxon_cmd *rxon1 = &ctx->staging;
	const struct il_rxon_cmd *rxon2 = &ctx->active;

	/* nothing changed in the fields RXON_ASSOC covers -> no command */
	if (rxon1->flags == rxon2->flags &&
	    rxon1->filter_flags == rxon2->filter_flags &&
	    rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
	    rxon1->ofdm_ht_single_stream_basic_rates ==
	    rxon2->ofdm_ht_single_stream_basic_rates &&
	    rxon1->ofdm_ht_dual_stream_basic_rates ==
	    rxon2->ofdm_ht_dual_stream_basic_rates &&
	    rxon1->rx_chain == rxon2->rx_chain &&
	    rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
		D_INFO("Using current RXON_ASSOC.  Not resending.\n");
		return 0;
	}

	rxon_assoc.flags = ctx->staging.flags;
	rxon_assoc.filter_flags = ctx->staging.filter_flags;
	rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
	rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
	rxon_assoc.reserved = 0;
	rxon_assoc.ofdm_ht_single_stream_basic_rates =
	    ctx->staging.ofdm_ht_single_stream_basic_rates;
	rxon_assoc.ofdm_ht_dual_stream_basic_rates =
	    ctx->staging.ofdm_ht_dual_stream_basic_rates;
	rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;

	/* sent asynchronously; no completion callback */
	ret =
	    il_send_cmd_pdu_async(il, C_RXON_ASSOC, sizeof(rxon_assoc),
				  &rxon_assoc, NULL);

	return ret;
}
1412
/* Commit the staging RXON configuration to the device.  Uses the cheap
 * RXON_ASSOC path when possible; otherwise sends a full RXON (clearing
 * the association bit first when currently associated), restores the
 * uCode station table afterwards, and reprograms txpower.
 * Returns 0 on success or a negative error. */
static int
il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
{
	/* cast away the const for active_rxon in this function */
	struct il_rxon_cmd *active_rxon = (void *)&ctx->active;
	int ret;
	bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);

	if (!il_is_alive(il))
		return -EBUSY;

	if (!ctx->is_active)
		return 0;

	/* always get timestamp with Rx frame */
	ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;

	ret = il_check_rxon_cmd(il, ctx);
	if (ret) {
		IL_ERR("Invalid RXON configuration. Not committing.\n");
		return -EINVAL;
	}

	/*
	 * receive commit_rxon request
	 * abort any previous channel switch if still in process
	 */
	if (test_bit(S_CHANNEL_SWITCH_PENDING, &il->status) &&
	    il->switch_channel != ctx->staging.channel) {
		D_11H("abort channel switch on %d\n",
		      le16_to_cpu(il->switch_channel));
		il_chswitch_done(il, false);
	}

	/* If we don't need to send a full RXON, we can use
	 * il_rxon_assoc_cmd which is used to reconfigure filter
	 * and other flags for the current radio configuration. */
	if (!il_full_rxon_required(il, ctx)) {
		ret = il_send_rxon_assoc(il, ctx);
		if (ret) {
			IL_ERR("Error setting RXON_ASSOC (%d)\n", ret);
			return ret;
		}

		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
		il_print_rx_config_cmd(il, ctx);
		/*
		 * We do not commit tx power settings while channel changing,
		 * do it now if tx power changed.
		 */
		il_set_tx_power(il, il->tx_power_next, false);
		return 0;
	}

	/* If we are currently associated and the new config requires
	 * an RXON_ASSOC and the new config wants the associated mask enabled,
	 * we must clear the associated from the active configuration
	 * before we apply the new config */
	if (il_is_associated_ctx(ctx) && new_assoc) {
		D_INFO("Toggling associated bit on current RXON\n");
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;

		ret =
		    il_send_cmd_pdu(il, ctx->rxon_cmd,
				    sizeof(struct il_rxon_cmd), active_rxon);

		/* If the mask clearing failed then we set
		 * active_rxon back to what it was previously */
		if (ret) {
			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
			IL_ERR("Error clearing ASSOC_MSK (%d)\n", ret);
			return ret;
		}
		/* unassoc RXON wiped the uCode station table: rebuild it */
		il_clear_ucode_stations(il, ctx);
		il_restore_stations(il, ctx);
		ret = il4965_restore_default_wep_keys(il, ctx);
		if (ret) {
			IL_ERR("Failed to restore WEP keys (%d)\n", ret);
			return ret;
		}
	}

	D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
	       "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
	       le16_to_cpu(ctx->staging.channel), ctx->staging.bssid_addr);

	il_set_rxon_hwcrypto(il, ctx, !il->cfg->mod_params->sw_crypto);

	/* Apply the new configuration
	 * RXON unassoc clears the station table in uCode so restoration of
	 * stations is needed after it (the RXON command) completes
	 */
	if (!new_assoc) {
		ret =
		    il_send_cmd_pdu(il, ctx->rxon_cmd,
				    sizeof(struct il_rxon_cmd), &ctx->staging);
		if (ret) {
			IL_ERR("Error setting new RXON (%d)\n", ret);
			return ret;
		}
		D_INFO("Return from !new_assoc RXON.\n");
		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
		il_clear_ucode_stations(il, ctx);
		il_restore_stations(il, ctx);
		ret = il4965_restore_default_wep_keys(il, ctx);
		if (ret) {
			IL_ERR("Failed to restore WEP keys (%d)\n", ret);
			return ret;
		}
	}
	if (new_assoc) {
		il->start_calib = 0;
		/* Apply the new configuration
		 * RXON assoc doesn't clear the station table in uCode,
		 */
		ret =
		    il_send_cmd_pdu(il, ctx->rxon_cmd,
				    sizeof(struct il_rxon_cmd), &ctx->staging);
		if (ret) {
			IL_ERR("Error setting new RXON (%d)\n", ret);
			return ret;
		}
		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
	}
	il_print_rx_config_cmd(il, ctx);

	il4965_init_sensitivity(il);

	/* If we issue a new RXON command which required a tune then we must
	 * send a new TXPOWER command or we won't be able to Tx any frames */
	ret = il_set_tx_power(il, il->tx_power_next, true);
	if (ret) {
		IL_ERR("Error sending TX power (%d)\n", ret);
		return ret;
	}

	return 0;
}
1551
/* Build and send a C_CHANNEL_SWITCH command for an 802.11h channel
 * switch announcement: computes the uCode switch time from the CSA
 * count, TSF and beacon interval, and includes a freshly built txpower
 * table for the target channel.  Returns 0 or a negative error. */
static int
il4965_hw_channel_switch(struct il_priv *il,
			 struct ieee80211_channel_switch *ch_switch)
{
	struct il_rxon_context *ctx = &il->ctx;
	int rc;
	u8 band = 0;
	bool is_ht40 = false;
	u8 ctrl_chan_high = 0;
	struct il4965_channel_switch_cmd cmd;
	const struct il_channel_info *ch_info;
	u32 switch_time_in_usec, ucode_switch_time;
	u16 ch;
	u32 tsf_low;
	u8 switch_count;
	u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
	struct ieee80211_vif *vif = ctx->vif;
	/* command band flag: 1 = 2.4 GHz, 0 = 5.2 GHz */
	band = il->band == IEEE80211_BAND_2GHZ;

	is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);

	if (is_ht40 && (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
		ctrl_chan_high = 1;

	cmd.band = band;
	cmd.expect_beacon = 0;
	ch = ch_switch->channel->hw_value;
	cmd.channel = cpu_to_le16(ch);
	cmd.rxon_flags = ctx->staging.flags;
	cmd.rxon_filter_flags = ctx->staging.filter_flags;
	switch_count = ch_switch->count;
	tsf_low = ch_switch->timestamp & 0x0ffffffff;
	/*
	 * calculate the ucode channel switch time
	 * adding TSF as one of the factor for when to switch
	 */
	if (il->ucode_beacon_time > tsf_low && beacon_interval) {
		/* deduct the beacons that already elapsed since the TSF
		 * reference from the announced countdown */
		if (switch_count >
		    ((il->ucode_beacon_time - tsf_low) / beacon_interval)) {
			switch_count -=
			    (il->ucode_beacon_time - tsf_low) / beacon_interval;
		} else
			switch_count = 0;
	}
	if (switch_count <= 1)
		cmd.switch_time = cpu_to_le32(il->ucode_beacon_time);
	else {
		/* schedule the switch switch_count beacons ahead */
		switch_time_in_usec =
		    vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
		ucode_switch_time =
		    il_usecs_to_beacons(il, switch_time_in_usec,
					beacon_interval);
		cmd.switch_time =
		    il_add_beacon_time(il, il->ucode_beacon_time,
				       ucode_switch_time, beacon_interval);
	}
	D_11H("uCode time for the switch is 0x%x\n", cmd.switch_time);
	ch_info = il_get_channel_info(il, il->band, ch);
	if (ch_info)
		/* radar channels require a beacon before transmitting */
		cmd.expect_beacon = il_is_channel_radar(ch_info);
	else {
		IL_ERR("invalid channel switch from %u to %u\n",
		       ctx->active.channel, ch);
		return -EFAULT;
	}

	rc = il4965_fill_txpower_tbl(il, band, ch, is_ht40, ctrl_chan_high,
				     &cmd.tx_power);
	if (rc) {
		D_11H("error:%d  fill txpower_tbl\n", rc);
		return rc;
	}

	return il_send_cmd_pdu(il, C_CHANNEL_SWITCH, sizeof(cmd), &cmd);
}
1627
1628/**
1629 * il4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
1630 */
1631static void
1632il4965_txq_update_byte_cnt_tbl(struct il_priv *il, struct il_tx_queue *txq,
1633 u16 byte_cnt)
1634{
1635 struct il4965_scd_bc_tbl *scd_bc_tbl = il->scd_bc_tbls.addr;
1636 int txq_id = txq->q.id;
1637 int write_ptr = txq->q.write_ptr;
1638 int len = byte_cnt + IL_TX_CRC_SIZE + IL_TX_DELIMITER_SIZE;
1639 __le16 bc_ent;
1640
1641 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
1642
1643 bc_ent = cpu_to_le16(len & 0xFFF);
1644 /* Set up byte count within first 256 entries */
1645 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
1646
1647 /* If within first 64 entries, duplicate at end */
1648 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
1649 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
1650 bc_ent;
1651}
1652
/**
 * il4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
 *
 * Uses the R1/R2/R3/R4 thermal calibration values from the "initialize"
 * ALIVE response (HT40 set when the last stats notification had HT40
 * mode set) and the latest stats temperature reading.
 *
 * A return of <0 indicates bogus data (R1 == R3 calibration conflict).
 */
static int
il4965_hw_get_temperature(struct il_priv *il)
{
	s32 temperature;
	s32 vt;
	s32 R1, R2, R3;
	u32 R4;

	/* pick the HT40 calibration set when the last stats were HT40 */
	if (test_bit(S_TEMPERATURE, &il->status) &&
	    (il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)) {
		D_TEMP("Running HT40 temperature calibration\n");
		R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[1]);
		R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[1]);
		R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[1]);
		R4 = le32_to_cpu(il->card_alive_init.therm_r4[1]);
	} else {
		D_TEMP("Running temperature calibration\n");
		R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[0]);
		R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[0]);
		R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[0]);
		R4 = le32_to_cpu(il->card_alive_init.therm_r4[0]);
	}

	/*
	 * Temperature is only 23 bits, so sign extend out to 32.
	 *
	 * NOTE If we haven't received a stats notification yet
	 * with an updated temperature, use R4 provided to us in the
	 * "initialize" ALIVE response.
	 */
	if (!test_bit(S_TEMPERATURE, &il->status))
		vt = sign_extend32(R4, 23);
	else
		vt = sign_extend32(le32_to_cpu
				   (il->_4965.stats.general.common.temperature),
				   23);

	D_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);

	if (R3 == R1) {
		IL_ERR("Calibration conflict R1 == R3\n");
		return -1;
	}

	/* Calculate temperature in degrees Kelvin, adjust by 97%.
	 * Add offset to center the adjustment around 0 degrees Centigrade. */
	temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
	temperature /= (R3 - R1);
	temperature =
	    (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;

	D_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
	       KELVIN_TO_CELSIUS(temperature));

	return temperature;
}
1715
/* Adjust Txpower only if temperature variance is greater than threshold.
 * Threshold is in the same (Kelvin) units as il->temperature. */
#define IL_TEMPERATURE_THRESHOLD   3

/**
 * il4965_is_temp_calib_needed - determines if new calibration is needed
 *
 * If the temperature has changed sufficiently (by at least
 * IL_TEMPERATURE_THRESHOLD since the last calibration), then a
 * recalibration is needed.  Returns 1 if so, 0 otherwise.
 *
 * Assumes caller will replace il->last_temperature once calibration
 * executed.
 */
static int
il4965_is_temp_calib_needed(struct il_priv *il)
{
	int temp_diff;

	/* without a stats notification the temperature is not current */
	if (!test_bit(S_STATS, &il->status)) {
		D_TEMP("Temperature not updated -- no stats.\n");
		return 0;
	}

	temp_diff = il->temperature - il->last_temperature;

	/* get absolute value */
	if (temp_diff < 0) {
		D_POWER("Getting cooler, delta %d\n", temp_diff);
		temp_diff = -temp_diff;
	} else if (temp_diff == 0)
		D_POWER("Temperature unchanged\n");
	else
		D_POWER("Getting warmer, delta %d\n", temp_diff);

	if (temp_diff < IL_TEMPERATURE_THRESHOLD) {
		D_POWER(" => thermal txpower calib not needed\n");
		return 0;
	}

	D_POWER(" => thermal txpower calib needed\n");

	return 1;
}
1758
/* Read the current calibrated temperature, store it in il->temperature,
 * and queue txpower recalibration work when the change warrants it
 * (and no scan is running and txpower calibration is not disabled). */
static void
il4965_temperature_calib(struct il_priv *il)
{
	s32 temp;

	temp = il4965_hw_get_temperature(il);
	if (IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
		return;

	if (il->temperature != temp) {
		if (il->temperature)
			D_TEMP("Temperature changed " "from %dC to %dC\n",
			       KELVIN_TO_CELSIUS(il->temperature),
			       KELVIN_TO_CELSIUS(temp));
		else
			/* first reading since il->temperature was zero */
			D_TEMP("Temperature " "initialized to %dC\n",
			       KELVIN_TO_CELSIUS(temp));
	}

	il->temperature = temp;
	set_bit(S_TEMPERATURE, &il->status);

	if (!il->disable_tx_power_cal &&
	    unlikely(!test_bit(S_SCANNING, &il->status)) &&
	    il4965_is_temp_calib_needed(il))
		queue_work(il->workqueue, &il->txpower_work);
}
1786
1787static u16
1788il4965_get_hcmd_size(u8 cmd_id, u16 len)
1789{
1790 switch (cmd_id) {
1791 case C_RXON:
1792 return (u16) sizeof(struct il4965_rxon_cmd);
1793 default:
1794 return len;
1795 }
1796}
1797
/*
 * Marshal a generic il_addsta_cmd into the 4965-specific ADD_STA host
 * command layout at @data; returns the number of bytes written.
 */
static u16
il4965_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
{
	struct il4965_addsta_cmd *addsta = (struct il4965_addsta_cmd *)data;
	addsta->mode = cmd->mode;
	memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
	memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
	addsta->station_flags = cmd->station_flags;
	addsta->station_flags_msk = cmd->station_flags_msk;
	addsta->tid_disable_tx = cmd->tid_disable_tx;
	addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
	addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
	addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
	addsta->sleep_tx_count = cmd->sleep_tx_count;
	/* clear reserved fields explicitly -- @data may be uninitialized */
	addsta->reserved1 = cpu_to_le16(0);
	addsta->reserved2 = cpu_to_le16(0);

	return (u16) sizeof(struct il4965_addsta_cmd);
}
1817
/*
 * Extract the scheduler sequence number from a Tx response.  The SSN word
 * sits immediately after frame_count per-frame status entries; the result
 * is masked with MAX_SN to keep only the sequence-number bits.
 */
static inline u32
il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
{
	return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
}
1823
1824static inline u32
1825il4965_tx_status_to_mac80211(u32 status)
1826{
1827 status &= TX_STATUS_MSK;
1828
1829 switch (status) {
1830 case TX_STATUS_SUCCESS:
1831 case TX_STATUS_DIRECT_DONE:
1832 return IEEE80211_TX_STAT_ACK;
1833 case TX_STATUS_FAIL_DEST_PS:
1834 return IEEE80211_TX_STAT_TX_FILTERED;
1835 default:
1836 return 0;
1837 }
1838}
1839
1840static inline bool
1841il4965_is_tx_success(u32 status)
1842{
1843 status &= TX_STATUS_MSK;
1844 return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE);
1845}
1846
1847/**
1848 * il4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
1849 */
1850static int
1851il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
1852 struct il4965_tx_resp *tx_resp, int txq_id,
1853 u16 start_idx)
1854{
1855 u16 status;
1856 struct agg_tx_status *frame_status = tx_resp->u.agg_status;
1857 struct ieee80211_tx_info *info = NULL;
1858 struct ieee80211_hdr *hdr = NULL;
1859 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
1860 int i, sh, idx;
1861 u16 seq;
1862 if (agg->wait_for_ba)
1863 D_TX_REPLY("got tx response w/o block-ack\n");
1864
1865 agg->frame_count = tx_resp->frame_count;
1866 agg->start_idx = start_idx;
1867 agg->rate_n_flags = rate_n_flags;
1868 agg->bitmap = 0;
1869
1870 /* num frames attempted by Tx command */
1871 if (agg->frame_count == 1) {
1872 /* Only one frame was attempted; no block-ack will arrive */
1873 status = le16_to_cpu(frame_status[0].status);
1874 idx = start_idx;
1875
1876 D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
1877 agg->frame_count, agg->start_idx, idx);
1878
1879 info = IEEE80211_SKB_CB(il->txq[txq_id].txb[idx].skb);
1880 info->status.rates[0].count = tx_resp->failure_frame + 1;
1881 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1882 info->flags |= il4965_tx_status_to_mac80211(status);
1883 il4965_hwrate_to_tx_control(il, rate_n_flags, info);
1884
1885 D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff,
1886 tx_resp->failure_frame);
1887 D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);
1888
1889 agg->wait_for_ba = 0;
1890 } else {
1891 /* Two or more frames were attempted; expect block-ack */
1892 u64 bitmap = 0;
1893 int start = agg->start_idx;
1894
1895 /* Construct bit-map of pending frames within Tx win */
1896 for (i = 0; i < agg->frame_count; i++) {
1897 u16 sc;
1898 status = le16_to_cpu(frame_status[i].status);
1899 seq = le16_to_cpu(frame_status[i].sequence);
1900 idx = SEQ_TO_IDX(seq);
1901 txq_id = SEQ_TO_QUEUE(seq);
1902
1903 if (status &
1904 (AGG_TX_STATE_FEW_BYTES_MSK |
1905 AGG_TX_STATE_ABORT_MSK))
1906 continue;
1907
1908 D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
1909 agg->frame_count, txq_id, idx);
1910
1911 hdr = il_tx_queue_get_hdr(il, txq_id, idx);
1912 if (!hdr) {
1913 IL_ERR("BUG_ON idx doesn't point to valid skb"
1914 " idx=%d, txq_id=%d\n", idx, txq_id);
1915 return -1;
1916 }
1917
1918 sc = le16_to_cpu(hdr->seq_ctrl);
1919 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
1920 IL_ERR("BUG_ON idx doesn't match seq control"
1921 " idx=%d, seq_idx=%d, seq=%d\n", idx,
1922 SEQ_TO_SN(sc), hdr->seq_ctrl);
1923 return -1;
1924 }
1925
1926 D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
1927 SEQ_TO_SN(sc));
1928
1929 sh = idx - start;
1930 if (sh > 64) {
1931 sh = (start - idx) + 0xff;
1932 bitmap = bitmap << sh;
1933 sh = 0;
1934 start = idx;
1935 } else if (sh < -64)
1936 sh = 0xff - (start - idx);
1937 else if (sh < 0) {
1938 sh = start - idx;
1939 start = idx;
1940 bitmap = bitmap << sh;
1941 sh = 0;
1942 }
1943 bitmap |= 1ULL << sh;
1944 D_TX_REPLY("start=%d bitmap=0x%llx\n", start,
1945 (unsigned long long)bitmap);
1946 }
1947
1948 agg->bitmap = bitmap;
1949 agg->start_idx = start;
1950 D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
1951 agg->frame_count, agg->start_idx,
1952 (unsigned long long)agg->bitmap);
1953
1954 if (bitmap)
1955 agg->wait_for_ba = 1;
1956 }
1957 return 0;
1958}
1959
/*
 * Look up the driver station-table idx for MAC address @addr.
 * Returns IL_INVALID_STATION when the address is unknown or the entry
 * has not yet been fully activated in uCode.
 */
static u8
il4965_find_station(struct il_priv *il, const u8 * addr)
{
	int i;
	int start = 0;
	int ret = IL_INVALID_STATION;
	unsigned long flags;

	/* NOTE(review): in IBSS mode the search presumably skips reserved
	 * low entries by starting at IL_STA_ID -- confirm table layout */
	if ((il->iw_mode == NL80211_IFTYPE_ADHOC))
		start = IL_STA_ID;

	if (is_broadcast_ether_addr(addr))
		return il->ctx.bcast_sta_id;

	spin_lock_irqsave(&il->sta_lock, flags);
	for (i = start; i < il->hw_params.max_stations; i++)
		if (il->stations[i].used &&
		    (!compare_ether_addr(il->stations[i].sta.sta.addr, addr))) {
			ret = i;
			goto out;
		}

	D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations);

out:
	/*
	 * It may be possible that more commands interacting with stations
	 * arrive before we completed processing the adding of
	 * station
	 */
	if (ret != IL_INVALID_STATION &&
	    (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
	     ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) &&
	      (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) {
		IL_ERR("Requested station info for sta %d before ready.\n",
		       ret);
		ret = IL_INVALID_STATION;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return ret;
}
2001
2002static int
2003il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
2004{
2005 if (il->iw_mode == NL80211_IFTYPE_STATION) {
2006 return IL_AP_ID;
2007 } else {
2008 u8 *da = ieee80211_get_DA(hdr);
2009 return il4965_find_station(il, da);
2010 }
2011}
2012
2013/**
2014 * il4965_hdl_tx - Handle standard (non-aggregation) Tx response
2015 */
2016static void
2017il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
2018{
2019 struct il_rx_pkt *pkt = rxb_addr(rxb);
2020 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
2021 int txq_id = SEQ_TO_QUEUE(sequence);
2022 int idx = SEQ_TO_IDX(sequence);
2023 struct il_tx_queue *txq = &il->txq[txq_id];
2024 struct ieee80211_hdr *hdr;
2025 struct ieee80211_tx_info *info;
2026 struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
2027 u32 status = le32_to_cpu(tx_resp->u.status);
2028 int uninitialized_var(tid);
2029 int sta_id;
2030 int freed;
2031 u8 *qc = NULL;
2032 unsigned long flags;
2033
2034 if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
2035 IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
2036 "is out of range [0-%d] %d %d\n", txq_id, idx,
2037 txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
2038 return;
2039 }
2040
2041 txq->time_stamp = jiffies;
2042 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
2043 memset(&info->status, 0, sizeof(info->status));
2044
2045 hdr = il_tx_queue_get_hdr(il, txq_id, idx);
2046 if (ieee80211_is_data_qos(hdr->frame_control)) {
2047 qc = ieee80211_get_qos_ctl(hdr);
2048 tid = qc[0] & 0xf;
2049 }
2050
2051 sta_id = il4965_get_ra_sta_id(il, hdr);
2052 if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) {
2053 IL_ERR("Station not known\n");
2054 return;
2055 }
2056
2057 spin_lock_irqsave(&il->sta_lock, flags);
2058 if (txq->sched_retry) {
2059 const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
2060 struct il_ht_agg *agg = NULL;
2061 WARN_ON(!qc);
2062
2063 agg = &il->stations[sta_id].tid[tid].agg;
2064
2065 il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx);
2066
2067 /* check if BAR is needed */
2068 if ((tx_resp->frame_count == 1) &&
2069 !il4965_is_tx_success(status))
2070 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
2071
2072 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
2073 idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
2074 D_TX_REPLY("Retry scheduler reclaim scd_ssn "
2075 "%d idx %d\n", scd_ssn, idx);
2076 freed = il4965_tx_queue_reclaim(il, txq_id, idx);
2077 if (qc)
2078 il4965_free_tfds_in_queue(il, sta_id, tid,
2079 freed);
2080
2081 if (il->mac80211_registered &&
2082 il_queue_space(&txq->q) > txq->q.low_mark &&
2083 agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
2084 il_wake_queue(il, txq);
2085 }
2086 } else {
2087 info->status.rates[0].count = tx_resp->failure_frame + 1;
2088 info->flags |= il4965_tx_status_to_mac80211(status);
2089 il4965_hwrate_to_tx_control(il,
2090 le32_to_cpu(tx_resp->rate_n_flags),
2091 info);
2092
2093 D_TX_REPLY("TXQ %d status %s (0x%08x) "
2094 "rate_n_flags 0x%x retries %d\n", txq_id,
2095 il4965_get_tx_fail_reason(status), status,
2096 le32_to_cpu(tx_resp->rate_n_flags),
2097 tx_resp->failure_frame);
2098
2099 freed = il4965_tx_queue_reclaim(il, txq_id, idx);
2100 if (qc && likely(sta_id != IL_INVALID_STATION))
2101 il4965_free_tfds_in_queue(il, sta_id, tid, freed);
2102 else if (sta_id == IL_INVALID_STATION)
2103 D_TX_REPLY("Station not known\n");
2104
2105 if (il->mac80211_registered &&
2106 il_queue_space(&txq->q) > txq->q.low_mark)
2107 il_wake_queue(il, txq);
2108 }
2109 if (qc && likely(sta_id != IL_INVALID_STATION))
2110 il4965_txq_check_empty(il, sta_id, tid, txq_id);
2111
2112 il4965_check_abort_status(il, tx_resp->frame_count, status);
2113
2114 spin_unlock_irqrestore(&il->sta_lock, flags);
2115}
2116
/*
 * Handle a uCode beacon notification: log the Tx status of our own
 * beacon and record the IBSS-manager status reported by the firmware.
 */
static void
il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il4965_beacon_notif *beacon = (void *)pkt->u.raw;
	/* __maybe_unused: only consumed by the debug print below */
	u8 rate __maybe_unused =
	    il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	D_RX("beacon status %#x, retries:%d ibssmgr:%d "
	     "tsf:0x%.8x%.8x rate:%d\n",
	     le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
	     beacon->beacon_notify_hdr.failure_frame,
	     le32_to_cpu(beacon->ibss_mgr_status),
	     le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);

	il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
2134
/* Set up 4965-specific Rx frame reply handlers */
static void
il4965_handler_setup(struct il_priv *il)
{
	/* Legacy Rx frames */
	il->handlers[N_RX] = il4965_hdl_rx;
	/* Tx response */
	il->handlers[C_TX] = il4965_hdl_tx;
	/* Beacon notification (carries IBSS-manager status) */
	il->handlers[N_BEACON] = il4965_hdl_beacon;
}
2145
/* 4965-specific host-command callbacks (RXON handling). */
static struct il_hcmd_ops il4965_hcmd = {
	.rxon_assoc = il4965_send_rxon_assoc,
	.commit_rxon = il4965_commit_rxon,
	.set_rxon_chain = il4965_set_rxon_chain,
};
2151
2152static void
2153il4965_post_scan(struct il_priv *il)
2154{
2155 struct il_rxon_context *ctx = &il->ctx;
2156
2157 /*
2158 * Since setting the RXON may have been deferred while
2159 * performing the scan, fire one off if needed
2160 */
2161 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
2162 il_commit_rxon(il, ctx);
2163}
2164
/*
 * Finalize an association: commit the assoc RXON sequence, start IBSS
 * beaconing if needed, and kick off runtime calibrations.  The RXON
 * unassoc-commit / timing / assoc-commit ordering is required by uCode.
 */
static void
il4965_post_associate(struct il_priv *il)
{
	struct il_rxon_context *ctx = &il->ctx;
	struct ieee80211_vif *vif = ctx->vif;
	struct ieee80211_conf *conf = NULL;
	int ret = 0;

	if (!vif || !il->is_open)
		return;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	il_scan_cancel_timeout(il, 200);

	conf = &il->hw->conf;

	/* First commit with ASSOC cleared so timing can be programmed */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il_commit_rxon(il, ctx);

	ret = il_send_rxon_timing(il, ctx);
	if (ret)
		IL_WARN("RXON timing - " "Attempting to continue.\n");

	ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;

	il_set_rxon_ht(il, &il->current_ht_config);

	if (il->cfg->ops->hcmd->set_rxon_chain)
		il->cfg->ops->hcmd->set_rxon_chain(il, ctx);

	ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);

	D_ASSOC("assoc id %d beacon interval %d\n", vif->bss_conf.aid,
		vif->bss_conf.beacon_int);

	if (vif->bss_conf.use_short_preamble)
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

	/* Short-slot applies only on the 2.4 GHz band */
	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
		if (vif->bss_conf.use_short_slot)
			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
	}

	/* Second commit: now with ASSOC set and all flags finalized */
	il_commit_rxon(il, ctx);

	D_ASSOC("Associated as %d to: %pM\n", vif->bss_conf.aid,
		ctx->active.bssid_addr);

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		break;
	case NL80211_IFTYPE_ADHOC:
		il4965_send_beacon_cmd(il);
		break;
	default:
		IL_ERR("%s Should not be called in %d mode\n", __func__,
		       vif->type);
		break;
	}

	/* the chain noise calibration will enabled PM upon completion
	 * If chain noise has already been run, then we need to enable
	 * power management here */
	if (il->chain_noise_data.state == IL_CHAIN_NOISE_DONE)
		il_power_update_mode(il, false);

	/* Enable Rx differential gain and sensitivity calibrations */
	il4965_chain_noise_reset(il);
	il->start_calib = 1;
}
2241
/*
 * Configure the device for AP operation.  At first bring-up this runs
 * the full unassoc-RXON / timing / beacon / assoc-RXON sequence; the
 * trailing beacon command refreshes the beacon on every call.
 */
static void
il4965_config_ap(struct il_priv *il)
{
	struct il_rxon_context *ctx = &il->ctx;
	struct ieee80211_vif *vif = ctx->vif;
	int ret = 0;

	lockdep_assert_held(&il->mutex);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	/* The following should be done only at AP bring up */
	if (!il_is_associated_ctx(ctx)) {

		/* RXON - unassoc (to set timing command) */
		ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		il_commit_rxon(il, ctx);

		/* RXON Timing */
		ret = il_send_rxon_timing(il, ctx);
		if (ret)
			IL_WARN("RXON timing failed - "
				"Attempting to continue.\n");

		/* AP has all antennas */
		il->chain_noise_data.active_chains = il->hw_params.valid_rx_ant;
		il_set_rxon_ht(il, &il->current_ht_config);
		if (il->cfg->ops->hcmd->set_rxon_chain)
			il->cfg->ops->hcmd->set_rxon_chain(il, ctx);

		ctx->staging.assoc_id = 0;

		if (vif->bss_conf.use_short_preamble)
			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

		/* Short-slot is only meaningful on the 2.4 GHz band */
		if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
			if (vif->bss_conf.use_short_slot)
				ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
			else
				ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
		}
		/* need to send beacon cmd before committing assoc RXON! */
		il4965_send_beacon_cmd(il);
		/* restore RXON assoc */
		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		il_commit_rxon(il, ctx);
	}
	il4965_send_beacon_cmd(il);
}
2294
/* Host-command helper callbacks (sizing, ADD_STA marshalling, scanning). */
static struct il_hcmd_utils_ops il4965_hcmd_utils = {
	.get_hcmd_size = il4965_get_hcmd_size,
	.build_addsta_hcmd = il4965_build_addsta_hcmd,
	.request_scan = il4965_request_scan,
	.post_scan = il4965_post_scan,
};
2301
/* Low-level library callbacks: Tx queues, firmware load, EEPROM layout,
 * APM, temperature, and (optionally) debugfs statistics readers. */
static struct il_lib_ops il4965_lib = {
	.set_hw_params = il4965_hw_set_hw_params,
	.txq_update_byte_cnt_tbl = il4965_txq_update_byte_cnt_tbl,
	.txq_attach_buf_to_tfd = il4965_hw_txq_attach_buf_to_tfd,
	.txq_free_tfd = il4965_hw_txq_free_tfd,
	.txq_init = il4965_hw_tx_queue_init,
	.handler_setup = il4965_handler_setup,
	.is_valid_rtc_data_addr = il4965_hw_valid_rtc_data_addr,
	.init_alive_start = il4965_init_alive_start,
	.load_ucode = il4965_load_bsm,
	.dump_nic_error_log = il4965_dump_nic_error_log,
	.dump_fh = il4965_dump_fh,
	.set_channel_switch = il4965_hw_channel_switch,
	.apm_ops = {
		    .init = il_apm_init,
		    .config = il4965_nic_config,
		    },
	.eeprom_ops = {
		       /* EEPROM offsets of the regulatory channel tables */
		       .regulatory_bands = {
					    EEPROM_REGULATORY_BAND_1_CHANNELS,
					    EEPROM_REGULATORY_BAND_2_CHANNELS,
					    EEPROM_REGULATORY_BAND_3_CHANNELS,
					    EEPROM_REGULATORY_BAND_4_CHANNELS,
					    EEPROM_REGULATORY_BAND_5_CHANNELS,
					    EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
					    EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS},
		       .acquire_semaphore = il4965_eeprom_acquire_semaphore,
		       .release_semaphore = il4965_eeprom_release_semaphore,
		       },
	.send_tx_power = il4965_send_tx_power,
	.update_chain_flags = il4965_update_chain_flags,
	.temp_ops = {
		     .temperature = il4965_temperature_calib,
		     },
#ifdef CONFIG_IWLEGACY_DEBUGFS
	.debugfs_ops = {
			.rx_stats_read = il4965_ucode_rx_stats_read,
			.tx_stats_read = il4965_ucode_tx_stats_read,
			.general_stats_read = il4965_ucode_general_stats_read,
			},
#endif
};
2344
/* Callbacks into 4965 code used by the shared iwlegacy core. */
static const struct il_legacy_ops il4965_legacy_ops = {
	.post_associate = il4965_post_associate,
	.config_ap = il4965_config_ap,
	.manage_ibss_station = il4965_manage_ibss_station,
	.update_bcast_stations = il4965_update_bcast_stations,
};
2351
/* mac80211 callback table for the 4965: mixes 4965-specific handlers
 * (il4965_*) with shared iwlegacy ones (il_mac_*). */
struct ieee80211_ops il4965_hw_ops = {
	.tx = il4965_mac_tx,
	.start = il4965_mac_start,
	.stop = il4965_mac_stop,
	.add_interface = il_mac_add_interface,
	.remove_interface = il_mac_remove_interface,
	.change_interface = il_mac_change_interface,
	.config = il_mac_config,
	.configure_filter = il4965_configure_filter,
	.set_key = il4965_mac_set_key,
	.update_tkip_key = il4965_mac_update_tkip_key,
	.conf_tx = il_mac_conf_tx,
	.reset_tsf = il_mac_reset_tsf,
	.bss_info_changed = il_mac_bss_info_changed,
	.ampdu_action = il4965_mac_ampdu_action,
	.hw_scan = il_mac_hw_scan,
	.sta_add = il4965_mac_sta_add,
	.sta_remove = il_mac_sta_remove,
	.channel_switch = il4965_mac_channel_switch,
	.tx_last_beacon = il_mac_tx_last_beacon,
};
2373
/* Aggregate ops bundle referenced from il4965_cfg below. */
static const struct il_ops il4965_ops = {
	.lib = &il4965_lib,
	.hcmd = &il4965_hcmd,
	.utils = &il4965_hcmd_utils,
	.led = &il4965_led_ops,
	.legacy = &il4965_legacy_ops,
	.ieee80211_ops = &il4965_hw_ops,
};
2382
/* Device-family parameters shared by all 4965 variants. */
static struct il_base_params il4965_base_params = {
	.eeprom_size = IL4965_EEPROM_IMG_SIZE,
	.num_of_queues = IL49_NUM_QUEUES,
	.num_of_ampdu_queues = IL49_NUM_AMPDU_QUEUES,
	.pll_cfg_val = 0,
	.set_l0s = true,
	.use_bsm = true,	/* 4965 loads uCode via bootstrap state machine */
	.led_compensation = 61,
	.chain_noise_num_beacons = IL4965_CAL_NUM_BEACONS,
	.wd_timeout = IL_DEF_WD_TIMEOUT,
	.temperature_kelvin = true,	/* temperature is reported in Kelvin */
	.ucode_tracing = true,
	.sensitivity_calib_by_driver = true,
	.chain_noise_calib_by_driver = true,
};
2398
/* Top-level device configuration matched against the 4965AGN PCI IDs. */
struct il_cfg il4965_cfg = {
	.name = "Intel(R) Wireless WiFi Link 4965AGN",
	.fw_name_pre = IL4965_FW_PRE,
	.ucode_api_max = IL4965_UCODE_API_MAX,
	.ucode_api_min = IL4965_UCODE_API_MIN,
	.sku = IL_SKU_A | IL_SKU_G | IL_SKU_N,
	.valid_tx_ant = ANT_AB,
	.valid_rx_ant = ANT_ABC,
	.eeprom_ver = EEPROM_4965_EEPROM_VERSION,
	.eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
	.ops = &il4965_ops,
	.mod_params = &il4965_mod_params,
	.base_params = &il4965_base_params,
	.led_mode = IL_LED_BLINK,
	/*
	 * Force use of chains B and C for scan RX on 5 GHz band
	 * because the device has off-channel reception on chain A.
	 */
	.scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
};
2419
2420/* Module firmware */
2421MODULE_FIRMWARE(IL4965_MODULE_FIRMWARE(IL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
new file mode 100644
index 000000000000..74472314bc37
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -0,0 +1,1309 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#ifndef __il_4965_h__
31#define __il_4965_h__
32
33struct il_rx_queue;
34struct il_rx_buf;
35struct il_rx_pkt;
36struct il_tx_queue;
37struct il_rxon_context;
38
39/* configuration for the _4965 devices */
40extern struct il_cfg il4965_cfg;
41
42extern struct il_mod_params il4965_mod_params;
43
44extern struct ieee80211_ops il4965_hw_ops;
45
46/* tx queue */
47void il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid,
48 int freed);
49
50/* RXON */
51void il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx);
52
53/* uCode */
54int il4965_verify_ucode(struct il_priv *il);
55
56/* lib */
57void il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status);
58
59void il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
60int il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq);
61int il4965_hw_nic_init(struct il_priv *il);
62int il4965_dump_fh(struct il_priv *il, char **buf, bool display);
63
64/* rx */
65void il4965_rx_queue_restock(struct il_priv *il);
66void il4965_rx_replenish(struct il_priv *il);
67void il4965_rx_replenish_now(struct il_priv *il);
68void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq);
69int il4965_rxq_stop(struct il_priv *il);
70int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
71void il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb);
72void il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb);
73void il4965_rx_handle(struct il_priv *il);
74
75/* tx */
76void il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
77int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
78 dma_addr_t addr, u16 len, u8 reset, u8 pad);
79int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
80void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
81 struct ieee80211_tx_info *info);
82int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb);
83int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
84 struct ieee80211_sta *sta, u16 tid, u16 * ssn);
85int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
86 struct ieee80211_sta *sta, u16 tid);
87int il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id);
88void il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb);
89int il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx);
90void il4965_hw_txq_ctx_free(struct il_priv *il);
91int il4965_txq_ctx_alloc(struct il_priv *il);
92void il4965_txq_ctx_reset(struct il_priv *il);
93void il4965_txq_ctx_stop(struct il_priv *il);
94void il4965_txq_set_sched(struct il_priv *il, u32 mask);
95
96/*
97 * Acquire il->lock before calling this function !
98 */
99void il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx);
100/**
101 * il4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
102 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
103 * @scd_retry: (1) Indicates queue will be used in aggregation mode
104 *
105 * NOTE: Acquire il->lock before calling this function !
106 */
107void il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
108 int tx_fifo_id, int scd_retry);
109
110u8 il4965_toggle_tx_ant(struct il_priv *il, u8 ant_idx, u8 valid);
111
112/* rx */
113void il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb);
114bool il4965_good_plcp_health(struct il_priv *il, struct il_rx_pkt *pkt);
115void il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
116void il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
117
118/* scan */
119int il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
120
121/* station mgmt */
122int il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
123 bool add);
124
125/* hcmd */
126int il4965_send_beacon_cmd(struct il_priv *il);
127
128#ifdef CONFIG_IWLEGACY_DEBUG
129const char *il4965_get_tx_fail_reason(u32 status);
130#else
131static inline const char *
132il4965_get_tx_fail_reason(u32 status)
133{
134 return "";
135}
136#endif
137
138/* station management */
139int il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx);
140int il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
141 const u8 *addr, u8 *sta_id_r);
142int il4965_remove_default_wep_key(struct il_priv *il,
143 struct il_rxon_context *ctx,
144 struct ieee80211_key_conf *key);
145int il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
146 struct ieee80211_key_conf *key);
147int il4965_restore_default_wep_keys(struct il_priv *il,
148 struct il_rxon_context *ctx);
149int il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
150 struct ieee80211_key_conf *key, u8 sta_id);
151int il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
152 struct ieee80211_key_conf *key, u8 sta_id);
153void il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
154 struct ieee80211_key_conf *keyconf,
155 struct ieee80211_sta *sta, u32 iv32,
156 u16 *phase1key);
157int il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid);
158int il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta,
159 int tid, u16 ssn);
160int il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta,
161 int tid);
162void il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt);
163int il4965_update_bcast_stations(struct il_priv *il);
164
165/* rate */
166static inline u8
167il4965_hw_get_rate(__le32 rate_n_flags)
168{
169 return le32_to_cpu(rate_n_flags) & 0xFF;
170}
171
172static inline __le32
173il4965_hw_set_rate_n_flags(u8 rate, u32 flags)
174{
175 return cpu_to_le32(flags | (u32) rate);
176}
177
178/* eeprom */
179void il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac);
180int il4965_eeprom_acquire_semaphore(struct il_priv *il);
181void il4965_eeprom_release_semaphore(struct il_priv *il);
182int il4965_eeprom_check_version(struct il_priv *il);
183
184/* mac80211 handlers (for 4965) */
185void il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
186int il4965_mac_start(struct ieee80211_hw *hw);
187void il4965_mac_stop(struct ieee80211_hw *hw);
188void il4965_configure_filter(struct ieee80211_hw *hw,
189 unsigned int changed_flags,
190 unsigned int *total_flags, u64 multicast);
191int il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
192 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
193 struct ieee80211_key_conf *key);
194void il4965_mac_update_tkip_key(struct ieee80211_hw *hw,
195 struct ieee80211_vif *vif,
196 struct ieee80211_key_conf *keyconf,
197 struct ieee80211_sta *sta, u32 iv32,
198 u16 *phase1key);
199int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
200 enum ieee80211_ampdu_mlme_action action,
201 struct ieee80211_sta *sta, u16 tid, u16 * ssn,
202 u8 buf_size);
203int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
204 struct ieee80211_sta *sta);
205void il4965_mac_channel_switch(struct ieee80211_hw *hw,
206 struct ieee80211_channel_switch *ch_switch);
207
208void il4965_led_enable(struct il_priv *il);
209
210/* EEPROM */
211#define IL4965_EEPROM_IMG_SIZE 1024
212
213/*
214 * uCode queue management definitions ...
215 * The first queue used for block-ack aggregation is #7 (4965 only).
216 * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
217 */
218#define IL49_FIRST_AMPDU_QUEUE 7
219
220/* Sizes and addresses for instruction and data memory (SRAM) in
221 * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
222#define IL49_RTC_INST_LOWER_BOUND (0x000000)
223#define IL49_RTC_INST_UPPER_BOUND (0x018000)
224
225#define IL49_RTC_DATA_LOWER_BOUND (0x800000)
226#define IL49_RTC_DATA_UPPER_BOUND (0x80A000)
227
228#define IL49_RTC_INST_SIZE (IL49_RTC_INST_UPPER_BOUND - \
229 IL49_RTC_INST_LOWER_BOUND)
230#define IL49_RTC_DATA_SIZE (IL49_RTC_DATA_UPPER_BOUND - \
231 IL49_RTC_DATA_LOWER_BOUND)
232
233#define IL49_MAX_INST_SIZE IL49_RTC_INST_SIZE
234#define IL49_MAX_DATA_SIZE IL49_RTC_DATA_SIZE
235
236/* Size of uCode instruction memory in bootstrap state machine */
237#define IL49_MAX_BSM_SIZE BSM_SRAM_SIZE
238
239static inline int
240il4965_hw_valid_rtc_data_addr(u32 addr)
241{
242 return (addr >= IL49_RTC_DATA_LOWER_BOUND &&
243 addr < IL49_RTC_DATA_UPPER_BOUND);
244}
245
246/********************* START TEMPERATURE *************************************/
247
248/**
249 * 4965 temperature calculation.
250 *
251 * The driver must calculate the device temperature before calculating
252 * a txpower setting (amplifier gain is temperature dependent). The
253 * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
254 * values used for the life of the driver, and one of which (R4) is the
255 * real-time temperature indicator.
256 *
257 * uCode provides all 4 values to the driver via the "initialize alive"
258 * notification (see struct il4965_init_alive_resp). After the runtime uCode
259 * image loads, uCode updates the R4 value via stats notifications
260 * (see N_STATS), which occur after each received beacon
261 * when associated, or can be requested via C_STATS.
262 *
263 * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver
264 * must sign-extend to 32 bits before applying formula below.
265 *
266 * Formula:
267 *
268 * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
269 *
270 * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is
271 * an additional correction, which should be centered around 0 degrees
272 * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for
273 * centering the 97/100 correction around 0 degrees K.
274 *
 * Subtract 273 from the Kelvin value to find degrees Celsius, for comparing current
276 * temperature with factory-measured temperatures when calculating txpower
277 * settings.
278 */
279#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
280#define TEMPERATURE_CALIB_A_VAL 259
281
282/* Limit range of calculated temperature to be between these Kelvin values */
283#define IL_TX_POWER_TEMPERATURE_MIN (263)
284#define IL_TX_POWER_TEMPERATURE_MAX (410)
285
286#define IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
287 ((t) < IL_TX_POWER_TEMPERATURE_MIN || \
288 (t) > IL_TX_POWER_TEMPERATURE_MAX)
289
290/********************* END TEMPERATURE ***************************************/
291
292/********************* START TXPOWER *****************************************/
293
294/**
295 * 4965 txpower calculations rely on information from three sources:
296 *
297 * 1) EEPROM
298 * 2) "initialize" alive notification
299 * 3) stats notifications
300 *
301 * EEPROM data consists of:
302 *
303 * 1) Regulatory information (max txpower and channel usage flags) is provided
 * separately for each channel that can possibly be supported by 4965.
305 * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
306 * (legacy) channels.
307 *
308 * See struct il4965_eeprom_channel for format, and struct il4965_eeprom
309 * for locations in EEPROM.
310 *
311 * 2) Factory txpower calibration information is provided separately for
312 * sub-bands of contiguous channels. 2.4GHz has just one sub-band,
313 * but 5 GHz has several sub-bands.
314 *
 * In addition, per-band (2.4 and 5 GHz) saturation txpowers are provided.
316 *
317 * See struct il4965_eeprom_calib_info (and the tree of structures
318 * contained within it) for format, and struct il4965_eeprom for
319 * locations in EEPROM.
320 *
321 * "Initialization alive" notification (see struct il4965_init_alive_resp)
322 * consists of:
323 *
324 * 1) Temperature calculation parameters.
325 *
326 * 2) Power supply voltage measurement.
327 *
328 * 3) Tx gain compensation to balance 2 transmitters for MIMO use.
329 *
330 * Statistics notifications deliver:
331 *
332 * 1) Current values for temperature param R4.
333 */
334
335/**
336 * To calculate a txpower setting for a given desired target txpower, channel,
337 * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
338 * support MIMO and transmit diversity), driver must do the following:
339 *
340 * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
341 * Do not exceed regulatory limit; reduce target txpower if necessary.
342 *
343 * If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31),
344 * 2 transmitters will be used simultaneously; driver must reduce the
345 * regulatory limit by 3 dB (half-power) for each transmitter, so the
346 * combined total output of the 2 transmitters is within regulatory limits.
347 *
348 *
349 * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by
350 * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]);
351 * reduce target txpower if necessary.
352 *
353 * Backoff values below are in 1/2 dB units (equivalent to steps in
354 * txpower gain tables):
355 *
356 * OFDM 6 - 36 MBit: 10 steps (5 dB)
357 * OFDM 48 MBit: 15 steps (7.5 dB)
358 * OFDM 54 MBit: 17 steps (8.5 dB)
359 * OFDM 60 MBit: 20 steps (10 dB)
360 * CCK all rates: 10 steps (5 dB)
361 *
362 * Backoff values apply to saturation txpower on a per-transmitter basis;
363 * when using MIMO (2 transmitters), each transmitter uses the same
364 * saturation level provided in EEPROM, and the same backoff values;
365 * no reduction (such as with regulatory txpower limits) is required.
366 *
 * Saturation and Backoff values apply equally to 20 MHz (legacy) channel
 * widths and 40 MHz (.11n HT40) channel widths; there is no separate
369 * factory measurement for ht40 channels.
370 *
371 * The result of this step is the final target txpower. The rest of
372 * the steps figure out the proper settings for the device to achieve
373 * that target txpower.
374 *
375 *
376 * 3) Determine (EEPROM) calibration sub band for the target channel, by
377 * comparing against first and last channels in each sub band
378 * (see struct il4965_eeprom_calib_subband_info).
379 *
380 *
381 * 4) Linearly interpolate (EEPROM) factory calibration measurement sets,
382 * referencing the 2 factory-measured (sample) channels within the sub band.
383 *
384 * Interpolation is based on difference between target channel's frequency
385 * and the sample channels' frequencies. Since channel numbers are based
386 * on frequency (5 MHz between each channel number), this is equivalent
387 * to interpolating based on channel number differences.
388 *
389 * Note that the sample channels may or may not be the channels at the
390 * edges of the sub band. The target channel may be "outside" of the
391 * span of the sampled channels.
392 *
393 * Driver may choose the pair (for 2 Tx chains) of measurements (see
394 * struct il4965_eeprom_calib_ch_info) for which the actual measured
395 * txpower comes closest to the desired txpower. Usually, though,
396 * the middle set of measurements is closest to the regulatory limits,
397 * and is therefore a good choice for all txpower calculations (this
398 * assumes that high accuracy is needed for maximizing legal txpower,
399 * while lower txpower configurations do not need as much accuracy).
400 *
401 * Driver should interpolate both members of the chosen measurement pair,
402 * i.e. for both Tx chains (radio transmitters), unless the driver knows
403 * that only one of the chains will be used (e.g. only one tx antenna
404 * connected, but this should be unusual). The rate scaling algorithm
405 * switches antennas to find best performance, so both Tx chains will
406 * be used (although only one at a time) even for non-MIMO transmissions.
407 *
408 * Driver should interpolate factory values for temperature, gain table
409 * idx, and actual power. The power amplifier detector values are
410 * not used by the driver.
411 *
412 * Sanity check: If the target channel happens to be one of the sample
413 * channels, the results should agree with the sample channel's
414 * measurements!
415 *
416 *
417 * 5) Find difference between desired txpower and (interpolated)
418 * factory-measured txpower. Using (interpolated) factory gain table idx
419 * (shown elsewhere) as a starting point, adjust this idx lower to
420 * increase txpower, or higher to decrease txpower, until the target
421 * txpower is reached. Each step in the gain table is 1/2 dB.
422 *
423 * For example, if factory measured txpower is 16 dBm, and target txpower
424 * is 13 dBm, add 6 steps to the factory gain idx to reduce txpower
425 * by 3 dB.
426 *
427 *
428 * 6) Find difference between current device temperature and (interpolated)
429 * factory-measured temperature for sub-band. Factory values are in
430 * degrees Celsius. To calculate current temperature, see comments for
431 * "4965 temperature calculation".
432 *
433 * If current temperature is higher than factory temperature, driver must
 * increase gain (lower gain table idx), and vice versa.
435 *
436 * Temperature affects gain differently for different channels:
437 *
438 * 2.4 GHz all channels: 3.5 degrees per half-dB step
439 * 5 GHz channels 34-43: 4.5 degrees per half-dB step
440 * 5 GHz channels >= 44: 4.0 degrees per half-dB step
441 *
442 * NOTE: Temperature can increase rapidly when transmitting, especially
443 * with heavy traffic at high txpowers. Driver should update
444 * temperature calculations often under these conditions to
445 * maintain strong txpower in the face of rising temperature.
446 *
447 *
448 * 7) Find difference between current power supply voltage indicator
449 * (from "initialize alive") and factory-measured power supply voltage
450 * indicator (EEPROM).
451 *
452 * If the current voltage is higher (indicator is lower) than factory
453 * voltage, gain should be reduced (gain table idx increased) by:
454 *
455 * (eeprom - current) / 7
456 *
457 * If the current voltage is lower (indicator is higher) than factory
458 * voltage, gain should be increased (gain table idx decreased) by:
459 *
460 * 2 * (current - eeprom) / 7
461 *
462 * If number of idx steps in either direction turns out to be > 2,
463 * something is wrong ... just use 0.
464 *
465 * NOTE: Voltage compensation is independent of band/channel.
466 *
467 * NOTE: "Initialize" uCode measures current voltage, which is assumed
468 * to be constant after this initial measurement. Voltage
469 * compensation for txpower (number of steps in gain table)
470 * may be calculated once and used until the next uCode bootload.
471 *
472 *
473 * 8) If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31),
474 * adjust txpower for each transmitter chain, so txpower is balanced
475 * between the two chains. There are 5 pairs of tx_atten[group][chain]
476 * values in "initialize alive", one pair for each of 5 channel ranges:
477 *
478 * Group 0: 5 GHz channel 34-43
479 * Group 1: 5 GHz channel 44-70
480 * Group 2: 5 GHz channel 71-124
481 * Group 3: 5 GHz channel 125-200
482 * Group 4: 2.4 GHz all channels
483 *
484 * Add the tx_atten[group][chain] value to the idx for the target chain.
485 * The values are signed, but are in pairs of 0 and a non-negative number,
486 * so as to reduce gain (if necessary) of the "hotter" channel. This
487 * avoids any need to double-check for regulatory compliance after
488 * this step.
489 *
490 *
491 * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation
492 * value to the idx:
493 *
494 * Hardware rev B: 9 steps (4.5 dB)
495 * Hardware rev C: 5 steps (2.5 dB)
496 *
497 * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
498 * bits [3:2], 1 = B, 2 = C.
499 *
500 * NOTE: This compensation is in addition to any saturation backoff that
501 * might have been applied in an earlier step.
502 *
503 *
504 * 10) Select the gain table, based on band (2.4 vs 5 GHz).
505 *
506 * Limit the adjusted idx to stay within the table!
507 *
508 *
509 * 11) Read gain table entries for DSP and radio gain, place into appropriate
510 * location(s) in command (struct il4965_txpowertable_cmd).
511 */
512
513/**
514 * When MIMO is used (2 transmitters operating simultaneously), driver should
515 * limit each transmitter to deliver a max of 3 dB below the regulatory limit
516 * for the device. That is, use half power for each transmitter, so total
517 * txpower is within regulatory limits.
518 *
519 * The value "6" represents number of steps in gain table to reduce power 3 dB.
520 * Each step is 1/2 dB.
521 */
522#define IL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
523
524/**
525 * CCK gain compensation.
526 *
527 * When calculating txpowers for CCK, after making sure that the target power
528 * is within regulatory and saturation limits, driver must additionally
529 * back off gain by adding these values to the gain table idx.
530 *
531 * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
532 * bits [3:2], 1 = B, 2 = C.
533 */
534#define IL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
535#define IL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
536
537/*
538 * 4965 power supply voltage compensation for txpower
539 */
540#define TX_POWER_IL_VOLTAGE_CODES_PER_03V (7)
541
542/**
543 * Gain tables.
544 *
545 * The following tables contain pair of values for setting txpower, i.e.
546 * gain settings for the output of the device's digital signal processor (DSP),
547 * and for the analog gain structure of the transmitter.
548 *
549 * Each entry in the gain tables represents a step of 1/2 dB. Note that these
550 * are *relative* steps, not indications of absolute output power. Output
551 * power varies with temperature, voltage, and channel frequency, and also
552 * requires consideration of average power (to satisfy regulatory constraints),
553 * and peak power (to avoid distortion of the output signal).
554 *
555 * Each entry contains two values:
556 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
557 * linear value that multiplies the output of the digital signal processor,
558 * before being sent to the analog radio.
559 * 2) Radio gain. This sets the analog gain of the radio Tx path.
560 * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
561 *
562 * EEPROM contains factory calibration data for txpower. This maps actual
563 * measured txpower levels to gain settings in the "well known" tables
564 * below ("well-known" means here that both factory calibration *and* the
565 * driver work with the same table).
566 *
567 * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table
568 * has an extension (into negative idxes), in case the driver needs to
569 * boost power setting for high device temperatures (higher than would be
 * present during factory calibration). A 5 GHz EEPROM idx of "40"
571 * corresponds to the 49th entry in the table used by the driver.
572 */
573#define MIN_TX_GAIN_IDX (0) /* highest gain, lowest idx, 2.4 */
574#define MIN_TX_GAIN_IDX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */
575
576/**
577 * 2.4 GHz gain table
578 *
579 * Index Dsp gain Radio gain
580 * 0 110 0x3f (highest gain)
581 * 1 104 0x3f
582 * 2 98 0x3f
583 * 3 110 0x3e
584 * 4 104 0x3e
585 * 5 98 0x3e
586 * 6 110 0x3d
587 * 7 104 0x3d
588 * 8 98 0x3d
589 * 9 110 0x3c
590 * 10 104 0x3c
591 * 11 98 0x3c
592 * 12 110 0x3b
593 * 13 104 0x3b
594 * 14 98 0x3b
595 * 15 110 0x3a
596 * 16 104 0x3a
597 * 17 98 0x3a
598 * 18 110 0x39
599 * 19 104 0x39
600 * 20 98 0x39
601 * 21 110 0x38
602 * 22 104 0x38
603 * 23 98 0x38
604 * 24 110 0x37
605 * 25 104 0x37
606 * 26 98 0x37
607 * 27 110 0x36
608 * 28 104 0x36
609 * 29 98 0x36
610 * 30 110 0x35
611 * 31 104 0x35
612 * 32 98 0x35
613 * 33 110 0x34
614 * 34 104 0x34
615 * 35 98 0x34
616 * 36 110 0x33
617 * 37 104 0x33
618 * 38 98 0x33
619 * 39 110 0x32
620 * 40 104 0x32
621 * 41 98 0x32
622 * 42 110 0x31
623 * 43 104 0x31
624 * 44 98 0x31
625 * 45 110 0x30
626 * 46 104 0x30
627 * 47 98 0x30
628 * 48 110 0x6
629 * 49 104 0x6
630 * 50 98 0x6
631 * 51 110 0x5
632 * 52 104 0x5
633 * 53 98 0x5
634 * 54 110 0x4
635 * 55 104 0x4
636 * 56 98 0x4
637 * 57 110 0x3
638 * 58 104 0x3
639 * 59 98 0x3
640 * 60 110 0x2
641 * 61 104 0x2
642 * 62 98 0x2
643 * 63 110 0x1
644 * 64 104 0x1
645 * 65 98 0x1
646 * 66 110 0x0
647 * 67 104 0x0
648 * 68 98 0x0
649 * 69 97 0
650 * 70 96 0
651 * 71 95 0
652 * 72 94 0
653 * 73 93 0
654 * 74 92 0
655 * 75 91 0
656 * 76 90 0
657 * 77 89 0
658 * 78 88 0
659 * 79 87 0
660 * 80 86 0
661 * 81 85 0
662 * 82 84 0
663 * 83 83 0
664 * 84 82 0
665 * 85 81 0
666 * 86 80 0
667 * 87 79 0
668 * 88 78 0
669 * 89 77 0
670 * 90 76 0
671 * 91 75 0
672 * 92 74 0
673 * 93 73 0
674 * 94 72 0
675 * 95 71 0
676 * 96 70 0
677 * 97 69 0
678 * 98 68 0
679 */
680
681/**
682 * 5 GHz gain table
683 *
684 * Index Dsp gain Radio gain
685 * -9 123 0x3F (highest gain)
686 * -8 117 0x3F
687 * -7 110 0x3F
688 * -6 104 0x3F
689 * -5 98 0x3F
690 * -4 110 0x3E
691 * -3 104 0x3E
692 * -2 98 0x3E
693 * -1 110 0x3D
694 * 0 104 0x3D
695 * 1 98 0x3D
696 * 2 110 0x3C
697 * 3 104 0x3C
698 * 4 98 0x3C
699 * 5 110 0x3B
700 * 6 104 0x3B
701 * 7 98 0x3B
702 * 8 110 0x3A
703 * 9 104 0x3A
704 * 10 98 0x3A
705 * 11 110 0x39
706 * 12 104 0x39
707 * 13 98 0x39
708 * 14 110 0x38
709 * 15 104 0x38
710 * 16 98 0x38
711 * 17 110 0x37
712 * 18 104 0x37
713 * 19 98 0x37
714 * 20 110 0x36
715 * 21 104 0x36
716 * 22 98 0x36
717 * 23 110 0x35
718 * 24 104 0x35
719 * 25 98 0x35
720 * 26 110 0x34
721 * 27 104 0x34
722 * 28 98 0x34
723 * 29 110 0x33
724 * 30 104 0x33
725 * 31 98 0x33
726 * 32 110 0x32
727 * 33 104 0x32
728 * 34 98 0x32
729 * 35 110 0x31
730 * 36 104 0x31
731 * 37 98 0x31
732 * 38 110 0x30
733 * 39 104 0x30
734 * 40 98 0x30
735 * 41 110 0x25
736 * 42 104 0x25
737 * 43 98 0x25
738 * 44 110 0x24
739 * 45 104 0x24
740 * 46 98 0x24
741 * 47 110 0x23
742 * 48 104 0x23
743 * 49 98 0x23
744 * 50 110 0x22
745 * 51 104 0x18
746 * 52 98 0x18
747 * 53 110 0x17
748 * 54 104 0x17
749 * 55 98 0x17
750 * 56 110 0x16
751 * 57 104 0x16
752 * 58 98 0x16
753 * 59 110 0x15
754 * 60 104 0x15
755 * 61 98 0x15
756 * 62 110 0x14
757 * 63 104 0x14
758 * 64 98 0x14
759 * 65 110 0x13
760 * 66 104 0x13
761 * 67 98 0x13
762 * 68 110 0x12
763 * 69 104 0x08
764 * 70 98 0x08
765 * 71 110 0x07
766 * 72 104 0x07
767 * 73 98 0x07
768 * 74 110 0x06
769 * 75 104 0x06
770 * 76 98 0x06
771 * 77 110 0x05
772 * 78 104 0x05
773 * 79 98 0x05
774 * 80 110 0x04
775 * 81 104 0x04
776 * 82 98 0x04
777 * 83 110 0x03
778 * 84 104 0x03
779 * 85 98 0x03
780 * 86 110 0x02
781 * 87 104 0x02
782 * 88 98 0x02
783 * 89 110 0x01
784 * 90 104 0x01
785 * 91 98 0x01
786 * 92 110 0x00
787 * 93 104 0x00
788 * 94 98 0x00
789 * 95 93 0x00
790 * 96 88 0x00
791 * 97 83 0x00
792 * 98 78 0x00
793 */
794
795/**
796 * Sanity checks and default values for EEPROM regulatory levels.
797 * If EEPROM values fall outside MIN/MAX range, use default values.
798 *
799 * Regulatory limits refer to the maximum average txpower allowed by
800 * regulatory agencies in the geographies in which the device is meant
801 * to be operated. These limits are SKU-specific (i.e. geography-specific),
802 * and channel-specific; each channel has an individual regulatory limit
803 * listed in the EEPROM.
804 *
805 * Units are in half-dBm (i.e. "34" means 17 dBm).
806 */
807#define IL_TX_POWER_DEFAULT_REGULATORY_24 (34)
808#define IL_TX_POWER_DEFAULT_REGULATORY_52 (34)
809#define IL_TX_POWER_REGULATORY_MIN (0)
810#define IL_TX_POWER_REGULATORY_MAX (34)
811
812/**
813 * Sanity checks and default values for EEPROM saturation levels.
814 * If EEPROM values fall outside MIN/MAX range, use default values.
815 *
816 * Saturation is the highest level that the output power amplifier can produce
817 * without significant clipping distortion. This is a "peak" power level.
818 * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
819 * require differing amounts of backoff, relative to their average power output,
820 * in order to avoid clipping distortion.
821 *
822 * Driver must make sure that it is violating neither the saturation limit,
823 * nor the regulatory limit, when calculating Tx power settings for various
824 * rates.
825 *
826 * Units are in half-dBm (i.e. "38" means 19 dBm).
827 */
828#define IL_TX_POWER_DEFAULT_SATURATION_24 (38)
829#define IL_TX_POWER_DEFAULT_SATURATION_52 (38)
830#define IL_TX_POWER_SATURATION_MIN (20)
831#define IL_TX_POWER_SATURATION_MAX (50)
832
833/**
834 * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
835 * and thermal Txpower calibration.
836 *
837 * When calculating txpower, driver must compensate for current device
838 * temperature; higher temperature requires higher gain. Driver must calculate
839 * current temperature (see "4965 temperature calculation"), then compare vs.
840 * factory calibration temperature in EEPROM; if current temperature is higher
841 * than factory temperature, driver must *increase* gain by proportions shown
842 * in table below. If current temperature is lower than factory, driver must
843 * *decrease* gain.
844 *
845 * Different frequency ranges require different compensation, as shown below.
846 */
847/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */
848#define CALIB_IL_TX_ATTEN_GR1_FCH 34
849#define CALIB_IL_TX_ATTEN_GR1_LCH 43
850
851/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */
852#define CALIB_IL_TX_ATTEN_GR2_FCH 44
853#define CALIB_IL_TX_ATTEN_GR2_LCH 70
854
855/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */
856#define CALIB_IL_TX_ATTEN_GR3_FCH 71
857#define CALIB_IL_TX_ATTEN_GR3_LCH 124
858
859/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */
860#define CALIB_IL_TX_ATTEN_GR4_FCH 125
861#define CALIB_IL_TX_ATTEN_GR4_LCH 200
862
863/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. */
864#define CALIB_IL_TX_ATTEN_GR5_FCH 1
865#define CALIB_IL_TX_ATTEN_GR5_LCH 20
866
/*
 * Indexes for per-group Tx attenuation / thermal calibration data.
 * Each value selects one of the CALIB_IL_TX_ATTEN_GR*_FCH/_LCH channel
 * ranges defined above.  NOTE(review): the enum names are offset by one
 * from the "Group N" labels in the comments above (CALIB_CH_GROUP_1
 * corresponds to "Group 0", i.e. GR1, 5 GHz channels 34-43).
 */
enum {
	CALIB_CH_GROUP_1 = 0,	/* 5 GHz ch 34-43 (CALIB_IL_TX_ATTEN_GR1_*) */
	CALIB_CH_GROUP_2 = 1,	/* 5 GHz ch 44-70 (CALIB_IL_TX_ATTEN_GR2_*) */
	CALIB_CH_GROUP_3 = 2,	/* 5 GHz ch 71-124 (CALIB_IL_TX_ATTEN_GR3_*) */
	CALIB_CH_GROUP_4 = 3,	/* 5 GHz ch 125-200 (CALIB_IL_TX_ATTEN_GR4_*) */
	CALIB_CH_GROUP_5 = 4,	/* 2.4 GHz ch 1-20 (CALIB_IL_TX_ATTEN_GR5_*) */
	CALIB_CH_GROUP_MAX	/* number of calibration channel groups */
};
875
876/********************* END TXPOWER *****************************************/
877
878/**
879 * Tx/Rx Queues
880 *
881 * Most communication between driver and 4965 is via queues of data buffers.
882 * For example, all commands that the driver issues to device's embedded
883 * controller (uCode) are via the command queue (one of the Tx queues). All
884 * uCode command responses/replies/notifications, including Rx frames, are
885 * conveyed from uCode to driver via the Rx queue.
886 *
887 * Most support for these queues, including handshake support, resides in
888 * structures in host DRAM, shared between the driver and the device. When
889 * allocating this memory, the driver must make sure that data written by
890 * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
891 * cache memory), so DRAM and cache are consistent, and the device can
892 * immediately see changes made by the driver.
893 *
894 * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
895 * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
896 * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
897 */
898#define IL49_NUM_FIFOS 7
899#define IL49_CMD_FIFO_NUM 4
900#define IL49_NUM_QUEUES 16
901#define IL49_NUM_AMPDU_QUEUES 8
902
903/**
904 * struct il4965_schedq_bc_tbl
905 *
906 * Byte Count table
907 *
908 * Each Tx queue uses a byte-count table containing 320 entries:
909 * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
910 * duplicate the first 64 entries (to avoid wrap-around within a Tx win;
911 * max Tx win is 64 TFDs).
912 *
913 * When driver sets up a new TFD, it must also enter the total byte count
914 * of the frame to be transmitted into the corresponding entry in the byte
915 * count table for the chosen Tx queue. If the TFD idx is 0-63, the driver
916 * must duplicate the byte count entry in corresponding idx 256-319.
917 *
918 * padding puts each byte count table on a 1024-byte boundary;
919 * 4965 assumes tables are separated by 1024 bytes.
920 */
struct il4965_scd_bc_tbl {
	/* 16-bit byte count per TFD; per the comment above, entries 256-319
	 * duplicate entries 0-63 to avoid Tx-window wrap-around. */
	__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
	/* pad table to a 1024-byte boundary; 4965 assumes per-queue tables
	 * are separated by exactly 1024 bytes. */
	u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
} __packed;
925
926#define IL4965_RTC_INST_LOWER_BOUND (0x000000)
927
928/* RSSI to dBm */
929#define IL4965_RSSI_OFFSET 44
930
931/* PCI registers */
932#define PCI_CFG_RETRY_TIMEOUT 0x041
933
934/* PCI register values */
935#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
936#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
937
938#define IL4965_DEFAULT_TX_RETRY 15
939
940/* EEPROM */
941#define IL4965_FIRST_AMPDU_QUEUE 10
942
943/* Calibration */
944void il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp);
945void il4965_sensitivity_calibration(struct il_priv *il, void *resp);
946void il4965_init_sensitivity(struct il_priv *il);
947void il4965_reset_run_time_calib(struct il_priv *il);
948void il4965_calib_free_results(struct il_priv *il);
949
950/* Debug */
951#ifdef CONFIG_IWLEGACY_DEBUGFS
952ssize_t il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
953 size_t count, loff_t *ppos);
954ssize_t il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
955 size_t count, loff_t *ppos);
956ssize_t il4965_ucode_general_stats_read(struct file *file,
957 char __user *user_buf, size_t count,
958 loff_t *ppos);
959#endif
960
961/****************************/
962/* Flow Handler Definitions */
963/****************************/
964
965/**
966 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
967 * Addresses are offsets from device's PCI hardware base address.
968 */
969#define FH49_MEM_LOWER_BOUND (0x1000)
970#define FH49_MEM_UPPER_BOUND (0x2000)
971
972/**
973 * Keep-Warm (KW) buffer base address.
974 *
975 * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
976 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
977 * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
978 * from going into a power-savings mode that would cause higher DRAM latency,
979 * and possible data over/under-runs, before all Tx/Rx is complete.
980 *
981 * Driver loads FH49_KW_MEM_ADDR_REG with the physical address (bits 35:4)
982 * of the buffer, which must be 4K aligned. Once this is set up, the 4965
983 * automatically invokes keep-warm accesses when normal accesses might not
984 * be sufficient to maintain fast DRAM response.
985 *
986 * Bit fields:
987 * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
988 */
989#define FH49_KW_MEM_ADDR_REG (FH49_MEM_LOWER_BOUND + 0x97C)
990
991/**
992 * TFD Circular Buffers Base (CBBC) addresses
993 *
994 * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
995 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
996 * (see struct il_tfd_frame). These 16 pointer registers are offset by 0x04
997 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
998 * aligned (address bits 0-7 must be 0).
999 *
1000 * Bit fields in each pointer register:
1001 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
1002 */
1003#define FH49_MEM_CBBC_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x9D0)
1004#define FH49_MEM_CBBC_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xA10)
1005
1006/* Find TFD CB base pointer for given queue (range 0-15). */
1007#define FH49_MEM_CBBC_QUEUE(x) (FH49_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
1008
1009/**
1010 * Rx SRAM Control and Status Registers (RSCSR)
1011 *
1012 * These registers provide handshake between driver and 4965 for the Rx queue
1013 * (this queue handles *all* command responses, notifications, Rx data, etc.
1014 * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
1015 * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
1016 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
1017 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
1018 * mapping between RBDs and RBs.
1019 *
1020 * Driver must allocate host DRAM memory for the following, and set the
1021 * physical address of each into 4965 registers:
1022 *
1023 * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
1024 * entries (although any power of 2, up to 4096, is selectable by driver).
1025 * Each entry (1 dword) points to a receive buffer (RB) of consistent size
1026 * (typically 4K, although 8K or 16K are also selectable by driver).
1027 * Driver sets up RB size and number of RBDs in the CB via Rx config
1028 * register FH49_MEM_RCSR_CHNL0_CONFIG_REG.
1029 *
1030 * Bit fields within one RBD:
1031 * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
1032 *
1033 * Driver sets physical address [35:8] of base of RBD circular buffer
1034 * into FH49_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
1035 *
1036 * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
1037 * (RBs) have been filled, via a "write pointer", actually the idx of
1038 * the RB's corresponding RBD within the circular buffer. Driver sets
1039 * physical address [35:4] into FH49_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
1040 *
1041 * Bit fields in lower dword of Rx status buffer (upper dword not used
1042 * by driver; see struct il4965_shared, val0):
1043 * 31-12: Not used by driver
1044 * 11- 0: Index of last filled Rx buffer descriptor
1045 * (4965 writes, driver reads this value)
1046 *
1047 * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
1048 * enter pointers to these RBs into contiguous RBD circular buffer entries,
1049 * and update the 4965's "write" idx register,
1050 * FH49_RSCSR_CHNL0_RBDCB_WPTR_REG.
1051 *
1052 * This "write" idx corresponds to the *next* RBD that the driver will make
1053 * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
1054 * the circular buffer. This value should initially be 0 (before preparing any
1055 * RBs), should be 8 after preparing the first 8 RBs (for example), and must
1056 * wrap back to 0 at the end of the circular buffer (but don't wrap before
1057 * "read" idx has advanced past 1! See below).
1058 * NOTE: 4965 EXPECTS THE WRITE IDX TO BE INCREMENTED IN MULTIPLES OF 8.
1059 *
1060 * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
1061 * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
1062 * to tell the driver the idx of the latest filled RBD. The driver must
1063 * read this "read" idx from DRAM after receiving an Rx interrupt from 4965.
1064 *
1065 * The driver must also internally keep track of a third idx, which is the
1066 * next RBD to process. When receiving an Rx interrupt, driver should process
1067 * all filled but unprocessed RBs up to, but not including, the RB
1068 * corresponding to the "read" idx. For example, if "read" idx becomes "1",
1069 * driver may process the RB pointed to by RBD 0. Depending on volume of
1070 * traffic, there may be many RBs to process.
1071 *
1072 * If read idx == write idx, 4965 thinks there is no room to put new data.
1073 * Due to this, the maximum number of filled RBs is 255, instead of 256. To
1074 * be safe, make sure that there is a gap of at least 2 RBDs between "write"
1075 * and "read" idxes; that is, make sure that there are no more than 254
1076 * buffers waiting to be filled.
1077 */
1078#define FH49_MEM_RSCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xBC0)
1079#define FH49_MEM_RSCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xC00)
1080#define FH49_MEM_RSCSR_CHNL0 (FH49_MEM_RSCSR_LOWER_BOUND)
1081
1082/**
1083 * Physical base address of 8-byte Rx Status buffer.
1084 * Bit fields:
1085 * 31-0: Rx status buffer physical base address [35:4], must 16-byte aligned.
1086 */
1087#define FH49_RSCSR_CHNL0_STTS_WPTR_REG (FH49_MEM_RSCSR_CHNL0)
1088
1089/**
1090 * Physical base address of Rx Buffer Descriptor Circular Buffer.
1091 * Bit fields:
1092 * 27-0: RBD CD physical base address [35:8], must be 256-byte aligned.
1093 */
1094#define FH49_RSCSR_CHNL0_RBDCB_BASE_REG (FH49_MEM_RSCSR_CHNL0 + 0x004)
1095
1096/**
1097 * Rx write pointer (idx, really!).
1098 * Bit fields:
1099 * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
1100 * NOTE: For 256-entry circular buffer, use only bits [7:0].
1101 */
1102#define FH49_RSCSR_CHNL0_RBDCB_WPTR_REG (FH49_MEM_RSCSR_CHNL0 + 0x008)
1103#define FH49_RSCSR_CHNL0_WPTR (FH49_RSCSR_CHNL0_RBDCB_WPTR_REG)
1104
1105/**
1106 * Rx Config/Status Registers (RCSR)
1107 * Rx Config Reg for channel 0 (only channel used)
1108 *
1109 * Driver must initialize FH49_MEM_RCSR_CHNL0_CONFIG_REG as follows for
1110 * normal operation (see bit fields).
1111 *
1112 * Clearing FH49_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
1113 * Driver should poll FH49_MEM_RSSR_RX_STATUS_REG for
1114 * FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
1115 *
1116 * Bit fields:
1117 * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
1118 * '10' operate normally
1119 * 29-24: reserved
1120 * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
1121 * min "5" for 32 RBDs, max "12" for 4096 RBDs.
1122 * 19-18: reserved
1123 * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
1124 * '10' 12K, '11' 16K.
1125 * 15-14: reserved
1126 * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
1127 * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
1128 * typical value 0x10 (about 1/2 msec)
1129 * 3- 0: reserved
1130 */
1131#define FH49_MEM_RCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xC00)
1132#define FH49_MEM_RCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xCC0)
1133#define FH49_MEM_RCSR_CHNL0 (FH49_MEM_RCSR_LOWER_BOUND)
1134
1135#define FH49_MEM_RCSR_CHNL0_CONFIG_REG (FH49_MEM_RCSR_CHNL0)
1136
1137#define FH49_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
/* NOTE(review): field described as bits 13-12 above, but this mask covers
 * only bit 12; kept as-is to match hardware usage -- confirm against HW docs. */
1138#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bit 12 */
1139#define FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
1140#define FH49_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
/* NOTE(review): "RBDBC" here vs "RBDCB" in the POS macro below looks like an
 * upstream spelling inconsistency; names must stay as-is (public interface). */
1141#define FH49_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
1142#define FH49_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31 */
1143
1144#define FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
1145#define FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
/* Default Rx-buffer close timeout, in 32-usec units (~0.5 ms; see field 11-4). */
1146#define RX_RB_TIMEOUT (0x10)
1147
1148#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
1149#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
1150#define FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
1151
1152#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
1153#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
1154#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
1155#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
1156
1157#define FH49_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
1158#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
1159#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
1160
1161/**
1162 * Rx Shared Status Registers (RSSR)
1163 *
1164 * After stopping Rx DMA channel (writing 0 to
1165 * FH49_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
1166 * FH49_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
1167 *
1168 * Bit fields:
1169 * 24: 1 = Channel 0 is idle
1170 *
1171 * FH49_MEM_RSSR_SHARED_CTRL_REG and FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
1172 * contain default values that should not be altered by the driver.
1173 */
1174#define FH49_MEM_RSSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xC40)
1175#define FH49_MEM_RSSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xD00)
1176
1177#define FH49_MEM_RSSR_SHARED_CTRL_REG (FH49_MEM_RSSR_LOWER_BOUND)
1178#define FH49_MEM_RSSR_RX_STATUS_REG (FH49_MEM_RSSR_LOWER_BOUND + 0x004)
1179#define FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
1180 (FH49_MEM_RSSR_LOWER_BOUND + 0x008)
1181
/* Bit 24 of FH49_MEM_RSSR_RX_STATUS_REG: channel 0 idle (see doc above). */
1182#define FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
1183
1184#define FH49_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
1185
1186/* TFDB Area - TFDs buffer table */
1187#define FH49_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
1188#define FH49_TFDIB_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x900)
1189#define FH49_TFDIB_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0x958)
/* Two 32-bit CTRL regs per channel, 8-byte stride: CTRL0 at +0, CTRL1 at +4. */
1190#define FH49_TFDIB_CTRL0_REG(_chnl) (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
1191#define FH49_TFDIB_CTRL1_REG(_chnl) (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
1192
1193/**
1194 * Transmit DMA Channel Control/Status Registers (TCSR)
1195 *
1196 * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
1197 * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
1198 * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
1199 *
1200 * To use a Tx DMA channel, driver must initialize its
1201 * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
1202 *
1203 * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1204 * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE
1205 *
1206 * All other bits should be 0.
1207 *
1208 * Bit fields:
1209 * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
1210 * '10' operate normally
1211 * 29- 4: Reserved, set to "0"
1212 * 3: Enable internal DMA requests (1, normal operation), disable (0)
1213 * 2- 0: Reserved, set to "0"
1214 */
1215#define FH49_TCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xD00)
1216#define FH49_TCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xE60)
1217
1218/* Find Control/Status reg for given Tx DMA/FIFO channel */
1219#define FH49_TCSR_CHNL_NUM (7)
1220#define FH50_TCSR_CHNL_NUM (8)
1221
/* Per-channel register triplet, 0x20-byte stride: CONFIG at +0, CREDIT at +4,
 * BUF_STS at +8. */
1222/* TCSR: tx_config register values */
1223#define FH49_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
1224 (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl))
1225#define FH49_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
1226 (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
1227#define FH49_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
1228 (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
1229
1230#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
1231#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
1232
/* Bit 3: enable internal DMA requests (normal operation; see doc above). */
1233#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
1234#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
1235
1236#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
1237#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
1238#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
1239
1240#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
1241#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
1242#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
1243
/* Bits 31-30 of the channel enable field (see bit-field doc above). */
1244#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
1245#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
1246#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
1247
1248#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
1249#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
1250#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
1251
1252#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
1253#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
1254
1255/**
1256 * Tx Shared Status Registers (TSSR)
1257 *
1258 * After stopping Tx DMA channel (writing 0 to
1259 * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
1260 * FH49_TSSR_TX_STATUS_REG until selected Tx channel is idle
1261 * (channel's buffers empty | no pending requests).
1262 *
1263 * Bit fields:
1264 * 31-24: 1 = Channel buffers empty (channel 7:0)
1265 * 23-16: 1 = No pending requests (channel 7:0)
1266 */
1267#define FH49_TSSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xEA0)
1268#define FH49_TSSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xEC0)
1269
1270#define FH49_TSSR_TX_STATUS_REG (FH49_TSSR_LOWER_BOUND + 0x010)
1271
1272/**
1273 * Bit fields for TSSR(Tx Shared Status & Control) error status register:
1274 * 31: Indicates an address error when accessed to internal memory
1275 * uCode/driver must write "1" in order to clear this flag
1276 * 30: Indicates that Host did not send the expected number of dwords to FH
1277 * uCode/driver must write "1" in order to clear this flag
1278 * 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA
1279 * command was received from the scheduler while the TRB was already full
1280 * with previous command
1281 * uCode/driver must write "1" in order to clear this flag
1282 * 7-0: Each status bit indicates a channel's TxCredit error. When an error
1283 * bit is set, it indicates that the FH has received a full indication
1284 * from the RTC TxFIFO and the current value of the TxCredit counter was
1285 * not equal to zero. This means that the credit mechanism was not
1286 * synchronized to the TxFIFO status
1287 * uCode/driver must write "1" in order to clear this flag
1288 */
1289#define FH49_TSSR_TX_ERROR_REG (FH49_TSSR_LOWER_BOUND + 0x018)
1290
/* Selects the "no pending requests" status bit (field 23-16 above) for the
 * given Tx channel. */
1291#define FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
1292
1293/* Tx service channels */
1294#define FH49_SRVC_CHNL (9)
1295#define FH49_SRVC_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x9C8)
1296#define FH49_SRVC_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0x9D0)
/* SRAM address reg for a service channel; indexing starts at channel 9
 * (FH49_SRVC_CHNL), one 32-bit word per channel within the bounds above. */
1297#define FH49_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
1298 (FH49_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
1299
1300#define FH49_TX_CHICKEN_BITS_REG (FH49_MEM_LOWER_BOUND + 0xE98)
1301/* Instruct FH to increment the retry count of a packet when
1302 * it is brought from memory to the TX FIFO.
1303 */
1304#define FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
1305
/* Keep Warm buffer size (host DRAM area the device keeps "warm"). */
1306/* Keep Warm Size */
1307#define IL_KW_SIZE 0x1000 /* 4k */
1308
1309#endif /* __il_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig
index aef65cd47661..05bd375cb845 100644
--- a/drivers/net/wireless/iwlegacy/Kconfig
+++ b/drivers/net/wireless/iwlegacy/Kconfig
@@ -1,4 +1,4 @@
1config IWLWIFI_LEGACY 1config IWLEGACY
2 tristate 2 tristate
3 select FW_LOADER 3 select FW_LOADER
4 select NEW_LEDS 4 select NEW_LEDS
@@ -7,13 +7,13 @@ config IWLWIFI_LEGACY
7 select MAC80211_LEDS 7 select MAC80211_LEDS
8 8
9menu "Debugging Options" 9menu "Debugging Options"
10 depends on IWLWIFI_LEGACY 10 depends on IWLEGACY
11 11
12config IWLWIFI_LEGACY_DEBUG 12config IWLEGACY_DEBUG
13 bool "Enable full debugging output in 4965 and 3945 drivers" 13 bool "Enable full debugging output in iwlegacy (iwl 3945/4965) drivers"
14 depends on IWLWIFI_LEGACY 14 depends on IWLEGACY
15 ---help--- 15 ---help---
16 This option will enable debug tracing output for the iwlwifilegacy 16 This option will enable debug tracing output for the iwlegacy
17 drivers. 17 drivers.
18 18
19 This will result in the kernel module being ~100k larger. You can 19 This will result in the kernel module being ~100k larger. You can
@@ -29,43 +29,26 @@ config IWLWIFI_LEGACY_DEBUG
29 % echo 0x43fff > /sys/class/net/wlan0/device/debug_level 29 % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
30 30
31 You can find the list of debug mask values in: 31 You can find the list of debug mask values in:
32 drivers/net/wireless/iwlwifilegacy/iwl-debug.h 32 drivers/net/wireless/iwlegacy/common.h
33 33
34 If this is your first time using this driver, you should say Y here 34 If this is your first time using this driver, you should say Y here
35 as the debug information can assist others in helping you resolve 35 as the debug information can assist others in helping you resolve
36 any problems you may encounter. 36 any problems you may encounter.
37 37
38config IWLWIFI_LEGACY_DEBUGFS 38config IWLEGACY_DEBUGFS
39 bool "4965 and 3945 debugfs support" 39 bool "iwlegacy (iwl 3945/4965) debugfs support"
40 depends on IWLWIFI_LEGACY && MAC80211_DEBUGFS 40 depends on IWLEGACY && MAC80211_DEBUGFS
41 ---help--- 41 ---help---
42 Enable creation of debugfs files for the iwlwifilegacy drivers. This 42 Enable creation of debugfs files for the iwlegacy drivers. This
43 is a low-impact option that allows getting insight into the 43 is a low-impact option that allows getting insight into the
44 driver's state at runtime. 44 driver's state at runtime.
45 45
46config IWLWIFI_LEGACY_DEVICE_TRACING
47 bool "iwlwifilegacy legacy device access tracing"
48 depends on IWLWIFI_LEGACY
49 depends on EVENT_TRACING
50 help
51 Say Y here to trace all commands, including TX frames and IO
52 accesses, sent to the device. If you say yes, iwlwifilegacy will
53 register with the ftrace framework for event tracing and dump
54 all this information to the ringbuffer, you may need to
55 increase the ringbuffer size. See the ftrace documentation
56 for more information.
57
58 When tracing is not enabled, this option still has some
59 (though rather small) overhead.
60
61 If unsure, say Y so we can help you better when problems
62 occur.
63endmenu 46endmenu
64 47
65config IWL4965 48config IWL4965
66 tristate "Intel Wireless WiFi 4965AGN (iwl4965)" 49 tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
67 depends on PCI && MAC80211 50 depends on PCI && MAC80211
68 select IWLWIFI_LEGACY 51 select IWLEGACY
69 ---help--- 52 ---help---
70 This option enables support for 53 This option enables support for
71 54
@@ -93,7 +76,7 @@ config IWL4965
93config IWL3945 76config IWL3945
94 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)" 77 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
95 depends on PCI && MAC80211 78 depends on PCI && MAC80211
96 select IWLWIFI_LEGACY 79 select IWLEGACY
97 ---help--- 80 ---help---
98 Select to build the driver supporting the: 81 Select to build the driver supporting the:
99 82
diff --git a/drivers/net/wireless/iwlegacy/Makefile b/drivers/net/wireless/iwlegacy/Makefile
index d56aeb38c211..c985a01a0731 100644
--- a/drivers/net/wireless/iwlegacy/Makefile
+++ b/drivers/net/wireless/iwlegacy/Makefile
@@ -1,25 +1,17 @@
1obj-$(CONFIG_IWLWIFI_LEGACY) += iwl-legacy.o 1obj-$(CONFIG_IWLEGACY) += iwlegacy.o
2iwl-legacy-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o 2iwlegacy-objs := common.o
3iwl-legacy-objs += iwl-rx.o iwl-tx.o iwl-sta.o 3iwlegacy-$(CONFIG_IWLEGACY_DEBUGFS) += debug.o
4iwl-legacy-objs += iwl-scan.o iwl-led.o
5iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-debugfs.o
6iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) += iwl-devtrace.o
7 4
8iwl-legacy-objs += $(iwl-legacy-m) 5iwlegacy-objs += $(iwlegacy-m)
9
10CFLAGS_iwl-devtrace.o := -I$(src)
11 6
12# 4965 7# 4965
13obj-$(CONFIG_IWL4965) += iwl4965.o 8obj-$(CONFIG_IWL4965) += iwl4965.o
14iwl4965-objs := iwl-4965.o iwl4965-base.o iwl-4965-rs.o iwl-4965-led.o 9iwl4965-objs := 4965.o 4965-mac.o 4965-rs.o 4965-calib.o
15iwl4965-objs += iwl-4965-ucode.o iwl-4965-tx.o 10iwl4965-$(CONFIG_IWLEGACY_DEBUGFS) += 4965-debug.o
16iwl4965-objs += iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o
17iwl4965-objs += iwl-4965-sta.o iwl-4965-eeprom.o
18iwl4965-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-4965-debugfs.o
19 11
20# 3945 12# 3945
21obj-$(CONFIG_IWL3945) += iwl3945.o 13obj-$(CONFIG_IWL3945) += iwl3945.o
22iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o 14iwl3945-objs := 3945-mac.o 3945.o 3945-rs.o
23iwl3945-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-3945-debugfs.o 15iwl3945-$(CONFIG_IWLEGACY_DEBUGFS) += 3945-debug.o
24 16
25ccflags-y += -D__CHECK_ENDIAN__ 17ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlegacy/iwl-commands.h b/drivers/net/wireless/iwlegacy/commands.h
index 89904054473f..25dd7d28d022 100644
--- a/drivers/net/wireless/iwlegacy/iwl-commands.h
+++ b/drivers/net/wireless/iwlegacy/commands.h
@@ -60,100 +60,96 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63/*
64 * Please use this file (iwl-commands.h) only for uCode API definitions.
65 * Please use iwl-xxxx-hw.h for hardware-related definitions.
66 * Please use iwl-dev.h for driver implementation definitions.
67 */
68 63
69#ifndef __iwl_legacy_commands_h__ 64#ifndef __il_commands_h__
70#define __iwl_legacy_commands_h__ 65#define __il_commands_h__
71 66
72struct iwl_priv; 67#include <linux/ieee80211.h>
73 68
74/* uCode version contains 4 values: Major/Minor/API/Serial */ 69struct il_priv;
75#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
76#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
77#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
78#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
79 70
71/* uCode version contains 4 values: Major/Minor/API/Serial */
72#define IL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
73#define IL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
74#define IL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
75#define IL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
80 76
81/* Tx rates */ 77/* Tx rates */
82#define IWL_CCK_RATES 4 78#define IL_CCK_RATES 4
83#define IWL_OFDM_RATES 8 79#define IL_OFDM_RATES 8
84#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES) 80#define IL_MAX_RATES (IL_CCK_RATES + IL_OFDM_RATES)
85 81
86enum { 82enum {
87 REPLY_ALIVE = 0x1, 83 N_ALIVE = 0x1,
88 REPLY_ERROR = 0x2, 84 N_ERROR = 0x2,
89 85
90 /* RXON and QOS commands */ 86 /* RXON and QOS commands */
91 REPLY_RXON = 0x10, 87 C_RXON = 0x10,
92 REPLY_RXON_ASSOC = 0x11, 88 C_RXON_ASSOC = 0x11,
93 REPLY_QOS_PARAM = 0x13, 89 C_QOS_PARAM = 0x13,
94 REPLY_RXON_TIMING = 0x14, 90 C_RXON_TIMING = 0x14,
95 91
96 /* Multi-Station support */ 92 /* Multi-Station support */
97 REPLY_ADD_STA = 0x18, 93 C_ADD_STA = 0x18,
98 REPLY_REMOVE_STA = 0x19, 94 C_REM_STA = 0x19,
99 95
100 /* Security */ 96 /* Security */
101 REPLY_WEPKEY = 0x20, 97 C_WEPKEY = 0x20,
102 98
103 /* RX, TX, LEDs */ 99 /* RX, TX, LEDs */
104 REPLY_3945_RX = 0x1b, /* 3945 only */ 100 N_3945_RX = 0x1b, /* 3945 only */
105 REPLY_TX = 0x1c, 101 C_TX = 0x1c,
106 REPLY_RATE_SCALE = 0x47, /* 3945 only */ 102 C_RATE_SCALE = 0x47, /* 3945 only */
107 REPLY_LEDS_CMD = 0x48, 103 C_LEDS = 0x48,
108 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */ 104 C_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 */
109 105
110 /* 802.11h related */ 106 /* 802.11h related */
111 REPLY_CHANNEL_SWITCH = 0x72, 107 C_CHANNEL_SWITCH = 0x72,
112 CHANNEL_SWITCH_NOTIFICATION = 0x73, 108 N_CHANNEL_SWITCH = 0x73,
113 REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74, 109 C_SPECTRUM_MEASUREMENT = 0x74,
114 SPECTRUM_MEASURE_NOTIFICATION = 0x75, 110 N_SPECTRUM_MEASUREMENT = 0x75,
115 111
116 /* Power Management */ 112 /* Power Management */
117 POWER_TABLE_CMD = 0x77, 113 C_POWER_TBL = 0x77,
118 PM_SLEEP_NOTIFICATION = 0x7A, 114 N_PM_SLEEP = 0x7A,
119 PM_DEBUG_STATISTIC_NOTIFIC = 0x7B, 115 N_PM_DEBUG_STATS = 0x7B,
120 116
121 /* Scan commands and notifications */ 117 /* Scan commands and notifications */
122 REPLY_SCAN_CMD = 0x80, 118 C_SCAN = 0x80,
123 REPLY_SCAN_ABORT_CMD = 0x81, 119 C_SCAN_ABORT = 0x81,
124 SCAN_START_NOTIFICATION = 0x82, 120 N_SCAN_START = 0x82,
125 SCAN_RESULTS_NOTIFICATION = 0x83, 121 N_SCAN_RESULTS = 0x83,
126 SCAN_COMPLETE_NOTIFICATION = 0x84, 122 N_SCAN_COMPLETE = 0x84,
127 123
128 /* IBSS/AP commands */ 124 /* IBSS/AP commands */
129 BEACON_NOTIFICATION = 0x90, 125 N_BEACON = 0x90,
130 REPLY_TX_BEACON = 0x91, 126 C_TX_BEACON = 0x91,
131 127
132 /* Miscellaneous commands */ 128 /* Miscellaneous commands */
133 REPLY_TX_PWR_TABLE_CMD = 0x97, 129 C_TX_PWR_TBL = 0x97,
134 130
135 /* Bluetooth device coexistence config command */ 131 /* Bluetooth device coexistence config command */
136 REPLY_BT_CONFIG = 0x9b, 132 C_BT_CONFIG = 0x9b,
137 133
138 /* Statistics */ 134 /* Statistics */
139 REPLY_STATISTICS_CMD = 0x9c, 135 C_STATS = 0x9c,
140 STATISTICS_NOTIFICATION = 0x9d, 136 N_STATS = 0x9d,
141 137
142 /* RF-KILL commands and notifications */ 138 /* RF-KILL commands and notifications */
143 CARD_STATE_NOTIFICATION = 0xa1, 139 N_CARD_STATE = 0xa1,
144 140
145 /* Missed beacons notification */ 141 /* Missed beacons notification */
146 MISSED_BEACONS_NOTIFICATION = 0xa2, 142 N_MISSED_BEACONS = 0xa2,
147 143
148 REPLY_CT_KILL_CONFIG_CMD = 0xa4, 144 C_CT_KILL_CONFIG = 0xa4,
149 SENSITIVITY_CMD = 0xa8, 145 C_SENSITIVITY = 0xa8,
150 REPLY_PHY_CALIBRATION_CMD = 0xb0, 146 C_PHY_CALIBRATION = 0xb0,
151 REPLY_RX_PHY_CMD = 0xc0, 147 N_RX_PHY = 0xc0,
152 REPLY_RX_MPDU_CMD = 0xc1, 148 N_RX_MPDU = 0xc1,
153 REPLY_RX = 0xc3, 149 N_RX = 0xc3,
154 REPLY_COMPRESSED_BA = 0xc5, 150 N_COMPRESSED_BA = 0xc5,
155 151
156 REPLY_MAX = 0xff 152 IL_CN_MAX = 0xff
157}; 153};
158 154
159/****************************************************************************** 155/******************************************************************************
@@ -163,25 +159,25 @@ enum {
163 * 159 *
164 *****************************************************************************/ 160 *****************************************************************************/
165 161
166/* iwl_cmd_header flags value */ 162/* il_cmd_header flags value */
167#define IWL_CMD_FAILED_MSK 0x40 163#define IL_CMD_FAILED_MSK 0x40
168 164
169#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f) 165#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
170#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8) 166#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
171#define SEQ_TO_INDEX(s) ((s) & 0xff) 167#define SEQ_TO_IDX(s) ((s) & 0xff)
172#define INDEX_TO_SEQ(i) ((i) & 0xff) 168#define IDX_TO_SEQ(i) ((i) & 0xff)
173#define SEQ_HUGE_FRAME cpu_to_le16(0x4000) 169#define SEQ_HUGE_FRAME cpu_to_le16(0x4000)
174#define SEQ_RX_FRAME cpu_to_le16(0x8000) 170#define SEQ_RX_FRAME cpu_to_le16(0x8000)
175 171
176/** 172/**
177 * struct iwl_cmd_header 173 * struct il_cmd_header
178 * 174 *
179 * This header format appears in the beginning of each command sent from the 175 * This header format appears in the beginning of each command sent from the
180 * driver, and each response/notification received from uCode. 176 * driver, and each response/notification received from uCode.
181 */ 177 */
182struct iwl_cmd_header { 178struct il_cmd_header {
183 u8 cmd; /* Command ID: REPLY_RXON, etc. */ 179 u8 cmd; /* Command ID: C_RXON, etc. */
184 u8 flags; /* 0:5 reserved, 6 abort, 7 internal */ 180 u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
185 /* 181 /*
186 * The driver sets up the sequence number to values of its choosing. 182 * The driver sets up the sequence number to values of its choosing.
187 * uCode does not use this value, but passes it back to the driver 183 * uCode does not use this value, but passes it back to the driver
@@ -192,29 +188,28 @@ struct iwl_cmd_header {
192 * There is one exception: uCode sets bit 15 when it originates 188 * There is one exception: uCode sets bit 15 when it originates
193 * the response/notification, i.e. when the response/notification 189 * the response/notification, i.e. when the response/notification
194 * is not a direct response to a command sent by the driver. For 190 * is not a direct response to a command sent by the driver. For
195 * example, uCode issues REPLY_3945_RX when it sends a received frame 191 * example, uCode issues N_3945_RX when it sends a received frame
196 * to the driver; it is not a direct response to any driver command. 192 * to the driver; it is not a direct response to any driver command.
197 * 193 *
198 * The Linux driver uses the following format: 194 * The Linux driver uses the following format:
199 * 195 *
200 * 0:7 tfd index - position within TX queue 196 * 0:7 tfd idx - position within TX queue
201 * 8:12 TX queue id 197 * 8:12 TX queue id
202 * 13 reserved 198 * 13 reserved
203 * 14 huge - driver sets this to indicate command is in the 199 * 14 huge - driver sets this to indicate command is in the
204 * 'huge' storage at the end of the command buffers 200 * 'huge' storage at the end of the command buffers
205 * 15 unsolicited RX or uCode-originated notification 201 * 15 unsolicited RX or uCode-originated notification
206 */ 202 */
207 __le16 sequence; 203 __le16 sequence;
208 204
209 /* command or response/notification data follows immediately */ 205 /* command or response/notification data follows immediately */
210 u8 data[0]; 206 u8 data[0];
211} __packed; 207} __packed;
212 208
213
214/** 209/**
215 * struct iwl3945_tx_power 210 * struct il3945_tx_power
216 * 211 *
217 * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH 212 * Used in C_TX_PWR_TBL, C_SCAN, C_CHANNEL_SWITCH
218 * 213 *
219 * Each entry contains two values: 214 * Each entry contains two values:
220 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained 215 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
@@ -223,21 +218,21 @@ struct iwl_cmd_header {
223 * 2) Radio gain. This sets the analog gain of the radio Tx path. 218 * 2) Radio gain. This sets the analog gain of the radio Tx path.
224 * It is a coarser setting, and behaves in a logarithmic (dB) fashion. 219 * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
225 * 220 *
226 * Driver obtains values from struct iwl3945_tx_power power_gain_table[][]. 221 * Driver obtains values from struct il3945_tx_power power_gain_table[][].
227 */ 222 */
228struct iwl3945_tx_power { 223struct il3945_tx_power {
229 u8 tx_gain; /* gain for analog radio */ 224 u8 tx_gain; /* gain for analog radio */
230 u8 dsp_atten; /* gain for DSP */ 225 u8 dsp_atten; /* gain for DSP */
231} __packed; 226} __packed;
232 227
233/** 228/**
234 * struct iwl3945_power_per_rate 229 * struct il3945_power_per_rate
235 * 230 *
236 * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH 231 * Used in C_TX_PWR_TBL, C_CHANNEL_SWITCH
237 */ 232 */
238struct iwl3945_power_per_rate { 233struct il3945_power_per_rate {
239 u8 rate; /* plcp */ 234 u8 rate; /* plcp */
240 struct iwl3945_tx_power tpc; 235 struct il3945_tx_power tpc;
241 u8 reserved; 236 u8 reserved;
242} __packed; 237} __packed;
243 238
@@ -245,10 +240,10 @@ struct iwl3945_power_per_rate {
245 * iwl4965 rate_n_flags bit fields 240 * iwl4965 rate_n_flags bit fields
246 * 241 *
247 * rate_n_flags format is used in following iwl4965 commands: 242 * rate_n_flags format is used in following iwl4965 commands:
248 * REPLY_RX (response only) 243 * N_RX (response only)
249 * REPLY_RX_MPDU (response only) 244 * N_RX_MPDU (response only)
250 * REPLY_TX (both command and response) 245 * C_TX (both command and response)
251 * REPLY_TX_LINK_QUALITY_CMD 246 * C_TX_LINK_QUALITY_CMD
252 * 247 *
253 * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"): 248 * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
254 * 2-0: 0) 6 Mbps 249 * 2-0: 0) 6 Mbps
@@ -326,17 +321,17 @@ struct iwl3945_power_per_rate {
326#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK) 321#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
327#define RATE_ANT_NUM 3 322#define RATE_ANT_NUM 3
328 323
329#define POWER_TABLE_NUM_ENTRIES 33 324#define POWER_TBL_NUM_ENTRIES 33
330#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32 325#define POWER_TBL_NUM_HT_OFDM_ENTRIES 32
331#define POWER_TABLE_CCK_ENTRY 32 326#define POWER_TBL_CCK_ENTRY 32
332 327
333#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24 328#define IL_PWR_NUM_HT_OFDM_ENTRIES 24
334#define IWL_PWR_CCK_ENTRIES 2 329#define IL_PWR_CCK_ENTRIES 2
335 330
336/** 331/**
337 * union iwl4965_tx_power_dual_stream 332 * union il4965_tx_power_dual_stream
338 * 333 *
339 * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH 334 * Host format used for C_TX_PWR_TBL, C_CHANNEL_SWITCH
340 * Use __le32 version (struct tx_power_dual_stream) when building command. 335 * Use __le32 version (struct tx_power_dual_stream) when building command.
341 * 336 *
342 * Driver provides radio gain and DSP attenuation settings to device in pairs, 337 * Driver provides radio gain and DSP attenuation settings to device in pairs,
@@ -347,9 +342,9 @@ struct iwl3945_power_per_rate {
347 * For MIMO rates, one value may be different from the other, 342 * For MIMO rates, one value may be different from the other,
348 * in order to balance the Tx output between the two transmitters. 343 * in order to balance the Tx output between the two transmitters.
349 * 344 *
350 * See more details in doc for TXPOWER in iwl-4965-hw.h. 345 * See more details in doc for TXPOWER in 4965.h.
351 */ 346 */
352union iwl4965_tx_power_dual_stream { 347union il4965_tx_power_dual_stream {
353 struct { 348 struct {
354 u8 radio_tx_gain[2]; 349 u8 radio_tx_gain[2];
355 u8 dsp_predis_atten[2]; 350 u8 dsp_predis_atten[2];
@@ -360,21 +355,21 @@ union iwl4965_tx_power_dual_stream {
360/** 355/**
361 * struct tx_power_dual_stream 356 * struct tx_power_dual_stream
362 * 357 *
363 * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH 358 * Table entries in C_TX_PWR_TBL, C_CHANNEL_SWITCH
364 * 359 *
365 * Same format as iwl_tx_power_dual_stream, but __le32 360 * Same format as il_tx_power_dual_stream, but __le32
366 */ 361 */
367struct tx_power_dual_stream { 362struct tx_power_dual_stream {
368 __le32 dw; 363 __le32 dw;
369} __packed; 364} __packed;
370 365
371/** 366/**
372 * struct iwl4965_tx_power_db 367 * struct il4965_tx_power_db
373 * 368 *
374 * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH 369 * Entire table within C_TX_PWR_TBL, C_CHANNEL_SWITCH
375 */ 370 */
376struct iwl4965_tx_power_db { 371struct il4965_tx_power_db {
377 struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES]; 372 struct tx_power_dual_stream power_tbl[POWER_TBL_NUM_ENTRIES];
378} __packed; 373} __packed;
379 374
380/****************************************************************************** 375/******************************************************************************
@@ -387,7 +382,7 @@ struct iwl4965_tx_power_db {
387#define INITIALIZE_SUBTYPE (9) 382#define INITIALIZE_SUBTYPE (9)
388 383
389/* 384/*
390 * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command) 385 * ("Initialize") N_ALIVE = 0x1 (response only, not a command)
391 * 386 *
392 * uCode issues this "initialize alive" notification once the initialization 387 * uCode issues this "initialize alive" notification once the initialization
393 * uCode image has completed its work, and is ready to load the runtime image. 388 * uCode image has completed its work, and is ready to load the runtime image.
@@ -410,7 +405,7 @@ struct iwl4965_tx_power_db {
410 * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation, 405 * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
411 * for each of 5 frequency ranges. 406 * for each of 5 frequency ranges.
412 */ 407 */
413struct iwl_init_alive_resp { 408struct il_init_alive_resp {
414 u8 ucode_minor; 409 u8 ucode_minor;
415 u8 ucode_major; 410 u8 ucode_major;
416 __le16 reserved1; 411 __le16 reserved1;
@@ -433,9 +428,8 @@ struct iwl_init_alive_resp {
433 * 2 Tx chains */ 428 * 2 Tx chains */
434} __packed; 429} __packed;
435 430
436
437/** 431/**
438 * REPLY_ALIVE = 0x1 (response only, not a command) 432 * N_ALIVE = 0x1 (response only, not a command)
439 * 433 *
440 * uCode issues this "alive" notification once the runtime image is ready 434 * uCode issues this "alive" notification once the runtime image is ready
441 * to receive commands from the driver. This is the *second* "alive" 435 * to receive commands from the driver. This is the *second* "alive"
@@ -454,7 +448,7 @@ struct iwl_init_alive_resp {
454 * __le32 log_size; log capacity (in number of entries) 448 * __le32 log_size; log capacity (in number of entries)
455 * __le32 type; (1) timestamp with each entry, (0) no timestamp 449 * __le32 type; (1) timestamp with each entry, (0) no timestamp
456 * __le32 wraps; # times uCode has wrapped to top of circular buffer 450 * __le32 wraps; # times uCode has wrapped to top of circular buffer
457 * __le32 write_index; next circular buffer entry that uCode would fill 451 * __le32 write_idx; next circular buffer entry that uCode would fill
458 * 452 *
459 * The header is followed by the circular buffer of log entries. Entries 453 * The header is followed by the circular buffer of log entries. Entries
460 * with timestamps have the following format: 454 * with timestamps have the following format:
@@ -511,13 +505,13 @@ struct iwl_init_alive_resp {
511 * The Linux driver can print both logs to the system log when a uCode error 505 * The Linux driver can print both logs to the system log when a uCode error
512 * occurs. 506 * occurs.
513 */ 507 */
514struct iwl_alive_resp { 508struct il_alive_resp {
515 u8 ucode_minor; 509 u8 ucode_minor;
516 u8 ucode_major; 510 u8 ucode_major;
517 __le16 reserved1; 511 __le16 reserved1;
518 u8 sw_rev[8]; 512 u8 sw_rev[8];
519 u8 ver_type; 513 u8 ver_type;
520 u8 ver_subtype; /* not "9" for runtime alive */ 514 u8 ver_subtype; /* not "9" for runtime alive */
521 __le16 reserved2; 515 __le16 reserved2;
522 __le32 log_event_table_ptr; /* SRAM address for event log */ 516 __le32 log_event_table_ptr; /* SRAM address for event log */
523 __le32 error_event_table_ptr; /* SRAM address for error log */ 517 __le32 error_event_table_ptr; /* SRAM address for error log */
@@ -526,9 +520,9 @@ struct iwl_alive_resp {
526} __packed; 520} __packed;
527 521
528/* 522/*
529 * REPLY_ERROR = 0x2 (response only, not a command) 523 * N_ERROR = 0x2 (response only, not a command)
530 */ 524 */
531struct iwl_error_resp { 525struct il_error_resp {
532 __le32 error_type; 526 __le32 error_type;
533 u8 cmd_id; 527 u8 cmd_id;
534 u8 reserved1; 528 u8 reserved1;
@@ -554,7 +548,6 @@ enum {
554 RXON_DEV_TYPE_SNIFFER = 6, 548 RXON_DEV_TYPE_SNIFFER = 6,
555}; 549};
556 550
557
558#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0) 551#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0)
559#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0) 552#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0)
560#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1) 553#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1)
@@ -593,7 +586,6 @@ enum {
593* (according to ON_AIR deassertion) */ 586* (according to ON_AIR deassertion) */
594#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15) 587#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15)
595 588
596
597/* HT flags */ 589/* HT flags */
598#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22) 590#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22)
599#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22) 591#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22)
@@ -640,7 +632,7 @@ enum {
640#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6) 632#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6)
641 633
642/** 634/**
643 * REPLY_RXON = 0x10 (command, has simple generic response) 635 * C_RXON = 0x10 (command, has simple generic response)
644 * 636 *
645 * RXON tunes the radio tuner to a service channel, and sets up a number 637 * RXON tunes the radio tuner to a service channel, and sets up a number
646 * of parameters that are used primarily for Rx, but also for Tx operations. 638 * of parameters that are used primarily for Rx, but also for Tx operations.
@@ -653,11 +645,11 @@ enum {
653 * channel. 645 * channel.
654 * 646 *
655 * NOTE: All RXONs wipe clean the internal txpower table. Driver must 647 * NOTE: All RXONs wipe clean the internal txpower table. Driver must
656 * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10), 648 * issue a new C_TX_PWR_TBL after each C_RXON (0x10),
657 * regardless of whether RXON_FILTER_ASSOC_MSK is set. 649 * regardless of whether RXON_FILTER_ASSOC_MSK is set.
658 */ 650 */
659 651
660struct iwl3945_rxon_cmd { 652struct il3945_rxon_cmd {
661 u8 node_addr[6]; 653 u8 node_addr[6];
662 __le16 reserved1; 654 __le16 reserved1;
663 u8 bssid_addr[6]; 655 u8 bssid_addr[6];
@@ -676,7 +668,7 @@ struct iwl3945_rxon_cmd {
676 __le16 reserved5; 668 __le16 reserved5;
677} __packed; 669} __packed;
678 670
679struct iwl4965_rxon_cmd { 671struct il4965_rxon_cmd {
680 u8 node_addr[6]; 672 u8 node_addr[6];
681 __le16 reserved1; 673 __le16 reserved1;
682 u8 bssid_addr[6]; 674 u8 bssid_addr[6];
@@ -699,7 +691,7 @@ struct iwl4965_rxon_cmd {
699/* Create a common rxon cmd which will be typecast into the 3945 or 4965 691/* Create a common rxon cmd which will be typecast into the 3945 or 4965
700 * specific rxon cmd, depending on where it is called from. 692 * specific rxon cmd, depending on where it is called from.
701 */ 693 */
702struct iwl_legacy_rxon_cmd { 694struct il_rxon_cmd {
703 u8 node_addr[6]; 695 u8 node_addr[6];
704 __le16 reserved1; 696 __le16 reserved1;
705 u8 bssid_addr[6]; 697 u8 bssid_addr[6];
@@ -721,11 +713,10 @@ struct iwl_legacy_rxon_cmd {
721 u8 reserved5; 713 u8 reserved5;
722} __packed; 714} __packed;
723 715
724
725/* 716/*
726 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response) 717 * C_RXON_ASSOC = 0x11 (command, has simple generic response)
727 */ 718 */
728struct iwl3945_rxon_assoc_cmd { 719struct il3945_rxon_assoc_cmd {
729 __le32 flags; 720 __le32 flags;
730 __le32 filter_flags; 721 __le32 filter_flags;
731 u8 ofdm_basic_rates; 722 u8 ofdm_basic_rates;
@@ -733,7 +724,7 @@ struct iwl3945_rxon_assoc_cmd {
733 __le16 reserved; 724 __le16 reserved;
734} __packed; 725} __packed;
735 726
736struct iwl4965_rxon_assoc_cmd { 727struct il4965_rxon_assoc_cmd {
737 __le32 flags; 728 __le32 flags;
738 __le32 filter_flags; 729 __le32 filter_flags;
739 u8 ofdm_basic_rates; 730 u8 ofdm_basic_rates;
@@ -744,17 +735,17 @@ struct iwl4965_rxon_assoc_cmd {
744 __le16 reserved; 735 __le16 reserved;
745} __packed; 736} __packed;
746 737
747#define IWL_CONN_MAX_LISTEN_INTERVAL 10 738#define IL_CONN_MAX_LISTEN_INTERVAL 10
748#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */ 739#define IL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
749#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */ 740#define IL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
750 741
751/* 742/*
752 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response) 743 * C_RXON_TIMING = 0x14 (command, has simple generic response)
753 */ 744 */
754struct iwl_rxon_time_cmd { 745struct il_rxon_time_cmd {
755 __le64 timestamp; 746 __le64 timestamp;
756 __le16 beacon_interval; 747 __le16 beacon_interval;
757 __le16 atim_window; 748 __le16 atim_win;
758 __le32 beacon_init_val; 749 __le32 beacon_init_val;
759 __le16 listen_interval; 750 __le16 listen_interval;
760 u8 dtim_period; 751 u8 dtim_period;
@@ -762,32 +753,32 @@ struct iwl_rxon_time_cmd {
762} __packed; 753} __packed;
763 754
764/* 755/*
765 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response) 756 * C_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
766 */ 757 */
767struct iwl3945_channel_switch_cmd { 758struct il3945_channel_switch_cmd {
768 u8 band; 759 u8 band;
769 u8 expect_beacon; 760 u8 expect_beacon;
770 __le16 channel; 761 __le16 channel;
771 __le32 rxon_flags; 762 __le32 rxon_flags;
772 __le32 rxon_filter_flags; 763 __le32 rxon_filter_flags;
773 __le32 switch_time; 764 __le32 switch_time;
774 struct iwl3945_power_per_rate power[IWL_MAX_RATES]; 765 struct il3945_power_per_rate power[IL_MAX_RATES];
775} __packed; 766} __packed;
776 767
777struct iwl4965_channel_switch_cmd { 768struct il4965_channel_switch_cmd {
778 u8 band; 769 u8 band;
779 u8 expect_beacon; 770 u8 expect_beacon;
780 __le16 channel; 771 __le16 channel;
781 __le32 rxon_flags; 772 __le32 rxon_flags;
782 __le32 rxon_filter_flags; 773 __le32 rxon_filter_flags;
783 __le32 switch_time; 774 __le32 switch_time;
784 struct iwl4965_tx_power_db tx_power; 775 struct il4965_tx_power_db tx_power;
785} __packed; 776} __packed;
786 777
787/* 778/*
788 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command) 779 * N_CHANNEL_SWITCH = 0x73 (notification only, not a command)
789 */ 780 */
790struct iwl_csa_notification { 781struct il_csa_notification {
791 __le16 band; 782 __le16 band;
792 __le16 channel; 783 __le16 channel;
793 __le32 status; /* 0 - OK, 1 - fail */ 784 __le32 status; /* 0 - OK, 1 - fail */
@@ -800,22 +791,22 @@ struct iwl_csa_notification {
800 *****************************************************************************/ 791 *****************************************************************************/
801 792
802/** 793/**
803 * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM 794 * struct il_ac_qos -- QOS timing params for C_QOS_PARAM
804 * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd 795 * One for each of 4 EDCA access categories in struct il_qosparam_cmd
805 * 796 *
806 * @cw_min: Contention window, start value in numbers of slots. 797 * @cw_min: Contention win, start value in numbers of slots.
807 * Should be a power-of-2, minus 1. Device's default is 0x0f. 798 * Should be a power-of-2, minus 1. Device's default is 0x0f.
808 * @cw_max: Contention window, max value in numbers of slots. 799 * @cw_max: Contention win, max value in numbers of slots.
809 * Should be a power-of-2, minus 1. Device's default is 0x3f. 800 * Should be a power-of-2, minus 1. Device's default is 0x3f.
810 * @aifsn: Number of slots in Arbitration Interframe Space (before 801 * @aifsn: Number of slots in Arbitration Interframe Space (before
811 * performing random backoff timing prior to Tx). Device default 1. 802 * performing random backoff timing prior to Tx). Device default 1.
812 * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0. 803 * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
813 * 804 *
814 * Device will automatically increase contention window by (2*CW) + 1 for each 805 * Device will automatically increase contention win by (2*CW) + 1 for each
815 * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW 806 * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
816 * value, to cap the CW value. 807 * value, to cap the CW value.
817 */ 808 */
818struct iwl_ac_qos { 809struct il_ac_qos {
819 __le16 cw_min; 810 __le16 cw_min;
820 __le16 cw_max; 811 __le16 cw_max;
821 u8 aifsn; 812 u8 aifsn;
@@ -832,14 +823,14 @@ struct iwl_ac_qos {
832#define AC_NUM 4 823#define AC_NUM 4
833 824
834/* 825/*
835 * REPLY_QOS_PARAM = 0x13 (command, has simple generic response) 826 * C_QOS_PARAM = 0x13 (command, has simple generic response)
836 * 827 *
837 * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs 828 * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
838 * 0: Background, 1: Best Effort, 2: Video, 3: Voice. 829 * 0: Background, 1: Best Effort, 2: Video, 3: Voice.
839 */ 830 */
840struct iwl_qosparam_cmd { 831struct il_qosparam_cmd {
841 __le32 qos_flags; 832 __le32 qos_flags;
842 struct iwl_ac_qos ac[AC_NUM]; 833 struct il_ac_qos ac[AC_NUM];
843} __packed; 834} __packed;
844 835
845/****************************************************************************** 836/******************************************************************************
@@ -852,15 +843,15 @@ struct iwl_qosparam_cmd {
852 */ 843 */
853 844
854/* Special, dedicated locations within device's station table */ 845/* Special, dedicated locations within device's station table */
855#define IWL_AP_ID 0 846#define IL_AP_ID 0
856#define IWL_STA_ID 2 847#define IL_STA_ID 2
857#define IWL3945_BROADCAST_ID 24 848#define IL3945_BROADCAST_ID 24
858#define IWL3945_STATION_COUNT 25 849#define IL3945_STATION_COUNT 25
859#define IWL4965_BROADCAST_ID 31 850#define IL4965_BROADCAST_ID 31
860#define IWL4965_STATION_COUNT 32 851#define IL4965_STATION_COUNT 32
861 852
862#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/ 853#define IL_STATION_COUNT 32 /* MAX(3945,4965) */
863#define IWL_INVALID_STATION 255 854#define IL_INVALID_STATION 255
864 855
865#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2) 856#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
866#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8) 857#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
@@ -901,11 +892,11 @@ struct iwl_qosparam_cmd {
901#define STA_MODIFY_DELBA_TID_MSK 0x10 892#define STA_MODIFY_DELBA_TID_MSK 0x10
902#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20 893#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20
903 894
904/* Receiver address (actually, Rx station's index into station table), 895/* Receiver address (actually, Rx station's idx into station table),
905 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ 896 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
906#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) 897#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
907 898
908struct iwl4965_keyinfo { 899struct il4965_keyinfo {
909 __le16 key_flags; 900 __le16 key_flags;
910 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */ 901 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
911 u8 reserved1; 902 u8 reserved1;
@@ -918,12 +909,12 @@ struct iwl4965_keyinfo {
918/** 909/**
919 * struct sta_id_modify 910 * struct sta_id_modify
920 * @addr[ETH_ALEN]: station's MAC address 911 * @addr[ETH_ALEN]: station's MAC address
921 * @sta_id: index of station in uCode's station table 912 * @sta_id: idx of station in uCode's station table
922 * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change 913 * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
923 * 914 *
924 * Driver selects unused table index when adding new station, 915 * Driver selects unused table idx when adding new station,
925 * or the index to a pre-existing station entry when modifying that station. 916 * or the idx to a pre-existing station entry when modifying that station.
926 * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP). 917 * Some idxes have special purposes (IL_AP_ID, idx 0, is for AP).
927 * 918 *
928 * modify_mask flags select which parameters to modify vs. leave alone. 919 * modify_mask flags select which parameters to modify vs. leave alone.
929 */ 920 */
@@ -936,15 +927,15 @@ struct sta_id_modify {
936} __packed; 927} __packed;
937 928
938/* 929/*
939 * REPLY_ADD_STA = 0x18 (command) 930 * C_ADD_STA = 0x18 (command)
940 * 931 *
941 * The device contains an internal table of per-station information, 932 * The device contains an internal table of per-station information,
942 * with info on security keys, aggregation parameters, and Tx rates for 933 * with info on security keys, aggregation parameters, and Tx rates for
943 * initial Tx attempt and any retries (4965 devices uses 934 * initial Tx attempt and any retries (4965 devices uses
944 * REPLY_TX_LINK_QUALITY_CMD, 935 * C_TX_LINK_QUALITY_CMD,
945 * 3945 uses REPLY_RATE_SCALE to set up rate tables). 936 * 3945 uses C_RATE_SCALE to set up rate tables).
946 * 937 *
947 * REPLY_ADD_STA sets up the table entry for one station, either creating 938 * C_ADD_STA sets up the table entry for one station, either creating
948 * a new entry, or modifying a pre-existing one. 939 * a new entry, or modifying a pre-existing one.
949 * 940 *
950 * NOTE: RXON command (without "associated" bit set) wipes the station table 941 * NOTE: RXON command (without "associated" bit set) wipes the station table
@@ -954,20 +945,20 @@ struct sta_id_modify {
954 * their own txpower/rate setup data). 945 * their own txpower/rate setup data).
955 * 946 *
956 * When getting started on a new channel, driver must set up the 947 * When getting started on a new channel, driver must set up the
957 * IWL_BROADCAST_ID entry (last entry in the table). For a client 948 * IL_BROADCAST_ID entry (last entry in the table). For a client
958 * station in a BSS, once an AP is selected, driver sets up the AP STA 949 * station in a BSS, once an AP is selected, driver sets up the AP STA
959 * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP 950 * in the IL_AP_ID entry (1st entry in the table). BROADCAST and AP
960 * are all that are needed for a BSS client station. If the device is 951 * are all that are needed for a BSS client station. If the device is
961 * used as AP, or in an IBSS network, driver must set up station table 952 * used as AP, or in an IBSS network, driver must set up station table
962 * entries for all STAs in network, starting with index IWL_STA_ID. 953 * entries for all STAs in network, starting with idx IL_STA_ID.
963 */ 954 */
964 955
965struct iwl3945_addsta_cmd { 956struct il3945_addsta_cmd {
966 u8 mode; /* 1: modify existing, 0: add new station */ 957 u8 mode; /* 1: modify existing, 0: add new station */
967 u8 reserved[3]; 958 u8 reserved[3];
968 struct sta_id_modify sta; 959 struct sta_id_modify sta;
969 struct iwl4965_keyinfo key; 960 struct il4965_keyinfo key;
970 __le32 station_flags; /* STA_FLG_* */ 961 __le32 station_flags; /* STA_FLG_* */
971 __le32 station_flags_msk; /* STA_FLG_* */ 962 __le32 station_flags_msk; /* STA_FLG_* */
972 963
973 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) 964 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -990,12 +981,12 @@ struct iwl3945_addsta_cmd {
990 __le16 add_immediate_ba_ssn; 981 __le16 add_immediate_ba_ssn;
991} __packed; 982} __packed;
992 983
993struct iwl4965_addsta_cmd { 984struct il4965_addsta_cmd {
994 u8 mode; /* 1: modify existing, 0: add new station */ 985 u8 mode; /* 1: modify existing, 0: add new station */
995 u8 reserved[3]; 986 u8 reserved[3];
996 struct sta_id_modify sta; 987 struct sta_id_modify sta;
997 struct iwl4965_keyinfo key; 988 struct il4965_keyinfo key;
998 __le32 station_flags; /* STA_FLG_* */ 989 __le32 station_flags; /* STA_FLG_* */
999 __le32 station_flags_msk; /* STA_FLG_* */ 990 __le32 station_flags_msk; /* STA_FLG_* */
1000 991
1001 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) 992 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -1003,7 +994,7 @@ struct iwl4965_addsta_cmd {
1003 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ 994 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
1004 __le16 tid_disable_tx; 995 __le16 tid_disable_tx;
1005 996
1006 __le16 reserved1; 997 __le16 reserved1;
1007 998
1008 /* TID for which to add block-ack support. 999 /* TID for which to add block-ack support.
1009 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ 1000 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1028,12 +1019,12 @@ struct iwl4965_addsta_cmd {
1028} __packed; 1019} __packed;
1029 1020
1030/* Wrapper struct for 3945 and 4965 addsta_cmd structures */ 1021/* Wrapper struct for 3945 and 4965 addsta_cmd structures */
1031struct iwl_legacy_addsta_cmd { 1022struct il_addsta_cmd {
1032 u8 mode; /* 1: modify existing, 0: add new station */ 1023 u8 mode; /* 1: modify existing, 0: add new station */
1033 u8 reserved[3]; 1024 u8 reserved[3];
1034 struct sta_id_modify sta; 1025 struct sta_id_modify sta;
1035 struct iwl4965_keyinfo key; 1026 struct il4965_keyinfo key;
1036 __le32 station_flags; /* STA_FLG_* */ 1027 __le32 station_flags; /* STA_FLG_* */
1037 __le32 station_flags_msk; /* STA_FLG_* */ 1028 __le32 station_flags_msk; /* STA_FLG_* */
1038 1029
1039 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) 1030 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -1041,7 +1032,7 @@ struct iwl_legacy_addsta_cmd {
1041 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ 1032 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
1042 __le16 tid_disable_tx; 1033 __le16 tid_disable_tx;
1043 1034
1044 __le16 rate_n_flags; /* 3945 only */ 1035 __le16 rate_n_flags; /* 3945 only */
1045 1036
1046 /* TID for which to add block-ack support. 1037 /* TID for which to add block-ack support.
1047 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ 1038 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1065,51 +1056,50 @@ struct iwl_legacy_addsta_cmd {
1065 __le16 reserved2; 1056 __le16 reserved2;
1066} __packed; 1057} __packed;
1067 1058
1068
1069#define ADD_STA_SUCCESS_MSK 0x1 1059#define ADD_STA_SUCCESS_MSK 0x1
1070#define ADD_STA_NO_ROOM_IN_TABLE 0x2 1060#define ADD_STA_NO_ROOM_IN_TBL 0x2
1071#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4 1061#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4
1072#define ADD_STA_MODIFY_NON_EXIST_STA 0x8 1062#define ADD_STA_MODIFY_NON_EXIST_STA 0x8
1073/* 1063/*
1074 * REPLY_ADD_STA = 0x18 (response) 1064 * C_ADD_STA = 0x18 (response)
1075 */ 1065 */
1076struct iwl_add_sta_resp { 1066struct il_add_sta_resp {
1077 u8 status; /* ADD_STA_* */ 1067 u8 status; /* ADD_STA_* */
1078} __packed; 1068} __packed;
1079 1069
1080#define REM_STA_SUCCESS_MSK 0x1 1070#define REM_STA_SUCCESS_MSK 0x1
1081/* 1071/*
1082 * REPLY_REM_STA = 0x19 (response) 1072 * C_REM_STA = 0x19 (response)
1083 */ 1073 */
1084struct iwl_rem_sta_resp { 1074struct il_rem_sta_resp {
1085 u8 status; 1075 u8 status;
1086} __packed; 1076} __packed;
1087 1077
1088/* 1078/*
1089 * REPLY_REM_STA = 0x19 (command) 1079 * C_REM_STA = 0x19 (command)
1090 */ 1080 */
1091struct iwl_rem_sta_cmd { 1081struct il_rem_sta_cmd {
1092 u8 num_sta; /* number of removed stations */ 1082 u8 num_sta; /* number of removed stations */
1093 u8 reserved[3]; 1083 u8 reserved[3];
1094 u8 addr[ETH_ALEN]; /* MAC addr of the first station */ 1084 u8 addr[ETH_ALEN]; /* MAC addr of the first station */
1095 u8 reserved2[2]; 1085 u8 reserved2[2];
1096} __packed; 1086} __packed;
1097 1087
1098#define IWL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0)) 1088#define IL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0))
1099#define IWL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1)) 1089#define IL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1))
1100#define IWL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2)) 1090#define IL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2))
1101#define IWL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3)) 1091#define IL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3))
1102#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00) 1092#define IL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
1103 1093
1104#define IWL_DROP_SINGLE 0 1094#define IL_DROP_SINGLE 0
1105#define IWL_DROP_SELECTED 1 1095#define IL_DROP_SELECTED 1
1106#define IWL_DROP_ALL 2 1096#define IL_DROP_ALL 2
1107 1097
1108/* 1098/*
1109 * REPLY_WEP_KEY = 0x20 1099 * REPLY_WEP_KEY = 0x20
1110 */ 1100 */
1111struct iwl_wep_key { 1101struct il_wep_key {
1112 u8 key_index; 1102 u8 key_idx;
1113 u8 key_offset; 1103 u8 key_offset;
1114 u8 reserved1[2]; 1104 u8 reserved1[2];
1115 u8 key_size; 1105 u8 key_size;
@@ -1117,12 +1107,12 @@ struct iwl_wep_key {
1117 u8 key[16]; 1107 u8 key[16];
1118} __packed; 1108} __packed;
1119 1109
1120struct iwl_wep_cmd { 1110struct il_wep_cmd {
1121 u8 num_keys; 1111 u8 num_keys;
1122 u8 global_key_type; 1112 u8 global_key_type;
1123 u8 flags; 1113 u8 flags;
1124 u8 reserved; 1114 u8 reserved;
1125 struct iwl_wep_key key[0]; 1115 struct il_wep_key key[0];
1126} __packed; 1116} __packed;
1127 1117
1128#define WEP_KEY_WEP_TYPE 1 1118#define WEP_KEY_WEP_TYPE 1
@@ -1168,8 +1158,7 @@ struct iwl_wep_cmd {
1168#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7) 1158#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7)
1169#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800) 1159#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800)
1170 1160
1171 1161struct il3945_rx_frame_stats {
1172struct iwl3945_rx_frame_stats {
1173 u8 phy_count; 1162 u8 phy_count;
1174 u8 id; 1163 u8 id;
1175 u8 rssi; 1164 u8 rssi;
@@ -1179,7 +1168,7 @@ struct iwl3945_rx_frame_stats {
1179 u8 payload[0]; 1168 u8 payload[0];
1180} __packed; 1169} __packed;
1181 1170
1182struct iwl3945_rx_frame_hdr { 1171struct il3945_rx_frame_hdr {
1183 __le16 channel; 1172 __le16 channel;
1184 __le16 phy_flags; 1173 __le16 phy_flags;
1185 u8 reserved1; 1174 u8 reserved1;
@@ -1188,73 +1177,71 @@ struct iwl3945_rx_frame_hdr {
1188 u8 payload[0]; 1177 u8 payload[0];
1189} __packed; 1178} __packed;
1190 1179
1191struct iwl3945_rx_frame_end { 1180struct il3945_rx_frame_end {
1192 __le32 status; 1181 __le32 status;
1193 __le64 timestamp; 1182 __le64 timestamp;
1194 __le32 beacon_timestamp; 1183 __le32 beacon_timestamp;
1195} __packed; 1184} __packed;
1196 1185
1197/* 1186/*
1198 * REPLY_3945_RX = 0x1b (response only, not a command) 1187 * N_3945_RX = 0x1b (response only, not a command)
1199 * 1188 *
1200 * NOTE: DO NOT dereference from casts to this structure 1189 * NOTE: DO NOT dereference from casts to this structure
1201 * It is provided only for calculating minimum data set size. 1190 * It is provided only for calculating minimum data set size.
1202 * The actual offsets of the hdr and end are dynamic based on 1191 * The actual offsets of the hdr and end are dynamic based on
1203 * stats.phy_count 1192 * stats.phy_count
1204 */ 1193 */
1205struct iwl3945_rx_frame { 1194struct il3945_rx_frame {
1206 struct iwl3945_rx_frame_stats stats; 1195 struct il3945_rx_frame_stats stats;
1207 struct iwl3945_rx_frame_hdr hdr; 1196 struct il3945_rx_frame_hdr hdr;
1208 struct iwl3945_rx_frame_end end; 1197 struct il3945_rx_frame_end end;
1209} __packed; 1198} __packed;
1210 1199
1211#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame)) 1200#define IL39_RX_FRAME_SIZE (4 + sizeof(struct il3945_rx_frame))
1212 1201
1213/* Fixed (non-configurable) rx data from phy */ 1202/* Fixed (non-configurable) rx data from phy */
1214 1203
1215#define IWL49_RX_RES_PHY_CNT 14 1204#define IL49_RX_RES_PHY_CNT 14
1216#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4) 1205#define IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
1217#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70) 1206#define IL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
1218#define IWL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */ 1207#define IL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
1219#define IWL49_AGC_DB_POS (7) 1208#define IL49_AGC_DB_POS (7)
1220struct iwl4965_rx_non_cfg_phy { 1209struct il4965_rx_non_cfg_phy {
1221 __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */ 1210 __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */
1222 __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */ 1211 __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
1223 u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */ 1212 u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */
1224 u8 pad[0]; 1213 u8 pad[0];
1225} __packed; 1214} __packed;
1226 1215
1227
1228/* 1216/*
1229 * REPLY_RX = 0xc3 (response only, not a command) 1217 * N_RX = 0xc3 (response only, not a command)
1230 * Used only for legacy (non 11n) frames. 1218 * Used only for legacy (non 11n) frames.
1231 */ 1219 */
1232struct iwl_rx_phy_res { 1220struct il_rx_phy_res {
1233 u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */ 1221 u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
1234 u8 cfg_phy_cnt; /* configurable DSP phy data byte count */ 1222 u8 cfg_phy_cnt; /* configurable DSP phy data byte count */
1235 u8 stat_id; /* configurable DSP phy data set ID */ 1223 u8 stat_id; /* configurable DSP phy data set ID */
1236 u8 reserved1; 1224 u8 reserved1;
1237 __le64 timestamp; /* TSF at on air rise */ 1225 __le64 timestamp; /* TSF at on air rise */
1238 __le32 beacon_time_stamp; /* beacon at on-air rise */ 1226 __le32 beacon_time_stamp; /* beacon at on-air rise */
1239 __le16 phy_flags; /* general phy flags: band, modulation, ... */ 1227 __le16 phy_flags; /* general phy flags: band, modulation, ... */
1240 __le16 channel; /* channel number */ 1228 __le16 channel; /* channel number */
1241 u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */ 1229 u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
1242 __le32 rate_n_flags; /* RATE_MCS_* */ 1230 __le32 rate_n_flags; /* RATE_MCS_* */
1243 __le16 byte_count; /* frame's byte-count */ 1231 __le16 byte_count; /* frame's byte-count */
1244 __le16 frame_time; /* frame's time on the air */ 1232 __le16 frame_time; /* frame's time on the air */
1245} __packed; 1233} __packed;
1246 1234
1247struct iwl_rx_mpdu_res_start { 1235struct il_rx_mpdu_res_start {
1248 __le16 byte_count; 1236 __le16 byte_count;
1249 __le16 reserved; 1237 __le16 reserved;
1250} __packed; 1238} __packed;
1251 1239
1252
1253/****************************************************************************** 1240/******************************************************************************
1254 * (5) 1241 * (5)
1255 * Tx Commands & Responses: 1242 * Tx Commands & Responses:
1256 * 1243 *
1257 * Driver must place each REPLY_TX command into one of the prioritized Tx 1244 * Driver must place each C_TX command into one of the prioritized Tx
1258 * queues in host DRAM, shared between driver and device (see comments for 1245 * queues in host DRAM, shared between driver and device (see comments for
1259 * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode 1246 * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode
1260 * are preparing to transmit, the device pulls the Tx command over the PCI 1247 * are preparing to transmit, the device pulls the Tx command over the PCI
@@ -1264,18 +1251,18 @@ struct iwl_rx_mpdu_res_start {
1264 * uCode handles all timing and protocol related to control frames 1251 * uCode handles all timing and protocol related to control frames
1265 * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler 1252 * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler
1266 * handle reception of block-acks; uCode updates the host driver via 1253 * handle reception of block-acks; uCode updates the host driver via
1267 * REPLY_COMPRESSED_BA. 1254 * N_COMPRESSED_BA.
1268 * 1255 *
1269 * uCode handles retrying Tx when an ACK is expected but not received. 1256 * uCode handles retrying Tx when an ACK is expected but not received.
1270 * This includes trying lower data rates than the one requested in the Tx 1257 * This includes trying lower data rates than the one requested in the Tx
1271 * command, as set up by the REPLY_RATE_SCALE (for 3945) or 1258 * command, as set up by the C_RATE_SCALE (for 3945) or
1272 * REPLY_TX_LINK_QUALITY_CMD (4965). 1259 * C_TX_LINK_QUALITY_CMD (4965).
1273 * 1260 *
1274 * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD. 1261 * Driver sets up transmit power for various rates via C_TX_PWR_TBL.
1275 * This command must be executed after every RXON command, before Tx can occur. 1262 * This command must be executed after every RXON command, before Tx can occur.
1276 *****************************************************************************/ 1263 *****************************************************************************/
1277 1264
1278/* REPLY_TX Tx flags field */ 1265/* C_TX Tx flags field */
1279 1266
1280/* 1267/*
1281 * 1: Use Request-To-Send protocol before this frame. 1268 * 1: Use Request-To-Send protocol before this frame.
@@ -1296,8 +1283,8 @@ struct iwl_rx_mpdu_res_start {
1296#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3) 1283#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
1297 1284
1298/* For 4965 devices: 1285/* For 4965 devices:
1299 * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD). 1286 * 1: Use rate scale table (see C_TX_LINK_QUALITY_CMD).
1300 * Tx command's initial_rate_index indicates first rate to try; 1287 * Tx command's initial_rate_idx indicates first rate to try;
1301 * uCode walks through table for additional Tx attempts. 1288 * uCode walks through table for additional Tx attempts.
1302 * 0: Use Tx rate/MCS from Tx command's rate_n_flags field. 1289 * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
1303 * This rate will be used for all Tx attempts; it will not be scaled. */ 1290 * This rate will be used for all Tx attempts; it will not be scaled. */
@@ -1322,7 +1309,7 @@ struct iwl_rx_mpdu_res_start {
1322/* 1: uCode overrides sequence control field in MAC header. 1309/* 1: uCode overrides sequence control field in MAC header.
1323 * 0: Driver provides sequence control field in MAC header. 1310 * 0: Driver provides sequence control field in MAC header.
1324 * Set this for management frames, non-QOS data frames, non-unicast frames, 1311 * Set this for management frames, non-QOS data frames, non-unicast frames,
1325 * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */ 1312 * and also in Tx command embedded in C_SCAN for active scans. */
1326#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13) 1313#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
1327 1314
1328/* 1: This frame is non-last MPDU; more fragments are coming. 1315/* 1: This frame is non-last MPDU; more fragments are coming.
@@ -1349,7 +1336,6 @@ struct iwl_rx_mpdu_res_start {
1349/* HCCA-AP - disable duration overwriting. */ 1336/* HCCA-AP - disable duration overwriting. */
1350#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25) 1337#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
1351 1338
1352
1353/* 1339/*
1354 * TX command security control 1340 * TX command security control
1355 */ 1341 */
@@ -1369,10 +1355,10 @@ struct iwl_rx_mpdu_res_start {
1369#define TKIP_ICV_LEN 4 1355#define TKIP_ICV_LEN 4
1370 1356
1371/* 1357/*
1372 * REPLY_TX = 0x1c (command) 1358 * C_TX = 0x1c (command)
1373 */ 1359 */
1374 1360
1375struct iwl3945_tx_cmd { 1361struct il3945_tx_cmd {
1376 /* 1362 /*
1377 * MPDU byte count: 1363 * MPDU byte count:
1378 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, 1364 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
@@ -1434,9 +1420,9 @@ struct iwl3945_tx_cmd {
1434} __packed; 1420} __packed;
1435 1421
1436/* 1422/*
1437 * REPLY_TX = 0x1c (response) 1423 * C_TX = 0x1c (response)
1438 */ 1424 */
1439struct iwl3945_tx_resp { 1425struct il3945_tx_resp {
1440 u8 failure_rts; 1426 u8 failure_rts;
1441 u8 failure_frame; 1427 u8 failure_frame;
1442 u8 bt_kill_count; 1428 u8 bt_kill_count;
@@ -1445,19 +1431,18 @@ struct iwl3945_tx_resp {
1445 __le32 status; /* TX status */ 1431 __le32 status; /* TX status */
1446} __packed; 1432} __packed;
1447 1433
1448
1449/* 1434/*
1450 * 4965 uCode updates these Tx attempt count values in host DRAM. 1435 * 4965 uCode updates these Tx attempt count values in host DRAM.
1451 * Used for managing Tx retries when expecting block-acks. 1436 * Used for managing Tx retries when expecting block-acks.
1452 * Driver should set these fields to 0. 1437 * Driver should set these fields to 0.
1453 */ 1438 */
1454struct iwl_dram_scratch { 1439struct il_dram_scratch {
1455 u8 try_cnt; /* Tx attempts */ 1440 u8 try_cnt; /* Tx attempts */
1456 u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */ 1441 u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */
1457 __le16 reserved; 1442 __le16 reserved;
1458} __packed; 1443} __packed;
1459 1444
1460struct iwl_tx_cmd { 1445struct il_tx_cmd {
1461 /* 1446 /*
1462 * MPDU byte count: 1447 * MPDU byte count:
1463 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, 1448 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
@@ -1481,7 +1466,7 @@ struct iwl_tx_cmd {
1481 1466
1482 /* uCode may modify this field of the Tx command (in host DRAM!). 1467 /* uCode may modify this field of the Tx command (in host DRAM!).
1483 * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */ 1468 * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
1484 struct iwl_dram_scratch scratch; 1469 struct il_dram_scratch scratch;
1485 1470
1486 /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */ 1471 /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
1487 __le32 rate_n_flags; /* RATE_MCS_* */ 1472 __le32 rate_n_flags; /* RATE_MCS_* */
@@ -1493,13 +1478,13 @@ struct iwl_tx_cmd {
1493 u8 sec_ctl; /* TX_CMD_SEC_* */ 1478 u8 sec_ctl; /* TX_CMD_SEC_* */
1494 1479
1495 /* 1480 /*
1496 * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial 1481 * Index into rate table (see C_TX_LINK_QUALITY_CMD) for initial
1497 * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for 1482 * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
1498 * data frames, this field may be used to selectively reduce initial 1483 * data frames, this field may be used to selectively reduce initial
1499 * rate (via non-0 value) for special frames (e.g. management), while 1484 * rate (via non-0 value) for special frames (e.g. management), while
1500 * still supporting rate scaling for all frames. 1485 * still supporting rate scaling for all frames.
1501 */ 1486 */
1502 u8 initial_rate_index; 1487 u8 initial_rate_idx;
1503 u8 reserved; 1488 u8 reserved;
1504 u8 key[16]; 1489 u8 key[16];
1505 __le16 next_frame_flags; 1490 __le16 next_frame_flags;
@@ -1628,12 +1613,12 @@ enum {
1628}; 1613};
1629 1614
1630enum { 1615enum {
1631 TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */ 1616 TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */
1632 TX_STATUS_DELAY_MSK = 0x00000040, 1617 TX_STATUS_DELAY_MSK = 0x00000040,
1633 TX_STATUS_ABORT_MSK = 0x00000080, 1618 TX_STATUS_ABORT_MSK = 0x00000080,
1634 TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */ 1619 TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */
1635 TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */ 1620 TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */
1636 TX_RESERVED = 0x00780000, /* bits 19:22 */ 1621 TX_RESERVED = 0x00780000, /* bits 19:22 */
1637 TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */ 1622 TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */
1638 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */ 1623 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
1639}; 1624};
@@ -1671,7 +1656,7 @@ enum {
1671#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000 1656#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
1672 1657
1673/* 1658/*
1674 * REPLY_TX = 0x1c (response) 1659 * C_TX = 0x1c (response)
1675 * 1660 *
1676 * This response may be in one of two slightly different formats, indicated 1661 * This response may be in one of two slightly different formats, indicated
1677 * by the frame_count field: 1662 * by the frame_count field:
@@ -1697,7 +1682,7 @@ struct agg_tx_status {
1697 __le16 sequence; 1682 __le16 sequence;
1698} __packed; 1683} __packed;
1699 1684
1700struct iwl4965_tx_resp { 1685struct il4965_tx_resp {
1701 u8 frame_count; /* 1 no aggregation, >1 aggregation */ 1686 u8 frame_count; /* 1 no aggregation, >1 aggregation */
1702 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */ 1687 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
1703 u8 failure_rts; /* # failures due to unsuccessful RTS */ 1688 u8 failure_rts; /* # failures due to unsuccessful RTS */
@@ -1730,16 +1715,16 @@ struct iwl4965_tx_resp {
1730 */ 1715 */
1731 union { 1716 union {
1732 __le32 status; 1717 __le32 status;
1733 struct agg_tx_status agg_status[0]; /* for each agg frame */ 1718 struct agg_tx_status agg_status[0]; /* for each agg frame */
1734 } u; 1719 } u;
1735} __packed; 1720} __packed;
1736 1721
1737/* 1722/*
1738 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command) 1723 * N_COMPRESSED_BA = 0xc5 (response only, not a command)
1739 * 1724 *
1740 * Reports Block-Acknowledge from recipient station 1725 * Reports Block-Acknowledge from recipient station
1741 */ 1726 */
1742struct iwl_compressed_ba_resp { 1727struct il_compressed_ba_resp {
1743 __le32 sta_addr_lo32; 1728 __le32 sta_addr_lo32;
1744 __le16 sta_addr_hi16; 1729 __le16 sta_addr_hi16;
1745 __le16 reserved; 1730 __le16 reserved;
@@ -1754,30 +1739,29 @@ struct iwl_compressed_ba_resp {
1754} __packed; 1739} __packed;
1755 1740
1756/* 1741/*
1757 * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response) 1742 * C_TX_PWR_TBL = 0x97 (command, has simple generic response)
1758 * 1743 *
1759 * See details under "TXPOWER" in iwl-4965-hw.h. 1744 * See details under "TXPOWER" in 4965.h.
1760 */ 1745 */
1761 1746
1762struct iwl3945_txpowertable_cmd { 1747struct il3945_txpowertable_cmd {
1763 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ 1748 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
1764 u8 reserved; 1749 u8 reserved;
1765 __le16 channel; 1750 __le16 channel;
1766 struct iwl3945_power_per_rate power[IWL_MAX_RATES]; 1751 struct il3945_power_per_rate power[IL_MAX_RATES];
1767} __packed; 1752} __packed;
1768 1753
1769struct iwl4965_txpowertable_cmd { 1754struct il4965_txpowertable_cmd {
1770 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ 1755 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
1771 u8 reserved; 1756 u8 reserved;
1772 __le16 channel; 1757 __le16 channel;
1773 struct iwl4965_tx_power_db tx_power; 1758 struct il4965_tx_power_db tx_power;
1774} __packed; 1759} __packed;
1775 1760
1776
1777/** 1761/**
1778 * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response 1762 * struct il3945_rate_scaling_cmd - Rate Scaling Command & Response
1779 * 1763 *
1780 * REPLY_RATE_SCALE = 0x47 (command, has simple generic response) 1764 * C_RATE_SCALE = 0x47 (command, has simple generic response)
1781 * 1765 *
1782 * NOTE: The table of rates passed to the uCode via the 1766 * NOTE: The table of rates passed to the uCode via the
1783 * RATE_SCALE command sets up the corresponding order of 1767 * RATE_SCALE command sets up the corresponding order of
@@ -1786,22 +1770,21 @@ struct iwl4965_txpowertable_cmd {
1786 * 1770 *
1787 * For example, if you set 9MB (PLCP 0x0f) as the first 1771 * For example, if you set 9MB (PLCP 0x0f) as the first
1788 * rate in the rate table, the bit mask for that rate 1772 * rate in the rate table, the bit mask for that rate
1789 * when passed through ofdm_basic_rates on the REPLY_RXON 1773 * when passed through ofdm_basic_rates on the C_RXON
1790 * command would be bit 0 (1 << 0) 1774 * command would be bit 0 (1 << 0)
1791 */ 1775 */
1792struct iwl3945_rate_scaling_info { 1776struct il3945_rate_scaling_info {
1793 __le16 rate_n_flags; 1777 __le16 rate_n_flags;
1794 u8 try_cnt; 1778 u8 try_cnt;
1795 u8 next_rate_index; 1779 u8 next_rate_idx;
1796} __packed; 1780} __packed;
1797 1781
1798struct iwl3945_rate_scaling_cmd { 1782struct il3945_rate_scaling_cmd {
1799 u8 table_id; 1783 u8 table_id;
1800 u8 reserved[3]; 1784 u8 reserved[3];
1801 struct iwl3945_rate_scaling_info table[IWL_MAX_RATES]; 1785 struct il3945_rate_scaling_info table[IL_MAX_RATES];
1802} __packed; 1786} __packed;
1803 1787
1804
1805/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */ 1788/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
1806#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0) 1789#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0)
1807 1790
@@ -1816,28 +1799,27 @@ struct iwl3945_rate_scaling_cmd {
1816#define LINK_QUAL_ANT_B_MSK (1 << 1) 1799#define LINK_QUAL_ANT_B_MSK (1 << 1)
1817#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK) 1800#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
1818 1801
1819
1820/** 1802/**
1821 * struct iwl_link_qual_general_params 1803 * struct il_link_qual_general_params
1822 * 1804 *
1823 * Used in REPLY_TX_LINK_QUALITY_CMD 1805 * Used in C_TX_LINK_QUALITY_CMD
1824 */ 1806 */
1825struct iwl_link_qual_general_params { 1807struct il_link_qual_general_params {
1826 u8 flags; 1808 u8 flags;
1827 1809
1828 /* No entries at or above this (driver chosen) index contain MIMO */ 1810 /* No entries at or above this (driver chosen) idx contain MIMO */
1829 u8 mimo_delimiter; 1811 u8 mimo_delimiter;
1830 1812
1831 /* Best single antenna to use for single stream (legacy, SISO). */ 1813 /* Best single antenna to use for single stream (legacy, SISO). */
1832 u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */ 1814 u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */
1833 1815
1834 /* Best antennas to use for MIMO (unused for 4965, assumes both). */ 1816 /* Best antennas to use for MIMO (unused for 4965, assumes both). */
1835 u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */ 1817 u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */
1836 1818
1837 /* 1819 /*
1838 * If driver needs to use different initial rates for different 1820 * If driver needs to use different initial rates for different
1839 * EDCA QOS access categories (as implemented by tx fifos 0-3), 1821 * EDCA QOS access categories (as implemented by tx fifos 0-3),
1840 * this table will set that up, by indicating the indexes in the 1822 * this table will set that up, by indicating the idxes in the
1841 * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start. 1823 * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
1842 * Otherwise, driver should set all entries to 0. 1824 * Otherwise, driver should set all entries to 0.
1843 * 1825 *
@@ -1845,10 +1827,10 @@ struct iwl_link_qual_general_params {
1845 * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice 1827 * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
1846 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3. 1828 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
1847 */ 1829 */
1848 u8 start_rate_index[LINK_QUAL_AC_NUM]; 1830 u8 start_rate_idx[LINK_QUAL_AC_NUM];
1849} __packed; 1831} __packed;
1850 1832
1851#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ 1833#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
1852#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000) 1834#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
1853#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100) 1835#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
1854 1836
@@ -1861,11 +1843,11 @@ struct iwl_link_qual_general_params {
1861#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) 1843#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
1862 1844
1863/** 1845/**
1864 * struct iwl_link_qual_agg_params 1846 * struct il_link_qual_agg_params
1865 * 1847 *
1866 * Used in REPLY_TX_LINK_QUALITY_CMD 1848 * Used in C_TX_LINK_QUALITY_CMD
1867 */ 1849 */
1868struct iwl_link_qual_agg_params { 1850struct il_link_qual_agg_params {
1869 1851
1870 /* 1852 /*
1871 *Maximum number of uSec in aggregation. 1853 *Maximum number of uSec in aggregation.
@@ -1892,9 +1874,9 @@ struct iwl_link_qual_agg_params {
1892} __packed; 1874} __packed;
1893 1875
1894/* 1876/*
1895 * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) 1877 * C_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
1896 * 1878 *
1897 * For 4965 devices only; 3945 uses REPLY_RATE_SCALE. 1879 * For 4965 devices only; 3945 uses C_RATE_SCALE.
1898 * 1880 *
1899 * Each station in the 4965 device's internal station table has its own table 1881 * Each station in the 4965 device's internal station table has its own table
1900 * of 16 1882 * of 16
@@ -1903,13 +1885,13 @@ struct iwl_link_qual_agg_params {
1903 * one station. 1885 * one station.
1904 * 1886 *
1905 * NOTE: Station must already be in 4965 device's station table. 1887 * NOTE: Station must already be in 4965 device's station table.
1906 * Use REPLY_ADD_STA. 1888 * Use C_ADD_STA.
1907 * 1889 *
1908 * The rate scaling procedures described below work well. Of course, other 1890 * The rate scaling procedures described below work well. Of course, other
1909 * procedures are possible, and may work better for particular environments. 1891 * procedures are possible, and may work better for particular environments.
1910 * 1892 *
1911 * 1893 *
1912 * FILLING THE RATE TABLE 1894 * FILLING THE RATE TBL
1913 * 1895 *
1914 * Given a particular initial rate and mode, as determined by the rate 1896 * Given a particular initial rate and mode, as determined by the rate
1915 * scaling algorithm described below, the Linux driver uses the following 1897 * scaling algorithm described below, the Linux driver uses the following
@@ -1948,13 +1930,13 @@ struct iwl_link_qual_agg_params {
1948 * speculative mode as the new current active mode. 1930 * speculative mode as the new current active mode.
1949 * 1931 *
1950 * Each history set contains, separately for each possible rate, data for a 1932 * Each history set contains, separately for each possible rate, data for a
1951 * sliding window of the 62 most recent tx attempts at that rate. The data 1933 * sliding win of the 62 most recent tx attempts at that rate. The data
1952 * includes a shifting bitmap of success(1)/failure(0), and sums of successful 1934 * includes a shifting bitmap of success(1)/failure(0), and sums of successful
1953 * and attempted frames, from which the driver can additionally calculate a 1935 * and attempted frames, from which the driver can additionally calculate a
1954 * success ratio (success / attempted) and number of failures 1936 * success ratio (success / attempted) and number of failures
1955 * (attempted - success), and control the size of the window (attempted). 1937 * (attempted - success), and control the size of the win (attempted).
1956 * The driver uses the bit map to remove successes from the success sum, as 1938 * The driver uses the bit map to remove successes from the success sum, as
1957 * the oldest tx attempts fall out of the window. 1939 * the oldest tx attempts fall out of the win.
1958 * 1940 *
1959 * When the 4965 device makes multiple tx attempts for a given frame, each 1941 * When the 4965 device makes multiple tx attempts for a given frame, each
1960 * attempt might be at a different rate, and have different modulation 1942 * attempt might be at a different rate, and have different modulation
@@ -1966,7 +1948,7 @@ struct iwl_link_qual_agg_params {
1966 * 1948 *
1967 * When using block-ack (aggregation), all frames are transmitted at the same 1949 * When using block-ack (aggregation), all frames are transmitted at the same
1968 * rate, since there is no per-attempt acknowledgment from the destination 1950 * rate, since there is no per-attempt acknowledgment from the destination
1969 * station. The Tx response struct iwl_tx_resp indicates the Tx rate in 1951 * station. The Tx response struct il_tx_resp indicates the Tx rate in
1970 * rate_n_flags field. After receiving a block-ack, the driver can update 1952 * rate_n_flags field. After receiving a block-ack, the driver can update
1971 * history for the entire block all at once. 1953 * history for the entire block all at once.
1972 * 1954 *
@@ -2016,8 +1998,8 @@ struct iwl_link_qual_agg_params {
2016 * good performance; higher rate is sure to have poorer success. 1998 * good performance; higher rate is sure to have poorer success.
2017 * 1999 *
2018 * 6) Re-evaluate the rate after each tx frame. If working with block- 2000 * 6) Re-evaluate the rate after each tx frame. If working with block-
2019 * acknowledge, history and statistics may be calculated for the entire 2001 * acknowledge, history and stats may be calculated for the entire
2020 * block (including prior history that fits within the history windows), 2002 * block (including prior history that fits within the history wins),
2021 * before re-evaluation. 2003 * before re-evaluation.
2022 * 2004 *
2023 * FINDING BEST STARTING MODULATION MODE: 2005 * FINDING BEST STARTING MODULATION MODE:
@@ -2079,22 +2061,22 @@ struct iwl_link_qual_agg_params {
2079 * legacy), and then repeat the search process. 2061 * legacy), and then repeat the search process.
2080 * 2062 *
2081 */ 2063 */
2082struct iwl_link_quality_cmd { 2064struct il_link_quality_cmd {
2083 2065
2084 /* Index of destination/recipient station in uCode's station table */ 2066 /* Index of destination/recipient station in uCode's station table */
2085 u8 sta_id; 2067 u8 sta_id;
2086 u8 reserved1; 2068 u8 reserved1;
2087 __le16 control; /* not used */ 2069 __le16 control; /* not used */
2088 struct iwl_link_qual_general_params general_params; 2070 struct il_link_qual_general_params general_params;
2089 struct iwl_link_qual_agg_params agg_params; 2071 struct il_link_qual_agg_params agg_params;
2090 2072
2091 /* 2073 /*
2092 * Rate info; when using rate-scaling, Tx command's initial_rate_index 2074 * Rate info; when using rate-scaling, Tx command's initial_rate_idx
2093 * specifies 1st Tx rate attempted, via index into this table. 2075 * specifies 1st Tx rate attempted, via idx into this table.
2094 * 4965 devices works its way through table when retrying Tx. 2076 * 4965 devices works its way through table when retrying Tx.
2095 */ 2077 */
2096 struct { 2078 struct {
2097 __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */ 2079 __le32 rate_n_flags; /* RATE_MCS_*, RATE_* */
2098 } rs_table[LINK_QUAL_MAX_RETRY_NUM]; 2080 } rs_table[LINK_QUAL_MAX_RETRY_NUM];
2099 __le32 reserved2; 2081 __le32 reserved2;
2100} __packed; 2082} __packed;
@@ -2117,13 +2099,13 @@ struct iwl_link_quality_cmd {
2117#define BT_MAX_KILL_DEF (0x5) 2099#define BT_MAX_KILL_DEF (0x5)
2118 2100
2119/* 2101/*
2120 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response) 2102 * C_BT_CONFIG = 0x9b (command, has simple generic response)
2121 * 2103 *
2122 * 3945 and 4965 devices support hardware handshake with Bluetooth device on 2104 * 3945 and 4965 devices support hardware handshake with Bluetooth device on
2123 * same platform. Bluetooth device alerts wireless device when it will Tx; 2105 * same platform. Bluetooth device alerts wireless device when it will Tx;
2124 * wireless device can delay or kill its own Tx to accommodate. 2106 * wireless device can delay or kill its own Tx to accommodate.
2125 */ 2107 */
2126struct iwl_bt_cmd { 2108struct il_bt_cmd {
2127 u8 flags; 2109 u8 flags;
2128 u8 lead_time; 2110 u8 lead_time;
2129 u8 max_kill; 2111 u8 max_kill;
@@ -2132,7 +2114,6 @@ struct iwl_bt_cmd {
2132 __le32 kill_cts_mask; 2114 __le32 kill_cts_mask;
2133} __packed; 2115} __packed;
2134 2116
2135
2136/****************************************************************************** 2117/******************************************************************************
2137 * (6) 2118 * (6)
2138 * Spectrum Management (802.11h) Commands, Responses, Notifications: 2119 * Spectrum Management (802.11h) Commands, Responses, Notifications:
@@ -2150,18 +2131,18 @@ struct iwl_bt_cmd {
2150 RXON_FILTER_ASSOC_MSK | \ 2131 RXON_FILTER_ASSOC_MSK | \
2151 RXON_FILTER_BCON_AWARE_MSK) 2132 RXON_FILTER_BCON_AWARE_MSK)
2152 2133
2153struct iwl_measure_channel { 2134struct il_measure_channel {
2154 __le32 duration; /* measurement duration in extended beacon 2135 __le32 duration; /* measurement duration in extended beacon
2155 * format */ 2136 * format */
2156 u8 channel; /* channel to measure */ 2137 u8 channel; /* channel to measure */
2157 u8 type; /* see enum iwl_measure_type */ 2138 u8 type; /* see enum il_measure_type */
2158 __le16 reserved; 2139 __le16 reserved;
2159} __packed; 2140} __packed;
2160 2141
2161/* 2142/*
2162 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command) 2143 * C_SPECTRUM_MEASUREMENT = 0x74 (command)
2163 */ 2144 */
2164struct iwl_spectrum_cmd { 2145struct il_spectrum_cmd {
2165 __le16 len; /* number of bytes starting from token */ 2146 __le16 len; /* number of bytes starting from token */
2166 u8 token; /* token id */ 2147 u8 token; /* token id */
2167 u8 id; /* measurement id -- 0 or 1 */ 2148 u8 id; /* measurement id -- 0 or 1 */
@@ -2174,13 +2155,13 @@ struct iwl_spectrum_cmd {
2174 __le32 filter_flags; /* rxon filter flags */ 2155 __le32 filter_flags; /* rxon filter flags */
2175 __le16 channel_count; /* minimum 1, maximum 10 */ 2156 __le16 channel_count; /* minimum 1, maximum 10 */
2176 __le16 reserved3; 2157 __le16 reserved3;
2177 struct iwl_measure_channel channels[10]; 2158 struct il_measure_channel channels[10];
2178} __packed; 2159} __packed;
2179 2160
2180/* 2161/*
2181 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response) 2162 * C_SPECTRUM_MEASUREMENT = 0x74 (response)
2182 */ 2163 */
2183struct iwl_spectrum_resp { 2164struct il_spectrum_resp {
2184 u8 token; 2165 u8 token;
2185 u8 id; /* id of the prior command replaced, or 0xff */ 2166 u8 id; /* id of the prior command replaced, or 0xff */
2186 __le16 status; /* 0 - command will be handled 2167 __le16 status; /* 0 - command will be handled
@@ -2188,57 +2169,57 @@ struct iwl_spectrum_resp {
2188 * measurement) */ 2169 * measurement) */
2189} __packed; 2170} __packed;
2190 2171
2191enum iwl_measurement_state { 2172enum il_measurement_state {
2192 IWL_MEASUREMENT_START = 0, 2173 IL_MEASUREMENT_START = 0,
2193 IWL_MEASUREMENT_STOP = 1, 2174 IL_MEASUREMENT_STOP = 1,
2194}; 2175};
2195 2176
2196enum iwl_measurement_status { 2177enum il_measurement_status {
2197 IWL_MEASUREMENT_OK = 0, 2178 IL_MEASUREMENT_OK = 0,
2198 IWL_MEASUREMENT_CONCURRENT = 1, 2179 IL_MEASUREMENT_CONCURRENT = 1,
2199 IWL_MEASUREMENT_CSA_CONFLICT = 2, 2180 IL_MEASUREMENT_CSA_CONFLICT = 2,
2200 IWL_MEASUREMENT_TGH_CONFLICT = 3, 2181 IL_MEASUREMENT_TGH_CONFLICT = 3,
2201 /* 4-5 reserved */ 2182 /* 4-5 reserved */
2202 IWL_MEASUREMENT_STOPPED = 6, 2183 IL_MEASUREMENT_STOPPED = 6,
2203 IWL_MEASUREMENT_TIMEOUT = 7, 2184 IL_MEASUREMENT_TIMEOUT = 7,
2204 IWL_MEASUREMENT_PERIODIC_FAILED = 8, 2185 IL_MEASUREMENT_PERIODIC_FAILED = 8,
2205}; 2186};
2206 2187
2207#define NUM_ELEMENTS_IN_HISTOGRAM 8 2188#define NUM_ELEMENTS_IN_HISTOGRAM 8
2208 2189
2209struct iwl_measurement_histogram { 2190struct il_measurement_histogram {
2210 __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */ 2191 __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
2211 __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */ 2192 __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */
2212} __packed; 2193} __packed;
2213 2194
2214/* clear channel availability counters */ 2195/* clear channel availability counters */
2215struct iwl_measurement_cca_counters { 2196struct il_measurement_cca_counters {
2216 __le32 ofdm; 2197 __le32 ofdm;
2217 __le32 cck; 2198 __le32 cck;
2218} __packed; 2199} __packed;
2219 2200
2220enum iwl_measure_type { 2201enum il_measure_type {
2221 IWL_MEASURE_BASIC = (1 << 0), 2202 IL_MEASURE_BASIC = (1 << 0),
2222 IWL_MEASURE_CHANNEL_LOAD = (1 << 1), 2203 IL_MEASURE_CHANNEL_LOAD = (1 << 1),
2223 IWL_MEASURE_HISTOGRAM_RPI = (1 << 2), 2204 IL_MEASURE_HISTOGRAM_RPI = (1 << 2),
2224 IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3), 2205 IL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
2225 IWL_MEASURE_FRAME = (1 << 4), 2206 IL_MEASURE_FRAME = (1 << 4),
2226 /* bits 5:6 are reserved */ 2207 /* bits 5:6 are reserved */
2227 IWL_MEASURE_IDLE = (1 << 7), 2208 IL_MEASURE_IDLE = (1 << 7),
2228}; 2209};
2229 2210
2230/* 2211/*
2231 * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command) 2212 * N_SPECTRUM_MEASUREMENT = 0x75 (notification only, not a command)
2232 */ 2213 */
2233struct iwl_spectrum_notification { 2214struct il_spectrum_notification {
2234 u8 id; /* measurement id -- 0 or 1 */ 2215 u8 id; /* measurement id -- 0 or 1 */
2235 u8 token; 2216 u8 token;
2236 u8 channel_index; /* index in measurement channel list */ 2217 u8 channel_idx; /* idx in measurement channel list */
2237 u8 state; /* 0 - start, 1 - stop */ 2218 u8 state; /* 0 - start, 1 - stop */
2238 __le32 start_time; /* lower 32-bits of TSF */ 2219 __le32 start_time; /* lower 32-bits of TSF */
2239 u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */ 2220 u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */
2240 u8 channel; 2221 u8 channel;
2241 u8 type; /* see enum iwl_measurement_type */ 2222 u8 type; /* see enum il_measurement_type */
2242 u8 reserved1; 2223 u8 reserved1;
2243 /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only only 2224 /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only only
2244 * valid if applicable for measurement type requested. */ 2225 * valid if applicable for measurement type requested. */
@@ -2248,9 +2229,9 @@ struct iwl_spectrum_notification {
2248 u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 - 2229 u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 -
2249 * unidentified */ 2230 * unidentified */
2250 u8 reserved2[3]; 2231 u8 reserved2[3];
2251 struct iwl_measurement_histogram histogram; 2232 struct il_measurement_histogram histogram;
2252 __le32 stop_time; /* lower 32-bits of TSF */ 2233 __le32 stop_time; /* lower 32-bits of TSF */
2253 __le32 status; /* see iwl_measurement_status */ 2234 __le32 status; /* see il_measurement_status */
2254} __packed; 2235} __packed;
2255 2236
2256/****************************************************************************** 2237/******************************************************************************
@@ -2260,10 +2241,10 @@ struct iwl_spectrum_notification {
2260 *****************************************************************************/ 2241 *****************************************************************************/
2261 2242
2262/** 2243/**
2263 * struct iwl_powertable_cmd - Power Table Command 2244 * struct il_powertable_cmd - Power Table Command
2264 * @flags: See below: 2245 * @flags: See below:
2265 * 2246 *
2266 * POWER_TABLE_CMD = 0x77 (command, has simple generic response) 2247 * C_POWER_TBL = 0x77 (command, has simple generic response)
2267 * 2248 *
2268 * PM allow: 2249 * PM allow:
2269 * bit 0 - '0' Driver not allow power management 2250 * bit 0 - '0' Driver not allow power management
@@ -2290,38 +2271,38 @@ struct iwl_spectrum_notification {
2290 * '10' force xtal sleep 2271 * '10' force xtal sleep
2291 * '11' Illegal set 2272 * '11' Illegal set
2292 * 2273 *
2293 * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then 2274 * NOTE: if sleep_interval[SLEEP_INTRVL_TBL_SIZE-1] > DTIM period then
2294 * ucode assume sleep over DTIM is allowed and we don't need to wake up 2275 * ucode assume sleep over DTIM is allowed and we don't need to wake up
2295 * for every DTIM. 2276 * for every DTIM.
2296 */ 2277 */
2297#define IWL_POWER_VEC_SIZE 5 2278#define IL_POWER_VEC_SIZE 5
2298 2279
2299#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0)) 2280#define IL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
2300#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3)) 2281#define IL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
2301 2282
2302struct iwl3945_powertable_cmd { 2283struct il3945_powertable_cmd {
2303 __le16 flags; 2284 __le16 flags;
2304 u8 reserved[2]; 2285 u8 reserved[2];
2305 __le32 rx_data_timeout; 2286 __le32 rx_data_timeout;
2306 __le32 tx_data_timeout; 2287 __le32 tx_data_timeout;
2307 __le32 sleep_interval[IWL_POWER_VEC_SIZE]; 2288 __le32 sleep_interval[IL_POWER_VEC_SIZE];
2308} __packed; 2289} __packed;
2309 2290
2310struct iwl_powertable_cmd { 2291struct il_powertable_cmd {
2311 __le16 flags; 2292 __le16 flags;
2312 u8 keep_alive_seconds; /* 3945 reserved */ 2293 u8 keep_alive_seconds; /* 3945 reserved */
2313 u8 debug_flags; /* 3945 reserved */ 2294 u8 debug_flags; /* 3945 reserved */
2314 __le32 rx_data_timeout; 2295 __le32 rx_data_timeout;
2315 __le32 tx_data_timeout; 2296 __le32 tx_data_timeout;
2316 __le32 sleep_interval[IWL_POWER_VEC_SIZE]; 2297 __le32 sleep_interval[IL_POWER_VEC_SIZE];
2317 __le32 keep_alive_beacons; 2298 __le32 keep_alive_beacons;
2318} __packed; 2299} __packed;
2319 2300
2320/* 2301/*
2321 * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command) 2302 * N_PM_SLEEP = 0x7A (notification only, not a command)
2322 * all devices identical. 2303 * all devices identical.
2323 */ 2304 */
2324struct iwl_sleep_notification { 2305struct il_sleep_notification {
2325 u8 pm_sleep_mode; 2306 u8 pm_sleep_mode;
2326 u8 pm_wakeup_src; 2307 u8 pm_wakeup_src;
2327 __le16 reserved; 2308 __le16 reserved;
@@ -2332,23 +2313,23 @@ struct iwl_sleep_notification {
2332 2313
2333/* Sleep states. all devices identical. */ 2314/* Sleep states. all devices identical. */
2334enum { 2315enum {
2335 IWL_PM_NO_SLEEP = 0, 2316 IL_PM_NO_SLEEP = 0,
2336 IWL_PM_SLP_MAC = 1, 2317 IL_PM_SLP_MAC = 1,
2337 IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2, 2318 IL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
2338 IWL_PM_SLP_FULL_MAC_CARD_STATE = 3, 2319 IL_PM_SLP_FULL_MAC_CARD_STATE = 3,
2339 IWL_PM_SLP_PHY = 4, 2320 IL_PM_SLP_PHY = 4,
2340 IWL_PM_SLP_REPENT = 5, 2321 IL_PM_SLP_REPENT = 5,
2341 IWL_PM_WAKEUP_BY_TIMER = 6, 2322 IL_PM_WAKEUP_BY_TIMER = 6,
2342 IWL_PM_WAKEUP_BY_DRIVER = 7, 2323 IL_PM_WAKEUP_BY_DRIVER = 7,
2343 IWL_PM_WAKEUP_BY_RFKILL = 8, 2324 IL_PM_WAKEUP_BY_RFKILL = 8,
2344 /* 3 reserved */ 2325 /* 3 reserved */
2345 IWL_PM_NUM_OF_MODES = 12, 2326 IL_PM_NUM_OF_MODES = 12,
2346}; 2327};
2347 2328
2348/* 2329/*
2349 * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command) 2330 * N_CARD_STATE = 0xa1 (notification only, not a command)
2350 */ 2331 */
2351struct iwl_card_state_notif { 2332struct il_card_state_notif {
2352 __le32 flags; 2333 __le32 flags;
2353} __packed; 2334} __packed;
2354 2335
@@ -2357,11 +2338,11 @@ struct iwl_card_state_notif {
2357#define CT_CARD_DISABLED 0x04 2338#define CT_CARD_DISABLED 0x04
2358#define RXON_CARD_DISABLED 0x10 2339#define RXON_CARD_DISABLED 0x10
2359 2340
2360struct iwl_ct_kill_config { 2341struct il_ct_kill_config {
2361 __le32 reserved; 2342 __le32 reserved;
2362 __le32 critical_temperature_M; 2343 __le32 critical_temperature_M;
2363 __le32 critical_temperature_R; 2344 __le32 critical_temperature_R;
2364} __packed; 2345} __packed;
2365 2346
2366/****************************************************************************** 2347/******************************************************************************
2367 * (8) 2348 * (8)
@@ -2373,7 +2354,7 @@ struct iwl_ct_kill_config {
2373#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1) 2354#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1)
2374 2355
2375/** 2356/**
2376 * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table 2357 * struct il_scan_channel - entry in C_SCAN channel table
2377 * 2358 *
2378 * One for each channel in the scan list. 2359 * One for each channel in the scan list.
2379 * Each channel can independently select: 2360 * Each channel can independently select:
@@ -2383,7 +2364,7 @@ struct iwl_ct_kill_config {
2383 * quiet_plcp_th, good_CRC_th) 2364 * quiet_plcp_th, good_CRC_th)
2384 * 2365 *
2385 * To avoid uCode errors, make sure the following are true (see comments 2366 * To avoid uCode errors, make sure the following are true (see comments
2386 * under struct iwl_scan_cmd about max_out_time and quiet_time): 2367 * under struct il_scan_cmd about max_out_time and quiet_time):
2387 * 1) If using passive_dwell (i.e. passive_dwell != 0): 2368 * 1) If using passive_dwell (i.e. passive_dwell != 0):
2388 * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0) 2369 * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
2389 * 2) quiet_time <= active_dwell 2370 * 2) quiet_time <= active_dwell
@@ -2391,7 +2372,7 @@ struct iwl_ct_kill_config {
2391 * passive_dwell < max_out_time 2372 * passive_dwell < max_out_time
2392 * active_dwell < max_out_time 2373 * active_dwell < max_out_time
2393 */ 2374 */
2394struct iwl3945_scan_channel { 2375struct il3945_scan_channel {
2395 /* 2376 /*
2396 * type is defined as: 2377 * type is defined as:
2397 * 0:0 1 = active, 0 = passive 2378 * 0:0 1 = active, 0 = passive
@@ -2400,16 +2381,16 @@ struct iwl3945_scan_channel {
2400 * 5:7 reserved 2381 * 5:7 reserved
2401 */ 2382 */
2402 u8 type; 2383 u8 type;
2403 u8 channel; /* band is selected by iwl3945_scan_cmd "flags" field */ 2384 u8 channel; /* band is selected by il3945_scan_cmd "flags" field */
2404 struct iwl3945_tx_power tpc; 2385 struct il3945_tx_power tpc;
2405 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ 2386 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
2406 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ 2387 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
2407} __packed; 2388} __packed;
2408 2389
2409/* set number of direct probes u8 type */ 2390/* set number of direct probes u8 type */
2410#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1)))) 2391#define IL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
2411 2392
2412struct iwl_scan_channel { 2393struct il_scan_channel {
2413 /* 2394 /*
2414 * type is defined as: 2395 * type is defined as:
2415 * 0:0 1 = active, 0 = passive 2396 * 0:0 1 = active, 0 = passive
@@ -2418,7 +2399,7 @@ struct iwl_scan_channel {
2418 * 21:31 reserved 2399 * 21:31 reserved
2419 */ 2400 */
2420 __le32 type; 2401 __le32 type;
2421 __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */ 2402 __le16 channel; /* band is selected by il_scan_cmd "flags" field */
2422 u8 tx_gain; /* gain for analog radio */ 2403 u8 tx_gain; /* gain for analog radio */
2423 u8 dsp_atten; /* gain for DSP */ 2404 u8 dsp_atten; /* gain for DSP */
2424 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ 2405 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
@@ -2426,17 +2407,17 @@ struct iwl_scan_channel {
2426} __packed; 2407} __packed;
2427 2408
2428/* set number of direct probes __le32 type */ 2409/* set number of direct probes __le32 type */
2429#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) 2410#define IL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
2430 2411
2431/** 2412/**
2432 * struct iwl_ssid_ie - directed scan network information element 2413 * struct il_ssid_ie - directed scan network information element
2433 * 2414 *
2434 * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in 2415 * Up to 20 of these may appear in C_SCAN (Note: Only 4 are in
2435 * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel; 2416 * 3945 SCAN api), selected by "type" bit field in struct il_scan_channel;
2436 * each channel may select different ssids from among the 20 (4) entries. 2417 * each channel may select different ssids from among the 20 (4) entries.
2437 * SSID IEs get transmitted in reverse order of entry. 2418 * SSID IEs get transmitted in reverse order of entry.
2438 */ 2419 */
2439struct iwl_ssid_ie { 2420struct il_ssid_ie {
2440 u8 id; 2421 u8 id;
2441 u8 len; 2422 u8 len;
2442 u8 ssid[32]; 2423 u8 ssid[32];
@@ -2445,14 +2426,14 @@ struct iwl_ssid_ie {
2445#define PROBE_OPTION_MAX_3945 4 2426#define PROBE_OPTION_MAX_3945 4
2446#define PROBE_OPTION_MAX 20 2427#define PROBE_OPTION_MAX 20
2447#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF) 2428#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
2448#define IWL_GOOD_CRC_TH_DISABLED 0 2429#define IL_GOOD_CRC_TH_DISABLED 0
2449#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1) 2430#define IL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
2450#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff) 2431#define IL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
2451#define IWL_MAX_SCAN_SIZE 1024 2432#define IL_MAX_SCAN_SIZE 1024
2452#define IWL_MAX_CMD_SIZE 4096 2433#define IL_MAX_CMD_SIZE 4096
2453 2434
2454/* 2435/*
2455 * REPLY_SCAN_CMD = 0x80 (command) 2436 * C_SCAN = 0x80 (command)
2456 * 2437 *
2457 * The hardware scan command is very powerful; the driver can set it up to 2438 * The hardware scan command is very powerful; the driver can set it up to
2458 * maintain (relatively) normal network traffic while doing a scan in the 2439 * maintain (relatively) normal network traffic while doing a scan in the
@@ -2501,10 +2482,10 @@ struct iwl_ssid_ie {
2501 * Driver must use separate scan commands for 2.4 vs. 5 GHz bands. 2482 * Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
2502 * 2483 *
2503 * To avoid uCode errors, see timing restrictions described under 2484 * To avoid uCode errors, see timing restrictions described under
2504 * struct iwl_scan_channel. 2485 * struct il_scan_channel.
2505 */ 2486 */
2506 2487
2507struct iwl3945_scan_cmd { 2488struct il3945_scan_cmd {
2508 __le16 len; 2489 __le16 len;
2509 u8 reserved0; 2490 u8 reserved0;
2510 u8 channel_count; /* # channels in channel list */ 2491 u8 channel_count; /* # channels in channel list */
@@ -2525,10 +2506,10 @@ struct iwl3945_scan_cmd {
2525 2506
2526 /* For active scans (set to all-0s for passive scans). 2507 /* For active scans (set to all-0s for passive scans).
2527 * Does not include payload. Must specify Tx rate; no rate scaling. */ 2508 * Does not include payload. Must specify Tx rate; no rate scaling. */
2528 struct iwl3945_tx_cmd tx_cmd; 2509 struct il3945_tx_cmd tx_cmd;
2529 2510
2530 /* For directed active scans (set to all-0s otherwise) */ 2511 /* For directed active scans (set to all-0s otherwise) */
2531 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945]; 2512 struct il_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
2532 2513
2533 /* 2514 /*
2534 * Probe request frame, followed by channel list. 2515 * Probe request frame, followed by channel list.
@@ -2538,17 +2519,17 @@ struct iwl3945_scan_cmd {
2538 * Number of channels in list is specified by channel_count. 2519 * Number of channels in list is specified by channel_count.
2539 * Each channel in list is of type: 2520 * Each channel in list is of type:
2540 * 2521 *
2541 * struct iwl3945_scan_channel channels[0]; 2522 * struct il3945_scan_channel channels[0];
2542 * 2523 *
2543 * NOTE: Only one band of channels can be scanned per pass. You 2524 * NOTE: Only one band of channels can be scanned per pass. You
2544 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait 2525 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
2545 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION) 2526 * for one scan to complete (i.e. receive N_SCAN_COMPLETE)
2546 * before requesting another scan. 2527 * before requesting another scan.
2547 */ 2528 */
2548 u8 data[0]; 2529 u8 data[0];
2549} __packed; 2530} __packed;
2550 2531
2551struct iwl_scan_cmd { 2532struct il_scan_cmd {
2552 __le16 len; 2533 __le16 len;
2553 u8 reserved0; 2534 u8 reserved0;
2554 u8 channel_count; /* # channels in channel list */ 2535 u8 channel_count; /* # channels in channel list */
@@ -2569,10 +2550,10 @@ struct iwl_scan_cmd {
2569 2550
2570 /* For active scans (set to all-0s for passive scans). 2551 /* For active scans (set to all-0s for passive scans).
2571 * Does not include payload. Must specify Tx rate; no rate scaling. */ 2552 * Does not include payload. Must specify Tx rate; no rate scaling. */
2572 struct iwl_tx_cmd tx_cmd; 2553 struct il_tx_cmd tx_cmd;
2573 2554
2574 /* For directed active scans (set to all-0s otherwise) */ 2555 /* For directed active scans (set to all-0s otherwise) */
2575 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; 2556 struct il_ssid_ie direct_scan[PROBE_OPTION_MAX];
2576 2557
2577 /* 2558 /*
2578 * Probe request frame, followed by channel list. 2559 * Probe request frame, followed by channel list.
@@ -2582,11 +2563,11 @@ struct iwl_scan_cmd {
2582 * Number of channels in list is specified by channel_count. 2563 * Number of channels in list is specified by channel_count.
2583 * Each channel in list is of type: 2564 * Each channel in list is of type:
2584 * 2565 *
2585 * struct iwl_scan_channel channels[0]; 2566 * struct il_scan_channel channels[0];
2586 * 2567 *
2587 * NOTE: Only one band of channels can be scanned per pass. You 2568 * NOTE: Only one band of channels can be scanned per pass. You
2588 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait 2569 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
2589 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION) 2570 * for one scan to complete (i.e. receive N_SCAN_COMPLETE)
2590 * before requesting another scan. 2571 * before requesting another scan.
2591 */ 2572 */
2592 u8 data[0]; 2573 u8 data[0];
@@ -2598,16 +2579,16 @@ struct iwl_scan_cmd {
2598#define ABORT_STATUS 0x2 2579#define ABORT_STATUS 0x2
2599 2580
2600/* 2581/*
2601 * REPLY_SCAN_CMD = 0x80 (response) 2582 * C_SCAN = 0x80 (response)
2602 */ 2583 */
2603struct iwl_scanreq_notification { 2584struct il_scanreq_notification {
2604 __le32 status; /* 1: okay, 2: cannot fulfill request */ 2585 __le32 status; /* 1: okay, 2: cannot fulfill request */
2605} __packed; 2586} __packed;
2606 2587
2607/* 2588/*
2608 * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command) 2589 * N_SCAN_START = 0x82 (notification only, not a command)
2609 */ 2590 */
2610struct iwl_scanstart_notification { 2591struct il_scanstart_notification {
2611 __le32 tsf_low; 2592 __le32 tsf_low;
2612 __le32 tsf_high; 2593 __le32 tsf_high;
2613 __le32 beacon_timer; 2594 __le32 beacon_timer;
@@ -2620,30 +2601,30 @@ struct iwl_scanstart_notification {
2620#define SCAN_OWNER_STATUS 0x1 2601#define SCAN_OWNER_STATUS 0x1
2621#define MEASURE_OWNER_STATUS 0x2 2602#define MEASURE_OWNER_STATUS 0x2
2622 2603
2623#define IWL_PROBE_STATUS_OK 0 2604#define IL_PROBE_STATUS_OK 0
2624#define IWL_PROBE_STATUS_TX_FAILED BIT(0) 2605#define IL_PROBE_STATUS_TX_FAILED BIT(0)
2625/* error statuses combined with TX_FAILED */ 2606/* error statuses combined with TX_FAILED */
2626#define IWL_PROBE_STATUS_FAIL_TTL BIT(1) 2607#define IL_PROBE_STATUS_FAIL_TTL BIT(1)
2627#define IWL_PROBE_STATUS_FAIL_BT BIT(2) 2608#define IL_PROBE_STATUS_FAIL_BT BIT(2)
2628 2609
2629#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */ 2610#define NUMBER_OF_STATS 1 /* first __le32 is good CRC */
2630/* 2611/*
2631 * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command) 2612 * N_SCAN_RESULTS = 0x83 (notification only, not a command)
2632 */ 2613 */
2633struct iwl_scanresults_notification { 2614struct il_scanresults_notification {
2634 u8 channel; 2615 u8 channel;
2635 u8 band; 2616 u8 band;
2636 u8 probe_status; 2617 u8 probe_status;
2637 u8 num_probe_not_sent; /* not enough time to send */ 2618 u8 num_probe_not_sent; /* not enough time to send */
2638 __le32 tsf_low; 2619 __le32 tsf_low;
2639 __le32 tsf_high; 2620 __le32 tsf_high;
2640 __le32 statistics[NUMBER_OF_STATISTICS]; 2621 __le32 stats[NUMBER_OF_STATS];
2641} __packed; 2622} __packed;
2642 2623
2643/* 2624/*
2644 * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command) 2625 * N_SCAN_COMPLETE = 0x84 (notification only, not a command)
2645 */ 2626 */
2646struct iwl_scancomplete_notification { 2627struct il_scancomplete_notification {
2647 u8 scanned_channels; 2628 u8 scanned_channels;
2648 u8 status; 2629 u8 status;
2649 u8 last_channel; 2630 u8 last_channel;
@@ -2651,50 +2632,49 @@ struct iwl_scancomplete_notification {
2651 __le32 tsf_high; 2632 __le32 tsf_high;
2652} __packed; 2633} __packed;
2653 2634
2654
2655/****************************************************************************** 2635/******************************************************************************
2656 * (9) 2636 * (9)
2657 * IBSS/AP Commands and Notifications: 2637 * IBSS/AP Commands and Notifications:
2658 * 2638 *
2659 *****************************************************************************/ 2639 *****************************************************************************/
2660 2640
2661enum iwl_ibss_manager { 2641enum il_ibss_manager {
2662 IWL_NOT_IBSS_MANAGER = 0, 2642 IL_NOT_IBSS_MANAGER = 0,
2663 IWL_IBSS_MANAGER = 1, 2643 IL_IBSS_MANAGER = 1,
2664}; 2644};
2665 2645
2666/* 2646/*
2667 * BEACON_NOTIFICATION = 0x90 (notification only, not a command) 2647 * N_BEACON = 0x90 (notification only, not a command)
2668 */ 2648 */
2669 2649
2670struct iwl3945_beacon_notif { 2650struct il3945_beacon_notif {
2671 struct iwl3945_tx_resp beacon_notify_hdr; 2651 struct il3945_tx_resp beacon_notify_hdr;
2672 __le32 low_tsf; 2652 __le32 low_tsf;
2673 __le32 high_tsf; 2653 __le32 high_tsf;
2674 __le32 ibss_mgr_status; 2654 __le32 ibss_mgr_status;
2675} __packed; 2655} __packed;
2676 2656
2677struct iwl4965_beacon_notif { 2657struct il4965_beacon_notif {
2678 struct iwl4965_tx_resp beacon_notify_hdr; 2658 struct il4965_tx_resp beacon_notify_hdr;
2679 __le32 low_tsf; 2659 __le32 low_tsf;
2680 __le32 high_tsf; 2660 __le32 high_tsf;
2681 __le32 ibss_mgr_status; 2661 __le32 ibss_mgr_status;
2682} __packed; 2662} __packed;
2683 2663
2684/* 2664/*
2685 * REPLY_TX_BEACON = 0x91 (command, has simple generic response) 2665 * C_TX_BEACON= 0x91 (command, has simple generic response)
2686 */ 2666 */
2687 2667
2688struct iwl3945_tx_beacon_cmd { 2668struct il3945_tx_beacon_cmd {
2689 struct iwl3945_tx_cmd tx; 2669 struct il3945_tx_cmd tx;
2690 __le16 tim_idx; 2670 __le16 tim_idx;
2691 u8 tim_size; 2671 u8 tim_size;
2692 u8 reserved1; 2672 u8 reserved1;
2693 struct ieee80211_hdr frame[0]; /* beacon frame */ 2673 struct ieee80211_hdr frame[0]; /* beacon frame */
2694} __packed; 2674} __packed;
2695 2675
2696struct iwl_tx_beacon_cmd { 2676struct il_tx_beacon_cmd {
2697 struct iwl_tx_cmd tx; 2677 struct il_tx_cmd tx;
2698 __le16 tim_idx; 2678 __le16 tim_idx;
2699 u8 tim_size; 2679 u8 tim_size;
2700 u8 reserved1; 2680 u8 reserved1;
@@ -2707,7 +2687,7 @@ struct iwl_tx_beacon_cmd {
2707 * 2687 *
2708 *****************************************************************************/ 2688 *****************************************************************************/
2709 2689
2710#define IWL_TEMP_CONVERT 260 2690#define IL_TEMP_CONVERT 260
2711 2691
2712#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 2692#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
2713#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 2693#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
@@ -2727,9 +2707,9 @@ struct rate_histogram {
2727 } failed; 2707 } failed;
2728} __packed; 2708} __packed;
2729 2709
2730/* statistics command response */ 2710/* stats command response */
2731 2711
2732struct iwl39_statistics_rx_phy { 2712struct iwl39_stats_rx_phy {
2733 __le32 ina_cnt; 2713 __le32 ina_cnt;
2734 __le32 fina_cnt; 2714 __le32 fina_cnt;
2735 __le32 plcp_err; 2715 __le32 plcp_err;
@@ -2747,7 +2727,7 @@ struct iwl39_statistics_rx_phy {
2747 __le32 sent_cts_cnt; 2727 __le32 sent_cts_cnt;
2748} __packed; 2728} __packed;
2749 2729
2750struct iwl39_statistics_rx_non_phy { 2730struct iwl39_stats_rx_non_phy {
2751 __le32 bogus_cts; /* CTS received when not expecting CTS */ 2731 __le32 bogus_cts; /* CTS received when not expecting CTS */
2752 __le32 bogus_ack; /* ACK received when not expecting ACK */ 2732 __le32 bogus_ack; /* ACK received when not expecting ACK */
2753 __le32 non_bssid_frames; /* number of frames with BSSID that 2733 __le32 non_bssid_frames; /* number of frames with BSSID that
@@ -2758,13 +2738,13 @@ struct iwl39_statistics_rx_non_phy {
2758 * our serving channel */ 2738 * our serving channel */
2759} __packed; 2739} __packed;
2760 2740
2761struct iwl39_statistics_rx { 2741struct iwl39_stats_rx {
2762 struct iwl39_statistics_rx_phy ofdm; 2742 struct iwl39_stats_rx_phy ofdm;
2763 struct iwl39_statistics_rx_phy cck; 2743 struct iwl39_stats_rx_phy cck;
2764 struct iwl39_statistics_rx_non_phy general; 2744 struct iwl39_stats_rx_non_phy general;
2765} __packed; 2745} __packed;
2766 2746
2767struct iwl39_statistics_tx { 2747struct iwl39_stats_tx {
2768 __le32 preamble_cnt; 2748 __le32 preamble_cnt;
2769 __le32 rx_detected_cnt; 2749 __le32 rx_detected_cnt;
2770 __le32 bt_prio_defer_cnt; 2750 __le32 bt_prio_defer_cnt;
@@ -2776,31 +2756,31 @@ struct iwl39_statistics_tx {
2776 __le32 actual_ack_cnt; 2756 __le32 actual_ack_cnt;
2777} __packed; 2757} __packed;
2778 2758
2779struct statistics_dbg { 2759struct stats_dbg {
2780 __le32 burst_check; 2760 __le32 burst_check;
2781 __le32 burst_count; 2761 __le32 burst_count;
2782 __le32 wait_for_silence_timeout_cnt; 2762 __le32 wait_for_silence_timeout_cnt;
2783 __le32 reserved[3]; 2763 __le32 reserved[3];
2784} __packed; 2764} __packed;
2785 2765
2786struct iwl39_statistics_div { 2766struct iwl39_stats_div {
2787 __le32 tx_on_a; 2767 __le32 tx_on_a;
2788 __le32 tx_on_b; 2768 __le32 tx_on_b;
2789 __le32 exec_time; 2769 __le32 exec_time;
2790 __le32 probe_time; 2770 __le32 probe_time;
2791} __packed; 2771} __packed;
2792 2772
2793struct iwl39_statistics_general { 2773struct iwl39_stats_general {
2794 __le32 temperature; 2774 __le32 temperature;
2795 struct statistics_dbg dbg; 2775 struct stats_dbg dbg;
2796 __le32 sleep_time; 2776 __le32 sleep_time;
2797 __le32 slots_out; 2777 __le32 slots_out;
2798 __le32 slots_idle; 2778 __le32 slots_idle;
2799 __le32 ttl_timestamp; 2779 __le32 ttl_timestamp;
2800 struct iwl39_statistics_div div; 2780 struct iwl39_stats_div div;
2801} __packed; 2781} __packed;
2802 2782
2803struct statistics_rx_phy { 2783struct stats_rx_phy {
2804 __le32 ina_cnt; 2784 __le32 ina_cnt;
2805 __le32 fina_cnt; 2785 __le32 fina_cnt;
2806 __le32 plcp_err; 2786 __le32 plcp_err;
@@ -2823,7 +2803,7 @@ struct statistics_rx_phy {
2823 __le32 reserved3; 2803 __le32 reserved3;
2824} __packed; 2804} __packed;
2825 2805
2826struct statistics_rx_ht_phy { 2806struct stats_rx_ht_phy {
2827 __le32 plcp_err; 2807 __le32 plcp_err;
2828 __le32 overrun_err; 2808 __le32 overrun_err;
2829 __le32 early_overrun_err; 2809 __le32 early_overrun_err;
@@ -2838,7 +2818,7 @@ struct statistics_rx_ht_phy {
2838 2818
2839#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1) 2819#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
2840 2820
2841struct statistics_rx_non_phy { 2821struct stats_rx_non_phy {
2842 __le32 bogus_cts; /* CTS received when not expecting CTS */ 2822 __le32 bogus_cts; /* CTS received when not expecting CTS */
2843 __le32 bogus_ack; /* ACK received when not expecting ACK */ 2823 __le32 bogus_ack; /* ACK received when not expecting ACK */
2844 __le32 non_bssid_frames; /* number of frames with BSSID that 2824 __le32 non_bssid_frames; /* number of frames with BSSID that
@@ -2852,15 +2832,15 @@ struct statistics_rx_non_phy {
2852 __le32 num_missed_bcon; /* number of missed beacons */ 2832 __le32 num_missed_bcon; /* number of missed beacons */
2853 __le32 adc_rx_saturation_time; /* count in 0.8us units the time the 2833 __le32 adc_rx_saturation_time; /* count in 0.8us units the time the
2854 * ADC was in saturation */ 2834 * ADC was in saturation */
2855 __le32 ina_detection_search_time;/* total time (in 0.8us) searched 2835 __le32 ina_detection_search_time; /* total time (in 0.8us) searched
2856 * for INA */ 2836 * for INA */
2857 __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */ 2837 __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
2858 __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */ 2838 __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
2859 __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */ 2839 __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
2860 __le32 interference_data_flag; /* flag for interference data 2840 __le32 interference_data_flag; /* flag for interference data
2861 * availability. 1 when data is 2841 * availability. 1 when data is
2862 * available. */ 2842 * available. */
2863 __le32 channel_load; /* counts RX Enable time in uSec */ 2843 __le32 channel_load; /* counts RX Enable time in uSec */
2864 __le32 dsp_false_alarms; /* DSP false alarm (both OFDM 2844 __le32 dsp_false_alarms; /* DSP false alarm (both OFDM
2865 * and CCK) counter */ 2845 * and CCK) counter */
2866 __le32 beacon_rssi_a; 2846 __le32 beacon_rssi_a;
@@ -2871,28 +2851,28 @@ struct statistics_rx_non_phy {
2871 __le32 beacon_energy_c; 2851 __le32 beacon_energy_c;
2872} __packed; 2852} __packed;
2873 2853
2874struct statistics_rx { 2854struct stats_rx {
2875 struct statistics_rx_phy ofdm; 2855 struct stats_rx_phy ofdm;
2876 struct statistics_rx_phy cck; 2856 struct stats_rx_phy cck;
2877 struct statistics_rx_non_phy general; 2857 struct stats_rx_non_phy general;
2878 struct statistics_rx_ht_phy ofdm_ht; 2858 struct stats_rx_ht_phy ofdm_ht;
2879} __packed; 2859} __packed;
2880 2860
2881/** 2861/**
2882 * struct statistics_tx_power - current tx power 2862 * struct stats_tx_power - current tx power
2883 * 2863 *
2884 * @ant_a: current tx power on chain a in 1/2 dB step 2864 * @ant_a: current tx power on chain a in 1/2 dB step
2885 * @ant_b: current tx power on chain b in 1/2 dB step 2865 * @ant_b: current tx power on chain b in 1/2 dB step
2886 * @ant_c: current tx power on chain c in 1/2 dB step 2866 * @ant_c: current tx power on chain c in 1/2 dB step
2887 */ 2867 */
2888struct statistics_tx_power { 2868struct stats_tx_power {
2889 u8 ant_a; 2869 u8 ant_a;
2890 u8 ant_b; 2870 u8 ant_b;
2891 u8 ant_c; 2871 u8 ant_c;
2892 u8 reserved; 2872 u8 reserved;
2893} __packed; 2873} __packed;
2894 2874
2895struct statistics_tx_non_phy_agg { 2875struct stats_tx_non_phy_agg {
2896 __le32 ba_timeout; 2876 __le32 ba_timeout;
2897 __le32 ba_reschedule_frames; 2877 __le32 ba_reschedule_frames;
2898 __le32 scd_query_agg_frame_cnt; 2878 __le32 scd_query_agg_frame_cnt;
@@ -2905,7 +2885,7 @@ struct statistics_tx_non_phy_agg {
2905 __le32 rx_ba_rsp_cnt; 2885 __le32 rx_ba_rsp_cnt;
2906} __packed; 2886} __packed;
2907 2887
2908struct statistics_tx { 2888struct stats_tx {
2909 __le32 preamble_cnt; 2889 __le32 preamble_cnt;
2910 __le32 rx_detected_cnt; 2890 __le32 rx_detected_cnt;
2911 __le32 bt_prio_defer_cnt; 2891 __le32 bt_prio_defer_cnt;
@@ -2920,13 +2900,12 @@ struct statistics_tx {
2920 __le32 burst_abort_missing_next_frame_cnt; 2900 __le32 burst_abort_missing_next_frame_cnt;
2921 __le32 cts_timeout_collision; 2901 __le32 cts_timeout_collision;
2922 __le32 ack_or_ba_timeout_collision; 2902 __le32 ack_or_ba_timeout_collision;
2923 struct statistics_tx_non_phy_agg agg; 2903 struct stats_tx_non_phy_agg agg;
2924 2904
2925 __le32 reserved1; 2905 __le32 reserved1;
2926} __packed; 2906} __packed;
2927 2907
2928 2908struct stats_div {
2929struct statistics_div {
2930 __le32 tx_on_a; 2909 __le32 tx_on_a;
2931 __le32 tx_on_b; 2910 __le32 tx_on_b;
2932 __le32 exec_time; 2911 __le32 exec_time;
@@ -2935,14 +2914,14 @@ struct statistics_div {
2935 __le32 reserved2; 2914 __le32 reserved2;
2936} __packed; 2915} __packed;
2937 2916
2938struct statistics_general_common { 2917struct stats_general_common {
2939 __le32 temperature; /* radio temperature */ 2918 __le32 temperature; /* radio temperature */
2940 struct statistics_dbg dbg; 2919 struct stats_dbg dbg;
2941 __le32 sleep_time; 2920 __le32 sleep_time;
2942 __le32 slots_out; 2921 __le32 slots_out;
2943 __le32 slots_idle; 2922 __le32 slots_idle;
2944 __le32 ttl_timestamp; 2923 __le32 ttl_timestamp;
2945 struct statistics_div div; 2924 struct stats_div div;
2946 __le32 rx_enable_counter; 2925 __le32 rx_enable_counter;
2947 /* 2926 /*
2948 * num_of_sos_states: 2927 * num_of_sos_states:
@@ -2952,73 +2931,73 @@ struct statistics_general_common {
2952 __le32 num_of_sos_states; 2931 __le32 num_of_sos_states;
2953} __packed; 2932} __packed;
2954 2933
2955struct statistics_general { 2934struct stats_general {
2956 struct statistics_general_common common; 2935 struct stats_general_common common;
2957 __le32 reserved2; 2936 __le32 reserved2;
2958 __le32 reserved3; 2937 __le32 reserved3;
2959} __packed; 2938} __packed;
2960 2939
2961#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0) 2940#define UCODE_STATS_CLEAR_MSK (0x1 << 0)
2962#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1) 2941#define UCODE_STATS_FREQUENCY_MSK (0x1 << 1)
2963#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2) 2942#define UCODE_STATS_NARROW_BAND_MSK (0x1 << 2)
2964 2943
2965/* 2944/*
2966 * REPLY_STATISTICS_CMD = 0x9c, 2945 * C_STATS = 0x9c,
2967 * all devices identical. 2946 * all devices identical.
2968 * 2947 *
2969 * This command triggers an immediate response containing uCode statistics. 2948 * This command triggers an immediate response containing uCode stats.
2970 * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below. 2949 * The response is in the same format as N_STATS 0x9d, below.
2971 * 2950 *
2972 * If the CLEAR_STATS configuration flag is set, uCode will clear its 2951 * If the CLEAR_STATS configuration flag is set, uCode will clear its
2973 * internal copy of the statistics (counters) after issuing the response. 2952 * internal copy of the stats (counters) after issuing the response.
2974 * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below). 2953 * This flag does not affect N_STATSs after beacons (see below).
2975 * 2954 *
2976 * If the DISABLE_NOTIF configuration flag is set, uCode will not issue 2955 * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
2977 * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag 2956 * N_STATSs after received beacons (see below). This flag
2978 * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself. 2957 * does not affect the response to the C_STATS 0x9c itself.
2979 */ 2958 */
2980#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */ 2959#define IL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
2981#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */ 2960#define IL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2) /* see above */
2982struct iwl_statistics_cmd { 2961struct il_stats_cmd {
2983 __le32 configuration_flags; /* IWL_STATS_CONF_* */ 2962 __le32 configuration_flags; /* IL_STATS_CONF_* */
2984} __packed; 2963} __packed;
2985 2964
2986/* 2965/*
2987 * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command) 2966 * N_STATS = 0x9d (notification only, not a command)
2988 * 2967 *
2989 * By default, uCode issues this notification after receiving a beacon 2968 * By default, uCode issues this notification after receiving a beacon
2990 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the 2969 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
2991 * REPLY_STATISTICS_CMD 0x9c, above. 2970 * C_STATS 0x9c, above.
2992 * 2971 *
2993 * Statistics counters continue to increment beacon after beacon, but are 2972 * Statistics counters continue to increment beacon after beacon, but are
2994 * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD 2973 * cleared when changing channels or when driver issues C_STATS
2995 * 0x9c with CLEAR_STATS bit set (see above). 2974 * 0x9c with CLEAR_STATS bit set (see above).
2996 * 2975 *
2997 * uCode also issues this notification during scans. uCode clears statistics 2976 * uCode also issues this notification during scans. uCode clears stats
2998 * appropriately so that each notification contains statistics for only the 2977 * appropriately so that each notification contains stats for only the
2999 * one channel that has just been scanned. 2978 * one channel that has just been scanned.
3000 */ 2979 */
3001#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2) 2980#define STATS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
3002#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8) 2981#define STATS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
3003 2982
3004struct iwl3945_notif_statistics { 2983struct il3945_notif_stats {
3005 __le32 flag; 2984 __le32 flag;
3006 struct iwl39_statistics_rx rx; 2985 struct iwl39_stats_rx rx;
3007 struct iwl39_statistics_tx tx; 2986 struct iwl39_stats_tx tx;
3008 struct iwl39_statistics_general general; 2987 struct iwl39_stats_general general;
3009} __packed; 2988} __packed;
3010 2989
3011struct iwl_notif_statistics { 2990struct il_notif_stats {
3012 __le32 flag; 2991 __le32 flag;
3013 struct statistics_rx rx; 2992 struct stats_rx rx;
3014 struct statistics_tx tx; 2993 struct stats_tx tx;
3015 struct statistics_general general; 2994 struct stats_general general;
3016} __packed; 2995} __packed;
3017 2996
3018/* 2997/*
3019 * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command) 2998 * N_MISSED_BEACONS = 0xa2 (notification only, not a command)
3020 * 2999 *
3021 * uCode send MISSED_BEACONS_NOTIFICATION to driver when detect beacon missed 3000 * uCode send N_MISSED_BEACONS to driver when detect beacon missed
3022 * in regardless of how many missed beacons, which mean when driver receive the 3001 * in regardless of how many missed beacons, which mean when driver receive the
3023 * notification, inside the command, it can find all the beacons information 3002 * notification, inside the command, it can find all the beacons information
3024 * which include number of total missed beacons, number of consecutive missed 3003 * which include number of total missed beacons, number of consecutive missed
@@ -3035,18 +3014,17 @@ struct iwl_notif_statistics {
3035 * 3014 *
3036 */ 3015 */
3037 3016
3038#define IWL_MISSED_BEACON_THRESHOLD_MIN (1) 3017#define IL_MISSED_BEACON_THRESHOLD_MIN (1)
3039#define IWL_MISSED_BEACON_THRESHOLD_DEF (5) 3018#define IL_MISSED_BEACON_THRESHOLD_DEF (5)
3040#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF 3019#define IL_MISSED_BEACON_THRESHOLD_MAX IL_MISSED_BEACON_THRESHOLD_DEF
3041 3020
3042struct iwl_missed_beacon_notif { 3021struct il_missed_beacon_notif {
3043 __le32 consecutive_missed_beacons; 3022 __le32 consecutive_missed_beacons;
3044 __le32 total_missed_becons; 3023 __le32 total_missed_becons;
3045 __le32 num_expected_beacons; 3024 __le32 num_expected_beacons;
3046 __le32 num_recvd_beacons; 3025 __le32 num_recvd_beacons;
3047} __packed; 3026} __packed;
3048 3027
3049
3050/****************************************************************************** 3028/******************************************************************************
3051 * (11) 3029 * (11)
3052 * Rx Calibration Commands: 3030 * Rx Calibration Commands:
@@ -3062,7 +3040,7 @@ struct iwl_missed_beacon_notif {
3062 *****************************************************************************/ 3040 *****************************************************************************/
3063 3041
3064/** 3042/**
3065 * SENSITIVITY_CMD = 0xa8 (command, has simple generic response) 3043 * C_SENSITIVITY = 0xa8 (command, has simple generic response)
3066 * 3044 *
3067 * This command sets up the Rx signal detector for a sensitivity level that 3045 * This command sets up the Rx signal detector for a sensitivity level that
3068 * is high enough to lock onto all signals within the associated network, 3046 * is high enough to lock onto all signals within the associated network,
@@ -3076,12 +3054,12 @@ struct iwl_missed_beacon_notif {
3076 * time listening, not transmitting). Driver must adjust sensitivity so that 3054 * time listening, not transmitting). Driver must adjust sensitivity so that
3077 * the ratio of actual false alarms to actual Rx time falls within this range. 3055 * the ratio of actual false alarms to actual Rx time falls within this range.
3078 * 3056 *
3079 * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each 3057 * While associated, uCode delivers N_STATSs after each
3080 * received beacon. These provide information to the driver to analyze the 3058 * received beacon. These provide information to the driver to analyze the
3081 * sensitivity. Don't analyze statistics that come in from scanning, or any 3059 * sensitivity. Don't analyze stats that come in from scanning, or any
3082 * other non-associated-network source. Pertinent statistics include: 3060 * other non-associated-network source. Pertinent stats include:
3083 * 3061 *
3084 * From "general" statistics (struct statistics_rx_non_phy): 3062 * From "general" stats (struct stats_rx_non_phy):
3085 * 3063 *
3086 * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level) 3064 * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
3087 * Measure of energy of desired signal. Used for establishing a level 3065 * Measure of energy of desired signal. Used for establishing a level
@@ -3094,7 +3072,7 @@ struct iwl_missed_beacon_notif {
3094 * uSecs of actual Rx time during beacon period (varies according to 3072 * uSecs of actual Rx time during beacon period (varies according to
3095 * how much time was spent transmitting). 3073 * how much time was spent transmitting).
3096 * 3074 *
3097 * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately: 3075 * From "cck" and "ofdm" stats (struct stats_rx_phy), separately:
3098 * 3076 *
3099 * false_alarm_cnt 3077 * false_alarm_cnt
3100 * Signal locks abandoned early (before phy-level header). 3078 * Signal locks abandoned early (before phy-level header).
@@ -3111,15 +3089,15 @@ struct iwl_missed_beacon_notif {
3111 * 3089 *
3112 * Total number of false alarms = false_alarms + plcp_errs 3090 * Total number of false alarms = false_alarms + plcp_errs
3113 * 3091 *
3114 * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd 3092 * For OFDM, adjust the following table entries in struct il_sensitivity_cmd
3115 * (notice that the start points for OFDM are at or close to settings for 3093 * (notice that the start points for OFDM are at or close to settings for
3116 * maximum sensitivity): 3094 * maximum sensitivity):
3117 * 3095 *
3118 * START / MIN / MAX 3096 * START / MIN / MAX
3119 * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120 3097 * HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX 90 / 85 / 120
3120 * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210 3098 * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX 170 / 170 / 210
3121 * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140 3099 * HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX 105 / 105 / 140
3122 * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270 3100 * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX 220 / 220 / 270
3123 * 3101 *
3124 * If actual rate of OFDM false alarms (+ plcp_errors) is too high 3102 * If actual rate of OFDM false alarms (+ plcp_errors) is too high
3125 * (greater than 50 for each 204.8 msecs listening), reduce sensitivity 3103 * (greater than 50 for each 204.8 msecs listening), reduce sensitivity
@@ -3152,30 +3130,30 @@ struct iwl_missed_beacon_notif {
3152 * Reset this to 0 at the first beacon period that falls within the 3130 * Reset this to 0 at the first beacon period that falls within the
3153 * "good" range (5 to 50 false alarms per 204.8 milliseconds rx). 3131 * "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
3154 * 3132 *
3155 * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd 3133 * Then, adjust the following CCK table entries in struct il_sensitivity_cmd
3156 * (notice that the start points for CCK are at maximum sensitivity): 3134 * (notice that the start points for CCK are at maximum sensitivity):
3157 * 3135 *
3158 * START / MIN / MAX 3136 * START / MIN / MAX
3159 * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200 3137 * HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX 125 / 125 / 200
3160 * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400 3138 * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX 200 / 200 / 400
3161 * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100 3139 * HD_MIN_ENERGY_CCK_DET_IDX 100 / 0 / 100
3162 * 3140 *
3163 * If actual rate of CCK false alarms (+ plcp_errors) is too high 3141 * If actual rate of CCK false alarms (+ plcp_errors) is too high
3164 * (greater than 50 for each 204.8 msecs listening), method for reducing 3142 * (greater than 50 for each 204.8 msecs listening), method for reducing
3165 * sensitivity is: 3143 * sensitivity is:
3166 * 3144 *
3167 * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX, 3145 * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX,
3168 * up to max 400. 3146 * up to max 400.
3169 * 3147 *
3170 * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160, 3148 * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is < 160,
3171 * sensitivity has been reduced a significant amount; bring it up to 3149 * sensitivity has been reduced a significant amount; bring it up to
3172 * a moderate 161. Otherwise, *add* 3, up to max 200. 3150 * a moderate 161. Otherwise, *add* 3, up to max 200.
3173 * 3151 *
3174 * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160, 3152 * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is > 160,
3175 * sensitivity has been reduced only a moderate or small amount; 3153 * sensitivity has been reduced only a moderate or small amount;
3176 * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX, 3154 * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_IDX,
3177 * down to min 0. Otherwise (if gain has been significantly reduced), 3155 * down to min 0. Otherwise (if gain has been significantly reduced),
3178 * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value. 3156 * don't change the HD_MIN_ENERGY_CCK_DET_IDX value.
3179 * 3157 *
3180 * b) Save a snapshot of the "silence reference". 3158 * b) Save a snapshot of the "silence reference".
3181 * 3159 *
@@ -3191,13 +3169,13 @@ struct iwl_missed_beacon_notif {
3191 * 3169 *
3192 * Method for increasing sensitivity: 3170 * Method for increasing sensitivity:
3193 * 3171 *
3194 * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX, 3172 * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX,
3195 * down to min 125. 3173 * down to min 125.
3196 * 3174 *
3197 * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX, 3175 * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX,
3198 * down to min 200. 3176 * down to min 200.
3199 * 3177 *
3200 * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100. 3178 * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_IDX, up to max 100.
3201 * 3179 *
3202 * If actual rate of CCK false alarms (+ plcp_errors) is within good range 3180 * If actual rate of CCK false alarms (+ plcp_errors) is within good range
3203 * (between 5 and 50 for each 204.8 msecs listening): 3181 * (between 5 and 50 for each 204.8 msecs listening):
@@ -3206,57 +3184,56 @@ struct iwl_missed_beacon_notif {
3206 * 3184 *
3207 * 2) If previous beacon had too many CCK false alarms (+ plcp_errors), 3185 * 2) If previous beacon had too many CCK false alarms (+ plcp_errors),
3208 * give some extra margin to energy threshold by *subtracting* 8 3186 * give some extra margin to energy threshold by *subtracting* 8
3209 * from value in HD_MIN_ENERGY_CCK_DET_INDEX. 3187 * from value in HD_MIN_ENERGY_CCK_DET_IDX.
3210 * 3188 *
3211 * For all cases (too few, too many, good range), make sure that the CCK 3189 * For all cases (too few, too many, good range), make sure that the CCK
3212 * detection threshold (energy) is below the energy level for robust 3190 * detection threshold (energy) is below the energy level for robust
3213 * detection over the past 10 beacon periods, the "Max cck energy". 3191 * detection over the past 10 beacon periods, the "Max cck energy".
3214 * Lower values mean higher energy; this means making sure that the value 3192 * Lower values mean higher energy; this means making sure that the value
3215 * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy". 3193 * in HD_MIN_ENERGY_CCK_DET_IDX is at or *above* "Max cck energy".
3216 * 3194 *
3217 */ 3195 */
3218 3196
3219/* 3197/*
3220 * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd) 3198 * Table entries in C_SENSITIVITY (struct il_sensitivity_cmd)
3221 */ 3199 */
3222#define HD_TABLE_SIZE (11) /* number of entries */ 3200#define HD_TBL_SIZE (11) /* number of entries */
3223#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */ 3201#define HD_MIN_ENERGY_CCK_DET_IDX (0) /* table idxes */
3224#define HD_MIN_ENERGY_OFDM_DET_INDEX (1) 3202#define HD_MIN_ENERGY_OFDM_DET_IDX (1)
3225#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2) 3203#define HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX (2)
3226#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3) 3204#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX (3)
3227#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4) 3205#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX (4)
3228#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5) 3206#define HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX (5)
3229#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6) 3207#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX (6)
3230#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7) 3208#define HD_BARKER_CORR_TH_ADD_MIN_IDX (7)
3231#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8) 3209#define HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX (8)
3232#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9) 3210#define HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX (9)
3233#define HD_OFDM_ENERGY_TH_IN_INDEX (10) 3211#define HD_OFDM_ENERGY_TH_IN_IDX (10)
3234 3212
3235/* Control field in struct iwl_sensitivity_cmd */ 3213/* Control field in struct il_sensitivity_cmd */
3236#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0) 3214#define C_SENSITIVITY_CONTROL_DEFAULT_TBL cpu_to_le16(0)
3237#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1) 3215#define C_SENSITIVITY_CONTROL_WORK_TBL cpu_to_le16(1)
3238 3216
3239/** 3217/**
3240 * struct iwl_sensitivity_cmd 3218 * struct il_sensitivity_cmd
3241 * @control: (1) updates working table, (0) updates default table 3219 * @control: (1) updates working table, (0) updates default table
3242 * @table: energy threshold values, use HD_* as index into table 3220 * @table: energy threshold values, use HD_* as idx into table
3243 * 3221 *
3244 * Always use "1" in "control" to update uCode's working table and DSP. 3222 * Always use "1" in "control" to update uCode's working table and DSP.
3245 */ 3223 */
3246struct iwl_sensitivity_cmd { 3224struct il_sensitivity_cmd {
3247 __le16 control; /* always use "1" */ 3225 __le16 control; /* always use "1" */
3248 __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */ 3226 __le16 table[HD_TBL_SIZE]; /* use HD_* as idx */
3249} __packed; 3227} __packed;
3250 3228
3251
3252/** 3229/**
3253 * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response) 3230 * C_PHY_CALIBRATION = 0xb0 (command, has simple generic response)
3254 * 3231 *
3255 * This command sets the relative gains of 4965 device's 3 radio receiver chains. 3232 * This command sets the relative gains of 4965 device's 3 radio receiver chains.
3256 * 3233 *
3257 * After the first association, driver should accumulate signal and noise 3234 * After the first association, driver should accumulate signal and noise
3258 * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20 3235 * stats from the N_STATSs that follow the first 20
3259 * beacons from the associated network (don't collect statistics that come 3236 * beacons from the associated network (don't collect stats that come
3260 * in from scanning, or any other non-network source). 3237 * in from scanning, or any other non-network source).
3261 * 3238 *
3262 * DISCONNECTED ANTENNA: 3239 * DISCONNECTED ANTENNA:
@@ -3264,7 +3241,7 @@ struct iwl_sensitivity_cmd {
3264 * Driver should determine which antennas are actually connected, by comparing 3241 * Driver should determine which antennas are actually connected, by comparing
3265 * average beacon signal levels for the 3 Rx chains. Accumulate (add) the 3242 * average beacon signal levels for the 3 Rx chains. Accumulate (add) the
3266 * following values over 20 beacons, one accumulator for each of the chains 3243 * following values over 20 beacons, one accumulator for each of the chains
3267 * a/b/c, from struct statistics_rx_non_phy: 3244 * a/b/c, from struct stats_rx_non_phy:
3268 * 3245 *
3269 * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB) 3246 * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
3270 * 3247 *
@@ -3283,7 +3260,7 @@ struct iwl_sensitivity_cmd {
3283 * to antennas, see above) for gain, by comparing the average signal levels 3260 * to antennas, see above) for gain, by comparing the average signal levels
3284 * detected during the silence after each beacon (background noise). 3261 * detected during the silence after each beacon (background noise).
3285 * Accumulate (add) the following values over 20 beacons, one accumulator for 3262 * Accumulate (add) the following values over 20 beacons, one accumulator for
3286 * each of the chains a/b/c, from struct statistics_rx_non_phy: 3263 * each of the chains a/b/c, from struct stats_rx_non_phy:
3287 * 3264 *
3288 * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB) 3265 * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
3289 * 3266 *
@@ -3294,7 +3271,7 @@ struct iwl_sensitivity_cmd {
3294 * (accum_noise[i] - accum_noise[reference]) / 30 3271 * (accum_noise[i] - accum_noise[reference]) / 30
3295 * 3272 *
3296 * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB. 3273 * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
3297 * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the 3274 * For use in diff_gain_[abc] fields of struct il_calibration_cmd, the
3298 * driver should limit the difference results to a range of 0-3 (0-4.5 dB), 3275 * driver should limit the difference results to a range of 0-3 (0-4.5 dB),
3299 * and set bit 2 to indicate "reduce gain". The value for the reference 3276 * and set bit 2 to indicate "reduce gain". The value for the reference
3300 * (weakest) chain should be "0". 3277 * (weakest) chain should be "0".
@@ -3306,24 +3283,24 @@ struct iwl_sensitivity_cmd {
3306 3283
3307/* Phy calibration command for series */ 3284/* Phy calibration command for series */
3308/* The default calibrate table size if not specified by firmware */ 3285/* The default calibrate table size if not specified by firmware */
3309#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18 3286#define IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
3310enum { 3287enum {
3311 IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7, 3288 IL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
3312 IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19, 3289 IL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
3313}; 3290};
3314 3291
3315#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253) 3292#define IL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
3316 3293
3317struct iwl_calib_hdr { 3294struct il_calib_hdr {
3318 u8 op_code; 3295 u8 op_code;
3319 u8 first_group; 3296 u8 first_group;
3320 u8 groups_num; 3297 u8 groups_num;
3321 u8 data_valid; 3298 u8 data_valid;
3322} __packed; 3299} __packed;
3323 3300
3324/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */ 3301/* IL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
3325struct iwl_calib_diff_gain_cmd { 3302struct il_calib_diff_gain_cmd {
3326 struct iwl_calib_hdr hdr; 3303 struct il_calib_hdr hdr;
3327 s8 diff_gain_a; /* see above */ 3304 s8 diff_gain_a; /* see above */
3328 s8 diff_gain_b; 3305 s8 diff_gain_b;
3329 s8 diff_gain_c; 3306 s8 diff_gain_c;
@@ -3338,12 +3315,12 @@ struct iwl_calib_diff_gain_cmd {
3338 3315
3339/* 3316/*
3340 * LEDs Command & Response 3317 * LEDs Command & Response
3341 * REPLY_LEDS_CMD = 0x48 (command, has simple generic response) 3318 * C_LEDS = 0x48 (command, has simple generic response)
3342 * 3319 *
3343 * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field), 3320 * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
3344 * this command turns it on or off, or sets up a periodic blinking cycle. 3321 * this command turns it on or off, or sets up a periodic blinking cycle.
3345 */ 3322 */
3346struct iwl_led_cmd { 3323struct il_led_cmd {
3347 __le32 interval; /* "interval" in uSec */ 3324 __le32 interval; /* "interval" in uSec */
3348 u8 id; /* 1: Activity, 2: Link, 3: Tech */ 3325 u8 id; /* 1: Activity, 2: Link, 3: Tech */
3349 u8 off; /* # intervals off while blinking; 3326 u8 off; /* # intervals off while blinking;
@@ -3353,14 +3330,15 @@ struct iwl_led_cmd {
3353 u8 reserved; 3330 u8 reserved;
3354} __packed; 3331} __packed;
3355 3332
3356
3357/****************************************************************************** 3333/******************************************************************************
3358 * (13) 3334 * (13)
3359 * Union of all expected notifications/responses: 3335 * Union of all expected notifications/responses:
3360 * 3336 *
3361 *****************************************************************************/ 3337 *****************************************************************************/
3362 3338
3363struct iwl_rx_packet { 3339#define IL_RX_FRAME_SIZE_MSK 0x00003fff
3340
3341struct il_rx_pkt {
3364 /* 3342 /*
3365 * The first 4 bytes of the RX frame header contain both the RX frame 3343 * The first 4 bytes of the RX frame header contain both the RX frame
3366 * size and some flags. 3344 * size and some flags.
@@ -3372,27 +3350,27 @@ struct iwl_rx_packet {
3372 * 13-00: RX frame size 3350 * 13-00: RX frame size
3373 */ 3351 */
3374 __le32 len_n_flags; 3352 __le32 len_n_flags;
3375 struct iwl_cmd_header hdr; 3353 struct il_cmd_header hdr;
3376 union { 3354 union {
3377 struct iwl3945_rx_frame rx_frame; 3355 struct il3945_rx_frame rx_frame;
3378 struct iwl3945_tx_resp tx_resp; 3356 struct il3945_tx_resp tx_resp;
3379 struct iwl3945_beacon_notif beacon_status; 3357 struct il3945_beacon_notif beacon_status;
3380 3358
3381 struct iwl_alive_resp alive_frame; 3359 struct il_alive_resp alive_frame;
3382 struct iwl_spectrum_notification spectrum_notif; 3360 struct il_spectrum_notification spectrum_notif;
3383 struct iwl_csa_notification csa_notif; 3361 struct il_csa_notification csa_notif;
3384 struct iwl_error_resp err_resp; 3362 struct il_error_resp err_resp;
3385 struct iwl_card_state_notif card_state_notif; 3363 struct il_card_state_notif card_state_notif;
3386 struct iwl_add_sta_resp add_sta; 3364 struct il_add_sta_resp add_sta;
3387 struct iwl_rem_sta_resp rem_sta; 3365 struct il_rem_sta_resp rem_sta;
3388 struct iwl_sleep_notification sleep_notif; 3366 struct il_sleep_notification sleep_notif;
3389 struct iwl_spectrum_resp spectrum; 3367 struct il_spectrum_resp spectrum;
3390 struct iwl_notif_statistics stats; 3368 struct il_notif_stats stats;
3391 struct iwl_compressed_ba_resp compressed_ba; 3369 struct il_compressed_ba_resp compressed_ba;
3392 struct iwl_missed_beacon_notif missed_beacon; 3370 struct il_missed_beacon_notif missed_beacon;
3393 __le32 status; 3371 __le32 status;
3394 u8 raw[0]; 3372 u8 raw[0];
3395 } u; 3373 } u;
3396} __packed; 3374} __packed;
3397 3375
3398#endif /* __iwl_legacy_commands_h__ */ 3376#endif /* __il_commands_h__ */
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
new file mode 100644
index 000000000000..7e2924f332a7
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -0,0 +1,5707 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/types.h>
35#include <linux/lockdep.h>
36#include <linux/init.h>
37#include <linux/pci.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/skbuff.h>
41#include <net/mac80211.h>
42
43#include "common.h"
44
/**
 * il_get_cmd_string - map a host command/notification opcode to its name
 * @cmd: opcode byte from a command or notification header (C_* / N_*)
 *
 * Returns a static string naming the opcode, or "UNKNOWN" for opcodes
 * not in the table.  NOTE(review): relies on the IL_CMD() macro, which
 * presumably expands to "case x: return #x;" (stringify) — confirm its
 * definition in the driver headers.
 */
const char *
il_get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IL_CMD(N_ALIVE);
		IL_CMD(N_ERROR);
		IL_CMD(C_RXON);
		IL_CMD(C_RXON_ASSOC);
		IL_CMD(C_QOS_PARAM);
		IL_CMD(C_RXON_TIMING);
		IL_CMD(C_ADD_STA);
		IL_CMD(C_REM_STA);
		IL_CMD(C_WEPKEY);
		IL_CMD(N_3945_RX);
		IL_CMD(C_TX);
		IL_CMD(C_RATE_SCALE);
		IL_CMD(C_LEDS);
		IL_CMD(C_TX_LINK_QUALITY_CMD);
		IL_CMD(C_CHANNEL_SWITCH);
		IL_CMD(N_CHANNEL_SWITCH);
		IL_CMD(C_SPECTRUM_MEASUREMENT);
		IL_CMD(N_SPECTRUM_MEASUREMENT);
		IL_CMD(C_POWER_TBL);
		IL_CMD(N_PM_SLEEP);
		IL_CMD(N_PM_DEBUG_STATS);
		IL_CMD(C_SCAN);
		IL_CMD(C_SCAN_ABORT);
		IL_CMD(N_SCAN_START);
		IL_CMD(N_SCAN_RESULTS);
		IL_CMD(N_SCAN_COMPLETE);
		IL_CMD(N_BEACON);
		IL_CMD(C_TX_BEACON);
		IL_CMD(C_TX_PWR_TBL);
		IL_CMD(C_BT_CONFIG);
		IL_CMD(C_STATS);
		IL_CMD(N_STATS);
		IL_CMD(N_CARD_STATE);
		IL_CMD(N_MISSED_BEACONS);
		IL_CMD(C_CT_KILL_CONFIG);
		IL_CMD(C_SENSITIVITY);
		IL_CMD(C_PHY_CALIBRATION);
		IL_CMD(N_RX_PHY);
		IL_CMD(N_RX_MPDU);
		IL_CMD(N_RX);
		IL_CMD(N_COMPRESSED_BA);
	default:
		return "UNKNOWN";

	}
}
EXPORT_SYMBOL(il_get_cmd_string);
96
/* Max time to wait for a synchronous host command response: 500 ms. */
#define HOST_COMPLETE_TIMEOUT (HZ / 2)

/*
 * il_generic_cmd_callback - default completion handler for async commands
 *
 * Installed by il_send_cmd_async() when the caller supplies no callback.
 * Logs an error if the firmware flagged the command as failed; otherwise,
 * on debug builds only, traces the completed command (with a payload dump
 * for the two noisiest command types).
 */
static void
il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd,
			struct il_rx_pkt *pkt)
{
	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from %s (0x%08X)\n",
		       il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	switch (cmd->hdr.cmd) {
	case C_TX_LINK_QUALITY_CMD:
	case C_SENSITIVITY:
		D_HC_DUMP("back from %s (0x%08X)\n",
			  il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	default:
		D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd),
		     pkt->hdr.flags);
	}
#endif
}
121
122static int
123il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd)
124{
125 int ret;
126
127 BUG_ON(!(cmd->flags & CMD_ASYNC));
128
129 /* An asynchronous command can not expect an SKB to be set. */
130 BUG_ON(cmd->flags & CMD_WANT_SKB);
131
132 /* Assign a generic callback if one is not provided */
133 if (!cmd->callback)
134 cmd->callback = il_generic_cmd_callback;
135
136 if (test_bit(S_EXIT_PENDING, &il->status))
137 return -EBUSY;
138
139 ret = il_enqueue_hcmd(il, cmd);
140 if (ret < 0) {
141 IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
142 il_get_cmd_string(cmd->id), ret);
143 return ret;
144 }
145 return 0;
146}
147
/**
 * il_send_cmd_sync - send a host command and block until it completes
 * @il: device private data
 * @cmd: command descriptor (id, payload, flags; no CMD_ASYNC, no callback)
 *
 * Must be called with il->mutex held.  Enqueues the command, then waits
 * up to HOST_COMPLETE_TIMEOUT for the response path to clear the
 * S_HCMD_ACTIVE bit and wake wait_command_queue.
 *
 * Returns 0 on success, or a negative errno: the enqueue error,
 * -ETIMEDOUT on timeout, -ECANCELED on RF-kill, -EIO on firmware error
 * or when a requested reply page (CMD_WANT_SKB) never arrived.
 */
int
il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	lockdep_assert_held(&il->mutex);

	BUG_ON(cmd->flags & CMD_ASYNC);

	/* A synchronous command can not have a callback set. */
	BUG_ON(cmd->callback);

	D_INFO("Attempting to send sync command %s\n",
	       il_get_cmd_string(cmd->id));

	/* Mark the command in flight; the response path clears this bit. */
	set_bit(S_HCMD_ACTIVE, &il->status);
	D_INFO("Setting HCMD_ACTIVE for command %s\n",
	       il_get_cmd_string(cmd->id));

	cmd_idx = il_enqueue_hcmd(il, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
		       il_get_cmd_string(cmd->id), ret);
		goto out;
	}

	ret = wait_event_timeout(il->wait_command_queue,
				 !test_bit(S_HCMD_ACTIVE, &il->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		/* Timed out: only treat as an error if the command is
		 * still marked active, i.e. no response ever arrived. */
		if (test_bit(S_HCMD_ACTIVE, &il->status)) {
			IL_ERR("Error sending %s: time out after %dms.\n",
			       il_get_cmd_string(cmd->id),
			       jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(S_HCMD_ACTIVE, &il->status);
			D_INFO("Clearing HCMD_ACTIVE for command %s\n",
			       il_get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(S_RF_KILL_HW, &il->status)) {
		IL_ERR("Command %s aborted: RF KILL Switch\n",
		       il_get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(S_FW_ERROR, &il->status)) {
		IL_ERR("Command %s failed: FW Error\n",
		       il_get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IL_ERR("Error: Response NULL in '%s'\n",
		       il_get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	ret = 0;
	goto out;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
	}
fail:
	/* Release the reply page on any failure path that holds one. */
	if (cmd->reply_page) {
		il_free_pages(il, cmd->reply_page);
		cmd->reply_page = 0;
	}
out:
	return ret;
}
EXPORT_SYMBOL(il_send_cmd_sync);
234
235int
236il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
237{
238 if (cmd->flags & CMD_ASYNC)
239 return il_send_cmd_async(il, cmd);
240
241 return il_send_cmd_sync(il, cmd);
242}
243EXPORT_SYMBOL(il_send_cmd);
244
245int
246il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
247{
248 struct il_host_cmd cmd = {
249 .id = id,
250 .len = len,
251 .data = data,
252 };
253
254 return il_send_cmd_sync(il, &cmd);
255}
256EXPORT_SYMBOL(il_send_cmd_pdu);
257
258int
259il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
260 void (*callback) (struct il_priv *il,
261 struct il_device_cmd *cmd,
262 struct il_rx_pkt *pkt))
263{
264 struct il_host_cmd cmd = {
265 .id = id,
266 .len = len,
267 .data = data,
268 };
269
270 cmd.flags |= CMD_ASYNC;
271 cmd.callback = callback;
272
273 return il_send_cmd_async(il, &cmd);
274}
275EXPORT_SYMBOL(il_send_cmd_pdu_async);
276
/* default: IL_LED_BLINK(0) using blinking idx table */
/* Module load-time LED behavior override (read-only via sysfs). */
static int led_mode;
module_param(led_mode, int, S_IRUGO);
MODULE_PARM_DESC(led_mode,
		 "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking");
282
/* Throughput OFF time(ms) ON time (ms)
 * >300 25 25
 * >200 to 300 40 40
 * >100 to 200 55 55
 * >70 to 100 65 65
 * >50 to 70 75 75
 * >20 to 50 85 85
 * >10 to 20 95 95
 * >5 to 10 110 110
 * >1 to 5 130 130
 * >0 to 1 167 167
 * <=0 SOLID ON
 */
/* Throughput thresholds (presumably kbit/s, given the 1024 multipliers)
 * to blink periods for the mac80211 throughput LED trigger — entries
 * must stay sorted by ascending throughput. TODO confirm units. */
static const struct ieee80211_tpt_blink il_blink[] = {
	{.throughput = 0, .blink_time = 334},
	{.throughput = 1 * 1024 - 1, .blink_time = 260},
	{.throughput = 5 * 1024 - 1, .blink_time = 220},
	{.throughput = 10 * 1024 - 1, .blink_time = 190},
	{.throughput = 20 * 1024 - 1, .blink_time = 170},
	{.throughput = 50 * 1024 - 1, .blink_time = 150},
	{.throughput = 70 * 1024 - 1, .blink_time = 130},
	{.throughput = 100 * 1024 - 1, .blink_time = 110},
	{.throughput = 200 * 1024 - 1, .blink_time = 80},
	{.throughput = 300 * 1024 - 1, .blink_time = 50},
};
308
309/*
310 * Adjust led blink rate to compensate on a MAC Clock difference on every HW
311 * Led blink rate analysis showed an average deviation of 0% on 3945,
312 * 5% on 4965 HW.
313 * Need to compensate on the led on/off time per HW according to the deviation
314 * to achieve the desired led frequency
315 * The calculation is: (100-averageDeviation)/100 * blinkTime
316 * For code efficiency the calculation will be:
317 * compensation = (100 - averageDeviation) * 64 / 100
318 * NewBlinkTime = (compensation * BlinkTime) / 64
319 */
320static inline u8
321il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
322{
323 if (!compensation) {
324 IL_ERR("undefined blink compensation: "
325 "use pre-defined blinking time\n");
326 return time;
327 }
328
329 return (u8) ((time * compensation) >> 6);
330}
331
/* Set led pattern command */
/*
 * il_led_cmd - program the link LED with an on/off blink pattern
 *
 * @on/@off are interval counts in units of IL_DEF_LED_INTRVL; off == 0
 * means solid on.  Skips the firmware command when the requested pattern
 * matches the cached one.  Returns 0 on success, -EBUSY before the
 * device is ready, or the error from the per-device led op.
 */
static int
il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
{
	struct il_led_cmd led_cmd = {
		.id = IL_LED_LINK,
		.interval = IL_DEF_LED_INTRVL
	};
	int ret;

	if (!test_bit(S_READY, &il->status))
		return -EBUSY;

	/* Pattern unchanged - nothing to send. */
	if (il->blink_on == on && il->blink_off == off)
		return 0;

	if (off == 0) {
		/* led is SOLID_ON */
		on = IL_LED_SOLID;
	}

	D_LED("Led blink time compensation=%u\n",
	      il->cfg->base_params->led_compensation);
	/* Scale by the per-HW MAC clock deviation factor (see
	 * il_blink_compensation above); note on/off are truncated to u8. */
	led_cmd.on =
	    il_blink_compensation(il, on,
				  il->cfg->base_params->led_compensation);
	led_cmd.off =
	    il_blink_compensation(il, off,
				  il->cfg->base_params->led_compensation);

	ret = il->cfg->ops->led->cmd(il, &led_cmd);
	if (!ret) {
		/* Cache the pattern so identical requests short-circuit. */
		il->blink_on = on;
		il->blink_off = off;
	}
	return ret;
}
369
370static void
371il_led_brightness_set(struct led_classdev *led_cdev,
372 enum led_brightness brightness)
373{
374 struct il_priv *il = container_of(led_cdev, struct il_priv, led);
375 unsigned long on = 0;
376
377 if (brightness > 0)
378 on = IL_LED_SOLID;
379
380 il_led_cmd(il, on, 0);
381}
382
383static int
384il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
385 unsigned long *delay_off)
386{
387 struct il_priv *il = container_of(led_cdev, struct il_priv, led);
388
389 return il_led_cmd(il, *delay_on, *delay_off);
390}
391
392void
393il_leds_init(struct il_priv *il)
394{
395 int mode = led_mode;
396 int ret;
397
398 if (mode == IL_LED_DEFAULT)
399 mode = il->cfg->led_mode;
400
401 il->led.name =
402 kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
403 il->led.brightness_set = il_led_brightness_set;
404 il->led.blink_set = il_led_blink_set;
405 il->led.max_brightness = 1;
406
407 switch (mode) {
408 case IL_LED_DEFAULT:
409 WARN_ON(1);
410 break;
411 case IL_LED_BLINK:
412 il->led.default_trigger =
413 ieee80211_create_tpt_led_trigger(il->hw,
414 IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
415 il_blink,
416 ARRAY_SIZE(il_blink));
417 break;
418 case IL_LED_RF_STATE:
419 il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
420 break;
421 }
422
423 ret = led_classdev_register(&il->pci_dev->dev, &il->led);
424 if (ret) {
425 kfree(il->led.name);
426 return;
427 }
428
429 il->led_registered = true;
430}
431EXPORT_SYMBOL(il_leds_init);
432
433void
434il_leds_exit(struct il_priv *il)
435{
436 if (!il->led_registered)
437 return;
438
439 led_classdev_unregister(&il->led);
440 kfree(il->led.name);
441}
442EXPORT_SYMBOL(il_leds_exit);
443
/************************** EEPROM BANDS ****************************
 *
 * The il_eeprom_band definitions below provide the mapping from the
 * EEPROM contents to the specific channel number supported for each
 * band.
 *
 * For example, il_priv->eeprom.band_3_channels[4] from the band_3
 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
 * The specific geography and calibration information for that channel
 * is contained in the eeprom map itself.
 *
 * During init, we copy the eeprom information and channel map
 * information into il->channel_info_24/52 and il->channel_map_24/52
 *
 * channel_map_24/52 provides the idx in the channel_info array for a
 * given channel. We have to have two separate maps as there is channel
 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
 * band_2
 *
 * A value of 0xff stored in the channel_map indicates that the channel
 * is not supported by the hardware at all.
 *
 * A value of 0xfe in the channel_map indicates that the channel is not
 * valid for Tx with the current hardware. This means that
 * while the system can tune and receive on a given channel, it may not
 * be able to associate or transmit any frames on that
 * channel. There is no corresponding channel information for that
 * entry.
 *
 *********************************************************************/

/* 2.4 GHz */
const u8 il_eeprom_band_1[14] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};

/* 5.2 GHz bands */
static const u8 il_eeprom_band_2[] = {	/* 4915-5080MHz */
	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
};

static const u8 il_eeprom_band_3[] = {	/* 5170-5320MHz */
	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
};

static const u8 il_eeprom_band_4[] = {	/* 5500-5700MHz */
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
};

static const u8 il_eeprom_band_5[] = {	/* 5725-5825MHz */
	145, 149, 153, 157, 161, 165
};

/* HT40 (40 MHz) channel sets: each entry is the lower control channel. */
static const u8 il_eeprom_band_6[] = {	/* 2.4 ht40 channel */
	1, 2, 3, 4, 5, 6, 7
};

static const u8 il_eeprom_band_7[] = {	/* 5.2 ht40 channel */
	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
};
504
505/******************************************************************************
506 *
507 * EEPROM related functions
508 *
509******************************************************************************/
510
511static int
512il_eeprom_verify_signature(struct il_priv *il)
513{
514 u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
515 int ret = 0;
516
517 D_EEPROM("EEPROM signature=0x%08x\n", gp);
518 switch (gp) {
519 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
520 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
521 break;
522 default:
523 IL_ERR("bad EEPROM signature," "EEPROM_GP=0x%08x\n", gp);
524 ret = -ENOENT;
525 break;
526 }
527 return ret;
528}
529
530const u8 *
531il_eeprom_query_addr(const struct il_priv *il, size_t offset)
532{
533 BUG_ON(offset >= il->cfg->base_params->eeprom_size);
534 return &il->eeprom[offset];
535}
536EXPORT_SYMBOL(il_eeprom_query_addr);
537
538u16
539il_eeprom_query16(const struct il_priv *il, size_t offset)
540{
541 if (!il->eeprom)
542 return 0;
543 return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
544}
545EXPORT_SYMBOL(il_eeprom_query16);
546
547/**
548 * il_eeprom_init - read EEPROM contents
549 *
550 * Load the EEPROM contents from adapter into il->eeprom
551 *
552 * NOTE: This routine uses the non-debug IO access functions.
553 */
int
il_eeprom_init(struct il_priv *il)
{
	__le16 *e;
	u32 gp = _il_rd(il, CSR_EEPROM_GP);
	int sz;
	int ret;
	u16 addr;

	/* allocate eeprom */
	sz = il->cfg->base_params->eeprom_size;
	D_EEPROM("NVM size = %d\n", sz);
	il->eeprom = kzalloc(sz, GFP_KERNEL);
	if (!il->eeprom) {
		ret = -ENOMEM;
		goto alloc_err;
	}
	/* View the byte buffer as little-endian 16-bit words. */
	e = (__le16 *) il->eeprom;

	/* Bring up the adapter so EEPROM registers are accessible. */
	il->cfg->ops->lib->apm_ops.init(il);

	ret = il_eeprom_verify_signature(il);
	if (ret < 0) {
		IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		goto err;
	}

	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
	ret = il->cfg->ops->lib->eeprom_ops.acquire_semaphore(il);
	if (ret < 0) {
		IL_ERR("Failed to acquire EEPROM semaphore.\n");
		ret = -ENOENT;
		goto err;
	}

	/* eeprom is an array of 16bit values */
	for (addr = 0; addr < sz; addr += sizeof(u16)) {
		u32 r;

		/* Request the word at byte address 'addr'. */
		_il_wr(il, CSR_EEPROM_REG,
		       CSR_EEPROM_REG_MSK_ADDR & (addr << 1));

		/* Wait for the read-valid bit before fetching the data. */
		ret =
		    _il_poll_bit(il, CSR_EEPROM_REG,
				 CSR_EEPROM_REG_READ_VALID_MSK,
				 CSR_EEPROM_REG_READ_VALID_MSK,
				 IL_EEPROM_ACCESS_TIMEOUT);
		if (ret < 0) {
			IL_ERR("Time out reading EEPROM[%d]\n", addr);
			goto done;
		}
		/* The word is returned in the upper 16 bits of the register. */
		r = _il_rd(il, CSR_EEPROM_REG);
		e[addr / 2] = cpu_to_le16(r >> 16);
	}

	D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM",
		 il_eeprom_query16(il, EEPROM_VERSION));

	ret = 0;
done:
	il->cfg->ops->lib->eeprom_ops.release_semaphore(il);

err:
	/* On any failure free the (possibly partial) image. */
	if (ret)
		il_eeprom_free(il);
	/* Reset chip to save power until we load uCode during "up". */
	il_apm_stop(il);
alloc_err:
	return ret;
}
EXPORT_SYMBOL(il_eeprom_init);
626
627void
628il_eeprom_free(struct il_priv *il)
629{
630 kfree(il->eeprom);
631 il->eeprom = NULL;
632}
633EXPORT_SYMBOL(il_eeprom_free);
634
635static void
636il_init_band_reference(const struct il_priv *il, int eep_band,
637 int *eeprom_ch_count,
638 const struct il_eeprom_channel **eeprom_ch_info,
639 const u8 **eeprom_ch_idx)
640{
641 u32 offset =
642 il->cfg->ops->lib->eeprom_ops.regulatory_bands[eep_band - 1];
643 switch (eep_band) {
644 case 1: /* 2.4GHz band */
645 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
646 *eeprom_ch_info =
647 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
648 offset);
649 *eeprom_ch_idx = il_eeprom_band_1;
650 break;
651 case 2: /* 4.9GHz band */
652 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
653 *eeprom_ch_info =
654 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
655 offset);
656 *eeprom_ch_idx = il_eeprom_band_2;
657 break;
658 case 3: /* 5.2GHz band */
659 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
660 *eeprom_ch_info =
661 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
662 offset);
663 *eeprom_ch_idx = il_eeprom_band_3;
664 break;
665 case 4: /* 5.5GHz band */
666 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
667 *eeprom_ch_info =
668 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
669 offset);
670 *eeprom_ch_idx = il_eeprom_band_4;
671 break;
672 case 5: /* 5.7GHz band */
673 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
674 *eeprom_ch_info =
675 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
676 offset);
677 *eeprom_ch_idx = il_eeprom_band_5;
678 break;
679 case 6: /* 2.4GHz ht40 channels */
680 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
681 *eeprom_ch_info =
682 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
683 offset);
684 *eeprom_ch_idx = il_eeprom_band_6;
685 break;
686 case 7: /* 5 GHz ht40 channels */
687 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
688 *eeprom_ch_info =
689 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
690 offset);
691 *eeprom_ch_idx = il_eeprom_band_7;
692 break;
693 default:
694 BUG();
695 }
696}
697
698#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
699 ? # x " " : "")
700/**
701 * il_mod_ht40_chan_info - Copy ht40 channel info into driver's il.
702 *
703 * Does not set up a command, or touch hardware.
704 */
static int
il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel,
		      const struct il_eeprom_channel *eeprom_ch,
		      u8 clear_ht40_extension_channel)
{
	struct il_channel_info *ch_info;

	/* Look up the driver's channel entry; the const is cast away because
	 * this function updates the entry's ht40 fields. */
	ch_info =
	    (struct il_channel_info *)il_get_channel_info(il, band, channel);

	/* Unknown or invalid channel: nothing to modify. */
	if (!il_is_channel_valid(ch_info))
		return -1;

	D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
		 " Ad-Hoc %ssupported\n", ch_info->channel,
		 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
		 CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
		 CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE),
		 CHECK_AND_PRINT(DFS), eeprom_ch->flags,
		 eeprom_ch->max_power_avg,
		 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
		  !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");

	/* Record the ht40-specific EEPROM data for this channel. */
	ch_info->ht40_eeprom = *eeprom_ch;
	ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
	ch_info->ht40_flags = eeprom_ch->flags;
	/* A VALID EEPROM entry lifts the NO_HT40PLUS/MINUS restriction the
	 * caller passed in (the map starts fully restricted, see
	 * il_init_channel_map()). */
	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
		ch_info->ht40_extension_channel &=
		    ~clear_ht40_extension_channel;

	return 0;
}
737
738#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
739 ? # x " " : "")
740
741/**
742 * il_init_channel_map - Set up driver's info for all possible channels
743 */
744int
745il_init_channel_map(struct il_priv *il)
746{
747 int eeprom_ch_count = 0;
748 const u8 *eeprom_ch_idx = NULL;
749 const struct il_eeprom_channel *eeprom_ch_info = NULL;
750 int band, ch;
751 struct il_channel_info *ch_info;
752
753 if (il->channel_count) {
754 D_EEPROM("Channel map already initialized.\n");
755 return 0;
756 }
757
758 D_EEPROM("Initializing regulatory info from EEPROM\n");
759
760 il->channel_count =
761 ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) +
762 ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) +
763 ARRAY_SIZE(il_eeprom_band_5);
764
765 D_EEPROM("Parsing data for %d channels.\n", il->channel_count);
766
767 il->channel_info =
768 kzalloc(sizeof(struct il_channel_info) * il->channel_count,
769 GFP_KERNEL);
770 if (!il->channel_info) {
771 IL_ERR("Could not allocate channel_info\n");
772 il->channel_count = 0;
773 return -ENOMEM;
774 }
775
776 ch_info = il->channel_info;
777
778 /* Loop through the 5 EEPROM bands adding them in order to the
779 * channel map we maintain (that contains additional information than
780 * what just in the EEPROM) */
781 for (band = 1; band <= 5; band++) {
782
783 il_init_band_reference(il, band, &eeprom_ch_count,
784 &eeprom_ch_info, &eeprom_ch_idx);
785
786 /* Loop through each band adding each of the channels */
787 for (ch = 0; ch < eeprom_ch_count; ch++) {
788 ch_info->channel = eeprom_ch_idx[ch];
789 ch_info->band =
790 (band ==
791 1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
792
793 /* permanently store EEPROM's channel regulatory flags
794 * and max power in channel info database. */
795 ch_info->eeprom = eeprom_ch_info[ch];
796
797 /* Copy the run-time flags so they are there even on
798 * invalid channels */
799 ch_info->flags = eeprom_ch_info[ch].flags;
800 /* First write that ht40 is not enabled, and then enable
801 * one by one */
802 ch_info->ht40_extension_channel =
803 IEEE80211_CHAN_NO_HT40;
804
805 if (!(il_is_channel_valid(ch_info))) {
806 D_EEPROM("Ch. %d Flags %x [%sGHz] - "
807 "No traffic\n", ch_info->channel,
808 ch_info->flags,
809 il_is_channel_a_band(ch_info) ? "5.2" :
810 "2.4");
811 ch_info++;
812 continue;
813 }
814
815 /* Initialize regulatory-based run-time data */
816 ch_info->max_power_avg = ch_info->curr_txpow =
817 eeprom_ch_info[ch].max_power_avg;
818 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
819 ch_info->min_power = 0;
820
821 D_EEPROM("Ch. %d [%sGHz] " "%s%s%s%s%s%s(0x%02x %ddBm):"
822 " Ad-Hoc %ssupported\n", ch_info->channel,
823 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
824 CHECK_AND_PRINT_I(VALID),
825 CHECK_AND_PRINT_I(IBSS),
826 CHECK_AND_PRINT_I(ACTIVE),
827 CHECK_AND_PRINT_I(RADAR),
828 CHECK_AND_PRINT_I(WIDE),
829 CHECK_AND_PRINT_I(DFS),
830 eeprom_ch_info[ch].flags,
831 eeprom_ch_info[ch].max_power_avg,
832 ((eeprom_ch_info[ch].
833 flags & EEPROM_CHANNEL_IBSS) &&
834 !(eeprom_ch_info[ch].
835 flags & EEPROM_CHANNEL_RADAR)) ? "" :
836 "not ");
837
838 ch_info++;
839 }
840 }
841
842 /* Check if we do have HT40 channels */
843 if (il->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
844 EEPROM_REGULATORY_BAND_NO_HT40 &&
845 il->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
846 EEPROM_REGULATORY_BAND_NO_HT40)
847 return 0;
848
849 /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
850 for (band = 6; band <= 7; band++) {
851 enum ieee80211_band ieeeband;
852
853 il_init_band_reference(il, band, &eeprom_ch_count,
854 &eeprom_ch_info, &eeprom_ch_idx);
855
856 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
857 ieeeband =
858 (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
859
860 /* Loop through each band adding each of the channels */
861 for (ch = 0; ch < eeprom_ch_count; ch++) {
862 /* Set up driver's info for lower half */
863 il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
864 &eeprom_ch_info[ch],
865 IEEE80211_CHAN_NO_HT40PLUS);
866
867 /* Set up driver's info for upper half */
868 il_mod_ht40_chan_info(il, ieeeband,
869 eeprom_ch_idx[ch] + 4,
870 &eeprom_ch_info[ch],
871 IEEE80211_CHAN_NO_HT40MINUS);
872 }
873 }
874
875 return 0;
876}
877EXPORT_SYMBOL(il_init_channel_map);
878
879/*
880 * il_free_channel_map - undo allocations in il_init_channel_map
881 */
882void
883il_free_channel_map(struct il_priv *il)
884{
885 kfree(il->channel_info);
886 il->channel_count = 0;
887}
888EXPORT_SYMBOL(il_free_channel_map);
889
890/**
891 * il_get_channel_info - Find driver's ilate channel info
892 *
893 * Based on band and channel number.
894 */
895const struct il_channel_info *
896il_get_channel_info(const struct il_priv *il, enum ieee80211_band band,
897 u16 channel)
898{
899 int i;
900
901 switch (band) {
902 case IEEE80211_BAND_5GHZ:
903 for (i = 14; i < il->channel_count; i++) {
904 if (il->channel_info[i].channel == channel)
905 return &il->channel_info[i];
906 }
907 break;
908 case IEEE80211_BAND_2GHZ:
909 if (channel >= 1 && channel <= 14)
910 return &il->channel_info[channel - 1];
911 break;
912 default:
913 BUG();
914 }
915
916 return NULL;
917}
918EXPORT_SYMBOL(il_get_channel_info);
919
920/*
921 * Setting power level allows the card to go to sleep when not busy.
922 *
923 * We calculate a sleep command based on the required latency, which
924 * we get from mac80211. In order to handle thermal throttling, we can
925 * also use pre-defined power levels.
926 */
927
928/*
929 * This defines the old power levels. They are still used by default
930 * (level 1) and for thermal throttle (levels 3 through 5)
931 */
932
/* Pairs a firmware power table command with its "number of skip dtim"
 * value (see the power-level comment above). */
struct il_power_vec_entry {
	struct il_powertable_cmd cmd;
	u8 no_dtim;		/* number of skip dtim */
};
937
938static void
939il_power_sleep_cam_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
940{
941 memset(cmd, 0, sizeof(*cmd));
942
943 if (il->power_data.pci_pm)
944 cmd->flags |= IL_POWER_PCI_PM_MSK;
945
946 D_POWER("Sleep command for CAM\n");
947}
948
/* Log the complete power command and send it to the firmware
 * synchronously via C_POWER_TBL. */
static int
il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
{
	D_POWER("Sending power/sleep command\n");
	D_POWER("Flags value = 0x%08X\n", cmd->flags);
	D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
	D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
	D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
		le32_to_cpu(cmd->sleep_interval[0]),
		le32_to_cpu(cmd->sleep_interval[1]),
		le32_to_cpu(cmd->sleep_interval[2]),
		le32_to_cpu(cmd->sleep_interval[3]),
		le32_to_cpu(cmd->sleep_interval[4]));

	return il_send_cmd_pdu(il, C_POWER_TBL,
			       sizeof(struct il_powertable_cmd), cmd);
}
966
/* Apply a power/sleep command, deferring during scans and skipping
 * no-op updates unless @force. Caller must hold il->mutex. */
int
il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
{
	int ret;
	bool update_chains;

	lockdep_assert_held(&il->mutex);

	/* Don't update the RX chain when chain noise calibration is running */
	update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
	    il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;

	/* Unless forced, skip the command when nothing changed. */
	if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
		return 0;

	if (!il_is_ready_rf(il))
		return -EIO;

	/* scan complete use sleep_power_next, need to be updated */
	memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
	if (test_bit(S_SCANNING, &il->status) && !force) {
		D_INFO("Defer power set mode while scanning\n");
		return 0;
	}

	/* Mark PMI before sending so the state reflects the request. */
	if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
		set_bit(S_POWER_PMI, &il->status);

	ret = il_set_power(il, cmd);
	if (!ret) {
		if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
			clear_bit(S_POWER_PMI, &il->status);

		if (il->cfg->ops->lib->update_chain_flags && update_chains)
			il->cfg->ops->lib->update_chain_flags(il);
		else if (il->cfg->ops->lib->update_chain_flags)
			D_POWER("Cannot update the power, chain noise "
				"calibration running: %d\n",
				il->chain_noise_data.state);

		/* Remember the command the firmware actually accepted. */
		memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
	} else
		IL_ERR("set power fail, ret = %d", ret);

	return ret;
}
1013
1014int
1015il_power_update_mode(struct il_priv *il, bool force)
1016{
1017 struct il_powertable_cmd cmd;
1018
1019 il_power_sleep_cam_cmd(il, &cmd);
1020 return il_power_set_mode(il, &cmd, force);
1021}
1022EXPORT_SYMBOL(il_power_update_mode);
1023
1024/* initialize to default */
1025void
1026il_power_initialize(struct il_priv *il)
1027{
1028 u16 lctl = il_pcie_link_ctl(il);
1029
1030 il->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
1031
1032 il->power_data.debug_sleep_level_override = -1;
1033
1034 memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
1035}
1036EXPORT_SYMBOL(il_power_initialize);
1037
1038/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
1039 * sending probe req. This should be set long enough to hear probe responses
1040 * from more than one AP. */
1041#define IL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
1042#define IL_ACTIVE_DWELL_TIME_52 (20)
1043
1044#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
1045#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)
1046
1047/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
1048 * Must be set longer than active dwell time.
1049 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
1050#define IL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
1051#define IL_PASSIVE_DWELL_TIME_52 (10)
1052#define IL_PASSIVE_DWELL_BASE (100)
1053#define IL_CHANNEL_TUNE_TIME 5
1054
/* Send C_SCAN_ABORT to the firmware and check its status reply.
 * Returns 0 on a confirmed abort, -EIO otherwise. */
static int
il_send_scan_abort(struct il_priv *il)
{
	int ret;
	struct il_rx_pkt *pkt;
	struct il_host_cmd cmd = {
		.id = C_SCAN_ABORT,
		.flags = CMD_WANT_SKB,
	};

	/* Exit instantly with error when device is not ready
	 * to receive scan abort command or it does not perform
	 * hardware scan currently */
	if (!test_bit(S_READY, &il->status) ||
	    !test_bit(S_GEO_CONFIGURED, &il->status) ||
	    !test_bit(S_SCAN_HW, &il->status) ||
	    test_bit(S_FW_ERROR, &il->status) ||
	    test_bit(S_EXIT_PENDING, &il->status))
		return -EIO;

	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		return ret;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->u.status != CAN_ABORT_STATUS) {
		/* The scan abort will return 1 for success or
		 * 2 for "failure". A failure condition can be
		 * due to simply not being in an active scan which
		 * can occur if we send the scan abort before
		 * the microcode has notified us that a scan is
		 * completed. */
		D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
		ret = -EIO;
	}

	/* Release the reply page obtained via CMD_WANT_SKB. */
	il_free_pages(il, cmd.reply_page);
	return ret;
}
1094
1095static void
1096il_complete_scan(struct il_priv *il, bool aborted)
1097{
1098 /* check if scan was requested from mac80211 */
1099 if (il->scan_request) {
1100 D_SCAN("Complete scan in mac80211\n");
1101 ieee80211_scan_completed(il->hw, aborted);
1102 }
1103
1104 il->scan_vif = NULL;
1105 il->scan_request = NULL;
1106}
1107
1108void
1109il_force_scan_end(struct il_priv *il)
1110{
1111 lockdep_assert_held(&il->mutex);
1112
1113 if (!test_bit(S_SCANNING, &il->status)) {
1114 D_SCAN("Forcing scan end while not scanning\n");
1115 return;
1116 }
1117
1118 D_SCAN("Forcing scan end\n");
1119 clear_bit(S_SCANNING, &il->status);
1120 clear_bit(S_SCAN_HW, &il->status);
1121 clear_bit(S_SCAN_ABORTING, &il->status);
1122 il_complete_scan(il, true);
1123}
1124
1125static void
1126il_do_scan_abort(struct il_priv *il)
1127{
1128 int ret;
1129
1130 lockdep_assert_held(&il->mutex);
1131
1132 if (!test_bit(S_SCANNING, &il->status)) {
1133 D_SCAN("Not performing scan to abort\n");
1134 return;
1135 }
1136
1137 if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
1138 D_SCAN("Scan abort in progress\n");
1139 return;
1140 }
1141
1142 ret = il_send_scan_abort(il);
1143 if (ret) {
1144 D_SCAN("Send scan abort failed %d\n", ret);
1145 il_force_scan_end(il);
1146 } else
1147 D_SCAN("Successfully send scan abort\n");
1148}
1149
1150/**
1151 * il_scan_cancel - Cancel any currently executing HW scan
1152 */
1153int
1154il_scan_cancel(struct il_priv *il)
1155{
1156 D_SCAN("Queuing abort scan\n");
1157 queue_work(il->workqueue, &il->abort_scan);
1158 return 0;
1159}
1160EXPORT_SYMBOL(il_scan_cancel);
1161
1162/**
1163 * il_scan_cancel_timeout - Cancel any currently executing HW scan
1164 * @ms: amount of time to wait (in milliseconds) for scan to abort
1165 *
1166 */
1167int
1168il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
1169{
1170 unsigned long timeout = jiffies + msecs_to_jiffies(ms);
1171
1172 lockdep_assert_held(&il->mutex);
1173
1174 D_SCAN("Scan cancel timeout\n");
1175
1176 il_do_scan_abort(il);
1177
1178 while (time_before_eq(jiffies, timeout)) {
1179 if (!test_bit(S_SCAN_HW, &il->status))
1180 break;
1181 msleep(20);
1182 }
1183
1184 return test_bit(S_SCAN_HW, &il->status);
1185}
1186EXPORT_SYMBOL(il_scan_cancel_timeout);
1187
/* Service response to C_SCAN (0x80) */
/* Debug builds only: log the firmware's status for the scan request. */
static void
il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanreq_notification *notif =
	    (struct il_scanreq_notification *)pkt->u.raw;

	D_SCAN("Scan request status = 0x%x\n", notif->status);
#endif
}
1200
/* Service N_SCAN_START (0x82) */
static void
il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanstart_notification *notif =
	    (struct il_scanstart_notification *)pkt->u.raw;
	/* Remember the start TSF; il_hdl_scan_results() uses it to compute
	 * elapsed time per channel. */
	il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
	D_SCAN("Scan start: " "%d [802.11%s] "
	       "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel,
	       notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high),
	       le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer);
}
1214
/* Service N_SCAN_RESULTS (0x83) */
/* Debug builds only: log per-channel scan statistics and elapsed time
 * since the start TSF recorded in il_hdl_scan_start(). */
static void
il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanresults_notification *notif =
	    (struct il_scanresults_notification *)pkt->u.raw;

	D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d "
	       "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a",
	       le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low),
	       le32_to_cpu(notif->stats[0]),
	       le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
#endif
}
1231
1232/* Service N_SCAN_COMPLETE (0x84) */
1233static void
1234il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
1235{
1236
1237#ifdef CONFIG_IWLEGACY_DEBUG
1238 struct il_rx_pkt *pkt = rxb_addr(rxb);
1239 struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
1240#endif
1241
1242 D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
1243 scan_notif->scanned_channels, scan_notif->tsf_low,
1244 scan_notif->tsf_high, scan_notif->status);
1245
1246 /* The HW is no longer scanning */
1247 clear_bit(S_SCAN_HW, &il->status);
1248
1249 D_SCAN("Scan on %sGHz took %dms\n",
1250 (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
1251 jiffies_to_msecs(jiffies - il->scan_start));
1252
1253 queue_work(il->workqueue, &il->scan_completed);
1254}
1255
1256void
1257il_setup_rx_scan_handlers(struct il_priv *il)
1258{
1259 /* scan handlers */
1260 il->handlers[C_SCAN] = il_hdl_scan;
1261 il->handlers[N_SCAN_START] = il_hdl_scan_start;
1262 il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
1263 il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
1264}
1265EXPORT_SYMBOL(il_setup_rx_scan_handlers);
1266
1267inline u16
1268il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
1269 u8 n_probes)
1270{
1271 if (band == IEEE80211_BAND_5GHZ)
1272 return IL_ACTIVE_DWELL_TIME_52 +
1273 IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
1274 else
1275 return IL_ACTIVE_DWELL_TIME_24 +
1276 IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
1277}
1278EXPORT_SYMBOL(il_get_active_dwell_time);
1279
1280u16
1281il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
1282 struct ieee80211_vif *vif)
1283{
1284 struct il_rxon_context *ctx = &il->ctx;
1285 u16 value;
1286
1287 u16 passive =
1288 (band ==
1289 IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
1290 IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
1291 IL_PASSIVE_DWELL_TIME_52;
1292
1293 if (il_is_any_associated(il)) {
1294 /*
1295 * If we're associated, we clamp the maximum passive
1296 * dwell time to be 98% of the smallest beacon interval
1297 * (minus 2 * channel tune time)
1298 */
1299 value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
1300 if (value > IL_PASSIVE_DWELL_BASE || !value)
1301 value = IL_PASSIVE_DWELL_BASE;
1302 value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
1303 passive = min(value, passive);
1304 }
1305
1306 return passive;
1307}
1308EXPORT_SYMBOL(il_get_passive_dwell_time);
1309
1310void
1311il_init_scan_params(struct il_priv *il)
1312{
1313 u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
1314 if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ])
1315 il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
1316 if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ])
1317 il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
1318}
1319EXPORT_SYMBOL(il_init_scan_params);
1320
/* Start a hardware scan via the cfg-specific request_scan op and arm the
 * scan watchdog. Caller must hold il->mutex. */
static int
il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	if (WARN_ON(!il->cfg->ops->utils->request_scan))
		return -EOPNOTSUPP;

	/* The watchdog is re-armed below once the scan has started. */
	cancel_delayed_work(&il->scan_check);

	if (!il_is_ready_rf(il)) {
		IL_WARN("Request scan called when driver not ready.\n");
		return -EIO;
	}

	/* Refuse while a hardware scan is already running... */
	if (test_bit(S_SCAN_HW, &il->status)) {
		D_SCAN("Multiple concurrent scan requests in parallel.\n");
		return -EBUSY;
	}

	/* ...or while a previous scan is still being aborted. */
	if (test_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan request while abort pending.\n");
		return -EBUSY;
	}

	D_SCAN("Starting scan...\n");

	set_bit(S_SCANNING, &il->status);
	il->scan_start = jiffies;

	ret = il->cfg->ops->utils->request_scan(il, vif);
	if (ret) {
		/* Roll back the SCANNING bit on failure. */
		clear_bit(S_SCANNING, &il->status);
		return ret;
	}

	/* Watchdog: forces scan end if firmware never reports completion. */
	queue_delayed_work(il->workqueue, &il->scan_check,
			   IL_SCAN_CHECK_WATCHDOG);

	return 0;
}
1364
/* mac80211 hw_scan callback: validate the request, record it, and kick
 * off the hardware scan. */
int
il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	       struct cfg80211_scan_request *req)
{
	struct il_priv *il = hw->priv;
	int ret;

	D_MAC80211("enter\n");

	if (req->n_channels == 0)
		return -EINVAL;

	mutex_lock(&il->mutex);

	if (test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already in progress.\n");
		ret = -EAGAIN;
		goto out_unlock;
	}

	/* mac80211 will only ask for one band at a time */
	il->scan_request = req;
	il->scan_vif = vif;
	il->scan_band = req->channels[0]->band;

	ret = il_scan_initiate(il, vif);

	/* NOTE(review): the "leave" trace is skipped on the -EAGAIN path
	 * above, so enter/leave traces are asymmetric - harmless. */
	D_MAC80211("leave\n");

out_unlock:
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_hw_scan);
1400
1401static void
1402il_bg_scan_check(struct work_struct *data)
1403{
1404 struct il_priv *il =
1405 container_of(data, struct il_priv, scan_check.work);
1406
1407 D_SCAN("Scan check work\n");
1408
1409 /* Since we are here firmware does not finish scan and
1410 * most likely is in bad shape, so we don't bother to
1411 * send abort command, just force scan complete to mac80211 */
1412 mutex_lock(&il->mutex);
1413 il_force_scan_end(il);
1414 mutex_unlock(&il->mutex);
1415}
1416
1417/**
1418 * il_fill_probe_req - fill in all required fields and IE for probe request
1419 */
1420
1421u16
1422il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
1423 const u8 *ta, const u8 *ies, int ie_len, int left)
1424{
1425 int len = 0;
1426 u8 *pos = NULL;
1427
1428 /* Make sure there is enough space for the probe request,
1429 * two mandatory IEs and the data */
1430 left -= 24;
1431 if (left < 0)
1432 return 0;
1433
1434 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
1435 memcpy(frame->da, il_bcast_addr, ETH_ALEN);
1436 memcpy(frame->sa, ta, ETH_ALEN);
1437 memcpy(frame->bssid, il_bcast_addr, ETH_ALEN);
1438 frame->seq_ctrl = 0;
1439
1440 len += 24;
1441
1442 /* ...next IE... */
1443 pos = &frame->u.probe_req.variable[0];
1444
1445 /* fill in our indirect SSID IE */
1446 left -= 2;
1447 if (left < 0)
1448 return 0;
1449 *pos++ = WLAN_EID_SSID;
1450 *pos++ = 0;
1451
1452 len += 2;
1453
1454 if (WARN_ON(left < ie_len))
1455 return len;
1456
1457 if (ies && ie_len) {
1458 memcpy(pos, ies, ie_len);
1459 len += ie_len;
1460 }
1461
1462 return (u16) len;
1463}
1464EXPORT_SYMBOL(il_fill_probe_req);
1465
1466static void
1467il_bg_abort_scan(struct work_struct *work)
1468{
1469 struct il_priv *il = container_of(work, struct il_priv, abort_scan);
1470
1471 D_SCAN("Abort scan work\n");
1472
1473 /* We keep scan_check work queued in case when firmware will not
1474 * report back scan completed notification */
1475 mutex_lock(&il->mutex);
1476 il_scan_cancel_timeout(il, 200);
1477 mutex_unlock(&il->mutex);
1478}
1479
/* Worker queued from il_hdl_scan_complete(): finish the scan in mac80211
 * and apply power/TX-power settings deferred while scanning. */
static void
il_bg_scan_completed(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, scan_completed);
	bool aborted;

	D_SCAN("Completed scan.\n");

	/* The watchdog is no longer needed. */
	cancel_delayed_work(&il->scan_check);

	mutex_lock(&il->mutex);

	aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
	if (aborted)
		D_SCAN("Aborted scan completed.\n");

	/* Someone (e.g. il_force_scan_end()) may have finished already. */
	if (!test_and_clear_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already completed.\n");
		goto out_settings;
	}

	il_complete_scan(il, aborted);

out_settings:
	/* Can we still talk to firmware ? */
	if (!il_is_ready_rf(il))
		goto out;

	/*
	 * We do not commit power settings while scan is pending,
	 * do it now if the settings changed.
	 */
	il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
	il_set_tx_power(il, il->tx_power_next, false);

	il->cfg->ops->utils->post_scan(il);

out:
	mutex_unlock(&il->mutex);
}
1520
1521void
1522il_setup_scan_deferred_work(struct il_priv *il)
1523{
1524 INIT_WORK(&il->scan_completed, il_bg_scan_completed);
1525 INIT_WORK(&il->abort_scan, il_bg_abort_scan);
1526 INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
1527}
1528EXPORT_SYMBOL(il_setup_scan_deferred_work);
1529
1530void
1531il_cancel_scan_deferred_work(struct il_priv *il)
1532{
1533 cancel_work_sync(&il->abort_scan);
1534 cancel_work_sync(&il->scan_completed);
1535
1536 if (cancel_delayed_work_sync(&il->scan_check)) {
1537 mutex_lock(&il->mutex);
1538 il_force_scan_end(il);
1539 mutex_unlock(&il->mutex);
1540 }
1541}
1542EXPORT_SYMBOL(il_cancel_scan_deferred_work);
1543
/* il->sta_lock must be held */
/* Mark station sta_id as active in the uCode (per the driver's view),
 * logging suspicious driver-side state transitions. */
static void
il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
{

	if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
		IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
		       sta_id, il->stations[sta_id].sta.sta.addr);

	if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
		D_ASSOC("STA id %u addr %pM already present"
			" in uCode (according to driver)\n", sta_id,
			il->stations[sta_id].sta.sta.addr);
	} else {
		il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
		D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
			il->stations[sta_id].sta.sta.addr);
	}
}
1563
/* Handle the uCode response to C_ADD_STA: on success, mark the station
 * UCODE_ACTIVE under sta_lock. Returns 0 on success, -EIO otherwise.
 * (@sync is currently unused.) */
static int
il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
			struct il_rx_pkt *pkt, bool sync)
{
	u8 sta_id = addsta->sta.sta_id;
	unsigned long flags;
	int ret = -EIO;

	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
		return ret;
	}

	D_INFO("Processing response for adding station %u\n", sta_id);

	spin_lock_irqsave(&il->sta_lock, flags);

	switch (pkt->u.add_sta.status) {
	case ADD_STA_SUCCESS_MSK:
		D_INFO("C_ADD_STA PASSED\n");
		il_sta_ucode_activate(il, sta_id);
		ret = 0;
		break;
	case ADD_STA_NO_ROOM_IN_TBL:
		IL_ERR("Adding station %d failed, no room in table.\n", sta_id);
		break;
	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
		IL_ERR("Adding station %d failed, no block ack resource.\n",
		       sta_id);
		break;
	case ADD_STA_MODIFY_NON_EXIST_STA:
		IL_ERR("Attempting to modify non-existing station %d\n",
		       sta_id);
		break;
	default:
		D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
		break;
	}

	D_INFO("%s station id %u addr %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
	       il->stations[sta_id].sta.sta.addr);

	/*
	 * XXX: The MAC address in the command buffer is often changed from
	 * the original sent to the device. That is, the MAC address
	 * written to the command buffer often is not the same MAC address
	 * read from the command buffer when the command returns. This
	 * issue has not yet been resolved and this debugging is left to
	 * observe the problem.
	 */
	D_INFO("%s station according to cmd buffer %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}
1623
1624static void
1625il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd,
1626 struct il_rx_pkt *pkt)
1627{
1628 struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload;
1629
1630 il_process_add_sta_resp(il, addsta, pkt, false);
1631
1632}
1633
1634int
1635il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
1636{
1637 struct il_rx_pkt *pkt = NULL;
1638 int ret = 0;
1639 u8 data[sizeof(*sta)];
1640 struct il_host_cmd cmd = {
1641 .id = C_ADD_STA,
1642 .flags = flags,
1643 .data = data,
1644 };
1645 u8 sta_id __maybe_unused = sta->sta.sta_id;
1646
1647 D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr,
1648 flags & CMD_ASYNC ? "a" : "");
1649
1650 if (flags & CMD_ASYNC)
1651 cmd.callback = il_add_sta_callback;
1652 else {
1653 cmd.flags |= CMD_WANT_SKB;
1654 might_sleep();
1655 }
1656
1657 cmd.len = il->cfg->ops->utils->build_addsta_hcmd(sta, data);
1658 ret = il_send_cmd(il, &cmd);
1659
1660 if (ret || (flags & CMD_ASYNC))
1661 return ret;
1662
1663 if (ret == 0) {
1664 pkt = (struct il_rx_pkt *)cmd.reply_page;
1665 ret = il_process_add_sta_resp(il, sta, pkt, true);
1666 }
1667 il_free_pages(il, cmd.reply_page);
1668
1669 return ret;
1670}
1671EXPORT_SYMBOL(il_send_add_sta);
1672
1673static void
1674il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta,
1675 struct il_rxon_context *ctx)
1676{
1677 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
1678 __le32 sta_flags;
1679 u8 mimo_ps_mode;
1680
1681 if (!sta || !sta_ht_inf->ht_supported)
1682 goto done;
1683
1684 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
1685 D_ASSOC("spatial multiplexing power save mode: %s\n",
1686 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? "static" :
1687 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? "dynamic" :
1688 "disabled");
1689
1690 sta_flags = il->stations[idx].sta.station_flags;
1691
1692 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
1693
1694 switch (mimo_ps_mode) {
1695 case WLAN_HT_CAP_SM_PS_STATIC:
1696 sta_flags |= STA_FLG_MIMO_DIS_MSK;
1697 break;
1698 case WLAN_HT_CAP_SM_PS_DYNAMIC:
1699 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
1700 break;
1701 case WLAN_HT_CAP_SM_PS_DISABLED:
1702 break;
1703 default:
1704 IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode);
1705 break;
1706 }
1707
1708 sta_flags |=
1709 cpu_to_le32((u32) sta_ht_inf->
1710 ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
1711
1712 sta_flags |=
1713 cpu_to_le32((u32) sta_ht_inf->
1714 ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
1715
1716 if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
1717 sta_flags |= STA_FLG_HT40_EN_MSK;
1718 else
1719 sta_flags &= ~STA_FLG_HT40_EN_MSK;
1720
1721 il->stations[idx].sta.station_flags = sta_flags;
1722done:
1723 return;
1724}
1725
1726/**
1727 * il_prep_station - Prepare station information for addition
1728 *
1729 * should be called with sta_lock held
1730 */
1731u8
1732il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
1733 const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
1734{
1735 struct il_station_entry *station;
1736 int i;
1737 u8 sta_id = IL_INVALID_STATION;
1738 u16 rate;
1739
1740 if (is_ap)
1741 sta_id = ctx->ap_sta_id;
1742 else if (is_broadcast_ether_addr(addr))
1743 sta_id = ctx->bcast_sta_id;
1744 else
1745 for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
1746 if (!compare_ether_addr
1747 (il->stations[i].sta.sta.addr, addr)) {
1748 sta_id = i;
1749 break;
1750 }
1751
1752 if (!il->stations[i].used &&
1753 sta_id == IL_INVALID_STATION)
1754 sta_id = i;
1755 }
1756
1757 /*
1758 * These two conditions have the same outcome, but keep them
1759 * separate
1760 */
1761 if (unlikely(sta_id == IL_INVALID_STATION))
1762 return sta_id;
1763
1764 /*
1765 * uCode is not able to deal with multiple requests to add a
1766 * station. Keep track if one is in progress so that we do not send
1767 * another.
1768 */
1769 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
1770 D_INFO("STA %d already in process of being added.\n", sta_id);
1771 return sta_id;
1772 }
1773
1774 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
1775 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
1776 !compare_ether_addr(il->stations[sta_id].sta.sta.addr, addr)) {
1777 D_ASSOC("STA %d (%pM) already added, not adding again.\n",
1778 sta_id, addr);
1779 return sta_id;
1780 }
1781
1782 station = &il->stations[sta_id];
1783 station->used = IL_STA_DRIVER_ACTIVE;
1784 D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr);
1785 il->num_stations++;
1786
1787 /* Set up the C_ADD_STA command to send to device */
1788 memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
1789 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
1790 station->sta.mode = 0;
1791 station->sta.sta.sta_id = sta_id;
1792 station->sta.station_flags = ctx->station_flags;
1793 station->ctxid = ctx->ctxid;
1794
1795 if (sta) {
1796 struct il_station_priv_common *sta_priv;
1797
1798 sta_priv = (void *)sta->drv_priv;
1799 sta_priv->ctx = ctx;
1800 }
1801
1802 /*
1803 * OK to call unconditionally, since local stations (IBSS BSSID
1804 * STA and broadcast STA) pass in a NULL sta, and mac80211
1805 * doesn't allow HT IBSS.
1806 */
1807 il_set_ht_add_station(il, sta_id, sta, ctx);
1808
1809 /* 3945 only */
1810 rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
1811 /* Turn on both antennas for the station... */
1812 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
1813
1814 return sta_id;
1815
1816}
1817EXPORT_SYMBOL_GPL(il_prep_station);
1818
1819#define STA_WAIT_TIMEOUT (HZ/2)
1820
1821/**
1822 * il_add_station_common -
1823 */
1824int
1825il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
1826 const u8 *addr, bool is_ap, struct ieee80211_sta *sta,
1827 u8 *sta_id_r)
1828{
1829 unsigned long flags_spin;
1830 int ret = 0;
1831 u8 sta_id;
1832 struct il_addsta_cmd sta_cmd;
1833
1834 *sta_id_r = 0;
1835 spin_lock_irqsave(&il->sta_lock, flags_spin);
1836 sta_id = il_prep_station(il, ctx, addr, is_ap, sta);
1837 if (sta_id == IL_INVALID_STATION) {
1838 IL_ERR("Unable to prepare station %pM for addition\n", addr);
1839 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
1840 return -EINVAL;
1841 }
1842
1843 /*
1844 * uCode is not able to deal with multiple requests to add a
1845 * station. Keep track if one is in progress so that we do not send
1846 * another.
1847 */
1848 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
1849 D_INFO("STA %d already in process of being added.\n", sta_id);
1850 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
1851 return -EEXIST;
1852 }
1853
1854 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
1855 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
1856 D_ASSOC("STA %d (%pM) already added, not adding again.\n",
1857 sta_id, addr);
1858 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
1859 return -EEXIST;
1860 }
1861
1862 il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
1863 memcpy(&sta_cmd, &il->stations[sta_id].sta,
1864 sizeof(struct il_addsta_cmd));
1865 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
1866
1867 /* Add station to device's station table */
1868 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
1869 if (ret) {
1870 spin_lock_irqsave(&il->sta_lock, flags_spin);
1871 IL_ERR("Adding station %pM failed.\n",
1872 il->stations[sta_id].sta.sta.addr);
1873 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
1874 il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
1875 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
1876 }
1877 *sta_id_r = sta_id;
1878 return ret;
1879}
1880EXPORT_SYMBOL(il_add_station_common);
1881
1882/**
1883 * il_sta_ucode_deactivate - deactivate ucode status for a station
1884 *
1885 * il->sta_lock must be held
1886 */
1887static void
1888il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
1889{
1890 /* Ucode must be active and driver must be non active */
1891 if ((il->stations[sta_id].
1892 used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
1893 IL_STA_UCODE_ACTIVE)
1894 IL_ERR("removed non active STA %u\n", sta_id);
1895
1896 il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;
1897
1898 memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
1899 D_ASSOC("Removed STA %u\n", sta_id);
1900}
1901
1902static int
1903il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id,
1904 bool temporary)
1905{
1906 struct il_rx_pkt *pkt;
1907 int ret;
1908
1909 unsigned long flags_spin;
1910 struct il_rem_sta_cmd rm_sta_cmd;
1911
1912 struct il_host_cmd cmd = {
1913 .id = C_REM_STA,
1914 .len = sizeof(struct il_rem_sta_cmd),
1915 .flags = CMD_SYNC,
1916 .data = &rm_sta_cmd,
1917 };
1918
1919 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
1920 rm_sta_cmd.num_sta = 1;
1921 memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
1922
1923 cmd.flags |= CMD_WANT_SKB;
1924
1925 ret = il_send_cmd(il, &cmd);
1926
1927 if (ret)
1928 return ret;
1929
1930 pkt = (struct il_rx_pkt *)cmd.reply_page;
1931 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
1932 IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags);
1933 ret = -EIO;
1934 }
1935
1936 if (!ret) {
1937 switch (pkt->u.rem_sta.status) {
1938 case REM_STA_SUCCESS_MSK:
1939 if (!temporary) {
1940 spin_lock_irqsave(&il->sta_lock, flags_spin);
1941 il_sta_ucode_deactivate(il, sta_id);
1942 spin_unlock_irqrestore(&il->sta_lock,
1943 flags_spin);
1944 }
1945 D_ASSOC("C_REM_STA PASSED\n");
1946 break;
1947 default:
1948 ret = -EIO;
1949 IL_ERR("C_REM_STA failed\n");
1950 break;
1951 }
1952 }
1953 il_free_pages(il, cmd.reply_page);
1954
1955 return ret;
1956}
1957
1958/**
1959 * il_remove_station - Remove driver's knowledge of station.
1960 */
1961int
1962il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr)
1963{
1964 unsigned long flags;
1965
1966 if (!il_is_ready(il)) {
1967 D_INFO("Unable to remove station %pM, device not ready.\n",
1968 addr);
1969 /*
1970 * It is typical for stations to be removed when we are
1971 * going down. Return success since device will be down
1972 * soon anyway
1973 */
1974 return 0;
1975 }
1976
1977 D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr);
1978
1979 if (WARN_ON(sta_id == IL_INVALID_STATION))
1980 return -EINVAL;
1981
1982 spin_lock_irqsave(&il->sta_lock, flags);
1983
1984 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
1985 D_INFO("Removing %pM but non DRIVER active\n", addr);
1986 goto out_err;
1987 }
1988
1989 if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
1990 D_INFO("Removing %pM but non UCODE active\n", addr);
1991 goto out_err;
1992 }
1993
1994 if (il->stations[sta_id].used & IL_STA_LOCAL) {
1995 kfree(il->stations[sta_id].lq);
1996 il->stations[sta_id].lq = NULL;
1997 }
1998
1999 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
2000
2001 il->num_stations--;
2002
2003 BUG_ON(il->num_stations < 0);
2004
2005 spin_unlock_irqrestore(&il->sta_lock, flags);
2006
2007 return il_send_remove_station(il, addr, sta_id, false);
2008out_err:
2009 spin_unlock_irqrestore(&il->sta_lock, flags);
2010 return -EINVAL;
2011}
2012EXPORT_SYMBOL_GPL(il_remove_station);
2013
2014/**
2015 * il_clear_ucode_stations - clear ucode station table bits
2016 *
2017 * This function clears all the bits in the driver indicating
2018 * which stations are active in the ucode. Call when something
2019 * other than explicit station management would cause this in
2020 * the ucode, e.g. unassociated RXON.
2021 */
2022void
2023il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx)
2024{
2025 int i;
2026 unsigned long flags_spin;
2027 bool cleared = false;
2028
2029 D_INFO("Clearing ucode stations in driver\n");
2030
2031 spin_lock_irqsave(&il->sta_lock, flags_spin);
2032 for (i = 0; i < il->hw_params.max_stations; i++) {
2033 if (ctx && ctx->ctxid != il->stations[i].ctxid)
2034 continue;
2035
2036 if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
2037 D_INFO("Clearing ucode active for station %d\n", i);
2038 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
2039 cleared = true;
2040 }
2041 }
2042 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2043
2044 if (!cleared)
2045 D_INFO("No active stations found to be cleared\n");
2046}
2047EXPORT_SYMBOL(il_clear_ucode_stations);
2048
2049/**
2050 * il_restore_stations() - Restore driver known stations to device
2051 *
2052 * All stations considered active by driver, but not present in ucode, is
2053 * restored.
2054 *
2055 * Function sleeps.
2056 */
2057void
2058il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx)
2059{
2060 struct il_addsta_cmd sta_cmd;
2061 struct il_link_quality_cmd lq;
2062 unsigned long flags_spin;
2063 int i;
2064 bool found = false;
2065 int ret;
2066 bool send_lq;
2067
2068 if (!il_is_ready(il)) {
2069 D_INFO("Not ready yet, not restoring any stations.\n");
2070 return;
2071 }
2072
2073 D_ASSOC("Restoring all known stations ... start.\n");
2074 spin_lock_irqsave(&il->sta_lock, flags_spin);
2075 for (i = 0; i < il->hw_params.max_stations; i++) {
2076 if (ctx->ctxid != il->stations[i].ctxid)
2077 continue;
2078 if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
2079 !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
2080 D_ASSOC("Restoring sta %pM\n",
2081 il->stations[i].sta.sta.addr);
2082 il->stations[i].sta.mode = 0;
2083 il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
2084 found = true;
2085 }
2086 }
2087
2088 for (i = 0; i < il->hw_params.max_stations; i++) {
2089 if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
2090 memcpy(&sta_cmd, &il->stations[i].sta,
2091 sizeof(struct il_addsta_cmd));
2092 send_lq = false;
2093 if (il->stations[i].lq) {
2094 memcpy(&lq, il->stations[i].lq,
2095 sizeof(struct il_link_quality_cmd));
2096 send_lq = true;
2097 }
2098 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2099 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
2100 if (ret) {
2101 spin_lock_irqsave(&il->sta_lock, flags_spin);
2102 IL_ERR("Adding station %pM failed.\n",
2103 il->stations[i].sta.sta.addr);
2104 il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE;
2105 il->stations[i].used &=
2106 ~IL_STA_UCODE_INPROGRESS;
2107 spin_unlock_irqrestore(&il->sta_lock,
2108 flags_spin);
2109 }
2110 /*
2111 * Rate scaling has already been initialized, send
2112 * current LQ command
2113 */
2114 if (send_lq)
2115 il_send_lq_cmd(il, ctx, &lq, CMD_SYNC, true);
2116 spin_lock_irqsave(&il->sta_lock, flags_spin);
2117 il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
2118 }
2119 }
2120
2121 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2122 if (!found)
2123 D_INFO("Restoring all known stations"
2124 " .... no stations to be restored.\n");
2125 else
2126 D_INFO("Restoring all known stations" " .... complete.\n");
2127}
2128EXPORT_SYMBOL(il_restore_stations);
2129
2130int
2131il_get_free_ucode_key_idx(struct il_priv *il)
2132{
2133 int i;
2134
2135 for (i = 0; i < il->sta_key_max_num; i++)
2136 if (!test_and_set_bit(i, &il->ucode_key_table))
2137 return i;
2138
2139 return WEP_INVALID_OFFSET;
2140}
2141EXPORT_SYMBOL(il_get_free_ucode_key_idx);
2142
2143void
2144il_dealloc_bcast_stations(struct il_priv *il)
2145{
2146 unsigned long flags;
2147 int i;
2148
2149 spin_lock_irqsave(&il->sta_lock, flags);
2150 for (i = 0; i < il->hw_params.max_stations; i++) {
2151 if (!(il->stations[i].used & IL_STA_BCAST))
2152 continue;
2153
2154 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
2155 il->num_stations--;
2156 BUG_ON(il->num_stations < 0);
2157 kfree(il->stations[i].lq);
2158 il->stations[i].lq = NULL;
2159 }
2160 spin_unlock_irqrestore(&il->sta_lock, flags);
2161}
2162EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
2163
#ifdef CONFIG_IWLEGACY_DEBUG
/* Dump the interesting fields of a link-quality command to the debug log. */
static void
il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
{
	int rate_idx;

	D_RATE("lq station id 0x%x\n", lq->sta_id);
	D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk,
	       lq->general_params.dual_stream_ant_msk);

	for (rate_idx = 0; rate_idx < LINK_QUAL_MAX_RETRY_NUM; rate_idx++)
		D_RATE("lq idx %d 0x%X\n", rate_idx,
		       lq->rs_table[rate_idx].rate_n_flags);
}
#else
/* Debugging disabled: compiles away to nothing. */
static inline void
il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
{
}
#endif
2182
2183/**
2184 * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity
2185 *
2186 * It sometimes happens when a HT rate has been in use and we
2187 * loose connectivity with AP then mac80211 will first tell us that the
2188 * current channel is not HT anymore before removing the station. In such a
2189 * scenario the RXON flags will be updated to indicate we are not
2190 * communicating HT anymore, but the LQ command may still contain HT rates.
2191 * Test for this to prevent driver from sending LQ command between the time
2192 * RXON flags are updated and when LQ command is updated.
2193 */
2194static bool
2195il_is_lq_table_valid(struct il_priv *il, struct il_rxon_context *ctx,
2196 struct il_link_quality_cmd *lq)
2197{
2198 int i;
2199
2200 if (ctx->ht.enabled)
2201 return true;
2202
2203 D_INFO("Channel %u is not an HT channel\n", ctx->active.channel);
2204 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
2205 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
2206 D_INFO("idx %d of LQ expects HT channel\n", i);
2207 return false;
2208 }
2209 }
2210 return true;
2211}
2212
2213/**
2214 * il_send_lq_cmd() - Send link quality command
2215 * @init: This command is sent as part of station initialization right
2216 * after station has been added.
2217 *
2218 * The link quality command is sent as the last step of station creation.
2219 * This is the special case in which init is set and we call a callback in
2220 * this case to clear the state indicating that station creation is in
2221 * progress.
2222 */
2223int
2224il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
2225 struct il_link_quality_cmd *lq, u8 flags, bool init)
2226{
2227 int ret = 0;
2228 unsigned long flags_spin;
2229
2230 struct il_host_cmd cmd = {
2231 .id = C_TX_LINK_QUALITY_CMD,
2232 .len = sizeof(struct il_link_quality_cmd),
2233 .flags = flags,
2234 .data = lq,
2235 };
2236
2237 if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
2238 return -EINVAL;
2239
2240 spin_lock_irqsave(&il->sta_lock, flags_spin);
2241 if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
2242 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2243 return -EINVAL;
2244 }
2245 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2246
2247 il_dump_lq_cmd(il, lq);
2248 BUG_ON(init && (cmd.flags & CMD_ASYNC));
2249
2250 if (il_is_lq_table_valid(il, ctx, lq))
2251 ret = il_send_cmd(il, &cmd);
2252 else
2253 ret = -EINVAL;
2254
2255 if (cmd.flags & CMD_ASYNC)
2256 return ret;
2257
2258 if (init) {
2259 D_INFO("init LQ command complete,"
2260 " clearing sta addition status for sta %d\n",
2261 lq->sta_id);
2262 spin_lock_irqsave(&il->sta_lock, flags_spin);
2263 il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
2264 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2265 }
2266 return ret;
2267}
2268EXPORT_SYMBOL(il_send_lq_cmd);
2269
2270int
2271il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2272 struct ieee80211_sta *sta)
2273{
2274 struct il_priv *il = hw->priv;
2275 struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
2276 int ret;
2277
2278 D_INFO("received request to remove station %pM\n", sta->addr);
2279 mutex_lock(&il->mutex);
2280 D_INFO("proceeding to remove station %pM\n", sta->addr);
2281 ret = il_remove_station(il, sta_common->sta_id, sta->addr);
2282 if (ret)
2283 IL_ERR("Error removing station %pM\n", sta->addr);
2284 mutex_unlock(&il->mutex);
2285 return ret;
2286}
2287EXPORT_SYMBOL(il_mac_sta_remove);
2288
2289/************************** RX-FUNCTIONS ****************************/
2290/*
2291 * Rx theory of operation
2292 *
2293 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
2294 * each of which point to Receive Buffers to be filled by the NIC. These get
2295 * used not only for Rx frames, but for any command response or notification
2296 * from the NIC. The driver and NIC manage the Rx buffers by means
2297 * of idxes into the circular buffer.
2298 *
2299 * Rx Queue Indexes
2300 * The host/firmware share two idx registers for managing the Rx buffers.
2301 *
2302 * The READ idx maps to the first position that the firmware may be writing
2303 * to -- the driver can read up to (but not including) this position and get
2304 * good data.
2305 * The READ idx is managed by the firmware once the card is enabled.
2306 *
2307 * The WRITE idx maps to the last position the driver has read from -- the
2308 * position preceding WRITE is the last slot the firmware can place a packet.
2309 *
2310 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
2311 * WRITE = READ.
2312 *
2313 * During initialization, the host sets up the READ queue position to the first
2314 * IDX position, and WRITE to the last (READ - 1 wrapped)
2315 *
2316 * When the firmware places a packet in a buffer, it will advance the READ idx
2317 * and fire the RX interrupt. The driver can then query the READ idx and
2318 * process as many packets as possible, moving the WRITE idx forward as it
2319 * resets the Rx queue buffers with new memory.
2320 *
2321 * The management in the driver is as follows:
2322 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
2323 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
2324 * to replenish the iwl->rxq->rx_free.
2325 * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the
2326 * iwl->rxq is replenished and the READ IDX is updated (updating the
2327 * 'processed' and 'read' driver idxes as well)
2328 * + A received packet is processed and handed to the kernel network stack,
2329 * detached from the iwl->rxq. The driver 'processed' idx is updated.
2330 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
2331 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
2332 * IDX is not incremented and iwl->status(RX_STALLED) is set. If there
2333 * were enough free buffers and RX_STALLED is set it is cleared.
2334 *
2335 *
2336 * Driver sequence:
2337 *
2338 * il_rx_queue_alloc() Allocates rx_free
2339 * il_rx_replenish() Replenishes rx_free list from rx_used, and calls
2340 * il_rx_queue_restock
2341 * il_rx_queue_restock() Moves available buffers from rx_free into Rx
2342 * queue, updates firmware pointers, and updates
2343 * the WRITE idx. If insufficient rx_free buffers
2344 * are available, schedules il_rx_replenish
2345 *
2346 * -- enable interrupts --
2347 * ISR - il_rx() Detach il_rx_bufs from pool up to the
2348 * READ IDX, detaching the SKB from the pool.
2349 * Moves the packet buffer from queue to rx_used.
2350 * Calls il_rx_queue_restock to refill any empty
2351 * slots.
2352 * ...
2353 *
2354 */
2355
2356/**
2357 * il_rx_queue_space - Return number of free slots available in queue.
2358 */
2359int
2360il_rx_queue_space(const struct il_rx_queue *q)
2361{
2362 int s = q->read - q->write;
2363 if (s <= 0)
2364 s += RX_QUEUE_SIZE;
2365 /* keep some buffer to not confuse full and empty queue */
2366 s -= 2;
2367 if (s < 0)
2368 s = 0;
2369 return s;
2370}
2371EXPORT_SYMBOL(il_rx_queue_space);
2372
2373/**
2374 * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue
2375 */
2376void
2377il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
2378{
2379 unsigned long flags;
2380 u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg;
2381 u32 reg;
2382
2383 spin_lock_irqsave(&q->lock, flags);
2384
2385 if (q->need_update == 0)
2386 goto exit_unlock;
2387
2388 /* If power-saving is in use, make sure device is awake */
2389 if (test_bit(S_POWER_PMI, &il->status)) {
2390 reg = _il_rd(il, CSR_UCODE_DRV_GP1);
2391
2392 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
2393 D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n",
2394 reg);
2395 il_set_bit(il, CSR_GP_CNTRL,
2396 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2397 goto exit_unlock;
2398 }
2399
2400 q->write_actual = (q->write & ~0x7);
2401 il_wr(il, rx_wrt_ptr_reg, q->write_actual);
2402
2403 /* Else device is assumed to be awake */
2404 } else {
2405 /* Device expects a multiple of 8 */
2406 q->write_actual = (q->write & ~0x7);
2407 il_wr(il, rx_wrt_ptr_reg, q->write_actual);
2408 }
2409
2410 q->need_update = 0;
2411
2412exit_unlock:
2413 spin_unlock_irqrestore(&q->lock, flags);
2414}
2415EXPORT_SYMBOL(il_rx_queue_update_write_ptr);
2416
2417int
2418il_rx_queue_alloc(struct il_priv *il)
2419{
2420 struct il_rx_queue *rxq = &il->rxq;
2421 struct device *dev = &il->pci_dev->dev;
2422 int i;
2423
2424 spin_lock_init(&rxq->lock);
2425 INIT_LIST_HEAD(&rxq->rx_free);
2426 INIT_LIST_HEAD(&rxq->rx_used);
2427
2428 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
2429 rxq->bd =
2430 dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
2431 GFP_KERNEL);
2432 if (!rxq->bd)
2433 goto err_bd;
2434
2435 rxq->rb_stts =
2436 dma_alloc_coherent(dev, sizeof(struct il_rb_status),
2437 &rxq->rb_stts_dma, GFP_KERNEL);
2438 if (!rxq->rb_stts)
2439 goto err_rb;
2440
2441 /* Fill the rx_used queue with _all_ of the Rx buffers */
2442 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
2443 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
2444
2445 /* Set us so that we have processed and used all buffers, but have
2446 * not restocked the Rx queue with fresh buffers */
2447 rxq->read = rxq->write = 0;
2448 rxq->write_actual = 0;
2449 rxq->free_count = 0;
2450 rxq->need_update = 0;
2451 return 0;
2452
2453err_rb:
2454 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2455 rxq->bd_dma);
2456err_bd:
2457 return -ENOMEM;
2458}
2459EXPORT_SYMBOL(il_rx_queue_alloc);
2460
2461void
2462il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb)
2463{
2464 struct il_rx_pkt *pkt = rxb_addr(rxb);
2465 struct il_spectrum_notification *report = &(pkt->u.spectrum_notif);
2466
2467 if (!report->state) {
2468 D_11H("Spectrum Measure Notification: Start\n");
2469 return;
2470 }
2471
2472 memcpy(&il->measure_report, report, sizeof(*report));
2473 il->measurement_status |= MEASUREMENT_READY;
2474}
2475EXPORT_SYMBOL(il_hdl_spectrum_measurement);
2476
2477/*
2478 * returns non-zero if packet should be dropped
2479 */
2480int
2481il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
2482 u32 decrypt_res, struct ieee80211_rx_status *stats)
2483{
2484 u16 fc = le16_to_cpu(hdr->frame_control);
2485
2486 /*
2487 * All contexts have the same setting here due to it being
2488 * a module parameter, so OK to check any context.
2489 */
2490 if (il->ctx.active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
2491 return 0;
2492
2493 if (!(fc & IEEE80211_FCTL_PROTECTED))
2494 return 0;
2495
2496 D_RX("decrypt_res:0x%x\n", decrypt_res);
2497 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
2498 case RX_RES_STATUS_SEC_TYPE_TKIP:
2499 /* The uCode has got a bad phase 1 Key, pushes the packet.
2500 * Decryption will be done in SW. */
2501 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2502 RX_RES_STATUS_BAD_KEY_TTAK)
2503 break;
2504
2505 case RX_RES_STATUS_SEC_TYPE_WEP:
2506 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2507 RX_RES_STATUS_BAD_ICV_MIC) {
2508 /* bad ICV, the packet is destroyed since the
2509 * decryption is inplace, drop it */
2510 D_RX("Packet destroyed\n");
2511 return -1;
2512 }
2513 case RX_RES_STATUS_SEC_TYPE_CCMP:
2514 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2515 RX_RES_STATUS_DECRYPT_OK) {
2516 D_RX("hw decrypt successfully!!!\n");
2517 stats->flag |= RX_FLAG_DECRYPTED;
2518 }
2519 break;
2520
2521 default:
2522 break;
2523 }
2524 return 0;
2525}
2526EXPORT_SYMBOL(il_set_decrypted_flag);
2527
2528/**
2529 * il_txq_update_write_ptr - Send new write idx to hardware
2530 */
2531void
2532il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
2533{
2534 u32 reg = 0;
2535 int txq_id = txq->q.id;
2536
2537 if (txq->need_update == 0)
2538 return;
2539
2540 /* if we're trying to save power */
2541 if (test_bit(S_POWER_PMI, &il->status)) {
2542 /* wake up nic if it's powered down ...
2543 * uCode will wake up, and interrupt us again, so next
2544 * time we'll skip this part. */
2545 reg = _il_rd(il, CSR_UCODE_DRV_GP1);
2546
2547 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
2548 D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n",
2549 txq_id, reg);
2550 il_set_bit(il, CSR_GP_CNTRL,
2551 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2552 return;
2553 }
2554
2555 il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
2556
2557 /*
2558 * else not in power-save mode,
2559 * uCode will never sleep when we're
2560 * trying to tx (during RFKILL, we're not trying to tx).
2561 */
2562 } else
2563 _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
2564 txq->need_update = 0;
2565}
2566EXPORT_SYMBOL(il_txq_update_write_ptr);
2567
2568/**
2569 * il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
2570 */
2571void
2572il_tx_queue_unmap(struct il_priv *il, int txq_id)
2573{
2574 struct il_tx_queue *txq = &il->txq[txq_id];
2575 struct il_queue *q = &txq->q;
2576
2577 if (q->n_bd == 0)
2578 return;
2579
2580 while (q->write_ptr != q->read_ptr) {
2581 il->cfg->ops->lib->txq_free_tfd(il, txq);
2582 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
2583 }
2584}
2585EXPORT_SYMBOL(il_tx_queue_unmap);
2586
2587/**
2588 * il_tx_queue_free - Deallocate DMA queue.
2589 * @txq: Transmit queue to deallocate.
2590 *
2591 * Empty queue by removing and destroying all BD's.
2592 * Free all buffers.
2593 * 0-fill, but do not free "txq" descriptor structure.
2594 */
2595void
2596il_tx_queue_free(struct il_priv *il, int txq_id)
2597{
2598 struct il_tx_queue *txq = &il->txq[txq_id];
2599 struct device *dev = &il->pci_dev->dev;
2600 int i;
2601
2602 il_tx_queue_unmap(il, txq_id);
2603
2604 /* De-alloc array of command/tx buffers */
2605 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
2606 kfree(txq->cmd[i]);
2607
2608 /* De-alloc circular buffer of TFDs */
2609 if (txq->q.n_bd)
2610 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
2611 txq->tfds, txq->q.dma_addr);
2612
2613 /* De-alloc array of per-TFD driver data */
2614 kfree(txq->txb);
2615 txq->txb = NULL;
2616
2617 /* deallocate arrays */
2618 kfree(txq->cmd);
2619 kfree(txq->meta);
2620 txq->cmd = NULL;
2621 txq->meta = NULL;
2622
2623 /* 0-fill queue descriptor structure */
2624 memset(txq, 0, sizeof(*txq));
2625}
2626EXPORT_SYMBOL(il_tx_queue_free);
2627
2628/**
2629 * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
2630 */
2631void
2632il_cmd_queue_unmap(struct il_priv *il)
2633{
2634 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
2635 struct il_queue *q = &txq->q;
2636 int i;
2637
2638 if (q->n_bd == 0)
2639 return;
2640
2641 while (q->read_ptr != q->write_ptr) {
2642 i = il_get_cmd_idx(q, q->read_ptr, 0);
2643
2644 if (txq->meta[i].flags & CMD_MAPPED) {
2645 pci_unmap_single(il->pci_dev,
2646 dma_unmap_addr(&txq->meta[i], mapping),
2647 dma_unmap_len(&txq->meta[i], len),
2648 PCI_DMA_BIDIRECTIONAL);
2649 txq->meta[i].flags = 0;
2650 }
2651
2652 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
2653 }
2654
2655 i = q->n_win;
2656 if (txq->meta[i].flags & CMD_MAPPED) {
2657 pci_unmap_single(il->pci_dev,
2658 dma_unmap_addr(&txq->meta[i], mapping),
2659 dma_unmap_len(&txq->meta[i], len),
2660 PCI_DMA_BIDIRECTIONAL);
2661 txq->meta[i].flags = 0;
2662 }
2663}
2664EXPORT_SYMBOL(il_cmd_queue_unmap);
2665
2666/**
2667 * il_cmd_queue_free - Deallocate DMA queue.
2668 * @txq: Transmit queue to deallocate.
2669 *
2670 * Empty queue by removing and destroying all BD's.
2671 * Free all buffers.
2672 * 0-fill, but do not free "txq" descriptor structure.
2673 */
2674void
2675il_cmd_queue_free(struct il_priv *il)
2676{
2677 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
2678 struct device *dev = &il->pci_dev->dev;
2679 int i;
2680
2681 il_cmd_queue_unmap(il);
2682
2683 /* De-alloc array of command/tx buffers */
2684 for (i = 0; i <= TFD_CMD_SLOTS; i++)
2685 kfree(txq->cmd[i]);
2686
2687 /* De-alloc circular buffer of TFDs */
2688 if (txq->q.n_bd)
2689 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
2690 txq->tfds, txq->q.dma_addr);
2691
2692 /* deallocate arrays */
2693 kfree(txq->cmd);
2694 kfree(txq->meta);
2695 txq->cmd = NULL;
2696 txq->meta = NULL;
2697
2698 /* 0-fill queue descriptor structure */
2699 memset(txq, 0, sizeof(*txq));
2700}
2701EXPORT_SYMBOL(il_cmd_queue_free);
2702
2703/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
2704 * DMA services
2705 *
2706 * Theory of operation
2707 *
2708 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
2709 * of buffer descriptors, each of which points to one or more data buffers for
2710 * the device to read from or fill. Driver and device exchange status of each
2711 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
2712 * entries in each circular buffer, to protect against confusing empty and full
2713 * queue states.
2714 *
2715 * The device reads or writes the data in the queues via the device's several
2716 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
2717 *
 * For a Tx queue, there are low-mark and high-mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the Tx-done IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
2722 *
2723 * See more detailed info in 4965.h.
2724 ***************************************************/
2725
2726int
2727il_queue_space(const struct il_queue *q)
2728{
2729 int s = q->read_ptr - q->write_ptr;
2730
2731 if (q->read_ptr > q->write_ptr)
2732 s -= q->n_bd;
2733
2734 if (s <= 0)
2735 s += q->n_win;
2736 /* keep some reserve to not confuse empty and full situations */
2737 s -= 2;
2738 if (s < 0)
2739 s = 0;
2740 return s;
2741}
2742EXPORT_SYMBOL(il_queue_space);
2743
2744
2745/**
2746 * il_queue_init - Initialize queue's high/low-water and read/write idxes
2747 */
2748static int
2749il_queue_init(struct il_priv *il, struct il_queue *q, int count, int slots_num,
2750 u32 id)
2751{
2752 q->n_bd = count;
2753 q->n_win = slots_num;
2754 q->id = id;
2755
2756 /* count must be power-of-two size, otherwise il_queue_inc_wrap
2757 * and il_queue_dec_wrap are broken. */
2758 BUG_ON(!is_power_of_2(count));
2759
2760 /* slots_num must be power-of-two size, otherwise
2761 * il_get_cmd_idx is broken. */
2762 BUG_ON(!is_power_of_2(slots_num));
2763
2764 q->low_mark = q->n_win / 4;
2765 if (q->low_mark < 4)
2766 q->low_mark = 4;
2767
2768 q->high_mark = q->n_win / 8;
2769 if (q->high_mark < 2)
2770 q->high_mark = 2;
2771
2772 q->write_ptr = q->read_ptr = 0;
2773
2774 return 0;
2775}
2776
2777/**
2778 * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
2779 */
2780static int
2781il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
2782{
2783 struct device *dev = &il->pci_dev->dev;
2784 size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
2785
2786 /* Driver ilate data, only for Tx (not command) queues,
2787 * not shared with device. */
2788 if (id != il->cmd_queue) {
2789 txq->txb =
2790 kzalloc(sizeof(txq->txb[0]) * TFD_QUEUE_SIZE_MAX,
2791 GFP_KERNEL);
2792 if (!txq->txb) {
2793 IL_ERR("kmalloc for auxiliary BD "
2794 "structures failed\n");
2795 goto error;
2796 }
2797 } else {
2798 txq->txb = NULL;
2799 }
2800
2801 /* Circular buffer of transmit frame descriptors (TFDs),
2802 * shared with device */
2803 txq->tfds =
2804 dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
2805 if (!txq->tfds) {
2806 IL_ERR("pci_alloc_consistent(%zd) failed\n", tfd_sz);
2807 goto error;
2808 }
2809 txq->q.id = id;
2810
2811 return 0;
2812
2813error:
2814 kfree(txq->txb);
2815 txq->txb = NULL;
2816
2817 return -ENOMEM;
2818}
2819
2820/**
2821 * il_tx_queue_init - Allocate and initialize one tx/cmd queue
2822 */
2823int
2824il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
2825 u32 txq_id)
2826{
2827 int i, len;
2828 int ret;
2829 int actual_slots = slots_num;
2830
2831 /*
2832 * Alloc buffer array for commands (Tx or other types of commands).
2833 * For the command queue (#4/#9), allocate command space + one big
2834 * command for scan, since scan command is very huge; the system will
2835 * not have two scans at the same time, so only one is needed.
2836 * For normal Tx queues (all other queues), no super-size command
2837 * space is needed.
2838 */
2839 if (txq_id == il->cmd_queue)
2840 actual_slots++;
2841
2842 txq->meta =
2843 kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL);
2844 txq->cmd =
2845 kzalloc(sizeof(struct il_device_cmd *) * actual_slots, GFP_KERNEL);
2846
2847 if (!txq->meta || !txq->cmd)
2848 goto out_free_arrays;
2849
2850 len = sizeof(struct il_device_cmd);
2851 for (i = 0; i < actual_slots; i++) {
2852 /* only happens for cmd queue */
2853 if (i == slots_num)
2854 len = IL_MAX_CMD_SIZE;
2855
2856 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
2857 if (!txq->cmd[i])
2858 goto err;
2859 }
2860
2861 /* Alloc driver data array and TFD circular buffer */
2862 ret = il_tx_queue_alloc(il, txq, txq_id);
2863 if (ret)
2864 goto err;
2865
2866 txq->need_update = 0;
2867
2868 /*
2869 * For the default queues 0-3, set up the swq_id
2870 * already -- all others need to get one later
2871 * (if they need one at all).
2872 */
2873 if (txq_id < 4)
2874 il_set_swq_id(txq, txq_id, txq_id);
2875
2876 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
2877 * il_queue_inc_wrap and il_queue_dec_wrap are broken. */
2878 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
2879
2880 /* Initialize queue's high/low-water marks, and head/tail idxes */
2881 il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
2882
2883 /* Tell device where to find queue */
2884 il->cfg->ops->lib->txq_init(il, txq);
2885
2886 return 0;
2887err:
2888 for (i = 0; i < actual_slots; i++)
2889 kfree(txq->cmd[i]);
2890out_free_arrays:
2891 kfree(txq->meta);
2892 kfree(txq->cmd);
2893
2894 return -ENOMEM;
2895}
2896EXPORT_SYMBOL(il_tx_queue_init);
2897
2898void
2899il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
2900 u32 txq_id)
2901{
2902 int actual_slots = slots_num;
2903
2904 if (txq_id == il->cmd_queue)
2905 actual_slots++;
2906
2907 memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
2908
2909 txq->need_update = 0;
2910
2911 /* Initialize queue's high/low-water marks, and head/tail idxes */
2912 il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
2913
2914 /* Tell device where to find queue */
2915 il->cfg->ops->lib->txq_init(il, txq);
2916}
2917EXPORT_SYMBOL(il_tx_queue_reset);
2918
2919/*************** HOST COMMAND QUEUE FUNCTIONS *****/
2920
2921/**
2922 * il_enqueue_hcmd - enqueue a uCode command
2923 * @il: device ilate data point
2924 * @cmd: a point to the ucode command structure
2925 *
2926 * The function returns < 0 values to indicate the operation is
2927 * failed. On success, it turns the idx (> 0) of command in the
2928 * command queue.
2929 */
2930int
2931il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
2932{
2933 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
2934 struct il_queue *q = &txq->q;
2935 struct il_device_cmd *out_cmd;
2936 struct il_cmd_meta *out_meta;
2937 dma_addr_t phys_addr;
2938 unsigned long flags;
2939 int len;
2940 u32 idx;
2941 u16 fix_size;
2942
2943 cmd->len = il->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
2944 fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr));
2945
2946 /* If any of the command structures end up being larger than
2947 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
2948 * we will need to increase the size of the TFD entries
2949 * Also, check to see if command buffer should not exceed the size
2950 * of device_cmd and max_cmd_size. */
2951 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
2952 !(cmd->flags & CMD_SIZE_HUGE));
2953 BUG_ON(fix_size > IL_MAX_CMD_SIZE);
2954
2955 if (il_is_rfkill(il) || il_is_ctkill(il)) {
2956 IL_WARN("Not sending command - %s KILL\n",
2957 il_is_rfkill(il) ? "RF" : "CT");
2958 return -EIO;
2959 }
2960
2961 spin_lock_irqsave(&il->hcmd_lock, flags);
2962
2963 if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
2964 spin_unlock_irqrestore(&il->hcmd_lock, flags);
2965
2966 IL_ERR("Restarting adapter due to command queue full\n");
2967 queue_work(il->workqueue, &il->restart);
2968 return -ENOSPC;
2969 }
2970
2971 idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
2972 out_cmd = txq->cmd[idx];
2973 out_meta = &txq->meta[idx];
2974
2975 if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
2976 spin_unlock_irqrestore(&il->hcmd_lock, flags);
2977 return -ENOSPC;
2978 }
2979
2980 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
2981 out_meta->flags = cmd->flags | CMD_MAPPED;
2982 if (cmd->flags & CMD_WANT_SKB)
2983 out_meta->source = cmd;
2984 if (cmd->flags & CMD_ASYNC)
2985 out_meta->callback = cmd->callback;
2986
2987 out_cmd->hdr.cmd = cmd->id;
2988 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
2989
2990 /* At this point, the out_cmd now has all of the incoming cmd
2991 * information */
2992
2993 out_cmd->hdr.flags = 0;
2994 out_cmd->hdr.sequence =
2995 cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
2996 if (cmd->flags & CMD_SIZE_HUGE)
2997 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
2998 len = sizeof(struct il_device_cmd);
2999 if (idx == TFD_CMD_SLOTS)
3000 len = IL_MAX_CMD_SIZE;
3001
3002#ifdef CONFIG_IWLEGACY_DEBUG
3003 switch (out_cmd->hdr.cmd) {
3004 case C_TX_LINK_QUALITY_CMD:
3005 case C_SENSITIVITY:
3006 D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
3007 "%d bytes at %d[%d]:%d\n",
3008 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
3009 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
3010 q->write_ptr, idx, il->cmd_queue);
3011 break;
3012 default:
3013 D_HC("Sending command %s (#%x), seq: 0x%04X, "
3014 "%d bytes at %d[%d]:%d\n",
3015 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
3016 le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr,
3017 idx, il->cmd_queue);
3018 }
3019#endif
3020 txq->need_update = 1;
3021
3022 if (il->cfg->ops->lib->txq_update_byte_cnt_tbl)
3023 /* Set up entry in queue's byte count circular buffer */
3024 il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq, 0);
3025
3026 phys_addr =
3027 pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
3028 PCI_DMA_BIDIRECTIONAL);
3029 dma_unmap_addr_set(out_meta, mapping, phys_addr);
3030 dma_unmap_len_set(out_meta, len, fix_size);
3031
3032 il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size,
3033 1, U32_PAD(cmd->len));
3034
3035 /* Increment and update queue's write idx */
3036 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
3037 il_txq_update_write_ptr(il, txq);
3038
3039 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3040 return idx;
3041}
3042
3043/**
3044 * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
3045 *
3046 * When FW advances 'R' idx, all entries between old and new 'R' idx
3047 * need to be reclaimed. As result, some free space forms. If there is
3048 * enough free space (> low mark), wake the stack that feeds us.
3049 */
3050static void
3051il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx)
3052{
3053 struct il_tx_queue *txq = &il->txq[txq_id];
3054 struct il_queue *q = &txq->q;
3055 int nfreed = 0;
3056
3057 if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
3058 IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
3059 "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
3060 q->write_ptr, q->read_ptr);
3061 return;
3062 }
3063
3064 for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
3065 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
3066
3067 if (nfreed++ > 0) {
3068 IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx,
3069 q->write_ptr, q->read_ptr);
3070 queue_work(il->workqueue, &il->restart);
3071 }
3072
3073 }
3074}
3075
3076/**
3077 * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
3078 * @rxb: Rx buffer to reclaim
3079 *
3080 * If an Rx buffer has an async callback associated with it the callback
3081 * will be executed. The attached skb (if present) will only be freed
3082 * if the callback returns 1
3083 */
3084void
3085il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
3086{
3087 struct il_rx_pkt *pkt = rxb_addr(rxb);
3088 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3089 int txq_id = SEQ_TO_QUEUE(sequence);
3090 int idx = SEQ_TO_IDX(sequence);
3091 int cmd_idx;
3092 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
3093 struct il_device_cmd *cmd;
3094 struct il_cmd_meta *meta;
3095 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
3096 unsigned long flags;
3097
3098 /* If a Tx command is being handled and it isn't in the actual
3099 * command queue then there a command routing bug has been introduced
3100 * in the queue management code. */
3101 if (WARN
3102 (txq_id != il->cmd_queue,
3103 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
3104 txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
3105 il->txq[il->cmd_queue].q.write_ptr)) {
3106 il_print_hex_error(il, pkt, 32);
3107 return;
3108 }
3109
3110 cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
3111 cmd = txq->cmd[cmd_idx];
3112 meta = &txq->meta[cmd_idx];
3113
3114 txq->time_stamp = jiffies;
3115
3116 pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping),
3117 dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL);
3118
3119 /* Input error checking is done when commands are added to queue. */
3120 if (meta->flags & CMD_WANT_SKB) {
3121 meta->source->reply_page = (unsigned long)rxb_addr(rxb);
3122 rxb->page = NULL;
3123 } else if (meta->callback)
3124 meta->callback(il, cmd, pkt);
3125
3126 spin_lock_irqsave(&il->hcmd_lock, flags);
3127
3128 il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx);
3129
3130 if (!(meta->flags & CMD_ASYNC)) {
3131 clear_bit(S_HCMD_ACTIVE, &il->status);
3132 D_INFO("Clearing HCMD_ACTIVE for command %s\n",
3133 il_get_cmd_string(cmd->hdr.cmd));
3134 wake_up(&il->wait_command_queue);
3135 }
3136
3137 /* Mark as unmapped */
3138 meta->flags = 0;
3139
3140 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3141}
3142EXPORT_SYMBOL(il_tx_cmd_complete);
3143
MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
MODULE_VERSION(IWLWIFI_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");

/*
 * When bt_coex_active is true, uCode will do kill/defer every time the
 * priority line is asserted (BT is sending signals on the priority line
 * in the PCIx).
 * When bt_coex_active is false, uCode will ignore the BT activity and
 * perform the normal operation.
 *
 * Users might experience transmit issues on some platforms due to the
 * WiFi/BT co-exist problem. The possible behaviors are:
 *   Able to scan and find all the available APs
 *   Not able to associate with any AP
 * On those platforms, WiFi communication can be restored by setting the
 * "bt_coex_active" module parameter to "false".
 *
 * default: bt_coex_active = true (BT_COEX_ENABLE)
 */
static bool bt_coex_active = true;
module_param(bt_coex_active, bool, S_IRUGO);
MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");

u32 il_debug_level;
EXPORT_SYMBOL(il_debug_level);

const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
EXPORT_SYMBOL(il_bcast_addr);
3174
/* This function both allocates and initializes hw and il. */
struct ieee80211_hw *
il_alloc_all(struct il_cfg *cfg)
{
	struct il_priv *il;
	/* mac80211 allocates memory for this device instance, including
	 * space for this driver's private structure */
	struct ieee80211_hw *hw;

	hw = ieee80211_alloc_hw(sizeof(struct il_priv),
				cfg->ops->ieee80211_ops);
	if (hw == NULL) {
		pr_err("%s: Can not allocate network device\n", cfg->name);
		goto out;
	}

	il = hw->priv;
	il->hw = hw;

out:
	/* NULL on allocation failure */
	return hw;
}
EXPORT_SYMBOL(il_alloc_all);
3198
#define MAX_BIT_RATE_40_MHZ 150	/* Mbps */
#define MAX_BIT_RATE_20_MHZ 72	/* Mbps */
/*
 * il_init_ht_hw_capab - fill an ieee80211_sta_ht_cap for one band
 *
 * Builds the HT capability flags, Rx/Tx MCS masks and highest supported
 * Rx rate from il->hw_params and the module parameters.
 */
static void
il_init_ht_hw_capab(const struct il_priv *il,
		    struct ieee80211_sta_ht_cap *ht_info,
		    enum ieee80211_band band)
{
	u16 max_bit_rate = 0;
	u8 rx_chains_num = il->hw_params.rx_chains_num;
	u8 tx_chains_num = il->hw_params.tx_chains_num;

	ht_info->cap = 0;
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));

	ht_info->ht_supported = true;

	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
	max_bit_rate = MAX_BIT_RATE_20_MHZ;
	if (il->hw_params.ht40_channel & BIT(band)) {
		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
		/* bit 32 of the Rx MCS bitmask, i.e. MCS 32 */
		ht_info->mcs.rx_mask[4] = 0x01;
		max_bit_rate = MAX_BIT_RATE_40_MHZ;
	}

	if (il->cfg->mod_params->amsdu_size_8K)
		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;

	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;

	/* One byte of MCS rates (MCS 0-7) per supported Rx chain */
	ht_info->mcs.rx_mask[0] = 0xFF;
	if (rx_chains_num >= 2)
		ht_info->mcs.rx_mask[1] = 0xFF;
	if (rx_chains_num >= 3)
		ht_info->mcs.rx_mask[2] = 0xFF;

	/* Highest supported Rx data rate */
	max_bit_rate *= rx_chains_num;
	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);

	/* Tx MCS capabilities */
	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	if (tx_chains_num != rx_chains_num) {
		/* Tx chain count differs from Rx: advertise it explicitly */
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |=
		    ((tx_chains_num -
		      1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}
}
3250
3251/**
3252 * il_init_geos - Initialize mac80211's geo/channel info based from eeprom
3253 */
3254int
3255il_init_geos(struct il_priv *il)
3256{
3257 struct il_channel_info *ch;
3258 struct ieee80211_supported_band *sband;
3259 struct ieee80211_channel *channels;
3260 struct ieee80211_channel *geo_ch;
3261 struct ieee80211_rate *rates;
3262 int i = 0;
3263 s8 max_tx_power = 0;
3264
3265 if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
3266 il->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
3267 D_INFO("Geography modes already initialized.\n");
3268 set_bit(S_GEO_CONFIGURED, &il->status);
3269 return 0;
3270 }
3271
3272 channels =
3273 kzalloc(sizeof(struct ieee80211_channel) * il->channel_count,
3274 GFP_KERNEL);
3275 if (!channels)
3276 return -ENOMEM;
3277
3278 rates =
3279 kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY),
3280 GFP_KERNEL);
3281 if (!rates) {
3282 kfree(channels);
3283 return -ENOMEM;
3284 }
3285
3286 /* 5.2GHz channels start after the 2.4GHz channels */
3287 sband = &il->bands[IEEE80211_BAND_5GHZ];
3288 sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
3289 /* just OFDM */
3290 sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
3291 sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;
3292
3293 if (il->cfg->sku & IL_SKU_N)
3294 il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ);
3295
3296 sband = &il->bands[IEEE80211_BAND_2GHZ];
3297 sband->channels = channels;
3298 /* OFDM & CCK */
3299 sband->bitrates = rates;
3300 sband->n_bitrates = RATE_COUNT_LEGACY;
3301
3302 if (il->cfg->sku & IL_SKU_N)
3303 il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ);
3304
3305 il->ieee_channels = channels;
3306 il->ieee_rates = rates;
3307
3308 for (i = 0; i < il->channel_count; i++) {
3309 ch = &il->channel_info[i];
3310
3311 if (!il_is_channel_valid(ch))
3312 continue;
3313
3314 sband = &il->bands[ch->band];
3315
3316 geo_ch = &sband->channels[sband->n_channels++];
3317
3318 geo_ch->center_freq =
3319 ieee80211_channel_to_frequency(ch->channel, ch->band);
3320 geo_ch->max_power = ch->max_power_avg;
3321 geo_ch->max_antenna_gain = 0xff;
3322 geo_ch->hw_value = ch->channel;
3323
3324 if (il_is_channel_valid(ch)) {
3325 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
3326 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
3327
3328 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
3329 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
3330
3331 if (ch->flags & EEPROM_CHANNEL_RADAR)
3332 geo_ch->flags |= IEEE80211_CHAN_RADAR;
3333
3334 geo_ch->flags |= ch->ht40_extension_channel;
3335
3336 if (ch->max_power_avg > max_tx_power)
3337 max_tx_power = ch->max_power_avg;
3338 } else {
3339 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
3340 }
3341
3342 D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel,
3343 geo_ch->center_freq,
3344 il_is_channel_a_band(ch) ? "5.2" : "2.4",
3345 geo_ch->
3346 flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid",
3347 geo_ch->flags);
3348 }
3349
3350 il->tx_power_device_lmt = max_tx_power;
3351 il->tx_power_user_lmt = max_tx_power;
3352 il->tx_power_next = max_tx_power;
3353
3354 if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 &&
3355 (il->cfg->sku & IL_SKU_A)) {
3356 IL_INFO("Incorrectly detected BG card as ABG. "
3357 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
3358 il->pci_dev->device, il->pci_dev->subsystem_device);
3359 il->cfg->sku &= ~IL_SKU_A;
3360 }
3361
3362 IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
3363 il->bands[IEEE80211_BAND_2GHZ].n_channels,
3364 il->bands[IEEE80211_BAND_5GHZ].n_channels);
3365
3366 set_bit(S_GEO_CONFIGURED, &il->status);
3367
3368 return 0;
3369}
3370EXPORT_SYMBOL(il_init_geos);
3371
3372/*
3373 * il_free_geos - undo allocations in il_init_geos
3374 */
3375void
3376il_free_geos(struct il_priv *il)
3377{
3378 kfree(il->ieee_channels);
3379 kfree(il->ieee_rates);
3380 clear_bit(S_GEO_CONFIGURED, &il->status);
3381}
3382EXPORT_SYMBOL(il_free_geos);
3383
3384static bool
3385il_is_channel_extension(struct il_priv *il, enum ieee80211_band band,
3386 u16 channel, u8 extension_chan_offset)
3387{
3388 const struct il_channel_info *ch_info;
3389
3390 ch_info = il_get_channel_info(il, band, channel);
3391 if (!il_is_channel_valid(ch_info))
3392 return false;
3393
3394 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
3395 return !(ch_info->
3396 ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS);
3397 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
3398 return !(ch_info->
3399 ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS);
3400
3401 return false;
3402}
3403
/*
 * il_is_ht40_tx_allowed - may we transmit HT40 in this context?
 *
 * True only if HT40 is enabled for @ctx, the peer (when @ht_cap is
 * non-NULL) supports HT, debugfs has not disabled HT40, and the staging
 * channel has a usable extension channel in the configured direction.
 */
bool
il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
		      struct ieee80211_sta_ht_cap *ht_cap)
{
	if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
		return false;

	/*
	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
	 * the bit will not set if it is pure 40MHz case
	 */
	if (ht_cap && !ht_cap->ht_supported)
		return false;

#ifdef CONFIG_IWLEGACY_DEBUGFS
	if (il->disable_ht40)
		return false;
#endif

	return il_is_channel_extension(il, il->band,
				       le16_to_cpu(ctx->staging.channel),
				       ctx->ht.extension_chan_offset);
}
EXPORT_SYMBOL(il_is_ht40_tx_allowed);
3428
3429static u16
3430il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
3431{
3432 u16 new_val;
3433 u16 beacon_factor;
3434
3435 /*
3436 * If mac80211 hasn't given us a beacon interval, program
3437 * the default into the device.
3438 */
3439 if (!beacon_val)
3440 return DEFAULT_BEACON_INTERVAL;
3441
3442 /*
3443 * If the beacon interval we obtained from the peer
3444 * is too large, we'll have to wake up more often
3445 * (and in IBSS case, we'll beacon too much)
3446 *
3447 * For example, if max_beacon_val is 4096, and the
3448 * requested beacon interval is 7000, we'll have to
3449 * use 3500 to be able to wake up on the beacons.
3450 *
3451 * This could badly influence beacon detection stats.
3452 */
3453
3454 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
3455 new_val = beacon_val / beacon_factor;
3456
3457 if (!new_val)
3458 new_val = max_beacon_val;
3459
3460 return new_val;
3461}
3462
/*
 * il_send_rxon_timing - program beacon/timing parameters into the device
 *
 * Fills ctx->timing from the current mac80211 configuration (listen
 * interval, beacon interval clamped to the device maximum, DTIM period,
 * and the beacon timer derived from the last TSF) and sends it with the
 * context's RXON timing command.  Caller must hold il->mutex.
 */
int
il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = ctx->vif;

	conf = &il->hw->conf;

	lockdep_assert_held(&il->mutex);

	memset(&ctx->timing, 0, sizeof(struct il_rxon_time_cmd));

	ctx->timing.timestamp = cpu_to_le64(il->timestamp);
	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/*
	 * TODO: For IBSS we need to get atim_win from mac80211,
	 * for now just always use 0
	 */
	ctx->timing.atim_win = 0;

	/* Clamp the interval to the device's programmable maximum */
	beacon_int =
	    il_adjust_beacon_interval(beacon_int,
				      il->hw_params.max_beacon_itrvl *
				      TIME_UNIT);
	ctx->timing.beacon_interval = cpu_to_le16(beacon_int);

	tsf = il->timestamp;	/* tsf is modified by do_div: copy it */
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	/* DTIM period of 0 is coerced to 1 */
	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1;

	D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n",
		le16_to_cpu(ctx->timing.beacon_interval),
		le32_to_cpu(ctx->timing.beacon_init_val),
		le16_to_cpu(ctx->timing.atim_win));

	return il_send_cmd_pdu(il, ctx->rxon_timing_cmd, sizeof(ctx->timing),
			       &ctx->timing);
}
EXPORT_SYMBOL(il_send_rxon_timing);
3511
3512void
3513il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx,
3514 int hw_decrypt)
3515{
3516 struct il_rxon_cmd *rxon = &ctx->staging;
3517
3518 if (hw_decrypt)
3519 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
3520 else
3521 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
3522
3523}
3524EXPORT_SYMBOL(il_set_rxon_hwcrypto);
3525
3526/* validate RXON structure is valid */
3527int
3528il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx)
3529{
3530 struct il_rxon_cmd *rxon = &ctx->staging;
3531 bool error = false;
3532
3533 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
3534 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
3535 IL_WARN("check 2.4G: wrong narrow\n");
3536 error = true;
3537 }
3538 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
3539 IL_WARN("check 2.4G: wrong radar\n");
3540 error = true;
3541 }
3542 } else {
3543 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
3544 IL_WARN("check 5.2G: not short slot!\n");
3545 error = true;
3546 }
3547 if (rxon->flags & RXON_FLG_CCK_MSK) {
3548 IL_WARN("check 5.2G: CCK!\n");
3549 error = true;
3550 }
3551 }
3552 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
3553 IL_WARN("mac/bssid mcast!\n");
3554 error = true;
3555 }
3556
3557 /* make sure basic rates 6Mbps and 1Mbps are supported */
3558 if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 &&
3559 (rxon->cck_basic_rates & RATE_1M_MASK) == 0) {
3560 IL_WARN("neither 1 nor 6 are basic\n");
3561 error = true;
3562 }
3563
3564 if (le16_to_cpu(rxon->assoc_id) > 2007) {
3565 IL_WARN("aid > 2007\n");
3566 error = true;
3567 }
3568
3569 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) ==
3570 (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
3571 IL_WARN("CCK and short slot\n");
3572 error = true;
3573 }
3574
3575 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) ==
3576 (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
3577 IL_WARN("CCK and auto detect");
3578 error = true;
3579 }
3580
3581 if ((rxon->
3582 flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) ==
3583 RXON_FLG_TGG_PROTECT_MSK) {
3584 IL_WARN("TGg but no auto-detect\n");
3585 error = true;
3586 }
3587
3588 if (error)
3589 IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel));
3590
3591 if (error) {
3592 IL_ERR("Invalid RXON\n");
3593 return -EINVAL;
3594 }
3595 return 0;
3596}
3597EXPORT_SYMBOL(il_check_rxon_cmd);
3598
3599/**
3600 * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
3601 * @il: staging_rxon is compared to active_rxon
3602 *
3603 * If the RXON structure is changing enough to require a new tune,
3604 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
3605 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
3606 */
3607int
3608il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx)
3609{
3610 const struct il_rxon_cmd *staging = &ctx->staging;
3611 const struct il_rxon_cmd *active = &ctx->active;
3612
3613#define CHK(cond) \
3614 if ((cond)) { \
3615 D_INFO("need full RXON - " #cond "\n"); \
3616 return 1; \
3617 }
3618
3619#define CHK_NEQ(c1, c2) \
3620 if ((c1) != (c2)) { \
3621 D_INFO("need full RXON - " \
3622 #c1 " != " #c2 " - %d != %d\n", \
3623 (c1), (c2)); \
3624 return 1; \
3625 }
3626
3627 /* These items are only settable from the full RXON command */
3628 CHK(!il_is_associated_ctx(ctx));
3629 CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
3630 CHK(compare_ether_addr(staging->node_addr, active->node_addr));
3631 CHK(compare_ether_addr
3632 (staging->wlap_bssid_addr, active->wlap_bssid_addr));
3633 CHK_NEQ(staging->dev_type, active->dev_type);
3634 CHK_NEQ(staging->channel, active->channel);
3635 CHK_NEQ(staging->air_propagation, active->air_propagation);
3636 CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
3637 active->ofdm_ht_single_stream_basic_rates);
3638 CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
3639 active->ofdm_ht_dual_stream_basic_rates);
3640 CHK_NEQ(staging->assoc_id, active->assoc_id);
3641
3642 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
3643 * be updated with the RXON_ASSOC command -- however only some
3644 * flag transitions are allowed using RXON_ASSOC */
3645
3646 /* Check if we are not switching bands */
3647 CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
3648 active->flags & RXON_FLG_BAND_24G_MSK);
3649
3650 /* Check if we are switching association toggle */
3651 CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
3652 active->filter_flags & RXON_FILTER_ASSOC_MSK);
3653
3654#undef CHK
3655#undef CHK_NEQ
3656
3657 return 0;
3658}
3659EXPORT_SYMBOL(il_full_rxon_required);
3660
3661u8
3662il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx)
3663{
3664 /*
3665 * Assign the lowest rate -- should really get this from
3666 * the beacon skb from mac80211.
3667 */
3668 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
3669 return RATE_1M_PLCP;
3670 else
3671 return RATE_6M_PLCP;
3672}
3673EXPORT_SYMBOL(il_get_lowest_plcp);
3674
/*
 * Apply the HT (802.11n) configuration from @ctx->ht to the staging RXON
 * flags: protection mode, channel width (20 MHz, 20/40 mixed or pure 40)
 * and the control-channel location relative to the extension channel.
 * Only touches ctx->staging; nothing is committed to the hardware here.
 */
static void
_il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf,
		struct il_rxon_context *ctx)
{
	struct il_rxon_cmd *rxon = &ctx->staging;

	if (!ctx->ht.enabled) {
		/* HT disabled: strip every HT-related flag and stop */
		rxon->flags &=
		    ~(RXON_FLG_CHANNEL_MODE_MSK |
		      RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK
		      | RXON_FLG_HT_PROT_MSK);
		return;
	}

	rxon->flags |=
	    cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);

	/* Set up channel bandwidth:
	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
	/* clear the HT channel mode before setting the new mode */
	rxon->flags &=
	    ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (il_is_ht40_tx_allowed(il, ctx, NULL)) {
		/* pure ht40 */
		if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
			/* Note: control channel is opposite of extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
				    ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			/* Note: control channel is opposite of extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
				    ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				/* channel location only valid if in Mixed mode */
				IL_ERR("invalid extension channel offset\n");
				break;
			}
		}
	} else {
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	/* let the HW-specific code adjust the RX chain configuration */
	if (il->cfg->ops->hcmd->set_rxon_chain)
		il->cfg->ops->hcmd->set_rxon_chain(il, ctx);

	D_ASSOC("rxon flags 0x%X operation mode :0x%X "
		"extension channel offset 0x%x\n", le32_to_cpu(rxon->flags),
		ctx->ht.protection, ctx->ht.extension_chan_offset);
}
3741
/* Public wrapper: apply @ht_conf to the device's single RXON context. */
void
il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
{
	_il_set_rxon_ht(il, ht_conf, &il->ctx);
}
EXPORT_SYMBOL(il_set_rxon_ht);
3748
3749/* Return valid, unused, channel for a passive scan to reset the RF */
3750u8
3751il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band)
3752{
3753 const struct il_channel_info *ch_info;
3754 int i;
3755 u8 channel = 0;
3756 u8 min, max;
3757
3758 if (band == IEEE80211_BAND_5GHZ) {
3759 min = 14;
3760 max = il->channel_count;
3761 } else {
3762 min = 0;
3763 max = 14;
3764 }
3765
3766 for (i = min; i < max; i++) {
3767 channel = il->channel_info[i].channel;
3768 if (channel == le16_to_cpu(il->ctx.staging.channel))
3769 continue;
3770
3771 ch_info = il_get_channel_info(il, band, channel);
3772 if (il_is_channel_valid(ch_info))
3773 break;
3774 }
3775
3776 return channel;
3777}
3778EXPORT_SYMBOL(il_get_single_channel_number);
3779
3780/**
3781 * il_set_rxon_channel - Set the band and channel values in staging RXON
3782 * @ch: requested channel as a pointer to struct ieee80211_channel
3783
3784 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
3785 * in the staging RXON flag structure based on the ch->band
3786 */
3787int
3788il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
3789 struct il_rxon_context *ctx)
3790{
3791 enum ieee80211_band band = ch->band;
3792 u16 channel = ch->hw_value;
3793
3794 if (le16_to_cpu(ctx->staging.channel) == channel && il->band == band)
3795 return 0;
3796
3797 ctx->staging.channel = cpu_to_le16(channel);
3798 if (band == IEEE80211_BAND_5GHZ)
3799 ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
3800 else
3801 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
3802
3803 il->band = band;
3804
3805 D_INFO("Staging channel set to %d [%d]\n", channel, band);
3806
3807 return 0;
3808}
3809EXPORT_SYMBOL(il_set_rxon_channel);
3810
3811void
3812il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx,
3813 enum ieee80211_band band, struct ieee80211_vif *vif)
3814{
3815 if (band == IEEE80211_BAND_5GHZ) {
3816 ctx->staging.flags &=
3817 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
3818 RXON_FLG_CCK_MSK);
3819 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3820 } else {
3821 /* Copied from il_post_associate() */
3822 if (vif && vif->bss_conf.use_short_slot)
3823 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3824 else
3825 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3826
3827 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
3828 ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
3829 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
3830 }
3831}
3832EXPORT_SYMBOL(il_set_flags_for_band);
3833
3834/*
3835 * initialize rxon structure with default values from eeprom
3836 */
3837void
3838il_connection_init_rx_config(struct il_priv *il, struct il_rxon_context *ctx)
3839{
3840 const struct il_channel_info *ch_info;
3841
3842 memset(&ctx->staging, 0, sizeof(ctx->staging));
3843
3844 if (!ctx->vif) {
3845 ctx->staging.dev_type = ctx->unused_devtype;
3846 } else
3847 switch (ctx->vif->type) {
3848
3849 case NL80211_IFTYPE_STATION:
3850 ctx->staging.dev_type = ctx->station_devtype;
3851 ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
3852 break;
3853
3854 case NL80211_IFTYPE_ADHOC:
3855 ctx->staging.dev_type = ctx->ibss_devtype;
3856 ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
3857 ctx->staging.filter_flags =
3858 RXON_FILTER_BCON_AWARE_MSK |
3859 RXON_FILTER_ACCEPT_GRP_MSK;
3860 break;
3861
3862 default:
3863 IL_ERR("Unsupported interface type %d\n",
3864 ctx->vif->type);
3865 break;
3866 }
3867
3868#if 0
3869 /* TODO: Figure out when short_preamble would be set and cache from
3870 * that */
3871 if (!hw_to_local(il->hw)->short_preamble)
3872 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
3873 else
3874 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
3875#endif
3876
3877 ch_info =
3878 il_get_channel_info(il, il->band, le16_to_cpu(ctx->active.channel));
3879
3880 if (!ch_info)
3881 ch_info = &il->channel_info[0];
3882
3883 ctx->staging.channel = cpu_to_le16(ch_info->channel);
3884 il->band = ch_info->band;
3885
3886 il_set_flags_for_band(il, ctx, il->band, ctx->vif);
3887
3888 ctx->staging.ofdm_basic_rates =
3889 (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
3890 ctx->staging.cck_basic_rates =
3891 (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
3892
3893 /* clear both MIX and PURE40 mode flag */
3894 ctx->staging.flags &=
3895 ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40);
3896 if (ctx->vif)
3897 memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
3898
3899 ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
3900 ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
3901}
3902EXPORT_SYMBOL(il_connection_init_rx_config);
3903
3904void
3905il_set_rate(struct il_priv *il)
3906{
3907 const struct ieee80211_supported_band *hw = NULL;
3908 struct ieee80211_rate *rate;
3909 int i;
3910
3911 hw = il_get_hw_mode(il, il->band);
3912 if (!hw) {
3913 IL_ERR("Failed to set rate: unable to get hw mode\n");
3914 return;
3915 }
3916
3917 il->active_rate = 0;
3918
3919 for (i = 0; i < hw->n_bitrates; i++) {
3920 rate = &(hw->bitrates[i]);
3921 if (rate->hw_value < RATE_COUNT_LEGACY)
3922 il->active_rate |= (1 << rate->hw_value);
3923 }
3924
3925 D_RATE("Set active_rate = %0x\n", il->active_rate);
3926
3927 il->ctx.staging.cck_basic_rates =
3928 (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
3929
3930 il->ctx.staging.ofdm_basic_rates =
3931 (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
3932}
3933EXPORT_SYMBOL(il_set_rate);
3934
3935void
3936il_chswitch_done(struct il_priv *il, bool is_success)
3937{
3938 struct il_rxon_context *ctx = &il->ctx;
3939
3940 if (test_bit(S_EXIT_PENDING, &il->status))
3941 return;
3942
3943 if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
3944 ieee80211_chswitch_done(ctx->vif, is_success);
3945}
3946EXPORT_SYMBOL(il_chswitch_done);
3947
3948void
3949il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
3950{
3951 struct il_rx_pkt *pkt = rxb_addr(rxb);
3952 struct il_csa_notification *csa = &(pkt->u.csa_notif);
3953
3954 struct il_rxon_context *ctx = &il->ctx;
3955 struct il_rxon_cmd *rxon = (void *)&ctx->active;
3956
3957 if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
3958 return;
3959
3960 if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
3961 rxon->channel = csa->channel;
3962 ctx->staging.channel = csa->channel;
3963 D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel));
3964 il_chswitch_done(il, true);
3965 } else {
3966 IL_ERR("CSA notif (fail) : channel %d\n",
3967 le16_to_cpu(csa->channel));
3968 il_chswitch_done(il, false);
3969 }
3970}
3971EXPORT_SYMBOL(il_hdl_csa);
3972
#ifdef CONFIG_IWLEGACY_DEBUG
/* Dump the staging RXON command of @ctx to the debug log (radio level). */
void
il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx)
{
	struct il_rxon_cmd *rxon = &ctx->staging;

	D_RADIO("RX CONFIG:\n");
	il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
	D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags));
	D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
	D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates);
	D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
	D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
	D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
}
EXPORT_SYMBOL(il_print_rx_config_cmd);
#endif
3993/**
3994 * il_irq_handle_error - called for HW or SW error interrupt from card
3995 */
3996void
3997il_irq_handle_error(struct il_priv *il)
3998{
3999 /* Set the FW error flag -- cleared on il_down */
4000 set_bit(S_FW_ERROR, &il->status);
4001
4002 /* Cancel currently queued command. */
4003 clear_bit(S_HCMD_ACTIVE, &il->status);
4004
4005 IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version);
4006
4007 il->cfg->ops->lib->dump_nic_error_log(il);
4008 if (il->cfg->ops->lib->dump_fh)
4009 il->cfg->ops->lib->dump_fh(il, NULL, false);
4010#ifdef CONFIG_IWLEGACY_DEBUG
4011 if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
4012 il_print_rx_config_cmd(il, &il->ctx);
4013#endif
4014
4015 wake_up(&il->wait_command_queue);
4016
4017 /* Keep the restart process from trying to send host
4018 * commands by clearing the INIT status bit */
4019 clear_bit(S_READY, &il->status);
4020
4021 if (!test_bit(S_EXIT_PENDING, &il->status)) {
4022 IL_DBG(IL_DL_FW_ERRORS,
4023 "Restarting adapter due to uCode error.\n");
4024
4025 if (il->cfg->mod_params->restart_fw)
4026 queue_work(il->workqueue, &il->restart);
4027 }
4028}
4029EXPORT_SYMBOL(il_irq_handle_error);
4030
4031static int
4032il_apm_stop_master(struct il_priv *il)
4033{
4034 int ret = 0;
4035
4036 /* stop device's busmaster DMA activity */
4037 il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
4038
4039 ret =
4040 _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
4041 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
4042 if (ret)
4043 IL_WARN("Master Disable Timed Out, 100 usec\n");
4044
4045 D_INFO("stop master\n");
4046
4047 return ret;
4048}
4049
/*
 * Power down the card: stop DMA, issue a software reset, then drop out
 * of the powered-up-active (D0A) state. Order matters: DMA must be
 * quiesced before the reset, and INIT_DONE cleared last.
 */
void
il_apm_stop(struct il_priv *il)
{
	D_INFO("Stop card, put in low power state\n");

	/* Stop device's DMA activity */
	il_apm_stop_master(il);

	/* Reset the entire device */
	il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	/* give the reset time to take effect */
	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
EXPORT_SYMBOL(il_apm_stop);
4070
4071/*
4072 * Start up NIC's basic functionality after it has been reset
4073 * (e.g. after platform boot, or shutdown via il_apm_stop())
4074 * NOTE: This does not load uCode nor start the embedded processor
4075 */
4076int
4077il_apm_init(struct il_priv *il)
4078{
4079 int ret = 0;
4080 u16 lctl;
4081
4082 D_INFO("Init card's basic functions\n");
4083
4084 /*
4085 * Use "set_bit" below rather than "write", to preserve any hardware
4086 * bits already set by default after reset.
4087 */
4088
4089 /* Disable L0S exit timer (platform NMI Work/Around) */
4090 il_set_bit(il, CSR_GIO_CHICKEN_BITS,
4091 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
4092
4093 /*
4094 * Disable L0s without affecting L1;
4095 * don't wait for ICH L0s (ICH bug W/A)
4096 */
4097 il_set_bit(il, CSR_GIO_CHICKEN_BITS,
4098 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
4099
4100 /* Set FH wait threshold to maximum (HW error during stress W/A) */
4101 il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
4102
4103 /*
4104 * Enable HAP INTA (interrupt from management bus) to
4105 * wake device's PCI Express link L1a -> L0s
4106 * NOTE: This is no-op for 3945 (non-existent bit)
4107 */
4108 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
4109 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
4110
4111 /*
4112 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
4113 * Check if BIOS (or OS) enabled L1-ASPM on this device.
4114 * If so (likely), disable L0S, so device moves directly L0->L1;
4115 * costs negligible amount of power savings.
4116 * If not (unlikely), enable L0S, so there is at least some
4117 * power savings, even without L1.
4118 */
4119 if (il->cfg->base_params->set_l0s) {
4120 lctl = il_pcie_link_ctl(il);
4121 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
4122 PCI_CFG_LINK_CTRL_VAL_L1_EN) {
4123 /* L1-ASPM enabled; disable(!) L0S */
4124 il_set_bit(il, CSR_GIO_REG,
4125 CSR_GIO_REG_VAL_L0S_ENABLED);
4126 D_POWER("L1 Enabled; Disabling L0S\n");
4127 } else {
4128 /* L1-ASPM disabled; enable(!) L0S */
4129 il_clear_bit(il, CSR_GIO_REG,
4130 CSR_GIO_REG_VAL_L0S_ENABLED);
4131 D_POWER("L1 Disabled; Enabling L0S\n");
4132 }
4133 }
4134
4135 /* Configure analog phase-lock-loop before activating to D0A */
4136 if (il->cfg->base_params->pll_cfg_val)
4137 il_set_bit(il, CSR_ANA_PLL_CFG,
4138 il->cfg->base_params->pll_cfg_val);
4139
4140 /*
4141 * Set "initialization complete" bit to move adapter from
4142 * D0U* --> D0A* (powered-up active) state.
4143 */
4144 il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
4145
4146 /*
4147 * Wait for clock stabilization; once stabilized, access to
4148 * device-internal resources is supported, e.g. il_wr_prph()
4149 * and accesses to uCode SRAM.
4150 */
4151 ret =
4152 _il_poll_bit(il, CSR_GP_CNTRL,
4153 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
4154 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
4155 if (ret < 0) {
4156 D_INFO("Failed to init the card\n");
4157 goto out;
4158 }
4159
4160 /*
4161 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
4162 * BSM (Boostrap State Machine) is only in 3945 and 4965.
4163 *
4164 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
4165 * do not disable clocks. This preserves any hardware bits already
4166 * set by default in "CLK_CTRL_REG" after reset.
4167 */
4168 if (il->cfg->base_params->use_bsm)
4169 il_wr_prph(il, APMG_CLK_EN_REG,
4170 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
4171 else
4172 il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
4173 udelay(20);
4174
4175 /* Disable L1-Active */
4176 il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
4177 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
4178
4179out:
4180 return ret;
4181}
4182EXPORT_SYMBOL(il_apm_init);
4183
4184int
4185il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
4186{
4187 int ret;
4188 s8 prev_tx_power;
4189 bool defer;
4190 struct il_rxon_context *ctx = &il->ctx;
4191
4192 lockdep_assert_held(&il->mutex);
4193
4194 if (il->tx_power_user_lmt == tx_power && !force)
4195 return 0;
4196
4197 if (!il->cfg->ops->lib->send_tx_power)
4198 return -EOPNOTSUPP;
4199
4200 /* 0 dBm mean 1 milliwatt */
4201 if (tx_power < 0) {
4202 IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power);
4203 return -EINVAL;
4204 }
4205
4206 if (tx_power > il->tx_power_device_lmt) {
4207 IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
4208 tx_power, il->tx_power_device_lmt);
4209 return -EINVAL;
4210 }
4211
4212 if (!il_is_ready_rf(il))
4213 return -EIO;
4214
4215 /* scan complete and commit_rxon use tx_power_next value,
4216 * it always need to be updated for newest request */
4217 il->tx_power_next = tx_power;
4218
4219 /* do not set tx power when scanning or channel changing */
4220 defer = test_bit(S_SCANNING, &il->status) ||
4221 memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
4222 if (defer && !force) {
4223 D_INFO("Deferring tx power set\n");
4224 return 0;
4225 }
4226
4227 prev_tx_power = il->tx_power_user_lmt;
4228 il->tx_power_user_lmt = tx_power;
4229
4230 ret = il->cfg->ops->lib->send_tx_power(il);
4231
4232 /* if fail to set tx_power, restore the orig. tx power */
4233 if (ret) {
4234 il->tx_power_user_lmt = prev_tx_power;
4235 il->tx_power_next = prev_tx_power;
4236 }
4237 return ret;
4238}
4239EXPORT_SYMBOL(il_set_tx_power);
4240
4241void
4242il_send_bt_config(struct il_priv *il)
4243{
4244 struct il_bt_cmd bt_cmd = {
4245 .lead_time = BT_LEAD_TIME_DEF,
4246 .max_kill = BT_MAX_KILL_DEF,
4247 .kill_ack_mask = 0,
4248 .kill_cts_mask = 0,
4249 };
4250
4251 if (!bt_coex_active)
4252 bt_cmd.flags = BT_COEX_DISABLE;
4253 else
4254 bt_cmd.flags = BT_COEX_ENABLE;
4255
4256 D_INFO("BT coex %s\n",
4257 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
4258
4259 if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
4260 IL_ERR("failed to send BT Coex Config\n");
4261}
4262EXPORT_SYMBOL(il_send_bt_config);
4263
4264int
4265il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
4266{
4267 struct il_stats_cmd stats_cmd = {
4268 .configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0,
4269 };
4270
4271 if (flags & CMD_ASYNC)
4272 return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd),
4273 &stats_cmd, NULL);
4274 else
4275 return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd),
4276 &stats_cmd);
4277}
4278EXPORT_SYMBOL(il_send_stats_request);
4279
/* Handle a PM sleep notification: debug-log only, no state change. */
void
il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_sleep_notification *sleep = &(pkt->u.sleep_notif);
	D_RX("sleep mode: %d, src: %d\n",
	     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
EXPORT_SYMBOL(il_hdl_pm_sleep);
4291
4292void
4293il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb)
4294{
4295 struct il_rx_pkt *pkt = rxb_addr(rxb);
4296 u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
4297 D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len,
4298 il_get_cmd_string(pkt->hdr.cmd));
4299 il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
4300}
4301EXPORT_SYMBOL(il_hdl_pm_debug_stats);
4302
/* Handle an error-response packet from the uCode: log its details. */
void
il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) "
	       "seq 0x%04X ser 0x%08X\n",
	       le32_to_cpu(pkt->u.err_resp.error_type),
	       il_get_cmd_string(pkt->u.err_resp.cmd_id),
	       pkt->u.err_resp.cmd_id,
	       le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
	       le32_to_cpu(pkt->u.err_resp.error_info));
}
EXPORT_SYMBOL(il_hdl_error);
4317
/* Zero the accumulated interrupt-statistics counters. */
void
il_clear_isr_stats(struct il_priv *il)
{
	memset(&il->isr_stats, 0, sizeof(il->isr_stats));
}
4323
/*
 * mac80211 conf_tx callback: store the EDCA parameters for one AC in
 * the driver's QoS table. The values are only cached here; they are
 * sent to the firmware by the QoS commit path, not by this function.
 */
int
il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
	       const struct ieee80211_tx_queue_params *params)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;
	int q;

	D_MAC80211("enter\n");

	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - RF not ready\n");
		return -EIO;
	}

	/* out-of-range queues are silently accepted (mac80211 contract) */
	if (queue >= AC_NUM) {
		D_MAC80211("leave - queue >= AC_NUM %d\n", queue);
		return 0;
	}

	/* mac80211 numbers queues highest-priority-first; firmware
	 * expects the reverse order */
	q = AC_NUM - 1 - queue;

	spin_lock_irqsave(&il->lock, flags);

	il->ctx.qos_data.def_qos_parm.ac[q].cw_min =
	    cpu_to_le16(params->cw_min);
	il->ctx.qos_data.def_qos_parm.ac[q].cw_max =
	    cpu_to_le16(params->cw_max);
	il->ctx.qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
	/* txop is given in units of 32 usec */
	il->ctx.qos_data.def_qos_parm.ac[q].edca_txop =
	    cpu_to_le16((params->txop * 32));

	il->ctx.qos_data.def_qos_parm.ac[q].reserved1 = 0;

	spin_unlock_irqrestore(&il->lock, flags);

	D_MAC80211("leave\n");
	return 0;
}
EXPORT_SYMBOL(il_mac_conf_tx);
4364
/* mac80211 callback: report whether we sent the last IBSS beacon. */
int
il_mac_tx_last_beacon(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;

	return il->ibss_manager == IL_IBSS_MANAGER;
}
EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);
4373
/*
 * Re-derive the staging RXON from defaults for @ctx, let the HW layer
 * adjust the RX chain, then commit the result to the device.
 */
static int
il_set_mode(struct il_priv *il, struct il_rxon_context *ctx)
{
	il_connection_init_rx_config(il, ctx);

	if (il->cfg->ops->hcmd->set_rxon_chain)
		il->cfg->ops->hcmd->set_rxon_chain(il, ctx);

	return il_commit_rxon(il, ctx);
}
4384
4385static int
4386il_setup_interface(struct il_priv *il, struct il_rxon_context *ctx)
4387{
4388 struct ieee80211_vif *vif = ctx->vif;
4389 int err;
4390
4391 lockdep_assert_held(&il->mutex);
4392
4393 /*
4394 * This variable will be correct only when there's just
4395 * a single context, but all code using it is for hardware
4396 * that supports only one context.
4397 */
4398 il->iw_mode = vif->type;
4399
4400 ctx->is_active = true;
4401
4402 err = il_set_mode(il, ctx);
4403 if (err) {
4404 if (!ctx->always_active)
4405 ctx->is_active = false;
4406 return err;
4407 }
4408
4409 return 0;
4410}
4411
4412int
4413il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
4414{
4415 struct il_priv *il = hw->priv;
4416 struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
4417 int err;
4418 u32 modes;
4419
4420 D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
4421
4422 mutex_lock(&il->mutex);
4423
4424 if (!il_is_ready_rf(il)) {
4425 IL_WARN("Try to add interface when device not ready\n");
4426 err = -EINVAL;
4427 goto out;
4428 }
4429
4430 /* check if busy context is exclusive */
4431 if (il->ctx.vif &&
4432 (il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type))) {
4433 err = -EINVAL;
4434 goto out;
4435 }
4436
4437 modes = il->ctx.interface_modes | il->ctx.exclusive_interface_modes;
4438 if (!(modes & BIT(vif->type))) {
4439 err = -EOPNOTSUPP;
4440 goto out;
4441 }
4442
4443 vif_priv->ctx = &il->ctx;
4444 il->ctx.vif = vif;
4445
4446 err = il_setup_interface(il, &il->ctx);
4447 if (err) {
4448 il->ctx.vif = NULL;
4449 il->iw_mode = NL80211_IFTYPE_STATION;
4450 }
4451
4452out:
4453 mutex_unlock(&il->mutex);
4454
4455 D_MAC80211("leave\n");
4456 return err;
4457}
4458EXPORT_SYMBOL(il_mac_add_interface);
4459
/*
 * Undo il_setup_interface() for @vif: abort any scan it owns and,
 * unless this is part of an interface-type change (@mode_change),
 * reprogram the context to its default (idle) mode and deactivate it.
 * Caller must hold il->mutex.
 */
static void
il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
		      bool mode_change)
{
	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);

	lockdep_assert_held(&il->mutex);

	/* stop a scan started on behalf of this vif */
	if (il->scan_vif == vif) {
		il_scan_cancel_timeout(il, 200);
		il_force_scan_end(il);
	}

	if (!mode_change) {
		il_set_mode(il, ctx);
		if (!ctx->always_active)
			ctx->is_active = false;
	}
}
4479
/*
 * mac80211 remove_interface callback: detach @vif from its context,
 * tear the interface down and forget the BSSID.
 */
void
il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);

	D_MAC80211("enter\n");

	mutex_lock(&il->mutex);

	/* the context should still reference the vif being removed */
	WARN_ON(ctx->vif != vif);
	ctx->vif = NULL;

	il_teardown_interface(il, vif, false);

	memset(il->bssid, 0, ETH_ALEN);
	mutex_unlock(&il->mutex);

	D_MAC80211("leave\n");

}
EXPORT_SYMBOL(il_mac_remove_interface);
4502
4503int
4504il_alloc_txq_mem(struct il_priv *il)
4505{
4506 if (!il->txq)
4507 il->txq =
4508 kzalloc(sizeof(struct il_tx_queue) *
4509 il->cfg->base_params->num_of_queues, GFP_KERNEL);
4510 if (!il->txq) {
4511 IL_ERR("Not enough memory for txq\n");
4512 return -ENOMEM;
4513 }
4514 return 0;
4515}
4516EXPORT_SYMBOL(il_alloc_txq_mem);
4517
/*
 * Free the TX-queue array allocated by il_alloc_txq_mem() and reset
 * the pointer so a later alloc/free is safe. (Despite the name, this
 * function only frees.)
 */
void
il_txq_mem(struct il_priv *il)
{
	kfree(il->txq);
	il->txq = NULL;
}
EXPORT_SYMBOL(il_txq_mem);
4525
4526#ifdef CONFIG_IWLEGACY_DEBUGFS
4527
4528#define IL_TRAFFIC_DUMP_SIZE (IL_TRAFFIC_ENTRY_SIZE * IL_TRAFFIC_ENTRIES)
4529
/* Rewind both traffic-capture ring indexes and wipe any stored frames. */
void
il_reset_traffic_log(struct il_priv *il)
{
	il->tx_traffic_idx = 0;
	il->rx_traffic_idx = 0;
	if (il->tx_traffic)
		memset(il->tx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
	if (il->rx_traffic)
		memset(il->rx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
}
4540
4541int
4542il_alloc_traffic_mem(struct il_priv *il)
4543{
4544 u32 traffic_size = IL_TRAFFIC_DUMP_SIZE;
4545
4546 if (il_debug_level & IL_DL_TX) {
4547 if (!il->tx_traffic) {
4548 il->tx_traffic = kzalloc(traffic_size, GFP_KERNEL);
4549 if (!il->tx_traffic)
4550 return -ENOMEM;
4551 }
4552 }
4553 if (il_debug_level & IL_DL_RX) {
4554 if (!il->rx_traffic) {
4555 il->rx_traffic = kzalloc(traffic_size, GFP_KERNEL);
4556 if (!il->rx_traffic)
4557 return -ENOMEM;
4558 }
4559 }
4560 il_reset_traffic_log(il);
4561 return 0;
4562}
4563EXPORT_SYMBOL(il_alloc_traffic_mem);
4564
/* Free both traffic-capture buffers and reset the pointers. */
void
il_free_traffic_mem(struct il_priv *il)
{
	kfree(il->tx_traffic);
	il->tx_traffic = NULL;

	kfree(il->rx_traffic);
	il->rx_traffic = NULL;
}
EXPORT_SYMBOL(il_free_traffic_mem);
4575
4576void
4577il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
4578 struct ieee80211_hdr *header)
4579{
4580 __le16 fc;
4581 u16 len;
4582
4583 if (likely(!(il_debug_level & IL_DL_TX)))
4584 return;
4585
4586 if (!il->tx_traffic)
4587 return;
4588
4589 fc = header->frame_control;
4590 if (ieee80211_is_data(fc)) {
4591 len =
4592 (length >
4593 IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length;
4594 memcpy((il->tx_traffic +
4595 (il->tx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header,
4596 len);
4597 il->tx_traffic_idx =
4598 (il->tx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
4599 }
4600}
4601EXPORT_SYMBOL(il_dbg_log_tx_data_frame);
4602
4603void
4604il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
4605 struct ieee80211_hdr *header)
4606{
4607 __le16 fc;
4608 u16 len;
4609
4610 if (likely(!(il_debug_level & IL_DL_RX)))
4611 return;
4612
4613 if (!il->rx_traffic)
4614 return;
4615
4616 fc = header->frame_control;
4617 if (ieee80211_is_data(fc)) {
4618 len =
4619 (length >
4620 IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length;
4621 memcpy((il->rx_traffic +
4622 (il->rx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header,
4623 len);
4624 il->rx_traffic_idx =
4625 (il->rx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
4626 }
4627}
4628EXPORT_SYMBOL(il_dbg_log_rx_data_frame);
4629
/* Map a MANAGEMENT_* stats index to its name (via the IL_CMD macro). */
const char *
il_get_mgmt_string(int cmd)
{
	switch (cmd) {
		IL_CMD(MANAGEMENT_ASSOC_REQ);
		IL_CMD(MANAGEMENT_ASSOC_RESP);
		IL_CMD(MANAGEMENT_REASSOC_REQ);
		IL_CMD(MANAGEMENT_REASSOC_RESP);
		IL_CMD(MANAGEMENT_PROBE_REQ);
		IL_CMD(MANAGEMENT_PROBE_RESP);
		IL_CMD(MANAGEMENT_BEACON);
		IL_CMD(MANAGEMENT_ATIM);
		IL_CMD(MANAGEMENT_DISASSOC);
		IL_CMD(MANAGEMENT_AUTH);
		IL_CMD(MANAGEMENT_DEAUTH);
		IL_CMD(MANAGEMENT_ACTION);
	default:
		return "UNKNOWN";

	}
}
4651
/* Map a CONTROL_* stats index to its name (via the IL_CMD macro). */
const char *
il_get_ctrl_string(int cmd)
{
	switch (cmd) {
		IL_CMD(CONTROL_BACK_REQ);
		IL_CMD(CONTROL_BACK);
		IL_CMD(CONTROL_PSPOLL);
		IL_CMD(CONTROL_RTS);
		IL_CMD(CONTROL_CTS);
		IL_CMD(CONTROL_ACK);
		IL_CMD(CONTROL_CFEND);
		IL_CMD(CONTROL_CFENDACK);
	default:
		return "UNKNOWN";

	}
}
4669
4670void
4671il_clear_traffic_stats(struct il_priv *il)
4672{
4673 memset(&il->tx_stats, 0, sizeof(struct traffic_stats));
4674 memset(&il->rx_stats, 0, sizeof(struct traffic_stats));
4675}
4676
4677/*
4678 * if CONFIG_IWLEGACY_DEBUGFS defined,
4679 * il_update_stats function will
4680 * record all the MGMT, CTRL and DATA pkt for both TX and Rx pass
4681 * Use debugFs to display the rx/rx_stats
4682 * if CONFIG_IWLEGACY_DEBUGFS not being defined, then no MGMT and CTRL
4683 * information will be recorded, but DATA pkt still will be recorded
4684 * for the reason of il_led.c need to control the led blinking based on
4685 * number of tx and rx data.
4686 *
4687 */
/*
 * Account one TX (@is_tx) or RX frame in the traffic statistics.
 * Management and control frames are tallied per subtype; everything
 * else counts as data (frame count plus @len bytes).
 */
void
il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
{
	struct traffic_stats *stats;

	if (is_tx)
		stats = &il->tx_stats;
	else
		stats = &il->rx_stats;

	if (ieee80211_is_mgmt(fc)) {
		/* dispatch on the (little-endian) frame-control subtype */
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
			stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
			stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
			stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
			stats->mgmt[MANAGEMENT_PROBE_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
			stats->mgmt[MANAGEMENT_PROBE_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BEACON):
			stats->mgmt[MANAGEMENT_BEACON]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ATIM):
			stats->mgmt[MANAGEMENT_ATIM]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
			stats->mgmt[MANAGEMENT_DISASSOC]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
			stats->mgmt[MANAGEMENT_AUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
			stats->mgmt[MANAGEMENT_DEAUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACTION):
			stats->mgmt[MANAGEMENT_ACTION]++;
			break;
		}
	} else if (ieee80211_is_ctl(fc)) {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
			stats->ctrl[CONTROL_BACK_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BACK):
			stats->ctrl[CONTROL_BACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
			stats->ctrl[CONTROL_PSPOLL]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_RTS):
			stats->ctrl[CONTROL_RTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CTS):
			stats->ctrl[CONTROL_CTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACK):
			stats->ctrl[CONTROL_ACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFEND):
			stats->ctrl[CONTROL_CFEND]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
			stats->ctrl[CONTROL_CFENDACK]++;
			break;
		}
	} else {
		/* data */
		stats->data_cnt++;
		stats->data_bytes += len;
	}
}
EXPORT_SYMBOL(il_update_stats);
4771#endif
4772
4773int
4774il_force_reset(struct il_priv *il, bool external)
4775{
4776 struct il_force_reset *force_reset;
4777
4778 if (test_bit(S_EXIT_PENDING, &il->status))
4779 return -EINVAL;
4780
4781 force_reset = &il->force_reset;
4782 force_reset->reset_request_count++;
4783 if (!external) {
4784 if (force_reset->last_force_reset_jiffies &&
4785 time_after(force_reset->last_force_reset_jiffies +
4786 force_reset->reset_duration, jiffies)) {
4787 D_INFO("force reset rejected\n");
4788 force_reset->reset_reject_count++;
4789 return -EAGAIN;
4790 }
4791 }
4792 force_reset->reset_success_count++;
4793 force_reset->last_force_reset_jiffies = jiffies;
4794
4795 /*
4796 * if the request is from external(ex: debugfs),
4797 * then always perform the request in regardless the module
4798 * parameter setting
4799 * if the request is from internal (uCode error or driver
4800 * detect failure), then fw_restart module parameter
4801 * need to be check before performing firmware reload
4802 */
4803
4804 if (!external && !il->cfg->mod_params->restart_fw) {
4805 D_INFO("Cancel firmware reload based on "
4806 "module parameter setting\n");
4807 return 0;
4808 }
4809
4810 IL_ERR("On demand firmware reload\n");
4811
4812 /* Set the FW error flag -- cleared on il_down */
4813 set_bit(S_FW_ERROR, &il->status);
4814 wake_up(&il->wait_command_queue);
4815 /*
4816 * Keep the restart process from trying to send host
4817 * commands by clearing the INIT status bit
4818 */
4819 clear_bit(S_READY, &il->status);
4820 queue_work(il->workqueue, &il->restart);
4821
4822 return 0;
4823}
4824
4825int
4826il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
4827 enum nl80211_iftype newtype, bool newp2p)
4828{
4829 struct il_priv *il = hw->priv;
4830 struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
4831 u32 modes;
4832 int err;
4833
4834 newtype = ieee80211_iftype_p2p(newtype, newp2p);
4835
4836 mutex_lock(&il->mutex);
4837
4838 if (!ctx->vif || !il_is_ready_rf(il)) {
4839 /*
4840 * Huh? But wait ... this can maybe happen when
4841 * we're in the middle of a firmware restart!
4842 */
4843 err = -EBUSY;
4844 goto out;
4845 }
4846
4847 modes = ctx->interface_modes | ctx->exclusive_interface_modes;
4848 if (!(modes & BIT(newtype))) {
4849 err = -EOPNOTSUPP;
4850 goto out;
4851 }
4852
4853 if ((il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type)) ||
4854 (il->ctx.exclusive_interface_modes & BIT(newtype))) {
4855 err = -EINVAL;
4856 goto out;
4857 }
4858
4859 /* success */
4860 il_teardown_interface(il, vif, true);
4861 vif->type = newtype;
4862 vif->p2p = newp2p;
4863 err = il_setup_interface(il, ctx);
4864 WARN_ON(err);
4865 /*
4866 * We've switched internally, but submitting to the
4867 * device may have failed for some reason. Mask this
4868 * error, because otherwise mac80211 will not switch
4869 * (and set the interface type back) and we'll be
4870 * out of sync with it.
4871 */
4872 err = 0;
4873
4874out:
4875 mutex_unlock(&il->mutex);
4876 return err;
4877}
4878EXPORT_SYMBOL(il_mac_change_interface);
4879
4880/*
4881 * On every watchdog tick we check (latest) time stamp. If it does not
4882 * change during timeout period and queue is not empty we reset firmware.
4883 */
4884static int
4885il_check_stuck_queue(struct il_priv *il, int cnt)
4886{
4887 struct il_tx_queue *txq = &il->txq[cnt];
4888 struct il_queue *q = &txq->q;
4889 unsigned long timeout;
4890 int ret;
4891
4892 if (q->read_ptr == q->write_ptr) {
4893 txq->time_stamp = jiffies;
4894 return 0;
4895 }
4896
4897 timeout =
4898 txq->time_stamp +
4899 msecs_to_jiffies(il->cfg->base_params->wd_timeout);
4900
4901 if (time_after(jiffies, timeout)) {
4902 IL_ERR("Queue %d stuck for %u ms.\n", q->id,
4903 il->cfg->base_params->wd_timeout);
4904 ret = il_force_reset(il, false);
4905 return (ret == -EAGAIN) ? 0 : 1;
4906 }
4907
4908 return 0;
4909}
4910
/*
 * Make the watchdog tick a quarter of the timeout; this ensures a hung
 * queue is discovered between timeout and 1.25 * timeout.
 */
#define IL_WD_TICK(timeout) ((timeout) / 4)
4916
4917/*
4918 * Watchdog timer callback, we check each tx queue for stuck, if if hung
4919 * we reset the firmware. If everything is fine just rearm the timer.
4920 */
4921void
4922il_bg_watchdog(unsigned long data)
4923{
4924 struct il_priv *il = (struct il_priv *)data;
4925 int cnt;
4926 unsigned long timeout;
4927
4928 if (test_bit(S_EXIT_PENDING, &il->status))
4929 return;
4930
4931 timeout = il->cfg->base_params->wd_timeout;
4932 if (timeout == 0)
4933 return;
4934
4935 /* monitor and check for stuck cmd queue */
4936 if (il_check_stuck_queue(il, il->cmd_queue))
4937 return;
4938
4939 /* monitor and check for other stuck queues */
4940 if (il_is_any_associated(il)) {
4941 for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
4942 /* skip as we already checked the command queue */
4943 if (cnt == il->cmd_queue)
4944 continue;
4945 if (il_check_stuck_queue(il, cnt))
4946 return;
4947 }
4948 }
4949
4950 mod_timer(&il->watchdog,
4951 jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
4952}
4953EXPORT_SYMBOL(il_bg_watchdog);
4954
4955void
4956il_setup_watchdog(struct il_priv *il)
4957{
4958 unsigned int timeout = il->cfg->base_params->wd_timeout;
4959
4960 if (timeout)
4961 mod_timer(&il->watchdog,
4962 jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
4963 else
4964 del_timer(&il->watchdog);
4965}
4966EXPORT_SYMBOL(il_setup_watchdog);
4967
4968/*
4969 * extended beacon time format
4970 * time in usec will be changed into a 32-bit value in extended:internal format
4971 * the extended part is the beacon counts
4972 * the internal part is the time in usec within one beacon interval
4973 */
4974u32
4975il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
4976{
4977 u32 quot;
4978 u32 rem;
4979 u32 interval = beacon_interval * TIME_UNIT;
4980
4981 if (!interval || !usec)
4982 return 0;
4983
4984 quot =
4985 (usec /
4986 interval) & (il_beacon_time_mask_high(il,
4987 il->hw_params.
4988 beacon_time_tsf_bits) >> il->
4989 hw_params.beacon_time_tsf_bits);
4990 rem =
4991 (usec % interval) & il_beacon_time_mask_low(il,
4992 il->hw_params.
4993 beacon_time_tsf_bits);
4994
4995 return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
4996}
4997EXPORT_SYMBOL(il_usecs_to_beacons);
4998
4999/* base is usually what we get from ucode with each received frame,
5000 * the same as HW timer counter counting down
5001 */
5002__le32
5003il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
5004 u32 beacon_interval)
5005{
5006 u32 base_low = base & il_beacon_time_mask_low(il,
5007 il->hw_params.
5008 beacon_time_tsf_bits);
5009 u32 addon_low = addon & il_beacon_time_mask_low(il,
5010 il->hw_params.
5011 beacon_time_tsf_bits);
5012 u32 interval = beacon_interval * TIME_UNIT;
5013 u32 res = (base & il_beacon_time_mask_high(il,
5014 il->hw_params.
5015 beacon_time_tsf_bits)) +
5016 (addon & il_beacon_time_mask_high(il,
5017 il->hw_params.
5018 beacon_time_tsf_bits));
5019
5020 if (base_low > addon_low)
5021 res += base_low - addon_low;
5022 else if (base_low < addon_low) {
5023 res += interval + base_low - addon_low;
5024 res += (1 << il->hw_params.beacon_time_tsf_bits);
5025 } else
5026 res += (1 << il->hw_params.beacon_time_tsf_bits);
5027
5028 return cpu_to_le32(res);
5029}
5030EXPORT_SYMBOL(il_add_beacon_time);
5031
5032#ifdef CONFIG_PM
5033
/*
 * il_pci_suspend - PM suspend callback
 *
 * Stops the device's APM/DMA engine before the system enters a sleep
 * state.  Always returns 0.
 */
int
il_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct il_priv *il = pci_get_drvdata(pdev);

	/*
	 * This function is called when system goes into suspend state
	 * mac80211 will call il_mac_stop() from the mac80211 suspend function
	 * first but since il_mac_stop() has no knowledge of who the caller is,
	 * it will not call apm_ops.stop() to stop the DMA operation.
	 * Calling apm_ops.stop here to make sure we stop the DMA.
	 */
	il_apm_stop(il);

	return 0;
}
EXPORT_SYMBOL(il_pci_suspend);
5052
5053int
5054il_pci_resume(struct device *device)
5055{
5056 struct pci_dev *pdev = to_pci_dev(device);
5057 struct il_priv *il = pci_get_drvdata(pdev);
5058 bool hw_rfkill = false;
5059
5060 /*
5061 * We disable the RETRY_TIMEOUT register (0x41) to keep
5062 * PCI Tx retries from interfering with C3 CPU state.
5063 */
5064 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
5065
5066 il_enable_interrupts(il);
5067
5068 if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
5069 hw_rfkill = true;
5070
5071 if (hw_rfkill)
5072 set_bit(S_RF_KILL_HW, &il->status);
5073 else
5074 clear_bit(S_RF_KILL_HW, &il->status);
5075
5076 wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);
5077
5078 return 0;
5079}
5080EXPORT_SYMBOL(il_pci_resume);
5081
/*
 * Power-management operations shared by the iwlegacy drivers: every
 * sleep-entry transition funnels into il_pci_suspend and every wake
 * transition into il_pci_resume.
 */
const struct dev_pm_ops il_pm_ops = {
	.suspend = il_pci_suspend,
	.resume = il_pci_resume,
	.freeze = il_pci_suspend,
	.thaw = il_pci_resume,
	.poweroff = il_pci_suspend,
	.restore = il_pci_resume,
};
EXPORT_SYMBOL(il_pm_ops);
5091
5092#endif /* CONFIG_PM */
5093
5094static void
5095il_update_qos(struct il_priv *il, struct il_rxon_context *ctx)
5096{
5097 if (test_bit(S_EXIT_PENDING, &il->status))
5098 return;
5099
5100 if (!ctx->is_active)
5101 return;
5102
5103 ctx->qos_data.def_qos_parm.qos_flags = 0;
5104
5105 if (ctx->qos_data.qos_active)
5106 ctx->qos_data.def_qos_parm.qos_flags |=
5107 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
5108
5109 if (ctx->ht.enabled)
5110 ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
5111
5112 D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
5113 ctx->qos_data.qos_active, ctx->qos_data.def_qos_parm.qos_flags);
5114
5115 il_send_cmd_pdu_async(il, ctx->qos_cmd, sizeof(struct il_qosparam_cmd),
5116 &ctx->qos_data.def_qos_parm, NULL);
5117}
5118
/**
 * il_mac_config - mac80211 config callback
 *
 * Applies channel, HT40, SM PS, power-save and TX-power changes flagged
 * in @changed, then commits a new RXON if the staging configuration now
 * differs from the active one.  Channel programming is deferred while a
 * scan is active; mac80211 re-invokes this with changed == 0 afterwards.
 */
int
il_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct il_priv *il = hw->priv;
	const struct il_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = conf->channel;
	struct il_ht_config *ht_conf = &il->current_ht_config;
	struct il_rxon_context *ctx = &il->ctx;
	unsigned long flags = 0;
	int ret = 0;
	u16 ch;
	int scan_active = 0;
	bool ht_changed = false;

	if (WARN_ON(!il->cfg->ops->legacy))
		return -EOPNOTSUPP;

	mutex_lock(&il->mutex);

	D_MAC80211("enter to channel %d changed 0x%X\n", channel->hw_value,
		   changed);

	if (unlikely(test_bit(S_SCANNING, &il->status))) {
		scan_active = 1;
		D_MAC80211("scan active\n");
	}

	if (changed &
	    (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) {
		/* mac80211 uses static for non-HT which is what we want */
		il->current_ht_config.smps = conf->smps_mode;

		/*
		 * Recalculate chain counts.
		 *
		 * If monitor mode is enabled then mac80211 will
		 * set up the SM PS mode to OFF if an HT channel is
		 * configured.
		 */
		if (il->cfg->ops->hcmd->set_rxon_chain)
			il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
	}

	/* during scanning mac80211 will delay channel setting until
	 * scan finish with changed = 0
	 */
	if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {

		if (scan_active)
			goto set_ch_out;

		ch = channel->hw_value;
		ch_info = il_get_channel_info(il, channel->band, ch);
		if (!il_is_channel_valid(ch_info)) {
			D_MAC80211("leave - invalid channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		/* IBSS is only allowed on channels that permit it. */
		if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
		    !il_is_channel_ibss(ch_info)) {
			D_MAC80211("leave - not IBSS channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		spin_lock_irqsave(&il->lock, flags);

		/* Configure HT40 channels */
		if (ctx->ht.enabled != conf_is_ht(conf)) {
			ctx->ht.enabled = conf_is_ht(conf);
			ht_changed = true;
		}
		if (ctx->ht.enabled) {
			if (conf_is_ht40_minus(conf)) {
				ctx->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_BELOW;
				ctx->ht.is_40mhz = true;
			} else if (conf_is_ht40_plus(conf)) {
				ctx->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
				ctx->ht.is_40mhz = true;
			} else {
				ctx->ht.extension_chan_offset =
				    IEEE80211_HT_PARAM_CHA_SEC_NONE;
				ctx->ht.is_40mhz = false;
			}
		} else
			ctx->ht.is_40mhz = false;

		/*
		 * Default to no protection. Protection mode will
		 * later be set from BSS config in il_ht_conf
		 */
		ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;

		/* if we are switching from ht to 2.4 clear flags
		 * from any ht related info since 2.4 does not
		 * support ht */
		if ((le16_to_cpu(ctx->staging.channel) != ch))
			ctx->staging.flags = 0;

		il_set_rxon_channel(il, channel, ctx);
		il_set_rxon_ht(il, ht_conf);

		il_set_flags_for_band(il, ctx, channel->band, ctx->vif);

		spin_unlock_irqrestore(&il->lock, flags);

		if (il->cfg->ops->legacy->update_bcast_stations)
			ret = il->cfg->ops->legacy->update_bcast_stations(il);

set_ch_out:
		/* The list of supported rates and rate mask can be different
		 * for each band; since the band may have changed, reset
		 * the rate mask to what mac80211 lists */
		il_set_rate(il);
	}

	if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
		ret = il_power_update_mode(il, false);
		if (ret)
			D_MAC80211("Error setting sleep level\n");
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt,
			   conf->power_level);

		il_set_tx_power(il, conf->power_level, false);
	}

	if (!il_is_ready(il)) {
		D_MAC80211("leave - not ready\n");
		goto out;
	}

	/* While scanning, leave the committed RXON untouched. */
	if (scan_active)
		goto out;

	if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
		il_commit_rxon(il, ctx);
	else
		D_INFO("Not re-sending same RXON configuration.\n");
	if (ht_changed)
		il_update_qos(il, ctx);

out:
	D_MAC80211("leave\n");
	mutex_unlock(&il->mutex);
	return ret;
}
EXPORT_SYMBOL(il_mac_config);
5276
5277void
5278il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
5279{
5280 struct il_priv *il = hw->priv;
5281 unsigned long flags;
5282 struct il_rxon_context *ctx = &il->ctx;
5283
5284 if (WARN_ON(!il->cfg->ops->legacy))
5285 return;
5286
5287 mutex_lock(&il->mutex);
5288 D_MAC80211("enter\n");
5289
5290 spin_lock_irqsave(&il->lock, flags);
5291 memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
5292 spin_unlock_irqrestore(&il->lock, flags);
5293
5294 spin_lock_irqsave(&il->lock, flags);
5295
5296 /* new association get rid of ibss beacon skb */
5297 if (il->beacon_skb)
5298 dev_kfree_skb(il->beacon_skb);
5299
5300 il->beacon_skb = NULL;
5301
5302 il->timestamp = 0;
5303
5304 spin_unlock_irqrestore(&il->lock, flags);
5305
5306 il_scan_cancel_timeout(il, 100);
5307 if (!il_is_ready_rf(il)) {
5308 D_MAC80211("leave - not ready\n");
5309 mutex_unlock(&il->mutex);
5310 return;
5311 }
5312
5313 /* we are restarting association process
5314 * clear RXON_FILTER_ASSOC_MSK bit
5315 */
5316 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5317 il_commit_rxon(il, ctx);
5318
5319 il_set_rate(il);
5320
5321 mutex_unlock(&il->mutex);
5322
5323 D_MAC80211("leave\n");
5324}
5325EXPORT_SYMBOL(il_mac_reset_tsf);
5326
5327static void
5328il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
5329{
5330 struct il_ht_config *ht_conf = &il->current_ht_config;
5331 struct ieee80211_sta *sta;
5332 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
5333 struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
5334
5335 D_ASSOC("enter:\n");
5336
5337 if (!ctx->ht.enabled)
5338 return;
5339
5340 ctx->ht.protection =
5341 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
5342 ctx->ht.non_gf_sta_present =
5343 !!(bss_conf->
5344 ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
5345
5346 ht_conf->single_chain_sufficient = false;
5347
5348 switch (vif->type) {
5349 case NL80211_IFTYPE_STATION:
5350 rcu_read_lock();
5351 sta = ieee80211_find_sta(vif, bss_conf->bssid);
5352 if (sta) {
5353 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
5354 int maxstreams;
5355
5356 maxstreams =
5357 (ht_cap->mcs.
5358 tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
5359 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
5360 maxstreams += 1;
5361
5362 if (ht_cap->mcs.rx_mask[1] == 0 &&
5363 ht_cap->mcs.rx_mask[2] == 0)
5364 ht_conf->single_chain_sufficient = true;
5365 if (maxstreams <= 1)
5366 ht_conf->single_chain_sufficient = true;
5367 } else {
5368 /*
5369 * If at all, this can only happen through a race
5370 * when the AP disconnects us while we're still
5371 * setting up the connection, in that case mac80211
5372 * will soon tell us about that.
5373 */
5374 ht_conf->single_chain_sufficient = true;
5375 }
5376 rcu_read_unlock();
5377 break;
5378 case NL80211_IFTYPE_ADHOC:
5379 ht_conf->single_chain_sufficient = true;
5380 break;
5381 default:
5382 break;
5383 }
5384
5385 D_ASSOC("leave\n");
5386}
5387
5388static inline void
5389il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
5390{
5391 struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
5392
5393 /*
5394 * inform the ucode that there is no longer an
5395 * association and that no more packets should be
5396 * sent
5397 */
5398 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5399 ctx->staging.assoc_id = 0;
5400 il_commit_rxon(il, ctx);
5401}
5402
/*
 * il_beacon_update - fetch a fresh beacon from mac80211 and install it
 *
 * Takes ownership of the skb returned by ieee80211_beacon_get(): it is
 * either stored in il->beacon_skb (replacing and freeing the previous
 * one under il->lock) or freed on the error path.  When the RF is ready,
 * finishes by running the device-specific post_associate hook.
 */
static void
il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;
	__le64 timestamp;
	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);

	if (!skb)
		return;

	D_MAC80211("enter\n");

	lockdep_assert_held(&il->mutex);

	if (!il->beacon_ctx) {
		IL_ERR("update beacon but no beacon context!\n");
		dev_kfree_skb(skb);
		return;
	}

	spin_lock_irqsave(&il->lock, flags);

	/* Replace any previously queued beacon. */
	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);

	il->beacon_skb = skb;

	/* Cache the beacon's TSF timestamp for later use. */
	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
	il->timestamp = le64_to_cpu(timestamp);

	D_MAC80211("leave\n");
	spin_unlock_irqrestore(&il->lock, flags);

	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - RF not ready\n");
		return;
	}

	il->cfg->ops->legacy->post_associate(il);
}
5444
/*
 * il_mac_bss_info_changed - mac80211 bss_info_changed callback
 *
 * Processes the BSS_CHANGED_* bits in @changes one by one: QoS state,
 * beacon-context ownership, BSSID, beacon content (IBSS), ERP preamble
 * and CTS protection flags, HT configuration, association state and
 * IBSS station management.  When changes arrive while associated, the
 * staging RXON is pushed via il_send_rxon_assoc().
 */
void
il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			struct ieee80211_bss_conf *bss_conf, u32 changes)
{
	struct il_priv *il = hw->priv;
	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
	int ret;

	if (WARN_ON(!il->cfg->ops->legacy))
		return;

	D_MAC80211("changes = 0x%X\n", changes);

	mutex_lock(&il->mutex);

	if (!il_is_alive(il)) {
		mutex_unlock(&il->mutex);
		return;
	}

	if (changes & BSS_CHANGED_QOS) {
		unsigned long flags;

		spin_lock_irqsave(&il->lock, flags);
		ctx->qos_data.qos_active = bss_conf->qos;
		il_update_qos(il, ctx);
		spin_unlock_irqrestore(&il->lock, flags);
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		/*
		 * the add_interface code must make sure we only ever
		 * have a single interface that could be beaconing at
		 * any time.
		 */
		if (vif->bss_conf.enable_beacon)
			il->beacon_ctx = ctx;
		else
			il->beacon_ctx = NULL;
	}

	if (changes & BSS_CHANGED_BSSID) {
		D_MAC80211("BSSID %pM\n", bss_conf->bssid);

		/*
		 * If there is currently a HW scan going on in the
		 * background then we need to cancel it else the RXON
		 * below/in post_associate will fail.
		 */
		if (il_scan_cancel_timeout(il, 100)) {
			IL_WARN("Aborted scan still in progress after 100ms\n");
			D_MAC80211("leaving - scan abort failed.\n");
			mutex_unlock(&il->mutex);
			return;
		}

		/* mac80211 only sets assoc when in STATION mode */
		if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
			memcpy(ctx->staging.bssid_addr, bss_conf->bssid,
			       ETH_ALEN);

			/* currently needed in a few places */
			memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
		} else {
			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		}

	}

	/*
	 * This needs to be after setting the BSSID in case
	 * mac80211 decides to do both changes at once because
	 * it will invoke post_associate.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON))
		il_beacon_update(hw, vif);

	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
		D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble);
		if (bss_conf->use_short_preamble)
			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	}

	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
		D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
		/* TGG protection only applies outside the 5 GHz band. */
		if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ)
			ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
		if (bss_conf->use_cts_prot)
			ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
		else
			ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
	}

	if (changes & BSS_CHANGED_BASIC_RATES) {
		/* XXX use this information
		 *
		 * To do that, remove code from il_set_rate() and put something
		 * like this here:
		 *
		 if (A-band)
		 ctx->staging.ofdm_basic_rates =
		 bss_conf->basic_rates;
		 else
		 ctx->staging.ofdm_basic_rates =
		 bss_conf->basic_rates >> 4;
		 ctx->staging.cck_basic_rates =
		 bss_conf->basic_rates & 0xF;
		 */
	}

	if (changes & BSS_CHANGED_HT) {
		il_ht_conf(il, vif);

		if (il->cfg->ops->hcmd->set_rxon_chain)
			il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
	}

	if (changes & BSS_CHANGED_ASSOC) {
		D_MAC80211("ASSOC %d\n", bss_conf->assoc);
		if (bss_conf->assoc) {
			il->timestamp = bss_conf->timestamp;

			if (!il_is_rfkill(il))
				il->cfg->ops->legacy->post_associate(il);
		} else
			il_set_no_assoc(il, vif);
	}

	if (changes && il_is_associated_ctx(ctx) && bss_conf->aid) {
		D_MAC80211("Changes (%#x) while associated\n", changes);
		ret = il_send_rxon_assoc(il, ctx);
		if (!ret) {
			/* Sync active_rxon with latest change. */
			memcpy((void *)&ctx->active, &ctx->staging,
			       sizeof(struct il_rxon_cmd));
		}
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		if (vif->bss_conf.enable_beacon) {
			memcpy(ctx->staging.bssid_addr, bss_conf->bssid,
			       ETH_ALEN);
			memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
			il->cfg->ops->legacy->config_ap(il);
		} else
			il_set_no_assoc(il, vif);
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret =
		    il->cfg->ops->legacy->manage_ibss_station(il, vif,
							      bss_conf->
							      ibss_joined);
		if (ret)
			IL_ERR("failed to %s IBSS station %pM\n",
			       bss_conf->ibss_joined ? "add" : "remove",
			       bss_conf->bssid);
	}

	mutex_unlock(&il->mutex);

	D_MAC80211("leave\n");
}
EXPORT_SYMBOL(il_mac_bss_info_changed);
5613
/*
 * il_isr - top-half interrupt handler
 *
 * Masks device interrupts (without acking them), reads the pending
 * interrupt and FH status registers, and schedules the IRQ tasklet when
 * there is real work.  Spurious/shared interrupts re-enable interrupts
 * and return IRQ_NONE; a vanished device (all-ones reads) is reported
 * but still returns IRQ_HANDLED.
 */
irqreturn_t
il_isr(int irq, void *data)
{
	struct il_priv *il = data;
	u32 inta, inta_mask;
	u32 inta_fh;
	unsigned long flags;
	if (!il)
		return IRQ_NONE;

	spin_lock_irqsave(&il->lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = _il_rd(il, CSR_INT_MASK);	/* just for debug */
	_il_wr(il, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = _il_rd(il, CSR_INT);
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta && !inta_fh) {
		D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

	D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask,
	      inta_fh);

	/* Scheduler interrupts are handled implicitly; drop the bit here. */
	inta &= ~CSR_INT_BIT_SCD;

	/* il_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta || inta_fh))
		tasklet_schedule(&il->irq_tasklet);

unplugged:
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only Re-enable if disabled by irq */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_NONE;
}
EXPORT_SYMBOL(il_isr);
5674
5675/*
5676 * il_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
5677 * function.
5678 */
5679void
5680il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
5681 __le16 fc, __le32 *tx_flags)
5682{
5683 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
5684 *tx_flags |= TX_CMD_FLG_RTS_MSK;
5685 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
5686 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5687
5688 if (!ieee80211_is_mgmt(fc))
5689 return;
5690
5691 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
5692 case cpu_to_le16(IEEE80211_STYPE_AUTH):
5693 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
5694 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
5695 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
5696 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5697 *tx_flags |= TX_CMD_FLG_CTS_MSK;
5698 break;
5699 }
5700 } else if (info->control.rates[0].
5701 flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
5702 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5703 *tx_flags |= TX_CMD_FLG_CTS_MSK;
5704 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5705 }
5706}
5707EXPORT_SYMBOL(il_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
new file mode 100644
index 000000000000..1bc0b02f559c
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -0,0 +1,3424 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26#ifndef __il_core_h__
27#define __il_core_h__
28
29#include <linux/interrupt.h>
30#include <linux/pci.h> /* for struct pci_device_id */
31#include <linux/kernel.h>
32#include <linux/leds.h>
33#include <linux/wait.h>
34#include <net/mac80211.h>
35#include <net/ieee80211_radiotap.h>
36
37#include "commands.h"
38#include "csr.h"
39#include "prph.h"
40
41struct il_host_cmd;
42struct il_cmd;
43struct il_tx_queue;
44
45#define IL_ERR(f, a...) dev_err(&il->pci_dev->dev, f, ## a)
46#define IL_WARN(f, a...) dev_warn(&il->pci_dev->dev, f, ## a)
47#define IL_INFO(f, a...) dev_info(&il->pci_dev->dev, f, ## a)
48
49#define RX_QUEUE_SIZE 256
50#define RX_QUEUE_MASK 255
51#define RX_QUEUE_SIZE_LOG 8
52
53/*
54 * RX related structures and functions
55 */
56#define RX_FREE_BUFFERS 64
57#define RX_LOW_WATERMARK 8
58
59#define U32_PAD(n) ((4-(n))&0x3)
60
61/* CT-KILL constants */
62#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
63
64/* Default noise level to report when noise measurement is not available.
65 * This may be because we're:
66 * 1) Not associated (4965, no beacon stats being sent to driver)
67 * 2) Scanning (noise measurement does not apply to associated channel)
68 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
69 * Use default noise value of -127 ... this is below the range of measurable
70 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
71 * Also, -127 works better than 0 when averaging frames with/without
72 * noise info (e.g. averaging might be done in app); measured dBm values are
73 * always negative ... using a negative value as the default keeps all
74 * averages within an s8's (used in some apps) range of negative values. */
75#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
76
77/*
78 * RTS threshold here is total size [2347] minus 4 FCS bytes
79 * Per spec:
80 * a value of 0 means RTS on all data/management packets
81 * a value > max MSDU size means no RTS
82 * else RTS for data/management frames where MPDU is larger
83 * than RTS value.
84 */
85#define DEFAULT_RTS_THRESHOLD 2347U
86#define MIN_RTS_THRESHOLD 0U
87#define MAX_RTS_THRESHOLD 2347U
88#define MAX_MSDU_SIZE 2304U
89#define MAX_MPDU_SIZE 2346U
90#define DEFAULT_BEACON_INTERVAL 100U
91#define DEFAULT_SHORT_RETRY_LIMIT 7U
92#define DEFAULT_LONG_RETRY_LIMIT 4U
93
/* One Rx buffer: a DMA-mapped page plus its free/used list linkage
 * (rx_free / rx_used FIFOs of struct il_rx_queue). */
struct il_rx_buf {
	dma_addr_t page_dma;	/* bus address of @page */
	struct page *page;	/* receive buffer page */
	struct list_head list;	/* link in rxq rx_free / rx_used lists */
};
99
100#define rxb_addr(r) page_address(r->page)
101
102/* defined below */
103struct il_device_cmd;
104
/*
 * Driver-private bookkeeping kept for each entry of the host
 * command / Tx queues (parallel to the il_device_cmd array).
 */
struct il_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct il_host_cmd *source;
	/*
	 * only for ASYNC commands
	 * (which is somewhat stupid -- look at common.c for instance
	 * which duplicates a bunch of code because the callback isn't
	 * invoked for SYNC commands, if it were and its result passed
	 * through it would be simpler...)
	 */
	void (*callback) (struct il_priv *il, struct il_device_cmd *cmd,
			  struct il_rx_pkt *pkt);

	/* The CMD_SIZE_HUGE flag bit indicates that the command
	 * structure is stored at the end of the shared queue memory. */
	u32 flags;

	/* DMA mapping of the command buffer, saved here so it can be
	 * unmapped when the command completes. */
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);
};
125
/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues
 */
struct il_queue {
	int n_bd;		/* number of BDs in this queue */
	int write_ptr;		/* 1-st empty entry (idx) host_w */
	int read_ptr;		/* last used entry (idx) host_r */
	/* NOTE(review): comment "use for monitoring and recovering the
	 * stuck queue" originally sat here but describes no field of this
	 * struct -- stuck-queue tracking appears to live in
	 * il_tx_queue::time_stamp; confirm before relying on it */
	dma_addr_t dma_addr;	/* physical addr for BD's */
	int n_win;		/* safe queue win */
	u32 id;
	int low_mark;		/* low watermark, resume queue if free
				 * space more than this */
	int high_mark;		/* high watermark, stop queue if free
				 * space less than this */
};
144
145/* One for each TFD */
146struct il_tx_info {
147 struct sk_buff *skb;
148 struct il_rxon_context *ctx;
149};
150
/**
 * struct il_tx_queue - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: base of circular buffer of TFDs
 * @cmd: array of command/TX buffer pointers
 * @meta: array of meta data for each command/tx buffer
 * @txb: array of per-TFD driver data
 * @time_stamp: time (in jiffies) of last read_ptr change
 * @need_update: indicates need to update read/write idx
 * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
 * @active: non-zero while the queue is set up and usable
 * @swq_id: software queue id
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 */
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

struct il_tx_queue {
	struct il_queue q;
	void *tfds;
	struct il_device_cmd **cmd;
	struct il_cmd_meta *meta;
	struct il_tx_info *txb;
	unsigned long time_stamp;
	u8 need_update;
	u8 sched_retry;
	u8 active;
	u8 swq_id;
};
181
182/*
183 * EEPROM access time values:
184 *
185 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
186 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
187 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
188 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
189 */
190#define IL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
191
192#define IL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
193#define IL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
194
195/*
196 * Regulatory channel usage flags in EEPROM struct il4965_eeprom_channel.flags.
197 *
198 * IBSS and/or AP operation is allowed *only* on those channels with
199 * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
200 * RADAR detection is not supported by the 4965 driver, but is a
201 * requirement for establishing a new network for legal operation on channels
202 * requiring RADAR detection or restricting ACTIVE scanning.
203 *
204 * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
205 * It only indicates that 20 MHz channel use is supported; HT40 channel
206 * usage is indicated by a separate set of regulatory flags for each
207 * HT40 channel pair.
208 *
209 * NOTE: Using a channel inappropriately will result in a uCode error!
210 */
211#define IL_NUM_TX_CALIB_GROUPS 5
212enum {
213 EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
214 EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
215 /* Bit 2 Reserved */
216 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
217 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
218 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
219 /* Bit 6 Reserved (was Narrow Channel) */
220 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
221};
222
223/* SKU Capabilities */
224/* 3945 only */
225#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
226#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
227
228/* *regulatory* channel data format in eeprom, one for each channel.
229 * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
230struct il_eeprom_channel {
231 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
232 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
233} __packed;
234
235/* 3945 Specific */
236#define EEPROM_3945_EEPROM_VERSION (0x2f)
237
238/* 4965 has two radio transmitters (and 3 radio receivers) */
239#define EEPROM_TX_POWER_TX_CHAINS (2)
240
241/* 4965 has room for up to 8 sets of txpower calibration data */
242#define EEPROM_TX_POWER_BANDS (8)
243
244/* 4965 factory calibration measures txpower gain settings for
245 * each of 3 target output levels */
246#define EEPROM_TX_POWER_MEASUREMENTS (3)
247
248/* 4965 Specific */
249/* 4965 driver does not work with txpower calibration version < 5 */
250#define EEPROM_4965_TX_POWER_VERSION (5)
251#define EEPROM_4965_EEPROM_VERSION (0x2f)
252#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
253#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
254#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
255#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
256
257/* 2.4 GHz */
258extern const u8 il_eeprom_band_1[14];
259
260/*
261 * factory calibration data for one txpower level, on one channel,
262 * measured on one of the 2 tx chains (radio transmitter and associated
263 * antenna). EEPROM contains:
264 *
265 * 1) Temperature (degrees Celsius) of device when measurement was made.
266 *
267 * 2) Gain table idx used to achieve the target measurement power.
268 * This refers to the "well-known" gain tables (see 4965.h).
269 *
270 * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
271 *
272 * 4) RF power amplifier detector level measurement (not used).
273 */
274struct il_eeprom_calib_measure {
275 u8 temperature; /* Device temperature (Celsius) */
276 u8 gain_idx; /* Index into gain table */
277 u8 actual_pow; /* Measured RF output power, half-dBm */
278 s8 pa_det; /* Power amp detector level (not used) */
279} __packed;
280
281/*
282 * measurement set for one channel. EEPROM contains:
283 *
284 * 1) Channel number measured
285 *
286 * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
287 * (a.k.a. "tx chains") (6 measurements altogether)
288 */
289struct il_eeprom_calib_ch_info {
290 u8 ch_num;
291 struct il_eeprom_calib_measure
292 measurements[EEPROM_TX_POWER_TX_CHAINS]
293 [EEPROM_TX_POWER_MEASUREMENTS];
294} __packed;
295
296/*
297 * txpower subband info.
298 *
299 * For each frequency subband, EEPROM contains the following:
300 *
301 * 1) First and last channels within range of the subband. "0" values
302 * indicate that this sample set is not being used.
303 *
304 * 2) Sample measurement sets for 2 channels close to the range endpoints.
305 */
306struct il_eeprom_calib_subband_info {
307 u8 ch_from; /* channel number of lowest channel in subband */
308 u8 ch_to; /* channel number of highest channel in subband */
309 struct il_eeprom_calib_ch_info ch1;
310 struct il_eeprom_calib_ch_info ch2;
311} __packed;
312
313/*
314 * txpower calibration info. EEPROM contains:
315 *
316 * 1) Factory-measured saturation power levels (maximum levels at which
317 * tx power amplifier can output a signal without too much distortion).
318 * There is one level for 2.4 GHz band and one for 5 GHz band. These
319 * values apply to all channels within each of the bands.
320 *
321 * 2) Factory-measured power supply voltage level. This is assumed to be
322 * constant (i.e. same value applies to all channels/bands) while the
323 * factory measurements are being made.
324 *
325 * 3) Up to 8 sets of factory-measured txpower calibration values.
326 * These are for different frequency ranges, since txpower gain
327 * characteristics of the analog radio circuitry vary with frequency.
328 *
329 * Not all sets need to be filled with data;
330 * struct il_eeprom_calib_subband_info contains range of channels
331 * (0 if unused) for each set of data.
332 */
333struct il_eeprom_calib_info {
334 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
335 u8 saturation_power52; /* half-dBm */
336 __le16 voltage; /* signed */
337 struct il_eeprom_calib_subband_info band_info[EEPROM_TX_POWER_BANDS];
338} __packed;
339
340/* General */
341#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
342#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
343#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
344#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
345#define EEPROM_VERSION (2*0x44) /* 2 bytes */
346#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
347#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
348#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
349#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
350#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
351
352/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
353#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
354#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
355#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
356#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
357#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
358#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
359
360#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
361#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
362
363/*
364 * Per-channel regulatory data.
365 *
366 * Each channel that *might* be supported by iwl has a fixed location
367 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
368 * txpower (MSB).
369 *
370 * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
371 * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
372 *
373 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
374 */
375#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
376#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
377#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
378
379/*
380 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
381 * 5.0 GHz channels 7, 8, 11, 12, 16
382 * (4915-5080MHz) (none of these is ever supported)
383 */
384#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
385#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
386
387/*
388 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
389 * (5170-5320MHz)
390 */
391#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
392#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
393
394/*
395 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
396 * (5500-5700MHz)
397 */
398#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
399#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
400
401/*
402 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
403 * (5725-5825MHz)
404 */
405#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
406#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
407
408/*
409 * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
410 *
411 * The channel listed is the center of the lower 20 MHz half of the channel.
412 * The overall center frequency is actually 2 channels (10 MHz) above that,
413 * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
414 * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
415 * and the overall HT40 channel width centers on channel 3.
416 *
417 * NOTE: The RXON command uses 20 MHz channel numbers to specify the
418 * control channel to which to tune. RXON also specifies whether the
419 * control channel is the upper or lower half of a HT40 channel.
420 *
421 * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
422 */
423#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
424
425/*
426 * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
427 * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
428 */
429#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
430
431#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
432
/* Per-hardware EEPROM access hooks and regulatory band layout. */
struct il_eeprom_ops {
	/* EEPROM offsets of the seven regulatory band channel tables
	 * (EEPROM_REGULATORY_BAND_* / *_HT40_CHANNELS above) */
	const u32 regulatory_bands[7];
	int (*acquire_semaphore) (struct il_priv *il);
	void (*release_semaphore) (struct il_priv *il);
};
438
439int il_eeprom_init(struct il_priv *il);
440void il_eeprom_free(struct il_priv *il);
441const u8 *il_eeprom_query_addr(const struct il_priv *il, size_t offset);
442u16 il_eeprom_query16(const struct il_priv *il, size_t offset);
443int il_init_channel_map(struct il_priv *il);
444void il_free_channel_map(struct il_priv *il);
445const struct il_channel_info *il_get_channel_info(const struct il_priv *il,
446 enum ieee80211_band band,
447 u16 channel);
448
449#define IL_NUM_SCAN_RATES (2)
450
451struct il4965_channel_tgd_info {
452 u8 type;
453 s8 max_power;
454};
455
456struct il4965_channel_tgh_info {
457 s64 last_radar_time;
458};
459
460#define IL4965_MAX_RATE (33)
461
462struct il3945_clip_group {
463 /* maximum power level to prevent clipping for each rate, derived by
464 * us from this band's saturation power in EEPROM */
465 const s8 clip_powers[IL_MAX_RATES];
466};
467
468/* current Tx power values to use, one for each rate for each channel.
469 * requested power is limited by:
470 * -- regulatory EEPROM limits for this channel
471 * -- hardware capabilities (clip-powers)
472 * -- spectrum management
473 * -- user preference (e.g. iwconfig)
474 * when requested power is set, base power idx must also be set. */
475struct il3945_channel_power_info {
476 struct il3945_tx_power tpc; /* actual radio and DSP gain settings */
477 s8 power_table_idx; /* actual (compenst'd) idx into gain table */
478 s8 base_power_idx; /* gain idx for power at factory temp. */
479 s8 requested_power; /* power (dBm) requested for this chnl/rate */
480};
481
482/* current scan Tx power values to use, one for each scan rate for each
483 * channel. */
484struct il3945_scan_power_info {
485 struct il3945_tx_power tpc; /* actual radio and DSP gain settings */
486 s8 power_table_idx; /* actual (compenst'd) idx into gain table */
487 s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
488};
489
490/*
491 * One for each channel, holds all channel setup data
492 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
493 * with one another!
494 */
495struct il_channel_info {
496 struct il4965_channel_tgd_info tgd;
497 struct il4965_channel_tgh_info tgh;
498 struct il_eeprom_channel eeprom; /* EEPROM regulatory limit */
499 struct il_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
500 * HT40 channel */
501
502 u8 channel; /* channel number */
503 u8 flags; /* flags copied from EEPROM */
504 s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
505 s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
506 s8 min_power; /* always 0 */
507 s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
508
509 u8 group_idx; /* 0-4, maps channel to group1/2/3/4/5 */
510 u8 band_idx; /* 0-4, maps channel to band1/2/3/4/5 */
511 enum ieee80211_band band;
512
513 /* HT40 channel info */
514 s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
515 u8 ht40_flags; /* flags copied from EEPROM */
516 u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
517
518 /* Radio/DSP gain settings for each "normal" data Tx rate.
519 * These include, in addition to RF and DSP gain, a few fields for
520 * remembering/modifying gain settings (idxes). */
521 struct il3945_channel_power_info power_info[IL4965_MAX_RATE];
522
523 /* Radio/DSP gain settings for each scan rate, for directed scans. */
524 struct il3945_scan_power_info scan_pwr_info[IL_NUM_SCAN_RATES];
525};
526
527#define IL_TX_FIFO_BK 0 /* shared */
528#define IL_TX_FIFO_BE 1
529#define IL_TX_FIFO_VI 2 /* shared */
530#define IL_TX_FIFO_VO 3
531#define IL_TX_FIFO_UNUSED -1
532
533/* Minimum number of queues. MAX_NUM is defined in hw specific files.
534 * Set the minimum to accommodate the 4 standard TX queues, 1 command
535 * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
536#define IL_MIN_NUM_QUEUES 10
537
538#define IL_DEFAULT_CMD_QUEUE_NUM 4
539
540#define IEEE80211_DATA_LEN 2304
541#define IEEE80211_4ADDR_LEN 30
542#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
543#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
544
545struct il_frame {
546 union {
547 struct ieee80211_hdr frame;
548 struct il_tx_beacon_cmd beacon;
549 u8 raw[IEEE80211_FRAME_LEN];
550 u8 cmd[360];
551 } u;
552 struct list_head list;
553};
554
555#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
556#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
557#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
558
559enum {
560 CMD_SYNC = 0,
561 CMD_SIZE_NORMAL = 0,
562 CMD_NO_SKB = 0,
563 CMD_SIZE_HUGE = (1 << 0),
564 CMD_ASYNC = (1 << 1),
565 CMD_WANT_SKB = (1 << 2),
566 CMD_MAPPED = (1 << 3),
567};
568
569#define DEF_CMD_PAYLOAD_SIZE 320
570
/**
 * struct il_device_cmd
 * @hdr: uCode API command header
 * @cmd: command payload; the largest fixed member (payload[] /
 *	struct il_tx_cmd) bounds the per-slot allocation size
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for a scan command
 * (which is relatively huge; space is allocated separately).
 */
struct il_device_cmd {
	struct il_cmd_header hdr;	/* uCode API */
	union {
		u32 flags;
		u8 val8;
		u16 val16;
		u32 val32;
		struct il_tx_cmd tx;
		u8 payload[DEF_CMD_PAYLOAD_SIZE];
	} __packed cmd;
} __packed;
589
590#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct il_device_cmd))
591
/* Host-side descriptor for one command submitted to the device. */
struct il_host_cmd {
	const void *data;	/* command payload */
	unsigned long reply_page;	/* reply buffer page, when a reply
					 * is requested (see CMD_WANT_SKB) */
	/* async completion hook, see il_cmd_meta::callback */
	void (*callback) (struct il_priv *il, struct il_device_cmd *cmd,
			  struct il_rx_pkt *pkt);
	u32 flags;		/* CMD_* flags (enum above) */
	u16 len;		/* payload length in bytes */
	u8 id;			/* command id */
};
601
602#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
603#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
604#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
605
/**
 * struct il_rx_queue - Rx queue
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @pool: backing storage for all Rx buffers (ring entries plus spares)
 * @queue: per-slot pointers into @pool (presumably the buffers currently
 *	handed to hardware -- confirm against rx handling code)
 * @read: Shared idx to newest available Rx buffer
 * @write: Shared idx to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @write_actual: last write idx pushed to the hardware (TODO confirm
 *	against rx restock code)
 * @rx_free: list of free SKBs for use
 * @rx_used: List of Rx buffers with no SKB
 * @need_update: flag to indicate we need to update read/write idx
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects this queue's indexes and buffer lists
 *
 * NOTE: rx_free and rx_used are used as a FIFO for il_rx_bufs
 */
struct il_rx_queue {
	__le32 *bd;
	dma_addr_t bd_dma;
	struct il_rx_buf pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
	struct il_rx_buf *queue[RX_QUEUE_SIZE];
	u32 read;
	u32 write;
	u32 free_count;
	u32 write_actual;
	struct list_head rx_free;
	struct list_head rx_used;
	int need_update;
	struct il_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
};
637
638#define IL_SUPPORTED_RATES_IE_LEN 8
639
640#define MAX_TID_COUNT 9
641
642#define IL_INVALID_RATE 0xFF
643#define IL_INVALID_VALUE -1
644
/**
 * struct il_ht_agg -- aggregation status while waiting for block-ack
 * @txq_id: Tx queue used for Tx attempt
 * @frame_count: # frames attempted by Tx command
 * @wait_for_ba: Expect block-ack before next Tx reply
 * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx win
 * @bitmap: one bit for each frame pending ACK in Tx win
 * @rate_n_flags: Rate at which Tx was attempted
 * @state: aggregation session state (IL_AGG_* / IL_EMPTYING_* below)
 *
 * If C_TX indicates that aggregation was attempted, driver must wait
 * for block ack (N_COMPRESSED_BA). This struct stores tx reply info
 * until block ack arrives.
 */
struct il_ht_agg {
	u16 txq_id;
	u16 frame_count;
	u16 wait_for_ba;
	u16 start_idx;
	u64 bitmap;
	u32 rate_n_flags;
#define IL_AGG_OFF 0
#define IL_AGG_ON 1
#define IL_EMPTYING_HW_QUEUE_ADDBA 2
#define IL_EMPTYING_HW_QUEUE_DELBA 3
	u8 state;
};
672
673struct il_tid_data {
674 u16 seq_number; /* 4965 only */
675 u16 tfds_in_queue;
676 struct il_ht_agg agg;
677};
678
679struct il_hw_key {
680 u32 cipher;
681 int keylen;
682 u8 keyidx;
683 u8 key[32];
684};
685
686union il_ht_rate_supp {
687 u16 rates;
688 struct {
689 u8 siso_rate;
690 u8 mimo_rate;
691 };
692};
693
694#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
695#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
696#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
697#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
698#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
699#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
700#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
701
702/*
703 * Maximal MPDU density for TX aggregation
704 * 4 - 2us density
705 * 5 - 4us density
706 * 6 - 8us density
707 * 7 - 16us density
708 */
709#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
710#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
711#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
712#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
713#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
714#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
715#define CFG_HT_MPDU_DENSITY_MIN (0x1)
716
717struct il_ht_config {
718 bool single_chain_sufficient;
719 enum ieee80211_smps_mode smps; /* current smps mode */
720};
721
722/* QoS structures */
723struct il_qos_info {
724 int qos_active;
725 struct il_qosparam_cmd def_qos_parm;
726};
727
728/*
729 * Structure should be accessed with sta_lock held. When station addition
730 * is in progress (IL_STA_UCODE_INPROGRESS) it is possible to access only
731 * the commands (il_addsta_cmd and il_link_quality_cmd) without
732 * sta_lock held.
733 */
734struct il_station_entry {
735 struct il_addsta_cmd sta;
736 struct il_tid_data tid[MAX_TID_COUNT];
737 u8 used, ctxid;
738 struct il_hw_key keyinfo;
739 struct il_link_quality_cmd *lq;
740};
741
742struct il_station_priv_common {
743 struct il_rxon_context *ctx;
744 u8 sta_id;
745};
746
747/**
748 * struct il_vif_priv - driver's ilate per-interface information
749 *
750 * When mac80211 allocates a virtual interface, it can allocate
751 * space for us to put data into.
752 */
753struct il_vif_priv {
754 struct il_rxon_context *ctx;
755 u8 ibss_bssid_sta_id;
756};
757
758/* one for each uCode image (inst/data, boot/init/runtime) */
759struct fw_desc {
760 void *v_addr; /* access by driver */
761 dma_addr_t p_addr; /* access by card's busmaster DMA */
762 u32 len; /* bytes */
763};
764
765/* uCode file layout */
766struct il_ucode_header {
767 __le32 ver; /* major/minor/API/serial */
768 struct {
769 __le32 inst_size; /* bytes of runtime code */
770 __le32 data_size; /* bytes of runtime data */
771 __le32 init_size; /* bytes of init code */
772 __le32 init_data_size; /* bytes of init data */
773 __le32 boot_size; /* bytes of bootstrap code */
774 u8 data[0]; /* in same order as sizes */
775 } v1;
776};
777
778struct il4965_ibss_seq {
779 u8 mac[ETH_ALEN];
780 u16 seq_num;
781 u16 frag_num;
782 unsigned long packet_time;
783 struct list_head list;
784};
785
786struct il_sensitivity_ranges {
787 u16 min_nrg_cck;
788 u16 max_nrg_cck;
789
790 u16 nrg_th_cck;
791 u16 nrg_th_ofdm;
792
793 u16 auto_corr_min_ofdm;
794 u16 auto_corr_min_ofdm_mrc;
795 u16 auto_corr_min_ofdm_x1;
796 u16 auto_corr_min_ofdm_mrc_x1;
797
798 u16 auto_corr_max_ofdm;
799 u16 auto_corr_max_ofdm_mrc;
800 u16 auto_corr_max_ofdm_x1;
801 u16 auto_corr_max_ofdm_mrc_x1;
802
803 u16 auto_corr_max_cck;
804 u16 auto_corr_max_cck_mrc;
805 u16 auto_corr_min_cck;
806 u16 auto_corr_min_cck_mrc;
807
808 u16 barker_corr_th_min;
809 u16 barker_corr_th_min_mrc;
810 u16 nrg_th_cca;
811};
812
813#define KELVIN_TO_CELSIUS(x) ((x)-273)
814#define CELSIUS_TO_KELVIN(x) ((x)+273)
815
/**
 * struct il_hw_params
 * @max_txq_num: Max # Tx queues supported
 * @dma_chnl_num: Number of Tx DMA/FIFO channels
 * @scd_bc_tbls_size: size of scheduler byte count tables
 * @tfd_size: TFD size
 * @tx/rx_chains_num: Number of TX/RX chains
 * @valid_tx/rx_ant: usable antennas
 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
 * @max_rxq_log: Log-base-2 of max_rxq_size
 * @rx_page_order: Rx buffer page order
 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
 * @max_stations:
 * @ht40_channel: is 40MHz width possible per band:
 *	BIT(IEEE80211_BAND_2GHZ) / BIT(IEEE80211_BAND_5GHZ)
 * @max_beacon_itrvl: maximum beacon interval, in units of 1024 ms
 * @max_inst/data/bsm_size: ucode image section size limits
 * @ct_kill_threshold: temperature threshold (in hw-dependent units)
 * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
 * @sens: range of sensitivity values (struct il_sensitivity_ranges)
 */
struct il_hw_params {
	u8 max_txq_num;
	u8 dma_chnl_num;
	u16 scd_bc_tbls_size;
	u32 tfd_size;
	u8 tx_chains_num;
	u8 rx_chains_num;
	u8 valid_tx_ant;
	u8 valid_rx_ant;
	u16 max_rxq_size;
	u16 max_rxq_log;
	u32 rx_page_order;
	u32 rx_wrt_ptr_reg;
	u8 max_stations;
	u8 ht40_channel;
	u8 max_beacon_itrvl;	/* in 1024 ms */
	u32 max_inst_size;
	u32 max_data_size;
	u32 max_bsm_size;
	u32 ct_kill_threshold;	/* value in hw-dependent units */
	u16 beacon_time_tsf_bits;
	const struct il_sensitivity_ranges *sens;
};
860
861/******************************************************************************
862 *
863 * Functions implemented in core module which are forward declared here
864 * for use by iwl-[4-5].c
865 *
866 * NOTE: The implementation of these functions are not hardware specific
867 * which is why they are in the core module files.
868 *
869 * Naming convention --
870 * il_ <-- Is part of iwlwifi
871 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
872 * il4965_bg_ <-- Called from work queue context
873 * il4965_mac_ <-- mac80211 callback
874 *
875 ****************************************************************************/
876extern void il4965_update_chain_flags(struct il_priv *il);
877extern const u8 il_bcast_addr[ETH_ALEN];
878extern int il_queue_space(const struct il_queue *q);
879static inline int
880il_queue_used(const struct il_queue *q, int i)
881{
882 return q->write_ptr >= q->read_ptr ? (i >= q->read_ptr &&
883 i < q->write_ptr) : !(i <
884 q->read_ptr
885 && i >=
886 q->
887 write_ptr);
888}
889
890static inline u8
891il_get_cmd_idx(struct il_queue *q, u32 idx, int is_huge)
892{
893 /*
894 * This is for init calibration result and scan command which
895 * required buffer > TFD_MAX_PAYLOAD_SIZE,
896 * the big buffer at end of command array
897 */
898 if (is_huge)
899 return q->n_win; /* must be power of 2 */
900
901 /* Otherwise, use normal size buffers */
902 return idx & (q->n_win - 1);
903}
904
905struct il_dma_ptr {
906 dma_addr_t dma;
907 void *addr;
908 size_t size;
909};
910
911#define IL_OPERATION_MODE_AUTO 0
912#define IL_OPERATION_MODE_HT_ONLY 1
913#define IL_OPERATION_MODE_MIXED 2
914#define IL_OPERATION_MODE_20MHZ 3
915
916#define IL_TX_CRC_SIZE 4
917#define IL_TX_DELIMITER_SIZE 4
918
919#define TX_POWER_IL_ILLEGAL_VOLTAGE -10000
920
921/* Sensitivity and chain noise calibration */
922#define INITIALIZATION_VALUE 0xFFFF
923#define IL4965_CAL_NUM_BEACONS 20
924#define IL_CAL_NUM_BEACONS 16
925#define MAXIMUM_ALLOWED_PATHLOSS 15
926
927#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
928
929#define MAX_FA_OFDM 50
930#define MIN_FA_OFDM 5
931#define MAX_FA_CCK 50
932#define MIN_FA_CCK 5
933
934#define AUTO_CORR_STEP_OFDM 1
935
936#define AUTO_CORR_STEP_CCK 3
937#define AUTO_CORR_MAX_TH_CCK 160
938
939#define NRG_DIFF 2
940#define NRG_STEP_CCK 2
941#define NRG_MARGIN 8
942#define MAX_NUMBER_CCK_NO_FA 100
943
944#define AUTO_CORR_CCK_MIN_VAL_DEF (125)
945
946#define CHAIN_A 0
947#define CHAIN_B 1
948#define CHAIN_C 2
949#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
950#define ALL_BAND_FILTER 0xFF00
951#define IN_BAND_FILTER 0xFF
952#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
953
954#define NRG_NUM_PREV_STAT_L 20
955#define NUM_RX_CHAINS 3
956
957enum il4965_false_alarm_state {
958 IL_FA_TOO_MANY = 0,
959 IL_FA_TOO_FEW = 1,
960 IL_FA_GOOD_RANGE = 2,
961};
962
963enum il4965_chain_noise_state {
964 IL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */
965 IL_CHAIN_NOISE_ACCUMULATE,
966 IL_CHAIN_NOISE_CALIBRATED,
967 IL_CHAIN_NOISE_DONE,
968};
969
970enum il4965_calib_enabled_state {
971 IL_CALIB_DISABLED = 0, /* must be 0 */
972 IL_CALIB_ENABLED = 1,
973};
974
975/*
976 * enum il_calib
977 * defines the order in which results of initial calibrations
978 * should be sent to the runtime uCode
979 */
980enum il_calib {
981 IL_CALIB_MAX,
982};
983
984/* Opaque calibration results */
struct il_calib_result {
	void *buf;		/* opaque calibration result payload */
	size_t buf_len;		/* payload length in bytes */
};
989
/* Which uCode image is currently selected/loaded. */
enum ucode_type {
	UCODE_NONE = 0,		/* no image loaded yet */
	UCODE_INIT,		/* initialization image */
	UCODE_RT		/* runtime image */
};
995
996/* Sensitivity calib data */
struct il_sensitivity_data {
	u32 auto_corr_ofdm;
	u32 auto_corr_ofdm_mrc;
	u32 auto_corr_ofdm_x1;
	u32 auto_corr_ofdm_mrc_x1;
	u32 auto_corr_cck;
	u32 auto_corr_cck_mrc;

	u32 last_bad_plcp_cnt_ofdm;
	u32 last_fa_cnt_ofdm;	/* fa = false alarm (cf. MAX_FA_OFDM etc.) */
	u32 last_bad_plcp_cnt_cck;
	u32 last_fa_cnt_cck;

	/* nrg = energy; NOTE(review): naming-based reading — confirm */
	u32 nrg_curr_state;
	u32 nrg_prev_state;
	u32 nrg_value[10];
	u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
	u32 nrg_silence_ref;
	u32 nrg_energy_idx;
	u32 nrg_silence_idx;
	u32 nrg_th_cck;
	s32 nrg_auto_corr_silence_diff;
	u32 num_in_cck_no_fa;
	u32 nrg_th_ofdm;

	u16 barker_corr_th_min;
	u16 barker_corr_th_min_mrc;
	u16 nrg_th_cca;
};
1026
1027/* Chain noise (differential Rx gain) calib data */
struct il_chain_noise_data {
	u32 active_chains;
	u32 chain_noise_a;
	u32 chain_noise_b;
	u32 chain_noise_c;
	u32 chain_signal_a;
	u32 chain_signal_b;
	u32 chain_signal_c;
	u16 beacon_count;	/* beacons accumulated (cf. chain_noise_num_beacons) */
	u8 disconn_array[NUM_RX_CHAINS];
	u8 delta_gain_code[NUM_RX_CHAINS];
	u8 radio_write;
	u8 state;	/* presumably an enum il4965_chain_noise_state value — confirm */
};
1042
1043#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
1044#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
1045
1046#define IL_TRAFFIC_ENTRIES (256)
1047#define IL_TRAFFIC_ENTRY_SIZE (64)
1048
1049enum {
1050 MEASUREMENT_READY = (1 << 0),
1051 MEASUREMENT_ACTIVE = (1 << 1),
1052};
1053
1054/* interrupt stats */
/* Counters of interrupts seen, by cause. */
struct isr_stats {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 handlers[IL_CN_MAX];	/* hits per Rx-notification handler (cf. il_priv.handlers) */
	u32 tx;
	u32 unhandled;
};
1069
1070/* management stats */
1071enum il_mgmt_stats {
1072 MANAGEMENT_ASSOC_REQ = 0,
1073 MANAGEMENT_ASSOC_RESP,
1074 MANAGEMENT_REASSOC_REQ,
1075 MANAGEMENT_REASSOC_RESP,
1076 MANAGEMENT_PROBE_REQ,
1077 MANAGEMENT_PROBE_RESP,
1078 MANAGEMENT_BEACON,
1079 MANAGEMENT_ATIM,
1080 MANAGEMENT_DISASSOC,
1081 MANAGEMENT_AUTH,
1082 MANAGEMENT_DEAUTH,
1083 MANAGEMENT_ACTION,
1084 MANAGEMENT_MAX,
1085};
1086/* control stats */
1087enum il_ctrl_stats {
1088 CONTROL_BACK_REQ = 0,
1089 CONTROL_BACK,
1090 CONTROL_PSPOLL,
1091 CONTROL_RTS,
1092 CONTROL_CTS,
1093 CONTROL_ACK,
1094 CONTROL_CFEND,
1095 CONTROL_CFENDACK,
1096 CONTROL_MAX,
1097};
1098
/* Tx/Rx frame counters; compiled in only with debugfs support. */
struct traffic_stats {
#ifdef CONFIG_IWLEGACY_DEBUGFS
	u32 mgmt[MANAGEMENT_MAX];	/* per management frame subtype */
	u32 ctrl[CONTROL_MAX];		/* per control frame subtype */
	u32 data_cnt;			/* number of data frames */
	u64 data_bytes;			/* total data bytes */
#endif
};
1107
1108/*
1109 * host interrupt timeout value
1110 * used with setting interrupt coalescing timer
1111 * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
1112 *
1113 * default interrupt coalescing timer is 64 x 32 = 2048 usecs
1114 * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
1115 */
1116#define IL_HOST_INT_TIMEOUT_MAX (0xFF)
1117#define IL_HOST_INT_TIMEOUT_DEF (0x40)
1118#define IL_HOST_INT_TIMEOUT_MIN (0x0)
1119#define IL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
1120#define IL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
1121#define IL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
1122
1123#define IL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
1124
1125/* TX queue watchdog timeouts in mSecs */
1126#define IL_DEF_WD_TIMEOUT (2000)
1127#define IL_LONG_WD_TIMEOUT (10000)
1128#define IL_MAX_WD_TIMEOUT (120000)
1129
/* Accounting for forced firmware reloads. */
struct il_force_reset {
	int reset_request_count;	/* total reset requests seen */
	int reset_success_count;	/* requests that were carried out */
	int reset_reject_count;		/* requests that were refused */
	unsigned long reset_duration;	/* NOTE(review): looks like a jiffies backoff window — confirm */
	unsigned long last_force_reset_jiffies;	/* jiffies of the last forced reset */
};
1137
1138/* extend beacon time format bit shifting */
1139/*
1140 * for _3945 devices
1141 * bits 31:24 - extended
1142 * bits 23:0 - interval
1143 */
1144#define IL3945_EXT_BEACON_TIME_POS 24
1145/*
1146 * for _4965 devices
1147 * bits 31:22 - extended
1148 * bits 21:0 - interval
1149 */
1150#define IL4965_EXT_BEACON_TIME_POS 22
1151
/*
 * RXON ("receive on") context: the device's operating configuration,
 * plus everything tied to it (timing, QoS, WEP keys, HT state).
 * This driver uses a single context (il_priv.ctx; see for_each_context).
 */
struct il_rxon_context {
	struct ieee80211_vif *vif;

	const u8 *ac_to_fifo;
	const u8 *ac_to_queue;
	u8 mcast_queue;

	/*
	 * We could use the vif to indicate active, but we
	 * also need it to be active during disabling when
	 * we already removed the vif for type setting.
	 */
	bool always_active, is_active;

	bool ht_need_multiple_chains;

	int ctxid;

	u32 interface_modes, exclusive_interface_modes;
	u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;

	/*
	 * We declare this const so it can only be
	 * changed via explicit cast within the
	 * routines that actually update the physical
	 * hardware.
	 */
	const struct il_rxon_cmd active;	/* config currently in the device */
	struct il_rxon_cmd staging;	/* config being built for the next commit */

	struct il_rxon_time_cmd timing;

	struct il_qos_info qos_data;

	u8 bcast_sta_id, ap_sta_id;

	/* presumably host-command IDs for this context — confirm */
	u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
	u8 qos_cmd;
	u8 wep_key_cmd;

	struct il_wep_key wep_keys[WEP_KEYS_MAX];
	u8 key_mapping_keys;

	__le32 station_flags;

	/* current HT operating parameters */
	struct {
		bool non_gf_sta_present;
		u8 protection;
		bool enabled, is_40mhz;
		u8 extension_chan_offset;
	} ht;
};
1204
/* Power-save state: current and pending power-table commands. */
struct il_power_mgr {
	struct il_powertable_cmd sleep_cmd;
	struct il_powertable_cmd sleep_cmd_next;
	int debug_sleep_level_override;	/* NOTE(review): debug override; "unset" sentinel unclear — confirm */
	bool pci_pm;
};
1211
1212struct il_priv {
1213
1214 /* ieee device used by generic ieee processing code */
1215 struct ieee80211_hw *hw;
1216 struct ieee80211_channel *ieee_channels;
1217 struct ieee80211_rate *ieee_rates;
1218 struct il_cfg *cfg;
1219
1220 /* temporary frame storage list */
1221 struct list_head free_frames;
1222 int frames_count;
1223
1224 enum ieee80211_band band;
1225 int alloc_rxb_page;
1226
1227 void (*handlers[IL_CN_MAX]) (struct il_priv *il,
1228 struct il_rx_buf *rxb);
1229
1230 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
1231
1232 /* spectrum measurement report caching */
1233 struct il_spectrum_notification measure_report;
1234 u8 measurement_status;
1235
1236 /* ucode beacon time */
1237 u32 ucode_beacon_time;
1238 int missed_beacon_threshold;
1239
1240 /* track IBSS manager (last beacon) status */
1241 u32 ibss_manager;
1242
1243 /* force reset */
1244 struct il_force_reset force_reset;
1245
1246 /* we allocate array of il_channel_info for NIC's valid channels.
1247 * Access via channel # using indirect idx array */
1248 struct il_channel_info *channel_info; /* channel info array */
1249 u8 channel_count; /* # of channels */
1250
1251 /* thermal calibration */
1252 s32 temperature; /* degrees Kelvin */
1253 s32 last_temperature;
1254
1255 /* init calibration results */
1256 struct il_calib_result calib_results[IL_CALIB_MAX];
1257
1258 /* Scan related variables */
1259 unsigned long scan_start;
1260 unsigned long scan_start_tsf;
1261 void *scan_cmd;
1262 enum ieee80211_band scan_band;
1263 struct cfg80211_scan_request *scan_request;
1264 struct ieee80211_vif *scan_vif;
1265 u8 scan_tx_ant[IEEE80211_NUM_BANDS];
1266 u8 mgmt_tx_ant;
1267
1268 /* spinlock */
1269 spinlock_t lock; /* protect general shared data */
1270 spinlock_t hcmd_lock; /* protect hcmd */
1271 spinlock_t reg_lock; /* protect hw register access */
1272 struct mutex mutex;
1273
1274 /* basic pci-network driver stuff */
1275 struct pci_dev *pci_dev;
1276
1277 /* pci hardware address support */
1278 void __iomem *hw_base;
1279 u32 hw_rev;
1280 u32 hw_wa_rev;
1281 u8 rev_id;
1282
1283 /* command queue number */
1284 u8 cmd_queue;
1285
1286 /* max number of station keys */
1287 u8 sta_key_max_num;
1288
1289 /* EEPROM MAC addresses */
1290 struct mac_address addresses[1];
1291
1292 /* uCode images, save to reload in case of failure */
1293 int fw_idx; /* firmware we're trying to load */
1294 u32 ucode_ver; /* version of ucode, copy of
1295 il_ucode.ver */
1296 struct fw_desc ucode_code; /* runtime inst */
1297 struct fw_desc ucode_data; /* runtime data original */
1298 struct fw_desc ucode_data_backup; /* runtime data save/restore */
1299 struct fw_desc ucode_init; /* initialization inst */
1300 struct fw_desc ucode_init_data; /* initialization data */
1301 struct fw_desc ucode_boot; /* bootstrap inst */
1302 enum ucode_type ucode_type;
1303 u8 ucode_write_complete; /* the image write is complete */
1304 char firmware_name[25];
1305
1306 struct il_rxon_context ctx;
1307
1308 __le16 switch_channel;
1309
1310 /* 1st responses from initialize and runtime uCode images.
1311 * _4965's initialize alive response contains some calibration data. */
1312 struct il_init_alive_resp card_alive_init;
1313 struct il_alive_resp card_alive;
1314
1315 u16 active_rate;
1316
1317 u8 start_calib;
1318 struct il_sensitivity_data sensitivity_data;
1319 struct il_chain_noise_data chain_noise_data;
1320 __le16 sensitivity_tbl[HD_TBL_SIZE];
1321
1322 struct il_ht_config current_ht_config;
1323
1324 /* Rate scaling data */
1325 u8 retry_rate;
1326
1327 wait_queue_head_t wait_command_queue;
1328
1329 int activity_timer_active;
1330
1331 /* Rx and Tx DMA processing queues */
1332 struct il_rx_queue rxq;
1333 struct il_tx_queue *txq;
1334 unsigned long txq_ctx_active_msk;
1335 struct il_dma_ptr kw; /* keep warm address */
1336 struct il_dma_ptr scd_bc_tbls;
1337
1338 u32 scd_base_addr; /* scheduler sram base address */
1339
1340 unsigned long status;
1341
1342 /* counts mgmt, ctl, and data packets */
1343 struct traffic_stats tx_stats;
1344 struct traffic_stats rx_stats;
1345
1346 /* counts interrupts */
1347 struct isr_stats isr_stats;
1348
1349 struct il_power_mgr power_data;
1350
1351 /* context information */
1352 u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
1353
1354 /* station table variables */
1355
1356 /* Note: if lock and sta_lock are needed, lock must be acquired first */
1357 spinlock_t sta_lock;
1358 int num_stations;
1359 struct il_station_entry stations[IL_STATION_COUNT];
1360 unsigned long ucode_key_table;
1361
1362 /* queue refcounts */
1363#define IL_MAX_HW_QUEUES 32
1364 unsigned long queue_stopped[BITS_TO_LONGS(IL_MAX_HW_QUEUES)];
1365 /* for each AC */
1366 atomic_t queue_stop_count[4];
1367
1368 /* Indication if ieee80211_ops->open has been called */
1369 u8 is_open;
1370
1371 u8 mac80211_registered;
1372
1373 /* eeprom -- this is in the card's little endian byte order */
1374 u8 *eeprom;
1375 struct il_eeprom_calib_info *calib_info;
1376
1377 enum nl80211_iftype iw_mode;
1378
1379 /* Last Rx'd beacon timestamp */
1380 u64 timestamp;
1381
1382 union {
1383#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
1384 struct {
1385 void *shared_virt;
1386 dma_addr_t shared_phys;
1387
1388 struct delayed_work thermal_periodic;
1389 struct delayed_work rfkill_poll;
1390
1391 struct il3945_notif_stats stats;
1392#ifdef CONFIG_IWLEGACY_DEBUGFS
1393 struct il3945_notif_stats accum_stats;
1394 struct il3945_notif_stats delta_stats;
1395 struct il3945_notif_stats max_delta;
1396#endif
1397
1398 u32 sta_supp_rates;
1399 int last_rx_rssi; /* From Rx packet stats */
1400
1401 /* Rx'd packet timing information */
1402 u32 last_beacon_time;
1403 u64 last_tsf;
1404
1405 /*
1406 * each calibration channel group in the
1407 * EEPROM has a derived clip setting for
1408 * each rate.
1409 */
1410 const struct il3945_clip_group clip_groups[5];
1411
1412 } _3945;
1413#endif
1414#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
1415 struct {
1416 struct il_rx_phy_res last_phy_res;
1417 bool last_phy_res_valid;
1418
1419 struct completion firmware_loading_complete;
1420
1421 /*
1422 * chain noise reset and gain commands are the
1423 * two extra calibration commands follows the standard
1424 * phy calibration commands
1425 */
1426 u8 phy_calib_chain_noise_reset_cmd;
1427 u8 phy_calib_chain_noise_gain_cmd;
1428
1429 struct il_notif_stats stats;
1430#ifdef CONFIG_IWLEGACY_DEBUGFS
1431 struct il_notif_stats accum_stats;
1432 struct il_notif_stats delta_stats;
1433 struct il_notif_stats max_delta;
1434#endif
1435
1436 } _4965;
1437#endif
1438 };
1439
1440 struct il_hw_params hw_params;
1441
1442 u32 inta_mask;
1443
1444 struct workqueue_struct *workqueue;
1445
1446 struct work_struct restart;
1447 struct work_struct scan_completed;
1448 struct work_struct rx_replenish;
1449 struct work_struct abort_scan;
1450
1451 struct il_rxon_context *beacon_ctx;
1452 struct sk_buff *beacon_skb;
1453
1454 struct work_struct tx_flush;
1455
1456 struct tasklet_struct irq_tasklet;
1457
1458 struct delayed_work init_alive_start;
1459 struct delayed_work alive_start;
1460 struct delayed_work scan_check;
1461
1462 /* TX Power */
1463 s8 tx_power_user_lmt;
1464 s8 tx_power_device_lmt;
1465 s8 tx_power_next;
1466
1467#ifdef CONFIG_IWLEGACY_DEBUG
1468 /* debugging info */
1469 u32 debug_level; /* per device debugging will override global
1470 il_debug_level if set */
1471#endif /* CONFIG_IWLEGACY_DEBUG */
1472#ifdef CONFIG_IWLEGACY_DEBUGFS
1473 /* debugfs */
1474 u16 tx_traffic_idx;
1475 u16 rx_traffic_idx;
1476 u8 *tx_traffic;
1477 u8 *rx_traffic;
1478 struct dentry *debugfs_dir;
1479 u32 dbgfs_sram_offset, dbgfs_sram_len;
1480 bool disable_ht40;
1481#endif /* CONFIG_IWLEGACY_DEBUGFS */
1482
1483 struct work_struct txpower_work;
1484 u32 disable_sens_cal;
1485 u32 disable_chain_noise_cal;
1486 u32 disable_tx_power_cal;
1487 struct work_struct run_time_calib_work;
1488 struct timer_list stats_periodic;
1489 struct timer_list watchdog;
1490 bool hw_ready;
1491
1492 struct led_classdev led;
1493 unsigned long blink_on, blink_off;
1494 bool led_registered;
1495}; /*il_priv */
1496
/* Mark TX queue txq_id as active in il->txq_ctx_active_msk. */
static inline void
il_txq_ctx_activate(struct il_priv *il, int txq_id)
{
	set_bit(txq_id, &il->txq_ctx_active_msk);
}
1502
/* Mark TX queue txq_id as inactive in il->txq_ctx_active_msk. */
static inline void
il_txq_ctx_deactivate(struct il_priv *il, int txq_id)
{
	clear_bit(txq_id, &il->txq_ctx_active_msk);
}
1508
1509static inline struct ieee80211_hdr *
1510il_tx_queue_get_hdr(struct il_priv *il, int txq_id, int idx)
1511{
1512 if (il->txq[txq_id].txb[idx].skb)
1513 return (struct ieee80211_hdr *)il->txq[txq_id].txb[idx].skb->
1514 data;
1515 return NULL;
1516}
1517
/* Return the RXON context pointer stashed in the vif's drv_priv area. */
static inline struct il_rxon_context *
il_rxon_ctx_from_vif(struct ieee80211_vif *vif)
{
	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;

	return vif_priv->ctx;
}
1525
1526#define for_each_context(il, _ctx) \
1527 for (_ctx = &il->ctx; _ctx == &il->ctx; _ctx++)
1528
1529static inline int
1530il_is_associated(struct il_priv *il)
1531{
1532 return (il->ctx.active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1533}
1534
/* Single-context driver: "any context associated" reduces to il_is_associated(). */
static inline int
il_is_any_associated(struct il_priv *il)
{
	return il_is_associated(il);
}
1540
1541static inline int
1542il_is_associated_ctx(struct il_rxon_context *ctx)
1543{
1544 return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1545}
1546
1547static inline int
1548il_is_channel_valid(const struct il_channel_info *ch_info)
1549{
1550 if (ch_info == NULL)
1551 return 0;
1552 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
1553}
1554
1555static inline int
1556il_is_channel_radar(const struct il_channel_info *ch_info)
1557{
1558 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
1559}
1560
/* True when the channel is in the 5 GHz (802.11a) band. */
static inline u8
il_is_channel_a_band(const struct il_channel_info *ch_info)
{
	return ch_info->band == IEEE80211_BAND_5GHZ;
}
1566
1567static inline int
1568il_is_channel_passive(const struct il_channel_info *ch)
1569{
1570 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
1571}
1572
1573static inline int
1574il_is_channel_ibss(const struct il_channel_info *ch)
1575{
1576 return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
1577}
1578
/* Release an Rx page (struct page form) and decrement the Rx page count. */
static inline void
__il_free_pages(struct il_priv *il, struct page *page)
{
	__free_pages(page, il->hw_params.rx_page_order);
	il->alloc_rxb_page--;
}
1585
/* Release an Rx page given as a virtual address (free_pages() form). */
static inline void
il_free_pages(struct il_priv *il, unsigned long page)
{
	free_pages(page, il->hw_params.rx_page_order);
	il->alloc_rxb_page--;
}
1592
1593#define IWLWIFI_VERSION "in-tree:"
1594#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
1595#define DRV_AUTHOR "<ilw@linux.intel.com>"
1596
1597#define IL_PCI_DEVICE(dev, subdev, cfg) \
1598 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
1599 .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
1600 .driver_data = (kernel_ulong_t)&(cfg)
1601
1602#define TIME_UNIT 1024
1603
1604#define IL_SKU_G 0x1
1605#define IL_SKU_A 0x2
1606#define IL_SKU_N 0x8
1607
1608#define IL_CMD(x) case x: return #x
1609
1610/* Size of one Rx buffer in host DRAM */
1611#define IL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
1612#define IL_RX_BUF_SIZE_4K (4 * 1024)
1613#define IL_RX_BUF_SIZE_8K (8 * 1024)
1614
/* Per-device RXON host-command operations. */
struct il_hcmd_ops {
	int (*rxon_assoc) (struct il_priv *il, struct il_rxon_context *ctx);
	int (*commit_rxon) (struct il_priv *il, struct il_rxon_context *ctx);
	void (*set_rxon_chain) (struct il_priv *il,
				struct il_rxon_context *ctx);
};
1621
/* Per-device helpers for sizing/building host commands and for scanning. */
struct il_hcmd_utils_ops {
	u16(*get_hcmd_size) (u8 cmd_id, u16 len);
	u16(*build_addsta_hcmd) (const struct il_addsta_cmd *cmd, u8 *data);
	int (*request_scan) (struct il_priv *il, struct ieee80211_vif *vif);
	void (*post_scan) (struct il_priv *il);
};
1628
/* Power-management (APM) init/config hooks (see il_lib_ops.apm_ops). */
struct il_apm_ops {
	int (*init) (struct il_priv *il);
	void (*config) (struct il_priv *il);
};
1633
1634#ifdef CONFIG_IWLEGACY_DEBUGFS
/* debugfs read callbacks for the Rx/Tx/general statistics files. */
struct il_debugfs_ops {
	ssize_t(*rx_stats_read) (struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos);
	ssize_t(*tx_stats_read) (struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos);
	ssize_t(*general_stats_read) (struct file *file,
				      char __user *user_buf, size_t count,
				      loff_t *ppos);
};
1644#endif
1645
/* Temperature measurement hook. */
struct il_temp_ops {
	void (*temperature) (struct il_priv *il);
};
1649
1650struct il_lib_ops {
1651 /* set hw dependent parameters */
1652 int (*set_hw_params) (struct il_priv *il);
1653 /* Handling TX */
1654 void (*txq_update_byte_cnt_tbl) (struct il_priv *il,
1655 struct il_tx_queue *txq,
1656 u16 byte_cnt);
1657 int (*txq_attach_buf_to_tfd) (struct il_priv *il,
1658 struct il_tx_queue *txq, dma_addr_t addr,
1659 u16 len, u8 reset, u8 pad);
1660 void (*txq_free_tfd) (struct il_priv *il, struct il_tx_queue *txq);
1661 int (*txq_init) (struct il_priv *il, struct il_tx_queue *txq);
1662 /* setup Rx handler */
1663 void (*handler_setup) (struct il_priv *il);
1664 /* alive notification after init uCode load */
1665 void (*init_alive_start) (struct il_priv *il);
1666 /* check validity of rtc data address */
1667 int (*is_valid_rtc_data_addr) (u32 addr);
1668 /* 1st ucode load */
1669 int (*load_ucode) (struct il_priv *il);
1670
1671 void (*dump_nic_error_log) (struct il_priv *il);
1672 int (*dump_fh) (struct il_priv *il, char **buf, bool display);
1673 int (*set_channel_switch) (struct il_priv *il,
1674 struct ieee80211_channel_switch *ch_switch);
1675 /* power management */
1676 struct il_apm_ops apm_ops;
1677
1678 /* power */
1679 int (*send_tx_power) (struct il_priv *il);
1680 void (*update_chain_flags) (struct il_priv *il);
1681
1682 /* eeprom operations */
1683 struct il_eeprom_ops eeprom_ops;
1684
1685 /* temperature */
1686 struct il_temp_ops temp_ops;
1687
1688#ifdef CONFIG_IWLEGACY_DEBUGFS
1689 struct il_debugfs_ops debugfs_ops;
1690#endif
1691
1692};
1693
/* Hook to send a LED command to the device. */
struct il_led_ops {
	int (*cmd) (struct il_priv *il, struct il_led_cmd *led_cmd);
};
1697
/* Per-driver callbacks: association follow-up, AP config, station management. */
struct il_legacy_ops {
	void (*post_associate) (struct il_priv *il);
	void (*config_ap) (struct il_priv *il);
	/* station management */
	int (*update_bcast_stations) (struct il_priv *il);
	int (*manage_ibss_station) (struct il_priv *il,
				    struct ieee80211_vif *vif, bool add);
};
1706
/* Bundle of all per-device operation tables, referenced from il_cfg. */
struct il_ops {
	const struct il_lib_ops *lib;
	const struct il_hcmd_ops *hcmd;
	const struct il_hcmd_utils_ops *utils;
	const struct il_led_ops *led;
	const struct il_nic_ops *nic;
	const struct il_legacy_ops *legacy;
	const struct ieee80211_ops *ieee80211_ops;
};
1716
/* Options settable at module load time; defaults noted per field. */
struct il_mod_params {
	int sw_crypto;		/* def: 0 = using hardware encryption */
	int disable_hw_scan;	/* def: 0 = use h/w scan */
	int num_of_queues;	/* def: HW dependent */
	int disable_11n;	/* def: 0 = 11n capabilities enabled */
	int amsdu_size_8K;	/* def: 1 = enable 8K amsdu size */
	int antenna;		/* def: 0 = both antennas (use diversity) */
	int restart_fw;		/* def: 1 = restart firmware */
};
1726
1727/*
1728 * @led_compensation: compensate on the led on/off time per HW according
1729 * to the deviation to achieve the desired led frequency.
1730 * The detail algorithm is described in common.c
1731 * @chain_noise_num_beacons: number of beacons used to compute chain noise
1732 * @wd_timeout: TX queues watchdog timeout
1733 * @temperature_kelvin: temperature report by uCode in kelvin
1734 * @ucode_tracing: support ucode continuous tracing
1735 * @sensitivity_calib_by_driver: driver has the capability to perform
1736 * sensitivity calibration operation
1737 * @chain_noise_calib_by_driver: driver has the capability to perform
1738 * chain noise calibration operation
1739 */
/* Parameters shared by all devices of a family; see kernel-doc above. */
struct il_base_params {
	int eeprom_size;
	int num_of_queues;	/* def: HW dependent */
	int num_of_ampdu_queues;	/* def: HW dependent */
	/* for il_apm_init() */
	u32 pll_cfg_val;
	bool set_l0s;
	bool use_bsm;

	u16 led_compensation;
	int chain_noise_num_beacons;
	unsigned int wd_timeout;
	bool temperature_kelvin;
	const bool ucode_tracing;
	const bool sensitivity_calib_by_driver;
	const bool chain_noise_calib_by_driver;
};
1757
1758#define IL_LED_SOLID 11
1759#define IL_DEF_LED_INTRVL cpu_to_le32(1000)
1760
1761#define IL_LED_ACTIVITY (0<<1)
1762#define IL_LED_LINK (1<<1)
1763
1764/*
1765 * LED mode
1766 * IL_LED_DEFAULT: use device default
1767 * IL_LED_RF_STATE: turn LED on/off based on RF state
1768 * LED ON = RF ON
1769 * LED OFF = RF OFF
1770 * IL_LED_BLINK: adjust led blink rate based on blink table
1771 */
1772enum il_led_mode {
1773 IL_LED_DEFAULT,
1774 IL_LED_RF_STATE,
1775 IL_LED_BLINK,
1776};
1777
1778void il_leds_init(struct il_priv *il);
1779void il_leds_exit(struct il_priv *il);
1780
1781/**
1782 * struct il_cfg
1783 * @fw_name_pre: Firmware filename prefix. The api version and extension
1784 * (.ucode) will be added to filename before loading from disk. The
1785 * filename is constructed as fw_name_pre<api>.ucode.
1786 * @ucode_api_max: Highest version of uCode API supported by driver.
1787 * @ucode_api_min: Lowest version of uCode API supported by driver.
1788 * @scan_antennas: available antenna for scan operation
1789 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
1790 *
1791 * We enable the driver to be backward compatible wrt API version. The
1792 * driver specifies which APIs it supports (with @ucode_api_max being the
1793 * highest and @ucode_api_min the lowest). Firmware will only be loaded if
1794 * it has a supported API version. The firmware's API version will be
1795 * stored in @il_priv, enabling the driver to make runtime changes based
1796 * on firmware version used.
1797 *
1798 * For example,
1799 * if (IL_UCODE_API(il->ucode_ver) >= 2) {
1800 * Driver interacts with Firmware API version >= 2.
1801 * } else {
1802 * Driver interacts with Firmware API version 1.
1803 * }
1804 *
1805 * The ideal usage of this infrastructure is to treat a new ucode API
1806 * release as a new hardware revision. That is, through utilizing the
1807 * il_hcmd_utils_ops etc. we accommodate different command structures
1808 * and flows between hardware versions as well as their API
1809 * versions.
1810 *
1811 */
1812struct il_cfg {
1813 /* params specific to an individual device within a device family */
1814 const char *name;
1815 const char *fw_name_pre;
1816 const unsigned int ucode_api_max;
1817 const unsigned int ucode_api_min;
1818 u8 valid_tx_ant;
1819 u8 valid_rx_ant;
1820 unsigned int sku;
1821 u16 eeprom_ver;
1822 u16 eeprom_calib_ver;
1823 const struct il_ops *ops;
1824 /* module based parameters which can be set from modprobe cmd */
1825 const struct il_mod_params *mod_params;
1826 /* params not likely to change within a device family */
1827 struct il_base_params *base_params;
1828 /* params likely to change within a device family */
1829 u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
1830 enum il_led_mode led_mode;
1831};
1832
1833/***************************
1834 * L i b *
1835 ***************************/
1836
1837struct ieee80211_hw *il_alloc_all(struct il_cfg *cfg);
1838int il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1839 u16 queue, const struct ieee80211_tx_queue_params *params);
1840int il_mac_tx_last_beacon(struct ieee80211_hw *hw);
1841
1842void il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx,
1843 int hw_decrypt);
1844int il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx);
1845int il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx);
1846int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
1847 struct il_rxon_context *ctx);
1848void il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx,
1849 enum ieee80211_band band, struct ieee80211_vif *vif);
1850u8 il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band);
1851void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf);
1852bool il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
1853 struct ieee80211_sta_ht_cap *ht_cap);
1854void il_connection_init_rx_config(struct il_priv *il,
1855 struct il_rxon_context *ctx);
1856void il_set_rate(struct il_priv *il);
1857int il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
1858 u32 decrypt_res, struct ieee80211_rx_status *stats);
1859void il_irq_handle_error(struct il_priv *il);
1860int il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
1861void il_mac_remove_interface(struct ieee80211_hw *hw,
1862 struct ieee80211_vif *vif);
1863int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1864 enum nl80211_iftype newtype, bool newp2p);
1865int il_alloc_txq_mem(struct il_priv *il);
1866void il_txq_mem(struct il_priv *il);
1867
1868#ifdef CONFIG_IWLEGACY_DEBUGFS
1869int il_alloc_traffic_mem(struct il_priv *il);
1870void il_free_traffic_mem(struct il_priv *il);
1871void il_reset_traffic_log(struct il_priv *il);
1872void il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
1873 struct ieee80211_hdr *header);
1874void il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
1875 struct ieee80211_hdr *header);
1876const char *il_get_mgmt_string(int cmd);
1877const char *il_get_ctrl_string(int cmd);
1878void il_clear_traffic_stats(struct il_priv *il);
1879void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
1880#else
/* Stub: traffic logging is compiled out without CONFIG_IWLEGACY_DEBUGFS. */
static inline int
il_alloc_traffic_mem(struct il_priv *il)
{
	return 0;	/* nothing to allocate; always succeeds */
}
1886
/* Stub: no-op without CONFIG_IWLEGACY_DEBUGFS. */
static inline void
il_free_traffic_mem(struct il_priv *il)
{
}
1891
/* Stub: no-op without CONFIG_IWLEGACY_DEBUGFS. */
static inline void
il_reset_traffic_log(struct il_priv *il)
{
}
1896
/* Stub: no-op without CONFIG_IWLEGACY_DEBUGFS. */
static inline void
il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
			 struct ieee80211_hdr *header)
{
}
1902
/* Stub: no-op without CONFIG_IWLEGACY_DEBUGFS. */
static inline void
il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
			 struct ieee80211_hdr *header)
{
}
1908
/* Stub: no-op without CONFIG_IWLEGACY_DEBUGFS. */
static inline void
il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
{
}
1913#endif
1914/*****************************************************
1915 * RX handlers.
1916 * **************************************************/
1917void il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb);
1918void il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb);
1919void il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb);
1920
1921/*****************************************************
1922* RX
1923******************************************************/
1924void il_cmd_queue_unmap(struct il_priv *il);
1925void il_cmd_queue_free(struct il_priv *il);
1926int il_rx_queue_alloc(struct il_priv *il);
1927void il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q);
1928int il_rx_queue_space(const struct il_rx_queue *q);
1929void il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb);
1930/* Handlers */
1931void il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb);
void il_recover_from_stats(struct il_priv *il, struct il_rx_pkt *pkt);
void il_chswitch_done(struct il_priv *il, bool is_success);
void il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb);

/* TX helpers */

/*****************************************************
* TX
******************************************************/
void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
int il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
		     u32 txq_id);
void il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq,
		       int slots_num, u32 txq_id);
void il_tx_queue_unmap(struct il_priv *il, int txq_id);
void il_tx_queue_free(struct il_priv *il, int txq_id);
void il_setup_watchdog(struct il_priv *il);
/*****************************************************
 * TX power
 ****************************************************/
int il_set_tx_power(struct il_priv *il, s8 tx_power, bool force);

/*******************************************************************************
 * Rate
 ******************************************************************************/

u8 il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx);

/*******************************************************************************
 * Scanning
 ******************************************************************************/
void il_init_scan_params(struct il_priv *il);
int il_scan_cancel(struct il_priv *il);
int il_scan_cancel_timeout(struct il_priv *il, unsigned long ms);
void il_force_scan_end(struct il_priv *il);
int il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct cfg80211_scan_request *req);
void il_internal_short_hw_scan(struct il_priv *il);
int il_force_reset(struct il_priv *il, bool external);
u16 il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
		      const u8 *ta, const u8 *ie, int ie_len, int left);
void il_setup_rx_scan_handlers(struct il_priv *il);
u16 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
			     u8 n_probes);
u16 il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
			      struct ieee80211_vif *vif);
void il_setup_scan_deferred_work(struct il_priv *il);
void il_cancel_scan_deferred_work(struct il_priv *il);

/* For faster active scanning, scan will move to the next channel if fewer than
 * PLCP_QUIET_THRESH packets are heard on this channel within
 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
 * time if it's a quiet channel (nothing responded to our probe, and there's
 * no other traffic).
 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
#define IL_ACTIVE_QUIET_TIME cpu_to_le16(10)	/* msec */
#define IL_PLCP_QUIET_THRESH cpu_to_le16(1)	/* packets */

/* Watchdog period for a scan that never completes. */
#define IL_SCAN_CHECK_WATCHDOG (HZ * 7)

/*****************************************************
 * S e n d i n g H o s t C o m m a n d s *
 *****************************************************/

const char *il_get_cmd_string(u8 cmd);
int __must_check il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd);
int il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd);
int __must_check il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len,
				 const void *data);
int il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
			  void (*callback) (struct il_priv *il,
					    struct il_device_cmd *cmd,
					    struct il_rx_pkt *pkt));

int il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd);

/*****************************************************
 * PCI *
 *****************************************************/
2011
2012static inline u16
2013il_pcie_link_ctl(struct il_priv *il)
2014{
2015 int pos;
2016 u16 pci_lnk_ctl;
2017 pos = pci_pcie_cap(il->pci_dev);
2018 pci_read_config_word(il->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
2019 return pci_lnk_ctl;
2020}
2021
void il_bg_watchdog(unsigned long data);
u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval);
__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
			  u32 beacon_interval);

#ifdef CONFIG_PM
int il_pci_suspend(struct device *device);
int il_pci_resume(struct device *device);
extern const struct dev_pm_ops il_pm_ops;

#define IL_LEGACY_PM_OPS (&il_pm_ops)

#else /* !CONFIG_PM */

#define IL_LEGACY_PM_OPS NULL

#endif /* !CONFIG_PM */

/*****************************************************
* Error Handling Debugging
******************************************************/
void il4965_dump_nic_error_log(struct il_priv *il);
#ifdef CONFIG_IWLEGACY_DEBUG
void il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx);
#else
/* Debugging compiled out: printing the RXON config is a no-op stub. */
static inline void
il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx)
{
}
#endif

void il_clear_isr_stats(struct il_priv *il);

/*****************************************************
* GEOS
******************************************************/
int il_init_geos(struct il_priv *il);
void il_free_geos(struct il_priv *il);

/*************** DRIVER STATUS FUNCTIONS *****/

/* Bit numbers within il_priv.status; used with the test_bit() family. */
#define S_HCMD_ACTIVE 0	/* host command in progress */
/* 1 is unused (used to be S_HCMD_SYNC_ACTIVE) */
#define S_INT_ENABLED 2
#define S_RF_KILL_HW 3
#define S_CT_KILL 4
#define S_INIT 5
#define S_ALIVE 6
#define S_READY 7
#define S_TEMPERATURE 8
#define S_GEO_CONFIGURED 9
#define S_EXIT_PENDING 10
#define S_STATS 12	/* NOTE: bit 11 is unused */
#define S_SCANNING 13
#define S_SCAN_ABORTING 14
#define S_SCAN_HW 15
#define S_POWER_PMI 16
#define S_FW_ERROR 17
#define S_CHANNEL_SWITCH_PENDING 18
2081
2082static inline int
2083il_is_ready(struct il_priv *il)
2084{
2085 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
2086 * set but EXIT_PENDING is not */
2087 return test_bit(S_READY, &il->status) &&
2088 test_bit(S_GEO_CONFIGURED, &il->status) &&
2089 !test_bit(S_EXIT_PENDING, &il->status);
2090}
2091
2092static inline int
2093il_is_alive(struct il_priv *il)
2094{
2095 return test_bit(S_ALIVE, &il->status);
2096}
2097
2098static inline int
2099il_is_init(struct il_priv *il)
2100{
2101 return test_bit(S_INIT, &il->status);
2102}
2103
2104static inline int
2105il_is_rfkill_hw(struct il_priv *il)
2106{
2107 return test_bit(S_RF_KILL_HW, &il->status);
2108}
2109
/* Only the hardware kill switch is tracked; there is no SW rfkill state. */
static inline int
il_is_rfkill(struct il_priv *il)
{
	int killed = il_is_rfkill_hw(il);

	return killed;
}
2115
2116static inline int
2117il_is_ctkill(struct il_priv *il)
2118{
2119 return test_bit(S_CT_KILL, &il->status);
2120}
2121
/* Ready for RF operation: fully initialized and not rf-killed. */
static inline int
il_is_ready_rf(struct il_priv *il)
{
	return il_is_rfkill(il) ? 0 : il_is_ready(il);
}
2131
extern void il_send_bt_config(struct il_priv *il);
extern int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
void il_apm_stop(struct il_priv *il);
int il_apm_init(struct il_priv *il);

int il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx);
/* Send the RXON-assoc host command via the HW-specific hcmd hook. */
static inline int
il_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
{
	return il->cfg->ops->hcmd->rxon_assoc(il, ctx);
}

/* Commit the staged RXON configuration via the HW-specific hcmd hook. */
static inline int
il_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
{
	return il->cfg->ops->hcmd->commit_rxon(il, ctx);
}

/* Band descriptor registered with mac80211 for @band (may be NULL). */
static inline const struct ieee80211_supported_band *
il_get_hw_mode(struct il_priv *il, enum ieee80211_band band)
{
	return il->hw->wiphy->bands[band];
}

/* mac80211 handlers */
int il_mac_config(struct ieee80211_hw *hw, u32 changed);
void il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_bss_conf *bss_conf, u32 changes);
void il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
			  __le16 fc, __le32 *tx_flags);

irqreturn_t il_isr(int irq, void *data);

#include <linux/io.h>
2167
/*
 * Raw MMIO accessors.  The "_il_" prefix means: no locking and no NIC
 * wakeup -- callers must hold reg_lock and, where the register requires
 * it, MAC access (see _il_grab_nic_access() below).
 */
static inline void
_il_write8(struct il_priv *il, u32 ofs, u8 val)
{
	iowrite8(val, il->hw_base + ofs);
}
#define il_write8(il, ofs, val) _il_write8(il, ofs, val)

/* 32-bit MMIO write at byte offset @ofs from the mapped BAR. */
static inline void
_il_wr(struct il_priv *il, u32 ofs, u32 val)
{
	iowrite32(val, il->hw_base + ofs);
}

/* 32-bit MMIO read at byte offset @ofs from the mapped BAR. */
static inline u32
_il_rd(struct il_priv *il, u32 ofs)
{
	return ioread32(il->hw_base + ofs);
}

#define IL_POLL_INTERVAL 10	/* microseconds */

/*
 * Busy-poll register @addr until (value & mask) == (bits & mask) or
 * @timeout microseconds elapse.  Returns the elapsed time on success
 * (may be 0) or -ETIMEDOUT.
 */
static inline int
_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
{
	int t = 0;

	do {
		if ((_il_rd(il, addr) & mask) == (bits & mask))
			return t;
		udelay(IL_POLL_INTERVAL);
		t += IL_POLL_INTERVAL;
	} while (t < timeout);

	return -ETIMEDOUT;
}

/* Read-modify-write OR of @mask into register @reg (caller locks). */
static inline void
_il_set_bit(struct il_priv *il, u32 reg, u32 mask)
{
	_il_wr(il, reg, _il_rd(il, reg) | mask);
}
2208
2209static inline void
2210il_set_bit(struct il_priv *p, u32 r, u32 m)
2211{
2212 unsigned long reg_flags;
2213
2214 spin_lock_irqsave(&p->reg_lock, reg_flags);
2215 _il_set_bit(p, r, m);
2216 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
2217}
2218
/* Read-modify-write AND-NOT of @mask in register @reg (caller locks). */
static inline void
_il_clear_bit(struct il_priv *il, u32 reg, u32 mask)
{
	_il_wr(il, reg, _il_rd(il, reg) & ~mask);
}
2224
2225static inline void
2226il_clear_bit(struct il_priv *p, u32 r, u32 m)
2227{
2228 unsigned long reg_flags;
2229
2230 spin_lock_irqsave(&p->reg_lock, reg_flags);
2231 _il_clear_bit(p, r, m);
2232 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
2233}
2234
/*
 * Wake the NIC and hold MAC access so host register accesses hit live
 * hardware.  Returns 0 on success, -EIO if the MAC does not become
 * accessible within 15 ms (a forced NMI is issued in that case).
 * Caller must hold reg_lock and pair with _il_release_nic_access().
 */
static inline int
_il_grab_nic_access(struct il_priv *il)
{
	int ret;
	u32 val;

	/* this bit wakes up the NIC */
	_il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored. We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 */
	ret =
	    _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			  CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (ret < 0) {
		val = _il_rd(il, CSR_GP_CNTRL);
		IL_ERR("MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
		_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		return -EIO;
	}

	return 0;
}

/* Drop the MAC access request taken by _il_grab_nic_access(). */
static inline void
_il_release_nic_access(struct il_priv *il)
{
	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
2280
/*
 * Locked register read with NIC wakeup.  NOTE(review): unlike il_wr()
 * below, the return value of _il_grab_nic_access() is ignored here, so
 * on a wakeup failure the read is still issued (after the forced NMI).
 */
static inline u32
il_rd(struct il_priv *il, u32 reg)
{
	u32 value;
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);
	value = _il_rd(il, reg);
	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
	return value;

}

/* Locked register write; silently skipped if the NIC cannot be woken. */
static inline void
il_wr(struct il_priv *il, u32 reg, u32 value)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	if (!_il_grab_nic_access(il)) {
		_il_wr(il, reg, value);
		_il_release_nic_access(il);
	}
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}
2308
2309static inline void
2310il_write_reg_buf(struct il_priv *il, u32 reg, u32 len, u32 * values)
2311{
2312 u32 count = sizeof(u32);
2313
2314 if (il != NULL && values != NULL) {
2315 for (; 0 < len; len -= count, reg += count, values++)
2316 il_wr(il, reg, *values);
2317 }
2318}
2319
/*
 * Locked variant of _il_poll_bit(): poll @addr until all bits in @mask
 * are set or @timeout microseconds elapse.  Returns elapsed time on
 * success, -ETIMEDOUT otherwise.  Each probe takes and drops reg_lock.
 */
static inline int
il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout)
{
	int t = 0;

	do {
		if ((il_rd(il, addr) & mask) == mask)
			return t;
		udelay(IL_POLL_INTERVAL);
		t += IL_POLL_INTERVAL;
	} while (t < timeout);

	return -ETIMEDOUT;
}
2334
/*
 * Periphery (PRPH) register access: the target address is latched via
 * HBUS_TARG_PRPH_RADDR/WADDR and the data moves through the RDAT/WDAT
 * window registers.  NOTE(review): the (3 << 24) control bits come from
 * the hardware interface -- confirm against the HBUS/CSR documentation.
 * The "_il_" forms require the caller to hold NIC access.
 */
static inline u32
_il_rd_prph(struct il_priv *il, u32 reg)
{
	_il_wr(il, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
	rmb();			/* order the address latch before the data read */
	return _il_rd(il, HBUS_TARG_PRPH_RDAT);
}

/* Locked PRPH read (grab-access return ignored, as in il_rd()). */
static inline u32
il_rd_prph(struct il_priv *il, u32 reg)
{
	unsigned long reg_flags;
	u32 val;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);
	val = _il_rd_prph(il, reg);
	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
	return val;
}

/* Raw PRPH write; caller holds NIC access and reg_lock. */
static inline void
_il_wr_prph(struct il_priv *il, u32 addr, u32 val)
{
	_il_wr(il, HBUS_TARG_PRPH_WADDR, ((addr & 0x0000FFFF) | (3 << 24)));
	wmb();			/* latch the address before writing the data */
	_il_wr(il, HBUS_TARG_PRPH_WDAT, val);
}

/* Locked PRPH write; silently skipped if the NIC cannot be woken. */
static inline void
il_wr_prph(struct il_priv *il, u32 addr, u32 val)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	if (!_il_grab_nic_access(il)) {
		_il_wr_prph(il, addr, val);
		_il_release_nic_access(il);
	}
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}
2377
/* OR @mask into PRPH register @reg (caller holds NIC access + lock). */
#define _il_set_bits_prph(il, reg, mask) \
_il_wr_prph(il, reg, (_il_rd_prph(il, reg) | mask))

/* Locked read-modify-write OR on a PRPH register. */
static inline void
il_set_bits_prph(struct il_priv *il, u32 reg, u32 mask)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);
	_il_set_bits_prph(il, reg, mask);
	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}

/* Replace the @mask-selected bits of a PRPH register with @bits. */
#define _il_set_bits_mask_prph(il, reg, bits, mask) \
_il_wr_prph(il, reg, \
		((_il_rd_prph(il, reg) & mask) | bits))

/* Locked variant of _il_set_bits_mask_prph(). */
static inline void
il_set_bits_mask_prph(struct il_priv *il, u32 reg, u32 bits, u32 mask)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);
	_il_set_bits_mask_prph(il, reg, bits, mask);
	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}

/* Clear the @mask bits of a PRPH register under reg_lock. */
static inline void
il_clear_bits_prph(struct il_priv *il, u32 reg, u32 mask)
{
	unsigned long reg_flags;
	u32 val;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);
	val = _il_rd_prph(il, reg);
	_il_wr_prph(il, reg, (val & ~mask));
	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}
2422
/* Read one 32-bit word of device (target) memory at byte address @addr. */
static inline u32
il_read_targ_mem(struct il_priv *il, u32 addr)
{
	unsigned long reg_flags;
	u32 value;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);

	_il_wr(il, HBUS_TARG_MEM_RADDR, addr);
	rmb();			/* address latch before the data read */
	value = _il_rd(il, HBUS_TARG_MEM_RDAT);

	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
	return value;
}

/* Write one word of target memory; dropped if the NIC cannot be woken. */
static inline void
il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	if (!_il_grab_nic_access(il)) {
		_il_wr(il, HBUS_TARG_MEM_WADDR, addr);
		wmb();
		_il_wr(il, HBUS_TARG_MEM_WDAT, val);
		_il_release_nic_access(il);
	}
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}

/*
 * Write @len bytes (@len / 4 words) of target memory starting at @addr.
 * Only the first address is set; NOTE(review): this relies on the WADDR
 * register auto-incrementing -- confirm against the HBUS documentation.
 */
static inline void
il_write_targ_mem_buf(struct il_priv *il, u32 addr, u32 len, u32 * values)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	if (!_il_grab_nic_access(il)) {
		_il_wr(il, HBUS_TARG_MEM_WADDR, addr);
		wmb();
		for (; 0 < len; len -= sizeof(u32), values++)
			_il_wr(il, HBUS_TARG_MEM_WDAT, *values);

		_il_release_nic_access(il);
	}
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}
2472
/* Key slot types in the uCode key table. */
#define HW_KEY_DYNAMIC 0
#define HW_KEY_DEFAULT 1

/* Flags for il_priv.stations[].used */
#define IL_STA_DRIVER_ACTIVE BIT(0)	/* driver entry is active */
#define IL_STA_UCODE_ACTIVE BIT(1)	/* ucode entry is active */
#define IL_STA_UCODE_INPROGRESS BIT(2)	/* ucode entry is in process of
					   being activated */
#define IL_STA_LOCAL BIT(3)	/* station state not directed by mac80211;
				   (this is for the IBSS BSSID stations) */
#define IL_STA_BCAST BIT(4)	/* this station is the special bcast station */

/* Station table management (implemented in common code). */
void il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx);
void il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx);
void il_dealloc_bcast_stations(struct il_priv *il);
int il_get_free_ucode_key_idx(struct il_priv *il);
int il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags);
int il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
			  const u8 *addr, bool is_ap,
			  struct ieee80211_sta *sta, u8 *sta_id_r);
int il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr);
int il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		      struct ieee80211_sta *sta);

u8 il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
		   const u8 *addr, bool is_ap, struct ieee80211_sta *sta);

int il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
		   struct il_link_quality_cmd *lq, u8 flags, bool init);
2501
2502/**
2503 * il_clear_driver_stations - clear knowledge of all stations from driver
2504 * @il: iwl il struct
2505 *
2506 * This is called during il_down() to make sure that in the case
2507 * we're coming there from a hardware restart mac80211 will be
2508 * able to reconfigure stations -- if we're getting there in the
2509 * normal down flow then the stations will already be cleared.
2510 */
2511static inline void
2512il_clear_driver_stations(struct il_priv *il)
2513{
2514 unsigned long flags;
2515 struct il_rxon_context *ctx = &il->ctx;
2516
2517 spin_lock_irqsave(&il->sta_lock, flags);
2518 memset(il->stations, 0, sizeof(il->stations));
2519 il->num_stations = 0;
2520
2521 il->ucode_key_table = 0;
2522
2523 /*
2524 * Remove all key information that is not stored as part
2525 * of station information since mac80211 may not have had
2526 * a chance to remove all the keys. When device is
2527 * reconfigured by mac80211 after an error all keys will
2528 * be reconfigured.
2529 */
2530 memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
2531 ctx->key_mapping_keys = 0;
2532
2533 spin_unlock_irqrestore(&il->sta_lock, flags);
2534}
2535
2536static inline int
2537il_sta_id(struct ieee80211_sta *sta)
2538{
2539 if (WARN_ON(!sta))
2540 return IL_INVALID_STATION;
2541
2542 return ((struct il_station_priv_common *)sta->drv_priv)->sta_id;
2543}
2544
2545/**
2546 * il_sta_id_or_broadcast - return sta_id or broadcast sta
2547 * @il: iwl il
2548 * @context: the current context
2549 * @sta: mac80211 station
2550 *
2551 * In certain circumstances mac80211 passes a station pointer
2552 * that may be %NULL, for example during TX or key setup. In
2553 * that case, we need to use the broadcast station, so this
2554 * inline wraps that pattern.
2555 */
2556static inline int
2557il_sta_id_or_broadcast(struct il_priv *il, struct il_rxon_context *context,
2558 struct ieee80211_sta *sta)
2559{
2560 int sta_id;
2561
2562 if (!sta)
2563 return context->bcast_sta_id;
2564
2565 sta_id = il_sta_id(sta);
2566
2567 /*
2568 * mac80211 should not be passing a partially
2569 * initialised station!
2570 */
2571 WARN_ON(sta_id == IL_INVALID_STATION);
2572
2573 return sta_id;
2574}
2575
2576/**
2577 * il_queue_inc_wrap - increment queue idx, wrap back to beginning
2578 * @idx -- current idx
2579 * @n_bd -- total number of entries in queue (must be power of 2)
2580 */
2581static inline int
2582il_queue_inc_wrap(int idx, int n_bd)
2583{
2584 return ++idx & (n_bd - 1);
2585}
2586
2587/**
2588 * il_queue_dec_wrap - decrement queue idx, wrap back to end
2589 * @idx -- current idx
2590 * @n_bd -- total number of entries in queue (must be power of 2)
2591 */
2592static inline int
2593il_queue_dec_wrap(int idx, int n_bd)
2594{
2595 return --idx & (n_bd - 1);
2596}
2597
2598/* TODO: Move fw_desc functions to iwl-pci.ko */
2599static inline void
2600il_free_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
2601{
2602 if (desc->v_addr)
2603 dma_free_coherent(&pci_dev->dev, desc->len, desc->v_addr,
2604 desc->p_addr);
2605 desc->v_addr = NULL;
2606 desc->len = 0;
2607}
2608
2609static inline int
2610il_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
2611{
2612 if (!desc->len) {
2613 desc->v_addr = NULL;
2614 return -EINVAL;
2615 }
2616
2617 desc->v_addr =
2618 dma_alloc_coherent(&pci_dev->dev, desc->len, &desc->p_addr,
2619 GFP_KERNEL);
2620 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
2621}
2622
2623/*
2624 * we have 8 bits used like this:
2625 *
2626 * 7 6 5 4 3 2 1 0
2627 * | | | | | | | |
2628 * | | | | | | +-+-------- AC queue (0-3)
2629 * | | | | | |
2630 * | +-+-+-+-+------------ HW queue ID
2631 * |
2632 * +---------------------- unused
2633 */
2634static inline void
2635il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq)
2636{
2637 BUG_ON(ac > 3); /* only have 2 bits */
2638 BUG_ON(hwq > 31); /* only use 5 bits */
2639
2640 txq->swq_id = (hwq << 2) | ac;
2641}
2642
/*
 * Wake the mac80211 queue for @txq's AC once no HW queue mapped to that
 * AC remains stopped (queue_stop_count drops to zero or below).
 */
static inline void
il_wake_queue(struct il_priv *il, struct il_tx_queue *txq)
{
	u8 queue = txq->swq_id;
	u8 ac = queue & 3;	/* bits 0-1: access category */
	u8 hwq = (queue >> 2) & 0x1f;	/* bits 2-6: HW queue ID */

	if (test_and_clear_bit(hwq, il->queue_stopped))
		if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0)
			ieee80211_wake_queue(il->hw, ac);
}

/*
 * Stop the mac80211 queue for @txq's AC when one of its mapped HW
 * queues fills up (queue_stop_count rises above zero).
 */
static inline void
il_stop_queue(struct il_priv *il, struct il_tx_queue *txq)
{
	u8 queue = txq->swq_id;
	u8 ac = queue & 3;
	u8 hwq = (queue >> 2) & 0x1f;

	if (!test_and_set_bit(hwq, il->queue_stopped))
		if (atomic_inc_return(&il->queue_stop_count[ac]) > 0)
			ieee80211_stop_queue(il->hw, ac);
}
2666
/*
 * All queue flow control must go through il_wake_queue()/il_stop_queue()
 * above (which keep queue_stopped/queue_stop_count consistent), so
 * poison the raw mac80211 entry points to catch direct calls at
 * compile time.
 */
#ifdef ieee80211_stop_queue
#undef ieee80211_stop_queue
#endif

#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue

#ifdef ieee80211_wake_queue
#undef ieee80211_wake_queue
#endif

#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
2678
/*
 * Mask all interrupt sources and acknowledge anything already pending,
 * so no stale interrupt fires when they are later re-enabled.
 */
static inline void
il_disable_interrupts(struct il_priv *il)
{
	clear_bit(S_INT_ENABLED, &il->status);

	/* disable interrupts from uCode/NIC to host */
	_il_wr(il, CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	_il_wr(il, CSR_INT, 0xffffffff);
	_il_wr(il, CSR_FH_INT_STATUS, 0xffffffff);
}

/* Leave only the RF-kill interrupt enabled (used while the radio is off). */
static inline void
il_enable_rfkill_int(struct il_priv *il)
{
	_il_wr(il, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
}

/* Re-enable the interrupt sources recorded in il->inta_mask. */
static inline void
il_enable_interrupts(struct il_priv *il)
{
	set_bit(S_INT_ENABLED, &il->status);
	_il_wr(il, CSR_INT_MASK, il->inta_mask);
}
2705
2706/**
2707 * il_beacon_time_mask_low - mask of lower 32 bit of beacon time
2708 * @il -- pointer to il_priv data structure
2709 * @tsf_bits -- number of bits need to shift for masking)
2710 */
2711static inline u32
2712il_beacon_time_mask_low(struct il_priv *il, u16 tsf_bits)
2713{
2714 return (1 << tsf_bits) - 1;
2715}
2716
2717/**
2718 * il_beacon_time_mask_high - mask of higher 32 bit of beacon time
2719 * @il -- pointer to il_priv data structure
2720 * @tsf_bits -- number of bits need to shift for masking)
2721 */
2722static inline u32
2723il_beacon_time_mask_high(struct il_priv *il, u16 tsf_bits)
2724{
2725 return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
2726}
2727
2728/**
2729 * struct il_rb_status - reseve buffer status host memory mapped FH registers
2730 *
2731 * @closed_rb_num [0:11] - Indicates the idx of the RB which was closed
2732 * @closed_fr_num [0:11] - Indicates the idx of the RX Frame which was closed
2733 * @finished_rb_num [0:11] - Indicates the idx of the current RB
2734 * in which the last frame was written to
2735 * @finished_fr_num [0:11] - Indicates the idx of the RX Frame
2736 * which was transferred
2737 */
2738struct il_rb_status {
2739 __le16 closed_rb_num;
2740 __le16 closed_fr_num;
2741 __le16 finished_rb_num;
2742 __le16 finished_fr_nam;
2743 __le32 __unused; /* 3945 only */
2744} __packed;
2745
2746#define TFD_QUEUE_SIZE_MAX (256)
2747#define TFD_QUEUE_SIZE_BC_DUP (64)
2748#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
2749#define IL_TX_DMA_MASK DMA_BIT_MASK(36)
2750#define IL_NUM_OF_TBS 20
2751
2752static inline u8
2753il_get_dma_hi_addr(dma_addr_t addr)
2754{
2755 return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
2756}
2757
2758/**
2759 * struct il_tfd_tb transmit buffer descriptor within transmit frame descriptor
2760 *
2761 * This structure contains dma address and length of transmission address
2762 *
2763 * @lo: low [31:0] portion of the dma address of TX buffer every even is
2764 * unaligned on 16 bit boundary
2765 * @hi_n_len: 0-3 [35:32] portion of dma
2766 * 4-15 length of the tx buffer
2767 */
2768struct il_tfd_tb {
2769 __le32 lo;
2770 __le16 hi_n_len;
2771} __packed;
2772
2773/**
2774 * struct il_tfd
2775 *
2776 * Transmit Frame Descriptor (TFD)
2777 *
2778 * @ __reserved1[3] reserved
2779 * @ num_tbs 0-4 number of active tbs
2780 * 5 reserved
2781 * 6-7 padding (not used)
2782 * @ tbs[20] transmit frame buffer descriptors
2783 * @ __pad padding
2784 *
2785 * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
2786 * Both driver and device share these circular buffers, each of which must be
2787 * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
2788 *
2789 * Driver must indicate the physical address of the base of each
2790 * circular buffer via the FH49_MEM_CBBC_QUEUE registers.
2791 *
2792 * Each TFD contains pointer/size information for up to 20 data buffers
2793 * in host DRAM. These buffers collectively contain the (one) frame described
2794 * by the TFD. Each buffer must be a single contiguous block of memory within
2795 * itself, but buffers may be scattered in host DRAM. Each buffer has max size
2796 * of (4K - 4). The concatenates all of a TFD's buffers into a single
2797 * Tx frame, up to 8 KBytes in size.
2798 *
2799 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
2800 */
2801struct il_tfd {
2802 u8 __reserved1[3];
2803 u8 num_tbs;
2804 struct il_tfd_tb tbs[IL_NUM_OF_TBS];
2805 __le32 __pad;
2806} __packed;
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041

/* PCI register values */
#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02

/* Per-rate bookkeeping used by the rate-scaling algorithm. */
struct il_rate_info {
	u8 plcp;		/* uCode API:  RATE_6M_PLCP, etc. */
	u8 plcp_siso;		/* uCode API:  RATE_SISO_6M_PLCP, etc. */
	u8 plcp_mimo2;		/* uCode API:  RATE_MIMO2_6M_PLCP, etc. */
	u8 ieee;		/* MAC header:  RATE_6M_IEEE, etc. */
	u8 prev_ieee;		/* previous rate in IEEE speeds */
	u8 next_ieee;		/* next rate in IEEE speeds */
	u8 prev_rs;		/* previous rate used in rs algo */
	u8 next_rs;		/* next rate used in rs algo */
	u8 prev_rs_tgg;		/* previous rate used in TGG rs algo */
	u8 next_rs_tgg;		/* next rate used in TGG rs algo */
};

/* 3945 variant: no HT fields, but carries rate-scale-table indices. */
struct il3945_rate_info {
	u8 plcp;		/* uCode API:  RATE_6M_PLCP, etc. */
	u8 ieee;		/* MAC header:  RATE_6M_IEEE, etc. */
	u8 prev_ieee;		/* previous rate in IEEE speeds */
	u8 next_ieee;		/* next rate in IEEE speeds */
	u8 prev_rs;		/* previous rate used in rs algo */
	u8 next_rs;		/* next rate used in rs algo */
	u8 prev_rs_tgg;		/* previous rate used in TGG rs algo */
	u8 next_rs_tgg;		/* next rate used in TGG rs algo */
	u8 table_rs_idx;	/* idx in rate scale table cmd */
	u8 prev_table_rs;	/* prev in rate table cmd */
};

/*
 * These serve as idxes into
 * struct il_rate_info il_rates[RATE_COUNT];
 */
enum {
	RATE_1M_IDX = 0,
	RATE_2M_IDX,
	RATE_5M_IDX,
	RATE_11M_IDX,
	RATE_6M_IDX,
	RATE_9M_IDX,
	RATE_12M_IDX,
	RATE_18M_IDX,
	RATE_24M_IDX,
	RATE_36M_IDX,
	RATE_48M_IDX,
	RATE_54M_IDX,
	RATE_60M_IDX,
	RATE_COUNT,
	RATE_COUNT_LEGACY = RATE_COUNT - 1,	/* Excluding 60M */
	RATE_COUNT_3945 = RATE_COUNT - 1,
	RATE_INVM_IDX = RATE_COUNT,
	RATE_INVALID = RATE_COUNT,
};

/* Ordering used by the uCode rate-scale table (OFDM first, then CCK). */
enum {
	RATE_6M_IDX_TBL = 0,
	RATE_9M_IDX_TBL,
	RATE_12M_IDX_TBL,
	RATE_18M_IDX_TBL,
	RATE_24M_IDX_TBL,
	RATE_36M_IDX_TBL,
	RATE_48M_IDX_TBL,
	RATE_54M_IDX_TBL,
	RATE_1M_IDX_TBL,
	RATE_2M_IDX_TBL,
	RATE_5M_IDX_TBL,
	RATE_11M_IDX_TBL,
	RATE_INVM_IDX_TBL = RATE_INVM_IDX - 1,
};

/* First/last indices of the OFDM and CCK ranges within il_rates[]. */
enum {
	IL_FIRST_OFDM_RATE = RATE_6M_IDX,
	IL39_LAST_OFDM_RATE = RATE_54M_IDX,
	IL_LAST_OFDM_RATE = RATE_60M_IDX,
	IL_FIRST_CCK_RATE = RATE_1M_IDX,
	IL_LAST_CCK_RATE = RATE_11M_IDX,
};
2888
2889/* #define vs. enum to keep from defaulting to 'large integer' */
2890#define RATE_6M_MASK (1 << RATE_6M_IDX)
2891#define RATE_9M_MASK (1 << RATE_9M_IDX)
2892#define RATE_12M_MASK (1 << RATE_12M_IDX)
2893#define RATE_18M_MASK (1 << RATE_18M_IDX)
2894#define RATE_24M_MASK (1 << RATE_24M_IDX)
2895#define RATE_36M_MASK (1 << RATE_36M_IDX)
2896#define RATE_48M_MASK (1 << RATE_48M_IDX)
2897#define RATE_54M_MASK (1 << RATE_54M_IDX)
2898#define RATE_60M_MASK (1 << RATE_60M_IDX)
2899#define RATE_1M_MASK (1 << RATE_1M_IDX)
2900#define RATE_2M_MASK (1 << RATE_2M_IDX)
2901#define RATE_5M_MASK (1 << RATE_5M_IDX)
2902#define RATE_11M_MASK (1 << RATE_11M_IDX)
2903
2904/* uCode API values for legacy bit rates, both OFDM and CCK */
2905enum {
2906 RATE_6M_PLCP = 13,
2907 RATE_9M_PLCP = 15,
2908 RATE_12M_PLCP = 5,
2909 RATE_18M_PLCP = 7,
2910 RATE_24M_PLCP = 9,
2911 RATE_36M_PLCP = 11,
2912 RATE_48M_PLCP = 1,
2913 RATE_54M_PLCP = 3,
2914 RATE_60M_PLCP = 3, /*FIXME:RS:should be removed */
2915 RATE_1M_PLCP = 10,
2916 RATE_2M_PLCP = 20,
2917 RATE_5M_PLCP = 55,
2918 RATE_11M_PLCP = 110,
2919 /*FIXME:RS:add RATE_LEGACY_INVM_PLCP = 0, */
2920};
2921
2922/* uCode API values for OFDM high-throughput (HT) bit rates */
2923enum {
2924 RATE_SISO_6M_PLCP = 0,
2925 RATE_SISO_12M_PLCP = 1,
2926 RATE_SISO_18M_PLCP = 2,
2927 RATE_SISO_24M_PLCP = 3,
2928 RATE_SISO_36M_PLCP = 4,
2929 RATE_SISO_48M_PLCP = 5,
2930 RATE_SISO_54M_PLCP = 6,
2931 RATE_SISO_60M_PLCP = 7,
2932 RATE_MIMO2_6M_PLCP = 0x8,
2933 RATE_MIMO2_12M_PLCP = 0x9,
2934 RATE_MIMO2_18M_PLCP = 0xa,
2935 RATE_MIMO2_24M_PLCP = 0xb,
2936 RATE_MIMO2_36M_PLCP = 0xc,
2937 RATE_MIMO2_48M_PLCP = 0xd,
2938 RATE_MIMO2_54M_PLCP = 0xe,
2939 RATE_MIMO2_60M_PLCP = 0xf,
2940 RATE_SISO_INVM_PLCP,
2941 RATE_MIMO2_INVM_PLCP = RATE_SISO_INVM_PLCP,
2942};
2943
2944/* MAC header values for bit rates */
2945enum {
2946 RATE_6M_IEEE = 12,
2947 RATE_9M_IEEE = 18,
2948 RATE_12M_IEEE = 24,
2949 RATE_18M_IEEE = 36,
2950 RATE_24M_IEEE = 48,
2951 RATE_36M_IEEE = 72,
2952 RATE_48M_IEEE = 96,
2953 RATE_54M_IEEE = 108,
2954 RATE_60M_IEEE = 120,
2955 RATE_1M_IEEE = 2,
2956 RATE_2M_IEEE = 4,
2957 RATE_5M_IEEE = 11,
2958 RATE_11M_IEEE = 22,
2959};
2960
2961#define IL_CCK_BASIC_RATES_MASK \
2962 (RATE_1M_MASK | \
2963 RATE_2M_MASK)
2964
2965#define IL_CCK_RATES_MASK \
2966 (IL_CCK_BASIC_RATES_MASK | \
2967 RATE_5M_MASK | \
2968 RATE_11M_MASK)
2969
2970#define IL_OFDM_BASIC_RATES_MASK \
2971 (RATE_6M_MASK | \
2972 RATE_12M_MASK | \
2973 RATE_24M_MASK)
2974
2975#define IL_OFDM_RATES_MASK \
2976 (IL_OFDM_BASIC_RATES_MASK | \
2977 RATE_9M_MASK | \
2978 RATE_18M_MASK | \
2979 RATE_36M_MASK | \
2980 RATE_48M_MASK | \
2981 RATE_54M_MASK)
2982
2983#define IL_BASIC_RATES_MASK \
2984 (IL_OFDM_BASIC_RATES_MASK | \
2985 IL_CCK_BASIC_RATES_MASK)
2986
2987#define RATES_MASK ((1 << RATE_COUNT) - 1)
2988#define RATES_MASK_3945 ((1 << RATE_COUNT_3945) - 1)
2989
2990#define IL_INVALID_VALUE -1
2991
2992#define IL_MIN_RSSI_VAL -100
2993#define IL_MAX_RSSI_VAL 0
2994
2995/* These values specify how many Tx frame attempts before
2996 * searching for a new modulation mode */
2997#define IL_LEGACY_FAILURE_LIMIT 160
2998#define IL_LEGACY_SUCCESS_LIMIT 480
2999#define IL_LEGACY_TBL_COUNT 160
3000
3001#define IL_NONE_LEGACY_FAILURE_LIMIT 400
3002#define IL_NONE_LEGACY_SUCCESS_LIMIT 4500
3003#define IL_NONE_LEGACY_TBL_COUNT 1500
3004
3005/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
3006#define IL_RS_GOOD_RATIO 12800 /* 100% */
3007#define RATE_SCALE_SWITCH 10880 /* 85% */
3008#define RATE_HIGH_TH 10880 /* 85% */
3009#define RATE_INCREASE_TH 6400 /* 50% */
3010#define RATE_DECREASE_TH 1920 /* 15% */
3011
3012/* possible actions when in legacy mode */
3013#define IL_LEGACY_SWITCH_ANTENNA1 0
3014#define IL_LEGACY_SWITCH_ANTENNA2 1
3015#define IL_LEGACY_SWITCH_SISO 2
3016#define IL_LEGACY_SWITCH_MIMO2_AB 3
3017#define IL_LEGACY_SWITCH_MIMO2_AC 4
3018#define IL_LEGACY_SWITCH_MIMO2_BC 5
3019
3020/* possible actions when in siso mode */
3021#define IL_SISO_SWITCH_ANTENNA1 0
3022#define IL_SISO_SWITCH_ANTENNA2 1
3023#define IL_SISO_SWITCH_MIMO2_AB 2
3024#define IL_SISO_SWITCH_MIMO2_AC 3
3025#define IL_SISO_SWITCH_MIMO2_BC 4
3026#define IL_SISO_SWITCH_GI 5
3027
3028/* possible actions when in mimo mode */
3029#define IL_MIMO2_SWITCH_ANTENNA1 0
3030#define IL_MIMO2_SWITCH_ANTENNA2 1
3031#define IL_MIMO2_SWITCH_SISO_A 2
3032#define IL_MIMO2_SWITCH_SISO_B 3
3033#define IL_MIMO2_SWITCH_SISO_C 4
3034#define IL_MIMO2_SWITCH_GI 5
3035
3036#define IL_MAX_SEARCH IL_MIMO2_SWITCH_GI
3037
3038#define IL_ACTION_LIMIT 3 /* # possible actions */
3039
3040#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
3041
3042/* load per tid defines for A-MPDU activation */
3043#define IL_AGG_TPT_THREHOLD 0
3044#define IL_AGG_LOAD_THRESHOLD 10
3045#define IL_AGG_ALL_TID 0xff
3046#define TID_QUEUE_CELL_SPACING 50 /*mS */
3047#define TID_QUEUE_MAX_SIZE 20
3048#define TID_ROUND_VALUE 5 /* mS */
3049#define TID_MAX_LOAD_COUNT 8
3050
3051#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
3052#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
3053
3054extern const struct il_rate_info il_rates[RATE_COUNT];
3055
/* Modulation classes tracked by the rate-scaling state machine. */
enum il_table_type {
	LQ_NONE,
	LQ_G,			/* legacy types */
	LQ_A,
	LQ_SISO,		/* high-throughput types */
	LQ_MIMO2,
	LQ_MAX,
};
3064
3065#define is_legacy(tbl) ((tbl) == LQ_G || (tbl) == LQ_A)
3066#define is_siso(tbl) ((tbl) == LQ_SISO)
3067#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
3068#define is_mimo(tbl) (is_mimo2(tbl))
3069#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
3070#define is_a_band(tbl) ((tbl) == LQ_A)
3071#define is_g_and(tbl) ((tbl) == LQ_G)
3072
3073#define ANT_NONE 0x0
3074#define ANT_A BIT(0)
3075#define ANT_B BIT(1)
3076#define ANT_AB (ANT_A | ANT_B)
3077#define ANT_C BIT(2)
3078#define ANT_AC (ANT_A | ANT_C)
3079#define ANT_BC (ANT_B | ANT_C)
3080#define ANT_ABC (ANT_AB | ANT_C)
3081
3082#define IL_MAX_MCS_DISPLAY_SIZE 12
3083
3084struct il_rate_mcs_info {
3085 char mbps[IL_MAX_MCS_DISPLAY_SIZE];
3086 char mcs[IL_MAX_MCS_DISPLAY_SIZE];
3087};
3088
3089/**
3090 * struct il_rate_scale_data -- tx success history for one rate
3091 */
3092struct il_rate_scale_data {
3093 u64 data; /* bitmap of successful frames */
3094 s32 success_counter; /* number of frames successful */
3095 s32 success_ratio; /* per-cent * 128 */
3096 s32 counter; /* number of frames attempted */
3097 s32 average_tpt; /* success ratio * expected throughput */
3098 unsigned long stamp;
3099};
3100
3101/**
3102 * struct il_scale_tbl_info -- tx params and success history for all rates
3103 *
3104 * There are two of these in struct il_lq_sta,
3105 * one for "active", and one for "search".
3106 */
3107struct il_scale_tbl_info {
3108 enum il_table_type lq_type;
3109 u8 ant_type;
3110 u8 is_SGI; /* 1 = short guard interval */
3111 u8 is_ht40; /* 1 = 40 MHz channel width */
3112 u8 is_dup; /* 1 = duplicated data streams */
3113 u8 action; /* change modulation; IL_[LEGACY/SISO/MIMO]_SWITCH_* */
3114 u8 max_search; /* maximun number of tables we can search */
3115 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
3116 u32 current_rate; /* rate_n_flags, uCode API format */
3117 struct il_rate_scale_data win[RATE_COUNT]; /* rate histories */
3118};
3119
3120struct il_traffic_load {
3121 unsigned long time_stamp; /* age of the oldest stats */
3122 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
3123 * slice */
3124 u32 total; /* total num of packets during the
3125 * last TID_MAX_TIME_DIFF */
3126 u8 queue_count; /* number of queues that has
3127 * been used since the last cleanup */
3128 u8 head; /* start of the circular buffer */
3129};
3130
3131/**
3132 * struct il_lq_sta -- driver's rate scaling ilate structure
3133 *
3134 * Pointer to this gets passed back and forth between driver and mac80211.
3135 */
3136struct il_lq_sta {
3137 u8 active_tbl; /* idx of active table, range 0-1 */
3138 u8 enable_counter; /* indicates HT mode */
3139 u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
3140 u8 search_better_tbl; /* 1: currently trying alternate mode */
3141 s32 last_tpt;
3142
3143 /* The following determine when to search for a new mode */
3144 u32 table_count_limit;
3145 u32 max_failure_limit; /* # failed frames before new search */
3146 u32 max_success_limit; /* # successful frames before new search */
3147 u32 table_count;
3148 u32 total_failed; /* total failed frames, any/all rates */
3149 u32 total_success; /* total successful frames, any/all rates */
3150 u64 flush_timer; /* time staying in mode before new search */
3151
3152 u8 action_counter; /* # mode-switch actions tried */
3153 u8 is_green;
3154 u8 is_dup;
3155 enum ieee80211_band band;
3156
3157 /* The following are bitmaps of rates; RATE_6M_MASK, etc. */
3158 u32 supp_rates;
3159 u16 active_legacy_rate;
3160 u16 active_siso_rate;
3161 u16 active_mimo2_rate;
3162 s8 max_rate_idx; /* Max rate set by user */
3163 u8 missed_rate_counter;
3164
3165 struct il_link_quality_cmd lq;
3166 struct il_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
3167 struct il_traffic_load load[TID_MAX_LOAD_COUNT];
3168 u8 tx_agg_tid_en;
3169#ifdef CONFIG_MAC80211_DEBUGFS
3170 struct dentry *rs_sta_dbgfs_scale_table_file;
3171 struct dentry *rs_sta_dbgfs_stats_table_file;
3172 struct dentry *rs_sta_dbgfs_rate_scale_data_file;
3173 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
3174 u32 dbg_fixed_rate;
3175#endif
3176 struct il_priv *drv;
3177
3178 /* used to be in sta_info */
3179 int last_txrate_idx;
3180 /* last tx rate_n_flags */
3181 u32 last_rate_n_flags;
3182 /* packets destined for this STA are aggregated */
3183 u8 is_agg;
3184};
3185
3186/*
3187 * il_station_priv: Driver's ilate station information
3188 *
3189 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
3190 * in the structure for use by driver. This structure is places in that
3191 * space.
3192 *
3193 * The common struct MUST be first because it is shared between
3194 * 3945 and 4965!
3195 */
3196struct il_station_priv {
3197 struct il_station_priv_common common;
3198 struct il_lq_sta lq_sta;
3199 atomic_t pending_frames;
3200 bool client;
3201 bool asleep;
3202};
3203
3204static inline u8
3205il4965_num_of_ant(u8 m)
3206{
3207 return !!(m & ANT_A) + !!(m & ANT_B) + !!(m & ANT_C);
3208}
3209
3210static inline u8
3211il4965_first_antenna(u8 mask)
3212{
3213 if (mask & ANT_A)
3214 return ANT_A;
3215 if (mask & ANT_B)
3216 return ANT_B;
3217 return ANT_C;
3218}
3219
3220/**
3221 * il3945_rate_scale_init - Initialize the rate scale table based on assoc info
3222 *
 3223 * The specific throughput table used is based on the type of network
 3224 * we are associated with, including A, B, G, and G w/ TGG protection
3225 */
3226extern void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
3227
3228/* Initialize station's rate scaling information after adding station */
3229extern void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
3230 u8 sta_id);
3231extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
3232 u8 sta_id);
3233
3234/**
3235 * il_rate_control_register - Register the rate control algorithm callbacks
3236 *
3237 * Since the rate control algorithm is hardware specific, there is no need
3238 * or reason to place it as a stand alone module. The driver can call
3239 * il_rate_control_register in order to register the rate control callbacks
3240 * with the mac80211 subsystem. This should be performed prior to calling
3241 * ieee80211_register_hw
3242 *
3243 */
3244extern int il4965_rate_control_register(void);
3245extern int il3945_rate_control_register(void);
3246
3247/**
3248 * il_rate_control_unregister - Unregister the rate control callbacks
3249 *
3250 * This should be called after calling ieee80211_unregister_hw, but before
3251 * the driver is unloaded.
3252 */
3253extern void il4965_rate_control_unregister(void);
3254extern void il3945_rate_control_unregister(void);
3255
3256extern int il_power_update_mode(struct il_priv *il, bool force);
3257extern void il_power_initialize(struct il_priv *il);
3258
3259extern u32 il_debug_level;
3260
3261#ifdef CONFIG_IWLEGACY_DEBUG
3262/*
3263 * il_get_debug_level: Return active debug level for device
3264 *
3265 * Using sysfs it is possible to set per device debug level. This debug
3266 * level will be used if set, otherwise the global debug level which can be
3267 * set via module parameter is used.
3268 */
3269static inline u32
3270il_get_debug_level(struct il_priv *il)
3271{
3272 if (il->debug_level)
3273 return il->debug_level;
3274 else
3275 return il_debug_level;
3276}
3277#else
3278static inline u32
3279il_get_debug_level(struct il_priv *il)
3280{
3281 return il_debug_level;
3282}
3283#endif
3284
3285#define il_print_hex_error(il, p, len) \
3286do { \
3287 print_hex_dump(KERN_ERR, "iwl data: ", \
3288 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
3289} while (0)
3290
3291#ifdef CONFIG_IWLEGACY_DEBUG
3292#define IL_DBG(level, fmt, args...) \
3293do { \
3294 if (il_get_debug_level(il) & level) \
3295 dev_printk(KERN_ERR, &il->hw->wiphy->dev, \
3296 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
3297 __func__ , ## args); \
3298} while (0)
3299
3300#define il_print_hex_dump(il, level, p, len) \
3301do { \
3302 if (il_get_debug_level(il) & level) \
3303 print_hex_dump(KERN_DEBUG, "iwl data: ", \
3304 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
3305} while (0)
3306
3307#else
3308#define IL_DBG(level, fmt, args...)
3309static inline void
3310il_print_hex_dump(struct il_priv *il, int level, const void *p, u32 len)
3311{
3312}
3313#endif /* CONFIG_IWLEGACY_DEBUG */
3314
3315#ifdef CONFIG_IWLEGACY_DEBUGFS
3316int il_dbgfs_register(struct il_priv *il, const char *name);
3317void il_dbgfs_unregister(struct il_priv *il);
3318#else
3319static inline int
3320il_dbgfs_register(struct il_priv *il, const char *name)
3321{
3322 return 0;
3323}
3324
3325static inline void
3326il_dbgfs_unregister(struct il_priv *il)
3327{
3328}
3329#endif /* CONFIG_IWLEGACY_DEBUGFS */
3330
3331/*
3332 * To use the debug system:
3333 *
3334 * If you are defining a new debug classification, simply add it to the #define
3335 * list here in the form of
3336 *
3337 * #define IL_DL_xxxx VALUE
3338 *
3339 * where xxxx should be the name of the classification (for example, WEP).
3340 *
3341 * You then need to either add a IL_xxxx_DEBUG() macro definition for your
3342 * classification, or use IL_DBG(IL_DL_xxxx, ...) whenever you want
3343 * to send output to that classification.
3344 *
3345 * The active debug levels can be accessed via files
3346 *
3347 * /sys/module/iwl4965/parameters/debug
3348 * /sys/module/iwl3945/parameters/debug
3349 * /sys/class/net/wlan0/device/debug_level
3350 *
3351 * when CONFIG_IWLEGACY_DEBUG=y.
3352 */
3353
3354/* 0x0000000F - 0x00000001 */
3355#define IL_DL_INFO (1 << 0)
3356#define IL_DL_MAC80211 (1 << 1)
3357#define IL_DL_HCMD (1 << 2)
3358#define IL_DL_STATE (1 << 3)
3359/* 0x000000F0 - 0x00000010 */
3360#define IL_DL_MACDUMP (1 << 4)
3361#define IL_DL_HCMD_DUMP (1 << 5)
3362#define IL_DL_EEPROM (1 << 6)
3363#define IL_DL_RADIO (1 << 7)
3364/* 0x00000F00 - 0x00000100 */
3365#define IL_DL_POWER (1 << 8)
3366#define IL_DL_TEMP (1 << 9)
3367#define IL_DL_NOTIF (1 << 10)
3368#define IL_DL_SCAN (1 << 11)
3369/* 0x0000F000 - 0x00001000 */
3370#define IL_DL_ASSOC (1 << 12)
3371#define IL_DL_DROP (1 << 13)
3372#define IL_DL_TXPOWER (1 << 14)
3373#define IL_DL_AP (1 << 15)
3374/* 0x000F0000 - 0x00010000 */
3375#define IL_DL_FW (1 << 16)
3376#define IL_DL_RF_KILL (1 << 17)
3377#define IL_DL_FW_ERRORS (1 << 18)
3378#define IL_DL_LED (1 << 19)
3379/* 0x00F00000 - 0x00100000 */
3380#define IL_DL_RATE (1 << 20)
3381#define IL_DL_CALIB (1 << 21)
3382#define IL_DL_WEP (1 << 22)
3383#define IL_DL_TX (1 << 23)
3384/* 0x0F000000 - 0x01000000 */
3385#define IL_DL_RX (1 << 24)
3386#define IL_DL_ISR (1 << 25)
3387#define IL_DL_HT (1 << 26)
3388/* 0xF0000000 - 0x10000000 */
3389#define IL_DL_11H (1 << 28)
3390#define IL_DL_STATS (1 << 29)
3391#define IL_DL_TX_REPLY (1 << 30)
3392#define IL_DL_QOS (1 << 31)
3393
3394#define D_INFO(f, a...) IL_DBG(IL_DL_INFO, f, ## a)
3395#define D_MAC80211(f, a...) IL_DBG(IL_DL_MAC80211, f, ## a)
3396#define D_MACDUMP(f, a...) IL_DBG(IL_DL_MACDUMP, f, ## a)
3397#define D_TEMP(f, a...) IL_DBG(IL_DL_TEMP, f, ## a)
3398#define D_SCAN(f, a...) IL_DBG(IL_DL_SCAN, f, ## a)
3399#define D_RX(f, a...) IL_DBG(IL_DL_RX, f, ## a)
3400#define D_TX(f, a...) IL_DBG(IL_DL_TX, f, ## a)
3401#define D_ISR(f, a...) IL_DBG(IL_DL_ISR, f, ## a)
3402#define D_LED(f, a...) IL_DBG(IL_DL_LED, f, ## a)
3403#define D_WEP(f, a...) IL_DBG(IL_DL_WEP, f, ## a)
3404#define D_HC(f, a...) IL_DBG(IL_DL_HCMD, f, ## a)
3405#define D_HC_DUMP(f, a...) IL_DBG(IL_DL_HCMD_DUMP, f, ## a)
3406#define D_EEPROM(f, a...) IL_DBG(IL_DL_EEPROM, f, ## a)
3407#define D_CALIB(f, a...) IL_DBG(IL_DL_CALIB, f, ## a)
3408#define D_FW(f, a...) IL_DBG(IL_DL_FW, f, ## a)
3409#define D_RF_KILL(f, a...) IL_DBG(IL_DL_RF_KILL, f, ## a)
3410#define D_DROP(f, a...) IL_DBG(IL_DL_DROP, f, ## a)
3411#define D_AP(f, a...) IL_DBG(IL_DL_AP, f, ## a)
3412#define D_TXPOWER(f, a...) IL_DBG(IL_DL_TXPOWER, f, ## a)
3413#define D_RATE(f, a...) IL_DBG(IL_DL_RATE, f, ## a)
3414#define D_NOTIF(f, a...) IL_DBG(IL_DL_NOTIF, f, ## a)
3415#define D_ASSOC(f, a...) IL_DBG(IL_DL_ASSOC, f, ## a)
3416#define D_HT(f, a...) IL_DBG(IL_DL_HT, f, ## a)
3417#define D_STATS(f, a...) IL_DBG(IL_DL_STATS, f, ## a)
3418#define D_TX_REPLY(f, a...) IL_DBG(IL_DL_TX_REPLY, f, ## a)
3419#define D_QOS(f, a...) IL_DBG(IL_DL_QOS, f, ## a)
3420#define D_RADIO(f, a...) IL_DBG(IL_DL_RADIO, f, ## a)
3421#define D_POWER(f, a...) IL_DBG(IL_DL_POWER, f, ## a)
3422#define D_11H(f, a...) IL_DBG(IL_DL_11H, f, ## a)
3423
3424#endif /* __il_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-csr.h b/drivers/net/wireless/iwlegacy/csr.h
index 668a9616c269..9138e15004fa 100644
--- a/drivers/net/wireless/iwlegacy/iwl-csr.h
+++ b/drivers/net/wireless/iwlegacy/csr.h
@@ -60,8 +60,8 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63#ifndef __iwl_legacy_csr_h__ 63#ifndef __il_csr_h__
64#define __iwl_legacy_csr_h__ 64#define __il_csr_h__
65/* 65/*
66 * CSR (control and status registers) 66 * CSR (control and status registers)
67 * 67 *
@@ -70,9 +70,9 @@
70 * low power states due to driver-invoked device resets 70 * low power states due to driver-invoked device resets
71 * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes. 71 * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
72 * 72 *
73 * Use iwl_write32() and iwl_read32() family to access these registers; 73 * Use _il_wr() and _il_rd() family to access these registers;
74 * these provide simple PCI bus access, without waking up the MAC. 74 * these provide simple PCI bus access, without waking up the MAC.
75 * Do not use iwl_legacy_write_direct32() family for these registers; 75 * Do not use il_wr() family for these registers;
76 * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ. 76 * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
77 * The MAC (uCode processor, etc.) does not need to be powered up for accessing 77 * The MAC (uCode processor, etc.) does not need to be powered up for accessing
78 * the CSR registers. 78 * the CSR registers.
@@ -82,16 +82,16 @@
82 */ 82 */
83#define CSR_BASE (0x000) 83#define CSR_BASE (0x000)
84 84
85#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */ 85#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
86#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */ 86#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
87#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */ 87#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
88#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */ 88#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
89#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/ 89#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack */
90#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */ 90#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
91#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/ 91#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc */
92#define CSR_GP_CNTRL (CSR_BASE+0x024) 92#define CSR_GP_CNTRL (CSR_BASE+0x024)
93 93
94/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */ 94/* 2nd byte of CSR_INT_COALESCING, not accessible via _il_wr()! */
95#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005) 95#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005)
96 96
97/* 97/*
@@ -166,26 +166,26 @@
166 166
167#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000) 167#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
168#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) 168#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
169#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */ 169#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
170#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */ 170#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
171#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */ 171#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
172 172
173#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/ 173#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int */
174#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/ 174#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec */
175 175
176/* interrupt flags in INTA, set by uCode or hardware (e.g. dma), 176/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
177 * acknowledged (reset) by host writing "1" to flagged bits. */ 177 * acknowledged (reset) by host writing "1" to flagged bits. */
178#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */ 178#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
179#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */ 179#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
180#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */ 180#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
181#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */ 181#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
182#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */ 182#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
183#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */ 183#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
184#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */ 184#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
185#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */ 185#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
186#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */ 186#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */
187#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */ 187#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
188#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */ 188#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
189 189
190#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \ 190#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
191 CSR_INT_BIT_HW_ERR | \ 191 CSR_INT_BIT_HW_ERR | \
@@ -197,21 +197,20 @@
197 CSR_INT_BIT_ALIVE) 197 CSR_INT_BIT_ALIVE)
198 198
199/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */ 199/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
200#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */ 200#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
201#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */ 201#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
202#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */ 202#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */
203#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */ 203#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
204#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */ 204#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
205#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */ 205#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */
206#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */ 206#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
207#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */ 207#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
208 208
209#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \ 209#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
210 CSR39_FH_INT_BIT_RX_CHNL2 | \ 210 CSR39_FH_INT_BIT_RX_CHNL2 | \
211 CSR_FH_INT_BIT_RX_CHNL1 | \ 211 CSR_FH_INT_BIT_RX_CHNL1 | \
212 CSR_FH_INT_BIT_RX_CHNL0) 212 CSR_FH_INT_BIT_RX_CHNL0)
213 213
214
215#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \ 214#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \
216 CSR_FH_INT_BIT_TX_CHNL1 | \ 215 CSR_FH_INT_BIT_TX_CHNL1 | \
217 CSR_FH_INT_BIT_TX_CHNL0) 216 CSR_FH_INT_BIT_TX_CHNL0)
@@ -285,7 +284,6 @@
285#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000) 284#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000)
286#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) 285#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
287 286
288
289/* EEPROM REG */ 287/* EEPROM REG */
290#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) 288#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
291#define CSR_EEPROM_REG_BIT_CMD (0x00000002) 289#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
@@ -293,19 +291,18 @@
293#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000) 291#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
294 292
295/* EEPROM GP */ 293/* EEPROM GP */
296#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */ 294#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
297#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180) 295#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
298#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002) 296#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002)
299#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004) 297#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004)
300 298
301/* GP REG */ 299/* GP REG */
302#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */ 300#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
303#define CSR_GP_REG_NO_POWER_SAVE (0x00000000) 301#define CSR_GP_REG_NO_POWER_SAVE (0x00000000)
304#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000) 302#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000)
305#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000) 303#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000)
306#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000) 304#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000)
307 305
308
309/* CSR GIO */ 306/* CSR GIO */
310#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002) 307#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
311 308
@@ -357,7 +354,7 @@
357/* HPET MEM debug */ 354/* HPET MEM debug */
358#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000) 355#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
359 356
360/* DRAM INT TABLE */ 357/* DRAM INT TBL */
361#define CSR_DRAM_INT_TBL_ENABLE (1 << 31) 358#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
362#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27) 359#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
363 360
@@ -368,13 +365,13 @@
368 * to indirectly access device's internal memory or registers that 365 * to indirectly access device's internal memory or registers that
369 * may be powered-down. 366 * may be powered-down.
370 * 367 *
371 * Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family 368 * Use il_wr()/il_rd() family
372 * for these registers; 369 * for these registers;
373 * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ 370 * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
374 * to make sure the MAC (uCode processor, etc.) is powered up for accessing 371 * to make sure the MAC (uCode processor, etc.) is powered up for accessing
375 * internal resources. 372 * internal resources.
376 * 373 *
377 * Do not use iwl_write32()/iwl_read32() family to access these registers; 374 * Do not use _il_wr()/_il_rd() family to access these registers;
378 * these provide only simple PCI bus access, without waking up the MAC. 375 * these provide only simple PCI bus access, without waking up the MAC.
379 */ 376 */
380#define HBUS_BASE (0x400) 377#define HBUS_BASE (0x400)
@@ -411,12 +408,12 @@
411#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050) 408#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
412 409
413/* 410/*
414 * Per-Tx-queue write pointer (index, really!) 411 * Per-Tx-queue write pointer (idx, really!)
415 * Indicates index to next TFD that driver will fill (1 past latest filled). 412 * Indicates idx to next TFD that driver will fill (1 past latest filled).
416 * Bit usage: 413 * Bit usage:
417 * 0-7: queue write index 414 * 0-7: queue write idx
418 * 11-8: queue selector 415 * 11-8: queue selector
419 */ 416 */
420#define HBUS_TARG_WRPTR (HBUS_BASE+0x060) 417#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
421 418
422#endif /* !__iwl_legacy_csr_h__ */ 419#endif /* !__il_csr_h__ */
diff --git a/drivers/net/wireless/iwlegacy/debug.c b/drivers/net/wireless/iwlegacy/debug.c
new file mode 100644
index 000000000000..b1b8926a9c7b
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/debug.c
@@ -0,0 +1,1411 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/ieee80211.h>
29#include <linux/export.h>
30#include <net/mac80211.h>
31
32#include "common.h"
33
34/* create and remove of files */
35#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
36 if (!debugfs_create_file(#name, mode, parent, il, \
37 &il_dbgfs_##name##_ops)) \
38 goto err; \
39} while (0)
40
41#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
42 struct dentry *__tmp; \
43 __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
44 parent, ptr); \
45 if (IS_ERR(__tmp) || !__tmp) \
46 goto err; \
47} while (0)
48
49#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
50 struct dentry *__tmp; \
51 __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
52 parent, ptr); \
53 if (IS_ERR(__tmp) || !__tmp) \
54 goto err; \
55} while (0)
56
57/* file operation */
58#define DEBUGFS_READ_FUNC(name) \
59static ssize_t il_dbgfs_##name##_read(struct file *file, \
60 char __user *user_buf, \
61 size_t count, loff_t *ppos);
62
63#define DEBUGFS_WRITE_FUNC(name) \
64static ssize_t il_dbgfs_##name##_write(struct file *file, \
65 const char __user *user_buf, \
66 size_t count, loff_t *ppos);
67
68static int
69il_dbgfs_open_file_generic(struct inode *inode, struct file *file)
70{
71 file->private_data = inode->i_private;
72 return 0;
73}
74
75#define DEBUGFS_READ_FILE_OPS(name) \
76 DEBUGFS_READ_FUNC(name); \
77static const struct file_operations il_dbgfs_##name##_ops = { \
78 .read = il_dbgfs_##name##_read, \
79 .open = il_dbgfs_open_file_generic, \
80 .llseek = generic_file_llseek, \
81};
82
83#define DEBUGFS_WRITE_FILE_OPS(name) \
84 DEBUGFS_WRITE_FUNC(name); \
85static const struct file_operations il_dbgfs_##name##_ops = { \
86 .write = il_dbgfs_##name##_write, \
87 .open = il_dbgfs_open_file_generic, \
88 .llseek = generic_file_llseek, \
89};
90
91#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
92 DEBUGFS_READ_FUNC(name); \
93 DEBUGFS_WRITE_FUNC(name); \
94static const struct file_operations il_dbgfs_##name##_ops = { \
95 .write = il_dbgfs_##name##_write, \
96 .read = il_dbgfs_##name##_read, \
97 .open = il_dbgfs_open_file_generic, \
98 .llseek = generic_file_llseek, \
99};
100
101static ssize_t
102il_dbgfs_tx_stats_read(struct file *file, char __user *user_buf, size_t count,
103 loff_t *ppos)
104{
105
106 struct il_priv *il = file->private_data;
107 char *buf;
108 int pos = 0;
109
110 int cnt;
111 ssize_t ret;
112 const size_t bufsz =
113 100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
114 buf = kzalloc(bufsz, GFP_KERNEL);
115 if (!buf)
116 return -ENOMEM;
117 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
118 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
119 pos +=
120 scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
121 il_get_mgmt_string(cnt), il->tx_stats.mgmt[cnt]);
122 }
123 pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
124 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
125 pos +=
126 scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
127 il_get_ctrl_string(cnt), il->tx_stats.ctrl[cnt]);
128 }
129 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
130 pos +=
131 scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
132 il->tx_stats.data_cnt);
133 pos +=
134 scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
135 il->tx_stats.data_bytes);
136 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
137 kfree(buf);
138 return ret;
139}
140
141static ssize_t
142il_dbgfs_clear_traffic_stats_write(struct file *file,
143 const char __user *user_buf, size_t count,
144 loff_t *ppos)
145{
146 struct il_priv *il = file->private_data;
147 u32 clear_flag;
148 char buf[8];
149 int buf_size;
150
151 memset(buf, 0, sizeof(buf));
152 buf_size = min(count, sizeof(buf) - 1);
153 if (copy_from_user(buf, user_buf, buf_size))
154 return -EFAULT;
155 if (sscanf(buf, "%x", &clear_flag) != 1)
156 return -EFAULT;
157 il_clear_traffic_stats(il);
158
159 return count;
160}
161
162static ssize_t
163il_dbgfs_rx_stats_read(struct file *file, char __user *user_buf, size_t count,
164 loff_t *ppos)
165{
166
167 struct il_priv *il = file->private_data;
168 char *buf;
169 int pos = 0;
170 int cnt;
171 ssize_t ret;
172 const size_t bufsz =
173 100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
174 buf = kzalloc(bufsz, GFP_KERNEL);
175 if (!buf)
176 return -ENOMEM;
177
178 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
179 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
180 pos +=
181 scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
182 il_get_mgmt_string(cnt), il->rx_stats.mgmt[cnt]);
183 }
184 pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
185 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
186 pos +=
187 scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
188 il_get_ctrl_string(cnt), il->rx_stats.ctrl[cnt]);
189 }
190 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
191 pos +=
192 scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
193 il->rx_stats.data_cnt);
194 pos +=
195 scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
196 il->rx_stats.data_bytes);
197
198 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
199 kfree(buf);
200 return ret;
201}
202
203#define BYTE1_MASK 0x000000ff;
204#define BYTE2_MASK 0x0000ffff;
205#define BYTE3_MASK 0x00ffffff;
206static ssize_t
207il_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count,
208 loff_t *ppos)
209{
210 u32 val;
211 char *buf;
212 ssize_t ret;
213 int i;
214 int pos = 0;
215 struct il_priv *il = file->private_data;
216 size_t bufsz;
217
218 /* default is to dump the entire data segment */
219 if (!il->dbgfs_sram_offset && !il->dbgfs_sram_len) {
220 il->dbgfs_sram_offset = 0x800000;
221 if (il->ucode_type == UCODE_INIT)
222 il->dbgfs_sram_len = il->ucode_init_data.len;
223 else
224 il->dbgfs_sram_len = il->ucode_data.len;
225 }
226 bufsz = 30 + il->dbgfs_sram_len * sizeof(char) * 10;
227 buf = kmalloc(bufsz, GFP_KERNEL);
228 if (!buf)
229 return -ENOMEM;
230 pos +=
231 scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
232 il->dbgfs_sram_len);
233 pos +=
234 scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
235 il->dbgfs_sram_offset);
236 for (i = il->dbgfs_sram_len; i > 0; i -= 4) {
237 val =
238 il_read_targ_mem(il,
239 il->dbgfs_sram_offset +
240 il->dbgfs_sram_len - i);
241 if (i < 4) {
242 switch (i) {
243 case 1:
244 val &= BYTE1_MASK;
245 break;
246 case 2:
247 val &= BYTE2_MASK;
248 break;
249 case 3:
250 val &= BYTE3_MASK;
251 break;
252 }
253 }
254 if (!(i % 16))
255 pos += scnprintf(buf + pos, bufsz - pos, "\n");
256 pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
257 }
258 pos += scnprintf(buf + pos, bufsz - pos, "\n");
259
260 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
261 kfree(buf);
262 return ret;
263}
264
265static ssize_t
266il_dbgfs_sram_write(struct file *file, const char __user *user_buf,
267 size_t count, loff_t *ppos)
268{
269 struct il_priv *il = file->private_data;
270 char buf[64];
271 int buf_size;
272 u32 offset, len;
273
274 memset(buf, 0, sizeof(buf));
275 buf_size = min(count, sizeof(buf) - 1);
276 if (copy_from_user(buf, user_buf, buf_size))
277 return -EFAULT;
278
279 if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
280 il->dbgfs_sram_offset = offset;
281 il->dbgfs_sram_len = len;
282 } else {
283 il->dbgfs_sram_offset = 0;
284 il->dbgfs_sram_len = 0;
285 }
286
287 return count;
288}
289
static ssize_t
il_dbgfs_stations_read(struct file *file, char __user *user_buf, size_t count,
		       loff_t *ppos)
{
	/*
	 * Dump every active entry of the station table: MAC address,
	 * flags mask, and per-TID sequence/aggregation state.
	 */
	struct il_priv *il = file->private_data;
	struct il_station_entry *station;
	int max_sta = il->hw_params.max_stations;
	char *buf;
	int i, j, pos = 0;
	ssize_t ret;
	/* Add 30 for initial string */
	/* NOTE(review): buffer is sized from num_stations but the loop
	 * below walks all max_stations slots; scnprintf() clamps every
	 * write to bufsz, so a mismatch can only truncate output, not
	 * overflow — confirm num_stations tracks used slots. */
	const size_t bufsz = 30 + sizeof(char) * 500 * (il->num_stations);

	buf = kmalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos +=
	    scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
		      il->num_stations);

	for (i = 0; i < max_sta; i++) {
		station = &il->stations[i];
		if (!station->used)	/* skip empty table slots */
			continue;
		pos +=
		    scnprintf(buf + pos, bufsz - pos,
			      "station %d - addr: %pM, flags: %#x\n", i,
			      station->sta.sta.addr,
			      station->sta.station_flags_msk);
		pos +=
		    scnprintf(buf + pos, bufsz - pos,
			      "TID\tseq_num\ttxq_id\tframes\ttfds\t");
		pos +=
		    scnprintf(buf + pos, bufsz - pos,
			      "start_idx\tbitmap\t\t\trate_n_flags\n");

		/* one row per traffic identifier */
		for (j = 0; j < MAX_TID_COUNT; j++) {
			pos +=
			    scnprintf(buf + pos, bufsz - pos,
				      "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
				      j, station->tid[j].seq_number,
				      station->tid[j].agg.txq_id,
				      station->tid[j].agg.frame_count,
				      station->tid[j].tfds_in_queue,
				      station->tid[j].agg.start_idx,
				      station->tid[j].agg.bitmap,
				      station->tid[j].agg.rate_n_flags);

			/* flag TIDs still waiting for a block-ack */
			if (station->tid[j].agg.wait_for_ba)
				pos +=
				    scnprintf(buf + pos, bufsz - pos,
					      " - waitforba");
			pos += scnprintf(buf + pos, bufsz - pos, "\n");
		}

		pos += scnprintf(buf + pos, bufsz - pos, "\n");
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
353
354static ssize_t
355il_dbgfs_nvm_read(struct file *file, char __user *user_buf, size_t count,
356 loff_t *ppos)
357{
358 ssize_t ret;
359 struct il_priv *il = file->private_data;
360 int pos = 0, ofs = 0, buf_size = 0;
361 const u8 *ptr;
362 char *buf;
363 u16 eeprom_ver;
364 size_t eeprom_len = il->cfg->base_params->eeprom_size;
365 buf_size = 4 * eeprom_len + 256;
366
367 if (eeprom_len % 16) {
368 IL_ERR("NVM size is not multiple of 16.\n");
369 return -ENODATA;
370 }
371
372 ptr = il->eeprom;
373 if (!ptr) {
374 IL_ERR("Invalid EEPROM memory\n");
375 return -ENOMEM;
376 }
377
378 /* 4 characters for byte 0xYY */
379 buf = kzalloc(buf_size, GFP_KERNEL);
380 if (!buf) {
381 IL_ERR("Can not allocate Buffer\n");
382 return -ENOMEM;
383 }
384 eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION);
385 pos +=
386 scnprintf(buf + pos, buf_size - pos, "EEPROM " "version: 0x%x\n",
387 eeprom_ver);
388 for (ofs = 0; ofs < eeprom_len; ofs += 16) {
389 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
390 hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos,
391 buf_size - pos, 0);
392 pos += strlen(buf + pos);
393 if (buf_size - pos > 0)
394 buf[pos++] = '\n';
395 }
396
397 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
398 kfree(buf);
399 return ret;
400}
401
402static ssize_t
403il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
404 loff_t *ppos)
405{
406 struct il_priv *il = file->private_data;
407 struct ieee80211_channel *channels = NULL;
408 const struct ieee80211_supported_band *supp_band = NULL;
409 int pos = 0, i, bufsz = PAGE_SIZE;
410 char *buf;
411 ssize_t ret;
412
413 if (!test_bit(S_GEO_CONFIGURED, &il->status))
414 return -EAGAIN;
415
416 buf = kzalloc(bufsz, GFP_KERNEL);
417 if (!buf) {
418 IL_ERR("Can not allocate Buffer\n");
419 return -ENOMEM;
420 }
421
422 supp_band = il_get_hw_mode(il, IEEE80211_BAND_2GHZ);
423 if (supp_band) {
424 channels = supp_band->channels;
425
426 pos +=
427 scnprintf(buf + pos, bufsz - pos,
428 "Displaying %d channels in 2.4GHz band 802.11bg):\n",
429 supp_band->n_channels);
430
431 for (i = 0; i < supp_band->n_channels; i++)
432 pos +=
433 scnprintf(buf + pos, bufsz - pos,
434 "%d: %ddBm: BSS%s%s, %s.\n",
435 channels[i].hw_value,
436 channels[i].max_power,
437 channels[i].
438 flags & IEEE80211_CHAN_RADAR ?
439 " (IEEE 802.11h required)" : "",
440 ((channels[i].
441 flags & IEEE80211_CHAN_NO_IBSS) ||
442 (channels[i].
443 flags & IEEE80211_CHAN_RADAR)) ? "" :
444 ", IBSS",
445 channels[i].
446 flags & IEEE80211_CHAN_PASSIVE_SCAN ?
447 "passive only" : "active/passive");
448 }
449 supp_band = il_get_hw_mode(il, IEEE80211_BAND_5GHZ);
450 if (supp_band) {
451 channels = supp_band->channels;
452
453 pos +=
454 scnprintf(buf + pos, bufsz - pos,
455 "Displaying %d channels in 5.2GHz band (802.11a)\n",
456 supp_band->n_channels);
457
458 for (i = 0; i < supp_band->n_channels; i++)
459 pos +=
460 scnprintf(buf + pos, bufsz - pos,
461 "%d: %ddBm: BSS%s%s, %s.\n",
462 channels[i].hw_value,
463 channels[i].max_power,
464 channels[i].
465 flags & IEEE80211_CHAN_RADAR ?
466 " (IEEE 802.11h required)" : "",
467 ((channels[i].
468 flags & IEEE80211_CHAN_NO_IBSS) ||
469 (channels[i].
470 flags & IEEE80211_CHAN_RADAR)) ? "" :
471 ", IBSS",
472 channels[i].
473 flags & IEEE80211_CHAN_PASSIVE_SCAN ?
474 "passive only" : "active/passive");
475 }
476 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
477 kfree(buf);
478 return ret;
479}
480
481static ssize_t
482il_dbgfs_status_read(struct file *file, char __user *user_buf, size_t count,
483 loff_t *ppos)
484{
485
486 struct il_priv *il = file->private_data;
487 char buf[512];
488 int pos = 0;
489 const size_t bufsz = sizeof(buf);
490
491 pos +=
492 scnprintf(buf + pos, bufsz - pos, "S_HCMD_ACTIVE:\t %d\n",
493 test_bit(S_HCMD_ACTIVE, &il->status));
494 pos +=
495 scnprintf(buf + pos, bufsz - pos, "S_INT_ENABLED:\t %d\n",
496 test_bit(S_INT_ENABLED, &il->status));
497 pos +=
498 scnprintf(buf + pos, bufsz - pos, "S_RF_KILL_HW:\t %d\n",
499 test_bit(S_RF_KILL_HW, &il->status));
500 pos +=
501 scnprintf(buf + pos, bufsz - pos, "S_CT_KILL:\t\t %d\n",
502 test_bit(S_CT_KILL, &il->status));
503 pos +=
504 scnprintf(buf + pos, bufsz - pos, "S_INIT:\t\t %d\n",
505 test_bit(S_INIT, &il->status));
506 pos +=
507 scnprintf(buf + pos, bufsz - pos, "S_ALIVE:\t\t %d\n",
508 test_bit(S_ALIVE, &il->status));
509 pos +=
510 scnprintf(buf + pos, bufsz - pos, "S_READY:\t\t %d\n",
511 test_bit(S_READY, &il->status));
512 pos +=
513 scnprintf(buf + pos, bufsz - pos, "S_TEMPERATURE:\t %d\n",
514 test_bit(S_TEMPERATURE, &il->status));
515 pos +=
516 scnprintf(buf + pos, bufsz - pos, "S_GEO_CONFIGURED:\t %d\n",
517 test_bit(S_GEO_CONFIGURED, &il->status));
518 pos +=
519 scnprintf(buf + pos, bufsz - pos, "S_EXIT_PENDING:\t %d\n",
520 test_bit(S_EXIT_PENDING, &il->status));
521 pos +=
522 scnprintf(buf + pos, bufsz - pos, "S_STATS:\t %d\n",
523 test_bit(S_STATS, &il->status));
524 pos +=
525 scnprintf(buf + pos, bufsz - pos, "S_SCANNING:\t %d\n",
526 test_bit(S_SCANNING, &il->status));
527 pos +=
528 scnprintf(buf + pos, bufsz - pos, "S_SCAN_ABORTING:\t %d\n",
529 test_bit(S_SCAN_ABORTING, &il->status));
530 pos +=
531 scnprintf(buf + pos, bufsz - pos, "S_SCAN_HW:\t\t %d\n",
532 test_bit(S_SCAN_HW, &il->status));
533 pos +=
534 scnprintf(buf + pos, bufsz - pos, "S_POWER_PMI:\t %d\n",
535 test_bit(S_POWER_PMI, &il->status));
536 pos +=
537 scnprintf(buf + pos, bufsz - pos, "S_FW_ERROR:\t %d\n",
538 test_bit(S_FW_ERROR, &il->status));
539 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
540}
541
static ssize_t
il_dbgfs_interrupt_read(struct file *file, char __user *user_buf, size_t count,
			loff_t *ppos)
{
	/*
	 * Report the accumulated interrupt-cause counters in
	 * il->isr_stats: HW/SW errors, rfkill/ctkill/wakeup events,
	 * rx and tx causes, and per-command rx handler hit counts.
	 */
	struct il_priv *il = file->private_data;
	int pos = 0;
	int cnt = 0;
	char *buf;
	int bufsz = 24 * 64;	/* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IL_ERR("Can not allocate Buffer\n");
		return -ENOMEM;
	}

	pos +=
	    scnprintf(buf + pos, bufsz - pos, "Interrupt Statistics Report:\n");

	pos +=
	    scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
		      il->isr_stats.hw);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
		      il->isr_stats.sw);
	/* only show the last error code if an error actually occurred */
	if (il->isr_stats.sw || il->isr_stats.hw) {
		pos +=
		    scnprintf(buf + pos, bufsz - pos,
			      "\tLast Restarting Code: 0x%X\n",
			      il->isr_stats.err_code);
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	/* scheduler/alive counters are only maintained in debug builds */
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
		      il->isr_stats.sch);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
		      il->isr_stats.alive);
#endif
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      "HW RF KILL switch toggled:\t %u\n",
		      il->isr_stats.rfkill);

	pos +=
	    scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
		      il->isr_stats.ctkill);

	pos +=
	    scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
		      il->isr_stats.wakeup);

	pos +=
	    scnprintf(buf + pos, bufsz - pos, "Rx command responses:\t\t %u\n",
		      il->isr_stats.rx);
	/* per-command breakdown; skip commands that never fired */
	for (cnt = 0; cnt < IL_CN_MAX; cnt++) {
		if (il->isr_stats.handlers[cnt] > 0)
			pos +=
			    scnprintf(buf + pos, bufsz - pos,
				      "\tRx handler[%36s]:\t\t %u\n",
				      il_get_cmd_string(cnt),
				      il->isr_stats.handlers[cnt]);
	}

	pos +=
	    scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
		      il->isr_stats.tx);

	pos +=
	    scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
		      il->isr_stats.unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
620
621static ssize_t
622il_dbgfs_interrupt_write(struct file *file, const char __user *user_buf,
623 size_t count, loff_t *ppos)
624{
625 struct il_priv *il = file->private_data;
626 char buf[8];
627 int buf_size;
628 u32 reset_flag;
629
630 memset(buf, 0, sizeof(buf));
631 buf_size = min(count, sizeof(buf) - 1);
632 if (copy_from_user(buf, user_buf, buf_size))
633 return -EFAULT;
634 if (sscanf(buf, "%x", &reset_flag) != 1)
635 return -EFAULT;
636 if (reset_flag == 0)
637 il_clear_isr_stats(il);
638
639 return count;
640}
641
642static ssize_t
643il_dbgfs_qos_read(struct file *file, char __user *user_buf, size_t count,
644 loff_t *ppos)
645{
646 struct il_priv *il = file->private_data;
647 struct il_rxon_context *ctx = &il->ctx;
648 int pos = 0, i;
649 char buf[256];
650 const size_t bufsz = sizeof(buf);
651
652 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n", ctx->ctxid);
653 for (i = 0; i < AC_NUM; i++) {
654 pos +=
655 scnprintf(buf + pos, bufsz - pos,
656 "\tcw_min\tcw_max\taifsn\ttxop\n");
657 pos +=
658 scnprintf(buf + pos, bufsz - pos,
659 "AC[%d]\t%u\t%u\t%u\t%u\n", i,
660 ctx->qos_data.def_qos_parm.ac[i].cw_min,
661 ctx->qos_data.def_qos_parm.ac[i].cw_max,
662 ctx->qos_data.def_qos_parm.ac[i].aifsn,
663 ctx->qos_data.def_qos_parm.ac[i].edca_txop);
664 }
665
666 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
667}
668
669static ssize_t
670il_dbgfs_disable_ht40_write(struct file *file, const char __user *user_buf,
671 size_t count, loff_t *ppos)
672{
673 struct il_priv *il = file->private_data;
674 char buf[8];
675 int buf_size;
676 int ht40;
677
678 memset(buf, 0, sizeof(buf));
679 buf_size = min(count, sizeof(buf) - 1);
680 if (copy_from_user(buf, user_buf, buf_size))
681 return -EFAULT;
682 if (sscanf(buf, "%d", &ht40) != 1)
683 return -EFAULT;
684 if (!il_is_any_associated(il))
685 il->disable_ht40 = ht40 ? true : false;
686 else {
687 IL_ERR("Sta associated with AP - "
688 "Change to 40MHz channel support is not allowed\n");
689 return -EINVAL;
690 }
691
692 return count;
693}
694
695static ssize_t
696il_dbgfs_disable_ht40_read(struct file *file, char __user *user_buf,
697 size_t count, loff_t *ppos)
698{
699 struct il_priv *il = file->private_data;
700 char buf[100];
701 int pos = 0;
702 const size_t bufsz = sizeof(buf);
703
704 pos +=
705 scnprintf(buf + pos, bufsz - pos, "11n 40MHz Mode: %s\n",
706 il->disable_ht40 ? "Disabled" : "Enabled");
707 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
708}
709
/*
 * Instantiate the file_operations for the handlers above — presumably
 * each macro emits an il_dbgfs_<name>_ops struct wired to the matching
 * read/write functions (macro definitions are earlier in this file —
 * TODO confirm).  These files are registered under the "data" directory
 * in il_dbgfs_register().
 */
DEBUGFS_READ_WRITE_FILE_OPS(sram);
DEBUGFS_READ_FILE_OPS(nvm);
DEBUGFS_READ_FILE_OPS(stations);
DEBUGFS_READ_FILE_OPS(channels);
DEBUGFS_READ_FILE_OPS(status);
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(qos);
DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
718
static ssize_t
il_dbgfs_traffic_log_read(struct file *file, char __user *user_buf,
			  size_t count, loff_t *ppos)
{
	/*
	 * Dump tx/rx queue pointers plus, when traffic capture is
	 * enabled (IL_DL_TX / IL_DL_RX debug level), a hex dump of the
	 * captured traffic ring buffers, 16 bytes per line.
	 */
	struct il_priv *il = file->private_data;
	int pos = 0, ofs = 0;
	int cnt = 0, entry;
	struct il_tx_queue *txq;
	struct il_queue *q;
	struct il_rx_queue *rxq = &il->rxq;
	char *buf;
	/* room for both capture buffers (hex-expanded), per-queue lines
	 * and fixed headers */
	int bufsz =
	    ((IL_TRAFFIC_ENTRIES * IL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
	    (il->cfg->base_params->num_of_queues * 32 * 8) + 400;
	const u8 *ptr;
	ssize_t ret;

	if (!il->txq) {
		IL_ERR("txq not ready\n");
		return -EAGAIN;
	}
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IL_ERR("Can not allocate buffer\n");
		return -ENOMEM;
	}
	pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
	for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
		txq = &il->txq[cnt];
		q = &txq->q;
		pos +=
		    scnprintf(buf + pos, bufsz - pos,
			      "q[%d]: read_ptr: %u, write_ptr: %u\n", cnt,
			      q->read_ptr, q->write_ptr);
	}
	/* tx capture buffer: only present when IL_DL_TX logging is on */
	if (il->tx_traffic && (il_debug_level & IL_DL_TX)) {
		ptr = il->tx_traffic;
		pos +=
		    scnprintf(buf + pos, bufsz - pos, "Tx Traffic idx: %u\n",
			      il->tx_traffic_idx);
		for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) {
			for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16;
			     entry++, ofs += 16) {
				pos +=
				    scnprintf(buf + pos, bufsz - pos, "0x%.4x ",
					      ofs);
				hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
						   buf + pos, bufsz - pos, 0);
				pos += strlen(buf + pos);
				if (bufsz - pos > 0)
					buf[pos++] = '\n';
			}
		}
	}

	pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "read: %u, write: %u\n",
		      rxq->read, rxq->write);

	/* rx capture buffer: only present when IL_DL_RX logging is on */
	if (il->rx_traffic && (il_debug_level & IL_DL_RX)) {
		ptr = il->rx_traffic;
		pos +=
		    scnprintf(buf + pos, bufsz - pos, "Rx Traffic idx: %u\n",
			      il->rx_traffic_idx);
		for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) {
			for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16;
			     entry++, ofs += 16) {
				pos +=
				    scnprintf(buf + pos, bufsz - pos, "0x%.4x ",
					      ofs);
				hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
						   buf + pos, bufsz - pos, 0);
				pos += strlen(buf + pos);
				if (bufsz - pos > 0)
					buf[pos++] = '\n';
			}
		}
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
803
804static ssize_t
805il_dbgfs_traffic_log_write(struct file *file, const char __user *user_buf,
806 size_t count, loff_t *ppos)
807{
808 struct il_priv *il = file->private_data;
809 char buf[8];
810 int buf_size;
811 int traffic_log;
812
813 memset(buf, 0, sizeof(buf));
814 buf_size = min(count, sizeof(buf) - 1);
815 if (copy_from_user(buf, user_buf, buf_size))
816 return -EFAULT;
817 if (sscanf(buf, "%d", &traffic_log) != 1)
818 return -EFAULT;
819 if (traffic_log == 0)
820 il_reset_traffic_log(il);
821
822 return count;
823}
824
825static ssize_t
826il_dbgfs_tx_queue_read(struct file *file, char __user *user_buf, size_t count,
827 loff_t *ppos)
828{
829
830 struct il_priv *il = file->private_data;
831 struct il_tx_queue *txq;
832 struct il_queue *q;
833 char *buf;
834 int pos = 0;
835 int cnt;
836 int ret;
837 const size_t bufsz =
838 sizeof(char) * 64 * il->cfg->base_params->num_of_queues;
839
840 if (!il->txq) {
841 IL_ERR("txq not ready\n");
842 return -EAGAIN;
843 }
844 buf = kzalloc(bufsz, GFP_KERNEL);
845 if (!buf)
846 return -ENOMEM;
847
848 for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
849 txq = &il->txq[cnt];
850 q = &txq->q;
851 pos +=
852 scnprintf(buf + pos, bufsz - pos,
853 "hwq %.2d: read=%u write=%u stop=%d"
854 " swq_id=%#.2x (ac %d/hwq %d)\n", cnt,
855 q->read_ptr, q->write_ptr,
856 !!test_bit(cnt, il->queue_stopped),
857 txq->swq_id, txq->swq_id & 3,
858 (txq->swq_id >> 2) & 0x1f);
859 if (cnt >= 4)
860 continue;
861 /* for the ACs, display the stop count too */
862 pos +=
863 scnprintf(buf + pos, bufsz - pos,
864 " stop-count: %d\n",
865 atomic_read(&il->queue_stop_count[cnt]));
866 }
867 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
868 kfree(buf);
869 return ret;
870}
871
872static ssize_t
873il_dbgfs_rx_queue_read(struct file *file, char __user *user_buf, size_t count,
874 loff_t *ppos)
875{
876
877 struct il_priv *il = file->private_data;
878 struct il_rx_queue *rxq = &il->rxq;
879 char buf[256];
880 int pos = 0;
881 const size_t bufsz = sizeof(buf);
882
883 pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", rxq->read);
884 pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", rxq->write);
885 pos +=
886 scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
887 rxq->free_count);
888 if (rxq->rb_stts) {
889 pos +=
890 scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
891 le16_to_cpu(rxq->rb_stts->
892 closed_rb_num) & 0x0FFF);
893 } else {
894 pos +=
895 scnprintf(buf + pos, bufsz - pos,
896 "closed_rb_num: Not Allocated\n");
897 }
898 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
899}
900
static ssize_t
il_dbgfs_ucode_rx_stats_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	/* Thin wrapper: delegate to the hw-specific debugfs rx-stats op. */
	struct il_priv *il = file->private_data;
	return il->cfg->ops->lib->debugfs_ops.rx_stats_read(file, user_buf,
							    count, ppos);
}
909
static ssize_t
il_dbgfs_ucode_tx_stats_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	/* Thin wrapper: delegate to the hw-specific debugfs tx-stats op. */
	struct il_priv *il = file->private_data;
	return il->cfg->ops->lib->debugfs_ops.tx_stats_read(file, user_buf,
							    count, ppos);
}
918
static ssize_t
il_dbgfs_ucode_general_stats_read(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	/* Thin wrapper: delegate to the hw-specific general-stats op. */
	struct il_priv *il = file->private_data;
	return il->cfg->ops->lib->debugfs_ops.general_stats_read(file, user_buf,
								 count, ppos);
}
927
static ssize_t
il_dbgfs_sensitivity_read(struct file *file, char __user *user_buf,
			  size_t count, loff_t *ppos)
{
	/*
	 * Dump the rx sensitivity calibration state: auto-correlation
	 * thresholds, false-alarm / bad-PLCP counters and the energy
	 * (NRG) detection history.
	 */
	struct il_priv *il = file->private_data;
	int pos = 0;
	int cnt = 0;
	char *buf;
	/* generous upper bound: 4x the raw struct size plus headers */
	int bufsz = sizeof(struct il_sensitivity_data) * 4 + 100;
	ssize_t ret;
	struct il_sensitivity_data *data;

	data = &il->sensitivity_data;
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IL_ERR("Can not allocate Buffer\n");
		return -ENOMEM;
	}

	pos +=
	    scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
		      data->auto_corr_ofdm);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc:\t\t %u\n",
		      data->auto_corr_ofdm_mrc);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
		      data->auto_corr_ofdm_x1);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc_x1:\t\t %u\n",
		      data->auto_corr_ofdm_mrc_x1);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
		      data->auto_corr_cck);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
		      data->auto_corr_cck_mrc);
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      "last_bad_plcp_cnt_ofdm:\t\t %u\n",
		      data->last_bad_plcp_cnt_ofdm);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
		      data->last_fa_cnt_ofdm);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "last_bad_plcp_cnt_cck:\t\t %u\n",
		      data->last_bad_plcp_cnt_cck);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
		      data->last_fa_cnt_cck);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
		      data->nrg_curr_state);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
		      data->nrg_prev_state);
	/* recent energy samples, printed on a single line */
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
	for (cnt = 0; cnt < 10; cnt++) {
		pos +=
		    scnprintf(buf + pos, bufsz - pos, " %u",
			      data->nrg_value[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	/* silence-RSSI history, printed on a single line */
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
	for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
		pos +=
		    scnprintf(buf + pos, bufsz - pos, " %u",
			      data->nrg_silence_rssi[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
		      data->nrg_silence_ref);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
		      data->nrg_energy_idx);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
		      data->nrg_silence_idx);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
		      data->nrg_th_cck);
	pos +=
	    scnprintf(buf + pos, bufsz - pos,
		      "nrg_auto_corr_silence_diff:\t %u\n",
		      data->nrg_auto_corr_silence_diff);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
		      data->num_in_cck_no_fa);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
		      data->nrg_th_ofdm);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
1026
static ssize_t
il_dbgfs_chain_noise_read(struct file *file, char __user *user_buf,
			  size_t count, loff_t *ppos)
{
	/*
	 * Dump the chain-noise calibration state: per-chain noise and
	 * signal accumulators, disconnect decisions and gain deltas.
	 */
	struct il_priv *il = file->private_data;
	int pos = 0;
	int cnt = 0;
	char *buf;
	/* generous upper bound: 4x the raw struct size plus headers */
	int bufsz = sizeof(struct il_chain_noise_data) * 4 + 100;
	ssize_t ret;
	struct il_chain_noise_data *data;

	data = &il->chain_noise_data;
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IL_ERR("Can not allocate Buffer\n");
		return -ENOMEM;
	}

	pos +=
	    scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
		      data->active_chains);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
		      data->chain_noise_a);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
		      data->chain_noise_b);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
		      data->chain_noise_c);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
		      data->chain_signal_a);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
		      data->chain_signal_b);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
		      data->chain_signal_c);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
		      data->beacon_count);

	/* one flag per rx chain, printed on a single line */
	pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
		pos +=
		    scnprintf(buf + pos, bufsz - pos, " %u",
			      data->disconn_array[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	/* one gain delta per rx chain, printed on a single line */
	pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
		pos +=
		    scnprintf(buf + pos, bufsz - pos, " %u",
			      data->delta_gain_code[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
		      data->radio_write);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
		      data->state);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
1097
1098static ssize_t
1099il_dbgfs_power_save_status_read(struct file *file, char __user *user_buf,
1100 size_t count, loff_t *ppos)
1101{
1102 struct il_priv *il = file->private_data;
1103 char buf[60];
1104 int pos = 0;
1105 const size_t bufsz = sizeof(buf);
1106 u32 pwrsave_status;
1107
1108 pwrsave_status =
1109 _il_rd(il, CSR_GP_CNTRL) & CSR_GP_REG_POWER_SAVE_STATUS_MSK;
1110
1111 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
1112 pos +=
1113 scnprintf(buf + pos, bufsz - pos, "%s\n",
1114 (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
1115 (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
1116 (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
1117 "error");
1118
1119 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1120}
1121
1122static ssize_t
1123il_dbgfs_clear_ucode_stats_write(struct file *file,
1124 const char __user *user_buf, size_t count,
1125 loff_t *ppos)
1126{
1127 struct il_priv *il = file->private_data;
1128 char buf[8];
1129 int buf_size;
1130 int clear;
1131
1132 memset(buf, 0, sizeof(buf));
1133 buf_size = min(count, sizeof(buf) - 1);
1134 if (copy_from_user(buf, user_buf, buf_size))
1135 return -EFAULT;
1136 if (sscanf(buf, "%d", &clear) != 1)
1137 return -EFAULT;
1138
1139 /* make request to uCode to retrieve stats information */
1140 mutex_lock(&il->mutex);
1141 il_send_stats_request(il, CMD_SYNC, true);
1142 mutex_unlock(&il->mutex);
1143
1144 return count;
1145}
1146
1147static ssize_t
1148il_dbgfs_rxon_flags_read(struct file *file, char __user *user_buf,
1149 size_t count, loff_t *ppos)
1150{
1151
1152 struct il_priv *il = file->private_data;
1153 int len = 0;
1154 char buf[20];
1155
1156 len = sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.flags));
1157 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1158}
1159
1160static ssize_t
1161il_dbgfs_rxon_filter_flags_read(struct file *file, char __user *user_buf,
1162 size_t count, loff_t *ppos)
1163{
1164
1165 struct il_priv *il = file->private_data;
1166 int len = 0;
1167 char buf[20];
1168
1169 len =
1170 sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.filter_flags));
1171 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1172}
1173
1174static ssize_t
1175il_dbgfs_fh_reg_read(struct file *file, char __user *user_buf, size_t count,
1176 loff_t *ppos)
1177{
1178 struct il_priv *il = file->private_data;
1179 char *buf;
1180 int pos = 0;
1181 ssize_t ret = -EFAULT;
1182
1183 if (il->cfg->ops->lib->dump_fh) {
1184 ret = pos = il->cfg->ops->lib->dump_fh(il, &buf, true);
1185 if (buf) {
1186 ret =
1187 simple_read_from_buffer(user_buf, count, ppos, buf,
1188 pos);
1189 kfree(buf);
1190 }
1191 }
1192
1193 return ret;
1194}
1195
1196static ssize_t
1197il_dbgfs_missed_beacon_read(struct file *file, char __user *user_buf,
1198 size_t count, loff_t *ppos)
1199{
1200
1201 struct il_priv *il = file->private_data;
1202 int pos = 0;
1203 char buf[12];
1204 const size_t bufsz = sizeof(buf);
1205
1206 pos +=
1207 scnprintf(buf + pos, bufsz - pos, "%d\n",
1208 il->missed_beacon_threshold);
1209
1210 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1211}
1212
1213static ssize_t
1214il_dbgfs_missed_beacon_write(struct file *file, const char __user *user_buf,
1215 size_t count, loff_t *ppos)
1216{
1217 struct il_priv *il = file->private_data;
1218 char buf[8];
1219 int buf_size;
1220 int missed;
1221
1222 memset(buf, 0, sizeof(buf));
1223 buf_size = min(count, sizeof(buf) - 1);
1224 if (copy_from_user(buf, user_buf, buf_size))
1225 return -EFAULT;
1226 if (sscanf(buf, "%d", &missed) != 1)
1227 return -EINVAL;
1228
1229 if (missed < IL_MISSED_BEACON_THRESHOLD_MIN ||
1230 missed > IL_MISSED_BEACON_THRESHOLD_MAX)
1231 il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
1232 else
1233 il->missed_beacon_threshold = missed;
1234
1235 return count;
1236}
1237
1238static ssize_t
1239il_dbgfs_force_reset_read(struct file *file, char __user *user_buf,
1240 size_t count, loff_t *ppos)
1241{
1242
1243 struct il_priv *il = file->private_data;
1244 int pos = 0;
1245 char buf[300];
1246 const size_t bufsz = sizeof(buf);
1247 struct il_force_reset *force_reset;
1248
1249 force_reset = &il->force_reset;
1250
1251 pos +=
1252 scnprintf(buf + pos, bufsz - pos, "\tnumber of reset request: %d\n",
1253 force_reset->reset_request_count);
1254 pos +=
1255 scnprintf(buf + pos, bufsz - pos,
1256 "\tnumber of reset request success: %d\n",
1257 force_reset->reset_success_count);
1258 pos +=
1259 scnprintf(buf + pos, bufsz - pos,
1260 "\tnumber of reset request reject: %d\n",
1261 force_reset->reset_reject_count);
1262 pos +=
1263 scnprintf(buf + pos, bufsz - pos, "\treset duration: %lu\n",
1264 force_reset->reset_duration);
1265
1266 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1267}
1268
1269static ssize_t
1270il_dbgfs_force_reset_write(struct file *file, const char __user *user_buf,
1271 size_t count, loff_t *ppos)
1272{
1273
1274 int ret;
1275 struct il_priv *il = file->private_data;
1276
1277 ret = il_force_reset(il, true);
1278
1279 return ret ? ret : count;
1280}
1281
/*
 * debugfs write handler for the "wd_timeout" file.
 *
 * Parses a decimal watchdog timeout from userspace (at most 7 characters;
 * longer input is silently truncated by the min() bound). A value outside
 * [0, IL_MAX_WD_TIMEOUT] is quietly replaced with IL_DEF_WD_TIMEOUT rather
 * than rejected. The new value is stored in the shared cfg->base_params
 * and the watchdog timer is re-armed via il_setup_watchdog().
 *
 * Returns count on success, -EFAULT on copy failure, -EINVAL on
 * unparseable input.
 */
1282static ssize_t
1283il_dbgfs_wd_timeout_write(struct file *file, const char __user *user_buf,
1284			  size_t count, loff_t *ppos)
1285{
1286
1287	struct il_priv *il = file->private_data;
1288	char buf[8];
1289	int buf_size;
1290	int timeout;
1291
1292	memset(buf, 0, sizeof(buf));
1293	buf_size = min(count, sizeof(buf) - 1);
1294	if (copy_from_user(buf, user_buf, buf_size))
1295		return -EFAULT;
1296	if (sscanf(buf, "%d", &timeout) != 1)
1297		return -EINVAL;
1298	if (timeout < 0 || timeout > IL_MAX_WD_TIMEOUT)
1299		timeout = IL_DEF_WD_TIMEOUT;
1300
1301	il->cfg->base_params->wd_timeout = timeout;
1302	il_setup_watchdog(il);
1303	return count;
1304}
1305
/*
 * Instantiate the struct file_operations for each debugfs entry below.
 * The DEBUGFS_READ_FILE_OPS / DEBUGFS_WRITE_FILE_OPS /
 * DEBUGFS_READ_WRITE_FILE_OPS macros (defined earlier in this file,
 * outside this view) tie each name to its il_dbgfs_<name>_read /
 * il_dbgfs_<name>_write handler(s) in an il_dbgfs_<name>_ops structure
 * consumed by DEBUGFS_ADD_FILE() in il_dbgfs_register().
 */
1306DEBUGFS_READ_FILE_OPS(rx_stats);
1307DEBUGFS_READ_FILE_OPS(tx_stats);
1308DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
1309DEBUGFS_READ_FILE_OPS(rx_queue);
1310DEBUGFS_READ_FILE_OPS(tx_queue);
1311DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
1312DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
1313DEBUGFS_READ_FILE_OPS(ucode_general_stats);
1314DEBUGFS_READ_FILE_OPS(sensitivity);
1315DEBUGFS_READ_FILE_OPS(chain_noise);
1316DEBUGFS_READ_FILE_OPS(power_save_status);
1317DEBUGFS_WRITE_FILE_OPS(clear_ucode_stats);
1318DEBUGFS_WRITE_FILE_OPS(clear_traffic_stats);
1319DEBUGFS_READ_FILE_OPS(fh_reg);
1320DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
1321DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
1322DEBUGFS_READ_FILE_OPS(rxon_flags);
1323DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
1324DEBUGFS_WRITE_FILE_OPS(wd_timeout);
1325
1326/*
1327 * Create the debugfs files and directories
1328 *
1329 */
/*
 * Builds the per-device debugfs tree under the wiphy's debugfs directory:
 *   <phy>/<name>/{data,rf,debug}/...
 * The DEBUGFS_ADD_FILE()/DEBUGFS_ADD_BOOL() macros (defined elsewhere in
 * this file) jump to 'err' on failure, where the whole partially built
 * tree is torn down with il_dbgfs_unregister().
 *
 * Returns 0 on success, -ENOMEM on any creation failure.
 */
1330int
1331il_dbgfs_register(struct il_priv *il, const char *name)
1332{
1333	struct dentry *phyd = il->hw->wiphy->debugfsdir;
1334	struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
1335
1336	dir_drv = debugfs_create_dir(name, phyd);
1337	if (!dir_drv)
1338		return -ENOMEM;
1339
	/* Record the root early so the err path can clean up recursively. */
1340	il->debugfs_dir = dir_drv;
1341
1342	dir_data = debugfs_create_dir("data", dir_drv);
1343	if (!dir_data)
1344		goto err;
1345	dir_rf = debugfs_create_dir("rf", dir_drv);
1346	if (!dir_rf)
1347		goto err;
1348	dir_debug = debugfs_create_dir("debug", dir_drv);
1349	if (!dir_debug)
1350		goto err;
1351
1352	DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
1353	DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
1354	DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
1355	DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
1356	DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
1357	DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
1358	DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
1359	DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
1360	DEBUGFS_ADD_FILE(rx_stats, dir_debug, S_IRUSR);
1361	DEBUGFS_ADD_FILE(tx_stats, dir_debug, S_IRUSR);
1362	DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
1363	DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
1364	DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
1365	DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
1366	DEBUGFS_ADD_FILE(clear_ucode_stats, dir_debug, S_IWUSR);
1367	DEBUGFS_ADD_FILE(clear_traffic_stats, dir_debug, S_IWUSR);
1368	DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
	/*
	 * NOTE(review): missed_beacon was declared with
	 * DEBUGFS_READ_WRITE_FILE_OPS above but is registered write-only
	 * here, so its read handler is unreachable via permissions —
	 * confirm whether S_IWUSR | S_IRUSR was intended.
	 */
1369	DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
1370	DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
1371	DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
1372	DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
1373	DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
1374
1375	if (il->cfg->base_params->sensitivity_calib_by_driver)
1376		DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
1377	if (il->cfg->base_params->chain_noise_calib_by_driver)
1378		DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
	/*
	 * NOTE(review): rxon_flags and rxon_filter_flags were declared with
	 * DEBUGFS_READ_FILE_OPS (read handlers only) yet are registered with
	 * S_IWUSR — mode and ops look mismatched; S_IRUSR seems intended.
	 */
1379	DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
1380	DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
1381	DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
1382	if (il->cfg->base_params->sensitivity_calib_by_driver)
1383		DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
1384				 &il->disable_sens_cal);
1385	if (il->cfg->base_params->chain_noise_calib_by_driver)
1386		DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
1387				 &il->disable_chain_noise_cal);
1388	DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, &il->disable_tx_power_cal);
1389	return 0;
1390
1391err:
1392	IL_ERR("Can't create the debugfs directory\n");
1393	il_dbgfs_unregister(il);
1394	return -ENOMEM;
1395}
1396EXPORT_SYMBOL(il_dbgfs_register);
1397
1398/**
1399 * Remove the debugfs files and directories
1400 *
1401 */
/*
 * NOTE(review): the comment above uses the kernel-doc opener without a
 * kernel-doc body — consider a plain comment.
 *
 * Tears down the whole debugfs tree created by il_dbgfs_register() in one
 * recursive remove, then clears the root pointer so a repeated call (or a
 * call before registration) is a harmless no-op.
 */
1402void
1403il_dbgfs_unregister(struct il_priv *il)
1404{
1405	if (!il->debugfs_dir)
1406		return;
1407
1408	debugfs_remove_recursive(il->debugfs_dir);
1409	il->debugfs_dir = NULL;
1410}
1411EXPORT_SYMBOL(il_dbgfs_unregister);
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
deleted file mode 100644
index cfabb38793ab..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
+++ /dev/null
@@ -1,523 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-3945-debugfs.h"
30
31
32static int iwl3945_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
33{
34 int p = 0;
35
36 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
37 le32_to_cpu(priv->_3945.statistics.flag));
38 if (le32_to_cpu(priv->_3945.statistics.flag) &
39 UCODE_STATISTICS_CLEAR_MSK)
40 p += scnprintf(buf + p, bufsz - p,
41 "\tStatistics have been cleared\n");
42 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
43 (le32_to_cpu(priv->_3945.statistics.flag) &
44 UCODE_STATISTICS_FREQUENCY_MSK)
45 ? "2.4 GHz" : "5.2 GHz");
46 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
47 (le32_to_cpu(priv->_3945.statistics.flag) &
48 UCODE_STATISTICS_NARROW_BAND_MSK)
49 ? "enabled" : "disabled");
50 return p;
51}
52
53ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
54 char __user *user_buf,
55 size_t count, loff_t *ppos)
56{
57 struct iwl_priv *priv = file->private_data;
58 int pos = 0;
59 char *buf;
60 int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
61 sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
62 ssize_t ret;
63 struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm,
64 *max_ofdm;
65 struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
66 struct iwl39_statistics_rx_non_phy *general, *accum_general;
67 struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
68
69 if (!iwl_legacy_is_alive(priv))
70 return -EAGAIN;
71
72 buf = kzalloc(bufsz, GFP_KERNEL);
73 if (!buf) {
74 IWL_ERR(priv, "Can not allocate Buffer\n");
75 return -ENOMEM;
76 }
77
78 /*
79 * The statistic information display here is based on
80 * the last statistics notification from uCode
81 * might not reflect the current uCode activity
82 */
83 ofdm = &priv->_3945.statistics.rx.ofdm;
84 cck = &priv->_3945.statistics.rx.cck;
85 general = &priv->_3945.statistics.rx.general;
86 accum_ofdm = &priv->_3945.accum_statistics.rx.ofdm;
87 accum_cck = &priv->_3945.accum_statistics.rx.cck;
88 accum_general = &priv->_3945.accum_statistics.rx.general;
89 delta_ofdm = &priv->_3945.delta_statistics.rx.ofdm;
90 delta_cck = &priv->_3945.delta_statistics.rx.cck;
91 delta_general = &priv->_3945.delta_statistics.rx.general;
92 max_ofdm = &priv->_3945.max_delta.rx.ofdm;
93 max_cck = &priv->_3945.max_delta.rx.cck;
94 max_general = &priv->_3945.max_delta.rx.general;
95
96 pos += iwl3945_statistics_flag(priv, buf, bufsz);
97 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
98 "acumulative delta max\n",
99 "Statistics_Rx - OFDM:");
100 pos += scnprintf(buf + pos, bufsz - pos,
101 " %-30s %10u %10u %10u %10u\n",
102 "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
103 accum_ofdm->ina_cnt,
104 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
105 pos += scnprintf(buf + pos, bufsz - pos,
106 " %-30s %10u %10u %10u %10u\n",
107 "fina_cnt:",
108 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
109 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
110 pos += scnprintf(buf + pos, bufsz - pos,
111 " %-30s %10u %10u %10u %10u\n", "plcp_err:",
112 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
113 delta_ofdm->plcp_err, max_ofdm->plcp_err);
114 pos += scnprintf(buf + pos, bufsz - pos,
115 " %-30s %10u %10u %10u %10u\n", "crc32_err:",
116 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
117 delta_ofdm->crc32_err, max_ofdm->crc32_err);
118 pos += scnprintf(buf + pos, bufsz - pos,
119 " %-30s %10u %10u %10u %10u\n", "overrun_err:",
120 le32_to_cpu(ofdm->overrun_err),
121 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
122 max_ofdm->overrun_err);
123 pos += scnprintf(buf + pos, bufsz - pos,
124 " %-30s %10u %10u %10u %10u\n",
125 "early_overrun_err:",
126 le32_to_cpu(ofdm->early_overrun_err),
127 accum_ofdm->early_overrun_err,
128 delta_ofdm->early_overrun_err,
129 max_ofdm->early_overrun_err);
130 pos += scnprintf(buf + pos, bufsz - pos,
131 " %-30s %10u %10u %10u %10u\n",
132 "crc32_good:", le32_to_cpu(ofdm->crc32_good),
133 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
134 max_ofdm->crc32_good);
135 pos += scnprintf(buf + pos, bufsz - pos,
136 " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
137 le32_to_cpu(ofdm->false_alarm_cnt),
138 accum_ofdm->false_alarm_cnt,
139 delta_ofdm->false_alarm_cnt,
140 max_ofdm->false_alarm_cnt);
141 pos += scnprintf(buf + pos, bufsz - pos,
142 " %-30s %10u %10u %10u %10u\n",
143 "fina_sync_err_cnt:",
144 le32_to_cpu(ofdm->fina_sync_err_cnt),
145 accum_ofdm->fina_sync_err_cnt,
146 delta_ofdm->fina_sync_err_cnt,
147 max_ofdm->fina_sync_err_cnt);
148 pos += scnprintf(buf + pos, bufsz - pos,
149 " %-30s %10u %10u %10u %10u\n",
150 "sfd_timeout:",
151 le32_to_cpu(ofdm->sfd_timeout),
152 accum_ofdm->sfd_timeout,
153 delta_ofdm->sfd_timeout,
154 max_ofdm->sfd_timeout);
155 pos += scnprintf(buf + pos, bufsz - pos,
156 " %-30s %10u %10u %10u %10u\n",
157 "fina_timeout:",
158 le32_to_cpu(ofdm->fina_timeout),
159 accum_ofdm->fina_timeout,
160 delta_ofdm->fina_timeout,
161 max_ofdm->fina_timeout);
162 pos += scnprintf(buf + pos, bufsz - pos,
163 " %-30s %10u %10u %10u %10u\n",
164 "unresponded_rts:",
165 le32_to_cpu(ofdm->unresponded_rts),
166 accum_ofdm->unresponded_rts,
167 delta_ofdm->unresponded_rts,
168 max_ofdm->unresponded_rts);
169 pos += scnprintf(buf + pos, bufsz - pos,
170 " %-30s %10u %10u %10u %10u\n",
171 "rxe_frame_lmt_ovrun:",
172 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
173 accum_ofdm->rxe_frame_limit_overrun,
174 delta_ofdm->rxe_frame_limit_overrun,
175 max_ofdm->rxe_frame_limit_overrun);
176 pos += scnprintf(buf + pos, bufsz - pos,
177 " %-30s %10u %10u %10u %10u\n",
178 "sent_ack_cnt:",
179 le32_to_cpu(ofdm->sent_ack_cnt),
180 accum_ofdm->sent_ack_cnt,
181 delta_ofdm->sent_ack_cnt,
182 max_ofdm->sent_ack_cnt);
183 pos += scnprintf(buf + pos, bufsz - pos,
184 " %-30s %10u %10u %10u %10u\n",
185 "sent_cts_cnt:",
186 le32_to_cpu(ofdm->sent_cts_cnt),
187 accum_ofdm->sent_cts_cnt,
188 delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
189
190 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
191 "acumulative delta max\n",
192 "Statistics_Rx - CCK:");
193 pos += scnprintf(buf + pos, bufsz - pos,
194 " %-30s %10u %10u %10u %10u\n",
195 "ina_cnt:",
196 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
197 delta_cck->ina_cnt, max_cck->ina_cnt);
198 pos += scnprintf(buf + pos, bufsz - pos,
199 " %-30s %10u %10u %10u %10u\n",
200 "fina_cnt:",
201 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
202 delta_cck->fina_cnt, max_cck->fina_cnt);
203 pos += scnprintf(buf + pos, bufsz - pos,
204 " %-30s %10u %10u %10u %10u\n",
205 "plcp_err:",
206 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
207 delta_cck->plcp_err, max_cck->plcp_err);
208 pos += scnprintf(buf + pos, bufsz - pos,
209 " %-30s %10u %10u %10u %10u\n",
210 "crc32_err:",
211 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
212 delta_cck->crc32_err, max_cck->crc32_err);
213 pos += scnprintf(buf + pos, bufsz - pos,
214 " %-30s %10u %10u %10u %10u\n",
215 "overrun_err:",
216 le32_to_cpu(cck->overrun_err),
217 accum_cck->overrun_err,
218 delta_cck->overrun_err, max_cck->overrun_err);
219 pos += scnprintf(buf + pos, bufsz - pos,
220 " %-30s %10u %10u %10u %10u\n",
221 "early_overrun_err:",
222 le32_to_cpu(cck->early_overrun_err),
223 accum_cck->early_overrun_err,
224 delta_cck->early_overrun_err,
225 max_cck->early_overrun_err);
226 pos += scnprintf(buf + pos, bufsz - pos,
227 " %-30s %10u %10u %10u %10u\n",
228 "crc32_good:",
229 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
230 delta_cck->crc32_good,
231 max_cck->crc32_good);
232 pos += scnprintf(buf + pos, bufsz - pos,
233 " %-30s %10u %10u %10u %10u\n",
234 "false_alarm_cnt:",
235 le32_to_cpu(cck->false_alarm_cnt),
236 accum_cck->false_alarm_cnt,
237 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
238 pos += scnprintf(buf + pos, bufsz - pos,
239 " %-30s %10u %10u %10u %10u\n",
240 "fina_sync_err_cnt:",
241 le32_to_cpu(cck->fina_sync_err_cnt),
242 accum_cck->fina_sync_err_cnt,
243 delta_cck->fina_sync_err_cnt,
244 max_cck->fina_sync_err_cnt);
245 pos += scnprintf(buf + pos, bufsz - pos,
246 " %-30s %10u %10u %10u %10u\n",
247 "sfd_timeout:",
248 le32_to_cpu(cck->sfd_timeout),
249 accum_cck->sfd_timeout,
250 delta_cck->sfd_timeout, max_cck->sfd_timeout);
251 pos += scnprintf(buf + pos, bufsz - pos,
252 " %-30s %10u %10u %10u %10u\n",
253 "fina_timeout:",
254 le32_to_cpu(cck->fina_timeout),
255 accum_cck->fina_timeout,
256 delta_cck->fina_timeout, max_cck->fina_timeout);
257 pos += scnprintf(buf + pos, bufsz - pos,
258 " %-30s %10u %10u %10u %10u\n",
259 "unresponded_rts:",
260 le32_to_cpu(cck->unresponded_rts),
261 accum_cck->unresponded_rts,
262 delta_cck->unresponded_rts,
263 max_cck->unresponded_rts);
264 pos += scnprintf(buf + pos, bufsz - pos,
265 " %-30s %10u %10u %10u %10u\n",
266 "rxe_frame_lmt_ovrun:",
267 le32_to_cpu(cck->rxe_frame_limit_overrun),
268 accum_cck->rxe_frame_limit_overrun,
269 delta_cck->rxe_frame_limit_overrun,
270 max_cck->rxe_frame_limit_overrun);
271 pos += scnprintf(buf + pos, bufsz - pos,
272 " %-30s %10u %10u %10u %10u\n",
273 "sent_ack_cnt:",
274 le32_to_cpu(cck->sent_ack_cnt),
275 accum_cck->sent_ack_cnt,
276 delta_cck->sent_ack_cnt,
277 max_cck->sent_ack_cnt);
278 pos += scnprintf(buf + pos, bufsz - pos,
279 " %-30s %10u %10u %10u %10u\n",
280 "sent_cts_cnt:",
281 le32_to_cpu(cck->sent_cts_cnt),
282 accum_cck->sent_cts_cnt,
283 delta_cck->sent_cts_cnt,
284 max_cck->sent_cts_cnt);
285
286 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
287 "acumulative delta max\n",
288 "Statistics_Rx - GENERAL:");
289 pos += scnprintf(buf + pos, bufsz - pos,
290 " %-30s %10u %10u %10u %10u\n",
291 "bogus_cts:",
292 le32_to_cpu(general->bogus_cts),
293 accum_general->bogus_cts,
294 delta_general->bogus_cts, max_general->bogus_cts);
295 pos += scnprintf(buf + pos, bufsz - pos,
296 " %-30s %10u %10u %10u %10u\n",
297 "bogus_ack:",
298 le32_to_cpu(general->bogus_ack),
299 accum_general->bogus_ack,
300 delta_general->bogus_ack, max_general->bogus_ack);
301 pos += scnprintf(buf + pos, bufsz - pos,
302 " %-30s %10u %10u %10u %10u\n",
303 "non_bssid_frames:",
304 le32_to_cpu(general->non_bssid_frames),
305 accum_general->non_bssid_frames,
306 delta_general->non_bssid_frames,
307 max_general->non_bssid_frames);
308 pos += scnprintf(buf + pos, bufsz - pos,
309 " %-30s %10u %10u %10u %10u\n",
310 "filtered_frames:",
311 le32_to_cpu(general->filtered_frames),
312 accum_general->filtered_frames,
313 delta_general->filtered_frames,
314 max_general->filtered_frames);
315 pos += scnprintf(buf + pos, bufsz - pos,
316 " %-30s %10u %10u %10u %10u\n",
317 "non_channel_beacons:",
318 le32_to_cpu(general->non_channel_beacons),
319 accum_general->non_channel_beacons,
320 delta_general->non_channel_beacons,
321 max_general->non_channel_beacons);
322
323 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
324 kfree(buf);
325 return ret;
326}
327
328ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
329 char __user *user_buf,
330 size_t count, loff_t *ppos)
331{
332 struct iwl_priv *priv = file->private_data;
333 int pos = 0;
334 char *buf;
335 int bufsz = (sizeof(struct iwl39_statistics_tx) * 48) + 250;
336 ssize_t ret;
337 struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
338
339 if (!iwl_legacy_is_alive(priv))
340 return -EAGAIN;
341
342 buf = kzalloc(bufsz, GFP_KERNEL);
343 if (!buf) {
344 IWL_ERR(priv, "Can not allocate Buffer\n");
345 return -ENOMEM;
346 }
347
348 /*
349 * The statistic information display here is based on
350 * the last statistics notification from uCode
351 * might not reflect the current uCode activity
352 */
353 tx = &priv->_3945.statistics.tx;
354 accum_tx = &priv->_3945.accum_statistics.tx;
355 delta_tx = &priv->_3945.delta_statistics.tx;
356 max_tx = &priv->_3945.max_delta.tx;
357 pos += iwl3945_statistics_flag(priv, buf, bufsz);
358 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
359 "acumulative delta max\n",
360 "Statistics_Tx:");
361 pos += scnprintf(buf + pos, bufsz - pos,
362 " %-30s %10u %10u %10u %10u\n",
363 "preamble:",
364 le32_to_cpu(tx->preamble_cnt),
365 accum_tx->preamble_cnt,
366 delta_tx->preamble_cnt, max_tx->preamble_cnt);
367 pos += scnprintf(buf + pos, bufsz - pos,
368 " %-30s %10u %10u %10u %10u\n",
369 "rx_detected_cnt:",
370 le32_to_cpu(tx->rx_detected_cnt),
371 accum_tx->rx_detected_cnt,
372 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
373 pos += scnprintf(buf + pos, bufsz - pos,
374 " %-30s %10u %10u %10u %10u\n",
375 "bt_prio_defer_cnt:",
376 le32_to_cpu(tx->bt_prio_defer_cnt),
377 accum_tx->bt_prio_defer_cnt,
378 delta_tx->bt_prio_defer_cnt,
379 max_tx->bt_prio_defer_cnt);
380 pos += scnprintf(buf + pos, bufsz - pos,
381 " %-30s %10u %10u %10u %10u\n",
382 "bt_prio_kill_cnt:",
383 le32_to_cpu(tx->bt_prio_kill_cnt),
384 accum_tx->bt_prio_kill_cnt,
385 delta_tx->bt_prio_kill_cnt,
386 max_tx->bt_prio_kill_cnt);
387 pos += scnprintf(buf + pos, bufsz - pos,
388 " %-30s %10u %10u %10u %10u\n",
389 "few_bytes_cnt:",
390 le32_to_cpu(tx->few_bytes_cnt),
391 accum_tx->few_bytes_cnt,
392 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
393 pos += scnprintf(buf + pos, bufsz - pos,
394 " %-30s %10u %10u %10u %10u\n",
395 "cts_timeout:",
396 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
397 delta_tx->cts_timeout, max_tx->cts_timeout);
398 pos += scnprintf(buf + pos, bufsz - pos,
399 " %-30s %10u %10u %10u %10u\n",
400 "ack_timeout:",
401 le32_to_cpu(tx->ack_timeout),
402 accum_tx->ack_timeout,
403 delta_tx->ack_timeout, max_tx->ack_timeout);
404 pos += scnprintf(buf + pos, bufsz - pos,
405 " %-30s %10u %10u %10u %10u\n",
406 "expected_ack_cnt:",
407 le32_to_cpu(tx->expected_ack_cnt),
408 accum_tx->expected_ack_cnt,
409 delta_tx->expected_ack_cnt,
410 max_tx->expected_ack_cnt);
411 pos += scnprintf(buf + pos, bufsz - pos,
412 " %-30s %10u %10u %10u %10u\n",
413 "actual_ack_cnt:",
414 le32_to_cpu(tx->actual_ack_cnt),
415 accum_tx->actual_ack_cnt,
416 delta_tx->actual_ack_cnt,
417 max_tx->actual_ack_cnt);
418
419 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
420 kfree(buf);
421 return ret;
422}
423
424ssize_t iwl3945_ucode_general_stats_read(struct file *file,
425 char __user *user_buf,
426 size_t count, loff_t *ppos)
427{
428 struct iwl_priv *priv = file->private_data;
429 int pos = 0;
430 char *buf;
431 int bufsz = sizeof(struct iwl39_statistics_general) * 10 + 300;
432 ssize_t ret;
433 struct iwl39_statistics_general *general, *accum_general;
434 struct iwl39_statistics_general *delta_general, *max_general;
435 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
436 struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
437
438 if (!iwl_legacy_is_alive(priv))
439 return -EAGAIN;
440
441 buf = kzalloc(bufsz, GFP_KERNEL);
442 if (!buf) {
443 IWL_ERR(priv, "Can not allocate Buffer\n");
444 return -ENOMEM;
445 }
446
447 /*
448 * The statistic information display here is based on
449 * the last statistics notification from uCode
450 * might not reflect the current uCode activity
451 */
452 general = &priv->_3945.statistics.general;
453 dbg = &priv->_3945.statistics.general.dbg;
454 div = &priv->_3945.statistics.general.div;
455 accum_general = &priv->_3945.accum_statistics.general;
456 delta_general = &priv->_3945.delta_statistics.general;
457 max_general = &priv->_3945.max_delta.general;
458 accum_dbg = &priv->_3945.accum_statistics.general.dbg;
459 delta_dbg = &priv->_3945.delta_statistics.general.dbg;
460 max_dbg = &priv->_3945.max_delta.general.dbg;
461 accum_div = &priv->_3945.accum_statistics.general.div;
462 delta_div = &priv->_3945.delta_statistics.general.div;
463 max_div = &priv->_3945.max_delta.general.div;
464 pos += iwl3945_statistics_flag(priv, buf, bufsz);
465 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
466 "acumulative delta max\n",
467 "Statistics_General:");
468 pos += scnprintf(buf + pos, bufsz - pos,
469 " %-30s %10u %10u %10u %10u\n",
470 "burst_check:",
471 le32_to_cpu(dbg->burst_check),
472 accum_dbg->burst_check,
473 delta_dbg->burst_check, max_dbg->burst_check);
474 pos += scnprintf(buf + pos, bufsz - pos,
475 " %-30s %10u %10u %10u %10u\n",
476 "burst_count:",
477 le32_to_cpu(dbg->burst_count),
478 accum_dbg->burst_count,
479 delta_dbg->burst_count, max_dbg->burst_count);
480 pos += scnprintf(buf + pos, bufsz - pos,
481 " %-30s %10u %10u %10u %10u\n",
482 "sleep_time:",
483 le32_to_cpu(general->sleep_time),
484 accum_general->sleep_time,
485 delta_general->sleep_time, max_general->sleep_time);
486 pos += scnprintf(buf + pos, bufsz - pos,
487 " %-30s %10u %10u %10u %10u\n",
488 "slots_out:",
489 le32_to_cpu(general->slots_out),
490 accum_general->slots_out,
491 delta_general->slots_out, max_general->slots_out);
492 pos += scnprintf(buf + pos, bufsz - pos,
493 " %-30s %10u %10u %10u %10u\n",
494 "slots_idle:",
495 le32_to_cpu(general->slots_idle),
496 accum_general->slots_idle,
497 delta_general->slots_idle, max_general->slots_idle);
498 pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
499 le32_to_cpu(general->ttl_timestamp));
500 pos += scnprintf(buf + pos, bufsz - pos,
501 " %-30s %10u %10u %10u %10u\n",
502 "tx_on_a:",
503 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
504 delta_div->tx_on_a, max_div->tx_on_a);
505 pos += scnprintf(buf + pos, bufsz - pos,
506 " %-30s %10u %10u %10u %10u\n",
507 "tx_on_b:",
508 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
509 delta_div->tx_on_b, max_div->tx_on_b);
510 pos += scnprintf(buf + pos, bufsz - pos,
511 " %-30s %10u %10u %10u %10u\n",
512 "exec_time:",
513 le32_to_cpu(div->exec_time), accum_div->exec_time,
514 delta_div->exec_time, max_div->exec_time);
515 pos += scnprintf(buf + pos, bufsz - pos,
516 " %-30s %10u %10u %10u %10u\n",
517 "probe_time:",
518 le32_to_cpu(div->probe_time), accum_div->probe_time,
519 delta_div->probe_time, max_div->probe_time);
520 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
521 kfree(buf);
522 return ret;
523}
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
deleted file mode 100644
index 8fef4b32b447..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
+++ /dev/null
@@ -1,60 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-dev.h"
30#include "iwl-core.h"
31#include "iwl-debug.h"
32
33#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
34ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos);
36ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
37 size_t count, loff_t *ppos);
38ssize_t iwl3945_ucode_general_stats_read(struct file *file,
39 char __user *user_buf, size_t count,
40 loff_t *ppos);
41#else
42static ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
43 char __user *user_buf, size_t count,
44 loff_t *ppos)
45{
46 return 0;
47}
48static ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
49 char __user *user_buf, size_t count,
50 loff_t *ppos)
51{
52 return 0;
53}
54static ssize_t iwl3945_ucode_general_stats_read(struct file *file,
55 char __user *user_buf,
56 size_t count, loff_t *ppos)
57{
58 return 0;
59}
60#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
deleted file mode 100644
index 836c9919f82e..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
+++ /dev/null
@@ -1,187 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_3945_fh_h__
64#define __iwl_3945_fh_h__
65
66/************************************/
67/* iwl3945 Flow Handler Definitions */
68/************************************/
69
70/**
71 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
72 * Addresses are offsets from device's PCI hardware base address.
73 */
74#define FH39_MEM_LOWER_BOUND (0x0800)
75#define FH39_MEM_UPPER_BOUND (0x1000)
76
77#define FH39_CBCC_TABLE (FH39_MEM_LOWER_BOUND + 0x140)
78#define FH39_TFDB_TABLE (FH39_MEM_LOWER_BOUND + 0x180)
79#define FH39_RCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x400)
80#define FH39_RSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x4c0)
81#define FH39_TCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x500)
82#define FH39_TSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x680)
83
84/* TFDB (Transmit Frame Buffer Descriptor) */
85#define FH39_TFDB(_ch, buf) (FH39_TFDB_TABLE + \
86 ((_ch) * 2 + (buf)) * 0x28)
87#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TABLE + 0x50 * (_ch))
88
89/* CBCC channel is [0,2] */
90#define FH39_CBCC(_ch) (FH39_CBCC_TABLE + (_ch) * 0x8)
91#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00)
92#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04)
93
94/* RCSR channel is [0,2] */
95#define FH39_RCSR(_ch) (FH39_RCSR_TABLE + (_ch) * 0x40)
96#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00)
97#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04)
98#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20)
99#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24)
100
101#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0))
102
103/* RSSR */
104#define FH39_RSSR_CTRL (FH39_RSSR_TABLE + 0x000)
105#define FH39_RSSR_STATUS (FH39_RSSR_TABLE + 0x004)
106
107/* TCSR */
108#define FH39_TCSR(_ch) (FH39_TCSR_TABLE + (_ch) * 0x20)
109#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00)
110#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04)
111#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08)
112
113/* TSSR */
114#define FH39_TSSR_CBB_BASE (FH39_TSSR_TABLE + 0x000)
115#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TABLE + 0x008)
116#define FH39_TSSR_TX_STATUS (FH39_TSSR_TABLE + 0x010)
117
118
119/* DBM */
120
121#define FH39_SRVC_CHNL (6)
122
123#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20)
124#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4)
125
126#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000)
127
128#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000)
129
130#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000)
131
132#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000)
133
134#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000)
135
136#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000)
137
138#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
139#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)
140
141#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
142#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
143
144#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
145
146#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
147
148#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
149#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
150
151#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000)
152
153#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)
154
155#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
156#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)
157
158#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)
159
160#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
161#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)
162
163#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
164#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005)
165
166#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24)
167#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16)
168
169#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
170 (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
171 FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
172
173#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
174
175struct iwl3945_tfd_tb {
176 __le32 addr;
177 __le32 len;
178} __packed;
179
180struct iwl3945_tfd {
181 __le32 control_flags;
182 struct iwl3945_tfd_tb tbs[4];
183 u8 __pad[28];
184} __packed;
185
186
187#endif /* __iwl_3945_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
deleted file mode 100644
index 5c3a68d3af12..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
+++ /dev/null
@@ -1,291 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-3945-hw.h) only for hardware-related definitions.
65 * Please use iwl-commands.h for uCode API definitions.
66 * Please use iwl-3945.h for driver implementation definitions.
67 */
68
69#ifndef __iwl_3945_hw__
70#define __iwl_3945_hw__
71
72#include "iwl-eeprom.h"
73
74/* RSSI to dBm */
75#define IWL39_RSSI_OFFSET 95
76
77/*
78 * EEPROM related constants, enums, and structures.
79 */
80#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7)
81
82/*
83 * Mapping of a Tx power level, at factory calibration temperature,
84 * to a radio/DSP gain table index.
85 * One for each of 5 "sample" power levels in each band.
86 * v_det is measured at the factory, using the 3945's built-in power amplifier
87 * (PA) output voltage detector. This same detector is used during Tx of
88 * long packets in normal operation to provide feedback as to proper output
89 * level.
90 * Data copied from EEPROM.
91 * DO NOT ALTER THIS STRUCTURE!!!
92 */
93struct iwl3945_eeprom_txpower_sample {
94 u8 gain_index; /* index into power (gain) setup table ... */
95 s8 power; /* ... for this pwr level for this chnl group */
96 u16 v_det; /* PA output voltage */
97} __packed;
98
99/*
100 * Mappings of Tx power levels -> nominal radio/DSP gain table indexes.
101 * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
102 * Tx power setup code interpolates between the 5 "sample" power levels
103 * to determine the nominal setup for a requested power level.
104 * Data copied from EEPROM.
105 * DO NOT ALTER THIS STRUCTURE!!!
106 */
107struct iwl3945_eeprom_txpower_group {
108 struct iwl3945_eeprom_txpower_sample samples[5]; /* 5 power levels */
109 s32 a, b, c, d, e; /* coefficients for voltage->power
110 * formula (signed) */
111 s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
112 * frequency (signed) */
113 s8 saturation_power; /* highest power possible by h/w in this
114 * band */
115 u8 group_channel; /* "representative" channel # in this band */
116 s16 temperature; /* h/w temperature at factory calib this band
117 * (signed) */
118} __packed;
119
120/*
121 * Temperature-based Tx-power compensation data, not band-specific.
122 * These coefficients are use to modify a/b/c/d/e coeffs based on
123 * difference between current temperature and factory calib temperature.
124 * Data copied from EEPROM.
125 */
126struct iwl3945_eeprom_temperature_corr {
127 u32 Ta;
128 u32 Tb;
129 u32 Tc;
130 u32 Td;
131 u32 Te;
132} __packed;
133
134/*
135 * EEPROM map
136 */
137struct iwl3945_eeprom {
138 u8 reserved0[16];
139 u16 device_id; /* abs.ofs: 16 */
140 u8 reserved1[2];
141 u16 pmc; /* abs.ofs: 20 */
142 u8 reserved2[20];
143 u8 mac_address[6]; /* abs.ofs: 42 */
144 u8 reserved3[58];
145 u16 board_revision; /* abs.ofs: 106 */
146 u8 reserved4[11];
147 u8 board_pba_number[9]; /* abs.ofs: 119 */
148 u8 reserved5[8];
149 u16 version; /* abs.ofs: 136 */
150 u8 sku_cap; /* abs.ofs: 138 */
151 u8 leds_mode; /* abs.ofs: 139 */
152 u16 oem_mode;
153 u16 wowlan_mode; /* abs.ofs: 142 */
154 u16 leds_time_interval; /* abs.ofs: 144 */
155 u8 leds_off_time; /* abs.ofs: 146 */
156 u8 leds_on_time; /* abs.ofs: 147 */
157 u8 almgor_m_version; /* abs.ofs: 148 */
158 u8 antenna_switch_type; /* abs.ofs: 149 */
159 u8 reserved6[42];
160 u8 sku_id[4]; /* abs.ofs: 192 */
161
162/*
163 * Per-channel regulatory data.
164 *
165 * Each channel that *might* be supported by 3945 has a fixed location
166 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
167 * txpower (MSB).
168 *
169 * Entries immediately below are for 20 MHz channel width.
170 *
171 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
172 */
173 u16 band_1_count; /* abs.ofs: 196 */
174 struct iwl_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */
175
176/*
177 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
178 * 5.0 GHz channels 7, 8, 11, 12, 16
179 * (4915-5080MHz) (none of these is ever supported)
180 */
181 u16 band_2_count; /* abs.ofs: 226 */
182 struct iwl_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
183
184/*
185 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
186 * (5170-5320MHz)
187 */
188 u16 band_3_count; /* abs.ofs: 254 */
189 struct iwl_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
190
191/*
192 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
193 * (5500-5700MHz)
194 */
195 u16 band_4_count; /* abs.ofs: 280 */
196 struct iwl_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
197
198/*
199 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
200 * (5725-5825MHz)
201 */
202 u16 band_5_count; /* abs.ofs: 304 */
203 struct iwl_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
204
205 u8 reserved9[194];
206
207/*
208 * 3945 Txpower calibration data.
209 */
210#define IWL_NUM_TX_CALIB_GROUPS 5
211 struct iwl3945_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS];
212/* abs.ofs: 512 */
213 struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
214 u8 reserved16[172]; /* fill out to full 1024 byte block */
215} __packed;
216
217#define IWL3945_EEPROM_IMG_SIZE 1024
218
219/* End of EEPROM */
220
221#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */
222#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
223
224/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
225#define IWL39_NUM_QUEUES 5
226#define IWL39_CMD_QUEUE_NUM 4
227
228#define IWL_DEFAULT_TX_RETRY 15
229
230/*********************************************/
231
232#define RFD_SIZE 4
233#define NUM_TFD_CHUNKS 4
234
235#define RX_QUEUE_SIZE 256
236#define RX_QUEUE_MASK 255
237#define RX_QUEUE_SIZE_LOG 8
238
239#define U32_PAD(n) ((4-(n))&0x3)
240
241#define TFD_CTL_COUNT_SET(n) (n << 24)
242#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7)
243#define TFD_CTL_PAD_SET(n) (n << 28)
244#define TFD_CTL_PAD_GET(ctl) (ctl >> 28)
245
246/* Sizes and addresses for instruction and data memory (SRAM) in
247 * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
248#define IWL39_RTC_INST_LOWER_BOUND (0x000000)
249#define IWL39_RTC_INST_UPPER_BOUND (0x014000)
250
251#define IWL39_RTC_DATA_LOWER_BOUND (0x800000)
252#define IWL39_RTC_DATA_UPPER_BOUND (0x808000)
253
254#define IWL39_RTC_INST_SIZE (IWL39_RTC_INST_UPPER_BOUND - \
255 IWL39_RTC_INST_LOWER_BOUND)
256#define IWL39_RTC_DATA_SIZE (IWL39_RTC_DATA_UPPER_BOUND - \
257 IWL39_RTC_DATA_LOWER_BOUND)
258
259#define IWL39_MAX_INST_SIZE IWL39_RTC_INST_SIZE
260#define IWL39_MAX_DATA_SIZE IWL39_RTC_DATA_SIZE
261
262/* Size of uCode instruction memory in bootstrap state machine */
263#define IWL39_MAX_BSM_SIZE IWL39_RTC_INST_SIZE
264
265static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr)
266{
267 return (addr >= IWL39_RTC_DATA_LOWER_BOUND) &&
268 (addr < IWL39_RTC_DATA_UPPER_BOUND);
269}
270
271/* Base physical address of iwl3945_shared is provided to FH_TSSR_CBB_BASE
272 * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */
273struct iwl3945_shared {
274 __le32 tx_base_ptr[8];
275} __packed;
276
277static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags)
278{
279 return le16_to_cpu(rate_n_flags) & 0xFF;
280}
281
282static inline u16 iwl3945_hw_get_rate_n_flags(__le16 rate_n_flags)
283{
284 return le16_to_cpu(rate_n_flags);
285}
286
287static inline __le16 iwl3945_hw_set_rate_n_flags(u8 rate, u16 flags)
288{
289 return cpu_to_le16((u16)rate|flags);
290}
291#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.c b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
deleted file mode 100644
index 7a7f0f38c8ab..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-led.c
+++ /dev/null
@@ -1,63 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <net/mac80211.h>
36#include <linux/etherdevice.h>
37#include <asm/unaligned.h>
38
39#include "iwl-commands.h"
40#include "iwl-3945.h"
41#include "iwl-core.h"
42#include "iwl-dev.h"
43#include "iwl-3945-led.h"
44
45
46/* Send led command */
47static int iwl3945_send_led_cmd(struct iwl_priv *priv,
48 struct iwl_led_cmd *led_cmd)
49{
50 struct iwl_host_cmd cmd = {
51 .id = REPLY_LEDS_CMD,
52 .len = sizeof(struct iwl_led_cmd),
53 .data = led_cmd,
54 .flags = CMD_ASYNC,
55 .callback = NULL,
56 };
57
58 return iwl_legacy_send_cmd(priv, &cmd);
59}
60
61const struct iwl_led_ops iwl3945_led_ops = {
62 .cmd = iwl3945_send_led_cmd,
63};
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.h b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
deleted file mode 100644
index 96716276eb0d..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-led.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_3945_led_h__
28#define __iwl_3945_led_h__
29
30extern const struct iwl_led_ops iwl3945_led_ops;
31
32#endif /* __iwl_3945_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
deleted file mode 100644
index 8faeaf2dddec..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
+++ /dev/null
@@ -1,996 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/init.h>
29#include <linux/skbuff.h>
30#include <linux/slab.h>
31#include <net/mac80211.h>
32
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/delay.h>
36
37#include <linux/workqueue.h>
38
39#include "iwl-commands.h"
40#include "iwl-3945.h"
41#include "iwl-sta.h"
42
43#define RS_NAME "iwl-3945-rs"
44
45static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = {
46 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
47};
48
49static s32 iwl3945_expected_tpt_g_prot[IWL_RATE_COUNT_3945] = {
50 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
51};
52
53static s32 iwl3945_expected_tpt_a[IWL_RATE_COUNT_3945] = {
54 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
55};
56
57static s32 iwl3945_expected_tpt_b[IWL_RATE_COUNT_3945] = {
58 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
59};
60
61struct iwl3945_tpt_entry {
62 s8 min_rssi;
63 u8 index;
64};
65
66static struct iwl3945_tpt_entry iwl3945_tpt_table_a[] = {
67 {-60, IWL_RATE_54M_INDEX},
68 {-64, IWL_RATE_48M_INDEX},
69 {-72, IWL_RATE_36M_INDEX},
70 {-80, IWL_RATE_24M_INDEX},
71 {-84, IWL_RATE_18M_INDEX},
72 {-85, IWL_RATE_12M_INDEX},
73 {-87, IWL_RATE_9M_INDEX},
74 {-89, IWL_RATE_6M_INDEX}
75};
76
77static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
78 {-60, IWL_RATE_54M_INDEX},
79 {-64, IWL_RATE_48M_INDEX},
80 {-68, IWL_RATE_36M_INDEX},
81 {-80, IWL_RATE_24M_INDEX},
82 {-84, IWL_RATE_18M_INDEX},
83 {-85, IWL_RATE_12M_INDEX},
84 {-86, IWL_RATE_11M_INDEX},
85 {-88, IWL_RATE_5M_INDEX},
86 {-90, IWL_RATE_2M_INDEX},
87 {-92, IWL_RATE_1M_INDEX}
88};
89
90#define IWL_RATE_MAX_WINDOW 62
91#define IWL_RATE_FLUSH (3*HZ)
92#define IWL_RATE_WIN_FLUSH (HZ/2)
93#define IWL39_RATE_HIGH_TH 11520
94#define IWL_SUCCESS_UP_TH 8960
95#define IWL_SUCCESS_DOWN_TH 10880
96#define IWL_RATE_MIN_FAILURE_TH 6
97#define IWL_RATE_MIN_SUCCESS_TH 8
98#define IWL_RATE_DECREASE_TH 1920
99#define IWL_RATE_RETRY_TH 15
100
101static u8 iwl3945_get_rate_index_by_rssi(s32 rssi, enum ieee80211_band band)
102{
103 u32 index = 0;
104 u32 table_size = 0;
105 struct iwl3945_tpt_entry *tpt_table = NULL;
106
107 if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL))
108 rssi = IWL_MIN_RSSI_VAL;
109
110 switch (band) {
111 case IEEE80211_BAND_2GHZ:
112 tpt_table = iwl3945_tpt_table_g;
113 table_size = ARRAY_SIZE(iwl3945_tpt_table_g);
114 break;
115
116 case IEEE80211_BAND_5GHZ:
117 tpt_table = iwl3945_tpt_table_a;
118 table_size = ARRAY_SIZE(iwl3945_tpt_table_a);
119 break;
120
121 default:
122 BUG();
123 break;
124 }
125
126 while ((index < table_size) && (rssi < tpt_table[index].min_rssi))
127 index++;
128
129 index = min(index, (table_size - 1));
130
131 return tpt_table[index].index;
132}
133
134static void iwl3945_clear_window(struct iwl3945_rate_scale_data *window)
135{
136 window->data = 0;
137 window->success_counter = 0;
138 window->success_ratio = -1;
139 window->counter = 0;
140 window->average_tpt = IWL_INVALID_VALUE;
141 window->stamp = 0;
142}
143
144/**
145 * iwl3945_rate_scale_flush_windows - flush out the rate scale windows
146 *
147 * Returns the number of windows that have gathered data but were
148 * not flushed. If there were any that were not flushed, then
149 * reschedule the rate flushing routine.
150 */
151static int iwl3945_rate_scale_flush_windows(struct iwl3945_rs_sta *rs_sta)
152{
153 int unflushed = 0;
154 int i;
155 unsigned long flags;
156 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
157
158 /*
159 * For each rate, if we have collected data on that rate
160 * and it has been more than IWL_RATE_WIN_FLUSH
161 * since we flushed, clear out the gathered statistics
162 */
163 for (i = 0; i < IWL_RATE_COUNT_3945; i++) {
164 if (!rs_sta->win[i].counter)
165 continue;
166
167 spin_lock_irqsave(&rs_sta->lock, flags);
168 if (time_after(jiffies, rs_sta->win[i].stamp +
169 IWL_RATE_WIN_FLUSH)) {
170 IWL_DEBUG_RATE(priv, "flushing %d samples of rate "
171 "index %d\n",
172 rs_sta->win[i].counter, i);
173 iwl3945_clear_window(&rs_sta->win[i]);
174 } else
175 unflushed++;
176 spin_unlock_irqrestore(&rs_sta->lock, flags);
177 }
178
179 return unflushed;
180}
181
182#define IWL_RATE_FLUSH_MAX 5000 /* msec */
183#define IWL_RATE_FLUSH_MIN 50 /* msec */
184#define IWL_AVERAGE_PACKETS 1500
185
186static void iwl3945_bg_rate_scale_flush(unsigned long data)
187{
188 struct iwl3945_rs_sta *rs_sta = (void *)data;
189 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
190 int unflushed = 0;
191 unsigned long flags;
192 u32 packet_count, duration, pps;
193
194 IWL_DEBUG_RATE(priv, "enter\n");
195
196 unflushed = iwl3945_rate_scale_flush_windows(rs_sta);
197
198 spin_lock_irqsave(&rs_sta->lock, flags);
199
200 /* Number of packets Rx'd since last time this timer ran */
201 packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;
202
203 rs_sta->last_tx_packets = rs_sta->tx_packets + 1;
204
205 if (unflushed) {
206 duration =
207 jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
208
209 IWL_DEBUG_RATE(priv, "Tx'd %d packets in %dms\n",
210 packet_count, duration);
211
212 /* Determine packets per second */
213 if (duration)
214 pps = (packet_count * 1000) / duration;
215 else
216 pps = 0;
217
218 if (pps) {
219 duration = (IWL_AVERAGE_PACKETS * 1000) / pps;
220 if (duration < IWL_RATE_FLUSH_MIN)
221 duration = IWL_RATE_FLUSH_MIN;
222 else if (duration > IWL_RATE_FLUSH_MAX)
223 duration = IWL_RATE_FLUSH_MAX;
224 } else
225 duration = IWL_RATE_FLUSH_MAX;
226
227 rs_sta->flush_time = msecs_to_jiffies(duration);
228
229 IWL_DEBUG_RATE(priv, "new flush period: %d msec ave %d\n",
230 duration, packet_count);
231
232 mod_timer(&rs_sta->rate_scale_flush, jiffies +
233 rs_sta->flush_time);
234
235 rs_sta->last_partial_flush = jiffies;
236 } else {
237 rs_sta->flush_time = IWL_RATE_FLUSH;
238 rs_sta->flush_pending = 0;
239 }
240 /* If there weren't any unflushed entries, we don't schedule the timer
241 * to run again */
242
243 rs_sta->last_flush = jiffies;
244
245 spin_unlock_irqrestore(&rs_sta->lock, flags);
246
247 IWL_DEBUG_RATE(priv, "leave\n");
248}
249
250/**
251 * iwl3945_collect_tx_data - Update the success/failure sliding window
252 *
253 * We keep a sliding window of the last 64 packets transmitted
254 * at this rate. window->data contains the bitmask of successful
255 * packets.
256 */
257static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
258 struct iwl3945_rate_scale_data *window,
259 int success, int retries, int index)
260{
261 unsigned long flags;
262 s32 fail_count;
263 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
264
265 if (!retries) {
266 IWL_DEBUG_RATE(priv, "leave: retries == 0 -- should be at least 1\n");
267 return;
268 }
269
270 spin_lock_irqsave(&rs_sta->lock, flags);
271
272 /*
273 * Keep track of only the latest 62 tx frame attempts in this rate's
274 * history window; anything older isn't really relevant any more.
275 * If we have filled up the sliding window, drop the oldest attempt;
276 * if the oldest attempt (highest bit in bitmap) shows "success",
277 * subtract "1" from the success counter (this is the main reason
278 * we keep these bitmaps!).
279 * */
280 while (retries > 0) {
281 if (window->counter >= IWL_RATE_MAX_WINDOW) {
282
283 /* remove earliest */
284 window->counter = IWL_RATE_MAX_WINDOW - 1;
285
286 if (window->data & (1ULL << (IWL_RATE_MAX_WINDOW - 1))) {
287 window->data &= ~(1ULL << (IWL_RATE_MAX_WINDOW - 1));
288 window->success_counter--;
289 }
290 }
291
292 /* Increment frames-attempted counter */
293 window->counter++;
294
295 /* Shift bitmap by one frame (throw away oldest history),
296 * OR in "1", and increment "success" if this
297 * frame was successful. */
298 window->data <<= 1;
299 if (success > 0) {
300 window->success_counter++;
301 window->data |= 0x1;
302 success--;
303 }
304
305 retries--;
306 }
307
308 /* Calculate current success ratio, avoid divide-by-0! */
309 if (window->counter > 0)
310 window->success_ratio = 128 * (100 * window->success_counter)
311 / window->counter;
312 else
313 window->success_ratio = IWL_INVALID_VALUE;
314
315 fail_count = window->counter - window->success_counter;
316
317 /* Calculate average throughput, if we have enough history. */
318 if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
319 (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
320 window->average_tpt = ((window->success_ratio *
321 rs_sta->expected_tpt[index] + 64) / 128);
322 else
323 window->average_tpt = IWL_INVALID_VALUE;
324
325 /* Tag this window as having been updated */
326 window->stamp = jiffies;
327
328 spin_unlock_irqrestore(&rs_sta->lock, flags);
329
330}
331
332/*
333 * Called after adding a new station to initialize rate scaling
334 */
335void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
336{
337 struct ieee80211_hw *hw = priv->hw;
338 struct ieee80211_conf *conf = &priv->hw->conf;
339 struct iwl3945_sta_priv *psta;
340 struct iwl3945_rs_sta *rs_sta;
341 struct ieee80211_supported_band *sband;
342 int i;
343
344 IWL_DEBUG_INFO(priv, "enter\n");
345 if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
346 goto out;
347
348 psta = (struct iwl3945_sta_priv *) sta->drv_priv;
349 rs_sta = &psta->rs_sta;
350 sband = hw->wiphy->bands[conf->channel->band];
351
352 rs_sta->priv = priv;
353
354 rs_sta->start_rate = IWL_RATE_INVALID;
355
356 /* default to just 802.11b */
357 rs_sta->expected_tpt = iwl3945_expected_tpt_b;
358
359 rs_sta->last_partial_flush = jiffies;
360 rs_sta->last_flush = jiffies;
361 rs_sta->flush_time = IWL_RATE_FLUSH;
362 rs_sta->last_tx_packets = 0;
363
364 rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
365 rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;
366
367 for (i = 0; i < IWL_RATE_COUNT_3945; i++)
368 iwl3945_clear_window(&rs_sta->win[i]);
369
370 /* TODO: what is a good starting rate for STA? About middle? Maybe not
371 * the lowest or the highest rate.. Could consider using RSSI from
372 * previous packets? Need to have IEEE 802.1X auth succeed immediately
373 * after assoc.. */
374
375 for (i = sband->n_bitrates - 1; i >= 0; i--) {
376 if (sta->supp_rates[sband->band] & (1 << i)) {
377 rs_sta->last_txrate_idx = i;
378 break;
379 }
380 }
381
382 priv->_3945.sta_supp_rates = sta->supp_rates[sband->band];
383 /* For 5 GHz band it start at IWL_FIRST_OFDM_RATE */
384 if (sband->band == IEEE80211_BAND_5GHZ) {
385 rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
386 priv->_3945.sta_supp_rates = priv->_3945.sta_supp_rates <<
387 IWL_FIRST_OFDM_RATE;
388 }
389
390out:
391 priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
392
393 IWL_DEBUG_INFO(priv, "leave\n");
394}
395
396static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
397{
398 return hw->priv;
399}
400
401/* rate scale requires free function to be implemented */
402static void iwl3945_rs_free(void *priv)
403{
404 return;
405}
406
407static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
408{
409 struct iwl3945_rs_sta *rs_sta;
410 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
411 struct iwl_priv *priv __maybe_unused = iwl_priv;
412
413 IWL_DEBUG_RATE(priv, "enter\n");
414
415 rs_sta = &psta->rs_sta;
416
417 spin_lock_init(&rs_sta->lock);
418 init_timer(&rs_sta->rate_scale_flush);
419
420 IWL_DEBUG_RATE(priv, "leave\n");
421
422 return rs_sta;
423}
424
425static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
426 void *priv_sta)
427{
428 struct iwl3945_rs_sta *rs_sta = priv_sta;
429
430 /*
431 * Be careful not to use any members of iwl3945_rs_sta (like trying
432 * to use iwl_priv to print out debugging) since it may not be fully
433 * initialized at this point.
434 */
435 del_timer_sync(&rs_sta->rate_scale_flush);
436}
437
438
439/**
440 * iwl3945_rs_tx_status - Update rate control values based on Tx results
441 *
442 * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by
443 * the hardware for each rate.
444 */
445static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
446 struct ieee80211_sta *sta, void *priv_sta,
447 struct sk_buff *skb)
448{
449 s8 retries = 0, current_count;
450 int scale_rate_index, first_index, last_index;
451 unsigned long flags;
452 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
453 struct iwl3945_rs_sta *rs_sta = priv_sta;
454 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
455
456 IWL_DEBUG_RATE(priv, "enter\n");
457
458 retries = info->status.rates[0].count;
459 /* Sanity Check for retries */
460 if (retries > IWL_RATE_RETRY_TH)
461 retries = IWL_RATE_RETRY_TH;
462
463 first_index = sband->bitrates[info->status.rates[0].idx].hw_value;
464 if ((first_index < 0) || (first_index >= IWL_RATE_COUNT_3945)) {
465 IWL_DEBUG_RATE(priv, "leave: Rate out of bounds: %d\n", first_index);
466 return;
467 }
468
469 if (!priv_sta) {
470 IWL_DEBUG_RATE(priv, "leave: No STA priv data to update!\n");
471 return;
472 }
473
474 /* Treat uninitialized rate scaling data same as non-existing. */
475 if (!rs_sta->priv) {
476 IWL_DEBUG_RATE(priv, "leave: STA priv data uninitialized!\n");
477 return;
478 }
479
480
481 rs_sta->tx_packets++;
482
483 scale_rate_index = first_index;
484 last_index = first_index;
485
486 /*
487 * Update the window for each rate. We determine which rates
488 * were Tx'd based on the total number of retries vs. the number
489 * of retries configured for each rate -- currently set to the
490 * priv value 'retry_rate' vs. rate specific
491 *
492 * On exit from this while loop last_index indicates the rate
493 * at which the frame was finally transmitted (or failed if no
494 * ACK)
495 */
496 while (retries > 1) {
497 if ((retries - 1) < priv->retry_rate) {
498 current_count = (retries - 1);
499 last_index = scale_rate_index;
500 } else {
501 current_count = priv->retry_rate;
502 last_index = iwl3945_rs_next_rate(priv,
503 scale_rate_index);
504 }
505
506 /* Update this rate accounting for as many retries
507 * as was used for it (per current_count) */
508 iwl3945_collect_tx_data(rs_sta,
509 &rs_sta->win[scale_rate_index],
510 0, current_count, scale_rate_index);
511 IWL_DEBUG_RATE(priv, "Update rate %d for %d retries.\n",
512 scale_rate_index, current_count);
513
514 retries -= current_count;
515
516 scale_rate_index = last_index;
517 }
518
519
520 /* Update the last index window with success/failure based on ACK */
521 IWL_DEBUG_RATE(priv, "Update rate %d with %s.\n",
522 last_index,
523 (info->flags & IEEE80211_TX_STAT_ACK) ?
524 "success" : "failure");
525 iwl3945_collect_tx_data(rs_sta,
526 &rs_sta->win[last_index],
527 info->flags & IEEE80211_TX_STAT_ACK, 1, last_index);
528
529 /* We updated the rate scale window -- if its been more than
530 * flush_time since the last run, schedule the flush
531 * again */
532 spin_lock_irqsave(&rs_sta->lock, flags);
533
534 if (!rs_sta->flush_pending &&
535 time_after(jiffies, rs_sta->last_flush +
536 rs_sta->flush_time)) {
537
538 rs_sta->last_partial_flush = jiffies;
539 rs_sta->flush_pending = 1;
540 mod_timer(&rs_sta->rate_scale_flush,
541 jiffies + rs_sta->flush_time);
542 }
543
544 spin_unlock_irqrestore(&rs_sta->lock, flags);
545
546 IWL_DEBUG_RATE(priv, "leave\n");
547}
548
549static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
550 u8 index, u16 rate_mask, enum ieee80211_band band)
551{
552 u8 high = IWL_RATE_INVALID;
553 u8 low = IWL_RATE_INVALID;
554 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
555
556 /* 802.11A walks to the next literal adjacent rate in
557 * the rate table */
558 if (unlikely(band == IEEE80211_BAND_5GHZ)) {
559 int i;
560 u32 mask;
561
562 /* Find the previous rate that is in the rate mask */
563 i = index - 1;
564 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
565 if (rate_mask & mask) {
566 low = i;
567 break;
568 }
569 }
570
571 /* Find the next rate that is in the rate mask */
572 i = index + 1;
573 for (mask = (1 << i); i < IWL_RATE_COUNT_3945;
574 i++, mask <<= 1) {
575 if (rate_mask & mask) {
576 high = i;
577 break;
578 }
579 }
580
581 return (high << 8) | low;
582 }
583
584 low = index;
585 while (low != IWL_RATE_INVALID) {
586 if (rs_sta->tgg)
587 low = iwl3945_rates[low].prev_rs_tgg;
588 else
589 low = iwl3945_rates[low].prev_rs;
590 if (low == IWL_RATE_INVALID)
591 break;
592 if (rate_mask & (1 << low))
593 break;
594 IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
595 }
596
597 high = index;
598 while (high != IWL_RATE_INVALID) {
599 if (rs_sta->tgg)
600 high = iwl3945_rates[high].next_rs_tgg;
601 else
602 high = iwl3945_rates[high].next_rs;
603 if (high == IWL_RATE_INVALID)
604 break;
605 if (rate_mask & (1 << high))
606 break;
607 IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
608 }
609
610 return (high << 8) | low;
611}
612
613/**
614 * iwl3945_rs_get_rate - find the rate for the requested packet
615 *
616 * Returns the ieee80211_rate structure allocated by the driver.
617 *
618 * The rate control algorithm has no internal mapping between hw_mode's
619 * rate ordering and the rate ordering used by the rate control algorithm.
620 *
621 * The rate control algorithm uses a single table of rates that goes across
622 * the entire A/B/G spectrum vs. being limited to just one particular
623 * hw_mode.
624 *
625 * As such, we can't convert the index obtained below into the hw_mode's
626 * rate table and must reference the driver allocated rate table
627 *
628 */
static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
			void *priv_sta, struct ieee80211_tx_rate_control *txrc)
{
	struct ieee80211_supported_band *sband = txrc->sband;
	struct sk_buff *skb = txrc->skb;
	u8 low = IWL_RATE_INVALID;
	u8 high = IWL_RATE_INVALID;
	u16 high_low;
	int index;
	struct iwl3945_rs_sta *rs_sta = priv_sta;
	struct iwl3945_rate_scale_data *window = NULL;
	int current_tpt = IWL_INVALID_VALUE;
	int low_tpt = IWL_INVALID_VALUE;
	int high_tpt = IWL_INVALID_VALUE;
	u32 fail_count;
	s8 scale_action = 0;
	unsigned long flags;
	u16 rate_mask;
	s8 max_rate_idx = -1;
	struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	IWL_DEBUG_RATE(priv, "enter\n");

	/* Treat uninitialized rate scaling data same as non-existing. */
	if (rs_sta && !rs_sta->priv) {
		IWL_DEBUG_RATE(priv, "Rate scaling information not initialized yet.\n");
		priv_sta = NULL;
	}

	/* Let mac80211 pick a baseline rate for management/low-rate frames
	 * (or when priv_sta was just invalidated above). */
	if (rate_control_send_low(sta, priv_sta, txrc))
		return;

	rate_mask = sta->supp_rates[sband->band];

	/* get user max rate if set */
	max_rate_idx = txrc->max_rate_idx;
	if ((sband->band == IEEE80211_BAND_5GHZ) && (max_rate_idx != -1))
		max_rate_idx += IWL_FIRST_OFDM_RATE;
	if ((max_rate_idx < 0) || (max_rate_idx >= IWL_RATE_COUNT))
		max_rate_idx = -1;

	/* NOTE(review): rs_sta is dereferenced from here on; assumes
	 * rate_control_send_low() always returns true when priv_sta is
	 * NULL -- confirm against mac80211. */
	index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT_3945 - 1);

	if (sband->band == IEEE80211_BAND_5GHZ)
		rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;

	spin_lock_irqsave(&rs_sta->lock, flags);

	/* for recent assoc, choose best rate regarding
	 * to rssi value
	 */
	if (rs_sta->start_rate != IWL_RATE_INVALID) {
		if (rs_sta->start_rate < index &&
		   (rate_mask & (1 << rs_sta->start_rate)))
			index = rs_sta->start_rate;
		rs_sta->start_rate = IWL_RATE_INVALID;
	}

	/* force user max rate if set by user */
	if ((max_rate_idx != -1) && (max_rate_idx < index)) {
		if (rate_mask & (1 << max_rate_idx))
			index = max_rate_idx;
	}

	window = &(rs_sta->win[index]);

	fail_count = window->counter - window->success_counter;

	/* Not enough history at this rate to judge it -- keep using it. */
	if (((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
	     (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) {
		spin_unlock_irqrestore(&rs_sta->lock, flags);

		IWL_DEBUG_RATE(priv, "Invalid average_tpt on rate %d: "
			       "counter: %d, success_counter: %d, "
			       "expected_tpt is %sNULL\n",
			       index,
			       window->counter,
			       window->success_counter,
			       rs_sta->expected_tpt ? "not " : "");

	   /* Can't calculate this yet; not enough history */
		window->average_tpt = IWL_INVALID_VALUE;
		goto out;

	}

	current_tpt = window->average_tpt;

	high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask,
					     sband->band);
	low = high_low & 0xff;
	high = (high_low >> 8) & 0xff;

	/* If user set max rate, don't allow higher than the user constraint */
	if ((max_rate_idx != -1) && (max_rate_idx < high))
		high = IWL_RATE_INVALID;

	/* Collect Measured throughputs of adjacent rates */
	if (low != IWL_RATE_INVALID)
		low_tpt = rs_sta->win[low].average_tpt;

	if (high != IWL_RATE_INVALID)
		high_tpt = rs_sta->win[high].average_tpt;

	spin_unlock_irqrestore(&rs_sta->lock, flags);

	/* Decide whether to move up (+1), down (-1) or stay (0). */
	scale_action = 0;

	/* Low success ratio , need to drop the rate */
	if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) {
		IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
		scale_action = -1;
	/* No throughput measured yet for adjacent rates,
	 * try increase */
	} else if ((low_tpt == IWL_INVALID_VALUE) &&
		   (high_tpt == IWL_INVALID_VALUE)) {

		if (high != IWL_RATE_INVALID && window->success_ratio >= IWL_RATE_INCREASE_TH)
			scale_action = 1;
		else if (low != IWL_RATE_INVALID)
			scale_action = 0;

	/* Both adjacent throughputs are measured, but neither one has
	 * better throughput; we're using the best rate, don't change
	 * it! */
	} else if ((low_tpt != IWL_INVALID_VALUE) &&
		 (high_tpt != IWL_INVALID_VALUE) &&
		 (low_tpt < current_tpt) && (high_tpt < current_tpt)) {

		IWL_DEBUG_RATE(priv, "No action -- low [%d] & high [%d] < "
			       "current_tpt [%d]\n",
			       low_tpt, high_tpt, current_tpt);
		scale_action = 0;

	/* At least one of the rates has better throughput */
	} else {
		if (high_tpt != IWL_INVALID_VALUE) {

			/* High rate has better throughput, Increase
			 * rate */
			if (high_tpt > current_tpt &&
				window->success_ratio >= IWL_RATE_INCREASE_TH)
				scale_action = 1;
			else {
				/* Higher rate was measured but is no
				 * better (or success too low): hold the
				 * current rate.  (The debug string below
				 * predates this behavior.) */
				IWL_DEBUG_RATE(priv,
				    "decrease rate because of high tpt\n");
				scale_action = 0;
			}
		} else if (low_tpt != IWL_INVALID_VALUE) {
			if (low_tpt > current_tpt) {
				IWL_DEBUG_RATE(priv,
				    "decrease rate because of low tpt\n");
				scale_action = -1;
			} else if (window->success_ratio >= IWL_RATE_INCREASE_TH) {
				/* Lower rate is no better and we are
				 * succeeding here -- try a faster rate */
				scale_action = 1;
			}
		}
	}

	/* Sanity check; asked for decrease, but success rate or throughput
	 * has been good at old rate.  Don't change it. */
	if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
		    ((window->success_ratio > IWL_RATE_HIGH_TH) ||
		     (current_tpt > (100 * rs_sta->expected_tpt[low]))))
		scale_action = 0;

	switch (scale_action) {
	case -1:

		/* Decrease rate */
		if (low != IWL_RATE_INVALID)
			index = low;
		break;

	case 1:
		/* Increase rate */
		if (high != IWL_RATE_INVALID)
			index = high;

		break;

	case 0:
	default:
		/* No change */
		break;
	}

	IWL_DEBUG_RATE(priv, "Selected %d (action %d) - low %d high %d\n",
		       index, scale_action, low, high);

 out:

	/* Publish the chosen rate to mac80211; 5 GHz indices are reported
	 * relative to the first OFDM rate. */
	if (sband->band == IEEE80211_BAND_5GHZ) {
		if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE))
			index = IWL_FIRST_OFDM_RATE;
		rs_sta->last_txrate_idx = index;
		info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE;
	} else {
		rs_sta->last_txrate_idx = index;
		info->control.rates[0].idx = rs_sta->last_txrate_idx;
	}

	IWL_DEBUG_RATE(priv, "leave: %d\n", index);
}
836
837#ifdef CONFIG_MAC80211_DEBUGFS
838static int iwl3945_open_file_generic(struct inode *inode, struct file *file)
839{
840 file->private_data = inode->i_private;
841 return 0;
842}
843
844static ssize_t iwl3945_sta_dbgfs_stats_table_read(struct file *file,
845 char __user *user_buf,
846 size_t count, loff_t *ppos)
847{
848 char *buff;
849 int desc = 0;
850 int j;
851 ssize_t ret;
852 struct iwl3945_rs_sta *lq_sta = file->private_data;
853
854 buff = kmalloc(1024, GFP_KERNEL);
855 if (!buff)
856 return -ENOMEM;
857
858 desc += sprintf(buff + desc, "tx packets=%d last rate index=%d\n"
859 "rate=0x%X flush time %d\n",
860 lq_sta->tx_packets,
861 lq_sta->last_txrate_idx,
862 lq_sta->start_rate, jiffies_to_msecs(lq_sta->flush_time));
863 for (j = 0; j < IWL_RATE_COUNT_3945; j++) {
864 desc += sprintf(buff+desc,
865 "counter=%d success=%d %%=%d\n",
866 lq_sta->win[j].counter,
867 lq_sta->win[j].success_counter,
868 lq_sta->win[j].success_ratio);
869 }
870 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
871 kfree(buff);
872 return ret;
873}
874
/* File operations for the per-station rate statistics debugfs file. */
static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
	.read = iwl3945_sta_dbgfs_stats_table_read,
	.open = iwl3945_open_file_generic,
	.llseek = default_llseek,
};
880
881static void iwl3945_add_debugfs(void *priv, void *priv_sta,
882 struct dentry *dir)
883{
884 struct iwl3945_rs_sta *lq_sta = priv_sta;
885
886 lq_sta->rs_sta_dbgfs_stats_table_file =
887 debugfs_create_file("rate_stats_table", 0600, dir,
888 lq_sta, &rs_sta_dbgfs_stats_table_ops);
889
890}
891
892static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
893{
894 struct iwl3945_rs_sta *lq_sta = priv_sta;
895 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
896}
897#endif
898
899/*
900 * Initialization of rate scaling information is done by driver after
901 * the station is added. Since mac80211 calls this function before a
902 * station is added we ignore it.
903 */
904static void iwl3945_rs_rate_init_stub(void *priv_r,
905 struct ieee80211_supported_band *sband,
906 struct ieee80211_sta *sta, void *priv_sta)
907{
908}
909
/* mac80211 rate-control hooks for the 3945 rate scaling algorithm,
 * registered via iwl3945_rate_control_register(). */
static struct rate_control_ops rs_ops = {
	.module = NULL,
	.name = RS_NAME,
	.tx_status = iwl3945_rs_tx_status,
	.get_rate = iwl3945_rs_get_rate,
	.rate_init = iwl3945_rs_rate_init_stub,
	.alloc = iwl3945_rs_alloc,
	.free = iwl3945_rs_free,
	.alloc_sta = iwl3945_rs_alloc_sta,
	.free_sta = iwl3945_rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = iwl3945_add_debugfs,
	.remove_sta_debugfs = iwl3945_remove_debugfs,
#endif

};
/*
 * iwl3945_rate_scale_init - initialize rate scaling for a just-added station
 *
 * Looks up the mac80211 station for @sta_id (under RCU), selects the
 * expected-throughput table for the current band/protection mode, and
 * seeds the starting rate from the last observed RX RSSI.
 */
void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
{
	struct iwl_priv *priv = hw->priv;
	s32 rssi = 0;
	unsigned long flags;
	struct iwl3945_rs_sta *rs_sta;
	struct ieee80211_sta *sta;
	struct iwl3945_sta_priv *psta;

	IWL_DEBUG_RATE(priv, "enter\n");

	/* sta and rs_sta (embedded in sta->drv_priv) are only valid while
	 * the RCU read lock is held. */
	rcu_read_lock();

	sta = ieee80211_find_sta(priv->contexts[IWL_RXON_CTX_BSS].vif,
				 priv->stations[sta_id].sta.sta.addr);
	if (!sta) {
		IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n");
		rcu_read_unlock();
		return;
	}

	psta = (void *) sta->drv_priv;
	rs_sta = &psta->rs_sta;

	spin_lock_irqsave(&rs_sta->lock, flags);

	/* Pick the expected-throughput table matching band and (for
	 * 2.4 GHz) whether G-protection is currently active. */
	rs_sta->tgg = 0;
	switch (priv->band) {
	case IEEE80211_BAND_2GHZ:
		/* TODO: this always does G, not a regression */
		if (priv->contexts[IWL_RXON_CTX_BSS].active.flags &
						RXON_FLG_TGG_PROTECT_MSK) {
			rs_sta->tgg = 1;
			rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot;
		} else
			rs_sta->expected_tpt = iwl3945_expected_tpt_g;
		break;

	case IEEE80211_BAND_5GHZ:
		rs_sta->expected_tpt = iwl3945_expected_tpt_a;
		break;
	case IEEE80211_NUM_BANDS:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&rs_sta->lock, flags);

	/* Seed the start rate from the last RX RSSI (fall back to the
	 * minimum when nothing has been received yet). */
	rssi = priv->_3945.last_rx_rssi;
	if (rssi == 0)
		rssi = IWL_MIN_RSSI_VAL;

	IWL_DEBUG_RATE(priv, "Network RSSI: %d\n", rssi);

	rs_sta->start_rate = iwl3945_get_rate_index_by_rssi(rssi, priv->band);

	IWL_DEBUG_RATE(priv, "leave: rssi %d assign rate index: "
		       "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate,
		       iwl3945_rates[rs_sta->start_rate].plcp);
	rcu_read_unlock();
}
987
/* Register the 3945 rate-scaling algorithm with mac80211.
 * Returns 0 on success or a negative errno from mac80211. */
int iwl3945_rate_control_register(void)
{
	return ieee80211_rate_control_register(&rs_ops);
}
992
/* Unregister the 3945 rate-scaling algorithm from mac80211. */
void iwl3945_rate_control_unregister(void)
{
	ieee80211_rate_control_unregister(&rs_ops);
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
deleted file mode 100644
index f7c0a7438476..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945.c
+++ /dev/null
@@ -1,2741 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/slab.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/sched.h>
35#include <linux/skbuff.h>
36#include <linux/netdevice.h>
37#include <linux/firmware.h>
38#include <linux/etherdevice.h>
39#include <asm/unaligned.h>
40#include <net/mac80211.h>
41
42#include "iwl-fh.h"
43#include "iwl-3945-fh.h"
44#include "iwl-commands.h"
45#include "iwl-sta.h"
46#include "iwl-3945.h"
47#include "iwl-eeprom.h"
48#include "iwl-core.h"
49#include "iwl-helpers.h"
50#include "iwl-led.h"
51#include "iwl-3945-led.h"
52#include "iwl-3945-debugfs.h"
53
/*
 * Expand to one iwl3945_rate_info initializer for rate @r (Mbps).
 * The remaining arguments are rate names used to build the prev/next
 * linkage fields: ip/in, rp/rn and pp/np feed the IEEE, rate-scale and
 * index-table link slots in declaration order (exact field names are in
 * struct iwl3945_rate_info -- TODO confirm against iwl-3945.h).
 */
#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np)    \
	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,   \
				    IWL_RATE_##r##M_IEEE,   \
				    IWL_RATE_##ip##M_INDEX, \
				    IWL_RATE_##in##M_INDEX, \
				    IWL_RATE_##rp##M_INDEX, \
				    IWL_RATE_##rn##M_INDEX, \
				    IWL_RATE_##pp##M_INDEX, \
				    IWL_RATE_##np##M_INDEX, \
				    IWL_RATE_##r##M_INDEX_TABLE, \
				    IWL_RATE_##ip##M_INDEX_TABLE }
65
66/*
67 * Parameter order:
68 * rate, prev rate, next rate, prev tgg rate, next tgg rate
69 *
70 * If there isn't a valid next or previous rate then INV is used which
71 * maps to IWL_RATE_INVALID
72 *
73 */
74const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = {
75 IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */
76 IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */
77 IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /*5.5mbps */
78 IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */
79 IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */
80 IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */
81 IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */
82 IWL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */
83 IWL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */
84 IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */
85 IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */
86 IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */
87};
88
89static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
90{
91 u8 rate = iwl3945_rates[rate_index].prev_ieee;
92
93 if (rate == IWL_RATE_INVALID)
94 rate = rate_index;
95 return rate;
96}
97
/* 1 = enable the iwl3945_disable_events() function */
#define IWL_EVT_DISABLE (0)
/* Number of u32 words in the uCode event-disable bitmap (1532 events). */
#define IWL_EVT_DISABLE_SIZE (1532/32)

/**
 * iwl3945_disable_events - Disable selected events in uCode event log
 *
 * Disable an event by writing "1"s into "disable"
 * bitmap in SRAM.  Bit position corresponds to Event # (id/type).
 * Default values of 0 enable uCode events to be logged.
 * Use for only special debugging.  This function is just a placeholder as-is,
 * you'll need to provide the special bits! ...
 * ... and set IWL_EVT_DISABLE to 1. */
void iwl3945_disable_events(struct iwl_priv *priv)
{
	int i;
	u32 base;		/* SRAM address of event log header */
	u32 disable_ptr;	/* SRAM address of event-disable bitmap array */
	u32 array_size;		/* # of u32 entries in array */
	/* All-zero template: edit entries here to disable specific events. */
	static const u32 evt_disable[IWL_EVT_DISABLE_SIZE] = {
		0x00000000,	/*   31 -    0  Event id numbers */
		0x00000000,	/*   63 -   32 */
		0x00000000,	/*   95 -   64 */
		0x00000000,	/*  127 -   96 */
		0x00000000,	/*  159 -  128 */
		0x00000000,	/*  191 -  160 */
		0x00000000,	/*  223 -  192 */
		0x00000000,	/*  255 -  224 */
		0x00000000,	/*  287 -  256 */
		0x00000000,	/*  319 -  288 */
		0x00000000,	/*  351 -  320 */
		0x00000000,	/*  383 -  352 */
		0x00000000,	/*  415 -  384 */
		0x00000000,	/*  447 -  416 */
		0x00000000,	/*  479 -  448 */
		0x00000000,	/*  511 -  480 */
		0x00000000,	/*  543 -  512 */
		0x00000000,	/*  575 -  544 */
		0x00000000,	/*  607 -  576 */
		0x00000000,	/*  639 -  608 */
		0x00000000,	/*  671 -  640 */
		0x00000000,	/*  703 -  672 */
		0x00000000,	/*  735 -  704 */
		0x00000000,	/*  767 -  736 */
		0x00000000,	/*  799 -  768 */
		0x00000000,	/*  831 -  800 */
		0x00000000,	/*  863 -  832 */
		0x00000000,	/*  895 -  864 */
		0x00000000,	/*  927 -  896 */
		0x00000000,	/*  959 -  928 */
		0x00000000,	/*  991 -  960 */
		0x00000000,	/* 1023 -  992 */
		0x00000000,	/* 1055 - 1024 */
		0x00000000,	/* 1087 - 1056 */
		0x00000000,	/* 1119 - 1088 */
		0x00000000,	/* 1151 - 1120 */
		0x00000000,	/* 1183 - 1152 */
		0x00000000,	/* 1215 - 1184 */
		0x00000000,	/* 1247 - 1216 */
		0x00000000,	/* 1279 - 1248 */
		0x00000000,	/* 1311 - 1280 */
		0x00000000,	/* 1343 - 1312 */
		0x00000000,	/* 1375 - 1344 */
		0x00000000,	/* 1407 - 1376 */
		0x00000000,	/* 1439 - 1408 */
		0x00000000,	/* 1471 - 1440 */
		0x00000000,	/* 1503 - 1472 */
	};

	base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
	if (!iwl3945_hw_valid_rtc_data_addr(base)) {
		IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
		return;
	}

	/* Bitmap pointer and size live at fixed offsets in the event
	 * log header (words 4 and 5). */
	disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32)));
	array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32)));

	if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
		IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
			       disable_ptr);
		for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
			iwl_legacy_write_targ_mem(priv,
					   disable_ptr + (i * sizeof(u32)),
					   evt_disable[i]);

	} else {
		IWL_DEBUG_INFO(priv, "Selected uCode log events may be disabled\n");
		IWL_DEBUG_INFO(priv, "  by writing \"1\"s into disable bitmap\n");
		IWL_DEBUG_INFO(priv, "  in SRAM at 0x%x, size %d u32s\n",
			       disable_ptr, array_size);
	}

}
192
193static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
194{
195 int idx;
196
197 for (idx = 0; idx < IWL_RATE_COUNT_3945; idx++)
198 if (iwl3945_rates[idx].plcp == plcp)
199 return idx;
200 return -1;
201}
202
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Expand one TX_3945_STATUS_FAIL_* case into its stringified name. */
#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x

/* Translate a Tx status word into a human-readable failure reason
 * for debug logging. */
static const char *iwl3945_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_3945_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_ENTRY(SHORT_LIMIT);
		TX_STATUS_ENTRY(LONG_LIMIT);
		TX_STATUS_ENTRY(FIFO_UNDERRUN);
		TX_STATUS_ENTRY(MGMNT_ABORT);
		TX_STATUS_ENTRY(NEXT_FRAG);
		TX_STATUS_ENTRY(LIFE_EXPIRE);
		TX_STATUS_ENTRY(DEST_PS);
		TX_STATUS_ENTRY(ABORTED);
		TX_STATUS_ENTRY(BT_RETRY);
		TX_STATUS_ENTRY(STA_INVALID);
		TX_STATUS_ENTRY(FRAG_DROPPED);
		TX_STATUS_ENTRY(TID_DISABLE);
		TX_STATUS_ENTRY(FRAME_FLUSHED);
		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
		TX_STATUS_ENTRY(TX_LOCKED);
		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";
}
#else
/* No-op stand-in when legacy debugging is compiled out. */
static inline const char *iwl3945_get_tx_fail_reason(u32 status)
{
	return "";
}
#endif
237
238/*
239 * get ieee prev rate from rate scale table.
240 * for A and B mode we need to overright prev
241 * value
242 */
243int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
244{
245 int next_rate = iwl3945_get_prev_ieee_rate(rate);
246
247 switch (priv->band) {
248 case IEEE80211_BAND_5GHZ:
249 if (rate == IWL_RATE_12M_INDEX)
250 next_rate = IWL_RATE_9M_INDEX;
251 else if (rate == IWL_RATE_6M_INDEX)
252 next_rate = IWL_RATE_6M_INDEX;
253 break;
254 case IEEE80211_BAND_2GHZ:
255 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
256 iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
257 if (rate == IWL_RATE_11M_INDEX)
258 next_rate = IWL_RATE_5M_INDEX;
259 }
260 break;
261
262 default:
263 break;
264 }
265
266 return next_rate;
267}
268
269
270/**
271 * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
272 *
273 * When FW advances 'R' index, all entries between old and new 'R' index
274 * need to be reclaimed. As result, some free space forms. If there is
275 * enough free space (> low mark), wake the stack that feeds us.
276 */
277static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
278 int txq_id, int index)
279{
280 struct iwl_tx_queue *txq = &priv->txq[txq_id];
281 struct iwl_queue *q = &txq->q;
282 struct iwl_tx_info *tx_info;
283
284 BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);
285
286 for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
287 q->read_ptr != index;
288 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
289
290 tx_info = &txq->txb[txq->q.read_ptr];
291 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
292 tx_info->skb = NULL;
293 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
294 }
295
296 if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) &&
297 (txq_id != IWL39_CMD_QUEUE_NUM) &&
298 priv->mac80211_registered)
299 iwl_legacy_wake_queue(priv, txq);
300}
301
302/**
303 * iwl3945_rx_reply_tx - Handle Tx response
304 */
305static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
306 struct iwl_rx_mem_buffer *rxb)
307{
308 struct iwl_rx_packet *pkt = rxb_addr(rxb);
309 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
310 int txq_id = SEQ_TO_QUEUE(sequence);
311 int index = SEQ_TO_INDEX(sequence);
312 struct iwl_tx_queue *txq = &priv->txq[txq_id];
313 struct ieee80211_tx_info *info;
314 struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
315 u32 status = le32_to_cpu(tx_resp->status);
316 int rate_idx;
317 int fail;
318
319 if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
320 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
321 "is out of range [0-%d] %d %d\n", txq_id,
322 index, txq->q.n_bd, txq->q.write_ptr,
323 txq->q.read_ptr);
324 return;
325 }
326
327 txq->time_stamp = jiffies;
328 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
329 ieee80211_tx_info_clear_status(info);
330
331 /* Fill the MRR chain with some info about on-chip retransmissions */
332 rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate);
333 if (info->band == IEEE80211_BAND_5GHZ)
334 rate_idx -= IWL_FIRST_OFDM_RATE;
335
336 fail = tx_resp->failure_frame;
337
338 info->status.rates[0].idx = rate_idx;
339 info->status.rates[0].count = fail + 1; /* add final attempt */
340
341 /* tx_status->rts_retry_count = tx_resp->failure_rts; */
342 info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ?
343 IEEE80211_TX_STAT_ACK : 0;
344
345 IWL_DEBUG_TX(priv, "Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
346 txq_id, iwl3945_get_tx_fail_reason(status), status,
347 tx_resp->rate, tx_resp->failure_frame);
348
349 IWL_DEBUG_TX_REPLY(priv, "Tx queue reclaim %d\n", index);
350 iwl3945_tx_queue_reclaim(priv, txq_id, index);
351
352 if (status & TX_ABORT_REQUIRED_MSK)
353 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
354}
355
356
357
358/*****************************************************************************
359 *
360 * Intel PRO/Wireless 3945ABG/BG Network Connection
361 *
362 * RX handler implementations
363 *
364 *****************************************************************************/
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
/*
 * Fold a freshly received statistics notification into the debugfs
 * accumulators: walks the notification word-by-word in parallel with the
 * previous snapshot, adding positive deltas into accum/delta/max_delta.
 * Relies on all four structs sharing the iwl3945_notif_statistics layout.
 */
static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
					    __le32 *stats)
{
	int i;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;

	prev_stats = (__le32 *)&priv->_3945.statistics;
	accum_stats = (u32 *)&priv->_3945.accum_statistics;
	delta = (u32 *)&priv->_3945.delta_statistics;
	max_delta = (u32 *)&priv->_3945.max_delta;

	/* Skip the leading flags word, then walk each u32 counter. */
	for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics);
	      i += sizeof(__le32), stats++, prev_stats++, delta++,
	      max_delta++, accum_stats++) {
		/* Counters are monotonic between clears; only a forward
		 * step counts as a delta. */
		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
			*delta = (le32_to_cpu(*stats) -
				  le32_to_cpu(*prev_stats));
			*accum_stats += *delta;
			if (*delta > *max_delta)
				*max_delta = *delta;
		}
	}

	/* reset accumulative statistics for "no-counter" type statistics */
	priv->_3945.accum_statistics.general.temperature =
		priv->_3945.statistics.general.temperature;
	priv->_3945.accum_statistics.general.ttl_timestamp =
		priv->_3945.statistics.general.ttl_timestamp;
}
#endif
398
/* Handle a periodic statistics notification: update the debugfs
 * accumulators (when enabled) and cache the latest snapshot. */
void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
		struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
		     (int)sizeof(struct iwl3945_notif_statistics),
		     le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
	/* Must run before the memcpy below overwrites the previous snapshot. */
	iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
#endif

	memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
}
413
/* Handle a solicited statistics reply.  If the uCode flags that its
 * counters were cleared, reset the debugfs accumulators to match, then
 * process the payload like a normal statistics notification. */
void iwl3945_reply_statistics(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	/* First word of the payload carries the clear flag. */
	__le32 *flag = (__le32 *)&pkt->u.raw;

	if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
		memset(&priv->_3945.accum_statistics, 0,
			sizeof(struct iwl3945_notif_statistics));
		memset(&priv->_3945.delta_statistics, 0,
			sizeof(struct iwl3945_notif_statistics));
		memset(&priv->_3945.max_delta, 0,
			sizeof(struct iwl3945_notif_statistics));
#endif
		IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
	}
	iwl3945_hw_rx_statistics(priv, rxb);
}
433
434
435/******************************************************************************
436 *
437 * Misc. internal state and helper functions
438 *
439 ******************************************************************************/
440
441/* This is necessary only for a number of statistics, see the caller. */
442static int iwl3945_is_network_packet(struct iwl_priv *priv,
443 struct ieee80211_hdr *header)
444{
445 /* Filter incoming packets to determine if they are targeted toward
446 * this network, discarding packets coming from ourselves */
447 switch (priv->iw_mode) {
448 case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
449 /* packets to our IBSS update information */
450 return !compare_ether_addr(header->addr3, priv->bssid);
451 case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
452 /* packets to our IBSS update information */
453 return !compare_ether_addr(header->addr2, priv->bssid);
454 default:
455 return 1;
456 }
457}
458
/* Hand a received frame to mac80211: validate its length, optionally
 * set decryption flags, attach the RX page as a fragment and deliver. */
static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
				   struct iwl_rx_mem_buffer *rxb,
				   struct ieee80211_rx_status *stats)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
	struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
	struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
	u16 len = le16_to_cpu(rx_hdr->len);
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* Reported frame length must fit inside the RX page. */
	if (unlikely(len + IWL39_RX_FRAME_SIZE >
		     PAGE_SIZE << priv->hw_params.rx_page_order)) {
		IWL_DEBUG_DROP(priv, "Corruption detected!\n");
		return;
	}

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
			"Dropping packet while interface is not open.\n");
		return;
	}

	/* Small skb head; the frame payload is attached as a page
	 * fragment below, avoiding a copy. */
	skb = dev_alloc_skb(128);
	if (!skb) {
		IWL_ERR(priv, "dev_alloc_skb failed\n");
		return;
	}

	if (!iwl3945_mod_params.sw_crypto)
		iwl_legacy_set_decrypted_flag(priv,
				       (struct ieee80211_hdr *)rxb_addr(rxb),
				       le32_to_cpu(rx_end->status), stats);

	skb_add_rx_frag(skb, 0, rxb->page,
			(void *)rx_hdr->payload - (void *)pkt, len);

	iwl_legacy_update_stats(priv, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	/* Ownership of the page passes to the skb/mac80211. */
	ieee80211_rx(priv->hw, skb);
	priv->alloc_rxb_page--;
	rxb->page = NULL;
}
506
#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC     (HZ*6)

/* Main RX handler: build an ieee80211_rx_status from the 3945 frame
 * descriptors, drop corrupt frames, record per-BSS state and pass the
 * frame to mac80211. */
static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct ieee80211_hdr *header;
	struct ieee80211_rx_status rx_status;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
	struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
	struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
	u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
	u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff);
	u8 network_packet;

	rx_status.flag = 0;
	rx_status.mactime = le64_to_cpu(rx_end->timestamp);
	rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
	rx_status.freq =
		ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
					       rx_status.band);

	/* mac80211 expects 5 GHz rate indices relative to the first
	 * OFDM rate. */
	rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
	if (rx_status.band == IEEE80211_BAND_5GHZ)
		rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;

	rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) &
					RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;

	/* set the preamble flag if appropriate */
	if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		rx_status.flag |= RX_FLAG_SHORTPRE;

	if ((unlikely(rx_stats->phy_count > 20))) {
		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
				rx_stats->phy_count);
		return;
	}

	/* Drop frames with CRC errors or RX FIFO overflow. */
	if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR)
	    || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
		return;
	}



	/* Convert 3945's rssi indicator to dBm */
	rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET;

	IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n",
			rx_status.signal, rx_stats_sig_avg,
			rx_stats_noise_diff);

	header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);

	network_packet = iwl3945_is_network_packet(priv, header);

	IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
			      network_packet ? '*' : ' ',
			      le16_to_cpu(rx_hdr->channel),
			      rx_status.signal, rx_status.signal,
			      rx_status.rate_idx);

	iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len),
						header);

	/* Remember beacon timing and RSSI from our own BSS -- used e.g.
	 * to seed the rate-scaling start rate. */
	if (network_packet) {
		priv->_3945.last_beacon_time =
			le32_to_cpu(rx_end->beacon_timestamp);
		priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
		priv->_3945.last_rx_rssi = rx_status.signal;
	}

	iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
}
584
/*
 * iwl3945_hw_txq_attach_buf_to_tfd - append one DMA chunk to the current TFD
 *
 * Adds the (@addr, @len) buffer as the next chunk of the TFD at the
 * queue's write pointer, zeroing the TFD first when @reset is set and
 * recording @pad in the control flags.  Returns 0 on success or -EINVAL
 * when the TFD already holds NUM_TFD_CHUNKS chunks.
 */
int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
				     struct iwl_tx_queue *txq,
				     dma_addr_t addr, u16 len, u8 reset, u8 pad)
{
	int count;
	struct iwl_queue *q;
	struct iwl3945_tfd *tfd, *tfd_tmp;

	q = &txq->q;
	tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	/* Current chunk count is encoded in the TFD's control flags. */
	count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));

	if ((count >= NUM_TFD_CHUNKS) || (count < 0)) {
		IWL_ERR(priv, "Error can not send more than %d chunks\n",
			  NUM_TFD_CHUNKS);
		return -EINVAL;
	}

	tfd->tbs[count].addr = cpu_to_le32(addr);
	tfd->tbs[count].len = cpu_to_le32(len);

	count++;

	tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) |
					 TFD_CTL_PAD_SET(pad));

	return 0;
}
618
619/**
620 * iwl3945_hw_txq_free_tfd - Free one TFD, those at index [txq->q.read_ptr]
621 *
622 * Does NOT advance any indexes
623 */
624void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
625{
626 struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
627 int index = txq->q.read_ptr;
628 struct iwl3945_tfd *tfd = &tfd_tmp[index];
629 struct pci_dev *dev = priv->pci_dev;
630 int i;
631 int counter;
632
633 /* sanity check */
634 counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
635 if (counter > NUM_TFD_CHUNKS) {
636 IWL_ERR(priv, "Too many chunks: %i\n", counter);
637 /* @todo issue fatal error, it is quite serious situation */
638 return;
639 }
640
641 /* Unmap tx_cmd */
642 if (counter)
643 pci_unmap_single(dev,
644 dma_unmap_addr(&txq->meta[index], mapping),
645 dma_unmap_len(&txq->meta[index], len),
646 PCI_DMA_TODEVICE);
647
648 /* unmap chunks if any */
649
650 for (i = 1; i < counter; i++)
651 pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
652 le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE);
653
654 /* free SKB */
655 if (txq->txb) {
656 struct sk_buff *skb;
657
658 skb = txq->txb[txq->q.read_ptr].skb;
659
660 /* can be called from irqs-disabled context */
661 if (skb) {
662 dev_kfree_skb_any(skb);
663 txq->txb[txq->q.read_ptr].skb = NULL;
664 }
665 }
666}
667
668/**
669 * iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD:
670 *
671*/
672void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
673 struct iwl_device_cmd *cmd,
674 struct ieee80211_tx_info *info,
675 struct ieee80211_hdr *hdr,
676 int sta_id, int tx_id)
677{
678 u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value;
679 u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT_3945);
680 u16 rate_mask;
681 int rate;
682 u8 rts_retry_limit;
683 u8 data_retry_limit;
684 __le32 tx_flags;
685 __le16 fc = hdr->frame_control;
686 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
687
688 rate = iwl3945_rates[rate_index].plcp;
689 tx_flags = tx_cmd->tx_flags;
690
691 /* We need to figure out how to get the sta->supp_rates while
692 * in this running context */
693 rate_mask = IWL_RATES_MASK_3945;
694
695 /* Set retry limit on DATA packets and Probe Responses*/
696 if (ieee80211_is_probe_resp(fc))
697 data_retry_limit = 3;
698 else
699 data_retry_limit = IWL_DEFAULT_TX_RETRY;
700 tx_cmd->data_retry_limit = data_retry_limit;
701
702 if (tx_id >= IWL39_CMD_QUEUE_NUM)
703 rts_retry_limit = 3;
704 else
705 rts_retry_limit = 7;
706
707 if (data_retry_limit < rts_retry_limit)
708 rts_retry_limit = data_retry_limit;
709 tx_cmd->rts_retry_limit = rts_retry_limit;
710
711 tx_cmd->rate = rate;
712 tx_cmd->tx_flags = tx_flags;
713
714 /* OFDM */
715 tx_cmd->supp_rates[0] =
716 ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF;
717
718 /* CCK */
719 tx_cmd->supp_rates[1] = (rate_mask & 0xF);
720
721 IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
722 "cck/ofdm mask: 0x%x/0x%x\n", sta_id,
723 tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags),
724 tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
725}
726
727static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
728{
729 unsigned long flags_spin;
730 struct iwl_station_entry *station;
731
732 if (sta_id == IWL_INVALID_STATION)
733 return IWL_INVALID_STATION;
734
735 spin_lock_irqsave(&priv->sta_lock, flags_spin);
736 station = &priv->stations[sta_id];
737
738 station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
739 station->sta.rate_n_flags = cpu_to_le16(tx_rate);
740 station->sta.mode = STA_CONTROL_MODIFY_MSK;
741 iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC);
742 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
743
744 IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
745 sta_id, tx_rate);
746 return sta_id;
747}
748
/**
 * iwl3945_set_pwr_vmain - select VMAIN (not VAUX) as the NIC power source
 *
 * Programs the APMG power-source select bits and then polls the GPIO
 * input register until the switch is reflected (bounded wait).
 */
static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do

	if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
		iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
				APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				~APMG_PS_CTRL_MSK_PWR_SRC);

		iwl_poll_bit(priv, CSR_GPIO_IN,
			     CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
			     CSR_GPIO_IN_BIT_AUX_POWER, 5000);
	}
 */

	iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
			APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			~APMG_PS_CTRL_MSK_PWR_SRC);

	iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
		     CSR_GPIO_IN_BIT_AUX_POWER, 5000);	/* uS */
}
773
/**
 * iwl3945_rx_init - program the RX DMA channel 0 from the driver's rx queue
 *
 * Points the flow handler at the RX buffer descriptor ring and status
 * area, zeroes the write pointer, and enables the channel with the
 * driver's RX configuration.  Always returns 0.
 */
static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	/* RBD ring base and status write-back address */
	iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
	iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0),
					rxq->rb_stts_dma);
	iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
	/* enable DMA, 128-byte max fragment, interrupt host on RB threshold */
	iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0),
		FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
		FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
		FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
		FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
		(RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
		FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
		(1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
		FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);

	/* fake read to flush all prev I/O */
	iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL);

	return 0;
}
795
/**
 * iwl3945_tx_reset - program the TX scheduler and TX flow handler
 *
 * Puts the scheduler into bypass mode, activates the FIFOs, points the
 * flow handler at the shared circular-buffer base, and sets the TX
 * message configuration.  Always returns 0.
 */
static int iwl3945_tx_reset(struct iwl_priv *priv)
{

	/* bypass mode */
	iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2);

	/* RA 0 is active */
	iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);

	/* all 6 fifo are active */
	iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);

	/* scheduler bypass / FIFO mapping setup (magic values from
	 * reference driver) */
	iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
	iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
	iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
	iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);

	/* circular buffer base for all TX queues */
	iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE,
			     priv->_3945.shared_phys);

	iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);


	return 0;
}
828
829/**
830 * iwl3945_txq_ctx_reset - Reset TX queue context
831 *
832 * Destroys all DMA structures and initialize them again
833 */
834static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
835{
836 int rc;
837 int txq_id, slots_num;
838
839 iwl3945_hw_txq_ctx_free(priv);
840
841 /* allocate tx queue structure */
842 rc = iwl_legacy_alloc_txq_mem(priv);
843 if (rc)
844 return rc;
845
846 /* Tx CMD queue */
847 rc = iwl3945_tx_reset(priv);
848 if (rc)
849 goto error;
850
851 /* Tx queue(s) */
852 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
853 slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ?
854 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
855 rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id],
856 slots_num, txq_id);
857 if (rc) {
858 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
859 goto error;
860 }
861 }
862
863 return rc;
864
865 error:
866 iwl3945_hw_txq_ctx_free(priv);
867 return rc;
868}
869
870
871/*
872 * Start up 3945's basic functionality after it has been reset
873 * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
874 * NOTE: This does not load uCode nor start the embedded processor
875 */
876static int iwl3945_apm_init(struct iwl_priv *priv)
877{
878 int ret = iwl_legacy_apm_init(priv);
879
880 /* Clear APMG (NIC's internal power management) interrupts */
881 iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
882 iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
883
884 /* Reset radio chip */
885 iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG,
886 APMG_PS_CTRL_VAL_RESET_REQ);
887 udelay(5);
888 iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG,
889 APMG_PS_CTRL_VAL_RESET_REQ);
890
891 return ret;
892}
893
/**
 * iwl3945_nic_config - configure HW-interface bits from PCI rev + EEPROM
 *
 * Detects the radio/board variant from the PCI revision ID and EEPROM
 * fields and sets the corresponding CSR_HW_IF_CONFIG_REG bits; also
 * reports (debug only) whether SW/HW RF-kill is supported.
 */
static void iwl3945_nic_config(struct iwl_priv *priv)
{
	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
	unsigned long flags;
	u8 rev_id = priv->pci_dev->revision;

	spin_lock_irqsave(&priv->lock, flags);

	/* Determine HW type */
	IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);

	if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
		IWL_DEBUG_INFO(priv, "RTP type\n");
	else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
		IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
	} else {
		IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n");
		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
	}

	/* NOTE(review): this is an equality test against the whole sku_cap
	 * word, not a bitmask test — matches the original driver, but
	 * verify that is the intended semantics. */
	if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
		IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n");
		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
	} else
		IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n");

	/* board revisions 0xD0..0xDF set the board-type bit, others clear it */
	if ((eeprom->board_revision & 0xF0) == 0xD0) {
		IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
			       eeprom->board_revision);
		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
	} else {
		IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
			       eeprom->board_revision);
		iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
			      CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
	}

	/* silicon type A for M-version <= 1, else type B */
	if (eeprom->almgor_m_version <= 1) {
		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
		IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n",
			       eeprom->almgor_m_version);
	} else {
		IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n",
			       eeprom->almgor_m_version);
		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
		IWL_DEBUG_RF_KILL(priv, "SW RF KILL supported in EEPROM.\n");

	if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
		IWL_DEBUG_RF_KILL(priv, "HW RF KILL supported in EEPROM.\n");
}
955
/**
 * iwl3945_hw_nic_init - bring up the 3945 NIC hardware
 *
 * Runs APM init, selects VMAIN power, applies the NIC configuration,
 * (re)initializes the RX queue and its DMA channel, and resets the TX
 * queue context.  Sets STATUS_INIT on success.
 *
 * Returns 0 on success, -ENOMEM if RX queue allocation fails, or the
 * error from TX context reset.
 */
int iwl3945_hw_nic_init(struct iwl_priv *priv)
{
	int rc;
	unsigned long flags;
	struct iwl_rx_queue *rxq = &priv->rxq;

	spin_lock_irqsave(&priv->lock, flags);
	priv->cfg->ops->lib->apm_ops.init(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl3945_set_pwr_vmain(priv);

	priv->cfg->ops->lib->apm_ops.config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		rc = iwl_legacy_rx_queue_alloc(priv);
		if (rc) {
			IWL_ERR(priv, "Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		iwl3945_rx_queue_reset(priv, rxq);

	/* fill the RX ring with fresh buffers before enabling DMA */
	iwl3945_rx_replenish(priv);

	iwl3945_rx_init(priv, rxq);


	/* Look at using this instead:
	rxq->need_update = 1;
	iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
	*/

	/* hand the (8-aligned) write pointer to the flow handler */
	iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);

	rc = iwl3945_txq_ctx_reset(priv);
	if (rc)
		return rc;

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}
1000
1001/**
1002 * iwl3945_hw_txq_ctx_free - Free TXQ Context
1003 *
1004 * Destroy all TX DMA queues and structures
1005 */
1006void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
1007{
1008 int txq_id;
1009
1010 /* Tx queues */
1011 if (priv->txq)
1012 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
1013 txq_id++)
1014 if (txq_id == IWL39_CMD_QUEUE_NUM)
1015 iwl_legacy_cmd_queue_free(priv);
1016 else
1017 iwl_legacy_tx_queue_free(priv, txq_id);
1018
1019 /* free tx queue structure */
1020 iwl_legacy_txq_mem(priv);
1021}
1022
/**
 * iwl3945_hw_txq_ctx_stop - stop TX DMA and free the TX queue context
 *
 * Disables the scheduler, disables each TX DMA channel while waiting
 * (bounded) for it to report idle, then frees all TX queues.
 */
void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
{
	int txq_id;

	/* stop SCD */
	iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0);
	iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0);

	/* reset TFD queues */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
		/* bounded wait for the channel to go idle */
		iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
				FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
				1000);
	}

	iwl3945_hw_txq_ctx_free(priv);
}
1041
1042/**
1043 * iwl3945_hw_reg_adjust_power_by_temp
1044 * return index delta into power gain settings table
1045*/
1046static int iwl3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
1047{
1048 return (new_reading - old_reading) * (-11) / 100;
1049}
1050
1051/**
1052 * iwl3945_hw_reg_temp_out_of_range - Keep temperature in sane range
1053 */
1054static inline int iwl3945_hw_reg_temp_out_of_range(int temperature)
1055{
1056 return ((temperature < -260) || (temperature > 25)) ? 1 : 0;
1057}
1058
/**
 * iwl3945_hw_get_temperature - read the NIC's raw temperature indication
 *
 * Returns the raw value of CSR_UCODE_DRV_GP2; see
 * iwl3945_hw_reg_txpower_get_temperature() for range checking.
 */
int iwl3945_hw_get_temperature(struct iwl_priv *priv)
{
	return iwl_read32(priv, CSR_UCODE_DRV_GP2);
}
1063
1064/**
1065 * iwl3945_hw_reg_txpower_get_temperature
1066 * get the current temperature by reading from NIC
1067*/
1068static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
1069{
1070 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1071 int temperature;
1072
1073 temperature = iwl3945_hw_get_temperature(priv);
1074
1075 /* driver's okay range is -260 to +25.
1076 * human readable okay range is 0 to +285 */
1077 IWL_DEBUG_INFO(priv, "Temperature: %d\n", temperature + IWL_TEMP_CONVERT);
1078
1079 /* handle insane temp reading */
1080 if (iwl3945_hw_reg_temp_out_of_range(temperature)) {
1081 IWL_ERR(priv, "Error bad temperature value %d\n", temperature);
1082
1083 /* if really really hot(?),
1084 * substitute the 3rd band/group's temp measured at factory */
1085 if (priv->last_temperature > 100)
1086 temperature = eeprom->groups[2].temperature;
1087 else /* else use most recent "sane" value from driver */
1088 temperature = priv->last_temperature;
1089 }
1090
1091 return temperature; /* raw, not "human readable" */
1092}
1093
1094/* Adjust Txpower only if temperature variance is greater than threshold.
1095 *
1096 * Both are lower than older versions' 9 degrees */
1097#define IWL_TEMPERATURE_LIMIT_TIMER 6
1098
1099/**
1100 * iwl3945_is_temp_calib_needed - determines if new calibration is needed
1101 *
1102 * records new temperature in tx_mgr->temperature.
1103 * replaces tx_mgr->last_temperature *only* if calib needed
1104 * (assumes caller will actually do the calibration!). */
1105static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv)
1106{
1107 int temp_diff;
1108
1109 priv->temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
1110 temp_diff = priv->temperature - priv->last_temperature;
1111
1112 /* get absolute value */
1113 if (temp_diff < 0) {
1114 IWL_DEBUG_POWER(priv, "Getting cooler, delta %d,\n", temp_diff);
1115 temp_diff = -temp_diff;
1116 } else if (temp_diff == 0)
1117 IWL_DEBUG_POWER(priv, "Same temp,\n");
1118 else
1119 IWL_DEBUG_POWER(priv, "Getting warmer, delta %d,\n", temp_diff);
1120
1121 /* if we don't need calibration, *don't* update last_temperature */
1122 if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) {
1123 IWL_DEBUG_POWER(priv, "Timed thermal calib not needed\n");
1124 return 0;
1125 }
1126
1127 IWL_DEBUG_POWER(priv, "Timed thermal calib needed\n");
1128
1129 /* assume that caller will actually do calib ...
1130 * update the "last temperature" value */
1131 priv->last_temperature = priv->temperature;
1132 return 1;
1133}
1134
1135#define IWL_MAX_GAIN_ENTRIES 78
1136#define IWL_CCK_FROM_OFDM_POWER_DIFF -5
1137#define IWL_CCK_FROM_OFDM_INDEX_DIFF (10)
1138
/* Radio (RF) and DSP power gain table, each step is 1/2 dB.
 * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain.
 * Row [0] is 2.4 GHz, row [1] is 5.x GHz; index 0 is the highest power
 * setting and index IWL_MAX_GAIN_ENTRIES-1 the lowest. */
static struct iwl3945_tx_power power_gain_table[2][IWL_MAX_GAIN_ENTRIES] = {
	{
	{251, 127},		/* 2.4 GHz, highest power */
	{251, 127},
	{251, 127},
	{251, 127},
	{251, 125},
	{251, 110},
	{251, 105},
	{251, 98},
	{187, 125},
	{187, 115},
	{187, 108},
	{187, 99},
	{243, 119},
	{243, 111},
	{243, 105},
	{243, 97},
	{243, 92},
	{211, 106},
	{211, 100},
	{179, 120},
	{179, 113},
	{179, 107},
	{147, 125},
	{147, 119},
	{147, 112},
	{147, 106},
	{147, 101},
	{147, 97},
	{147, 91},
	{115, 107},
	{235, 121},
	{235, 115},
	{235, 109},
	{203, 127},
	{203, 121},
	{203, 115},
	{203, 108},
	{203, 102},
	{203, 96},
	{203, 92},
	{171, 110},
	{171, 104},
	{171, 98},
	{139, 116},
	{227, 125},
	{227, 119},
	{227, 113},
	{227, 107},
	{227, 101},
	{227, 96},
	{195, 113},
	{195, 106},
	{195, 102},
	{195, 95},
	{163, 113},
	{163, 106},
	{163, 102},
	{163, 95},
	{131, 113},
	{131, 106},
	{131, 102},
	{131, 95},
	{99, 113},
	{99, 106},
	{99, 102},
	{99, 95},
	{67, 113},
	{67, 106},
	{67, 102},
	{67, 95},
	{35, 113},
	{35, 106},
	{35, 102},
	{35, 95},
	{3, 113},
	{3, 106},
	{3, 102},
	{3, 95} },		/* 2.4 GHz, lowest power */
	{
	{251, 127},		/* 5.x GHz, highest power */
	{251, 120},
	{251, 114},
	{219, 119},
	{219, 101},
	{187, 113},
	{187, 102},
	{155, 114},
	{155, 103},
	{123, 117},
	{123, 107},
	{123, 99},
	{123, 92},
	{91, 108},
	{59, 125},
	{59, 118},
	{59, 109},
	{59, 102},
	{59, 96},
	{59, 90},
	{27, 104},
	{27, 98},
	{27, 92},
	{115, 118},
	{115, 111},
	{115, 104},
	{83, 126},
	{83, 121},
	{83, 113},
	{83, 105},
	{83, 99},
	{51, 118},
	{51, 111},
	{51, 104},
	{51, 98},
	{19, 116},
	{19, 109},
	{19, 102},
	{19, 98},
	{19, 93},
	{171, 113},
	{171, 107},
	{171, 99},
	{139, 120},
	{139, 113},
	{139, 107},
	{139, 99},
	{107, 120},
	{107, 113},
	{107, 107},
	{107, 99},
	{75, 120},
	{75, 113},
	{75, 107},
	{75, 99},
	{43, 120},
	{43, 113},
	{43, 107},
	{43, 99},
	{11, 120},
	{11, 113},
	{11, 107},
	{11, 99},
	{131, 107},
	{131, 99},
	{99, 120},
	{99, 113},
	{99, 107},
	{99, 99},
	{67, 120},
	{67, 113},
	{67, 107},
	{67, 99},
	{35, 120},
	{35, 113},
	{35, 107},
	{35, 99},
	{3, 120} }		/* 5.x GHz, lowest power */
};
1301
1302static inline u8 iwl3945_hw_reg_fix_power_index(int index)
1303{
1304 if (index < 0)
1305 return 0;
1306 if (index >= IWL_MAX_GAIN_ENTRIES)
1307 return IWL_MAX_GAIN_ENTRIES - 1;
1308 return (u8) index;
1309}
1310
1311/* Kick off thermal recalibration check every 60 seconds */
1312#define REG_RECALIB_PERIOD (60)
1313
1314/**
1315 * iwl3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
1316 *
1317 * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
1318 * or 6 Mbit (OFDM) rates.
1319 */
1320static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index,
1321 s32 rate_index, const s8 *clip_pwrs,
1322 struct iwl_channel_info *ch_info,
1323 int band_index)
1324{
1325 struct iwl3945_scan_power_info *scan_power_info;
1326 s8 power;
1327 u8 power_index;
1328
1329 scan_power_info = &ch_info->scan_pwr_info[scan_tbl_index];
1330
1331 /* use this channel group's 6Mbit clipping/saturation pwr,
1332 * but cap at regulatory scan power restriction (set during init
1333 * based on eeprom channel data) for this channel. */
1334 power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]);
1335
1336 power = min(power, priv->tx_power_user_lmt);
1337 scan_power_info->requested_power = power;
1338
1339 /* find difference between new scan *power* and current "normal"
1340 * Tx *power* for 6Mb. Use this difference (x2) to adjust the
1341 * current "normal" temperature-compensated Tx power *index* for
1342 * this rate (1Mb or 6Mb) to yield new temp-compensated scan power
1343 * *index*. */
1344 power_index = ch_info->power_info[rate_index].power_table_index
1345 - (power - ch_info->power_info
1346 [IWL_RATE_6M_INDEX_TABLE].requested_power) * 2;
1347
1348 /* store reference index that we use when adjusting *all* scan
1349 * powers. So we can accommodate user (all channel) or spectrum
1350 * management (single channel) power changes "between" temperature
1351 * feedback compensation procedures.
1352 * don't force fit this reference index into gain table; it may be a
1353 * negative number. This will help avoid errors when we're at
1354 * the lower bounds (highest gains, for warmest temperatures)
1355 * of the table. */
1356
1357 /* don't exceed table bounds for "real" setting */
1358 power_index = iwl3945_hw_reg_fix_power_index(power_index);
1359
1360 scan_power_info->power_table_index = power_index;
1361 scan_power_info->tpc.tx_gain =
1362 power_gain_table[band_index][power_index].tx_gain;
1363 scan_power_info->tpc.dsp_atten =
1364 power_gain_table[band_index][power_index].dsp_atten;
1365}
1366
1367/**
1368 * iwl3945_send_tx_power - fill in Tx Power command with gain settings
1369 *
1370 * Configures power settings for all rates for the current channel,
1371 * using values from channel info struct, and send to NIC
1372 */
1373static int iwl3945_send_tx_power(struct iwl_priv *priv)
1374{
1375 int rate_idx, i;
1376 const struct iwl_channel_info *ch_info = NULL;
1377 struct iwl3945_txpowertable_cmd txpower = {
1378 .channel = priv->contexts[IWL_RXON_CTX_BSS].active.channel,
1379 };
1380 u16 chan;
1381
1382 if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
1383 "TX Power requested while scanning!\n"))
1384 return -EAGAIN;
1385
1386 chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
1387
1388 txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
1389 ch_info = iwl_legacy_get_channel_info(priv, priv->band, chan);
1390 if (!ch_info) {
1391 IWL_ERR(priv,
1392 "Failed to get channel info for channel %d [%d]\n",
1393 chan, priv->band);
1394 return -EINVAL;
1395 }
1396
1397 if (!iwl_legacy_is_channel_valid(ch_info)) {
1398 IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on "
1399 "non-Tx channel.\n");
1400 return 0;
1401 }
1402
1403 /* fill cmd with power settings for all rates for current channel */
1404 /* Fill OFDM rate */
1405 for (rate_idx = IWL_FIRST_OFDM_RATE, i = 0;
1406 rate_idx <= IWL39_LAST_OFDM_RATE; rate_idx++, i++) {
1407
1408 txpower.power[i].tpc = ch_info->power_info[i].tpc;
1409 txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
1410
1411 IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
1412 le16_to_cpu(txpower.channel),
1413 txpower.band,
1414 txpower.power[i].tpc.tx_gain,
1415 txpower.power[i].tpc.dsp_atten,
1416 txpower.power[i].rate);
1417 }
1418 /* Fill CCK rates */
1419 for (rate_idx = IWL_FIRST_CCK_RATE;
1420 rate_idx <= IWL_LAST_CCK_RATE; rate_idx++, i++) {
1421 txpower.power[i].tpc = ch_info->power_info[i].tpc;
1422 txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
1423
1424 IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
1425 le16_to_cpu(txpower.channel),
1426 txpower.band,
1427 txpower.power[i].tpc.tx_gain,
1428 txpower.power[i].tpc.dsp_atten,
1429 txpower.power[i].rate);
1430 }
1431
1432 return iwl_legacy_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
1433 sizeof(struct iwl3945_txpowertable_cmd),
1434 &txpower);
1435
1436}
1437
1438/**
1439 * iwl3945_hw_reg_set_new_power - Configures power tables at new levels
1440 * @ch_info: Channel to update. Uses power_info.requested_power.
1441 *
1442 * Replace requested_power and base_power_index ch_info fields for
1443 * one channel.
1444 *
1445 * Called if user or spectrum management changes power preferences.
1446 * Takes into account h/w and modulation limitations (clip power).
1447 *
1448 * This does *not* send anything to NIC, just sets up ch_info for one channel.
1449 *
1450 * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to
1451 * properly fill out the scan powers, and actual h/w gain settings,
1452 * and send changes to NIC
1453 */
1454static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv,
1455 struct iwl_channel_info *ch_info)
1456{
1457 struct iwl3945_channel_power_info *power_info;
1458 int power_changed = 0;
1459 int i;
1460 const s8 *clip_pwrs;
1461 int power;
1462
1463 /* Get this chnlgrp's rate-to-max/clip-powers table */
1464 clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
1465
1466 /* Get this channel's rate-to-current-power settings table */
1467 power_info = ch_info->power_info;
1468
1469 /* update OFDM Txpower settings */
1470 for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE;
1471 i++, ++power_info) {
1472 int delta_idx;
1473
1474 /* limit new power to be no more than h/w capability */
1475 power = min(ch_info->curr_txpow, clip_pwrs[i]);
1476 if (power == power_info->requested_power)
1477 continue;
1478
1479 /* find difference between old and new requested powers,
1480 * update base (non-temp-compensated) power index */
1481 delta_idx = (power - power_info->requested_power) * 2;
1482 power_info->base_power_index -= delta_idx;
1483
1484 /* save new requested power value */
1485 power_info->requested_power = power;
1486
1487 power_changed = 1;
1488 }
1489
1490 /* update CCK Txpower settings, based on OFDM 12M setting ...
1491 * ... all CCK power settings for a given channel are the *same*. */
1492 if (power_changed) {
1493 power =
1494 ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
1495 requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF;
1496
1497 /* do all CCK rates' iwl3945_channel_power_info structures */
1498 for (i = IWL_RATE_1M_INDEX_TABLE; i <= IWL_RATE_11M_INDEX_TABLE; i++) {
1499 power_info->requested_power = power;
1500 power_info->base_power_index =
1501 ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
1502 base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF;
1503 ++power_info;
1504 }
1505 }
1506
1507 return 0;
1508}
1509
1510/**
1511 * iwl3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
1512 *
1513 * NOTE: Returned power limit may be less (but not more) than requested,
1514 * based strictly on regulatory (eeprom and spectrum mgt) limitations
1515 * (no consideration for h/w clipping limitations).
1516 */
1517static int iwl3945_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info)
1518{
1519 s8 max_power;
1520
1521#if 0
1522 /* if we're using TGd limits, use lower of TGd or EEPROM */
1523 if (ch_info->tgd_data.max_power != 0)
1524 max_power = min(ch_info->tgd_data.max_power,
1525 ch_info->eeprom.max_power_avg);
1526
1527 /* else just use EEPROM limits */
1528 else
1529#endif
1530 max_power = ch_info->eeprom.max_power_avg;
1531
1532 return min(max_power, ch_info->max_power_avg);
1533}
1534
1535/**
1536 * iwl3945_hw_reg_comp_txpower_temp - Compensate for temperature
1537 *
1538 * Compensate txpower settings of *all* channels for temperature.
1539 * This only accounts for the difference between current temperature
1540 * and the factory calibration temperatures, and bases the new settings
1541 * on the channel's base_power_index.
1542 *
1543 * If RxOn is "associated", this sends the new Txpower to NIC!
1544 */
1545static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
1546{
1547 struct iwl_channel_info *ch_info = NULL;
1548 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1549 int delta_index;
1550 const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
1551 u8 a_band;
1552 u8 rate_index;
1553 u8 scan_tbl_index;
1554 u8 i;
1555 int ref_temp;
1556 int temperature = priv->temperature;
1557
1558 if (priv->disable_tx_power_cal ||
1559 test_bit(STATUS_SCANNING, &priv->status)) {
1560 /* do not perform tx power calibration */
1561 return 0;
1562 }
1563 /* set up new Tx power info for each and every channel, 2.4 and 5.x */
1564 for (i = 0; i < priv->channel_count; i++) {
1565 ch_info = &priv->channel_info[i];
1566 a_band = iwl_legacy_is_channel_a_band(ch_info);
1567
1568 /* Get this chnlgrp's factory calibration temperature */
1569 ref_temp = (s16)eeprom->groups[ch_info->group_index].
1570 temperature;
1571
1572 /* get power index adjustment based on current and factory
1573 * temps */
1574 delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
1575 ref_temp);
1576
1577 /* set tx power value for all rates, OFDM and CCK */
1578 for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945;
1579 rate_index++) {
1580 int power_idx =
1581 ch_info->power_info[rate_index].base_power_index;
1582
1583 /* temperature compensate */
1584 power_idx += delta_index;
1585
1586 /* stay within table range */
1587 power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
1588 ch_info->power_info[rate_index].
1589 power_table_index = (u8) power_idx;
1590 ch_info->power_info[rate_index].tpc =
1591 power_gain_table[a_band][power_idx];
1592 }
1593
1594 /* Get this chnlgrp's rate-to-max/clip-powers table */
1595 clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
1596
1597 /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
1598 for (scan_tbl_index = 0;
1599 scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
1600 s32 actual_index = (scan_tbl_index == 0) ?
1601 IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
1602 iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
1603 actual_index, clip_pwrs,
1604 ch_info, a_band);
1605 }
1606 }
1607
1608 /* send Txpower command for current channel to ucode */
1609 return priv->cfg->ops->lib->send_tx_power(priv);
1610}
1611
1612int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
1613{
1614 struct iwl_channel_info *ch_info;
1615 s8 max_power;
1616 u8 a_band;
1617 u8 i;
1618
1619 if (priv->tx_power_user_lmt == power) {
1620 IWL_DEBUG_POWER(priv, "Requested Tx power same as current "
1621 "limit: %ddBm.\n", power);
1622 return 0;
1623 }
1624
1625 IWL_DEBUG_POWER(priv, "Setting upper limit clamp to %ddBm.\n", power);
1626 priv->tx_power_user_lmt = power;
1627
1628 /* set up new Tx powers for each and every channel, 2.4 and 5.x */
1629
1630 for (i = 0; i < priv->channel_count; i++) {
1631 ch_info = &priv->channel_info[i];
1632 a_band = iwl_legacy_is_channel_a_band(ch_info);
1633
1634 /* find minimum power of all user and regulatory constraints
1635 * (does not consider h/w clipping limitations) */
1636 max_power = iwl3945_hw_reg_get_ch_txpower_limit(ch_info);
1637 max_power = min(power, max_power);
1638 if (max_power != ch_info->curr_txpow) {
1639 ch_info->curr_txpow = max_power;
1640
1641 /* this considers the h/w clipping limitations */
1642 iwl3945_hw_reg_set_new_power(priv, ch_info);
1643 }
1644 }
1645
1646 /* update txpower settings for all channels,
1647 * send to NIC if associated. */
1648 iwl3945_is_temp_calib_needed(priv);
1649 iwl3945_hw_reg_comp_txpower_temp(priv);
1650
1651 return 0;
1652}
1653
/*
 * iwl3945_send_rxon_assoc - send REPLY_RXON_ASSOC if anything changed
 *
 * Compares the RXON_ASSOC-relevant fields of the staging config against
 * the active config; when they all match, the command is skipped.
 * Otherwise sends the (synchronous) command built from the staging
 * values and checks the uCode's reply status.
 *
 * Returns 0 on success or skip, negative errno on command failure,
 * -EIO when the uCode flags the command as failed.
 */
static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
				   struct iwl_rxon_context *ctx)
{
	int rc = 0;
	struct iwl_rx_packet *pkt;
	struct iwl3945_rxon_assoc_cmd rxon_assoc;
	struct iwl_host_cmd cmd = {
		.id = REPLY_RXON_ASSOC,
		.len = sizeof(rxon_assoc),
		.flags = CMD_WANT_SKB,	/* we need the reply to check status */
		.data = &rxon_assoc,
	};
	const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
	const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;

	/* Only these four fields are carried by RXON_ASSOC; if none of
	 * them differ, resending would be a no-op. */
	if ((rxon1->flags == rxon2->flags) &&
	    (rxon1->filter_flags == rxon2->filter_flags) &&
	    (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
	    (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
		IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
		return 0;
	}

	rxon_assoc.flags = ctx->staging.flags;
	rxon_assoc.filter_flags = ctx->staging.filter_flags;
	rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
	rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
	rxon_assoc.reserved = 0;

	rc = iwl_legacy_send_cmd_sync(priv, &cmd);
	if (rc)
		return rc;

	/* Synchronous command: the reply page is valid here. */
	pkt = (struct iwl_rx_packet *)cmd.reply_page;
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
		rc = -EIO;
	}

	iwl_legacy_free_pages(priv, cmd.reply_page);

	return rc;
}
1697
1698/**
1699 * iwl3945_commit_rxon - commit staging_rxon to hardware
1700 *
1701 * The RXON command in staging_rxon is committed to the hardware and
1702 * the active_rxon structure is updated with the new data. This
1703 * function correctly transitions out of the RXON_ASSOC_MSK state if
1704 * a HW tune is required based on the RXON structure changes.
1705 */
int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	/* cast away the const for active_rxon in this function */
	struct iwl3945_rxon_cmd *active_rxon = (void *)&ctx->active;
	struct iwl3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
	int rc = 0;
	bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);

	/* Refuse to touch the hardware while tearing down or before the
	 * uCode has reported alive. */
	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return -EINVAL;

	if (!iwl_legacy_is_alive(priv))
		return -1;

	/* always get timestamp with Rx frame */
	staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;

	/* select antenna */
	staging_rxon->flags &=
	    ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
	staging_rxon->flags |= iwl3945_get_antenna_flags(priv);

	rc = iwl_legacy_check_rxon_cmd(priv, ctx);
	if (rc) {
		IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
		return -EINVAL;
	}

	/* If we don't need to send a full RXON, we can use
	 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
	 * and other flags for the current radio configuration. */
	if (!iwl_legacy_full_rxon_required(priv,
			&priv->contexts[IWL_RXON_CTX_BSS])) {
		rc = iwl_legacy_send_rxon_assoc(priv,
				&priv->contexts[IWL_RXON_CTX_BSS]);
		if (rc) {
			IWL_ERR(priv, "Error setting RXON_ASSOC "
				  "configuration (%d).\n", rc);
			return rc;
		}

		/* RXON_ASSOC accepted: staging becomes the active config. */
		memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
		/*
		 * We do not commit tx power settings while channel changing,
		 * do it now if tx power changed.
		 */
		iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
		return 0;
	}

	/* If we are currently associated and the new config requires
	 * an RXON_ASSOC and the new config wants the associated mask enabled,
	 * we must clear the associated from the active configuration
	 * before we apply the new config */
	if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
		IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;

		/*
		 * reserved4 and 5 could have been filled by the iwlcore code.
		 * Let's clear them before pushing to the 3945.
		 */
		active_rxon->reserved4 = 0;
		active_rxon->reserved5 = 0;
		rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
			      sizeof(struct iwl3945_rxon_cmd),
			      &priv->contexts[IWL_RXON_CTX_BSS].active);

		/* If the mask clearing failed then we set
		 * active_rxon back to what it was previously */
		if (rc) {
			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
			IWL_ERR(priv, "Error clearing ASSOC_MSK on current "
				  "configuration (%d).\n", rc);
			return rc;
		}
		/* The uCode drops its station table on un-assoc RXON;
		 * rebuild it from the driver's shadow copy. */
		iwl_legacy_clear_ucode_stations(priv,
					 &priv->contexts[IWL_RXON_CTX_BSS]);
		iwl_legacy_restore_stations(priv,
					 &priv->contexts[IWL_RXON_CTX_BSS]);
	}

	IWL_DEBUG_INFO(priv, "Sending RXON\n"
		       "* with%s RXON_FILTER_ASSOC_MSK\n"
		       "* channel = %d\n"
		       "* bssid = %pM\n",
		       (new_assoc ? "" : "out"),
		       le16_to_cpu(staging_rxon->channel),
		       staging_rxon->bssid_addr);

	/*
	 * reserved4 and 5 could have been filled by the iwlcore code.
	 * Let's clear them before pushing to the 3945.
	 */
	staging_rxon->reserved4 = 0;
	staging_rxon->reserved5 = 0;

	iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);

	/* Apply the new configuration */
	rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
			      sizeof(struct iwl3945_rxon_cmd),
			      staging_rxon);
	if (rc) {
		IWL_ERR(priv, "Error setting new configuration (%d).\n", rc);
		return rc;
	}

	/* Full RXON accepted: staging becomes the active config. */
	memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));

	if (!new_assoc) {
		iwl_legacy_clear_ucode_stations(priv,
					 &priv->contexts[IWL_RXON_CTX_BSS]);
		iwl_legacy_restore_stations(priv,
					&priv->contexts[IWL_RXON_CTX_BSS]);
	}

	/* If we issue a new RXON command which required a tune then we must
	 * send a new TXPOWER command or we won't be able to Tx any frames */
	rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
	if (rc) {
		IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
		return rc;
	}

	/* Init the hardware's rate fallback order based on the band */
	rc = iwl3945_init_hw_rate_table(priv);
	if (rc) {
		IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc);
		return -EIO;
	}

	return 0;
}
1840
1841/**
1842 * iwl3945_reg_txpower_periodic - called when time to check our temperature.
1843 *
1844 * -- reset periodic timer
1845 * -- see if temp has changed enough to warrant re-calibration ... if so:
1846 * -- correct coeffs for temp (can reset temp timer)
1847 * -- save this temp as "last",
1848 * -- send new set of gain settings to NIC
1849 * NOTE: This should continue working, even when we're not associated,
1850 * so we can keep our internal table of scan powers current. */
1851void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
1852{
1853 /* This will kick in the "brute force"
1854 * iwl3945_hw_reg_comp_txpower_temp() below */
1855 if (!iwl3945_is_temp_calib_needed(priv))
1856 goto reschedule;
1857
1858 /* Set up a new set of temp-adjusted TxPowers, send to NIC.
1859 * This is based *only* on current temperature,
1860 * ignoring any previous power measurements */
1861 iwl3945_hw_reg_comp_txpower_temp(priv);
1862
1863 reschedule:
1864 queue_delayed_work(priv->workqueue,
1865 &priv->_3945.thermal_periodic, REG_RECALIB_PERIOD * HZ);
1866}
1867
1868static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
1869{
1870 struct iwl_priv *priv = container_of(work, struct iwl_priv,
1871 _3945.thermal_periodic.work);
1872
1873 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1874 return;
1875
1876 mutex_lock(&priv->mutex);
1877 iwl3945_reg_txpower_periodic(priv);
1878 mutex_unlock(&priv->mutex);
1879}
1880
1881/**
1882 * iwl3945_hw_reg_get_ch_grp_index - find the channel-group index (0-4)
1883 * for the channel.
1884 *
1885 * This function is used when initializing channel-info structs.
1886 *
1887 * NOTE: These channel groups do *NOT* match the bands above!
1888 * These channel groups are based on factory-tested channels;
1889 * on A-band, EEPROM's "group frequency" entries represent the top
 * channel in each group 1-4; all B/G channels are in group 0.
1891 */
1892static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
1893 const struct iwl_channel_info *ch_info)
1894{
1895 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1896 struct iwl3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
1897 u8 group;
1898 u16 group_index = 0; /* based on factory calib frequencies */
1899 u8 grp_channel;
1900
1901 /* Find the group index for the channel ... don't use index 1(?) */
1902 if (iwl_legacy_is_channel_a_band(ch_info)) {
1903 for (group = 1; group < 5; group++) {
1904 grp_channel = ch_grp[group].group_channel;
1905 if (ch_info->channel <= grp_channel) {
1906 group_index = group;
1907 break;
1908 }
1909 }
1910 /* group 4 has a few channels *above* its factory cal freq */
1911 if (group == 5)
1912 group_index = 4;
1913 } else
1914 group_index = 0; /* 2.4 GHz, group 0 */
1915
1916 IWL_DEBUG_POWER(priv, "Chnl %d mapped to grp %d\n", ch_info->channel,
1917 group_index);
1918 return group_index;
1919}
1920
1921/**
1922 * iwl3945_hw_reg_get_matched_power_index - Interpolate to get nominal index
1923 *
1924 * Interpolate to get nominal (i.e. at factory calibration temperature) index
1925 * into radio/DSP gain settings table for requested power.
1926 */
1927static int iwl3945_hw_reg_get_matched_power_index(struct iwl_priv *priv,
1928 s8 requested_power,
1929 s32 setting_index, s32 *new_index)
1930{
1931 const struct iwl3945_eeprom_txpower_group *chnl_grp = NULL;
1932 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1933 s32 index0, index1;
1934 s32 power = 2 * requested_power;
1935 s32 i;
1936 const struct iwl3945_eeprom_txpower_sample *samples;
1937 s32 gains0, gains1;
1938 s32 res;
1939 s32 denominator;
1940
1941 chnl_grp = &eeprom->groups[setting_index];
1942 samples = chnl_grp->samples;
1943 for (i = 0; i < 5; i++) {
1944 if (power == samples[i].power) {
1945 *new_index = samples[i].gain_index;
1946 return 0;
1947 }
1948 }
1949
1950 if (power > samples[1].power) {
1951 index0 = 0;
1952 index1 = 1;
1953 } else if (power > samples[2].power) {
1954 index0 = 1;
1955 index1 = 2;
1956 } else if (power > samples[3].power) {
1957 index0 = 2;
1958 index1 = 3;
1959 } else {
1960 index0 = 3;
1961 index1 = 4;
1962 }
1963
1964 denominator = (s32) samples[index1].power - (s32) samples[index0].power;
1965 if (denominator == 0)
1966 return -EINVAL;
1967 gains0 = (s32) samples[index0].gain_index * (1 << 19);
1968 gains1 = (s32) samples[index1].gain_index * (1 << 19);
1969 res = gains0 + (gains1 - gains0) *
1970 ((s32) power - (s32) samples[index0].power) / denominator +
1971 (1 << 18);
1972 *new_index = res >> 19;
1973 return 0;
1974}
1975
1976static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv)
1977{
1978 u32 i;
1979 s32 rate_index;
1980 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1981 const struct iwl3945_eeprom_txpower_group *group;
1982
1983 IWL_DEBUG_POWER(priv, "Initializing factory calib info from EEPROM\n");
1984
1985 for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) {
1986 s8 *clip_pwrs; /* table of power levels for each rate */
1987 s8 satur_pwr; /* saturation power for each chnl group */
1988 group = &eeprom->groups[i];
1989
1990 /* sanity check on factory saturation power value */
1991 if (group->saturation_power < 40) {
1992 IWL_WARN(priv, "Error: saturation power is %d, "
1993 "less than minimum expected 40\n",
1994 group->saturation_power);
1995 return;
1996 }
1997
1998 /*
1999 * Derive requested power levels for each rate, based on
2000 * hardware capabilities (saturation power for band).
2001 * Basic value is 3dB down from saturation, with further
2002 * power reductions for highest 3 data rates. These
2003 * backoffs provide headroom for high rate modulation
2004 * power peaks, without too much distortion (clipping).
2005 */
2006 /* we'll fill in this array with h/w max power levels */
2007 clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers;
2008
2009 /* divide factory saturation power by 2 to find -3dB level */
2010 satur_pwr = (s8) (group->saturation_power >> 1);
2011
2012 /* fill in channel group's nominal powers for each rate */
2013 for (rate_index = 0;
2014 rate_index < IWL_RATE_COUNT_3945; rate_index++, clip_pwrs++) {
2015 switch (rate_index) {
2016 case IWL_RATE_36M_INDEX_TABLE:
2017 if (i == 0) /* B/G */
2018 *clip_pwrs = satur_pwr;
2019 else /* A */
2020 *clip_pwrs = satur_pwr - 5;
2021 break;
2022 case IWL_RATE_48M_INDEX_TABLE:
2023 if (i == 0)
2024 *clip_pwrs = satur_pwr - 7;
2025 else
2026 *clip_pwrs = satur_pwr - 10;
2027 break;
2028 case IWL_RATE_54M_INDEX_TABLE:
2029 if (i == 0)
2030 *clip_pwrs = satur_pwr - 9;
2031 else
2032 *clip_pwrs = satur_pwr - 12;
2033 break;
2034 default:
2035 *clip_pwrs = satur_pwr;
2036 break;
2037 }
2038 }
2039 }
2040}
2041
2042/**
2043 * iwl3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
2044 *
2045 * Second pass (during init) to set up priv->channel_info
2046 *
2047 * Set up Tx-power settings in our channel info database for each VALID
2048 * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
2049 * and current temperature.
2050 *
2051 * Since this is based on current temperature (at init time), these values may
2052 * not be valid for very long, but it gives us a starting/default point,
2053 * and allows us to active (i.e. using Tx) scan.
2054 *
2055 * This does *not* write values to NIC, just sets up our internal table.
2056 */
int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
{
	struct iwl_channel_info *ch_info = NULL;
	struct iwl3945_channel_power_info *pwr_info;
	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
	int delta_index;
	u8 rate_index;
	u8 scan_tbl_index;
	const s8 *clip_pwrs;	/* array of power levels for each rate */
	u8 gain, dsp_atten;
	s8 power;
	u8 pwr_index, base_pwr_index, a_band;
	u8 i;
	int temperature;

	/* save temperature reference,
	 *   so we can determine next time to calibrate */
	temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
	priv->last_temperature = temperature;

	/* build clip-power tables (max power per rate per group) first */
	iwl3945_hw_reg_init_channel_groups(priv);

	/* initialize Tx power info for each and every channel, 2.4 and 5.x */
	for (i = 0, ch_info = priv->channel_info; i < priv->channel_count;
	     i++, ch_info++) {
		a_band = iwl_legacy_is_channel_a_band(ch_info);
		if (!iwl_legacy_is_channel_valid(ch_info))
			continue;

		/* find this channel's channel group (*not* "band") index */
		ch_info->group_index =
			iwl3945_hw_reg_get_ch_grp_index(priv, ch_info);

		/* Get this chnlgrp's rate->max/clip-powers table */
		clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;

		/* calculate power index *adjustment* value according to
		 *  diff between current temperature and factory temperature */
		delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
				eeprom->groups[ch_info->group_index].
				temperature);

		IWL_DEBUG_POWER(priv, "Delta index for channel %d: %d [%d]\n",
				ch_info->channel, delta_index, temperature +
				IWL_TEMP_CONVERT);

		/* set tx power value for all OFDM rates */
		for (rate_index = 0; rate_index < IWL_OFDM_RATES;
		     rate_index++) {
			/* set on the error path below before any use */
			s32 uninitialized_var(power_idx);
			int rc;

			/* use channel group's clip-power table,
			 *   but don't exceed channel's max power */
			s8 pwr = min(ch_info->max_power_avg,
				     clip_pwrs[rate_index]);

			pwr_info = &ch_info->power_info[rate_index];

			/* get base (i.e. at factory-measured temperature)
			 *    power table index for this rate's power */
			rc = iwl3945_hw_reg_get_matched_power_index(priv, pwr,
							 ch_info->group_index,
							 &power_idx);
			if (rc) {
				IWL_ERR(priv, "Invalid power index\n");
				return rc;
			}
			pwr_info->base_power_index = (u8) power_idx;

			/* temperature compensate */
			power_idx += delta_index;

			/* stay within range of gain table */
			power_idx = iwl3945_hw_reg_fix_power_index(power_idx);

			/* fill 1 OFDM rate's iwl3945_channel_power_info struct */
			pwr_info->requested_power = pwr;
			pwr_info->power_table_index = (u8) power_idx;
			pwr_info->tpc.tx_gain =
				power_gain_table[a_band][power_idx].tx_gain;
			pwr_info->tpc.dsp_atten =
				power_gain_table[a_band][power_idx].dsp_atten;
		}

		/* set tx power for CCK rates, based on OFDM 12 Mbit settings*/
		pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX_TABLE];
		power = pwr_info->requested_power +
			IWL_CCK_FROM_OFDM_POWER_DIFF;
		pwr_index = pwr_info->power_table_index +
			IWL_CCK_FROM_OFDM_INDEX_DIFF;
		base_pwr_index = pwr_info->base_power_index +
			IWL_CCK_FROM_OFDM_INDEX_DIFF;

		/* stay within table range */
		pwr_index = iwl3945_hw_reg_fix_power_index(pwr_index);
		gain = power_gain_table[a_band][pwr_index].tx_gain;
		dsp_atten = power_gain_table[a_band][pwr_index].dsp_atten;

		/* fill each CCK rate's iwl3945_channel_power_info structure
		 * NOTE: All CCK-rate Txpwrs are the same for a given chnl!
		 * NOTE: CCK rates start at end of OFDM rates! */
		for (rate_index = 0;
		     rate_index < IWL_CCK_RATES; rate_index++) {
			pwr_info = &ch_info->power_info[rate_index+IWL_OFDM_RATES];
			pwr_info->requested_power = power;
			pwr_info->power_table_index = pwr_index;
			pwr_info->base_power_index = base_pwr_index;
			pwr_info->tpc.tx_gain = gain;
			pwr_info->tpc.dsp_atten = dsp_atten;
		}

		/* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
		for (scan_tbl_index = 0;
		     scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
			s32 actual_index = (scan_tbl_index == 0) ?
				IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
			iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
				actual_index, clip_pwrs, ch_info, a_band);
		}
	}

	return 0;
}
2181
2182int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
2183{
2184 int rc;
2185
2186 iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
2187 rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
2188 FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
2189 if (rc < 0)
2190 IWL_ERR(priv, "Can't stop Rx DMA.\n");
2191
2192 return 0;
2193}
2194
int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	int txq_id = txq->q.id;

	/* Device-visible shared area; publish this queue's descriptor base. */
	struct iwl3945_shared *shared_data = priv->_3945.shared_virt;

	shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);

	/* Zero the CBCC control/base registers for this Tx channel. */
	iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
	iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);

	/* Configure and enable the Tx DMA channel. */
	iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
		FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
		FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
		FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
		FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
		FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);

	/* fake read to flush all prev. writes */
	iwl_read32(priv, FH39_TSSR_CBB_BASE);

	return 0;
}
2218
2219/*
2220 * HCMD utils
2221 */
2222static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
2223{
2224 switch (cmd_id) {
2225 case REPLY_RXON:
2226 return sizeof(struct iwl3945_rxon_cmd);
2227 case POWER_TABLE_CMD:
2228 return sizeof(struct iwl3945_powertable_cmd);
2229 default:
2230 return len;
2231 }
2232}
2233
2234
/* Repack the generic ADD_STA command into the 3945-specific layout in
 * @data; returns the number of bytes used. */
static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
				    u8 *data)
{
	struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
	addsta->mode = cmd->mode;
	memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
	memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
	addsta->station_flags = cmd->station_flags;
	addsta->station_flags_msk = cmd->station_flags_msk;
	/* forced to 0 here, regardless of the source command */
	addsta->tid_disable_tx = cpu_to_le16(0);
	addsta->rate_n_flags = cmd->rate_n_flags;
	addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
	addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
	addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;

	return (u16)sizeof(struct iwl3945_addsta_cmd);
}
2252
2253static int iwl3945_add_bssid_station(struct iwl_priv *priv,
2254 const u8 *addr, u8 *sta_id_r)
2255{
2256 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2257 int ret;
2258 u8 sta_id;
2259 unsigned long flags;
2260
2261 if (sta_id_r)
2262 *sta_id_r = IWL_INVALID_STATION;
2263
2264 ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
2265 if (ret) {
2266 IWL_ERR(priv, "Unable to add station %pM\n", addr);
2267 return ret;
2268 }
2269
2270 if (sta_id_r)
2271 *sta_id_r = sta_id;
2272
2273 spin_lock_irqsave(&priv->sta_lock, flags);
2274 priv->stations[sta_id].used |= IWL_STA_LOCAL;
2275 spin_unlock_irqrestore(&priv->sta_lock, flags);
2276
2277 return 0;
2278}
2279static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
2280 struct ieee80211_vif *vif, bool add)
2281{
2282 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2283 int ret;
2284
2285 if (add) {
2286 ret = iwl3945_add_bssid_station(priv, vif->bss_conf.bssid,
2287 &vif_priv->ibss_bssid_sta_id);
2288 if (ret)
2289 return ret;
2290
2291 iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id,
2292 (priv->band == IEEE80211_BAND_5GHZ) ?
2293 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP);
2294 iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id);
2295
2296 return 0;
2297 }
2298
2299 return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
2300 vif->bss_conf.bssid);
2301}
2302
2303/**
2304 * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
2305 */
int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
{
	int rc, i, index, prev_index;
	struct iwl3945_rate_scaling_cmd rate_cmd = {
		.reserved = {0, 0, 0},
	};
	struct iwl3945_rate_scaling_info *table = rate_cmd.table;

	/* Default fallback chain: each rate retries priv->retry_rate
	 * times, then falls back to the previous (slower) IEEE rate. */
	for (i = 0; i < ARRAY_SIZE(iwl3945_rates); i++) {
		index = iwl3945_rates[i].table_rs_index;

		table[index].rate_n_flags =
			iwl3945_hw_set_rate_n_flags(iwl3945_rates[i].plcp, 0);
		table[index].try_cnt = priv->retry_rate;
		prev_index = iwl3945_get_prev_ieee_rate(i);
		table[index].next_rate_index =
			iwl3945_rates[prev_index].table_rs_index;
	}

	/* Band-specific fixups to the default chain. */
	switch (priv->band) {
	case IEEE80211_BAND_5GHZ:
		IWL_DEBUG_RATE(priv, "Select A mode rate scale\n");
		/* If one of the following CCK rates is used,
		 * have it fall back to the 6M OFDM rate */
		for (i = IWL_RATE_1M_INDEX_TABLE;
		     i <= IWL_RATE_11M_INDEX_TABLE; i++)
			table[i].next_rate_index =
				iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;

		/* Don't fall back to CCK rates */
		table[IWL_RATE_12M_INDEX_TABLE].next_rate_index =
						IWL_RATE_9M_INDEX_TABLE;

		/* Don't drop out of OFDM rates */
		table[IWL_RATE_6M_INDEX_TABLE].next_rate_index =
			iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
		break;

	case IEEE80211_BAND_2GHZ:
		IWL_DEBUG_RATE(priv, "Select B/G mode rate scale\n");
		/* If an OFDM rate is used, have it fall back to the
		 * 1M CCK rates */

		/* Only reroute OFDM->CCK when the peer advertises no
		 * OFDM support and we are associated. */
		if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
		    iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {

			index = IWL_FIRST_CCK_RATE;
			for (i = IWL_RATE_6M_INDEX_TABLE;
			     i <= IWL_RATE_54M_INDEX_TABLE; i++)
				table[i].next_rate_index =
					iwl3945_rates[index].table_rs_index;

			index = IWL_RATE_11M_INDEX_TABLE;
			/* CCK shouldn't fall back to OFDM... */
			table[index].next_rate_index = IWL_RATE_5M_INDEX_TABLE;
		}
		break;

	default:
		WARN_ON(1);
		break;
	}

	/* Update the rate scaling for control frame Tx */
	rate_cmd.table_id = 0;
	rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
			      &rate_cmd);
	if (rc)
		return rc;

	/* Update the rate scaling for data frame Tx */
	rate_cmd.table_id = 1;
	return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
				&rate_cmd);
}
2381
2382/* Called when initializing driver */
2383int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
2384{
2385 memset((void *)&priv->hw_params, 0,
2386 sizeof(struct iwl_hw_params));
2387
2388 priv->_3945.shared_virt =
2389 dma_alloc_coherent(&priv->pci_dev->dev,
2390 sizeof(struct iwl3945_shared),
2391 &priv->_3945.shared_phys, GFP_KERNEL);
2392 if (!priv->_3945.shared_virt) {
2393 IWL_ERR(priv, "failed to allocate pci memory\n");
2394 return -ENOMEM;
2395 }
2396
2397 /* Assign number of Usable TX queues */
2398 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
2399
2400 priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
2401 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K);
2402 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2403 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
2404 priv->hw_params.max_stations = IWL3945_STATION_COUNT;
2405 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL3945_BROADCAST_ID;
2406
2407 priv->sta_key_max_num = STA_KEY_MAX_NUM;
2408
2409 priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
2410 priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL;
2411 priv->hw_params.beacon_time_tsf_bits = IWL3945_EXT_BEACON_TIME_POS;
2412
2413 return 0;
2414}
2415
/* Build a beacon Tx command (header + beacon frame) in place inside
 * @frame; returns the total command size in bytes. */
unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
				struct iwl3945_frame *frame, u8 rate)
{
	struct iwl3945_tx_beacon_cmd *tx_beacon_cmd;
	unsigned int frame_size;

	tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Beacons go out on the broadcast station, with no lifetime cap. */
	tx_beacon_cmd->tx.sta_id =
		priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* Copy the beacon frame into the space after the command header. */
	frame_size = iwl3945_fill_beacon_frame(priv,
				tx_beacon_cmd->frame,
				sizeof(frame->u) - sizeof(*tx_beacon_cmd));

	BUG_ON(frame_size > MAX_MPDU_SIZE);
	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);

	tx_beacon_cmd->tx.rate = rate;
	tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
				      TX_CMD_FLG_TSF_MSK);

	/* supp_rates[0] == OFDM start at IWL_FIRST_OFDM_RATE*/
	tx_beacon_cmd->tx.supp_rates[0] =
		(IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;

	tx_beacon_cmd->tx.supp_rates[1] =
		(IWL_CCK_BASIC_RATES_MASK & 0xF);

	return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size;
}
2449
void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv)
{
	/* Register the 3945-specific handlers for Tx responses and
	 * received frames. */
	priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx;
	priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx;
}
2455
void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv)
{
	/* Periodic thermal/Tx-power recalibration worker; see
	 * iwl3945_bg_reg_txpower_periodic(). */
	INIT_DELAYED_WORK(&priv->_3945.thermal_periodic,
			  iwl3945_bg_reg_txpower_periodic);
}
2461
void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv)
{
	/* Cancel the periodic thermal recalibration.  Note this is the
	 * non-_sync variant: an already-running instance may still be
	 * executing when this returns. */
	cancel_delayed_work(&priv->_3945.thermal_periodic);
}
2466
2467/* check contents of special bootstrap uCode SRAM */
2468static int iwl3945_verify_bsm(struct iwl_priv *priv)
2469 {
2470 __le32 *image = priv->ucode_boot.v_addr;
2471 u32 len = priv->ucode_boot.len;
2472 u32 reg;
2473 u32 val;
2474
2475 IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
2476
2477 /* verify BSM SRAM contents */
2478 val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
2479 for (reg = BSM_SRAM_LOWER_BOUND;
2480 reg < BSM_SRAM_LOWER_BOUND + len;
2481 reg += sizeof(u32), image++) {
2482 val = iwl_legacy_read_prph(priv, reg);
2483 if (val != le32_to_cpu(*image)) {
2484 IWL_ERR(priv, "BSM uCode verification failed at "
2485 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
2486 BSM_SRAM_LOWER_BOUND,
2487 reg - BSM_SRAM_LOWER_BOUND, len,
2488 val, le32_to_cpu(*image));
2489 return -EIO;
2490 }
2491 }
2492
2493 IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
2494
2495 return 0;
2496}
2497
2498
2499/******************************************************************************
2500 *
2501 * EEPROM related functions
2502 *
2503 ******************************************************************************/
2504
2505/*
2506 * Clear the OWNER_MSK, to establish driver (instead of uCode running on
2507 * embedded controller) as EEPROM reader; each read is a series of pulses
2508 * to/from the EEPROM chip, not a single event, so even reads could conflict
2509 * if they weren't arbitrated by some ownership mechanism. Here, the driver
2510 * simply claims ownership, which should be safe when this function is called
2511 * (i.e. before loading uCode!).
2512 */
static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
{
	/* Claim EEPROM ownership for the driver by clearing the owner
	 * mask; safe because uCode is not yet running (see comment
	 * above).  Cannot fail, hence the unconditional 0. */
	_iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
	return 0;
}
2518
2519
static void iwl3945_eeprom_release_semaphore(struct iwl_priv *priv)
{
	/* The ownership claim made in iwl3945_eeprom_acquire_semaphore()
	 * needs no explicit release on 3945; intentionally empty. */
}
2524
2525 /**
2526 * iwl3945_load_bsm - Load bootstrap instructions
2527 *
2528 * BSM operation:
2529 *
2530 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
2531 * in special SRAM that does not power down during RFKILL. When powering back
2532 * up after power-saving sleeps (or during initial uCode load), the BSM loads
2533 * the bootstrap program into the on-board processor, and starts it.
2534 *
2535 * The bootstrap program loads (via DMA) instructions and data for a new
2536 * program from host DRAM locations indicated by the host driver in the
2537 * BSM_DRAM_* registers. Once the new program is loaded, it starts
2538 * automatically.
2539 *
2540 * When initializing the NIC, the host driver points the BSM to the
2541 * "initialize" uCode image. This uCode sets up some internal data, then
2542 * notifies host via "initialize alive" that it is complete.
2543 *
2544 * The host then replaces the BSM_DRAM_* pointer values to point to the
2545 * normal runtime uCode instructions and a backup uCode data cache buffer
2546 * (filled initially with starting data values for the on-board processor),
2547 * then triggers the "initialize" uCode to load and launch the runtime uCode,
2548 * which begins normal operation.
2549 *
2550 * When doing a power-save shutdown, runtime uCode saves data SRAM into
2551 * the backup data cache in DRAM before SRAM is powered down.
2552 *
2553 * When powering back up, the BSM loads the bootstrap program. This reloads
2554 * the runtime uCode instructions and the backup data cache into SRAM,
2555 * and re-launches the runtime uCode from where it left off.
2556 */
static int iwl3945_load_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	dma_addr_t pinst;
	dma_addr_t pdata;
	u32 inst_len;
	u32 data_len;
	int rc;
	int i;
	u32 done;
	u32 reg_offset;

	IWL_DEBUG_INFO(priv, "Begin load bsm\n");

	/* make sure bootstrap program is no larger than BSM's SRAM size */
	if (len > IWL39_MAX_BSM_SIZE)
		return -EINVAL;

	/* Tell bootstrap uCode where to find the "Initialize" uCode
	 *   in host DRAM ... host DRAM physical address bits 31:0 for 3945.
	 * NOTE:  iwl3945_initialize_alive_start() will replace these values,
	 *        after the "initialize" uCode has run, to point to
	 *        runtime/protocol instructions and backup data cache. */
	pinst = priv->ucode_init.p_addr;
	pdata = priv->ucode_init_data.p_addr;
	inst_len = priv->ucode_init.len;
	data_len = priv->ucode_init_data.len;

	iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
	iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);

	/* Fill BSM memory with bootstrap instructions */
	for (reg_offset = BSM_SRAM_LOWER_BOUND;
	     reg_offset < BSM_SRAM_LOWER_BOUND + len;
	     reg_offset += sizeof(u32), image++)
		_iwl_legacy_write_prph(priv, reg_offset,
					  le32_to_cpu(*image));

	/* Read it all back to catch any corrupted/incomplete write. */
	rc = iwl3945_verify_bsm(priv);
	if (rc)
		return rc;

	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
	iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
	iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG,
				 IWL39_RTC_INST_LOWER_BOUND);
	iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));

	/* Load bootstrap code into instruction SRAM now,
	 *   to prepare to load "initialize" uCode */
	iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
		BSM_WR_CTRL_REG_BIT_START);

	/* Wait for load of bootstrap uCode to finish; the START bit
	 * self-clears on completion (poll budget: 100 x 10 us). */
	for (i = 0; i < 100; i++) {
		done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
		if (!(done & BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i < 100)
		IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
	else {
		IWL_ERR(priv, "BSM write did not complete!\n");
		return -EIO;
	}

	/* Enable future boot loads whenever power management unit triggers it
	 *   (e.g. when powering back up after power-save shutdown) */
	iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
		BSM_WR_CTRL_REG_BIT_START_EN);

	return 0;
}
2634
/* Host-command callbacks the shared iwlegacy core dispatches for 3945. */
static struct iwl_hcmd_ops iwl3945_hcmd = {
	.rxon_assoc = iwl3945_send_rxon_assoc,
	.commit_rxon = iwl3945_commit_rxon,
};
2639
/* Low-level hardware operations the shared iwlegacy core dispatches for
 * 3945: Tx queue/TFD handling, firmware load via the BSM, power/NIC
 * setup, EEPROM access and debugfs statistics readers. */
static struct iwl_lib_ops iwl3945_lib = {
	.txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd,
	.txq_free_tfd = iwl3945_hw_txq_free_tfd,
	.txq_init = iwl3945_hw_tx_queue_init,
	.load_ucode = iwl3945_load_bsm,
	.dump_nic_error_log = iwl3945_dump_nic_error_log,
	.apm_ops = {
		.init = iwl3945_apm_init,
		.config = iwl3945_nic_config,
	},
	.eeprom_ops = {
		/* EEPROM offsets of the per-band channel tables; the last
		 * two slots are NO_HT40 placeholders (no HT40 on 3945). */
		.regulatory_bands = {
			EEPROM_REGULATORY_BAND_1_CHANNELS,
			EEPROM_REGULATORY_BAND_2_CHANNELS,
			EEPROM_REGULATORY_BAND_3_CHANNELS,
			EEPROM_REGULATORY_BAND_4_CHANNELS,
			EEPROM_REGULATORY_BAND_5_CHANNELS,
			EEPROM_REGULATORY_BAND_NO_HT40,
			EEPROM_REGULATORY_BAND_NO_HT40,
		},
		.acquire_semaphore = iwl3945_eeprom_acquire_semaphore,
		.release_semaphore = iwl3945_eeprom_release_semaphore,
	},
	.send_tx_power = iwl3945_send_tx_power,
	.is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,

	.debugfs_ops = {
		.rx_stats_read = iwl3945_ucode_rx_stats_read,
		.tx_stats_read = iwl3945_ucode_tx_stats_read,
		.general_stats_read = iwl3945_ucode_general_stats_read,
	},
};
2672
/* mac80211-driven association/AP/IBSS hooks specific to 3945. */
static const struct iwl_legacy_ops iwl3945_legacy_ops = {
	.post_associate = iwl3945_post_associate,
	.config_ap = iwl3945_config_ap,
	.manage_ibss_station = iwl3945_manage_ibss_station,
};
2678
/* Helpers for building host commands and running scans on 3945. */
static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
	.get_hcmd_size = iwl3945_get_hcmd_size,
	.build_addsta_hcmd = iwl3945_build_addsta_hcmd,
	.request_scan = iwl3945_request_scan,
	.post_scan = iwl3945_post_scan,
};
2685
/* Aggregate ops table handed to the iwlegacy core via the cfg structs
 * below; bundles all 3945-specific callback groups. */
static const struct iwl_ops iwl3945_ops = {
	.lib = &iwl3945_lib,
	.hcmd = &iwl3945_hcmd,
	.utils = &iwl3945_hcmd_utils,
	.led = &iwl3945_led_ops,
	.legacy = &iwl3945_legacy_ops,
	.ieee80211_ops = &iwl3945_hw_ops,
};
2694
/* Hardware parameters common to every 3945 SKU (BG and ABG). */
static struct iwl_base_params iwl3945_base_params = {
	.eeprom_size = IWL3945_EEPROM_IMG_SIZE,
	.num_of_queues = IWL39_NUM_QUEUES,
	.pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
	.set_l0s = false,
	.use_bsm = true,	/* 3945 loads firmware through the BSM */
	.led_compensation = 64,
	.wd_timeout = IWL_DEF_WD_TIMEOUT,
};
2704
/* Configuration for the 2.4 GHz-only (BG) 3945 boards. */
static struct iwl_cfg iwl3945_bg_cfg = {
	.name = "3945BG",
	.fw_name_pre = IWL3945_FW_PRE,
	.ucode_api_max = IWL3945_UCODE_API_MAX,
	.ucode_api_min = IWL3945_UCODE_API_MIN,
	.sku = IWL_SKU_G,	/* only difference from the ABG config below */
	.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
	.ops = &iwl3945_ops,
	.mod_params = &iwl3945_mod_params,
	.base_params = &iwl3945_base_params,
	.led_mode = IWL_LED_BLINK,
};
2717
/* Configuration for dual-band (ABG) 3945 boards; identical to the BG
 * config except the SKU also enables the 802.11a band. */
static struct iwl_cfg iwl3945_abg_cfg = {
	.name = "3945ABG",
	.fw_name_pre = IWL3945_FW_PRE,
	.ucode_api_max = IWL3945_UCODE_API_MAX,
	.ucode_api_min = IWL3945_UCODE_API_MIN,
	.sku = IWL_SKU_A|IWL_SKU_G,
	.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
	.ops = &iwl3945_ops,
	.mod_params = &iwl3945_mod_params,
	.base_params = &iwl3945_base_params,
	.led_mode = IWL_LED_BLINK,
};
2730
/*
 * PCI IDs claimed by this driver.  Entries with a specific subsystem ID
 * are BG-only boards; the PCI_ANY_ID wildcards, listed after the specific
 * entries so they only match what the earlier rows did not, cover the
 * ABG variants of the same 0x4222/0x4227 devices.
 */
DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
	{IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
	{IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
	{IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
	{IWL_PCI_DEVICE(0x4227, 0x1014, iwl3945_bg_cfg)},
	{IWL_PCI_DEVICE(0x4222, PCI_ANY_ID, iwl3945_abg_cfg)},
	{IWL_PCI_DEVICE(0x4227, PCI_ANY_ID, iwl3945_abg_cfg)},
	{0}	/* sentinel terminating the table */
};

MODULE_DEVICE_TABLE(pci, iwl3945_hw_card_ids);
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.h b/drivers/net/wireless/iwlegacy/iwl-3945.h
deleted file mode 100644
index b118b59b71de..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945.h
+++ /dev/null
@@ -1,308 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26/*
27 * Please use this file (iwl-3945.h) for driver implementation definitions.
28 * Please use iwl-3945-commands.h for uCode API definitions.
29 * Please use iwl-3945-hw.h for hardware-related definitions.
30 */
31
32#ifndef __iwl_3945_h__
33#define __iwl_3945_h__
34
35#include <linux/pci.h> /* for struct pci_device_id */
36#include <linux/kernel.h>
37#include <net/ieee80211_radiotap.h>
38
39/* Hardware specific file defines the PCI IDs table for that hardware module */
40extern const struct pci_device_id iwl3945_hw_card_ids[];
41
42#include "iwl-csr.h"
43#include "iwl-prph.h"
44#include "iwl-fh.h"
45#include "iwl-3945-hw.h"
46#include "iwl-debug.h"
47#include "iwl-power.h"
48#include "iwl-dev.h"
49#include "iwl-led.h"
50
51/* Highest firmware API version supported */
52#define IWL3945_UCODE_API_MAX 2
53
54/* Lowest firmware API version supported */
55#define IWL3945_UCODE_API_MIN 1
56
57#define IWL3945_FW_PRE "iwlwifi-3945-"
58#define _IWL3945_MODULE_FIRMWARE(api) IWL3945_FW_PRE #api ".ucode"
59#define IWL3945_MODULE_FIRMWARE(api) _IWL3945_MODULE_FIRMWARE(api)
60
61/* Default noise level to report when noise measurement is not available.
62 * This may be because we're:
63 * 1) Not associated (4965, no beacon statistics being sent to driver)
64 * 2) Scanning (noise measurement does not apply to associated channel)
65 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
66 * Use default noise value of -127 ... this is below the range of measurable
67 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
68 * Also, -127 works better than 0 when averaging frames with/without
69 * noise info (e.g. averaging might be done in app); measured dBm values are
70 * always negative ... using a negative value as the default keeps all
71 * averages within an s8's (used in some apps) range of negative values. */
72#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
73
74/* Module parameters accessible from iwl-*.c */
75extern struct iwl_mod_params iwl3945_mod_params;
76
/* Per-rate statistics window used by the 3945 rate-scaling algorithm. */
struct iwl3945_rate_scale_data {
	u64 data;		/* presumably a bitmap of recent Tx outcomes — confirm vs rs code */
	s32 success_counter;	/* successful transmissions in the window */
	s32 success_ratio;	/* derived success rate — scaling factor not visible here */
	s32 counter;		/* total attempts in the window */
	s32 average_tpt;	/* average throughput observed at this rate */
	unsigned long stamp;	/* timestamp of last update (jiffies — presumably) */
};
85
/* Per-station rate-scaling state for 3945. */
struct iwl3945_rs_sta {
	spinlock_t lock;		/* NOTE(review): assumed to serialize this struct — confirm in rs code */
	struct iwl_priv *priv;		/* back-pointer to the owning driver instance */
	s32 *expected_tpt;		/* expected-throughput table for the current mode */
	unsigned long last_partial_flush;
	unsigned long last_flush;
	u32 flush_time;			/* interval driving rate_scale_flush — presumably */
	u32 last_tx_packets;
	u32 tx_packets;
	u8 tgg;				/* presumably "TGg" (802.11g protection) flag — confirm */
	u8 flush_pending;
	u8 start_rate;
	struct timer_list rate_scale_flush;	/* periodic flush of the rate windows */
	struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];	/* one window per rate */
#ifdef CONFIG_MAC80211_DEBUGFS
	struct dentry *rs_sta_dbgfs_stats_table_file;
#endif

	/* used to be in sta_info */
	int last_txrate_idx;
};
107
108
/*
 * The common struct MUST be first because it is shared between
 * 3945 and 4965!  (Code that only knows iwl_station_priv_common
 * casts the pointer, so the layout offset must be zero.)
 */
struct iwl3945_sta_priv {
	struct iwl_station_priv_common common;
	struct iwl3945_rs_sta rs_sta;	/* 3945-specific rate-scaling state */
};
117
/* Antenna selection choices for the 3945 radio. */
enum iwl3945_antenna {
	IWL_ANTENNA_DIVERSITY,	/* let the hardware pick between antennas */
	IWL_ANTENNA_MAIN,	/* force the main antenna */
	IWL_ANTENNA_AUX		/* force the auxiliary antenna */
};
123
124/*
125 * RTS threshold here is total size [2347] minus 4 FCS bytes
126 * Per spec:
127 * a value of 0 means RTS on all data/management packets
128 * a value > max MSDU size means no RTS
129 * else RTS for data/management frames where MPDU is larger
130 * than RTS value.
131 */
132#define DEFAULT_RTS_THRESHOLD 2347U
133#define MIN_RTS_THRESHOLD 0U
134#define MAX_RTS_THRESHOLD 2347U
135#define MAX_MSDU_SIZE 2304U
136#define MAX_MPDU_SIZE 2346U
137#define DEFAULT_BEACON_INTERVAL 100U
138#define DEFAULT_SHORT_RETRY_LIMIT 7U
139#define DEFAULT_LONG_RETRY_LIMIT 4U
140
141#define IWL_TX_FIFO_AC0 0
142#define IWL_TX_FIFO_AC1 1
143#define IWL_TX_FIFO_AC2 2
144#define IWL_TX_FIFO_AC3 3
145#define IWL_TX_FIFO_HCCA_1 5
146#define IWL_TX_FIFO_HCCA_2 6
147#define IWL_TX_FIFO_NONE 7
148
149#define IEEE80211_DATA_LEN 2304
150#define IEEE80211_4ADDR_LEN 30
151#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
152#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
153
/*
 * Scratch buffer for building beacon/management frames: one allocation
 * viewable as a generic 802.11 header, a beacon Tx command, or raw bytes.
 * list presumably links frames on a driver free/pending list — confirm
 * against iwl3945-base.c.
 */
struct iwl3945_frame {
	union {
		struct ieee80211_hdr frame;
		struct iwl3945_tx_beacon_cmd beacon;
		u8 raw[IEEE80211_FRAME_LEN];
		u8 cmd[360];
	} u;
	struct list_head list;
};
163
164#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
165#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
166#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
167
168#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
169#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
170#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
171
172#define IWL_SUPPORTED_RATES_IE_LEN 8
173
174#define SCAN_INTERVAL 100
175
176#define MAX_TID_COUNT 9
177
178#define IWL_INVALID_RATE 0xFF
179#define IWL_INVALID_VALUE -1
180
181#define STA_PS_STATUS_WAKE 0
182#define STA_PS_STATUS_SLEEP 1
183
/* Per-peer sequence/fragment tracking entry for IBSS, keyed by MAC. */
struct iwl3945_ibss_seq {
	u8 mac[ETH_ALEN];		/* peer station address */
	u16 seq_num;			/* last seen sequence number */
	u16 frag_num;			/* last seen fragment number */
	unsigned long packet_time;	/* time of last packet (jiffies — presumably) */
	struct list_head list;
};
191
192#define IWL_RX_HDR(x) ((struct iwl3945_rx_frame_hdr *)(\
193 x->u.rx_frame.stats.payload + \
194 x->u.rx_frame.stats.phy_count))
195#define IWL_RX_END(x) ((struct iwl3945_rx_frame_end *)(\
196 IWL_RX_HDR(x)->payload + \
197 le16_to_cpu(IWL_RX_HDR(x)->len)))
198#define IWL_RX_STATS(x) (&x->u.rx_frame.stats)
199#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload)
200
201
202/******************************************************************************
203 *
204 * Functions implemented in iwl3945-base.c which are forward declared here
205 * for use by iwl-*.c
206 *
207 *****************************************************************************/
208extern int iwl3945_calc_db_from_ratio(int sig_ratio);
209extern void iwl3945_rx_replenish(void *data);
210extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
211extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
212 struct ieee80211_hdr *hdr, int left);
213extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
214 char **buf, bool display);
215extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
216
217/******************************************************************************
218 *
219 * Functions implemented in iwl-[34]*.c which are forward declared here
220 * for use by iwl3945-base.c
221 *
222 * NOTE: The implementation of these functions are hardware specific
223 * which is why they are in the hardware specific files (vs. iwl-base.c)
224 *
225 * Naming convention --
226 * iwl3945_ <-- Its part of iwlwifi (should be changed to iwl3945_)
227 * iwl3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
228 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
229 * iwl3945_bg_ <-- Called from work queue context
230 * iwl3945_mac_ <-- mac80211 callback
231 *
232 ****************************************************************************/
233extern void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv);
234extern void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv);
235extern void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv);
236extern int iwl3945_hw_rxq_stop(struct iwl_priv *priv);
237extern int iwl3945_hw_set_hw_params(struct iwl_priv *priv);
238extern int iwl3945_hw_nic_init(struct iwl_priv *priv);
239extern int iwl3945_hw_nic_stop_master(struct iwl_priv *priv);
240extern void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv);
241extern void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv);
242extern int iwl3945_hw_nic_reset(struct iwl_priv *priv);
243extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
244 struct iwl_tx_queue *txq,
245 dma_addr_t addr, u16 len,
246 u8 reset, u8 pad);
247extern void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv,
248 struct iwl_tx_queue *txq);
249extern int iwl3945_hw_get_temperature(struct iwl_priv *priv);
250extern int iwl3945_hw_tx_queue_init(struct iwl_priv *priv,
251 struct iwl_tx_queue *txq);
252extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
253 struct iwl3945_frame *frame, u8 rate);
254void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
255 struct iwl_device_cmd *cmd,
256 struct ieee80211_tx_info *info,
257 struct ieee80211_hdr *hdr,
258 int sta_id, int tx_id);
259extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv);
260extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
261extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
262 struct iwl_rx_mem_buffer *rxb);
263void iwl3945_reply_statistics(struct iwl_priv *priv,
264 struct iwl_rx_mem_buffer *rxb);
265extern void iwl3945_disable_events(struct iwl_priv *priv);
266extern int iwl4965_get_temperature(const struct iwl_priv *priv);
267extern void iwl3945_post_associate(struct iwl_priv *priv);
268extern void iwl3945_config_ap(struct iwl_priv *priv);
269
270extern int iwl3945_commit_rxon(struct iwl_priv *priv,
271 struct iwl_rxon_context *ctx);
272
273/**
274 * iwl3945_hw_find_station - Find station id for a given BSSID
275 * @bssid: MAC address of station ID to find
276 *
277 * NOTE: This should not be hardware specific but the code has
278 * not yet been merged into a single common layer for managing the
279 * station tables.
280 */
281extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
282
283extern struct ieee80211_ops iwl3945_hw_ops;
284
285/*
286 * Forward declare iwl-3945.c functions for iwl3945-base.c
287 */
288extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
289extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
290extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv);
291extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv);
292
293extern const struct iwl_channel_info *iwl3945_get_channel_info(
294 const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
295
296extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate);
297
298/* scanning */
299int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
300void iwl3945_post_scan(struct iwl_priv *priv);
301
302/* rates */
303extern const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945];
304
305/* Requires full declaration of iwl_priv before including */
306#include "iwl-io.h"
307
308#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
deleted file mode 100644
index f46c80e6e005..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
+++ /dev/null
@@ -1,75 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62#ifndef __iwl_4965_calib_h__
63#define __iwl_4965_calib_h__
64
65#include "iwl-dev.h"
66#include "iwl-core.h"
67#include "iwl-commands.h"
68
69void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp);
70void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp);
71void iwl4965_init_sensitivity(struct iwl_priv *priv);
72void iwl4965_reset_run_time_calib(struct iwl_priv *priv);
73void iwl4965_calib_free_results(struct iwl_priv *priv);
74
75#endif /* __iwl_4965_calib_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
deleted file mode 100644
index 1c93665766e4..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
+++ /dev/null
@@ -1,774 +0,0 @@
1/******************************************************************************
2*
3* GPL LICENSE SUMMARY
4*
5* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6*
7* This program is free software; you can redistribute it and/or modify
8* it under the terms of version 2 of the GNU General Public License as
9* published by the Free Software Foundation.
10*
11* This program is distributed in the hope that it will be useful, but
12* WITHOUT ANY WARRANTY; without even the implied warranty of
13* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14* General Public License for more details.
15*
16* You should have received a copy of the GNU General Public License
17* along with this program; if not, write to the Free Software
18* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19* USA
20*
21* The full GNU General Public License is included in this distribution
22* in the file called LICENSE.GPL.
23*
24* Contact Information:
25* Intel Linux Wireless <ilw@linux.intel.com>
26* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27*****************************************************************************/
28#include "iwl-4965.h"
29#include "iwl-4965-debugfs.h"
30
/* printf templates shared by the debugfs statistics dump helpers below. */
static const char *fmt_value = " %-30s %10u\n";
static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
static const char *fmt_header =
	"%-32s current cumulative delta max\n";
35
36static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
37{
38 int p = 0;
39 u32 flag;
40
41 flag = le32_to_cpu(priv->_4965.statistics.flag);
42
43 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
44 if (flag & UCODE_STATISTICS_CLEAR_MSK)
45 p += scnprintf(buf + p, bufsz - p,
46 "\tStatistics have been cleared\n");
47 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
48 (flag & UCODE_STATISTICS_FREQUENCY_MSK)
49 ? "2.4 GHz" : "5.2 GHz");
50 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
51 (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
52 ? "enabled" : "disabled");
53
54 return p;
55}
56
57ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
58 size_t count, loff_t *ppos)
59{
60 struct iwl_priv *priv = file->private_data;
61 int pos = 0;
62 char *buf;
63 int bufsz = sizeof(struct statistics_rx_phy) * 40 +
64 sizeof(struct statistics_rx_non_phy) * 40 +
65 sizeof(struct statistics_rx_ht_phy) * 40 + 400;
66 ssize_t ret;
67 struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
68 struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
69 struct statistics_rx_non_phy *general, *accum_general;
70 struct statistics_rx_non_phy *delta_general, *max_general;
71 struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
72
73 if (!iwl_legacy_is_alive(priv))
74 return -EAGAIN;
75
76 buf = kzalloc(bufsz, GFP_KERNEL);
77 if (!buf) {
78 IWL_ERR(priv, "Can not allocate Buffer\n");
79 return -ENOMEM;
80 }
81
82 /*
83 * the statistic information display here is based on
84 * the last statistics notification from uCode
85 * might not reflect the current uCode activity
86 */
87 ofdm = &priv->_4965.statistics.rx.ofdm;
88 cck = &priv->_4965.statistics.rx.cck;
89 general = &priv->_4965.statistics.rx.general;
90 ht = &priv->_4965.statistics.rx.ofdm_ht;
91 accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm;
92 accum_cck = &priv->_4965.accum_statistics.rx.cck;
93 accum_general = &priv->_4965.accum_statistics.rx.general;
94 accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht;
95 delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm;
96 delta_cck = &priv->_4965.delta_statistics.rx.cck;
97 delta_general = &priv->_4965.delta_statistics.rx.general;
98 delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht;
99 max_ofdm = &priv->_4965.max_delta.rx.ofdm;
100 max_cck = &priv->_4965.max_delta.rx.cck;
101 max_general = &priv->_4965.max_delta.rx.general;
102 max_ht = &priv->_4965.max_delta.rx.ofdm_ht;
103
104 pos += iwl4965_statistics_flag(priv, buf, bufsz);
105 pos += scnprintf(buf + pos, bufsz - pos,
106 fmt_header, "Statistics_Rx - OFDM:");
107 pos += scnprintf(buf + pos, bufsz - pos,
108 fmt_table, "ina_cnt:",
109 le32_to_cpu(ofdm->ina_cnt),
110 accum_ofdm->ina_cnt,
111 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
112 pos += scnprintf(buf + pos, bufsz - pos,
113 fmt_table, "fina_cnt:",
114 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
115 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
116 pos += scnprintf(buf + pos, bufsz - pos,
117 fmt_table, "plcp_err:",
118 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
119 delta_ofdm->plcp_err, max_ofdm->plcp_err);
120 pos += scnprintf(buf + pos, bufsz - pos,
121 fmt_table, "crc32_err:",
122 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
123 delta_ofdm->crc32_err, max_ofdm->crc32_err);
124 pos += scnprintf(buf + pos, bufsz - pos,
125 fmt_table, "overrun_err:",
126 le32_to_cpu(ofdm->overrun_err),
127 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
128 max_ofdm->overrun_err);
129 pos += scnprintf(buf + pos, bufsz - pos,
130 fmt_table, "early_overrun_err:",
131 le32_to_cpu(ofdm->early_overrun_err),
132 accum_ofdm->early_overrun_err,
133 delta_ofdm->early_overrun_err,
134 max_ofdm->early_overrun_err);
135 pos += scnprintf(buf + pos, bufsz - pos,
136 fmt_table, "crc32_good:",
137 le32_to_cpu(ofdm->crc32_good),
138 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
139 max_ofdm->crc32_good);
140 pos += scnprintf(buf + pos, bufsz - pos,
141 fmt_table, "false_alarm_cnt:",
142 le32_to_cpu(ofdm->false_alarm_cnt),
143 accum_ofdm->false_alarm_cnt,
144 delta_ofdm->false_alarm_cnt,
145 max_ofdm->false_alarm_cnt);
146 pos += scnprintf(buf + pos, bufsz - pos,
147 fmt_table, "fina_sync_err_cnt:",
148 le32_to_cpu(ofdm->fina_sync_err_cnt),
149 accum_ofdm->fina_sync_err_cnt,
150 delta_ofdm->fina_sync_err_cnt,
151 max_ofdm->fina_sync_err_cnt);
152 pos += scnprintf(buf + pos, bufsz - pos,
153 fmt_table, "sfd_timeout:",
154 le32_to_cpu(ofdm->sfd_timeout),
155 accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
156 max_ofdm->sfd_timeout);
157 pos += scnprintf(buf + pos, bufsz - pos,
158 fmt_table, "fina_timeout:",
159 le32_to_cpu(ofdm->fina_timeout),
160 accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
161 max_ofdm->fina_timeout);
162 pos += scnprintf(buf + pos, bufsz - pos,
163 fmt_table, "unresponded_rts:",
164 le32_to_cpu(ofdm->unresponded_rts),
165 accum_ofdm->unresponded_rts,
166 delta_ofdm->unresponded_rts,
167 max_ofdm->unresponded_rts);
168 pos += scnprintf(buf + pos, bufsz - pos,
169 fmt_table, "rxe_frame_lmt_ovrun:",
170 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
171 accum_ofdm->rxe_frame_limit_overrun,
172 delta_ofdm->rxe_frame_limit_overrun,
173 max_ofdm->rxe_frame_limit_overrun);
174 pos += scnprintf(buf + pos, bufsz - pos,
175 fmt_table, "sent_ack_cnt:",
176 le32_to_cpu(ofdm->sent_ack_cnt),
177 accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
178 max_ofdm->sent_ack_cnt);
179 pos += scnprintf(buf + pos, bufsz - pos,
180 fmt_table, "sent_cts_cnt:",
181 le32_to_cpu(ofdm->sent_cts_cnt),
182 accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
183 max_ofdm->sent_cts_cnt);
184 pos += scnprintf(buf + pos, bufsz - pos,
185 fmt_table, "sent_ba_rsp_cnt:",
186 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
187 accum_ofdm->sent_ba_rsp_cnt,
188 delta_ofdm->sent_ba_rsp_cnt,
189 max_ofdm->sent_ba_rsp_cnt);
190 pos += scnprintf(buf + pos, bufsz - pos,
191 fmt_table, "dsp_self_kill:",
192 le32_to_cpu(ofdm->dsp_self_kill),
193 accum_ofdm->dsp_self_kill,
194 delta_ofdm->dsp_self_kill,
195 max_ofdm->dsp_self_kill);
196 pos += scnprintf(buf + pos, bufsz - pos,
197 fmt_table, "mh_format_err:",
198 le32_to_cpu(ofdm->mh_format_err),
199 accum_ofdm->mh_format_err,
200 delta_ofdm->mh_format_err,
201 max_ofdm->mh_format_err);
202 pos += scnprintf(buf + pos, bufsz - pos,
203 fmt_table, "re_acq_main_rssi_sum:",
204 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
205 accum_ofdm->re_acq_main_rssi_sum,
206 delta_ofdm->re_acq_main_rssi_sum,
207 max_ofdm->re_acq_main_rssi_sum);
208
209 pos += scnprintf(buf + pos, bufsz - pos,
210 fmt_header, "Statistics_Rx - CCK:");
211 pos += scnprintf(buf + pos, bufsz - pos,
212 fmt_table, "ina_cnt:",
213 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
214 delta_cck->ina_cnt, max_cck->ina_cnt);
215 pos += scnprintf(buf + pos, bufsz - pos,
216 fmt_table, "fina_cnt:",
217 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
218 delta_cck->fina_cnt, max_cck->fina_cnt);
219 pos += scnprintf(buf + pos, bufsz - pos,
220 fmt_table, "plcp_err:",
221 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
222 delta_cck->plcp_err, max_cck->plcp_err);
223 pos += scnprintf(buf + pos, bufsz - pos,
224 fmt_table, "crc32_err:",
225 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
226 delta_cck->crc32_err, max_cck->crc32_err);
227 pos += scnprintf(buf + pos, bufsz - pos,
228 fmt_table, "overrun_err:",
229 le32_to_cpu(cck->overrun_err),
230 accum_cck->overrun_err, delta_cck->overrun_err,
231 max_cck->overrun_err);
232 pos += scnprintf(buf + pos, bufsz - pos,
233 fmt_table, "early_overrun_err:",
234 le32_to_cpu(cck->early_overrun_err),
235 accum_cck->early_overrun_err,
236 delta_cck->early_overrun_err,
237 max_cck->early_overrun_err);
238 pos += scnprintf(buf + pos, bufsz - pos,
239 fmt_table, "crc32_good:",
240 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
241 delta_cck->crc32_good, max_cck->crc32_good);
242 pos += scnprintf(buf + pos, bufsz - pos,
243 fmt_table, "false_alarm_cnt:",
244 le32_to_cpu(cck->false_alarm_cnt),
245 accum_cck->false_alarm_cnt,
246 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
247 pos += scnprintf(buf + pos, bufsz - pos,
248 fmt_table, "fina_sync_err_cnt:",
249 le32_to_cpu(cck->fina_sync_err_cnt),
250 accum_cck->fina_sync_err_cnt,
251 delta_cck->fina_sync_err_cnt,
252 max_cck->fina_sync_err_cnt);
253 pos += scnprintf(buf + pos, bufsz - pos,
254 fmt_table, "sfd_timeout:",
255 le32_to_cpu(cck->sfd_timeout),
256 accum_cck->sfd_timeout, delta_cck->sfd_timeout,
257 max_cck->sfd_timeout);
258 pos += scnprintf(buf + pos, bufsz - pos,
259 fmt_table, "fina_timeout:",
260 le32_to_cpu(cck->fina_timeout),
261 accum_cck->fina_timeout, delta_cck->fina_timeout,
262 max_cck->fina_timeout);
263 pos += scnprintf(buf + pos, bufsz - pos,
264 fmt_table, "unresponded_rts:",
265 le32_to_cpu(cck->unresponded_rts),
266 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
267 max_cck->unresponded_rts);
268 pos += scnprintf(buf + pos, bufsz - pos,
269 fmt_table, "rxe_frame_lmt_ovrun:",
270 le32_to_cpu(cck->rxe_frame_limit_overrun),
271 accum_cck->rxe_frame_limit_overrun,
272 delta_cck->rxe_frame_limit_overrun,
273 max_cck->rxe_frame_limit_overrun);
274 pos += scnprintf(buf + pos, bufsz - pos,
275 fmt_table, "sent_ack_cnt:",
276 le32_to_cpu(cck->sent_ack_cnt),
277 accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
278 max_cck->sent_ack_cnt);
279 pos += scnprintf(buf + pos, bufsz - pos,
280 fmt_table, "sent_cts_cnt:",
281 le32_to_cpu(cck->sent_cts_cnt),
282 accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
283 max_cck->sent_cts_cnt);
284 pos += scnprintf(buf + pos, bufsz - pos,
285 fmt_table, "sent_ba_rsp_cnt:",
286 le32_to_cpu(cck->sent_ba_rsp_cnt),
287 accum_cck->sent_ba_rsp_cnt,
288 delta_cck->sent_ba_rsp_cnt,
289 max_cck->sent_ba_rsp_cnt);
290 pos += scnprintf(buf + pos, bufsz - pos,
291 fmt_table, "dsp_self_kill:",
292 le32_to_cpu(cck->dsp_self_kill),
293 accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
294 max_cck->dsp_self_kill);
295 pos += scnprintf(buf + pos, bufsz - pos,
296 fmt_table, "mh_format_err:",
297 le32_to_cpu(cck->mh_format_err),
298 accum_cck->mh_format_err, delta_cck->mh_format_err,
299 max_cck->mh_format_err);
300 pos += scnprintf(buf + pos, bufsz - pos,
301 fmt_table, "re_acq_main_rssi_sum:",
302 le32_to_cpu(cck->re_acq_main_rssi_sum),
303 accum_cck->re_acq_main_rssi_sum,
304 delta_cck->re_acq_main_rssi_sum,
305 max_cck->re_acq_main_rssi_sum);
306
307 pos += scnprintf(buf + pos, bufsz - pos,
308 fmt_header, "Statistics_Rx - GENERAL:");
309 pos += scnprintf(buf + pos, bufsz - pos,
310 fmt_table, "bogus_cts:",
311 le32_to_cpu(general->bogus_cts),
312 accum_general->bogus_cts, delta_general->bogus_cts,
313 max_general->bogus_cts);
314 pos += scnprintf(buf + pos, bufsz - pos,
315 fmt_table, "bogus_ack:",
316 le32_to_cpu(general->bogus_ack),
317 accum_general->bogus_ack, delta_general->bogus_ack,
318 max_general->bogus_ack);
319 pos += scnprintf(buf + pos, bufsz - pos,
320 fmt_table, "non_bssid_frames:",
321 le32_to_cpu(general->non_bssid_frames),
322 accum_general->non_bssid_frames,
323 delta_general->non_bssid_frames,
324 max_general->non_bssid_frames);
325 pos += scnprintf(buf + pos, bufsz - pos,
326 fmt_table, "filtered_frames:",
327 le32_to_cpu(general->filtered_frames),
328 accum_general->filtered_frames,
329 delta_general->filtered_frames,
330 max_general->filtered_frames);
331 pos += scnprintf(buf + pos, bufsz - pos,
332 fmt_table, "non_channel_beacons:",
333 le32_to_cpu(general->non_channel_beacons),
334 accum_general->non_channel_beacons,
335 delta_general->non_channel_beacons,
336 max_general->non_channel_beacons);
337 pos += scnprintf(buf + pos, bufsz - pos,
338 fmt_table, "channel_beacons:",
339 le32_to_cpu(general->channel_beacons),
340 accum_general->channel_beacons,
341 delta_general->channel_beacons,
342 max_general->channel_beacons);
343 pos += scnprintf(buf + pos, bufsz - pos,
344 fmt_table, "num_missed_bcon:",
345 le32_to_cpu(general->num_missed_bcon),
346 accum_general->num_missed_bcon,
347 delta_general->num_missed_bcon,
348 max_general->num_missed_bcon);
349 pos += scnprintf(buf + pos, bufsz - pos,
350 fmt_table, "adc_rx_saturation_time:",
351 le32_to_cpu(general->adc_rx_saturation_time),
352 accum_general->adc_rx_saturation_time,
353 delta_general->adc_rx_saturation_time,
354 max_general->adc_rx_saturation_time);
355 pos += scnprintf(buf + pos, bufsz - pos,
356 fmt_table, "ina_detect_search_tm:",
357 le32_to_cpu(general->ina_detection_search_time),
358 accum_general->ina_detection_search_time,
359 delta_general->ina_detection_search_time,
360 max_general->ina_detection_search_time);
361 pos += scnprintf(buf + pos, bufsz - pos,
362 fmt_table, "beacon_silence_rssi_a:",
363 le32_to_cpu(general->beacon_silence_rssi_a),
364 accum_general->beacon_silence_rssi_a,
365 delta_general->beacon_silence_rssi_a,
366 max_general->beacon_silence_rssi_a);
367 pos += scnprintf(buf + pos, bufsz - pos,
368 fmt_table, "beacon_silence_rssi_b:",
369 le32_to_cpu(general->beacon_silence_rssi_b),
370 accum_general->beacon_silence_rssi_b,
371 delta_general->beacon_silence_rssi_b,
372 max_general->beacon_silence_rssi_b);
373 pos += scnprintf(buf + pos, bufsz - pos,
374 fmt_table, "beacon_silence_rssi_c:",
375 le32_to_cpu(general->beacon_silence_rssi_c),
376 accum_general->beacon_silence_rssi_c,
377 delta_general->beacon_silence_rssi_c,
378 max_general->beacon_silence_rssi_c);
379 pos += scnprintf(buf + pos, bufsz - pos,
380 fmt_table, "interference_data_flag:",
381 le32_to_cpu(general->interference_data_flag),
382 accum_general->interference_data_flag,
383 delta_general->interference_data_flag,
384 max_general->interference_data_flag);
385 pos += scnprintf(buf + pos, bufsz - pos,
386 fmt_table, "channel_load:",
387 le32_to_cpu(general->channel_load),
388 accum_general->channel_load,
389 delta_general->channel_load,
390 max_general->channel_load);
391 pos += scnprintf(buf + pos, bufsz - pos,
392 fmt_table, "dsp_false_alarms:",
393 le32_to_cpu(general->dsp_false_alarms),
394 accum_general->dsp_false_alarms,
395 delta_general->dsp_false_alarms,
396 max_general->dsp_false_alarms);
397 pos += scnprintf(buf + pos, bufsz - pos,
398 fmt_table, "beacon_rssi_a:",
399 le32_to_cpu(general->beacon_rssi_a),
400 accum_general->beacon_rssi_a,
401 delta_general->beacon_rssi_a,
402 max_general->beacon_rssi_a);
403 pos += scnprintf(buf + pos, bufsz - pos,
404 fmt_table, "beacon_rssi_b:",
405 le32_to_cpu(general->beacon_rssi_b),
406 accum_general->beacon_rssi_b,
407 delta_general->beacon_rssi_b,
408 max_general->beacon_rssi_b);
409 pos += scnprintf(buf + pos, bufsz - pos,
410 fmt_table, "beacon_rssi_c:",
411 le32_to_cpu(general->beacon_rssi_c),
412 accum_general->beacon_rssi_c,
413 delta_general->beacon_rssi_c,
414 max_general->beacon_rssi_c);
415 pos += scnprintf(buf + pos, bufsz - pos,
416 fmt_table, "beacon_energy_a:",
417 le32_to_cpu(general->beacon_energy_a),
418 accum_general->beacon_energy_a,
419 delta_general->beacon_energy_a,
420 max_general->beacon_energy_a);
421 pos += scnprintf(buf + pos, bufsz - pos,
422 fmt_table, "beacon_energy_b:",
423 le32_to_cpu(general->beacon_energy_b),
424 accum_general->beacon_energy_b,
425 delta_general->beacon_energy_b,
426 max_general->beacon_energy_b);
427 pos += scnprintf(buf + pos, bufsz - pos,
428 fmt_table, "beacon_energy_c:",
429 le32_to_cpu(general->beacon_energy_c),
430 accum_general->beacon_energy_c,
431 delta_general->beacon_energy_c,
432 max_general->beacon_energy_c);
433
434 pos += scnprintf(buf + pos, bufsz - pos,
435 fmt_header, "Statistics_Rx - OFDM_HT:");
436 pos += scnprintf(buf + pos, bufsz - pos,
437 fmt_table, "plcp_err:",
438 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
439 delta_ht->plcp_err, max_ht->plcp_err);
440 pos += scnprintf(buf + pos, bufsz - pos,
441 fmt_table, "overrun_err:",
442 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
443 delta_ht->overrun_err, max_ht->overrun_err);
444 pos += scnprintf(buf + pos, bufsz - pos,
445 fmt_table, "early_overrun_err:",
446 le32_to_cpu(ht->early_overrun_err),
447 accum_ht->early_overrun_err,
448 delta_ht->early_overrun_err,
449 max_ht->early_overrun_err);
450 pos += scnprintf(buf + pos, bufsz - pos,
451 fmt_table, "crc32_good:",
452 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
453 delta_ht->crc32_good, max_ht->crc32_good);
454 pos += scnprintf(buf + pos, bufsz - pos,
455 fmt_table, "crc32_err:",
456 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
457 delta_ht->crc32_err, max_ht->crc32_err);
458 pos += scnprintf(buf + pos, bufsz - pos,
459 fmt_table, "mh_format_err:",
460 le32_to_cpu(ht->mh_format_err),
461 accum_ht->mh_format_err,
462 delta_ht->mh_format_err, max_ht->mh_format_err);
463 pos += scnprintf(buf + pos, bufsz - pos,
464 fmt_table, "agg_crc32_good:",
465 le32_to_cpu(ht->agg_crc32_good),
466 accum_ht->agg_crc32_good,
467 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
468 pos += scnprintf(buf + pos, bufsz - pos,
469 fmt_table, "agg_mpdu_cnt:",
470 le32_to_cpu(ht->agg_mpdu_cnt),
471 accum_ht->agg_mpdu_cnt,
472 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
473 pos += scnprintf(buf + pos, bufsz - pos,
474 fmt_table, "agg_cnt:",
475 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
476 delta_ht->agg_cnt, max_ht->agg_cnt);
477 pos += scnprintf(buf + pos, bufsz - pos,
478 fmt_table, "unsupport_mcs:",
479 le32_to_cpu(ht->unsupport_mcs),
480 accum_ht->unsupport_mcs,
481 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
482
483 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
484 kfree(buf);
485 return ret;
486}
487
/*
 * debugfs read handler: format the uCode TX statistics as text and copy
 * them to userspace.  For every counter the row shows four columns:
 * current value (le32 straight from the last uCode statistics
 * notification), and the driver-maintained accumulated, delta and
 * max-delta copies.
 */
488ssize_t iwl4965_ucode_tx_stats_read(struct file *file,
489 char __user *user_buf,
490 size_t count, loff_t *ppos)
491{
492 struct iwl_priv *priv = file->private_data;
493 int pos = 0;
494 char *buf;
 /* heuristic sizing: ~48 text bytes per byte of statistics_tx plus
  * slack for the header; scnprintf() truncates safely if exceeded */
495 int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
496 ssize_t ret;
497 struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
498
 /* no statistics until the uCode is alive; ask the reader to retry */
499 if (!iwl_legacy_is_alive(priv))
500 return -EAGAIN;
501
502 buf = kzalloc(bufsz, GFP_KERNEL);
503 if (!buf) {
504 IWL_ERR(priv, "Can not allocate Buffer\n");
505 return -ENOMEM;
506 }
507
508 /* the statistic information display here is based on
509 * the last statistics notification from uCode
510 * might not reflect the current uCode activity
511 */
512 tx = &priv->_4965.statistics.tx;
513 accum_tx = &priv->_4965.accum_statistics.tx;
514 delta_tx = &priv->_4965.delta_statistics.tx;
515 max_tx = &priv->_4965.max_delta.tx;
516
 /* leading flag line, then one fmt_table row per counter */
517 pos += iwl4965_statistics_flag(priv, buf, bufsz);
518 pos += scnprintf(buf + pos, bufsz - pos,
519 fmt_header, "Statistics_Tx:");
520 pos += scnprintf(buf + pos, bufsz - pos,
521 fmt_table, "preamble:",
522 le32_to_cpu(tx->preamble_cnt),
523 accum_tx->preamble_cnt,
524 delta_tx->preamble_cnt, max_tx->preamble_cnt);
525 pos += scnprintf(buf + pos, bufsz - pos,
526 fmt_table, "rx_detected_cnt:",
527 le32_to_cpu(tx->rx_detected_cnt),
528 accum_tx->rx_detected_cnt,
529 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
530 pos += scnprintf(buf + pos, bufsz - pos,
531 fmt_table, "bt_prio_defer_cnt:",
532 le32_to_cpu(tx->bt_prio_defer_cnt),
533 accum_tx->bt_prio_defer_cnt,
534 delta_tx->bt_prio_defer_cnt,
535 max_tx->bt_prio_defer_cnt);
536 pos += scnprintf(buf + pos, bufsz - pos,
537 fmt_table, "bt_prio_kill_cnt:",
538 le32_to_cpu(tx->bt_prio_kill_cnt),
539 accum_tx->bt_prio_kill_cnt,
540 delta_tx->bt_prio_kill_cnt,
541 max_tx->bt_prio_kill_cnt);
542 pos += scnprintf(buf + pos, bufsz - pos,
543 fmt_table, "few_bytes_cnt:",
544 le32_to_cpu(tx->few_bytes_cnt),
545 accum_tx->few_bytes_cnt,
546 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
547 pos += scnprintf(buf + pos, bufsz - pos,
548 fmt_table, "cts_timeout:",
549 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
550 delta_tx->cts_timeout, max_tx->cts_timeout);
551 pos += scnprintf(buf + pos, bufsz - pos,
552 fmt_table, "ack_timeout:",
553 le32_to_cpu(tx->ack_timeout),
554 accum_tx->ack_timeout,
555 delta_tx->ack_timeout, max_tx->ack_timeout);
556 pos += scnprintf(buf + pos, bufsz - pos,
557 fmt_table, "expected_ack_cnt:",
558 le32_to_cpu(tx->expected_ack_cnt),
559 accum_tx->expected_ack_cnt,
560 delta_tx->expected_ack_cnt,
561 max_tx->expected_ack_cnt);
562 pos += scnprintf(buf + pos, bufsz - pos,
563 fmt_table, "actual_ack_cnt:",
564 le32_to_cpu(tx->actual_ack_cnt),
565 accum_tx->actual_ack_cnt,
566 delta_tx->actual_ack_cnt,
567 max_tx->actual_ack_cnt);
568 pos += scnprintf(buf + pos, bufsz - pos,
569 fmt_table, "dump_msdu_cnt:",
570 le32_to_cpu(tx->dump_msdu_cnt),
571 accum_tx->dump_msdu_cnt,
572 delta_tx->dump_msdu_cnt,
573 max_tx->dump_msdu_cnt);
574 pos += scnprintf(buf + pos, bufsz - pos,
575 fmt_table, "abort_nxt_frame_mismatch:",
576 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
577 accum_tx->burst_abort_next_frame_mismatch_cnt,
578 delta_tx->burst_abort_next_frame_mismatch_cnt,
579 max_tx->burst_abort_next_frame_mismatch_cnt);
580 pos += scnprintf(buf + pos, bufsz - pos,
581 fmt_table, "abort_missing_nxt_frame:",
582 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
583 accum_tx->burst_abort_missing_next_frame_cnt,
584 delta_tx->burst_abort_missing_next_frame_cnt,
585 max_tx->burst_abort_missing_next_frame_cnt);
586 pos += scnprintf(buf + pos, bufsz - pos,
587 fmt_table, "cts_timeout_collision:",
588 le32_to_cpu(tx->cts_timeout_collision),
589 accum_tx->cts_timeout_collision,
590 delta_tx->cts_timeout_collision,
591 max_tx->cts_timeout_collision);
592 pos += scnprintf(buf + pos, bufsz - pos,
593 fmt_table, "ack_ba_timeout_collision:",
594 le32_to_cpu(tx->ack_or_ba_timeout_collision),
595 accum_tx->ack_or_ba_timeout_collision,
596 delta_tx->ack_or_ba_timeout_collision,
597 max_tx->ack_or_ba_timeout_collision);
 /* tx->agg.* rows: aggregation / block-ack related counters */
598 pos += scnprintf(buf + pos, bufsz - pos,
599 fmt_table, "agg ba_timeout:",
600 le32_to_cpu(tx->agg.ba_timeout),
601 accum_tx->agg.ba_timeout,
602 delta_tx->agg.ba_timeout,
603 max_tx->agg.ba_timeout);
604 pos += scnprintf(buf + pos, bufsz - pos,
605 fmt_table, "agg ba_resched_frames:",
606 le32_to_cpu(tx->agg.ba_reschedule_frames),
607 accum_tx->agg.ba_reschedule_frames,
608 delta_tx->agg.ba_reschedule_frames,
609 max_tx->agg.ba_reschedule_frames);
610 pos += scnprintf(buf + pos, bufsz - pos,
611 fmt_table, "agg scd_query_agg_frame:",
612 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
613 accum_tx->agg.scd_query_agg_frame_cnt,
614 delta_tx->agg.scd_query_agg_frame_cnt,
615 max_tx->agg.scd_query_agg_frame_cnt);
616 pos += scnprintf(buf + pos, bufsz - pos,
617 fmt_table, "agg scd_query_no_agg:",
618 le32_to_cpu(tx->agg.scd_query_no_agg),
619 accum_tx->agg.scd_query_no_agg,
620 delta_tx->agg.scd_query_no_agg,
621 max_tx->agg.scd_query_no_agg);
622 pos += scnprintf(buf + pos, bufsz - pos,
623 fmt_table, "agg scd_query_agg:",
624 le32_to_cpu(tx->agg.scd_query_agg),
625 accum_tx->agg.scd_query_agg,
626 delta_tx->agg.scd_query_agg,
627 max_tx->agg.scd_query_agg);
628 pos += scnprintf(buf + pos, bufsz - pos,
629 fmt_table, "agg scd_query_mismatch:",
630 le32_to_cpu(tx->agg.scd_query_mismatch),
631 accum_tx->agg.scd_query_mismatch,
632 delta_tx->agg.scd_query_mismatch,
633 max_tx->agg.scd_query_mismatch);
634 pos += scnprintf(buf + pos, bufsz - pos,
635 fmt_table, "agg frame_not_ready:",
636 le32_to_cpu(tx->agg.frame_not_ready),
637 accum_tx->agg.frame_not_ready,
638 delta_tx->agg.frame_not_ready,
639 max_tx->agg.frame_not_ready);
640 pos += scnprintf(buf + pos, bufsz - pos,
641 fmt_table, "agg underrun:",
642 le32_to_cpu(tx->agg.underrun),
643 accum_tx->agg.underrun,
644 delta_tx->agg.underrun, max_tx->agg.underrun);
645 pos += scnprintf(buf + pos, bufsz - pos,
646 fmt_table, "agg bt_prio_kill:",
647 le32_to_cpu(tx->agg.bt_prio_kill),
648 accum_tx->agg.bt_prio_kill,
649 delta_tx->agg.bt_prio_kill,
650 max_tx->agg.bt_prio_kill);
651 pos += scnprintf(buf + pos, bufsz - pos,
652 fmt_table, "agg rx_ba_rsp_cnt:",
653 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
654 accum_tx->agg.rx_ba_rsp_cnt,
655 delta_tx->agg.rx_ba_rsp_cnt,
656 max_tx->agg.rx_ba_rsp_cnt);
657
 /* copy the assembled text out; handles partial reads via *ppos */
658 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
659 kfree(buf);
660 return ret;
661}
662
/*
 * debugfs read handler: dump the "general" uCode statistics block
 * (temperature/timestamp values, debug counters, antenna-diversity
 * counters) as formatted text.  Like the RX/TX readers, each table row
 * shows current, accumulated, delta and max-delta values.
 */
663ssize_t
664iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
665 size_t count, loff_t *ppos)
666{
667 struct iwl_priv *priv = file->private_data;
668 int pos = 0;
669 char *buf;
 /* heuristic sizing, same scheme as the TX/RX readers */
670 int bufsz = sizeof(struct statistics_general) * 10 + 300;
671 ssize_t ret;
672 struct statistics_general_common *general, *accum_general;
673 struct statistics_general_common *delta_general, *max_general;
674 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
675 struct statistics_div *div, *accum_div, *delta_div, *max_div;
676
 /* no statistics until the uCode is alive; ask the reader to retry */
677 if (!iwl_legacy_is_alive(priv))
678 return -EAGAIN;
679
680 buf = kzalloc(bufsz, GFP_KERNEL);
681 if (!buf) {
682 IWL_ERR(priv, "Can not allocate Buffer\n");
683 return -ENOMEM;
684 }
685
686 /* the statistic information display here is based on
687 * the last statistics notification from uCode
688 * might not reflect the current uCode activity
689 */
 /* one pointer set per copy: current, accum, delta, max-delta; dbg and
  * div are sub-structures of general.common */
690 general = &priv->_4965.statistics.general.common;
691 dbg = &priv->_4965.statistics.general.common.dbg;
692 div = &priv->_4965.statistics.general.common.div;
693 accum_general = &priv->_4965.accum_statistics.general.common;
694 accum_dbg = &priv->_4965.accum_statistics.general.common.dbg;
695 accum_div = &priv->_4965.accum_statistics.general.common.div;
696 delta_general = &priv->_4965.delta_statistics.general.common;
697 max_general = &priv->_4965.max_delta.general.common;
698 delta_dbg = &priv->_4965.delta_statistics.general.common.dbg;
699 max_dbg = &priv->_4965.max_delta.general.common.dbg;
700 delta_div = &priv->_4965.delta_statistics.general.common.div;
701 max_div = &priv->_4965.max_delta.general.common.div;
702
703 pos += iwl4965_statistics_flag(priv, buf, bufsz);
704 pos += scnprintf(buf + pos, bufsz - pos,
705 fmt_header, "Statistics_General:");
 /* fmt_value rows: single current value, no accum/delta/max columns */
706 pos += scnprintf(buf + pos, bufsz - pos,
707 fmt_value, "temperature:",
708 le32_to_cpu(general->temperature));
709 pos += scnprintf(buf + pos, bufsz - pos,
710 fmt_value, "ttl_timestamp:",
711 le32_to_cpu(general->ttl_timestamp));
712 pos += scnprintf(buf + pos, bufsz - pos,
713 fmt_table, "burst_check:",
714 le32_to_cpu(dbg->burst_check),
715 accum_dbg->burst_check,
716 delta_dbg->burst_check, max_dbg->burst_check);
717 pos += scnprintf(buf + pos, bufsz - pos,
718 fmt_table, "burst_count:",
719 le32_to_cpu(dbg->burst_count),
720 accum_dbg->burst_count,
721 delta_dbg->burst_count, max_dbg->burst_count);
722 pos += scnprintf(buf + pos, bufsz - pos,
723 fmt_table, "wait_for_silence_timeout_count:",
724 le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
725 accum_dbg->wait_for_silence_timeout_cnt,
726 delta_dbg->wait_for_silence_timeout_cnt,
727 max_dbg->wait_for_silence_timeout_cnt);
728 pos += scnprintf(buf + pos, bufsz - pos,
729 fmt_table, "sleep_time:",
730 le32_to_cpu(general->sleep_time),
731 accum_general->sleep_time,
732 delta_general->sleep_time, max_general->sleep_time);
733 pos += scnprintf(buf + pos, bufsz - pos,
734 fmt_table, "slots_out:",
735 le32_to_cpu(general->slots_out),
736 accum_general->slots_out,
737 delta_general->slots_out, max_general->slots_out);
738 pos += scnprintf(buf + pos, bufsz - pos,
739 fmt_table, "slots_idle:",
740 le32_to_cpu(general->slots_idle),
741 accum_general->slots_idle,
742 delta_general->slots_idle, max_general->slots_idle);
 /* div->* rows: antenna A/B usage and timing counters */
743 pos += scnprintf(buf + pos, bufsz - pos,
744 fmt_table, "tx_on_a:",
745 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
746 delta_div->tx_on_a, max_div->tx_on_a);
747 pos += scnprintf(buf + pos, bufsz - pos,
748 fmt_table, "tx_on_b:",
749 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
750 delta_div->tx_on_b, max_div->tx_on_b);
751 pos += scnprintf(buf + pos, bufsz - pos,
752 fmt_table, "exec_time:",
753 le32_to_cpu(div->exec_time), accum_div->exec_time,
754 delta_div->exec_time, max_div->exec_time);
755 pos += scnprintf(buf + pos, bufsz - pos,
756 fmt_table, "probe_time:",
757 le32_to_cpu(div->probe_time), accum_div->probe_time,
758 delta_div->probe_time, max_div->probe_time);
759 pos += scnprintf(buf + pos, bufsz - pos,
760 fmt_table, "rx_enable_counter:",
761 le32_to_cpu(general->rx_enable_counter),
762 accum_general->rx_enable_counter,
763 delta_general->rx_enable_counter,
764 max_general->rx_enable_counter);
765 pos += scnprintf(buf + pos, bufsz - pos,
766 fmt_table, "num_of_sos_states:",
767 le32_to_cpu(general->num_of_sos_states),
768 accum_general->num_of_sos_states,
769 delta_general->num_of_sos_states,
770 max_general->num_of_sos_states);
 /* copy out to userspace; supports partial reads via *ppos */
771 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
772 kfree(buf);
773 return ret;
774}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
deleted file mode 100644
index 6c8e35361a9e..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
+++ /dev/null
@@ -1,59 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-dev.h"
30#include "iwl-core.h"
31#include "iwl-debug.h"
32
/*
 * Real debugfs read handlers are provided when legacy debugfs support is
 * compiled in; otherwise static inline-style stubs below return 0 so
 * that callers see an immediate EOF and no debugfs code is built.
 */
33#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
34ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos);
36ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
37 size_t count, loff_t *ppos);
38ssize_t iwl4965_ucode_general_stats_read(struct file *file,
39 char __user *user_buf, size_t count, loff_t *ppos);
40#else
/* Stubs: debugfs disabled, every read reports 0 bytes (EOF). */
41static ssize_t
42iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
43 size_t count, loff_t *ppos)
44{
45 return 0;
46}
47static ssize_t
48iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
49 size_t count, loff_t *ppos)
50{
51 return 0;
52}
53static ssize_t
54iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
55 size_t count, loff_t *ppos)
56{
57 return 0;
58}
59#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
deleted file mode 100644
index cb9baab1ff7d..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
+++ /dev/null
@@ -1,154 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/slab.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-commands.h"
72#include "iwl-dev.h"
73#include "iwl-core.h"
74#include "iwl-debug.h"
75#include "iwl-4965.h"
76#include "iwl-io.h"
77
78/******************************************************************************
79 *
80 * EEPROM related functions
81 *
82******************************************************************************/
83
84/*
85 * The device's EEPROM semaphore prevents conflicts between driver and uCode
86 * when accessing the EEPROM; each access is a series of pulses to/from the
87 * EEPROM chip, not a single event, so even reads could conflict if they
88 * weren't arbitrated by the semaphore.
89 */
/*
 * Try to take the EEPROM hardware semaphore (arbitrates EEPROM access
 * between driver and uCode; see comment above).  Returns the >= 0 value
 * from iwl_poll_bit() on success, or the last (negative) poll result
 * after EEPROM_SEM_RETRY_LIMIT attempts.
 * NOTE(review): relies on EEPROM_SEM_RETRY_LIMIT >= 1 — if the limit
 * were 0 the loop body never runs and 'ret' would be returned
 * uninitialized; confirm the constant's definition.
 */
90int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv)
91{
92 u16 count;
93 int ret;
94
95 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
96 /* Request semaphore */
97 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
98 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
99
100 /* See if we got it */
101 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
102 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
103 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
104 EEPROM_SEM_TIMEOUT);
105 if (ret >= 0) {
106 IWL_DEBUG_IO(priv,
107 "Acquired semaphore after %d tries.\n",
108 count+1);
109 return ret;
110 }
111 }
112
113 return ret;
114}
115
/* Release the EEPROM hardware semaphore by clearing the OWN_SEM bit. */
116void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv)
117{
118 iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
119 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
120
121}
122
/*
 * Verify the EEPROM image is recent enough for this device: both the
 * EEPROM version and the calibration version must be >= the minimums in
 * priv->cfg.  Returns 0 if supported, -EINVAL (with an error log) if
 * either version is too old.
 */
123int iwl4965_eeprom_check_version(struct iwl_priv *priv)
124{
125 u16 eeprom_ver;
126 u16 calib_ver;
127
128 eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
129 calib_ver = iwl_legacy_eeprom_query16(priv,
130 EEPROM_4965_CALIB_VERSION_OFFSET);
131
132 if (eeprom_ver < priv->cfg->eeprom_ver ||
133 calib_ver < priv->cfg->eeprom_calib_ver)
134 goto err;
135
136 IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
137 eeprom_ver, calib_ver);
138
139 return 0;
140err:
141 IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
142 "CALIB=0x%x < 0x%x\n",
143 eeprom_ver, priv->cfg->eeprom_ver,
144 calib_ver, priv->cfg->eeprom_calib_ver);
145 return -EINVAL;
146
147}
148
/*
 * Copy the device MAC address (ETH_ALEN bytes) out of the cached EEPROM
 * image into @mac.
 */
149void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
150{
151 const u8 *addr = iwl_legacy_eeprom_query_addr(priv,
152 EEPROM_MAC_ADDRESS);
153 memcpy(mac, addr, ETH_ALEN);
154}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
deleted file mode 100644
index fc6fa2886d9c..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
+++ /dev/null
@@ -1,811 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-4965-hw.h) only for hardware-related definitions.
65 * Use iwl-commands.h for uCode API definitions.
66 * Use iwl-dev.h for driver implementation definitions.
67 */
68
69#ifndef __iwl_4965_hw_h__
70#define __iwl_4965_hw_h__
71
72#include "iwl-fh.h"
73
74/* EEPROM */
75#define IWL4965_EEPROM_IMG_SIZE 1024
76
77/*
78 * uCode queue management definitions ...
79 * The first queue used for block-ack aggregation is #7 (4965 only).
80 * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
81 */
82#define IWL49_FIRST_AMPDU_QUEUE 7
83
84/* Sizes and addresses for instruction and data memory (SRAM) in
85 * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
86#define IWL49_RTC_INST_LOWER_BOUND (0x000000)
87#define IWL49_RTC_INST_UPPER_BOUND (0x018000)
88
89#define IWL49_RTC_DATA_LOWER_BOUND (0x800000)
90#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000)
91
92#define IWL49_RTC_INST_SIZE (IWL49_RTC_INST_UPPER_BOUND - \
93 IWL49_RTC_INST_LOWER_BOUND)
94#define IWL49_RTC_DATA_SIZE (IWL49_RTC_DATA_UPPER_BOUND - \
95 IWL49_RTC_DATA_LOWER_BOUND)
96
97#define IWL49_MAX_INST_SIZE IWL49_RTC_INST_SIZE
98#define IWL49_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE
99
100/* Size of uCode instruction memory in bootstrap state machine */
101#define IWL49_MAX_BSM_SIZE BSM_SRAM_SIZE
102
103static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
104{
105 return (addr >= IWL49_RTC_DATA_LOWER_BOUND) &&
106 (addr < IWL49_RTC_DATA_UPPER_BOUND);
107}
108
109/********************* START TEMPERATURE *************************************/
110
111/**
112 * 4965 temperature calculation.
113 *
114 * The driver must calculate the device temperature before calculating
115 * a txpower setting (amplifier gain is temperature dependent). The
116 * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
117 * values used for the life of the driver, and one of which (R4) is the
118 * real-time temperature indicator.
119 *
120 * uCode provides all 4 values to the driver via the "initialize alive"
121 * notification (see struct iwl4965_init_alive_resp). After the runtime uCode
122 * image loads, uCode updates the R4 value via statistics notifications
123 * (see STATISTICS_NOTIFICATION), which occur after each received beacon
124 * when associated, or can be requested via REPLY_STATISTICS_CMD.
125 *
126 * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver
127 * must sign-extend to 32 bits before applying formula below.
128 *
129 * Formula:
130 *
131 * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
132 *
133 * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is
134 * an additional correction, which should be centered around 0 degrees
135 * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for
136 * centering the 97/100 correction around 0 degrees K.
137 *
138 * Add 273 to Kelvin value to find degrees Celsius, for comparing current
139 * temperature with factory-measured temperatures when calculating txpower
140 * settings.
141 */
142#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
143#define TEMPERATURE_CALIB_A_VAL 259
144
145/* Limit range of calculated temperature to be between these Kelvin values */
146#define IWL_TX_POWER_TEMPERATURE_MIN (263)
147#define IWL_TX_POWER_TEMPERATURE_MAX (410)
148
149#define IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
150 (((t) < IWL_TX_POWER_TEMPERATURE_MIN) || \
151 ((t) > IWL_TX_POWER_TEMPERATURE_MAX))
152
153/********************* END TEMPERATURE ***************************************/
154
155/********************* START TXPOWER *****************************************/
156
157/**
158 * 4965 txpower calculations rely on information from three sources:
159 *
160 * 1) EEPROM
161 * 2) "initialize" alive notification
162 * 3) statistics notifications
163 *
164 * EEPROM data consists of:
165 *
166 * 1) Regulatory information (max txpower and channel usage flags) is provided
167 * separately for each channel that can possibly supported by 4965.
168 * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
169 * (legacy) channels.
170 *
171 * See struct iwl4965_eeprom_channel for format, and struct iwl4965_eeprom
172 * for locations in EEPROM.
173 *
174 * 2) Factory txpower calibration information is provided separately for
175 * sub-bands of contiguous channels. 2.4GHz has just one sub-band,
176 * but 5 GHz has several sub-bands.
177 *
178 * In addition, per-band (2.4 and 5 Ghz) saturation txpowers are provided.
179 *
180 * See struct iwl4965_eeprom_calib_info (and the tree of structures
181 * contained within it) for format, and struct iwl4965_eeprom for
182 * locations in EEPROM.
183 *
184 * "Initialization alive" notification (see struct iwl4965_init_alive_resp)
185 * consists of:
186 *
187 * 1) Temperature calculation parameters.
188 *
189 * 2) Power supply voltage measurement.
190 *
191 * 3) Tx gain compensation to balance 2 transmitters for MIMO use.
192 *
193 * Statistics notifications deliver:
194 *
195 * 1) Current values for temperature param R4.
196 */
197
198/**
199 * To calculate a txpower setting for a given desired target txpower, channel,
200 * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
201 * support MIMO and transmit diversity), driver must do the following:
202 *
203 * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
204 * Do not exceed regulatory limit; reduce target txpower if necessary.
205 *
206 * If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
207 * 2 transmitters will be used simultaneously; driver must reduce the
208 * regulatory limit by 3 dB (half-power) for each transmitter, so the
209 * combined total output of the 2 transmitters is within regulatory limits.
210 *
211 *
212 * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by
213 * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]);
214 * reduce target txpower if necessary.
215 *
216 * Backoff values below are in 1/2 dB units (equivalent to steps in
217 * txpower gain tables):
218 *
219 * OFDM 6 - 36 MBit: 10 steps (5 dB)
220 * OFDM 48 MBit: 15 steps (7.5 dB)
221 * OFDM 54 MBit: 17 steps (8.5 dB)
222 * OFDM 60 MBit: 20 steps (10 dB)
223 * CCK all rates: 10 steps (5 dB)
224 *
225 * Backoff values apply to saturation txpower on a per-transmitter basis;
226 * when using MIMO (2 transmitters), each transmitter uses the same
227 * saturation level provided in EEPROM, and the same backoff values;
228 * no reduction (such as with regulatory txpower limits) is required.
229 *
230 * Saturation and Backoff values apply equally to 20 Mhz (legacy) channel
231 * widths and 40 Mhz (.11n HT40) channel widths; there is no separate
232 * factory measurement for ht40 channels.
233 *
234 * The result of this step is the final target txpower. The rest of
235 * the steps figure out the proper settings for the device to achieve
236 * that target txpower.
237 *
238 *
239 * 3) Determine (EEPROM) calibration sub band for the target channel, by
240 * comparing against first and last channels in each sub band
241 * (see struct iwl4965_eeprom_calib_subband_info).
242 *
243 *
244 * 4) Linearly interpolate (EEPROM) factory calibration measurement sets,
245 * referencing the 2 factory-measured (sample) channels within the sub band.
246 *
247 * Interpolation is based on difference between target channel's frequency
248 * and the sample channels' frequencies. Since channel numbers are based
249 * on frequency (5 MHz between each channel number), this is equivalent
250 * to interpolating based on channel number differences.
251 *
252 * Note that the sample channels may or may not be the channels at the
253 * edges of the sub band. The target channel may be "outside" of the
254 * span of the sampled channels.
255 *
256 * Driver may choose the pair (for 2 Tx chains) of measurements (see
257 * struct iwl4965_eeprom_calib_ch_info) for which the actual measured
258 * txpower comes closest to the desired txpower. Usually, though,
259 * the middle set of measurements is closest to the regulatory limits,
260 * and is therefore a good choice for all txpower calculations (this
261 * assumes that high accuracy is needed for maximizing legal txpower,
262 * while lower txpower configurations do not need as much accuracy).
263 *
264 * Driver should interpolate both members of the chosen measurement pair,
265 * i.e. for both Tx chains (radio transmitters), unless the driver knows
266 * that only one of the chains will be used (e.g. only one tx antenna
267 * connected, but this should be unusual). The rate scaling algorithm
268 * switches antennas to find best performance, so both Tx chains will
269 * be used (although only one at a time) even for non-MIMO transmissions.
270 *
271 * Driver should interpolate factory values for temperature, gain table
272 * index, and actual power. The power amplifier detector values are
273 * not used by the driver.
274 *
275 * Sanity check: If the target channel happens to be one of the sample
276 * channels, the results should agree with the sample channel's
277 * measurements!
278 *
279 *
280 * 5) Find difference between desired txpower and (interpolated)
281 * factory-measured txpower. Using (interpolated) factory gain table index
282 * (shown elsewhere) as a starting point, adjust this index lower to
283 * increase txpower, or higher to decrease txpower, until the target
284 * txpower is reached. Each step in the gain table is 1/2 dB.
285 *
286 * For example, if factory measured txpower is 16 dBm, and target txpower
287 * is 13 dBm, add 6 steps to the factory gain index to reduce txpower
288 * by 3 dB.
289 *
290 *
291 * 6) Find difference between current device temperature and (interpolated)
292 * factory-measured temperature for sub-band. Factory values are in
293 * degrees Celsius. To calculate current temperature, see comments for
294 * "4965 temperature calculation".
295 *
296 * If current temperature is higher than factory temperature, driver must
297 * increase gain (lower gain table index), and vice verse.
298 *
299 * Temperature affects gain differently for different channels:
300 *
301 * 2.4 GHz all channels: 3.5 degrees per half-dB step
302 * 5 GHz channels 34-43: 4.5 degrees per half-dB step
303 * 5 GHz channels >= 44: 4.0 degrees per half-dB step
304 *
305 * NOTE: Temperature can increase rapidly when transmitting, especially
306 * with heavy traffic at high txpowers. Driver should update
307 * temperature calculations often under these conditions to
308 * maintain strong txpower in the face of rising temperature.
309 *
310 *
311 * 7) Find difference between current power supply voltage indicator
312 * (from "initialize alive") and factory-measured power supply voltage
313 * indicator (EEPROM).
314 *
315 * If the current voltage is higher (indicator is lower) than factory
316 * voltage, gain should be reduced (gain table index increased) by:
317 *
318 * (eeprom - current) / 7
319 *
320 * If the current voltage is lower (indicator is higher) than factory
321 * voltage, gain should be increased (gain table index decreased) by:
322 *
323 * 2 * (current - eeprom) / 7
324 *
325 * If number of index steps in either direction turns out to be > 2,
326 * something is wrong ... just use 0.
327 *
328 * NOTE: Voltage compensation is independent of band/channel.
329 *
330 * NOTE: "Initialize" uCode measures current voltage, which is assumed
331 * to be constant after this initial measurement. Voltage
332 * compensation for txpower (number of steps in gain table)
333 * may be calculated once and used until the next uCode bootload.
334 *
335 *
336 * 8) If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
337 * adjust txpower for each transmitter chain, so txpower is balanced
338 * between the two chains. There are 5 pairs of tx_atten[group][chain]
339 * values in "initialize alive", one pair for each of 5 channel ranges:
340 *
341 * Group 0: 5 GHz channel 34-43
342 * Group 1: 5 GHz channel 44-70
343 * Group 2: 5 GHz channel 71-124
344 * Group 3: 5 GHz channel 125-200
345 * Group 4: 2.4 GHz all channels
346 *
347 * Add the tx_atten[group][chain] value to the index for the target chain.
348 * The values are signed, but are in pairs of 0 and a non-negative number,
349 * so as to reduce gain (if necessary) of the "hotter" channel. This
350 * avoids any need to double-check for regulatory compliance after
351 * this step.
352 *
353 *
354 * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation
355 * value to the index:
356 *
357 * Hardware rev B: 9 steps (4.5 dB)
358 * Hardware rev C: 5 steps (2.5 dB)
359 *
360 * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
361 * bits [3:2], 1 = B, 2 = C.
362 *
363 * NOTE: This compensation is in addition to any saturation backoff that
364 * might have been applied in an earlier step.
365 *
366 *
367 * 10) Select the gain table, based on band (2.4 vs 5 GHz).
368 *
369 * Limit the adjusted index to stay within the table!
370 *
371 *
372 * 11) Read gain table entries for DSP and radio gain, place into appropriate
373 * location(s) in command (struct iwl4965_txpowertable_cmd).
374 */
375
376/**
377 * When MIMO is used (2 transmitters operating simultaneously), driver should
378 * limit each transmitter to deliver a max of 3 dB below the regulatory limit
379 * for the device. That is, use half power for each transmitter, so total
380 * txpower is within regulatory limits.
381 *
382 * The value "6" represents number of steps in gain table to reduce power 3 dB.
383 * Each step is 1/2 dB.
384 */
385#define IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
386
387/**
388 * CCK gain compensation.
389 *
390 * When calculating txpowers for CCK, after making sure that the target power
391 * is within regulatory and saturation limits, driver must additionally
392 * back off gain by adding these values to the gain table index.
393 *
394 * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
395 * bits [3:2], 1 = B, 2 = C.
396 */
397#define IWL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
398#define IWL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
399
400/*
401 * 4965 power supply voltage compensation for txpower
402 */
403#define TX_POWER_IWL_VOLTAGE_CODES_PER_03V (7)
404
405/**
406 * Gain tables.
407 *
408 * The following tables contain pair of values for setting txpower, i.e.
409 * gain settings for the output of the device's digital signal processor (DSP),
410 * and for the analog gain structure of the transmitter.
411 *
412 * Each entry in the gain tables represents a step of 1/2 dB. Note that these
413 * are *relative* steps, not indications of absolute output power. Output
414 * power varies with temperature, voltage, and channel frequency, and also
415 * requires consideration of average power (to satisfy regulatory constraints),
416 * and peak power (to avoid distortion of the output signal).
417 *
418 * Each entry contains two values:
419 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
420 * linear value that multiplies the output of the digital signal processor,
421 * before being sent to the analog radio.
422 * 2) Radio gain. This sets the analog gain of the radio Tx path.
423 * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
424 *
425 * EEPROM contains factory calibration data for txpower. This maps actual
426 * measured txpower levels to gain settings in the "well known" tables
427 * below ("well-known" means here that both factory calibration *and* the
428 * driver work with the same table).
429 *
430 * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table
431 * has an extension (into negative indexes), in case the driver needs to
432 * boost power setting for high device temperatures (higher than would be
433 * present during factory calibration). A 5 Ghz EEPROM index of "40"
434 * corresponds to the 49th entry in the table used by the driver.
435 */
436#define MIN_TX_GAIN_INDEX (0) /* highest gain, lowest idx, 2.4 */
437#define MIN_TX_GAIN_INDEX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */
438
439/**
440 * 2.4 GHz gain table
441 *
442 * Index Dsp gain Radio gain
443 * 0 110 0x3f (highest gain)
444 * 1 104 0x3f
445 * 2 98 0x3f
446 * 3 110 0x3e
447 * 4 104 0x3e
448 * 5 98 0x3e
449 * 6 110 0x3d
450 * 7 104 0x3d
451 * 8 98 0x3d
452 * 9 110 0x3c
453 * 10 104 0x3c
454 * 11 98 0x3c
455 * 12 110 0x3b
456 * 13 104 0x3b
457 * 14 98 0x3b
458 * 15 110 0x3a
459 * 16 104 0x3a
460 * 17 98 0x3a
461 * 18 110 0x39
462 * 19 104 0x39
463 * 20 98 0x39
464 * 21 110 0x38
465 * 22 104 0x38
466 * 23 98 0x38
467 * 24 110 0x37
468 * 25 104 0x37
469 * 26 98 0x37
470 * 27 110 0x36
471 * 28 104 0x36
472 * 29 98 0x36
473 * 30 110 0x35
474 * 31 104 0x35
475 * 32 98 0x35
476 * 33 110 0x34
477 * 34 104 0x34
478 * 35 98 0x34
479 * 36 110 0x33
480 * 37 104 0x33
481 * 38 98 0x33
482 * 39 110 0x32
483 * 40 104 0x32
484 * 41 98 0x32
485 * 42 110 0x31
486 * 43 104 0x31
487 * 44 98 0x31
488 * 45 110 0x30
489 * 46 104 0x30
490 * 47 98 0x30
491 * 48 110 0x6
492 * 49 104 0x6
493 * 50 98 0x6
494 * 51 110 0x5
495 * 52 104 0x5
496 * 53 98 0x5
497 * 54 110 0x4
498 * 55 104 0x4
499 * 56 98 0x4
500 * 57 110 0x3
501 * 58 104 0x3
502 * 59 98 0x3
503 * 60 110 0x2
504 * 61 104 0x2
505 * 62 98 0x2
506 * 63 110 0x1
507 * 64 104 0x1
508 * 65 98 0x1
509 * 66 110 0x0
510 * 67 104 0x0
511 * 68 98 0x0
512 * 69 97 0
513 * 70 96 0
514 * 71 95 0
515 * 72 94 0
516 * 73 93 0
517 * 74 92 0
518 * 75 91 0
519 * 76 90 0
520 * 77 89 0
521 * 78 88 0
522 * 79 87 0
523 * 80 86 0
524 * 81 85 0
525 * 82 84 0
526 * 83 83 0
527 * 84 82 0
528 * 85 81 0
529 * 86 80 0
530 * 87 79 0
531 * 88 78 0
532 * 89 77 0
533 * 90 76 0
534 * 91 75 0
535 * 92 74 0
536 * 93 73 0
537 * 94 72 0
538 * 95 71 0
539 * 96 70 0
540 * 97 69 0
541 * 98 68 0
542 */
543
544/**
545 * 5 GHz gain table
546 *
547 * Index Dsp gain Radio gain
548 * -9 123 0x3F (highest gain)
549 * -8 117 0x3F
550 * -7 110 0x3F
551 * -6 104 0x3F
552 * -5 98 0x3F
553 * -4 110 0x3E
554 * -3 104 0x3E
555 * -2 98 0x3E
556 * -1 110 0x3D
557 * 0 104 0x3D
558 * 1 98 0x3D
559 * 2 110 0x3C
560 * 3 104 0x3C
561 * 4 98 0x3C
562 * 5 110 0x3B
563 * 6 104 0x3B
564 * 7 98 0x3B
565 * 8 110 0x3A
566 * 9 104 0x3A
567 * 10 98 0x3A
568 * 11 110 0x39
569 * 12 104 0x39
570 * 13 98 0x39
571 * 14 110 0x38
572 * 15 104 0x38
573 * 16 98 0x38
574 * 17 110 0x37
575 * 18 104 0x37
576 * 19 98 0x37
577 * 20 110 0x36
578 * 21 104 0x36
579 * 22 98 0x36
580 * 23 110 0x35
581 * 24 104 0x35
582 * 25 98 0x35
583 * 26 110 0x34
584 * 27 104 0x34
585 * 28 98 0x34
586 * 29 110 0x33
587 * 30 104 0x33
588 * 31 98 0x33
589 * 32 110 0x32
590 * 33 104 0x32
591 * 34 98 0x32
592 * 35 110 0x31
593 * 36 104 0x31
594 * 37 98 0x31
595 * 38 110 0x30
596 * 39 104 0x30
597 * 40 98 0x30
598 * 41 110 0x25
599 * 42 104 0x25
600 * 43 98 0x25
601 * 44 110 0x24
602 * 45 104 0x24
603 * 46 98 0x24
604 * 47 110 0x23
605 * 48 104 0x23
606 * 49 98 0x23
607 * 50 110 0x22
608 * 51 104 0x18
609 * 52 98 0x18
610 * 53 110 0x17
611 * 54 104 0x17
612 * 55 98 0x17
613 * 56 110 0x16
614 * 57 104 0x16
615 * 58 98 0x16
616 * 59 110 0x15
617 * 60 104 0x15
618 * 61 98 0x15
619 * 62 110 0x14
620 * 63 104 0x14
621 * 64 98 0x14
622 * 65 110 0x13
623 * 66 104 0x13
624 * 67 98 0x13
625 * 68 110 0x12
626 * 69 104 0x08
627 * 70 98 0x08
628 * 71 110 0x07
629 * 72 104 0x07
630 * 73 98 0x07
631 * 74 110 0x06
632 * 75 104 0x06
633 * 76 98 0x06
634 * 77 110 0x05
635 * 78 104 0x05
636 * 79 98 0x05
637 * 80 110 0x04
638 * 81 104 0x04
639 * 82 98 0x04
640 * 83 110 0x03
641 * 84 104 0x03
642 * 85 98 0x03
643 * 86 110 0x02
644 * 87 104 0x02
645 * 88 98 0x02
646 * 89 110 0x01
647 * 90 104 0x01
648 * 91 98 0x01
649 * 92 110 0x00
650 * 93 104 0x00
651 * 94 98 0x00
652 * 95 93 0x00
653 * 96 88 0x00
654 * 97 83 0x00
655 * 98 78 0x00
656 */
657
658
659/**
660 * Sanity checks and default values for EEPROM regulatory levels.
661 * If EEPROM values fall outside MIN/MAX range, use default values.
662 *
663 * Regulatory limits refer to the maximum average txpower allowed by
664 * regulatory agencies in the geographies in which the device is meant
665 * to be operated. These limits are SKU-specific (i.e. geography-specific),
666 * and channel-specific; each channel has an individual regulatory limit
667 * listed in the EEPROM.
668 *
669 * Units are in half-dBm (i.e. "34" means 17 dBm).
670 */
671#define IWL_TX_POWER_DEFAULT_REGULATORY_24 (34)
672#define IWL_TX_POWER_DEFAULT_REGULATORY_52 (34)
673#define IWL_TX_POWER_REGULATORY_MIN (0)
674#define IWL_TX_POWER_REGULATORY_MAX (34)
675
676/**
677 * Sanity checks and default values for EEPROM saturation levels.
678 * If EEPROM values fall outside MIN/MAX range, use default values.
679 *
680 * Saturation is the highest level that the output power amplifier can produce
681 * without significant clipping distortion. This is a "peak" power level.
682 * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
683 * require differing amounts of backoff, relative to their average power output,
684 * in order to avoid clipping distortion.
685 *
686 * Driver must make sure that it is violating neither the saturation limit,
687 * nor the regulatory limit, when calculating Tx power settings for various
688 * rates.
689 *
690 * Units are in half-dBm (i.e. "38" means 19 dBm).
691 */
692#define IWL_TX_POWER_DEFAULT_SATURATION_24 (38)
693#define IWL_TX_POWER_DEFAULT_SATURATION_52 (38)
694#define IWL_TX_POWER_SATURATION_MIN (20)
695#define IWL_TX_POWER_SATURATION_MAX (50)
696
697/**
698 * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
699 * and thermal Txpower calibration.
700 *
701 * When calculating txpower, driver must compensate for current device
702 * temperature; higher temperature requires higher gain. Driver must calculate
703 * current temperature (see "4965 temperature calculation"), then compare vs.
704 * factory calibration temperature in EEPROM; if current temperature is higher
705 * than factory temperature, driver must *increase* gain by proportions shown
706 * in table below. If current temperature is lower than factory, driver must
707 * *decrease* gain.
708 *
709 * Different frequency ranges require different compensation, as shown below.
710 */
711/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */
712#define CALIB_IWL_TX_ATTEN_GR1_FCH 34
713#define CALIB_IWL_TX_ATTEN_GR1_LCH 43
714
715/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */
716#define CALIB_IWL_TX_ATTEN_GR2_FCH 44
717#define CALIB_IWL_TX_ATTEN_GR2_LCH 70
718
719/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */
720#define CALIB_IWL_TX_ATTEN_GR3_FCH 71
721#define CALIB_IWL_TX_ATTEN_GR3_LCH 124
722
723/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */
724#define CALIB_IWL_TX_ATTEN_GR4_FCH 125
725#define CALIB_IWL_TX_ATTEN_GR4_LCH 200
726
727/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. */
728#define CALIB_IWL_TX_ATTEN_GR5_FCH 1
729#define CALIB_IWL_TX_ATTEN_GR5_LCH 20
730
731enum {
732 CALIB_CH_GROUP_1 = 0,
733 CALIB_CH_GROUP_2 = 1,
734 CALIB_CH_GROUP_3 = 2,
735 CALIB_CH_GROUP_4 = 3,
736 CALIB_CH_GROUP_5 = 4,
737 CALIB_CH_GROUP_MAX
738};
739
740/********************* END TXPOWER *****************************************/
741
742
743/**
744 * Tx/Rx Queues
745 *
746 * Most communication between driver and 4965 is via queues of data buffers.
747 * For example, all commands that the driver issues to device's embedded
748 * controller (uCode) are via the command queue (one of the Tx queues). All
749 * uCode command responses/replies/notifications, including Rx frames, are
750 * conveyed from uCode to driver via the Rx queue.
751 *
752 * Most support for these queues, including handshake support, resides in
753 * structures in host DRAM, shared between the driver and the device. When
754 * allocating this memory, the driver must make sure that data written by
755 * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
756 * cache memory), so DRAM and cache are consistent, and the device can
757 * immediately see changes made by the driver.
758 *
759 * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
760 * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
761 * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
762 */
763#define IWL49_NUM_FIFOS 7
764#define IWL49_CMD_FIFO_NUM 4
765#define IWL49_NUM_QUEUES 16
766#define IWL49_NUM_AMPDU_QUEUES 8
767
768
769/**
770 * struct iwl4965_schedq_bc_tbl
771 *
772 * Byte Count table
773 *
774 * Each Tx queue uses a byte-count table containing 320 entries:
775 * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
776 * duplicate the first 64 entries (to avoid wrap-around within a Tx window;
777 * max Tx window is 64 TFDs).
778 *
779 * When driver sets up a new TFD, it must also enter the total byte count
780 * of the frame to be transmitted into the corresponding entry in the byte
781 * count table for the chosen Tx queue. If the TFD index is 0-63, the driver
782 * must duplicate the byte count entry in corresponding index 256-319.
783 *
784 * padding puts each byte count table on a 1024-byte boundary;
785 * 4965 assumes tables are separated by 1024 bytes.
786 */
787struct iwl4965_scd_bc_tbl {
788 __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
789 u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
790} __packed;
791
792
793#define IWL4965_RTC_INST_LOWER_BOUND (0x000000)
794
795/* RSSI to dBm */
796#define IWL4965_RSSI_OFFSET 44
797
798/* PCI registers */
799#define PCI_CFG_RETRY_TIMEOUT 0x041
800
801/* PCI register values */
802#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
803#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
804
805#define IWL4965_DEFAULT_TX_RETRY 15
806
807/* EEPROM */
808#define IWL4965_FIRST_AMPDU_QUEUE 10
809
810
811#endif /* !__iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
deleted file mode 100644
index 6862fdcaee62..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-led.c
+++ /dev/null
@@ -1,73 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <net/mac80211.h>
36#include <linux/etherdevice.h>
37#include <asm/unaligned.h>
38
39#include "iwl-commands.h"
40#include "iwl-dev.h"
41#include "iwl-core.h"
42#include "iwl-io.h"
43#include "iwl-4965-led.h"
44
45/* Send led command */
46static int
47iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
48{
49 struct iwl_host_cmd cmd = {
50 .id = REPLY_LEDS_CMD,
51 .len = sizeof(struct iwl_led_cmd),
52 .data = led_cmd,
53 .flags = CMD_ASYNC,
54 .callback = NULL,
55 };
56 u32 reg;
57
58 reg = iwl_read32(priv, CSR_LED_REG);
59 if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
60 iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
61
62 return iwl_legacy_send_cmd(priv, &cmd);
63}
64
65/* Set led register off */
66void iwl4965_led_enable(struct iwl_priv *priv)
67{
68 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
69}
70
71const struct iwl_led_ops iwl4965_led_ops = {
72 .cmd = iwl4965_send_led_cmd,
73};
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.h b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
deleted file mode 100644
index 5ed3615fc338..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-led.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_4965_led_h__
28#define __iwl_4965_led_h__
29
30extern const struct iwl_led_ops iwl4965_led_ops;
31void iwl4965_led_enable(struct iwl_priv *priv);
32
33#endif /* __iwl_4965_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
deleted file mode 100644
index 2be6d9e3b019..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
+++ /dev/null
@@ -1,1194 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/etherdevice.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-4965-hw.h"
40#include "iwl-4965.h"
41#include "iwl-sta.h"
42
43void iwl4965_check_abort_status(struct iwl_priv *priv,
44 u8 frame_count, u32 status)
45{
46 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
47 IWL_ERR(priv, "Tx flush command to flush out all frames\n");
48 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
49 queue_work(priv->workqueue, &priv->tx_flush);
50 }
51}
52
/*
 * EEPROM
 */
/* Default module parameters for the 4965: 8K A-MSDU Rx buffers and
 * automatic firmware restart enabled; all other fields zero */
struct iwl_mod_params iwl4965_mod_params = {
	.amsdu_size_8K = 1,
	.restart_fw = 1,
	/* the rest are 0 by default */
};
61
/*
 * Reset the Rx queue to its initial state without freeing the queue
 * itself: unmap/free any pages still attached to the buffer pool, move
 * every pool entry back onto rx_used, clear the RBD queue pointers, and
 * zero the read/write indices.  Runs under rxq->lock.
 */
void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			__iwl_legacy_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* Detach all buffers from the RBD circular queue */
	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
93
/*
 * Program the device's Rx DMA channel 0: stop DMA, reset the write
 * pointer, point the hardware at the RBD ring and status area, then
 * re-enable DMA with the configured buffer size (4K or 8K depending on
 * the amsdu_size_8K module parameter).  Always returns 0.
 */
int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = 0;

	if (priv->cfg->mod_params->amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size|
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	return 0;
}
138
/* Select V_MAIN as the device power source via the APMG power control
 * register (the commented block documents how V_AUX would be chosen). */
static void iwl4965_set_pwr_vmain(struct iwl_priv *priv)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
					APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
			APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			~APMG_PS_CTRL_MSK_PWR_SRC);
}
155
/*
 * One-time NIC bring-up: run the cfg-provided APM init/config hooks,
 * select V_MAIN power, then allocate (or reset) the Rx queue and the
 * Tx/command queues and replenish Rx buffers.  Sets STATUS_INIT on
 * success.
 *
 * Returns 0 on success, -ENOMEM if Rx queue allocation fails, or the
 * error from Tx queue context allocation.
 */
int iwl4965_hw_nic_init(struct iwl_priv *priv)
{
	unsigned long flags;
	struct iwl_rx_queue *rxq = &priv->rxq;
	int ret;

	/* nic_init */
	spin_lock_irqsave(&priv->lock, flags);
	priv->cfg->ops->lib->apm_ops.init(priv);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&priv->lock, flags);

	iwl4965_set_pwr_vmain(priv);

	priv->cfg->ops->lib->apm_ops.config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		ret = iwl_legacy_rx_queue_alloc(priv);
		if (ret) {
			IWL_ERR(priv, "Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		iwl4965_rx_queue_reset(priv, rxq);

	iwl4965_rx_replenish(priv);

	iwl4965_rx_init(priv, rxq);

	spin_lock_irqsave(&priv->lock, flags);

	rxq->need_update = 1;
	iwl_legacy_rx_queue_update_write_ptr(priv, rxq);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Allocate or reset and init all Tx and Command queues */
	if (!priv->txq) {
		ret = iwl4965_txq_ctx_alloc(priv);
		if (ret)
			return ret;
	} else
		iwl4965_txq_ctx_reset(priv);

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}
208
209/**
210 * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
211 */
212static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
213 dma_addr_t dma_addr)
214{
215 return cpu_to_le32((u32)(dma_addr >> 8));
216}
217
/**
 * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
void iwl4965_rx_queue_restock(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv,
							rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(priv->workqueue, &priv->rx_replenish);


	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
	}
}
270
/**
 * iwl4965_rx_allocate - allocate and DMA-map pages for all rx_used slots
 *
 * Moves buffers from rx_used to rx_free, allocating a receive page
 * (order priv->hw_params.rx_page_order) for each slot and mapping it
 * for device DMA.  @priority is the base GFP mask (GFP_KERNEL from the
 * replenish work item, GFP_ATOMIC from the interrupt path); allocation
 * warnings are suppressed while the free pool is above the low
 * watermark.  Queue restocking is done by the callers via
 * iwl4965_rx_queue_restock().
 */
static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		/* Stop once no used slot is waiting for a buffer */
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (priv->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(priv, "alloc_pages failed, "
					       "order: %d\n",
					       priv->hw_params.rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(priv,
					"Failed to alloc_pages with %s. "
					"Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
						 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		/* rx_used may have drained while we allocated unlocked;
		 * give the page back if so */
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, priv->hw_params.rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
		priv->alloc_rxb_page++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
357
/* Process-context replenish: allocate buffers with GFP_KERNEL, then
 * restock the hardware queue under priv->lock.  Called from the
 * rx_replenish work item and during initialization. */
void iwl4965_rx_replenish(struct iwl_priv *priv)
{
	unsigned long flags;

	iwl4965_rx_allocate(priv, GFP_KERNEL);

	spin_lock_irqsave(&priv->lock, flags);
	iwl4965_rx_queue_restock(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}
368
/* Atomic-context replenish: GFP_ATOMIC allocation followed by an
 * immediate restock (no priv->lock taken here, unlike
 * iwl4965_rx_replenish — caller context differs). */
void iwl4965_rx_replenish_now(struct iwl_priv *priv)
{
	iwl4965_rx_allocate(priv, GFP_ATOMIC);

	iwl4965_rx_queue_restock(priv);
}
375
/* Free the Rx queue entirely: walk the buffer pool, unmapping and
 * freeing every page still attached (entries whose page was detached
 * must have been set to NULL), then release the coherent DMA memory for
 * the RBD ring and the rb_stts status area.
 * (The original comment spoke of SKBs; this implementation tracks
 * pages in rxq->pool[i].page.) */
void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			__iwl_legacy_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
	dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}
401
/* Stop Rx DMA channel 0 and poll (up to 1000 us) for the channel-idle
 * status bit.  Always returns 0; the poll result is not checked. */
int iwl4965_rxq_stop(struct iwl_priv *priv)
{

	/* stop Rx DMA */
	iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);

	return 0;
}
412
413int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
414{
415 int idx = 0;
416 int band_offset = 0;
417
418 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
419 if (rate_n_flags & RATE_MCS_HT_MSK) {
420 idx = (rate_n_flags & 0xff);
421 return idx;
422 /* Legacy rate format, search for match in table */
423 } else {
424 if (band == IEEE80211_BAND_5GHZ)
425 band_offset = IWL_FIRST_OFDM_RATE;
426 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
427 if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
428 return idx - band_offset;
429 }
430
431 return -1;
432}
433
/*
 * Compute the received signal strength in dBm from the PHY result's
 * non-configuration data: take the max DSP RSSI over the valid antenna
 * chains, then subtract the AGC gain and the fixed 4965 offset.
 */
static int iwl4965_calc_rssi(struct iwl_priv *priv,
			     struct iwl_rx_phy_res *rx_resp)
{
	/* data from PHY/DSP regarding signal strength, etc.,
	 *   contents are always there, not configurable by host.  */
	struct iwl4965_rx_non_cfg_phy *ncphy =
	    (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
	u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
			>> IWL49_AGC_DB_POS;

	u32 valid_antennae =
	    (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
			>> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
	u8 max_rssi = 0;
	u32 i;

	/* Find max rssi among 3 possible receivers.
	 * These values are measured by the digital signal processor (DSP).
	 * They should stay fairly constant even as the signal strength varies,
	 *   if the radio's automatic gain control (AGC) is working right.
	 * AGC value (see below) will provide the "interesting" info. */
	for (i = 0; i < 3; i++)
		if (valid_antennae & (1 << i))
			max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);

	IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
		ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
		max_rssi, agc);

	/* dBm = max_rssi dB - agc dB - constant.
	 * Higher AGC (higher radio gain) means lower signal. */
	return max_rssi - agc - IWL4965_RSSI_OFFSET;
}
467
468
/*
 * Translate the MPDU-format decryption status bits (decrypt_in) into
 * the REPLY_RX-style status expected by the rest of the Rx path.
 * Copies the security type through, then classifies the result as
 * DECRYPT_OK, BAD_ICV_MIC or BAD_KEY_TTAK per algorithm.  Returns the
 * translated status word.
 */
static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
{
	u32 decrypt_out = 0;

	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
					RX_RES_STATUS_STATION_FOUND)
		decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
				RX_RES_STATUS_NO_STATION_INFO_MISMATCH);

	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);

	/* packet was not encrypted */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
					RX_RES_STATUS_SEC_TYPE_NONE)
		return decrypt_out;

	/* packet was encrypted with unknown alg */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
					RX_RES_STATUS_SEC_TYPE_ERR)
		return decrypt_out;

	/* decryption was not done in HW */
	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
					RX_MPDU_RES_STATUS_DEC_DONE_MSK)
		return decrypt_out;

	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {

	case RX_RES_STATUS_SEC_TYPE_CCMP:
		/* alg is CCM: check MIC only */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
			/* Bad MIC */
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;

		break;

	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
			/* Bad TTAK */
			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
			break;
		}
		/* fall through if TTAK OK */
	default:
		/* WEP and TKIP-with-good-TTAK share the ICV check */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	}

	IWL_DEBUG_RX(priv, "decrypt_in:0x%x  decrypt_out = 0x%x\n",
					decrypt_in, decrypt_out);

	return decrypt_out;
}
527
/*
 * Hand one received frame up to mac80211.
 *
 * Drops the frame if the interface is closed or if HW crypto marked the
 * decryption as failed (unless sw_crypto is set).  On success, builds a
 * zero-copy skb whose payload is a fragment of the Rx page, attaches
 * @stats as the RX control block, and transfers page ownership to the
 * skb (rxb->page is cleared so the buffer is not reused).
 */
static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv,
					struct ieee80211_hdr *hdr,
					u16 len,
					u32 ampdu_status,
					struct iwl_rx_mem_buffer *rxb,
					struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
		    "Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!priv->cfg->mod_params->sw_crypto &&
	    iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats))
		return;

	/* Small skb head; the payload lives in the page fragment */
	skb = dev_alloc_skb(128);
	if (!skb) {
		IWL_ERR(priv, "dev_alloc_skb failed\n");
		return;
	}

	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);

	iwl_legacy_update_stats(priv, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(priv->hw, skb);
	priv->alloc_rxb_page--;
	rxb->page = NULL;
}
565
566/* Called for REPLY_RX (legacy ABG frames), or
567 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
568void iwl4965_rx_reply_rx(struct iwl_priv *priv,
569 struct iwl_rx_mem_buffer *rxb)
570{
571 struct ieee80211_hdr *header;
572 struct ieee80211_rx_status rx_status;
573 struct iwl_rx_packet *pkt = rxb_addr(rxb);
574 struct iwl_rx_phy_res *phy_res;
575 __le32 rx_pkt_status;
576 struct iwl_rx_mpdu_res_start *amsdu;
577 u32 len;
578 u32 ampdu_status;
579 u32 rate_n_flags;
580
581 /**
582 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
583 * REPLY_RX: physical layer info is in this buffer
584 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
585 * command and cached in priv->last_phy_res
586 *
587 * Here we set up local variables depending on which command is
588 * received.
589 */
590 if (pkt->hdr.cmd == REPLY_RX) {
591 phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
592 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
593 + phy_res->cfg_phy_cnt);
594
595 len = le16_to_cpu(phy_res->byte_count);
596 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
597 phy_res->cfg_phy_cnt + len);
598 ampdu_status = le32_to_cpu(rx_pkt_status);
599 } else {
600 if (!priv->_4965.last_phy_res_valid) {
601 IWL_ERR(priv, "MPDU frame without cached PHY data\n");
602 return;
603 }
604 phy_res = &priv->_4965.last_phy_res;
605 amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
606 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
607 len = le16_to_cpu(amsdu->byte_count);
608 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
609 ampdu_status = iwl4965_translate_rx_status(priv,
610 le32_to_cpu(rx_pkt_status));
611 }
612
613 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
614 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
615 phy_res->cfg_phy_cnt);
616 return;
617 }
618
619 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
620 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
621 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
622 le32_to_cpu(rx_pkt_status));
623 return;
624 }
625
626 /* This will be used in several places later */
627 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
628
629 /* rx_status carries information about the packet to mac80211 */
630 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
631 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
632 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
633 rx_status.freq =
634 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
635 rx_status.band);
636 rx_status.rate_idx =
637 iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
638 rx_status.flag = 0;
639
640 /* TSF isn't reliable. In order to allow smooth user experience,
641 * this W/A doesn't propagate it to the mac80211 */
642 /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
643
644 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
645
646 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
647 rx_status.signal = iwl4965_calc_rssi(priv, phy_res);
648
649 iwl_legacy_dbg_log_rx_data_frame(priv, len, header);
650 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
651 rx_status.signal, (unsigned long long)rx_status.mactime);
652
653 /*
654 * "antenna number"
655 *
656 * It seems that the antenna field in the phy flags value
657 * is actually a bit field. This is undefined by radiotap,
658 * it wants an actual antenna number but I always get "7"
659 * for most legacy frames I receive indicating that the
660 * same frame was received on all three RX chains.
661 *
662 * I think this field should be removed in favor of a
663 * new 802.11n radiotap field "RX chains" that is defined
664 * as a bitmask.
665 */
666 rx_status.antenna =
667 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
668 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
669
670 /* set the preamble flag if appropriate */
671 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
672 rx_status.flag |= RX_FLAG_SHORTPRE;
673
674 /* Set up the HT phy flags */
675 if (rate_n_flags & RATE_MCS_HT_MSK)
676 rx_status.flag |= RX_FLAG_HT;
677 if (rate_n_flags & RATE_MCS_HT40_MSK)
678 rx_status.flag |= RX_FLAG_40MHZ;
679 if (rate_n_flags & RATE_MCS_SGI_MSK)
680 rx_status.flag |= RX_FLAG_SHORT_GI;
681
682 iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status,
683 rxb, &rx_status);
684}
685
/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD.
 * Marks the cached copy valid and stores the raw iwl_rx_phy_res. */
void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
			    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	priv->_4965.last_phy_res_valid = true;
	memcpy(&priv->_4965.last_phy_res, pkt->u.raw,
	       sizeof(struct iwl_rx_phy_res));
}
696
/*
 * Fill the scan command's channel array from the user's scan request.
 *
 * For each requested channel on @band that is valid per the driver's
 * channel table, emits one iwl_scan_channel entry into @scan_ch with
 * active/passive type, probe mask, dwell times and default tx power.
 *
 * Returns the number of channel entries written (0 if the band has no
 * supported hw mode or no usable channels).
 */
static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
				     struct ieee80211_vif *vif,
				     enum ieee80211_band band,
				     u8 is_active, u8 n_probes,
				     struct iwl_scan_channel *scan_ch)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct iwl_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;
	u16 channel;

	sband = iwl_get_hw_mode(priv, band);
	if (!sband)
		return 0;

	active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
	passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);

	/* passive dwell must exceed active dwell */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
		chan = priv->scan_request->channels[i];

		if (chan->band != band)
			continue;

		channel = chan->hw_value;
		scan_ch->channel = cpu_to_le16(channel);

		ch_info = iwl_legacy_get_channel_info(priv, band, channel);
		if (!iwl_legacy_is_channel_valid(ch_info)) {
			IWL_DEBUG_SCAN(priv,
				 "Channel %d is INVALID for this band.\n",
					channel);
			continue;
		}

		/* force passive scan on channels the regulatory data or
		 * the request marks passive */
		if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		else
			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;

		if (n_probes)
			scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);

		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;

		/* NOTE: if we were doing 6Mb OFDM for scans we'd use
		 * power level:
		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
		 */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));

		IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
			       channel, le32_to_cpu(scan_ch->type),
			       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
				"ACTIVE" : "PASSIVE",
			       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
			       active_dwell : passive_dwell);

		scan_ch++;
		added++;
	}

	IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
	return added;
}
776
/*
 * Build and send a REPLY_SCAN_CMD to the firmware.
 *
 * Allocates (once) and fills the scan command: quiet/suspend timing
 * (shortened when associated), direct-probe SSIDs from the scan
 * request, per-band tx rate, good-CRC threshold (firmware-workaround —
 * see comment below), tx/rx antenna selection, the probe request IE
 * payload, and the channel list.  Must be called with priv->mutex held.
 *
 * Returns 0 on success; -ENOMEM if the scan command buffer cannot be
 * allocated, -EIO on an invalid scan band or an empty channel list, or
 * the error from the synchronous host command.
 */
int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_CMD,
		.len = sizeof(struct iwl_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct iwl_scan_cmd *scan;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	u32 rate_flags = 0;
	u16 cmd_len;
	u16 rx_chain = 0;
	enum ieee80211_band band;
	u8 n_probes = 0;
	u8 rx_ant = priv->hw_params.valid_rx_ant;
	u8 rate;
	bool is_active = false;
	int chan_mod;
	u8 active_chains;
	u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
	int ret;

	lockdep_assert_held(&priv->mutex);

	if (vif)
		ctx = iwl_legacy_rxon_ctx_from_vif(vif);

	/* Lazily allocate the (huge) scan command buffer */
	if (!priv->scan_cmd) {
		priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
					 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
		if (!priv->scan_cmd) {
			IWL_DEBUG_SCAN(priv,
				       "fail to allocate memory for scan\n");
			return -ENOMEM;
		}
	}
	scan = priv->scan_cmd;
	memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;

	if (iwl_legacy_is_any_associated(priv)) {
		u16 interval;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
		interval = vif->bss_conf.beacon_int;

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;

		/* encode suspend time as beacons (<<22) + remainder usecs */
		extra = (suspend_time / interval) << 22;
		scan_suspend_time = (extra |
		    ((suspend_time % interval) * 1024));
		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
			       scan_suspend_time, interval);
	}

	if (priv->scan_request->n_ssids) {
		int i, p = 0;
		IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
		for (i = 0; i < priv->scan_request->n_ssids; i++) {
			/* always does wildcard anyway */
			if (!priv->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
				priv->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       priv->scan_request->ssids[i].ssid,
			       priv->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		IWL_DEBUG_SCAN(priv, "Start passive scan.\n");

	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = ctx->bcast_sta_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	switch (priv->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		chan_mod = le32_to_cpu(
			priv->contexts[IWL_RXON_CTX_BSS].active.flags &
						RXON_FLG_CHANNEL_MODE_MSK)
				       >> RXON_FLG_CHANNEL_MODE_POS;
		/* pure-40MHz channels cannot use CCK rates */
		if (chan_mod == CHANNEL_MODE_PURE_40) {
			rate = IWL_RATE_6M_PLCP;
		} else {
			rate = IWL_RATE_1M_PLCP;
			rate_flags = RATE_MCS_CCK_MSK;
		}
		break;
	case IEEE80211_BAND_5GHZ:
		rate = IWL_RATE_6M_PLCP;
		break;
	default:
		IWL_WARN(priv, "Invalid scan band\n");
		return -EIO;
	}

	/*
	 * If active scanning is requested but a certain channel is
	 * marked passive, we can do active scanning if we detect
	 * transmissions.
	 *
	 * There is an issue with some firmware versions that triggers
	 * a sysassert on a "good CRC threshold" of zero (== disabled),
	 * on a radar channel even though this means that we should NOT
	 * send probes.
	 *
	 * The "good CRC threshold" is the number of frames that we
	 * need to receive during our dwell time on a channel before
	 * sending out probes -- setting this to a huge value will
	 * mean we never reach it, but at the same time work around
	 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
	 * here instead of IWL_GOOD_CRC_TH_DISABLED.
	 */
	scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
					IWL_GOOD_CRC_TH_NEVER;

	band = priv->scan_band;

	if (priv->cfg->scan_rx_antennas[band])
		rx_ant = priv->cfg->scan_rx_antennas[band];

	/* rotate tx antenna between scans to even out use */
	priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv,
						priv->scan_tx_ant[band],
						    scan_tx_antennas);
	rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]);
	scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, rate_flags);

	/* In power save mode use one chain, otherwise use all chains */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* rx_ant has been set to all valid chains previously */
		active_chains = rx_ant &
				((u8)(priv->chain_noise_data.active_chains));
		if (!active_chains)
			active_chains = rx_ant;

		IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
				priv->chain_noise_data.active_chains);

		rx_ant = iwl4965_first_antenna(active_chains);
	}

	/* MIMO is not used here, but value is required */
	rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
	scan->rx_chain = cpu_to_le16(rx_chain);

	cmd_len = iwl_legacy_fill_probe_req(priv,
					(struct ieee80211_mgmt *)scan->data,
					vif->addr,
					priv->scan_request->ie,
					priv->scan_request->ie_len,
					IWL_MAX_SCAN_SIZE - sizeof(*scan));
	scan->tx_cmd.len = cpu_to_le16(cmd_len);

	scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
			       RXON_FILTER_BCON_AWARE_MSK);

	/* channel entries follow the probe request in scan->data */
	scan->channel_count = iwl4965_get_channels_for_scan(priv, vif, band,
						is_active, n_probes,
						(void *)&scan->data[cmd_len]);
	if (scan->channel_count == 0) {
		IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
		return -EIO;
	}

	cmd.len += le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct iwl_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	set_bit(STATUS_SCAN_HW, &priv->status);

	ret = iwl_legacy_send_cmd_sync(priv, &cmd);
	if (ret)
		clear_bit(STATUS_SCAN_HW, &priv->status);

	return ret;
}
971
972int iwl4965_manage_ibss_station(struct iwl_priv *priv,
973 struct ieee80211_vif *vif, bool add)
974{
975 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
976
977 if (add)
978 return iwl4965_add_bssid_station(priv, vif_priv->ctx,
979 vif->bss_conf.bssid,
980 &vif_priv->ibss_bssid_sta_id);
981 return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
982 vif->bss_conf.bssid);
983}
984
985void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
986 int sta_id, int tid, int freed)
987{
988 lockdep_assert_held(&priv->sta_lock);
989
990 if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
991 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
992 else {
993 IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
994 priv->stations[sta_id].tid[tid].tfds_in_queue,
995 freed);
996 priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
997 }
998}
999
1000#define IWL_TX_QUEUE_MSK 0xfffff
1001
1002static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv)
1003{
1004 return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
1005 priv->current_ht_config.single_chain_sufficient;
1006}
1007
1008#define IWL_NUM_RX_CHAINS_MULTIPLE 3
1009#define IWL_NUM_RX_CHAINS_SINGLE 2
1010#define IWL_NUM_IDLE_CHAINS_DUAL 2
1011#define IWL_NUM_IDLE_CHAINS_SINGLE 1
1012
1013/*
1014 * Determine how many receiver/antenna chains to use.
1015 *
1016 * More provides better reception via diversity. Fewer saves power
1017 * at the expense of throughput, but only when not in powersave to
1018 * start with.
1019 *
1020 * MIMO (dual stream) requires at least 2, but works better with 3.
1021 * This does not determine *which* chains to use, just how many.
1022 */
1023static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv)
1024{
1025 /* # of Rx chains to use when expecting MIMO. */
1026 if (iwl4965_is_single_rx_stream(priv))
1027 return IWL_NUM_RX_CHAINS_SINGLE;
1028 else
1029 return IWL_NUM_RX_CHAINS_MULTIPLE;
1030}
1031
1032/*
1033 * When we are in power saving mode, unless device support spatial
1034 * multiplexing power save, use the active count for rx chain count.
1035 */
1036static int
1037iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
1038{
1039 /* # Rx chains when idling, depending on SMPS mode */
1040 switch (priv->current_ht_config.smps) {
1041 case IEEE80211_SMPS_STATIC:
1042 case IEEE80211_SMPS_DYNAMIC:
1043 return IWL_NUM_IDLE_CHAINS_SINGLE;
1044 case IEEE80211_SMPS_OFF:
1045 return active_cnt;
1046 default:
1047 WARN(1, "invalid SMPS mode %d",
1048 priv->current_ht_config.smps);
1049 return active_cnt;
1050 }
1051}
1052
1053/* up to 4 chains */
1054static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap)
1055{
1056 u8 res;
1057 res = (chain_bitmap & BIT(0)) >> 0;
1058 res += (chain_bitmap & BIT(1)) >> 1;
1059 res += (chain_bitmap & BIT(2)) >> 2;
1060 res += (chain_bitmap & BIT(3)) >> 3;
1061 return res;
1062}
1063
1064/**
1065 * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
1066 *
1067 * Selects how many and which Rx receivers/antennas/chains to use.
1068 * This should not be used for scan command ... it puts data in wrong place.
1069 */
void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	bool is_single = iwl4965_is_single_rx_stream(priv);
	/* CAM = "continuously aware mode": not in power-save (PMI clear) */
	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Tell uCode which antennas are actually connected.
	 * Before first association, we assume all antennas are connected.
	 * Just after first association, iwl4965_chain_noise_calibration()
	 *    checks which antennas actually *are* connected. */
	if (priv->chain_noise_data.active_chains)
		active_chains = priv->chain_noise_data.active_chains;
	else
		active_chains = priv->hw_params.valid_rx_ant;

	/* VALID field: bitmap of physically connected chains */
	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many receivers should we use? */
	active_rx_cnt = iwl4965_get_active_rx_chain_count(priv);
	idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt);


	/* correct rx chain count according hw settings
	 * and chain noise calibration
	 */
	valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	/* Fold the active/idle chain counts into the RXON rx_chain field */
	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;

	ctx->staging.rx_chain = cpu_to_le16(rx_chain);

	/* Force MIMO reception only when multiple streams are expected,
	 * enough chains are active, and we are not power-saving. */
	if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
		ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
			ctx->staging.rx_chain,
			active_rx_cnt, idle_rx_cnt);

	/* Sanity: at least one chain each, and idle never exceeds active */
	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}
1121
1122u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
1123{
1124 int i;
1125 u8 ind = ant;
1126
1127 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
1128 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
1129 if (valid & BIT(ind))
1130 return ind;
1131 }
1132 return ant;
1133}
1134
/*
 * Map a Flow Handler register address to a printable name for debug
 * dumps.  NOTE(review): relies on the IWL_CMD() macro defined elsewhere
 * — presumably it expands to "case X: return #X;" — verify against the
 * macro definition.  Unknown addresses yield "UNKNOWN".
 */
static const char *iwl4965_get_fh_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}
1151
/*
 * Dump the Flow Handler register values.
 *
 * @buf:     when @display is true (debug builds only), receives a
 *           kmalloc'd text buffer — ownership transfers to the caller,
 *           who must kfree() it.
 * @display: true = format into *buf and return the byte count;
 *           false = log each register via IWL_ERR and return 0.
 *
 * Returns bytes written, 0, or -ENOMEM on allocation failure.
 */
int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (display) {
		/* ~48 bytes per register line plus header slack */
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos += scnprintf(*buf + pos, bufsz - pos,
				"FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos += scnprintf(*buf + pos, bufsz - pos,
				" %34s: 0X%08x\n",
				iwl4965_get_fh_string(fh_tbl[i]),
				iwl_legacy_read_direct32(priv, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IWL_ERR(priv, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IWL_ERR(priv, " %34s: 0X%08x\n",
			iwl4965_get_fh_string(fh_tbl[i]),
			iwl_legacy_read_direct32(priv, fh_tbl[i]));
	}
	return 0;
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
deleted file mode 100644
index 57ebe214e68c..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
+++ /dev/null
@@ -1,2871 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26#include <linux/kernel.h>
27#include <linux/init.h>
28#include <linux/skbuff.h>
29#include <linux/slab.h>
30#include <net/mac80211.h>
31
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/delay.h>
35
36#include <linux/workqueue.h>
37
38#include "iwl-dev.h"
39#include "iwl-sta.h"
40#include "iwl-core.h"
41#include "iwl-4965.h"
42
43#define IWL4965_RS_NAME "iwl-4965-rs"
44
45#define NUM_TRY_BEFORE_ANT_TOGGLE 1
46#define IWL_NUMBER_TRY 1
47#define IWL_HT_NUMBER_TRY 3
48
49#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
50#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
51#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
52
53/* max allowed rate miss before sync LQ cmd */
54#define IWL_MISSED_RATE_MAX 15
/* max time to accumulate history: 3 seconds (value is 3*HZ below) */
56#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
57
/*
 * Fallback map from an HT rate index to the legacy rate index to use
 * when leaving HT operation; the lowest entries all collapse to 6M.
 */
static u8 rs_ht_to_legacy[] = {
	IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
	IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
	IWL_RATE_6M_INDEX,
	IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
	IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
	IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
	IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
};
67
/* Next antenna configuration to try, indexed by the current ANT_*
 * bitmask (the per-entry comments show the mapping). */
static const u8 ant_toggle_lookup[] = {
	/*ANT_NONE -> */ ANT_NONE,
	/*ANT_A    -> */ ANT_B,
	/*ANT_B    -> */ ANT_C,
	/*ANT_AB   -> */ ANT_BC,
	/*ANT_C    -> */ ANT_A,
	/*ANT_AC   -> */ ANT_AB,
	/*ANT_BC   -> */ ANT_AC,
	/*ANT_ABC  -> */ ANT_ABC,
};
78
79#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
80 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
81 IWL_RATE_SISO_##s##M_PLCP, \
82 IWL_RATE_MIMO2_##s##M_PLCP,\
83 IWL_RATE_##r##M_IEEE, \
84 IWL_RATE_##ip##M_INDEX, \
85 IWL_RATE_##in##M_INDEX, \
86 IWL_RATE_##rp##M_INDEX, \
87 IWL_RATE_##rn##M_INDEX, \
88 IWL_RATE_##pp##M_INDEX, \
89 IWL_RATE_##np##M_INDEX }
90
91/*
92 * Parameter order:
93 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
94 *
95 * If there isn't a valid next or previous rate then INV is used which
96 * maps to IWL_RATE_INVALID
97 *
98 */
99const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT] = {
100 IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
101 IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
102 IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
103 IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
104 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
105 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
106 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
107 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
108 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
109 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
110 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
111 IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
112 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
113};
114
/*
 * Convert a uCode rate_n_flags value back to a driver rate-table index.
 *
 * NOTE(review): returns -1 when no match is found, while the caller
 * iwl4965_rs_get_tbl_info_from_mcs() compares the result against
 * IWL_RATE_INVALID — verify those two sentinels actually agree.
 */
static int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
{
	int idx = 0;

	/* HT rate format */
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		idx = (rate_n_flags & 0xff);

		/* MIMO2 MCS values fold onto the same index range as SISO */
		if (idx >= IWL_RATE_MIMO2_6M_PLCP)
			idx = idx - IWL_RATE_MIMO2_6M_PLCP;

		idx += IWL_FIRST_OFDM_RATE;
		/* skip 9M not supported in ht*/
		if (idx >= IWL_RATE_9M_INDEX)
			idx += 1;
		if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
			return idx;

	/* legacy rate format, search for match in table */
	} else {
		for (idx = 0; idx < ARRAY_SIZE(iwlegacy_rates); idx++)
			if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
				return idx;
	}

	return -1;
}
142
143static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
144 struct sk_buff *skb,
145 struct ieee80211_sta *sta,
146 struct iwl_lq_sta *lq_sta);
147static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
148 struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
149static void iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta,
150 bool force_search);
151
152#ifdef CONFIG_MAC80211_DEBUGFS
153static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
154 u32 *rate_n_flags, int index);
155#else
156static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
157 u32 *rate_n_flags, int index)
158{}
159#endif
160
161/**
162 * The following tables contain the expected throughput metrics for all rates
163 *
164 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
165 *
166 * where invalid entries are zeros.
167 *
168 * CCK rates are only valid in legacy table and will only be used in G
169 * (2.4 GHz) band.
170 */
171
172static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
173 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
174};
175
176static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
177 {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */
178 {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */
179 {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */
180 {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
181};
182
183static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
184 {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
185 {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
186 {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
187 {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
188};
189
190static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
191 {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
192 {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
193 {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
194 {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
195};
196
197static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
198 {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
199 {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
200 {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
201 {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
202};
203
204/* mbps, mcs */
205static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
206 { "1", "BPSK DSSS"},
207 { "2", "QPSK DSSS"},
208 {"5.5", "BPSK CCK"},
209 { "11", "QPSK CCK"},
210 { "6", "BPSK 1/2"},
211 { "9", "BPSK 1/2"},
212 { "12", "QPSK 1/2"},
213 { "18", "QPSK 3/4"},
214 { "24", "16QAM 1/2"},
215 { "36", "16QAM 3/4"},
216 { "48", "64QAM 2/3"},
217 { "54", "64QAM 3/4"},
218 { "60", "64QAM 5/6"},
219};
220
221#define MCS_INDEX_PER_STREAM (8)
222
223static inline u8 iwl4965_rs_extract_rate(u32 rate_n_flags)
224{
225 return (u8)(rate_n_flags & 0xFF);
226}
227
228static void
229iwl4965_rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
230{
231 window->data = 0;
232 window->success_counter = 0;
233 window->success_ratio = IWL_INVALID_VALUE;
234 window->counter = 0;
235 window->average_tpt = IWL_INVALID_VALUE;
236 window->stamp = 0;
237}
238
239static inline u8 iwl4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
240{
241 return (ant_type & valid_antenna) == ant_type;
242}
243
244/*
245 * removes the old data from the statistics. All data that is older than
246 * TID_MAX_TIME_DIFF, will be deleted.
247 */
static void
iwl4965_rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
{
	/* The oldest age we want to keep */
	u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;

	/* Pop cells off the head of the circular packet-count buffer until
	 * the remaining history fits within the retention window; each cell
	 * covers TID_QUEUE_CELL_SPACING ms and the head wraps at
	 * TID_QUEUE_MAX_SIZE. */
	while (tl->queue_count &&
	       (tl->time_stamp < oldest_time)) {
		tl->total -= tl->packet_count[tl->head];
		tl->packet_count[tl->head] = 0;
		tl->time_stamp += TID_QUEUE_CELL_SPACING;
		tl->queue_count--;
		tl->head++;
		if (tl->head >= TID_QUEUE_MAX_SIZE)
			tl->head = 0;
	}
}
265
266/*
267 * increment traffic load value for tid and also remove
268 * any old values if passed the certain time period
269 */
/*
 * Account one QoS-data packet in the per-TID traffic-load ring buffer.
 * Returns the packet's TID, or MAX_TID_COUNT for non-QoS frames, out of
 * range TIDs, or the very first packet of a TID (which only initializes
 * the statistics).
 */
static u8 iwl4965_rs_tl_add_packet(struct iwl_lq_sta *lq_data,
				   struct ieee80211_hdr *hdr)
{
	u32 curr_time = jiffies_to_msecs(jiffies);
	u32 time_diff;
	s32 index;
	struct iwl_traffic_load *tl = NULL;
	u8 tid;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	} else
		return MAX_TID_COUNT;

	if (unlikely(tid >= TID_MAX_LOAD_COUNT))
		return MAX_TID_COUNT;

	tl = &lq_data->load[tid];

	/* Round the timestamp down to the start of its time cell */
	curr_time -= curr_time % TID_ROUND_VALUE;

	/* Happens only for the first packet. Initialize the data */
	if (!(tl->queue_count)) {
		tl->total = 1;
		tl->time_stamp = curr_time;
		tl->queue_count = 1;
		tl->head = 0;
		tl->packet_count[0] = 1;
		return MAX_TID_COUNT;
	}

	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
	index = time_diff / TID_QUEUE_CELL_SPACING;

	/* The history is too long: remove data that is older than */
	/* TID_MAX_TIME_DIFF */
	if (index >= TID_QUEUE_MAX_SIZE)
		iwl4965_rs_tl_rm_old_stats(tl, curr_time);

	/* Bump the count in this packet's time cell (ring-buffer indexed) */
	index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
	tl->packet_count[index] = tl->packet_count[index] + 1;
	tl->total = tl->total + 1;

	if ((index + 1) > tl->queue_count)
		tl->queue_count = index + 1;

	return tid;
}
319
320/*
321 get the traffic load value for tid
322*/
static u32 iwl4965_rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
{
	u32 curr_time = jiffies_to_msecs(jiffies);
	u32 time_diff;
	s32 index;
	struct iwl_traffic_load *tl = NULL;

	/* Out-of-range TID or no history yet => zero load */
	if (tid >= TID_MAX_LOAD_COUNT)
		return 0;

	tl = &(lq_data->load[tid]);

	/* Round the timestamp down to the start of its time cell */
	curr_time -= curr_time % TID_ROUND_VALUE;

	if (!(tl->queue_count))
		return 0;

	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
	index = time_diff / TID_QUEUE_CELL_SPACING;

	/* The history is too long: remove data that is older than */
	/* TID_MAX_TIME_DIFF */
	if (index >= TID_QUEUE_MAX_SIZE)
		iwl4965_rs_tl_rm_old_stats(tl, curr_time);

	/* Total packets remaining in the retained history window */
	return tl->total;
}
350
/*
 * Try to start a Tx BlockAck (aggregation) session for @tid when the
 * measured traffic load exceeds IWL_AGG_LOAD_THRESHOLD.
 * Returns 0/err from ieee80211_start_tx_ba_session(), or -EAGAIN when
 * the load is too low or mac80211 asked us to retry.
 *
 * NOTE(review): the load-too-low branch logs at IWL_ERR severity even
 * though it occurs in normal operation — consider demoting it to a
 * debug-level message.
 */
static int iwl4965_rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
				      struct iwl_lq_sta *lq_data, u8 tid,
				      struct ieee80211_sta *sta)
{
	int ret = -EAGAIN;
	u32 load;

	load = iwl4965_rs_tl_get_load(lq_data, tid);

	if (load > IWL_AGG_LOAD_THRESHOLD) {
		IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
				sta->addr, tid);
		ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
		if (ret == -EAGAIN) {
			/*
			 * driver and mac80211 is out of sync
			 * this might be cause by reloading firmware
			 * stop the tx ba session here
			 */
			IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
				tid);
			ieee80211_stop_tx_ba_session(sta, tid);
		}
	} else {
		IWL_ERR(priv, "Aggregation not enabled for tid %d "
			"because load = %u\n", tid, load);
	}
	return ret;
}
380
381static void iwl4965_rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
382 struct iwl_lq_sta *lq_data,
383 struct ieee80211_sta *sta)
384{
385 if (tid < TID_MAX_LOAD_COUNT)
386 iwl4965_rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
387 else
388 IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
389 tid, TID_MAX_LOAD_COUNT);
390}
391
392static inline int iwl4965_get_iwl4965_num_of_ant_from_rate(u32 rate_n_flags)
393{
394 return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
395 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
396 !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
397}
398
399/*
400 * Static function to get the expected throughput from an iwl_scale_tbl_info
401 * that wraps a NULL pointer check
402 */
403static s32
404iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
405{
406 if (tbl->expected_tpt)
407 return tbl->expected_tpt[rs_index];
408 return 0;
409}
410
411/**
412 * iwl4965_rs_collect_tx_data - Update the success/failure sliding window
413 *
414 * We keep a sliding window of the last 62 packets transmitted
415 * at this rate. window->data contains the bitmask of successful
416 * packets.
417 */
static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
				      int scale_index, int attempts, int successes)
{
	struct iwl_rate_scale_data *window = NULL;
	/* Bit marking the oldest slot in the 62-entry history bitmap */
	static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
	s32 fail_count, tpt;

	if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
		return -EINVAL;

	/* Select window for current tx bit rate */
	window = &(tbl->win[scale_index]);

	/* Get expected throughput */
	tpt = iwl4965_get_expected_tpt(tbl, scale_index);

	/*
	 * Keep track of only the latest 62 tx frame attempts in this rate's
	 * history window; anything older isn't really relevant any more.
	 * If we have filled up the sliding window, drop the oldest attempt;
	 * if the oldest attempt (highest bit in bitmap) shows "success",
	 * subtract "1" from the success counter (this is the main reason
	 * we keep these bitmaps!).
	 */
	while (attempts > 0) {
		if (window->counter >= IWL_RATE_MAX_WINDOW) {

			/* remove earliest */
			window->counter = IWL_RATE_MAX_WINDOW - 1;

			if (window->data & mask) {
				window->data &= ~mask;
				window->success_counter--;
			}
		}

		/* Increment frames-attempted counter */
		window->counter++;

		/* Shift bitmap by one frame to throw away oldest history */
		window->data <<= 1;

		/* Mark the most recent #successes attempts as successful */
		if (successes > 0) {
			window->success_counter++;
			window->data |= 0x1;
			successes--;
		}

		attempts--;
	}

	/* Calculate current success ratio, avoid divide-by-0!
	 * (percentage scaled by 128 for fixed-point precision) */
	if (window->counter > 0)
		window->success_ratio = 128 * (100 * window->success_counter)
					/ window->counter;
	else
		window->success_ratio = IWL_INVALID_VALUE;

	fail_count = window->counter - window->success_counter;

	/* Calculate average throughput, if we have enough history. */
	if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
	    (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
		window->average_tpt = (window->success_ratio * tpt + 64) / 128;
	else
		window->average_tpt = IWL_INVALID_VALUE;

	/* Tag this window as having been updated */
	window->stamp = jiffies;

	return 0;
}
491
492/*
493 * Fill uCode API rate_n_flags field, based on "search" or "active" table.
494 */
static u32 iwl4965_rate_n_flags_from_tbl(struct iwl_priv *priv,
					 struct iwl_scale_tbl_info *tbl,
					 int index, u8 use_green)
{
	u32 rate_n_flags = 0;

	/* Pick the PLCP value matching the table's modulation class */
	if (is_legacy(tbl->lq_type)) {
		rate_n_flags = iwlegacy_rates[index].plcp;
		if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
			rate_n_flags |= RATE_MCS_CCK_MSK;

	} else if (is_Ht(tbl->lq_type)) {
		/* Clamp out-of-range HT indices to the highest OFDM rate */
		if (index > IWL_LAST_OFDM_RATE) {
			IWL_ERR(priv, "Invalid HT rate index %d\n", index);
			index = IWL_LAST_OFDM_RATE;
		}
		rate_n_flags = RATE_MCS_HT_MSK;

		if (is_siso(tbl->lq_type))
			rate_n_flags |= iwlegacy_rates[index].plcp_siso;
		else
			rate_n_flags |= iwlegacy_rates[index].plcp_mimo2;
	} else {
		IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
	}

	/* Encode the antenna selection bits */
	rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
						RATE_MCS_ANT_ABC_MSK);

	/* HT-only modifiers: channel width, short GI, green-field */
	if (is_Ht(tbl->lq_type)) {
		if (tbl->is_ht40) {
			if (tbl->is_dup)
				rate_n_flags |= RATE_MCS_DUP_MSK;
			else
				rate_n_flags |= RATE_MCS_HT40_MSK;
		}
		if (tbl->is_SGI)
			rate_n_flags |= RATE_MCS_SGI_MSK;

		if (use_green) {
			rate_n_flags |= RATE_MCS_GF_MSK;
			/* GF + SGI is illegal for SISO; drop the SGI bit */
			if (is_siso(tbl->lq_type) && tbl->is_SGI) {
				rate_n_flags &= ~RATE_MCS_SGI_MSK;
				IWL_ERR(priv, "GF was set with SGI:SISO\n");
			}
		}
	}
	return rate_n_flags;
}
544
545/*
546 * Interpret uCode API's rate_n_flags format,
547 * fill "search" or "active" tx mode table.
548 */
static int iwl4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
					    enum ieee80211_band band,
					    struct iwl_scale_tbl_info *tbl,
					    int *rate_idx)
{
	u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
	u8 iwl4965_num_of_ant = iwl4965_get_iwl4965_num_of_ant_from_rate(rate_n_flags);
	u8 mcs;

	memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
	*rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags);

	/* NOTE(review): iwl4965_hwrate_to_plcp_idx() returns -1 on
	 * failure; verify IWL_RATE_INVALID matches that sentinel, or
	 * this error path may never trigger. */
	if (*rate_idx == IWL_RATE_INVALID) {
		*rate_idx = -1;
		return -EINVAL;
	}
	tbl->is_SGI = 0;	/* default legacy setup */
	tbl->is_ht40 = 0;
	tbl->is_dup = 0;
	tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
	tbl->lq_type = LQ_NONE;
	tbl->max_search = IWL_MAX_SEARCH;

	/* legacy rate format */
	if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
		if (iwl4965_num_of_ant == 1) {
			if (band == IEEE80211_BAND_5GHZ)
				tbl->lq_type = LQ_A;
			else
				tbl->lq_type = LQ_G;
		}
	/* HT rate format */
	} else {
		if (rate_n_flags & RATE_MCS_SGI_MSK)
			tbl->is_SGI = 1;

		if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
		    (rate_n_flags & RATE_MCS_DUP_MSK))
			tbl->is_ht40 = 1;

		if (rate_n_flags & RATE_MCS_DUP_MSK)
			tbl->is_dup = 1;

		mcs = iwl4965_rs_extract_rate(rate_n_flags);

		/* SISO */
		if (mcs <= IWL_RATE_SISO_60M_PLCP) {
			if (iwl4965_num_of_ant == 1)
				tbl->lq_type = LQ_SISO; /*else NONE*/
		/* MIMO2 */
		} else {
			if (iwl4965_num_of_ant == 2)
				tbl->lq_type = LQ_MIMO2;
		}
	}
	return 0;
}
606
607/* switch to another antenna/antennas and return 1 */
608/* if no other valid antenna found, return 0 */
/* switch to another antenna/antennas and return 1 */
/* if no other valid antenna found, return 0 */
static int iwl4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
				     struct iwl_scale_tbl_info *tbl)
{
	u8 new_ant_type;

	/* Nothing to do for an empty or out-of-range antenna mask */
	if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
		return 0;

	if (!iwl4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
		return 0;

	new_ant_type = ant_toggle_lookup[tbl->ant_type];

	/* Follow the toggle chain until we hit a valid configuration or
	 * loop back to where we started */
	while ((new_ant_type != tbl->ant_type) &&
	       !iwl4965_rs_is_valid_ant(valid_ant, new_ant_type))
		new_ant_type = ant_toggle_lookup[new_ant_type];

	if (new_ant_type == tbl->ant_type)
		return 0;

	/* Commit: update the table and patch the antenna bits in-place */
	tbl->ant_type = new_ant_type;
	*rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
	*rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
	return 1;
}
634
635/**
636 * Green-field mode is valid if the station supports it and
637 * there are no non-GF stations present in the BSS.
638 */
639static bool iwl4965_rs_use_green(struct ieee80211_sta *sta)
640{
641 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
642 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
643
644 return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
645 !(ctx->ht.non_gf_sta_present);
646}
647
648/**
649 * iwl4965_rs_get_supported_rates - get the available rates
650 *
651 * if management frame or broadcast frame only return
652 * basic available rates.
653 *
654 */
655static u16 iwl4965_rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
656 struct ieee80211_hdr *hdr,
657 enum iwl_table_type rate_type)
658{
659 if (is_legacy(rate_type)) {
660 return lq_sta->active_legacy_rate;
661 } else {
662 if (is_siso(rate_type))
663 return lq_sta->active_siso_rate;
664 else
665 return lq_sta->active_mimo2_rate;
666 }
667}
668
669static u16
670iwl4965_rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
671 int rate_type)
672{
673 u8 high = IWL_RATE_INVALID;
674 u8 low = IWL_RATE_INVALID;
675
676 /* 802.11A or ht walks to the next literal adjacent rate in
677 * the rate table */
678 if (is_a_band(rate_type) || !is_legacy(rate_type)) {
679 int i;
680 u32 mask;
681
682 /* Find the previous rate that is in the rate mask */
683 i = index - 1;
684 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
685 if (rate_mask & mask) {
686 low = i;
687 break;
688 }
689 }
690
691 /* Find the next rate that is in the rate mask */
692 i = index + 1;
693 for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
694 if (rate_mask & mask) {
695 high = i;
696 break;
697 }
698 }
699
700 return (high << 8) | low;
701 }
702
703 low = index;
704 while (low != IWL_RATE_INVALID) {
705 low = iwlegacy_rates[low].prev_rs;
706 if (low == IWL_RATE_INVALID)
707 break;
708 if (rate_mask & (1 << low))
709 break;
710 IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
711 }
712
713 high = index;
714 while (high != IWL_RATE_INVALID) {
715 high = iwlegacy_rates[high].next_rs;
716 if (high == IWL_RATE_INVALID)
717 break;
718 if (rate_mask & (1 << high))
719 break;
720 IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
721 }
722
723 return (high << 8) | low;
724}
725
/*
 * Compute the rate_n_flags value for the next-lower usable rate,
 * falling back from HT to legacy rates when HT is no longer possible
 * or we are already at the lowest HT index.  @tbl is updated in place
 * when that fallback happens.
 */
static u32 iwl4965_rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
				     struct iwl_scale_tbl_info *tbl,
				     u8 scale_index, u8 ht_possible)
{
	s32 low;
	u16 rate_mask;
	u16 high_low;
	u8 switch_to_legacy = 0;
	u8 is_green = lq_sta->is_green;
	struct iwl_priv *priv = lq_sta->drv;

	/* check if we need to switch from HT to legacy rates.
	 * assumption is that mandatory rates (1Mbps or 6Mbps)
	 * are always supported (spec demand) */
	if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
		switch_to_legacy = 1;
		scale_index = rs_ht_to_legacy[scale_index];
		if (lq_sta->band == IEEE80211_BAND_5GHZ)
			tbl->lq_type = LQ_A;
		else
			tbl->lq_type = LQ_G;

		/* Legacy Tx uses a single antenna */
		if (iwl4965_num_of_ant(tbl->ant_type) > 1)
			tbl->ant_type =
			    iwl4965_first_antenna(priv->hw_params.valid_tx_ant);

		tbl->is_ht40 = 0;
		tbl->is_SGI = 0;
		tbl->max_search = IWL_MAX_SEARCH;
	}

	rate_mask = iwl4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);

	/* Mask with station rate restriction */
	if (is_legacy(tbl->lq_type)) {
		/* supp_rates has no CCK bits in A mode */
		if (lq_sta->band == IEEE80211_BAND_5GHZ)
			rate_mask = (u16)(rate_mask &
			   (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
		else
			rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
	}

	/* If we switched from HT to legacy, check current rate */
	if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
		low = scale_index;
		goto out;
	}

	/* Low byte of the result is the next-lower supported rate */
	high_low = iwl4965_rs_get_adjacent_rate(lq_sta->drv,
					scale_index, rate_mask,
					tbl->lq_type);
	low = high_low & 0xff;

	/* No lower rate available: stay where we are */
	if (low == IWL_RATE_INVALID)
		low = scale_index;

out:
	return iwl4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
}
786
787/*
788 * Simple function to compare two rate scale table types
789 */
790static bool iwl4965_table_type_matches(struct iwl_scale_tbl_info *a,
791 struct iwl_scale_tbl_info *b)
792{
793 return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
794 (a->is_SGI == b->is_SGI);
795}
796
/*
 * mac80211 rate_control_ops Tx-status hook.
 *
 * Digest the ack/retry outcome of one transmitted frame into the
 * per-rate history windows of the matching rate table, then kick the
 * rate-scale algorithm to look for a better rate/mode.
 */
static void
iwl4965_rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
		     struct ieee80211_sta *sta, void *priv_sta,
		     struct sk_buff *skb)
{
	int legacy_success;
	int retries;
	int rs_index, mac_index, i;
	struct iwl_lq_sta *lq_sta = priv_sta;
	struct iwl_link_quality_cmd *table;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_priv *priv = (struct iwl_priv *)priv_r;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	enum mac80211_rate_control_flags mac_flags;
	u32 tx_rate;
	struct iwl_scale_tbl_info tbl_type;
	struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
	struct iwl_rxon_context *ctx = sta_priv->common.ctx;

	IWL_DEBUG_RATE_LIMIT(priv,
		"get frame ack response, update rate scale window\n");

	/* Treat uninitialized rate scaling data same as non-existing. */
	if (!lq_sta) {
		IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
		return;
	} else if (!lq_sta->drv) {
		IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
		return;
	}

	/* Only ack'able data frames carry useful rate-scale feedback */
	if (!ieee80211_is_data(hdr->frame_control) ||
	    info->flags & IEEE80211_TX_CTL_NO_ACK)
		return;

	/* This packet was aggregated but doesn't carry status info */
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
		return;

	/*
	 * Ignore this Tx frame response if its initial rate doesn't match
	 * that of latest Link Quality command.  There may be stragglers
	 * from a previous Link Quality command, but we're no longer interested
	 * in those; they're either from the "active" mode while we're trying
	 * to check "search" mode, or a prior "search" mode after we've moved
	 * to a new "search" mode (which might become the new "active" mode).
	 */
	table = &lq_sta->lq;
	/* First rate of the LQ command's retry table = initial Tx rate */
	tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
	iwl4965_rs_get_tbl_info_from_mcs(tx_rate,
			 priv->band, &tbl_type, &rs_index);
	if (priv->band == IEEE80211_BAND_5GHZ)
		rs_index -= IWL_FIRST_OFDM_RATE;
	mac_flags = info->status.rates[0].flags;
	mac_index = info->status.rates[0].idx;
	/* For HT packets, map MCS to PLCP */
	if (mac_flags & IEEE80211_TX_RC_MCS) {
		mac_index &= RATE_MCS_CODE_MSK;	/* Remove # of streams */
		if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
			mac_index++;
		/*
		 * mac80211 HT index is always zero-indexed; we need to move
		 * HT OFDM rates after CCK rates in 2.4 GHz band
		 */
		if (priv->band == IEEE80211_BAND_2GHZ)
			mac_index += IWL_FIRST_OFDM_RATE;
	}
	/* Here we actually compare this rate to the latest LQ command */
	if ((mac_index < 0) ||
	    (tbl_type.is_SGI !=
			!!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
	    (tbl_type.is_ht40 !=
			!!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
	    (tbl_type.is_dup !=
			!!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
	    (tbl_type.ant_type != info->antenna_sel_tx) ||
	    (!!(tx_rate & RATE_MCS_HT_MSK) !=
			!!(mac_flags & IEEE80211_TX_RC_MCS)) ||
	    (!!(tx_rate & RATE_MCS_GF_MSK) !=
			!!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
	    (rs_index != mac_index)) {
		IWL_DEBUG_RATE(priv,
		"initial rate %d does not match %d (0x%x)\n",
			 mac_index, rs_index, tx_rate);
		/*
		 * Since rates mis-match, the last LQ command may have failed.
		 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
		 * ... driver.
		 */
		lq_sta->missed_rate_counter++;
		if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
			lq_sta->missed_rate_counter = 0;
			iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq,
						CMD_ASYNC, false);
		}
		/* Regardless, ignore this status info for outdated rate */
		return;
	} else
		/* Rate did match, so reset the missed_rate_counter */
		lq_sta->missed_rate_counter = 0;

	/* Figure out if rate scale algorithm is in active or search table */
	if (iwl4965_table_type_matches(&tbl_type,
				&(lq_sta->lq_info[lq_sta->active_tbl]))) {
		curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
		other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
	} else if (iwl4965_table_type_matches(&tbl_type,
				&lq_sta->lq_info[1 - lq_sta->active_tbl])) {
		curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
		other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
	} else {
		/* Frame matched neither table; dump both for debugging */
		IWL_DEBUG_RATE(priv,
			"Neither active nor search matches tx rate\n");
		tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
		IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
			tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
		tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
		IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
			tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
		IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
			tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
		/*
		 * no matching table found, let's by-pass the data collection
		 * and continue to perform rate scale to find the rate table
		 */
		iwl4965_rs_stay_in_table(lq_sta, true);
		goto done;
	}

	/*
	 * Updating the frame history depends on whether packets were
	 * aggregated.
	 *
	 * For aggregation, all packets were transmitted at the same rate, the
	 * first index into rate scale table.
	 */
	if (info->flags & IEEE80211_TX_STAT_AMPDU) {
		tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
		iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
				&rs_index);
		/* Credit the whole A-MPDU's sent/acked counts at once */
		iwl4965_rs_collect_tx_data(curr_tbl, rs_index,
				   info->status.ampdu_len,
				   info->status.ampdu_ack_len);

		/* Update success/fail counts if not searching for new mode */
		if (lq_sta->stay_in_tbl) {
			lq_sta->total_success += info->status.ampdu_ack_len;
			lq_sta->total_failed += (info->status.ampdu_len -
					info->status.ampdu_ack_len);
		}
	} else {
	/*
	 * For legacy, update frame history with for each Tx retry.
	 */
		retries = info->status.rates[0].count - 1;
		/* HW doesn't send more than 15 retries */
		retries = min(retries, 15);

		/* The last transmission may have been successful */
		legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
		/* Collect data for each rate used during failed TX attempts */
		for (i = 0; i <= retries; ++i) {
			tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
			iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band,
					&tbl_type, &rs_index);
			/*
			 * Only collect stats if retried rate is in the same RS
			 * table as active/search.
			 */
			if (iwl4965_table_type_matches(&tbl_type, curr_tbl))
				tmp_tbl = curr_tbl;
			else if (iwl4965_table_type_matches(&tbl_type,
								other_tbl))
				tmp_tbl = other_tbl;
			else
				continue;
			/* Only the final attempt may have succeeded */
			iwl4965_rs_collect_tx_data(tmp_tbl, rs_index, 1,
					   i < retries ? 0 : legacy_success);
		}

		/* Update success/fail counts if not searching for new mode */
		if (lq_sta->stay_in_tbl) {
			lq_sta->total_success += legacy_success;
			lq_sta->total_failed += retries + (1 - legacy_success);
		}
	}
	/* The last TX rate is cached in lq_sta; it's set in if/else above */
	lq_sta->last_rate_n_flags = tx_rate;
done:
	/* See if there's a better rate or modulation mode to try. */
	if (sta && sta->supp_rates[sband->band])
		iwl4965_rs_rate_scale_perform(priv, skb, sta, lq_sta);
}
995
996/*
997 * Begin a period of staying with a selected modulation mode.
998 * Set "stay_in_tbl" flag to prevent any mode switches.
999 * Set frame tx success limits according to legacy vs. high-throughput,
1000 * and reset overall (spanning all rates) tx success history statistics.
1001 * These control how long we stay using same modulation mode before
1002 * searching for a new mode.
1003 */
1004static void iwl4965_rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
1005 struct iwl_lq_sta *lq_sta)
1006{
1007 IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
1008 lq_sta->stay_in_tbl = 1; /* only place this gets set */
1009 if (is_legacy) {
1010 lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
1011 lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
1012 lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
1013 } else {
1014 lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
1015 lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
1016 lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
1017 }
1018 lq_sta->table_count = 0;
1019 lq_sta->total_failed = 0;
1020 lq_sta->total_success = 0;
1021 lq_sta->flush_timer = jiffies;
1022 lq_sta->action_counter = 0;
1023}
1024
1025/*
1026 * Find correct throughput table for given mode of modulation
1027 */
1028static void iwl4965_rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1029 struct iwl_scale_tbl_info *tbl)
1030{
1031 /* Used to choose among HT tables */
1032 s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
1033
1034 /* Check for invalid LQ type */
1035 if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
1036 tbl->expected_tpt = expected_tpt_legacy;
1037 return;
1038 }
1039
1040 /* Legacy rates have only one table */
1041 if (is_legacy(tbl->lq_type)) {
1042 tbl->expected_tpt = expected_tpt_legacy;
1043 return;
1044 }
1045
1046 /* Choose among many HT tables depending on number of streams
1047 * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
1048 * status */
1049 if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1050 ht_tbl_pointer = expected_tpt_siso20MHz;
1051 else if (is_siso(tbl->lq_type))
1052 ht_tbl_pointer = expected_tpt_siso40MHz;
1053 else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1054 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1055 else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
1056 ht_tbl_pointer = expected_tpt_mimo2_40MHz;
1057
1058 if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
1059 tbl->expected_tpt = ht_tbl_pointer[0];
1060 else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
1061 tbl->expected_tpt = ht_tbl_pointer[1];
1062 else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
1063 tbl->expected_tpt = ht_tbl_pointer[2];
1064 else /* AGG+SGI */
1065 tbl->expected_tpt = ht_tbl_pointer[3];
1066}
1067
/*
 * Find starting rate for new "search" high-throughput mode of modulation.
 * Goal is to find lowest expected rate (under perfect conditions) that is
 * above the current measured throughput of "active" mode, to give new mode
 * a fair chance to prove itself without too many challenges.
 *
 * This gets called when transitioning to more aggressive modulation
 * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
 * (i.e. MIMO to SISO).  When moving to MIMO, bit rate will typically need
 * to decrease to match "active" throughput.  When moving from MIMO to SISO,
 * bit rate will typically need to increase, but not if performance was bad.
 *
 * @tbl is the candidate "search" table (expected_tpt already selected);
 * @rate_mask limits which rate indices may be used; @index is the current
 * rate index in the "active" table.  Returns the chosen rate index, or
 * IWL_RATE_INVALID if the loop never settled on one.
 */
static s32 iwl4965_rs_get_best_rate(struct iwl_priv *priv,
				    struct iwl_lq_sta *lq_sta,
				    struct iwl_scale_tbl_info *tbl,	/* "search" */
				    u16 rate_mask, s8 index)
{
	/* "active" values */
	struct iwl_scale_tbl_info *active_tbl =
	    &(lq_sta->lq_info[lq_sta->active_tbl]);
	s32 active_sr = active_tbl->win[index].success_ratio;
	s32 active_tpt = active_tbl->expected_tpt[index];

	/* expected "search" throughput */
	s32 *tpt_tbl = tbl->expected_tpt;

	s32 new_rate, high, low, start_hi;
	u16 high_low;
	s8 rate = index;

	new_rate = high = low = start_hi = IWL_RATE_INVALID;

	/* Walk up/down the candidate rates until the bounds cross */
	for (; ;) {
		high_low = iwl4965_rs_get_adjacent_rate(priv, rate, rate_mask,
						tbl->lq_type);

		/* low byte = next lower index, high byte = next higher */
		low = high_low & 0xff;
		high = (high_low >> 8) & 0xff;

		/*
		 * Lower the "search" bit rate, to give new "search" mode
		 * approximately the same throughput as "active" if:
		 *
		 * 1) "Active" mode has been working modestly well (but not
		 *    great), and expected "search" throughput (under perfect
		 *    conditions) at candidate rate is above the actual
		 *    measured "active" throughput (but less than expected
		 *    "active" throughput under perfect conditions).
		 * OR
		 * 2) "Active" mode has been working perfectly or very well
		 *    and expected "search" throughput (under perfect
		 *    conditions) at candidate rate is above expected
		 *    "active" throughput (under perfect conditions).
		 */
		if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
		     ((active_sr > IWL_RATE_DECREASE_TH) &&
		      (active_sr <= IWL_RATE_HIGH_TH) &&
		      (tpt_tbl[rate] <= active_tpt))) ||
		    ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
		     (tpt_tbl[rate] > active_tpt))) {

			/* (2nd or later pass)
			 * If we've already tried to raise the rate, and are
			 * now trying to lower it, use the higher rate. */
			if (start_hi != IWL_RATE_INVALID) {
				new_rate = start_hi;
				break;
			}

			new_rate = rate;

			/* Loop again with lower rate */
			if (low != IWL_RATE_INVALID)
				rate = low;

			/* Lower rate not available, use the original */
			else
				break;

		/* Else try to raise the "search" rate to match "active" */
		} else {
			/* (2nd or later pass)
			 * If we've already tried to lower the rate, and are
			 * now trying to raise it, use the lower rate. */
			if (new_rate != IWL_RATE_INVALID)
				break;

			/* Loop again with higher rate */
			else if (high != IWL_RATE_INVALID) {
				start_hi = high;
				rate = high;

			/* Higher rate not available, use the original */
			} else {
				new_rate = rate;
				break;
			}
		}
	}

	return new_rate;
}
1170
1171/*
1172 * Set up search table for MIMO2
1173 */
1174static int iwl4965_rs_switch_to_mimo2(struct iwl_priv *priv,
1175 struct iwl_lq_sta *lq_sta,
1176 struct ieee80211_conf *conf,
1177 struct ieee80211_sta *sta,
1178 struct iwl_scale_tbl_info *tbl, int index)
1179{
1180 u16 rate_mask;
1181 s32 rate;
1182 s8 is_green = lq_sta->is_green;
1183 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1184 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1185
1186 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1187 return -1;
1188
1189 if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
1190 == WLAN_HT_CAP_SM_PS_STATIC)
1191 return -1;
1192
1193 /* Need both Tx chains/antennas to support MIMO */
1194 if (priv->hw_params.tx_chains_num < 2)
1195 return -1;
1196
1197 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
1198
1199 tbl->lq_type = LQ_MIMO2;
1200 tbl->is_dup = lq_sta->is_dup;
1201 tbl->action = 0;
1202 tbl->max_search = IWL_MAX_SEARCH;
1203 rate_mask = lq_sta->active_mimo2_rate;
1204
1205 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1206 tbl->is_ht40 = 1;
1207 else
1208 tbl->is_ht40 = 0;
1209
1210 iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
1211
1212 rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1213
1214 IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n",
1215 rate, rate_mask);
1216 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1217 IWL_DEBUG_RATE(priv,
1218 "Can't switch with index %d rate mask %x\n",
1219 rate, rate_mask);
1220 return -1;
1221 }
1222 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
1223 tbl, rate, is_green);
1224
1225 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1226 tbl->current_rate, is_green);
1227 return 0;
1228}
1229
1230/*
1231 * Set up search table for SISO
1232 */
1233static int iwl4965_rs_switch_to_siso(struct iwl_priv *priv,
1234 struct iwl_lq_sta *lq_sta,
1235 struct ieee80211_conf *conf,
1236 struct ieee80211_sta *sta,
1237 struct iwl_scale_tbl_info *tbl, int index)
1238{
1239 u16 rate_mask;
1240 u8 is_green = lq_sta->is_green;
1241 s32 rate;
1242 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1243 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1244
1245 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1246 return -1;
1247
1248 IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
1249
1250 tbl->is_dup = lq_sta->is_dup;
1251 tbl->lq_type = LQ_SISO;
1252 tbl->action = 0;
1253 tbl->max_search = IWL_MAX_SEARCH;
1254 rate_mask = lq_sta->active_siso_rate;
1255
1256 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1257 tbl->is_ht40 = 1;
1258 else
1259 tbl->is_ht40 = 0;
1260
1261 if (is_green)
1262 tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
1263
1264 iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
1265 rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1266
1267 IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
1268 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1269 IWL_DEBUG_RATE(priv,
1270 "can not switch with index %d rate mask %x\n",
1271 rate, rate_mask);
1272 return -1;
1273 }
1274 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
1275 tbl, rate, is_green);
1276 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1277 tbl->current_rate, is_green);
1278 return 0;
1279}
1280
/*
 * Try to switch to a new modulation mode from legacy.
 *
 * Cycles through the legacy switch actions (toggle antenna, try SISO,
 * try MIMO2 with each antenna pair) starting from SISO; the first action
 * that succeeds sets up the other (search) table and marks it for trial.
 * Always returns 0.
 */
static int iwl4965_rs_move_legacy_other(struct iwl_priv *priv,
					struct iwl_lq_sta *lq_sta,
					struct ieee80211_conf *conf,
					struct ieee80211_sta *sta,
					int index)
{
	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
	struct iwl_scale_tbl_info *search_tbl =
				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
	struct iwl_rate_scale_data *window = &(tbl->win[index]);
	/* copy size: the table header only, excluding per-rate windows */
	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
	u8 start_action;
	u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
	u8 tx_chains_num = priv->hw_params.tx_chains_num;
	int ret = 0;
	u8 update_search_tbl_counter = 0;

	tbl->action = IWL_LEGACY_SWITCH_SISO;

	start_action = tbl->action;
	for (; ;) {
		lq_sta->action_counter++;
		switch (tbl->action) {
		case IWL_LEGACY_SWITCH_ANTENNA1:
		case IWL_LEGACY_SWITCH_ANTENNA2:
			IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");

			/* Skip if there is no further chain to toggle to */
			if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
							tx_chains_num <= 1) ||
			    (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
							tx_chains_num <= 2))
				break;

			/* Don't change antenna if success has been great */
			if (window->success_ratio >= IWL_RS_GOOD_RATIO)
				break;

			/* Set up search table to try other antenna */
			memcpy(search_tbl, tbl, sz);

			if (iwl4965_rs_toggle_antenna(valid_tx_ant,
				&search_tbl->current_rate, search_tbl)) {
				update_search_tbl_counter = 1;
				iwl4965_rs_set_expected_tpt_table(lq_sta,
								search_tbl);
				goto out;
			}
			break;
		case IWL_LEGACY_SWITCH_SISO:
			IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");

			/* Set up search table to try SISO */
			memcpy(search_tbl, tbl, sz);
			search_tbl->is_SGI = 0;
			ret = iwl4965_rs_switch_to_siso(priv, lq_sta, conf, sta,
						 search_tbl, index);
			if (!ret) {
				lq_sta->action_counter = 0;
				goto out;
			}

			break;
		case IWL_LEGACY_SWITCH_MIMO2_AB:
		case IWL_LEGACY_SWITCH_MIMO2_AC:
		case IWL_LEGACY_SWITCH_MIMO2_BC:
			IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");

			/* Set up search table to try MIMO */
			memcpy(search_tbl, tbl, sz);
			search_tbl->is_SGI = 0;

			/* Action value selects the MIMO antenna pair */
			if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
				search_tbl->ant_type = ANT_AB;
			else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
				search_tbl->ant_type = ANT_AC;
			else
				search_tbl->ant_type = ANT_BC;

			if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
						search_tbl->ant_type))
				break;

			ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
						conf, sta,
						 search_tbl, index);
			if (!ret) {
				lq_sta->action_counter = 0;
				goto out;
			}
			break;
		}
		/* Advance to the next action, wrapping after the last one */
		tbl->action++;
		if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
			tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;

		/* Every action tried once without success: give up */
		if (tbl->action == start_action)
			break;

	}
	search_tbl->lq_type = LQ_NONE;
	return 0;

out:
	/* A candidate was set up; schedule it for trial next time */
	lq_sta->search_better_tbl = 1;
	tbl->action++;
	if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
		tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
	if (update_search_tbl_counter)
		search_tbl->action = tbl->action;
	return 0;

}
1397
/*
 * Try to switch to a new modulation mode from SISO.
 *
 * Cycles through the SISO switch actions (toggle antenna, try MIMO2 with
 * each antenna pair, toggle guard interval) from where the last search
 * left off; the first action that succeeds sets up the other (search)
 * table and marks it for trial.  Always returns 0.
 */
static int iwl4965_rs_move_siso_to_other(struct iwl_priv *priv,
					 struct iwl_lq_sta *lq_sta,
					 struct ieee80211_conf *conf,
					 struct ieee80211_sta *sta, int index)
{
	u8 is_green = lq_sta->is_green;
	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
	struct iwl_scale_tbl_info *search_tbl =
				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
	struct iwl_rate_scale_data *window = &(tbl->win[index]);
	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
	/* copy size: the table header only, excluding per-rate windows */
	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
	u8 start_action;
	u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
	u8 tx_chains_num = priv->hw_params.tx_chains_num;
	u8 update_search_tbl_counter = 0;
	int ret;

	start_action = tbl->action;

	for (;;) {
		lq_sta->action_counter++;
		switch (tbl->action) {
		case IWL_SISO_SWITCH_ANTENNA1:
		case IWL_SISO_SWITCH_ANTENNA2:
			IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
			/* Skip if there is no further chain to toggle to */
			if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
						tx_chains_num <= 1) ||
			    (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
						tx_chains_num <= 2))
				break;

			/* Don't change antenna if success has been great */
			if (window->success_ratio >= IWL_RS_GOOD_RATIO)
				break;

			memcpy(search_tbl, tbl, sz);
			if (iwl4965_rs_toggle_antenna(valid_tx_ant,
				       &search_tbl->current_rate, search_tbl)) {
				update_search_tbl_counter = 1;
				goto out;
			}
			break;
		case IWL_SISO_SWITCH_MIMO2_AB:
		case IWL_SISO_SWITCH_MIMO2_AC:
		case IWL_SISO_SWITCH_MIMO2_BC:
			IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
			memcpy(search_tbl, tbl, sz);
			search_tbl->is_SGI = 0;

			/* Action value selects the MIMO antenna pair */
			if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
				search_tbl->ant_type = ANT_AB;
			else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
				search_tbl->ant_type = ANT_AC;
			else
				search_tbl->ant_type = ANT_BC;

			if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
						search_tbl->ant_type))
				break;

			ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
						conf, sta,
						 search_tbl, index);
			if (!ret)
				goto out;
			break;
		case IWL_SISO_SWITCH_GI:
			/* Peer must support SGI at the current width */
			if (!tbl->is_ht40 && !(ht_cap->cap &
						IEEE80211_HT_CAP_SGI_20))
				break;
			if (tbl->is_ht40 && !(ht_cap->cap &
						IEEE80211_HT_CAP_SGI_40))
				break;

			IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");

			memcpy(search_tbl, tbl, sz);
			/* SGI is not allowed with greenfield SISO */
			if (is_green) {
				if (!tbl->is_SGI)
					break;
				else
					IWL_ERR(priv,
						"SGI was set in GF+SISO\n");
			}
			search_tbl->is_SGI = !tbl->is_SGI;
			iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
			/* Don't turn SGI off if it's already outperforming */
			if (tbl->is_SGI) {
				s32 tpt = lq_sta->last_tpt / 100;
				if (tpt >= search_tbl->expected_tpt[index])
					break;
			}
			search_tbl->current_rate =
				iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
						      index, is_green);
			update_search_tbl_counter = 1;
			goto out;
		}
		/* Advance to the next action, wrapping after the last one */
		tbl->action++;
		if (tbl->action > IWL_SISO_SWITCH_GI)
			tbl->action = IWL_SISO_SWITCH_ANTENNA1;

		/* Every action tried once without success: give up */
		if (tbl->action == start_action)
			break;
	}
	search_tbl->lq_type = LQ_NONE;
	return 0;

 out:
	/* A candidate was set up; schedule it for trial next time */
	lq_sta->search_better_tbl = 1;
	tbl->action++;
	if (tbl->action > IWL_SISO_SWITCH_GI)
		tbl->action = IWL_SISO_SWITCH_ANTENNA1;
	if (update_search_tbl_counter)
		search_tbl->action = tbl->action;

	return 0;
}
1519
/*
 * Try to switch to a new modulation mode from MIMO2.
 *
 * Cycles through the MIMO2 switch actions (toggle antennas, drop to SISO
 * on each single antenna, toggle guard interval) from where the last
 * search left off; the first action that succeeds sets up the other
 * (search) table and marks it for trial.  Always returns 0.
 */
static int iwl4965_rs_move_mimo2_to_other(struct iwl_priv *priv,
					 struct iwl_lq_sta *lq_sta,
					 struct ieee80211_conf *conf,
					 struct ieee80211_sta *sta, int index)
{
	s8 is_green = lq_sta->is_green;
	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
	struct iwl_scale_tbl_info *search_tbl =
				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
	struct iwl_rate_scale_data *window = &(tbl->win[index]);
	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
	/* copy size: the table header only, excluding per-rate windows */
	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
	u8 start_action;
	u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
	u8 tx_chains_num = priv->hw_params.tx_chains_num;
	u8 update_search_tbl_counter = 0;
	int ret;

	start_action = tbl->action;
	for (;;) {
		lq_sta->action_counter++;
		switch (tbl->action) {
		case IWL_MIMO2_SWITCH_ANTENNA1:
		case IWL_MIMO2_SWITCH_ANTENNA2:
			IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");

			/* Need a third chain to have another pair to try */
			if (tx_chains_num <= 2)
				break;

			/* Don't change antennas if success has been great */
			if (window->success_ratio >= IWL_RS_GOOD_RATIO)
				break;

			memcpy(search_tbl, tbl, sz);
			if (iwl4965_rs_toggle_antenna(valid_tx_ant,
				       &search_tbl->current_rate, search_tbl)) {
				update_search_tbl_counter = 1;
				goto out;
			}
			break;
		case IWL_MIMO2_SWITCH_SISO_A:
		case IWL_MIMO2_SWITCH_SISO_B:
		case IWL_MIMO2_SWITCH_SISO_C:
			IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");

			/* Set up new search table for SISO */
			memcpy(search_tbl, tbl, sz);

			/* Action value selects the single SISO antenna */
			if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
				search_tbl->ant_type = ANT_A;
			else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
				search_tbl->ant_type = ANT_B;
			else
				search_tbl->ant_type = ANT_C;

			if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
						search_tbl->ant_type))
				break;

			ret = iwl4965_rs_switch_to_siso(priv, lq_sta,
						conf, sta,
						 search_tbl, index);
			if (!ret)
				goto out;

			break;

		case IWL_MIMO2_SWITCH_GI:
			/* Peer must support SGI at the current width */
			if (!tbl->is_ht40 && !(ht_cap->cap &
						IEEE80211_HT_CAP_SGI_20))
				break;
			if (tbl->is_ht40 && !(ht_cap->cap &
						IEEE80211_HT_CAP_SGI_40))
				break;

			IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");

			/* Set up new search table for MIMO2 */
			memcpy(search_tbl, tbl, sz);
			search_tbl->is_SGI = !tbl->is_SGI;
			iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
			/*
			 * If active table already uses the fastest possible
			 * modulation (dual stream with short guard interval),
			 * and it's working well, there's no need to look
			 * for a better type of modulation!
			 */
			if (tbl->is_SGI) {
				s32 tpt = lq_sta->last_tpt / 100;
				if (tpt >= search_tbl->expected_tpt[index])
					break;
			}
			search_tbl->current_rate =
				iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
						      index, is_green);
			update_search_tbl_counter = 1;
			goto out;

		}
		/* Advance to the next action, wrapping after the last one */
		tbl->action++;
		if (tbl->action > IWL_MIMO2_SWITCH_GI)
			tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;

		/* Every action tried once without success: give up */
		if (tbl->action == start_action)
			break;
	}
	search_tbl->lq_type = LQ_NONE;
	return 0;
 out:
	/* A candidate was set up; schedule it for trial next time */
	lq_sta->search_better_tbl = 1;
	tbl->action++;
	if (tbl->action > IWL_MIMO2_SWITCH_GI)
		tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
	if (update_search_tbl_counter)
		search_tbl->action = tbl->action;

	return 0;

}
1642
/*
 * Check whether we should continue using same modulation mode, or
 * begin search for a new mode, based on:
 * 1) # tx successes or failures while using this mode
 * 2) # times calling this function
 * 3) elapsed time in this mode (not used, for now)
 *
 * @force_search bypasses the counters and expires the stay period
 * immediately (used when a Tx status matched neither rate table).
 */
static void
iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
{
	struct iwl_scale_tbl_info *tbl;
	int i;
	int active_tbl;
	int flush_interval_passed = 0;
	struct iwl_priv *priv;

	priv = lq_sta->drv;
	active_tbl = lq_sta->active_tbl;

	tbl = &(lq_sta->lq_info[active_tbl]);

	/* If we've been disallowing search, see if we should now allow it */
	if (lq_sta->stay_in_tbl) {

		/* Elapsed time using current modulation mode */
		if (lq_sta->flush_timer)
			flush_interval_passed =
			time_after(jiffies,
					(unsigned long)(lq_sta->flush_timer +
					IWL_RATE_SCALE_FLUSH_INTVL));

		/*
		 * Check if we should allow search for new modulation mode.
		 * If many frames have failed or succeeded, or we've used
		 * this same modulation for a long time, allow search, and
		 * reset history stats that keep track of whether we should
		 * allow a new search.  Also (below) reset all bitmaps and
		 * stats in active history.
		 */
		if (force_search ||
		    (lq_sta->total_failed > lq_sta->max_failure_limit) ||
		    (lq_sta->total_success > lq_sta->max_success_limit) ||
		    ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
		     && (flush_interval_passed))) {
			IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:",
				     lq_sta->total_failed,
				     lq_sta->total_success,
				     flush_interval_passed);

			/* Allow search for new mode */
			lq_sta->stay_in_tbl = 0;	/* only place reset */
			lq_sta->total_failed = 0;
			lq_sta->total_success = 0;
			lq_sta->flush_timer = 0;

		/*
		 * Else if we've used this modulation mode enough repetitions
		 * (regardless of elapsed time or success/failure), reset
		 * history bitmaps and rate-specific stats for all rates in
		 * active table.
		 */
		} else {
			lq_sta->table_count++;
			if (lq_sta->table_count >=
			    lq_sta->table_count_limit) {
				lq_sta->table_count = 0;

				IWL_DEBUG_RATE(priv,
					"LQ: stay in table clear win\n");
				for (i = 0; i < IWL_RATE_COUNT; i++)
					iwl4965_rs_rate_scale_clear_window(
						&(tbl->win[i]));
			}
		}

		/* If transitioning to allow "search", reset all history
		 * bitmaps and stats in active table (this will become the new
		 * "search" table). */
		if (!lq_sta->stay_in_tbl) {
			for (i = 0; i < IWL_RATE_COUNT; i++)
				iwl4965_rs_rate_scale_clear_window(
							&(tbl->win[i]));
		}
	}
}
1728
1729/*
1730 * setup rate table in uCode
1731 * return rate_n_flags as used in the table
1732 */
1733static u32 iwl4965_rs_update_rate_tbl(struct iwl_priv *priv,
1734 struct iwl_rxon_context *ctx,
1735 struct iwl_lq_sta *lq_sta,
1736 struct iwl_scale_tbl_info *tbl,
1737 int index, u8 is_green)
1738{
1739 u32 rate;
1740
1741 /* Update uCode's rate table. */
1742 rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, index, is_green);
1743 iwl4965_rs_fill_link_cmd(priv, lq_sta, rate);
1744 iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
1745
1746 return rate;
1747}
1748
1749/*
1750 * Do rate scaling and search for new modulation mode.
1751 */
1752static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
1753 struct sk_buff *skb,
1754 struct ieee80211_sta *sta,
1755 struct iwl_lq_sta *lq_sta)
1756{
1757 struct ieee80211_hw *hw = priv->hw;
1758 struct ieee80211_conf *conf = &hw->conf;
1759 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1760 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1761 int low = IWL_RATE_INVALID;
1762 int high = IWL_RATE_INVALID;
1763 int index;
1764 int i;
1765 struct iwl_rate_scale_data *window = NULL;
1766 int current_tpt = IWL_INVALID_VALUE;
1767 int low_tpt = IWL_INVALID_VALUE;
1768 int high_tpt = IWL_INVALID_VALUE;
1769 u32 fail_count;
1770 s8 scale_action = 0;
1771 u16 rate_mask;
1772 u8 update_lq = 0;
1773 struct iwl_scale_tbl_info *tbl, *tbl1;
1774 u16 rate_scale_index_msk = 0;
1775 u32 rate;
1776 u8 is_green = 0;
1777 u8 active_tbl = 0;
1778 u8 done_search = 0;
1779 u16 high_low;
1780 s32 sr;
1781 u8 tid = MAX_TID_COUNT;
1782 struct iwl_tid_data *tid_data;
1783 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1784 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1785
1786 IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
1787
1788 /* Send management frames and NO_ACK data using lowest rate. */
1789 /* TODO: this could probably be improved.. */
1790 if (!ieee80211_is_data(hdr->frame_control) ||
1791 info->flags & IEEE80211_TX_CTL_NO_ACK)
1792 return;
1793
1794 if (!sta || !lq_sta)
1795 return;
1796
1797 lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
1798
1799 tid = iwl4965_rs_tl_add_packet(lq_sta, hdr);
1800 if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
1801 tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
1802 if (tid_data->agg.state == IWL_AGG_OFF)
1803 lq_sta->is_agg = 0;
1804 else
1805 lq_sta->is_agg = 1;
1806 } else
1807 lq_sta->is_agg = 0;
1808
1809 /*
1810 * Select rate-scale / modulation-mode table to work with in
1811 * the rest of this function: "search" if searching for better
1812 * modulation mode, or "active" if doing rate scaling within a mode.
1813 */
1814 if (!lq_sta->search_better_tbl)
1815 active_tbl = lq_sta->active_tbl;
1816 else
1817 active_tbl = 1 - lq_sta->active_tbl;
1818
1819 tbl = &(lq_sta->lq_info[active_tbl]);
1820 if (is_legacy(tbl->lq_type))
1821 lq_sta->is_green = 0;
1822 else
1823 lq_sta->is_green = iwl4965_rs_use_green(sta);
1824 is_green = lq_sta->is_green;
1825
1826 /* current tx rate */
1827 index = lq_sta->last_txrate_idx;
1828
1829 IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
1830 tbl->lq_type);
1831
1832 /* rates available for this association, and for modulation mode */
1833 rate_mask = iwl4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
1834
1835 IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
1836
1837 /* mask with station rate restriction */
1838 if (is_legacy(tbl->lq_type)) {
1839 if (lq_sta->band == IEEE80211_BAND_5GHZ)
1840 /* supp_rates has no CCK bits in A mode */
1841 rate_scale_index_msk = (u16) (rate_mask &
1842 (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
1843 else
1844 rate_scale_index_msk = (u16) (rate_mask &
1845 lq_sta->supp_rates);
1846
1847 } else
1848 rate_scale_index_msk = rate_mask;
1849
1850 if (!rate_scale_index_msk)
1851 rate_scale_index_msk = rate_mask;
1852
1853 if (!((1 << index) & rate_scale_index_msk)) {
1854 IWL_ERR(priv, "Current Rate is not valid\n");
1855 if (lq_sta->search_better_tbl) {
1856 /* revert to active table if search table is not valid*/
1857 tbl->lq_type = LQ_NONE;
1858 lq_sta->search_better_tbl = 0;
1859 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1860 /* get "active" rate info */
1861 index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
1862 rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
1863 tbl, index, is_green);
1864 }
1865 return;
1866 }
1867
1868 /* Get expected throughput table and history window for current rate */
1869 if (!tbl->expected_tpt) {
1870 IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
1871 return;
1872 }
1873
1874 /* force user max rate if set by user */
1875 if ((lq_sta->max_rate_idx != -1) &&
1876 (lq_sta->max_rate_idx < index)) {
1877 index = lq_sta->max_rate_idx;
1878 update_lq = 1;
1879 window = &(tbl->win[index]);
1880 goto lq_update;
1881 }
1882
1883 window = &(tbl->win[index]);
1884
1885 /*
1886 * If there is not enough history to calculate actual average
1887 * throughput, keep analyzing results of more tx frames, without
1888 * changing rate or mode (bypass most of the rest of this function).
1889 * Set up new rate table in uCode only if old rate is not supported
1890 * in current association (use new rate found above).
1891 */
1892 fail_count = window->counter - window->success_counter;
1893 if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
1894 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
1895 IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
1896 "for index %d\n",
1897 window->success_counter, window->counter, index);
1898
1899 /* Can't calculate this yet; not enough history */
1900 window->average_tpt = IWL_INVALID_VALUE;
1901
1902 /* Should we stay with this modulation mode,
1903 * or search for a new one? */
1904 iwl4965_rs_stay_in_table(lq_sta, false);
1905
1906 goto out;
1907 }
1908 /* Else we have enough samples; calculate estimate of
1909 * actual average throughput */
1910 if (window->average_tpt != ((window->success_ratio *
1911 tbl->expected_tpt[index] + 64) / 128)) {
1912 IWL_ERR(priv,
1913 "expected_tpt should have been calculated by now\n");
1914 window->average_tpt = ((window->success_ratio *
1915 tbl->expected_tpt[index] + 64) / 128);
1916 }
1917
1918 /* If we are searching for better modulation mode, check success. */
1919 if (lq_sta->search_better_tbl) {
1920 /* If good success, continue using the "search" mode;
1921 * no need to send new link quality command, since we're
1922 * continuing to use the setup that we've been trying. */
1923 if (window->average_tpt > lq_sta->last_tpt) {
1924
1925 IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
1926 "suc=%d cur-tpt=%d old-tpt=%d\n",
1927 window->success_ratio,
1928 window->average_tpt,
1929 lq_sta->last_tpt);
1930
1931 if (!is_legacy(tbl->lq_type))
1932 lq_sta->enable_counter = 1;
1933
1934 /* Swap tables; "search" becomes "active" */
1935 lq_sta->active_tbl = active_tbl;
1936 current_tpt = window->average_tpt;
1937
1938 /* Else poor success; go back to mode in "active" table */
1939 } else {
1940
1941 IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
1942 "suc=%d cur-tpt=%d old-tpt=%d\n",
1943 window->success_ratio,
1944 window->average_tpt,
1945 lq_sta->last_tpt);
1946
1947 /* Nullify "search" table */
1948 tbl->lq_type = LQ_NONE;
1949
1950 /* Revert to "active" table */
1951 active_tbl = lq_sta->active_tbl;
1952 tbl = &(lq_sta->lq_info[active_tbl]);
1953
1954 /* Revert to "active" rate and throughput info */
1955 index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
1956 current_tpt = lq_sta->last_tpt;
1957
1958 /* Need to set up a new rate table in uCode */
1959 update_lq = 1;
1960 }
1961
1962 /* Either way, we've made a decision; modulation mode
1963 * search is done, allow rate adjustment next time. */
1964 lq_sta->search_better_tbl = 0;
1965 done_search = 1; /* Don't switch modes below! */
1966 goto lq_update;
1967 }
1968
1969 /* (Else) not in search of better modulation mode, try for better
1970 * starting rate, while staying in this mode. */
1971 high_low = iwl4965_rs_get_adjacent_rate(priv, index,
1972 rate_scale_index_msk,
1973 tbl->lq_type);
1974 low = high_low & 0xff;
1975 high = (high_low >> 8) & 0xff;
1976
1977 /* If user set max rate, dont allow higher than user constrain */
1978 if ((lq_sta->max_rate_idx != -1) &&
1979 (lq_sta->max_rate_idx < high))
1980 high = IWL_RATE_INVALID;
1981
1982 sr = window->success_ratio;
1983
1984 /* Collect measured throughputs for current and adjacent rates */
1985 current_tpt = window->average_tpt;
1986 if (low != IWL_RATE_INVALID)
1987 low_tpt = tbl->win[low].average_tpt;
1988 if (high != IWL_RATE_INVALID)
1989 high_tpt = tbl->win[high].average_tpt;
1990
1991 scale_action = 0;
1992
1993 /* Too many failures, decrease rate */
1994 if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
1995 IWL_DEBUG_RATE(priv,
1996 "decrease rate because of low success_ratio\n");
1997 scale_action = -1;
1998
1999 /* No throughput measured yet for adjacent rates; try increase. */
2000 } else if ((low_tpt == IWL_INVALID_VALUE) &&
2001 (high_tpt == IWL_INVALID_VALUE)) {
2002
2003 if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
2004 scale_action = 1;
2005 else if (low != IWL_RATE_INVALID)
2006 scale_action = 0;
2007 }
2008
2009 /* Both adjacent throughputs are measured, but neither one has better
2010 * throughput; we're using the best rate, don't change it! */
2011 else if ((low_tpt != IWL_INVALID_VALUE) &&
2012 (high_tpt != IWL_INVALID_VALUE) &&
2013 (low_tpt < current_tpt) &&
2014 (high_tpt < current_tpt))
2015 scale_action = 0;
2016
2017 /* At least one adjacent rate's throughput is measured,
2018 * and may have better performance. */
2019 else {
2020 /* Higher adjacent rate's throughput is measured */
2021 if (high_tpt != IWL_INVALID_VALUE) {
2022 /* Higher rate has better throughput */
2023 if (high_tpt > current_tpt &&
2024 sr >= IWL_RATE_INCREASE_TH) {
2025 scale_action = 1;
2026 } else {
2027 scale_action = 0;
2028 }
2029
2030 /* Lower adjacent rate's throughput is measured */
2031 } else if (low_tpt != IWL_INVALID_VALUE) {
2032 /* Lower rate has better throughput */
2033 if (low_tpt > current_tpt) {
2034 IWL_DEBUG_RATE(priv,
2035 "decrease rate because of low tpt\n");
2036 scale_action = -1;
2037 } else if (sr >= IWL_RATE_INCREASE_TH) {
2038 scale_action = 1;
2039 }
2040 }
2041 }
2042
2043 /* Sanity check; asked for decrease, but success rate or throughput
2044 * has been good at old rate. Don't change it. */
2045 if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
2046 ((sr > IWL_RATE_HIGH_TH) ||
2047 (current_tpt > (100 * tbl->expected_tpt[low]))))
2048 scale_action = 0;
2049
2050 switch (scale_action) {
2051 case -1:
2052 /* Decrease starting rate, update uCode's rate table */
2053 if (low != IWL_RATE_INVALID) {
2054 update_lq = 1;
2055 index = low;
2056 }
2057
2058 break;
2059 case 1:
2060 /* Increase starting rate, update uCode's rate table */
2061 if (high != IWL_RATE_INVALID) {
2062 update_lq = 1;
2063 index = high;
2064 }
2065
2066 break;
2067 case 0:
2068 /* No change */
2069 default:
2070 break;
2071 }
2072
2073 IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
2074 "high %d type %d\n",
2075 index, scale_action, low, high, tbl->lq_type);
2076
2077lq_update:
2078 /* Replace uCode's rate table for the destination station. */
2079 if (update_lq)
2080 rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
2081 tbl, index, is_green);
2082
2083 /* Should we stay with this modulation mode,
2084 * or search for a new one? */
2085 iwl4965_rs_stay_in_table(lq_sta, false);
2086
2087 /*
2088 * Search for new modulation mode if we're:
2089 * 1) Not changing rates right now
2090 * 2) Not just finishing up a search
2091 * 3) Allowing a new search
2092 */
2093 if (!update_lq && !done_search &&
2094 !lq_sta->stay_in_tbl && window->counter) {
2095 /* Save current throughput to compare with "search" throughput*/
2096 lq_sta->last_tpt = current_tpt;
2097
2098 /* Select a new "search" modulation mode to try.
2099 * If one is found, set up the new "search" table. */
2100 if (is_legacy(tbl->lq_type))
2101 iwl4965_rs_move_legacy_other(priv, lq_sta,
2102 conf, sta, index);
2103 else if (is_siso(tbl->lq_type))
2104 iwl4965_rs_move_siso_to_other(priv, lq_sta,
2105 conf, sta, index);
2106 else /* (is_mimo2(tbl->lq_type)) */
2107 iwl4965_rs_move_mimo2_to_other(priv, lq_sta,
2108 conf, sta, index);
2109
2110 /* If new "search" mode was selected, set up in uCode table */
2111 if (lq_sta->search_better_tbl) {
2112 /* Access the "search" table, clear its history. */
2113 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
2114 for (i = 0; i < IWL_RATE_COUNT; i++)
2115 iwl4965_rs_rate_scale_clear_window(
2116 &(tbl->win[i]));
2117
2118 /* Use new "search" start rate */
2119 index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
2120
2121 IWL_DEBUG_RATE(priv,
2122 "Switch current mcs: %X index: %d\n",
2123 tbl->current_rate, index);
2124 iwl4965_rs_fill_link_cmd(priv, lq_sta,
2125 tbl->current_rate);
2126 iwl_legacy_send_lq_cmd(priv, ctx,
2127 &lq_sta->lq, CMD_ASYNC, false);
2128 } else
2129 done_search = 1;
2130 }
2131
2132 if (done_search && !lq_sta->stay_in_tbl) {
2133 /* If the "active" (non-search) mode was legacy,
2134 * and we've tried switching antennas,
2135 * but we haven't been able to try HT modes (not available),
2136 * stay with best antenna legacy modulation for a while
2137 * before next round of mode comparisons. */
2138 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
2139 if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
2140 lq_sta->action_counter > tbl1->max_search) {
2141 IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
2142 iwl4965_rs_set_stay_in_table(priv, 1, lq_sta);
2143 }
2144
2145 /* If we're in an HT mode, and all 3 mode switch actions
2146 * have been tried and compared, stay in this best modulation
2147 * mode for a while before next round of mode comparisons. */
2148 if (lq_sta->enable_counter &&
2149 (lq_sta->action_counter >= tbl1->max_search)) {
2150 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
2151 (lq_sta->tx_agg_tid_en & (1 << tid)) &&
2152 (tid != MAX_TID_COUNT)) {
2153 tid_data =
2154 &priv->stations[lq_sta->lq.sta_id].tid[tid];
2155 if (tid_data->agg.state == IWL_AGG_OFF) {
2156 IWL_DEBUG_RATE(priv,
2157 "try to aggregate tid %d\n",
2158 tid);
2159 iwl4965_rs_tl_turn_on_agg(priv, tid,
2160 lq_sta, sta);
2161 }
2162 }
2163 iwl4965_rs_set_stay_in_table(priv, 0, lq_sta);
2164 }
2165 }
2166
2167out:
2168 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, tbl,
2169 index, is_green);
2170 i = index;
2171 lq_sta->last_txrate_idx = i;
2172}
2173
2174/**
2175 * iwl4965_rs_initialize_lq - Initialize a station's hardware rate table
2176 *
2177 * The uCode's station table contains a table of fallback rates
2178 * for automatic fallback during transmission.
2179 *
2180 * NOTE: This sets up a default set of values. These will be replaced later
2181 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
2182 * rc80211_simple.
2183 *
2184 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
2185 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
2186 * which requires station table entry to exist).
2187 */
2188static void iwl4965_rs_initialize_lq(struct iwl_priv *priv,
2189 struct ieee80211_conf *conf,
2190 struct ieee80211_sta *sta,
2191 struct iwl_lq_sta *lq_sta)
2192{
2193 struct iwl_scale_tbl_info *tbl;
2194 int rate_idx;
2195 int i;
2196 u32 rate;
2197 u8 use_green = iwl4965_rs_use_green(sta);
2198 u8 active_tbl = 0;
2199 u8 valid_tx_ant;
2200 struct iwl_station_priv *sta_priv;
2201 struct iwl_rxon_context *ctx;
2202
2203 if (!sta || !lq_sta)
2204 return;
2205
2206 sta_priv = (void *)sta->drv_priv;
2207 ctx = sta_priv->common.ctx;
2208
2209 i = lq_sta->last_txrate_idx;
2210
2211 valid_tx_ant = priv->hw_params.valid_tx_ant;
2212
2213 if (!lq_sta->search_better_tbl)
2214 active_tbl = lq_sta->active_tbl;
2215 else
2216 active_tbl = 1 - lq_sta->active_tbl;
2217
2218 tbl = &(lq_sta->lq_info[active_tbl]);
2219
2220 if ((i < 0) || (i >= IWL_RATE_COUNT))
2221 i = 0;
2222
2223 rate = iwlegacy_rates[i].plcp;
2224 tbl->ant_type = iwl4965_first_antenna(valid_tx_ant);
2225 rate |= tbl->ant_type << RATE_MCS_ANT_POS;
2226
2227 if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
2228 rate |= RATE_MCS_CCK_MSK;
2229
2230 iwl4965_rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
2231 if (!iwl4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
2232 iwl4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
2233
2234 rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
2235 tbl->current_rate = rate;
2236 iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
2237 iwl4965_rs_fill_link_cmd(NULL, lq_sta, rate);
2238 priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
2239 iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
2240}
2241
2242static void
2243iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
2244 struct ieee80211_tx_rate_control *txrc)
2245{
2246
2247 struct sk_buff *skb = txrc->skb;
2248 struct ieee80211_supported_band *sband = txrc->sband;
2249 struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
2250 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2251 struct iwl_lq_sta *lq_sta = priv_sta;
2252 int rate_idx;
2253
2254 IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");
2255
2256 /* Get max rate if user set max rate */
2257 if (lq_sta) {
2258 lq_sta->max_rate_idx = txrc->max_rate_idx;
2259 if ((sband->band == IEEE80211_BAND_5GHZ) &&
2260 (lq_sta->max_rate_idx != -1))
2261 lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
2262 if ((lq_sta->max_rate_idx < 0) ||
2263 (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
2264 lq_sta->max_rate_idx = -1;
2265 }
2266
2267 /* Treat uninitialized rate scaling data same as non-existing. */
2268 if (lq_sta && !lq_sta->drv) {
2269 IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
2270 priv_sta = NULL;
2271 }
2272
2273 /* Send management frames and NO_ACK data using lowest rate. */
2274 if (rate_control_send_low(sta, priv_sta, txrc))
2275 return;
2276
2277 if (!lq_sta)
2278 return;
2279
2280 rate_idx = lq_sta->last_txrate_idx;
2281
2282 if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
2283 rate_idx -= IWL_FIRST_OFDM_RATE;
2284 /* 6M and 9M shared same MCS index */
2285 rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
2286 if (iwl4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
2287 IWL_RATE_MIMO2_6M_PLCP)
2288 rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
2289 info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
2290 if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
2291 info->control.rates[0].flags |=
2292 IEEE80211_TX_RC_SHORT_GI;
2293 if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
2294 info->control.rates[0].flags |=
2295 IEEE80211_TX_RC_DUP_DATA;
2296 if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
2297 info->control.rates[0].flags |=
2298 IEEE80211_TX_RC_40_MHZ_WIDTH;
2299 if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
2300 info->control.rates[0].flags |=
2301 IEEE80211_TX_RC_GREEN_FIELD;
2302 } else {
2303 /* Check for invalid rates */
2304 if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
2305 ((sband->band == IEEE80211_BAND_5GHZ) &&
2306 (rate_idx < IWL_FIRST_OFDM_RATE)))
2307 rate_idx = rate_lowest_index(sband, sta);
2308 /* On valid 5 GHz rate, adjust index */
2309 else if (sband->band == IEEE80211_BAND_5GHZ)
2310 rate_idx -= IWL_FIRST_OFDM_RATE;
2311 info->control.rates[0].flags = 0;
2312 }
2313 info->control.rates[0].idx = rate_idx;
2314
2315}
2316
2317static void *iwl4965_rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
2318 gfp_t gfp)
2319{
2320 struct iwl_lq_sta *lq_sta;
2321 struct iwl_station_priv *sta_priv =
2322 (struct iwl_station_priv *) sta->drv_priv;
2323 struct iwl_priv *priv;
2324
2325 priv = (struct iwl_priv *)priv_rate;
2326 IWL_DEBUG_RATE(priv, "create station rate scale window\n");
2327
2328 lq_sta = &sta_priv->lq_sta;
2329
2330 return lq_sta;
2331}
2332
2333/*
2334 * Called after adding a new station to initialize rate scaling
2335 */
2336void
2337iwl4965_rs_rate_init(struct iwl_priv *priv,
2338 struct ieee80211_sta *sta,
2339 u8 sta_id)
2340{
2341 int i, j;
2342 struct ieee80211_hw *hw = priv->hw;
2343 struct ieee80211_conf *conf = &priv->hw->conf;
2344 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2345 struct iwl_station_priv *sta_priv;
2346 struct iwl_lq_sta *lq_sta;
2347 struct ieee80211_supported_band *sband;
2348
2349 sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2350 lq_sta = &sta_priv->lq_sta;
2351 sband = hw->wiphy->bands[conf->channel->band];
2352
2353
2354 lq_sta->lq.sta_id = sta_id;
2355
2356 for (j = 0; j < LQ_SIZE; j++)
2357 for (i = 0; i < IWL_RATE_COUNT; i++)
2358 iwl4965_rs_rate_scale_clear_window(
2359 &lq_sta->lq_info[j].win[i]);
2360
2361 lq_sta->flush_timer = 0;
2362 lq_sta->supp_rates = sta->supp_rates[sband->band];
2363 for (j = 0; j < LQ_SIZE; j++)
2364 for (i = 0; i < IWL_RATE_COUNT; i++)
2365 iwl4965_rs_rate_scale_clear_window(
2366 &lq_sta->lq_info[j].win[i]);
2367
2368 IWL_DEBUG_RATE(priv, "LQ:"
2369 "*** rate scale station global init for station %d ***\n",
2370 sta_id);
2371 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2372 * the lowest or the highest rate.. Could consider using RSSI from
2373 * previous packets? Need to have IEEE 802.1X auth succeed immediately
2374 * after assoc.. */
2375
2376 lq_sta->is_dup = 0;
2377 lq_sta->max_rate_idx = -1;
2378 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
2379 lq_sta->is_green = iwl4965_rs_use_green(sta);
2380 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
2381 lq_sta->band = priv->band;
2382 /*
2383 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
2384 * supp_rates[] does not; shift to convert format, force 9 MBits off.
2385 */
2386 lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
2387 lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
2388 lq_sta->active_siso_rate &= ~((u16)0x2);
2389 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
2390
2391 /* Same here */
2392 lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
2393 lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
2394 lq_sta->active_mimo2_rate &= ~((u16)0x2);
2395 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
2396
2397 /* These values will be overridden later */
2398 lq_sta->lq.general_params.single_stream_ant_msk =
2399 iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
2400 lq_sta->lq.general_params.dual_stream_ant_msk =
2401 priv->hw_params.valid_tx_ant &
2402 ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
2403 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2404 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2405 } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
2406 lq_sta->lq.general_params.dual_stream_ant_msk =
2407 priv->hw_params.valid_tx_ant;
2408 }
2409
2410 /* as default allow aggregation for all tids */
2411 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
2412 lq_sta->drv = priv;
2413
2414 /* Set last_txrate_idx to lowest rate */
2415 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
2416 if (sband->band == IEEE80211_BAND_5GHZ)
2417 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2418 lq_sta->is_agg = 0;
2419
2420#ifdef CONFIG_MAC80211_DEBUGFS
2421 lq_sta->dbg_fixed_rate = 0;
2422#endif
2423
2424 iwl4965_rs_initialize_lq(priv, conf, sta, lq_sta);
2425}
2426
/*
 * Build the REPLY_TX_LINK_QUALITY_CMD retry table for this station.
 *
 * Fills lq_sta->lq.rs_table with LINK_QUAL_MAX_RETRY_NUM entries, starting
 * from new_rate and stepping down via iwl4965_rs_get_lower_rate(), repeating
 * each rate IWL_NUMBER_TRY (legacy) or IWL_HT_NUMBER_TRY (HT) times and
 * toggling the TX antenna for legacy rates after NUM_TRY_BEFORE_ANT_TOGGLE
 * tries.  Also fills the general and aggregation parameters of the command.
 *
 * @priv may be NULL (callers pass NULL from debugfs and initialize_lq);
 * in that case antenna toggling is skipped, since valid_tx_ant stays 0.
 */
static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
			struct iwl_lq_sta *lq_sta, u32 new_rate)
{
	struct iwl_scale_tbl_info tbl_type;
	int index = 0;
	int rate_idx;
	int repeat_rate = 0;
	u8 ant_toggle_cnt = 0;
	u8 use_ht_possible = 1;
	u8 valid_tx_ant = 0;
	struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;

	/* Override starting rate (index 0) if needed for debug purposes */
	iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);

	/* Interpret new_rate (rate_n_flags) */
	iwl4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
					  &tbl_type, &rate_idx);

	/* How many times should we repeat the initial rate? */
	if (is_legacy(tbl_type.lq_type)) {
		ant_toggle_cnt = 1;
		repeat_rate = IWL_NUMBER_TRY;
	} else {
		repeat_rate = IWL_HT_NUMBER_TRY;
	}

	lq_cmd->general_params.mimo_delimiter =
			is_mimo(tbl_type.lq_type) ? 1 : 0;

	/* Fill 1st table entry (index 0) */
	lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);

	if (iwl4965_num_of_ant(tbl_type.ant_type) == 1) {
		lq_cmd->general_params.single_stream_ant_msk =
						tbl_type.ant_type;
	} else if (iwl4965_num_of_ant(tbl_type.ant_type) == 2) {
		lq_cmd->general_params.dual_stream_ant_msk =
						tbl_type.ant_type;
	} /* otherwise we don't modify the existing value */

	index++;
	repeat_rate--;
	/* NULL priv means no antenna info available (see header comment) */
	if (priv)
		valid_tx_ant = priv->hw_params.valid_tx_ant;

	/* Fill rest of rate table */
	while (index < LINK_QUAL_MAX_RETRY_NUM) {
		/* Repeat initial/next rate.
		 * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
		 * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
		while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
			if (is_legacy(tbl_type.lq_type)) {
				if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
					ant_toggle_cnt++;
				else if (priv &&
					 iwl4965_rs_toggle_antenna(valid_tx_ant,
							&new_rate, &tbl_type))
					ant_toggle_cnt = 1;
			}

			/* Override next rate if needed for debug purposes */
			iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);

			/* Fill next table entry */
			lq_cmd->rs_table[index].rate_n_flags =
					cpu_to_le32(new_rate);
			repeat_rate--;
			index++;
		}

		iwl4965_rs_get_tbl_info_from_mcs(new_rate,
						lq_sta->band, &tbl_type,
						&rate_idx);

		/* Indicate to uCode which entries might be MIMO.
		 * If initial rate was MIMO, this will finally end up
		 * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
		if (is_mimo(tbl_type.lq_type))
			lq_cmd->general_params.mimo_delimiter = index;

		/* Get next rate */
		new_rate = iwl4965_rs_get_lower_rate(lq_sta,
					&tbl_type, rate_idx,
					use_ht_possible);

		/* How many times should we repeat the next rate? */
		if (is_legacy(tbl_type.lq_type)) {
			if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
				ant_toggle_cnt++;
			else if (priv &&
				 iwl4965_rs_toggle_antenna(valid_tx_ant,
						&new_rate, &tbl_type))
				ant_toggle_cnt = 1;

			repeat_rate = IWL_NUMBER_TRY;
		} else {
			repeat_rate = IWL_HT_NUMBER_TRY;
		}

		/* Don't allow HT rates after next pass.
		 * iwl4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
		use_ht_possible = 0;

		/* Override next rate if needed for debug purposes */
		iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);

		/* Fill next table entry */
		lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);

		index++;
		repeat_rate--;
	}

	lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;

	lq_cmd->agg_params.agg_time_limit =
		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
}
2547
/* rate_control_ops .alloc hook: all rate-scale state lives in iwl_priv,
 * so just hand back the hardware's private area — nothing is allocated. */
static void
*iwl4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
	return hw->priv;
}
/* rate scale requires free function to be implemented */
static void iwl4965_rs_free(void *priv_rate)
{
	/* Nothing was allocated by iwl4965_rs_alloc(), so nothing to free. */
	return;
}
2558
/* rate_control_ops .free_sta hook: per-station state is embedded in
 * iwl_station_priv and is released with it, so only trace entry/exit. */
static void iwl4965_rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
			void *priv_sta)
{
	struct iwl_priv *priv __maybe_unused = priv_r;

	IWL_DEBUG_RATE(priv, "enter\n");
	IWL_DEBUG_RATE(priv, "leave\n");
}
2567
2568
2569#ifdef CONFIG_MAC80211_DEBUGFS
/* Generic debugfs open: stash the inode's private pointer (the iwl_lq_sta
 * handed to debugfs at file creation) so read/write handlers can fetch it
 * from file->private_data. */
static int iwl4965_open_file_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
2575static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
2576 u32 *rate_n_flags, int index)
2577{
2578 struct iwl_priv *priv;
2579 u8 valid_tx_ant;
2580 u8 ant_sel_tx;
2581
2582 priv = lq_sta->drv;
2583 valid_tx_ant = priv->hw_params.valid_tx_ant;
2584 if (lq_sta->dbg_fixed_rate) {
2585 ant_sel_tx =
2586 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
2587 >> RATE_MCS_ANT_POS);
2588 if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
2589 *rate_n_flags = lq_sta->dbg_fixed_rate;
2590 IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
2591 } else {
2592 lq_sta->dbg_fixed_rate = 0;
2593 IWL_ERR(priv,
2594 "Invalid antenna selection 0x%X, Valid is 0x%X\n",
2595 ant_sel_tx, valid_tx_ant);
2596 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
2597 }
2598 } else {
2599 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
2600 }
2601}
2602
2603static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file,
2604 const char __user *user_buf, size_t count, loff_t *ppos)
2605{
2606 struct iwl_lq_sta *lq_sta = file->private_data;
2607 struct iwl_priv *priv;
2608 char buf[64];
2609 size_t buf_size;
2610 u32 parsed_rate;
2611 struct iwl_station_priv *sta_priv =
2612 container_of(lq_sta, struct iwl_station_priv, lq_sta);
2613 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
2614
2615 priv = lq_sta->drv;
2616 memset(buf, 0, sizeof(buf));
2617 buf_size = min(count, sizeof(buf) - 1);
2618 if (copy_from_user(buf, user_buf, buf_size))
2619 return -EFAULT;
2620
2621 if (sscanf(buf, "%x", &parsed_rate) == 1)
2622 lq_sta->dbg_fixed_rate = parsed_rate;
2623 else
2624 lq_sta->dbg_fixed_rate = 0;
2625
2626 lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
2627 lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2628 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
2629
2630 IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
2631 lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
2632
2633 if (lq_sta->dbg_fixed_rate) {
2634 iwl4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
2635 iwl_legacy_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
2636 false);
2637 }
2638
2639 return count;
2640}
2641
/*
 * debugfs read handler: dump the station's current link quality state —
 * totals, fixed rate, antennas, active table type, LQ command general and
 * aggregation parameters, and the full rs_table retry ladder.
 *
 * NOTE(review): output is built with unchecked sprintf() into a fixed
 * 1024-byte buffer; looks sized generously for the fields printed, but
 * verify headroom if more lines are added.
 */
static ssize_t iwl4965_rs_sta_dbgfs_scale_table_read(struct file *file,
			char __user *user_buf, size_t count, loff_t *ppos)
{
	char *buff;
	int desc = 0;
	int i = 0;
	int index = 0;
	ssize_t ret;

	struct iwl_lq_sta *lq_sta = file->private_data;
	struct iwl_priv *priv;
	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);

	priv = lq_sta->drv;
	buff = kmalloc(1024, GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
	desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
			lq_sta->total_failed, lq_sta->total_success,
			lq_sta->active_legacy_rate);
	desc += sprintf(buff+desc, "fixed rate 0x%X\n",
			lq_sta->dbg_fixed_rate);
	desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
	    (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
	    (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
	    (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
	desc += sprintf(buff+desc, "lq type %s\n",
	   (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
	if (is_Ht(tbl->lq_type)) {
		desc += sprintf(buff+desc, " %s",
		   (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
		desc += sprintf(buff+desc, " %s",
		   (tbl->is_ht40) ? "40MHz" : "20MHz");
		desc += sprintf(buff+desc, " %s %s %s\n",
		   (tbl->is_SGI) ? "SGI" : "",
		   (lq_sta->is_green) ? "GF enabled" : "",
		   (lq_sta->is_agg) ? "AGG on" : "");
	}
	desc += sprintf(buff+desc, "last tx rate=0x%X\n",
			lq_sta->last_rate_n_flags);
	desc += sprintf(buff+desc, "general:"
		"flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
		lq_sta->lq.general_params.flags,
		lq_sta->lq.general_params.mimo_delimiter,
		lq_sta->lq.general_params.single_stream_ant_msk,
		lq_sta->lq.general_params.dual_stream_ant_msk);

	desc += sprintf(buff+desc, "agg:"
			"time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
			le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
			lq_sta->lq.agg_params.agg_dis_start_th,
			lq_sta->lq.agg_params.agg_frame_cnt_limit);

	desc += sprintf(buff+desc,
			"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
			lq_sta->lq.general_params.start_rate_index[0],
			lq_sta->lq.general_params.start_rate_index[1],
			lq_sta->lq.general_params.start_rate_index[2],
			lq_sta->lq.general_params.start_rate_index[3]);

	/* One line per retry-table entry, decoded back to an MCS index */
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
		index = iwl4965_hwrate_to_plcp_idx(
			le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
		if (is_legacy(tbl->lq_type)) {
			desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
			i,
			le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
			iwl_rate_mcs[index].mbps);
		} else {
			desc += sprintf(buff+desc,
			" rate[%d] 0x%X %smbps (%s)\n",
			i,
			le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
			iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
		}
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
	kfree(buff);
	return ret;
}
2725
2726static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
2727 .write = iwl4965_rs_sta_dbgfs_scale_table_write,
2728 .read = iwl4965_rs_sta_dbgfs_scale_table_read,
2729 .open = iwl4965_open_file_generic,
2730 .llseek = default_llseek,
2731};
2732static ssize_t iwl4965_rs_sta_dbgfs_stats_table_read(struct file *file,
2733 char __user *user_buf, size_t count, loff_t *ppos)
2734{
2735 char *buff;
2736 int desc = 0;
2737 int i, j;
2738 ssize_t ret;
2739
2740 struct iwl_lq_sta *lq_sta = file->private_data;
2741
2742 buff = kmalloc(1024, GFP_KERNEL);
2743 if (!buff)
2744 return -ENOMEM;
2745
2746 for (i = 0; i < LQ_SIZE; i++) {
2747 desc += sprintf(buff+desc,
2748 "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
2749 "rate=0x%X\n",
2750 lq_sta->active_tbl == i ? "*" : "x",
2751 lq_sta->lq_info[i].lq_type,
2752 lq_sta->lq_info[i].is_SGI,
2753 lq_sta->lq_info[i].is_ht40,
2754 lq_sta->lq_info[i].is_dup,
2755 lq_sta->is_green,
2756 lq_sta->lq_info[i].current_rate);
2757 for (j = 0; j < IWL_RATE_COUNT; j++) {
2758 desc += sprintf(buff+desc,
2759 "counter=%d success=%d %%=%d\n",
2760 lq_sta->lq_info[i].win[j].counter,
2761 lq_sta->lq_info[i].win[j].success_counter,
2762 lq_sta->lq_info[i].win[j].success_ratio);
2763 }
2764 }
2765 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2766 kfree(buff);
2767 return ret;
2768}
2769
2770static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
2771 .read = iwl4965_rs_sta_dbgfs_stats_table_read,
2772 .open = iwl4965_open_file_generic,
2773 .llseek = default_llseek,
2774};
2775
2776static ssize_t iwl4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
2777 char __user *user_buf, size_t count, loff_t *ppos)
2778{
2779 char buff[120];
2780 int desc = 0;
2781 ssize_t ret;
2782
2783 struct iwl_lq_sta *lq_sta = file->private_data;
2784 struct iwl_priv *priv;
2785 struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
2786
2787 priv = lq_sta->drv;
2788
2789 if (is_Ht(tbl->lq_type))
2790 desc += sprintf(buff+desc,
2791 "Bit Rate= %d Mb/s\n",
2792 tbl->expected_tpt[lq_sta->last_txrate_idx]);
2793 else
2794 desc += sprintf(buff+desc,
2795 "Bit Rate= %d Mb/s\n",
2796 iwlegacy_rates[lq_sta->last_txrate_idx].ieee >> 1);
2797
2798 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2799 return ret;
2800}
2801
2802static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
2803 .read = iwl4965_rs_sta_dbgfs_rate_scale_data_read,
2804 .open = iwl4965_open_file_generic,
2805 .llseek = default_llseek,
2806};
2807
2808static void iwl4965_rs_add_debugfs(void *priv, void *priv_sta,
2809 struct dentry *dir)
2810{
2811 struct iwl_lq_sta *lq_sta = priv_sta;
2812 lq_sta->rs_sta_dbgfs_scale_table_file =
2813 debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
2814 lq_sta, &rs_sta_dbgfs_scale_table_ops);
2815 lq_sta->rs_sta_dbgfs_stats_table_file =
2816 debugfs_create_file("rate_stats_table", S_IRUSR, dir,
2817 lq_sta, &rs_sta_dbgfs_stats_table_ops);
2818 lq_sta->rs_sta_dbgfs_rate_scale_data_file =
2819 debugfs_create_file("rate_scale_data", S_IRUSR, dir,
2820 lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
2821 lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
2822 debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
2823 &lq_sta->tx_agg_tid_en);
2824
2825}
2826
2827static void iwl4965_rs_remove_debugfs(void *priv, void *priv_sta)
2828{
2829 struct iwl_lq_sta *lq_sta = priv_sta;
2830 debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
2831 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
2832 debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
2833 debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
2834}
2835#endif
2836
2837/*
2838 * Initialization of rate scaling information is done by driver after
2839 * the station is added. Since mac80211 calls this function before a
2840 * station is added we ignore it.
2841 */
2842static void
2843iwl4965_rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
2844 struct ieee80211_sta *sta, void *priv_sta)
2845{
2846}
2847static struct rate_control_ops rs_4965_ops = {
2848 .module = NULL,
2849 .name = IWL4965_RS_NAME,
2850 .tx_status = iwl4965_rs_tx_status,
2851 .get_rate = iwl4965_rs_get_rate,
2852 .rate_init = iwl4965_rs_rate_init_stub,
2853 .alloc = iwl4965_rs_alloc,
2854 .free = iwl4965_rs_free,
2855 .alloc_sta = iwl4965_rs_alloc_sta,
2856 .free_sta = iwl4965_rs_free_sta,
2857#ifdef CONFIG_MAC80211_DEBUGFS
2858 .add_sta_debugfs = iwl4965_rs_add_debugfs,
2859 .remove_sta_debugfs = iwl4965_rs_remove_debugfs,
2860#endif
2861};
2862
2863int iwl4965_rate_control_register(void)
2864{
2865 return ieee80211_rate_control_register(&rs_4965_ops);
2866}
2867
2868void iwl4965_rate_control_unregister(void)
2869{
2870 ieee80211_rate_control_unregister(&rs_4965_ops);
2871}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
deleted file mode 100644
index 2b144bbfc3c5..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
+++ /dev/null
@@ -1,215 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-4965-calib.h"
38#include "iwl-sta.h"
39#include "iwl-io.h"
40#include "iwl-helpers.h"
41#include "iwl-4965-hw.h"
42#include "iwl-4965.h"
43
44void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
45 struct iwl_rx_mem_buffer *rxb)
46
47{
48 struct iwl_rx_packet *pkt = rxb_addr(rxb);
49 struct iwl_missed_beacon_notif *missed_beacon;
50
51 missed_beacon = &pkt->u.missed_beacon;
52 if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
53 priv->missed_beacon_threshold) {
54 IWL_DEBUG_CALIB(priv,
55 "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
56 le32_to_cpu(missed_beacon->consecutive_missed_beacons),
57 le32_to_cpu(missed_beacon->total_missed_becons),
58 le32_to_cpu(missed_beacon->num_recvd_beacons),
59 le32_to_cpu(missed_beacon->num_expected_beacons));
60 if (!test_bit(STATUS_SCANNING, &priv->status))
61 iwl4965_init_sensitivity(priv);
62 }
63}
64
65/* Calculate noise level, based on measurements during network silence just
66 * before arriving beacon. This measurement can be done only if we know
67 * exactly when to expect beacons, therefore only when we're associated. */
68static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
69{
70 struct statistics_rx_non_phy *rx_info;
71 int num_active_rx = 0;
72 int total_silence = 0;
73 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
74 int last_rx_noise;
75
76 rx_info = &(priv->_4965.statistics.rx.general);
77 bcn_silence_a =
78 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
79 bcn_silence_b =
80 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
81 bcn_silence_c =
82 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
83
84 if (bcn_silence_a) {
85 total_silence += bcn_silence_a;
86 num_active_rx++;
87 }
88 if (bcn_silence_b) {
89 total_silence += bcn_silence_b;
90 num_active_rx++;
91 }
92 if (bcn_silence_c) {
93 total_silence += bcn_silence_c;
94 num_active_rx++;
95 }
96
97 /* Average among active antennas */
98 if (num_active_rx)
99 last_rx_noise = (total_silence / num_active_rx) - 107;
100 else
101 last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
102
103 IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
104 bcn_silence_a, bcn_silence_b, bcn_silence_c,
105 last_rx_noise);
106}
107
108#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
109/*
110 * based on the assumption of all statistics counter are in DWORD
111 * FIXME: This function is for debugging, do not deal with
112 * the case of counters roll-over.
113 */
114static void iwl4965_accumulative_statistics(struct iwl_priv *priv,
115 __le32 *stats)
116{
117 int i, size;
118 __le32 *prev_stats;
119 u32 *accum_stats;
120 u32 *delta, *max_delta;
121 struct statistics_general_common *general, *accum_general;
122 struct statistics_tx *tx, *accum_tx;
123
124 prev_stats = (__le32 *)&priv->_4965.statistics;
125 accum_stats = (u32 *)&priv->_4965.accum_statistics;
126 size = sizeof(struct iwl_notif_statistics);
127 general = &priv->_4965.statistics.general.common;
128 accum_general = &priv->_4965.accum_statistics.general.common;
129 tx = &priv->_4965.statistics.tx;
130 accum_tx = &priv->_4965.accum_statistics.tx;
131 delta = (u32 *)&priv->_4965.delta_statistics;
132 max_delta = (u32 *)&priv->_4965.max_delta;
133
134 for (i = sizeof(__le32); i < size;
135 i += sizeof(__le32), stats++, prev_stats++, delta++,
136 max_delta++, accum_stats++) {
137 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
138 *delta = (le32_to_cpu(*stats) -
139 le32_to_cpu(*prev_stats));
140 *accum_stats += *delta;
141 if (*delta > *max_delta)
142 *max_delta = *delta;
143 }
144 }
145
146 /* reset accumulative statistics for "no-counter" type statistics */
147 accum_general->temperature = general->temperature;
148 accum_general->ttl_timestamp = general->ttl_timestamp;
149}
150#endif
151
152#define REG_RECALIB_PERIOD (60)
153
154void iwl4965_rx_statistics(struct iwl_priv *priv,
155 struct iwl_rx_mem_buffer *rxb)
156{
157 int change;
158 struct iwl_rx_packet *pkt = rxb_addr(rxb);
159
160 IWL_DEBUG_RX(priv,
161 "Statistics notification received (%d vs %d).\n",
162 (int)sizeof(struct iwl_notif_statistics),
163 le32_to_cpu(pkt->len_n_flags) &
164 FH_RSCSR_FRAME_SIZE_MSK);
165
166 change = ((priv->_4965.statistics.general.common.temperature !=
167 pkt->u.stats.general.common.temperature) ||
168 ((priv->_4965.statistics.flag &
169 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
170 (pkt->u.stats.flag &
171 STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
172#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
173 iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
174#endif
175
176 /* TODO: reading some of statistics is unneeded */
177 memcpy(&priv->_4965.statistics, &pkt->u.stats,
178 sizeof(priv->_4965.statistics));
179
180 set_bit(STATUS_STATISTICS, &priv->status);
181
182 /* Reschedule the statistics timer to occur in
183 * REG_RECALIB_PERIOD seconds to ensure we get a
184 * thermal update even if the uCode doesn't give
185 * us one */
186 mod_timer(&priv->statistics_periodic, jiffies +
187 msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
188
189 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
190 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
191 iwl4965_rx_calc_noise(priv);
192 queue_work(priv->workqueue, &priv->run_time_calib_work);
193 }
194 if (priv->cfg->ops->lib->temp_ops.temperature && change)
195 priv->cfg->ops->lib->temp_ops.temperature(priv);
196}
197
198void iwl4965_reply_statistics(struct iwl_priv *priv,
199 struct iwl_rx_mem_buffer *rxb)
200{
201 struct iwl_rx_packet *pkt = rxb_addr(rxb);
202
203 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
204#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
205 memset(&priv->_4965.accum_statistics, 0,
206 sizeof(struct iwl_notif_statistics));
207 memset(&priv->_4965.delta_statistics, 0,
208 sizeof(struct iwl_notif_statistics));
209 memset(&priv->_4965.max_delta, 0,
210 sizeof(struct iwl_notif_statistics));
211#endif
212 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
213 }
214 iwl4965_rx_statistics(priv, rxb);
215}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
deleted file mode 100644
index a262c23553d2..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
+++ /dev/null
@@ -1,721 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <net/mac80211.h>
31
32#include "iwl-dev.h"
33#include "iwl-core.h"
34#include "iwl-sta.h"
35#include "iwl-4965.h"
36
37static struct iwl_link_quality_cmd *
38iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id)
39{
40 int i, r;
41 struct iwl_link_quality_cmd *link_cmd;
42 u32 rate_flags = 0;
43 __le32 rate_n_flags;
44
45 link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
46 if (!link_cmd) {
47 IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
48 return NULL;
49 }
50 /* Set up the rate scaling to start at selected rate, fall back
51 * all the way down to 1M in IEEE order, and then spin on 1M */
52 if (priv->band == IEEE80211_BAND_5GHZ)
53 r = IWL_RATE_6M_INDEX;
54 else
55 r = IWL_RATE_1M_INDEX;
56
57 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
58 rate_flags |= RATE_MCS_CCK_MSK;
59
60 rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) <<
61 RATE_MCS_ANT_POS;
62 rate_n_flags = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp,
63 rate_flags);
64 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
65 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
66
67 link_cmd->general_params.single_stream_ant_msk =
68 iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
69
70 link_cmd->general_params.dual_stream_ant_msk =
71 priv->hw_params.valid_tx_ant &
72 ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
73 if (!link_cmd->general_params.dual_stream_ant_msk) {
74 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
75 } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
76 link_cmd->general_params.dual_stream_ant_msk =
77 priv->hw_params.valid_tx_ant;
78 }
79
80 link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
81 link_cmd->agg_params.agg_time_limit =
82 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
83
84 link_cmd->sta_id = sta_id;
85
86 return link_cmd;
87}
88
89/*
90 * iwl4965_add_bssid_station - Add the special IBSS BSSID station
91 *
92 * Function sleeps.
93 */
94int
95iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
96 const u8 *addr, u8 *sta_id_r)
97{
98 int ret;
99 u8 sta_id;
100 struct iwl_link_quality_cmd *link_cmd;
101 unsigned long flags;
102
103 if (sta_id_r)
104 *sta_id_r = IWL_INVALID_STATION;
105
106 ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
107 if (ret) {
108 IWL_ERR(priv, "Unable to add station %pM\n", addr);
109 return ret;
110 }
111
112 if (sta_id_r)
113 *sta_id_r = sta_id;
114
115 spin_lock_irqsave(&priv->sta_lock, flags);
116 priv->stations[sta_id].used |= IWL_STA_LOCAL;
117 spin_unlock_irqrestore(&priv->sta_lock, flags);
118
119 /* Set up default rate scaling table in device's station table */
120 link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
121 if (!link_cmd) {
122 IWL_ERR(priv,
123 "Unable to initialize rate scaling for station %pM.\n",
124 addr);
125 return -ENOMEM;
126 }
127
128 ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
129 if (ret)
130 IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
131
132 spin_lock_irqsave(&priv->sta_lock, flags);
133 priv->stations[sta_id].lq = link_cmd;
134 spin_unlock_irqrestore(&priv->sta_lock, flags);
135
136 return 0;
137}
138
139static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv,
140 struct iwl_rxon_context *ctx,
141 bool send_if_empty)
142{
143 int i, not_empty = 0;
144 u8 buff[sizeof(struct iwl_wep_cmd) +
145 sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
146 struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
147 size_t cmd_size = sizeof(struct iwl_wep_cmd);
148 struct iwl_host_cmd cmd = {
149 .id = ctx->wep_key_cmd,
150 .data = wep_cmd,
151 .flags = CMD_SYNC,
152 };
153
154 might_sleep();
155
156 memset(wep_cmd, 0, cmd_size +
157 (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
158
159 for (i = 0; i < WEP_KEYS_MAX ; i++) {
160 wep_cmd->key[i].key_index = i;
161 if (ctx->wep_keys[i].key_size) {
162 wep_cmd->key[i].key_offset = i;
163 not_empty = 1;
164 } else {
165 wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
166 }
167
168 wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
169 memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
170 ctx->wep_keys[i].key_size);
171 }
172
173 wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
174 wep_cmd->num_keys = WEP_KEYS_MAX;
175
176 cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
177
178 cmd.len = cmd_size;
179
180 if (not_empty || send_if_empty)
181 return iwl_legacy_send_cmd(priv, &cmd);
182 else
183 return 0;
184}
185
186int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
187 struct iwl_rxon_context *ctx)
188{
189 lockdep_assert_held(&priv->mutex);
190
191 return iwl4965_static_wepkey_cmd(priv, ctx, false);
192}
193
194int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
195 struct iwl_rxon_context *ctx,
196 struct ieee80211_key_conf *keyconf)
197{
198 int ret;
199
200 lockdep_assert_held(&priv->mutex);
201
202 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
203 keyconf->keyidx);
204
205 memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
206 if (iwl_legacy_is_rfkill(priv)) {
207 IWL_DEBUG_WEP(priv,
208 "Not sending REPLY_WEPKEY command due to RFKILL.\n");
209 /* but keys in device are clear anyway so return success */
210 return 0;
211 }
212 ret = iwl4965_static_wepkey_cmd(priv, ctx, 1);
213 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
214 keyconf->keyidx, ret);
215
216 return ret;
217}
218
219int iwl4965_set_default_wep_key(struct iwl_priv *priv,
220 struct iwl_rxon_context *ctx,
221 struct ieee80211_key_conf *keyconf)
222{
223 int ret;
224
225 lockdep_assert_held(&priv->mutex);
226
227 if (keyconf->keylen != WEP_KEY_LEN_128 &&
228 keyconf->keylen != WEP_KEY_LEN_64) {
229 IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
230 return -EINVAL;
231 }
232
233 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
234 keyconf->hw_key_idx = HW_KEY_DEFAULT;
235 priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
236
237 ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
238 memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
239 keyconf->keylen);
240
241 ret = iwl4965_static_wepkey_cmd(priv, ctx, false);
242 IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
243 keyconf->keylen, keyconf->keyidx, ret);
244
245 return ret;
246}
247
248static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv,
249 struct iwl_rxon_context *ctx,
250 struct ieee80211_key_conf *keyconf,
251 u8 sta_id)
252{
253 unsigned long flags;
254 __le16 key_flags = 0;
255 struct iwl_legacy_addsta_cmd sta_cmd;
256
257 lockdep_assert_held(&priv->mutex);
258
259 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
260
261 key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
262 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
263 key_flags &= ~STA_KEY_FLG_INVALID;
264
265 if (keyconf->keylen == WEP_KEY_LEN_128)
266 key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
267
268 if (sta_id == ctx->bcast_sta_id)
269 key_flags |= STA_KEY_MULTICAST_MSK;
270
271 spin_lock_irqsave(&priv->sta_lock, flags);
272
273 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
274 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
275 priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
276
277 memcpy(priv->stations[sta_id].keyinfo.key,
278 keyconf->key, keyconf->keylen);
279
280 memcpy(&priv->stations[sta_id].sta.key.key[3],
281 keyconf->key, keyconf->keylen);
282
283 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
284 == STA_KEY_FLG_NO_ENC)
285 priv->stations[sta_id].sta.key.key_offset =
286 iwl_legacy_get_free_ucode_key_index(priv);
287 /* else, we are overriding an existing key => no need to allocated room
288 * in uCode. */
289
290 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
291 "no space for a new key");
292
293 priv->stations[sta_id].sta.key.key_flags = key_flags;
294 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
295 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
296
297 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
298 sizeof(struct iwl_legacy_addsta_cmd));
299 spin_unlock_irqrestore(&priv->sta_lock, flags);
300
301 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
302}
303
304static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
305 struct iwl_rxon_context *ctx,
306 struct ieee80211_key_conf *keyconf,
307 u8 sta_id)
308{
309 unsigned long flags;
310 __le16 key_flags = 0;
311 struct iwl_legacy_addsta_cmd sta_cmd;
312
313 lockdep_assert_held(&priv->mutex);
314
315 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
316 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
317 key_flags &= ~STA_KEY_FLG_INVALID;
318
319 if (sta_id == ctx->bcast_sta_id)
320 key_flags |= STA_KEY_MULTICAST_MSK;
321
322 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
323
324 spin_lock_irqsave(&priv->sta_lock, flags);
325 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
326 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
327
328 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
329 keyconf->keylen);
330
331 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
332 keyconf->keylen);
333
334 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
335 == STA_KEY_FLG_NO_ENC)
336 priv->stations[sta_id].sta.key.key_offset =
337 iwl_legacy_get_free_ucode_key_index(priv);
338 /* else, we are overriding an existing key => no need to allocated room
339 * in uCode. */
340
341 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
342 "no space for a new key");
343
344 priv->stations[sta_id].sta.key.key_flags = key_flags;
345 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
346 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
347
348 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
349 sizeof(struct iwl_legacy_addsta_cmd));
350 spin_unlock_irqrestore(&priv->sta_lock, flags);
351
352 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
353}
354
355static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv,
356 struct iwl_rxon_context *ctx,
357 struct ieee80211_key_conf *keyconf,
358 u8 sta_id)
359{
360 unsigned long flags;
361 int ret = 0;
362 __le16 key_flags = 0;
363
364 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
365 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
366 key_flags &= ~STA_KEY_FLG_INVALID;
367
368 if (sta_id == ctx->bcast_sta_id)
369 key_flags |= STA_KEY_MULTICAST_MSK;
370
371 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
372 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
373
374 spin_lock_irqsave(&priv->sta_lock, flags);
375
376 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
377 priv->stations[sta_id].keyinfo.keylen = 16;
378
379 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
380 == STA_KEY_FLG_NO_ENC)
381 priv->stations[sta_id].sta.key.key_offset =
382 iwl_legacy_get_free_ucode_key_index(priv);
383 /* else, we are overriding an existing key => no need to allocated room
384 * in uCode. */
385
386 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
387 "no space for a new key");
388
389 priv->stations[sta_id].sta.key.key_flags = key_flags;
390
391
392 /* This copy is acutally not needed: we get the key with each TX */
393 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
394
395 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);
396
397 spin_unlock_irqrestore(&priv->sta_lock, flags);
398
399 return ret;
400}
401
402void iwl4965_update_tkip_key(struct iwl_priv *priv,
403 struct iwl_rxon_context *ctx,
404 struct ieee80211_key_conf *keyconf,
405 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
406{
407 u8 sta_id;
408 unsigned long flags;
409 int i;
410
411 if (iwl_legacy_scan_cancel(priv)) {
412 /* cancel scan failed, just live w/ bad key and rely
413 briefly on SW decryption */
414 return;
415 }
416
417 sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta);
418 if (sta_id == IWL_INVALID_STATION)
419 return;
420
421 spin_lock_irqsave(&priv->sta_lock, flags);
422
423 priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
424
425 for (i = 0; i < 5; i++)
426 priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
427 cpu_to_le16(phase1key[i]);
428
429 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
430 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
431
432 iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
433
434 spin_unlock_irqrestore(&priv->sta_lock, flags);
435
436}
437
438int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
439 struct iwl_rxon_context *ctx,
440 struct ieee80211_key_conf *keyconf,
441 u8 sta_id)
442{
443 unsigned long flags;
444 u16 key_flags;
445 u8 keyidx;
446 struct iwl_legacy_addsta_cmd sta_cmd;
447
448 lockdep_assert_held(&priv->mutex);
449
450 ctx->key_mapping_keys--;
451
452 spin_lock_irqsave(&priv->sta_lock, flags);
453 key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
454 keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
455
456 IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
457 keyconf->keyidx, sta_id);
458
459 if (keyconf->keyidx != keyidx) {
460 /* We need to remove a key with index different that the one
461 * in the uCode. This means that the key we need to remove has
462 * been replaced by another one with different index.
463 * Don't do anything and return ok
464 */
465 spin_unlock_irqrestore(&priv->sta_lock, flags);
466 return 0;
467 }
468
469 if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
470 IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
471 keyconf->keyidx, key_flags);
472 spin_unlock_irqrestore(&priv->sta_lock, flags);
473 return 0;
474 }
475
476 if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
477 &priv->ucode_key_table))
478 IWL_ERR(priv, "index %d not used in uCode key table.\n",
479 priv->stations[sta_id].sta.key.key_offset);
480 memset(&priv->stations[sta_id].keyinfo, 0,
481 sizeof(struct iwl_hw_key));
482 memset(&priv->stations[sta_id].sta.key, 0,
483 sizeof(struct iwl4965_keyinfo));
484 priv->stations[sta_id].sta.key.key_flags =
485 STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
486 priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
487 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
488 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
489
490 if (iwl_legacy_is_rfkill(priv)) {
491 IWL_DEBUG_WEP(priv,
492 "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
493 spin_unlock_irqrestore(&priv->sta_lock, flags);
494 return 0;
495 }
496 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
497 sizeof(struct iwl_legacy_addsta_cmd));
498 spin_unlock_irqrestore(&priv->sta_lock, flags);
499
500 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
501}
502
503int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
504 struct ieee80211_key_conf *keyconf, u8 sta_id)
505{
506 int ret;
507
508 lockdep_assert_held(&priv->mutex);
509
510 ctx->key_mapping_keys++;
511 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
512
513 switch (keyconf->cipher) {
514 case WLAN_CIPHER_SUITE_CCMP:
515 ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx,
516 keyconf, sta_id);
517 break;
518 case WLAN_CIPHER_SUITE_TKIP:
519 ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx,
520 keyconf, sta_id);
521 break;
522 case WLAN_CIPHER_SUITE_WEP40:
523 case WLAN_CIPHER_SUITE_WEP104:
524 ret = iwl4965_set_wep_dynamic_key_info(priv, ctx,
525 keyconf, sta_id);
526 break;
527 default:
528 IWL_ERR(priv,
529 "Unknown alg: %s cipher = %x\n", __func__,
530 keyconf->cipher);
531 ret = -EINVAL;
532 }
533
534 IWL_DEBUG_WEP(priv,
535 "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
536 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
537 sta_id, ret);
538
539 return ret;
540}
541
542/**
543 * iwl4965_alloc_bcast_station - add broadcast station into driver's station table.
544 *
545 * This adds the broadcast station into the driver's station table
546 * and marks it driver active, so that it will be restored to the
547 * device at the next best time.
548 */
549int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
550 struct iwl_rxon_context *ctx)
551{
552 struct iwl_link_quality_cmd *link_cmd;
553 unsigned long flags;
554 u8 sta_id;
555
556 spin_lock_irqsave(&priv->sta_lock, flags);
557 sta_id = iwl_legacy_prep_station(priv, ctx, iwlegacy_bcast_addr,
558 false, NULL);
559 if (sta_id == IWL_INVALID_STATION) {
560 IWL_ERR(priv, "Unable to prepare broadcast station\n");
561 spin_unlock_irqrestore(&priv->sta_lock, flags);
562
563 return -EINVAL;
564 }
565
566 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
567 priv->stations[sta_id].used |= IWL_STA_BCAST;
568 spin_unlock_irqrestore(&priv->sta_lock, flags);
569
570 link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
571 if (!link_cmd) {
572 IWL_ERR(priv,
573 "Unable to initialize rate scaling for bcast station.\n");
574 return -ENOMEM;
575 }
576
577 spin_lock_irqsave(&priv->sta_lock, flags);
578 priv->stations[sta_id].lq = link_cmd;
579 spin_unlock_irqrestore(&priv->sta_lock, flags);
580
581 return 0;
582}
583
584/**
585 * iwl4965_update_bcast_station - update broadcast station's LQ command
586 *
587 * Only used by iwl4965. Placed here to have all bcast station management
588 * code together.
589 */
590static int iwl4965_update_bcast_station(struct iwl_priv *priv,
591 struct iwl_rxon_context *ctx)
592{
593 unsigned long flags;
594 struct iwl_link_quality_cmd *link_cmd;
595 u8 sta_id = ctx->bcast_sta_id;
596
597 link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
598 if (!link_cmd) {
599 IWL_ERR(priv,
600 "Unable to initialize rate scaling for bcast station.\n");
601 return -ENOMEM;
602 }
603
604 spin_lock_irqsave(&priv->sta_lock, flags);
605 if (priv->stations[sta_id].lq)
606 kfree(priv->stations[sta_id].lq);
607 else
608 IWL_DEBUG_INFO(priv,
609 "Bcast station rate scaling has not been initialized yet.\n");
610 priv->stations[sta_id].lq = link_cmd;
611 spin_unlock_irqrestore(&priv->sta_lock, flags);
612
613 return 0;
614}
615
616int iwl4965_update_bcast_stations(struct iwl_priv *priv)
617{
618 struct iwl_rxon_context *ctx;
619 int ret = 0;
620
621 for_each_context(priv, ctx) {
622 ret = iwl4965_update_bcast_station(priv, ctx);
623 if (ret)
624 break;
625 }
626
627 return ret;
628}
629
630/**
631 * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
632 */
633int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
634{
635 unsigned long flags;
636 struct iwl_legacy_addsta_cmd sta_cmd;
637
638 lockdep_assert_held(&priv->mutex);
639
640 /* Remove "disable" flag, to enable Tx for this TID */
641 spin_lock_irqsave(&priv->sta_lock, flags);
642 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
643 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
644 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
645 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
646 sizeof(struct iwl_legacy_addsta_cmd));
647 spin_unlock_irqrestore(&priv->sta_lock, flags);
648
649 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
650}
651
652int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
653 int tid, u16 ssn)
654{
655 unsigned long flags;
656 int sta_id;
657 struct iwl_legacy_addsta_cmd sta_cmd;
658
659 lockdep_assert_held(&priv->mutex);
660
661 sta_id = iwl_legacy_sta_id(sta);
662 if (sta_id == IWL_INVALID_STATION)
663 return -ENXIO;
664
665 spin_lock_irqsave(&priv->sta_lock, flags);
666 priv->stations[sta_id].sta.station_flags_msk = 0;
667 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
668 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
669 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
670 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
671 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
672 sizeof(struct iwl_legacy_addsta_cmd));
673 spin_unlock_irqrestore(&priv->sta_lock, flags);
674
675 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
676}
677
678int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
679 int tid)
680{
681 unsigned long flags;
682 int sta_id;
683 struct iwl_legacy_addsta_cmd sta_cmd;
684
685 lockdep_assert_held(&priv->mutex);
686
687 sta_id = iwl_legacy_sta_id(sta);
688 if (sta_id == IWL_INVALID_STATION) {
689 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
690 return -ENXIO;
691 }
692
693 spin_lock_irqsave(&priv->sta_lock, flags);
694 priv->stations[sta_id].sta.station_flags_msk = 0;
695 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
696 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
697 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
698 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
699 sizeof(struct iwl_legacy_addsta_cmd));
700 spin_unlock_irqrestore(&priv->sta_lock, flags);
701
702 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
703}
704
705void
706iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
707{
708 unsigned long flags;
709
710 spin_lock_irqsave(&priv->sta_lock, flags);
711 priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
712 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
713 priv->stations[sta_id].sta.sta.modify_mask =
714 STA_MODIFY_SLEEP_TX_COUNT_MSK;
715 priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
716 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
717 iwl_legacy_send_add_sta(priv,
718 &priv->stations[sta_id].sta, CMD_ASYNC);
719 spin_unlock_irqrestore(&priv->sta_lock, flags);
720
721}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
deleted file mode 100644
index 7f12e3638bae..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
+++ /dev/null
@@ -1,1378 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40#include "iwl-4965-hw.h"
41#include "iwl-4965.h"
42
43/*
44 * mac80211 queues, ACs, hardware queues, FIFOs.
45 *
46 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
47 *
48 * Mac80211 uses the following numbers, which we get as from it
49 * by way of skb_get_queue_mapping(skb):
50 *
51 * VO 0
52 * VI 1
53 * BE 2
54 * BK 3
55 *
56 *
57 * Regular (not A-MPDU) frames are put into hardware queues corresponding
58 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
59 * own queue per aggregation session (RA/TID combination), such queues are
60 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
61 * order to map frames to the right queue, we also need an AC->hw queue
62 * mapping. This is implemented here.
63 *
64 * Due to the way hw queues are set up (by the hw specific modules like
65 * iwl-4965.c), the AC->hw queue mapping is the identity
66 * mapping.
67 */
68
/* Map TID 0-7 to its mac80211 access category (802.11 UP->AC mapping). */
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO
};
79
80static inline int iwl4965_get_ac_from_tid(u16 tid)
81{
82 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
83 return tid_to_ac[tid];
84
85 /* no support for TIDs 8-15 yet */
86 return -EINVAL;
87}
88
89static inline int
90iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
91{
92 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
93 return ctx->ac_to_fifo[tid_to_ac[tid]];
94
95 /* no support for TIDs 8-15 yet */
96 return -EINVAL;
97}
98
/*
 * handle build REPLY_TX command notification.
 *
 * Fills in the common (non-rate, non-crypto) fields of the TX command:
 * ACK/sequence-control flags, station id, TID, protection flags and
 * power-management frame timeout, derived from the 802.11 header and
 * mac80211 tx_info flags.
 */
static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv,
					struct sk_buff *skb,
					struct iwl_tx_cmd *tx_cmd,
					struct ieee80211_tx_info *info,
					struct ieee80211_hdr *hdr,
					u8 std_id)
{
	__le16 fc = hdr->frame_control;
	/* accumulate flag changes locally, write back once at the end */
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		/* first fragment of a probe response: latch the TSF */
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		/* QoS data: device tracks the sequence number per TID */
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	/* RTS/CTS protection flags */
	iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		/* (re)assoc requests get a slightly longer PM frame timeout */
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
156
157#define RTS_DFAULT_RETRY_LIMIT 60
158
159static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv,
160 struct iwl_tx_cmd *tx_cmd,
161 struct ieee80211_tx_info *info,
162 __le16 fc)
163{
164 u32 rate_flags;
165 int rate_idx;
166 u8 rts_retry_limit;
167 u8 data_retry_limit;
168 u8 rate_plcp;
169
170 /* Set retry limit on DATA packets and Probe Responses*/
171 if (ieee80211_is_probe_resp(fc))
172 data_retry_limit = 3;
173 else
174 data_retry_limit = IWL4965_DEFAULT_TX_RETRY;
175 tx_cmd->data_retry_limit = data_retry_limit;
176
177 /* Set retry limit on RTS packets */
178 rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
179 if (data_retry_limit < rts_retry_limit)
180 rts_retry_limit = data_retry_limit;
181 tx_cmd->rts_retry_limit = rts_retry_limit;
182
183 /* DATA packets will use the uCode station table for rate/antenna
184 * selection */
185 if (ieee80211_is_data(fc)) {
186 tx_cmd->initial_rate_index = 0;
187 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
188 return;
189 }
190
191 /**
192 * If the current TX rate stored in mac80211 has the MCS bit set, it's
193 * not really a TX rate. Thus, we use the lowest supported rate for
194 * this band. Also use the lowest supported rate if the stored rate
195 * index is invalid.
196 */
197 rate_idx = info->control.rates[0].idx;
198 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
199 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
200 rate_idx = rate_lowest_index(&priv->bands[info->band],
201 info->control.sta);
202 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
203 if (info->band == IEEE80211_BAND_5GHZ)
204 rate_idx += IWL_FIRST_OFDM_RATE;
205 /* Get PLCP rate for tx_cmd->rate_n_flags */
206 rate_plcp = iwlegacy_rates[rate_idx].plcp;
207 /* Zero out flags for this packet */
208 rate_flags = 0;
209
210 /* Set CCK flag as needed */
211 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
212 rate_flags |= RATE_MCS_CCK_MSK;
213
214 /* Set up antennas */
215 priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
216 priv->hw_params.valid_tx_ant);
217
218 rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
219
220 /* Set the rate in the TX cmd */
221 tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
222}
223
/*
 * Fill in the hardware-crypto fields of the TX command from the
 * mac80211-selected key (CCMP, TKIP or WEP40/104).
 */
static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		/* per-packet phase-2 key derived by mac80211 */
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		/* WEP key material starts at offset 3 of the key field */
		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}
265
/*
 * start REPLY_TX command process
 *
 * Main TX entry point: maps the frame to a station and hardware queue,
 * builds the TX command (basic flags, crypto, rate), DMA-maps the
 * command+header and the payload into a TFD, and advances the queue
 * write pointer. Returns 0 when the frame was queued, -1 when dropped.
 */
int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_cmd *tx_cmd;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	int txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	bool is_agg = false;

	if (info->control.vif)
		ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif);

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_legacy_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	hdr_len = ieee80211_hdrlen(fc);

	/* For management frames use broadcast id to do not break aggregation */
	if (!ieee80211_is_data(fc))
		sta_id = ctx->bcast_sta_id;
	else {
		/* Find index into station table for destination station */
		sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);

		if (sta_id == IWL_INVALID_STATION) {
			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
				       hdr->addr1);
			goto drop_unlock;
		}
	}

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	if (sta)
		sta_priv = (void *)sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 */
		iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	/*
	 * Send this frame after DTIM -- there's a special queue
	 * reserved for this for contexts that support AP mode.
	 */
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		txq_id = ctx->mcast_queue;
		/*
		 * The microcode will clear the more data
		 * bit in the last frame it transmits.
		 */
		hdr->frame_control |=
				cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else
		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];

	/* irqs already disabled/saved above when locking priv->lock */
	spin_lock(&priv->sta_lock);

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
			spin_unlock(&priv->sta_lock);
			goto drop_unlock;
		}
		/* driver keeps its own per-TID sequence counter */
		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
			is_agg = true;
		}
	}

	txq = &priv->txq[txq_id];
	q = &txq->q;

	/* drop if the target queue has no room left */
	if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) {
		spin_unlock(&priv->sta_lock);
		goto drop_unlock;
	}

	if (ieee80211_is_data_qos(fc)) {
		priv->stations[sta_id].tid[tid].tfds_in_queue++;
		/* only commit the new sequence number on the last fragment */
		if (!ieee80211_has_morefrags(fc))
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	}

	spin_unlock(&priv->sta_lock);

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb = skb;
	txq->txb[q->write_ptr].ctx = ctx;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[q->write_ptr];
	out_meta = &txq->meta[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);


	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
	iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);

	iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc);

	iwl_legacy_update_stats(priv, true, fc, len);
	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev,
				    &out_cmd->hdr, firstlen,
				    PCI_DMA_BIDIRECTIONAL);
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);
	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   txcmd_phys, firstlen, 1, 0);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   secondlen, PCI_DMA_TODEVICE);
		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
							   phys_addr, secondlen,
							   0, 0);
	}

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
				    firstlen, PCI_DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(out_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
						     le16_to_cpu(tx_cmd->len));

	/* hand the buffer back to the device */
	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
				       firstlen, PCI_DMA_BIDIRECTIONAL);

	trace_iwlwifi_legacy_dev_tx(priv,
			&((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			sizeof(struct iwl_tfd),
			&out_cmd->hdr, firstlen,
			skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_legacy_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
	 * increase the counter because the ucode will stop
	 * aggregation queues when their respective station
	 * goes to sleep.
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	if ((iwl_legacy_queue_space(q) < q->high_mark) &&
			priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_legacy_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			iwl_legacy_stop_queue(priv, txq);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	return -1;
}
562
563static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv,
564 struct iwl_dma_ptr *ptr, size_t size)
565{
566 ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
567 GFP_KERNEL);
568 if (!ptr->addr)
569 return -ENOMEM;
570 ptr->size = size;
571 return 0;
572}
573
574static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv,
575 struct iwl_dma_ptr *ptr)
576{
577 if (unlikely(!ptr->addr))
578 return;
579
580 dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
581 memset(ptr, 0, sizeof(*ptr));
582}
583
584/**
585 * iwl4965_hw_txq_ctx_free - Free TXQ Context
586 *
587 * Destroy all TX DMA queues and structures
588 */
589void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
590{
591 int txq_id;
592
593 /* Tx queues */
594 if (priv->txq) {
595 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
596 if (txq_id == priv->cmd_queue)
597 iwl_legacy_cmd_queue_free(priv);
598 else
599 iwl_legacy_tx_queue_free(priv, txq_id);
600 }
601 iwl4965_free_dma_ptr(priv, &priv->kw);
602
603 iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
604
605 /* free tx queue structure */
606 iwl_legacy_txq_mem(priv);
607}
608
/**
 * iwl4965_txq_ctx_alloc - allocate TX queue context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param priv
 * @return error code (0 on success, negative errno on failure)
 *
 * Note on the error labels: iwl4965_free_dma_ptr() is a no-op on an
 * already-freed pointer, so the unwind path below cannot double-free
 * even though iwl4965_hw_txq_ctx_free() also releases kw/scd_bc_tbls.
 */
int iwl4965_txq_ctx_alloc(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl4965_hw_txq_ctx_free(priv);

	/* Scheduler byte-count tables (one entry per TFD) */
	ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error_kw;
	}

	/* allocate tx queue structure */
	ret = iwl_legacy_alloc_txq_mem(priv);
	if (ret)
		goto error;

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl4965_txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_legacy_tx_queue_init(priv,
					&priv->txq[txq_id], slots_num,
					txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

 error:
	iwl4965_hw_txq_ctx_free(priv);
	iwl4965_free_dma_ptr(priv, &priv->kw);
 error_kw:
	iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
 error_bc_tbls:
	return ret;
}
676
677void iwl4965_txq_ctx_reset(struct iwl_priv *priv)
678{
679 int txq_id, slots_num;
680 unsigned long flags;
681
682 spin_lock_irqsave(&priv->lock, flags);
683
684 /* Turn off all Tx DMA fifos */
685 iwl4965_txq_set_sched(priv, 0);
686
687 /* Tell NIC where to find the "keep warm" buffer */
688 iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
689
690 spin_unlock_irqrestore(&priv->lock, flags);
691
692 /* Alloc and init all Tx queues, including the command queue (#4) */
693 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
694 slots_num = txq_id == priv->cmd_queue ?
695 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
696 iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id],
697 slots_num, txq_id);
698 }
699}
700
/**
 * iwl4965_txq_ctx_stop - Stop all Tx DMA channels
 *
 * Disables the scheduler, stops each Tx DMA channel (polling for the
 * idle bit with a 1000 us timeout), then unmaps and frees all queued
 * skbs/commands. Safe to call when priv->txq was never allocated.
 */
void iwl4965_txq_ctx_stop(struct iwl_priv *priv)
{
	int ch, txq_id;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);

	iwl4965_txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
		iwl_legacy_write_direct32(priv,
				FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000))
			IWL_ERR(priv, "Failing on timeout while stopping"
			    " DMA channel %d [0x%08x]", ch,
			    iwl_legacy_read_direct32(priv,
					FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (!priv->txq)
		return;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (txq_id == priv->cmd_queue)
			iwl_legacy_cmd_queue_unmap(priv);
		else
			iwl_legacy_tx_queue_unmap(priv, txq_id);
}
738
739/*
740 * Find first available (lowest unused) Tx Queue, mark it "active".
741 * Called only when finding queue for aggregation.
742 * Should never return anything < 7, because they should already
743 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
744 */
745static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
746{
747 int txq_id;
748
749 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
750 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
751 return txq_id;
752 return -1;
753}
754
/**
 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
 */
static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
					u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_legacy_write_prph(priv,
		IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
768
/**
 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
 *
 * Writes the RA/TID tag into the scheduler's translate table in SRAM.
 * Two queue entries share each 32-bit word: odd queue ids occupy the
 * upper halfword, even ones the lower, hence the read-modify-write.
 * Always returns 0.
 */
static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = priv->scd_base_addr +
			IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw);

	return 0;
}
795
/**
 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
 *
 * NOTE: txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE,
 * i.e. it must be one of the higher queues used for aggregation
 *
 * Returns -EINVAL for an out-of-range queue, a propagated error from
 * enabling Tx on the TID, or 0 on success.
 */
static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
				  int tx_fifo, int sta_id, int tid, u16 ssn_idx)
{
	unsigned long flags;
	u16 ra_tid;
	int ret;

	if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWL49_FIRST_AMPDU_QUEUE +
		priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
		IWL_WARN(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWL49_FIRST_AMPDU_QUEUE,
			IWL49_FIRST_AMPDU_QUEUE +
			priv->cfg->base_params->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	ra_tid = BUILD_RAxTID(sta_id, tid);

	/* Modify device's station table to Tx this TID */
	ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_irqsave(&priv->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwl4965_tx_queue_stop_scheduler(priv, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_legacy_write_targ_mem(priv,
		priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
		(SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
		IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

	iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
		IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
		(SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
		& IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
864
865
/*
 * mac80211 ampdu_action(AMPDU_TX_START) handler: claim a free
 * aggregation queue for <sta, tid>, program the hardware scheduler,
 * and either switch the TID to IWL_AGG_ON immediately (HW queue empty)
 * or park it in IWL_EMPTYING_HW_QUEUE_ADDBA until in-flight frames
 * drain. *ssn receives the starting sequence number for the session.
 */
int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo < 0))
		return tx_fifo;

	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
			__func__, sta->addr, tid);

	sta_id = iwl_legacy_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	/* Claim the lowest unused aggregation queue */
	txq_id = iwl4965_txq_ctx_activate_free(priv);
	if (txq_id == -1) {
		IWL_ERR(priv, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	iwl_legacy_set_swq_id(&priv->txq[txq_id],
				iwl4965_get_ac_from_tid(tid), txq_id);
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	/* Program the hardware scheduler (may sleep; lock dropped above) */
	ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo,
						  sta_id, tid, *ssn);
	if (ret)
		return ret;

	/* Re-check queue depth under the lock before flipping state */
	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(priv, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		IWL_DEBUG_HT(priv,
			"HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&priv->sta_lock, flags);
	return ret;
}
930
/**
 * iwl4965_txq_agg_disable - deactivate a hardware aggregation queue
 *
 * Stops the scheduler for the queue, removes it from chain-building mode,
 * resets its read/write pointers to the given SSN, masks its scheduler
 * interrupt and returns it to non-aggregation status.
 *
 * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE
 * priv->lock must be held by the caller
 */
static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
				   u16 ssn_idx, u8 tx_fifo)
{
	/* Only queues in the AMPDU range may be torn down here */
	if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWL49_FIRST_AMPDU_QUEUE +
		priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
		IWL_WARN(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWL49_FIRST_AMPDU_QUEUE,
			IWL49_FIRST_AMPDU_QUEUE +
			priv->cfg->base_params->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	/* Stop the scheduler before touching any queue state */
	iwl4965_tx_queue_stop_scheduler(priv, txq_id);

	iwl_legacy_clear_bits_prph(priv,
			IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);

	/* Mask the queue's interrupt and return it to non-agg status */
	iwl_legacy_clear_bits_prph(priv,
			 IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(priv, txq_id);
	iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);

	return 0;
}
966
/**
 * iwl4965_tx_agg_stop - tear down a Tx aggregation (DELBA) session for RA/TID
 *
 * If the hardware queue for this session is still non-empty, the state is set
 * to IWL_EMPTYING_HW_QUEUE_DELBA and the actual queue teardown is deferred to
 * iwl4965_txq_check_empty(); otherwise the queue is disabled immediately and
 * mac80211 is notified via ieee80211_stop_tx_ba_cb_irqsafe().
 *
 * Returns 0 on success or a negative errno on failure.
 */
int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn;
	struct iwl_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = iwl_legacy_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);

	tid_data = &priv->stations[sta_id].tid[tid];
	/* SSN the BA window must drain to (next sequence number) */
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		/* fall through and attempt teardown anyway */
		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
	}

	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		/* Defer: iwl4965_txq_check_empty() finishes once drained */
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_HT(priv, "HW queue is empty\n");
 turn_off:
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	/* do not restore/save irqs: hand off from sta_lock to priv->lock
	 * with interrupts kept disabled across both critical sections */
	spin_unlock(&priv->sta_lock);
	spin_lock(&priv->lock);

	/*
	 * the only reason this call can fail is queue number out of range,
	 * which can happen if uCode is reloaded and all the station
	 * information are lost. if it is outside the range, there is no need
	 * to deactivate the uCode queue, just return "success" to allow
	 * mac80211 to clean up it own data.
	 */
	iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
1042
/**
 * iwl4965_txq_check_empty - complete a deferred ADDBA or DELBA transition
 *
 * Called after frames are reclaimed from @txq_id.  If the aggregation queue
 * for this RA/TID has fully drained, finish the pending session start
 * (EMPTYING_HW_QUEUE_ADDBA -> AGG_ON) or teardown (EMPTYING_HW_QUEUE_DELBA ->
 * AGG_OFF) and notify mac80211.  Caller must hold priv->sta_lock.
 *
 * Always returns 0.
 */
int iwl4965_txq_check_empty(struct iwl_priv *priv,
			   int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
	struct iwl_rxon_context *ctx;

	ctx = &priv->contexts[priv->stations[sta_id].ctxid];

	lockdep_assert_held(&priv->sta_lock);

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id  == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid);
			IWL_DEBUG_HT(priv,
				"HW queue empty: continue DELBA flow\n");
			iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv,
				"HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
		}
		break;
	}

	return 0;
}
1083
1084static void iwl4965_non_agg_tx_status(struct iwl_priv *priv,
1085 struct iwl_rxon_context *ctx,
1086 const u8 *addr1)
1087{
1088 struct ieee80211_sta *sta;
1089 struct iwl_station_priv *sta_priv;
1090
1091 rcu_read_lock();
1092 sta = ieee80211_find_sta(ctx->vif, addr1);
1093 if (sta) {
1094 sta_priv = (void *)sta->drv_priv;
1095 /* avoid atomic ops if this isn't a client */
1096 if (sta_priv->client &&
1097 atomic_dec_return(&sta_priv->pending_frames) == 0)
1098 ieee80211_sta_block_awake(priv->hw, sta, false);
1099 }
1100 rcu_read_unlock();
1101}
1102
1103static void
1104iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
1105 bool is_agg)
1106{
1107 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
1108
1109 if (!is_agg)
1110 iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
1111
1112 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
1113}
1114
/**
 * iwl4965_tx_queue_reclaim - free TFDs up to (but not including) @index
 *
 * Advances the queue's read pointer through the circular buffer, handing each
 * completed skb to mac80211 and freeing its TFD.
 *
 * Returns the number of reclaimed QoS-data frames; the caller uses this to
 * adjust the per-TID tfds_in_queue count.
 */
int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;

	if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	/* Walk one slot past @index so read_ptr stops exactly at @index */
	for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];

		if (WARN_ON_ONCE(tx_info->skb == NULL))
			continue;

		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
		/* Only QoS data frames count against tfds_in_queue */
		if (ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;

		/* Queues at/above FIRST_AMPDU_QUEUE carry aggregated traffic */
		iwl4965_tx_status(priv, tx_info,
				 txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
		tx_info->skb = NULL;

		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
	}
	return nfreed;
}
1151
/**
 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
 *
 * Returns 0 on success, -EINVAL if no BA was expected, -1 if the BA bitmap
 * cannot cover the number of frames sent.
 */
static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
				 struct iwl_ht_agg *agg,
				 struct iwl_compressed_ba_resp *ba_resp)

{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	int successes = 0;
	struct ieee80211_tx_info *info;
	u64 bitmap, sent_bitmap;

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IWL_ERR(priv, "Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx,
							ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0) /* tbw something is wrong with indices */
		sh += 0x100;

	if (agg->frame_count > (64 - sh)) {
		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
		return -1;
	}

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	sent_bitmap = bitmap & agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	i = 0;
	while (sent_bitmap) {
		ack = sent_bitmap & 1ULL;
		successes += ack;
		IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
			ack ? "ACK" : "NACK", i,
			(agg->start_idx + i) & 0xff,
			agg->start_idx + i);
		sent_bitmap >>= 1;
		++i;
	}

	IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
				   (unsigned long long)bitmap);

	/* Aggregate A-MPDU status is reported on the window's first frame */
	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_len = successes;
	info->status.ampdu_len = agg->frame_count;
	iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	return 0;
}
1225
1226/**
1227 * translate ucode response to mac80211 tx status control values
1228 */
1229void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
1230 struct ieee80211_tx_info *info)
1231{
1232 struct ieee80211_tx_rate *r = &info->control.rates[0];
1233
1234 info->antenna_sel_tx =
1235 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
1236 if (rate_n_flags & RATE_MCS_HT_MSK)
1237 r->flags |= IEEE80211_TX_RC_MCS;
1238 if (rate_n_flags & RATE_MCS_GF_MSK)
1239 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
1240 if (rate_n_flags & RATE_MCS_HT40_MSK)
1241 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
1242 if (rate_n_flags & RATE_MCS_DUP_MSK)
1243 r->flags |= IEEE80211_TX_RC_DUP_DATA;
1244 if (rate_n_flags & RATE_MCS_SGI_MSK)
1245 r->flags |= IEEE80211_TX_RC_SHORT_GI;
1246 r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
1247}
1248
/**
 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.  Updates per-frame ACK status, reclaims
 * TFDs in front of the BA window, wakes the software queue if space freed up,
 * and lets any deferred ADDBA/DELBA transition complete.
 */
void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
					   struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;
	unsigned long flags;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;
	if (unlikely(agg->txq_id != scd_flow)) {
		/*
		 * FIXME: this is a uCode bug which need to be addressed,
		 * log the information and return for now!
		 * since it is possible happen very often and in order
		 * not to fill the syslog, don't enable the logging by default
		 */
		IWL_DEBUG_TX_REPLY(priv,
			"BA scd_flow %d does not match txq_id %d\n",
			scd_flow, agg->txq_id);
		return;
	}

	/* Find index just before block-ack window */
	index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	spin_lock_irqsave(&priv->sta_lock, flags);

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx,"
			"scd_flow = "
			"%d, scd_ssn = %d\n",
			ba_resp->tid,
			ba_resp->seq_ctl,
			(unsigned long long)le64_to_cpu(ba_resp->bitmap),
			ba_resp->scd_flow,
			ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
			agg->start_idx,
			(unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
		iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);

		/* Wake the sw queue unless a DELBA teardown is in progress */
		if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			iwl_legacy_wake_queue(priv, txq);

		iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow);
	}

	spin_unlock_irqrestore(&priv->sta_lock, flags);
}
1340
1341#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/**
 * iwl4965_get_tx_fail_reason - map a Tx status code to a printable mnemonic
 *
 * Debug-only helper: decodes the TX_STATUS_MSK portion of @status into the
 * matching POSTPONE/FAIL name, or "UNKNOWN" for unrecognized codes.
 */
const char *iwl4965_get_tx_fail_reason(u32 status)
{
/* Each macro expands to a case label returning the stringified suffix */
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(FIFO_UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
	TX_STATUS_FAIL(PASSIVE_NO_RX);
	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
1378#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
deleted file mode 100644
index 001d148feb94..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
+++ /dev/null
@@ -1,166 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-4965-hw.h"
40#include "iwl-4965.h"
41#include "iwl-4965-calib.h"
42
43#define IWL_AC_UNSET -1
44
/**
 * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
 * using sample data 100 bytes apart. If these sample points are good,
 * it's a pretty good bet that everything between them is good, too.
 *
 * Returns 0 if all sampled words match, -EIO otherwise.
 */
static int
iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
{
	u32 val;
	int ret = 0;
	u32 errcnt = 0;
	u32 i;

	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);

	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
			i + IWL4965_RTC_INST_LOWER_BOUND);
		val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			ret = -EIO;
			errcnt++;
			/* bail after a few mismatches; image is clearly bad */
			if (errcnt >= 3)
				break;
		}
	}

	return ret;
}
77
/**
 * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
 * looking at all data.
 *
 * Returns 0 if every word matches, -EIO otherwise (logs up to 20 mismatches).
 */
static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
				 u32 len)
{
	u32 val;
	u32 save_len = len;
	int ret = 0;
	u32 errcnt;

	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);

	iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
			   IWL4965_RTC_INST_LOWER_BOUND);

	errcnt = 0;
	for (; len > 0; len -= sizeof(u32), image++) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			IWL_ERR(priv, "uCode INST section is invalid at "
				  "offset 0x%x, is 0x%x, s/b 0x%x\n",
				  save_len - len, val, le32_to_cpu(*image));
			ret = -EIO;
			errcnt++;
			if (errcnt >= 20)
				break;
		}
	}

	if (!errcnt)
		IWL_DEBUG_INFO(priv,
		    "ucode image in INSTRUCTION memory is good\n");

	return ret;
}
118
/**
 * iwl4965_verify_ucode - determine which instruction image is in SRAM,
 * and verify its contents
 *
 * Tries a sparse check against each known image (bootstrap, initialize,
 * runtime) in turn; if none matches, does a full dump-compare against the
 * bootstrap image so the log shows something useful.
 *
 * Returns 0 if any image verifies, otherwise the error from the full check.
 */
int iwl4965_verify_ucode(struct iwl_priv *priv)
{
	__le32 *image;
	u32 len;
	int ret;

	/* Try bootstrap */
	image = (__le32 *)priv->ucode_boot.v_addr;
	len = priv->ucode_boot.len;
	ret = iwl4965_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
		return 0;
	}

	/* Try initialize */
	image = (__le32 *)priv->ucode_init.v_addr;
	len = priv->ucode_init.len;
	ret = iwl4965_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
		return 0;
	}

	/* Try runtime/protocol */
	image = (__le32 *)priv->ucode_code.v_addr;
	len = priv->ucode_code.len;
	ret = iwl4965_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
		return 0;
	}

	IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");

	/* Since nothing seems to match, show first several data entries in
	 * instruction SRAM, so maybe visual inspection will give a clue.
	 * Selection of bootstrap image (vs. other images) is arbitrary. */
	image = (__le32 *)priv->ucode_boot.v_addr;
	len = priv->ucode_boot.len;
	ret = iwl4965_verify_inst_full(priv, image, len);

	return ret;
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
deleted file mode 100644
index 86f4fce193e4..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965.c
+++ /dev/null
@@ -1,2183 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/sched.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-eeprom.h"
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44#include "iwl-helpers.h"
45#include "iwl-4965-calib.h"
46#include "iwl-sta.h"
47#include "iwl-4965-led.h"
48#include "iwl-4965.h"
49#include "iwl-4965-debugfs.h"
50
51static int iwl4965_send_tx_power(struct iwl_priv *priv);
52static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
53
54/* Highest firmware API version supported */
55#define IWL4965_UCODE_API_MAX 2
56
57/* Lowest firmware API version supported */
58#define IWL4965_UCODE_API_MIN 2
59
60#define IWL4965_FW_PRE "iwlwifi-4965-"
61#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
62#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)
63
/* check contents of special bootstrap uCode SRAM:
 * compare every word of BSM SRAM against the host-side bootstrap image.
 * Returns 0 on match, -EIO on the first mismatch. */
static int iwl4965_verify_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	u32 reg;
	u32 val;

	IWL_DEBUG_INFO(priv, "Begin verify bsm\n");

	/* verify BSM SRAM contents */
	/* NOTE(review): this DWCOUNT read result is immediately overwritten
	 * in the loop below and appears unused — confirm whether the read
	 * itself has a required side effect before removing */
	val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
	for (reg = BSM_SRAM_LOWER_BOUND;
	     reg < BSM_SRAM_LOWER_BOUND + len;
	     reg += sizeof(u32), image++) {
		val = iwl_legacy_read_prph(priv, reg);
		if (val != le32_to_cpu(*image)) {
			IWL_ERR(priv, "BSM uCode verification failed at "
				  "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
				  BSM_SRAM_LOWER_BOUND,
				  reg - BSM_SRAM_LOWER_BOUND, len,
				  val, le32_to_cpu(*image));
			return -EIO;
		}
	}

	IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");

	return 0;
}
94
/**
 * iwl4965_load_bsm - Load bootstrap instructions
 *
 * BSM operation:
 *
 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
 * in special SRAM that does not power down during RFKILL. When powering back
 * up after power-saving sleeps (or during initial uCode load), the BSM loads
 * the bootstrap program into the on-board processor, and starts it.
 *
 * The bootstrap program loads (via DMA) instructions and data for a new
 * program from host DRAM locations indicated by the host driver in the
 * BSM_DRAM_* registers. Once the new program is loaded, it starts
 * automatically.
 *
 * When initializing the NIC, the host driver points the BSM to the
 * "initialize" uCode image. This uCode sets up some internal data, then
 * notifies host via "initialize alive" that it is complete.
 *
 * The host then replaces the BSM_DRAM_* pointer values to point to the
 * normal runtime uCode instructions and a backup uCode data cache buffer
 * (filled initially with starting data values for the on-board processor),
 * then triggers the "initialize" uCode to load and launch the runtime uCode,
 * which begins normal operation.
 *
 * When doing a power-save shutdown, runtime uCode saves data SRAM into
 * the backup data cache in DRAM before SRAM is powered down.
 *
 * When powering back up, the BSM loads the bootstrap program. This reloads
 * the runtime uCode instructions and the backup data cache into SRAM,
 * and re-launches the runtime uCode from where it left off.
 *
 * Returns 0 on success, -EINVAL if the bootstrap image is too large for BSM
 * SRAM, -EIO on verification or load-completion failure.
 */
static int iwl4965_load_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	dma_addr_t pinst;
	dma_addr_t pdata;
	u32 inst_len;
	u32 data_len;
	int i;
	u32 done;
	u32 reg_offset;
	int ret;

	IWL_DEBUG_INFO(priv, "Begin load bsm\n");

	priv->ucode_type = UCODE_RT;

	/* make sure bootstrap program is no larger than BSM's SRAM size */
	if (len > IWL49_MAX_BSM_SIZE)
		return -EINVAL;

	/* Tell bootstrap uCode where to find the "Initialize" uCode
	 *   in host DRAM ... host DRAM physical address bits 35:4 for 4965.
	 * NOTE:  iwl_init_alive_start() will replace these values,
	 *        after the "initialize" uCode has run, to point to
	 *        runtime/protocol instructions and backup data cache.
	 */
	pinst = priv->ucode_init.p_addr >> 4;
	pdata = priv->ucode_init_data.p_addr >> 4;
	inst_len = priv->ucode_init.len;
	data_len = priv->ucode_init_data.len;

	iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
	iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);

	/* Fill BSM memory with bootstrap instructions */
	for (reg_offset = BSM_SRAM_LOWER_BOUND;
	     reg_offset < BSM_SRAM_LOWER_BOUND + len;
	     reg_offset += sizeof(u32), image++)
		_iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image));

	/* Read back and compare what we just wrote */
	ret = iwl4965_verify_bsm(priv);
	if (ret)
		return ret;

	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
	iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
	iwl_legacy_write_prph(priv,
			BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
	iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));

	/* Load bootstrap code into instruction SRAM now,
	 *   to prepare to load "initialize" uCode */
	iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	/* Wait for load of bootstrap uCode to finish (START bit self-clears,
	 * polled up to 100 x 10us) */
	for (i = 0; i < 100; i++) {
		done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
		if (!(done & BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i < 100)
		IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
	else {
		IWL_ERR(priv, "BSM write did not complete!\n");
		return -EIO;
	}

	/* Enable future boot loads whenever power management unit triggers it
	 *   (e.g. when powering back up after power-save shutdown) */
	iwl_legacy_write_prph(priv,
			BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);


	return 0;
}
206
/**
 * iwl4965_set_ucode_ptrs - Set uCode address location
 *
 * Tell initialization uCode where to find runtime uCode.
 *
 * BSM registers initially contain pointers to initialization uCode.
 * We need to replace them to load runtime uCode inst and data,
 * and to save runtime data when powering down.
 *
 * Always returns 0.
 */
static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
{
	dma_addr_t pinst;
	dma_addr_t pdata;
	int ret = 0;

	/* bits 35:4 for 4965 */
	pinst = priv->ucode_code.p_addr >> 4;
	pdata = priv->ucode_data_backup.p_addr >> 4;

	/* Tell bootstrap uCode where to find image to load */
	iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
				 priv->ucode_data.len);

	/* Inst byte count must be last to set up, bit 31 signals uCode
	 *   that all new ptr/size info is in place */
	iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
				 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
	IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");

	return ret;
}
240
/**
 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
 *
 * Called after REPLY_ALIVE notification received from "initialize" uCode.
 *
 * The 4965 "initialize" ALIVE reply contains calibration data for:
 *   Voltage, temperature, and MIMO tx gain correction, now stored in priv
 *   (3945 does not contain this data).
 *
 * Tell "initialize" uCode to go ahead and load the runtime uCode.
 * On any failure, schedules the restart work to bring the device back down.
*/
static void iwl4965_init_alive_start(struct iwl_priv *priv)
{
	/* Bootstrap uCode has loaded initialize uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "initialize" alive if code weren't properly loaded.  */
	if (iwl4965_verify_ucode(priv)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
		goto restart;
	}

	/* Calculate temperature */
	priv->temperature = iwl4965_hw_get_temperature(priv);

	/* Send pointers to protocol/runtime uCode image ... init code will
	 * load and launch runtime uCode, which will send us another "Alive"
	 * notification. */
	IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
	if (iwl4965_set_ucode_ptrs(priv)) {
		/* Runtime instruction load won't happen;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
		goto restart;
	}
	return;

restart:
	queue_work(priv->workqueue, &priv->restart);
}
282
283static bool iw4965_is_ht40_channel(__le32 rxon_flags)
284{
285 int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
286 >> RXON_FLG_CHANNEL_MODE_POS;
287 return ((chan_mod == CHANNEL_MODE_PURE_40) ||
288 (chan_mod == CHANNEL_MODE_MIXED));
289}
290
/* One-time NIC setup: latch EEPROM radio-config bits into the hardware
 * interface CSR and cache the factory txpower calibration block. */
static void iwl4965_nic_config(struct iwl_priv *priv)
{
	unsigned long flags;
	u16 radio_cfg;

	spin_lock_irqsave(&priv->lock, flags);

	radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);

	/* write radio config values to register */
	/* NOTE(review): compares the RF type against the max supported
	 * type with '==' — presumably only that type needs its cfg bits
	 * written; confirm against the EEPROM layout docs. */
	if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			     EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
			     EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
			     EEPROM_RF_CFG_DASH_MSK(radio_cfg));

	/* set CSR_HW_CONFIG_REG for uCode use */
	iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		   CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
		   CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);

	/* Cache a pointer to the factory txpower calibration data;
	 * read later by the txpower table code. */
	priv->calib_info = (struct iwl_eeprom_calib_info *)
		iwl_legacy_eeprom_query_addr(priv,
				EEPROM_4965_CALIB_TXPOWER_OFFSET);

	spin_unlock_irqrestore(&priv->lock, flags);
}
318
/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
 * Called after every association, but this runs only once!
 * ... once chain noise is calibrated the first time, it's good forever. */
static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
{
	struct iwl_chain_noise_data *data = &(priv->chain_noise_data);

	/* Only act while the calibration state machine is still in its
	 * initial ALIVE state and we are actually associated. */
	if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
	    iwl_legacy_is_any_associated(priv)) {
		struct iwl_calib_diff_gain_cmd cmd;

		/* clear data for chain noise calibration algorithm */
		data->chain_noise_a = 0;
		data->chain_noise_b = 0;
		data->chain_noise_c = 0;
		data->chain_signal_a = 0;
		data->chain_signal_b = 0;
		data->chain_signal_c = 0;
		data->beacon_count = 0;

		/* Ask uCode to zero the per-chain differential gains */
		memset(&cmd, 0, sizeof(cmd));
		cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
		cmd.diff_gain_a = 0;
		cmd.diff_gain_b = 0;
		cmd.diff_gain_c = 0;
		if (iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
				 sizeof(cmd), &cmd))
			IWL_ERR(priv,
				"Could not send REPLY_PHY_CALIBRATION_CMD\n");
		/* NOTE(review): state advances to ACCUMULATE even if the
		 * command send failed — presumably best-effort; confirm. */
		data->state = IWL_CHAIN_NOISE_ACCUMULATE;
		IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
	}
}
352
/* Default sensitivity-calibration thresholds for the 4965; installed into
 * priv->hw_params.sens by iwl4965_hw_set_hw_params() below. Values are
 * auto-correlation / energy thresholds consumed by the shared sensitivity
 * calibration code. */
static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
	.min_nrg_cck = 97,
	.max_nrg_cck = 0, /* not used, set to 0 */

	.auto_corr_min_ofdm = 85,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 140,
	.auto_corr_max_ofdm_mrc_x1 = 270,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,

	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};
379
/* Program the critical-temperature (CT-kill) threshold, stored in Kelvin. */
static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
{
	/* want Kelvin */
	priv->hw_params.ct_kill_threshold =
		CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
}
386
387/**
388 * iwl4965_hw_set_hw_params
389 *
390 * Called when initializing driver
391 */
392static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
393{
394 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
395 priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES)
396 priv->cfg->base_params->num_of_queues =
397 priv->cfg->mod_params->num_of_queues;
398
399 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
400 priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
401 priv->hw_params.scd_bc_tbls_size =
402 priv->cfg->base_params->num_of_queues *
403 sizeof(struct iwl4965_scd_bc_tbl);
404 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
405 priv->hw_params.max_stations = IWL4965_STATION_COUNT;
406 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL4965_BROADCAST_ID;
407 priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
408 priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
409 priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
410 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
411
412 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
413
414 priv->hw_params.tx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_tx_ant);
415 priv->hw_params.rx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_rx_ant);
416 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
417 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
418
419 iwl4965_set_ct_threshold(priv);
420
421 priv->hw_params.sens = &iwl4965_sensitivity;
422 priv->hw_params.beacon_time_tsf_bits = IWL4965_EXT_BEACON_TIME_POS;
423
424 return 0;
425}
426
/**
 * iwl4965_math_div_round - signed integer division with round-to-nearest
 * @num: numerator
 * @denom: denominator (must be non-zero; division by zero is UB)
 * @res: receives num/denom rounded to the nearest integer
 *
 * Works on magnitudes and re-applies the sign afterwards, so rounding is
 * symmetric for negative inputs.  Always returns 1.
 */
static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
{
	s32 sign = 1;

	if (num < 0) {
		sign = -sign;
		num = -num;
	}
	if (denom < 0) {
		sign = -sign;
		denom = -denom;
	}
	/* (num + denom/2) / denom, doubled to avoid losing the half step.
	 * (The redundant "*res = 1;" dead store that preceded this was
	 * removed — its value was overwritten unconditionally.) */
	*res = ((num * 2 + denom) / (denom * 2)) * sign;

	return 1;
}
444
445/**
446 * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
447 *
448 * Determines power supply voltage compensation for txpower calculations.
449 * Returns number of 1/2-dB steps to subtract from gain table index,
450 * to compensate for difference between power supply voltage during
451 * factory measurements, vs. current power supply voltage.
452 *
453 * Voltage indication is higher for lower voltage.
454 * Lower voltage requires more gain (lower gain table index).
455 */
456static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
457 s32 current_voltage)
458{
459 s32 comp = 0;
460
461 if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
462 (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
463 return 0;
464
465 iwl4965_math_div_round(current_voltage - eeprom_voltage,
466 TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
467
468 if (current_voltage > eeprom_voltage)
469 comp *= 2;
470 if ((comp < -2) || (comp > 2))
471 comp = 0;
472
473 return comp;
474}
475
/* Map a channel number to its tx-attenuation calibration group, or
 * -EINVAL when the channel falls in none of the known ranges. */
static s32 iwl4965_get_tx_atten_grp(u16 channel)
{
	struct atten_range {
		u16 fch;	/* first channel in range */
		u16 lch;	/* last channel in range */
		s32 grp;	/* group returned for this range */
	};
	/* Checked in the same order as the original if-chain */
	static const struct atten_range ranges[] = {
		{ CALIB_IWL_TX_ATTEN_GR5_FCH, CALIB_IWL_TX_ATTEN_GR5_LCH,
		  CALIB_CH_GROUP_5 },
		{ CALIB_IWL_TX_ATTEN_GR1_FCH, CALIB_IWL_TX_ATTEN_GR1_LCH,
		  CALIB_CH_GROUP_1 },
		{ CALIB_IWL_TX_ATTEN_GR2_FCH, CALIB_IWL_TX_ATTEN_GR2_LCH,
		  CALIB_CH_GROUP_2 },
		{ CALIB_IWL_TX_ATTEN_GR3_FCH, CALIB_IWL_TX_ATTEN_GR3_LCH,
		  CALIB_CH_GROUP_3 },
		{ CALIB_IWL_TX_ATTEN_GR4_FCH, CALIB_IWL_TX_ATTEN_GR4_LCH,
		  CALIB_CH_GROUP_4 },
	};
	int i;

	for (i = 0; i < 5; i++)
		if (channel >= ranges[i].fch && channel <= ranges[i].lch)
			return ranges[i].grp;

	return -EINVAL;
}
500
501static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
502{
503 s32 b = -1;
504
505 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
506 if (priv->calib_info->band_info[b].ch_from == 0)
507 continue;
508
509 if ((channel >= priv->calib_info->band_info[b].ch_from)
510 && (channel <= priv->calib_info->band_info[b].ch_to))
511 break;
512 }
513
514 return b;
515}
516
517static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
518{
519 s32 val;
520
521 if (x2 == x1)
522 return y1;
523 else {
524 iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
525 return val + y2;
526 }
527}
528
529/**
530 * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
531 *
532 * Interpolates factory measurements from the two sample channels within a
533 * sub-band, to apply to channel of interest. Interpolation is proportional to
534 * differences in channel frequencies, which is proportional to differences
535 * in channel number.
536 */
537static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
538 struct iwl_eeprom_calib_ch_info *chan_info)
539{
540 s32 s = -1;
541 u32 c;
542 u32 m;
543 const struct iwl_eeprom_calib_measure *m1;
544 const struct iwl_eeprom_calib_measure *m2;
545 struct iwl_eeprom_calib_measure *omeas;
546 u32 ch_i1;
547 u32 ch_i2;
548
549 s = iwl4965_get_sub_band(priv, channel);
550 if (s >= EEPROM_TX_POWER_BANDS) {
551 IWL_ERR(priv, "Tx Power can not find channel %d\n", channel);
552 return -1;
553 }
554
555 ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
556 ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
557 chan_info->ch_num = (u8) channel;
558
559 IWL_DEBUG_TXPOWER(priv, "channel %d subband %d factory cal ch %d & %d\n",
560 channel, s, ch_i1, ch_i2);
561
562 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
563 for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
564 m1 = &(priv->calib_info->band_info[s].ch1.
565 measurements[c][m]);
566 m2 = &(priv->calib_info->band_info[s].ch2.
567 measurements[c][m]);
568 omeas = &(chan_info->measurements[c][m]);
569
570 omeas->actual_pow =
571 (u8) iwl4965_interpolate_value(channel, ch_i1,
572 m1->actual_pow,
573 ch_i2,
574 m2->actual_pow);
575 omeas->gain_idx =
576 (u8) iwl4965_interpolate_value(channel, ch_i1,
577 m1->gain_idx, ch_i2,
578 m2->gain_idx);
579 omeas->temperature =
580 (u8) iwl4965_interpolate_value(channel, ch_i1,
581 m1->temperature,
582 ch_i2,
583 m2->temperature);
584 omeas->pa_det =
585 (s8) iwl4965_interpolate_value(channel, ch_i1,
586 m1->pa_det, ch_i2,
587 m2->pa_det);
588
589 IWL_DEBUG_TXPOWER(priv,
590 "chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
591 m1->actual_pow, m2->actual_pow, omeas->actual_pow);
592 IWL_DEBUG_TXPOWER(priv,
593 "chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
594 m1->gain_idx, m2->gain_idx, omeas->gain_idx);
595 IWL_DEBUG_TXPOWER(priv,
596 "chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
597 m1->pa_det, m2->pa_det, omeas->pa_det);
598 IWL_DEBUG_TXPOWER(priv,
599 "chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
600 m1->temperature, m2->temperature,
601 omeas->temperature);
602 }
603 }
604
605 return 0;
606}
607
/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates.
 * Read-only (const so it lands in .rodata); indexed by power-table entry. */
static const s32 back_off_table[] = {
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 40 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 40 MHz */
	10			/* CCK */
};
617
/* Thermal compensation values for txpower for various frequency ranges ...
 * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust.
 * Read-only (const so it lands in .rodata); indexed by txatten group. */
static const struct iwl4965_txpower_comp_entry {
	s32 degrees_per_05db_a;
	s32 degrees_per_05db_a_denom;
} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
	{9, 2},			/* group 0 5.2, ch 34-43 */
	{4, 1},			/* group 1 5.2, ch 44-70 */
	{4, 1},			/* group 2 5.2, ch 71-124 */
	{4, 1},			/* group 3 5.2, ch 125-200 */
	{3, 1}			/* group 4 2.4, ch all */
};
630
/* Lowest legal gain-table index for a given rate entry and band.
 * On 5.2 GHz (band == 0), entries whose low three bits are <= 4 may use
 * the extended (negative) index range; everything else is bounded by
 * MIN_TX_GAIN_INDEX. */
static s32 get_min_power_index(s32 rate_power_index, u32 band)
{
	if (band == 0 && (rate_power_index & 7) <= 4)
		return MIN_TX_GAIN_INDEX_52GHZ_EXT;

	return MIN_TX_GAIN_INDEX;
}
639
/* One gain-table entry: DSP pre-distortion attenuation plus radio gain. */
struct gain_entry {
	u8 dsp;
	u8 radio;
};

/* Per-band power gain tables, indexed [band][power_index]:
 * band 0 = 5.2 GHz, band 1 = 2.4 GHz; index 0 is highest txpower.
 * Read-only factory data — const so it lands in .rodata. */
static const struct gain_entry gain_table[2][108] = {
	/* 5.2GHz power gain index table */
	{
	 {123, 0x3F},		/* highest txpower */
	 {117, 0x3F},
	 {110, 0x3F},
	 {104, 0x3F},
	 {98, 0x3F},
	 {110, 0x3E},
	 {104, 0x3E},
	 {98, 0x3E},
	 {110, 0x3D},
	 {104, 0x3D},
	 {98, 0x3D},
	 {110, 0x3C},
	 {104, 0x3C},
	 {98, 0x3C},
	 {110, 0x3B},
	 {104, 0x3B},
	 {98, 0x3B},
	 {110, 0x3A},
	 {104, 0x3A},
	 {98, 0x3A},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x25},
	 {104, 0x25},
	 {98, 0x25},
	 {110, 0x24},
	 {104, 0x24},
	 {98, 0x24},
	 {110, 0x23},
	 {104, 0x23},
	 {98, 0x23},
	 {110, 0x22},
	 {104, 0x18},
	 {98, 0x18},
	 {110, 0x17},
	 {104, 0x17},
	 {98, 0x17},
	 {110, 0x16},
	 {104, 0x16},
	 {98, 0x16},
	 {110, 0x15},
	 {104, 0x15},
	 {98, 0x15},
	 {110, 0x14},
	 {104, 0x14},
	 {98, 0x14},
	 {110, 0x13},
	 {104, 0x13},
	 {98, 0x13},
	 {110, 0x12},
	 {104, 0x08},
	 {98, 0x08},
	 {110, 0x07},
	 {104, 0x07},
	 {98, 0x07},
	 {110, 0x06},
	 {104, 0x06},
	 {98, 0x06},
	 {110, 0x05},
	 {104, 0x05},
	 {98, 0x05},
	 {110, 0x04},
	 {104, 0x04},
	 {98, 0x04},
	 {110, 0x03},
	 {104, 0x03},
	 {98, 0x03},
	 {110, 0x02},
	 {104, 0x02},
	 {98, 0x02},
	 {110, 0x01},
	 {104, 0x01},
	 {98, 0x01},
	 {110, 0x00},
	 {104, 0x00},
	 {98, 0x00},
	 {93, 0x00},
	 {88, 0x00},
	 {83, 0x00},
	 {78, 0x00},
	 },
	/* 2.4GHz power gain index table */
	{
	 {110, 0x3f},		/* highest txpower */
	 {104, 0x3f},
	 {98, 0x3f},
	 {110, 0x3e},
	 {104, 0x3e},
	 {98, 0x3e},
	 {110, 0x3d},
	 {104, 0x3d},
	 {98, 0x3d},
	 {110, 0x3c},
	 {104, 0x3c},
	 {98, 0x3c},
	 {110, 0x3b},
	 {104, 0x3b},
	 {98, 0x3b},
	 {110, 0x3a},
	 {104, 0x3a},
	 {98, 0x3a},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x6},
	 {104, 0x6},
	 {98, 0x6},
	 {110, 0x5},
	 {104, 0x5},
	 {98, 0x5},
	 {110, 0x4},
	 {104, 0x4},
	 {98, 0x4},
	 {110, 0x3},
	 {104, 0x3},
	 {98, 0x3},
	 {110, 0x2},
	 {104, 0x2},
	 {98, 0x2},
	 {110, 0x1},
	 {104, 0x1},
	 {98, 0x1},
	 {110, 0x0},
	 {104, 0x0},
	 {98, 0x0},
	 {97, 0},
	 {96, 0},
	 {95, 0},
	 {94, 0},
	 {93, 0},
	 {92, 0},
	 {91, 0},
	 {90, 0},
	 {89, 0},
	 {88, 0},
	 {87, 0},
	 {86, 0},
	 {85, 0},
	 {84, 0},
	 {83, 0},
	 {82, 0},
	 {81, 0},
	 {80, 0},
	 {79, 0},
	 {78, 0},
	 {77, 0},
	 {76, 0},
	 {75, 0},
	 {74, 0},
	 {73, 0},
	 {72, 0},
	 {71, 0},
	 {70, 0},
	 {69, 0},
	 {68, 0},
	 {67, 0},
	 {66, 0},
	 {65, 0},
	 {64, 0},
	 {63, 0},
	 {62, 0},
	 {61, 0},
	 {60, 0},
	 {59, 0},
	 }
};
869
870static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
871 u8 is_ht40, u8 ctrl_chan_high,
872 struct iwl4965_tx_power_db *tx_power_tbl)
873{
874 u8 saturation_power;
875 s32 target_power;
876 s32 user_target_power;
877 s32 power_limit;
878 s32 current_temp;
879 s32 reg_limit;
880 s32 current_regulatory;
881 s32 txatten_grp = CALIB_CH_GROUP_MAX;
882 int i;
883 int c;
884 const struct iwl_channel_info *ch_info = NULL;
885 struct iwl_eeprom_calib_ch_info ch_eeprom_info;
886 const struct iwl_eeprom_calib_measure *measurement;
887 s16 voltage;
888 s32 init_voltage;
889 s32 voltage_compensation;
890 s32 degrees_per_05db_num;
891 s32 degrees_per_05db_denom;
892 s32 factory_temp;
893 s32 temperature_comp[2];
894 s32 factory_gain_index[2];
895 s32 factory_actual_pwr[2];
896 s32 power_index;
897
898 /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
899 * are used for indexing into txpower table) */
900 user_target_power = 2 * priv->tx_power_user_lmt;
901
902 /* Get current (RXON) channel, band, width */
903 IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band,
904 is_ht40);
905
906 ch_info = iwl_legacy_get_channel_info(priv, priv->band, channel);
907
908 if (!iwl_legacy_is_channel_valid(ch_info))
909 return -EINVAL;
910
911 /* get txatten group, used to select 1) thermal txpower adjustment
912 * and 2) mimo txpower balance between Tx chains. */
913 txatten_grp = iwl4965_get_tx_atten_grp(channel);
914 if (txatten_grp < 0) {
915 IWL_ERR(priv, "Can't find txatten group for channel %d.\n",
916 channel);
917 return txatten_grp;
918 }
919
920 IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n",
921 channel, txatten_grp);
922
923 if (is_ht40) {
924 if (ctrl_chan_high)
925 channel -= 2;
926 else
927 channel += 2;
928 }
929
930 /* hardware txpower limits ...
931 * saturation (clipping distortion) txpowers are in half-dBm */
932 if (band)
933 saturation_power = priv->calib_info->saturation_power24;
934 else
935 saturation_power = priv->calib_info->saturation_power52;
936
937 if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
938 saturation_power > IWL_TX_POWER_SATURATION_MAX) {
939 if (band)
940 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
941 else
942 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
943 }
944
945 /* regulatory txpower limits ... reg_limit values are in half-dBm,
946 * max_power_avg values are in dBm, convert * 2 */
947 if (is_ht40)
948 reg_limit = ch_info->ht40_max_power_avg * 2;
949 else
950 reg_limit = ch_info->max_power_avg * 2;
951
952 if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
953 (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
954 if (band)
955 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
956 else
957 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
958 }
959
960 /* Interpolate txpower calibration values for this channel,
961 * based on factory calibration tests on spaced channels. */
962 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
963
964 /* calculate tx gain adjustment based on power supply voltage */
965 voltage = le16_to_cpu(priv->calib_info->voltage);
966 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
967 voltage_compensation =
968 iwl4965_get_voltage_compensation(voltage, init_voltage);
969
970 IWL_DEBUG_TXPOWER(priv, "curr volt %d eeprom volt %d volt comp %d\n",
971 init_voltage,
972 voltage, voltage_compensation);
973
974 /* get current temperature (Celsius) */
975 current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
976 current_temp = min(priv->temperature, IWL_TX_POWER_TEMPERATURE_MAX);
977 current_temp = KELVIN_TO_CELSIUS(current_temp);
978
979 /* select thermal txpower adjustment params, based on channel group
980 * (same frequency group used for mimo txatten adjustment) */
981 degrees_per_05db_num =
982 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
983 degrees_per_05db_denom =
984 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
985
986 /* get per-chain txpower values from factory measurements */
987 for (c = 0; c < 2; c++) {
988 measurement = &ch_eeprom_info.measurements[c][1];
989
990 /* txgain adjustment (in half-dB steps) based on difference
991 * between factory and current temperature */
992 factory_temp = measurement->temperature;
993 iwl4965_math_div_round((current_temp - factory_temp) *
994 degrees_per_05db_denom,
995 degrees_per_05db_num,
996 &temperature_comp[c]);
997
998 factory_gain_index[c] = measurement->gain_idx;
999 factory_actual_pwr[c] = measurement->actual_pow;
1000
1001 IWL_DEBUG_TXPOWER(priv, "chain = %d\n", c);
1002 IWL_DEBUG_TXPOWER(priv, "fctry tmp %d, "
1003 "curr tmp %d, comp %d steps\n",
1004 factory_temp, current_temp,
1005 temperature_comp[c]);
1006
1007 IWL_DEBUG_TXPOWER(priv, "fctry idx %d, fctry pwr %d\n",
1008 factory_gain_index[c],
1009 factory_actual_pwr[c]);
1010 }
1011
1012 /* for each of 33 bit-rates (including 1 for CCK) */
1013 for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
1014 u8 is_mimo_rate;
1015 union iwl4965_tx_power_dual_stream tx_power;
1016
1017 /* for mimo, reduce each chain's txpower by half
1018 * (3dB, 6 steps), so total output power is regulatory
1019 * compliant. */
1020 if (i & 0x8) {
1021 current_regulatory = reg_limit -
1022 IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
1023 is_mimo_rate = 1;
1024 } else {
1025 current_regulatory = reg_limit;
1026 is_mimo_rate = 0;
1027 }
1028
1029 /* find txpower limit, either hardware or regulatory */
1030 power_limit = saturation_power - back_off_table[i];
1031 if (power_limit > current_regulatory)
1032 power_limit = current_regulatory;
1033
1034 /* reduce user's txpower request if necessary
1035 * for this rate on this channel */
1036 target_power = user_target_power;
1037 if (target_power > power_limit)
1038 target_power = power_limit;
1039
1040 IWL_DEBUG_TXPOWER(priv, "rate %d sat %d reg %d usr %d tgt %d\n",
1041 i, saturation_power - back_off_table[i],
1042 current_regulatory, user_target_power,
1043 target_power);
1044
1045 /* for each of 2 Tx chains (radio transmitters) */
1046 for (c = 0; c < 2; c++) {
1047 s32 atten_value;
1048
1049 if (is_mimo_rate)
1050 atten_value =
1051 (s32)le32_to_cpu(priv->card_alive_init.
1052 tx_atten[txatten_grp][c]);
1053 else
1054 atten_value = 0;
1055
1056 /* calculate index; higher index means lower txpower */
1057 power_index = (u8) (factory_gain_index[c] -
1058 (target_power -
1059 factory_actual_pwr[c]) -
1060 temperature_comp[c] -
1061 voltage_compensation +
1062 atten_value);
1063
1064/* IWL_DEBUG_TXPOWER(priv, "calculated txpower index %d\n",
1065 power_index); */
1066
1067 if (power_index < get_min_power_index(i, band))
1068 power_index = get_min_power_index(i, band);
1069
1070 /* adjust 5 GHz index to support negative indexes */
1071 if (!band)
1072 power_index += 9;
1073
1074 /* CCK, rate 32, reduce txpower for CCK */
1075 if (i == POWER_TABLE_CCK_ENTRY)
1076 power_index +=
1077 IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
1078
1079 /* stay within the table! */
1080 if (power_index > 107) {
1081 IWL_WARN(priv, "txpower index %d > 107\n",
1082 power_index);
1083 power_index = 107;
1084 }
1085 if (power_index < 0) {
1086 IWL_WARN(priv, "txpower index %d < 0\n",
1087 power_index);
1088 power_index = 0;
1089 }
1090
1091 /* fill txpower command for this rate/chain */
1092 tx_power.s.radio_tx_gain[c] =
1093 gain_table[band][power_index].radio;
1094 tx_power.s.dsp_predis_atten[c] =
1095 gain_table[band][power_index].dsp;
1096
1097 IWL_DEBUG_TXPOWER(priv, "chain %d mimo %d index %d "
1098 "gain 0x%02x dsp %d\n",
1099 c, atten_value, power_index,
1100 tx_power.s.radio_tx_gain[c],
1101 tx_power.s.dsp_predis_atten[c]);
1102 } /* for each chain */
1103
1104 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
1105
1106 } /* for each rate */
1107
1108 return 0;
1109}
1110
1111/**
1112 * iwl4965_send_tx_power - Configure the TXPOWER level user limit
1113 *
1114 * Uses the active RXON for channel, band, and characteristics (ht40, high)
1115 * The power limit is taken from priv->tx_power_user_lmt.
1116 */
1117static int iwl4965_send_tx_power(struct iwl_priv *priv)
1118{
1119 struct iwl4965_txpowertable_cmd cmd = { 0 };
1120 int ret;
1121 u8 band = 0;
1122 bool is_ht40 = false;
1123 u8 ctrl_chan_high = 0;
1124 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1125
1126 if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
1127 "TX Power requested while scanning!\n"))
1128 return -EAGAIN;
1129
1130 band = priv->band == IEEE80211_BAND_2GHZ;
1131
1132 is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
1133
1134 if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1135 ctrl_chan_high = 1;
1136
1137 cmd.band = band;
1138 cmd.channel = ctx->active.channel;
1139
1140 ret = iwl4965_fill_txpower_tbl(priv, band,
1141 le16_to_cpu(ctx->active.channel),
1142 is_ht40, ctrl_chan_high, &cmd.tx_power);
1143 if (ret)
1144 goto out;
1145
1146 ret = iwl_legacy_send_cmd_pdu(priv,
1147 REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
1148
1149out:
1150 return ret;
1151}
1152
/* Send the lightweight RXON_ASSOC command when any of the association
 * related RXON fields differ between staging and active; otherwise do
 * nothing.  Returns 0 when nothing needs sending or on successful queue. */
static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
				   struct iwl_rxon_context *ctx)
{
	int ret = 0;
	struct iwl4965_rxon_assoc_cmd rxon_assoc;
	const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
	const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;

	/* Skip entirely when every field RXON_ASSOC covers is unchanged */
	if ((rxon1->flags == rxon2->flags) &&
	    (rxon1->filter_flags == rxon2->filter_flags) &&
	    (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
	    (rxon1->ofdm_ht_single_stream_basic_rates ==
	     rxon2->ofdm_ht_single_stream_basic_rates) &&
	    (rxon1->ofdm_ht_dual_stream_basic_rates ==
	     rxon2->ofdm_ht_dual_stream_basic_rates) &&
	    (rxon1->rx_chain == rxon2->rx_chain) &&
	    (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
		IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
		return 0;
	}

	rxon_assoc.flags = ctx->staging.flags;
	rxon_assoc.filter_flags = ctx->staging.filter_flags;
	rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
	rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
	rxon_assoc.reserved = 0;
	rxon_assoc.ofdm_ht_single_stream_basic_rates =
	    ctx->staging.ofdm_ht_single_stream_basic_rates;
	rxon_assoc.ofdm_ht_dual_stream_basic_rates =
	    ctx->staging.ofdm_ht_dual_stream_basic_rates;
	rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;

	/* Async fire-and-forget; no completion callback needed */
	ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
				  sizeof(rxon_assoc), &rxon_assoc, NULL);

	return ret;
}
1190
/**
 * iwl4965_commit_rxon - commit the staging RXON configuration to uCode
 * @priv: driver private data
 * @ctx: RXON context to commit (staging -> active)
 *
 * Chooses between the lightweight RXON_ASSOC (when only filter/rate
 * fields changed) and a full RXON.  A full RXON clears the uCode station
 * table, so stations and WEP keys are restored afterwards.  Also resends
 * TX power, since a retune invalidates the previous power table.
 *
 * Returns 0 on success or a negative error code.
 */
static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	/* cast away the const for active_rxon in this function */
	struct iwl_legacy_rxon_cmd *active_rxon = (void *)&ctx->active;
	int ret;
	bool new_assoc =
		!!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);

	if (!iwl_legacy_is_alive(priv))
		return -EBUSY;

	if (!ctx->is_active)
		return 0;

	/* always get timestamp with Rx frame */
	ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;

	ret = iwl_legacy_check_rxon_cmd(priv, ctx);
	if (ret) {
		IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
		return -EINVAL;
	}

	/*
	 * receive commit_rxon request
	 * abort any previous channel switch if still in process
	 */
	if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
	    (priv->switch_channel != ctx->staging.channel)) {
		IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
		      le16_to_cpu(priv->switch_channel));
		iwl_legacy_chswitch_done(priv, false);
	}

	/* If we don't need to send a full RXON, we can use
	 * iwl_rxon_assoc_cmd which is used to reconfigure filter
	 * and other flags for the current radio configuration. */
	if (!iwl_legacy_full_rxon_required(priv, ctx)) {
		ret = iwl_legacy_send_rxon_assoc(priv, ctx);
		if (ret) {
			IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
			return ret;
		}

		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
		iwl_legacy_print_rx_config_cmd(priv, ctx);
		/*
		 * We do not commit tx power settings while channel changing,
		 * do it now if tx power changed.
		 */
		iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
		return 0;
	}

	/* If we are currently associated and the new config requires
	 * an RXON_ASSOC and the new config wants the associated mask enabled,
	 * we must clear the associated from the active configuration
	 * before we apply the new config */
	if (iwl_legacy_is_associated_ctx(ctx) && new_assoc) {
		IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;

		ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
				      sizeof(struct iwl_legacy_rxon_cmd),
				      active_rxon);

		/* If the mask clearing failed then we set
		 * active_rxon back to what it was previously */
		if (ret) {
			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
			IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
			return ret;
		}
		/* Unassoc RXON wiped the uCode station table: rebuild it */
		iwl_legacy_clear_ucode_stations(priv, ctx);
		iwl_legacy_restore_stations(priv, ctx);
		ret = iwl4965_restore_default_wep_keys(priv, ctx);
		if (ret) {
			IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
			return ret;
		}
	}

	IWL_DEBUG_INFO(priv, "Sending RXON\n"
		       "* with%s RXON_FILTER_ASSOC_MSK\n"
		       "* channel = %d\n"
		       "* bssid = %pM\n",
		       (new_assoc ? "" : "out"),
		       le16_to_cpu(ctx->staging.channel),
		       ctx->staging.bssid_addr);

	iwl_legacy_set_rxon_hwcrypto(priv, ctx,
				!priv->cfg->mod_params->sw_crypto);

	/* Apply the new configuration
	 * RXON unassoc clears the station table in uCode so restoration of
	 * stations is needed after it (the RXON command) completes
	 */
	if (!new_assoc) {
		ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
			      sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
		if (ret) {
			IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
			return ret;
		}
		IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
		iwl_legacy_clear_ucode_stations(priv, ctx);
		iwl_legacy_restore_stations(priv, ctx);
		ret = iwl4965_restore_default_wep_keys(priv, ctx);
		if (ret) {
			IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
			return ret;
		}
	}
	if (new_assoc) {
		priv->start_calib = 0;
		/* Apply the new configuration
		 * RXON assoc doesn't clear the station table in uCode,
		 */
		ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
			      sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
		if (ret) {
			IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
			return ret;
		}
		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
	}
	iwl_legacy_print_rx_config_cmd(priv, ctx);

	/* Restart sensitivity calibration for the (possibly) new channel */
	iwl4965_init_sensitivity(priv);

	/* If we issue a new RXON command which required a tune then we must
	 * send a new TXPOWER command or we won't be able to Tx any frames */
	ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
	if (ret) {
		IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
		return ret;
	}

	return 0;
}
1332
/* Build and send a REPLY_CHANNEL_SWITCH command for an 802.11h channel
 * switch announcement, computing the uCode switch time from the CSA count,
 * beacon interval, and the uCode's beacon timestamp.  Returns 0 on success
 * or a negative error (invalid target channel, txpower table failure). */
static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
				     struct ieee80211_channel_switch *ch_switch)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	int rc;
	u8 band = 0;
	bool is_ht40 = false;
	u8 ctrl_chan_high = 0;
	/* NOTE(review): cmd is not zeroed; only the fields below are set.
	 * Confirm no reserved/padding fields of the command matter. */
	struct iwl4965_channel_switch_cmd cmd;
	const struct iwl_channel_info *ch_info;
	u32 switch_time_in_usec, ucode_switch_time;
	u16 ch;
	u32 tsf_low;
	u8 switch_count;
	u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
	struct ieee80211_vif *vif = ctx->vif;
	band = priv->band == IEEE80211_BAND_2GHZ;

	is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);

	if (is_ht40 &&
	    (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
		ctrl_chan_high = 1;

	cmd.band = band;
	cmd.expect_beacon = 0;
	ch = ch_switch->channel->hw_value;
	cmd.channel = cpu_to_le16(ch);
	cmd.rxon_flags = ctx->staging.flags;
	cmd.rxon_filter_flags = ctx->staging.filter_flags;
	switch_count = ch_switch->count;
	tsf_low = ch_switch->timestamp & 0x0ffffffff;
	/*
	 * calculate the ucode channel switch time
	 * adding TSF as one of the factor for when to switch
	 */
	if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
		/* Reduce the CSA count by however many beacons have
		 * already elapsed according to the uCode beacon clock */
		if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
		    beacon_interval)) {
			switch_count -= (priv->ucode_beacon_time -
				tsf_low) / beacon_interval;
		} else
			switch_count = 0;
	}
	if (switch_count <= 1)
		cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
	else {
		switch_time_in_usec =
			vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
		ucode_switch_time = iwl_legacy_usecs_to_beacons(priv,
						 switch_time_in_usec,
						 beacon_interval);
		cmd.switch_time = iwl_legacy_add_beacon_time(priv,
					 priv->ucode_beacon_time,
					 ucode_switch_time,
					 beacon_interval);
	}
	IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
		      cmd.switch_time);
	ch_info = iwl_legacy_get_channel_info(priv, priv->band, ch);
	if (ch_info)
		cmd.expect_beacon = iwl_legacy_is_channel_radar(ch_info);
	else {
		IWL_ERR(priv, "invalid channel switch from %u to %u\n",
			ctx->active.channel, ch);
		return -EFAULT;
	}

	/* Pre-compute the txpower table for the target channel */
	rc = iwl4965_fill_txpower_tbl(priv, band, ch, is_ht40,
				      ctrl_chan_high, &cmd.tx_power);
	if (rc) {
		IWL_DEBUG_11H(priv, "error:%d fill txpower_tbl\n", rc);
		return rc;
	}

	return iwl_legacy_send_cmd_pdu(priv,
			 REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
}
1411
1412/**
1413 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
1414 */
1415static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
1416 struct iwl_tx_queue *txq,
1417 u16 byte_cnt)
1418{
1419 struct iwl4965_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
1420 int txq_id = txq->q.id;
1421 int write_ptr = txq->q.write_ptr;
1422 int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
1423 __le16 bc_ent;
1424
1425 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
1426
1427 bc_ent = cpu_to_le16(len & 0xFFF);
1428 /* Set up byte count within first 256 entries */
1429 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
1430
1431 /* If within first 64 entries, duplicate at end */
1432 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
1433 scd_bc_tbl[txq_id].
1434 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
1435}
1436
1437/**
1438 * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
1439 * @statistics: Provides the temperature reading from the uCode
1440 *
1441 * A return of <0 indicates bogus data in the statistics
1442 */
1443static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
1444{
1445 s32 temperature;
1446 s32 vt;
1447 s32 R1, R2, R3;
1448 u32 R4;
1449
1450 if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
1451 (priv->_4965.statistics.flag &
1452 STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
1453 IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
1454 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
1455 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
1456 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
1457 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
1458 } else {
1459 IWL_DEBUG_TEMP(priv, "Running temperature calibration\n");
1460 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
1461 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
1462 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
1463 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
1464 }
1465
1466 /*
1467 * Temperature is only 23 bits, so sign extend out to 32.
1468 *
1469 * NOTE If we haven't received a statistics notification yet
1470 * with an updated temperature, use R4 provided to us in the
1471 * "initialize" ALIVE response.
1472 */
1473 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
1474 vt = sign_extend32(R4, 23);
1475 else
1476 vt = sign_extend32(le32_to_cpu(priv->_4965.statistics.
1477 general.common.temperature), 23);
1478
1479 IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
1480
1481 if (R3 == R1) {
1482 IWL_ERR(priv, "Calibration conflict R1 == R3\n");
1483 return -1;
1484 }
1485
1486 /* Calculate temperature in degrees Kelvin, adjust by 97%.
1487 * Add offset to center the adjustment around 0 degrees Centigrade. */
1488 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
1489 temperature /= (R3 - R1);
1490 temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
1491
1492 IWL_DEBUG_TEMP(priv, "Calibrated temperature: %dK, %dC\n",
1493 temperature, KELVIN_TO_CELSIUS(temperature));
1494
1495 return temperature;
1496}
1497
/* Adjust Txpower only if temperature variance is greater than threshold. */
#define IWL_TEMPERATURE_THRESHOLD       3

/**
 * iwl4965_is_temp_calib_needed - determines if new calibration is needed
 *
 * If the temperature has changed sufficiently, then a recalibration
 * is needed.
 *
 * Assumes caller will replace priv->last_temperature once calibration
 * executed.
 *
 * Returns 1 when a thermal tx-power recalibration should run, else 0.
 */
static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
{
	int temp_diff;

	/* without a statistics notification there is no fresh reading */
	if (!test_bit(STATUS_STATISTICS, &priv->status)) {
		IWL_DEBUG_TEMP(priv, "Temperature not updated -- no statistics.\n");
		return 0;
	}

	temp_diff = priv->temperature - priv->last_temperature;

	/* get absolute value */
	if (temp_diff < 0) {
		IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff);
		temp_diff = -temp_diff;
	} else if (temp_diff == 0)
		IWL_DEBUG_POWER(priv, "Temperature unchanged\n");
	else
		IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff);

	if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
		IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n");
		return 0;
	}

	IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n");

	return 1;
}
1539
1540static void iwl4965_temperature_calib(struct iwl_priv *priv)
1541{
1542 s32 temp;
1543
1544 temp = iwl4965_hw_get_temperature(priv);
1545 if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
1546 return;
1547
1548 if (priv->temperature != temp) {
1549 if (priv->temperature)
1550 IWL_DEBUG_TEMP(priv, "Temperature changed "
1551 "from %dC to %dC\n",
1552 KELVIN_TO_CELSIUS(priv->temperature),
1553 KELVIN_TO_CELSIUS(temp));
1554 else
1555 IWL_DEBUG_TEMP(priv, "Temperature "
1556 "initialized to %dC\n",
1557 KELVIN_TO_CELSIUS(temp));
1558 }
1559
1560 priv->temperature = temp;
1561 set_bit(STATUS_TEMPERATURE, &priv->status);
1562
1563 if (!priv->disable_tx_power_cal &&
1564 unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
1565 iwl4965_is_temp_calib_needed(priv))
1566 queue_work(priv->workqueue, &priv->txpower_work);
1567}
1568
1569static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
1570{
1571 switch (cmd_id) {
1572 case REPLY_RXON:
1573 return (u16) sizeof(struct iwl4965_rxon_cmd);
1574 default:
1575 return len;
1576 }
1577}
1578
1579static u16 iwl4965_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
1580 u8 *data)
1581{
1582 struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
1583 addsta->mode = cmd->mode;
1584 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
1585 memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
1586 addsta->station_flags = cmd->station_flags;
1587 addsta->station_flags_msk = cmd->station_flags_msk;
1588 addsta->tid_disable_tx = cmd->tid_disable_tx;
1589 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
1590 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
1591 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
1592 addsta->sleep_tx_count = cmd->sleep_tx_count;
1593 addsta->reserved1 = cpu_to_le16(0);
1594 addsta->reserved2 = cpu_to_le16(0);
1595
1596 return (u16)sizeof(struct iwl4965_addsta_cmd);
1597}
1598
1599static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
1600{
1601 return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
1602}
1603
1604/**
1605 * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
1606 */
1607static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
1608 struct iwl_ht_agg *agg,
1609 struct iwl4965_tx_resp *tx_resp,
1610 int txq_id, u16 start_idx)
1611{
1612 u16 status;
1613 struct agg_tx_status *frame_status = tx_resp->u.agg_status;
1614 struct ieee80211_tx_info *info = NULL;
1615 struct ieee80211_hdr *hdr = NULL;
1616 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
1617 int i, sh, idx;
1618 u16 seq;
1619 if (agg->wait_for_ba)
1620 IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
1621
1622 agg->frame_count = tx_resp->frame_count;
1623 agg->start_idx = start_idx;
1624 agg->rate_n_flags = rate_n_flags;
1625 agg->bitmap = 0;
1626
1627 /* num frames attempted by Tx command */
1628 if (agg->frame_count == 1) {
1629 /* Only one frame was attempted; no block-ack will arrive */
1630 status = le16_to_cpu(frame_status[0].status);
1631 idx = start_idx;
1632
1633 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
1634 agg->frame_count, agg->start_idx, idx);
1635
1636 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
1637 info->status.rates[0].count = tx_resp->failure_frame + 1;
1638 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1639 info->flags |= iwl4965_tx_status_to_mac80211(status);
1640 iwl4965_hwrate_to_tx_control(priv, rate_n_flags, info);
1641
1642 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
1643 status & 0xff, tx_resp->failure_frame);
1644 IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
1645
1646 agg->wait_for_ba = 0;
1647 } else {
1648 /* Two or more frames were attempted; expect block-ack */
1649 u64 bitmap = 0;
1650 int start = agg->start_idx;
1651
1652 /* Construct bit-map of pending frames within Tx window */
1653 for (i = 0; i < agg->frame_count; i++) {
1654 u16 sc;
1655 status = le16_to_cpu(frame_status[i].status);
1656 seq = le16_to_cpu(frame_status[i].sequence);
1657 idx = SEQ_TO_INDEX(seq);
1658 txq_id = SEQ_TO_QUEUE(seq);
1659
1660 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
1661 AGG_TX_STATE_ABORT_MSK))
1662 continue;
1663
1664 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
1665 agg->frame_count, txq_id, idx);
1666
1667 hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, idx);
1668 if (!hdr) {
1669 IWL_ERR(priv,
1670 "BUG_ON idx doesn't point to valid skb"
1671 " idx=%d, txq_id=%d\n", idx, txq_id);
1672 return -1;
1673 }
1674
1675 sc = le16_to_cpu(hdr->seq_ctrl);
1676 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
1677 IWL_ERR(priv,
1678 "BUG_ON idx doesn't match seq control"
1679 " idx=%d, seq_idx=%d, seq=%d\n",
1680 idx, SEQ_TO_SN(sc), hdr->seq_ctrl);
1681 return -1;
1682 }
1683
1684 IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
1685 i, idx, SEQ_TO_SN(sc));
1686
1687 sh = idx - start;
1688 if (sh > 64) {
1689 sh = (start - idx) + 0xff;
1690 bitmap = bitmap << sh;
1691 sh = 0;
1692 start = idx;
1693 } else if (sh < -64)
1694 sh = 0xff - (start - idx);
1695 else if (sh < 0) {
1696 sh = start - idx;
1697 start = idx;
1698 bitmap = bitmap << sh;
1699 sh = 0;
1700 }
1701 bitmap |= 1ULL << sh;
1702 IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
1703 start, (unsigned long long)bitmap);
1704 }
1705
1706 agg->bitmap = bitmap;
1707 agg->start_idx = start;
1708 IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
1709 agg->frame_count, agg->start_idx,
1710 (unsigned long long)agg->bitmap);
1711
1712 if (bitmap)
1713 agg->wait_for_ba = 1;
1714 }
1715 return 0;
1716}
1717
/*
 * iwl4965_find_station - look up a station table index by MAC address
 *
 * Broadcast resolves to the context's broadcast-station id.  Otherwise the
 * station table is scanned under sta_lock; entries that are not yet fully
 * active in the uCode are rejected.  Returns the table index, or
 * IWL_INVALID_STATION when not found or not ready.
 */
static u8 iwl4965_find_station(struct iwl_priv *priv, const u8 *addr)
{
	int i;
	int start = 0;
	int ret = IWL_INVALID_STATION;
	unsigned long flags;

	/* in IBSS the first entries are reserved; start at IWL_STA_ID */
	if ((priv->iw_mode == NL80211_IFTYPE_ADHOC))
		start = IWL_STA_ID;

	if (is_broadcast_ether_addr(addr))
		return priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;

	spin_lock_irqsave(&priv->sta_lock, flags);
	for (i = start; i < priv->hw_params.max_stations; i++)
		if (priv->stations[i].used &&
		    (!compare_ether_addr(priv->stations[i].sta.sta.addr,
					 addr))) {
			ret = i;
			goto out;
		}

	IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
			      addr, priv->num_stations);

 out:
	/*
	 * It may be possible that more commands interacting with stations
	 * arrive before we completed processing the adding of
	 * station
	 */
	if (ret != IWL_INVALID_STATION &&
	    (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) ||
	     ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) &&
	      (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) {
		IWL_ERR(priv, "Requested station info for sta %d before ready.\n",
			ret);
		ret = IWL_INVALID_STATION;
	}
	spin_unlock_irqrestore(&priv->sta_lock, flags);
	return ret;
}
1760
1761static int iwl4965_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
1762{
1763 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
1764 return IWL_AP_ID;
1765 } else {
1766 u8 *da = ieee80211_get_DA(hdr);
1767 return iwl4965_find_station(priv, da);
1768 }
1769}
1770
1771/**
1772 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
1773 */
1774static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
1775 struct iwl_rx_mem_buffer *rxb)
1776{
1777 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1778 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1779 int txq_id = SEQ_TO_QUEUE(sequence);
1780 int index = SEQ_TO_INDEX(sequence);
1781 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1782 struct ieee80211_hdr *hdr;
1783 struct ieee80211_tx_info *info;
1784 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
1785 u32 status = le32_to_cpu(tx_resp->u.status);
1786 int uninitialized_var(tid);
1787 int sta_id;
1788 int freed;
1789 u8 *qc = NULL;
1790 unsigned long flags;
1791
1792 if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
1793 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
1794 "is out of range [0-%d] %d %d\n", txq_id,
1795 index, txq->q.n_bd, txq->q.write_ptr,
1796 txq->q.read_ptr);
1797 return;
1798 }
1799
1800 txq->time_stamp = jiffies;
1801 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
1802 memset(&info->status, 0, sizeof(info->status));
1803
1804 hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, index);
1805 if (ieee80211_is_data_qos(hdr->frame_control)) {
1806 qc = ieee80211_get_qos_ctl(hdr);
1807 tid = qc[0] & 0xf;
1808 }
1809
1810 sta_id = iwl4965_get_ra_sta_id(priv, hdr);
1811 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
1812 IWL_ERR(priv, "Station not known\n");
1813 return;
1814 }
1815
1816 spin_lock_irqsave(&priv->sta_lock, flags);
1817 if (txq->sched_retry) {
1818 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
1819 struct iwl_ht_agg *agg = NULL;
1820 WARN_ON(!qc);
1821
1822 agg = &priv->stations[sta_id].tid[tid].agg;
1823
1824 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
1825
1826 /* check if BAR is needed */
1827 if ((tx_resp->frame_count == 1) && !iwl4965_is_tx_success(status))
1828 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1829
1830 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
1831 index = iwl_legacy_queue_dec_wrap(scd_ssn & 0xff,
1832 txq->q.n_bd);
1833 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
1834 "%d index %d\n", scd_ssn , index);
1835 freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
1836 if (qc)
1837 iwl4965_free_tfds_in_queue(priv, sta_id,
1838 tid, freed);
1839
1840 if (priv->mac80211_registered &&
1841 (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark)
1842 && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
1843 iwl_legacy_wake_queue(priv, txq);
1844 }
1845 } else {
1846 info->status.rates[0].count = tx_resp->failure_frame + 1;
1847 info->flags |= iwl4965_tx_status_to_mac80211(status);
1848 iwl4965_hwrate_to_tx_control(priv,
1849 le32_to_cpu(tx_resp->rate_n_flags),
1850 info);
1851
1852 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
1853 "rate_n_flags 0x%x retries %d\n",
1854 txq_id,
1855 iwl4965_get_tx_fail_reason(status), status,
1856 le32_to_cpu(tx_resp->rate_n_flags),
1857 tx_resp->failure_frame);
1858
1859 freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
1860 if (qc && likely(sta_id != IWL_INVALID_STATION))
1861 iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
1862 else if (sta_id == IWL_INVALID_STATION)
1863 IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
1864
1865 if (priv->mac80211_registered &&
1866 (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark))
1867 iwl_legacy_wake_queue(priv, txq);
1868 }
1869 if (qc && likely(sta_id != IWL_INVALID_STATION))
1870 iwl4965_txq_check_empty(priv, sta_id, tid, txq_id);
1871
1872 iwl4965_check_abort_status(priv, tx_resp->frame_count, status);
1873
1874 spin_unlock_irqrestore(&priv->sta_lock, flags);
1875}
1876
1877static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
1878 struct iwl_rx_mem_buffer *rxb)
1879{
1880 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1881 struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw;
1882 u8 rate __maybe_unused =
1883 iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
1884
1885 IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
1886 "tsf:0x%.8x%.8x rate:%d\n",
1887 le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
1888 beacon->beacon_notify_hdr.failure_frame,
1889 le32_to_cpu(beacon->ibss_mgr_status),
1890 le32_to_cpu(beacon->high_tsf),
1891 le32_to_cpu(beacon->low_tsf), rate);
1892
1893 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
1894}
1895
1896/* Set up 4965-specific Rx frame reply handlers */
1897static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
1898{
1899 /* Legacy Rx frames */
1900 priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
1901 /* Tx response */
1902 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
1903 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
1904}
1905
/* 4965-specific host-command ops (RXON family). */
static struct iwl_hcmd_ops iwl4965_hcmd = {
	.rxon_assoc = iwl4965_send_rxon_assoc,
	.commit_rxon = iwl4965_commit_rxon,
	.set_rxon_chain = iwl4965_set_rxon_chain,
};
1911
1912static void iwl4965_post_scan(struct iwl_priv *priv)
1913{
1914 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1915
1916 /*
1917 * Since setting the RXON may have been deferred while
1918 * performing the scan, fire one off if needed
1919 */
1920 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
1921 iwl_legacy_commit_rxon(priv, ctx);
1922}
1923
/*
 * iwl4965_post_associate - finish association on the BSS context
 *
 * Commits an unassociated RXON, sends the timing command, then commits
 * the associated RXON with AID, preamble/slot flags and HT config, and
 * finally kicks beaconing (IBSS) and the Rx calibrations.  Statement
 * order is significant: timing must be sent while unassociated.
 */
static void iwl4965_post_associate(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct ieee80211_vif *vif = ctx->vif;
	struct ieee80211_conf *conf = NULL;
	int ret = 0;

	if (!vif || !priv->is_open)
		return;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	iwl_legacy_scan_cancel_timeout(priv, 200);

	/* NOTE(review): conf is fetched but never read below */
	conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);

	/* drop to unassociated so the timing command is accepted */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	iwl_legacy_commit_rxon(priv, ctx);

	ret = iwl_legacy_send_rxon_timing(priv, ctx);
	if (ret)
		IWL_WARN(priv, "RXON timing - "
			    "Attempting to continue.\n");

	ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;

	iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);

	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);

	ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);

	IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
			vif->bss_conf.aid, vif->bss_conf.beacon_int);

	if (vif->bss_conf.use_short_preamble)
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

	/* short slot is only meaningful on 2.4 GHz */
	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
		if (vif->bss_conf.use_short_slot)
			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
	}

	iwl_legacy_commit_rxon(priv, ctx);

	IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
			vif->bss_conf.aid, ctx->active.bssid_addr);

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		break;
	case NL80211_IFTYPE_ADHOC:
		iwl4965_send_beacon_cmd(priv);
		break;
	default:
		IWL_ERR(priv, "%s Should not be called in %d mode\n",
			  __func__, vif->type);
		break;
	}

	/* the chain noise calibration will enabled PM upon completion
	 * If chain noise has already been run, then we need to enable
	 * power management here */
	if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
		iwl_legacy_power_update_mode(priv, false);

	/* Enable Rx differential gain and sensitivity calibrations */
	iwl4965_chain_noise_reset(priv);
	priv->start_calib = 1;
}
2000
/*
 * iwl4965_config_ap - bring up / refresh the AP-mode configuration
 *
 * On first bring-up (not yet associated) this sequences an unassociated
 * RXON, the timing command, HT/chain/preamble/slot flags, the beacon
 * command and finally the associated RXON.  Must be called with
 * priv->mutex held.
 */
static void iwl4965_config_ap(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct ieee80211_vif *vif = ctx->vif;
	int ret = 0;

	lockdep_assert_held(&priv->mutex);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* The following should be done only at AP bring up */
	if (!iwl_legacy_is_associated_ctx(ctx)) {

		/* RXON - unassoc (to set timing command) */
		ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		iwl_legacy_commit_rxon(priv, ctx);

		/* RXON Timing */
		ret = iwl_legacy_send_rxon_timing(priv, ctx);
		if (ret)
			IWL_WARN(priv, "RXON timing failed - "
					"Attempting to continue.\n");

		/* AP has all antennas */
		priv->chain_noise_data.active_chains =
			priv->hw_params.valid_rx_ant;
		iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
		if (priv->cfg->ops->hcmd->set_rxon_chain)
			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);

		ctx->staging.assoc_id = 0;

		if (vif->bss_conf.use_short_preamble)
			ctx->staging.flags |=
				RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &=
				~RXON_FLG_SHORT_PREAMBLE_MSK;

		/* short slot only applies on the 2.4 GHz band */
		if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
			if (vif->bss_conf.use_short_slot)
				ctx->staging.flags |=
					RXON_FLG_SHORT_SLOT_MSK;
			else
				ctx->staging.flags &=
					~RXON_FLG_SHORT_SLOT_MSK;
		}
		/* need to send beacon cmd before committing assoc RXON! */
		iwl4965_send_beacon_cmd(priv);
		/* restore RXON assoc */
		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		iwl_legacy_commit_rxon(priv, ctx);
	}
	/* NOTE(review): on first bring-up the beacon cmd is sent twice
	 * (once above, once here) — presumably to refresh the beacon
	 * after the assoc RXON; confirm before changing */
	iwl4965_send_beacon_cmd(priv);
}
2057
/* Host-command helper ops: sizing, ADD_STA building, scan hooks. */
static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
	.get_hcmd_size = iwl4965_get_hcmd_size,
	.build_addsta_hcmd = iwl4965_build_addsta_hcmd,
	.request_scan = iwl4965_request_scan,
	.post_scan = iwl4965_post_scan,
};
2064
/* Low-level library ops: Tx queue management, firmware load, APM,
 * EEPROM layout, tx power, temperature and debugfs hooks for 4965. */
static struct iwl_lib_ops iwl4965_lib = {
	.set_hw_params = iwl4965_hw_set_hw_params,
	.txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
	.txq_attach_buf_to_tfd = iwl4965_hw_txq_attach_buf_to_tfd,
	.txq_free_tfd = iwl4965_hw_txq_free_tfd,
	.txq_init = iwl4965_hw_tx_queue_init,
	.rx_handler_setup = iwl4965_rx_handler_setup,
	.is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
	.init_alive_start = iwl4965_init_alive_start,
	.load_ucode = iwl4965_load_bsm,
	.dump_nic_error_log = iwl4965_dump_nic_error_log,
	.dump_fh = iwl4965_dump_fh,
	.set_channel_switch = iwl4965_hw_channel_switch,
	.apm_ops = {
		.init = iwl_legacy_apm_init,
		.config = iwl4965_nic_config,
	},
	.eeprom_ops = {
		.regulatory_bands = {
			EEPROM_REGULATORY_BAND_1_CHANNELS,
			EEPROM_REGULATORY_BAND_2_CHANNELS,
			EEPROM_REGULATORY_BAND_3_CHANNELS,
			EEPROM_REGULATORY_BAND_4_CHANNELS,
			EEPROM_REGULATORY_BAND_5_CHANNELS,
			EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
			EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
		},
		.acquire_semaphore = iwl4965_eeprom_acquire_semaphore,
		.release_semaphore = iwl4965_eeprom_release_semaphore,
	},
	.send_tx_power	= iwl4965_send_tx_power,
	.update_chain_flags = iwl4965_update_chain_flags,
	.temp_ops = {
		.temperature = iwl4965_temperature_calib,
	},
	.debugfs_ops = {
		.rx_stats_read = iwl4965_ucode_rx_stats_read,
		.tx_stats_read = iwl4965_ucode_tx_stats_read,
		.general_stats_read = iwl4965_ucode_general_stats_read,
	},
};
2106
/* Legacy-core callbacks: association, AP config and station management. */
static const struct iwl_legacy_ops iwl4965_legacy_ops = {
	.post_associate = iwl4965_post_associate,
	.config_ap = iwl4965_config_ap,
	.manage_ibss_station = iwl4965_manage_ibss_station,
	.update_bcast_stations = iwl4965_update_bcast_stations,
};
2113
/* mac80211 entry points for 4965: a mix of 4965-specific handlers and
 * shared iwl_legacy_* implementations. */
struct ieee80211_ops iwl4965_hw_ops = {
	.tx = iwl4965_mac_tx,
	.start = iwl4965_mac_start,
	.stop = iwl4965_mac_stop,
	.add_interface = iwl_legacy_mac_add_interface,
	.remove_interface = iwl_legacy_mac_remove_interface,
	.change_interface = iwl_legacy_mac_change_interface,
	.config = iwl_legacy_mac_config,
	.configure_filter = iwl4965_configure_filter,
	.set_key = iwl4965_mac_set_key,
	.update_tkip_key = iwl4965_mac_update_tkip_key,
	.conf_tx = iwl_legacy_mac_conf_tx,
	.reset_tsf = iwl_legacy_mac_reset_tsf,
	.bss_info_changed = iwl_legacy_mac_bss_info_changed,
	.ampdu_action = iwl4965_mac_ampdu_action,
	.hw_scan = iwl_legacy_mac_hw_scan,
	.sta_add = iwl4965_mac_sta_add,
	.sta_remove = iwl_legacy_mac_sta_remove,
	.channel_switch = iwl4965_mac_channel_switch,
	.tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
};
2135
/* Aggregate ops table wired into iwl4965_cfg below. */
static const struct iwl_ops iwl4965_ops = {
	.lib = &iwl4965_lib,
	.hcmd = &iwl4965_hcmd,
	.utils = &iwl4965_hcmd_utils,
	.led = &iwl4965_led_ops,
	.legacy = &iwl4965_legacy_ops,
	.ieee80211_ops = &iwl4965_hw_ops,
};
2144
/* Hardware-family parameters shared by all 4965 configs. */
static struct iwl_base_params iwl4965_base_params = {
	.eeprom_size = IWL4965_EEPROM_IMG_SIZE,
	.num_of_queues = IWL49_NUM_QUEUES,
	.num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
	.pll_cfg_val = 0,
	.set_l0s = true,
	.use_bsm = true,
	.led_compensation = 61,
	.chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
	.wd_timeout = IWL_DEF_WD_TIMEOUT,
	.temperature_kelvin = true,
	.ucode_tracing = true,
	.sensitivity_calib_by_driver = true,
	.chain_noise_calib_by_driver = true,
};
2160
/* Device configuration matched against the PCI id table elsewhere. */
struct iwl_cfg iwl4965_cfg = {
	.name = "Intel(R) Wireless WiFi Link 4965AGN",
	.fw_name_pre = IWL4965_FW_PRE,
	.ucode_api_max = IWL4965_UCODE_API_MAX,
	.ucode_api_min = IWL4965_UCODE_API_MIN,
	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
	.valid_tx_ant = ANT_AB,
	.valid_rx_ant = ANT_ABC,
	.eeprom_ver = EEPROM_4965_EEPROM_VERSION,
	.eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
	.ops = &iwl4965_ops,
	.mod_params = &iwl4965_mod_params,
	.base_params = &iwl4965_base_params,
	.led_mode = IWL_LED_BLINK,
	/*
	 * Force use of chains B and C for scan RX on 5 GHz band
	 * because the device has off-channel reception on chain A.
	 */
	.scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
};
2181
/* Module firmware */
/* Advertise the highest-supported 4965 ucode image to userspace tooling. */
MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.h b/drivers/net/wireless/iwlegacy/iwl-4965.h
deleted file mode 100644
index 01f8163daf16..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965.h
+++ /dev/null
@@ -1,282 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_4965_h__
64#define __iwl_4965_h__
65
66#include "iwl-dev.h"
67
68/* configuration for the _4965 devices */
69extern struct iwl_cfg iwl4965_cfg;
70
71extern struct iwl_mod_params iwl4965_mod_params;
72
73extern struct ieee80211_ops iwl4965_hw_ops;
74
75/* tx queue */
76void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
77 int sta_id, int tid, int freed);
78
79/* RXON */
80void iwl4965_set_rxon_chain(struct iwl_priv *priv,
81 struct iwl_rxon_context *ctx);
82
83/* uCode */
84int iwl4965_verify_ucode(struct iwl_priv *priv);
85
86/* lib */
87void iwl4965_check_abort_status(struct iwl_priv *priv,
88 u8 frame_count, u32 status);
89
90void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
91int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
92int iwl4965_hw_nic_init(struct iwl_priv *priv);
93int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display);
94
95/* rx */
96void iwl4965_rx_queue_restock(struct iwl_priv *priv);
97void iwl4965_rx_replenish(struct iwl_priv *priv);
98void iwl4965_rx_replenish_now(struct iwl_priv *priv);
99void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
100int iwl4965_rxq_stop(struct iwl_priv *priv);
101int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
102void iwl4965_rx_reply_rx(struct iwl_priv *priv,
103 struct iwl_rx_mem_buffer *rxb);
104void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
105 struct iwl_rx_mem_buffer *rxb);
106void iwl4965_rx_handle(struct iwl_priv *priv);
107
108/* tx */
109void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
110int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
111 struct iwl_tx_queue *txq,
112 dma_addr_t addr, u16 len, u8 reset, u8 pad);
113int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
114 struct iwl_tx_queue *txq);
115void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
116 struct ieee80211_tx_info *info);
117int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
118int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
119 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
120int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
121 struct ieee80211_sta *sta, u16 tid);
122int iwl4965_txq_check_empty(struct iwl_priv *priv,
123 int sta_id, u8 tid, int txq_id);
124void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
125 struct iwl_rx_mem_buffer *rxb);
126int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
127void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
128int iwl4965_txq_ctx_alloc(struct iwl_priv *priv);
129void iwl4965_txq_ctx_reset(struct iwl_priv *priv);
130void iwl4965_txq_ctx_stop(struct iwl_priv *priv);
131void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask);
132
133/*
134 * Acquire priv->lock before calling this function !
135 */
136void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index);
137/**
138 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
139 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
140 * @scd_retry: (1) Indicates queue will be used in aggregation mode
141 *
142 * NOTE: Acquire priv->lock before calling this function !
143 */
144void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
145 struct iwl_tx_queue *txq,
146 int tx_fifo_id, int scd_retry);
147
148static inline u32 iwl4965_tx_status_to_mac80211(u32 status)
149{
150 status &= TX_STATUS_MSK;
151
152 switch (status) {
153 case TX_STATUS_SUCCESS:
154 case TX_STATUS_DIRECT_DONE:
155 return IEEE80211_TX_STAT_ACK;
156 case TX_STATUS_FAIL_DEST_PS:
157 return IEEE80211_TX_STAT_TX_FILTERED;
158 default:
159 return 0;
160 }
161}
162
163static inline bool iwl4965_is_tx_success(u32 status)
164{
165 status &= TX_STATUS_MSK;
166 return (status == TX_STATUS_SUCCESS) ||
167 (status == TX_STATUS_DIRECT_DONE);
168}
169
170u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
171
172/* rx */
173void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
174 struct iwl_rx_mem_buffer *rxb);
175bool iwl4965_good_plcp_health(struct iwl_priv *priv,
176 struct iwl_rx_packet *pkt);
177void iwl4965_rx_statistics(struct iwl_priv *priv,
178 struct iwl_rx_mem_buffer *rxb);
179void iwl4965_reply_statistics(struct iwl_priv *priv,
180 struct iwl_rx_mem_buffer *rxb);
181
182/* scan */
183int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
184
185/* station mgmt */
186int iwl4965_manage_ibss_station(struct iwl_priv *priv,
187 struct ieee80211_vif *vif, bool add);
188
189/* hcmd */
190int iwl4965_send_beacon_cmd(struct iwl_priv *priv);
191
192#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
193const char *iwl4965_get_tx_fail_reason(u32 status);
194#else
195static inline const char *
196iwl4965_get_tx_fail_reason(u32 status) { return ""; }
197#endif
198
199/* station management */
200int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
201 struct iwl_rxon_context *ctx);
202int iwl4965_add_bssid_station(struct iwl_priv *priv,
203 struct iwl_rxon_context *ctx,
204 const u8 *addr, u8 *sta_id_r);
205int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
206 struct iwl_rxon_context *ctx,
207 struct ieee80211_key_conf *key);
208int iwl4965_set_default_wep_key(struct iwl_priv *priv,
209 struct iwl_rxon_context *ctx,
210 struct ieee80211_key_conf *key);
211int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
212 struct iwl_rxon_context *ctx);
213int iwl4965_set_dynamic_key(struct iwl_priv *priv,
214 struct iwl_rxon_context *ctx,
215 struct ieee80211_key_conf *key, u8 sta_id);
216int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
217 struct iwl_rxon_context *ctx,
218 struct ieee80211_key_conf *key, u8 sta_id);
219void iwl4965_update_tkip_key(struct iwl_priv *priv,
220 struct iwl_rxon_context *ctx,
221 struct ieee80211_key_conf *keyconf,
222 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
223int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv,
224 int sta_id, int tid);
225int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
226 int tid, u16 ssn);
227int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
228 int tid);
229void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv,
230 int sta_id, int cnt);
231int iwl4965_update_bcast_stations(struct iwl_priv *priv);
232
233/* rate */
234static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx)
235{
236 return BIT(ant_idx) << RATE_MCS_ANT_POS;
237}
238
239static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags)
240{
241 return le32_to_cpu(rate_n_flags) & 0xFF;
242}
243
244static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags)
245{
246 return cpu_to_le32(flags|(u32)rate);
247}
248
249/* eeprom */
250void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
251int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv);
252void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv);
253int iwl4965_eeprom_check_version(struct iwl_priv *priv);
254
255/* mac80211 handlers (for 4965) */
256void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
257int iwl4965_mac_start(struct ieee80211_hw *hw);
258void iwl4965_mac_stop(struct ieee80211_hw *hw);
259void iwl4965_configure_filter(struct ieee80211_hw *hw,
260 unsigned int changed_flags,
261 unsigned int *total_flags,
262 u64 multicast);
263int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
264 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
265 struct ieee80211_key_conf *key);
266void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
267 struct ieee80211_vif *vif,
268 struct ieee80211_key_conf *keyconf,
269 struct ieee80211_sta *sta,
270 u32 iv32, u16 *phase1key);
271int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
272 struct ieee80211_vif *vif,
273 enum ieee80211_ampdu_mlme_action action,
274 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
275 u8 buf_size);
276int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
277 struct ieee80211_vif *vif,
278 struct ieee80211_sta *sta);
279void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
280 struct ieee80211_channel_switch *ch_switch);
281
282#endif /* __iwl_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
deleted file mode 100644
index 2bd5659310d7..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ /dev/null
@@ -1,2661 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <net/mac80211.h>
35
36#include "iwl-eeprom.h"
37#include "iwl-dev.h"
38#include "iwl-debug.h"
39#include "iwl-core.h"
40#include "iwl-io.h"
41#include "iwl-power.h"
42#include "iwl-sta.h"
43#include "iwl-helpers.h"
44
45
46MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
47MODULE_VERSION(IWLWIFI_VERSION);
48MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
49MODULE_LICENSE("GPL");
50
51/*
52 * set bt_coex_active to true, uCode will do kill/defer
53 * every time the priority line is asserted (BT is sending signals on the
54 * priority line in the PCIx).
55 * set bt_coex_active to false, uCode will ignore the BT activity and
56 * perform the normal operation
57 *
58 * User might experience transmit issue on some platform due to WiFi/BT
59 * co-exist problem. The possible behaviors are:
60 * Able to scan and finding all the available AP
61 * Not able to associate with any AP
62 * On those platforms, WiFi communication can be restored by set
63 * "bt_coex_active" module parameter to "false"
64 *
65 * default: bt_coex_active = true (BT_COEX_ENABLE)
66 */
67static bool bt_coex_active = true;
68module_param(bt_coex_active, bool, S_IRUGO);
69MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
70
71u32 iwlegacy_debug_level;
72EXPORT_SYMBOL(iwlegacy_debug_level);
73
74const u8 iwlegacy_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
75EXPORT_SYMBOL(iwlegacy_bcast_addr);
76
77
78/* This function both allocates and initializes hw and priv. */
79struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg)
80{
81 struct iwl_priv *priv;
82 /* mac80211 allocates memory for this device instance, including
83 * space for this driver's private structure */
84 struct ieee80211_hw *hw;
85
86 hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
87 cfg->ops->ieee80211_ops);
88 if (hw == NULL) {
89 pr_err("%s: Can not allocate network device\n",
90 cfg->name);
91 goto out;
92 }
93
94 priv = hw->priv;
95 priv->hw = hw;
96
97out:
98 return hw;
99}
100EXPORT_SYMBOL(iwl_legacy_alloc_all);
101
102#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
103#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
104static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv,
105 struct ieee80211_sta_ht_cap *ht_info,
106 enum ieee80211_band band)
107{
108 u16 max_bit_rate = 0;
109 u8 rx_chains_num = priv->hw_params.rx_chains_num;
110 u8 tx_chains_num = priv->hw_params.tx_chains_num;
111
112 ht_info->cap = 0;
113 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
114
115 ht_info->ht_supported = true;
116
117 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
118 max_bit_rate = MAX_BIT_RATE_20_MHZ;
119 if (priv->hw_params.ht40_channel & BIT(band)) {
120 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
121 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
122 ht_info->mcs.rx_mask[4] = 0x01;
123 max_bit_rate = MAX_BIT_RATE_40_MHZ;
124 }
125
126 if (priv->cfg->mod_params->amsdu_size_8K)
127 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
128
129 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
130 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
131
132 ht_info->mcs.rx_mask[0] = 0xFF;
133 if (rx_chains_num >= 2)
134 ht_info->mcs.rx_mask[1] = 0xFF;
135 if (rx_chains_num >= 3)
136 ht_info->mcs.rx_mask[2] = 0xFF;
137
138 /* Highest supported Rx data rate */
139 max_bit_rate *= rx_chains_num;
140 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
141 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
142
143 /* Tx MCS capabilities */
144 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
145 if (tx_chains_num != rx_chains_num) {
146 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
147 ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
148 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
149 }
150}
151
152/**
153 * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based from eeprom
154 */
155int iwl_legacy_init_geos(struct iwl_priv *priv)
156{
157 struct iwl_channel_info *ch;
158 struct ieee80211_supported_band *sband;
159 struct ieee80211_channel *channels;
160 struct ieee80211_channel *geo_ch;
161 struct ieee80211_rate *rates;
162 int i = 0;
163 s8 max_tx_power = 0;
164
165 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
166 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
167 IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
168 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
169 return 0;
170 }
171
172 channels = kzalloc(sizeof(struct ieee80211_channel) *
173 priv->channel_count, GFP_KERNEL);
174 if (!channels)
175 return -ENOMEM;
176
177 rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
178 GFP_KERNEL);
179 if (!rates) {
180 kfree(channels);
181 return -ENOMEM;
182 }
183
184 /* 5.2GHz channels start after the 2.4GHz channels */
185 sband = &priv->bands[IEEE80211_BAND_5GHZ];
186 sband->channels = &channels[ARRAY_SIZE(iwlegacy_eeprom_band_1)];
187 /* just OFDM */
188 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
189 sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
190
191 if (priv->cfg->sku & IWL_SKU_N)
192 iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
193 IEEE80211_BAND_5GHZ);
194
195 sband = &priv->bands[IEEE80211_BAND_2GHZ];
196 sband->channels = channels;
197 /* OFDM & CCK */
198 sband->bitrates = rates;
199 sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
200
201 if (priv->cfg->sku & IWL_SKU_N)
202 iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
203 IEEE80211_BAND_2GHZ);
204
205 priv->ieee_channels = channels;
206 priv->ieee_rates = rates;
207
208 for (i = 0; i < priv->channel_count; i++) {
209 ch = &priv->channel_info[i];
210
211 if (!iwl_legacy_is_channel_valid(ch))
212 continue;
213
214 sband = &priv->bands[ch->band];
215
216 geo_ch = &sband->channels[sband->n_channels++];
217
218 geo_ch->center_freq =
219 ieee80211_channel_to_frequency(ch->channel, ch->band);
220 geo_ch->max_power = ch->max_power_avg;
221 geo_ch->max_antenna_gain = 0xff;
222 geo_ch->hw_value = ch->channel;
223
224 if (iwl_legacy_is_channel_valid(ch)) {
225 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
226 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
227
228 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
229 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
230
231 if (ch->flags & EEPROM_CHANNEL_RADAR)
232 geo_ch->flags |= IEEE80211_CHAN_RADAR;
233
234 geo_ch->flags |= ch->ht40_extension_channel;
235
236 if (ch->max_power_avg > max_tx_power)
237 max_tx_power = ch->max_power_avg;
238 } else {
239 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
240 }
241
242 IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
243 ch->channel, geo_ch->center_freq,
244 iwl_legacy_is_channel_a_band(ch) ? "5.2" : "2.4",
245 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
246 "restricted" : "valid",
247 geo_ch->flags);
248 }
249
250 priv->tx_power_device_lmt = max_tx_power;
251 priv->tx_power_user_lmt = max_tx_power;
252 priv->tx_power_next = max_tx_power;
253
254 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
255 priv->cfg->sku & IWL_SKU_A) {
256 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
257 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
258 priv->pci_dev->device,
259 priv->pci_dev->subsystem_device);
260 priv->cfg->sku &= ~IWL_SKU_A;
261 }
262
263 IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
264 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
265 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
266
267 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
268
269 return 0;
270}
271EXPORT_SYMBOL(iwl_legacy_init_geos);
272
273/*
274 * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos
275 */
276void iwl_legacy_free_geos(struct iwl_priv *priv)
277{
278 kfree(priv->ieee_channels);
279 kfree(priv->ieee_rates);
280 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
281}
282EXPORT_SYMBOL(iwl_legacy_free_geos);
283
284static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv,
285 enum ieee80211_band band,
286 u16 channel, u8 extension_chan_offset)
287{
288 const struct iwl_channel_info *ch_info;
289
290 ch_info = iwl_legacy_get_channel_info(priv, band, channel);
291 if (!iwl_legacy_is_channel_valid(ch_info))
292 return false;
293
294 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
295 return !(ch_info->ht40_extension_channel &
296 IEEE80211_CHAN_NO_HT40PLUS);
297 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
298 return !(ch_info->ht40_extension_channel &
299 IEEE80211_CHAN_NO_HT40MINUS);
300
301 return false;
302}
303
304bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
305 struct iwl_rxon_context *ctx,
306 struct ieee80211_sta_ht_cap *ht_cap)
307{
308 if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
309 return false;
310
311 /*
312 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
313 * the bit will not set if it is pure 40MHz case
314 */
315 if (ht_cap && !ht_cap->ht_supported)
316 return false;
317
318#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
319 if (priv->disable_ht40)
320 return false;
321#endif
322
323 return iwl_legacy_is_channel_extension(priv, priv->band,
324 le16_to_cpu(ctx->staging.channel),
325 ctx->ht.extension_chan_offset);
326}
327EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed);
328
329static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
330{
331 u16 new_val;
332 u16 beacon_factor;
333
334 /*
335 * If mac80211 hasn't given us a beacon interval, program
336 * the default into the device.
337 */
338 if (!beacon_val)
339 return DEFAULT_BEACON_INTERVAL;
340
341 /*
342 * If the beacon interval we obtained from the peer
343 * is too large, we'll have to wake up more often
344 * (and in IBSS case, we'll beacon too much)
345 *
346 * For example, if max_beacon_val is 4096, and the
347 * requested beacon interval is 7000, we'll have to
348 * use 3500 to be able to wake up on the beacons.
349 *
350 * This could badly influence beacon detection stats.
351 */
352
353 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
354 new_val = beacon_val / beacon_factor;
355
356 if (!new_val)
357 new_val = max_beacon_val;
358
359 return new_val;
360}
361
362int
363iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
364{
365 u64 tsf;
366 s32 interval_tm, rem;
367 struct ieee80211_conf *conf = NULL;
368 u16 beacon_int;
369 struct ieee80211_vif *vif = ctx->vif;
370
371 conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
372
373 lockdep_assert_held(&priv->mutex);
374
375 memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
376
377 ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
378 ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
379
380 beacon_int = vif ? vif->bss_conf.beacon_int : 0;
381
382 /*
383 * TODO: For IBSS we need to get atim_window from mac80211,
384 * for now just always use 0
385 */
386 ctx->timing.atim_window = 0;
387
388 beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int,
389 priv->hw_params.max_beacon_itrvl * TIME_UNIT);
390 ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
391
392 tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */
393 interval_tm = beacon_int * TIME_UNIT;
394 rem = do_div(tsf, interval_tm);
395 ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
396
397 ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
398
399 IWL_DEBUG_ASSOC(priv,
400 "beacon interval %d beacon timer %d beacon tim %d\n",
401 le16_to_cpu(ctx->timing.beacon_interval),
402 le32_to_cpu(ctx->timing.beacon_init_val),
403 le16_to_cpu(ctx->timing.atim_window));
404
405 return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
406 sizeof(ctx->timing), &ctx->timing);
407}
408EXPORT_SYMBOL(iwl_legacy_send_rxon_timing);
409
410void
411iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
412 struct iwl_rxon_context *ctx,
413 int hw_decrypt)
414{
415 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
416
417 if (hw_decrypt)
418 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
419 else
420 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
421
422}
423EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto);
424
425/* validate RXON structure is valid */
426int
427iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
428{
429 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
430 bool error = false;
431
432 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
433 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
434 IWL_WARN(priv, "check 2.4G: wrong narrow\n");
435 error = true;
436 }
437 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
438 IWL_WARN(priv, "check 2.4G: wrong radar\n");
439 error = true;
440 }
441 } else {
442 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
443 IWL_WARN(priv, "check 5.2G: not short slot!\n");
444 error = true;
445 }
446 if (rxon->flags & RXON_FLG_CCK_MSK) {
447 IWL_WARN(priv, "check 5.2G: CCK!\n");
448 error = true;
449 }
450 }
451 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
452 IWL_WARN(priv, "mac/bssid mcast!\n");
453 error = true;
454 }
455
456 /* make sure basic rates 6Mbps and 1Mbps are supported */
457 if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
458 (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
459 IWL_WARN(priv, "neither 1 nor 6 are basic\n");
460 error = true;
461 }
462
463 if (le16_to_cpu(rxon->assoc_id) > 2007) {
464 IWL_WARN(priv, "aid > 2007\n");
465 error = true;
466 }
467
468 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
469 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
470 IWL_WARN(priv, "CCK and short slot\n");
471 error = true;
472 }
473
474 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
475 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
476 IWL_WARN(priv, "CCK and auto detect");
477 error = true;
478 }
479
480 if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
481 RXON_FLG_TGG_PROTECT_MSK)) ==
482 RXON_FLG_TGG_PROTECT_MSK) {
483 IWL_WARN(priv, "TGg but no auto-detect\n");
484 error = true;
485 }
486
487 if (error)
488 IWL_WARN(priv, "Tuning to channel %d\n",
489 le16_to_cpu(rxon->channel));
490
491 if (error) {
492 IWL_ERR(priv, "Invalid RXON\n");
493 return -EINVAL;
494 }
495 return 0;
496}
497EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd);
498
499/**
500 * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
501 * @priv: staging_rxon is compared to active_rxon
502 *
503 * If the RXON structure is changing enough to require a new tune,
504 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
505 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
506 */
507int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
508 struct iwl_rxon_context *ctx)
509{
510 const struct iwl_legacy_rxon_cmd *staging = &ctx->staging;
511 const struct iwl_legacy_rxon_cmd *active = &ctx->active;
512
513#define CHK(cond) \
514 if ((cond)) { \
515 IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
516 return 1; \
517 }
518
519#define CHK_NEQ(c1, c2) \
520 if ((c1) != (c2)) { \
521 IWL_DEBUG_INFO(priv, "need full RXON - " \
522 #c1 " != " #c2 " - %d != %d\n", \
523 (c1), (c2)); \
524 return 1; \
525 }
526
527 /* These items are only settable from the full RXON command */
528 CHK(!iwl_legacy_is_associated_ctx(ctx));
529 CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
530 CHK(compare_ether_addr(staging->node_addr, active->node_addr));
531 CHK(compare_ether_addr(staging->wlap_bssid_addr,
532 active->wlap_bssid_addr));
533 CHK_NEQ(staging->dev_type, active->dev_type);
534 CHK_NEQ(staging->channel, active->channel);
535 CHK_NEQ(staging->air_propagation, active->air_propagation);
536 CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
537 active->ofdm_ht_single_stream_basic_rates);
538 CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
539 active->ofdm_ht_dual_stream_basic_rates);
540 CHK_NEQ(staging->assoc_id, active->assoc_id);
541
542 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
543 * be updated with the RXON_ASSOC command -- however only some
544 * flag transitions are allowed using RXON_ASSOC */
545
546 /* Check if we are not switching bands */
547 CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
548 active->flags & RXON_FLG_BAND_24G_MSK);
549
550 /* Check if we are switching association toggle */
551 CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
552 active->filter_flags & RXON_FILTER_ASSOC_MSK);
553
554#undef CHK
555#undef CHK_NEQ
556
557 return 0;
558}
559EXPORT_SYMBOL(iwl_legacy_full_rxon_required);
560
561u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
562 struct iwl_rxon_context *ctx)
563{
564 /*
565 * Assign the lowest rate -- should really get this from
566 * the beacon skb from mac80211.
567 */
568 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
569 return IWL_RATE_1M_PLCP;
570 else
571 return IWL_RATE_6M_PLCP;
572}
573EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp);
574
575static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
576 struct iwl_ht_config *ht_conf,
577 struct iwl_rxon_context *ctx)
578{
579 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
580
581 if (!ctx->ht.enabled) {
582 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
583 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
584 RXON_FLG_HT40_PROT_MSK |
585 RXON_FLG_HT_PROT_MSK);
586 return;
587 }
588
589 rxon->flags |= cpu_to_le32(ctx->ht.protection <<
590 RXON_FLG_HT_OPERATING_MODE_POS);
591
592 /* Set up channel bandwidth:
593 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
594 /* clear the HT channel mode before set the mode */
595 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
596 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
597 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) {
598 /* pure ht40 */
599 if (ctx->ht.protection ==
600 IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
601 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
602 /* Note: control channel is opposite of extension channel */
603 switch (ctx->ht.extension_chan_offset) {
604 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
605 rxon->flags &=
606 ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
607 break;
608 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
609 rxon->flags |=
610 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
611 break;
612 }
613 } else {
614 /* Note: control channel is opposite of extension channel */
615 switch (ctx->ht.extension_chan_offset) {
616 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
617 rxon->flags &=
618 ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
619 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
620 break;
621 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
622 rxon->flags |=
623 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
624 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
625 break;
626 case IEEE80211_HT_PARAM_CHA_SEC_NONE:
627 default:
628 /* channel location only valid if in Mixed mode */
629 IWL_ERR(priv,
630 "invalid extension channel offset\n");
631 break;
632 }
633 }
634 } else {
635 rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
636 }
637
638 if (priv->cfg->ops->hcmd->set_rxon_chain)
639 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
640
641 IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
642 "extension channel offset 0x%x\n",
643 le32_to_cpu(rxon->flags), ctx->ht.protection,
644 ctx->ht.extension_chan_offset);
645}
646
/*
 * iwl_legacy_set_rxon_ht - apply the given HT configuration to the
 * staging RXON of every RXON context on this device.
 */
void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
{
	struct iwl_rxon_context *ctx;

	for_each_context(priv, ctx)
		_iwl_legacy_set_rxon_ht(priv, ht_conf, ctx);
}
EXPORT_SYMBOL(iwl_legacy_set_rxon_ht);
655
656/* Return valid, unused, channel for a passive scan to reset the RF */
657u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
658 enum ieee80211_band band)
659{
660 const struct iwl_channel_info *ch_info;
661 int i;
662 u8 channel = 0;
663 u8 min, max;
664 struct iwl_rxon_context *ctx;
665
666 if (band == IEEE80211_BAND_5GHZ) {
667 min = 14;
668 max = priv->channel_count;
669 } else {
670 min = 0;
671 max = 14;
672 }
673
674 for (i = min; i < max; i++) {
675 bool busy = false;
676
677 for_each_context(priv, ctx) {
678 busy = priv->channel_info[i].channel ==
679 le16_to_cpu(ctx->staging.channel);
680 if (busy)
681 break;
682 }
683
684 if (busy)
685 continue;
686
687 channel = priv->channel_info[i].channel;
688 ch_info = iwl_legacy_get_channel_info(priv, band, channel);
689 if (iwl_legacy_is_channel_valid(ch_info))
690 break;
691 }
692
693 return channel;
694}
695EXPORT_SYMBOL(iwl_legacy_get_single_channel_number);
696
/**
 * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON
 * @ch: requested channel as a pointer to struct ieee80211_channel
 *
 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
 * in the staging RXON flag structure based on the ch->band
 */
int
iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
			 struct iwl_rxon_context *ctx)
{
	enum ieee80211_band band = ch->band;
	u16 channel = ch->hw_value;

	/* Nothing to do if we are already staged on this band/channel */
	if ((le16_to_cpu(ctx->staging.channel) == channel) &&
	    (priv->band == band))
		return 0;

	ctx->staging.channel = cpu_to_le16(channel);
	/* The 2.4 GHz flag doubles as the band selector */
	if (band == IEEE80211_BAND_5GHZ)
		ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
	else
		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;

	priv->band = band;

	IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);

	return 0;
}
EXPORT_SYMBOL(iwl_legacy_set_rxon_channel);
728
729void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
730 struct iwl_rxon_context *ctx,
731 enum ieee80211_band band,
732 struct ieee80211_vif *vif)
733{
734 if (band == IEEE80211_BAND_5GHZ) {
735 ctx->staging.flags &=
736 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
737 | RXON_FLG_CCK_MSK);
738 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
739 } else {
740 /* Copied from iwl_post_associate() */
741 if (vif && vif->bss_conf.use_short_slot)
742 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
743 else
744 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
745
746 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
747 ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
748 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
749 }
750}
751EXPORT_SYMBOL(iwl_legacy_set_flags_for_band);
752
/*
 * initialize rxon structure with default values from eeprom
 */
void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
				   struct iwl_rxon_context *ctx)
{
	const struct iwl_channel_info *ch_info;

	/* Start from a clean slate; everything below fills in defaults */
	memset(&ctx->staging, 0, sizeof(ctx->staging));

	/* Device type and filter flags depend on the bound interface */
	if (!ctx->vif) {
		ctx->staging.dev_type = ctx->unused_devtype;
	} else
	switch (ctx->vif->type) {

	case NL80211_IFTYPE_STATION:
		ctx->staging.dev_type = ctx->station_devtype;
		ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	case NL80211_IFTYPE_ADHOC:
		ctx->staging.dev_type = ctx->ibss_devtype;
		ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
		ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
						RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	default:
		IWL_ERR(priv, "Unsupported interface type %d\n",
			ctx->vif->type);
		break;
	}

#if 0
	/* TODO: Figure out when short_preamble would be set and cache from
	 * that */
	if (!hw_to_local(priv->hw)->short_preamble)
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

	/* Keep the currently active channel if it is still known,
	 * otherwise fall back to the first channel in the EEPROM table */
	ch_info = iwl_legacy_get_channel_info(priv, priv->band,
				       le16_to_cpu(ctx->active.channel));

	if (!ch_info)
		ch_info = &priv->channel_info[0];

	ctx->staging.channel = cpu_to_le16(ch_info->channel);
	priv->band = ch_info->band;

	iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif);

	/* Advertise all legacy OFDM and CCK rates as basic rates */
	ctx->staging.ofdm_basic_rates =
	    (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
	ctx->staging.cck_basic_rates =
	    (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;

	/* clear both MIX and PURE40 mode flag */
	ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
					RXON_FLG_CHANNEL_MODE_PURE_40);
	if (ctx->vif)
		memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);

	ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
}
EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config);
821
822void iwl_legacy_set_rate(struct iwl_priv *priv)
823{
824 const struct ieee80211_supported_band *hw = NULL;
825 struct ieee80211_rate *rate;
826 struct iwl_rxon_context *ctx;
827 int i;
828
829 hw = iwl_get_hw_mode(priv, priv->band);
830 if (!hw) {
831 IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
832 return;
833 }
834
835 priv->active_rate = 0;
836
837 for (i = 0; i < hw->n_bitrates; i++) {
838 rate = &(hw->bitrates[i]);
839 if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
840 priv->active_rate |= (1 << rate->hw_value);
841 }
842
843 IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
844
845 for_each_context(priv, ctx) {
846 ctx->staging.cck_basic_rates =
847 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
848
849 ctx->staging.ofdm_basic_rates =
850 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
851 }
852}
853EXPORT_SYMBOL(iwl_legacy_set_rate);
854
/*
 * iwl_legacy_chswitch_done - finish a pending channel switch
 * @is_success: result to report to mac80211.
 *
 * No-op when the driver is shutting down or no switch is pending.
 */
void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* Only notify mac80211 if a switch was actually in progress */
	if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
		ieee80211_chswitch_done(ctx->vif, is_success);
}
EXPORT_SYMBOL(iwl_legacy_chswitch_done);
866
/*
 * iwl_legacy_rx_csa - handle the uCode's channel-switch notification
 * and complete the pending switch (success or failure).
 */
void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_csa_notification *csa = &(pkt->u.csa_notif);

	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;

	if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
		return;

	/* Success: status is zero and the device landed on the channel
	 * we requested; mirror it into active and staging RXON */
	if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
		rxon->channel = csa->channel;
		ctx->staging.channel = csa->channel;
		IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
			le16_to_cpu(csa->channel));
		iwl_legacy_chswitch_done(priv, true);
	} else {
		IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
			le16_to_cpu(csa->channel));
		iwl_legacy_chswitch_done(priv, false);
	}
}
EXPORT_SYMBOL(iwl_legacy_rx_csa);
891
892#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Dump the context's staging RXON command, field by field, to the
 * radio debug log (debug builds only). */
void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
			     struct iwl_rxon_context *ctx)
{
	struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;

	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
				le16_to_cpu(rxon->channel));
	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
				le32_to_cpu(rxon->filter_flags));
	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
			rxon->ofdm_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
				rxon->cck_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
				le16_to_cpu(rxon->assoc_id));
}
EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd);
916#endif
/**
 * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card
 *
 * Marks the firmware as failed, dumps diagnostic state, and (if the
 * restart_fw module parameter allows) schedules an adapter restart.
 */
void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
{
	/* Set the FW error flag -- cleared on iwl_down */
	set_bit(STATUS_FW_ERROR, &priv->status);

	/* Cancel currently queued command. */
	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);

	IWL_ERR(priv, "Loaded firmware version: %s\n",
		priv->hw->wiphy->fw_version);

	/* Dump the device error log, and FH registers when supported */
	priv->cfg->ops->lib->dump_nic_error_log(priv);
	if (priv->cfg->ops->lib->dump_fh)
		priv->cfg->ops->lib->dump_fh(priv, NULL, false);
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS)
		iwl_legacy_print_rx_config_cmd(priv,
					&priv->contexts[IWL_RXON_CTX_BSS]);
#endif

	/* Unblock anyone waiting on a host-command completion */
	wake_up(&priv->wait_command_queue);

	/* Keep the restart process from trying to send host
	 * commands by clearing the INIT status bit */
	clear_bit(STATUS_READY, &priv->status);

	if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
			"Restarting adapter due to uCode error.\n");

		/* firmware reload is gated by the restart_fw parameter */
		if (priv->cfg->mod_params->restart_fw)
			queue_work(priv->workqueue, &priv->restart);
	}
}
EXPORT_SYMBOL(iwl_legacy_irq_handle_error);
955
/*
 * Stop the device's busmaster DMA and wait (up to 100 usec) for the
 * hardware to acknowledge the master-disabled state.
 * Returns 0 on success or the iwl_poll_bit() error on timeout.
 */
static int iwl_legacy_apm_stop_master(struct iwl_priv *priv)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
			CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret)
		IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(priv, "stop master\n");

	return ret;
}
972
/*
 * iwl_legacy_apm_stop - put the card into a low-power, uninitialized
 * state: stop DMA, software-reset the device, then drop INIT_DONE.
 */
void iwl_legacy_apm_stop(struct iwl_priv *priv)
{
	IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");

	/* Stop device's DMA activity */
	iwl_legacy_apm_stop_master(priv);

	/* Reset the entire device */
	iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	/* brief settle time after asserting the software reset */
	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
				CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
EXPORT_SYMBOL(iwl_legacy_apm_stop);
993
994
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 *
 * Returns 0 on success, or the iwl_poll_bit() error if the MAC clock
 * fails to stabilize.
 */
int iwl_legacy_apm_init(struct iwl_priv *priv)
{
	int ret = 0;
	u16 lctl;

	IWL_DEBUG_INFO(priv, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
			  CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
			  CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG,
					CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 * NOTE: This is no-op for 3945 (non-existent bit)
	 */
	iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	/*
	 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 * costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 * power savings, even without L1.
	 */
	if (priv->cfg->base_params->set_l0s) {
		lctl = iwl_legacy_pcie_link_ctl(priv);
		if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
					PCI_CFG_LINK_CTRL_VAL_L1_EN) {
			/* L1-ASPM enabled; disable(!) L0S  */
			iwl_legacy_set_bit(priv, CSR_GIO_REG,
					CSR_GIO_REG_VAL_L0S_ENABLED);
			IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
		} else {
			/* L1-ASPM disabled; enable(!) L0S */
			iwl_legacy_clear_bit(priv, CSR_GIO_REG,
					CSR_GIO_REG_VAL_L0S_ENABLED);
			IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
		}
	}

	/* Configure analog phase-lock-loop before activating to D0A */
	if (priv->cfg->base_params->pll_cfg_val)
		iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG,
			    priv->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_legacy_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
	 * BSM (Boostrap State Machine) is only in 3945 and 4965.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	if (priv->cfg->base_params->use_bsm)
		iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
			APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
	else
		iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
			APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

out:
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_apm_init);
1107
1108
/*
 * iwl_legacy_set_tx_power - set the user TX power limit
 * @tx_power: requested power in dBm (0 dBm means 1 milliwatt)
 * @force: apply even if unchanged, scanning, or mid channel-change
 *
 * Must be called with priv->mutex held.  Returns 0 on success or when
 * the request is deferred; -EINVAL for an out-of-range request;
 * -EOPNOTSUPP if the device has no send_tx_power op; -EIO if RF is
 * not ready; otherwise the error from the device op (with the previous
 * limit restored).
 */
int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
{
	int ret;
	s8 prev_tx_power;
	bool defer;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	lockdep_assert_held(&priv->mutex);

	if (priv->tx_power_user_lmt == tx_power && !force)
		return 0;

	if (!priv->cfg->ops->lib->send_tx_power)
		return -EOPNOTSUPP;

	/* 0 dBm mean 1 milliwatt */
	if (tx_power < 0) {
		IWL_WARN(priv,
			 "Requested user TXPOWER %d below 1 mW.\n",
			 tx_power);
		return -EINVAL;
	}

	if (tx_power > priv->tx_power_device_lmt) {
		IWL_WARN(priv,
			"Requested user TXPOWER %d above upper limit %d.\n",
			 tx_power, priv->tx_power_device_lmt);
		return -EINVAL;
	}

	if (!iwl_legacy_is_ready_rf(priv))
		return -EIO;

	/* scan complete and commit_rxon use tx_power_next value,
	 * it always need to be updated for newest request */
	priv->tx_power_next = tx_power;

	/* do not set tx power when scanning or channel changing */
	defer = test_bit(STATUS_SCANNING, &priv->status) ||
		memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
	if (defer && !force) {
		IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
		return 0;
	}

	prev_tx_power = priv->tx_power_user_lmt;
	priv->tx_power_user_lmt = tx_power;

	ret = priv->cfg->ops->lib->send_tx_power(priv);

	/* if fail to set tx_power, restore the orig. tx power */
	if (ret) {
		priv->tx_power_user_lmt = prev_tx_power;
		priv->tx_power_next = prev_tx_power;
	}
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_set_tx_power);
1167
1168void iwl_legacy_send_bt_config(struct iwl_priv *priv)
1169{
1170 struct iwl_bt_cmd bt_cmd = {
1171 .lead_time = BT_LEAD_TIME_DEF,
1172 .max_kill = BT_MAX_KILL_DEF,
1173 .kill_ack_mask = 0,
1174 .kill_cts_mask = 0,
1175 };
1176
1177 if (!bt_coex_active)
1178 bt_cmd.flags = BT_COEX_DISABLE;
1179 else
1180 bt_cmd.flags = BT_COEX_ENABLE;
1181
1182 IWL_DEBUG_INFO(priv, "BT coex %s\n",
1183 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
1184
1185 if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1186 sizeof(struct iwl_bt_cmd), &bt_cmd))
1187 IWL_ERR(priv, "failed to send BT Coex Config\n");
1188}
1189EXPORT_SYMBOL(iwl_legacy_send_bt_config);
1190
1191int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
1192{
1193 struct iwl_statistics_cmd statistics_cmd = {
1194 .configuration_flags =
1195 clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
1196 };
1197
1198 if (flags & CMD_ASYNC)
1199 return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
1200 sizeof(struct iwl_statistics_cmd),
1201 &statistics_cmd, NULL);
1202 else
1203 return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
1204 sizeof(struct iwl_statistics_cmd),
1205 &statistics_cmd);
1206}
1207EXPORT_SYMBOL(iwl_legacy_send_statistics_request);
1208
/* Log the uCode's PM sleep notification (debug builds only; a no-op
 * otherwise). */
void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
			   struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
		     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif);
1220
1221void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
1222 struct iwl_rx_mem_buffer *rxb)
1223{
1224 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1225 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1226 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
1227 "notification for %s:\n", len,
1228 iwl_legacy_get_cmd_string(pkt->hdr.cmd));
1229 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
1230}
1231EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif);
1232
/* Log an error reply from the uCode (sent when a host command was
 * rejected or malformed). */
void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
			struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
		"seq 0x%04X ser 0x%08X\n",
		le32_to_cpu(pkt->u.err_resp.error_type),
		iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id),
		pkt->u.err_resp.cmd_id,
		le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
		le32_to_cpu(pkt->u.err_resp.error_info));
}
EXPORT_SYMBOL(iwl_legacy_rx_reply_error);
1247
/* Reset all interrupt-service-routine counters to zero. */
void iwl_legacy_clear_isr_stats(struct iwl_priv *priv)
{
	memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
}
1252
/*
 * mac80211 conf_tx callback: store the EDCA parameters for one AC
 * queue into every context's default QoS parameter block.
 */
int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw,
		    struct ieee80211_vif *vif, u16 queue,
		    const struct ieee80211_tx_queue_params *params)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx;
	unsigned long flags;
	int q;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
		return -EIO;
	}

	if (queue >= AC_NUM) {
		IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
		return 0;
	}

	/* mac80211 AC ordering is the reverse of the device's */
	q = AC_NUM - 1 - queue;

	spin_lock_irqsave(&priv->lock, flags);

	for_each_context(priv, ctx) {
		ctx->qos_data.def_qos_parm.ac[q].cw_min =
			cpu_to_le16(params->cw_min);
		ctx->qos_data.def_qos_parm.ac[q].cw_max =
			cpu_to_le16(params->cw_max);
		ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
		/* convert TXOP to usec (given in units of 32 usec) */
		ctx->qos_data.def_qos_parm.ac[q].edca_txop =
				cpu_to_le16((params->txop * 32));

		ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	return 0;
}
EXPORT_SYMBOL(iwl_legacy_mac_conf_tx);
1296
1297int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw)
1298{
1299 struct iwl_priv *priv = hw->priv;
1300
1301 return priv->ibss_manager == IWL_IBSS_MANAGER;
1302}
1303EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon);
1304
/*
 * Rebuild the context's staging RXON from defaults for its current
 * interface type, reprogram the RX chain if the device supports it,
 * and commit the result to the hardware.
 */
static int
iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	iwl_legacy_connection_init_rx_config(priv, ctx);

	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);

	return iwl_legacy_commit_rxon(priv, ctx);
}
1315
1316static int iwl_legacy_setup_interface(struct iwl_priv *priv,
1317 struct iwl_rxon_context *ctx)
1318{
1319 struct ieee80211_vif *vif = ctx->vif;
1320 int err;
1321
1322 lockdep_assert_held(&priv->mutex);
1323
1324 /*
1325 * This variable will be correct only when there's just
1326 * a single context, but all code using it is for hardware
1327 * that supports only one context.
1328 */
1329 priv->iw_mode = vif->type;
1330
1331 ctx->is_active = true;
1332
1333 err = iwl_legacy_set_mode(priv, ctx);
1334 if (err) {
1335 if (!ctx->always_active)
1336 ctx->is_active = false;
1337 return err;
1338 }
1339
1340 return 0;
1341}
1342
/*
 * mac80211 add_interface callback: find a free RXON context that can
 * host an interface of this type, bind the vif to it and program the
 * device.  Fails with -EINVAL if the device is not ready or an
 * exclusive context is already occupied, -EOPNOTSUPP if no context
 * supports the requested type.
 */
int
iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	struct iwl_rxon_context *tmp, *ctx = NULL;
	int err;

	IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
			   vif->type, vif->addr);

	mutex_lock(&priv->mutex);

	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_WARN(priv, "Try to add interface when device not ready\n");
		err = -EINVAL;
		goto out;
	}

	for_each_context(priv, tmp) {
		u32 possible_modes =
			tmp->interface_modes | tmp->exclusive_interface_modes;

		if (tmp->vif) {
			/* check if this busy context is exclusive */
			if (tmp->exclusive_interface_modes &
						BIT(tmp->vif->type)) {
				err = -EINVAL;
				goto out;
			}
			continue;
		}

		if (!(possible_modes & BIT(vif->type)))
			continue;

		/* have maybe usable context w/o interface */
		ctx = tmp;
		break;
	}

	if (!ctx) {
		err = -EOPNOTSUPP;
		goto out;
	}

	vif_priv->ctx = ctx;
	ctx->vif = vif;

	err = iwl_legacy_setup_interface(priv, ctx);
	if (!err)
		goto out;

	/* setup failed: unbind the vif from the context again */
	ctx->vif = NULL;
	priv->iw_mode = NL80211_IFTYPE_STATION;
 out:
	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	return err;
}
EXPORT_SYMBOL(iwl_legacy_mac_add_interface);
1405
/*
 * Common teardown for an interface being removed or changing type:
 * cancel any scan it owns and, unless this is just a mode change,
 * reprogram RXON and deactivate the context.
 */
static void iwl_legacy_teardown_interface(struct iwl_priv *priv,
			    struct ieee80211_vif *vif,
			    bool mode_change)
{
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);

	lockdep_assert_held(&priv->mutex);

	/* abort a scan running on behalf of this interface */
	if (priv->scan_vif == vif) {
		iwl_legacy_scan_cancel_timeout(priv, 200);
		iwl_legacy_force_scan_end(priv);
	}

	if (!mode_change) {
		iwl_legacy_set_mode(priv, ctx);
		if (!ctx->always_active)
			ctx->is_active = false;
	}
}
1425
/*
 * mac80211 remove_interface callback: unbind the vif from its RXON
 * context, tear the context down and forget the BSSID.
 */
void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);

	IWL_DEBUG_MAC80211(priv, "enter\n");

	mutex_lock(&priv->mutex);

	WARN_ON(ctx->vif != vif);
	ctx->vif = NULL;

	iwl_legacy_teardown_interface(priv, vif, false);

	memset(priv->bssid, 0, ETH_ALEN);
	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");

}
EXPORT_SYMBOL(iwl_legacy_mac_remove_interface);
1448
1449int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv)
1450{
1451 if (!priv->txq)
1452 priv->txq = kzalloc(
1453 sizeof(struct iwl_tx_queue) *
1454 priv->cfg->base_params->num_of_queues,
1455 GFP_KERNEL);
1456 if (!priv->txq) {
1457 IWL_ERR(priv, "Not enough memory for txq\n");
1458 return -ENOMEM;
1459 }
1460 return 0;
1461}
1462EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem);
1463
/* Free the TX queue array allocated by iwl_legacy_alloc_txq_mem(). */
void iwl_legacy_txq_mem(struct iwl_priv *priv)
{
	kfree(priv->txq);
	priv->txq = NULL;	/* guard against double free / stale use */
}
EXPORT_SYMBOL(iwl_legacy_txq_mem);
1470
1471#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1472
1473#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
1474
1475void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
1476{
1477 priv->tx_traffic_idx = 0;
1478 priv->rx_traffic_idx = 0;
1479 if (priv->tx_traffic)
1480 memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
1481 if (priv->rx_traffic)
1482 memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
1483}
1484
1485int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
1486{
1487 u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
1488
1489 if (iwlegacy_debug_level & IWL_DL_TX) {
1490 if (!priv->tx_traffic) {
1491 priv->tx_traffic =
1492 kzalloc(traffic_size, GFP_KERNEL);
1493 if (!priv->tx_traffic)
1494 return -ENOMEM;
1495 }
1496 }
1497 if (iwlegacy_debug_level & IWL_DL_RX) {
1498 if (!priv->rx_traffic) {
1499 priv->rx_traffic =
1500 kzalloc(traffic_size, GFP_KERNEL);
1501 if (!priv->rx_traffic)
1502 return -ENOMEM;
1503 }
1504 }
1505 iwl_legacy_reset_traffic_log(priv);
1506 return 0;
1507}
1508EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem);
1509
/* Release both traffic-capture buffers (kfree(NULL) is a no-op). */
void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
{
	kfree(priv->tx_traffic);
	priv->tx_traffic = NULL;

	kfree(priv->rx_traffic);
	priv->rx_traffic = NULL;
}
EXPORT_SYMBOL(iwl_legacy_free_traffic_mem);
1519
1520void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
1521 u16 length, struct ieee80211_hdr *header)
1522{
1523 __le16 fc;
1524 u16 len;
1525
1526 if (likely(!(iwlegacy_debug_level & IWL_DL_TX)))
1527 return;
1528
1529 if (!priv->tx_traffic)
1530 return;
1531
1532 fc = header->frame_control;
1533 if (ieee80211_is_data(fc)) {
1534 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1535 ? IWL_TRAFFIC_ENTRY_SIZE : length;
1536 memcpy((priv->tx_traffic +
1537 (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1538 header, len);
1539 priv->tx_traffic_idx =
1540 (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1541 }
1542}
1543EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame);
1544
1545void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
1546 u16 length, struct ieee80211_hdr *header)
1547{
1548 __le16 fc;
1549 u16 len;
1550
1551 if (likely(!(iwlegacy_debug_level & IWL_DL_RX)))
1552 return;
1553
1554 if (!priv->rx_traffic)
1555 return;
1556
1557 fc = header->frame_control;
1558 if (ieee80211_is_data(fc)) {
1559 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1560 ? IWL_TRAFFIC_ENTRY_SIZE : length;
1561 memcpy((priv->rx_traffic +
1562 (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1563 header, len);
1564 priv->rx_traffic_idx =
1565 (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1566 }
1567}
1568EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame);
1569
/* Map a MANAGEMENT_* statistics index to its name for debugfs output. */
const char *iwl_legacy_get_mgmt_string(int cmd)
{
	switch (cmd) {
		IWL_CMD(MANAGEMENT_ASSOC_REQ);
		IWL_CMD(MANAGEMENT_ASSOC_RESP);
		IWL_CMD(MANAGEMENT_REASSOC_REQ);
		IWL_CMD(MANAGEMENT_REASSOC_RESP);
		IWL_CMD(MANAGEMENT_PROBE_REQ);
		IWL_CMD(MANAGEMENT_PROBE_RESP);
		IWL_CMD(MANAGEMENT_BEACON);
		IWL_CMD(MANAGEMENT_ATIM);
		IWL_CMD(MANAGEMENT_DISASSOC);
		IWL_CMD(MANAGEMENT_AUTH);
		IWL_CMD(MANAGEMENT_DEAUTH);
		IWL_CMD(MANAGEMENT_ACTION);
	default:
		return "UNKNOWN";

	}
}
1590
/* Map a CONTROL_* statistics index to its name for debugfs output. */
const char *iwl_legacy_get_ctrl_string(int cmd)
{
	switch (cmd) {
		IWL_CMD(CONTROL_BACK_REQ);
		IWL_CMD(CONTROL_BACK);
		IWL_CMD(CONTROL_PSPOLL);
		IWL_CMD(CONTROL_RTS);
		IWL_CMD(CONTROL_CTS);
		IWL_CMD(CONTROL_ACK);
		IWL_CMD(CONTROL_CFEND);
		IWL_CMD(CONTROL_CFENDACK);
	default:
		return "UNKNOWN";

	}
}
1607
1608void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv)
1609{
1610 memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
1611 memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
1612}
1613
/*
 * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is defined, iwl_legacy_update_stats()
 * records all MGMT, CTRL and DATA packets for both the TX and RX paths;
 * use debugfs to display the tx/rx statistics.
 * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is not defined, no MGMT or CTRL
 * information is recorded, but DATA packets are still counted, because
 * iwl_led.c needs to control LED blinking based on the number of
 * TX and RX data frames.
 */
void
iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
{
	struct traffic_stats *stats;

	/* select the per-direction counter block */
	if (is_tx)
		stats = &priv->tx_stats;
	else
		stats = &priv->rx_stats;

	if (ieee80211_is_mgmt(fc)) {
		/* compare in little-endian so case labels fold at compile time */
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
			stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
			stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
			stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
			stats->mgmt[MANAGEMENT_PROBE_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
			stats->mgmt[MANAGEMENT_PROBE_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BEACON):
			stats->mgmt[MANAGEMENT_BEACON]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ATIM):
			stats->mgmt[MANAGEMENT_ATIM]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
			stats->mgmt[MANAGEMENT_DISASSOC]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
			stats->mgmt[MANAGEMENT_AUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
			stats->mgmt[MANAGEMENT_DEAUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACTION):
			stats->mgmt[MANAGEMENT_ACTION]++;
			break;
		}
	} else if (ieee80211_is_ctl(fc)) {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
			stats->ctrl[CONTROL_BACK_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BACK):
			stats->ctrl[CONTROL_BACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
			stats->ctrl[CONTROL_PSPOLL]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_RTS):
			stats->ctrl[CONTROL_RTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CTS):
			stats->ctrl[CONTROL_CTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACK):
			stats->ctrl[CONTROL_ACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFEND):
			stats->ctrl[CONTROL_CFEND]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
			stats->ctrl[CONTROL_CFENDACK]++;
			break;
		}
	} else {
		/* data */
		stats->data_cnt++;
		stats->data_bytes += len;
	}
}
EXPORT_SYMBOL(iwl_legacy_update_stats);
1708#endif
1709
/*
 * iwl_legacy_force_reset - request a firmware reload
 * @external: true when triggered from debugfs; external requests
 *            bypass both rate limiting and the restart_fw parameter.
 *
 * Returns 0 on success, -EINVAL while shutting down, -EAGAIN when an
 * internal request arrives within the rate-limit window.
 */
int iwl_legacy_force_reset(struct iwl_priv *priv, bool external)
{
	struct iwl_force_reset *force_reset;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return -EINVAL;

	force_reset = &priv->force_reset;
	force_reset->reset_request_count++;
	if (!external) {
		/* rate-limit internal resets to one per reset_duration */
		if (force_reset->last_force_reset_jiffies &&
		    time_after(force_reset->last_force_reset_jiffies +
		    force_reset->reset_duration, jiffies)) {
			IWL_DEBUG_INFO(priv, "force reset rejected\n");
			force_reset->reset_reject_count++;
			return -EAGAIN;
		}
	}
	force_reset->reset_success_count++;
	force_reset->last_force_reset_jiffies = jiffies;

	/*
	 * if the request is from external(ex: debugfs),
	 * then always perform the request in regardless the module
	 * parameter setting
	 * if the request is from internal (uCode error or driver
	 * detect failure), then fw_restart module parameter
	 * need to be check before performing firmware reload
	 */

	if (!external && !priv->cfg->mod_params->restart_fw) {
		IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
			       "module parameter setting\n");
		return 0;
	}

	IWL_ERR(priv, "On demand firmware reload\n");

	/* Set the FW error flag -- cleared on iwl_down */
	set_bit(STATUS_FW_ERROR, &priv->status);
	wake_up(&priv->wait_command_queue);
	/*
	 * Keep the restart process from trying to send host
	 * commands by clearing the INIT status bit
	 */
	clear_bit(STATUS_READY, &priv->status);
	queue_work(priv->workqueue, &priv->restart);

	return 0;
}
1760
/*
 * iwl_legacy_mac_change_interface - mac80211 change_interface callback
 *
 * Switches @vif to @newtype/@newp2p in place (teardown + setup of the
 * same context) instead of a full remove/add cycle.  Refuses with
 * -EBUSY when the context has no vif or RF is not ready, when the
 * target type is not in the context's supported modes, or when the
 * target type is exclusive while another context is active.
 */
int
iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
			struct ieee80211_vif *vif,
			enum nl80211_iftype newtype, bool newp2p)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
	struct iwl_rxon_context *tmp;
	u32 interface_modes;
	int err;

	/* Fold the p2p flag into the interface type value. */
	newtype = ieee80211_iftype_p2p(newtype, newp2p);

	mutex_lock(&priv->mutex);

	if (!ctx->vif || !iwl_legacy_is_ready_rf(priv)) {
		/*
		 * Huh? But wait ... this can maybe happen when
		 * we're in the middle of a firmware restart!
		 */
		err = -EBUSY;
		goto out;
	}

	/* All modes this context can run, exclusive or shared. */
	interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;

	if (!(interface_modes & BIT(newtype))) {
		err = -EBUSY;
		goto out;
	}

	if (ctx->exclusive_interface_modes & BIT(newtype)) {
		for_each_context(priv, tmp) {
			if (ctx == tmp)
				continue;

			if (!tmp->vif)
				continue;

			/*
			 * The current mode switch would be exclusive, but
			 * another context is active ... refuse the switch.
			 */
			err = -EBUSY;
			goto out;
		}
	}

	/* success */
	iwl_legacy_teardown_interface(priv, vif, true);
	vif->type = newtype;
	vif->p2p = newp2p;
	err = iwl_legacy_setup_interface(priv, ctx);
	WARN_ON(err);
	/*
	 * We've switched internally, but submitting to the
	 * device may have failed for some reason. Mask this
	 * error, because otherwise mac80211 will not switch
	 * (and set the interface type back) and we'll be
	 * out of sync with it.
	 */
	err = 0;

 out:
	mutex_unlock(&priv->mutex);
	return err;
}
EXPORT_SYMBOL(iwl_legacy_mac_change_interface);
1829
1830/*
1831 * On every watchdog tick we check (latest) time stamp. If it does not
1832 * change during timeout period and queue is not empty we reset firmware.
1833 */
1834static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt)
1835{
1836 struct iwl_tx_queue *txq = &priv->txq[cnt];
1837 struct iwl_queue *q = &txq->q;
1838 unsigned long timeout;
1839 int ret;
1840
1841 if (q->read_ptr == q->write_ptr) {
1842 txq->time_stamp = jiffies;
1843 return 0;
1844 }
1845
1846 timeout = txq->time_stamp +
1847 msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
1848
1849 if (time_after(jiffies, timeout)) {
1850 IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
1851 q->id, priv->cfg->base_params->wd_timeout);
1852 ret = iwl_legacy_force_reset(priv, false);
1853 return (ret == -EAGAIN) ? 0 : 1;
1854 }
1855
1856 return 0;
1857}
1858
1859/*
 * Making the watchdog tick a quarter of the timeout ensures we will
 * discover a hung queue between 1.0x and 1.25x the timeout
1862 */
1863#define IWL_WD_TICK(timeout) ((timeout) / 4)
1864
1865/*
 * Watchdog timer callback, we check each tx queue for stuck, if hung
1867 * we reset the firmware. If everything is fine just rearm the timer.
1868 */
1869void iwl_legacy_bg_watchdog(unsigned long data)
1870{
1871 struct iwl_priv *priv = (struct iwl_priv *)data;
1872 int cnt;
1873 unsigned long timeout;
1874
1875 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1876 return;
1877
1878 timeout = priv->cfg->base_params->wd_timeout;
1879 if (timeout == 0)
1880 return;
1881
1882 /* monitor and check for stuck cmd queue */
1883 if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue))
1884 return;
1885
1886 /* monitor and check for other stuck queues */
1887 if (iwl_legacy_is_any_associated(priv)) {
1888 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
1889 /* skip as we already checked the command queue */
1890 if (cnt == priv->cmd_queue)
1891 continue;
1892 if (iwl_legacy_check_stuck_queue(priv, cnt))
1893 return;
1894 }
1895 }
1896
1897 mod_timer(&priv->watchdog, jiffies +
1898 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1899}
1900EXPORT_SYMBOL(iwl_legacy_bg_watchdog);
1901
1902void iwl_legacy_setup_watchdog(struct iwl_priv *priv)
1903{
1904 unsigned int timeout = priv->cfg->base_params->wd_timeout;
1905
1906 if (timeout)
1907 mod_timer(&priv->watchdog,
1908 jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
1909 else
1910 del_timer(&priv->watchdog);
1911}
1912EXPORT_SYMBOL(iwl_legacy_setup_watchdog);
1913
1914/*
1915 * extended beacon time format
1916 * time in usec will be changed into a 32-bit value in extended:internal format
1917 * the extended part is the beacon counts
1918 * the internal part is the time in usec within one beacon interval
1919 */
/*
 * iwl_legacy_usecs_to_beacons - pack a usec count into extended beacon time
 *
 * Splits @usec over @beacon_interval (scaled by TIME_UNIT) into the
 * extended:internal 32-bit format: the high bits hold the number of
 * whole beacon intervals, the low hw_params.beacon_time_tsf_bits bits
 * hold the remaining usec within the current interval.  Returns 0 when
 * either the interval or the time is zero.
 */
u32
iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
					u32 usec, u32 beacon_interval)
{
	u32 quot;
	u32 rem;
	u32 interval = beacon_interval * TIME_UNIT;

	if (!interval || !usec)
		return 0;

	/* Beacon count, clipped to the width of the extended field. */
	quot = (usec / interval) &
		(iwl_legacy_beacon_time_mask_high(priv,
		priv->hw_params.beacon_time_tsf_bits) >>
		priv->hw_params.beacon_time_tsf_bits);
	/* Residual usec, clipped to the width of the internal field. */
	rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv,
				priv->hw_params.beacon_time_tsf_bits);

	return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
}
EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons);
1941
1942/* base is usually what we get from ucode with each received frame,
1943 * the same as HW timer counter counting down
1944 */
/*
 * iwl_legacy_add_beacon_time - combine an extended beacon time with an offset
 *
 * Both @base and @addon are split into their high (beacon count) and
 * low (usec-within-interval, hw_params.beacon_time_tsf_bits wide)
 * fields.  The high fields are summed; the low fields are differenced
 * (the HW timer counts down -- see comment above), carrying one beacon
 * interval when the subtraction would wrap.  Result is returned
 * little-endian for the device.
 */
__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
					u32 addon, u32 beacon_interval)
{
	u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv,
					priv->hw_params.beacon_time_tsf_bits);
	u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv,
					priv->hw_params.beacon_time_tsf_bits);
	u32 interval = beacon_interval * TIME_UNIT;
	u32 res = (base & iwl_legacy_beacon_time_mask_high(priv,
				priv->hw_params.beacon_time_tsf_bits)) +
				(addon & iwl_legacy_beacon_time_mask_high(priv,
				priv->hw_params.beacon_time_tsf_bits));

	if (base_low > addon_low)
		res += base_low - addon_low;
	else if (base_low < addon_low) {
		/* wrapped past the interval boundary: carry one beacon */
		res += interval + base_low - addon_low;
		res += (1 << priv->hw_params.beacon_time_tsf_bits);
	} else
		res += (1 << priv->hw_params.beacon_time_tsf_bits);

	return cpu_to_le32(res);
}
EXPORT_SYMBOL(iwl_legacy_add_beacon_time);
1969
1970#ifdef CONFIG_PM
1971
int iwl_legacy_pci_suspend(struct device *device)
{
	struct iwl_priv *priv = pci_get_drvdata(to_pci_dev(device));

	/*
	 * On system suspend mac80211 calls iwl_mac_stop() first, but that
	 * path cannot tell who its caller is and therefore never invokes
	 * apm_ops.stop() to halt the DMA engine.  Stop the APM explicitly
	 * here to make sure DMA is quiesced before suspending.
	 */
	iwl_legacy_apm_stop(priv);

	return 0;
}
EXPORT_SYMBOL(iwl_legacy_pci_suspend);
1989
1990int iwl_legacy_pci_resume(struct device *device)
1991{
1992 struct pci_dev *pdev = to_pci_dev(device);
1993 struct iwl_priv *priv = pci_get_drvdata(pdev);
1994 bool hw_rfkill = false;
1995
1996 /*
1997 * We disable the RETRY_TIMEOUT register (0x41) to keep
1998 * PCI Tx retries from interfering with C3 CPU state.
1999 */
2000 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
2001
2002 iwl_legacy_enable_interrupts(priv);
2003
2004 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
2005 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
2006 hw_rfkill = true;
2007
2008 if (hw_rfkill)
2009 set_bit(STATUS_RF_KILL_HW, &priv->status);
2010 else
2011 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2012
2013 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
2014
2015 return 0;
2016}
2017EXPORT_SYMBOL(iwl_legacy_pci_resume);
2018
/*
 * PM callbacks: a single suspend/resume pair services every system
 * sleep transition (suspend-to-RAM, hibernate freeze/thaw/poweroff/
 * restore).
 */
const struct dev_pm_ops iwl_legacy_pm_ops = {
	.suspend = iwl_legacy_pci_suspend,
	.resume = iwl_legacy_pci_resume,
	.freeze = iwl_legacy_pci_suspend,
	.thaw = iwl_legacy_pci_resume,
	.poweroff = iwl_legacy_pci_suspend,
	.restore = iwl_legacy_pci_resume,
};
EXPORT_SYMBOL(iwl_legacy_pm_ops);
2028
2029#endif /* CONFIG_PM */
2030
/*
 * iwl_legacy_update_qos - push the current QoS parameters to the uCode
 *
 * Rebuilds qos_flags from the context's QoS/HT state and sends the
 * default QoS parameter set asynchronously.  No-op while the driver is
 * exiting or when the context is not active.
 */
static void
iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (!ctx->is_active)
		return;

	ctx->qos_data.def_qos_parm.qos_flags = 0;

	if (ctx->qos_data.qos_active)
		ctx->qos_data.def_qos_parm.qos_flags |=
			QOS_PARAM_FLG_UPDATE_EDCA_MSK;

	if (ctx->ht.enabled)
		ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;

	IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
		      ctx->qos_data.qos_active,
		      ctx->qos_data.def_qos_parm.qos_flags);

	/* Async send: callers may hold priv->lock (bss_info_changed does). */
	iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd,
				sizeof(struct iwl_qosparam_cmd),
				&ctx->qos_data.def_qos_parm, NULL);
}
2057
2058/**
2059 * iwl_legacy_mac_config - mac80211 config callback
2060 */
2061int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
2062{
2063 struct iwl_priv *priv = hw->priv;
2064 const struct iwl_channel_info *ch_info;
2065 struct ieee80211_conf *conf = &hw->conf;
2066 struct ieee80211_channel *channel = conf->channel;
2067 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2068 struct iwl_rxon_context *ctx;
2069 unsigned long flags = 0;
2070 int ret = 0;
2071 u16 ch;
2072 int scan_active = 0;
2073 bool ht_changed[NUM_IWL_RXON_CTX] = {};
2074
2075 if (WARN_ON(!priv->cfg->ops->legacy))
2076 return -EOPNOTSUPP;
2077
2078 mutex_lock(&priv->mutex);
2079
2080 IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
2081 channel->hw_value, changed);
2082
2083 if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
2084 scan_active = 1;
2085 IWL_DEBUG_MAC80211(priv, "scan active\n");
2086 }
2087
2088 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
2089 IEEE80211_CONF_CHANGE_CHANNEL)) {
2090 /* mac80211 uses static for non-HT which is what we want */
2091 priv->current_ht_config.smps = conf->smps_mode;
2092
2093 /*
2094 * Recalculate chain counts.
2095 *
2096 * If monitor mode is enabled then mac80211 will
2097 * set up the SM PS mode to OFF if an HT channel is
2098 * configured.
2099 */
2100 if (priv->cfg->ops->hcmd->set_rxon_chain)
2101 for_each_context(priv, ctx)
2102 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2103 }
2104
2105 /* during scanning mac80211 will delay channel setting until
2106 * scan finish with changed = 0
2107 */
2108 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
2109 if (scan_active)
2110 goto set_ch_out;
2111
2112 ch = channel->hw_value;
2113 ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
2114 if (!iwl_legacy_is_channel_valid(ch_info)) {
2115 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
2116 ret = -EINVAL;
2117 goto set_ch_out;
2118 }
2119
2120 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
2121 !iwl_legacy_is_channel_ibss(ch_info)) {
2122 IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n");
2123 ret = -EINVAL;
2124 goto set_ch_out;
2125 }
2126
2127 spin_lock_irqsave(&priv->lock, flags);
2128
2129 for_each_context(priv, ctx) {
2130 /* Configure HT40 channels */
2131 if (ctx->ht.enabled != conf_is_ht(conf)) {
2132 ctx->ht.enabled = conf_is_ht(conf);
2133 ht_changed[ctx->ctxid] = true;
2134 }
2135 if (ctx->ht.enabled) {
2136 if (conf_is_ht40_minus(conf)) {
2137 ctx->ht.extension_chan_offset =
2138 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
2139 ctx->ht.is_40mhz = true;
2140 } else if (conf_is_ht40_plus(conf)) {
2141 ctx->ht.extension_chan_offset =
2142 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
2143 ctx->ht.is_40mhz = true;
2144 } else {
2145 ctx->ht.extension_chan_offset =
2146 IEEE80211_HT_PARAM_CHA_SEC_NONE;
2147 ctx->ht.is_40mhz = false;
2148 }
2149 } else
2150 ctx->ht.is_40mhz = false;
2151
2152 /*
2153 * Default to no protection. Protection mode will
2154 * later be set from BSS config in iwl_ht_conf
2155 */
2156 ctx->ht.protection =
2157 IEEE80211_HT_OP_MODE_PROTECTION_NONE;
2158
2159 /* if we are switching from ht to 2.4 clear flags
2160 * from any ht related info since 2.4 does not
2161 * support ht */
2162 if ((le16_to_cpu(ctx->staging.channel) != ch))
2163 ctx->staging.flags = 0;
2164
2165 iwl_legacy_set_rxon_channel(priv, channel, ctx);
2166 iwl_legacy_set_rxon_ht(priv, ht_conf);
2167
2168 iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
2169 ctx->vif);
2170 }
2171
2172 spin_unlock_irqrestore(&priv->lock, flags);
2173
2174 if (priv->cfg->ops->legacy->update_bcast_stations)
2175 ret =
2176 priv->cfg->ops->legacy->update_bcast_stations(priv);
2177
2178 set_ch_out:
2179 /* The list of supported rates and rate mask can be different
2180 * for each band; since the band may have changed, reset
2181 * the rate mask to what mac80211 lists */
2182 iwl_legacy_set_rate(priv);
2183 }
2184
2185 if (changed & (IEEE80211_CONF_CHANGE_PS |
2186 IEEE80211_CONF_CHANGE_IDLE)) {
2187 ret = iwl_legacy_power_update_mode(priv, false);
2188 if (ret)
2189 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
2190 }
2191
2192 if (changed & IEEE80211_CONF_CHANGE_POWER) {
2193 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
2194 priv->tx_power_user_lmt, conf->power_level);
2195
2196 iwl_legacy_set_tx_power(priv, conf->power_level, false);
2197 }
2198
2199 if (!iwl_legacy_is_ready(priv)) {
2200 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2201 goto out;
2202 }
2203
2204 if (scan_active)
2205 goto out;
2206
2207 for_each_context(priv, ctx) {
2208 if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
2209 iwl_legacy_commit_rxon(priv, ctx);
2210 else
2211 IWL_DEBUG_INFO(priv,
2212 "Not re-sending same RXON configuration.\n");
2213 if (ht_changed[ctx->ctxid])
2214 iwl_legacy_update_qos(priv, ctx);
2215 }
2216
2217out:
2218 IWL_DEBUG_MAC80211(priv, "leave\n");
2219 mutex_unlock(&priv->mutex);
2220 return ret;
2221}
2222EXPORT_SYMBOL(iwl_legacy_mac_config);
2223
/*
 * iwl_legacy_mac_reset_tsf - mac80211 reset_tsf callback
 *
 * Restarts the association process for the BSS context: clears the
 * cached HT config, frees any stored IBSS beacon, cancels a pending
 * scan, then commits an RXON without RXON_FILTER_ASSOC_MSK.
 */
void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif)
{
	struct iwl_priv *priv = hw->priv;
	unsigned long flags;
	/* IBSS can only be the IWL_RXON_CTX_BSS context */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	if (WARN_ON(!priv->cfg->ops->legacy))
		return;

	mutex_lock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "enter\n");

	spin_lock_irqsave(&priv->lock, flags);
	memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
	spin_unlock_irqrestore(&priv->lock, flags);

	/* NOTE(review): the lock is dropped and immediately re-taken here;
	 * the two critical sections look mergeable -- kept as-is. */
	spin_lock_irqsave(&priv->lock, flags);

	/* new association get rid of ibss beacon skb */
	if (priv->beacon_skb)
		dev_kfree_skb(priv->beacon_skb);

	priv->beacon_skb = NULL;

	priv->timestamp = 0;

	spin_unlock_irqrestore(&priv->lock, flags);

	iwl_legacy_scan_cancel_timeout(priv, 100);
	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	/* we are restarting association process
	 * clear RXON_FILTER_ASSOC_MSK bit
	 */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	iwl_legacy_commit_rxon(priv, ctx);

	iwl_legacy_set_rate(priv);

	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
2274
/*
 * iwl_legacy_ht_conf - refresh HT state from the vif's BSS configuration
 *
 * Updates the context's HT protection / non-greenfield flags from the
 * HT operation mode, and decides whether a single RX chain suffices
 * (single-stream peer, ad-hoc, or peer disappeared during setup).
 * No-op when HT is not enabled on the context.
 */
static void iwl_legacy_ht_conf(struct iwl_priv *priv,
			       struct ieee80211_vif *vif)
{
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
	struct ieee80211_sta *sta;
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);

	IWL_DEBUG_ASSOC(priv, "enter:\n");

	if (!ctx->ht.enabled)
		return;

	ctx->ht.protection =
		bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	ctx->ht.non_gf_sta_present =
		!!(bss_conf->ht_operation_mode &
				IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);

	ht_conf->single_chain_sufficient = false;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		/* sta lookup must happen under RCU */
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (sta) {
			struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
			int maxstreams;

			/* max TX streams advertised by the peer (1-based) */
			maxstreams = (ht_cap->mcs.tx_params &
			      IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
				>> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
			maxstreams += 1;

			/* single-stream peer: one RX chain is enough */
			if ((ht_cap->mcs.rx_mask[1] == 0) &&
			    (ht_cap->mcs.rx_mask[2] == 0))
				ht_conf->single_chain_sufficient = true;
			if (maxstreams <= 1)
				ht_conf->single_chain_sufficient = true;
		} else {
			/*
			 * If at all, this can only happen through a race
			 * when the AP disconnects us while we're still
			 * setting up the connection, in that case mac80211
			 * will soon tell us about that.
			 */
			ht_conf->single_chain_sufficient = true;
		}
		rcu_read_unlock();
		break;
	case NL80211_IFTYPE_ADHOC:
		ht_conf->single_chain_sufficient = true;
		break;
	default:
		break;
	}

	IWL_DEBUG_ASSOC(priv, "leave\n");
}
2334
2335static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv,
2336 struct ieee80211_vif *vif)
2337{
2338 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
2339
2340 /*
2341 * inform the ucode that there is no longer an
2342 * association and that no more packets should be
2343 * sent
2344 */
2345 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2346 ctx->staging.assoc_id = 0;
2347 iwl_legacy_commit_rxon(priv, ctx);
2348}
2349
/*
 * iwl_legacy_beacon_update - fetch a fresh beacon from mac80211 and store it
 *
 * Grabs a new beacon skb for @vif, takes ownership of it in
 * priv->beacon_skb (freeing any previous one), records the beacon
 * timestamp, and re-runs the post_associate hook.  Requires a valid
 * beacon context and priv->mutex held.
 */
static void iwl_legacy_beacon_update(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct iwl_priv *priv = hw->priv;
	unsigned long flags;
	__le64 timestamp;
	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);

	if (!skb)
		return;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	lockdep_assert_held(&priv->mutex);

	if (!priv->beacon_ctx) {
		IWL_ERR(priv, "update beacon but no beacon context!\n");
		/* we own the skb at this point; don't leak it */
		dev_kfree_skb(skb);
		return;
	}

	spin_lock_irqsave(&priv->lock, flags);

	/* replace any previously stored beacon */
	if (priv->beacon_skb)
		dev_kfree_skb(priv->beacon_skb);

	priv->beacon_skb = skb;

	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
	priv->timestamp = le64_to_cpu(timestamp);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	spin_unlock_irqrestore(&priv->lock, flags);

	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
		return;
	}

	priv->cfg->ops->legacy->post_associate(priv);
}
2391
/*
 * iwl_legacy_mac_bss_info_changed - mac80211 bss_info_changed callback
 *
 * Applies BSS configuration changes (QoS, beaconing, BSSID, ERP,
 * basic rates, HT, association state, IBSS membership) to the vif's
 * RXON context.  The order of the blocks below matters: the BSSID must
 * be staged before any path that can invoke post_associate, and the
 * RXON-assoc sync at the end only runs when already associated.
 */
void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_bss_conf *bss_conf,
				     u32 changes)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
	int ret;

	if (WARN_ON(!priv->cfg->ops->legacy))
		return;

	IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);

	mutex_lock(&priv->mutex);

	if (!iwl_legacy_is_alive(priv)) {
		mutex_unlock(&priv->mutex);
		return;
	}

	if (changes & BSS_CHANGED_QOS) {
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);
		ctx->qos_data.qos_active = bss_conf->qos;
		iwl_legacy_update_qos(priv, ctx);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		/*
		 * the add_interface code must make sure we only ever
		 * have a single interface that could be beaconing at
		 * any time.
		 */
		if (vif->bss_conf.enable_beacon)
			priv->beacon_ctx = ctx;
		else
			priv->beacon_ctx = NULL;
	}

	if (changes & BSS_CHANGED_BSSID) {
		IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);

		/*
		 * If there is currently a HW scan going on in the
		 * background then we need to cancel it else the RXON
		 * below/in post_associate will fail.
		 */
		if (iwl_legacy_scan_cancel_timeout(priv, 100)) {
			IWL_WARN(priv,
				"Aborted scan still in progress after 100ms\n");
			IWL_DEBUG_MAC80211(priv,
				"leaving - scan abort failed.\n");
			mutex_unlock(&priv->mutex);
			return;
		}

		/* mac80211 only sets assoc when in STATION mode */
		if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
			memcpy(ctx->staging.bssid_addr,
			       bss_conf->bssid, ETH_ALEN);

			/* currently needed in a few places */
			memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
		} else {
			/* no valid BSSID: make sure we don't stay "associated" */
			ctx->staging.filter_flags &=
				~RXON_FILTER_ASSOC_MSK;
		}

	}

	/*
	 * This needs to be after setting the BSSID in case
	 * mac80211 decides to do both changes at once because
	 * it will invoke post_associate.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
		iwl_legacy_beacon_update(hw, vif);

	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
		IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
				   bss_conf->use_short_preamble);
		if (bss_conf->use_short_preamble)
			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	}

	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
		IWL_DEBUG_MAC80211(priv,
			"ERP_CTS %d\n", bss_conf->use_cts_prot);
		/* CTS protection only applies on the 2.4 GHz band */
		if (bss_conf->use_cts_prot &&
			(priv->band != IEEE80211_BAND_5GHZ))
			ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
		if (bss_conf->use_cts_prot)
			ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
		else
			ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
	}

	if (changes & BSS_CHANGED_BASIC_RATES) {
		/* XXX use this information
		 *
		 * To do that, remove code from iwl_legacy_set_rate() and put something
		 * like this here:
		 *
		if (A-band)
			ctx->staging.ofdm_basic_rates =
				bss_conf->basic_rates;
		else
			ctx->staging.ofdm_basic_rates =
				bss_conf->basic_rates >> 4;
			ctx->staging.cck_basic_rates =
				bss_conf->basic_rates & 0xF;
		 */
	}

	if (changes & BSS_CHANGED_HT) {
		iwl_legacy_ht_conf(priv, vif);

		if (priv->cfg->ops->hcmd->set_rxon_chain)
			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
	}

	if (changes & BSS_CHANGED_ASSOC) {
		IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
		if (bss_conf->assoc) {
			priv->timestamp = bss_conf->timestamp;

			if (!iwl_legacy_is_rfkill(priv))
				priv->cfg->ops->legacy->post_associate(priv);
		} else
			iwl_legacy_set_no_assoc(priv, vif);
	}

	/* already associated: sync the device's RXON with the changes */
	if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) {
		IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
				   changes);
		ret = iwl_legacy_send_rxon_assoc(priv, ctx);
		if (!ret) {
			/* Sync active_rxon with latest change. */
			memcpy((void *)&ctx->active,
				&ctx->staging,
				sizeof(struct iwl_legacy_rxon_cmd));
		}
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		if (vif->bss_conf.enable_beacon) {
			memcpy(ctx->staging.bssid_addr,
			       bss_conf->bssid, ETH_ALEN);
			memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
			priv->cfg->ops->legacy->config_ap(priv);
		} else
			iwl_legacy_set_no_assoc(priv, vif);
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
							bss_conf->ibss_joined);
		if (ret)
			IWL_ERR(priv, "failed to %s IBSS station %pM\n",
				bss_conf->ibss_joined ? "add" : "remove",
				bss_conf->bssid);
	}

	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
2567
/*
 * iwl_legacy_isr - top-half interrupt handler
 *
 * Masks further interrupts, reads the pending CSR/FH interrupt status,
 * and schedules the irq tasklet to do the real work (the tasklet
 * re-enables interrupts when done).  Returns IRQ_NONE when the line
 * fired but nothing in this NIC needs service (shared IRQ / spurious).
 */
irqreturn_t iwl_legacy_isr(int irq, void *data)
{
	struct iwl_priv *priv = data;
	u32 inta, inta_mask;
	u32 inta_fh;
	unsigned long flags;
	if (!priv)
		return IRQ_NONE;

	spin_lock_irqsave(&priv->lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
	iwl_write32(priv, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(priv, CSR_INT);
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta && !inta_fh) {
		IWL_DEBUG_ISR(priv,
			"Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	/* all-ones / 0xa5a5a5a0 pattern: reads from a removed device */
	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

	IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
		      inta, inta_mask, inta_fh);

	inta &= ~CSR_INT_BIT_SCD;

	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta || inta_fh))
		tasklet_schedule(&priv->irq_tasklet);

unplugged:
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_legacy_enable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_NONE;
}
EXPORT_SYMBOL(iwl_legacy_isr);
2628
2629/*
2630 * iwl_legacy_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
2631 * function.
2632 */
2633void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
2634 struct ieee80211_tx_info *info,
2635 __le16 fc, __le32 *tx_flags)
2636{
2637 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
2638 *tx_flags |= TX_CMD_FLG_RTS_MSK;
2639 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2640 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2641
2642 if (!ieee80211_is_mgmt(fc))
2643 return;
2644
2645 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
2646 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2647 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2648 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
2649 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
2650 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2651 *tx_flags |= TX_CMD_FLG_CTS_MSK;
2652 break;
2653 }
2654 } else if (info->control.rates[0].flags &
2655 IEEE80211_TX_RC_USE_CTS_PROTECT) {
2656 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2657 *tx_flags |= TX_CMD_FLG_CTS_MSK;
2658 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2659 }
2660}
2661EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
deleted file mode 100644
index d1271fe07d4b..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-core.h
+++ /dev/null
@@ -1,636 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_core_h__
64#define __iwl_legacy_core_h__
65
66/************************
67 * forward declarations *
68 ************************/
69struct iwl_host_cmd;
70struct iwl_cmd;
71
72
73#define IWLWIFI_VERSION "in-tree:"
74#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
75#define DRV_AUTHOR "<ilw@linux.intel.com>"
76
77#define IWL_PCI_DEVICE(dev, subdev, cfg) \
78 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
79 .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
80 .driver_data = (kernel_ulong_t)&(cfg)
81
82#define TIME_UNIT 1024
83
84#define IWL_SKU_G 0x1
85#define IWL_SKU_A 0x2
86#define IWL_SKU_N 0x8
87
88#define IWL_CMD(x) case x: return #x
89
90struct iwl_hcmd_ops {
91 int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
92 int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
93 void (*set_rxon_chain)(struct iwl_priv *priv,
94 struct iwl_rxon_context *ctx);
95};
96
97struct iwl_hcmd_utils_ops {
98 u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
99 u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd,
100 u8 *data);
101 int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
102 void (*post_scan)(struct iwl_priv *priv);
103};
104
105struct iwl_apm_ops {
106 int (*init)(struct iwl_priv *priv);
107 void (*config)(struct iwl_priv *priv);
108};
109
110struct iwl_debugfs_ops {
111 ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
112 size_t count, loff_t *ppos);
113 ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
114 size_t count, loff_t *ppos);
115 ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
116 size_t count, loff_t *ppos);
117};
118
119struct iwl_temp_ops {
120 void (*temperature)(struct iwl_priv *priv);
121};
122
123struct iwl_lib_ops {
124 /* set hw dependent parameters */
125 int (*set_hw_params)(struct iwl_priv *priv);
126 /* Handling TX */
127 void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
128 struct iwl_tx_queue *txq,
129 u16 byte_cnt);
130 int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
131 struct iwl_tx_queue *txq,
132 dma_addr_t addr,
133 u16 len, u8 reset, u8 pad);
134 void (*txq_free_tfd)(struct iwl_priv *priv,
135 struct iwl_tx_queue *txq);
136 int (*txq_init)(struct iwl_priv *priv,
137 struct iwl_tx_queue *txq);
138 /* setup Rx handler */
139 void (*rx_handler_setup)(struct iwl_priv *priv);
140 /* alive notification after init uCode load */
141 void (*init_alive_start)(struct iwl_priv *priv);
142 /* check validity of rtc data address */
143 int (*is_valid_rtc_data_addr)(u32 addr);
144 /* 1st ucode load */
145 int (*load_ucode)(struct iwl_priv *priv);
146
147 void (*dump_nic_error_log)(struct iwl_priv *priv);
148 int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
149 int (*set_channel_switch)(struct iwl_priv *priv,
150 struct ieee80211_channel_switch *ch_switch);
151 /* power management */
152 struct iwl_apm_ops apm_ops;
153
154 /* power */
155 int (*send_tx_power) (struct iwl_priv *priv);
156 void (*update_chain_flags)(struct iwl_priv *priv);
157
158 /* eeprom operations (as defined in iwl-eeprom.h) */
159 struct iwl_eeprom_ops eeprom_ops;
160
161 /* temperature */
162 struct iwl_temp_ops temp_ops;
163
164 struct iwl_debugfs_ops debugfs_ops;
165
166};
167
168struct iwl_led_ops {
169 int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
170};
171
172struct iwl_legacy_ops {
173 void (*post_associate)(struct iwl_priv *priv);
174 void (*config_ap)(struct iwl_priv *priv);
175 /* station management */
176 int (*update_bcast_stations)(struct iwl_priv *priv);
177 int (*manage_ibss_station)(struct iwl_priv *priv,
178 struct ieee80211_vif *vif, bool add);
179};
180
181struct iwl_ops {
182 const struct iwl_lib_ops *lib;
183 const struct iwl_hcmd_ops *hcmd;
184 const struct iwl_hcmd_utils_ops *utils;
185 const struct iwl_led_ops *led;
186 const struct iwl_nic_ops *nic;
187 const struct iwl_legacy_ops *legacy;
188 const struct ieee80211_ops *ieee80211_ops;
189};
190
191struct iwl_mod_params {
192 int sw_crypto; /* def: 0 = using hardware encryption */
193 int disable_hw_scan; /* def: 0 = use h/w scan */
194 int num_of_queues; /* def: HW dependent */
195 int disable_11n; /* def: 0 = 11n capabilities enabled */
196 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
197 int antenna; /* def: 0 = both antennas (use diversity) */
198 int restart_fw; /* def: 1 = restart firmware */
199};
200
201/*
202 * @led_compensation: compensate on the led on/off time per HW according
203 * to the deviation to achieve the desired led frequency.
204 * The detail algorithm is described in iwl-led.c
205 * @chain_noise_num_beacons: number of beacons used to compute chain noise
206 * @wd_timeout: TX queues watchdog timeout
207 * @temperature_kelvin: temperature report by uCode in kelvin
208 * @ucode_tracing: support ucode continuous tracing
209 * @sensitivity_calib_by_driver: driver has the capability to perform
210 * sensitivity calibration operation
211 * @chain_noise_calib_by_driver: driver has the capability to perform
212 * chain noise calibration operation
213 */
214struct iwl_base_params {
215 int eeprom_size;
216 int num_of_queues; /* def: HW dependent */
217 int num_of_ampdu_queues;/* def: HW dependent */
218 /* for iwl_legacy_apm_init() */
219 u32 pll_cfg_val;
220 bool set_l0s;
221 bool use_bsm;
222
223 u16 led_compensation;
224 int chain_noise_num_beacons;
225 unsigned int wd_timeout;
226 bool temperature_kelvin;
227 const bool ucode_tracing;
228 const bool sensitivity_calib_by_driver;
229 const bool chain_noise_calib_by_driver;
230};
231
232/**
233 * struct iwl_cfg
234 * @fw_name_pre: Firmware filename prefix. The api version and extension
235 * (.ucode) will be added to filename before loading from disk. The
236 * filename is constructed as fw_name_pre<api>.ucode.
237 * @ucode_api_max: Highest version of uCode API supported by driver.
238 * @ucode_api_min: Lowest version of uCode API supported by driver.
239 * @scan_antennas: available antenna for scan operation
240 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
241 *
242 * We enable the driver to be backward compatible wrt API version. The
243 * driver specifies which APIs it supports (with @ucode_api_max being the
244 * highest and @ucode_api_min the lowest). Firmware will only be loaded if
245 * it has a supported API version. The firmware's API version will be
246 * stored in @iwl_priv, enabling the driver to make runtime changes based
247 * on firmware version used.
248 *
249 * For example,
250 * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
251 * Driver interacts with Firmware API version >= 2.
252 * } else {
253 * Driver interacts with Firmware API version 1.
254 * }
255 *
256 * The ideal usage of this infrastructure is to treat a new ucode API
257 * release as a new hardware revision. That is, through utilizing the
258 * iwl_hcmd_utils_ops etc. we accommodate different command structures
259 * and flows between hardware versions as well as their API
260 * versions.
261 *
262 */
263struct iwl_cfg {
264 /* params specific to an individual device within a device family */
265 const char *name;
266 const char *fw_name_pre;
267 const unsigned int ucode_api_max;
268 const unsigned int ucode_api_min;
269 u8 valid_tx_ant;
270 u8 valid_rx_ant;
271 unsigned int sku;
272 u16 eeprom_ver;
273 u16 eeprom_calib_ver;
274 const struct iwl_ops *ops;
275 /* module based parameters which can be set from modprobe cmd */
276 const struct iwl_mod_params *mod_params;
277 /* params not likely to change within a device family */
278 struct iwl_base_params *base_params;
279 /* params likely to change within a device family */
280 u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
281 enum iwl_led_mode led_mode;
282};
283
284/***************************
285 * L i b *
286 ***************************/
287
288struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg);
289int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw,
290 struct ieee80211_vif *vif, u16 queue,
291 const struct ieee80211_tx_queue_params *params);
292int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw);
293void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
294 struct iwl_rxon_context *ctx,
295 int hw_decrypt);
296int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv,
297 struct iwl_rxon_context *ctx);
298int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
299 struct iwl_rxon_context *ctx);
300int iwl_legacy_set_rxon_channel(struct iwl_priv *priv,
301 struct ieee80211_channel *ch,
302 struct iwl_rxon_context *ctx);
303void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
304 struct iwl_rxon_context *ctx,
305 enum ieee80211_band band,
306 struct ieee80211_vif *vif);
307u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
308 enum ieee80211_band band);
309void iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
310 struct iwl_ht_config *ht_conf);
311bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
312 struct iwl_rxon_context *ctx,
313 struct ieee80211_sta_ht_cap *ht_cap);
314void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
315 struct iwl_rxon_context *ctx);
316void iwl_legacy_set_rate(struct iwl_priv *priv);
317int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
318 struct ieee80211_hdr *hdr,
319 u32 decrypt_res,
320 struct ieee80211_rx_status *stats);
321void iwl_legacy_irq_handle_error(struct iwl_priv *priv);
322int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw,
323 struct ieee80211_vif *vif);
324void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
325 struct ieee80211_vif *vif);
326int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
327 struct ieee80211_vif *vif,
328 enum nl80211_iftype newtype, bool newp2p);
329int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv);
330void iwl_legacy_txq_mem(struct iwl_priv *priv);
331
332#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
333int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv);
334void iwl_legacy_free_traffic_mem(struct iwl_priv *priv);
335void iwl_legacy_reset_traffic_log(struct iwl_priv *priv);
336void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
337 u16 length, struct ieee80211_hdr *header);
338void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
339 u16 length, struct ieee80211_hdr *header);
340const char *iwl_legacy_get_mgmt_string(int cmd);
341const char *iwl_legacy_get_ctrl_string(int cmd);
342void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv);
343void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
344 u16 len);
345#else
346static inline int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
347{
348 return 0;
349}
350static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
351{
352}
353static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
354{
355}
356static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
357 u16 length, struct ieee80211_hdr *header)
358{
359}
360static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
361 u16 length, struct ieee80211_hdr *header)
362{
363}
364static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx,
365 __le16 fc, u16 len)
366{
367}
368#endif
369/*****************************************************
370 * RX handlers.
371 * **************************************************/
372void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
373 struct iwl_rx_mem_buffer *rxb);
374void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
375 struct iwl_rx_mem_buffer *rxb);
376void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
377 struct iwl_rx_mem_buffer *rxb);
378
379/*****************************************************
380* RX
381******************************************************/
382void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv);
383void iwl_legacy_cmd_queue_free(struct iwl_priv *priv);
384int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv);
385void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
386 struct iwl_rx_queue *q);
387int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q);
388void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv,
389 struct iwl_rx_mem_buffer *rxb);
390/* Handlers */
391void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
392 struct iwl_rx_mem_buffer *rxb);
393void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
394 struct iwl_rx_packet *pkt);
395void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success);
396void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
397
398/* TX helpers */
399
400/*****************************************************
401* TX
402******************************************************/
403void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv,
404 struct iwl_tx_queue *txq);
405int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
406 int slots_num, u32 txq_id);
407void iwl_legacy_tx_queue_reset(struct iwl_priv *priv,
408 struct iwl_tx_queue *txq,
409 int slots_num, u32 txq_id);
410void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
411void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id);
412void iwl_legacy_setup_watchdog(struct iwl_priv *priv);
413/*****************************************************
414 * TX power
415 ****************************************************/
416int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
417
418/*******************************************************************************
419 * Rate
420 ******************************************************************************/
421
422u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
423 struct iwl_rxon_context *ctx);
424
425/*******************************************************************************
426 * Scanning
427 ******************************************************************************/
428void iwl_legacy_init_scan_params(struct iwl_priv *priv);
429int iwl_legacy_scan_cancel(struct iwl_priv *priv);
430int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
431void iwl_legacy_force_scan_end(struct iwl_priv *priv);
432int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
433 struct ieee80211_vif *vif,
434 struct cfg80211_scan_request *req);
435void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv);
436int iwl_legacy_force_reset(struct iwl_priv *priv, bool external);
437u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv,
438 struct ieee80211_mgmt *frame,
439 const u8 *ta, const u8 *ie, int ie_len, int left);
440void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv);
441u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
442 enum ieee80211_band band,
443 u8 n_probes);
444u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
445 enum ieee80211_band band,
446 struct ieee80211_vif *vif);
447void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv);
448void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv);
449
450/* For faster active scanning, scan will move to the next channel if fewer than
451 * PLCP_QUIET_THRESH packets are heard on this channel within
452 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
453 * time if it's a quiet channel (nothing responded to our probe, and there's
454 * no other traffic).
455 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
456#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
457#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
458
459#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
460
461/*****************************************************
462 * S e n d i n g H o s t C o m m a n d s *
463 *****************************************************/
464
465const char *iwl_legacy_get_cmd_string(u8 cmd);
466int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv,
467 struct iwl_host_cmd *cmd);
468int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
469int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id,
470 u16 len, const void *data);
471int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
472 const void *data,
473 void (*callback)(struct iwl_priv *priv,
474 struct iwl_device_cmd *cmd,
475 struct iwl_rx_packet *pkt));
476
477int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
478
479
480/*****************************************************
481 * PCI *
482 *****************************************************/
483
484static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv)
485{
486 int pos;
487 u16 pci_lnk_ctl;
488 pos = pci_pcie_cap(priv->pci_dev);
489 pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
490 return pci_lnk_ctl;
491}
492
493void iwl_legacy_bg_watchdog(unsigned long data);
494u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
495 u32 usec, u32 beacon_interval);
496__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
497 u32 addon, u32 beacon_interval);
498
499#ifdef CONFIG_PM
500int iwl_legacy_pci_suspend(struct device *device);
501int iwl_legacy_pci_resume(struct device *device);
502extern const struct dev_pm_ops iwl_legacy_pm_ops;
503
504#define IWL_LEGACY_PM_OPS (&iwl_legacy_pm_ops)
505
506#else /* !CONFIG_PM */
507
508#define IWL_LEGACY_PM_OPS NULL
509
510#endif /* !CONFIG_PM */
511
512/*****************************************************
513* Error Handling Debugging
514******************************************************/
515void iwl4965_dump_nic_error_log(struct iwl_priv *priv);
516#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
517void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
518 struct iwl_rxon_context *ctx);
519#else
520static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
521 struct iwl_rxon_context *ctx)
522{
523}
524#endif
525
526void iwl_legacy_clear_isr_stats(struct iwl_priv *priv);
527
528/*****************************************************
529* GEOS
530******************************************************/
531int iwl_legacy_init_geos(struct iwl_priv *priv);
532void iwl_legacy_free_geos(struct iwl_priv *priv);
533
534/*************** DRIVER STATUS FUNCTIONS *****/
535
536#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
537/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
538#define STATUS_INT_ENABLED 2
539#define STATUS_RF_KILL_HW 3
540#define STATUS_CT_KILL 4
541#define STATUS_INIT 5
542#define STATUS_ALIVE 6
543#define STATUS_READY 7
544#define STATUS_TEMPERATURE 8
545#define STATUS_GEO_CONFIGURED 9
546#define STATUS_EXIT_PENDING 10
547#define STATUS_STATISTICS 12
548#define STATUS_SCANNING 13
549#define STATUS_SCAN_ABORTING 14
550#define STATUS_SCAN_HW 15
551#define STATUS_POWER_PMI 16
552#define STATUS_FW_ERROR 17
553#define STATUS_CHANNEL_SWITCH_PENDING 18
554
555static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
556{
557 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
558 * set but EXIT_PENDING is not */
559 return test_bit(STATUS_READY, &priv->status) &&
560 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
561 !test_bit(STATUS_EXIT_PENDING, &priv->status);
562}
563
564static inline int iwl_legacy_is_alive(struct iwl_priv *priv)
565{
566 return test_bit(STATUS_ALIVE, &priv->status);
567}
568
569static inline int iwl_legacy_is_init(struct iwl_priv *priv)
570{
571 return test_bit(STATUS_INIT, &priv->status);
572}
573
574static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv)
575{
576 return test_bit(STATUS_RF_KILL_HW, &priv->status);
577}
578
579static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv)
580{
581 return iwl_legacy_is_rfkill_hw(priv);
582}
583
584static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv)
585{
586 return test_bit(STATUS_CT_KILL, &priv->status);
587}
588
589static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv)
590{
591
592 if (iwl_legacy_is_rfkill(priv))
593 return 0;
594
595 return iwl_legacy_is_ready(priv);
596}
597
598extern void iwl_legacy_send_bt_config(struct iwl_priv *priv);
599extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv,
600 u8 flags, bool clear);
601void iwl_legacy_apm_stop(struct iwl_priv *priv);
602int iwl_legacy_apm_init(struct iwl_priv *priv);
603
604int iwl_legacy_send_rxon_timing(struct iwl_priv *priv,
605 struct iwl_rxon_context *ctx);
606static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv,
607 struct iwl_rxon_context *ctx)
608{
609 return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
610}
611static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv,
612 struct iwl_rxon_context *ctx)
613{
614 return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
615}
616static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
617 struct iwl_priv *priv, enum ieee80211_band band)
618{
619 return priv->hw->wiphy->bands[band];
620}
621
622/* mac80211 handlers */
623int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
624void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw,
625 struct ieee80211_vif *vif);
626void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
627 struct ieee80211_vif *vif,
628 struct ieee80211_bss_conf *bss_conf,
629 u32 changes);
630void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
631 struct ieee80211_tx_info *info,
632 __le16 fc, __le32 *tx_flags);
633
634irqreturn_t iwl_legacy_isr(int irq, void *data);
635
636#endif /* __iwl_legacy_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-debug.h b/drivers/net/wireless/iwlegacy/iwl-debug.h
deleted file mode 100644
index ae13112701bf..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-debug.h
+++ /dev/null
@@ -1,198 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_legacy_debug_h__
30#define __iwl_legacy_debug_h__
31
32struct iwl_priv;
33extern u32 iwlegacy_debug_level;
34
35#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a)
36#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a)
37#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a)
38#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a)
39
40#define iwl_print_hex_error(priv, p, len) \
41do { \
42 print_hex_dump(KERN_ERR, "iwl data: ", \
43 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
44} while (0)
45
46#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
47#define IWL_DEBUG(__priv, level, fmt, args...) \
48do { \
49 if (iwl_legacy_get_debug_level(__priv) & (level)) \
50 dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
51 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
52 __func__ , ## args); \
53} while (0)
54
55#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \
56do { \
57 if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit()) \
58 dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
59 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
60 __func__ , ## args); \
61} while (0)
62
63#define iwl_print_hex_dump(priv, level, p, len) \
64do { \
65 if (iwl_legacy_get_debug_level(priv) & level) \
66 print_hex_dump(KERN_DEBUG, "iwl data: ", \
67 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
68} while (0)
69
70#else
71#define IWL_DEBUG(__priv, level, fmt, args...)
72#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
73static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
74 const void *p, u32 len)
75{}
76#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
77
78#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
79int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name);
80void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv);
81#else
82static inline int
83iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
84{
85 return 0;
86}
87static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
88{
89}
90#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
91
92/*
93 * To use the debug system:
94 *
95 * If you are defining a new debug classification, simply add it to the #define
96 * list here in the form of
97 *
98 * #define IWL_DL_xxxx VALUE
99 *
100 * where xxxx should be the name of the classification (for example, WEP).
101 *
102 * You then need to either add a IWL_xxxx_DEBUG() macro definition for your
103 * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
104 * to send output to that classification.
105 *
106 * The active debug levels can be accessed via files
107 *
108 * /sys/module/iwl4965/parameters/debug{50}
109 * /sys/module/iwl3945/parameters/debug
110 * /sys/class/net/wlan0/device/debug_level
111 *
112 * when CONFIG_IWLWIFI_LEGACY_DEBUG=y.
113 */
114
115/* 0x0000000F - 0x00000001 */
116#define IWL_DL_INFO (1 << 0)
117#define IWL_DL_MAC80211 (1 << 1)
118#define IWL_DL_HCMD (1 << 2)
119#define IWL_DL_STATE (1 << 3)
120/* 0x000000F0 - 0x00000010 */
121#define IWL_DL_MACDUMP (1 << 4)
122#define IWL_DL_HCMD_DUMP (1 << 5)
123#define IWL_DL_EEPROM (1 << 6)
124#define IWL_DL_RADIO (1 << 7)
125/* 0x00000F00 - 0x00000100 */
126#define IWL_DL_POWER (1 << 8)
127#define IWL_DL_TEMP (1 << 9)
128#define IWL_DL_NOTIF (1 << 10)
129#define IWL_DL_SCAN (1 << 11)
130/* 0x0000F000 - 0x00001000 */
131#define IWL_DL_ASSOC (1 << 12)
132#define IWL_DL_DROP (1 << 13)
133#define IWL_DL_TXPOWER (1 << 14)
134#define IWL_DL_AP (1 << 15)
135/* 0x000F0000 - 0x00010000 */
136#define IWL_DL_FW (1 << 16)
137#define IWL_DL_RF_KILL (1 << 17)
138#define IWL_DL_FW_ERRORS (1 << 18)
139#define IWL_DL_LED (1 << 19)
140/* 0x00F00000 - 0x00100000 */
141#define IWL_DL_RATE (1 << 20)
142#define IWL_DL_CALIB (1 << 21)
143#define IWL_DL_WEP (1 << 22)
144#define IWL_DL_TX (1 << 23)
145/* 0x0F000000 - 0x01000000 */
146#define IWL_DL_RX (1 << 24)
147#define IWL_DL_ISR (1 << 25)
148#define IWL_DL_HT (1 << 26)
149#define IWL_DL_IO (1 << 27)
150/* 0xF0000000 - 0x10000000 */
151#define IWL_DL_11H (1 << 28)
152#define IWL_DL_STATS (1 << 29)
153#define IWL_DL_TX_REPLY (1 << 30)
154#define IWL_DL_QOS (1 << 31)
155
156#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
157#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
158#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
159#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
160#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
161#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
162#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a)
163#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
164#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
165#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
166#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
167#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
168#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
169#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
170#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
171#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
172#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
173#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
174 IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
175#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a)
176#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a)
177#define IWL_DEBUG_IO(p, f, a...) IWL_DEBUG(p, IWL_DL_IO, f, ## a)
178#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
179#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \
180 IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
181#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a)
182#define IWL_DEBUG_ASSOC(p, f, a...) \
183 IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
184#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \
185 IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
186#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a)
187#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
188#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
189 IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
190#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
191#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
192 IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
193#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
194#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
195#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
196#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
197
198#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
deleted file mode 100644
index 1407dca70def..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-debugfs.c
+++ /dev/null
@@ -1,1314 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/ieee80211.h>
29#include <linux/export.h>
30#include <net/mac80211.h>
31
32
33#include "iwl-dev.h"
34#include "iwl-debug.h"
35#include "iwl-core.h"
36#include "iwl-io.h"
37
/*
 * debugfs entry creation helpers.  Each macro creates one node under
 * @parent and jumps to the caller's local "err" label on failure, so
 * callers can unwind with a single cleanup path.
 */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, priv,		\
				 &iwl_legacy_dbgfs_##name##_ops))	\
		goto err;						\
} while (0)

#define DEBUGFS_ADD_BOOL(name, parent, ptr) do {			\
	struct dentry *__tmp;						\
	__tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR,		\
				    parent, ptr);			\
	if (IS_ERR(__tmp) || !__tmp)					\
		goto err;						\
} while (0)

#define DEBUGFS_ADD_X32(name, parent, ptr) do {				\
	struct dentry *__tmp;						\
	__tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR,		\
				   parent, ptr);			\
	if (IS_ERR(__tmp) || !__tmp)					\
		goto err;						\
} while (0)
60
/*
 * Prototype generators for the per-file read/write handlers referenced
 * by the *_FILE_OPS macros below.
 */
#define DEBUGFS_READ_FUNC(name)						\
static ssize_t iwl_legacy_dbgfs_##name##_read(struct file *file,	\
					char __user *user_buf,		\
					size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)					\
static ssize_t iwl_legacy_dbgfs_##name##_write(struct file *file,	\
					const char __user *user_buf,	\
					size_t count, loff_t *ppos);
72
73static int
74iwl_legacy_dbgfs_open_file_generic(struct inode *inode, struct file *file)
75{
76 file->private_data = inode->i_private;
77 return 0;
78}
79
/*
 * Generate the file_operations (and handler prototypes) for a debugfs
 * node named <name>, wired to iwl_legacy_dbgfs_<name>_{read,write}.
 */
#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations iwl_legacy_dbgfs_##name##_ops = {	\
	.read = iwl_legacy_dbgfs_##name##_read,				\
	.open = iwl_legacy_dbgfs_open_file_generic,			\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_legacy_dbgfs_##name##_ops = {	\
	.write = iwl_legacy_dbgfs_##name##_write,			\
	.open = iwl_legacy_dbgfs_open_file_generic,			\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_legacy_dbgfs_##name##_ops = {	\
	.write = iwl_legacy_dbgfs_##name##_write,			\
	.read = iwl_legacy_dbgfs_##name##_read,				\
	.open = iwl_legacy_dbgfs_open_file_generic,			\
	.llseek = generic_file_llseek,					\
};
105
106static ssize_t iwl_legacy_dbgfs_tx_statistics_read(struct file *file,
107 char __user *user_buf,
108 size_t count, loff_t *ppos) {
109
110 struct iwl_priv *priv = file->private_data;
111 char *buf;
112 int pos = 0;
113
114 int cnt;
115 ssize_t ret;
116 const size_t bufsz = 100 +
117 sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
118 buf = kzalloc(bufsz, GFP_KERNEL);
119 if (!buf)
120 return -ENOMEM;
121 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
122 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
123 pos += scnprintf(buf + pos, bufsz - pos,
124 "\t%25s\t\t: %u\n",
125 iwl_legacy_get_mgmt_string(cnt),
126 priv->tx_stats.mgmt[cnt]);
127 }
128 pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
129 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
130 pos += scnprintf(buf + pos, bufsz - pos,
131 "\t%25s\t\t: %u\n",
132 iwl_legacy_get_ctrl_string(cnt),
133 priv->tx_stats.ctrl[cnt]);
134 }
135 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
136 pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
137 priv->tx_stats.data_cnt);
138 pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
139 priv->tx_stats.data_bytes);
140 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
141 kfree(buf);
142 return ret;
143}
144
145static ssize_t
146iwl_legacy_dbgfs_clear_traffic_statistics_write(struct file *file,
147 const char __user *user_buf,
148 size_t count, loff_t *ppos)
149{
150 struct iwl_priv *priv = file->private_data;
151 u32 clear_flag;
152 char buf[8];
153 int buf_size;
154
155 memset(buf, 0, sizeof(buf));
156 buf_size = min(count, sizeof(buf) - 1);
157 if (copy_from_user(buf, user_buf, buf_size))
158 return -EFAULT;
159 if (sscanf(buf, "%x", &clear_flag) != 1)
160 return -EFAULT;
161 iwl_legacy_clear_traffic_stats(priv);
162
163 return count;
164}
165
166static ssize_t iwl_legacy_dbgfs_rx_statistics_read(struct file *file,
167 char __user *user_buf,
168 size_t count, loff_t *ppos) {
169
170 struct iwl_priv *priv = file->private_data;
171 char *buf;
172 int pos = 0;
173 int cnt;
174 ssize_t ret;
175 const size_t bufsz = 100 +
176 sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
177 buf = kzalloc(bufsz, GFP_KERNEL);
178 if (!buf)
179 return -ENOMEM;
180
181 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
182 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
183 pos += scnprintf(buf + pos, bufsz - pos,
184 "\t%25s\t\t: %u\n",
185 iwl_legacy_get_mgmt_string(cnt),
186 priv->rx_stats.mgmt[cnt]);
187 }
188 pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
189 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
190 pos += scnprintf(buf + pos, bufsz - pos,
191 "\t%25s\t\t: %u\n",
192 iwl_legacy_get_ctrl_string(cnt),
193 priv->rx_stats.ctrl[cnt]);
194 }
195 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
196 pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
197 priv->rx_stats.data_cnt);
198 pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
199 priv->rx_stats.data_bytes);
200
201 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
202 kfree(buf);
203 return ret;
204}
205
/*
 * Byte masks used to trim the final 32-bit SRAM word when fewer than
 * four bytes remain in the dump window.
 *
 * Fix: the original definitions carried trailing semicolons
 * ("#define BYTE1_MASK 0x000000ff;"), which only compiled by accident
 * as "val &= BYTE1_MASK;;" and would break inside any expression or
 * unbraced if/else.  Macro constants must not include the semicolon.
 */
#define BYTE1_MASK 0x000000ff
#define BYTE2_MASK 0x0000ffff
#define BYTE3_MASK 0x00ffffff
209static ssize_t iwl_legacy_dbgfs_sram_read(struct file *file,
210 char __user *user_buf,
211 size_t count, loff_t *ppos)
212{
213 u32 val;
214 char *buf;
215 ssize_t ret;
216 int i;
217 int pos = 0;
218 struct iwl_priv *priv = file->private_data;
219 size_t bufsz;
220
221 /* default is to dump the entire data segment */
222 if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
223 priv->dbgfs_sram_offset = 0x800000;
224 if (priv->ucode_type == UCODE_INIT)
225 priv->dbgfs_sram_len = priv->ucode_init_data.len;
226 else
227 priv->dbgfs_sram_len = priv->ucode_data.len;
228 }
229 bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10;
230 buf = kmalloc(bufsz, GFP_KERNEL);
231 if (!buf)
232 return -ENOMEM;
233 pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
234 priv->dbgfs_sram_len);
235 pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
236 priv->dbgfs_sram_offset);
237 for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
238 val = iwl_legacy_read_targ_mem(priv, priv->dbgfs_sram_offset + \
239 priv->dbgfs_sram_len - i);
240 if (i < 4) {
241 switch (i) {
242 case 1:
243 val &= BYTE1_MASK;
244 break;
245 case 2:
246 val &= BYTE2_MASK;
247 break;
248 case 3:
249 val &= BYTE3_MASK;
250 break;
251 }
252 }
253 if (!(i % 16))
254 pos += scnprintf(buf + pos, bufsz - pos, "\n");
255 pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
256 }
257 pos += scnprintf(buf + pos, bufsz - pos, "\n");
258
259 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
260 kfree(buf);
261 return ret;
262}
263
264static ssize_t iwl_legacy_dbgfs_sram_write(struct file *file,
265 const char __user *user_buf,
266 size_t count, loff_t *ppos)
267{
268 struct iwl_priv *priv = file->private_data;
269 char buf[64];
270 int buf_size;
271 u32 offset, len;
272
273 memset(buf, 0, sizeof(buf));
274 buf_size = min(count, sizeof(buf) - 1);
275 if (copy_from_user(buf, user_buf, buf_size))
276 return -EFAULT;
277
278 if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
279 priv->dbgfs_sram_offset = offset;
280 priv->dbgfs_sram_len = len;
281 } else {
282 priv->dbgfs_sram_offset = 0;
283 priv->dbgfs_sram_len = 0;
284 }
285
286 return count;
287}
288
289static ssize_t
290iwl_legacy_dbgfs_stations_read(struct file *file, char __user *user_buf,
291 size_t count, loff_t *ppos)
292{
293 struct iwl_priv *priv = file->private_data;
294 struct iwl_station_entry *station;
295 int max_sta = priv->hw_params.max_stations;
296 char *buf;
297 int i, j, pos = 0;
298 ssize_t ret;
299 /* Add 30 for initial string */
300 const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);
301
302 buf = kmalloc(bufsz, GFP_KERNEL);
303 if (!buf)
304 return -ENOMEM;
305
306 pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
307 priv->num_stations);
308
309 for (i = 0; i < max_sta; i++) {
310 station = &priv->stations[i];
311 if (!station->used)
312 continue;
313 pos += scnprintf(buf + pos, bufsz - pos,
314 "station %d - addr: %pM, flags: %#x\n",
315 i, station->sta.sta.addr,
316 station->sta.station_flags_msk);
317 pos += scnprintf(buf + pos, bufsz - pos,
318 "TID\tseq_num\ttxq_id\tframes\ttfds\t");
319 pos += scnprintf(buf + pos, bufsz - pos,
320 "start_idx\tbitmap\t\t\trate_n_flags\n");
321
322 for (j = 0; j < MAX_TID_COUNT; j++) {
323 pos += scnprintf(buf + pos, bufsz - pos,
324 "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
325 j, station->tid[j].seq_number,
326 station->tid[j].agg.txq_id,
327 station->tid[j].agg.frame_count,
328 station->tid[j].tfds_in_queue,
329 station->tid[j].agg.start_idx,
330 station->tid[j].agg.bitmap,
331 station->tid[j].agg.rate_n_flags);
332
333 if (station->tid[j].agg.wait_for_ba)
334 pos += scnprintf(buf + pos, bufsz - pos,
335 " - waitforba");
336 pos += scnprintf(buf + pos, bufsz - pos, "\n");
337 }
338
339 pos += scnprintf(buf + pos, bufsz - pos, "\n");
340 }
341
342 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
343 kfree(buf);
344 return ret;
345}
346
347static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file,
348 char __user *user_buf,
349 size_t count,
350 loff_t *ppos)
351{
352 ssize_t ret;
353 struct iwl_priv *priv = file->private_data;
354 int pos = 0, ofs = 0, buf_size = 0;
355 const u8 *ptr;
356 char *buf;
357 u16 eeprom_ver;
358 size_t eeprom_len = priv->cfg->base_params->eeprom_size;
359 buf_size = 4 * eeprom_len + 256;
360
361 if (eeprom_len % 16) {
362 IWL_ERR(priv, "NVM size is not multiple of 16.\n");
363 return -ENODATA;
364 }
365
366 ptr = priv->eeprom;
367 if (!ptr) {
368 IWL_ERR(priv, "Invalid EEPROM memory\n");
369 return -ENOMEM;
370 }
371
372 /* 4 characters for byte 0xYY */
373 buf = kzalloc(buf_size, GFP_KERNEL);
374 if (!buf) {
375 IWL_ERR(priv, "Can not allocate Buffer\n");
376 return -ENOMEM;
377 }
378 eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
379 pos += scnprintf(buf + pos, buf_size - pos, "EEPROM "
380 "version: 0x%x\n", eeprom_ver);
381 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
382 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
383 hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
384 buf_size - pos, 0);
385 pos += strlen(buf + pos);
386 if (buf_size - pos > 0)
387 buf[pos++] = '\n';
388 }
389
390 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
391 kfree(buf);
392 return ret;
393}
394
395static ssize_t
396iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf,
397 size_t count, loff_t *ppos)
398{
399 struct iwl_priv *priv = file->private_data;
400 struct ieee80211_channel *channels = NULL;
401 const struct ieee80211_supported_band *supp_band = NULL;
402 int pos = 0, i, bufsz = PAGE_SIZE;
403 char *buf;
404 ssize_t ret;
405
406 if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
407 return -EAGAIN;
408
409 buf = kzalloc(bufsz, GFP_KERNEL);
410 if (!buf) {
411 IWL_ERR(priv, "Can not allocate Buffer\n");
412 return -ENOMEM;
413 }
414
415 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
416 if (supp_band) {
417 channels = supp_band->channels;
418
419 pos += scnprintf(buf + pos, bufsz - pos,
420 "Displaying %d channels in 2.4GHz band 802.11bg):\n",
421 supp_band->n_channels);
422
423 for (i = 0; i < supp_band->n_channels; i++)
424 pos += scnprintf(buf + pos, bufsz - pos,
425 "%d: %ddBm: BSS%s%s, %s.\n",
426 channels[i].hw_value,
427 channels[i].max_power,
428 channels[i].flags & IEEE80211_CHAN_RADAR ?
429 " (IEEE 802.11h required)" : "",
430 ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
431 || (channels[i].flags &
432 IEEE80211_CHAN_RADAR)) ? "" :
433 ", IBSS",
434 channels[i].flags &
435 IEEE80211_CHAN_PASSIVE_SCAN ?
436 "passive only" : "active/passive");
437 }
438 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
439 if (supp_band) {
440 channels = supp_band->channels;
441
442 pos += scnprintf(buf + pos, bufsz - pos,
443 "Displaying %d channels in 5.2GHz band (802.11a)\n",
444 supp_band->n_channels);
445
446 for (i = 0; i < supp_band->n_channels; i++)
447 pos += scnprintf(buf + pos, bufsz - pos,
448 "%d: %ddBm: BSS%s%s, %s.\n",
449 channels[i].hw_value,
450 channels[i].max_power,
451 channels[i].flags & IEEE80211_CHAN_RADAR ?
452 " (IEEE 802.11h required)" : "",
453 ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
454 || (channels[i].flags &
455 IEEE80211_CHAN_RADAR)) ? "" :
456 ", IBSS",
457 channels[i].flags &
458 IEEE80211_CHAN_PASSIVE_SCAN ?
459 "passive only" : "active/passive");
460 }
461 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
462 kfree(buf);
463 return ret;
464}
465
466static ssize_t iwl_legacy_dbgfs_status_read(struct file *file,
467 char __user *user_buf,
468 size_t count, loff_t *ppos) {
469
470 struct iwl_priv *priv = file->private_data;
471 char buf[512];
472 int pos = 0;
473 const size_t bufsz = sizeof(buf);
474
475 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
476 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
477 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
478 test_bit(STATUS_INT_ENABLED, &priv->status));
479 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
480 test_bit(STATUS_RF_KILL_HW, &priv->status));
481 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
482 test_bit(STATUS_CT_KILL, &priv->status));
483 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
484 test_bit(STATUS_INIT, &priv->status));
485 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
486 test_bit(STATUS_ALIVE, &priv->status));
487 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
488 test_bit(STATUS_READY, &priv->status));
489 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
490 test_bit(STATUS_TEMPERATURE, &priv->status));
491 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
492 test_bit(STATUS_GEO_CONFIGURED, &priv->status));
493 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
494 test_bit(STATUS_EXIT_PENDING, &priv->status));
495 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
496 test_bit(STATUS_STATISTICS, &priv->status));
497 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
498 test_bit(STATUS_SCANNING, &priv->status));
499 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
500 test_bit(STATUS_SCAN_ABORTING, &priv->status));
501 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
502 test_bit(STATUS_SCAN_HW, &priv->status));
503 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
504 test_bit(STATUS_POWER_PMI, &priv->status));
505 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
506 test_bit(STATUS_FW_ERROR, &priv->status));
507 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
508}
509
510static ssize_t iwl_legacy_dbgfs_interrupt_read(struct file *file,
511 char __user *user_buf,
512 size_t count, loff_t *ppos) {
513
514 struct iwl_priv *priv = file->private_data;
515 int pos = 0;
516 int cnt = 0;
517 char *buf;
518 int bufsz = 24 * 64; /* 24 items * 64 char per item */
519 ssize_t ret;
520
521 buf = kzalloc(bufsz, GFP_KERNEL);
522 if (!buf) {
523 IWL_ERR(priv, "Can not allocate Buffer\n");
524 return -ENOMEM;
525 }
526
527 pos += scnprintf(buf + pos, bufsz - pos,
528 "Interrupt Statistics Report:\n");
529
530 pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
531 priv->isr_stats.hw);
532 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
533 priv->isr_stats.sw);
534 if (priv->isr_stats.sw || priv->isr_stats.hw) {
535 pos += scnprintf(buf + pos, bufsz - pos,
536 "\tLast Restarting Code: 0x%X\n",
537 priv->isr_stats.err_code);
538 }
539#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
540 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
541 priv->isr_stats.sch);
542 pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
543 priv->isr_stats.alive);
544#endif
545 pos += scnprintf(buf + pos, bufsz - pos,
546 "HW RF KILL switch toggled:\t %u\n",
547 priv->isr_stats.rfkill);
548
549 pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
550 priv->isr_stats.ctkill);
551
552 pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
553 priv->isr_stats.wakeup);
554
555 pos += scnprintf(buf + pos, bufsz - pos,
556 "Rx command responses:\t\t %u\n",
557 priv->isr_stats.rx);
558 for (cnt = 0; cnt < REPLY_MAX; cnt++) {
559 if (priv->isr_stats.rx_handlers[cnt] > 0)
560 pos += scnprintf(buf + pos, bufsz - pos,
561 "\tRx handler[%36s]:\t\t %u\n",
562 iwl_legacy_get_cmd_string(cnt),
563 priv->isr_stats.rx_handlers[cnt]);
564 }
565
566 pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
567 priv->isr_stats.tx);
568
569 pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
570 priv->isr_stats.unhandled);
571
572 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
573 kfree(buf);
574 return ret;
575}
576
577static ssize_t iwl_legacy_dbgfs_interrupt_write(struct file *file,
578 const char __user *user_buf,
579 size_t count, loff_t *ppos)
580{
581 struct iwl_priv *priv = file->private_data;
582 char buf[8];
583 int buf_size;
584 u32 reset_flag;
585
586 memset(buf, 0, sizeof(buf));
587 buf_size = min(count, sizeof(buf) - 1);
588 if (copy_from_user(buf, user_buf, buf_size))
589 return -EFAULT;
590 if (sscanf(buf, "%x", &reset_flag) != 1)
591 return -EFAULT;
592 if (reset_flag == 0)
593 iwl_legacy_clear_isr_stats(priv);
594
595 return count;
596}
597
598static ssize_t
599iwl_legacy_dbgfs_qos_read(struct file *file, char __user *user_buf,
600 size_t count, loff_t *ppos)
601{
602 struct iwl_priv *priv = file->private_data;
603 struct iwl_rxon_context *ctx;
604 int pos = 0, i;
605 char buf[256 * NUM_IWL_RXON_CTX];
606 const size_t bufsz = sizeof(buf);
607
608 for_each_context(priv, ctx) {
609 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
610 ctx->ctxid);
611 for (i = 0; i < AC_NUM; i++) {
612 pos += scnprintf(buf + pos, bufsz - pos,
613 "\tcw_min\tcw_max\taifsn\ttxop\n");
614 pos += scnprintf(buf + pos, bufsz - pos,
615 "AC[%d]\t%u\t%u\t%u\t%u\n", i,
616 ctx->qos_data.def_qos_parm.ac[i].cw_min,
617 ctx->qos_data.def_qos_parm.ac[i].cw_max,
618 ctx->qos_data.def_qos_parm.ac[i].aifsn,
619 ctx->qos_data.def_qos_parm.ac[i].edca_txop);
620 }
621 pos += scnprintf(buf + pos, bufsz - pos, "\n");
622 }
623 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
624}
625
626static ssize_t iwl_legacy_dbgfs_disable_ht40_write(struct file *file,
627 const char __user *user_buf,
628 size_t count, loff_t *ppos)
629{
630 struct iwl_priv *priv = file->private_data;
631 char buf[8];
632 int buf_size;
633 int ht40;
634
635 memset(buf, 0, sizeof(buf));
636 buf_size = min(count, sizeof(buf) - 1);
637 if (copy_from_user(buf, user_buf, buf_size))
638 return -EFAULT;
639 if (sscanf(buf, "%d", &ht40) != 1)
640 return -EFAULT;
641 if (!iwl_legacy_is_any_associated(priv))
642 priv->disable_ht40 = ht40 ? true : false;
643 else {
644 IWL_ERR(priv, "Sta associated with AP - "
645 "Change to 40MHz channel support is not allowed\n");
646 return -EINVAL;
647 }
648
649 return count;
650}
651
652static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file,
653 char __user *user_buf,
654 size_t count, loff_t *ppos)
655{
656 struct iwl_priv *priv = file->private_data;
657 char buf[100];
658 int pos = 0;
659 const size_t bufsz = sizeof(buf);
660
661 pos += scnprintf(buf + pos, bufsz - pos,
662 "11n 40MHz Mode: %s\n",
663 priv->disable_ht40 ? "Disabled" : "Enabled");
664 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
665}
666
667DEBUGFS_READ_WRITE_FILE_OPS(sram);
668DEBUGFS_READ_FILE_OPS(nvm);
669DEBUGFS_READ_FILE_OPS(stations);
670DEBUGFS_READ_FILE_OPS(channels);
671DEBUGFS_READ_FILE_OPS(status);
672DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
673DEBUGFS_READ_FILE_OPS(qos);
674DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
675
676static ssize_t iwl_legacy_dbgfs_traffic_log_read(struct file *file,
677 char __user *user_buf,
678 size_t count, loff_t *ppos)
679{
680 struct iwl_priv *priv = file->private_data;
681 int pos = 0, ofs = 0;
682 int cnt = 0, entry;
683 struct iwl_tx_queue *txq;
684 struct iwl_queue *q;
685 struct iwl_rx_queue *rxq = &priv->rxq;
686 char *buf;
687 int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
688 (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
689 const u8 *ptr;
690 ssize_t ret;
691
692 if (!priv->txq) {
693 IWL_ERR(priv, "txq not ready\n");
694 return -EAGAIN;
695 }
696 buf = kzalloc(bufsz, GFP_KERNEL);
697 if (!buf) {
698 IWL_ERR(priv, "Can not allocate buffer\n");
699 return -ENOMEM;
700 }
701 pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
702 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
703 txq = &priv->txq[cnt];
704 q = &txq->q;
705 pos += scnprintf(buf + pos, bufsz - pos,
706 "q[%d]: read_ptr: %u, write_ptr: %u\n",
707 cnt, q->read_ptr, q->write_ptr);
708 }
709 if (priv->tx_traffic && (iwlegacy_debug_level & IWL_DL_TX)) {
710 ptr = priv->tx_traffic;
711 pos += scnprintf(buf + pos, bufsz - pos,
712 "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
713 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
714 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
715 entry++, ofs += 16) {
716 pos += scnprintf(buf + pos, bufsz - pos,
717 "0x%.4x ", ofs);
718 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
719 buf + pos, bufsz - pos, 0);
720 pos += strlen(buf + pos);
721 if (bufsz - pos > 0)
722 buf[pos++] = '\n';
723 }
724 }
725 }
726
727 pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
728 pos += scnprintf(buf + pos, bufsz - pos,
729 "read: %u, write: %u\n",
730 rxq->read, rxq->write);
731
732 if (priv->rx_traffic && (iwlegacy_debug_level & IWL_DL_RX)) {
733 ptr = priv->rx_traffic;
734 pos += scnprintf(buf + pos, bufsz - pos,
735 "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
736 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
737 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
738 entry++, ofs += 16) {
739 pos += scnprintf(buf + pos, bufsz - pos,
740 "0x%.4x ", ofs);
741 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
742 buf + pos, bufsz - pos, 0);
743 pos += strlen(buf + pos);
744 if (bufsz - pos > 0)
745 buf[pos++] = '\n';
746 }
747 }
748 }
749
750 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
751 kfree(buf);
752 return ret;
753}
754
755static ssize_t iwl_legacy_dbgfs_traffic_log_write(struct file *file,
756 const char __user *user_buf,
757 size_t count, loff_t *ppos)
758{
759 struct iwl_priv *priv = file->private_data;
760 char buf[8];
761 int buf_size;
762 int traffic_log;
763
764 memset(buf, 0, sizeof(buf));
765 buf_size = min(count, sizeof(buf) - 1);
766 if (copy_from_user(buf, user_buf, buf_size))
767 return -EFAULT;
768 if (sscanf(buf, "%d", &traffic_log) != 1)
769 return -EFAULT;
770 if (traffic_log == 0)
771 iwl_legacy_reset_traffic_log(priv);
772
773 return count;
774}
775
776static ssize_t iwl_legacy_dbgfs_tx_queue_read(struct file *file,
777 char __user *user_buf,
778 size_t count, loff_t *ppos) {
779
780 struct iwl_priv *priv = file->private_data;
781 struct iwl_tx_queue *txq;
782 struct iwl_queue *q;
783 char *buf;
784 int pos = 0;
785 int cnt;
786 int ret;
787 const size_t bufsz = sizeof(char) * 64 *
788 priv->cfg->base_params->num_of_queues;
789
790 if (!priv->txq) {
791 IWL_ERR(priv, "txq not ready\n");
792 return -EAGAIN;
793 }
794 buf = kzalloc(bufsz, GFP_KERNEL);
795 if (!buf)
796 return -ENOMEM;
797
798 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
799 txq = &priv->txq[cnt];
800 q = &txq->q;
801 pos += scnprintf(buf + pos, bufsz - pos,
802 "hwq %.2d: read=%u write=%u stop=%d"
803 " swq_id=%#.2x (ac %d/hwq %d)\n",
804 cnt, q->read_ptr, q->write_ptr,
805 !!test_bit(cnt, priv->queue_stopped),
806 txq->swq_id, txq->swq_id & 3,
807 (txq->swq_id >> 2) & 0x1f);
808 if (cnt >= 4)
809 continue;
810 /* for the ACs, display the stop count too */
811 pos += scnprintf(buf + pos, bufsz - pos,
812 " stop-count: %d\n",
813 atomic_read(&priv->queue_stop_count[cnt]));
814 }
815 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
816 kfree(buf);
817 return ret;
818}
819
820static ssize_t iwl_legacy_dbgfs_rx_queue_read(struct file *file,
821 char __user *user_buf,
822 size_t count, loff_t *ppos) {
823
824 struct iwl_priv *priv = file->private_data;
825 struct iwl_rx_queue *rxq = &priv->rxq;
826 char buf[256];
827 int pos = 0;
828 const size_t bufsz = sizeof(buf);
829
830 pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
831 rxq->read);
832 pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
833 rxq->write);
834 pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
835 rxq->free_count);
836 if (rxq->rb_stts) {
837 pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
838 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
839 } else {
840 pos += scnprintf(buf + pos, bufsz - pos,
841 "closed_rb_num: Not Allocated\n");
842 }
843 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
844}
845
846static ssize_t iwl_legacy_dbgfs_ucode_rx_stats_read(struct file *file,
847 char __user *user_buf,
848 size_t count, loff_t *ppos)
849{
850 struct iwl_priv *priv = file->private_data;
851 return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
852 user_buf, count, ppos);
853}
854
855static ssize_t iwl_legacy_dbgfs_ucode_tx_stats_read(struct file *file,
856 char __user *user_buf,
857 size_t count, loff_t *ppos)
858{
859 struct iwl_priv *priv = file->private_data;
860 return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
861 user_buf, count, ppos);
862}
863
864static ssize_t iwl_legacy_dbgfs_ucode_general_stats_read(struct file *file,
865 char __user *user_buf,
866 size_t count, loff_t *ppos)
867{
868 struct iwl_priv *priv = file->private_data;
869 return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
870 user_buf, count, ppos);
871}
872
873static ssize_t iwl_legacy_dbgfs_sensitivity_read(struct file *file,
874 char __user *user_buf,
875 size_t count, loff_t *ppos) {
876
877 struct iwl_priv *priv = file->private_data;
878 int pos = 0;
879 int cnt = 0;
880 char *buf;
881 int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100;
882 ssize_t ret;
883 struct iwl_sensitivity_data *data;
884
885 data = &priv->sensitivity_data;
886 buf = kzalloc(bufsz, GFP_KERNEL);
887 if (!buf) {
888 IWL_ERR(priv, "Can not allocate Buffer\n");
889 return -ENOMEM;
890 }
891
892 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
893 data->auto_corr_ofdm);
894 pos += scnprintf(buf + pos, bufsz - pos,
895 "auto_corr_ofdm_mrc:\t\t %u\n",
896 data->auto_corr_ofdm_mrc);
897 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
898 data->auto_corr_ofdm_x1);
899 pos += scnprintf(buf + pos, bufsz - pos,
900 "auto_corr_ofdm_mrc_x1:\t\t %u\n",
901 data->auto_corr_ofdm_mrc_x1);
902 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
903 data->auto_corr_cck);
904 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
905 data->auto_corr_cck_mrc);
906 pos += scnprintf(buf + pos, bufsz - pos,
907 "last_bad_plcp_cnt_ofdm:\t\t %u\n",
908 data->last_bad_plcp_cnt_ofdm);
909 pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
910 data->last_fa_cnt_ofdm);
911 pos += scnprintf(buf + pos, bufsz - pos,
912 "last_bad_plcp_cnt_cck:\t\t %u\n",
913 data->last_bad_plcp_cnt_cck);
914 pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
915 data->last_fa_cnt_cck);
916 pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
917 data->nrg_curr_state);
918 pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
919 data->nrg_prev_state);
920 pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
921 for (cnt = 0; cnt < 10; cnt++) {
922 pos += scnprintf(buf + pos, bufsz - pos, " %u",
923 data->nrg_value[cnt]);
924 }
925 pos += scnprintf(buf + pos, bufsz - pos, "\n");
926 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
927 for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
928 pos += scnprintf(buf + pos, bufsz - pos, " %u",
929 data->nrg_silence_rssi[cnt]);
930 }
931 pos += scnprintf(buf + pos, bufsz - pos, "\n");
932 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
933 data->nrg_silence_ref);
934 pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
935 data->nrg_energy_idx);
936 pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
937 data->nrg_silence_idx);
938 pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
939 data->nrg_th_cck);
940 pos += scnprintf(buf + pos, bufsz - pos,
941 "nrg_auto_corr_silence_diff:\t %u\n",
942 data->nrg_auto_corr_silence_diff);
943 pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
944 data->num_in_cck_no_fa);
945 pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
946 data->nrg_th_ofdm);
947
948 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
949 kfree(buf);
950 return ret;
951}
952
953
954static ssize_t iwl_legacy_dbgfs_chain_noise_read(struct file *file,
955 char __user *user_buf,
956 size_t count, loff_t *ppos) {
957
958 struct iwl_priv *priv = file->private_data;
959 int pos = 0;
960 int cnt = 0;
961 char *buf;
962 int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100;
963 ssize_t ret;
964 struct iwl_chain_noise_data *data;
965
966 data = &priv->chain_noise_data;
967 buf = kzalloc(bufsz, GFP_KERNEL);
968 if (!buf) {
969 IWL_ERR(priv, "Can not allocate Buffer\n");
970 return -ENOMEM;
971 }
972
973 pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
974 data->active_chains);
975 pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
976 data->chain_noise_a);
977 pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
978 data->chain_noise_b);
979 pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
980 data->chain_noise_c);
981 pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
982 data->chain_signal_a);
983 pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
984 data->chain_signal_b);
985 pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
986 data->chain_signal_c);
987 pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
988 data->beacon_count);
989
990 pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
991 for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
992 pos += scnprintf(buf + pos, bufsz - pos, " %u",
993 data->disconn_array[cnt]);
994 }
995 pos += scnprintf(buf + pos, bufsz - pos, "\n");
996 pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
997 for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
998 pos += scnprintf(buf + pos, bufsz - pos, " %u",
999 data->delta_gain_code[cnt]);
1000 }
1001 pos += scnprintf(buf + pos, bufsz - pos, "\n");
1002 pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
1003 data->radio_write);
1004 pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
1005 data->state);
1006
1007 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1008 kfree(buf);
1009 return ret;
1010}
1011
1012static ssize_t iwl_legacy_dbgfs_power_save_status_read(struct file *file,
1013 char __user *user_buf,
1014 size_t count, loff_t *ppos)
1015{
1016 struct iwl_priv *priv = file->private_data;
1017 char buf[60];
1018 int pos = 0;
1019 const size_t bufsz = sizeof(buf);
1020 u32 pwrsave_status;
1021
1022 pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
1023 CSR_GP_REG_POWER_SAVE_STATUS_MSK;
1024
1025 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
1026 pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
1027 (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
1028 (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
1029 (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
1030 "error");
1031
1032 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1033}
1034
1035static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file,
1036 const char __user *user_buf,
1037 size_t count, loff_t *ppos)
1038{
1039 struct iwl_priv *priv = file->private_data;
1040 char buf[8];
1041 int buf_size;
1042 int clear;
1043
1044 memset(buf, 0, sizeof(buf));
1045 buf_size = min(count, sizeof(buf) - 1);
1046 if (copy_from_user(buf, user_buf, buf_size))
1047 return -EFAULT;
1048 if (sscanf(buf, "%d", &clear) != 1)
1049 return -EFAULT;
1050
1051 /* make request to uCode to retrieve statistics information */
1052 mutex_lock(&priv->mutex);
1053 iwl_legacy_send_statistics_request(priv, CMD_SYNC, true);
1054 mutex_unlock(&priv->mutex);
1055
1056 return count;
1057}
1058
1059static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file,
1060 char __user *user_buf,
1061 size_t count, loff_t *ppos) {
1062
1063 struct iwl_priv *priv = file->private_data;
1064 int len = 0;
1065 char buf[20];
1066
1067 len = sprintf(buf, "0x%04X\n",
1068 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
1069 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1070}
1071
1072static ssize_t iwl_legacy_dbgfs_rxon_filter_flags_read(struct file *file,
1073 char __user *user_buf,
1074 size_t count, loff_t *ppos) {
1075
1076 struct iwl_priv *priv = file->private_data;
1077 int len = 0;
1078 char buf[20];
1079
1080 len = sprintf(buf, "0x%04X\n",
1081 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
1082 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1083}
1084
1085static ssize_t iwl_legacy_dbgfs_fh_reg_read(struct file *file,
1086 char __user *user_buf,
1087 size_t count, loff_t *ppos)
1088{
1089 struct iwl_priv *priv = file->private_data;
1090 char *buf;
1091 int pos = 0;
1092 ssize_t ret = -EFAULT;
1093
1094 if (priv->cfg->ops->lib->dump_fh) {
1095 ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
1096 if (buf) {
1097 ret = simple_read_from_buffer(user_buf,
1098 count, ppos, buf, pos);
1099 kfree(buf);
1100 }
1101 }
1102
1103 return ret;
1104}
1105
1106static ssize_t iwl_legacy_dbgfs_missed_beacon_read(struct file *file,
1107 char __user *user_buf,
1108 size_t count, loff_t *ppos) {
1109
1110 struct iwl_priv *priv = file->private_data;
1111 int pos = 0;
1112 char buf[12];
1113 const size_t bufsz = sizeof(buf);
1114
1115 pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
1116 priv->missed_beacon_threshold);
1117
1118 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1119}
1120
1121static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file,
1122 const char __user *user_buf,
1123 size_t count, loff_t *ppos)
1124{
1125 struct iwl_priv *priv = file->private_data;
1126 char buf[8];
1127 int buf_size;
1128 int missed;
1129
1130 memset(buf, 0, sizeof(buf));
1131 buf_size = min(count, sizeof(buf) - 1);
1132 if (copy_from_user(buf, user_buf, buf_size))
1133 return -EFAULT;
1134 if (sscanf(buf, "%d", &missed) != 1)
1135 return -EINVAL;
1136
1137 if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
1138 missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
1139 priv->missed_beacon_threshold =
1140 IWL_MISSED_BEACON_THRESHOLD_DEF;
1141 else
1142 priv->missed_beacon_threshold = missed;
1143
1144 return count;
1145}
1146
1147static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file,
1148 char __user *user_buf,
1149 size_t count, loff_t *ppos) {
1150
1151 struct iwl_priv *priv = file->private_data;
1152 int pos = 0;
1153 char buf[300];
1154 const size_t bufsz = sizeof(buf);
1155 struct iwl_force_reset *force_reset;
1156
1157 force_reset = &priv->force_reset;
1158
1159 pos += scnprintf(buf + pos, bufsz - pos,
1160 "\tnumber of reset request: %d\n",
1161 force_reset->reset_request_count);
1162 pos += scnprintf(buf + pos, bufsz - pos,
1163 "\tnumber of reset request success: %d\n",
1164 force_reset->reset_success_count);
1165 pos += scnprintf(buf + pos, bufsz - pos,
1166 "\tnumber of reset request reject: %d\n",
1167 force_reset->reset_reject_count);
1168 pos += scnprintf(buf + pos, bufsz - pos,
1169 "\treset duration: %lu\n",
1170 force_reset->reset_duration);
1171
1172 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1173}
1174
1175static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file,
1176 const char __user *user_buf,
1177 size_t count, loff_t *ppos) {
1178
1179 int ret;
1180 struct iwl_priv *priv = file->private_data;
1181
1182 ret = iwl_legacy_force_reset(priv, true);
1183
1184 return ret ? ret : count;
1185}
1186
1187static ssize_t iwl_legacy_dbgfs_wd_timeout_write(struct file *file,
1188 const char __user *user_buf,
1189 size_t count, loff_t *ppos) {
1190
1191 struct iwl_priv *priv = file->private_data;
1192 char buf[8];
1193 int buf_size;
1194 int timeout;
1195
1196 memset(buf, 0, sizeof(buf));
1197 buf_size = min(count, sizeof(buf) - 1);
1198 if (copy_from_user(buf, user_buf, buf_size))
1199 return -EFAULT;
1200 if (sscanf(buf, "%d", &timeout) != 1)
1201 return -EINVAL;
1202 if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
1203 timeout = IWL_DEF_WD_TIMEOUT;
1204
1205 priv->cfg->base_params->wd_timeout = timeout;
1206 iwl_legacy_setup_watchdog(priv);
1207 return count;
1208}
1209
1210DEBUGFS_READ_FILE_OPS(rx_statistics);
1211DEBUGFS_READ_FILE_OPS(tx_statistics);
1212DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
1213DEBUGFS_READ_FILE_OPS(rx_queue);
1214DEBUGFS_READ_FILE_OPS(tx_queue);
1215DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
1216DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
1217DEBUGFS_READ_FILE_OPS(ucode_general_stats);
1218DEBUGFS_READ_FILE_OPS(sensitivity);
1219DEBUGFS_READ_FILE_OPS(chain_noise);
1220DEBUGFS_READ_FILE_OPS(power_save_status);
1221DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
1222DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
1223DEBUGFS_READ_FILE_OPS(fh_reg);
1224DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
1225DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
1226DEBUGFS_READ_FILE_OPS(rxon_flags);
1227DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
1228DEBUGFS_WRITE_FILE_OPS(wd_timeout);
1229
1230/*
1231 * Create the debugfs files and directories
1232 *
1233 */
1234int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
1235{
1236 struct dentry *phyd = priv->hw->wiphy->debugfsdir;
1237 struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
1238
1239 dir_drv = debugfs_create_dir(name, phyd);
1240 if (!dir_drv)
1241 return -ENOMEM;
1242
1243 priv->debugfs_dir = dir_drv;
1244
1245 dir_data = debugfs_create_dir("data", dir_drv);
1246 if (!dir_data)
1247 goto err;
1248 dir_rf = debugfs_create_dir("rf", dir_drv);
1249 if (!dir_rf)
1250 goto err;
1251 dir_debug = debugfs_create_dir("debug", dir_drv);
1252 if (!dir_debug)
1253 goto err;
1254
1255 DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
1256 DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
1257 DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
1258 DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
1259 DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
1260 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
1261 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
1262 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
1263 DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
1264 DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
1265 DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
1266 DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
1267 DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
1268 DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
1269 DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
1270 DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
1271 DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
1272 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
1273 DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
1274 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
1275 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
1276 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
1277
1278 if (priv->cfg->base_params->sensitivity_calib_by_driver)
1279 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
1280 if (priv->cfg->base_params->chain_noise_calib_by_driver)
1281 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
1282 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
1283 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
1284 DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
1285 if (priv->cfg->base_params->sensitivity_calib_by_driver)
1286 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
1287 &priv->disable_sens_cal);
1288 if (priv->cfg->base_params->chain_noise_calib_by_driver)
1289 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
1290 &priv->disable_chain_noise_cal);
1291 DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
1292 &priv->disable_tx_power_cal);
1293 return 0;
1294
1295err:
1296 IWL_ERR(priv, "Can't create the debugfs directory\n");
1297 iwl_legacy_dbgfs_unregister(priv);
1298 return -ENOMEM;
1299}
1300EXPORT_SYMBOL(iwl_legacy_dbgfs_register);
1301
1302/**
1303 * Remove the debugfs files and directories
1304 *
1305 */
1306void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
1307{
1308 if (!priv->debugfs_dir)
1309 return;
1310
1311 debugfs_remove_recursive(priv->debugfs_dir);
1312 priv->debugfs_dir = NULL;
1313}
1314EXPORT_SYMBOL(iwl_legacy_dbgfs_unregister);
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
deleted file mode 100644
index 9c786edf56fd..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-dev.h
+++ /dev/null
@@ -1,1364 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26/*
27 * Please use this file (iwl-dev.h) for driver implementation definitions.
28 * Please use iwl-commands.h for uCode API definitions.
29 * Please use iwl-4965-hw.h for hardware-related definitions.
30 */
31
32#ifndef __iwl_legacy_dev_h__
33#define __iwl_legacy_dev_h__
34
35#include <linux/interrupt.h>
36#include <linux/pci.h> /* for struct pci_device_id */
37#include <linux/kernel.h>
38#include <linux/leds.h>
39#include <linux/wait.h>
40#include <net/ieee80211_radiotap.h>
41
42#include "iwl-eeprom.h"
43#include "iwl-csr.h"
44#include "iwl-prph.h"
45#include "iwl-fh.h"
46#include "iwl-debug.h"
47#include "iwl-4965-hw.h"
48#include "iwl-3945-hw.h"
49#include "iwl-led.h"
50#include "iwl-power.h"
51#include "iwl-legacy-rs.h"
52
53struct iwl_tx_queue;
54
55/* CT-KILL constants */
56#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
57
58/* Default noise level to report when noise measurement is not available.
59 * This may be because we're:
60 * 1) Not associated (4965, no beacon statistics being sent to driver)
61 * 2) Scanning (noise measurement does not apply to associated channel)
62 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
63 * Use default noise value of -127 ... this is below the range of measurable
64 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
65 * Also, -127 works better than 0 when averaging frames with/without
66 * noise info (e.g. averaging might be done in app); measured dBm values are
67 * always negative ... using a negative value as the default keeps all
68 * averages within an s8's (used in some apps) range of negative values. */
69#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
70
71/*
72 * RTS threshold here is total size [2347] minus 4 FCS bytes
73 * Per spec:
74 * a value of 0 means RTS on all data/management packets
75 * a value > max MSDU size means no RTS
76 * else RTS for data/management frames where MPDU is larger
77 * than RTS value.
78 */
79#define DEFAULT_RTS_THRESHOLD 2347U
80#define MIN_RTS_THRESHOLD 0U
81#define MAX_RTS_THRESHOLD 2347U
82#define MAX_MSDU_SIZE 2304U
83#define MAX_MPDU_SIZE 2346U
84#define DEFAULT_BEACON_INTERVAL 100U
85#define DEFAULT_SHORT_RETRY_LIMIT 7U
86#define DEFAULT_LONG_RETRY_LIMIT 4U
87
88struct iwl_rx_mem_buffer {
89 dma_addr_t page_dma;
90 struct page *page;
91 struct list_head list;
92};
93
94#define rxb_addr(r) page_address(r->page)
95
96/* defined below */
97struct iwl_device_cmd;
98
99struct iwl_cmd_meta {
100 /* only for SYNC commands, iff the reply skb is wanted */
101 struct iwl_host_cmd *source;
102 /*
103 * only for ASYNC commands
104 * (which is somewhat stupid -- look at iwl-sta.c for instance
105 * which duplicates a bunch of code because the callback isn't
106 * invoked for SYNC commands, if it were and its result passed
107 * through it would be simpler...)
108 */
109 void (*callback)(struct iwl_priv *priv,
110 struct iwl_device_cmd *cmd,
111 struct iwl_rx_packet *pkt);
112
113 /* The CMD_SIZE_HUGE flag bit indicates that the command
114 * structure is stored at the end of the shared queue memory. */
115 u32 flags;
116
117 DEFINE_DMA_UNMAP_ADDR(mapping);
118 DEFINE_DMA_UNMAP_LEN(len);
119};
120
121/*
122 * Generic queue structure
123 *
124 * Contains common data for Rx and Tx queues
125 */
126struct iwl_queue {
127 int n_bd; /* number of BDs in this queue */
128 int write_ptr; /* 1-st empty entry (index) host_w*/
129 int read_ptr; /* last used entry (index) host_r*/
130 /* use for monitoring and recovering the stuck queue */
131 dma_addr_t dma_addr; /* physical addr for BD's */
132 int n_window; /* safe queue window */
133 u32 id;
134 int low_mark; /* low watermark, resume queue if free
135 * space more than this */
136 int high_mark; /* high watermark, stop queue if free
137 * space less than this */
138};
139
140/* One for each TFD */
141struct iwl_tx_info {
142 struct sk_buff *skb;
143 struct iwl_rxon_context *ctx;
144};
145
146/**
147 * struct iwl_tx_queue - Tx Queue for DMA
148 * @q: generic Rx/Tx queue descriptor
149 * @bd: base of circular buffer of TFDs
150 * @cmd: array of command/TX buffer pointers
151 * @meta: array of meta data for each command/tx buffer
152 * @dma_addr_cmd: physical address of cmd/tx buffer array
153 * @txb: array of per-TFD driver data
154 * @time_stamp: time (in jiffies) of last read_ptr change
155 * @need_update: indicates need to update read/write index
156 * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
157 *
158 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
159 * descriptors) and required locking structures.
160 */
161#define TFD_TX_CMD_SLOTS 256
162#define TFD_CMD_SLOTS 32
163
164struct iwl_tx_queue {
165 struct iwl_queue q;
166 void *tfds;
167 struct iwl_device_cmd **cmd;
168 struct iwl_cmd_meta *meta;
169 struct iwl_tx_info *txb;
170 unsigned long time_stamp;
171 u8 need_update;
172 u8 sched_retry;
173 u8 active;
174 u8 swq_id;
175};
176
177#define IWL_NUM_SCAN_RATES (2)
178
179struct iwl4965_channel_tgd_info {
180 u8 type;
181 s8 max_power;
182};
183
184struct iwl4965_channel_tgh_info {
185 s64 last_radar_time;
186};
187
188#define IWL4965_MAX_RATE (33)
189
190struct iwl3945_clip_group {
191 /* maximum power level to prevent clipping for each rate, derived by
192 * us from this band's saturation power in EEPROM */
193 const s8 clip_powers[IWL_MAX_RATES];
194};
195
196/* current Tx power values to use, one for each rate for each channel.
197 * requested power is limited by:
198 * -- regulatory EEPROM limits for this channel
199 * -- hardware capabilities (clip-powers)
200 * -- spectrum management
201 * -- user preference (e.g. iwconfig)
202 * when requested power is set, base power index must also be set. */
203struct iwl3945_channel_power_info {
204 struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
205 s8 power_table_index; /* actual (compenst'd) index into gain table */
206 s8 base_power_index; /* gain index for power at factory temp. */
207 s8 requested_power; /* power (dBm) requested for this chnl/rate */
208};
209
210/* current scan Tx power values to use, one for each scan rate for each
211 * channel. */
212struct iwl3945_scan_power_info {
213 struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
214 s8 power_table_index; /* actual (compenst'd) index into gain table */
215 s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
216};
217
218/*
219 * One for each channel, holds all channel setup data
220 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
221 * with one another!
222 */
223struct iwl_channel_info {
224 struct iwl4965_channel_tgd_info tgd;
225 struct iwl4965_channel_tgh_info tgh;
226 struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
227 struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
228 * HT40 channel */
229
230 u8 channel; /* channel number */
231 u8 flags; /* flags copied from EEPROM */
232 s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
233 s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
234 s8 min_power; /* always 0 */
235 s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
236
237 u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */
238 u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
239 enum ieee80211_band band;
240
241 /* HT40 channel info */
242 s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
243 u8 ht40_flags; /* flags copied from EEPROM */
244 u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
245
246 /* Radio/DSP gain settings for each "normal" data Tx rate.
247 * These include, in addition to RF and DSP gain, a few fields for
248 * remembering/modifying gain settings (indexes). */
249 struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE];
250
251 /* Radio/DSP gain settings for each scan rate, for directed scans. */
252 struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
253};
254
255#define IWL_TX_FIFO_BK 0 /* shared */
256#define IWL_TX_FIFO_BE 1
257#define IWL_TX_FIFO_VI 2 /* shared */
258#define IWL_TX_FIFO_VO 3
259#define IWL_TX_FIFO_UNUSED -1
260
261/* Minimum number of queues. MAX_NUM is defined in hw specific files.
262 * Set the minimum to accommodate the 4 standard TX queues, 1 command
263 * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
264#define IWL_MIN_NUM_QUEUES 10
265
266#define IWL_DEFAULT_CMD_QUEUE_NUM 4
267
268#define IEEE80211_DATA_LEN 2304
269#define IEEE80211_4ADDR_LEN 30
270#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
271#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
272
273struct iwl_frame {
274 union {
275 struct ieee80211_hdr frame;
276 struct iwl_tx_beacon_cmd beacon;
277 u8 raw[IEEE80211_FRAME_LEN];
278 u8 cmd[360];
279 } u;
280 struct list_head list;
281};
282
283#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
284#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
285#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
286
287enum {
288 CMD_SYNC = 0,
289 CMD_SIZE_NORMAL = 0,
290 CMD_NO_SKB = 0,
291 CMD_SIZE_HUGE = (1 << 0),
292 CMD_ASYNC = (1 << 1),
293 CMD_WANT_SKB = (1 << 2),
294 CMD_MAPPED = (1 << 3),
295};
296
297#define DEF_CMD_PAYLOAD_SIZE 320
298
299/**
300 * struct iwl_device_cmd
301 *
302 * For allocation of the command and tx queues, this establishes the overall
303 * size of the largest command we send to uCode, except for a scan command
304 * (which is relatively huge; space is allocated separately).
305 */
306struct iwl_device_cmd {
307 struct iwl_cmd_header hdr; /* uCode API */
308 union {
309 u32 flags;
310 u8 val8;
311 u16 val16;
312 u32 val32;
313 struct iwl_tx_cmd tx;
314 u8 payload[DEF_CMD_PAYLOAD_SIZE];
315 } __packed cmd;
316} __packed;
317
318#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
319
320
321struct iwl_host_cmd {
322 const void *data;
323 unsigned long reply_page;
324 void (*callback)(struct iwl_priv *priv,
325 struct iwl_device_cmd *cmd,
326 struct iwl_rx_packet *pkt);
327 u32 flags;
328 u16 len;
329 u8 id;
330};
331
332#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
333#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
334#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
335
336/**
337 * struct iwl_rx_queue - Rx queue
338 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
339 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
340 * @read: Shared index to newest available Rx buffer
341 * @write: Shared index to oldest written Rx packet
342 * @free_count: Number of pre-allocated buffers in rx_free
343 * @rx_free: list of free SKBs for use
344 * @rx_used: List of Rx buffers with no SKB
345 * @need_update: flag to indicate we need to update read/write index
346 * @rb_stts: driver's pointer to receive buffer status
347 * @rb_stts_dma: bus address of receive buffer status
348 *
349 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
350 */
351struct iwl_rx_queue {
352 __le32 *bd;
353 dma_addr_t bd_dma;
354 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
355 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
356 u32 read;
357 u32 write;
358 u32 free_count;
359 u32 write_actual;
360 struct list_head rx_free;
361 struct list_head rx_used;
362 int need_update;
363 struct iwl_rb_status *rb_stts;
364 dma_addr_t rb_stts_dma;
365 spinlock_t lock;
366};
367
368#define IWL_SUPPORTED_RATES_IE_LEN 8
369
370#define MAX_TID_COUNT 9
371
372#define IWL_INVALID_RATE 0xFF
373#define IWL_INVALID_VALUE -1
374
375/**
376 * struct iwl_ht_agg -- aggregation status while waiting for block-ack
377 * @txq_id: Tx queue used for Tx attempt
378 * @frame_count: # frames attempted by Tx command
379 * @wait_for_ba: Expect block-ack before next Tx reply
380 * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
381 * @bitmap0: Low order bitmap, one bit for each frame pending ACK in Tx window
382 * @bitmap1: High order, one bit for each frame pending ACK in Tx window
383 * @rate_n_flags: Rate at which Tx was attempted
384 *
385 * If REPLY_TX indicates that aggregation was attempted, driver must wait
386 * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
387 * until block ack arrives.
388 */
389struct iwl_ht_agg {
390 u16 txq_id;
391 u16 frame_count;
392 u16 wait_for_ba;
393 u16 start_idx;
394 u64 bitmap;
395 u32 rate_n_flags;
396#define IWL_AGG_OFF 0
397#define IWL_AGG_ON 1
398#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
399#define IWL_EMPTYING_HW_QUEUE_DELBA 3
400 u8 state;
401};
402
403
404struct iwl_tid_data {
405 u16 seq_number; /* 4965 only */
406 u16 tfds_in_queue;
407 struct iwl_ht_agg agg;
408};
409
410struct iwl_hw_key {
411 u32 cipher;
412 int keylen;
413 u8 keyidx;
414 u8 key[32];
415};
416
417union iwl_ht_rate_supp {
418 u16 rates;
419 struct {
420 u8 siso_rate;
421 u8 mimo_rate;
422 };
423};
424
425#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
426#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
427#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
428#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
429#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
430#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
431#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
432
433/*
434 * Maximal MPDU density for TX aggregation
435 * 4 - 2us density
436 * 5 - 4us density
437 * 6 - 8us density
438 * 7 - 16us density
439 */
440#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
441#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
442#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
443#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
444#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
445#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
446#define CFG_HT_MPDU_DENSITY_MIN (0x1)
447
448struct iwl_ht_config {
449 bool single_chain_sufficient;
450 enum ieee80211_smps_mode smps; /* current smps mode */
451};
452
453/* QoS structures */
454struct iwl_qos_info {
455 int qos_active;
456 struct iwl_qosparam_cmd def_qos_parm;
457};
458
459/*
460 * Structure should be accessed with sta_lock held. When station addition
461 * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
462 * the commands (iwl_legacy_addsta_cmd and iwl_link_quality_cmd) without
463 * sta_lock held.
464 */
465struct iwl_station_entry {
466 struct iwl_legacy_addsta_cmd sta;
467 struct iwl_tid_data tid[MAX_TID_COUNT];
468 u8 used, ctxid;
469 struct iwl_hw_key keyinfo;
470 struct iwl_link_quality_cmd *lq;
471};
472
473struct iwl_station_priv_common {
474 struct iwl_rxon_context *ctx;
475 u8 sta_id;
476};
477
478/*
479 * iwl_station_priv: Driver's private station information
480 *
481 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
482 * in the structure for use by driver. This structure is places in that
483 * space.
484 *
485 * The common struct MUST be first because it is shared between
486 * 3945 and 4965!
487 */
488struct iwl_station_priv {
489 struct iwl_station_priv_common common;
490 struct iwl_lq_sta lq_sta;
491 atomic_t pending_frames;
492 bool client;
493 bool asleep;
494};
495
496/**
497 * struct iwl_vif_priv - driver's private per-interface information
498 *
499 * When mac80211 allocates a virtual interface, it can allocate
500 * space for us to put data into.
501 */
502struct iwl_vif_priv {
503 struct iwl_rxon_context *ctx;
504 u8 ibss_bssid_sta_id;
505};
506
507/* one for each uCode image (inst/data, boot/init/runtime) */
508struct fw_desc {
509 void *v_addr; /* access by driver */
510 dma_addr_t p_addr; /* access by card's busmaster DMA */
511 u32 len; /* bytes */
512};
513
514/* uCode file layout */
515struct iwl_ucode_header {
516 __le32 ver; /* major/minor/API/serial */
517 struct {
518 __le32 inst_size; /* bytes of runtime code */
519 __le32 data_size; /* bytes of runtime data */
520 __le32 init_size; /* bytes of init code */
521 __le32 init_data_size; /* bytes of init data */
522 __le32 boot_size; /* bytes of bootstrap code */
523 u8 data[0]; /* in same order as sizes */
524 } v1;
525};
526
527struct iwl4965_ibss_seq {
528 u8 mac[ETH_ALEN];
529 u16 seq_num;
530 u16 frag_num;
531 unsigned long packet_time;
532 struct list_head list;
533};
534
535struct iwl_sensitivity_ranges {
536 u16 min_nrg_cck;
537 u16 max_nrg_cck;
538
539 u16 nrg_th_cck;
540 u16 nrg_th_ofdm;
541
542 u16 auto_corr_min_ofdm;
543 u16 auto_corr_min_ofdm_mrc;
544 u16 auto_corr_min_ofdm_x1;
545 u16 auto_corr_min_ofdm_mrc_x1;
546
547 u16 auto_corr_max_ofdm;
548 u16 auto_corr_max_ofdm_mrc;
549 u16 auto_corr_max_ofdm_x1;
550 u16 auto_corr_max_ofdm_mrc_x1;
551
552 u16 auto_corr_max_cck;
553 u16 auto_corr_max_cck_mrc;
554 u16 auto_corr_min_cck;
555 u16 auto_corr_min_cck_mrc;
556
557 u16 barker_corr_th_min;
558 u16 barker_corr_th_min_mrc;
559 u16 nrg_th_cca;
560};
561
562
563#define KELVIN_TO_CELSIUS(x) ((x)-273)
564#define CELSIUS_TO_KELVIN(x) ((x)+273)
565
566
567/**
568 * struct iwl_hw_params
569 * @max_txq_num: Max # Tx queues supported
570 * @dma_chnl_num: Number of Tx DMA/FIFO channels
571 * @scd_bc_tbls_size: size of scheduler byte count tables
572 * @tfd_size: TFD size
573 * @tx/rx_chains_num: Number of TX/RX chains
574 * @valid_tx/rx_ant: usable antennas
575 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
576 * @max_rxq_log: Log-base-2 of max_rxq_size
577 * @rx_page_order: Rx buffer page order
578 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
579 * @max_stations:
580 * @ht40_channel: is 40MHz width possible in band 2.4
581 * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ)
582 * @sw_crypto: 0 for hw, 1 for sw
583 * @max_xxx_size: for ucode uses
584 * @ct_kill_threshold: temperature threshold
585 * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
586 * @struct iwl_sensitivity_ranges: range of sensitivity values
587 */
588struct iwl_hw_params {
589 u8 max_txq_num;
590 u8 dma_chnl_num;
591 u16 scd_bc_tbls_size;
592 u32 tfd_size;
593 u8 tx_chains_num;
594 u8 rx_chains_num;
595 u8 valid_tx_ant;
596 u8 valid_rx_ant;
597 u16 max_rxq_size;
598 u16 max_rxq_log;
599 u32 rx_page_order;
600 u32 rx_wrt_ptr_reg;
601 u8 max_stations;
602 u8 ht40_channel;
603 u8 max_beacon_itrvl; /* in 1024 ms */
604 u32 max_inst_size;
605 u32 max_data_size;
606 u32 max_bsm_size;
607 u32 ct_kill_threshold; /* value in hw-dependent units */
608 u16 beacon_time_tsf_bits;
609 const struct iwl_sensitivity_ranges *sens;
610};
611
612
613/******************************************************************************
614 *
615 * Functions implemented in core module which are forward declared here
616 * for use by iwl-[4-5].c
617 *
618 * NOTE: The implementation of these functions are not hardware specific
619 * which is why they are in the core module files.
620 *
621 * Naming convention --
622 * iwl_ <-- Is part of iwlwifi
623 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
624 * iwl4965_bg_ <-- Called from work queue context
625 * iwl4965_mac_ <-- mac80211 callback
626 *
627 ****************************************************************************/
628extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
629extern const u8 iwlegacy_bcast_addr[ETH_ALEN];
630extern int iwl_legacy_queue_space(const struct iwl_queue *q);
631static inline int iwl_legacy_queue_used(const struct iwl_queue *q, int i)
632{
633 return q->write_ptr >= q->read_ptr ?
634 (i >= q->read_ptr && i < q->write_ptr) :
635 !(i < q->read_ptr && i >= q->write_ptr);
636}
637
638
639static inline u8 iwl_legacy_get_cmd_index(struct iwl_queue *q, u32 index,
640 int is_huge)
641{
642 /*
643 * This is for init calibration result and scan command which
644 * required buffer > TFD_MAX_PAYLOAD_SIZE,
645 * the big buffer at end of command array
646 */
647 if (is_huge)
648 return q->n_window; /* must be power of 2 */
649
650 /* Otherwise, use normal size buffers */
651 return index & (q->n_window - 1);
652}
653
654
/* A DMA allocation: bus address for the device, CPU virtual address,
 * and the length of the region in bytes. */
struct iwl_dma_ptr {
	dma_addr_t dma;		/* bus/device address */
	void *addr;		/* kernel virtual address */
	size_t size;		/* length in bytes */
};
660
/* HT operation modes */
#define IWL_OPERATION_MODE_AUTO     0
#define IWL_OPERATION_MODE_HT_ONLY  1
#define IWL_OPERATION_MODE_MIXED    2
#define IWL_OPERATION_MODE_20MHZ    3

/* Per-frame overhead accounted for in TX byte counts, in bytes */
#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/* Sentinel for an invalid/unknown EEPROM voltage reading */
#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000

/* Sensitivity and chain noise calibration */
#define INITIALIZATION_VALUE		0xFFFF
/* number of beacons to accumulate before committing chain-noise results */
#define IWL4965_CAL_NUM_BEACONS		20
#define IWL_CAL_NUM_BEACONS		16
#define MAXIMUM_ALLOWED_PATHLOSS	15

#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3

/* acceptable false-alarm counts per beacon period (OFDM / CCK) */
#define MAX_FA_OFDM  50
#define MIN_FA_OFDM  5
#define MAX_FA_CCK   50
#define MIN_FA_CCK   5

/* step sizes used when nudging auto-correlation thresholds */
#define AUTO_CORR_STEP_OFDM       1

#define AUTO_CORR_STEP_CCK     3
#define AUTO_CORR_MAX_TH_CCK   160

/* energy-detect (NRG) tuning constants */
#define NRG_DIFF               2
#define NRG_STEP_CCK           2
#define NRG_MARGIN             8
#define MAX_NUMBER_CCK_NO_FA 100

#define AUTO_CORR_CCK_MIN_VAL_DEF    (125)

/* RX chain indices */
#define CHAIN_A             0
#define CHAIN_B             1
#define CHAIN_C             2
#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
#define ALL_BAND_FILTER			0xFF00
#define IN_BAND_FILTER			0xFF
#define MIN_AVERAGE_NOISE_MAX_VALUE	0xFFFFFFFF

/* number of silence-RSSI samples kept for the energy calibration */
#define NRG_NUM_PREV_STAT_L     20
#define NUM_RX_CHAINS           3
706
/* Classification of the measured false-alarm rate (sensitivity calib) */
enum iwl4965_false_alarm_state {
	IWL_FA_TOO_MANY = 0,
	IWL_FA_TOO_FEW = 1,
	IWL_FA_GOOD_RANGE = 2,
};

/* Progress of the chain-noise (differential RX gain) calibration */
enum iwl4965_chain_noise_state {
	IWL_CHAIN_NOISE_ALIVE = 0,  /* must be 0 */
	IWL_CHAIN_NOISE_ACCUMULATE,
	IWL_CHAIN_NOISE_CALIBRATED,
	IWL_CHAIN_NOISE_DONE,
};

/* Whether runtime calibration is enabled at all */
enum iwl4965_calib_enabled_state {
	IWL_CALIB_DISABLED = 0,  /* must be 0 */
	IWL_CALIB_ENABLED = 1,
};
724
/*
 * enum iwl_calib
 * defines the order in which results of initial calibrations
 * should be sent to the runtime uCode
 */
enum iwl_calib {
	IWL_CALIB_MAX,	/* no initial calibrations defined for iwlegacy */
};

/* Opaque calibration results: raw buffer forwarded to the runtime uCode */
struct iwl_calib_result {
	void *buf;		/* result payload (opaque) */
	size_t buf_len;		/* payload length in bytes */
};

/* Which uCode image is currently loaded/running */
enum ucode_type {
	UCODE_NONE = 0,
	UCODE_INIT,	/* initialization image */
	UCODE_RT	/* runtime image */
};
745
746/* Sensitivity calib data */
747struct iwl_sensitivity_data {
748 u32 auto_corr_ofdm;
749 u32 auto_corr_ofdm_mrc;
750 u32 auto_corr_ofdm_x1;
751 u32 auto_corr_ofdm_mrc_x1;
752 u32 auto_corr_cck;
753 u32 auto_corr_cck_mrc;
754
755 u32 last_bad_plcp_cnt_ofdm;
756 u32 last_fa_cnt_ofdm;
757 u32 last_bad_plcp_cnt_cck;
758 u32 last_fa_cnt_cck;
759
760 u32 nrg_curr_state;
761 u32 nrg_prev_state;
762 u32 nrg_value[10];
763 u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
764 u32 nrg_silence_ref;
765 u32 nrg_energy_idx;
766 u32 nrg_silence_idx;
767 u32 nrg_th_cck;
768 s32 nrg_auto_corr_silence_diff;
769 u32 num_in_cck_no_fa;
770 u32 nrg_th_ofdm;
771
772 u16 barker_corr_th_min;
773 u16 barker_corr_th_min_mrc;
774 u16 nrg_th_cca;
775};
776
777/* Chain noise (differential Rx gain) calib data */
778struct iwl_chain_noise_data {
779 u32 active_chains;
780 u32 chain_noise_a;
781 u32 chain_noise_b;
782 u32 chain_noise_c;
783 u32 chain_signal_a;
784 u32 chain_signal_b;
785 u32 chain_signal_c;
786 u16 beacon_count;
787 u8 disconn_array[NUM_RX_CHAINS];
788 u8 delta_gain_code[NUM_RX_CHAINS];
789 u8 radio_write;
790 u8 state;
791};
792
/* EEPROM access semaphore: per-attempt timeout and retry budget */
#define	EEPROM_SEM_TIMEOUT 10		/* milliseconds */
#define EEPROM_SEM_RETRY_LIMIT 1000	/* number of attempts (not time) */

/* debugfs TX/RX traffic log dimensions (entries x bytes per entry) */
#define IWL_TRAFFIC_ENTRIES	(256)
#define IWL_TRAFFIC_ENTRY_SIZE  (64)

/* bits for iwl_priv.measurement_status */
enum {
	MEASUREMENT_READY = (1 << 0),
	MEASUREMENT_ACTIVE = (1 << 1),
};
803
804/* interrupt statistics */
805struct isr_statistics {
806 u32 hw;
807 u32 sw;
808 u32 err_code;
809 u32 sch;
810 u32 alive;
811 u32 rfkill;
812 u32 ctkill;
813 u32 wakeup;
814 u32 rx;
815 u32 rx_handlers[REPLY_MAX];
816 u32 tx;
817 u32 unhandled;
818};
819
820/* management statistics */
/* indices into traffic_stats.mgmt[], one per management frame subtype */
enum iwl_mgmt_stats {
	MANAGEMENT_ASSOC_REQ = 0,
	MANAGEMENT_ASSOC_RESP,
	MANAGEMENT_REASSOC_REQ,
	MANAGEMENT_REASSOC_RESP,
	MANAGEMENT_PROBE_REQ,
	MANAGEMENT_PROBE_RESP,
	MANAGEMENT_BEACON,
	MANAGEMENT_ATIM,
	MANAGEMENT_DISASSOC,
	MANAGEMENT_AUTH,
	MANAGEMENT_DEAUTH,
	MANAGEMENT_ACTION,
	MANAGEMENT_MAX,		/* array size, keep last */
};
/* indices into traffic_stats.ctrl[], one per control frame subtype */
enum iwl_ctrl_stats {
	CONTROL_BACK_REQ = 0,
	CONTROL_BACK,
	CONTROL_PSPOLL,
	CONTROL_RTS,
	CONTROL_CTS,
	CONTROL_ACK,
	CONTROL_CFEND,
	CONTROL_CFENDACK,
	CONTROL_MAX,		/* array size, keep last */
};
848
/* per-direction frame counters; only compiled in when debugfs support
 * is enabled (the struct is empty otherwise) */
struct traffic_stats {
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
	u32 mgmt[MANAGEMENT_MAX];	/* per management-subtype counts */
	u32 ctrl[CONTROL_MAX];		/* per control-subtype counts */
	u32 data_cnt;			/* data frame count */
	u64 data_bytes;			/* data byte total */
#endif
};
857
/*
 * host interrupt timeout value
 * used with setting interrupt coalescing timer
 * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
 *
 * default interrupt coalescing timer is 64 x 32 = 2048 usecs
 * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
 */
#define IWL_HOST_INT_TIMEOUT_MAX	(0xFF)
#define IWL_HOST_INT_TIMEOUT_DEF	(0x40)
#define IWL_HOST_INT_TIMEOUT_MIN	(0x0)
#define IWL_HOST_INT_CALIB_TIMEOUT_MAX	(0xFF)
#define IWL_HOST_INT_CALIB_TIMEOUT_DEF	(0x10)
#define IWL_HOST_INT_CALIB_TIMEOUT_MIN	(0x0)

/* minimum interval between forced firmware reloads, in jiffies */
#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)

/* TX queue watchdog timeouts in mSecs */
#define IWL_DEF_WD_TIMEOUT	(2000)
#define IWL_LONG_WD_TIMEOUT	(10000)
#define IWL_MAX_WD_TIMEOUT	(120000)
879
/* Bookkeeping for forced device resets (rate limiting and statistics) */
struct iwl_force_reset {
	int reset_request_count;	/* total reset requests seen */
	int reset_success_count;	/* requests that were carried out */
	int reset_reject_count;		/* requests rejected (e.g. too soon) */
	unsigned long reset_duration;	/* min gap between resets, jiffies */
	unsigned long last_force_reset_jiffies;	/* time of last reset */
};
887
/* extend beacon time format bit shifting */
/*
 * for _3945 devices
 * bits 31:24 - extended
 * bits 23:0 - interval
 */
#define IWL3945_EXT_BEACON_TIME_POS	24
/*
 * for _4965 devices
 * bits 31:22 - extended
 * bits 21:0 - interval
 */
#define IWL4965_EXT_BEACON_TIME_POS	22

/* RXON context identifiers; iwlegacy supports only the BSS context */
enum iwl_rxon_context_id {
	IWL_RXON_CTX_BSS,

	NUM_IWL_RXON_CTX	/* context count, keep last */
};
907
/*
 * Per-RXON-context state: one virtual interface's view of the device
 * (RXON command images, timing, QoS, WEP keys and HT configuration).
 */
struct iwl_rxon_context {
	struct ieee80211_vif *vif;	/* mac80211 interface bound to this ctx */

	/* per-AC FIFO/queue mapping tables */
	const u8 *ac_to_fifo;
	const u8 *ac_to_queue;
	u8 mcast_queue;

	/*
	 * We could use the vif to indicate active, but we
	 * also need it to be active during disabling when
	 * we already removed the vif for type setting.
	 */
	bool always_active, is_active;

	bool ht_need_multiple_chains;

	enum iwl_rxon_context_id ctxid;

	/* interface type bitmasks this context may host */
	u32 interface_modes, exclusive_interface_modes;
	u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;

	/*
	 * We declare this const so it can only be
	 * changed via explicit cast within the
	 * routines that actually update the physical
	 * hardware.
	 */
	const struct iwl_legacy_rxon_cmd active;	/* RXON as programmed in hw */
	struct iwl_legacy_rxon_cmd staging;		/* RXON being prepared */

	struct iwl_rxon_time_cmd timing;

	struct iwl_qos_info qos_data;

	u8 bcast_sta_id, ap_sta_id;

	/* host command IDs used for this context */
	u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
	u8 qos_cmd;
	u8 wep_key_cmd;

	struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
	u8 key_mapping_keys;

	__le32 station_flags;

	/* HT state for this context */
	struct {
		bool non_gf_sta_present;	/* non-greenfield STA present */
		u8 protection;			/* HT protection mode */
		bool enabled, is_40mhz;
		u8 extension_chan_offset;
	} ht;
};
960
/*
 * iwl_priv - top-level per-device driver state for iwlegacy (3945/4965).
 *
 * One instance per PCI device. Aggregates mac80211 handles, uCode images,
 * calibration state, TX/RX queues, locks and deferred work. The union at
 * the end holds the 3945- or 4965-specific portion.
 */
struct iwl_priv {

	/* ieee device used by generic ieee processing code */
	struct ieee80211_hw *hw;
	struct ieee80211_channel *ieee_channels;
	struct ieee80211_rate *ieee_rates;
	struct iwl_cfg *cfg;		/* device-model configuration */

	/* temporary frame storage list */
	struct list_head free_frames;
	int frames_count;

	enum ieee80211_band band;
	int alloc_rxb_page;	/* outstanding RX pages; decremented by
				 * iwl_legacy_free_pages() below */

	/* dispatch table: one handler per firmware notification ID */
	void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
				       struct iwl_rx_mem_buffer *rxb);

	struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];

	/* spectrum measurement report caching */
	struct iwl_spectrum_notification measure_report;
	u8 measurement_status;		/* MEASUREMENT_READY / _ACTIVE bits */

	/* ucode beacon time */
	u32 ucode_beacon_time;
	int missed_beacon_threshold;

	/* track IBSS manager (last beacon) status */
	u32 ibss_manager;

	/* force reset */
	struct iwl_force_reset force_reset;

	/* we allocate array of iwl_channel_info for NIC's valid channels.
	 * Access via channel # using indirect index array */
	struct iwl_channel_info *channel_info;	/* channel info array */
	u8 channel_count;	/* # of channels */

	/* thermal calibration */
	s32 temperature;	/* degrees Kelvin */
	s32 last_temperature;

	/* init calibration results */
	struct iwl_calib_result calib_results[IWL_CALIB_MAX];

	/* Scan related variables */
	unsigned long scan_start;
	unsigned long scan_start_tsf;
	void *scan_cmd;
	enum ieee80211_band scan_band;
	struct cfg80211_scan_request *scan_request;
	struct ieee80211_vif *scan_vif;
	u8 scan_tx_ant[IEEE80211_NUM_BANDS];
	u8 mgmt_tx_ant;

	/* spinlock */
	spinlock_t lock;	/* protect general shared data */
	spinlock_t hcmd_lock;	/* protect hcmd */
	spinlock_t reg_lock;	/* protect hw register access */
	struct mutex mutex;

	/* basic pci-network driver stuff */
	struct pci_dev *pci_dev;

	/* pci hardware address support */
	void __iomem *hw_base;
	u32  hw_rev;
	u32  hw_wa_rev;
	u8   rev_id;

	/* microcode/device supports multiple contexts */
	u8 valid_contexts;	/* bitmask, tested in for_each_context() */

	/* command queue number */
	u8 cmd_queue;

	/* max number of station keys */
	u8 sta_key_max_num;

	/* EEPROM MAC addresses */
	struct mac_address addresses[1];

	/* uCode images, save to reload in case of failure */
	int fw_index;			/* firmware we're trying to load */
	u32 ucode_ver;			/* version of ucode, copy of
					   iwl_ucode.ver */
	struct fw_desc ucode_code;	/* runtime inst */
	struct fw_desc ucode_data;	/* runtime data original */
	struct fw_desc ucode_data_backup;	/* runtime data save/restore */
	struct fw_desc ucode_init;	/* initialization inst */
	struct fw_desc ucode_init_data;	/* initialization data */
	struct fw_desc ucode_boot;	/* bootstrap inst */
	enum ucode_type ucode_type;	/* which image is currently running */
	u8 ucode_write_complete;	/* the image write is complete */
	char firmware_name[25];

	struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];

	__le16 switch_channel;

	/* 1st responses from initialize and runtime uCode images.
	 * _4965's initialize alive response contains some calibration data. */
	struct iwl_init_alive_resp card_alive_init;
	struct iwl_alive_resp card_alive;

	u16 active_rate;

	u8 start_calib;
	struct iwl_sensitivity_data sensitivity_data;
	struct iwl_chain_noise_data chain_noise_data;
	__le16 sensitivity_tbl[HD_TABLE_SIZE];

	struct iwl_ht_config current_ht_config;

	/* Rate scaling data */
	u8 retry_rate;

	wait_queue_head_t wait_command_queue;

	int activity_timer_active;

	/* Rx and Tx DMA processing queues */
	struct iwl_rx_queue rxq;
	struct iwl_tx_queue *txq;
	unsigned long txq_ctx_active_msk;	/* bit N set = TX queue N active;
						 * see iwl_txq_ctx_activate() */
	struct iwl_dma_ptr kw;	/* keep warm address */
	struct iwl_dma_ptr scd_bc_tbls;

	u32 scd_base_addr;	/* scheduler sram base address */

	unsigned long status;

	/* counts mgmt, ctl, and data packets */
	struct traffic_stats tx_stats;
	struct traffic_stats rx_stats;

	/* counts interrupts */
	struct isr_statistics isr_stats;

	struct iwl_power_mgr power_data;

	/* context information */
	u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */

	/* station table variables */

	/* Note: if lock and sta_lock are needed, lock must be acquired first */
	spinlock_t sta_lock;
	int num_stations;
	struct iwl_station_entry stations[IWL_STATION_COUNT];
	unsigned long ucode_key_table;

	/* queue refcounts */
#define IWL_MAX_HW_QUEUES	32
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	/* for each AC */
	atomic_t queue_stop_count[4];

	/* Indication if ieee80211_ops->open has been called */
	u8 is_open;

	u8 mac80211_registered;

	/* eeprom -- this is in the card's little endian byte order */
	u8 *eeprom;
	struct iwl_eeprom_calib_info *calib_info;

	enum nl80211_iftype iw_mode;

	/* Last Rx'd beacon timestamp */
	u64 timestamp;

	/* device-generation specific state: exactly one member is used,
	 * depending on whether a 3945 or a 4965 is driven */
	union {
#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
		struct {
			void *shared_virt;
			dma_addr_t shared_phys;

			struct delayed_work thermal_periodic;
			struct delayed_work rfkill_poll;

			struct iwl3945_notif_statistics statistics;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
			struct iwl3945_notif_statistics accum_statistics;
			struct iwl3945_notif_statistics delta_statistics;
			struct iwl3945_notif_statistics max_delta;
#endif

			u32 sta_supp_rates;
			int last_rx_rssi; /* From Rx packet statistics */

			/* Rx'd packet timing information */
			u32 last_beacon_time;
			u64 last_tsf;

			/*
			 * each calibration channel group in the
			 * EEPROM has a derived clip setting for
			 * each rate.
			 */
			const struct iwl3945_clip_group clip_groups[5];

		} _3945;
#endif
#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
		struct {
			struct iwl_rx_phy_res last_phy_res;
			bool last_phy_res_valid;

			struct completion firmware_loading_complete;

			/*
			 * chain noise reset and gain commands are the
			 * two extra calibration commands follows the standard
			 * phy calibration commands
			 */
			u8 phy_calib_chain_noise_reset_cmd;
			u8 phy_calib_chain_noise_gain_cmd;

			struct iwl_notif_statistics statistics;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
			struct iwl_notif_statistics accum_statistics;
			struct iwl_notif_statistics delta_statistics;
			struct iwl_notif_statistics max_delta;
#endif

		} _4965;
#endif
	};

	struct iwl_hw_params hw_params;

	u32 inta_mask;	/* NOTE(review): presumably the enabled-interrupt
			 * mask written to the device — confirm in iwl-core */

	struct workqueue_struct *workqueue;

	/* deferred work items */
	struct work_struct restart;
	struct work_struct scan_completed;
	struct work_struct rx_replenish;
	struct work_struct abort_scan;

	struct iwl_rxon_context *beacon_ctx;
	struct sk_buff *beacon_skb;

	struct work_struct tx_flush;

	struct tasklet_struct irq_tasklet;

	struct delayed_work init_alive_start;
	struct delayed_work alive_start;
	struct delayed_work scan_check;

	/* TX Power */
	s8 tx_power_user_lmt;
	s8 tx_power_device_lmt;
	s8 tx_power_next;


#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	/* debugging info */
	u32 debug_level; /* per device debugging will override global
			    iwlegacy_debug_level if set */
#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
	/* debugfs */
	u16 tx_traffic_idx;
	u16 rx_traffic_idx;
	u8 *tx_traffic;
	u8 *rx_traffic;
	struct dentry *debugfs_dir;
	u32 dbgfs_sram_offset, dbgfs_sram_len;
	bool disable_ht40;
#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */

	struct work_struct txpower_work;
	/* calibration kill switches (non-zero disables the calibration) */
	u32 disable_sens_cal;
	u32 disable_chain_noise_cal;
	u32 disable_tx_power_cal;
	struct work_struct run_time_calib_work;
	struct timer_list statistics_periodic;
	struct timer_list watchdog;
	bool hw_ready;

	/* LED support */
	struct led_classdev led;
	unsigned long blink_on, blink_off;
	bool led_registered;
}; /*iwl_priv */
1249
/* Mark TX queue @txq_id active in the context bitmask (atomic). */
static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
{
	set_bit(txq_id, &priv->txq_ctx_active_msk);
}

/* Mark TX queue @txq_id inactive in the context bitmask (atomic). */
static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
{
	clear_bit(txq_id, &priv->txq_ctx_active_msk);
}
1259
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/*
 * iwl_legacy_get_debug_level: Return active debug level for device
 *
 * Using sysfs it is possible to set per device debug level. This debug
 * level will be used if set, otherwise the global debug level which can be
 * set via module parameter is used.
 */
static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
{
	if (priv->debug_level)
		return priv->debug_level;
	else
		return iwlegacy_debug_level;
}
#else
/* Without CONFIG_IWLWIFI_LEGACY_DEBUG there is no per-device level;
 * always fall back to the global module-parameter value. */
static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
{
	return iwlegacy_debug_level;
}
#endif
1281
1282
1283static inline struct ieee80211_hdr *
1284iwl_legacy_tx_queue_get_hdr(struct iwl_priv *priv,
1285 int txq_id, int idx)
1286{
1287 if (priv->txq[txq_id].txb[idx].skb)
1288 return (struct ieee80211_hdr *)priv->txq[txq_id].
1289 txb[idx].skb->data;
1290 return NULL;
1291}
1292
1293static inline struct iwl_rxon_context *
1294iwl_legacy_rxon_ctx_from_vif(struct ieee80211_vif *vif)
1295{
1296 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1297
1298 return vif_priv->ctx;
1299}
1300
/* Iterate @ctx over every RXON context whose bit is set in
 * priv->valid_contexts (skips contexts the microcode doesn't support). */
#define for_each_context(priv, ctx)				\
	for (ctx = &priv->contexts[IWL_RXON_CTX_BSS];		\
	     ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++)	\
		if (priv->valid_contexts & BIT(ctx->ctxid))
1305
1306static inline int iwl_legacy_is_associated(struct iwl_priv *priv,
1307 enum iwl_rxon_context_id ctxid)
1308{
1309 return (priv->contexts[ctxid].active.filter_flags &
1310 RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1311}
1312
/* 1 if any context is associated; iwlegacy has only the BSS context,
 * so this reduces to checking IWL_RXON_CTX_BSS. */
static inline int iwl_legacy_is_any_associated(struct iwl_priv *priv)
{
	return iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
}
1317
1318static inline int iwl_legacy_is_associated_ctx(struct iwl_rxon_context *ctx)
1319{
1320 return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1321}
1322
1323static inline int iwl_legacy_is_channel_valid(const struct iwl_channel_info *ch_info)
1324{
1325 if (ch_info == NULL)
1326 return 0;
1327 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
1328}
1329
1330static inline int iwl_legacy_is_channel_radar(const struct iwl_channel_info *ch_info)
1331{
1332 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
1333}
1334
/* 1 if the channel is in the 5 GHz ("A") band, 0 for 2.4 GHz. */
static inline u8 iwl_legacy_is_channel_a_band(const struct iwl_channel_info *ch_info)
{
	return ch_info->band == IEEE80211_BAND_5GHZ;
}
1339
1340static inline int
1341iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
1342{
1343 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
1344}
1345
1346static inline int
1347iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch)
1348{
1349 return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
1350}
1351
/* Free an RX buffer given as a struct page pointer and account for it
 * in the outstanding RX page counter. */
static inline void
__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
{
	__free_pages(page, priv->hw_params.rx_page_order);
	priv->alloc_rxb_page--;
}

/* Same as __iwl_legacy_free_pages() but takes the page as a kernel
 * virtual address (as returned by __get_free_pages). */
static inline void iwl_legacy_free_pages(struct iwl_priv *priv, unsigned long page)
{
	free_pages(page, priv->hw_params.rx_page_order);
	priv->alloc_rxb_page--;
}
1364#endif /* __iwl_legacy_dev_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
deleted file mode 100644
index acec99197ce0..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-devtrace.c
+++ /dev/null
@@ -1,42 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/module.h>
28
29/* sparse doesn't like tracepoint macros */
30#ifndef __CHECKER__
31#include "iwl-dev.h"
32
33#define CREATE_TRACE_POINTS
34#include "iwl-devtrace.h"
35
36EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8);
37EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32);
38EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32);
39EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx);
40EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx);
41EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error);
42#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
deleted file mode 100644
index a443725ba6be..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-devtrace.h
+++ /dev/null
@@ -1,210 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
28#define __IWLWIFI_LEGACY_DEVICE_TRACE
29
30#include <linux/tracepoint.h>
31
32#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__)
33#undef TRACE_EVENT
34#define TRACE_EVENT(name, proto, ...) \
35static inline void trace_ ## name(proto) {}
36#endif
37
38
39#define PRIV_ENTRY __field(struct iwl_priv *, priv)
40#define PRIV_ASSIGN (__entry->priv = priv)
41
42#undef TRACE_SYSTEM
43#define TRACE_SYSTEM iwlwifi_legacy_io
44
45TRACE_EVENT(iwlwifi_legacy_dev_ioread32,
46 TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
47 TP_ARGS(priv, offs, val),
48 TP_STRUCT__entry(
49 PRIV_ENTRY
50 __field(u32, offs)
51 __field(u32, val)
52 ),
53 TP_fast_assign(
54 PRIV_ASSIGN;
55 __entry->offs = offs;
56 __entry->val = val;
57 ),
58 TP_printk("[%p] read io[%#x] = %#x", __entry->priv,
59 __entry->offs, __entry->val)
60);
61
62TRACE_EVENT(iwlwifi_legacy_dev_iowrite8,
63 TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
64 TP_ARGS(priv, offs, val),
65 TP_STRUCT__entry(
66 PRIV_ENTRY
67 __field(u32, offs)
68 __field(u8, val)
69 ),
70 TP_fast_assign(
71 PRIV_ASSIGN;
72 __entry->offs = offs;
73 __entry->val = val;
74 ),
75 TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
76 __entry->offs, __entry->val)
77);
78
79TRACE_EVENT(iwlwifi_legacy_dev_iowrite32,
80 TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
81 TP_ARGS(priv, offs, val),
82 TP_STRUCT__entry(
83 PRIV_ENTRY
84 __field(u32, offs)
85 __field(u32, val)
86 ),
87 TP_fast_assign(
88 PRIV_ASSIGN;
89 __entry->offs = offs;
90 __entry->val = val;
91 ),
92 TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
93 __entry->offs, __entry->val)
94);
95
96#undef TRACE_SYSTEM
97#define TRACE_SYSTEM iwlwifi_legacy_ucode
98
99#undef TRACE_SYSTEM
100#define TRACE_SYSTEM iwlwifi
101
102TRACE_EVENT(iwlwifi_legacy_dev_hcmd,
103 TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
104 TP_ARGS(priv, hcmd, len, flags),
105 TP_STRUCT__entry(
106 PRIV_ENTRY
107 __dynamic_array(u8, hcmd, len)
108 __field(u32, flags)
109 ),
110 TP_fast_assign(
111 PRIV_ASSIGN;
112 memcpy(__get_dynamic_array(hcmd), hcmd, len);
113 __entry->flags = flags;
114 ),
115 TP_printk("[%p] hcmd %#.2x (%ssync)",
116 __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
117 __entry->flags & CMD_ASYNC ? "a" : "")
118);
119
120TRACE_EVENT(iwlwifi_legacy_dev_rx,
121 TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
122 TP_ARGS(priv, rxbuf, len),
123 TP_STRUCT__entry(
124 PRIV_ENTRY
125 __dynamic_array(u8, rxbuf, len)
126 ),
127 TP_fast_assign(
128 PRIV_ASSIGN;
129 memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
130 ),
131 TP_printk("[%p] RX cmd %#.2x",
132 __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
133);
134
135TRACE_EVENT(iwlwifi_legacy_dev_tx,
136 TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
137 void *buf0, size_t buf0_len,
138 void *buf1, size_t buf1_len),
139 TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
140 TP_STRUCT__entry(
141 PRIV_ENTRY
142
143 __field(size_t, framelen)
144 __dynamic_array(u8, tfd, tfdlen)
145
146 /*
147 * Do not insert between or below these items,
148 * we want to keep the frame together (except
149 * for the possible padding).
150 */
151 __dynamic_array(u8, buf0, buf0_len)
152 __dynamic_array(u8, buf1, buf1_len)
153 ),
154 TP_fast_assign(
155 PRIV_ASSIGN;
156 __entry->framelen = buf0_len + buf1_len;
157 memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
158 memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
159 memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
160 ),
161 TP_printk("[%p] TX %.2x (%zu bytes)",
162 __entry->priv,
163 ((u8 *)__get_dynamic_array(buf0))[0],
164 __entry->framelen)
165);
166
167TRACE_EVENT(iwlwifi_legacy_dev_ucode_error,
168 TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
169 u32 data1, u32 data2, u32 line, u32 blink1,
170 u32 blink2, u32 ilink1, u32 ilink2),
171 TP_ARGS(priv, desc, time, data1, data2, line,
172 blink1, blink2, ilink1, ilink2),
173 TP_STRUCT__entry(
174 PRIV_ENTRY
175 __field(u32, desc)
176 __field(u32, time)
177 __field(u32, data1)
178 __field(u32, data2)
179 __field(u32, line)
180 __field(u32, blink1)
181 __field(u32, blink2)
182 __field(u32, ilink1)
183 __field(u32, ilink2)
184 ),
185 TP_fast_assign(
186 PRIV_ASSIGN;
187 __entry->desc = desc;
188 __entry->time = time;
189 __entry->data1 = data1;
190 __entry->data2 = data2;
191 __entry->line = line;
192 __entry->blink1 = blink1;
193 __entry->blink2 = blink2;
194 __entry->ilink1 = ilink1;
195 __entry->ilink2 = ilink2;
196 ),
197 TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
198 "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
199 __entry->priv, __entry->desc, __entry->time, __entry->data1,
200 __entry->data2, __entry->line, __entry->blink1,
201 __entry->blink2, __entry->ilink1, __entry->ilink2)
202);
203
204#endif /* __IWLWIFI_DEVICE_TRACE */
205
206#undef TRACE_INCLUDE_PATH
207#define TRACE_INCLUDE_PATH .
208#undef TRACE_INCLUDE_FILE
209#define TRACE_INCLUDE_FILE iwl-devtrace
210#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
deleted file mode 100644
index 5bf3f49b74ab..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-eeprom.c
+++ /dev/null
@@ -1,553 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/slab.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-commands.h"
72#include "iwl-dev.h"
73#include "iwl-core.h"
74#include "iwl-debug.h"
75#include "iwl-eeprom.h"
76#include "iwl-io.h"
77
78/************************** EEPROM BANDS ****************************
79 *
80 * The iwlegacy_eeprom_band definitions below provide the mapping from the
81 * EEPROM contents to the specific channel number supported for each
82 * band.
83 *
84 * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
85 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
86 * The specific geography and calibration information for that channel
87 * is contained in the eeprom map itself.
88 *
89 * During init, we copy the eeprom information and channel map
90 * information into priv->channel_info_24/52 and priv->channel_map_24/52
91 *
92 * channel_map_24/52 provides the index in the channel_info array for a
93 * given channel. We have to have two separate maps as there is channel
94 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
95 * band_2
96 *
97 * A value of 0xff stored in the channel_map indicates that the channel
98 * is not supported by the hardware at all.
99 *
100 * A value of 0xfe in the channel_map indicates that the channel is not
101 * valid for Tx with the current hardware. This means that
102 * while the system can tune and receive on a given channel, it may not
103 * be able to associate or transmit any frames on that
104 * channel. There is no corresponding channel information for that
105 * entry.
106 *
107 *********************************************************************/
108
109/* 2.4 GHz */
110const u8 iwlegacy_eeprom_band_1[14] = {
111 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
112};
113
114/* 5.2 GHz bands */
115static const u8 iwlegacy_eeprom_band_2[] = { /* 4915-5080MHz */
116 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
117};
118
119static const u8 iwlegacy_eeprom_band_3[] = { /* 5170-5320MHz */
120 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
121};
122
123static const u8 iwlegacy_eeprom_band_4[] = { /* 5500-5700MHz */
124 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
125};
126
127static const u8 iwlegacy_eeprom_band_5[] = { /* 5725-5825MHz */
128 145, 149, 153, 157, 161, 165
129};
130
131static const u8 iwlegacy_eeprom_band_6[] = { /* 2.4 ht40 channel */
132 1, 2, 3, 4, 5, 6, 7
133};
134
135static const u8 iwlegacy_eeprom_band_7[] = { /* 5.2 ht40 channel */
136 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
137};
138
139/******************************************************************************
140 *
141 * EEPROM related functions
142 *
143******************************************************************************/
144
145static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv)
146{
147 u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
148 int ret = 0;
149
150 IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
151 switch (gp) {
152 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
153 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
154 break;
155 default:
156 IWL_ERR(priv, "bad EEPROM signature,"
157 "EEPROM_GP=0x%08x\n", gp);
158 ret = -ENOENT;
159 break;
160 }
161 return ret;
162}
163
164const u8
165*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
166{
167 BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
168 return &priv->eeprom[offset];
169}
170EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr);
171
172u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset)
173{
174 if (!priv->eeprom)
175 return 0;
176 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
177}
178EXPORT_SYMBOL(iwl_legacy_eeprom_query16);
179
/**
 * iwl_legacy_eeprom_init - read EEPROM contents
 *
 * Load the EEPROM contents from adapter into priv->eeprom
 *
 * Allocates priv->eeprom (undone by iwl_legacy_eeprom_free()), brings
 * the device up via apm_ops.init(), verifies the EEPROM signature,
 * acquires the driver-vs-uCode access semaphore, then copies the whole
 * image one 16-bit word at a time.  Returns 0 on success or a negative
 * errno; on any failure after allocation the buffer is freed again.
 * In all cases the chip is reset via iwl_legacy_apm_stop() before
 * returning.
 *
 * NOTE: This routine uses the non-debug IO access functions.
 */
int iwl_legacy_eeprom_init(struct iwl_priv *priv)
{
	__le16 *e;
	u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
	int sz;
	int ret;
	u16 addr;

	/* allocate eeprom */
	sz = priv->cfg->base_params->eeprom_size;
	IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
	priv->eeprom = kzalloc(sz, GFP_KERNEL);
	if (!priv->eeprom) {
		ret = -ENOMEM;
		goto alloc_err;
	}
	/* view the byte buffer as an array of little-endian 16-bit words */
	e = (__le16 *)priv->eeprom;

	/* power the device up so the EEPROM can be accessed; undone by
	 * iwl_legacy_apm_stop() on the way out */
	priv->cfg->ops->lib->apm_ops.init(priv);

	ret = iwl_legacy_eeprom_verify_signature(priv);
	if (ret < 0) {
		IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		goto err;
	}

	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
	ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
	if (ret < 0) {
		IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
		ret = -ENOENT;
		goto err;
	}

	/* eeprom is an array of 16bit values */
	for (addr = 0; addr < sz; addr += sizeof(u16)) {
		u32 r;

		/* request the word at this byte address; the register
		 * takes the address shifted left by one */
		_iwl_legacy_write32(priv, CSR_EEPROM_REG,
			    CSR_EEPROM_REG_MSK_ADDR & (addr << 1));

		/* poll until the READ_VALID bit says data is ready */
		ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
					  CSR_EEPROM_REG_READ_VALID_MSK,
					  CSR_EEPROM_REG_READ_VALID_MSK,
					  IWL_EEPROM_ACCESS_TIMEOUT);
		if (ret < 0) {
			IWL_ERR(priv, "Time out reading EEPROM[%d]\n",
							addr);
			goto done;
		}
		/* the 16-bit word arrives in bits 31-16 of the register */
		r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG);
		e[addr / 2] = cpu_to_le16(r >> 16);
	}

	IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
		       "EEPROM",
		       iwl_legacy_eeprom_query16(priv, EEPROM_VERSION));

	ret = 0;
done:
	priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);

err:
	if (ret)
		iwl_legacy_eeprom_free(priv);
	/* Reset chip to save power until we load uCode during "up". */
	iwl_legacy_apm_stop(priv);
alloc_err:
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_eeprom_init);
259
260void iwl_legacy_eeprom_free(struct iwl_priv *priv)
261{
262 kfree(priv->eeprom);
263 priv->eeprom = NULL;
264}
265EXPORT_SYMBOL(iwl_legacy_eeprom_free);
266
267static void iwl_legacy_init_band_reference(const struct iwl_priv *priv,
268 int eep_band, int *eeprom_ch_count,
269 const struct iwl_eeprom_channel **eeprom_ch_info,
270 const u8 **eeprom_ch_index)
271{
272 u32 offset = priv->cfg->ops->lib->
273 eeprom_ops.regulatory_bands[eep_band - 1];
274 switch (eep_band) {
275 case 1: /* 2.4GHz band */
276 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_1);
277 *eeprom_ch_info = (struct iwl_eeprom_channel *)
278 iwl_legacy_eeprom_query_addr(priv, offset);
279 *eeprom_ch_index = iwlegacy_eeprom_band_1;
280 break;
281 case 2: /* 4.9GHz band */
282 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_2);
283 *eeprom_ch_info = (struct iwl_eeprom_channel *)
284 iwl_legacy_eeprom_query_addr(priv, offset);
285 *eeprom_ch_index = iwlegacy_eeprom_band_2;
286 break;
287 case 3: /* 5.2GHz band */
288 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_3);
289 *eeprom_ch_info = (struct iwl_eeprom_channel *)
290 iwl_legacy_eeprom_query_addr(priv, offset);
291 *eeprom_ch_index = iwlegacy_eeprom_band_3;
292 break;
293 case 4: /* 5.5GHz band */
294 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_4);
295 *eeprom_ch_info = (struct iwl_eeprom_channel *)
296 iwl_legacy_eeprom_query_addr(priv, offset);
297 *eeprom_ch_index = iwlegacy_eeprom_band_4;
298 break;
299 case 5: /* 5.7GHz band */
300 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_5);
301 *eeprom_ch_info = (struct iwl_eeprom_channel *)
302 iwl_legacy_eeprom_query_addr(priv, offset);
303 *eeprom_ch_index = iwlegacy_eeprom_band_5;
304 break;
305 case 6: /* 2.4GHz ht40 channels */
306 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_6);
307 *eeprom_ch_info = (struct iwl_eeprom_channel *)
308 iwl_legacy_eeprom_query_addr(priv, offset);
309 *eeprom_ch_index = iwlegacy_eeprom_band_6;
310 break;
311 case 7: /* 5 GHz ht40 channels */
312 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_7);
313 *eeprom_ch_info = (struct iwl_eeprom_channel *)
314 iwl_legacy_eeprom_query_addr(priv, offset);
315 *eeprom_ch_index = iwlegacy_eeprom_band_7;
316 break;
317 default:
318 BUG();
319 }
320}
321
/* Expands to the flag name plus a space when set, "" otherwise - used
 * only to build the debug printout below. */
#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
			    ? # x " " : "")
/**
 * iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
 *
 * Does not set up a command, or touch hardware.
 *
 * @channel: 20 MHz channel number of one half of the HT40 pair.
 * @eeprom_ch: HT40 regulatory flags/txpower entry from the EEPROM.
 * @clear_ht40_extension_channel: IEEE80211_CHAN_NO_HT40PLUS or
 *	IEEE80211_CHAN_NO_HT40MINUS; cleared from the channel's
 *	ht40_extension_channel restriction when the EEPROM marks the
 *	pair valid.
 *
 * Returns 0 on success, -1 when the channel is not in the driver's map.
 */
static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv,
			      enum ieee80211_band band, u16 channel,
			      const struct iwl_eeprom_channel *eeprom_ch,
			      u8 clear_ht40_extension_channel)
{
	struct iwl_channel_info *ch_info;

	/* cast away const: we update the driver's channel table in place */
	ch_info = (struct iwl_channel_info *)
			iwl_legacy_get_channel_info(priv, band, channel);

	if (!iwl_legacy_is_channel_valid(ch_info))
		return -1;

	IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
			" Ad-Hoc %ssupported\n",
			ch_info->channel,
			iwl_legacy_is_channel_a_band(ch_info) ?
			"5.2" : "2.4",
			CHECK_AND_PRINT(IBSS),
			CHECK_AND_PRINT(ACTIVE),
			CHECK_AND_PRINT(RADAR),
			CHECK_AND_PRINT(WIDE),
			CHECK_AND_PRINT(DFS),
			eeprom_ch->flags,
			eeprom_ch->max_power_avg,
			((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
			 && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
			"" : "not ");

	/* store the EEPROM's HT40 regulatory data for this channel */
	ch_info->ht40_eeprom = *eeprom_ch;
	ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
	ch_info->ht40_flags = eeprom_ch->flags;
	/* a valid EEPROM entry lifts the requested HT40 restriction */
	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
		ch_info->ht40_extension_channel &=
					~clear_ht40_extension_channel;

	return 0;
}
367
/* Expands to the flag name plus a space when set, "" otherwise - used
 * only to build the debug printout below. */
#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
			    ? # x " " : "")

/**
 * iwl_legacy_init_channel_map - Set up driver's info for all possible channels
 *
 * Allocates and fills priv->channel_info from the EEPROM regulatory
 * data: first the five 20 MHz bands, then (if the device has them) the
 * two HT40 bands.  Idempotent - returns 0 immediately when the map is
 * already built.  Returns 0 on success or -ENOMEM; freed via
 * iwl_legacy_free_channel_map().
 */
int iwl_legacy_init_channel_map(struct iwl_priv *priv)
{
	int eeprom_ch_count = 0;
	const u8 *eeprom_ch_index = NULL;
	const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
	int band, ch;
	struct iwl_channel_info *ch_info;

	if (priv->channel_count) {
		IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
		return 0;
	}

	IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");

	/* total over the five 20 MHz bands; HT40 entries modify these
	 * same channels rather than adding new ones */
	priv->channel_count =
	    ARRAY_SIZE(iwlegacy_eeprom_band_1) +
	    ARRAY_SIZE(iwlegacy_eeprom_band_2) +
	    ARRAY_SIZE(iwlegacy_eeprom_band_3) +
	    ARRAY_SIZE(iwlegacy_eeprom_band_4) +
	    ARRAY_SIZE(iwlegacy_eeprom_band_5);

	IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
			priv->channel_count);

	priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
				     priv->channel_count, GFP_KERNEL);
	if (!priv->channel_info) {
		IWL_ERR(priv, "Could not allocate channel_info\n");
		priv->channel_count = 0;
		return -ENOMEM;
	}

	ch_info = priv->channel_info;

	/* Loop through the 5 EEPROM bands adding them in order to the
	 * channel map we maintain (that contains additional information than
	 * what just in the EEPROM) */
	for (band = 1; band <= 5; band++) {

		iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
					&eeprom_ch_info, &eeprom_ch_index);

		/* Loop through each band adding each of the channels */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			ch_info->channel = eeprom_ch_index[ch];
			/* only EEPROM band 1 is 2.4 GHz */
			ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
			    IEEE80211_BAND_5GHZ;

			/* permanently store EEPROM's channel regulatory flags
			 *   and max power in channel info database. */
			ch_info->eeprom = eeprom_ch_info[ch];

			/* Copy the run-time flags so they are there even on
			 * invalid channels */
			ch_info->flags = eeprom_ch_info[ch].flags;
			/* First write that ht40 is not enabled, and then enable
			 * one by one */
			ch_info->ht40_extension_channel =
					IEEE80211_CHAN_NO_HT40;

			if (!(iwl_legacy_is_channel_valid(ch_info))) {
				IWL_DEBUG_EEPROM(priv,
					       "Ch. %d Flags %x [%sGHz] - "
					       "No traffic\n",
					       ch_info->channel,
					       ch_info->flags,
					       iwl_legacy_is_channel_a_band(ch_info) ?
					       "5.2" : "2.4");
				ch_info++;
				continue;
			}

			/* Initialize regulatory-based run-time data */
			ch_info->max_power_avg = ch_info->curr_txpow =
			    eeprom_ch_info[ch].max_power_avg;
			ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
			ch_info->min_power = 0;

			IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
				       "%s%s%s%s%s%s(0x%02x %ddBm):"
				       " Ad-Hoc %ssupported\n",
				       ch_info->channel,
				       iwl_legacy_is_channel_a_band(ch_info) ?
				       "5.2" : "2.4",
				       CHECK_AND_PRINT_I(VALID),
				       CHECK_AND_PRINT_I(IBSS),
				       CHECK_AND_PRINT_I(ACTIVE),
				       CHECK_AND_PRINT_I(RADAR),
				       CHECK_AND_PRINT_I(WIDE),
				       CHECK_AND_PRINT_I(DFS),
				       eeprom_ch_info[ch].flags,
				       eeprom_ch_info[ch].max_power_avg,
				       ((eeprom_ch_info[ch].
					 flags & EEPROM_CHANNEL_IBSS)
					&& !(eeprom_ch_info[ch].
					     flags & EEPROM_CHANNEL_RADAR))
				       ? "" : "not ");

			ch_info++;
		}
	}

	/* Check if we do have HT40 channels */
	if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
	    EEPROM_REGULATORY_BAND_NO_HT40 &&
	    priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
	    EEPROM_REGULATORY_BAND_NO_HT40)
		return 0;

	/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
	for (band = 6; band <= 7; band++) {
		enum ieee80211_band ieeeband;

		iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
					&eeprom_ch_info, &eeprom_ch_index);

		/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
		ieeeband =
			(band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;

		/* Loop through each band adding each of the channels */
		for (ch = 0; ch < eeprom_ch_count; ch++) {
			/* Set up driver's info for lower half */
			iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
						eeprom_ch_index[ch],
						&eeprom_ch_info[ch],
						IEEE80211_CHAN_NO_HT40PLUS);

			/* Set up driver's info for upper half */
			iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
						eeprom_ch_index[ch] + 4,
						&eeprom_ch_info[ch],
						IEEE80211_CHAN_NO_HT40MINUS);
		}
	}

	return 0;
}
EXPORT_SYMBOL(iwl_legacy_init_channel_map);
514
515/*
516 * iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map
517 */
518void iwl_legacy_free_channel_map(struct iwl_priv *priv)
519{
520 kfree(priv->channel_info);
521 priv->channel_count = 0;
522}
523EXPORT_SYMBOL(iwl_legacy_free_channel_map);
524
525/**
526 * iwl_legacy_get_channel_info - Find driver's private channel info
527 *
528 * Based on band and channel number.
529 */
530const struct
531iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv,
532 enum ieee80211_band band, u16 channel)
533{
534 int i;
535
536 switch (band) {
537 case IEEE80211_BAND_5GHZ:
538 for (i = 14; i < priv->channel_count; i++) {
539 if (priv->channel_info[i].channel == channel)
540 return &priv->channel_info[i];
541 }
542 break;
543 case IEEE80211_BAND_2GHZ:
544 if (channel >= 1 && channel <= 14)
545 return &priv->channel_info[channel - 1];
546 break;
547 default:
548 BUG();
549 }
550
551 return NULL;
552}
553EXPORT_SYMBOL(iwl_legacy_get_channel_info);
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.h b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
deleted file mode 100644
index c59c81002022..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-eeprom.h
+++ /dev/null
@@ -1,344 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_eeprom_h__
64#define __iwl_legacy_eeprom_h__
65
66#include <net/mac80211.h>
67
68struct iwl_priv;
69
70/*
71 * EEPROM access time values:
72 *
73 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
74 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
75 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
76 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
77 */
78#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
79
80#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
81#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
82
83
84/*
85 * Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags.
86 *
87 * IBSS and/or AP operation is allowed *only* on those channels with
88 * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
89 * RADAR detection is not supported by the 4965 driver, but is a
90 * requirement for establishing a new network for legal operation on channels
91 * requiring RADAR detection or restricting ACTIVE scanning.
92 *
93 * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
94 * It only indicates that 20 MHz channel use is supported; HT40 channel
95 * usage is indicated by a separate set of regulatory flags for each
96 * HT40 channel pair.
97 *
98 * NOTE: Using a channel inappropriately will result in a uCode error!
99 */
100#define IWL_NUM_TX_CALIB_GROUPS 5
101enum {
102 EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
103 EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
104 /* Bit 2 Reserved */
105 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
106 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
107 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
108 /* Bit 6 Reserved (was Narrow Channel) */
109 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
110};
111
112/* SKU Capabilities */
113/* 3945 only */
114#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
115#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
116
117/* *regulatory* channel data format in eeprom, one for each channel.
118 * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
119struct iwl_eeprom_channel {
120 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
121 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
122} __packed;
123
124/* 3945 Specific */
125#define EEPROM_3945_EEPROM_VERSION (0x2f)
126
127/* 4965 has two radio transmitters (and 3 radio receivers) */
128#define EEPROM_TX_POWER_TX_CHAINS (2)
129
130/* 4965 has room for up to 8 sets of txpower calibration data */
131#define EEPROM_TX_POWER_BANDS (8)
132
133/* 4965 factory calibration measures txpower gain settings for
134 * each of 3 target output levels */
135#define EEPROM_TX_POWER_MEASUREMENTS (3)
136
137/* 4965 Specific */
138/* 4965 driver does not work with txpower calibration version < 5 */
139#define EEPROM_4965_TX_POWER_VERSION (5)
140#define EEPROM_4965_EEPROM_VERSION (0x2f)
141#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
142#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
143#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
144#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
145
146/* 2.4 GHz */
147extern const u8 iwlegacy_eeprom_band_1[14];
148
149/*
150 * factory calibration data for one txpower level, on one channel,
151 * measured on one of the 2 tx chains (radio transmitter and associated
152 * antenna). EEPROM contains:
153 *
154 * 1) Temperature (degrees Celsius) of device when measurement was made.
155 *
156 * 2) Gain table index used to achieve the target measurement power.
157 * This refers to the "well-known" gain tables (see iwl-4965-hw.h).
158 *
159 * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
160 *
161 * 4) RF power amplifier detector level measurement (not used).
162 */
163struct iwl_eeprom_calib_measure {
164 u8 temperature; /* Device temperature (Celsius) */
165 u8 gain_idx; /* Index into gain table */
166 u8 actual_pow; /* Measured RF output power, half-dBm */
167 s8 pa_det; /* Power amp detector level (not used) */
168} __packed;
169
170
171/*
172 * measurement set for one channel. EEPROM contains:
173 *
174 * 1) Channel number measured
175 *
176 * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
177 * (a.k.a. "tx chains") (6 measurements altogether)
178 */
179struct iwl_eeprom_calib_ch_info {
180 u8 ch_num;
181 struct iwl_eeprom_calib_measure
182 measurements[EEPROM_TX_POWER_TX_CHAINS]
183 [EEPROM_TX_POWER_MEASUREMENTS];
184} __packed;
185
186/*
187 * txpower subband info.
188 *
189 * For each frequency subband, EEPROM contains the following:
190 *
191 * 1) First and last channels within range of the subband. "0" values
192 * indicate that this sample set is not being used.
193 *
194 * 2) Sample measurement sets for 2 channels close to the range endpoints.
195 */
196struct iwl_eeprom_calib_subband_info {
197 u8 ch_from; /* channel number of lowest channel in subband */
198 u8 ch_to; /* channel number of highest channel in subband */
199 struct iwl_eeprom_calib_ch_info ch1;
200 struct iwl_eeprom_calib_ch_info ch2;
201} __packed;
202
203
204/*
205 * txpower calibration info. EEPROM contains:
206 *
207 * 1) Factory-measured saturation power levels (maximum levels at which
208 * tx power amplifier can output a signal without too much distortion).
209 * There is one level for 2.4 GHz band and one for 5 GHz band. These
210 * values apply to all channels within each of the bands.
211 *
212 * 2) Factory-measured power supply voltage level. This is assumed to be
213 * constant (i.e. same value applies to all channels/bands) while the
214 * factory measurements are being made.
215 *
216 * 3) Up to 8 sets of factory-measured txpower calibration values.
217 * These are for different frequency ranges, since txpower gain
218 * characteristics of the analog radio circuitry vary with frequency.
219 *
220 * Not all sets need to be filled with data;
221 * struct iwl_eeprom_calib_subband_info contains range of channels
222 * (0 if unused) for each set of data.
223 */
224struct iwl_eeprom_calib_info {
225 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
226 u8 saturation_power52; /* half-dBm */
227 __le16 voltage; /* signed */
228 struct iwl_eeprom_calib_subband_info
229 band_info[EEPROM_TX_POWER_BANDS];
230} __packed;
231
232
233/* General */
234#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
235#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
236#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
237#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
238#define EEPROM_VERSION (2*0x44) /* 2 bytes */
239#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
240#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
241#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
242#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
243#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
244
245/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
246#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
247#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
248#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
249#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
250#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
251#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
252
253#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
254#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
255
256/*
257 * Per-channel regulatory data.
258 *
259 * Each channel that *might* be supported by iwl has a fixed location
260 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
261 * txpower (MSB).
262 *
263 * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
264 * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
265 *
266 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
267 */
268#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
269#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
270#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
271
272/*
273 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
274 * 5.0 GHz channels 7, 8, 11, 12, 16
275 * (4915-5080MHz) (none of these is ever supported)
276 */
277#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
278#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
279
280/*
281 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
282 * (5170-5320MHz)
283 */
284#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
285#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
286
287/*
288 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
289 * (5500-5700MHz)
290 */
291#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
292#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
293
294/*
295 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
296 * (5725-5825MHz)
297 */
298#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
299#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
300
301/*
302 * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
303 *
304 * The channel listed is the center of the lower 20 MHz half of the channel.
305 * The overall center frequency is actually 2 channels (10 MHz) above that,
306 * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
307 * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
308 * and the overall HT40 channel width centers on channel 3.
309 *
310 * NOTE: The RXON command uses 20 MHz channel numbers to specify the
311 * control channel to which to tune. RXON also specifies whether the
312 * control channel is the upper or lower half of a HT40 channel.
313 *
314 * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
315 */
316#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
317
318/*
319 * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
320 * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
321 */
322#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
323
324#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
325
/*
 * Per-device EEPROM hooks and layout.
 * @regulatory_bands: EEPROM byte offsets of the 7 regulatory band
 *	tables (indices 0..4 = 20 MHz bands 1-5, 5..6 = 2.4/5 GHz HT40;
 *	a device without HT40 tables uses EEPROM_REGULATORY_BAND_NO_HT40).
 * @acquire_semaphore: grant the driver (instead of uCode) EEPROM access.
 * @release_semaphore: release the EEPROM access semaphore.
 */
struct iwl_eeprom_ops {
	const u32 regulatory_bands[7];
	int (*acquire_semaphore) (struct iwl_priv *priv);
	void (*release_semaphore) (struct iwl_priv *priv);
};
331
332
333int iwl_legacy_eeprom_init(struct iwl_priv *priv);
334void iwl_legacy_eeprom_free(struct iwl_priv *priv);
335const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv,
336 size_t offset);
337u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset);
338int iwl_legacy_init_channel_map(struct iwl_priv *priv);
339void iwl_legacy_free_channel_map(struct iwl_priv *priv);
340const struct iwl_channel_info *iwl_legacy_get_channel_info(
341 const struct iwl_priv *priv,
342 enum ieee80211_band band, u16 channel);
343
344#endif /* __iwl_legacy_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-fh.h b/drivers/net/wireless/iwlegacy/iwl-fh.h
deleted file mode 100644
index 6e6091816e36..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-fh.h
+++ /dev/null
@@ -1,513 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_legacy_fh_h__
64#define __iwl_legacy_fh_h__
65
66/****************************/
67/* Flow Handler Definitions */
68/****************************/
69
70/**
71 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
72 * Addresses are offsets from device's PCI hardware base address.
73 */
74#define FH_MEM_LOWER_BOUND (0x1000)
75#define FH_MEM_UPPER_BOUND (0x2000)
76
77/**
78 * Keep-Warm (KW) buffer base address.
79 *
80 * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
81 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
82 * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
83 * from going into a power-savings mode that would cause higher DRAM latency,
84 * and possible data over/under-runs, before all Tx/Rx is complete.
85 *
86 * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
87 * of the buffer, which must be 4K aligned. Once this is set up, the 4965
88 * automatically invokes keep-warm accesses when normal accesses might not
89 * be sufficient to maintain fast DRAM response.
90 *
91 * Bit fields:
92 * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
93 */
94#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
95
96
97/**
98 * TFD Circular Buffers Base (CBBC) addresses
99 *
100 * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
101 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
102 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
103 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
104 * aligned (address bits 0-7 must be 0).
105 *
106 * Bit fields in each pointer register:
107 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
108 */
109#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
110#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
111
112/* Find TFD CB base pointer for given queue (range 0-15). */
113#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
114
115
116/**
117 * Rx SRAM Control and Status Registers (RSCSR)
118 *
119 * These registers provide handshake between driver and 4965 for the Rx queue
120 * (this queue handles *all* command responses, notifications, Rx data, etc.
121 * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
122 * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
123 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
124 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
125 * mapping between RBDs and RBs.
126 *
127 * Driver must allocate host DRAM memory for the following, and set the
128 * physical address of each into 4965 registers:
129 *
130 * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
131 * entries (although any power of 2, up to 4096, is selectable by driver).
132 * Each entry (1 dword) points to a receive buffer (RB) of consistent size
133 * (typically 4K, although 8K or 16K are also selectable by driver).
134 * Driver sets up RB size and number of RBDs in the CB via Rx config
135 * register FH_MEM_RCSR_CHNL0_CONFIG_REG.
136 *
137 * Bit fields within one RBD:
138 * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
139 *
140 * Driver sets physical address [35:8] of base of RBD circular buffer
141 * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
142 *
143 * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
144 * (RBs) have been filled, via a "write pointer", actually the index of
145 * the RB's corresponding RBD within the circular buffer. Driver sets
146 * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
147 *
148 * Bit fields in lower dword of Rx status buffer (upper dword not used
149 * by driver; see struct iwl4965_shared, val0):
150 * 31-12: Not used by driver
151 * 11- 0: Index of last filled Rx buffer descriptor
152 * (4965 writes, driver reads this value)
153 *
154 * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
155 * enter pointers to these RBs into contiguous RBD circular buffer entries,
156 * and update the 4965's "write" index register,
157 * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
158 *
159 * This "write" index corresponds to the *next* RBD that the driver will make
160 * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
161 * the circular buffer. This value should initially be 0 (before preparing any
162 * RBs), should be 8 after preparing the first 8 RBs (for example), and must
163 * wrap back to 0 at the end of the circular buffer (but don't wrap before
164 * "read" index has advanced past 1! See below).
165 * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
166 *
167 * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
168 * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
169 * to tell the driver the index of the latest filled RBD. The driver must
170 * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
171 *
172 * The driver must also internally keep track of a third index, which is the
173 * next RBD to process. When receiving an Rx interrupt, driver should process
174 * all filled but unprocessed RBs up to, but not including, the RB
175 * corresponding to the "read" index. For example, if "read" index becomes "1",
176 * driver may process the RB pointed to by RBD 0. Depending on volume of
177 * traffic, there may be many RBs to process.
178 *
179 * If read index == write index, 4965 thinks there is no room to put new data.
180 * Due to this, the maximum number of filled RBs is 255, instead of 256. To
181 * be safe, make sure that there is a gap of at least 2 RBDs between "write"
182 * and "read" indexes; that is, make sure that there are no more than 254
183 * buffers waiting to be filled.
184 */
185#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
186#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
187#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
188
189/**
190 * Physical base address of 8-byte Rx Status buffer.
191 * Bit fields:
192 * 31-0: Rx status buffer physical base address [35:4], must 16-byte aligned.
193 */
194#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
195
196/**
197 * Physical base address of Rx Buffer Descriptor Circular Buffer.
198 * Bit fields:
199 * 27-0: RBD CD physical base address [35:8], must be 256-byte aligned.
200 */
201#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
202
203/**
204 * Rx write pointer (index, really!).
205 * Bit fields:
206 * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
207 * NOTE: For 256-entry circular buffer, use only bits [7:0].
208 */
209#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
210#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
211
212
213/**
214 * Rx Config/Status Registers (RCSR)
215 * Rx Config Reg for channel 0 (only channel used)
216 *
217 * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
218 * normal operation (see bit fields).
219 *
220 * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
221 * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
222 * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
223 *
224 * Bit fields:
225 * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
226 * '10' operate normally
227 * 29-24: reserved
228 * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
229 * min "5" for 32 RBDs, max "12" for 4096 RBDs.
230 * 19-18: reserved
231 * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
232 * '10' 12K, '11' 16K.
233 * 15-14: reserved
234 * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
235 * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
236 * typical value 0x10 (about 1/2 msec)
237 * 3- 0: reserved
238 */
239#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
240#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
241#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
242
243#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
244
245#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
246#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */
247#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
248#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
249#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
250#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
251
252#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
253#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
254#define RX_RB_TIMEOUT (0x10)
255
256#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
257#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
258#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
259
260#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
261#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
262#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
263#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
264
265#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
266#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
267#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
268
269#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
270
271/**
272 * Rx Shared Status Registers (RSSR)
273 *
274 * After stopping Rx DMA channel (writing 0 to
275 * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
276 * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
277 *
278 * Bit fields:
279 * 24: 1 = Channel 0 is idle
280 *
281 * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
282 * contain default values that should not be altered by the driver.
283 */
284#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
285#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
286
287#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
288#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
289#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
290 (FH_MEM_RSSR_LOWER_BOUND + 0x008)
291
292#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
293
294#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
295
296/* TFDB Area - TFDs buffer table */
297#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
298#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900)
299#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958)
300#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
301#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
302
303/**
304 * Transmit DMA Channel Control/Status Registers (TCSR)
305 *
306 * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
307 * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
308 * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
309 *
310 * To use a Tx DMA channel, driver must initialize its
311 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
312 *
313 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
314 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL
315 *
316 * All other bits should be 0.
317 *
318 * Bit fields:
319 * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
320 * '10' operate normally
321 * 29- 4: Reserved, set to "0"
322 * 3: Enable internal DMA requests (1, normal operation), disable (0)
323 * 2- 0: Reserved, set to "0"
324 */
325#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
326#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
327
328/* Find Control/Status reg for given Tx DMA/FIFO channel */
329#define FH49_TCSR_CHNL_NUM (7)
330#define FH50_TCSR_CHNL_NUM (8)
331
332/* TCSR: tx_config register values */
333#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
334 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
335#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
336 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
337#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
338 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
339
340#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
341#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
342
343#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
344#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
345
346#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
347#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
348#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
349
350#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
351#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
352#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
353
354#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
355#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
356#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
357
358#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
359#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
360#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
361
362#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
363#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
364
365/**
366 * Tx Shared Status Registers (TSSR)
367 *
368 * After stopping Tx DMA channel (writing 0 to
369 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
370 * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
371 * (channel's buffers empty | no pending requests).
372 *
373 * Bit fields:
374 * 31-24: 1 = Channel buffers empty (channel 7:0)
375 * 23-16: 1 = No pending requests (channel 7:0)
376 */
377#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
378#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
379
380#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
381
382/**
383 * Bit fields for TSSR(Tx Shared Status & Control) error status register:
384 * 31: Indicates an address error when accessed to internal memory
385 * uCode/driver must write "1" in order to clear this flag
386 * 30: Indicates that Host did not send the expected number of dwords to FH
387 * uCode/driver must write "1" in order to clear this flag
388 * 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA
389 * command was received from the scheduler while the TRB was already full
390 * with previous command
391 * uCode/driver must write "1" in order to clear this flag
392 * 7-0: Each status bit indicates a channel's TxCredit error. When an error
393 * bit is set, it indicates that the FH has received a full indication
394 * from the RTC TxFIFO and the current value of the TxCredit counter was
395 * not equal to zero. This mean that the credit mechanism was not
396 * synchronized to the TxFIFO status
397 * uCode/driver must write "1" in order to clear this flag
398 */
399#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
400
401#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
402
403/* Tx service channels */
404#define FH_SRVC_CHNL (9)
405#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8)
406#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
407#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
408 (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
409
410#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
411/* Instruct FH to increment the retry count of a packet when
412 * it is brought from the memory to TX-FIFO
413 */
414#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
415
416#define RX_QUEUE_SIZE 256
417#define RX_QUEUE_MASK 255
418#define RX_QUEUE_SIZE_LOG 8
419
420/*
421 * RX related structures and functions
422 */
423#define RX_FREE_BUFFERS 64
424#define RX_LOW_WATERMARK 8
425
426/* Size of one Rx buffer in host DRAM */
427#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
428#define IWL_RX_BUF_SIZE_4K (4 * 1024)
429#define IWL_RX_BUF_SIZE_8K (8 * 1024)
430
/**
 * struct iwl_rb_status - receive buffer status
 *	host memory mapped FH registers
 * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
 * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
 * @finished_rb_num [0:11] - Indicates the index of the current RB
 *	in which the last frame was written to
 * @finished_fr_num [0:11] - Indicates the index of the RX Frame
 *	which was transferred
 *
 * NOTE(review): the last field is spelled "finished_fr_nam" in the code;
 * the name is kept as-is since renaming would break existing users, but
 * it holds the finished-frame number described above.
 */
struct iwl_rb_status {
	__le16 closed_rb_num;
	__le16 closed_fr_num;
	__le16 finished_rb_num;
	__le16 finished_fr_nam;
	__le32 __unused; /* 3945 only */
} __packed;
448
449
450#define TFD_QUEUE_SIZE_MAX (256)
451#define TFD_QUEUE_SIZE_BC_DUP (64)
452#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
453#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
454#define IWL_NUM_OF_TBS 20
455
456static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr)
457{
458 return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
459}
/**
 * struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor
 *
 * Holds the DMA address and length of one transmission buffer.
 *
 * @lo: low [31:0] portion of the DMA address of the TX buffer;
 *	every even-indexed entry is unaligned on a 16-bit boundary
 *	(hence the __packed layout)
 * @hi_n_len: bits 0-3 carry the [35:32] portion of the DMA address,
 *	bits 4-15 carry the length of the TX buffer
 */
struct iwl_tfd_tb {
	__le32 lo;
	__le16 hi_n_len;
} __packed;
474
/**
 * struct iwl_tfd - Transmit Frame Descriptor (TFD)
 *
 * @ __reserved1[3] reserved
 * @ num_tbs 0-4 number of active tbs
 *	 5   reserved
 * 	 6-7 padding (not used)
 * @ tbs[20]	transmit frame buffer descriptors
 * @ __pad 	padding
 *
 * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
 * Both driver and device share these circular buffers, each of which must be
 * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
 *
 * Driver must indicate the physical address of the base of each
 * circular buffer via the FH_MEM_CBBC_QUEUE registers.
 *
 * Each TFD contains pointer/size information for up to 20 data buffers
 * in host DRAM. These buffers collectively contain the (one) frame described
 * by the TFD. Each buffer must be a single contiguous block of memory within
 * itself, but buffers may be scattered in host DRAM. Each buffer has max size
 * of (4K - 4). The device concatenates all of a TFD's buffers into a single
 * Tx frame, up to 8 KBytes in size.
 *
 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
 */
struct iwl_tfd {
	u8 __reserved1[3];
	u8 num_tbs;
	struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
	__le32 __pad;
} __packed;
509
510/* Keep Warm Size */
511#define IWL_KW_SIZE 0x1000 /* 4k */
512
513#endif /* !__iwl_legacy_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
deleted file mode 100644
index ce1fc9feb61f..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-hcmd.c
+++ /dev/null
@@ -1,271 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/sched.h>
32#include <net/mac80211.h>
33
34#include "iwl-dev.h"
35#include "iwl-debug.h"
36#include "iwl-eeprom.h"
37#include "iwl-core.h"
38
39
/*
 * iwl_legacy_get_cmd_string - map a host-command opcode to its symbol name
 *
 * Used for log and debug output only.  IWL_CMD is defined elsewhere;
 * presumably it expands each entry into a "case <sym>: return #sym;"
 * arm so the opcode is matched and stringified in one step — confirm
 * against the driver headers.  Opcodes not listed report "UNKNOWN".
 */
const char *iwl_legacy_get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IWL_CMD(REPLY_ALIVE);
		IWL_CMD(REPLY_ERROR);
		IWL_CMD(REPLY_RXON);
		IWL_CMD(REPLY_RXON_ASSOC);
		IWL_CMD(REPLY_QOS_PARAM);
		IWL_CMD(REPLY_RXON_TIMING);
		IWL_CMD(REPLY_ADD_STA);
		IWL_CMD(REPLY_REMOVE_STA);
		IWL_CMD(REPLY_WEPKEY);
		IWL_CMD(REPLY_3945_RX);
		IWL_CMD(REPLY_TX);
		IWL_CMD(REPLY_RATE_SCALE);
		IWL_CMD(REPLY_LEDS_CMD);
		IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
		IWL_CMD(REPLY_CHANNEL_SWITCH);
		IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
		IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
		IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
		IWL_CMD(POWER_TABLE_CMD);
		IWL_CMD(PM_SLEEP_NOTIFICATION);
		IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
		IWL_CMD(REPLY_SCAN_CMD);
		IWL_CMD(REPLY_SCAN_ABORT_CMD);
		IWL_CMD(SCAN_START_NOTIFICATION);
		IWL_CMD(SCAN_RESULTS_NOTIFICATION);
		IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
		IWL_CMD(BEACON_NOTIFICATION);
		IWL_CMD(REPLY_TX_BEACON);
		IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
		IWL_CMD(REPLY_BT_CONFIG);
		IWL_CMD(REPLY_STATISTICS_CMD);
		IWL_CMD(STATISTICS_NOTIFICATION);
		IWL_CMD(CARD_STATE_NOTIFICATION);
		IWL_CMD(MISSED_BEACONS_NOTIFICATION);
		IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
		IWL_CMD(SENSITIVITY_CMD);
		IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
		IWL_CMD(REPLY_RX_PHY_CMD);
		IWL_CMD(REPLY_RX_MPDU_CMD);
		IWL_CMD(REPLY_RX);
		IWL_CMD(REPLY_COMPRESSED_BA);
	default:
		return "UNKNOWN";

	}
}
EXPORT_SYMBOL(iwl_legacy_get_cmd_string);
90
91#define HOST_COMPLETE_TIMEOUT (HZ / 2)
92
93static void iwl_legacy_generic_cmd_callback(struct iwl_priv *priv,
94 struct iwl_device_cmd *cmd,
95 struct iwl_rx_packet *pkt)
96{
97 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
98 IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
99 iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
100 return;
101 }
102
103#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
104 switch (cmd->hdr.cmd) {
105 case REPLY_TX_LINK_QUALITY_CMD:
106 case SENSITIVITY_CMD:
107 IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
108 iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
109 break;
110 default:
111 IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
112 iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
113 }
114#endif
115}
116
117static int
118iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
119{
120 int ret;
121
122 BUG_ON(!(cmd->flags & CMD_ASYNC));
123
124 /* An asynchronous command can not expect an SKB to be set. */
125 BUG_ON(cmd->flags & CMD_WANT_SKB);
126
127 /* Assign a generic callback if one is not provided */
128 if (!cmd->callback)
129 cmd->callback = iwl_legacy_generic_cmd_callback;
130
131 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
132 return -EBUSY;
133
134 ret = iwl_legacy_enqueue_hcmd(priv, cmd);
135 if (ret < 0) {
136 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
137 iwl_legacy_get_cmd_string(cmd->id), ret);
138 return ret;
139 }
140 return 0;
141}
142
/**
 * iwl_legacy_send_cmd_sync - send a host command and block until it completes
 *
 * Enqueues @cmd on the command queue and sleeps (up to
 * HOST_COMPLETE_TIMEOUT) until the Rx path clears STATUS_HCMD_ACTIVE and
 * wakes priv->wait_command_queue.  Must be called with priv->mutex held;
 * CMD_ASYNC and callbacks are not allowed here.
 *
 * Returns 0 on success, -ETIMEDOUT on timeout, -ECANCELED on rfkill,
 * -EIO on firmware error or a missing reply, or the enqueue error.
 */
int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	lockdep_assert_held(&priv->mutex);

	BUG_ON(cmd->flags & CMD_ASYNC);

	 /* A synchronous command can not have a callback set. */
	BUG_ON(cmd->callback);

	IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
			iwl_legacy_get_cmd_string(cmd->id));

	/* Mark the command in flight; the response handler clears this bit
	 * and wakes wait_command_queue when the reply arrives. */
	set_bit(STATUS_HCMD_ACTIVE, &priv->status);
	IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
			iwl_legacy_get_cmd_string(cmd->id));

	cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
			iwl_legacy_get_cmd_string(cmd->id), ret);
		goto out;
	}

	/* Sleep until the reply clears HCMD_ACTIVE or the timeout fires. */
	ret = wait_event_timeout(priv->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &priv->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		/* Re-check the bit: the reply may have raced the timeout. */
		if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
			IWL_ERR(priv,
				"Error sending %s: time out after %dms.\n",
				iwl_legacy_get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
			IWL_DEBUG_INFO(priv,
				"Clearing HCMD_ACTIVE for command %s\n",
				iwl_legacy_get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
		IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
			iwl_legacy_get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
		IWL_ERR(priv, "Command %s failed: FW Error\n",
			iwl_legacy_get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IWL_ERR(priv, "Error: Response NULL in '%s'\n",
			iwl_legacy_get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	ret = 0;
	goto out;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}
fail:
	/* Release the reply page whether we succeeded in parsing it or not. */
	if (cmd->reply_page) {
		iwl_legacy_free_pages(priv, cmd->reply_page);
		cmd->reply_page = 0;
	}
out:
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_send_cmd_sync);
231
232int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
233{
234 if (cmd->flags & CMD_ASYNC)
235 return iwl_legacy_send_cmd_async(priv, cmd);
236
237 return iwl_legacy_send_cmd_sync(priv, cmd);
238}
239EXPORT_SYMBOL(iwl_legacy_send_cmd);
240
241int
242iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
243{
244 struct iwl_host_cmd cmd = {
245 .id = id,
246 .len = len,
247 .data = data,
248 };
249
250 return iwl_legacy_send_cmd_sync(priv, &cmd);
251}
252EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu);
253
254int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv,
255 u8 id, u16 len, const void *data,
256 void (*callback)(struct iwl_priv *priv,
257 struct iwl_device_cmd *cmd,
258 struct iwl_rx_packet *pkt))
259{
260 struct iwl_host_cmd cmd = {
261 .id = id,
262 .len = len,
263 .data = data,
264 };
265
266 cmd.flags |= CMD_ASYNC;
267 cmd.callback = callback;
268
269 return iwl_legacy_send_cmd_async(priv, &cmd);
270}
271EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async);
diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h
deleted file mode 100644
index 5cf23eaecbbb..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-helpers.h
+++ /dev/null
@@ -1,196 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#ifndef __iwl_legacy_helpers_h__
31#define __iwl_legacy_helpers_h__
32
33#include <linux/ctype.h>
34#include <net/mac80211.h>
35
36#include "iwl-io.h"
37
38#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
39
40
41static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf(
42 struct ieee80211_hw *hw)
43{
44 return &hw->conf;
45}
46
47/**
48 * iwl_legacy_queue_inc_wrap - increment queue index, wrap back to beginning
49 * @index -- current index
50 * @n_bd -- total number of entries in queue (must be power of 2)
51 */
52static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd)
53{
54 return ++index & (n_bd - 1);
55}
56
57/**
58 * iwl_legacy_queue_dec_wrap - decrement queue index, wrap back to end
59 * @index -- current index
60 * @n_bd -- total number of entries in queue (must be power of 2)
61 */
62static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd)
63{
64 return --index & (n_bd - 1);
65}
66
67/* TODO: Move fw_desc functions to iwl-pci.ko */
/* Release the DMA-coherent firmware image buffer (if any) and reset the
 * descriptor, so a second call becomes a harmless no-op. */
static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev,
				    struct fw_desc *desc)
{
	if (desc->v_addr)
		dma_free_coherent(&pci_dev->dev, desc->len,
				  desc->v_addr, desc->p_addr);
	desc->v_addr = NULL;
	desc->len = 0;
}
77
78static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev,
79 struct fw_desc *desc)
80{
81 if (!desc->len) {
82 desc->v_addr = NULL;
83 return -EINVAL;
84 }
85
86 desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
87 &desc->p_addr, GFP_KERNEL);
88 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
89}
90
91/*
92 * we have 8 bits used like this:
93 *
94 * 7 6 5 4 3 2 1 0
95 * | | | | | | | |
96 * | | | | | | +-+-------- AC queue (0-3)
97 * | | | | | |
98 * | +-+-+-+-+------------ HW queue ID
99 * |
100 * +---------------------- unused
101 */
/* Pack the AC (bits 0-1) and HW queue id (bits 2-6) into txq->swq_id,
 * following the bit layout documented above. */
static inline void
iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
{
	BUG_ON(ac > 3);   /* only have 2 bits */
	BUG_ON(hwq > 31); /* only use 5 bits */

	txq->swq_id = (hwq << 2) | ac;
}
110
/* Unpack swq_id (see iwl_legacy_set_swq_id) and wake the mac80211 AC
 * queue once the last stopped HW queue mapped onto that AC is released;
 * queue_stop_count[ac] tracks how many HW queues still hold it stopped. */
static inline void iwl_legacy_wake_queue(struct iwl_priv *priv,
				  struct iwl_tx_queue *txq)
{
	u8 queue = txq->swq_id;
	u8 ac = queue & 3;		/* low 2 bits: AC */
	u8 hwq = (queue >> 2) & 0x1f;	/* next 5 bits: HW queue id */

	if (test_and_clear_bit(hwq, priv->queue_stopped))
		if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
			ieee80211_wake_queue(priv->hw, ac);
}
122
/* Counterpart of iwl_legacy_wake_queue(): mark this HW queue stopped and
 * stop the backing mac80211 AC queue when this is the first HW queue on
 * that AC to stop. */
static inline void iwl_legacy_stop_queue(struct iwl_priv *priv,
				  struct iwl_tx_queue *txq)
{
	u8 queue = txq->swq_id;
	u8 ac = queue & 3;		/* low 2 bits: AC */
	u8 hwq = (queue >> 2) & 0x1f;	/* next 5 bits: HW queue id */

	if (!test_and_set_bit(hwq, priv->queue_stopped))
		if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
			ieee80211_stop_queue(priv->hw, ac);
}
134
135#ifdef ieee80211_stop_queue
136#undef ieee80211_stop_queue
137#endif
138
139#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
140
141#ifdef ieee80211_wake_queue
142#undef ieee80211_wake_queue
143#endif
144
145#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
146
/* Mask all device interrupts and clear anything already pending so no
 * further ISR work arrives until iwl_legacy_enable_interrupts(). */
static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv)
{
	clear_bit(STATUS_INT_ENABLED, &priv->status);

	/* disable interrupts from uCode/NIC to host */
	iwl_write32(priv, CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	iwl_write32(priv, CSR_INT, 0xffffffff);
	iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
	IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
}
160
/* Unmask only the RF-kill interrupt in CSR_INT_MASK; all other sources
 * stay masked. */
static inline void iwl_legacy_enable_rfkill_int(struct iwl_priv *priv)
{
	IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
	iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
}
166
/* Re-enable the interrupt sources recorded in priv->inta_mask and flag
 * them enabled in priv->status (undoes iwl_legacy_disable_interrupts()). */
static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv)
{
	IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &priv->status);
	iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
}
173
174/**
175 * iwl_legacy_beacon_time_mask_low - mask of lower 32 bit of beacon time
176 * @priv -- pointer to iwl_priv data structure
177 * @tsf_bits -- number of bits need to shift for masking)
178 */
179static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv,
180 u16 tsf_bits)
181{
182 return (1 << tsf_bits) - 1;
183}
184
185/**
186 * iwl_legacy_beacon_time_mask_high - mask of higher 32 bit of beacon time
187 * @priv -- pointer to iwl_priv data structure
188 * @tsf_bits -- number of bits need to shift for masking)
189 */
190static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv,
191 u16 tsf_bits)
192{
193 return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
194}
195
196#endif /* __iwl_legacy_helpers_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-io.h b/drivers/net/wireless/iwlegacy/iwl-io.h
deleted file mode 100644
index 5cc5d342914f..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-io.h
+++ /dev/null
@@ -1,545 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_legacy_io_h__
30#define __iwl_legacy_io_h__
31
32#include <linux/io.h>
33
34#include "iwl-dev.h"
35#include "iwl-debug.h"
36#include "iwl-devtrace.h"
37
38/*
39 * IO, register, and NIC memory access functions
40 *
41 * NOTE on naming convention and macro usage for these
42 *
 * A single _ prefix before an access function means that no state
44 * check or debug information is printed when that function is called.
45 *
46 * A double __ prefix before an access function means that state is checked
47 * and the current line number and caller function name are printed in addition
48 * to any other debug output.
49 *
50 * The non-prefixed name is the #define that maps the caller into a
51 * #define that provides the caller's name and __LINE__ to the double
52 * prefix version.
53 *
54 * If you wish to call the function without any debug or state checking,
55 * you should use the single _ prefix version (as is used by dependent IO
56 * routines, for example _iwl_legacy_read_direct32 calls the non-check version of
57 * _iwl_legacy_read32.)
58 *
59 * These declarations are *extremely* useful in quickly isolating code deltas
60 * which result in misconfiguration of the hardware I/O. In combination with
61 * git-bisect and the IO debug level you can quickly determine the specific
62 * commit which breaks the IO sequence to the hardware.
63 *
64 */
65
/* Raw 8-bit MMIO write at offset @ofs; traced, no state checking. */
static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val)
{
	trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val);
	iowrite8(val, priv->hw_base + ofs);
}
71
/*
 * iwl_write8() - 8-bit MMIO write.  The debug build logs the caller's
 * file/line before delegating to the raw writer above.
 */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline void
__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv,
		    u32 ofs, u8 val)
{
	IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
	_iwl_legacy_write8(priv, ofs, val);
}
#define iwl_write8(priv, ofs, val) \
	__iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val)
#else
#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val)
#endif
85
86
/* Raw 32-bit MMIO write at offset @ofs; traced, no state checking. */
static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val)
{
	trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val);
	iowrite32(val, priv->hw_base + ofs);
}
92
/*
 * iwl_write32() - 32-bit MMIO write.  The debug build logs the caller's
 * file/line before delegating to the raw writer above.
 */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline void
__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv,
		     u32 ofs, u32 val)
{
	IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
	_iwl_legacy_write32(priv, ofs, val);
}
#define iwl_write32(priv, ofs, val) \
	__iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val)
#else
#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val)
#endif
106
/* Raw 32-bit MMIO read at offset @ofs; traced, no state checking. */
static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs)
{
	u32 val = ioread32(priv->hw_base + ofs);
	trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val);
	return val;
}
113
/*
 * iwl_read32() - 32-bit MMIO read.  The debug build logs the caller's
 * file/line before delegating to the raw reader above.
 */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline u32
__iwl_legacy_read32(const char *f, u32 l, struct iwl_priv *priv, u32 ofs)
{
	/* Label fixed: this is read32, not read_direct32 (a different
	 * accessor further down); also const-qualify @f to match the
	 * other debug wrappers, which take "const char *f". */
	IWL_DEBUG_IO(priv, "read32(0x%08X) - %s %d\n", ofs, f, l);
	return _iwl_legacy_read32(priv, ofs);
}
#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs)
#else
#define iwl_read32(p, o) _iwl_legacy_read32(p, o)
#endif
125
126#define IWL_POLL_INTERVAL 10 /* microseconds */
127static inline int
128_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr,
129 u32 bits, u32 mask, int timeout)
130{
131 int t = 0;
132
133 do {
134 if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask))
135 return t;
136 udelay(IWL_POLL_INTERVAL);
137 t += IWL_POLL_INTERVAL;
138 } while (t < timeout);
139
140 return -ETIMEDOUT;
141}
/*
 * iwl_poll_bit() - poll for register bits.  The debug build logs the
 * polled register, pattern and whether the wait timed out.
 */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline int __iwl_legacy_poll_bit(const char *f, u32 l,
				 struct iwl_priv *priv, u32 addr,
				 u32 bits, u32 mask, int timeout)
{
	int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout);
	IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
		     addr, bits, mask,
		     unlikely(ret == -ETIMEDOUT) ? "timeout" : "", f, l);
	return ret;
}
#define iwl_poll_bit(priv, addr, bits, mask, timeout) \
	__iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \
	bits, mask, timeout)
#else
#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t)
#endif
159
/* Read-modify-write: OR @mask into register @reg.  No locking here; the
 * iwl_legacy_set_bit() wrappers below take reg_lock. */
static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
{
	_iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask);
}
/*
 * iwl_legacy_set_bit() - OR @m into CSR @r under priv->reg_lock.  The
 * debug build additionally logs the resulting register value.
 */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline void __iwl_legacy_set_bit(const char *f, u32 l,
				 struct iwl_priv *priv, u32 reg, u32 mask)
{
	u32 val = _iwl_legacy_read32(priv, reg) | mask;
	IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg,
							mask, val);
	_iwl_legacy_write32(priv, reg, val);
}
static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&p->reg_lock, reg_flags);
	__iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
#else
static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&p->reg_lock, reg_flags);
	_iwl_legacy_set_bit(p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
#endif
191
/* Read-modify-write: clear @mask bits in register @reg.  No locking here;
 * the iwl_legacy_clear_bit() wrappers below take reg_lock. */
static inline void
_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
{
	_iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask);
}
/*
 * iwl_legacy_clear_bit() - clear @m bits in CSR @r under priv->reg_lock.
 * The debug build additionally logs the resulting register value.
 */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline void
__iwl_legacy_clear_bit(const char *f, u32 l,
			struct iwl_priv *priv, u32 reg, u32 mask)
{
	u32 val = _iwl_legacy_read32(priv, reg) & ~mask;
	IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
	_iwl_legacy_write32(priv, reg, val);
}
static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&p->reg_lock, reg_flags);
	__iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
#else
static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&p->reg_lock, reg_flags);
	_iwl_legacy_clear_bit(p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
}
#endif
224
/* Wake the NIC and hold MAC_ACCESS_REQ so that subsequent host register
 * accesses are valid.  Returns 0 on success; on wake failure it forces an
 * NMI via CSR_RESET and returns -EIO.  Pair every successful call with
 * _iwl_legacy_release_nic_access(). */
static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv)
{
	int ret;
	u32 val;

	/* this bit wakes up the NIC */
	_iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
			CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 */
	ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (ret < 0) {
		val = _iwl_legacy_read32(priv, CSR_GP_CNTRL);
		IWL_ERR(priv,
			"MAC is in deep sleep!.  CSR_GP_CNTRL = 0x%08X\n", val);
		_iwl_legacy_write32(priv, CSR_RESET,
				CSR_RESET_REG_FLAG_FORCE_NMI);
		return -EIO;
	}

	return 0;
}

/* Debug build logs every grab with the caller's file/line. */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l,
					       struct iwl_priv *priv)
{
	IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l);
	return _iwl_legacy_grab_nic_access(priv);
}
#define iwl_grab_nic_access(priv) \
	__iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv)
#else
#define iwl_grab_nic_access(priv) \
	_iwl_legacy_grab_nic_access(priv)
#endif
280
/* Drop MAC_ACCESS_REQ so the NIC may go back to sleep; pairs with a
 * successful iwl_grab_nic_access(). */
static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv)
{
	_iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
			CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
/* Debug build logs every release with the caller's file/line. */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline void __iwl_legacy_release_nic_access(const char *f, u32 l,
					    struct iwl_priv *priv)
{

	IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l);
	_iwl_legacy_release_nic_access(priv);
}
#define iwl_release_nic_access(priv) \
		__iwl_legacy_release_nic_access(__FILE__, __LINE__, priv)
#else
#define iwl_release_nic_access(priv) \
		_iwl_legacy_release_nic_access(priv)
#endif
300
/* Direct CSR read with no wake/locking of its own; the
 * iwl_legacy_read_direct32() wrappers below handle NIC access. */
static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
{
	return _iwl_legacy_read32(priv, reg);
}
/*
 * iwl_legacy_read_direct32() - read a CSR with the NIC forced awake,
 * under priv->reg_lock.
 * NOTE(review): unlike the write path, the iwl_grab_nic_access() result
 * is not checked here — on wake failure the read still proceeds.
 */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l,
					struct iwl_priv *priv, u32 reg)
{
	u32 value = _iwl_legacy_read_direct32(priv, reg);
	IWL_DEBUG_IO(priv,
			"read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
		     f, l);
	return value;
}
static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
{
	u32 value;
	unsigned long reg_flags;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);
	value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
	return value;
}

#else
static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
{
	u32 value;
	unsigned long reg_flags;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);
	value = _iwl_legacy_read_direct32(priv, reg);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
	return value;

}
#endif
343
/* Direct CSR write with no wake/locking of its own; use
 * iwl_legacy_write_direct32() below unless NIC access is already held. */
static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv,
					 u32 reg, u32 value)
{
	_iwl_legacy_write32(priv, reg, value);
}
349static inline void
350iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
351{
352 unsigned long reg_flags;
353
354 spin_lock_irqsave(&priv->reg_lock, reg_flags);
355 if (!iwl_grab_nic_access(priv)) {
356 _iwl_legacy_write_direct32(priv, reg, value);
357 iwl_release_nic_access(priv);
358 }
359 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
360}
361
362static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv,
363 u32 reg, u32 len, u32 *values)
364{
365 u32 count = sizeof(u32);
366
367 if ((priv != NULL) && (values != NULL)) {
368 for (; 0 < len; len -= count, reg += count, values++)
369 iwl_legacy_write_direct32(priv, reg, *values);
370 }
371}
372
373static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr,
374 u32 mask, int timeout)
375{
376 int t = 0;
377
378 do {
379 if ((iwl_legacy_read_direct32(priv, addr) & mask) == mask)
380 return t;
381 udelay(IWL_POLL_INTERVAL);
382 t += IWL_POLL_INTERVAL;
383 } while (t < timeout);
384
385 return -ETIMEDOUT;
386}
387
/*
 * iwl_poll_direct_bit() - poll for direct-register bits.  The debug
 * build logs the result (elapsed time or timeout) with caller file/line.
 */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l,
					struct iwl_priv *priv,
					u32 addr, u32 mask, int timeout)
{
	int ret = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout);

	if (unlikely(ret == -ETIMEDOUT))
		IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - "
			     "timedout - %s %d\n", addr, mask, f, l);
	else
		IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
			     "- %s %d\n", addr, mask, ret, f, l);
	return ret;
}
#define iwl_poll_direct_bit(priv, addr, mask, timeout) \
__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
#else
#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit
#endif
408
/* Read periphery register @reg: latch the address (with bits 25:24 set,
 * like the write path) into RADDR, then read back the data port.  Caller
 * must hold NIC access.
 * NOTE(review): unlike _iwl_legacy_write_prph(), @reg is not masked to
 * its low 16 bits here — confirm callers never pass high address bits. */
static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
{
	_iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
	rmb();
	return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
}
/* Read a periphery register with the NIC woken, under reg_lock.
 * NOTE(review): the grab_nic_access() result is not checked; on wake
 * failure the read still proceeds. */
static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
{
	unsigned long reg_flags;
	u32 val;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);
	val = _iwl_legacy_read_prph(priv, reg);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
	return val;
}
427
/* Write periphery register @addr: latch the (16-bit-masked) address with
 * bits 25:24 set into WADDR, then write the data port.  Caller must hold
 * NIC access. */
static inline void _iwl_legacy_write_prph(struct iwl_priv *priv,
					     u32 addr, u32 val)
{
	_iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
			      ((addr & 0x0000FFFF) | (3 << 24)));
	wmb();
	_iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
}
436
437static inline void
438iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
439{
440 unsigned long reg_flags;
441
442 spin_lock_irqsave(&priv->reg_lock, reg_flags);
443 if (!iwl_grab_nic_access(priv)) {
444 _iwl_legacy_write_prph(priv, addr, val);
445 iwl_release_nic_access(priv);
446 }
447 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
448}
449
450#define _iwl_legacy_set_bits_prph(priv, reg, mask) \
451_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask))
452
/* OR @mask into periphery register @reg (read-modify-write under
 * reg_lock with the NIC woken).
 * NOTE(review): grab_nic_access() is unchecked here, unlike the plain
 * write path above. */
static inline void
iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);
	_iwl_legacy_set_bits_prph(priv, reg, mask);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
464
465#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \
466_iwl_legacy_write_prph(priv, reg, \
467 ((_iwl_legacy_read_prph(priv, reg) & mask) | bits))
468
/* Replace the bits selected by ~@mask with @bits in periphery register
 * @reg: new = (old & mask) | bits.  Read-modify-write under reg_lock
 * with the NIC woken (grab result unchecked, as in the sibling above). */
static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
				u32 bits, u32 mask)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);
	_iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
480
481static inline void iwl_legacy_clear_bits_prph(struct iwl_priv
482 *priv, u32 reg, u32 mask)
483{
484 unsigned long reg_flags;
485 u32 val;
486
487 spin_lock_irqsave(&priv->reg_lock, reg_flags);
488 iwl_grab_nic_access(priv);
489 val = _iwl_legacy_read_prph(priv, reg);
490 _iwl_legacy_write_prph(priv, reg, (val & ~mask));
491 iwl_release_nic_access(priv);
492 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
493}
494
/* Read one 32-bit word of device (target) memory at @addr: latch the
 * address into RADDR, then read the data port.  Done under reg_lock with
 * the NIC woken (grab result unchecked, like the other read paths). */
static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr)
{
	unsigned long reg_flags;
	u32 value;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);

	_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
	rmb();
	value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
	return value;
}
511
/* Write one 32-bit word of device (target) memory at @addr: latch the
 * address into WADDR, then write the data port.  Skipped silently if the
 * NIC cannot be woken. */
static inline void
iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	if (!iwl_grab_nic_access(priv)) {
		_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
		wmb();
		_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
		iwl_release_nic_access(priv);
	}
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
526
527static inline void
528iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
529 u32 len, u32 *values)
530{
531 unsigned long reg_flags;
532
533 spin_lock_irqsave(&priv->reg_lock, reg_flags);
534 if (!iwl_grab_nic_access(priv)) {
535 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
536 wmb();
537 for (; 0 < len; len -= sizeof(u32), values++)
538 _iwl_legacy_write_direct32(priv,
539 HBUS_TARG_MEM_WDAT, *values);
540
541 iwl_release_nic_access(priv);
542 }
543 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
544}
545#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c
deleted file mode 100644
index dc568a474c5d..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-led.c
+++ /dev/null
@@ -1,205 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-dev.h"
41#include "iwl-core.h"
42#include "iwl-io.h"
43
44/* default: IWL_LED_BLINK(0) using blinking index table */
45static int led_mode;
46module_param(led_mode, int, S_IRUGO);
47MODULE_PARM_DESC(led_mode, "0=system default, "
48 "1=On(RF On)/Off(RF Off), 2=blinking");
49
50/* Throughput OFF time(ms) ON time (ms)
51 * >300 25 25
52 * >200 to 300 40 40
53 * >100 to 200 55 55
54 * >70 to 100 65 65
55 * >50 to 70 75 75
56 * >20 to 50 85 85
57 * >10 to 20 95 95
58 * >5 to 10 110 110
59 * >1 to 5 130 130
60 * >0 to 1 167 167
61 * <=0 SOLID ON
62 */
/* Throughput thresholds (kbit/s) -> total blink period (ms); see the
 * on/off-time table in the comment above. */
static const struct ieee80211_tpt_blink iwl_blink[] = {
	{ .throughput = 0, .blink_time = 334 },
	{ .throughput = 1 * 1024 - 1, .blink_time = 260 },
	{ .throughput = 5 * 1024 - 1, .blink_time = 220 },
	{ .throughput = 10 * 1024 - 1, .blink_time = 190 },
	{ .throughput = 20 * 1024 - 1, .blink_time = 170 },
	{ .throughput = 50 * 1024 - 1, .blink_time = 150 },
	{ .throughput = 70 * 1024 - 1, .blink_time = 130 },
	{ .throughput = 100 * 1024 - 1, .blink_time = 110 },
	{ .throughput = 200 * 1024 - 1, .blink_time = 80 },
	{ .throughput = 300 * 1024 - 1, .blink_time = 50 },
};
75
76/*
77 * Adjust led blink rate to compensate on a MAC Clock difference on every HW
78 * Led blink rate analysis showed an average deviation of 0% on 3945,
79 * 5% on 4965 HW.
80 * Need to compensate on the led on/off time per HW according to the deviation
81 * to achieve the desired led frequency
82 * The calculation is: (100-averageDeviation)/100 * blinkTime
83 * For code efficiency the calculation will be:
84 * compensation = (100 - averageDeviation) * 64 / 100
85 * NewBlinkTime = (compensation * BlinkTime) / 64
86 */
87static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv,
88 u8 time, u16 compensation)
89{
90 if (!compensation) {
91 IWL_ERR(priv, "undefined blink compensation: "
92 "use pre-defined blinking time\n");
93 return time;
94 }
95
96 return (u8)((time * compensation) >> 6);
97}
98
99/* Set led pattern command */
/* Program the HW LED with @on/@off times (ms).  Returns -EBUSY before
 * the device is ready, 0 if the requested pattern is already programmed
 * or on success, otherwise the error from the per-device led op. */
static int iwl_legacy_led_cmd(struct iwl_priv *priv,
		       unsigned long on,
		       unsigned long off)
{
	struct iwl_led_cmd led_cmd = {
		.id = IWL_LED_LINK,
		.interval = IWL_DEF_LED_INTRVL
	};
	int ret;

	if (!test_bit(STATUS_READY, &priv->status))
		return -EBUSY;

	/* skip the command if this pattern is already active */
	if (priv->blink_on == on && priv->blink_off == off)
		return 0;

	if (off == 0) {
		/* led is SOLID_ON */
		on = IWL_LED_SOLID;
	}

	IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
			priv->cfg->base_params->led_compensation);
	led_cmd.on = iwl_legacy_blink_compensation(priv, on,
				priv->cfg->base_params->led_compensation);
	led_cmd.off = iwl_legacy_blink_compensation(priv, off,
				priv->cfg->base_params->led_compensation);

	ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
	/* cache the pattern only once the device accepted it */
	if (!ret) {
		priv->blink_on = on;
		priv->blink_off = off;
	}
	return ret;
}
135
136static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev,
137 enum led_brightness brightness)
138{
139 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
140 unsigned long on = 0;
141
142 if (brightness > 0)
143 on = IWL_LED_SOLID;
144
145 iwl_legacy_led_cmd(priv, on, 0);
146}
147
148static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev,
149 unsigned long *delay_on,
150 unsigned long *delay_off)
151{
152 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
153
154 return iwl_legacy_led_cmd(priv, *delay_on, *delay_off);
155}
156
157void iwl_legacy_leds_init(struct iwl_priv *priv)
158{
159 int mode = led_mode;
160 int ret;
161
162 if (mode == IWL_LED_DEFAULT)
163 mode = priv->cfg->led_mode;
164
165 priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
166 wiphy_name(priv->hw->wiphy));
167 priv->led.brightness_set = iwl_legacy_led_brightness_set;
168 priv->led.blink_set = iwl_legacy_led_blink_set;
169 priv->led.max_brightness = 1;
170
171 switch (mode) {
172 case IWL_LED_DEFAULT:
173 WARN_ON(1);
174 break;
175 case IWL_LED_BLINK:
176 priv->led.default_trigger =
177 ieee80211_create_tpt_led_trigger(priv->hw,
178 IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
179 iwl_blink, ARRAY_SIZE(iwl_blink));
180 break;
181 case IWL_LED_RF_STATE:
182 priv->led.default_trigger =
183 ieee80211_get_radio_led_name(priv->hw);
184 break;
185 }
186
187 ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
188 if (ret) {
189 kfree(priv->led.name);
190 return;
191 }
192
193 priv->led_registered = true;
194}
195EXPORT_SYMBOL(iwl_legacy_leds_init);
196
197void iwl_legacy_leds_exit(struct iwl_priv *priv)
198{
199 if (!priv->led_registered)
200 return;
201
202 led_classdev_unregister(&priv->led);
203 kfree(priv->led.name);
204}
205EXPORT_SYMBOL(iwl_legacy_leds_exit);
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.h b/drivers/net/wireless/iwlegacy/iwl-led.h
deleted file mode 100644
index f0791f70f79d..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-led.h
+++ /dev/null
@@ -1,56 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_legacy_leds_h__
28#define __iwl_legacy_leds_h__
29
30
31struct iwl_priv;
32
33#define IWL_LED_SOLID 11
34#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
35
36#define IWL_LED_ACTIVITY (0<<1)
37#define IWL_LED_LINK (1<<1)
38
39/*
40 * LED mode
41 * IWL_LED_DEFAULT: use device default
42 * IWL_LED_RF_STATE: turn LED on/off based on RF state
43 * LED ON = RF ON
44 * LED OFF = RF OFF
45 * IWL_LED_BLINK: adjust led blink rate based on blink table
46 */
47enum iwl_led_mode {
48 IWL_LED_DEFAULT,
49 IWL_LED_RF_STATE,
50 IWL_LED_BLINK,
51};
52
53void iwl_legacy_leds_init(struct iwl_priv *priv);
54void iwl_legacy_leds_exit(struct iwl_priv *priv);
55
56#endif /* __iwl_legacy_leds_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
deleted file mode 100644
index 38647e481eb0..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
+++ /dev/null
@@ -1,456 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_legacy_rs_h__
28#define __iwl_legacy_rs_h__
29
30struct iwl_rate_info {
31 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
32 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
33 u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
34 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
35 u8 prev_ieee; /* previous rate in IEEE speeds */
36 u8 next_ieee; /* next rate in IEEE speeds */
37 u8 prev_rs; /* previous rate used in rs algo */
38 u8 next_rs; /* next rate used in rs algo */
39 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
40 u8 next_rs_tgg; /* next rate used in TGG rs algo */
41};
42
43struct iwl3945_rate_info {
44 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
45 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
46 u8 prev_ieee; /* previous rate in IEEE speeds */
47 u8 next_ieee; /* next rate in IEEE speeds */
48 u8 prev_rs; /* previous rate used in rs algo */
49 u8 next_rs; /* next rate used in rs algo */
50 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
51 u8 next_rs_tgg; /* next rate used in TGG rs algo */
52 u8 table_rs_index; /* index in rate scale table cmd */
53 u8 prev_table_rs; /* prev in rate table cmd */
54};
55
56
57/*
58 * These serve as indexes into
59 * struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
60 */
61enum {
62 IWL_RATE_1M_INDEX = 0,
63 IWL_RATE_2M_INDEX,
64 IWL_RATE_5M_INDEX,
65 IWL_RATE_11M_INDEX,
66 IWL_RATE_6M_INDEX,
67 IWL_RATE_9M_INDEX,
68 IWL_RATE_12M_INDEX,
69 IWL_RATE_18M_INDEX,
70 IWL_RATE_24M_INDEX,
71 IWL_RATE_36M_INDEX,
72 IWL_RATE_48M_INDEX,
73 IWL_RATE_54M_INDEX,
74 IWL_RATE_60M_INDEX,
75 IWL_RATE_COUNT,
76 IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */
77 IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1,
78 IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
79 IWL_RATE_INVALID = IWL_RATE_COUNT,
80};
81
82enum {
83 IWL_RATE_6M_INDEX_TABLE = 0,
84 IWL_RATE_9M_INDEX_TABLE,
85 IWL_RATE_12M_INDEX_TABLE,
86 IWL_RATE_18M_INDEX_TABLE,
87 IWL_RATE_24M_INDEX_TABLE,
88 IWL_RATE_36M_INDEX_TABLE,
89 IWL_RATE_48M_INDEX_TABLE,
90 IWL_RATE_54M_INDEX_TABLE,
91 IWL_RATE_1M_INDEX_TABLE,
92 IWL_RATE_2M_INDEX_TABLE,
93 IWL_RATE_5M_INDEX_TABLE,
94 IWL_RATE_11M_INDEX_TABLE,
95 IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
96};
97
98enum {
99 IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
100 IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
101 IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
102 IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
103 IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
104};
105
106/* #define vs. enum to keep from defaulting to 'large integer' */
107#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
108#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
109#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
110#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
111#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
112#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
113#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
114#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
115#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX)
116#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
117#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
118#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
119#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
120
121/* uCode API values for legacy bit rates, both OFDM and CCK */
122enum {
123 IWL_RATE_6M_PLCP = 13,
124 IWL_RATE_9M_PLCP = 15,
125 IWL_RATE_12M_PLCP = 5,
126 IWL_RATE_18M_PLCP = 7,
127 IWL_RATE_24M_PLCP = 9,
128 IWL_RATE_36M_PLCP = 11,
129 IWL_RATE_48M_PLCP = 1,
130 IWL_RATE_54M_PLCP = 3,
131 IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
132 IWL_RATE_1M_PLCP = 10,
133 IWL_RATE_2M_PLCP = 20,
134 IWL_RATE_5M_PLCP = 55,
135 IWL_RATE_11M_PLCP = 110,
136 /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
137};
138
139/* uCode API values for OFDM high-throughput (HT) bit rates */
140enum {
141 IWL_RATE_SISO_6M_PLCP = 0,
142 IWL_RATE_SISO_12M_PLCP = 1,
143 IWL_RATE_SISO_18M_PLCP = 2,
144 IWL_RATE_SISO_24M_PLCP = 3,
145 IWL_RATE_SISO_36M_PLCP = 4,
146 IWL_RATE_SISO_48M_PLCP = 5,
147 IWL_RATE_SISO_54M_PLCP = 6,
148 IWL_RATE_SISO_60M_PLCP = 7,
149 IWL_RATE_MIMO2_6M_PLCP = 0x8,
150 IWL_RATE_MIMO2_12M_PLCP = 0x9,
151 IWL_RATE_MIMO2_18M_PLCP = 0xa,
152 IWL_RATE_MIMO2_24M_PLCP = 0xb,
153 IWL_RATE_MIMO2_36M_PLCP = 0xc,
154 IWL_RATE_MIMO2_48M_PLCP = 0xd,
155 IWL_RATE_MIMO2_54M_PLCP = 0xe,
156 IWL_RATE_MIMO2_60M_PLCP = 0xf,
157 IWL_RATE_SISO_INVM_PLCP,
158 IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
159};
160
161/* MAC header values for bit rates */
162enum {
163 IWL_RATE_6M_IEEE = 12,
164 IWL_RATE_9M_IEEE = 18,
165 IWL_RATE_12M_IEEE = 24,
166 IWL_RATE_18M_IEEE = 36,
167 IWL_RATE_24M_IEEE = 48,
168 IWL_RATE_36M_IEEE = 72,
169 IWL_RATE_48M_IEEE = 96,
170 IWL_RATE_54M_IEEE = 108,
171 IWL_RATE_60M_IEEE = 120,
172 IWL_RATE_1M_IEEE = 2,
173 IWL_RATE_2M_IEEE = 4,
174 IWL_RATE_5M_IEEE = 11,
175 IWL_RATE_11M_IEEE = 22,
176};
177
178#define IWL_CCK_BASIC_RATES_MASK \
179 (IWL_RATE_1M_MASK | \
180 IWL_RATE_2M_MASK)
181
182#define IWL_CCK_RATES_MASK \
183 (IWL_CCK_BASIC_RATES_MASK | \
184 IWL_RATE_5M_MASK | \
185 IWL_RATE_11M_MASK)
186
187#define IWL_OFDM_BASIC_RATES_MASK \
188 (IWL_RATE_6M_MASK | \
189 IWL_RATE_12M_MASK | \
190 IWL_RATE_24M_MASK)
191
192#define IWL_OFDM_RATES_MASK \
193 (IWL_OFDM_BASIC_RATES_MASK | \
194 IWL_RATE_9M_MASK | \
195 IWL_RATE_18M_MASK | \
196 IWL_RATE_36M_MASK | \
197 IWL_RATE_48M_MASK | \
198 IWL_RATE_54M_MASK)
199
200#define IWL_BASIC_RATES_MASK \
201 (IWL_OFDM_BASIC_RATES_MASK | \
202 IWL_CCK_BASIC_RATES_MASK)
203
204#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
205#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
206
207#define IWL_INVALID_VALUE -1
208
209#define IWL_MIN_RSSI_VAL -100
210#define IWL_MAX_RSSI_VAL 0
211
212/* These values specify how many Tx frame attempts before
213 * searching for a new modulation mode */
214#define IWL_LEGACY_FAILURE_LIMIT 160
215#define IWL_LEGACY_SUCCESS_LIMIT 480
216#define IWL_LEGACY_TABLE_COUNT 160
217
218#define IWL_NONE_LEGACY_FAILURE_LIMIT 400
219#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500
220#define IWL_NONE_LEGACY_TABLE_COUNT 1500
221
222/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
223#define IWL_RS_GOOD_RATIO 12800 /* 100% */
224#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */
225#define IWL_RATE_HIGH_TH 10880 /* 85% */
226#define IWL_RATE_INCREASE_TH 6400 /* 50% */
227#define IWL_RATE_DECREASE_TH 1920 /* 15% */
228
229/* possible actions when in legacy mode */
230#define IWL_LEGACY_SWITCH_ANTENNA1 0
231#define IWL_LEGACY_SWITCH_ANTENNA2 1
232#define IWL_LEGACY_SWITCH_SISO 2
233#define IWL_LEGACY_SWITCH_MIMO2_AB 3
234#define IWL_LEGACY_SWITCH_MIMO2_AC 4
235#define IWL_LEGACY_SWITCH_MIMO2_BC 5
236
237/* possible actions when in siso mode */
238#define IWL_SISO_SWITCH_ANTENNA1 0
239#define IWL_SISO_SWITCH_ANTENNA2 1
240#define IWL_SISO_SWITCH_MIMO2_AB 2
241#define IWL_SISO_SWITCH_MIMO2_AC 3
242#define IWL_SISO_SWITCH_MIMO2_BC 4
243#define IWL_SISO_SWITCH_GI 5
244
245/* possible actions when in mimo mode */
246#define IWL_MIMO2_SWITCH_ANTENNA1 0
247#define IWL_MIMO2_SWITCH_ANTENNA2 1
248#define IWL_MIMO2_SWITCH_SISO_A 2
249#define IWL_MIMO2_SWITCH_SISO_B 3
250#define IWL_MIMO2_SWITCH_SISO_C 4
251#define IWL_MIMO2_SWITCH_GI 5
252
253#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
254
255#define IWL_ACTION_LIMIT 3 /* # possible actions */
256
257#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
258
259/* load per tid defines for A-MPDU activation */
260#define IWL_AGG_TPT_THREHOLD 0
261#define IWL_AGG_LOAD_THRESHOLD 10
262#define IWL_AGG_ALL_TID 0xff
263#define TID_QUEUE_CELL_SPACING 50 /*mS */
264#define TID_QUEUE_MAX_SIZE 20
265#define TID_ROUND_VALUE 5 /* mS */
266#define TID_MAX_LOAD_COUNT 8
267
268#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
269#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
270
271extern const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
272
273enum iwl_table_type {
274 LQ_NONE,
275 LQ_G, /* legacy types */
276 LQ_A,
277 LQ_SISO, /* high-throughput types */
278 LQ_MIMO2,
279 LQ_MAX,
280};
281
282#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
283#define is_siso(tbl) ((tbl) == LQ_SISO)
284#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
285#define is_mimo(tbl) (is_mimo2(tbl))
286#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
287#define is_a_band(tbl) ((tbl) == LQ_A)
288#define is_g_and(tbl) ((tbl) == LQ_G)
289
290#define ANT_NONE 0x0
291#define ANT_A BIT(0)
292#define ANT_B BIT(1)
293#define ANT_AB (ANT_A | ANT_B)
294#define ANT_C BIT(2)
295#define ANT_AC (ANT_A | ANT_C)
296#define ANT_BC (ANT_B | ANT_C)
297#define ANT_ABC (ANT_AB | ANT_C)
298
299#define IWL_MAX_MCS_DISPLAY_SIZE 12
300
301struct iwl_rate_mcs_info {
302 char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
303 char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
304};
305
306/**
307 * struct iwl_rate_scale_data -- tx success history for one rate
308 */
309struct iwl_rate_scale_data {
310 u64 data; /* bitmap of successful frames */
311 s32 success_counter; /* number of frames successful */
312 s32 success_ratio; /* per-cent * 128 */
313 s32 counter; /* number of frames attempted */
314 s32 average_tpt; /* success ratio * expected throughput */
315 unsigned long stamp;
316};
317
318/**
319 * struct iwl_scale_tbl_info -- tx params and success history for all rates
320 *
321 * There are two of these in struct iwl_lq_sta,
322 * one for "active", and one for "search".
323 */
324struct iwl_scale_tbl_info {
325 enum iwl_table_type lq_type;
326 u8 ant_type;
327 u8 is_SGI; /* 1 = short guard interval */
328 u8 is_ht40; /* 1 = 40 MHz channel width */
329 u8 is_dup; /* 1 = duplicated data streams */
330 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
331 u8 max_search; /* maximun number of tables we can search */
332 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
333 u32 current_rate; /* rate_n_flags, uCode API format */
334 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
335};
336
337struct iwl_traffic_load {
338 unsigned long time_stamp; /* age of the oldest statistics */
339 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
340 * slice */
341 u32 total; /* total num of packets during the
342 * last TID_MAX_TIME_DIFF */
343 u8 queue_count; /* number of queues that has
344 * been used since the last cleanup */
345 u8 head; /* start of the circular buffer */
346};
347
348/**
349 * struct iwl_lq_sta -- driver's rate scaling private structure
350 *
351 * Pointer to this gets passed back and forth between driver and mac80211.
352 */
353struct iwl_lq_sta {
354 u8 active_tbl; /* index of active table, range 0-1 */
355 u8 enable_counter; /* indicates HT mode */
356 u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
357 u8 search_better_tbl; /* 1: currently trying alternate mode */
358 s32 last_tpt;
359
360 /* The following determine when to search for a new mode */
361 u32 table_count_limit;
362 u32 max_failure_limit; /* # failed frames before new search */
363 u32 max_success_limit; /* # successful frames before new search */
364 u32 table_count;
365 u32 total_failed; /* total failed frames, any/all rates */
366 u32 total_success; /* total successful frames, any/all rates */
367 u64 flush_timer; /* time staying in mode before new search */
368
369 u8 action_counter; /* # mode-switch actions tried */
370 u8 is_green;
371 u8 is_dup;
372 enum ieee80211_band band;
373
374 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
375 u32 supp_rates;
376 u16 active_legacy_rate;
377 u16 active_siso_rate;
378 u16 active_mimo2_rate;
379 s8 max_rate_idx; /* Max rate set by user */
380 u8 missed_rate_counter;
381
382 struct iwl_link_quality_cmd lq;
383 struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
384 struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
385 u8 tx_agg_tid_en;
386#ifdef CONFIG_MAC80211_DEBUGFS
387 struct dentry *rs_sta_dbgfs_scale_table_file;
388 struct dentry *rs_sta_dbgfs_stats_table_file;
389 struct dentry *rs_sta_dbgfs_rate_scale_data_file;
390 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
391 u32 dbg_fixed_rate;
392#endif
393 struct iwl_priv *drv;
394
395 /* used to be in sta_info */
396 int last_txrate_idx;
397 /* last tx rate_n_flags */
398 u32 last_rate_n_flags;
399 /* packets destined for this STA are aggregated */
400 u8 is_agg;
401};
402
403static inline u8 iwl4965_num_of_ant(u8 mask)
404{
405 return !!((mask) & ANT_A) +
406 !!((mask) & ANT_B) +
407 !!((mask) & ANT_C);
408}
409
410static inline u8 iwl4965_first_antenna(u8 mask)
411{
412 if (mask & ANT_A)
413 return ANT_A;
414 if (mask & ANT_B)
415 return ANT_B;
416 return ANT_C;
417}
418
419
420/**
421 * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
422 *
423 * The specific throughput table used is based on the type of network
424 * the associated with, including A, B, G, and G w/ TGG protection
425 */
426extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
427
428/* Initialize station's rate scaling information after adding station */
429extern void iwl4965_rs_rate_init(struct iwl_priv *priv,
430 struct ieee80211_sta *sta, u8 sta_id);
431extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
432 struct ieee80211_sta *sta, u8 sta_id);
433
434/**
435 * iwl_rate_control_register - Register the rate control algorithm callbacks
436 *
437 * Since the rate control algorithm is hardware specific, there is no need
438 * or reason to place it as a stand alone module. The driver can call
439 * iwl_rate_control_register in order to register the rate control callbacks
440 * with the mac80211 subsystem. This should be performed prior to calling
441 * ieee80211_register_hw
442 *
443 */
444extern int iwl4965_rate_control_register(void);
445extern int iwl3945_rate_control_register(void);
446
447/**
448 * iwl_rate_control_unregister - Unregister the rate control callbacks
449 *
450 * This should be called after calling ieee80211_unregister_hw, but before
451 * the driver is unloaded.
452 */
453extern void iwl4965_rate_control_unregister(void);
454extern void iwl3945_rate_control_unregister(void);
455
456#endif /* __iwl_legacy_rs__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.c b/drivers/net/wireless/iwlegacy/iwl-power.c
deleted file mode 100644
index 903ef0d6d6cb..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-power.c
+++ /dev/null
@@ -1,165 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/slab.h>
33#include <linux/init.h>
34
35#include <net/mac80211.h>
36
37#include "iwl-eeprom.h"
38#include "iwl-dev.h"
39#include "iwl-core.h"
40#include "iwl-io.h"
41#include "iwl-commands.h"
42#include "iwl-debug.h"
43#include "iwl-power.h"
44
45/*
46 * Setting power level allows the card to go to sleep when not busy.
47 *
48 * We calculate a sleep command based on the required latency, which
49 * we get from mac80211. In order to handle thermal throttling, we can
50 * also use pre-defined power levels.
51 */
52
53/*
54 * This defines the old power levels. They are still used by default
55 * (level 1) and for thermal throttle (levels 3 through 5)
56 */
57
58struct iwl_power_vec_entry {
59 struct iwl_powertable_cmd cmd;
60 u8 no_dtim; /* number of skip dtim */
61};
62
63static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv,
64 struct iwl_powertable_cmd *cmd)
65{
66 memset(cmd, 0, sizeof(*cmd));
67
68 if (priv->power_data.pci_pm)
69 cmd->flags |= IWL_POWER_PCI_PM_MSK;
70
71 IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
72}
73
74static int
75iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
76{
77 IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
78 IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
79 IWL_DEBUG_POWER(priv, "Tx timeout = %u\n",
80 le32_to_cpu(cmd->tx_data_timeout));
81 IWL_DEBUG_POWER(priv, "Rx timeout = %u\n",
82 le32_to_cpu(cmd->rx_data_timeout));
83 IWL_DEBUG_POWER(priv,
84 "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
85 le32_to_cpu(cmd->sleep_interval[0]),
86 le32_to_cpu(cmd->sleep_interval[1]),
87 le32_to_cpu(cmd->sleep_interval[2]),
88 le32_to_cpu(cmd->sleep_interval[3]),
89 le32_to_cpu(cmd->sleep_interval[4]));
90
91 return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD,
92 sizeof(struct iwl_powertable_cmd), cmd);
93}
94
95int
96iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
97 bool force)
98{
99 int ret;
100 bool update_chains;
101
102 lockdep_assert_held(&priv->mutex);
103
104 /* Don't update the RX chain when chain noise calibration is running */
105 update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
106 priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
107
108 if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
109 return 0;
110
111 if (!iwl_legacy_is_ready_rf(priv))
112 return -EIO;
113
114 /* scan complete use sleep_power_next, need to be updated */
115 memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
116 if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
117 IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
118 return 0;
119 }
120
121 if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
122 set_bit(STATUS_POWER_PMI, &priv->status);
123
124 ret = iwl_legacy_set_power(priv, cmd);
125 if (!ret) {
126 if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
127 clear_bit(STATUS_POWER_PMI, &priv->status);
128
129 if (priv->cfg->ops->lib->update_chain_flags && update_chains)
130 priv->cfg->ops->lib->update_chain_flags(priv);
131 else if (priv->cfg->ops->lib->update_chain_flags)
132 IWL_DEBUG_POWER(priv,
133 "Cannot update the power, chain noise "
134 "calibration running: %d\n",
135 priv->chain_noise_data.state);
136
137 memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
138 } else
139 IWL_ERR(priv, "set power fail, ret = %d", ret);
140
141 return ret;
142}
143
144int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force)
145{
146 struct iwl_powertable_cmd cmd;
147
148 iwl_legacy_power_sleep_cam_cmd(priv, &cmd);
149 return iwl_legacy_power_set_mode(priv, &cmd, force);
150}
151EXPORT_SYMBOL(iwl_legacy_power_update_mode);
152
153/* initialize to default */
154void iwl_legacy_power_initialize(struct iwl_priv *priv)
155{
156 u16 lctl = iwl_legacy_pcie_link_ctl(priv);
157
158 priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
159
160 priv->power_data.debug_sleep_level_override = -1;
161
162 memset(&priv->power_data.sleep_cmd, 0,
163 sizeof(priv->power_data.sleep_cmd));
164}
165EXPORT_SYMBOL(iwl_legacy_power_initialize);
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.h b/drivers/net/wireless/iwlegacy/iwl-power.h
deleted file mode 100644
index d30b36acdc4a..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-power.h
+++ /dev/null
@@ -1,55 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#ifndef __iwl_legacy_power_setting_h__
29#define __iwl_legacy_power_setting_h__
30
31#include "iwl-commands.h"
32
33enum iwl_power_level {
34 IWL_POWER_INDEX_1,
35 IWL_POWER_INDEX_2,
36 IWL_POWER_INDEX_3,
37 IWL_POWER_INDEX_4,
38 IWL_POWER_INDEX_5,
39 IWL_POWER_NUM
40};
41
42struct iwl_power_mgr {
43 struct iwl_powertable_cmd sleep_cmd;
44 struct iwl_powertable_cmd sleep_cmd_next;
45 int debug_sleep_level_override;
46 bool pci_pm;
47};
48
49int
50iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
51 bool force);
52int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force);
53void iwl_legacy_power_initialize(struct iwl_priv *priv);
54
55#endif /* __iwl_legacy_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-rx.c b/drivers/net/wireless/iwlegacy/iwl-rx.c
deleted file mode 100644
index f4d21ec22497..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-rx.c
+++ /dev/null
@@ -1,282 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <linux/slab.h>
32#include <linux/export.h>
33#include <net/mac80211.h>
34#include <asm/unaligned.h>
35#include "iwl-eeprom.h"
36#include "iwl-dev.h"
37#include "iwl-core.h"
38#include "iwl-sta.h"
39#include "iwl-io.h"
40#include "iwl-helpers.h"
41/************************** RX-FUNCTIONS ****************************/
42/*
43 * Rx theory of operation
44 *
45 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
46 * each of which point to Receive Buffers to be filled by the NIC. These get
47 * used not only for Rx frames, but for any command response or notification
48 * from the NIC. The driver and NIC manage the Rx buffers by means
49 * of indexes into the circular buffer.
50 *
51 * Rx Queue Indexes
52 * The host/firmware share two index registers for managing the Rx buffers.
53 *
54 * The READ index maps to the first position that the firmware may be writing
55 * to -- the driver can read up to (but not including) this position and get
56 * good data.
57 * The READ index is managed by the firmware once the card is enabled.
58 *
59 * The WRITE index maps to the last position the driver has read from -- the
60 * position preceding WRITE is the last slot the firmware can place a packet.
61 *
62 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
63 * WRITE = READ.
64 *
65 * During initialization, the host sets up the READ queue position to the first
66 * INDEX position, and WRITE to the last (READ - 1 wrapped)
67 *
68 * When the firmware places a packet in a buffer, it will advance the READ index
69 * and fire the RX interrupt. The driver can then query the READ index and
70 * process as many packets as possible, moving the WRITE index forward as it
71 * resets the Rx queue buffers with new memory.
72 *
73 * The management in the driver is as follows:
74 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
75 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
76 * to replenish the iwl->rxq->rx_free.
77 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
78 * iwl->rxq is replenished and the READ INDEX is updated (updating the
79 * 'processed' and 'read' driver indexes as well)
80 * + A received packet is processed and handed to the kernel network stack,
81 * detached from the iwl->rxq. The driver 'processed' index is updated.
82 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
83 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
84 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
85 * were enough free buffers and RX_STALLED is set it is cleared.
86 *
87 *
88 * Driver sequence:
89 *
90 * iwl_legacy_rx_queue_alloc() Allocates rx_free
91 * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
92 * iwl_rx_queue_restock
93 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
94 * queue, updates firmware pointers, and updates
95 * the WRITE index. If insufficient rx_free buffers
96 * are available, schedules iwl_rx_replenish
97 *
98 * -- enable interrupts --
99 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
100 * READ INDEX, detaching the SKB from the pool.
101 * Moves the packet buffer from queue to rx_used.
102 * Calls iwl_rx_queue_restock to refill any empty
103 * slots.
104 * ...
105 *
106 */
107
108/**
109 * iwl_legacy_rx_queue_space - Return number of free slots available in queue.
110 */
111int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q)
112{
113 int s = q->read - q->write;
114 if (s <= 0)
115 s += RX_QUEUE_SIZE;
116 /* keep some buffer to not confuse full and empty queue */
117 s -= 2;
118 if (s < 0)
119 s = 0;
120 return s;
121}
122EXPORT_SYMBOL(iwl_legacy_rx_queue_space);
123
124/**
125 * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue
126 */
127void
128iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
129 struct iwl_rx_queue *q)
130{
131 unsigned long flags;
132 u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
133 u32 reg;
134
135 spin_lock_irqsave(&q->lock, flags);
136
137 if (q->need_update == 0)
138 goto exit_unlock;
139
140 /* If power-saving is in use, make sure device is awake */
141 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
142 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
143
144 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
145 IWL_DEBUG_INFO(priv,
146 "Rx queue requesting wakeup,"
147 " GP1 = 0x%x\n", reg);
148 iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
149 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
150 goto exit_unlock;
151 }
152
153 q->write_actual = (q->write & ~0x7);
154 iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
155 q->write_actual);
156
157 /* Else device is assumed to be awake */
158 } else {
159 /* Device expects a multiple of 8 */
160 q->write_actual = (q->write & ~0x7);
161 iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
162 q->write_actual);
163 }
164
165 q->need_update = 0;
166
167 exit_unlock:
168 spin_unlock_irqrestore(&q->lock, flags);
169}
170EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr);
171
172int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv)
173{
174 struct iwl_rx_queue *rxq = &priv->rxq;
175 struct device *dev = &priv->pci_dev->dev;
176 int i;
177
178 spin_lock_init(&rxq->lock);
179 INIT_LIST_HEAD(&rxq->rx_free);
180 INIT_LIST_HEAD(&rxq->rx_used);
181
182 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
183 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
184 GFP_KERNEL);
185 if (!rxq->bd)
186 goto err_bd;
187
188 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
189 &rxq->rb_stts_dma, GFP_KERNEL);
190 if (!rxq->rb_stts)
191 goto err_rb;
192
193 /* Fill the rx_used queue with _all_ of the Rx buffers */
194 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
195 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
196
197 /* Set us so that we have processed and used all buffers, but have
198 * not restocked the Rx queue with fresh buffers */
199 rxq->read = rxq->write = 0;
200 rxq->write_actual = 0;
201 rxq->free_count = 0;
202 rxq->need_update = 0;
203 return 0;
204
205err_rb:
206 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
207 rxq->bd_dma);
208err_bd:
209 return -ENOMEM;
210}
211EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc);
212
213
214void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
215 struct iwl_rx_mem_buffer *rxb)
216{
217 struct iwl_rx_packet *pkt = rxb_addr(rxb);
218 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
219
220 if (!report->state) {
221 IWL_DEBUG_11H(priv,
222 "Spectrum Measure Notification: Start\n");
223 return;
224 }
225
226 memcpy(&priv->measure_report, report, sizeof(*report));
227 priv->measurement_status |= MEASUREMENT_READY;
228}
229EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif);
230
231/*
232 * returns non-zero if packet should be dropped
233 */
234int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
235 struct ieee80211_hdr *hdr,
236 u32 decrypt_res,
237 struct ieee80211_rx_status *stats)
238{
239 u16 fc = le16_to_cpu(hdr->frame_control);
240
241 /*
242 * All contexts have the same setting here due to it being
243 * a module parameter, so OK to check any context.
244 */
245 if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
246 RXON_FILTER_DIS_DECRYPT_MSK)
247 return 0;
248
249 if (!(fc & IEEE80211_FCTL_PROTECTED))
250 return 0;
251
252 IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
253 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
254 case RX_RES_STATUS_SEC_TYPE_TKIP:
255 /* The uCode has got a bad phase 1 Key, pushes the packet.
256 * Decryption will be done in SW. */
257 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
258 RX_RES_STATUS_BAD_KEY_TTAK)
259 break;
260
261 case RX_RES_STATUS_SEC_TYPE_WEP:
262 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
263 RX_RES_STATUS_BAD_ICV_MIC) {
264 /* bad ICV, the packet is destroyed since the
265 * decryption is inplace, drop it */
266 IWL_DEBUG_RX(priv, "Packet destroyed\n");
267 return -1;
268 }
269 case RX_RES_STATUS_SEC_TYPE_CCMP:
270 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
271 RX_RES_STATUS_DECRYPT_OK) {
272 IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
273 stats->flag |= RX_FLAG_DECRYPTED;
274 }
275 break;
276
277 default:
278 break;
279 }
280 return 0;
281}
282EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag);
diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c
deleted file mode 100644
index 521b73b527d3..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-scan.c
+++ /dev/null
@@ -1,550 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/slab.h>
29#include <linux/types.h>
30#include <linux/etherdevice.h>
31#include <linux/export.h>
32#include <net/mac80211.h>
33
34#include "iwl-eeprom.h"
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40
41/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
42 * sending probe req. This should be set long enough to hear probe responses
43 * from more than one AP. */
44#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
45#define IWL_ACTIVE_DWELL_TIME_52 (20)
46
47#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
48#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
49
50/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
51 * Must be set longer than active dwell time.
52 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
53#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
54#define IWL_PASSIVE_DWELL_TIME_52 (10)
55#define IWL_PASSIVE_DWELL_BASE (100)
56#define IWL_CHANNEL_TUNE_TIME 5
57
/*
 * iwl_legacy_send_scan_abort - ask the uCode to abort the current HW scan
 *
 * Sends REPLY_SCAN_ABORT_CMD synchronously and inspects the reply.
 * Returns 0 when the firmware accepted the abort, a negative errno
 * otherwise.
 */
static int iwl_legacy_send_scan_abort(struct iwl_priv *priv)
{
	int ret;
	struct iwl_rx_packet *pkt;
	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_ABORT_CMD,
		.flags = CMD_WANT_SKB,	/* keep the reply page for inspection */
	};

	/* Exit instantly with error when device is not ready
	 * to receive scan abort command or it does not perform
	 * hardware scan currently */
	if (!test_bit(STATUS_READY, &priv->status) ||
	    !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
	    !test_bit(STATUS_SCAN_HW, &priv->status) ||
	    test_bit(STATUS_FW_ERROR, &priv->status) ||
	    test_bit(STATUS_EXIT_PENDING, &priv->status))
		return -EIO;

	ret = iwl_legacy_send_cmd_sync(priv, &cmd);
	if (ret)
		return ret;

	pkt = (struct iwl_rx_packet *)cmd.reply_page;
	if (pkt->u.status != CAN_ABORT_STATUS) {
		/* The scan abort will return 1 for success or
		 * 2 for "failure". A failure condition can be
		 * due to simply not being in an active scan which
		 * can occur if we send the scan abort before the
		 * microcode has notified us that a scan is
		 * completed. */
		IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
		ret = -EIO;
	}

	/* release the reply page requested via CMD_WANT_SKB */
	iwl_legacy_free_pages(priv, cmd.reply_page);
	return ret;
}
96
97static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted)
98{
99 /* check if scan was requested from mac80211 */
100 if (priv->scan_request) {
101 IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
102 ieee80211_scan_completed(priv->hw, aborted);
103 }
104
105 priv->scan_vif = NULL;
106 priv->scan_request = NULL;
107}
108
109void iwl_legacy_force_scan_end(struct iwl_priv *priv)
110{
111 lockdep_assert_held(&priv->mutex);
112
113 if (!test_bit(STATUS_SCANNING, &priv->status)) {
114 IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
115 return;
116 }
117
118 IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
119 clear_bit(STATUS_SCANNING, &priv->status);
120 clear_bit(STATUS_SCAN_HW, &priv->status);
121 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
122 iwl_legacy_complete_scan(priv, true);
123}
124
125static void iwl_legacy_do_scan_abort(struct iwl_priv *priv)
126{
127 int ret;
128
129 lockdep_assert_held(&priv->mutex);
130
131 if (!test_bit(STATUS_SCANNING, &priv->status)) {
132 IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
133 return;
134 }
135
136 if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
137 IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
138 return;
139 }
140
141 ret = iwl_legacy_send_scan_abort(priv);
142 if (ret) {
143 IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
144 iwl_legacy_force_scan_end(priv);
145 } else
146 IWL_DEBUG_SCAN(priv, "Successfully send scan abort\n");
147}
148
/**
 * iwl_legacy_scan_cancel - Cancel any currently executing HW scan
 *
 * Only queues the abort_scan work item; the actual abort runs
 * asynchronously on priv->workqueue.  Always returns 0.
 */
int iwl_legacy_scan_cancel(struct iwl_priv *priv)
{
	IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
	queue_work(priv->workqueue, &priv->abort_scan);
	return 0;
}
EXPORT_SYMBOL(iwl_legacy_scan_cancel);
159
160/**
161 * iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan
162 * @ms: amount of time to wait (in milliseconds) for scan to abort
163 *
164 */
165int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
166{
167 unsigned long timeout = jiffies + msecs_to_jiffies(ms);
168
169 lockdep_assert_held(&priv->mutex);
170
171 IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
172
173 iwl_legacy_do_scan_abort(priv);
174
175 while (time_before_eq(jiffies, timeout)) {
176 if (!test_bit(STATUS_SCAN_HW, &priv->status))
177 break;
178 msleep(20);
179 }
180
181 return test_bit(STATUS_SCAN_HW, &priv->status);
182}
183EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout);
184
/* Service response to REPLY_SCAN_CMD (0x80) */
static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scanreq_notification *notif =
	    (struct iwl_scanreq_notification *)pkt->u.raw;

	/* Debug-only: log the status the firmware reported for the scan
	 * request; the handler is a no-op in non-debug builds. */
	IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
#endif
}
197
198/* Service SCAN_START_NOTIFICATION (0x82) */
199static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv,
200 struct iwl_rx_mem_buffer *rxb)
201{
202 struct iwl_rx_packet *pkt = rxb_addr(rxb);
203 struct iwl_scanstart_notification *notif =
204 (struct iwl_scanstart_notification *)pkt->u.raw;
205 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
206 IWL_DEBUG_SCAN(priv, "Scan start: "
207 "%d [802.11%s] "
208 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
209 notif->channel,
210 notif->band ? "bg" : "a",
211 le32_to_cpu(notif->tsf_high),
212 le32_to_cpu(notif->tsf_low),
213 notif->status, notif->beacon_timer);
214}
215
/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv,
				      struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scanresults_notification *notif =
	    (struct iwl_scanresults_notification *)pkt->u.raw;

	/* Debug-only: per-channel scan result.  Elapsed time is measured
	 * against the TSF recorded in iwl_legacy_rx_scan_start_notif(). */
	IWL_DEBUG_SCAN(priv, "Scan ch.res: "
		       "%d [802.11%s] "
		       "(TSF: 0x%08X:%08X) - %d "
		       "elapsed=%lu usec\n",
		       notif->channel,
		       notif->band ? "bg" : "a",
		       le32_to_cpu(notif->tsf_high),
		       le32_to_cpu(notif->tsf_low),
		       le32_to_cpu(notif->statistics[0]),
		       le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
#endif
}
237
/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv,
				       struct iwl_rx_mem_buffer *rxb)
{

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
#endif

	/* NOTE(review): scan_notif exists only under the debug config;
	 * presumably IWL_DEBUG_SCAN compiles away its arguments when
	 * debugging is off -- confirm against the debug header. */
	IWL_DEBUG_SCAN(priv,
			"Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
		       scan_notif->scanned_channels,
		       scan_notif->tsf_low,
		       scan_notif->tsf_high, scan_notif->status);

	/* The HW is no longer scanning */
	clear_bit(STATUS_SCAN_HW, &priv->status);

	IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
		       (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
		       jiffies_to_msecs(jiffies - priv->scan_start));

	/* Deferred work finishes the mac80211 side of the completion */
	queue_work(priv->workqueue, &priv->scan_completed);
}
263
/* Register this module's scan notification handlers in the RX dispatch
 * table so incoming firmware notifications reach the functions above. */
void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv)
{
	/* scan handlers */
	priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan;
	priv->rx_handlers[SCAN_START_NOTIFICATION] =
	    iwl_legacy_rx_scan_start_notif;
	priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
	    iwl_legacy_rx_scan_results_notif;
	priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
	    iwl_legacy_rx_scan_complete_notif;
}
EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers);
276
277inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
278 enum ieee80211_band band,
279 u8 n_probes)
280{
281 if (band == IEEE80211_BAND_5GHZ)
282 return IWL_ACTIVE_DWELL_TIME_52 +
283 IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
284 else
285 return IWL_ACTIVE_DWELL_TIME_24 +
286 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
287}
288EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time);
289
/*
 * iwl_legacy_get_passive_dwell_time - passive dwell (msec) for @band
 *
 * Starts from the per-band default; when associated, the dwell is
 * clamped so scanning does not stray from the associated channel(s)
 * for longer than the smallest beacon interval allows.
 */
u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
			       enum ieee80211_band band,
			       struct ieee80211_vif *vif)
{
	struct iwl_rxon_context *ctx;
	u16 passive = (band == IEEE80211_BAND_2GHZ) ?
	    IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
	    IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;

	if (iwl_legacy_is_any_associated(priv)) {
		/*
		 * If we're associated, we clamp the maximum passive
		 * dwell time to be 98% of the smallest beacon interval
		 * (minus 2 * channel tune time)
		 */
		for_each_context(priv, ctx) {
			u16 value;

			if (!iwl_legacy_is_associated_ctx(ctx))
				continue;
			/* unknown (0) or oversized beacon intervals fall
			 * back to the base dwell */
			value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
			if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
				value = IWL_PASSIVE_DWELL_BASE;
			value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
			passive = min(value, passive);
		}
	}

	return passive;
}
EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time);
321
322void iwl_legacy_init_scan_params(struct iwl_priv *priv)
323{
324 u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
325 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
326 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
327 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
328 priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
329}
330EXPORT_SYMBOL(iwl_legacy_init_scan_params);
331
/*
 * iwl_legacy_scan_initiate - start a HW scan via the hw-specific hook
 *
 * Caller must hold priv->mutex and have already stored the request in
 * priv->scan_request.  Returns 0 on success or a negative errno.
 */
static int iwl_legacy_scan_initiate(struct iwl_priv *priv,
				    struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&priv->mutex);

	if (WARN_ON(!priv->cfg->ops->utils->request_scan))
		return -EOPNOTSUPP;

	/* stop a pending watchdog; it is re-armed below on success */
	cancel_delayed_work(&priv->scan_check);

	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_WARN(priv, "Request scan called when driver not ready.\n");
		return -EIO;
	}

	if (test_bit(STATUS_SCAN_HW, &priv->status)) {
		IWL_DEBUG_SCAN(priv,
			"Multiple concurrent scan requests in parallel.\n");
		return -EBUSY;
	}

	if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
		return -EBUSY;
	}

	IWL_DEBUG_SCAN(priv, "Starting scan...\n");

	/* mark scanning before handing off to the hw-specific hook */
	set_bit(STATUS_SCANNING, &priv->status);
	priv->scan_start = jiffies;

	ret = priv->cfg->ops->utils->request_scan(priv, vif);
	if (ret) {
		/* scan never started; undo the status bit */
		clear_bit(STATUS_SCANNING, &priv->status);
		return ret;
	}

	/* watchdog in case the firmware never reports completion */
	queue_delayed_work(priv->workqueue, &priv->scan_check,
			   IWL_SCAN_CHECK_WATCHDOG);

	return 0;
}
376
377int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
378 struct ieee80211_vif *vif,
379 struct cfg80211_scan_request *req)
380{
381 struct iwl_priv *priv = hw->priv;
382 int ret;
383
384 IWL_DEBUG_MAC80211(priv, "enter\n");
385
386 if (req->n_channels == 0)
387 return -EINVAL;
388
389 mutex_lock(&priv->mutex);
390
391 if (test_bit(STATUS_SCANNING, &priv->status)) {
392 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
393 ret = -EAGAIN;
394 goto out_unlock;
395 }
396
397 /* mac80211 will only ask for one band at a time */
398 priv->scan_request = req;
399 priv->scan_vif = vif;
400 priv->scan_band = req->channels[0]->band;
401
402 ret = iwl_legacy_scan_initiate(priv, vif);
403
404 IWL_DEBUG_MAC80211(priv, "leave\n");
405
406out_unlock:
407 mutex_unlock(&priv->mutex);
408
409 return ret;
410}
411EXPORT_SYMBOL(iwl_legacy_mac_hw_scan);
412
413static void iwl_legacy_bg_scan_check(struct work_struct *data)
414{
415 struct iwl_priv *priv =
416 container_of(data, struct iwl_priv, scan_check.work);
417
418 IWL_DEBUG_SCAN(priv, "Scan check work\n");
419
420 /* Since we are here firmware does not finish scan and
421 * most likely is in bad shape, so we don't bother to
422 * send abort command, just force scan complete to mac80211 */
423 mutex_lock(&priv->mutex);
424 iwl_legacy_force_scan_end(priv);
425 mutex_unlock(&priv->mutex);
426}
427
/**
 * iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request
 *
 * Builds a broadcast probe request in @frame: the 24-byte management
 * header, a zero-length (wildcard) SSID IE, then @ie_len bytes of
 * caller-supplied IEs from @ies.  @left is the space available in the
 * buffer.  Returns the number of bytes written, or 0 when even the
 * mandatory parts do not fit.
 */

u16
iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
		       const u8 *ta, const u8 *ies, int ie_len, int left)
{
	int len = 0;
	u8 *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= 24;	/* management header */
	if (left < 0)
		return 0;

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	memcpy(frame->da, iwlegacy_bcast_addr, ETH_ALEN);
	memcpy(frame->sa, ta, ETH_ALEN);
	memcpy(frame->bssid, iwlegacy_bcast_addr, ETH_ALEN);
	frame->seq_ctrl = 0;

	len += 24;

	/* ...next IE... */
	pos = &frame->u.probe_req.variable[0];

	/* fill in our indirect SSID IE */
	left -= 2;
	if (left < 0)
		return 0;
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;	/* zero length == wildcard SSID */

	len += 2;

	/* caller's IEs must fit in what remains; on overflow warn and
	 * return just the mandatory part written so far */
	if (WARN_ON(left < ie_len))
		return len;

	if (ies && ie_len) {
		memcpy(pos, ies, ie_len);
		len += ie_len;
	}

	return (u16)len;
}
EXPORT_SYMBOL(iwl_legacy_fill_probe_req);
476
477static void iwl_legacy_bg_abort_scan(struct work_struct *work)
478{
479 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
480
481 IWL_DEBUG_SCAN(priv, "Abort scan work\n");
482
483 /* We keep scan_check work queued in case when firmware will not
484 * report back scan completed notification */
485 mutex_lock(&priv->mutex);
486 iwl_legacy_scan_cancel_timeout(priv, 200);
487 mutex_unlock(&priv->mutex);
488}
489
/* Work item run when the firmware reported scan completion: finishes
 * the scan towards mac80211 and re-applies deferred settings. */
static void iwl_legacy_bg_scan_completed(struct work_struct *work)
{
	struct iwl_priv *priv =
	    container_of(work, struct iwl_priv, scan_completed);
	bool aborted;

	IWL_DEBUG_SCAN(priv, "Completed scan.\n");

	/* the scan finished, so the watchdog is no longer needed */
	cancel_delayed_work(&priv->scan_check);

	mutex_lock(&priv->mutex);

	aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
	if (aborted)
		IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");

	/* someone (e.g. force_scan_end) may already have completed it */
	if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
		goto out_settings;
	}

	iwl_legacy_complete_scan(priv, aborted);

out_settings:
	/* Can we still talk to firmware ? */
	if (!iwl_legacy_is_ready_rf(priv))
		goto out;

	/*
	 * We do not commit power settings while scan is pending,
	 * do it now if the settings changed.
	 */
	iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
	iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);

	priv->cfg->ops->utils->post_scan(priv);

out:
	mutex_unlock(&priv->mutex);
}
530
/* Wire up the scan-related work items; called once during driver setup. */
void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv)
{
	INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed);
	INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan);
	INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check);
}
EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work);
538
/* Synchronously cancel all scan work items (e.g. on teardown). */
void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv)
{
	/* flush the immediate work items first */
	cancel_work_sync(&priv->abort_scan);
	cancel_work_sync(&priv->scan_completed);

	/* If the watchdog was still pending, the scan never completed;
	 * force-end it under the mutex, as the watchdog itself would. */
	if (cancel_delayed_work_sync(&priv->scan_check)) {
		mutex_lock(&priv->mutex);
		iwl_legacy_force_scan_end(priv);
		mutex_unlock(&priv->mutex);
	}
}
EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
index 9f70a4723103..85fe48e520f9 100644
--- a/drivers/net/wireless/iwlegacy/iwl-spectrum.h
+++ b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
@@ -26,8 +26,8 @@
26 * 26 *
27 *****************************************************************************/ 27 *****************************************************************************/
28 28
29#ifndef __iwl_legacy_spectrum_h__ 29#ifndef __il_spectrum_h__
30#define __iwl_legacy_spectrum_h__ 30#define __il_spectrum_h__
31enum { /* ieee80211_basic_report.map */ 31enum { /* ieee80211_basic_report.map */
32 IEEE80211_BASIC_MAP_BSS = (1 << 0), 32 IEEE80211_BASIC_MAP_BSS = (1 << 0),
33 IEEE80211_BASIC_MAP_OFDM = (1 << 1), 33 IEEE80211_BASIC_MAP_OFDM = (1 << 1),
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.c b/drivers/net/wireless/iwlegacy/iwl-sta.c
index f10df3e2813a..75fe315f66b4 100644
--- a/drivers/net/wireless/iwlegacy/iwl-sta.c
+++ b/drivers/net/wireless/iwlegacy/iwl-sta.c
@@ -37,76 +37,76 @@
37#include "iwl-core.h" 37#include "iwl-core.h"
38#include "iwl-sta.h" 38#include "iwl-sta.h"
39 39
40/* priv->sta_lock must be held */ 40/* il->sta_lock must be held */
41static void iwl_legacy_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) 41static void il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
42{ 42{
43 43
44 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) 44 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
45 IWL_ERR(priv, 45 IL_ERR(
46 "ACTIVATE a non DRIVER active station id %u addr %pM\n", 46 "ACTIVATE a non DRIVER active station id %u addr %pM\n",
47 sta_id, priv->stations[sta_id].sta.sta.addr); 47 sta_id, il->stations[sta_id].sta.sta.addr);
48 48
49 if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) { 49 if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
50 IWL_DEBUG_ASSOC(priv, 50 D_ASSOC(
51 "STA id %u addr %pM already present" 51 "STA id %u addr %pM already present"
52 " in uCode (according to driver)\n", 52 " in uCode (according to driver)\n",
53 sta_id, priv->stations[sta_id].sta.sta.addr); 53 sta_id, il->stations[sta_id].sta.sta.addr);
54 } else { 54 } else {
55 priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE; 55 il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
56 IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n", 56 D_ASSOC("Added STA id %u addr %pM to uCode\n",
57 sta_id, priv->stations[sta_id].sta.sta.addr); 57 sta_id, il->stations[sta_id].sta.sta.addr);
58 } 58 }
59} 59}
60 60
61static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv, 61static int il_process_add_sta_resp(struct il_priv *il,
62 struct iwl_legacy_addsta_cmd *addsta, 62 struct il_addsta_cmd *addsta,
63 struct iwl_rx_packet *pkt, 63 struct il_rx_pkt *pkt,
64 bool sync) 64 bool sync)
65{ 65{
66 u8 sta_id = addsta->sta.sta_id; 66 u8 sta_id = addsta->sta.sta_id;
67 unsigned long flags; 67 unsigned long flags;
68 int ret = -EIO; 68 int ret = -EIO;
69 69
70 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { 70 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
71 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n", 71 IL_ERR("Bad return from C_ADD_STA (0x%08X)\n",
72 pkt->hdr.flags); 72 pkt->hdr.flags);
73 return ret; 73 return ret;
74 } 74 }
75 75
76 IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n", 76 D_INFO("Processing response for adding station %u\n",
77 sta_id); 77 sta_id);
78 78
79 spin_lock_irqsave(&priv->sta_lock, flags); 79 spin_lock_irqsave(&il->sta_lock, flags);
80 80
81 switch (pkt->u.add_sta.status) { 81 switch (pkt->u.add_sta.status) {
82 case ADD_STA_SUCCESS_MSK: 82 case ADD_STA_SUCCESS_MSK:
83 IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n"); 83 D_INFO("C_ADD_STA PASSED\n");
84 iwl_legacy_sta_ucode_activate(priv, sta_id); 84 il_sta_ucode_activate(il, sta_id);
85 ret = 0; 85 ret = 0;
86 break; 86 break;
87 case ADD_STA_NO_ROOM_IN_TABLE: 87 case ADD_STA_NO_ROOM_IN_TBL:
88 IWL_ERR(priv, "Adding station %d failed, no room in table.\n", 88 IL_ERR("Adding station %d failed, no room in table.\n",
89 sta_id); 89 sta_id);
90 break; 90 break;
91 case ADD_STA_NO_BLOCK_ACK_RESOURCE: 91 case ADD_STA_NO_BLOCK_ACK_RESOURCE:
92 IWL_ERR(priv, 92 IL_ERR(
93 "Adding station %d failed, no block ack resource.\n", 93 "Adding station %d failed, no block ack resource.\n",
94 sta_id); 94 sta_id);
95 break; 95 break;
96 case ADD_STA_MODIFY_NON_EXIST_STA: 96 case ADD_STA_MODIFY_NON_EXIST_STA:
97 IWL_ERR(priv, "Attempting to modify non-existing station %d\n", 97 IL_ERR("Attempting to modify non-existing station %d\n",
98 sta_id); 98 sta_id);
99 break; 99 break;
100 default: 100 default:
101 IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n", 101 D_ASSOC("Received C_ADD_STA:(0x%08X)\n",
102 pkt->u.add_sta.status); 102 pkt->u.add_sta.status);
103 break; 103 break;
104 } 104 }
105 105
106 IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n", 106 D_INFO("%s station id %u addr %pM\n",
107 priv->stations[sta_id].sta.mode == 107 il->stations[sta_id].sta.mode ==
108 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", 108 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
109 sta_id, priv->stations[sta_id].sta.sta.addr); 109 sta_id, il->stations[sta_id].sta.sta.addr);
110 110
111 /* 111 /*
112 * XXX: The MAC address in the command buffer is often changed from 112 * XXX: The MAC address in the command buffer is often changed from
@@ -116,68 +116,68 @@ static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv,
116 * issue has not yet been resolved and this debugging is left to 116 * issue has not yet been resolved and this debugging is left to
117 * observe the problem. 117 * observe the problem.
118 */ 118 */
119 IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n", 119 D_INFO("%s station according to cmd buffer %pM\n",
120 priv->stations[sta_id].sta.mode == 120 il->stations[sta_id].sta.mode ==
121 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", 121 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
122 addsta->sta.addr); 122 addsta->sta.addr);
123 spin_unlock_irqrestore(&priv->sta_lock, flags); 123 spin_unlock_irqrestore(&il->sta_lock, flags);
124 124
125 return ret; 125 return ret;
126} 126}
127 127
128static void iwl_legacy_add_sta_callback(struct iwl_priv *priv, 128static void il_add_sta_callback(struct il_priv *il,
129 struct iwl_device_cmd *cmd, 129 struct il_device_cmd *cmd,
130 struct iwl_rx_packet *pkt) 130 struct il_rx_pkt *pkt)
131{ 131{
132 struct iwl_legacy_addsta_cmd *addsta = 132 struct il_addsta_cmd *addsta =
133 (struct iwl_legacy_addsta_cmd *)cmd->cmd.payload; 133 (struct il_addsta_cmd *)cmd->cmd.payload;
134 134
135 iwl_legacy_process_add_sta_resp(priv, addsta, pkt, false); 135 il_process_add_sta_resp(il, addsta, pkt, false);
136 136
137} 137}
138 138
139int iwl_legacy_send_add_sta(struct iwl_priv *priv, 139int il_send_add_sta(struct il_priv *il,
140 struct iwl_legacy_addsta_cmd *sta, u8 flags) 140 struct il_addsta_cmd *sta, u8 flags)
141{ 141{
142 struct iwl_rx_packet *pkt = NULL; 142 struct il_rx_pkt *pkt = NULL;
143 int ret = 0; 143 int ret = 0;
144 u8 data[sizeof(*sta)]; 144 u8 data[sizeof(*sta)];
145 struct iwl_host_cmd cmd = { 145 struct il_host_cmd cmd = {
146 .id = REPLY_ADD_STA, 146 .id = C_ADD_STA,
147 .flags = flags, 147 .flags = flags,
148 .data = data, 148 .data = data,
149 }; 149 };
150 u8 sta_id __maybe_unused = sta->sta.sta_id; 150 u8 sta_id __maybe_unused = sta->sta.sta_id;
151 151
152 IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n", 152 D_INFO("Adding sta %u (%pM) %ssynchronously\n",
153 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : ""); 153 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
154 154
155 if (flags & CMD_ASYNC) 155 if (flags & CMD_ASYNC)
156 cmd.callback = iwl_legacy_add_sta_callback; 156 cmd.callback = il_add_sta_callback;
157 else { 157 else {
158 cmd.flags |= CMD_WANT_SKB; 158 cmd.flags |= CMD_WANT_SKB;
159 might_sleep(); 159 might_sleep();
160 } 160 }
161 161
162 cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data); 162 cmd.len = il->cfg->ops->utils->build_addsta_hcmd(sta, data);
163 ret = iwl_legacy_send_cmd(priv, &cmd); 163 ret = il_send_cmd(il, &cmd);
164 164
165 if (ret || (flags & CMD_ASYNC)) 165 if (ret || (flags & CMD_ASYNC))
166 return ret; 166 return ret;
167 167
168 if (ret == 0) { 168 if (ret == 0) {
169 pkt = (struct iwl_rx_packet *)cmd.reply_page; 169 pkt = (struct il_rx_pkt *)cmd.reply_page;
170 ret = iwl_legacy_process_add_sta_resp(priv, sta, pkt, true); 170 ret = il_process_add_sta_resp(il, sta, pkt, true);
171 } 171 }
172 iwl_legacy_free_pages(priv, cmd.reply_page); 172 il_free_pages(il, cmd.reply_page);
173 173
174 return ret; 174 return ret;
175} 175}
176EXPORT_SYMBOL(iwl_legacy_send_add_sta); 176EXPORT_SYMBOL(il_send_add_sta);
177 177
178static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index, 178static void il_set_ht_add_station(struct il_priv *il, u8 idx,
179 struct ieee80211_sta *sta, 179 struct ieee80211_sta *sta,
180 struct iwl_rxon_context *ctx) 180 struct il_rxon_context *ctx)
181{ 181{
182 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap; 182 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
183 __le32 sta_flags; 183 __le32 sta_flags;
@@ -187,13 +187,13 @@ static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
187 goto done; 187 goto done;
188 188
189 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2; 189 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
190 IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n", 190 D_ASSOC("spatial multiplexing power save mode: %s\n",
191 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? 191 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
192 "static" : 192 "static" :
193 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? 193 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
194 "dynamic" : "disabled"); 194 "dynamic" : "disabled");
195 195
196 sta_flags = priv->stations[index].sta.station_flags; 196 sta_flags = il->stations[idx].sta.station_flags;
197 197
198 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK); 198 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
199 199
@@ -207,7 +207,7 @@ static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
207 case WLAN_HT_CAP_SM_PS_DISABLED: 207 case WLAN_HT_CAP_SM_PS_DISABLED:
208 break; 208 break;
209 default: 209 default:
210 IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode); 210 IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode);
211 break; 211 break;
212 } 212 }
213 213
@@ -217,27 +217,27 @@ static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
217 sta_flags |= cpu_to_le32( 217 sta_flags |= cpu_to_le32(
218 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); 218 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
219 219
220 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap)) 220 if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
221 sta_flags |= STA_FLG_HT40_EN_MSK; 221 sta_flags |= STA_FLG_HT40_EN_MSK;
222 else 222 else
223 sta_flags &= ~STA_FLG_HT40_EN_MSK; 223 sta_flags &= ~STA_FLG_HT40_EN_MSK;
224 224
225 priv->stations[index].sta.station_flags = sta_flags; 225 il->stations[idx].sta.station_flags = sta_flags;
226 done: 226 done:
227 return; 227 return;
228} 228}
229 229
230/** 230/**
231 * iwl_legacy_prep_station - Prepare station information for addition 231 * il_prep_station - Prepare station information for addition
232 * 232 *
233 * should be called with sta_lock held 233 * should be called with sta_lock held
234 */ 234 */
235u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, 235u8 il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
236 const u8 *addr, bool is_ap, struct ieee80211_sta *sta) 236 const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
237{ 237{
238 struct iwl_station_entry *station; 238 struct il_station_entry *station;
239 int i; 239 int i;
240 u8 sta_id = IWL_INVALID_STATION; 240 u8 sta_id = IL_INVALID_STATION;
241 u16 rate; 241 u16 rate;
242 242
243 if (is_ap) 243 if (is_ap)
@@ -245,15 +245,15 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
245 else if (is_broadcast_ether_addr(addr)) 245 else if (is_broadcast_ether_addr(addr))
246 sta_id = ctx->bcast_sta_id; 246 sta_id = ctx->bcast_sta_id;
247 else 247 else
248 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) { 248 for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
249 if (!compare_ether_addr(priv->stations[i].sta.sta.addr, 249 if (!compare_ether_addr(il->stations[i].sta.sta.addr,
250 addr)) { 250 addr)) {
251 sta_id = i; 251 sta_id = i;
252 break; 252 break;
253 } 253 }
254 254
255 if (!priv->stations[i].used && 255 if (!il->stations[i].used &&
256 sta_id == IWL_INVALID_STATION) 256 sta_id == IL_INVALID_STATION)
257 sta_id = i; 257 sta_id = i;
258 } 258 }
259 259
@@ -261,7 +261,7 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
261 * These two conditions have the same outcome, but keep them 261 * These two conditions have the same outcome, but keep them
262 * separate 262 * separate
263 */ 263 */
264 if (unlikely(sta_id == IWL_INVALID_STATION)) 264 if (unlikely(sta_id == IL_INVALID_STATION))
265 return sta_id; 265 return sta_id;
266 266
267 /* 267 /*
@@ -269,30 +269,30 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
269 * station. Keep track if one is in progress so that we do not send 269 * station. Keep track if one is in progress so that we do not send
270 * another. 270 * another.
271 */ 271 */
272 if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) { 272 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
273 IWL_DEBUG_INFO(priv, 273 D_INFO(
274 "STA %d already in process of being added.\n", 274 "STA %d already in process of being added.\n",
275 sta_id); 275 sta_id);
276 return sta_id; 276 return sta_id;
277 } 277 }
278 278
279 if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) && 279 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
280 (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) && 280 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
281 !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) { 281 !compare_ether_addr(il->stations[sta_id].sta.sta.addr, addr)) {
282 IWL_DEBUG_ASSOC(priv, 282 D_ASSOC(
283 "STA %d (%pM) already added, not adding again.\n", 283 "STA %d (%pM) already added, not adding again.\n",
284 sta_id, addr); 284 sta_id, addr);
285 return sta_id; 285 return sta_id;
286 } 286 }
287 287
288 station = &priv->stations[sta_id]; 288 station = &il->stations[sta_id];
289 station->used = IWL_STA_DRIVER_ACTIVE; 289 station->used = IL_STA_DRIVER_ACTIVE;
290 IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n", 290 D_ASSOC("Add STA to driver ID %d: %pM\n",
291 sta_id, addr); 291 sta_id, addr);
292 priv->num_stations++; 292 il->num_stations++;
293 293
294 /* Set up the REPLY_ADD_STA command to send to device */ 294 /* Set up the C_ADD_STA command to send to device */
295 memset(&station->sta, 0, sizeof(struct iwl_legacy_addsta_cmd)); 295 memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
296 memcpy(station->sta.sta.addr, addr, ETH_ALEN); 296 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
297 station->sta.mode = 0; 297 station->sta.mode = 0;
298 station->sta.sta.sta_id = sta_id; 298 station->sta.sta.sta_id = sta_id;
@@ -300,7 +300,7 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
300 station->ctxid = ctx->ctxid; 300 station->ctxid = ctx->ctxid;
301 301
302 if (sta) { 302 if (sta) {
303 struct iwl_station_priv_common *sta_priv; 303 struct il_station_priv_common *sta_priv;
304 304
305 sta_priv = (void *)sta->drv_priv; 305 sta_priv = (void *)sta->drv_priv;
306 sta_priv->ctx = ctx; 306 sta_priv->ctx = ctx;
@@ -311,42 +311,42 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
311 * STA and broadcast STA) pass in a NULL sta, and mac80211 311 * STA and broadcast STA) pass in a NULL sta, and mac80211
312 * doesn't allow HT IBSS. 312 * doesn't allow HT IBSS.
313 */ 313 */
314 iwl_legacy_set_ht_add_station(priv, sta_id, sta, ctx); 314 il_set_ht_add_station(il, sta_id, sta, ctx);
315 315
316 /* 3945 only */ 316 /* 3945 only */
317 rate = (priv->band == IEEE80211_BAND_5GHZ) ? 317 rate = (il->band == IEEE80211_BAND_5GHZ) ?
318 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP; 318 RATE_6M_PLCP : RATE_1M_PLCP;
319 /* Turn on both antennas for the station... */ 319 /* Turn on both antennas for the station... */
320 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK); 320 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
321 321
322 return sta_id; 322 return sta_id;
323 323
324} 324}
325EXPORT_SYMBOL_GPL(iwl_legacy_prep_station); 325EXPORT_SYMBOL_GPL(il_prep_station);
326 326
327#define STA_WAIT_TIMEOUT (HZ/2) 327#define STA_WAIT_TIMEOUT (HZ/2)
328 328
329/** 329/**
330 * iwl_legacy_add_station_common - 330 * il_add_station_common -
331 */ 331 */
332int 332int
333iwl_legacy_add_station_common(struct iwl_priv *priv, 333il_add_station_common(struct il_priv *il,
334 struct iwl_rxon_context *ctx, 334 struct il_rxon_context *ctx,
335 const u8 *addr, bool is_ap, 335 const u8 *addr, bool is_ap,
336 struct ieee80211_sta *sta, u8 *sta_id_r) 336 struct ieee80211_sta *sta, u8 *sta_id_r)
337{ 337{
338 unsigned long flags_spin; 338 unsigned long flags_spin;
339 int ret = 0; 339 int ret = 0;
340 u8 sta_id; 340 u8 sta_id;
341 struct iwl_legacy_addsta_cmd sta_cmd; 341 struct il_addsta_cmd sta_cmd;
342 342
343 *sta_id_r = 0; 343 *sta_id_r = 0;
344 spin_lock_irqsave(&priv->sta_lock, flags_spin); 344 spin_lock_irqsave(&il->sta_lock, flags_spin);
345 sta_id = iwl_legacy_prep_station(priv, ctx, addr, is_ap, sta); 345 sta_id = il_prep_station(il, ctx, addr, is_ap, sta);
346 if (sta_id == IWL_INVALID_STATION) { 346 if (sta_id == IL_INVALID_STATION) {
347 IWL_ERR(priv, "Unable to prepare station %pM for addition\n", 347 IL_ERR("Unable to prepare station %pM for addition\n",
348 addr); 348 addr);
349 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 349 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
350 return -EINVAL; 350 return -EINVAL;
351 } 351 }
352 352
@@ -355,75 +355,75 @@ iwl_legacy_add_station_common(struct iwl_priv *priv,
355 * station. Keep track if one is in progress so that we do not send 355 * station. Keep track if one is in progress so that we do not send
356 * another. 356 * another.
357 */ 357 */
358 if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) { 358 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
359 IWL_DEBUG_INFO(priv, 359 D_INFO(
360 "STA %d already in process of being added.\n", 360 "STA %d already in process of being added.\n",
361 sta_id); 361 sta_id);
362 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 362 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
363 return -EEXIST; 363 return -EEXIST;
364 } 364 }
365 365
366 if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) && 366 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
367 (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { 367 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
368 IWL_DEBUG_ASSOC(priv, 368 D_ASSOC(
369 "STA %d (%pM) already added, not adding again.\n", 369 "STA %d (%pM) already added, not adding again.\n",
370 sta_id, addr); 370 sta_id, addr);
371 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 371 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
372 return -EEXIST; 372 return -EEXIST;
373 } 373 }
374 374
375 priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS; 375 il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
376 memcpy(&sta_cmd, &priv->stations[sta_id].sta, 376 memcpy(&sta_cmd, &il->stations[sta_id].sta,
377 sizeof(struct iwl_legacy_addsta_cmd)); 377 sizeof(struct il_addsta_cmd));
378 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 378 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
379 379
380 /* Add station to device's station table */ 380 /* Add station to device's station table */
381 ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); 381 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
382 if (ret) { 382 if (ret) {
383 spin_lock_irqsave(&priv->sta_lock, flags_spin); 383 spin_lock_irqsave(&il->sta_lock, flags_spin);
384 IWL_ERR(priv, "Adding station %pM failed.\n", 384 IL_ERR("Adding station %pM failed.\n",
385 priv->stations[sta_id].sta.sta.addr); 385 il->stations[sta_id].sta.sta.addr);
386 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; 386 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
387 priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; 387 il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
388 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 388 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
389 } 389 }
390 *sta_id_r = sta_id; 390 *sta_id_r = sta_id;
391 return ret; 391 return ret;
392} 392}
393EXPORT_SYMBOL(iwl_legacy_add_station_common); 393EXPORT_SYMBOL(il_add_station_common);
394 394
395/** 395/**
396 * iwl_legacy_sta_ucode_deactivate - deactivate ucode status for a station 396 * il_sta_ucode_deactivate - deactivate ucode status for a station
397 * 397 *
398 * priv->sta_lock must be held 398 * il->sta_lock must be held
399 */ 399 */
400static void iwl_legacy_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id) 400static void il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
401{ 401{
402 /* Ucode must be active and driver must be non active */ 402 /* Ucode must be active and driver must be non active */
403 if ((priv->stations[sta_id].used & 403 if ((il->stations[sta_id].used &
404 (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) != 404 (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
405 IWL_STA_UCODE_ACTIVE) 405 IL_STA_UCODE_ACTIVE)
406 IWL_ERR(priv, "removed non active STA %u\n", sta_id); 406 IL_ERR("removed non active STA %u\n", sta_id);
407 407
408 priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE; 408 il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;
409 409
410 memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry)); 410 memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
411 IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id); 411 D_ASSOC("Removed STA %u\n", sta_id);
412} 412}
413 413
414static int iwl_legacy_send_remove_station(struct iwl_priv *priv, 414static int il_send_remove_station(struct il_priv *il,
415 const u8 *addr, int sta_id, 415 const u8 *addr, int sta_id,
416 bool temporary) 416 bool temporary)
417{ 417{
418 struct iwl_rx_packet *pkt; 418 struct il_rx_pkt *pkt;
419 int ret; 419 int ret;
420 420
421 unsigned long flags_spin; 421 unsigned long flags_spin;
422 struct iwl_rem_sta_cmd rm_sta_cmd; 422 struct il_rem_sta_cmd rm_sta_cmd;
423 423
424 struct iwl_host_cmd cmd = { 424 struct il_host_cmd cmd = {
425 .id = REPLY_REMOVE_STA, 425 .id = C_REM_STA,
426 .len = sizeof(struct iwl_rem_sta_cmd), 426 .len = sizeof(struct il_rem_sta_cmd),
427 .flags = CMD_SYNC, 427 .flags = CMD_SYNC,
428 .data = &rm_sta_cmd, 428 .data = &rm_sta_cmd,
429 }; 429 };
@@ -434,14 +434,14 @@ static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
434 434
435 cmd.flags |= CMD_WANT_SKB; 435 cmd.flags |= CMD_WANT_SKB;
436 436
437 ret = iwl_legacy_send_cmd(priv, &cmd); 437 ret = il_send_cmd(il, &cmd);
438 438
439 if (ret) 439 if (ret)
440 return ret; 440 return ret;
441 441
442 pkt = (struct iwl_rx_packet *)cmd.reply_page; 442 pkt = (struct il_rx_pkt *)cmd.reply_page;
443 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { 443 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
444 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n", 444 IL_ERR("Bad return from C_REM_STA (0x%08X)\n",
445 pkt->hdr.flags); 445 pkt->hdr.flags);
446 ret = -EIO; 446 ret = -EIO;
447 } 447 }
@@ -450,34 +450,34 @@ static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
450 switch (pkt->u.rem_sta.status) { 450 switch (pkt->u.rem_sta.status) {
451 case REM_STA_SUCCESS_MSK: 451 case REM_STA_SUCCESS_MSK:
452 if (!temporary) { 452 if (!temporary) {
453 spin_lock_irqsave(&priv->sta_lock, flags_spin); 453 spin_lock_irqsave(&il->sta_lock, flags_spin);
454 iwl_legacy_sta_ucode_deactivate(priv, sta_id); 454 il_sta_ucode_deactivate(il, sta_id);
455 spin_unlock_irqrestore(&priv->sta_lock, 455 spin_unlock_irqrestore(&il->sta_lock,
456 flags_spin); 456 flags_spin);
457 } 457 }
458 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); 458 D_ASSOC("C_REM_STA PASSED\n");
459 break; 459 break;
460 default: 460 default:
461 ret = -EIO; 461 ret = -EIO;
462 IWL_ERR(priv, "REPLY_REMOVE_STA failed\n"); 462 IL_ERR("C_REM_STA failed\n");
463 break; 463 break;
464 } 464 }
465 } 465 }
466 iwl_legacy_free_pages(priv, cmd.reply_page); 466 il_free_pages(il, cmd.reply_page);
467 467
468 return ret; 468 return ret;
469} 469}
470 470
471/** 471/**
472 * iwl_legacy_remove_station - Remove driver's knowledge of station. 472 * il_remove_station - Remove driver's knowledge of station.
473 */ 473 */
474int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id, 474int il_remove_station(struct il_priv *il, const u8 sta_id,
475 const u8 *addr) 475 const u8 *addr)
476{ 476{
477 unsigned long flags; 477 unsigned long flags;
478 478
479 if (!iwl_legacy_is_ready(priv)) { 479 if (!il_is_ready(il)) {
480 IWL_DEBUG_INFO(priv, 480 D_INFO(
481 "Unable to remove station %pM, device not ready.\n", 481 "Unable to remove station %pM, device not ready.\n",
482 addr); 482 addr);
483 /* 483 /*
@@ -488,85 +488,85 @@ int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id,
488 return 0; 488 return 0;
489 } 489 }
490 490
491 IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n", 491 D_ASSOC("Removing STA from driver:%d %pM\n",
492 sta_id, addr); 492 sta_id, addr);
493 493
494 if (WARN_ON(sta_id == IWL_INVALID_STATION)) 494 if (WARN_ON(sta_id == IL_INVALID_STATION))
495 return -EINVAL; 495 return -EINVAL;
496 496
497 spin_lock_irqsave(&priv->sta_lock, flags); 497 spin_lock_irqsave(&il->sta_lock, flags);
498 498
499 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) { 499 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
500 IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n", 500 D_INFO("Removing %pM but non DRIVER active\n",
501 addr); 501 addr);
502 goto out_err; 502 goto out_err;
503 } 503 }
504 504
505 if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { 505 if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
506 IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n", 506 D_INFO("Removing %pM but non UCODE active\n",
507 addr); 507 addr);
508 goto out_err; 508 goto out_err;
509 } 509 }
510 510
511 if (priv->stations[sta_id].used & IWL_STA_LOCAL) { 511 if (il->stations[sta_id].used & IL_STA_LOCAL) {
512 kfree(priv->stations[sta_id].lq); 512 kfree(il->stations[sta_id].lq);
513 priv->stations[sta_id].lq = NULL; 513 il->stations[sta_id].lq = NULL;
514 } 514 }
515 515
516 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; 516 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
517 517
518 priv->num_stations--; 518 il->num_stations--;
519 519
520 BUG_ON(priv->num_stations < 0); 520 BUG_ON(il->num_stations < 0);
521 521
522 spin_unlock_irqrestore(&priv->sta_lock, flags); 522 spin_unlock_irqrestore(&il->sta_lock, flags);
523 523
524 return iwl_legacy_send_remove_station(priv, addr, sta_id, false); 524 return il_send_remove_station(il, addr, sta_id, false);
525out_err: 525out_err:
526 spin_unlock_irqrestore(&priv->sta_lock, flags); 526 spin_unlock_irqrestore(&il->sta_lock, flags);
527 return -EINVAL; 527 return -EINVAL;
528} 528}
529EXPORT_SYMBOL_GPL(iwl_legacy_remove_station); 529EXPORT_SYMBOL_GPL(il_remove_station);
530 530
531/** 531/**
532 * iwl_legacy_clear_ucode_stations - clear ucode station table bits 532 * il_clear_ucode_stations - clear ucode station table bits
533 * 533 *
534 * This function clears all the bits in the driver indicating 534 * This function clears all the bits in the driver indicating
535 * which stations are active in the ucode. Call when something 535 * which stations are active in the ucode. Call when something
536 * other than explicit station management would cause this in 536 * other than explicit station management would cause this in
537 * the ucode, e.g. unassociated RXON. 537 * the ucode, e.g. unassociated RXON.
538 */ 538 */
539void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv, 539void il_clear_ucode_stations(struct il_priv *il,
540 struct iwl_rxon_context *ctx) 540 struct il_rxon_context *ctx)
541{ 541{
542 int i; 542 int i;
543 unsigned long flags_spin; 543 unsigned long flags_spin;
544 bool cleared = false; 544 bool cleared = false;
545 545
546 IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n"); 546 D_INFO("Clearing ucode stations in driver\n");
547 547
548 spin_lock_irqsave(&priv->sta_lock, flags_spin); 548 spin_lock_irqsave(&il->sta_lock, flags_spin);
549 for (i = 0; i < priv->hw_params.max_stations; i++) { 549 for (i = 0; i < il->hw_params.max_stations; i++) {
550 if (ctx && ctx->ctxid != priv->stations[i].ctxid) 550 if (ctx && ctx->ctxid != il->stations[i].ctxid)
551 continue; 551 continue;
552 552
553 if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) { 553 if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
554 IWL_DEBUG_INFO(priv, 554 D_INFO(
555 "Clearing ucode active for station %d\n", i); 555 "Clearing ucode active for station %d\n", i);
556 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE; 556 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
557 cleared = true; 557 cleared = true;
558 } 558 }
559 } 559 }
560 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 560 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
561 561
562 if (!cleared) 562 if (!cleared)
563 IWL_DEBUG_INFO(priv, 563 D_INFO(
564 "No active stations found to be cleared\n"); 564 "No active stations found to be cleared\n");
565} 565}
566EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations); 566EXPORT_SYMBOL(il_clear_ucode_stations);
567 567
568/** 568/**
569 * iwl_legacy_restore_stations() - Restore driver known stations to device 569 * il_restore_stations() - Restore driver known stations to device
570 * 570 *
571 * All stations considered active by driver, but not present in ucode, is 571 * All stations considered active by driver, but not present in ucode, is
572 * restored. 572 * restored.
@@ -574,58 +574,58 @@ EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations);
574 * Function sleeps. 574 * Function sleeps.
575 */ 575 */
576void 576void
577iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 577il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx)
578{ 578{
579 struct iwl_legacy_addsta_cmd sta_cmd; 579 struct il_addsta_cmd sta_cmd;
580 struct iwl_link_quality_cmd lq; 580 struct il_link_quality_cmd lq;
581 unsigned long flags_spin; 581 unsigned long flags_spin;
582 int i; 582 int i;
583 bool found = false; 583 bool found = false;
584 int ret; 584 int ret;
585 bool send_lq; 585 bool send_lq;
586 586
587 if (!iwl_legacy_is_ready(priv)) { 587 if (!il_is_ready(il)) {
588 IWL_DEBUG_INFO(priv, 588 D_INFO(
589 "Not ready yet, not restoring any stations.\n"); 589 "Not ready yet, not restoring any stations.\n");
590 return; 590 return;
591 } 591 }
592 592
593 IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n"); 593 D_ASSOC("Restoring all known stations ... start.\n");
594 spin_lock_irqsave(&priv->sta_lock, flags_spin); 594 spin_lock_irqsave(&il->sta_lock, flags_spin);
595 for (i = 0; i < priv->hw_params.max_stations; i++) { 595 for (i = 0; i < il->hw_params.max_stations; i++) {
596 if (ctx->ctxid != priv->stations[i].ctxid) 596 if (ctx->ctxid != il->stations[i].ctxid)
597 continue; 597 continue;
598 if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) && 598 if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
599 !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) { 599 !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
600 IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n", 600 D_ASSOC("Restoring sta %pM\n",
601 priv->stations[i].sta.sta.addr); 601 il->stations[i].sta.sta.addr);
602 priv->stations[i].sta.mode = 0; 602 il->stations[i].sta.mode = 0;
603 priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS; 603 il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
604 found = true; 604 found = true;
605 } 605 }
606 } 606 }
607 607
608 for (i = 0; i < priv->hw_params.max_stations; i++) { 608 for (i = 0; i < il->hw_params.max_stations; i++) {
609 if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) { 609 if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
610 memcpy(&sta_cmd, &priv->stations[i].sta, 610 memcpy(&sta_cmd, &il->stations[i].sta,
611 sizeof(struct iwl_legacy_addsta_cmd)); 611 sizeof(struct il_addsta_cmd));
612 send_lq = false; 612 send_lq = false;
613 if (priv->stations[i].lq) { 613 if (il->stations[i].lq) {
614 memcpy(&lq, priv->stations[i].lq, 614 memcpy(&lq, il->stations[i].lq,
615 sizeof(struct iwl_link_quality_cmd)); 615 sizeof(struct il_link_quality_cmd));
616 send_lq = true; 616 send_lq = true;
617 } 617 }
618 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 618 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
619 ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); 619 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
620 if (ret) { 620 if (ret) {
621 spin_lock_irqsave(&priv->sta_lock, flags_spin); 621 spin_lock_irqsave(&il->sta_lock, flags_spin);
622 IWL_ERR(priv, "Adding station %pM failed.\n", 622 IL_ERR("Adding station %pM failed.\n",
623 priv->stations[i].sta.sta.addr); 623 il->stations[i].sta.sta.addr);
624 priv->stations[i].used &= 624 il->stations[i].used &=
625 ~IWL_STA_DRIVER_ACTIVE; 625 ~IL_STA_DRIVER_ACTIVE;
626 priv->stations[i].used &= 626 il->stations[i].used &=
627 ~IWL_STA_UCODE_INPROGRESS; 627 ~IL_STA_UCODE_INPROGRESS;
628 spin_unlock_irqrestore(&priv->sta_lock, 628 spin_unlock_irqrestore(&il->sta_lock,
629 flags_spin); 629 flags_spin);
630 } 630 }
631 /* 631 /*
@@ -633,78 +633,78 @@ iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
633 * current LQ command 633 * current LQ command
634 */ 634 */
635 if (send_lq) 635 if (send_lq)
636 iwl_legacy_send_lq_cmd(priv, ctx, &lq, 636 il_send_lq_cmd(il, ctx, &lq,
637 CMD_SYNC, true); 637 CMD_SYNC, true);
638 spin_lock_irqsave(&priv->sta_lock, flags_spin); 638 spin_lock_irqsave(&il->sta_lock, flags_spin);
639 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS; 639 il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
640 } 640 }
641 } 641 }
642 642
643 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 643 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
644 if (!found) 644 if (!found)
645 IWL_DEBUG_INFO(priv, "Restoring all known stations" 645 D_INFO("Restoring all known stations"
646 " .... no stations to be restored.\n"); 646 " .... no stations to be restored.\n");
647 else 647 else
648 IWL_DEBUG_INFO(priv, "Restoring all known stations" 648 D_INFO("Restoring all known stations"
649 " .... complete.\n"); 649 " .... complete.\n");
650} 650}
651EXPORT_SYMBOL(iwl_legacy_restore_stations); 651EXPORT_SYMBOL(il_restore_stations);
652 652
653int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv) 653int il_get_free_ucode_key_idx(struct il_priv *il)
654{ 654{
655 int i; 655 int i;
656 656
657 for (i = 0; i < priv->sta_key_max_num; i++) 657 for (i = 0; i < il->sta_key_max_num; i++)
658 if (!test_and_set_bit(i, &priv->ucode_key_table)) 658 if (!test_and_set_bit(i, &il->ucode_key_table))
659 return i; 659 return i;
660 660
661 return WEP_INVALID_OFFSET; 661 return WEP_INVALID_OFFSET;
662} 662}
663EXPORT_SYMBOL(iwl_legacy_get_free_ucode_key_index); 663EXPORT_SYMBOL(il_get_free_ucode_key_idx);
664 664
665void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv) 665void il_dealloc_bcast_stations(struct il_priv *il)
666{ 666{
667 unsigned long flags; 667 unsigned long flags;
668 int i; 668 int i;
669 669
670 spin_lock_irqsave(&priv->sta_lock, flags); 670 spin_lock_irqsave(&il->sta_lock, flags);
671 for (i = 0; i < priv->hw_params.max_stations; i++) { 671 for (i = 0; i < il->hw_params.max_stations; i++) {
672 if (!(priv->stations[i].used & IWL_STA_BCAST)) 672 if (!(il->stations[i].used & IL_STA_BCAST))
673 continue; 673 continue;
674 674
675 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE; 675 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
676 priv->num_stations--; 676 il->num_stations--;
677 BUG_ON(priv->num_stations < 0); 677 BUG_ON(il->num_stations < 0);
678 kfree(priv->stations[i].lq); 678 kfree(il->stations[i].lq);
679 priv->stations[i].lq = NULL; 679 il->stations[i].lq = NULL;
680 } 680 }
681 spin_unlock_irqrestore(&priv->sta_lock, flags); 681 spin_unlock_irqrestore(&il->sta_lock, flags);
682} 682}
683EXPORT_SYMBOL_GPL(iwl_legacy_dealloc_bcast_stations); 683EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
684 684
685#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG 685#ifdef CONFIG_IWLEGACY_DEBUG
686static void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv, 686static void il_dump_lq_cmd(struct il_priv *il,
687 struct iwl_link_quality_cmd *lq) 687 struct il_link_quality_cmd *lq)
688{ 688{
689 int i; 689 int i;
690 IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id); 690 D_RATE("lq station id 0x%x\n", lq->sta_id);
691 IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n", 691 D_RATE("lq ant 0x%X 0x%X\n",
692 lq->general_params.single_stream_ant_msk, 692 lq->general_params.single_stream_ant_msk,
693 lq->general_params.dual_stream_ant_msk); 693 lq->general_params.dual_stream_ant_msk);
694 694
695 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) 695 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
696 IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n", 696 D_RATE("lq idx %d 0x%X\n",
697 i, lq->rs_table[i].rate_n_flags); 697 i, lq->rs_table[i].rate_n_flags);
698} 698}
699#else 699#else
700static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv, 700static inline void il_dump_lq_cmd(struct il_priv *il,
701 struct iwl_link_quality_cmd *lq) 701 struct il_link_quality_cmd *lq)
702{ 702{
703} 703}
704#endif 704#endif
705 705
706/** 706/**
707 * iwl_legacy_is_lq_table_valid() - Test one aspect of LQ cmd for validity 707 * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity
708 * 708 *
709 * It sometimes happens when a HT rate has been in use and we 709 * It sometimes happens when a HT rate has been in use and we
710 * loose connectivity with AP then mac80211 will first tell us that the 710 * loose connectivity with AP then mac80211 will first tell us that the
@@ -714,22 +714,22 @@ static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
714 * Test for this to prevent driver from sending LQ command between the time 714 * Test for this to prevent driver from sending LQ command between the time
715 * RXON flags are updated and when LQ command is updated. 715 * RXON flags are updated and when LQ command is updated.
716 */ 716 */
717static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv, 717static bool il_is_lq_table_valid(struct il_priv *il,
718 struct iwl_rxon_context *ctx, 718 struct il_rxon_context *ctx,
719 struct iwl_link_quality_cmd *lq) 719 struct il_link_quality_cmd *lq)
720{ 720{
721 int i; 721 int i;
722 722
723 if (ctx->ht.enabled) 723 if (ctx->ht.enabled)
724 return true; 724 return true;
725 725
726 IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n", 726 D_INFO("Channel %u is not an HT channel\n",
727 ctx->active.channel); 727 ctx->active.channel);
728 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { 728 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
729 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & 729 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
730 RATE_MCS_HT_MSK) { 730 RATE_MCS_HT_MSK) {
731 IWL_DEBUG_INFO(priv, 731 D_INFO(
732 "index %d of LQ expects HT channel\n", 732 "idx %d of LQ expects HT channel\n",
733 i); 733 i);
734 return false; 734 return false;
735 } 735 }
@@ -738,7 +738,7 @@ static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
738} 738}
739 739
740/** 740/**
741 * iwl_legacy_send_lq_cmd() - Send link quality command 741 * il_send_lq_cmd() - Send link quality command
742 * @init: This command is sent as part of station initialization right 742 * @init: This command is sent as part of station initialization right
743 * after station has been added. 743 * after station has been added.
744 * 744 *
@@ -747,35 +747,35 @@ static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
747 * this case to clear the state indicating that station creation is in 747 * this case to clear the state indicating that station creation is in
748 * progress. 748 * progress.
749 */ 749 */
750int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, 750int il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
751 struct iwl_link_quality_cmd *lq, u8 flags, bool init) 751 struct il_link_quality_cmd *lq, u8 flags, bool init)
752{ 752{
753 int ret = 0; 753 int ret = 0;
754 unsigned long flags_spin; 754 unsigned long flags_spin;
755 755
756 struct iwl_host_cmd cmd = { 756 struct il_host_cmd cmd = {
757 .id = REPLY_TX_LINK_QUALITY_CMD, 757 .id = C_TX_LINK_QUALITY_CMD,
758 .len = sizeof(struct iwl_link_quality_cmd), 758 .len = sizeof(struct il_link_quality_cmd),
759 .flags = flags, 759 .flags = flags,
760 .data = lq, 760 .data = lq,
761 }; 761 };
762 762
763 if (WARN_ON(lq->sta_id == IWL_INVALID_STATION)) 763 if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
764 return -EINVAL; 764 return -EINVAL;
765 765
766 766
767 spin_lock_irqsave(&priv->sta_lock, flags_spin); 767 spin_lock_irqsave(&il->sta_lock, flags_spin);
768 if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) { 768 if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
769 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 769 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
770 return -EINVAL; 770 return -EINVAL;
771 } 771 }
772 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 772 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
773 773
774 iwl_legacy_dump_lq_cmd(priv, lq); 774 il_dump_lq_cmd(il, lq);
775 BUG_ON(init && (cmd.flags & CMD_ASYNC)); 775 BUG_ON(init && (cmd.flags & CMD_ASYNC));
776 776
777 if (iwl_legacy_is_lq_table_valid(priv, ctx, lq)) 777 if (il_is_lq_table_valid(il, ctx, lq))
778 ret = iwl_legacy_send_cmd(priv, &cmd); 778 ret = il_send_cmd(il, &cmd);
779 else 779 else
780 ret = -EINVAL; 780 ret = -EINVAL;
781 781
@@ -783,35 +783,35 @@ int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
783 return ret; 783 return ret;
784 784
785 if (init) { 785 if (init) {
786 IWL_DEBUG_INFO(priv, "init LQ command complete," 786 D_INFO("init LQ command complete,"
787 " clearing sta addition status for sta %d\n", 787 " clearing sta addition status for sta %d\n",
788 lq->sta_id); 788 lq->sta_id);
789 spin_lock_irqsave(&priv->sta_lock, flags_spin); 789 spin_lock_irqsave(&il->sta_lock, flags_spin);
790 priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; 790 il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
791 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 791 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
792 } 792 }
793 return ret; 793 return ret;
794} 794}
795EXPORT_SYMBOL(iwl_legacy_send_lq_cmd); 795EXPORT_SYMBOL(il_send_lq_cmd);
796 796
797int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw, 797int il_mac_sta_remove(struct ieee80211_hw *hw,
798 struct ieee80211_vif *vif, 798 struct ieee80211_vif *vif,
799 struct ieee80211_sta *sta) 799 struct ieee80211_sta *sta)
800{ 800{
801 struct iwl_priv *priv = hw->priv; 801 struct il_priv *il = hw->priv;
802 struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv; 802 struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
803 int ret; 803 int ret;
804 804
805 IWL_DEBUG_INFO(priv, "received request to remove station %pM\n", 805 D_INFO("received request to remove station %pM\n",
806 sta->addr); 806 sta->addr);
807 mutex_lock(&priv->mutex); 807 mutex_lock(&il->mutex);
808 IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n", 808 D_INFO("proceeding to remove station %pM\n",
809 sta->addr); 809 sta->addr);
810 ret = iwl_legacy_remove_station(priv, sta_common->sta_id, sta->addr); 810 ret = il_remove_station(il, sta_common->sta_id, sta->addr);
811 if (ret) 811 if (ret)
812 IWL_ERR(priv, "Error removing station %pM\n", 812 IL_ERR("Error removing station %pM\n",
813 sta->addr); 813 sta->addr);
814 mutex_unlock(&priv->mutex); 814 mutex_unlock(&il->mutex);
815 return ret; 815 return ret;
816} 816}
817EXPORT_SYMBOL(iwl_legacy_mac_sta_remove); 817EXPORT_SYMBOL(il_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.h b/drivers/net/wireless/iwlegacy/iwl-sta.h
deleted file mode 100644
index 67bd75fe01a1..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-sta.h
+++ /dev/null
@@ -1,148 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#ifndef __iwl_legacy_sta_h__
30#define __iwl_legacy_sta_h__
31
32#include "iwl-dev.h"
33
34#define HW_KEY_DYNAMIC 0
35#define HW_KEY_DEFAULT 1
36
37#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
38#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
39#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
40 being activated */
41#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
42 (this is for the IBSS BSSID stations) */
43#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
44
45
46void iwl_legacy_restore_stations(struct iwl_priv *priv,
47 struct iwl_rxon_context *ctx);
48void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
49 struct iwl_rxon_context *ctx);
50void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv);
51int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv);
52int iwl_legacy_send_add_sta(struct iwl_priv *priv,
53 struct iwl_legacy_addsta_cmd *sta, u8 flags);
54int iwl_legacy_add_station_common(struct iwl_priv *priv,
55 struct iwl_rxon_context *ctx,
56 const u8 *addr, bool is_ap,
57 struct ieee80211_sta *sta, u8 *sta_id_r);
58int iwl_legacy_remove_station(struct iwl_priv *priv,
59 const u8 sta_id,
60 const u8 *addr);
61int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
62 struct ieee80211_vif *vif,
63 struct ieee80211_sta *sta);
64
65u8 iwl_legacy_prep_station(struct iwl_priv *priv,
66 struct iwl_rxon_context *ctx,
67 const u8 *addr, bool is_ap,
68 struct ieee80211_sta *sta);
69
70int iwl_legacy_send_lq_cmd(struct iwl_priv *priv,
71 struct iwl_rxon_context *ctx,
72 struct iwl_link_quality_cmd *lq,
73 u8 flags, bool init);
74
75/**
76 * iwl_legacy_clear_driver_stations - clear knowledge of all stations from driver
77 * @priv: iwl priv struct
78 *
79 * This is called during iwl_down() to make sure that in the case
80 * we're coming there from a hardware restart mac80211 will be
81 * able to reconfigure stations -- if we're getting there in the
82 * normal down flow then the stations will already be cleared.
83 */
84static inline void iwl_legacy_clear_driver_stations(struct iwl_priv *priv)
85{
86 unsigned long flags;
87 struct iwl_rxon_context *ctx;
88
89 spin_lock_irqsave(&priv->sta_lock, flags);
90 memset(priv->stations, 0, sizeof(priv->stations));
91 priv->num_stations = 0;
92
93 priv->ucode_key_table = 0;
94
95 for_each_context(priv, ctx) {
96 /*
97 * Remove all key information that is not stored as part
98 * of station information since mac80211 may not have had
99 * a chance to remove all the keys. When device is
100 * reconfigured by mac80211 after an error all keys will
101 * be reconfigured.
102 */
103 memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
104 ctx->key_mapping_keys = 0;
105 }
106
107 spin_unlock_irqrestore(&priv->sta_lock, flags);
108}
109
110static inline int iwl_legacy_sta_id(struct ieee80211_sta *sta)
111{
112 if (WARN_ON(!sta))
113 return IWL_INVALID_STATION;
114
115 return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
116}
117
118/**
119 * iwl_legacy_sta_id_or_broadcast - return sta_id or broadcast sta
120 * @priv: iwl priv
121 * @context: the current context
122 * @sta: mac80211 station
123 *
124 * In certain circumstances mac80211 passes a station pointer
125 * that may be %NULL, for example during TX or key setup. In
126 * that case, we need to use the broadcast station, so this
127 * inline wraps that pattern.
128 */
129static inline int iwl_legacy_sta_id_or_broadcast(struct iwl_priv *priv,
130 struct iwl_rxon_context *context,
131 struct ieee80211_sta *sta)
132{
133 int sta_id;
134
135 if (!sta)
136 return context->bcast_sta_id;
137
138 sta_id = iwl_legacy_sta_id(sta);
139
140 /*
141 * mac80211 should not be passing a partially
142 * initialised station!
143 */
144 WARN_ON(sta_id == IWL_INVALID_STATION);
145
146 return sta_id;
147}
148#endif /* __iwl_legacy_sta_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
deleted file mode 100644
index c0dfb1a4e968..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-tx.c
+++ /dev/null
@@ -1,659 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <linux/sched.h>
32#include <linux/slab.h>
33#include <linux/export.h>
34#include <net/mac80211.h>
35#include "iwl-eeprom.h"
36#include "iwl-dev.h"
37#include "iwl-core.h"
38#include "iwl-sta.h"
39#include "iwl-io.h"
40#include "iwl-helpers.h"
41
42/**
43 * iwl_legacy_txq_update_write_ptr - Send new write index to hardware
44 */
45void
46iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
47{
48 u32 reg = 0;
49 int txq_id = txq->q.id;
50
51 if (txq->need_update == 0)
52 return;
53
54 /* if we're trying to save power */
55 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
56 /* wake up nic if it's powered down ...
57 * uCode will wake up, and interrupt us again, so next
58 * time we'll skip this part. */
59 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
60
61 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
62 IWL_DEBUG_INFO(priv,
63 "Tx queue %d requesting wakeup,"
64 " GP1 = 0x%x\n", txq_id, reg);
65 iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
66 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
67 return;
68 }
69
70 iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
71 txq->q.write_ptr | (txq_id << 8));
72
73 /*
74 * else not in power-save mode,
75 * uCode will never sleep when we're
76 * trying to tx (during RFKILL, we're not trying to tx).
77 */
78 } else
79 iwl_write32(priv, HBUS_TARG_WRPTR,
80 txq->q.write_ptr | (txq_id << 8));
81 txq->need_update = 0;
82}
83EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr);
84
85/**
86 * iwl_legacy_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
87 */
88void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
89{
90 struct iwl_tx_queue *txq = &priv->txq[txq_id];
91 struct iwl_queue *q = &txq->q;
92
93 if (q->n_bd == 0)
94 return;
95
96 while (q->write_ptr != q->read_ptr) {
97 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
98 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
99 }
100}
101EXPORT_SYMBOL(iwl_legacy_tx_queue_unmap);
102
103/**
104 * iwl_legacy_tx_queue_free - Deallocate DMA queue.
105 * @txq: Transmit queue to deallocate.
106 *
107 * Empty queue by removing and destroying all BD's.
108 * Free all buffers.
109 * 0-fill, but do not free "txq" descriptor structure.
110 */
111void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id)
112{
113 struct iwl_tx_queue *txq = &priv->txq[txq_id];
114 struct device *dev = &priv->pci_dev->dev;
115 int i;
116
117 iwl_legacy_tx_queue_unmap(priv, txq_id);
118
119 /* De-alloc array of command/tx buffers */
120 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
121 kfree(txq->cmd[i]);
122
123 /* De-alloc circular buffer of TFDs */
124 if (txq->q.n_bd)
125 dma_free_coherent(dev, priv->hw_params.tfd_size *
126 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
127
128 /* De-alloc array of per-TFD driver data */
129 kfree(txq->txb);
130 txq->txb = NULL;
131
132 /* deallocate arrays */
133 kfree(txq->cmd);
134 kfree(txq->meta);
135 txq->cmd = NULL;
136 txq->meta = NULL;
137
138 /* 0-fill queue descriptor structure */
139 memset(txq, 0, sizeof(*txq));
140}
141EXPORT_SYMBOL(iwl_legacy_tx_queue_free);
142
143/**
144 * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
145 */
146void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv)
147{
148 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
149 struct iwl_queue *q = &txq->q;
150 int i;
151
152 if (q->n_bd == 0)
153 return;
154
155 while (q->read_ptr != q->write_ptr) {
156 i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0);
157
158 if (txq->meta[i].flags & CMD_MAPPED) {
159 pci_unmap_single(priv->pci_dev,
160 dma_unmap_addr(&txq->meta[i], mapping),
161 dma_unmap_len(&txq->meta[i], len),
162 PCI_DMA_BIDIRECTIONAL);
163 txq->meta[i].flags = 0;
164 }
165
166 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
167 }
168
169 i = q->n_window;
170 if (txq->meta[i].flags & CMD_MAPPED) {
171 pci_unmap_single(priv->pci_dev,
172 dma_unmap_addr(&txq->meta[i], mapping),
173 dma_unmap_len(&txq->meta[i], len),
174 PCI_DMA_BIDIRECTIONAL);
175 txq->meta[i].flags = 0;
176 }
177}
178EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap);
179
180/**
181 * iwl_legacy_cmd_queue_free - Deallocate DMA queue.
182 * @txq: Transmit queue to deallocate.
183 *
184 * Empty queue by removing and destroying all BD's.
185 * Free all buffers.
186 * 0-fill, but do not free "txq" descriptor structure.
187 */
188void iwl_legacy_cmd_queue_free(struct iwl_priv *priv)
189{
190 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
191 struct device *dev = &priv->pci_dev->dev;
192 int i;
193
194 iwl_legacy_cmd_queue_unmap(priv);
195
196 /* De-alloc array of command/tx buffers */
197 for (i = 0; i <= TFD_CMD_SLOTS; i++)
198 kfree(txq->cmd[i]);
199
200 /* De-alloc circular buffer of TFDs */
201 if (txq->q.n_bd)
202 dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
203 txq->tfds, txq->q.dma_addr);
204
205 /* deallocate arrays */
206 kfree(txq->cmd);
207 kfree(txq->meta);
208 txq->cmd = NULL;
209 txq->meta = NULL;
210
211 /* 0-fill queue descriptor structure */
212 memset(txq, 0, sizeof(*txq));
213}
214EXPORT_SYMBOL(iwl_legacy_cmd_queue_free);
215
216/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
217 * DMA services
218 *
219 * Theory of operation
220 *
221 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
222 * of buffer descriptors, each of which points to one or more data buffers for
223 * the device to read from or fill. Driver and device exchange status of each
224 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
225 * entries in each circular buffer, to protect against confusing empty and full
226 * queue states.
227 *
228 * The device reads or writes the data in the queues via the device's several
229 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
230 *
231 * For Tx queue, there are low mark and high mark limits. If, after queuing
232 * the packet for Tx, free space become < low mark, Tx queue stopped. When
233 * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
234 * Tx queue resumed.
235 *
236 * See more detailed info in iwl-4965-hw.h.
237 ***************************************************/
238
239int iwl_legacy_queue_space(const struct iwl_queue *q)
240{
241 int s = q->read_ptr - q->write_ptr;
242
243 if (q->read_ptr > q->write_ptr)
244 s -= q->n_bd;
245
246 if (s <= 0)
247 s += q->n_window;
248 /* keep some reserve to not confuse empty and full situations */
249 s -= 2;
250 if (s < 0)
251 s = 0;
252 return s;
253}
254EXPORT_SYMBOL(iwl_legacy_queue_space);
255
256
257/**
258 * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes
259 */
260static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
261 int count, int slots_num, u32 id)
262{
263 q->n_bd = count;
264 q->n_window = slots_num;
265 q->id = id;
266
267 /* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap
268 * and iwl_legacy_queue_dec_wrap are broken. */
269 BUG_ON(!is_power_of_2(count));
270
271 /* slots_num must be power-of-two size, otherwise
272 * iwl_legacy_get_cmd_index is broken. */
273 BUG_ON(!is_power_of_2(slots_num));
274
275 q->low_mark = q->n_window / 4;
276 if (q->low_mark < 4)
277 q->low_mark = 4;
278
279 q->high_mark = q->n_window / 8;
280 if (q->high_mark < 2)
281 q->high_mark = 2;
282
283 q->write_ptr = q->read_ptr = 0;
284
285 return 0;
286}
287
288/**
289 * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
290 */
291static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv,
292 struct iwl_tx_queue *txq, u32 id)
293{
294 struct device *dev = &priv->pci_dev->dev;
295 size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
296
297 /* Driver private data, only for Tx (not command) queues,
298 * not shared with device. */
299 if (id != priv->cmd_queue) {
300 txq->txb = kzalloc(sizeof(txq->txb[0]) *
301 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
302 if (!txq->txb) {
303 IWL_ERR(priv, "kmalloc for auxiliary BD "
304 "structures failed\n");
305 goto error;
306 }
307 } else {
308 txq->txb = NULL;
309 }
310
311 /* Circular buffer of transmit frame descriptors (TFDs),
312 * shared with device */
313 txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
314 GFP_KERNEL);
315 if (!txq->tfds) {
316 IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
317 goto error;
318 }
319 txq->q.id = id;
320
321 return 0;
322
323 error:
324 kfree(txq->txb);
325 txq->txb = NULL;
326
327 return -ENOMEM;
328}
329
330/**
331 * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue
332 */
333int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
334 int slots_num, u32 txq_id)
335{
336 int i, len;
337 int ret;
338 int actual_slots = slots_num;
339
340 /*
341 * Alloc buffer array for commands (Tx or other types of commands).
342 * For the command queue (#4/#9), allocate command space + one big
343 * command for scan, since scan command is very huge; the system will
344 * not have two scans at the same time, so only one is needed.
345 * For normal Tx queues (all other queues), no super-size command
346 * space is needed.
347 */
348 if (txq_id == priv->cmd_queue)
349 actual_slots++;
350
351 txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
352 GFP_KERNEL);
353 txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
354 GFP_KERNEL);
355
356 if (!txq->meta || !txq->cmd)
357 goto out_free_arrays;
358
359 len = sizeof(struct iwl_device_cmd);
360 for (i = 0; i < actual_slots; i++) {
361 /* only happens for cmd queue */
362 if (i == slots_num)
363 len = IWL_MAX_CMD_SIZE;
364
365 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
366 if (!txq->cmd[i])
367 goto err;
368 }
369
370 /* Alloc driver data array and TFD circular buffer */
371 ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id);
372 if (ret)
373 goto err;
374
375 txq->need_update = 0;
376
377 /*
378 * For the default queues 0-3, set up the swq_id
379 * already -- all others need to get one later
380 * (if they need one at all).
381 */
382 if (txq_id < 4)
383 iwl_legacy_set_swq_id(txq, txq_id, txq_id);
384
385 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
386 * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. */
387 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
388
389 /* Initialize queue's high/low-water marks, and head/tail indexes */
390 iwl_legacy_queue_init(priv, &txq->q,
391 TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
392
393 /* Tell device where to find queue */
394 priv->cfg->ops->lib->txq_init(priv, txq);
395
396 return 0;
397err:
398 for (i = 0; i < actual_slots; i++)
399 kfree(txq->cmd[i]);
400out_free_arrays:
401 kfree(txq->meta);
402 kfree(txq->cmd);
403
404 return -ENOMEM;
405}
406EXPORT_SYMBOL(iwl_legacy_tx_queue_init);
407
408void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
409 int slots_num, u32 txq_id)
410{
411 int actual_slots = slots_num;
412
413 if (txq_id == priv->cmd_queue)
414 actual_slots++;
415
416 memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
417
418 txq->need_update = 0;
419
420 /* Initialize queue's high/low-water marks, and head/tail indexes */
421 iwl_legacy_queue_init(priv, &txq->q,
422 TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
423
424 /* Tell device where to find queue */
425 priv->cfg->ops->lib->txq_init(priv, txq);
426}
427EXPORT_SYMBOL(iwl_legacy_tx_queue_reset);
428
429/*************** HOST COMMAND QUEUE FUNCTIONS *****/
430
431/**
432 * iwl_legacy_enqueue_hcmd - enqueue a uCode command
433 * @priv: device private data point
434 * @cmd: a point to the ucode command structure
435 *
436 * The function returns < 0 values to indicate the operation is
437 * failed. On success, it turns the index (> 0) of command in the
438 * command queue.
439 */
440int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
441{
442 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
443 struct iwl_queue *q = &txq->q;
444 struct iwl_device_cmd *out_cmd;
445 struct iwl_cmd_meta *out_meta;
446 dma_addr_t phys_addr;
447 unsigned long flags;
448 int len;
449 u32 idx;
450 u16 fix_size;
451
452 cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
453 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
454
455 /* If any of the command structures end up being larger than
456 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
457 * we will need to increase the size of the TFD entries
458 * Also, check to see if command buffer should not exceed the size
459 * of device_cmd and max_cmd_size. */
460 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
461 !(cmd->flags & CMD_SIZE_HUGE));
462 BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
463
464 if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) {
465 IWL_WARN(priv, "Not sending command - %s KILL\n",
466 iwl_legacy_is_rfkill(priv) ? "RF" : "CT");
467 return -EIO;
468 }
469
470 spin_lock_irqsave(&priv->hcmd_lock, flags);
471
472 if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
473 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
474
475 IWL_ERR(priv, "Restarting adapter due to command queue full\n");
476 queue_work(priv->workqueue, &priv->restart);
477 return -ENOSPC;
478 }
479
480 idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
481 out_cmd = txq->cmd[idx];
482 out_meta = &txq->meta[idx];
483
484 if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
485 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
486 return -ENOSPC;
487 }
488
489 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
490 out_meta->flags = cmd->flags | CMD_MAPPED;
491 if (cmd->flags & CMD_WANT_SKB)
492 out_meta->source = cmd;
493 if (cmd->flags & CMD_ASYNC)
494 out_meta->callback = cmd->callback;
495
496 out_cmd->hdr.cmd = cmd->id;
497 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
498
499 /* At this point, the out_cmd now has all of the incoming cmd
500 * information */
501
502 out_cmd->hdr.flags = 0;
503 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
504 INDEX_TO_SEQ(q->write_ptr));
505 if (cmd->flags & CMD_SIZE_HUGE)
506 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
507 len = sizeof(struct iwl_device_cmd);
508 if (idx == TFD_CMD_SLOTS)
509 len = IWL_MAX_CMD_SIZE;
510
511#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
512 switch (out_cmd->hdr.cmd) {
513 case REPLY_TX_LINK_QUALITY_CMD:
514 case SENSITIVITY_CMD:
515 IWL_DEBUG_HC_DUMP(priv,
516 "Sending command %s (#%x), seq: 0x%04X, "
517 "%d bytes at %d[%d]:%d\n",
518 iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
519 out_cmd->hdr.cmd,
520 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
521 q->write_ptr, idx, priv->cmd_queue);
522 break;
523 default:
524 IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
525 "%d bytes at %d[%d]:%d\n",
526 iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
527 out_cmd->hdr.cmd,
528 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
529 q->write_ptr, idx, priv->cmd_queue);
530 }
531#endif
532 txq->need_update = 1;
533
534 if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
535 /* Set up entry in queue's byte count circular buffer */
536 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
537
538 phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
539 fix_size, PCI_DMA_BIDIRECTIONAL);
540 dma_unmap_addr_set(out_meta, mapping, phys_addr);
541 dma_unmap_len_set(out_meta, len, fix_size);
542
543 trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr,
544 fix_size, cmd->flags);
545
546 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
547 phys_addr, fix_size, 1,
548 U32_PAD(cmd->len));
549
550 /* Increment and update queue's write index */
551 q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
552 iwl_legacy_txq_update_write_ptr(priv, txq);
553
554 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
555 return idx;
556}
557
558/**
559 * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
560 *
561 * When FW advances 'R' index, all entries between old and new 'R' index
562 * need to be reclaimed. As result, some free space forms. If there is
563 * enough free space (> low mark), wake the stack that feeds us.
564 */
565static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
566 int idx, int cmd_idx)
567{
568 struct iwl_tx_queue *txq = &priv->txq[txq_id];
569 struct iwl_queue *q = &txq->q;
570 int nfreed = 0;
571
572 if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) {
573 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
574 "is out of range [0-%d] %d %d.\n", txq_id,
575 idx, q->n_bd, q->write_ptr, q->read_ptr);
576 return;
577 }
578
579 for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
580 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
581
582 if (nfreed++ > 0) {
583 IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
584 q->write_ptr, q->read_ptr);
585 queue_work(priv->workqueue, &priv->restart);
586 }
587
588 }
589}
590
591/**
592 * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
593 * @rxb: Rx buffer to reclaim
594 *
595 * If an Rx buffer has an async callback associated with it the callback
596 * will be executed. The attached skb (if present) will only be freed
597 * if the callback returns 1
598 */
599void
600iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
601{
602 struct iwl_rx_packet *pkt = rxb_addr(rxb);
603 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
604 int txq_id = SEQ_TO_QUEUE(sequence);
605 int index = SEQ_TO_INDEX(sequence);
606 int cmd_index;
607 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
608 struct iwl_device_cmd *cmd;
609 struct iwl_cmd_meta *meta;
610 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
611 unsigned long flags;
612
613 /* If a Tx command is being handled and it isn't in the actual
614 * command queue then there a command routing bug has been introduced
615 * in the queue management code. */
616 if (WARN(txq_id != priv->cmd_queue,
617 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
618 txq_id, priv->cmd_queue, sequence,
619 priv->txq[priv->cmd_queue].q.read_ptr,
620 priv->txq[priv->cmd_queue].q.write_ptr)) {
621 iwl_print_hex_error(priv, pkt, 32);
622 return;
623 }
624
625 cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge);
626 cmd = txq->cmd[cmd_index];
627 meta = &txq->meta[cmd_index];
628
629 txq->time_stamp = jiffies;
630
631 pci_unmap_single(priv->pci_dev,
632 dma_unmap_addr(meta, mapping),
633 dma_unmap_len(meta, len),
634 PCI_DMA_BIDIRECTIONAL);
635
636 /* Input error checking is done when commands are added to queue. */
637 if (meta->flags & CMD_WANT_SKB) {
638 meta->source->reply_page = (unsigned long)rxb_addr(rxb);
639 rxb->page = NULL;
640 } else if (meta->callback)
641 meta->callback(priv, cmd, pkt);
642
643 spin_lock_irqsave(&priv->hcmd_lock, flags);
644
645 iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
646
647 if (!(meta->flags & CMD_ASYNC)) {
648 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
649 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
650 iwl_legacy_get_cmd_string(cmd->hdr.cmd));
651 wake_up(&priv->wait_command_queue);
652 }
653
654 /* Mark as unmapped */
655 meta->flags = 0;
656
657 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
658}
659EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete);
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
deleted file mode 100644
index b282d869a546..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
+++ /dev/null
@@ -1,4016 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/firmware.h>
44#include <linux/etherdevice.h>
45#include <linux/if_arp.h>
46
47#include <net/ieee80211_radiotap.h>
48#include <net/mac80211.h>
49
50#include <asm/div64.h>
51
52#define DRV_NAME "iwl3945"
53
54#include "iwl-fh.h"
55#include "iwl-3945-fh.h"
56#include "iwl-commands.h"
57#include "iwl-sta.h"
58#include "iwl-3945.h"
59#include "iwl-core.h"
60#include "iwl-helpers.h"
61#include "iwl-dev.h"
62#include "iwl-spectrum.h"
63
64/*
65 * module name, copyright, version, etc.
66 */
67
68#define DRV_DESCRIPTION \
69"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
70
71#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
72#define VD "d"
73#else
74#define VD
75#endif
76
77/*
78 * add "s" to indicate spectrum measurement included.
79 * we add it here to be consistent with previous releases in which
80 * this was configurable.
81 */
82#define DRV_VERSION IWLWIFI_VERSION VD "s"
83#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
84#define DRV_AUTHOR "<ilw@linux.intel.com>"
85
86MODULE_DESCRIPTION(DRV_DESCRIPTION);
87MODULE_VERSION(DRV_VERSION);
88MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
89MODULE_LICENSE("GPL");
90
/*
 * Module parameters (overridable at modprobe time).
 * Non-zero defaults below: software crypto enabled, firmware restart on
 * error enabled, hardware-assisted scan disabled.
 */
struct iwl_mod_params iwl3945_mod_params = {
	.sw_crypto = 1,
	.restart_fw = 1,
	.disable_hw_scan = 1,
	/* the rest are 0 by default */
};
98
99/**
100 * iwl3945_get_antenna_flags - Get antenna flags for RXON command
101 * @priv: eeprom and antenna fields are used to determine antenna flags
102 *
103 * priv->eeprom39 is used to determine if antenna AUX/MAIN are reversed
104 * iwl3945_mod_params.antenna specifies the antenna diversity mode:
105 *
106 * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
107 * IWL_ANTENNA_MAIN - Force MAIN antenna
108 * IWL_ANTENNA_AUX - Force AUX antenna
109 */
110__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv)
111{
112 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
113
114 switch (iwl3945_mod_params.antenna) {
115 case IWL_ANTENNA_DIVERSITY:
116 return 0;
117
118 case IWL_ANTENNA_MAIN:
119 if (eeprom->antenna_switch_type)
120 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
121 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
122
123 case IWL_ANTENNA_AUX:
124 if (eeprom->antenna_switch_type)
125 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
126 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
127 }
128
129 /* bad antenna selector value */
130 IWL_ERR(priv, "Bad antenna selector value (0x%x)\n",
131 iwl3945_mod_params.antenna);
132
133 return 0; /* "diversity" is default if error */
134}
135
/*
 * Program a CCMP (AES) pairwise key into the uCode station table entry
 * for @sta_id and push it to the device with an async ADD_STA command.
 *
 * Returns the result of iwl_legacy_send_add_sta(); the driver-side key
 * state is updated under sta_lock before the command is queued.
 */
static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
				struct ieee80211_key_conf *keyconf,
				u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	int ret;

	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);

	/* keys installed on the broadcast station protect multicast frames */
	if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	/* have mac80211 generate the IV; hardware does the CCMP encryption */
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->hw_key_idx = keyconf->keyidx;
	key_flags &= ~STA_KEY_FLG_INVALID;

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
	memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
	       keyconf->keylen);

	memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
	       keyconf->keylen);

	/* only allocate a fresh uCode key slot when no key is installed yet */
	if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
			== STA_KEY_FLG_NO_ENC)
		priv->stations[sta_id].sta.key.key_offset =
				 iwl_legacy_get_free_ucode_key_index(priv);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
		"no space for a new key");

	priv->stations[sta_id].sta.key.key_flags = key_flags;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");

	/* CMD_ASYNC: cannot sleep while holding sta_lock */
	ret = iwl_legacy_send_add_sta(priv,
				&priv->stations[sta_id].sta, CMD_ASYNC);

	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return ret;
}
186
/*
 * TKIP hardware crypto is not supported by this driver (-EOPNOTSUPP);
 * presumably mac80211 falls back to software crypto — confirm with caller.
 */
static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv,
				struct ieee80211_key_conf *keyconf,
				u8 sta_id)
{
	return -EOPNOTSUPP;
}
193
/*
 * Dynamic (per-station) WEP keys are not supported by this driver
 * (-EOPNOTSUPP); presumably handled in software by mac80211 — confirm.
 */
static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
				struct ieee80211_key_conf *keyconf,
				u8 sta_id)
{
	return -EOPNOTSUPP;
}
200
/*
 * Wipe the driver-side and uCode key state for station @sta_id and send
 * a synchronous ADD_STA to clear the key on the device.
 *
 * The command is copied to a local buffer under sta_lock so the sync
 * (sleeping) send can happen after the lock is dropped.
 */
static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
{
	unsigned long flags;
	struct iwl_legacy_addsta_cmd sta_cmd;

	spin_lock_irqsave(&priv->sta_lock, flags);
	memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
	memset(&priv->stations[sta_id].sta.key, 0,
		sizeof(struct iwl4965_keyinfo));
	priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* snapshot under the lock; send after unlock since CMD_SYNC sleeps */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
219
220static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
221 struct ieee80211_key_conf *keyconf, u8 sta_id)
222{
223 int ret = 0;
224
225 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
226
227 switch (keyconf->cipher) {
228 case WLAN_CIPHER_SUITE_CCMP:
229 ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
230 break;
231 case WLAN_CIPHER_SUITE_TKIP:
232 ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
233 break;
234 case WLAN_CIPHER_SUITE_WEP40:
235 case WLAN_CIPHER_SUITE_WEP104:
236 ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id);
237 break;
238 default:
239 IWL_ERR(priv, "Unknown alg: %s alg=%x\n", __func__,
240 keyconf->cipher);
241 ret = -EINVAL;
242 }
243
244 IWL_DEBUG_WEP(priv, "Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
245 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
246 sta_id, ret);
247
248 return ret;
249}
250
251static int iwl3945_remove_static_key(struct iwl_priv *priv)
252{
253 int ret = -EOPNOTSUPP;
254
255 return ret;
256}
257
/*
 * Static (default) key install: WEP ciphers are explicitly refused with
 * -EOPNOTSUPP; any other cipher is not a valid static key (-EINVAL).
 */
static int iwl3945_set_static_key(struct iwl_priv *priv,
				struct ieee80211_key_conf *key)
{
	if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    key->cipher == WLAN_CIPHER_SUITE_WEP104)
		return -EOPNOTSUPP;

	IWL_ERR(priv, "Static key invalid: cipher %x\n", key->cipher);
	return -EINVAL;
}
268
269static void iwl3945_clear_free_frames(struct iwl_priv *priv)
270{
271 struct list_head *element;
272
273 IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
274 priv->frames_count);
275
276 while (!list_empty(&priv->free_frames)) {
277 element = priv->free_frames.next;
278 list_del(element);
279 kfree(list_entry(element, struct iwl3945_frame, list));
280 priv->frames_count--;
281 }
282
283 if (priv->frames_count) {
284 IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
285 priv->frames_count);
286 priv->frames_count = 0;
287 }
288}
289
290static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv)
291{
292 struct iwl3945_frame *frame;
293 struct list_head *element;
294 if (list_empty(&priv->free_frames)) {
295 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
296 if (!frame) {
297 IWL_ERR(priv, "Could not allocate frame!\n");
298 return NULL;
299 }
300
301 priv->frames_count++;
302 return frame;
303 }
304
305 element = priv->free_frames.next;
306 list_del(element);
307 return list_entry(element, struct iwl3945_frame, list);
308}
309
/* Return a frame to the pre-allocated pool, zeroing it for reuse. */
static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame)
{
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &priv->free_frames);
}
315
316unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
317 struct ieee80211_hdr *hdr,
318 int left)
319{
320
321 if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
322 return 0;
323
324 if (priv->beacon_skb->len > left)
325 return 0;
326
327 memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
328
329 return priv->beacon_skb->len;
330}
331
/*
 * Build a REPLY_TX_BEACON command in a pooled frame buffer and send it
 * synchronously. The frame is returned to the pool in all cases.
 * Returns 0 on success or a negative errno.
 */
static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
{
	struct iwl3945_frame *frame;
	unsigned int frame_size;
	int rc;
	u8 rate;

	frame = iwl3945_get_free_frame(priv);

	if (!frame) {
		IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
			  "command.\n");
		return -ENOMEM;
	}

	/* beacons go out at the lowest PLCP rate for the BSS context */
	rate = iwl_legacy_get_lowest_plcp(priv,
				&priv->contexts[IWL_RXON_CTX_BSS]);

	frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);

	rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
			      &frame->u.cmd[0]);

	iwl3945_free_frame(priv, frame);

	return rc;
}
359
/* Free the DMA-coherent shared area allocated for 3945 HW, if present. */
static void iwl3945_unset_hw_params(struct iwl_priv *priv)
{
	if (priv->_3945.shared_virt)
		dma_free_coherent(&priv->pci_dev->dev,
				  sizeof(struct iwl3945_shared),
				  priv->_3945.shared_virt,
				  priv->_3945.shared_phys);
}
368
/*
 * Fill the Tx command's security control/key fields from the station's
 * stored key info, according to the configured cipher.
 * TKIP is intentionally left unhandled here (no sec_ctl bits set).
 */
static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_device_cmd *cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
	struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;

	tx_cmd->sec_ctl = 0;

	switch (keyinfo->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		/* 104-bit WEP needs the 128-bit key-size bit on top of WEP */
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
		    (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;

		/* WEP key goes after the 3-byte IV hole in the key field */
		memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", info->control.hw_key->hw_key_idx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode cipher %x\n", keyinfo->cipher);
		break;
	}
}
408
409/*
410 * handle build REPLY_TX command notification.
411 */
412static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
413 struct iwl_device_cmd *cmd,
414 struct ieee80211_tx_info *info,
415 struct ieee80211_hdr *hdr, u8 std_id)
416{
417 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
418 __le32 tx_flags = tx_cmd->tx_flags;
419 __le16 fc = hdr->frame_control;
420
421 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
422 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
423 tx_flags |= TX_CMD_FLG_ACK_MSK;
424 if (ieee80211_is_mgmt(fc))
425 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
426 if (ieee80211_is_probe_resp(fc) &&
427 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
428 tx_flags |= TX_CMD_FLG_TSF_MSK;
429 } else {
430 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
431 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
432 }
433
434 tx_cmd->sta_id = std_id;
435 if (ieee80211_has_morefrags(fc))
436 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
437
438 if (ieee80211_is_data_qos(fc)) {
439 u8 *qc = ieee80211_get_qos_ctl(hdr);
440 tx_cmd->tid_tspec = qc[0] & 0xf;
441 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
442 } else {
443 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
444 }
445
446 iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
447
448 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
449 if (ieee80211_is_mgmt(fc)) {
450 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
451 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
452 else
453 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
454 } else {
455 tx_cmd->timeout.pm_frame_timeout = 0;
456 }
457
458 tx_cmd->driver_txop = 0;
459 tx_cmd->tx_flags = tx_flags;
460 tx_cmd->next_frame_len = 0;
461}
462
463/*
464 * start REPLY_TX command process
465 */
466static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
467{
468 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
469 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
470 struct iwl3945_tx_cmd *tx_cmd;
471 struct iwl_tx_queue *txq = NULL;
472 struct iwl_queue *q = NULL;
473 struct iwl_device_cmd *out_cmd;
474 struct iwl_cmd_meta *out_meta;
475 dma_addr_t phys_addr;
476 dma_addr_t txcmd_phys;
477 int txq_id = skb_get_queue_mapping(skb);
478 u16 len, idx, hdr_len;
479 u8 id;
480 u8 unicast;
481 u8 sta_id;
482 u8 tid = 0;
483 __le16 fc;
484 u8 wait_write_ptr = 0;
485 unsigned long flags;
486
487 spin_lock_irqsave(&priv->lock, flags);
488 if (iwl_legacy_is_rfkill(priv)) {
489 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
490 goto drop_unlock;
491 }
492
493 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
494 IWL_ERR(priv, "ERROR: No TX rate available.\n");
495 goto drop_unlock;
496 }
497
498 unicast = !is_multicast_ether_addr(hdr->addr1);
499 id = 0;
500
501 fc = hdr->frame_control;
502
503#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
504 if (ieee80211_is_auth(fc))
505 IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
506 else if (ieee80211_is_assoc_req(fc))
507 IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
508 else if (ieee80211_is_reassoc_req(fc))
509 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
510#endif
511
512 spin_unlock_irqrestore(&priv->lock, flags);
513
514 hdr_len = ieee80211_hdrlen(fc);
515
516 /* Find index into station table for destination station */
517 sta_id = iwl_legacy_sta_id_or_broadcast(
518 priv, &priv->contexts[IWL_RXON_CTX_BSS],
519 info->control.sta);
520 if (sta_id == IWL_INVALID_STATION) {
521 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
522 hdr->addr1);
523 goto drop;
524 }
525
526 IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id);
527
528 if (ieee80211_is_data_qos(fc)) {
529 u8 *qc = ieee80211_get_qos_ctl(hdr);
530 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
531 if (unlikely(tid >= MAX_TID_COUNT))
532 goto drop;
533 }
534
535 /* Descriptor for chosen Tx queue */
536 txq = &priv->txq[txq_id];
537 q = &txq->q;
538
539 if ((iwl_legacy_queue_space(q) < q->high_mark))
540 goto drop;
541
542 spin_lock_irqsave(&priv->lock, flags);
543
544 idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
545
546 /* Set up driver data for this TFD */
547 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
548 txq->txb[q->write_ptr].skb = skb;
549 txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS];
550
551 /* Init first empty entry in queue's array of Tx/cmd buffers */
552 out_cmd = txq->cmd[idx];
553 out_meta = &txq->meta[idx];
554 tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
555 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
556 memset(tx_cmd, 0, sizeof(*tx_cmd));
557
558 /*
559 * Set up the Tx-command (not MAC!) header.
560 * Store the chosen Tx queue and TFD index within the sequence field;
561 * after Tx, uCode's Tx response will return this value so driver can
562 * locate the frame within the tx queue and do post-tx processing.
563 */
564 out_cmd->hdr.cmd = REPLY_TX;
565 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
566 INDEX_TO_SEQ(q->write_ptr)));
567
568 /* Copy MAC header from skb into command buffer */
569 memcpy(tx_cmd->hdr, hdr, hdr_len);
570
571
572 if (info->control.hw_key)
573 iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
574
575 /* TODO need this for burst mode later on */
576 iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
577
578 /* set is_hcca to 0; it probably will never be implemented */
579 iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
580
581 /* Total # bytes to be transmitted */
582 len = (u16)skb->len;
583 tx_cmd->len = cpu_to_le16(len);
584
585 iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
586 iwl_legacy_update_stats(priv, true, fc, len);
587 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
588 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
589
590 if (!ieee80211_has_morefrags(hdr->frame_control)) {
591 txq->need_update = 1;
592 } else {
593 wait_write_ptr = 1;
594 txq->need_update = 0;
595 }
596
597 IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
598 le16_to_cpu(out_cmd->hdr.sequence));
599 IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
600 iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
601 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
602 ieee80211_hdrlen(fc));
603
604 /*
605 * Use the first empty entry in this queue's command buffer array
606 * to contain the Tx command and MAC header concatenated together
607 * (payload data will be in another buffer).
608 * Size of this varies, due to varying MAC header length.
609 * If end is not dword aligned, we'll have 2 extra bytes at the end
610 * of the MAC header (device reads on dword boundaries).
611 * We'll tell device about this padding later.
612 */
613 len = sizeof(struct iwl3945_tx_cmd) +
614 sizeof(struct iwl_cmd_header) + hdr_len;
615 len = (len + 3) & ~3;
616
617 /* Physical address of this Tx command's header (not MAC header!),
618 * within command buffer array. */
619 txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
620 len, PCI_DMA_TODEVICE);
621 /* we do not map meta data ... so we can safely access address to
622 * provide to unmap command*/
623 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
624 dma_unmap_len_set(out_meta, len, len);
625
626 /* Add buffer containing Tx command and MAC(!) header to TFD's
627 * first entry */
628 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
629 txcmd_phys, len, 1, 0);
630
631
632 /* Set up TFD's 2nd entry to point directly to remainder of skb,
633 * if any (802.11 null frames have no payload). */
634 len = skb->len - hdr_len;
635 if (len) {
636 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
637 len, PCI_DMA_TODEVICE);
638 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
639 phys_addr, len,
640 0, U32_PAD(len));
641 }
642
643
644 /* Tell device the write index *just past* this latest filled TFD */
645 q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
646 iwl_legacy_txq_update_write_ptr(priv, txq);
647 spin_unlock_irqrestore(&priv->lock, flags);
648
649 if ((iwl_legacy_queue_space(q) < q->high_mark)
650 && priv->mac80211_registered) {
651 if (wait_write_ptr) {
652 spin_lock_irqsave(&priv->lock, flags);
653 txq->need_update = 1;
654 iwl_legacy_txq_update_write_ptr(priv, txq);
655 spin_unlock_irqrestore(&priv->lock, flags);
656 }
657
658 iwl_legacy_stop_queue(priv, txq);
659 }
660
661 return 0;
662
663drop_unlock:
664 spin_unlock_irqrestore(&priv->lock, flags);
665drop:
666 return -1;
667}
668
/*
 * Send a spectrum-measurement request to the uCode and interpret its
 * immediate response. Returns 0 when the measurement was accepted,
 * -EAGAIN when the uCode declined, -EIO on a failed command, or the
 * error from the synchronous send itself.
 */
static int iwl3945_get_measurement(struct iwl_priv *priv,
			       struct ieee80211_measurement_params *params,
			       u8 type)
{
	struct iwl_spectrum_cmd spectrum;
	struct iwl_rx_packet *pkt;
	struct iwl_host_cmd cmd = {
		.id = REPLY_SPECTRUM_MEASUREMENT_CMD,
		.data = (void *)&spectrum,
		.flags = CMD_WANT_SKB,
	};
	/* NOTE(review): le64 start_time is narrowed to u32 here — looks
	 * intentional for beacon arithmetic, but confirm range is safe */
	u32 add_time = le64_to_cpu(params->start_time);
	int rc;
	int spectrum_resp_status;
	int duration = le16_to_cpu(params->duration);
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	/* when associated, express the start time in beacon intervals
	 * relative to the last TSF we saw */
	if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
		add_time = iwl_legacy_usecs_to_beacons(priv,
			le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
			le16_to_cpu(ctx->timing.beacon_interval));

	memset(&spectrum, 0, sizeof(spectrum));

	spectrum.channel_count = cpu_to_le16(1);
	spectrum.flags =
	    RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
	spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
	cmd.len = sizeof(spectrum);
	spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));

	if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
		spectrum.start_time =
			iwl_legacy_add_beacon_time(priv,
				priv->_3945.last_beacon_time, add_time,
				le16_to_cpu(ctx->timing.beacon_interval));
	else
		spectrum.start_time = 0;

	spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
	spectrum.channels[0].channel = params->channel;
	spectrum.channels[0].type = type;
	if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
		spectrum.flags |= RXON_FLG_BAND_24G_MSK |
		    RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;

	rc = iwl_legacy_send_cmd_sync(priv, &cmd);
	if (rc)
		return rc;

	pkt = (struct iwl_rx_packet *)cmd.reply_page;
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
		rc = -EIO;
	}

	spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
	switch (spectrum_resp_status) {
	case 0:		/* Command will be handled */
		if (pkt->u.spectrum.id != 0xff) {
			IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
						pkt->u.spectrum.id);
			priv->measurement_status &= ~MEASUREMENT_READY;
		}
		priv->measurement_status |= MEASUREMENT_ACTIVE;
		rc = 0;
		break;

	case 1:		/* Command will not be handled */
		rc = -EAGAIN;
		break;
	}

	/* reply page was handed to us by CMD_WANT_SKB; release it */
	iwl_legacy_free_pages(priv, cmd.reply_page);

	return rc;
}
746
/*
 * Handle the uCode ALIVE notification: cache the alive response for the
 * init or runtime image and schedule the matching *_alive_start work.
 */
static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
		struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_alive_resp *palive;
	struct delayed_work *pwork;

	palive = &pkt->u.alive_frame;

	IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
		       "0x%01X 0x%01X\n",
		       palive->is_valid, palive->ver_type,
		       palive->ver_subtype);

	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
		/* bootstrap/init image reported alive */
		IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
		memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
		       sizeof(struct iwl_alive_resp));
		pwork = &priv->init_alive_start;
	} else {
		/* runtime image reported alive */
		IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
		memcpy(&priv->card_alive, &pkt->u.alive_frame,
		       sizeof(struct iwl_alive_resp));
		pwork = &priv->alive_start;
		iwl3945_disable_events(priv);
	}

	/* We delay the ALIVE response by 5ms to
	 * give the HW RF Kill time to activate... */
	if (palive->is_valid == UCODE_VALID_OK)
		queue_delayed_work(priv->workqueue, pwork,
				   msecs_to_jiffies(5));
	else
		IWL_WARN(priv, "uCode did not respond OK.\n");
}
782
/*
 * Log the status of an ADD_STA reply. The packet pointer is only needed
 * by the debug print, hence the #ifdef: IWL_DEBUG_RX compiles away (and
 * would leave pkt unused) when legacy debugging is disabled.
 */
static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
				 struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
#endif

	IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
}
792
/*
 * Handle a beacon-status notification: record whether we are the IBSS
 * beacon manager; the rest of the packet is only used for debug output.
 */
static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	u8 rate = beacon->beacon_notify_hdr.rate;

	IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
		"tsf %d %d rate %d\n",
		le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
		beacon->beacon_notify_hdr.failure_frame,
		le32_to_cpu(beacon->ibss_mgr_status),
		le32_to_cpu(beacon->high_tsf),
		le32_to_cpu(beacon->low_tsf), rate);
#endif

	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);

}
813
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* snapshot of the old status, to detect an rfkill state change */
	unsigned long status = priv->status;

	IWL_WARN(priv, "Card state received: HW:%s SW:%s\n",
		(flags & HW_CARD_DISABLED) ? "Kill" : "On",
		(flags & SW_CARD_DISABLED) ? "Kill" : "On");

	/* block further host commands while the card is (un)killing */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	if (flags & HW_CARD_DISABLED)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);


	iwl_legacy_scan_cancel(priv);

	/* report a change to mac80211, or wake anyone waiting if unchanged */
	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
				test_bit(STATUS_RF_KILL_HW, &priv->status));
	else
		wake_up(&priv->wait_command_queue);
}
845
846/**
847 * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks
848 *
849 * Setup the RX handlers for each of the reply types sent from the uCode
850 * to the host.
851 *
852 * This function chains into the hardware specific files for them to setup
853 * any hardware specific handlers as well.
854 */
855static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
856{
857 priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
858 priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
859 priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
860 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
861 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
862 iwl_legacy_rx_spectrum_measure_notif;
863 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
864 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
865 iwl_legacy_rx_pm_debug_statistics_notif;
866 priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
867
868 /*
869 * The same handler is used for both the REPLY to a discrete
870 * statistics request from the host as well as for the periodic
871 * statistics notifications (after received beacons) from the uCode.
872 */
873 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
874 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
875
876 iwl_legacy_setup_rx_scan_handlers(priv);
877 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
878
879 /* Set up hardware specific Rx handlers */
880 iwl3945_hw_rx_handler_setup(priv);
881}
882
883/************************** RX-FUNCTIONS ****************************/
884/*
885 * Rx theory of operation
886 *
887 * The host allocates 32 DMA target addresses and passes the host address
888 * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
889 * 0 to 31
890 *
891 * Rx Queue Indexes
892 * The host/firmware share two index registers for managing the Rx buffers.
893 *
894 * The READ index maps to the first position that the firmware may be writing
895 * to -- the driver can read up to (but not including) this position and get
896 * good data.
897 * The READ index is managed by the firmware once the card is enabled.
898 *
899 * The WRITE index maps to the last position the driver has read from -- the
900 * position preceding WRITE is the last slot the firmware can place a packet.
901 *
902 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
903 * WRITE = READ.
904 *
905 * During initialization, the host sets up the READ queue position to the first
906 * INDEX position, and WRITE to the last (READ - 1 wrapped)
907 *
908 * When the firmware places a packet in a buffer, it will advance the READ index
909 * and fire the RX interrupt. The driver can then query the READ index and
910 * process as many packets as possible, moving the WRITE index forward as it
911 * resets the Rx queue buffers with new memory.
912 *
913 * The management in the driver is as follows:
914 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
915 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
916 * to replenish the iwl->rxq->rx_free.
917 * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the
918 * iwl->rxq is replenished and the READ INDEX is updated (updating the
919 * 'processed' and 'read' driver indexes as well)
920 * + A received packet is processed and handed to the kernel network stack,
921 * detached from the iwl->rxq. The driver 'processed' index is updated.
922 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
923 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
924 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
925 * were enough free buffers and RX_STALLED is set it is cleared.
926 *
927 *
928 * Driver sequence:
929 *
930 * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
931 * iwl3945_rx_queue_restock
932 * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx
933 * queue, updates firmware pointers, and updates
934 * the WRITE index. If insufficient rx_free buffers
935 * are available, schedules iwl3945_rx_replenish
936 *
937 * -- enable interrupts --
938 * ISR - iwl3945_rx() Detach iwl_rx_mem_buffers from pool up to the
939 * READ INDEX, detaching the SKB from the pool.
940 * Moves the packet buffer from queue to rx_used.
941 * Calls iwl3945_rx_queue_restock to refill any empty
942 * slots.
943 * ...
944 *
945 */
946
947/**
948 * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
949 */
950static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
951 dma_addr_t dma_addr)
952{
953 return cpu_to_le32((u32)dma_addr);
954}
955
956/**
957 * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool
958 *
959 * If there are slots in the RX queue that need to be restocked,
960 * and we have free pre-allocated buffers, fill the ranks as much
961 * as we can, pulling from rx_free.
962 *
963 * This moves the 'write' index forward to catch up with 'processed', and
964 * also updates the memory address in the firmware to reference the new
965 * target buffer.
966 */
967static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
968{
969 struct iwl_rx_queue *rxq = &priv->rxq;
970 struct list_head *element;
971 struct iwl_rx_mem_buffer *rxb;
972 unsigned long flags;
973 int write;
974
975 spin_lock_irqsave(&rxq->lock, flags);
976 write = rxq->write & ~0x7;
977 while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
978 /* Get next free Rx buffer, remove from free list */
979 element = rxq->rx_free.next;
980 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
981 list_del(element);
982
983 /* Point to Rx buffer via next RBD in circular buffer */
984 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
985 rxq->queue[rxq->write] = rxb;
986 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
987 rxq->free_count--;
988 }
989 spin_unlock_irqrestore(&rxq->lock, flags);
990 /* If the pre-allocated buffer pool is dropping low, schedule to
991 * refill it */
992 if (rxq->free_count <= RX_LOW_WATERMARK)
993 queue_work(priv->workqueue, &priv->rx_replenish);
994
995
996 /* If we've added more space for the firmware to place data, tell it.
997 * Increment device's write pointer in multiples of 8. */
998 if ((rxq->write_actual != (rxq->write & ~0x7))
999 || (abs(rxq->write - rxq->read) > 7)) {
1000 spin_lock_irqsave(&rxq->lock, flags);
1001 rxq->need_update = 1;
1002 spin_unlock_irqrestore(&rxq->lock, flags);
1003 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
1004 }
1005}
1006
1007/**
1008 * iwl3945_rx_replenish - Move all used packet from rx_used to rx_free
1009 *
1010 * When moving to rx_free an SKB is allocated for the slot.
1011 *
1012 * Also restock the Rx queue via iwl3945_rx_queue_restock.
1013 * This is called as a scheduled work item (except for during initialization)
1014 */
1015static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1016{
1017 struct iwl_rx_queue *rxq = &priv->rxq;
1018 struct list_head *element;
1019 struct iwl_rx_mem_buffer *rxb;
1020 struct page *page;
1021 unsigned long flags;
1022 gfp_t gfp_mask = priority;
1023
1024 while (1) {
1025 spin_lock_irqsave(&rxq->lock, flags);
1026
1027 if (list_empty(&rxq->rx_used)) {
1028 spin_unlock_irqrestore(&rxq->lock, flags);
1029 return;
1030 }
1031 spin_unlock_irqrestore(&rxq->lock, flags);
1032
1033 if (rxq->free_count > RX_LOW_WATERMARK)
1034 gfp_mask |= __GFP_NOWARN;
1035
1036 if (priv->hw_params.rx_page_order > 0)
1037 gfp_mask |= __GFP_COMP;
1038
1039 /* Alloc a new receive buffer */
1040 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
1041 if (!page) {
1042 if (net_ratelimit())
1043 IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
1044 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
1045 net_ratelimit())
1046 IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
1047 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
1048 rxq->free_count);
1049 /* We don't reschedule replenish work here -- we will
1050 * call the restock method and if it still needs
1051 * more buffers it will schedule replenish */
1052 break;
1053 }
1054
1055 spin_lock_irqsave(&rxq->lock, flags);
1056 if (list_empty(&rxq->rx_used)) {
1057 spin_unlock_irqrestore(&rxq->lock, flags);
1058 __free_pages(page, priv->hw_params.rx_page_order);
1059 return;
1060 }
1061 element = rxq->rx_used.next;
1062 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
1063 list_del(element);
1064 spin_unlock_irqrestore(&rxq->lock, flags);
1065
1066 rxb->page = page;
1067 /* Get physical address of RB/SKB */
1068 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
1069 PAGE_SIZE << priv->hw_params.rx_page_order,
1070 PCI_DMA_FROMDEVICE);
1071
1072 spin_lock_irqsave(&rxq->lock, flags);
1073
1074 list_add_tail(&rxb->list, &rxq->rx_free);
1075 rxq->free_count++;
1076 priv->alloc_rxb_page++;
1077
1078 spin_unlock_irqrestore(&rxq->lock, flags);
1079 }
1080}
1081
1082void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1083{
1084 unsigned long flags;
1085 int i;
1086 spin_lock_irqsave(&rxq->lock, flags);
1087 INIT_LIST_HEAD(&rxq->rx_free);
1088 INIT_LIST_HEAD(&rxq->rx_used);
1089 /* Fill the rx_used queue with _all_ of the Rx buffers */
1090 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
1091 /* In the reset function, these buffers may have been allocated
1092 * to an SKB, so we need to unmap and free potential storage */
1093 if (rxq->pool[i].page != NULL) {
1094 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1095 PAGE_SIZE << priv->hw_params.rx_page_order,
1096 PCI_DMA_FROMDEVICE);
1097 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
1098 rxq->pool[i].page = NULL;
1099 }
1100 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
1101 }
1102
1103 /* Set us so that we have processed and used all buffers, but have
1104 * not restocked the Rx queue with fresh buffers */
1105 rxq->read = rxq->write = 0;
1106 rxq->write_actual = 0;
1107 rxq->free_count = 0;
1108 spin_unlock_irqrestore(&rxq->lock, flags);
1109}
1110
/* Process-context replenish: runs as the priv->rx_replenish work item
 * (see iwl3945_rx_queue_restock), so GFP_KERNEL allocation is allowed.
 * Takes priv->lock around the restock. */
void iwl3945_rx_replenish(void *data)
{
	struct iwl_priv *priv = data;
	unsigned long flags;

	iwl3945_rx_allocate(priv, GFP_KERNEL);

	spin_lock_irqsave(&priv->lock, flags);
	iwl3945_rx_queue_restock(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}
1122
/* Atomic-context replenish, used from the RX handling path: allocates with
 * GFP_ATOMIC and, unlike iwl3945_rx_replenish(), does not take priv->lock
 * around the restock (restock itself takes rxq->lock). */
static void iwl3945_rx_replenish_now(struct iwl_priv *priv)
{
	iwl3945_rx_allocate(priv, GFP_ATOMIC);

	iwl3945_rx_queue_restock(priv);
}
1129
1130
1131/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
1132 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
1133 * This free routine walks the list of POOL entries and if SKB is set to
1134 * non NULL it is unmapped and freed
1135 */
1136static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1137{
1138 int i;
1139 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
1140 if (rxq->pool[i].page != NULL) {
1141 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1142 PAGE_SIZE << priv->hw_params.rx_page_order,
1143 PCI_DMA_FROMDEVICE);
1144 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
1145 rxq->pool[i].page = NULL;
1146 }
1147 }
1148
1149 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
1150 rxq->bd_dma);
1151 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
1152 rxq->rb_stts, rxq->rb_stts_dma);
1153 rxq->bd = NULL;
1154 rxq->rb_stts = NULL;
1155}
1156
1157
/* Convert linear signal-to-noise ratio into dB.
 * Read-only lookup table, indexed by integer ratio 0..99; const-qualified
 * since it is never written. */
static const uint8_t ratio2dB[100] = {
/*	 0   1   2   3   4   5   6   7   8   9 */
	 0,  0,  6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
	20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
	26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
	29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
	32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
	34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
	36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
	37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
	38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
	39, 39, 39, 39, 39, 40, 40, 40, 40, 40  /* 90 - 99 */
};

/* Calculates a relative dB value from a ratio of linear
 * (i.e. not dB) signal levels.
 * Conversion assumes that levels are voltages (20*log), not powers (10*log).
 *
 * Returns 0 for ratios below 1:1 and caps the result at 60 dB for
 * ratios of 1000:1 and above.
 */
int iwl3945_calc_db_from_ratio(int sig_ratio)
{
	/* 1000:1 or higher just report as 60 dB */
	if (sig_ratio >= 1000)
		return 60;

	/* 100:1 or higher, divide by 10 and use table,
	 * add 20 dB to make up for divide by 10 */
	if (sig_ratio >= 100)
		return 20 + (int)ratio2dB[sig_ratio/10];

	/* We shouldn't see this */
	if (sig_ratio < 1)
		return 0;

	/* Use table for ratios 1:1 - 99:1 */
	return (int)ratio2dB[sig_ratio];
}
1194
1195/**
1196 * iwl3945_rx_handle - Main entry function for receiving responses from uCode
1197 *
1198 * Uses the priv->rx_handlers callback function array to invoke
1199 * the appropriate handlers, including command responses,
1200 * frame-received notifications, and other notifications.
1201 */
1202static void iwl3945_rx_handle(struct iwl_priv *priv)
1203{
1204 struct iwl_rx_mem_buffer *rxb;
1205 struct iwl_rx_packet *pkt;
1206 struct iwl_rx_queue *rxq = &priv->rxq;
1207 u32 r, i;
1208 int reclaim;
1209 unsigned long flags;
1210 u8 fill_rx = 0;
1211 u32 count = 8;
1212 int total_empty = 0;
1213
1214 /* uCode's read index (stored in shared DRAM) indicates the last Rx
1215 * buffer that the driver may process (last buffer filled by ucode). */
1216 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
1217 i = rxq->read;
1218
1219 /* calculate total frames need to be restock after handling RX */
1220 total_empty = r - rxq->write_actual;
1221 if (total_empty < 0)
1222 total_empty += RX_QUEUE_SIZE;
1223
1224 if (total_empty > (RX_QUEUE_SIZE / 2))
1225 fill_rx = 1;
1226 /* Rx interrupt, but nothing sent from uCode */
1227 if (i == r)
1228 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
1229
1230 while (i != r) {
1231 int len;
1232
1233 rxb = rxq->queue[i];
1234
1235 /* If an RXB doesn't have a Rx queue slot associated with it,
1236 * then a bug has been introduced in the queue refilling
1237 * routines -- catch it here */
1238 BUG_ON(rxb == NULL);
1239
1240 rxq->queue[i] = NULL;
1241
1242 pci_unmap_page(priv->pci_dev, rxb->page_dma,
1243 PAGE_SIZE << priv->hw_params.rx_page_order,
1244 PCI_DMA_FROMDEVICE);
1245 pkt = rxb_addr(rxb);
1246
1247 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1248 len += sizeof(u32); /* account for status word */
1249 trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
1250
1251 /* Reclaim a command buffer only if this packet is a response
1252 * to a (driver-originated) command.
1253 * If the packet (e.g. Rx frame) originated from uCode,
1254 * there is no command buffer to reclaim.
1255 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
1256 * but apparently a few don't get set; catch them here. */
1257 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
1258 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
1259 (pkt->hdr.cmd != REPLY_TX);
1260
1261 /* Based on type of command response or notification,
1262 * handle those that need handling via function in
1263 * rx_handlers table. See iwl3945_setup_rx_handlers() */
1264 if (priv->rx_handlers[pkt->hdr.cmd]) {
1265 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
1266 iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1267 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
1268 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
1269 } else {
1270 /* No handling needed */
1271 IWL_DEBUG_RX(priv,
1272 "r %d i %d No handler needed for %s, 0x%02x\n",
1273 r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
1274 pkt->hdr.cmd);
1275 }
1276
1277 /*
1278 * XXX: After here, we should always check rxb->page
1279 * against NULL before touching it or its virtual
1280 * memory (pkt). Because some rx_handler might have
1281 * already taken or freed the pages.
1282 */
1283
1284 if (reclaim) {
1285 /* Invoke any callbacks, transfer the buffer to caller,
1286 * and fire off the (possibly) blocking iwl_legacy_send_cmd()
1287 * as we reclaim the driver command queue */
1288 if (rxb->page)
1289 iwl_legacy_tx_cmd_complete(priv, rxb);
1290 else
1291 IWL_WARN(priv, "Claim null rxb?\n");
1292 }
1293
1294 /* Reuse the page if possible. For notification packets and
1295 * SKBs that fail to Rx correctly, add them back into the
1296 * rx_free list for reuse later. */
1297 spin_lock_irqsave(&rxq->lock, flags);
1298 if (rxb->page != NULL) {
1299 rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
1300 0, PAGE_SIZE << priv->hw_params.rx_page_order,
1301 PCI_DMA_FROMDEVICE);
1302 list_add_tail(&rxb->list, &rxq->rx_free);
1303 rxq->free_count++;
1304 } else
1305 list_add_tail(&rxb->list, &rxq->rx_used);
1306
1307 spin_unlock_irqrestore(&rxq->lock, flags);
1308
1309 i = (i + 1) & RX_QUEUE_MASK;
1310 /* If there are a lot of unused frames,
1311 * restock the Rx queue so ucode won't assert. */
1312 if (fill_rx) {
1313 count++;
1314 if (count >= 8) {
1315 rxq->read = i;
1316 iwl3945_rx_replenish_now(priv);
1317 count = 0;
1318 }
1319 }
1320 }
1321
1322 /* Backtrack one entry */
1323 rxq->read = i;
1324 if (fill_rx)
1325 iwl3945_rx_replenish_now(priv);
1326 else
1327 iwl3945_rx_queue_restock(priv);
1328}
1329
/* call this function to flush any scheduled tasklet:
 * waits for an in-flight hard IRQ handler to finish, then makes sure the
 * irq tasklet has completed (or never runs).  NOTE(review): only safe once
 * no new interrupts can be raised -- confirm callers disable them first. */
static inline void iwl3945_synchronize_irq(struct iwl_priv *priv)
{
	/* wait to make sure we flush pending tasklet*/
	synchronize_irq(priv->pci_dev->irq);
	tasklet_kill(&priv->irq_tasklet);
}
1337
/* Map a uCode error-log "desc" code (1..6) to a printable name;
 * anything out of range reports as "UNKNOWN". */
static const char *iwl3945_desc_lookup(int i)
{
	static const char * const err_names[] = {
		[1] = "FAIL",
		[2] = "BAD_PARAM",
		[3] = "BAD_CHECKSUM",
		[4] = "NMI_INTERRUPT",
		[5] = "SYSASSERT",
		[6] = "FATAL_ERROR",
	};

	if (i >= 1 && i <= 6)
		return err_names[i];

	return "UNKNOWN";
}
1357
/* uCode error log layout in SRAM: a one-word record count, followed by
 * "count" records of seven u32 words each (desc, time, blink1, blink2,
 * ilink1, ilink2, data1 -- see iwl3945_dump_nic_error_log) */
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1360
/*
 * iwl3945_dump_nic_error_log - read the uCode error log out of device
 * SRAM and print each record to the kernel log.  The log base address
 * comes from the ALIVE notification (card_alive).
 */
void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
{
	u32 i;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;

	base = le32_to_cpu(priv->card_alive.error_event_table_ptr);

	if (!iwl3945_hw_valid_rtc_data_addr(base)) {
		IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
		return;
	}


	/* First word of the table is the number of error records */
	count = iwl_legacy_read_targ_mem(priv, base);

	/* Print the banner only when the log holds at least one record */
	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IWL_ERR(priv, "Start IWL Error Log Dump:\n");
		IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
			priv->status, count);
	}

	IWL_ERR(priv, "Desc       Time       asrtPC  blink2 "
		"ilink1  nmiPC   Line\n");
	/* Each record is seven consecutive u32 words; walk them in order */
	for (i = ERROR_START_OFFSET;
	     i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
	     i += ERROR_ELEM_SIZE) {
		desc = iwl_legacy_read_targ_mem(priv, base + i);
		time =
		iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32));
		blink1 =
		iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32));
		blink2 =
		iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32));
		ilink1 =
		iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32));
		ilink2 =
		iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32));
		data1 =
		iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32));

		IWL_ERR(priv,
			"%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
			iwl3945_desc_lookup(desc), desc, time, blink1, blink2,
			ilink1, ilink2, data1);
		trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0,
					0, blink1, blink2, ilink1, ilink2);
	}
}
1410
/*
 * iwl3945_irq_tasklet - bottom half of the interrupt handler
 *
 * Acks all pending interrupt causes in CSR_INT / CSR_FH_INT_STATUS,
 * services each discovered cause, and re-enables interrupts unless a
 * hardware error forced an early return.
 */
static void iwl3945_irq_tasklet(struct iwl_priv *priv)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&priv->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = iwl_read32(priv, CSR_INT);
	iwl_write32(priv, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
	iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
		/* just for debug */
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
			      inta, inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR39_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR39_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(priv, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_legacy_disable_interrupts(priv);

		priv->isr_stats.hw++;
		iwl_legacy_irq_handle_error(priv);

		handled |= CSR_INT_BIT_HW_ERR;

		/* Interrupts deliberately stay disabled; error handling
		 * above takes over recovery */
		return;
	}

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
				      "the frame/frames.\n");
			priv->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(priv, "Alive interrupt\n");
			priv->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(priv, "Microcode SW error detected. "
			"Restarting 0x%X.\n", inta);
		priv->isr_stats.sw++;
		iwl_legacy_irq_handle_error(priv);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
		iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
		/* Resync write pointers of all six TX queues with the
		 * now-awake device */
		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]);
		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]);
		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]);
		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]);
		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]);
		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]);

		priv->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		iwl3945_rx_handle(priv);
		priv->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	if (inta & CSR_INT_BIT_FH_TX) {
		IWL_DEBUG_ISR(priv, "Tx interrupt\n");
		priv->isr_stats.tx++;

		/* NOTE(review): bit 6 presumably acks the service-channel
		 * TX status -- verify against the CSR39 FH definitions */
		iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
		iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT
					(FH39_SRVC_CHNL), 0x0);
		handled |= CSR_INT_BIT_FH_TX;
	}

	if (inta & ~handled) {
		IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		priv->isr_stats.unhandled++;
	}

	if (inta & ~priv->inta_mask) {
		IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~priv->inta_mask);
		IWL_WARN(priv, "   with FH_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_legacy_enable_interrupts(priv);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
		inta = iwl_read32(priv, CSR_INT);
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
			"flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
1557
/*
 * iwl3945_get_channels_for_scan - fill the scan command's channel array
 *
 * @band:      band to take channels from (others in the request are skipped)
 * @is_active: nonzero to request active scanning where the channel allows it
 * @n_probes:  number of direct-probe SSIDs; encoded into the channel type
 * @scan_ch:   output array of per-channel scan entries
 *
 * Returns the number of channels written to @scan_ch.
 */
static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
					 enum ieee80211_band band,
				     u8 is_active, u8 n_probes,
				     struct iwl3945_scan_channel *scan_ch,
				     struct ieee80211_vif *vif)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct iwl_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;

	sband = iwl_get_hw_mode(priv, band);
	if (!sband)
		return 0;

	active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
	passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);

	/* Passive dwell must exceed active dwell */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
		chan = priv->scan_request->channels[i];

		if (chan->band != band)
			continue;

		scan_ch->channel = chan->hw_value;

		ch_info = iwl_legacy_get_channel_info(priv, band,
							scan_ch->channel);
		if (!iwl_legacy_is_channel_valid(ch_info)) {
			IWL_DEBUG_SCAN(priv,
				"Channel %d is INVALID for this band.\n",
			       scan_ch->channel);
			continue;
		}

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
		/* If passive, set up for auto-switch
		 *  and use long active_dwell time.
		 */
		if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
			scan_ch->type = 0;	/* passive */
			if (IWL_UCODE_API(priv->ucode_ver) == 1)
				scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
		} else {
			scan_ch->type = 1;	/* active */
		}

		/* Set direct probe bits. These may be used both for active
		 * scan channels (probes gets sent right away),
		 * or for passive channels (probes get sent only after
		 * hearing clear Rx packet).*/
		if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
			if (n_probes)
				scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
		} else {
			/* uCode v1 does not allow setting direct probe bits on
			 * passive channel. */
			if ((scan_ch->type & 1) && n_probes)
				scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
		}

		/* Set txpower levels to defaults */
		scan_ch->tpc.dsp_atten = 110;
		/* scan_pwr_info->tpc.dsp_atten; */

		/*scan_pwr_info->tpc.tx_gain; */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else {
			scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
			/* NOTE: if we were doing 6Mb OFDM for scans we'd use
			 * power level:
			 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
			 */
		}

		IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n",
			       scan_ch->channel,
			       (scan_ch->type & 1) ?
			       "ACTIVE" : "PASSIVE",
			       (scan_ch->type & 1) ?
			       active_dwell : passive_dwell);

		scan_ch++;
		added++;
	}

	IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
	return added;
}
1654
1655static void iwl3945_init_hw_rates(struct iwl_priv *priv,
1656 struct ieee80211_rate *rates)
1657{
1658 int i;
1659
1660 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
1661 rates[i].bitrate = iwl3945_rates[i].ieee * 5;
1662 rates[i].hw_value = i; /* Rate scaling will work on indexes */
1663 rates[i].hw_value_short = i;
1664 rates[i].flags = 0;
1665 if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
1666 /*
1667 * If CCK != 1M then set short preamble rate flag.
1668 */
1669 rates[i].flags |= (iwl3945_rates[i].plcp == 10) ?
1670 0 : IEEE80211_RATE_SHORT_PREAMBLE;
1671 }
1672 }
1673}
1674
1675/******************************************************************************
1676 *
1677 * uCode download functions
1678 *
1679 ******************************************************************************/
1680
/* Release the host DMA buffers holding all six uCode image copies
 * (runtime code/data, data backup, init code/data, bootstrap). */
static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
{
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
}
1690
1691/**
1692 * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
1693 * looking at all data.
1694 */
1695static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
1696{
1697 u32 val;
1698 u32 save_len = len;
1699 int rc = 0;
1700 u32 errcnt;
1701
1702 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
1703
1704 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
1705 IWL39_RTC_INST_LOWER_BOUND);
1706
1707 errcnt = 0;
1708 for (; len > 0; len -= sizeof(u32), image++) {
1709 /* read data comes through single port, auto-incr addr */
1710 /* NOTE: Use the debugless read so we don't flood kernel log
1711 * if IWL_DL_IO is set */
1712 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1713 if (val != le32_to_cpu(*image)) {
1714 IWL_ERR(priv, "uCode INST section is invalid at "
1715 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1716 save_len - len, val, le32_to_cpu(*image));
1717 rc = -EIO;
1718 errcnt++;
1719 if (errcnt >= 20)
1720 break;
1721 }
1722 }
1723
1724
1725 if (!errcnt)
1726 IWL_DEBUG_INFO(priv,
1727 "ucode image in INSTRUCTION memory is good\n");
1728
1729 return rc;
1730}
1731
1732
1733/**
1734 * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
1735 * using sample data 100 bytes apart. If these sample points are good,
1736 * it's a pretty good bet that everything between them is good, too.
1737 */
1738static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
1739{
1740 u32 val;
1741 int rc = 0;
1742 u32 errcnt = 0;
1743 u32 i;
1744
1745 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
1746
1747 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
1748 /* read data comes through single port, auto-incr addr */
1749 /* NOTE: Use the debugless read so we don't flood kernel log
1750 * if IWL_DL_IO is set */
1751 iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
1752 i + IWL39_RTC_INST_LOWER_BOUND);
1753 val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1754 if (val != le32_to_cpu(*image)) {
1755#if 0 /* Enable this if you want to see details */
1756 IWL_ERR(priv, "uCode INST section is invalid at "
1757 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1758 i, val, *image);
1759#endif
1760 rc = -EIO;
1761 errcnt++;
1762 if (errcnt >= 3)
1763 break;
1764 }
1765 }
1766
1767 return rc;
1768}
1769
1770
1771/**
1772 * iwl3945_verify_ucode - determine which instruction image is in SRAM,
1773 * and verify its contents
1774 */
1775static int iwl3945_verify_ucode(struct iwl_priv *priv)
1776{
1777 __le32 *image;
1778 u32 len;
1779 int rc = 0;
1780
1781 /* Try bootstrap */
1782 image = (__le32 *)priv->ucode_boot.v_addr;
1783 len = priv->ucode_boot.len;
1784 rc = iwl3945_verify_inst_sparse(priv, image, len);
1785 if (rc == 0) {
1786 IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
1787 return 0;
1788 }
1789
1790 /* Try initialize */
1791 image = (__le32 *)priv->ucode_init.v_addr;
1792 len = priv->ucode_init.len;
1793 rc = iwl3945_verify_inst_sparse(priv, image, len);
1794 if (rc == 0) {
1795 IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
1796 return 0;
1797 }
1798
1799 /* Try runtime/protocol */
1800 image = (__le32 *)priv->ucode_code.v_addr;
1801 len = priv->ucode_code.len;
1802 rc = iwl3945_verify_inst_sparse(priv, image, len);
1803 if (rc == 0) {
1804 IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
1805 return 0;
1806 }
1807
1808 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
1809
1810 /* Since nothing seems to match, show first several data entries in
1811 * instruction SRAM, so maybe visual inspection will give a clue.
1812 * Selection of bootstrap image (vs. other images) is arbitrary. */
1813 image = (__le32 *)priv->ucode_boot.v_addr;
1814 len = priv->ucode_boot.len;
1815 rc = iwl3945_verify_inst_full(priv, image, len);
1816
1817 return rc;
1818}
1819
/* Clear all reset bits in CSR_RESET so the NIC's processors may run */
static void iwl3945_nic_start(struct iwl_priv *priv)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(priv, CSR_RESET, 0);
}
1825
/*
 * Accessors for the v1 uCode file header -- the only header layout the
 * 3945 firmware uses (all accessors below read ucode->v1.*).
 */
#define IWL3945_UCODE_GET(item) \
static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
{ \
	return le32_to_cpu(ucode->v1.item); \
}

/* v1 header size is fixed; api_ver is ignored.  24 bytes presumably
 * corresponds to six le32 words (ver plus the five size fields below) --
 * confirm against struct iwl_ucode_header. */
static u32 iwl3945_ucode_get_header_size(u32 api_ver)
{
	return 24;
}

/* uCode image payload starts immediately after the v1 header */
static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
{
	return (u8 *) ucode->v1.data;
}

IWL3945_UCODE_GET(inst_size);
IWL3945_UCODE_GET(data_size);
IWL3945_UCODE_GET(init_size);
IWL3945_UCODE_GET(init_data_size);
IWL3945_UCODE_GET(boot_size);
1847
1848/**
1849 * iwl3945_read_ucode - Read uCode images from disk file.
1850 *
1851 * Copy into buffers for card to fetch via bus-mastering
1852 */
1853static int iwl3945_read_ucode(struct iwl_priv *priv)
1854{
1855 const struct iwl_ucode_header *ucode;
1856 int ret = -EINVAL, index;
1857 const struct firmware *ucode_raw;
1858 /* firmware file name contains uCode/driver compatibility version */
1859 const char *name_pre = priv->cfg->fw_name_pre;
1860 const unsigned int api_max = priv->cfg->ucode_api_max;
1861 const unsigned int api_min = priv->cfg->ucode_api_min;
1862 char buf[25];
1863 u8 *src;
1864 size_t len;
1865 u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
1866
1867 /* Ask kernel firmware_class module to get the boot firmware off disk.
1868 * request_firmware() is synchronous, file is in memory on return. */
1869 for (index = api_max; index >= api_min; index--) {
1870 sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
1871 ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
1872 if (ret < 0) {
1873 IWL_ERR(priv, "%s firmware file req failed: %d\n",
1874 buf, ret);
1875 if (ret == -ENOENT)
1876 continue;
1877 else
1878 goto error;
1879 } else {
1880 if (index < api_max)
1881 IWL_ERR(priv, "Loaded firmware %s, "
1882 "which is deprecated. "
1883 " Please use API v%u instead.\n",
1884 buf, api_max);
1885 IWL_DEBUG_INFO(priv, "Got firmware '%s' file "
1886 "(%zd bytes) from disk\n",
1887 buf, ucode_raw->size);
1888 break;
1889 }
1890 }
1891
1892 if (ret < 0)
1893 goto error;
1894
1895 /* Make sure that we got at least our header! */
1896 if (ucode_raw->size < iwl3945_ucode_get_header_size(1)) {
1897 IWL_ERR(priv, "File size way too small!\n");
1898 ret = -EINVAL;
1899 goto err_release;
1900 }
1901
1902 /* Data from ucode file: header followed by uCode images */
1903 ucode = (struct iwl_ucode_header *)ucode_raw->data;
1904
1905 priv->ucode_ver = le32_to_cpu(ucode->ver);
1906 api_ver = IWL_UCODE_API(priv->ucode_ver);
1907 inst_size = iwl3945_ucode_get_inst_size(ucode);
1908 data_size = iwl3945_ucode_get_data_size(ucode);
1909 init_size = iwl3945_ucode_get_init_size(ucode);
1910 init_data_size = iwl3945_ucode_get_init_data_size(ucode);
1911 boot_size = iwl3945_ucode_get_boot_size(ucode);
1912 src = iwl3945_ucode_get_data(ucode);
1913
1914 /* api_ver should match the api version forming part of the
1915 * firmware filename ... but we don't check for that and only rely
1916 * on the API version read from firmware header from here on forward */
1917
1918 if (api_ver < api_min || api_ver > api_max) {
1919 IWL_ERR(priv, "Driver unable to support your firmware API. "
1920 "Driver supports v%u, firmware is v%u.\n",
1921 api_max, api_ver);
1922 priv->ucode_ver = 0;
1923 ret = -EINVAL;
1924 goto err_release;
1925 }
1926 if (api_ver != api_max)
1927 IWL_ERR(priv, "Firmware has old API version. Expected %u, "
1928 "got %u. New firmware can be obtained "
1929 "from http://www.intellinuxwireless.org.\n",
1930 api_max, api_ver);
1931
1932 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
1933 IWL_UCODE_MAJOR(priv->ucode_ver),
1934 IWL_UCODE_MINOR(priv->ucode_ver),
1935 IWL_UCODE_API(priv->ucode_ver),
1936 IWL_UCODE_SERIAL(priv->ucode_ver));
1937
1938 snprintf(priv->hw->wiphy->fw_version,
1939 sizeof(priv->hw->wiphy->fw_version),
1940 "%u.%u.%u.%u",
1941 IWL_UCODE_MAJOR(priv->ucode_ver),
1942 IWL_UCODE_MINOR(priv->ucode_ver),
1943 IWL_UCODE_API(priv->ucode_ver),
1944 IWL_UCODE_SERIAL(priv->ucode_ver));
1945
1946 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
1947 priv->ucode_ver);
1948 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
1949 inst_size);
1950 IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n",
1951 data_size);
1952 IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n",
1953 init_size);
1954 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n",
1955 init_data_size);
1956 IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n",
1957 boot_size);
1958
1959
1960 /* Verify size of file vs. image size info in file's header */
1961 if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) +
1962 inst_size + data_size + init_size +
1963 init_data_size + boot_size) {
1964
1965 IWL_DEBUG_INFO(priv,
1966 "uCode file size %zd does not match expected size\n",
1967 ucode_raw->size);
1968 ret = -EINVAL;
1969 goto err_release;
1970 }
1971
1972 /* Verify that uCode images will fit in card's SRAM */
1973 if (inst_size > IWL39_MAX_INST_SIZE) {
1974 IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n",
1975 inst_size);
1976 ret = -EINVAL;
1977 goto err_release;
1978 }
1979
1980 if (data_size > IWL39_MAX_DATA_SIZE) {
1981 IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n",
1982 data_size);
1983 ret = -EINVAL;
1984 goto err_release;
1985 }
1986 if (init_size > IWL39_MAX_INST_SIZE) {
1987 IWL_DEBUG_INFO(priv,
1988 "uCode init instr len %d too large to fit in\n",
1989 init_size);
1990 ret = -EINVAL;
1991 goto err_release;
1992 }
1993 if (init_data_size > IWL39_MAX_DATA_SIZE) {
1994 IWL_DEBUG_INFO(priv,
1995 "uCode init data len %d too large to fit in\n",
1996 init_data_size);
1997 ret = -EINVAL;
1998 goto err_release;
1999 }
2000 if (boot_size > IWL39_MAX_BSM_SIZE) {
2001 IWL_DEBUG_INFO(priv,
2002 "uCode boot instr len %d too large to fit in\n",
2003 boot_size);
2004 ret = -EINVAL;
2005 goto err_release;
2006 }
2007
2008 /* Allocate ucode buffers for card's bus-master loading ... */
2009
2010 /* Runtime instructions and 2 copies of data:
2011 * 1) unmodified from disk
2012 * 2) backup cache for save/restore during power-downs */
2013 priv->ucode_code.len = inst_size;
2014 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
2015
2016 priv->ucode_data.len = data_size;
2017 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
2018
2019 priv->ucode_data_backup.len = data_size;
2020 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
2021
2022 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
2023 !priv->ucode_data_backup.v_addr)
2024 goto err_pci_alloc;
2025
2026 /* Initialization instructions and data */
2027 if (init_size && init_data_size) {
2028 priv->ucode_init.len = init_size;
2029 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
2030
2031 priv->ucode_init_data.len = init_data_size;
2032 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
2033
2034 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
2035 goto err_pci_alloc;
2036 }
2037
2038 /* Bootstrap (instructions only, no data) */
2039 if (boot_size) {
2040 priv->ucode_boot.len = boot_size;
2041 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
2042
2043 if (!priv->ucode_boot.v_addr)
2044 goto err_pci_alloc;
2045 }
2046
2047 /* Copy images into buffers for card's bus-master reads ... */
2048
2049 /* Runtime instructions (first block of data in file) */
2050 len = inst_size;
2051 IWL_DEBUG_INFO(priv,
2052 "Copying (but not loading) uCode instr len %zd\n", len);
2053 memcpy(priv->ucode_code.v_addr, src, len);
2054 src += len;
2055
2056 IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
2057 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
2058
2059 /* Runtime data (2nd block)
2060 * NOTE: Copy into backup buffer will be done in iwl3945_up() */
2061 len = data_size;
2062 IWL_DEBUG_INFO(priv,
2063 "Copying (but not loading) uCode data len %zd\n", len);
2064 memcpy(priv->ucode_data.v_addr, src, len);
2065 memcpy(priv->ucode_data_backup.v_addr, src, len);
2066 src += len;
2067
2068 /* Initialization instructions (3rd block) */
2069 if (init_size) {
2070 len = init_size;
2071 IWL_DEBUG_INFO(priv,
2072 "Copying (but not loading) init instr len %zd\n", len);
2073 memcpy(priv->ucode_init.v_addr, src, len);
2074 src += len;
2075 }
2076
2077 /* Initialization data (4th block) */
2078 if (init_data_size) {
2079 len = init_data_size;
2080 IWL_DEBUG_INFO(priv,
2081 "Copying (but not loading) init data len %zd\n", len);
2082 memcpy(priv->ucode_init_data.v_addr, src, len);
2083 src += len;
2084 }
2085
2086 /* Bootstrap instructions (5th block) */
2087 len = boot_size;
2088 IWL_DEBUG_INFO(priv,
2089 "Copying (but not loading) boot instr len %zd\n", len);
2090 memcpy(priv->ucode_boot.v_addr, src, len);
2091
2092 /* We have our copies now, allow OS release its copies */
2093 release_firmware(ucode_raw);
2094 return 0;
2095
2096 err_pci_alloc:
2097 IWL_ERR(priv, "failed to allocate pci memory\n");
2098 ret = -ENOMEM;
2099 iwl3945_dealloc_ucode_pci(priv);
2100
2101 err_release:
2102 release_firmware(ucode_raw);
2103
2104 error:
2105 return ret;
2106}
2107
2108
2109/**
2110 * iwl3945_set_ucode_ptrs - Set uCode address location
2111 *
2112 * Tell initialization uCode where to find runtime uCode.
2113 *
2114 * BSM registers initially contain pointers to initialization uCode.
2115 * We need to replace them to load runtime uCode inst and data,
2116 * and to save runtime data when powering down.
2117 */
2118static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
2119{
2120 dma_addr_t pinst;
2121 dma_addr_t pdata;
2122
2123 /* bits 31:0 for 3945 */
2124 pinst = priv->ucode_code.p_addr;
2125 pdata = priv->ucode_data_backup.p_addr;
2126
2127 /* Tell bootstrap uCode where to find image to load */
2128 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
2129 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
2130 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
2131 priv->ucode_data.len);
2132
2133 /* Inst byte count must be last to set up, bit 31 signals uCode
2134 * that all new ptr/size info is in place */
2135 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
2136 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
2137
2138 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
2139
2140 return 0;
2141}
2142
2143/**
2144 * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received
2145 *
2146 * Called after REPLY_ALIVE notification received from "initialize" uCode.
2147 *
2148 * Tell "initialize" uCode to go ahead and load the runtime uCode.
2149 */
2150static void iwl3945_init_alive_start(struct iwl_priv *priv)
2151{
2152 /* Check alive response for "valid" sign from uCode */
2153 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
2154 /* We had an error bringing up the hardware, so take it
2155 * all the way back down so we can try again */
2156 IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
2157 goto restart;
2158 }
2159
2160 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
2161 * This is a paranoid check, because we would not have gotten the
2162 * "initialize" alive if code weren't properly loaded. */
2163 if (iwl3945_verify_ucode(priv)) {
2164 /* Runtime instruction load was bad;
2165 * take it all the way back down so we can try again */
2166 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
2167 goto restart;
2168 }
2169
2170 /* Send pointers to protocol/runtime uCode image ... init code will
2171 * load and launch runtime uCode, which will send us another "Alive"
2172 * notification. */
2173 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
2174 if (iwl3945_set_ucode_ptrs(priv)) {
2175 /* Runtime instruction load won't happen;
2176 * take it all the way back down so we can try again */
2177 IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
2178 goto restart;
2179 }
2180 return;
2181
2182 restart:
2183 queue_work(priv->workqueue, &priv->restart);
2184}
2185
/**
 * iwl3945_alive_start - called after REPLY_ALIVE notification received
 * from protocol/runtime uCode (initialization uCode's
 * Alive gets handled by iwl3945_init_alive_start()).
 *
 * Validates the runtime alive response, reads RF-kill state, waits for
 * the thermal sensor, then brings the driver to STATUS_READY and commits
 * the initial RXON configuration. On any validation failure the restart
 * work is queued to bring the device down and try again.
 */
static void iwl3945_alive_start(struct iwl_priv *priv)
{
	int thermal_spin = 0;
	u32 rfkill;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");

	if (priv->card_alive.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		IWL_DEBUG_INFO(priv, "Alive failed.\n");
		goto restart;
	}

	/* Initialize uCode has loaded Runtime uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "runtime" alive if code weren't properly loaded. */
	if (iwl3945_verify_ucode(priv)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
		goto restart;
	}

	/* Read hardware RF-kill state from the APMG register */
	rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG);
	IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);

	if (rfkill & 0x1) {
		clear_bit(STATUS_RF_KILL_HW, &priv->status);
		/* if RFKILL is not on, then wait for thermal
		 * sensor in adapter to kick in.
		 * NOTE(review): this loop has no iteration bound; it spins
		 * in 10us steps until the sensor reports non-zero. */
		while (iwl3945_hw_get_temperature(priv) == 0) {
			thermal_spin++;
			udelay(10);
		}

		if (thermal_spin)
			IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n",
				       thermal_spin * 10);
	} else
		set_bit(STATUS_RF_KILL_HW, &priv->status);

	/* After the ALIVE response, we can send commands to 3945 uCode */
	set_bit(STATUS_ALIVE, &priv->status);

	/* Enable watchdog to monitor the driver tx queues */
	iwl_legacy_setup_watchdog(priv);

	/* If RF-kill is asserted, stop here; queues stay stopped */
	if (iwl_legacy_is_rfkill(priv))
		return;

	ieee80211_wake_queues(priv->hw);

	priv->active_rate = IWL_RATES_MASK_3945;

	iwl_legacy_power_update_mode(priv, true);

	if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
		/* Still associated: keep the association in staging but
		 * drop it from the active config so the commit below
		 * re-establishes it cleanly. */
		struct iwl3945_rxon_cmd *active_rxon =
				(struct iwl3945_rxon_cmd *)(&ctx->active);

		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		/* Initialize our rx_config data */
		iwl_legacy_connection_init_rx_config(priv, ctx);
	}

	/* Configure Bluetooth device coexistence support */
	iwl_legacy_send_bt_config(priv);

	set_bit(STATUS_READY, &priv->status);

	/* Configure the adapter for unassociated operation */
	iwl3945_commit_rxon(priv, ctx);

	iwl3945_reg_txpower_periodic(priv);

	IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
	/* Wake anyone blocked in iwl3945_mac_start() waiting for READY */
	wake_up(&priv->wait_command_queue);

	return;

 restart:
	queue_work(priv->workqueue, &priv->restart);
}
2278
2279static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);
2280
/*
 * __iwl3945_down - take the NIC all the way down.
 *
 * Cancels scans, stops the watchdog, clears station state, disables
 * interrupts, stops TX/RX and powers the device down. Must be called
 * with priv->mutex held (see iwl3945_down()). Preserves only the
 * RF-kill / geo-configured / exit-pending status bits across teardown.
 */
static void __iwl3945_down(struct iwl_priv *priv)
{
	unsigned long flags;
	int exit_pending;

	IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");

	iwl_legacy_scan_cancel_timeout(priv, 200);

	/* Remember whether EXIT_PENDING was already set (module unload)
	 * so we can restore/clear it correctly below */
	exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);

	/* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
	 * to prevent rearm timer */
	del_timer_sync(&priv->watchdog);

	/* Station information will now be cleared in device */
	iwl_legacy_clear_ucode_stations(priv, NULL);
	iwl_legacy_dealloc_bcast_stations(priv);
	iwl_legacy_clear_driver_stations(priv);

	/* Unblock any waiting calls */
	wake_up_all(&priv->wait_command_queue);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(STATUS_EXIT_PENDING, &priv->status);

	/* stop and reset the on-board processor */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_legacy_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	iwl3945_synchronize_irq(priv);

	if (priv->mac80211_registered)
		ieee80211_stop_queues(priv->hw);

	/* If we have not previously called iwl3945_init() then
	 * clear all bits but the RF Kill bits and return.
	 * (Each term below is test_bit(X) << X; << binds tighter than |,
	 * so this rebuilds a mask with only the listed bits kept.) */
	if (!iwl_legacy_is_init(priv)) {
		priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
					STATUS_RF_KILL_HW |
			       test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
					STATUS_GEO_CONFIGURED |
			       test_bit(STATUS_EXIT_PENDING, &priv->status) <<
					STATUS_EXIT_PENDING;
		goto exit;
	}

	/* ...otherwise clear out all the status bits but the RF Kill
	 * bit and continue taking the NIC down. */
	priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
				STATUS_RF_KILL_HW |
			test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
				STATUS_GEO_CONFIGURED |
			test_bit(STATUS_FW_ERROR, &priv->status) <<
				STATUS_FW_ERROR |
			test_bit(STATUS_EXIT_PENDING, &priv->status) <<
				STATUS_EXIT_PENDING;

	iwl3945_hw_txq_ctx_stop(priv);
	iwl3945_hw_rxq_stop(priv);

	/* Power-down device's busmaster DMA clocks */
	iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);

	/* Stop the device, and put it in low power state */
	iwl_legacy_apm_stop(priv);

 exit:
	memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));

	/* Drop any pending beacon frame */
	if (priv->beacon_skb)
		dev_kfree_skb(priv->beacon_skb);
	priv->beacon_skb = NULL;

	/* clear out any free frames */
	iwl3945_clear_free_frames(priv);
}
2364
/*
 * iwl3945_down - locked wrapper around __iwl3945_down().
 *
 * Takes the device down under priv->mutex, then cancels deferred work
 * outside the mutex (the work items themselves take the mutex, so
 * cancelling while holding it could deadlock).
 */
static void iwl3945_down(struct iwl_priv *priv)
{
	mutex_lock(&priv->mutex);
	__iwl3945_down(priv);
	mutex_unlock(&priv->mutex);

	iwl3945_cancel_deferred_work(priv);
}
2373
2374#define MAX_HW_RESTARTS 5
2375
2376static int iwl3945_alloc_bcast_station(struct iwl_priv *priv)
2377{
2378 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2379 unsigned long flags;
2380 u8 sta_id;
2381
2382 spin_lock_irqsave(&priv->sta_lock, flags);
2383 sta_id = iwl_legacy_prep_station(priv, ctx,
2384 iwlegacy_bcast_addr, false, NULL);
2385 if (sta_id == IWL_INVALID_STATION) {
2386 IWL_ERR(priv, "Unable to prepare broadcast station\n");
2387 spin_unlock_irqrestore(&priv->sta_lock, flags);
2388
2389 return -EINVAL;
2390 }
2391
2392 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
2393 priv->stations[sta_id].used |= IWL_STA_BCAST;
2394 spin_unlock_irqrestore(&priv->sta_lock, flags);
2395
2396 return 0;
2397}
2398
/*
 * __iwl3945_up - bring the NIC up and start the bootstrap uCode.
 *
 * Allocates the broadcast station, checks RF-kill, initializes the NIC
 * hardware, then tries up to MAX_HW_RESTARTS times to load the bootstrap
 * uCode and start the card. Returns 0 on success (including the RF-kill
 * case noted below), negative errno otherwise.
 */
static int __iwl3945_up(struct iwl_priv *priv)
{
	int rc, i;

	rc = iwl3945_alloc_bcast_station(priv);
	if (rc)
		return rc;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	/* ucode images must already have been read from disk
	 * (see iwl3945_read_ucode() called from mac_start) */
	if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
		IWL_ERR(priv, "ucode not available for device bring up\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(priv, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &priv->status);
	else {
		set_bit(STATUS_RF_KILL_HW, &priv->status);
		IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
		return -ENODEV;
	}

	/* Ack/clear all pending interrupts before HW init */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);

	rc = iwl3945_hw_nic_init(priv);
	if (rc) {
		IWL_ERR(priv, "Unable to int nic\n");
		return rc;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_legacy_enable_interrupts(priv);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Copy original ucode data image from disk into backup cache.
	 * This will be used to initialize the on-board processor's
	 * data SRAM for a clean start when the runtime program first loads. */
	memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
	       priv->ucode_data.len);

	/* We return success when we resume from suspend and rf_kill is on. */
	if (test_bit(STATUS_RF_KILL_HW, &priv->status))
		return 0;

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		/* load bootstrap state machine,
		 * load bootstrap program into processor's memory,
		 * prepare to load the "initialize" uCode */
		rc = priv->cfg->ops->lib->load_ucode(priv);

		if (rc) {
			IWL_ERR(priv,
				"Unable to set up bootstrap uCode: %d\n", rc);
			continue;
		}

		/* start card; "initialize" will load runtime ucode */
		iwl3945_nic_start(priv);

		IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");

		return 0;
	}

	/* All attempts failed: tear the device back down, clearing
	 * EXIT_PENDING around the teardown so it remains restartable */
	set_bit(STATUS_EXIT_PENDING, &priv->status);
	__iwl3945_down(priv);
	clear_bit(STATUS_EXIT_PENDING, &priv->status);

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
2488
2489
2490/*****************************************************************************
2491 *
2492 * Workqueue callbacks
2493 *
2494 *****************************************************************************/
2495
2496static void iwl3945_bg_init_alive_start(struct work_struct *data)
2497{
2498 struct iwl_priv *priv =
2499 container_of(data, struct iwl_priv, init_alive_start.work);
2500
2501 mutex_lock(&priv->mutex);
2502 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2503 goto out;
2504
2505 iwl3945_init_alive_start(priv);
2506out:
2507 mutex_unlock(&priv->mutex);
2508}
2509
2510static void iwl3945_bg_alive_start(struct work_struct *data)
2511{
2512 struct iwl_priv *priv =
2513 container_of(data, struct iwl_priv, alive_start.work);
2514
2515 mutex_lock(&priv->mutex);
2516 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2517 goto out;
2518
2519 iwl3945_alive_start(priv);
2520out:
2521 mutex_unlock(&priv->mutex);
2522}
2523
2524/*
2525 * 3945 cannot interrupt driver when hardware rf kill switch toggles;
2526 * driver must poll CSR_GP_CNTRL_REG register for change. This register
2527 * *is* readable even when device has been SW_RESET into low power mode
2528 * (e.g. during RF KILL).
2529 */
2530static void iwl3945_rfkill_poll(struct work_struct *data)
2531{
2532 struct iwl_priv *priv =
2533 container_of(data, struct iwl_priv, _3945.rfkill_poll.work);
2534 bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
2535 bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
2536 & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
2537
2538 if (new_rfkill != old_rfkill) {
2539 if (new_rfkill)
2540 set_bit(STATUS_RF_KILL_HW, &priv->status);
2541 else
2542 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2543
2544 wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill);
2545
2546 IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
2547 new_rfkill ? "disable radio" : "enable radio");
2548 }
2549
2550 /* Keep this running, even if radio now enabled. This will be
2551 * cancelled in mac_start() if system decides to start again */
2552 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
2553 round_jiffies_relative(2 * HZ));
2554
2555}
2556
/*
 * iwl3945_request_scan - build and send a REPLY_SCAN_CMD to the uCode.
 *
 * Fills a iwl3945_scan_cmd (plus probe request and channel list in its
 * trailing data area) from priv->scan_request and sends it synchronously.
 * Must be called with priv->mutex held. Returns 0 on success or a
 * negative errno; STATUS_SCAN_HW is set only while the command is in
 * flight successfully.
 */
int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_CMD,
		.len = sizeof(struct iwl3945_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct iwl3945_scan_cmd *scan;
	u8 n_probes = 0;
	enum ieee80211_band band;
	bool is_active = false;
	int ret;
	u16 len;

	lockdep_assert_held(&priv->mutex);

	/* Lazily allocate the scan command buffer; it is reused for every
	 * subsequent scan (freed elsewhere at teardown) */
	if (!priv->scan_cmd) {
		priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) +
					 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
		if (!priv->scan_cmd) {
			IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n");
			return -ENOMEM;
		}
	}
	scan = priv->scan_cmd;
	memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;

	if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
		u16 interval;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		IWL_DEBUG_INFO(priv, "Scanning while associated...\n");

		interval = vif->bss_conf.beacon_int;

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;
		/*
		 * suspend time format:
		 *  0-19: beacon interval in usec (time before exec.)
		 * 20-23: 0
		 * 24-31: number of beacons (suspend between channels)
		 */

		extra = (suspend_time / interval) << 24;
		scan_suspend_time = 0xFF0FFFFF &
			(extra | ((suspend_time % interval) * 1024));

		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
			       scan_suspend_time, interval);
	}

	/* Copy requested SSIDs into the direct-scan slots for active scan */
	if (priv->scan_request->n_ssids) {
		int i, p = 0;
		IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
		for (i = 0; i < priv->scan_request->n_ssids; i++) {
			/* always does wildcard anyway */
			if (!priv->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
				priv->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       priv->scan_request->ssids[i].ssid,
			       priv->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n");

	/* We don't build a direct scan probe request; the uCode will do
	 * that based on the direct_mask added to each channel entry */
	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* flags + rate selection */

	switch (priv->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
		band = IEEE80211_BAND_2GHZ;
		break;
	case IEEE80211_BAND_5GHZ:
		/* NOTE: no band flags here; scan->flags stays zero until
		 * the antenna bits are OR'd in below */
		scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
		band = IEEE80211_BAND_5GHZ;
		break;
	default:
		IWL_WARN(priv, "Invalid scan band\n");
		return -EIO;
	}

	/*
	 * If active scaning is requested but a certain channel
	 * is marked passive, we can do active scanning if we
	 * detect transmissions.
	 */
	scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
					IWL_GOOD_CRC_TH_DISABLED;

	/* Build the probe request template into the trailing data area */
	len = iwl_legacy_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
					vif->addr, priv->scan_request->ie,
					priv->scan_request->ie_len,
					IWL_MAX_SCAN_SIZE - sizeof(*scan));
	scan->tx_cmd.len = cpu_to_le16(len);

	/* select Rx antennas */
	scan->flags |= iwl3945_get_antenna_flags(priv);

	/* Channel list follows the probe request in scan->data */
	scan->channel_count = iwl3945_get_channels_for_scan(priv, band, is_active, n_probes,
							    (void *)&scan->data[len], vif);
	if (scan->channel_count == 0) {
		IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
		return -EIO;
	}

	/* Total command length = header + probe request + channel entries */
	cmd.len += le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct iwl3945_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	set_bit(STATUS_SCAN_HW, &priv->status);
	ret = iwl_legacy_send_cmd_sync(priv, &cmd);
	if (ret)
		clear_bit(STATUS_SCAN_HW, &priv->status);
	return ret;
}
2695
2696void iwl3945_post_scan(struct iwl_priv *priv)
2697{
2698 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2699
2700 /*
2701 * Since setting the RXON may have been deferred while
2702 * performing the scan, fire one off if needed
2703 */
2704 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
2705 iwl3945_commit_rxon(priv, ctx);
2706}
2707
/*
 * iwl3945_bg_restart - workqueue handler for priv->restart.
 *
 * Two paths: on a firmware error, detach vifs, take the device down and
 * ask mac80211 to restart the whole stack; otherwise just bounce the
 * device down and back up in place.
 */
static void iwl3945_bg_restart(struct work_struct *data)
{
	struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
		struct iwl_rxon_context *ctx;
		/* Detach interfaces and mark closed before tearing down;
		 * mac80211 will call back into us via restart_hw */
		mutex_lock(&priv->mutex);
		for_each_context(priv, ctx)
			ctx->vif = NULL;
		priv->is_open = 0;
		mutex_unlock(&priv->mutex);
		iwl3945_down(priv);
		ieee80211_restart_hw(priv->hw);
	} else {
		iwl3945_down(priv);

		/* Re-check for shutdown under the mutex before bringing
		 * the device back up */
		mutex_lock(&priv->mutex);
		if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
			mutex_unlock(&priv->mutex);
			return;
		}

		__iwl3945_up(priv);
		mutex_unlock(&priv->mutex);
	}
}
2737
2738static void iwl3945_bg_rx_replenish(struct work_struct *data)
2739{
2740 struct iwl_priv *priv =
2741 container_of(data, struct iwl_priv, rx_replenish);
2742
2743 mutex_lock(&priv->mutex);
2744 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2745 goto out;
2746
2747 iwl3945_rx_replenish(priv);
2748out:
2749 mutex_unlock(&priv->mutex);
2750}
2751
/*
 * iwl3945_post_associate - push association state to the device.
 *
 * Called once mac80211 reports an association on the BSS context.
 * Commits an unassociated RXON, sends RXON timing, then commits the
 * associated RXON with preamble/slot flags derived from bss_conf, and
 * finally starts rate scaling (STA) or beaconing (IBSS).
 */
void iwl3945_post_associate(struct iwl_priv *priv)
{
	int rc = 0;
	struct ieee80211_conf *conf = NULL;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	if (!ctx->vif || !priv->is_open)
		return;

	IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
			ctx->vif->bss_conf.aid, ctx->active.bssid_addr);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	iwl_legacy_scan_cancel_timeout(priv, 200);

	/* NOTE(review): 'conf' is assigned here but never read afterwards
	 * in this function — looks removable; verify the getter has no
	 * required side effects before cleaning up */
	conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);

	/* First commit without the ASSOC flag so timing can be sent */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	iwl3945_commit_rxon(priv, ctx);

	rc = iwl_legacy_send_rxon_timing(priv, ctx);
	if (rc)
		IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
			 "Attempting to continue.\n");

	ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;

	ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);

	IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
			ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int);

	if (ctx->vif->bss_conf.use_short_preamble)
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

	/* Short slot only applies on the 2.4 GHz band */
	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
		if (ctx->vif->bss_conf.use_short_slot)
			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
	}

	/* Second commit with the ASSOC flag set */
	iwl3945_commit_rxon(priv, ctx);

	switch (ctx->vif->type) {
	case NL80211_IFTYPE_STATION:
		iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
		break;
	case NL80211_IFTYPE_ADHOC:
		iwl3945_send_beacon_cmd(priv);
		break;
	default:
		IWL_ERR(priv, "%s Should not be called in %d mode\n",
			__func__, ctx->vif->type);
		break;
	}
}
2813
2814/*****************************************************************************
2815 *
2816 * mac80211 entry point functions
2817 *
2818 *****************************************************************************/
2819
2820#define UCODE_READY_TIMEOUT (2 * HZ)
2821
/*
 * iwl3945_mac_start - mac80211 .start callback.
 *
 * Reads the uCode from disk on first open, brings the device up, then
 * waits up to UCODE_READY_TIMEOUT for the uCode's ALIVE processing to
 * set STATUS_READY before declaring the interface open. Returns 0 on
 * success or a negative errno.
 */
static int iwl3945_mac_start(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;
	int ret;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&priv->mutex);

	/* fetch ucode file from disk, alloc and copy to bus-master buffers ...
	 * ucode filename and max sizes are card-specific. */

	if (!priv->ucode_code.len) {
		ret = iwl3945_read_ucode(priv);
		if (ret) {
			IWL_ERR(priv, "Could not read microcode: %d\n", ret);
			mutex_unlock(&priv->mutex);
			goto out_release_irq;
		}
	}

	ret = __iwl3945_up(priv);

	mutex_unlock(&priv->mutex);

	if (ret)
		goto out_release_irq;

	IWL_DEBUG_INFO(priv, "Start UP work.\n");

	/* Wait for START_ALIVE from ucode. Otherwise callbacks from
	 * mac80211 will not be run successfully. */
	ret = wait_event_timeout(priv->wait_command_queue,
				 test_bit(STATUS_READY, &priv->status),
				 UCODE_READY_TIMEOUT);
	if (!ret) {
		/* Re-check READY: it may have been set just after timeout */
		if (!test_bit(STATUS_READY, &priv->status)) {
			IWL_ERR(priv,
				"Wait for START_ALIVE timeout after %dms.\n",
				jiffies_to_msecs(UCODE_READY_TIMEOUT));
			ret = -ETIMEDOUT;
			goto out_release_irq;
		}
	}

	/* ucode is running and will send rfkill notifications,
	 * no need to poll the killswitch state anymore */
	cancel_delayed_work(&priv->_3945.rfkill_poll);

	priv->is_open = 1;
	IWL_DEBUG_MAC80211(priv, "leave\n");
	return 0;

	/* NOTE(review): label name is historical — no IRQ is released here,
	 * only the open flag is cleared */
out_release_irq:
	priv->is_open = 0;
	IWL_DEBUG_MAC80211(priv, "leave - failed\n");
	return ret;
}
2881
2882static void iwl3945_mac_stop(struct ieee80211_hw *hw)
2883{
2884 struct iwl_priv *priv = hw->priv;
2885
2886 IWL_DEBUG_MAC80211(priv, "enter\n");
2887
2888 if (!priv->is_open) {
2889 IWL_DEBUG_MAC80211(priv, "leave - skip\n");
2890 return;
2891 }
2892
2893 priv->is_open = 0;
2894
2895 iwl3945_down(priv);
2896
2897 flush_workqueue(priv->workqueue);
2898
2899 /* start polling the killswitch state again */
2900 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
2901 round_jiffies_relative(2 * HZ));
2902
2903 IWL_DEBUG_MAC80211(priv, "leave\n");
2904}
2905
2906static void iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2907{
2908 struct iwl_priv *priv = hw->priv;
2909
2910 IWL_DEBUG_MAC80211(priv, "enter\n");
2911
2912 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
2913 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
2914
2915 if (iwl3945_tx_skb(priv, skb))
2916 dev_kfree_skb_any(skb);
2917
2918 IWL_DEBUG_MAC80211(priv, "leave\n");
2919}
2920
/*
 * iwl3945_config_ap - configure the device for AP/IBSS operation.
 *
 * On first bring-up (not yet associated) it commits an unassociated
 * RXON, sends timing, applies preamble/slot flags, then re-commits with
 * the ASSOC flag. Always finishes by sending the beacon command.
 */
void iwl3945_config_ap(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct ieee80211_vif *vif = ctx->vif;
	int rc = 0;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* The following should be done only at AP bring up */
	if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) {

		/* RXON - unassoc (to set timing command) */
		ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		iwl3945_commit_rxon(priv, ctx);

		/* RXON Timing */
		rc = iwl_legacy_send_rxon_timing(priv, ctx);
		if (rc)
			IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
					"Attempting to continue.\n");

		ctx->staging.assoc_id = 0;

		if (vif->bss_conf.use_short_preamble)
			ctx->staging.flags |=
				RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &=
				~RXON_FLG_SHORT_PREAMBLE_MSK;

		/* Short slot only applies on the 2.4 GHz band */
		if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
			if (vif->bss_conf.use_short_slot)
				ctx->staging.flags |=
					RXON_FLG_SHORT_SLOT_MSK;
			else
				ctx->staging.flags &=
					~RXON_FLG_SHORT_SLOT_MSK;
		}
		/* restore RXON assoc */
		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		iwl3945_commit_rxon(priv, ctx);
	}
	iwl3945_send_beacon_cmd(priv);
}
2966
/*
 * mac80211 ->set_key callback: program or remove a hardware crypto key.
 *
 * A "static" (global/WEP-style) key is used when not associated; a
 * dynamic per-station key otherwise. Returns -EOPNOTSUPP when hardware
 * crypto is disabled or for IBSS group keys (handled in software for
 * IBSS RSN), -EINVAL for unknown stations or commands.
 */
static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       struct ieee80211_key_conf *key)
{
	struct iwl_priv *priv = hw->priv;
	int ret = 0;
	u8 sta_id = IWL_INVALID_STATION;
	u8 static_key;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (iwl3945_mod_params.sw_crypto) {
		IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/*
	 * To support IBSS RSN, don't program group keys in IBSS, the
	 * hardware will then not attempt to decrypt the frames.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return -EOPNOTSUPP;

	/* Before association only static (global) keys can be programmed */
	static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);

	if (!static_key) {
		/* Resolve to the station's id (or the broadcast station) */
		sta_id = iwl_legacy_sta_id_or_broadcast(
				priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
		if (sta_id == IWL_INVALID_STATION)
			return -EINVAL;
	}

	/* Key programming must not race with an in-progress scan */
	mutex_lock(&priv->mutex);
	iwl_legacy_scan_cancel_timeout(priv, 100);

	switch (cmd) {
	case SET_KEY:
		if (static_key)
			ret = iwl3945_set_static_key(priv, key);
		else
			ret = iwl3945_set_dynamic_key(priv, key, sta_id);
		IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (static_key)
			ret = iwl3945_remove_static_key(priv);
		else
			ret = iwl3945_clear_sta_key_info(priv, sta_id);
		IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");

	return ret;
}
3028
/*
 * mac80211 ->sta_add callback: add a station to the device's station
 * table and initialize rate scaling for it.
 */
static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv;
	int ret;
	/*
	 * NOTE(review): on a station interface the peer being added is our
	 * AP, hence "is_ap" when vif->type == STATION — presumably the flag
	 * means "this station is the AP of our BSS"; confirm against
	 * iwl_legacy_add_station_common().
	 */
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	u8 sta_id;

	IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
			sta->addr);
	mutex_lock(&priv->mutex);
	IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
			sta->addr);
	/* Mark invalid until the firmware assigns a real station id */
	sta_priv->common.sta_id = IWL_INVALID_STATION;


	ret = iwl_legacy_add_station_common(priv,
				&priv->contexts[IWL_RXON_CTX_BSS],
				sta->addr, is_ap, sta, &sta_id);
	if (ret) {
		IWL_ERR(priv, "Unable to add station %pM (%d)\n",
			sta->addr, ret);
		/* Should we return success if return code is EEXIST ? */
		mutex_unlock(&priv->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Initialize rate scaling */
	IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
		       sta->addr);
	iwl3945_rs_rate_init(priv, sta, sta_id);
	mutex_unlock(&priv->mutex);

	return 0;
}
3068
/*
 * mac80211 ->configure_filter callback: translate FIF_* filter flags
 * into RXON filter bits on the staging config. The change is not
 * committed to the device here (see comment below); it is picked up by
 * the next RXON commit.
 */
static void iwl3945_configure_filter(struct ieee80211_hw *hw,
				     unsigned int changed_flags,
				     unsigned int *total_flags,
				     u64 multicast)
{
	struct iwl_priv *priv = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

/* Accumulate bits to set (filter_or) or clear (filter_nand) per FIF flag */
#define CHK(test, flag)	do { \
	if (*total_flags & (test)) \
		filter_or |= (flag); \
	else \
		filter_nand |= (flag); \
	} while (0)

	IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
			changed_flags, *total_flags);

	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&priv->mutex);

	ctx->staging.filter_flags &= ~filter_nand;
	ctx->staging.filter_flags |= filter_or;

	/*
	 * Not committing directly because hardware can perform a scan,
	 * but even if hw is ready, committing here breaks for some reason,
	 * we'll eventually commit the filter flags change anyway.
	 */

	mutex_unlock(&priv->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in iwl_legacy_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
3116
3117
3118/*****************************************************************************
3119 *
3120 * sysfs attributes
3121 *
3122 *****************************************************************************/
3123
3124#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
3125
3126/*
3127 * The following adds a new attribute to the sysfs representation
3128 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
3129 * used for controlling the debug level.
3130 *
3131 * See the level definitions in iwl for details.
3132 *
3133 * The debug_level being managed using sysfs below is a per device debug
3134 * level that is used instead of the global debug level if it (the per
3135 * device debug level) is set.
3136 */
3137static ssize_t iwl3945_show_debug_level(struct device *d,
3138 struct device_attribute *attr, char *buf)
3139{
3140 struct iwl_priv *priv = dev_get_drvdata(d);
3141 return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
3142}
/*
 * sysfs write: set the per-device debug mask (overrides the global
 * level). NOTE(review): a parse failure is only logged and the write
 * still reports success — presumably intentional legacy behavior.
 */
static ssize_t iwl3945_store_debug_level(struct device *d,
				struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	unsigned long val;
	int ret;

	ret = strict_strtoul(buf, 0, &val);
	if (ret)
		IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
	else {
		priv->debug_level = val;
		/* traffic log buffers are only needed once debugging is on */
		if (iwl_legacy_alloc_traffic_mem(priv))
			IWL_ERR(priv,
				"Not enough memory to generate traffic log\n");
	}
	return strnlen(buf, count);
}

static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
			iwl3945_show_debug_level, iwl3945_store_debug_level);
3165
3166#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
3167
3168static ssize_t iwl3945_show_temperature(struct device *d,
3169 struct device_attribute *attr, char *buf)
3170{
3171 struct iwl_priv *priv = dev_get_drvdata(d);
3172
3173 if (!iwl_legacy_is_alive(priv))
3174 return -EAGAIN;
3175
3176 return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
3177}
3178
3179static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL);
3180
3181static ssize_t iwl3945_show_tx_power(struct device *d,
3182 struct device_attribute *attr, char *buf)
3183{
3184 struct iwl_priv *priv = dev_get_drvdata(d);
3185 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
3186}
3187
/*
 * sysfs write: set the TX power limit (decimal). A non-numeric value is
 * only logged; the write still reports success.
 */
static ssize_t iwl3945_store_tx_power(struct device *d,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	char *p = (char *)buf;
	u32 val;

	val = simple_strtoul(p, &p, 10);
	if (p == buf)
		/* p unchanged means no digits were consumed */
		IWL_INFO(priv, ": %s is not in decimal form.\n", buf);
	else
		iwl3945_hw_reg_set_txpower(priv, val);

	return count;
}

static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power, iwl3945_store_tx_power);
3206
3207static ssize_t iwl3945_show_flags(struct device *d,
3208 struct device_attribute *attr, char *buf)
3209{
3210 struct iwl_priv *priv = dev_get_drvdata(d);
3211 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3212
3213 return sprintf(buf, "0x%04X\n", ctx->active.flags);
3214}
3215
/*
 * sysfs write: replace the staging RXON flags and commit, unless a
 * running scan cannot be cancelled (commit is skipped with a warning).
 */
static ssize_t iwl3945_store_flags(struct device *d,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	u32 flags = simple_strtoul(buf, NULL, 0);
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	mutex_lock(&priv->mutex);
	if (le32_to_cpu(ctx->staging.flags) != flags) {
		/* Cancel any currently running scans... */
		if (iwl_legacy_scan_cancel_timeout(priv, 100))
			IWL_WARN(priv, "Could not cancel scan.\n");
		else {
			IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
				       flags);
			ctx->staging.flags = cpu_to_le32(flags);
			iwl3945_commit_rxon(priv, ctx);
		}
	}
	mutex_unlock(&priv->mutex);

	return count;
}

static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags);
3242
3243static ssize_t iwl3945_show_filter_flags(struct device *d,
3244 struct device_attribute *attr, char *buf)
3245{
3246 struct iwl_priv *priv = dev_get_drvdata(d);
3247 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3248
3249 return sprintf(buf, "0x%04X\n",
3250 le32_to_cpu(ctx->active.filter_flags));
3251}
3252
/*
 * sysfs write: replace the staging RXON filter flags and commit, unless
 * a running scan cannot be cancelled (commit is skipped with a warning).
 */
static ssize_t iwl3945_store_filter_flags(struct device *d,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	u32 filter_flags = simple_strtoul(buf, NULL, 0);

	mutex_lock(&priv->mutex);
	if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
		/* Cancel any currently running scans... */
		if (iwl_legacy_scan_cancel_timeout(priv, 100))
			IWL_WARN(priv, "Could not cancel scan.\n");
		else {
			IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
				       "0x%04X\n", filter_flags);
			ctx->staging.filter_flags =
				cpu_to_le32(filter_flags);
			iwl3945_commit_rxon(priv, ctx);
		}
	}
	mutex_unlock(&priv->mutex);

	return count;
}

static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags,
			iwl3945_store_filter_flags);
3281
/*
 * sysfs read: hex-dump the latest spectrum measurement report, then
 * clear the ready flag so a report is consumed at most once. Returns 0
 * (empty file) when no report is pending.
 */
static ssize_t iwl3945_show_measurement(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	struct iwl_spectrum_notification measure_report;
	u32 size = sizeof(measure_report), len = 0, ofs = 0;
	u8 *data = (u8 *)&measure_report;
	unsigned long flags;

	/* Snapshot the report under the lock, then format outside it */
	spin_lock_irqsave(&priv->lock, flags);
	if (!(priv->measurement_status & MEASUREMENT_READY)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return 0;
	}
	memcpy(&measure_report, &priv->measure_report, size);
	priv->measurement_status = 0;
	spin_unlock_irqrestore(&priv->lock, flags);

	/* One 16-byte hex row per line until the data or the page is spent */
	while (size && (PAGE_SIZE - len)) {
		hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
				   PAGE_SIZE - len, 1);
		len = strlen(buf);
		if (PAGE_SIZE - len)
			buf[len++] = '\n';

		ofs += 16;
		size -= min(size, 16U);
	}

	return len;
}
3313
3314static ssize_t iwl3945_store_measurement(struct device *d,
3315 struct device_attribute *attr,
3316 const char *buf, size_t count)
3317{
3318 struct iwl_priv *priv = dev_get_drvdata(d);
3319 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3320 struct ieee80211_measurement_params params = {
3321 .channel = le16_to_cpu(ctx->active.channel),
3322 .start_time = cpu_to_le64(priv->_3945.last_tsf),
3323 .duration = cpu_to_le16(1),
3324 };
3325 u8 type = IWL_MEASURE_BASIC;
3326 u8 buffer[32];
3327 u8 channel;
3328
3329 if (count) {
3330 char *p = buffer;
3331 strncpy(buffer, buf, min(sizeof(buffer), count));
3332 channel = simple_strtoul(p, NULL, 0);
3333 if (channel)
3334 params.channel = channel;
3335
3336 p = buffer;
3337 while (*p && *p != ' ')
3338 p++;
3339 if (*p)
3340 type = simple_strtoul(p + 1, NULL, 0);
3341 }
3342
3343 IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on "
3344 "channel %d (for '%s')\n", type, params.channel, buf);
3345 iwl3945_get_measurement(priv, &params, type);
3346
3347 return count;
3348}
3349
3350static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
3351 iwl3945_show_measurement, iwl3945_store_measurement);
3352
/*
 * sysfs write: set the TX retry rate; clamped to a minimum of 1.
 * NOTE(review): the "<= 0" check only catches 0 if retry_rate is an
 * unsigned type — confirm the field's declaration in iwl-dev.h.
 */
static ssize_t iwl3945_store_retry_rate(struct device *d,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);

	priv->retry_rate = simple_strtoul(buf, NULL, 0);
	if (priv->retry_rate <= 0)
		priv->retry_rate = 1;

	return count;
}
3365
3366static ssize_t iwl3945_show_retry_rate(struct device *d,
3367 struct device_attribute *attr, char *buf)
3368{
3369 struct iwl_priv *priv = dev_get_drvdata(d);
3370 return sprintf(buf, "%d", priv->retry_rate);
3371}
3372
3373static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate,
3374 iwl3945_store_retry_rate);
3375
3376
static ssize_t iwl3945_show_channels(struct device *d,
			struct device_attribute *attr, char *buf)
{
	/*
	 * The channel listing was removed (channel information belongs in
	 * cfg80211/debugfs, not sysfs); the attribute is kept only for
	 * ABI compatibility and reads as an empty file.
	 */
	return 0;
}

static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL);
3385
3386static ssize_t iwl3945_show_antenna(struct device *d,
3387 struct device_attribute *attr, char *buf)
3388{
3389 struct iwl_priv *priv = dev_get_drvdata(d);
3390
3391 if (!iwl_legacy_is_alive(priv))
3392 return -EAGAIN;
3393
3394 return sprintf(buf, "%d\n", iwl3945_mod_params.antenna);
3395}
3396
/*
 * sysfs write: select the antenna (0 = both, 1 = main, 2 = aux). Out of
 * range or unparsable values are only logged; the write still reports
 * success.
 */
static ssize_t iwl3945_store_antenna(struct device *d,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	/* priv is only used by the debug macros, hence __maybe_unused */
	struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d);
	int ant;

	if (count == 0)
		return 0;

	if (sscanf(buf, "%1i", &ant) != 1) {
		IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n");
		return count;
	}

	if ((ant >= 0) && (ant <= 2)) {
		IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant);
		iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant;
	} else
		IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant);


	return count;
}

static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna, iwl3945_store_antenna);
3423
/*
 * sysfs read: raw STATUS_* bitmask (-EAGAIN until firmware is alive).
 * NOTE(review): priv->status is used with test_bit() elsewhere, so it is
 * presumably unsigned long; the (int) cast truncates to the low 32 bits.
 */
static ssize_t iwl3945_show_status(struct device *d,
			struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	if (!iwl_legacy_is_alive(priv))
		return -EAGAIN;
	return sprintf(buf, "0x%08x\n", (int)priv->status);
}

static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL);
3434
/*
 * sysfs write: writing a string beginning with '1' dumps the firmware
 * error log to the kernel log; anything else is a no-op.
 */
static ssize_t iwl3945_dump_error_log(struct device *d,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	char *p = (char *)buf;

	if (p[0] == '1')
		iwl3945_dump_nic_error_log(priv);

	return strnlen(buf, count);
}

static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log);
3449
3450/*****************************************************************************
3451 *
3452 * driver setup and tear down
3453 *
3454 *****************************************************************************/
3455
/*
 * One-time setup of the driver's workqueue, work items, scan work,
 * watchdog timer and IRQ tasklet. Called once from probe.
 */
static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
{
	priv->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&priv->wait_command_queue);

	INIT_WORK(&priv->restart, iwl3945_bg_restart);
	INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
	INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
	INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
	INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);

	iwl_legacy_setup_scan_deferred_work(priv);

	iwl3945_hw_setup_deferred_work(priv);

	/* Watchdog is armed elsewhere; only initialized here */
	init_timer(&priv->watchdog);
	priv->watchdog.data = (unsigned long)priv;
	priv->watchdog.function = iwl_legacy_bg_watchdog;

	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
		iwl3945_irq_tasklet, (unsigned long)priv);
}
3479
/*
 * Cancel work scheduled by iwl3945_setup_deferred_work().
 * NOTE(review): init_alive_start is cancelled synchronously but
 * alive_start is not — presumably deliberate; confirm against the
 * down/teardown paths before changing.
 */
static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
{
	iwl3945_hw_cancel_deferred_work(priv);

	cancel_delayed_work_sync(&priv->init_alive_start);
	cancel_delayed_work(&priv->alive_start);

	iwl_legacy_cancel_scan_deferred_work(priv);
}
3489
/* Attributes exported under the PCI device's sysfs directory. */
static struct attribute *iwl3945_sysfs_entries[] = {
	&dev_attr_antenna.attr,
	&dev_attr_channels.attr,
	&dev_attr_dump_errors.attr,
	&dev_attr_flags.attr,
	&dev_attr_filter_flags.attr,
	&dev_attr_measurement.attr,
	&dev_attr_retry_rate.attr,
	&dev_attr_status.attr,
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL	/* sentinel required by sysfs_create_group() */
};
3506
/* Group registered in probe via sysfs_create_group(). */
static struct attribute_group iwl3945_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = iwl3945_sysfs_entries,
};
3511
/*
 * mac80211 callbacks. Not const/static: probe clears .hw_scan at runtime
 * when the disable_hw_scan module parameter is set.
 */
struct ieee80211_ops iwl3945_hw_ops = {
	.tx = iwl3945_mac_tx,
	.start = iwl3945_mac_start,
	.stop = iwl3945_mac_stop,
	.add_interface = iwl_legacy_mac_add_interface,
	.remove_interface = iwl_legacy_mac_remove_interface,
	.change_interface = iwl_legacy_mac_change_interface,
	.config = iwl_legacy_mac_config,
	.configure_filter = iwl3945_configure_filter,
	.set_key = iwl3945_mac_set_key,
	.conf_tx = iwl_legacy_mac_conf_tx,
	.reset_tsf = iwl_legacy_mac_reset_tsf,
	.bss_info_changed = iwl_legacy_mac_bss_info_changed,
	.hw_scan = iwl_legacy_mac_hw_scan,
	.sta_add = iwl3945_mac_sta_add,
	.sta_remove = iwl_legacy_mac_sta_remove,
	.tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
};
3530
/*
 * Initialize driver-private state: locks, defaults, EEPROM version
 * check, regulatory channel map, per-channel TX power and geo/rate
 * tables. Returns 0 or a negative errno; on failure everything
 * allocated here is unwound.
 */
static int iwl3945_init_drv(struct iwl_priv *priv)
{
	int ret;
	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;

	priv->retry_rate = 1;
	priv->beacon_skb = NULL;

	spin_lock_init(&priv->sta_lock);
	spin_lock_init(&priv->hcmd_lock);

	INIT_LIST_HEAD(&priv->free_frames);

	mutex_init(&priv->mutex);

	priv->ieee_channels = NULL;
	priv->ieee_rates = NULL;
	priv->band = IEEE80211_BAND_2GHZ;

	priv->iw_mode = NL80211_IFTYPE_STATION;
	priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;

	/* initialize force reset */
	priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;

	if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
		IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
			 eeprom->version);
		ret = -EINVAL;
		goto err;
	}
	ret = iwl_legacy_init_channel_map(priv);
	if (ret) {
		IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
		goto err;
	}

	/* Set up txpower settings in driver for all channels */
	if (iwl3945_txpower_set_from_eeprom(priv)) {
		ret = -EIO;
		goto err_free_channel_map;
	}

	ret = iwl_legacy_init_geos(priv);
	if (ret) {
		IWL_ERR(priv, "initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	iwl3945_init_hw_rates(priv, priv->ieee_rates);

	return 0;

err_free_channel_map:
	iwl_legacy_free_channel_map(priv);
err:
	return ret;
}
3588
/* Largest probe request frame the 3945 firmware accepts, in bytes */
#define IWL3945_MAX_PROBE_REQUEST	200

/*
 * Describe the hardware's capabilities to mac80211 and register the
 * device. Returns 0 or the ieee80211_register_hw() error.
 */
static int iwl3945_setup_mac(struct iwl_priv *priv)
{
	int ret;
	struct ieee80211_hw *hw = priv->hw;

	hw->rate_control_algorithm = "iwl-3945-rs";
	hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
	hw->vif_data_size = sizeof(struct iwl_vif_priv);

	/* Tell mac80211 our characteristics */
	hw->flags = IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_SPECTRUM_MGMT;

	hw->wiphy->interface_modes =
		priv->contexts[IWL_RXON_CTX_BSS].interface_modes;

	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
			    WIPHY_FLAG_DISABLE_BEACON_HINTS |
			    WIPHY_FLAG_IBSS_RSN;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
	/* we create the 802.11 header and a zero-length SSID element */
	hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2;

	/* Default value; 4 EDCA QOS priorities */
	hw->queues = 4;

	/* Only advertise bands for which the channel map found channels */
	if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&priv->bands[IEEE80211_BAND_2GHZ];

	if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&priv->bands[IEEE80211_BAND_5GHZ];

	iwl_legacy_leds_init(priv);

	ret = ieee80211_register_hw(priv->hw);
	if (ret) {
		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
		return ret;
	}
	priv->mac80211_registered = 1;

	return 0;
}
3637
/*
 * PCI probe: allocate the mac80211 hw and driver state, bring up the
 * PCI bus, read the EEPROM, set hardware parameters, register IRQ,
 * sysfs and mac80211, and start rfkill polling. Failure unwinds in
 * strict reverse order through the goto ladder at the bottom.
 */
static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0, i;
	struct iwl_priv *priv;
	struct ieee80211_hw *hw;
	struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
	struct iwl3945_eeprom *eeprom;
	unsigned long flags;

	/***********************
	 * 1. Allocating HW data
	 * ********************/

	/* mac80211 allocates memory for this device instance, including
	 * space for this driver's private structure */
	hw = iwl_legacy_alloc_all(cfg);
	if (hw == NULL) {
		pr_err("Can not allocate network device\n");
		err = -ENOMEM;
		goto out;
	}
	priv = hw->priv;
	SET_IEEE80211_DEV(hw, &pdev->dev);

	priv->cmd_queue = IWL39_CMD_QUEUE_NUM;

	/* 3945 has only one valid context */
	priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);

	for (i = 0; i < NUM_IWL_RXON_CTX; i++)
		priv->contexts[i].ctxid = i;

	priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
	priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
	priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
	priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
	priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
	priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
	priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC);
	priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
	priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
	priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;

	/*
	 * Disabling hardware scan means that mac80211 will perform scans
	 * "the hard way", rather than using device's scan.
	 */
	if (iwl3945_mod_params.disable_hw_scan) {
		IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
		iwl3945_hw_ops.hw_scan = NULL;
	}

	IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
	priv->cfg = cfg;
	priv->pci_dev = pdev;
	priv->inta_mask = CSR_INI_SET_MASK;

	if (iwl_legacy_alloc_traffic_mem(priv))
		IWL_ERR(priv, "Not enough memory to generate traffic log\n");

	/***************************
	 * 2. Initializing PCI bus
	 * *************************/
	/* ASPM power states interfere with the device; keep them off */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
				PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_ieee80211_free_hw;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		IWL_WARN(priv, "No suitable DMA available.\n");
		goto out_pci_disable_device;
	}

	pci_set_drvdata(pdev, priv);
	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	/***********************
	 * 3. Read REV Register
	 * ********************/
	priv->hw_base = pci_iomap(pdev, 0, 0);
	if (!priv->hw_base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
			(unsigned long long) pci_resource_len(pdev, 0));
	IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, 0x41, 0x00);

	/* these spin locks will be used in apm_ops.init and EEPROM access
	 * we should init now
	 */
	spin_lock_init(&priv->reg_lock);
	spin_lock_init(&priv->lock);

	/*
	 * stop and reset the on-board processor just in case it is in a
	 * strange state ... like being left stranded by a primary kernel
	 * and this is now the kdump kernel trying to start up
	 */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/***********************
	 * 4. Read EEPROM
	 * ********************/

	/* Read the EEPROM */
	err = iwl_legacy_eeprom_init(priv);
	if (err) {
		IWL_ERR(priv, "Unable to init EEPROM\n");
		goto out_iounmap;
	}
	/* MAC Address location in EEPROM same for 3945/4965 */
	eeprom = (struct iwl3945_eeprom *)priv->eeprom;
	IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address);
	SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address);

	/***********************
	 * 5. Setup HW Constants
	 * ********************/
	/* Device-specific setup */
	if (iwl3945_hw_set_hw_params(priv)) {
		IWL_ERR(priv, "failed to set hw settings\n");
		goto out_eeprom_free;
	}

	/***********************
	 * 6. Setup priv
	 * ********************/

	err = iwl3945_init_drv(priv);
	if (err) {
		IWL_ERR(priv, "initializing driver failed\n");
		goto out_unset_hw_params;
	}

	IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n",
		priv->cfg->name);

	/***********************
	 * 7. Setup Services
	 * ********************/

	spin_lock_irqsave(&priv->lock, flags);
	iwl_legacy_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	pci_enable_msi(priv->pci_dev);

	err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
			  IRQF_SHARED, DRV_NAME, priv);
	if (err) {
		IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
		goto out_disable_msi;
	}

	err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
	if (err) {
		IWL_ERR(priv, "failed to create sysfs device attributes\n");
		goto out_release_irq;
	}

	iwl_legacy_set_rxon_channel(priv,
			     &priv->bands[IEEE80211_BAND_2GHZ].channels[5],
			     &priv->contexts[IWL_RXON_CTX_BSS]);
	iwl3945_setup_deferred_work(priv);
	iwl3945_setup_rx_handlers(priv);
	iwl_legacy_power_initialize(priv);

	/*********************************
	 * 8. Setup and Register mac80211
	 * *******************************/

	iwl_legacy_enable_interrupts(priv);

	err = iwl3945_setup_mac(priv);
	if (err)
		goto out_remove_sysfs;

	/* debugfs is best-effort; failure is logged but not fatal */
	err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
	if (err)
		IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);

	/* Start monitoring the killswitch */
	queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
			   2 * HZ);

	return 0;

 out_remove_sysfs:
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
	sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
 out_release_irq:
	free_irq(priv->pci_dev->irq, priv);
 out_disable_msi:
	pci_disable_msi(priv->pci_dev);
	iwl_legacy_free_geos(priv);
	iwl_legacy_free_channel_map(priv);
 out_unset_hw_params:
	iwl3945_unset_hw_params(priv);
 out_eeprom_free:
	iwl_legacy_eeprom_free(priv);
 out_iounmap:
	pci_iounmap(pdev, priv->hw_base);
 out_pci_release_regions:
	pci_release_regions(pdev);
 out_pci_disable_device:
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
 out_ieee80211_free_hw:
	iwl_legacy_free_traffic_mem(priv);
	ieee80211_free_hw(priv->hw);
 out:
	return err;
}
3870
/*
 * PCI remove: unregister from mac80211, reset the device, quiesce IRQs
 * and work, then free everything probe allocated — in reverse order.
 */
static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
{
	struct iwl_priv *priv = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!priv)
		return;

	IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");

	iwl_legacy_dbgfs_unregister(priv);

	set_bit(STATUS_EXIT_PENDING, &priv->status);

	iwl_legacy_leds_exit(priv);

	if (priv->mac80211_registered) {
		/* unregister_hw invokes iwl3945_mac_stop -> iwl3945_down */
		ieee80211_unregister_hw(priv->hw);
		priv->mac80211_registered = 0;
	} else {
		iwl3945_down(priv);
	}

	/*
	 * Make sure device is reset to low power before unloading driver.
	 * This may be redundant with iwl_down(), but there are paths to
	 * run iwl_down() without calling apm_ops.stop(), and there are
	 * paths to avoid running iwl_down() at all before leaving driver.
	 * This (inexpensive) call *makes sure* device is reset.
	 */
	iwl_legacy_apm_stop(priv);

	/* make sure we flush any pending irq or
	 * tasklet for the driver
	 */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_legacy_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl3945_synchronize_irq(priv);

	sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);

	cancel_delayed_work_sync(&priv->_3945.rfkill_poll);

	iwl3945_dealloc_ucode_pci(priv);

	if (priv->rxq.bd)
		iwl3945_rx_queue_free(priv, &priv->rxq);
	iwl3945_hw_txq_ctx_free(priv);

	iwl3945_unset_hw_params(priv);

	/*netif_stop_queue(dev); */
	flush_workqueue(priv->workqueue);

	/* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes
	 * priv->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
	iwl_legacy_free_traffic_mem(priv);

	free_irq(pdev->irq, priv);
	pci_disable_msi(pdev);

	pci_iounmap(pdev, priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	iwl_legacy_free_channel_map(priv);
	iwl_legacy_free_geos(priv);
	kfree(priv->scan_cmd);
	if (priv->beacon_skb)
		dev_kfree_skb(priv->beacon_skb);

	ieee80211_free_hw(priv->hw);
}
3950
3951
3952/*****************************************************************************
3953 *
3954 * driver and module entry point
3955 *
3956 *****************************************************************************/
3957
/* PCI driver glue; PM callbacks come from the shared legacy core. */
static struct pci_driver iwl3945_driver = {
	.name = DRV_NAME,
	.id_table = iwl3945_hw_card_ids,
	.probe = iwl3945_pci_probe,
	.remove = __devexit_p(iwl3945_pci_remove),
	.driver.pm = IWL_LEGACY_PM_OPS,
};
3965
3966static int __init iwl3945_init(void)
3967{
3968
3969 int ret;
3970 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
3971 pr_info(DRV_COPYRIGHT "\n");
3972
3973 ret = iwl3945_rate_control_register();
3974 if (ret) {
3975 pr_err("Unable to register rate control algorithm: %d\n", ret);
3976 return ret;
3977 }
3978
3979 ret = pci_register_driver(&iwl3945_driver);
3980 if (ret) {
3981 pr_err("Unable to initialize PCI module\n");
3982 goto error_register;
3983 }
3984
3985 return ret;
3986
3987error_register:
3988 iwl3945_rate_control_unregister();
3989 return ret;
3990}
3991
/* Module unload: unregister in reverse order of iwl3945_init(). */
static void __exit iwl3945_exit(void)
{
	pci_unregister_driver(&iwl3945_driver);
	iwl3945_rate_control_unregister();
}
3997
/* Request the highest supported ucode API image at module load */
MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));

/* Module parameters backing iwl3945_mod_params (read-only via sysfs) */
module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto,
		"using software crypto (default 1 [software])");
module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
		int, S_IRUGO);
MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Global debug mask; may be overridden per device via sysfs debug_level */
module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif
module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");

module_exit(iwl3945_exit);
module_init(iwl3945_init);
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
deleted file mode 100644
index d2fba9eae153..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ /dev/null
@@ -1,3281 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/firmware.h>
44#include <linux/etherdevice.h>
45#include <linux/if_arp.h>
46
47#include <net/mac80211.h>
48
49#include <asm/div64.h>
50
51#define DRV_NAME "iwl4965"
52
53#include "iwl-eeprom.h"
54#include "iwl-dev.h"
55#include "iwl-core.h"
56#include "iwl-io.h"
57#include "iwl-helpers.h"
58#include "iwl-sta.h"
59#include "iwl-4965-calib.h"
60#include "iwl-4965.h"
61#include "iwl-4965-led.h"
62
63
64/******************************************************************************
65 *
66 * module boiler plate
67 *
68 ******************************************************************************/
69
70/*
71 * module name, copyright, version, etc.
72 */
73#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
74
75#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
76#define VD "d"
77#else
78#define VD
79#endif
80
81#define DRV_VERSION IWLWIFI_VERSION VD
82
83
/* Standard module metadata; the alias allows the module to be requested
 * as "iwl4965". */
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_ALIAS("iwl4965");
89
90void iwl4965_update_chain_flags(struct iwl_priv *priv)
91{
92 struct iwl_rxon_context *ctx;
93
94 if (priv->cfg->ops->hcmd->set_rxon_chain) {
95 for_each_context(priv, ctx) {
96 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
97 if (ctx->active.rx_chain != ctx->staging.rx_chain)
98 iwl_legacy_commit_rxon(priv, ctx);
99 }
100 }
101}
102
103static void iwl4965_clear_free_frames(struct iwl_priv *priv)
104{
105 struct list_head *element;
106
107 IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
108 priv->frames_count);
109
110 while (!list_empty(&priv->free_frames)) {
111 element = priv->free_frames.next;
112 list_del(element);
113 kfree(list_entry(element, struct iwl_frame, list));
114 priv->frames_count--;
115 }
116
117 if (priv->frames_count) {
118 IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
119 priv->frames_count);
120 priv->frames_count = 0;
121 }
122}
123
124static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
125{
126 struct iwl_frame *frame;
127 struct list_head *element;
128 if (list_empty(&priv->free_frames)) {
129 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
130 if (!frame) {
131 IWL_ERR(priv, "Could not allocate frame!\n");
132 return NULL;
133 }
134
135 priv->frames_count++;
136 return frame;
137 }
138
139 element = priv->free_frames.next;
140 list_del(element);
141 return list_entry(element, struct iwl_frame, list);
142}
143
/* Return a frame to the reuse pool, scrubbing its previous contents
 * first (memset also clears frame->list before it is re-linked). */
static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
{
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &priv->free_frames);
}
149
150static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv,
151 struct ieee80211_hdr *hdr,
152 int left)
153{
154 lockdep_assert_held(&priv->mutex);
155
156 if (!priv->beacon_skb)
157 return 0;
158
159 if (priv->beacon_skb->len > left)
160 return 0;
161
162 memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
163
164 return priv->beacon_skb->len;
165}
166
167/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
168static void iwl4965_set_beacon_tim(struct iwl_priv *priv,
169 struct iwl_tx_beacon_cmd *tx_beacon_cmd,
170 u8 *beacon, u32 frame_size)
171{
172 u16 tim_idx;
173 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
174
175 /*
176 * The index is relative to frame start but we start looking at the
177 * variable-length part of the beacon.
178 */
179 tim_idx = mgmt->u.beacon.variable - beacon;
180
181 /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
182 while ((tim_idx < (frame_size - 2)) &&
183 (beacon[tim_idx] != WLAN_EID_TIM))
184 tim_idx += beacon[tim_idx+1] + 2;
185
186 /* If TIM field was found, set variables */
187 if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
188 tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
189 tx_beacon_cmd->tim_size = beacon[tim_idx+1];
190 } else
191 IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
192}
193
/**
 * iwl4965_hw_get_beacon_cmd - assemble a complete TX beacon command
 * @priv: driver private data (caller must hold priv->mutex)
 * @frame: frame buffer whose u.beacon member is filled in
 *
 * Builds the TX command, the beacon-specific fields (TIM), and the beacon
 * contents from the cached template. Returns the total command size in
 * bytes, or 0 on failure (no beacon context, no/oversized template).
 */
static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
				       struct iwl_frame *frame)
{
	struct iwl_tx_beacon_cmd *tx_beacon_cmd;
	u32 frame_size;
	u32 rate_flags;
	u32 rate;
	/*
	 * We have to set up the TX command, the TX Beacon command, and the
	 * beacon contents.
	 */

	lockdep_assert_held(&priv->mutex);

	if (!priv->beacon_ctx) {
		IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
		return 0;
	}

	/* Initialize memory */
	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Set up TX beacon contents; space available is whatever remains
	 * of the frame union after the command header. */
	frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame,
				sizeof(frame->u) - sizeof(*tx_beacon_cmd));
	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
		return 0;
	if (!frame_size)
		return 0;

	/* Set up TX command fields */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
	tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
		TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;

	/* Set up TX beacon command fields (locates the TIM element) */
	iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
			frame_size);

	/* Set up packet rate and flags; alternate the management TX antenna
	 * among the valid ones and use the lowest mandatory PLCP rate. */
	rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx);
	priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
					      priv->hw_params.valid_tx_ant);
	rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
	if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate,
				rate_flags);

	return sizeof(*tx_beacon_cmd) + frame_size;
}
248
249int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
250{
251 struct iwl_frame *frame;
252 unsigned int frame_size;
253 int rc;
254
255 frame = iwl4965_get_free_frame(priv);
256 if (!frame) {
257 IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
258 "command.\n");
259 return -ENOMEM;
260 }
261
262 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame);
263 if (!frame_size) {
264 IWL_ERR(priv, "Error configuring the beacon command\n");
265 iwl4965_free_frame(priv, frame);
266 return -EINVAL;
267 }
268
269 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
270 &frame->u.cmd[0]);
271
272 iwl4965_free_frame(priv, frame);
273
274 return rc;
275}
276
277static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
278{
279 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
280
281 dma_addr_t addr = get_unaligned_le32(&tb->lo);
282 if (sizeof(dma_addr_t) > sizeof(u32))
283 addr |=
284 ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
285
286 return addr;
287}
288
289static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
290{
291 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
292
293 return le16_to_cpu(tb->hi_n_len) >> 4;
294}
295
/* Fill TB @idx of @tfd with a DMA address/length pair. The low 32 address
 * bits go into 'lo'; address bits 32-35 share the hi_n_len field with the
 * 12-bit length. Also bumps num_tbs so TBs 0..idx are marked in use. */
static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	/* Double shift avoids UB when dma_addr_t is only 32 bits wide. */
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}
310
/* Number of valid TBs recorded in @tfd; hardware defines only the low
 * 5 bits of num_tbs. */
static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
315
/**
 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv - driver private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
	struct iwl_tfd *tfd;
	struct pci_dev *dev = priv->pci_dev;
	int index = txq->q.read_ptr;
	int i;
	int num_tbs;

	tfd = &tfd_tmp[index];

	/* Sanity check on number of chunks */
	num_tbs = iwl4965_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd: TB 0 holds the command itself, whose mapping was
	 * recorded in the queue's meta array rather than in the TFD. */
	if (num_tbs)
		pci_unmap_single(dev,
				dma_unmap_addr(&txq->meta[index], mapping),
				dma_unmap_len(&txq->meta[index], len),
				PCI_DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any: the remaining TBs carry frame payload and
	 * their addresses/lengths live in the TFD itself. */
	for (i = 1; i < num_tbs; i++)
		pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i),
				iwl4965_tfd_tb_get_len(tfd, i),
				PCI_DMA_TODEVICE);

	/* free SKB */
	if (txq->txb) {
		struct sk_buff *skb;

		skb = txq->txb[txq->q.read_ptr].skb;

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->txb[txq->q.read_ptr].skb = NULL;
		}
	}
}
370
371int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
372 struct iwl_tx_queue *txq,
373 dma_addr_t addr, u16 len,
374 u8 reset, u8 pad)
375{
376 struct iwl_queue *q;
377 struct iwl_tfd *tfd, *tfd_tmp;
378 u32 num_tbs;
379
380 q = &txq->q;
381 tfd_tmp = (struct iwl_tfd *)txq->tfds;
382 tfd = &tfd_tmp[q->write_ptr];
383
384 if (reset)
385 memset(tfd, 0, sizeof(*tfd));
386
387 num_tbs = iwl4965_tfd_get_num_tbs(tfd);
388
389 /* Each TFD can point to a maximum 20 Tx buffers */
390 if (num_tbs >= IWL_NUM_OF_TBS) {
391 IWL_ERR(priv, "Error can not send more than %d chunks\n",
392 IWL_NUM_OF_TBS);
393 return -EINVAL;
394 }
395
396 BUG_ON(addr & ~DMA_BIT_MASK(36));
397 if (unlikely(addr & ~IWL_TX_DMA_MASK))
398 IWL_ERR(priv, "Unaligned address = %llx\n",
399 (unsigned long long)addr);
400
401 iwl4965_tfd_set_tb(tfd, num_tbs, addr, len);
402
403 return 0;
404}
405
406/*
407 * Tell nic where to find circular buffer of Tx Frame Descriptors for
408 * given Tx queue, and enable the DMA channel used for that queue.
409 *
410 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
411 * channels supported in hardware.
412 */
int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
			struct iwl_tx_queue *txq)
{
	int txq_id = txq->q.id;

	/* Circular buffer (TFD queue in DRAM) physical base address;
	 * the register takes the address shifted right by 8 bits
	 * (256-byte aligned). */
	iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			txq->q.dma_addr >> 8);

	return 0;
}
424
425/******************************************************************************
426 *
427 * Generic RX handler implementations
428 *
429 ******************************************************************************/
430static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
431 struct iwl_rx_mem_buffer *rxb)
432{
433 struct iwl_rx_packet *pkt = rxb_addr(rxb);
434 struct iwl_alive_resp *palive;
435 struct delayed_work *pwork;
436
437 palive = &pkt->u.alive_frame;
438
439 IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
440 "0x%01X 0x%01X\n",
441 palive->is_valid, palive->ver_type,
442 palive->ver_subtype);
443
444 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
445 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
446 memcpy(&priv->card_alive_init,
447 &pkt->u.alive_frame,
448 sizeof(struct iwl_init_alive_resp));
449 pwork = &priv->init_alive_start;
450 } else {
451 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
452 memcpy(&priv->card_alive, &pkt->u.alive_frame,
453 sizeof(struct iwl_alive_resp));
454 pwork = &priv->alive_start;
455 }
456
457 /* We delay the ALIVE response by 5ms to
458 * give the HW RF Kill time to activate... */
459 if (palive->is_valid == UCODE_VALID_OK)
460 queue_delayed_work(priv->workqueue, pwork,
461 msecs_to_jiffies(5));
462 else
463 IWL_WARN(priv, "uCode did not respond OK.\n");
464}
465
466/**
467 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
468 *
469 * This callback is provided in order to send a statistics request.
470 *
471 * This timer function is continually reset to execute within
472 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
473 * was received. We need to ensure we receive the statistics in order
474 * to update the temperature used for calibrating the TXPOWER.
475 */
476static void iwl4965_bg_statistics_periodic(unsigned long data)
477{
478 struct iwl_priv *priv = (struct iwl_priv *)data;
479
480 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
481 return;
482
483 /* dont send host command if rf-kill is on */
484 if (!iwl_legacy_is_ready_rf(priv))
485 return;
486
487 iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false);
488}
489
/* Handle a BEACON_NOTIFICATION from the uCode: record whether this
 * station is currently acting as IBSS manager (beacon transmitter). */
static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl4965_beacon_notif *beacon =
		(struct iwl4965_beacon_notif *)pkt->u.raw;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	/* Debug-only: decode and log the beacon TX status details. */
	u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
		"tsf %d %d rate %d\n",
		le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
		beacon->beacon_notify_hdr.failure_frame,
		le32_to_cpu(beacon->ibss_mgr_status),
		le32_to_cpu(beacon->high_tsf),
		le32_to_cpu(beacon->low_tsf), rate);
#endif

	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
510
/* React to a critical-temperature (CT) kill: stop all TX queues and put
 * the device into a low-power state until the temperature recovers. */
static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv)
{
	unsigned long flags;

	IWL_DEBUG_POWER(priv, "Stop all queues\n");

	if (priv->mac80211_registered)
		ieee80211_stop_queues(priv->hw);

	/* Tell uCode to exit on CT kill; the read flushes the write. */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	iwl_read32(priv, CSR_UCODE_DRV_GP1);

	/* Grab-then-release of NIC access lets the device drop back to
	 * its power-saving state. */
	spin_lock_irqsave(&priv->reg_lock, flags);
	if (!iwl_grab_nic_access(priv))
		iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, flags);
}
529
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* Snapshot of status taken before any bit updates, used below to
	 * detect an rf-kill state transition. */
	unsigned long status = priv->status;

	IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_CARD_DISABLED) ?
			  "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
		     CT_CARD_DISABLED)) {

		/* Block host commands while the card is disabled... */
		iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		/* ...unless RXON itself is still enabled, in which case
		 * commands are allowed again. */
		if (!(flags & RXON_CARD_DISABLED)) {
			iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
				    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
	}

	if (flags & CT_CARD_DISABLED)
		iwl4965_perform_ct_kill_task(priv);

	if (flags & HW_CARD_DISABLED)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);

	if (!(flags & RXON_CARD_DISABLED))
		iwl_legacy_scan_cancel(priv);

	/* On an rf-kill edge, report the new state to mac80211; otherwise
	 * wake any command waiters so they can re-check card state. */
	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
			test_bit(STATUS_RF_KILL_HW, &priv->status));
	else
		wake_up(&priv->wait_command_queue);
}
580
/**
 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
 *
 * Setup the RX handlers for each of the reply types sent from the uCode
 * to the host.
 *
 * This function chains into the hardware specific files for them to setup
 * any hardware specific handlers as well.
 */
static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
{
	priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
	priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
	priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
	priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
			iwl_legacy_rx_spectrum_measure_notif;
	priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
	priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
			iwl_legacy_rx_pm_debug_statistics_notif;
	priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;

	/*
	 * The same handler is used for both the REPLY to a discrete
	 * statistics request from the host as well as for the periodic
	 * statistics notifications (after received beacons) from the uCode.
	 */
	priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics;
	priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics;

	/* Scan-related notifications share common legacy handlers. */
	iwl_legacy_setup_rx_scan_handlers(priv);

	/* status change handler */
	priv->rx_handlers[CARD_STATE_NOTIFICATION] =
					iwl4965_rx_card_state_notif;

	priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
					iwl4965_rx_missed_beacon_notif;
	/* Rx handlers */
	priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
	priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
	/* block ack */
	priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
	/* Set up hardware specific Rx handlers */
	priv->cfg->ops->lib->rx_handler_setup(priv);
}
626
/**
 * iwl4965_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the priv->rx_handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 */
void iwl4965_rx_handle(struct iwl_priv *priv)
{
	struct iwl_rx_mem_buffer *rxb;
	struct iwl_rx_packet *pkt;
	struct iwl_rx_queue *rxq = &priv->rxq;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);

	/* calculate total frames need to be restock after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	/* With more than half the ring empty, replenish eagerly inside the
	 * loop below instead of only once at the end. */
	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		int len;

		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with it,
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		BUG_ON(rxb == NULL);

		rxq->queue[i] = NULL;

		/* Give the page back to the CPU before reading it. */
		pci_unmap_page(priv->pci_dev, rxb->page_dma,
			       PAGE_SIZE << priv->hw_params.rx_page_order,
			       PCI_DMA_FROMDEVICE);
		pkt = rxb_addr(rxb);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_legacy_dev_rx(priv, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
			(pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
			(pkt->hdr.cmd != REPLY_RX) &&
			(pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
			(pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
			(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
			(pkt->hdr.cmd != REPLY_TX);

		/* Based on type of command response or notification,
		 *   handle those that need handling via function in
		 *   rx_handlers table.  See iwl4965_setup_rx_handlers() */
		if (priv->rx_handlers[pkt->hdr.cmd]) {
			IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
				i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
				pkt->hdr.cmd);
			priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
		} else {
			/* No handling needed */
			IWL_DEBUG_RX(priv,
				"r %d i %d No handler needed for %s, 0x%02x\n",
				r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
				pkt->hdr.cmd);
		}

		/*
		 * XXX: After here, we should always check rxb->page
		 * against NULL before touching it or its virtual
		 * memory (pkt). Because some rx_handler might have
		 * already taken or freed the pages.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking iwl_legacy_send_cmd()
			 * as we reclaim the driver command queue */
			if (rxb->page)
				iwl_legacy_tx_cmd_complete(priv, rxb);
			else
				IWL_WARN(priv, "Claim null rxb?\n");
		}

		/* Reuse the page if possible. For notification packets and
		 * SKBs that fail to Rx correctly, add them back into the
		 * rx_free list for reuse later. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
				0, PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);

		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode wont assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwl4965_rx_replenish_now(priv);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwl4965_rx_replenish_now(priv);
	else
		iwl4965_rx_queue_restock(priv);
}
767
/* call this function to flush any scheduled tasklet */
static inline void iwl4965_synchronize_irq(struct iwl_priv *priv)
{
	/* wait to make sure we flush pending tasklet: first wait for any
	 * in-flight ISR (which may schedule the tasklet), then kill the
	 * tasklet so nothing remains queued. */
	synchronize_irq(priv->pci_dev->irq);
	tasklet_kill(&priv->irq_tasklet);
}
775
/*
 * Bottom half of the interrupt handler: acknowledge and service every
 * pending interrupt cause (HW error, rf-kill, CT kill, SW error, wakeup,
 * Rx, uCode-load Tx), then re-enable interrupts unless a fatal hardware
 * error left them deliberately disabled.
 */
static void iwl4965_irq_tasklet(struct iwl_priv *priv)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&priv->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = iwl_read32(priv, CSR_INT);
	iwl_write32(priv, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
	iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
		/* just for debug */
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
			inta, inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR49_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR49_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(priv, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_legacy_disable_interrupts(priv);

		priv->isr_stats.hw++;
		iwl_legacy_irq_handle_error(priv);

		handled |= CSR_INT_BIT_HW_ERR;

		/* NOTE: interrupts stay disabled on this path; the error
		 * recovery/restart logic is responsible for re-enabling. */
		return;
	}

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
				      "the frame/frames.\n");
			priv->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(priv, "Alive interrupt\n");
			priv->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;
		if (!(iwl_read32(priv, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
				hw_rf_kill ? "disable radio" : "enable radio");

		priv->isr_stats.rfkill++;

		/* driver only loads ucode once setting the interface up.
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed.
		 */
		if (!test_bit(STATUS_ALIVE, &priv->status)) {
			if (hw_rf_kill)
				set_bit(STATUS_RF_KILL_HW, &priv->status);
			else
				clear_bit(STATUS_RF_KILL_HW, &priv->status);
			wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(priv, "Microcode CT kill error detected.\n");
		priv->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(priv, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		priv->isr_stats.sw++;
		iwl_legacy_irq_handle_error(priv);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/*
	 * uCode wakes up after power-down sleep.
	 * Tell device about any new tx or host commands enqueued,
	 * and about any Rx buffers made available while asleep.
	 */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
		iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
		for (i = 0; i < priv->hw_params.max_txq_num; i++)
			iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]);
		priv->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		iwl4965_rx_handle(priv);
		priv->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
		priv->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		priv->ucode_write_complete = 1;
		wake_up(&priv->wait_command_queue);
	}

	if (inta & ~handled) {
		IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		priv->isr_stats.unhandled++;
	}

	if (inta & ~(priv->inta_mask)) {
		IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~priv->inta_mask);
		IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_legacy_enable_interrupts(priv);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_legacy_enable_rfkill_int(priv);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
		inta = iwl_read32(priv, CSR_INT);
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(priv,
			"End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
			"flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
961
962/*****************************************************************************
963 *
964 * sysfs attributes
965 *
966 *****************************************************************************/
967
968#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
969
970/*
971 * The following adds a new attribute to the sysfs representation
972 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
973 * used for controlling the debug level.
974 *
975 * See the level definitions in iwl for details.
976 *
977 * The debug_level being managed using sysfs below is a per device debug
978 * level that is used instead of the global debug level if it (the per
979 * device debug level) is set.
980 */
/* sysfs show: report the effective debug mask (per-device level if set,
 * otherwise the global one) in hex. */
static ssize_t iwl4965_show_debug_level(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
}
987static ssize_t iwl4965_store_debug_level(struct device *d,
988 struct device_attribute *attr,
989 const char *buf, size_t count)
990{
991 struct iwl_priv *priv = dev_get_drvdata(d);
992 unsigned long val;
993 int ret;
994
995 ret = strict_strtoul(buf, 0, &val);
996 if (ret)
997 IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
998 else {
999 priv->debug_level = val;
1000 if (iwl_legacy_alloc_traffic_mem(priv))
1001 IWL_ERR(priv,
1002 "Not enough memory to generate traffic log\n");
1003 }
1004 return strnlen(buf, count);
1005}
1006
1007static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
1008 iwl4965_show_debug_level, iwl4965_store_debug_level);
1009
1010
1011#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
1012
1013
1014static ssize_t iwl4965_show_temperature(struct device *d,
1015 struct device_attribute *attr, char *buf)
1016{
1017 struct iwl_priv *priv = dev_get_drvdata(d);
1018
1019 if (!iwl_legacy_is_alive(priv))
1020 return -EAGAIN;
1021
1022 return sprintf(buf, "%d\n", priv->temperature);
1023}
1024
1025static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL);
1026
1027static ssize_t iwl4965_show_tx_power(struct device *d,
1028 struct device_attribute *attr, char *buf)
1029{
1030 struct iwl_priv *priv = dev_get_drvdata(d);
1031
1032 if (!iwl_legacy_is_ready_rf(priv))
1033 return sprintf(buf, "off\n");
1034 else
1035 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
1036}
1037
1038static ssize_t iwl4965_store_tx_power(struct device *d,
1039 struct device_attribute *attr,
1040 const char *buf, size_t count)
1041{
1042 struct iwl_priv *priv = dev_get_drvdata(d);
1043 unsigned long val;
1044 int ret;
1045
1046 ret = strict_strtoul(buf, 10, &val);
1047 if (ret)
1048 IWL_INFO(priv, "%s is not in decimal form.\n", buf);
1049 else {
1050 ret = iwl_legacy_set_tx_power(priv, val, false);
1051 if (ret)
1052 IWL_ERR(priv, "failed setting tx power (0x%d).\n",
1053 ret);
1054 else
1055 ret = count;
1056 }
1057 return ret;
1058}
1059
/* /sys/.../device/tx_power: root-writable, world-readable (0644). */
static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
			iwl4965_show_tx_power, iwl4965_store_tx_power);
1062
/*
 * Files exported in the PCI device's sysfs directory (registered from
 * iwl4965_ucode_callback()); debug_level exists only on
 * CONFIG_IWLWIFI_LEGACY_DEBUG builds.
 */
static struct attribute *iwl_sysfs_entries[] = {
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL
};
1071
1072static struct attribute_group iwl_attribute_group = {
1073 .name = NULL, /* put in device directory */
1074 .attrs = iwl_sysfs_entries,
1075};
1076
1077/******************************************************************************
1078 *
1079 * uCode download functions
1080 *
1081 ******************************************************************************/
1082
1083static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
1084{
1085 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
1086 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
1087 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1088 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
1089 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
1090 iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
1091}
1092
/*
 * iwl4965_nic_start - release the on-board processor from reset
 *
 * Clearing CSR_RESET lets the previously loaded bootstrap program start
 * executing.
 */
static void iwl4965_nic_start(struct iwl_priv *priv)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(priv, CSR_RESET, 0);
}
1098
/* Forward declarations: the async firmware-load completion callback and
 * the mac80211 registration helper it calls on success. */
static void iwl4965_ucode_callback(const struct firmware *ucode_raw,
						void *context);
static int iwl4965_mac_setup_register(struct iwl_priv *priv,
						u32 max_probe_length);
1103
1104static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first)
1105{
1106 const char *name_pre = priv->cfg->fw_name_pre;
1107 char tag[8];
1108
1109 if (first) {
1110 priv->fw_index = priv->cfg->ucode_api_max;
1111 sprintf(tag, "%d", priv->fw_index);
1112 } else {
1113 priv->fw_index--;
1114 sprintf(tag, "%d", priv->fw_index);
1115 }
1116
1117 if (priv->fw_index < priv->cfg->ucode_api_min) {
1118 IWL_ERR(priv, "no suitable firmware found!\n");
1119 return -ENOENT;
1120 }
1121
1122 sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
1123
1124 IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n",
1125 priv->firmware_name);
1126
1127 return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
1128 &priv->pci_dev->dev, GFP_KERNEL, priv,
1129 iwl4965_ucode_callback);
1130}
1131
/*
 * Pointers into (and sizes of) the five images carried by a legacy
 * .ucode file: runtime instructions/data, init instructions/data, and
 * the bootstrap image.  The pointers alias ucode_raw->data and are only
 * valid while the firmware blob is held.
 */
struct iwl4965_firmware_pieces {
	const void *inst, *data, *init, *init_data, *boot;
	size_t inst_size, data_size, init_size, init_data_size, boot_size;
};
1136
/*
 * iwl4965_load_firmware - parse a legacy-format .ucode blob
 *
 * Reads the header of @ucode_raw, records the uCode version in
 * priv->ucode_ver, and fills @pieces with pointers into the blob (and
 * the sizes) of each embedded image.
 *
 * Returns 0 on success, -EINVAL if the file is shorter than the header
 * or its total length disagrees with the sizes advertised in the
 * header.  Caller must have verified ucode_raw->size >= 4 so reading
 * ucode->ver below is in bounds.
 */
static int iwl4965_load_firmware(struct iwl_priv *priv,
				const struct firmware *ucode_raw,
				struct iwl4965_firmware_pieces *pieces)
{
	struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size;
	const u8 *src;

	priv->ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IWL_UCODE_API(priv->ucode_ver);

	switch (api_ver) {
	default:
	case 0:
	case 1:
	case 2:
		/* All legacy API versions share the 24-byte v1 header. */
		hdr_size = 24;
		if (ucode_raw->size < hdr_size) {
			IWL_ERR(priv, "File size too small!\n");
			return -EINVAL;
		}
		pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
		pieces->data_size = le32_to_cpu(ucode->v1.data_size);
		pieces->init_size = le32_to_cpu(ucode->v1.init_size);
		pieces->init_data_size =
			le32_to_cpu(ucode->v1.init_data_size);
		pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
		src = ucode->v1.data;
		break;
	}

	/* Verify size of file vs. image size info in file's header.
	 * NOTE(review): on 32-bit, summing five untrusted u32 sizes could
	 * in principle wrap; the caller later bounds each size against
	 * hw_params limits before copying — confirm that covers it. */
	if (ucode_raw->size != hdr_size + pieces->inst_size +
				pieces->data_size + pieces->init_size +
				pieces->init_data_size + pieces->boot_size) {

		IWL_ERR(priv,
			"uCode file size %d does not match expected size\n",
			(int)ucode_raw->size);
		return -EINVAL;
	}

	/* The five images are laid out back to back after the header. */
	pieces->inst = src;
	src += pieces->inst_size;
	pieces->data = src;
	src += pieces->data_size;
	pieces->init = src;
	src += pieces->init_size;
	pieces->init_data = src;
	src += pieces->init_data_size;
	pieces->boot = src;
	src += pieces->boot_size;

	return 0;
}
1192
1193/**
1194 * iwl4965_ucode_callback - callback when firmware was loaded
1195 *
1196 * If loaded successfully, copies the firmware into buffers
1197 * for the card to fetch (via DMA).
1198 */
1199static void
1200iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context)
1201{
1202 struct iwl_priv *priv = context;
1203 struct iwl_ucode_header *ucode;
1204 int err;
1205 struct iwl4965_firmware_pieces pieces;
1206 const unsigned int api_max = priv->cfg->ucode_api_max;
1207 const unsigned int api_min = priv->cfg->ucode_api_min;
1208 u32 api_ver;
1209
1210 u32 max_probe_length = 200;
1211 u32 standard_phy_calibration_size =
1212 IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
1213
1214 memset(&pieces, 0, sizeof(pieces));
1215
1216 if (!ucode_raw) {
1217 if (priv->fw_index <= priv->cfg->ucode_api_max)
1218 IWL_ERR(priv,
1219 "request for firmware file '%s' failed.\n",
1220 priv->firmware_name);
1221 goto try_again;
1222 }
1223
1224 IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
1225 priv->firmware_name, ucode_raw->size);
1226
1227 /* Make sure that we got at least the API version number */
1228 if (ucode_raw->size < 4) {
1229 IWL_ERR(priv, "File size way too small!\n");
1230 goto try_again;
1231 }
1232
1233 /* Data from ucode file: header followed by uCode images */
1234 ucode = (struct iwl_ucode_header *)ucode_raw->data;
1235
1236 err = iwl4965_load_firmware(priv, ucode_raw, &pieces);
1237
1238 if (err)
1239 goto try_again;
1240
1241 api_ver = IWL_UCODE_API(priv->ucode_ver);
1242
1243 /*
1244 * api_ver should match the api version forming part of the
1245 * firmware filename ... but we don't check for that and only rely
1246 * on the API version read from firmware header from here on forward
1247 */
1248 if (api_ver < api_min || api_ver > api_max) {
1249 IWL_ERR(priv,
1250 "Driver unable to support your firmware API. "
1251 "Driver supports v%u, firmware is v%u.\n",
1252 api_max, api_ver);
1253 goto try_again;
1254 }
1255
1256 if (api_ver != api_max)
1257 IWL_ERR(priv,
1258 "Firmware has old API version. Expected v%u, "
1259 "got v%u. New firmware can be obtained "
1260 "from http://www.intellinuxwireless.org.\n",
1261 api_max, api_ver);
1262
1263 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
1264 IWL_UCODE_MAJOR(priv->ucode_ver),
1265 IWL_UCODE_MINOR(priv->ucode_ver),
1266 IWL_UCODE_API(priv->ucode_ver),
1267 IWL_UCODE_SERIAL(priv->ucode_ver));
1268
1269 snprintf(priv->hw->wiphy->fw_version,
1270 sizeof(priv->hw->wiphy->fw_version),
1271 "%u.%u.%u.%u",
1272 IWL_UCODE_MAJOR(priv->ucode_ver),
1273 IWL_UCODE_MINOR(priv->ucode_ver),
1274 IWL_UCODE_API(priv->ucode_ver),
1275 IWL_UCODE_SERIAL(priv->ucode_ver));
1276
1277 /*
1278 * For any of the failures below (before allocating pci memory)
1279 * we will try to load a version with a smaller API -- maybe the
1280 * user just got a corrupted version of the latest API.
1281 */
1282
1283 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
1284 priv->ucode_ver);
1285 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
1286 pieces.inst_size);
1287 IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
1288 pieces.data_size);
1289 IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
1290 pieces.init_size);
1291 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
1292 pieces.init_data_size);
1293 IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
1294 pieces.boot_size);
1295
1296 /* Verify that uCode images will fit in card's SRAM */
1297 if (pieces.inst_size > priv->hw_params.max_inst_size) {
1298 IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
1299 pieces.inst_size);
1300 goto try_again;
1301 }
1302
1303 if (pieces.data_size > priv->hw_params.max_data_size) {
1304 IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
1305 pieces.data_size);
1306 goto try_again;
1307 }
1308
1309 if (pieces.init_size > priv->hw_params.max_inst_size) {
1310 IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
1311 pieces.init_size);
1312 goto try_again;
1313 }
1314
1315 if (pieces.init_data_size > priv->hw_params.max_data_size) {
1316 IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
1317 pieces.init_data_size);
1318 goto try_again;
1319 }
1320
1321 if (pieces.boot_size > priv->hw_params.max_bsm_size) {
1322 IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
1323 pieces.boot_size);
1324 goto try_again;
1325 }
1326
1327 /* Allocate ucode buffers for card's bus-master loading ... */
1328
1329 /* Runtime instructions and 2 copies of data:
1330 * 1) unmodified from disk
1331 * 2) backup cache for save/restore during power-downs */
1332 priv->ucode_code.len = pieces.inst_size;
1333 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
1334
1335 priv->ucode_data.len = pieces.data_size;
1336 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
1337
1338 priv->ucode_data_backup.len = pieces.data_size;
1339 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1340
1341 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
1342 !priv->ucode_data_backup.v_addr)
1343 goto err_pci_alloc;
1344
1345 /* Initialization instructions and data */
1346 if (pieces.init_size && pieces.init_data_size) {
1347 priv->ucode_init.len = pieces.init_size;
1348 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
1349
1350 priv->ucode_init_data.len = pieces.init_data_size;
1351 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
1352
1353 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
1354 goto err_pci_alloc;
1355 }
1356
1357 /* Bootstrap (instructions only, no data) */
1358 if (pieces.boot_size) {
1359 priv->ucode_boot.len = pieces.boot_size;
1360 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
1361
1362 if (!priv->ucode_boot.v_addr)
1363 goto err_pci_alloc;
1364 }
1365
1366 /* Now that we can no longer fail, copy information */
1367
1368 priv->sta_key_max_num = STA_KEY_MAX_NUM;
1369
1370 /* Copy images into buffers for card's bus-master reads ... */
1371
1372 /* Runtime instructions (first block of data in file) */
1373 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
1374 pieces.inst_size);
1375 memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);
1376
1377 IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
1378 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
1379
1380 /*
1381 * Runtime data
1382 * NOTE: Copy into backup buffer will be done in iwl_up()
1383 */
1384 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
1385 pieces.data_size);
1386 memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
1387 memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
1388
1389 /* Initialization instructions */
1390 if (pieces.init_size) {
1391 IWL_DEBUG_INFO(priv,
1392 "Copying (but not loading) init instr len %Zd\n",
1393 pieces.init_size);
1394 memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
1395 }
1396
1397 /* Initialization data */
1398 if (pieces.init_data_size) {
1399 IWL_DEBUG_INFO(priv,
1400 "Copying (but not loading) init data len %Zd\n",
1401 pieces.init_data_size);
1402 memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
1403 pieces.init_data_size);
1404 }
1405
1406 /* Bootstrap instructions */
1407 IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
1408 pieces.boot_size);
1409 memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
1410
1411 /*
1412 * figure out the offset of chain noise reset and gain commands
1413 * base on the size of standard phy calibration commands table size
1414 */
1415 priv->_4965.phy_calib_chain_noise_reset_cmd =
1416 standard_phy_calibration_size;
1417 priv->_4965.phy_calib_chain_noise_gain_cmd =
1418 standard_phy_calibration_size + 1;
1419
1420 /**************************************************
1421 * This is still part of probe() in a sense...
1422 *
1423 * 9. Setup and register with mac80211 and debugfs
1424 **************************************************/
1425 err = iwl4965_mac_setup_register(priv, max_probe_length);
1426 if (err)
1427 goto out_unbind;
1428
1429 err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
1430 if (err)
1431 IWL_ERR(priv,
1432 "failed to create debugfs files. Ignoring error: %d\n", err);
1433
1434 err = sysfs_create_group(&priv->pci_dev->dev.kobj,
1435 &iwl_attribute_group);
1436 if (err) {
1437 IWL_ERR(priv, "failed to create sysfs device attributes\n");
1438 goto out_unbind;
1439 }
1440
1441 /* We have our copies now, allow OS release its copies */
1442 release_firmware(ucode_raw);
1443 complete(&priv->_4965.firmware_loading_complete);
1444 return;
1445
1446 try_again:
1447 /* try next, if any */
1448 if (iwl4965_request_firmware(priv, false))
1449 goto out_unbind;
1450 release_firmware(ucode_raw);
1451 return;
1452
1453 err_pci_alloc:
1454 IWL_ERR(priv, "failed to allocate pci memory\n");
1455 iwl4965_dealloc_ucode_pci(priv);
1456 out_unbind:
1457 complete(&priv->_4965.firmware_loading_complete);
1458 device_release_driver(&priv->pci_dev->dev);
1459 release_firmware(ucode_raw);
1460}
1461
/*
 * uCode error descriptions for older images, indexed directly by the
 * error code found in the device error log (see iwl4965_desc_lookup()).
 */
static const char * const desc_lookup_text[] = {
	"OK",
	"FAIL",
	"BAD_PARAM",
	"BAD_CHECKSUM",
	"NMI_INTERRUPT_WDG",
	"SYSASSERT",
	"FATAL_ERROR",
	"BAD_COMMAND",
	"HW_ERROR_TUNE_LOCK",
	"HW_ERROR_TEMPERATURE",
	"ILLEGAL_CHAN_FREQ",
	"VCC_NOT_STABLE",
	"FH_ERROR",
	"NMI_INTERRUPT_HOST",
	"NMI_INTERRUPT_ACTION_PT",
	"NMI_INTERRUPT_UNKNOWN",
	"UCODE_VERSION_MISMATCH",
	"HW_ERROR_ABS_LOCK",
	"HW_ERROR_CAL_LOCK_FAIL",
	"NMI_INTERRUPT_INST_ACTION_PT",
	"NMI_INTERRUPT_DATA_ACTION_PT",
	"NMI_TRM_HW_ER",
	"NMI_INTERRUPT_TRM",
	"NMI_INTERRUPT_BREAK_POINT",
	"DEBUG_0",
	"DEBUG_1",
	"DEBUG_2",
	"DEBUG_3",
};
1492
1493static struct { char *name; u8 num; } advanced_lookup[] = {
1494 { "NMI_INTERRUPT_WDG", 0x34 },
1495 { "SYSASSERT", 0x35 },
1496 { "UCODE_VERSION_MISMATCH", 0x37 },
1497 { "BAD_COMMAND", 0x38 },
1498 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
1499 { "FATAL_ERROR", 0x3D },
1500 { "NMI_TRM_HW_ERR", 0x46 },
1501 { "NMI_INTERRUPT_TRM", 0x4C },
1502 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
1503 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
1504 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
1505 { "NMI_INTERRUPT_HOST", 0x66 },
1506 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
1507 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
1508 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
1509 { "ADVANCED_SYSASSERT", 0 },
1510};
1511
1512static const char *iwl4965_desc_lookup(u32 num)
1513{
1514 int i;
1515 int max = ARRAY_SIZE(desc_lookup_text);
1516
1517 if (num < max)
1518 return desc_lookup_text[num];
1519
1520 max = ARRAY_SIZE(advanced_lookup) - 1;
1521 for (i = 0; i < max; i++) {
1522 if (advanced_lookup[i].num == num)
1523 break;
1524 }
1525 return advanced_lookup[i].name;
1526}
1527
/* On-device error log layout: one u32 record count at the table base,
 * followed by records of 7 u32s each. */
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1530
/*
 * iwl4965_dump_nic_error_log - dump the uCode error log to the kernel log
 *
 * Reads the error-event table out of device SRAM — its address comes
 * from the ALIVE response of whichever image (init or runtime) is
 * currently loaded — and prints the fields of the latest error record.
 */
void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
{
	u32 data2, line;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;
	u32 pc, hcmd;

	/* Pick the error table matching the loaded uCode image. */
	if (priv->ucode_type == UCODE_INIT) {
		base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
	} else {
		base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
	}

	if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		IWL_ERR(priv,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
		return;
	}

	/* Word 0 of the table is the number of logged errors. */
	count = iwl_legacy_read_targ_mem(priv, base);

	/* NOTE(review): true whenever count >= 1; presumably meant to
	 * skip the banner for an empty log — confirm intent. */
	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IWL_ERR(priv, "Start IWL Error Log Dump:\n");
		IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
			priv->status, count);
	}

	/* Fixed word offsets of the record fields within the table. */
	desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32));
	priv->isr_stats.err_code = desc;
	pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32));
	blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32));
	blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32));
	ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32));
	ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32));
	data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32));
	data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32));
	line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32));
	time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32));
	hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32));

	trace_iwlwifi_legacy_dev_ucode_error(priv, desc,
					time, data1, data2, line,
					blink1, blink2, ilink1, ilink2);

	IWL_ERR(priv, "Desc                                  Time       "
		"data1      data2      line\n");
	IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
		iwl4965_desc_lookup(desc), desc, time, data1, data2, line);
	IWL_ERR(priv, "pc      blink1  blink2  ilink1  ilink2  hcmd\n");
	IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
		pc, blink1, blink2, ilink1, ilink2, hcmd);
}
1584
1585static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
1586{
1587 struct iwl_ct_kill_config cmd;
1588 unsigned long flags;
1589 int ret = 0;
1590
1591 spin_lock_irqsave(&priv->lock, flags);
1592 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
1593 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1594 spin_unlock_irqrestore(&priv->lock, flags);
1595
1596 cmd.critical_temperature_R =
1597 cpu_to_le32(priv->hw_params.ct_kill_threshold);
1598
1599 ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1600 sizeof(cmd), &cmd);
1601 if (ret)
1602 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1603 else
1604 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
1605 "succeeded, "
1606 "critical temperature is %d\n",
1607 priv->hw_params.ct_kill_threshold);
1608}
1609
/*
 * Default Tx-queue -> FIFO mapping installed by iwl4965_alive_notify():
 * queues 0-3 carry the four EDCA access categories (VO/VI/BE/BK),
 * queue 4 is the command queue, queues 5-6 are unused.
 */
static const s8 default_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
	IWL49_CMD_FIFO_NUM,
	IWL_TX_FIFO_UNUSED,
	IWL_TX_FIFO_UNUSED,
};
1619
/*
 * iwl4965_alive_notify - program the Tx scheduler after runtime ALIVE
 *
 * Zeroes the scheduler's context area in device SRAM, points the
 * scheduler at the Tx byte-count tables, enables the Tx DMA channels,
 * configures every Tx queue (window size and frame limit), then
 * activates the queues and maps each to its FIFO per
 * default_queue_to_tx_fifo[].  Runs entirely under priv->lock.
 *
 * Always returns 0.
 */
static int iwl4965_alive_notify(struct iwl_priv *priv)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->lock, flags);

	/* Clear 4965's internal Tx Scheduler data base */
	priv->scd_base_addr = iwl_legacy_read_prph(priv,
					IWL49_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
	for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
		iwl_legacy_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
		iwl_legacy_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
	       IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
		iwl_legacy_write_targ_mem(priv, a, 0);

	/* Tel 4965 where to find Tx byte count tables */
	iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
			priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
		iwl_legacy_write_direct32(priv,
				FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Disable chain mode for all queues */
	iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);

	/* Initialize each Tx queue (including the command queue) */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {

		/* TFD circular buffer read/write indexes */
		iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
		iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));

		/* Max Tx Window size for Scheduler-ACK mode */
		iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
				IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
				(SCD_WIN_SIZE <<
				IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

		/* Frame limit */
		iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
				IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				(SCD_FRAME_LIMIT <<
				IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	}
	iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
				 (1 << priv->hw_params.max_txq_num) - 1);

	/* Activate all Tx DMA/FIFO channels */
	iwl4965_txq_set_sched(priv, IWL_MASK(0, 6));

	iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);

	/* make sure all queue are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queue first */
	priv->txq_ctx_active_msk = 0;
	/* Map each Tx/cmd queue to its corresponding fifo */
	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);

	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
		int ac = default_queue_to_tx_fifo[i];

		iwl_txq_ctx_activate(priv, i);

		if (ac == IWL_TX_FIFO_UNUSED)
			continue;

		iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
1716
1717/**
1718 * iwl4965_alive_start - called after REPLY_ALIVE notification received
1719 * from protocol/runtime uCode (initialization uCode's
1720 * Alive gets handled by iwl_init_alive_start()).
1721 */
1722static void iwl4965_alive_start(struct iwl_priv *priv)
1723{
1724 int ret = 0;
1725 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1726
1727 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
1728
1729 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
1730 /* We had an error bringing up the hardware, so take it
1731 * all the way back down so we can try again */
1732 IWL_DEBUG_INFO(priv, "Alive failed.\n");
1733 goto restart;
1734 }
1735
1736 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
1737 * This is a paranoid check, because we would not have gotten the
1738 * "runtime" alive if code weren't properly loaded. */
1739 if (iwl4965_verify_ucode(priv)) {
1740 /* Runtime instruction load was bad;
1741 * take it all the way back down so we can try again */
1742 IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
1743 goto restart;
1744 }
1745
1746 ret = iwl4965_alive_notify(priv);
1747 if (ret) {
1748 IWL_WARN(priv,
1749 "Could not complete ALIVE transition [ntf]: %d\n", ret);
1750 goto restart;
1751 }
1752
1753
1754 /* After the ALIVE response, we can send host commands to the uCode */
1755 set_bit(STATUS_ALIVE, &priv->status);
1756
1757 /* Enable watchdog to monitor the driver tx queues */
1758 iwl_legacy_setup_watchdog(priv);
1759
1760 if (iwl_legacy_is_rfkill(priv))
1761 return;
1762
1763 ieee80211_wake_queues(priv->hw);
1764
1765 priv->active_rate = IWL_RATES_MASK;
1766
1767 if (iwl_legacy_is_associated_ctx(ctx)) {
1768 struct iwl_legacy_rxon_cmd *active_rxon =
1769 (struct iwl_legacy_rxon_cmd *)&ctx->active;
1770 /* apply any changes in staging */
1771 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
1772 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1773 } else {
1774 struct iwl_rxon_context *tmp;
1775 /* Initialize our rx_config data */
1776 for_each_context(priv, tmp)
1777 iwl_legacy_connection_init_rx_config(priv, tmp);
1778
1779 if (priv->cfg->ops->hcmd->set_rxon_chain)
1780 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
1781 }
1782
1783 /* Configure bluetooth coexistence if enabled */
1784 iwl_legacy_send_bt_config(priv);
1785
1786 iwl4965_reset_run_time_calib(priv);
1787
1788 set_bit(STATUS_READY, &priv->status);
1789
1790 /* Configure the adapter for unassociated operation */
1791 iwl_legacy_commit_rxon(priv, ctx);
1792
1793 /* At this point, the NIC is initialized and operational */
1794 iwl4965_rf_kill_ct_config(priv);
1795
1796 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
1797 wake_up(&priv->wait_command_queue);
1798
1799 iwl_legacy_power_update_mode(priv, true);
1800 IWL_DEBUG_INFO(priv, "Updated power mode\n");
1801
1802 return;
1803
1804 restart:
1805 queue_work(priv->workqueue, &priv->restart);
1806}
1807
1808static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);
1809
/*
 * __iwl4965_down - stop the device (caller holds priv->mutex)
 *
 * Cancels scans, stops the watchdog, drops all station state, quiesces
 * interrupts and the Tx/Rx machinery, then puts the NIC into low-power
 * state.  priv->status is rewritten to preserve only the bits that must
 * survive a down/up cycle (RF-kill, geo-configured, exit-pending, and —
 * after a full init — fw-error).
 */
static void __iwl4965_down(struct iwl_priv *priv)
{
	unsigned long flags;
	int exit_pending;

	IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");

	iwl_legacy_scan_cancel_timeout(priv, 200);

	/* Remember whether a module-exit teardown was already in
	 * progress; if not, the bit is cleared again further down. */
	exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);

	/* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
	 * to prevent rearm timer */
	del_timer_sync(&priv->watchdog);

	iwl_legacy_clear_ucode_stations(priv, NULL);
	iwl_legacy_dealloc_bcast_stations(priv);
	iwl_legacy_clear_driver_stations(priv);

	/* Unblock any waiting calls */
	wake_up_all(&priv->wait_command_queue);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(STATUS_EXIT_PENDING, &priv->status);

	/* stop and reset the on-board processor */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_legacy_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	iwl4965_synchronize_irq(priv);

	if (priv->mac80211_registered)
		ieee80211_stop_queues(priv->hw);

	/* If we have not previously called iwl_init() then
	 * clear all bits but the RF Kill bit and return */
	if (!iwl_legacy_is_init(priv)) {
		priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
					STATUS_RF_KILL_HW |
			       test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
					STATUS_GEO_CONFIGURED |
			       test_bit(STATUS_EXIT_PENDING, &priv->status) <<
					STATUS_EXIT_PENDING;
		goto exit;
	}

	/* ...otherwise clear out all the status bits but the RF Kill
	 * bit and continue taking the NIC down. */
	priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
				STATUS_RF_KILL_HW |
			test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
				STATUS_GEO_CONFIGURED |
			test_bit(STATUS_FW_ERROR, &priv->status) <<
				STATUS_FW_ERROR |
			test_bit(STATUS_EXIT_PENDING, &priv->status) <<
				STATUS_EXIT_PENDING;

	iwl4965_txq_ctx_stop(priv);
	iwl4965_rxq_stop(priv);

	/* Power-down device's busmaster DMA clocks */
	iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
				CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_legacy_apm_stop(priv);

 exit:
	memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));

	/* dev_kfree_skb(NULL) is a no-op, so this is safe even when no
	 * beacon is held. */
	dev_kfree_skb(priv->beacon_skb);
	priv->beacon_skb = NULL;

	/* clear out any free frames */
	iwl4965_clear_free_frames(priv);
}
1895
/*
 * iwl4965_down - locked wrapper around __iwl4965_down()
 *
 * Deferred work is cancelled after the mutex is dropped; presumably the
 * work handlers take priv->mutex themselves, so cancelling under the
 * lock could deadlock — confirm against the handlers.
 */
static void iwl4965_down(struct iwl_priv *priv)
{
	mutex_lock(&priv->mutex);
	__iwl4965_down(priv);
	mutex_unlock(&priv->mutex);

	iwl4965_cancel_deferred_work(priv);
}
1904
1905#define HW_READY_TIMEOUT (50)
1906
1907static int iwl4965_set_hw_ready(struct iwl_priv *priv)
1908{
1909 int ret = 0;
1910
1911 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1912 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1913
1914 /* See if we got it */
1915 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
1916 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1917 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1918 HW_READY_TIMEOUT);
1919 if (ret != -ETIMEDOUT)
1920 priv->hw_ready = true;
1921 else
1922 priv->hw_ready = false;
1923
1924 IWL_DEBUG_INFO(priv, "hardware %s\n",
1925 (priv->hw_ready == 1) ? "ready" : "not ready");
1926 return ret;
1927}
1928
/*
 * iwl4965_prepare_card_hw - bring the NIC out of "prepare" state
 *
 * Tries the quick NIC_READY handshake first; if the hardware is not yet
 * ready, asserts the PREPARE bit, polls (timeout 150000, same units as
 * iwl_poll_bit()) for PREPARE_DONE to clear, then retries the READY
 * handshake once.  priv->hw_ready holds the final outcome.
 */
static int iwl4965_prepare_card_hw(struct iwl_priv *priv)
{
	int ret = 0;

	IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n");

	ret = iwl4965_set_hw_ready(priv);
	if (priv->hw_ready)
		return ret;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
			~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	/* HW should be ready by now, check again. */
	if (ret != -ETIMEDOUT)
		iwl4965_set_hw_ready(priv);

	return ret;
}
1953
/* Number of bootstrap-load attempts before giving up in __iwl4965_up(). */
#define MAX_HW_RESTARTS 5

/*
 * __iwl4965_up - bring the NIC up and start loading firmware
 *
 * Preconditions: firmware images already copied into DMA buffers by
 * iwl4965_ucode_callback().  Allocates broadcast stations, verifies the
 * hardware is ready and not RF-killed, initializes the NIC, then tries
 * up to MAX_HW_RESTARTS times to load the bootstrap image and start the
 * card.  The runtime ALIVE that eventually follows is handled
 * asynchronously by iwl4965_alive_start().
 *
 * Returns 0 once the card is started (or when held off by RF-kill),
 * negative errno otherwise.
 */
static int __iwl4965_up(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;
	int i;
	int ret;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
		IWL_ERR(priv, "ucode not available for device bringup\n");
		return -EIO;
	}

	for_each_context(priv, ctx) {
		ret = iwl4965_alloc_bcast_station(priv, ctx);
		if (ret) {
			/* Roll back any stations allocated for earlier
			 * contexts before failing. */
			iwl_legacy_dealloc_bcast_stations(priv);
			return ret;
		}
	}

	iwl4965_prepare_card_hw(priv);

	if (!priv->hw_ready) {
		IWL_WARN(priv, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(priv,
		CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		set_bit(STATUS_RF_KILL_HW, &priv->status);

	/* RF-killed is not an error: report it to the stack, leave
	 * interrupts on so we notice when the switch flips back. */
	if (iwl_legacy_is_rfkill(priv)) {
		wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);

		iwl_legacy_enable_interrupts(priv);
		IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
		return 0;
	}

	/* Ack/clear any pending interrupts before init. */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);

	/* must be initialised before iwl_hw_nic_init */
	priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;

	ret = iwl4965_hw_nic_init(priv);
	if (ret) {
		IWL_ERR(priv, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_legacy_enable_interrupts(priv);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Copy original ucode data image from disk into backup cache.
	 * This will be used to initialize the on-board processor's
	 * data SRAM for a clean start when the runtime program first loads. */
	memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
			priv->ucode_data.len);

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		/* load bootstrap state machine,
		 * load bootstrap program into processor's memory,
		 * prepare to load the "initialize" uCode */
		ret = priv->cfg->ops->lib->load_ucode(priv);

		if (ret) {
			IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n",
				ret);
			continue;
		}

		/* start card; "initialize" will load runtime ucode */
		iwl4965_nic_start(priv);

		IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");

		return 0;
	}

	/* All attempts failed: take the NIC back down (temporarily
	 * setting EXIT_PENDING so the teardown path behaves as a full
	 * shutdown) and report the failure. */
	set_bit(STATUS_EXIT_PENDING, &priv->status);
	__iwl4965_down(priv);
	clear_bit(STATUS_EXIT_PENDING, &priv->status);

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
2062
2063
2064/*****************************************************************************
2065 *
2066 * Workqueue callbacks
2067 *
2068 *****************************************************************************/
2069
2070static void iwl4965_bg_init_alive_start(struct work_struct *data)
2071{
2072 struct iwl_priv *priv =
2073 container_of(data, struct iwl_priv, init_alive_start.work);
2074
2075 mutex_lock(&priv->mutex);
2076 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2077 goto out;
2078
2079 priv->cfg->ops->lib->init_alive_start(priv);
2080out:
2081 mutex_unlock(&priv->mutex);
2082}
2083
2084static void iwl4965_bg_alive_start(struct work_struct *data)
2085{
2086 struct iwl_priv *priv =
2087 container_of(data, struct iwl_priv, alive_start.work);
2088
2089 mutex_lock(&priv->mutex);
2090 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2091 goto out;
2092
2093 iwl4965_alive_start(priv);
2094out:
2095 mutex_unlock(&priv->mutex);
2096}
2097
2098static void iwl4965_bg_run_time_calib_work(struct work_struct *work)
2099{
2100 struct iwl_priv *priv = container_of(work, struct iwl_priv,
2101 run_time_calib_work);
2102
2103 mutex_lock(&priv->mutex);
2104
2105 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2106 test_bit(STATUS_SCANNING, &priv->status)) {
2107 mutex_unlock(&priv->mutex);
2108 return;
2109 }
2110
2111 if (priv->start_calib) {
2112 iwl4965_chain_noise_calibration(priv,
2113 (void *)&priv->_4965.statistics);
2114 iwl4965_sensitivity_calibration(priv,
2115 (void *)&priv->_4965.statistics);
2116 }
2117
2118 mutex_unlock(&priv->mutex);
2119}
2120
/*
 * Restart worker (priv->restart).
 *
 * On a firmware error (STATUS_FW_ERROR set): detach every context's vif,
 * mark the interface closed, bring the device down, cancel deferred work
 * and ask mac80211 to restart the hardware.  Otherwise perform a plain
 * down/up cycle.
 */
static void iwl4965_bg_restart(struct work_struct *data)
{
	struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
		struct iwl_rxon_context *ctx;

		mutex_lock(&priv->mutex);
		for_each_context(priv, ctx)
			ctx->vif = NULL;
		priv->is_open = 0;

		__iwl4965_down(priv);

		mutex_unlock(&priv->mutex);
		iwl4965_cancel_deferred_work(priv);
		ieee80211_restart_hw(priv->hw);
	} else {
		iwl4965_down(priv);

		mutex_lock(&priv->mutex);
		/* bail out if a shutdown raced with this restart */
		if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
			mutex_unlock(&priv->mutex);
			return;
		}

		__iwl4965_up(priv);
		mutex_unlock(&priv->mutex);
	}
}
2154
2155static void iwl4965_bg_rx_replenish(struct work_struct *data)
2156{
2157 struct iwl_priv *priv =
2158 container_of(data, struct iwl_priv, rx_replenish);
2159
2160 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2161 return;
2162
2163 mutex_lock(&priv->mutex);
2164 iwl4965_rx_replenish(priv);
2165 mutex_unlock(&priv->mutex);
2166}
2167
2168/*****************************************************************************
2169 *
2170 * mac80211 entry point functions
2171 *
2172 *****************************************************************************/
2173
2174#define UCODE_READY_TIMEOUT (4 * HZ)
2175
/*
 * Not a mac80211 entry point function, but it fits in with all the
 * other mac80211 functions grouped here.
 *
 * Advertise the driver's capabilities (HW flags, interface modes, bands,
 * queue count, scan limits) to mac80211 and register the hw.  Sets
 * priv->mac80211_registered on success.
 *
 * @max_probe_length: max probe request size the uCode supports; used to
 *	derive max_scan_ie_len (minus 24-byte 802.11 header and 2-byte
 *	zero-length SSID element).
 *
 * Returns 0 or the error from ieee80211_register_hw().
 */
static int iwl4965_mac_setup_register(struct iwl_priv *priv,
				  u32 max_probe_length)
{
	int ret;
	struct ieee80211_hw *hw = priv->hw;
	struct iwl_rxon_context *ctx;

	hw->rate_control_algorithm = "iwl-4965-rs";

	/* Tell mac80211 our characteristics */
	hw->flags = IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_AMPDU_AGGREGATION |
		    IEEE80211_HW_NEED_DTIM_PERIOD |
		    IEEE80211_HW_SPECTRUM_MGMT |
		    IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	/* SMPS only makes sense on 11n-capable SKUs */
	if (priv->cfg->sku & IWL_SKU_N)
		hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
			     IEEE80211_HW_SUPPORTS_STATIC_SMPS;

	hw->sta_data_size = sizeof(struct iwl_station_priv);
	hw->vif_data_size = sizeof(struct iwl_vif_priv);

	/* union of all contexts' supported interface modes */
	for_each_context(priv, ctx) {
		hw->wiphy->interface_modes |= ctx->interface_modes;
		hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
	}

	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
			    WIPHY_FLAG_DISABLE_BEACON_HINTS;

	/*
	 * For now, disable PS by default because it affects
	 * RX performance significantly.
	 */
	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
	/* we create the 802.11 header and a zero-length SSID element */
	hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;

	/* Default value; 4 EDCA QOS priorities */
	hw->queues = 4;

	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;

	/* only expose bands that actually have channels */
	if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&priv->bands[IEEE80211_BAND_2GHZ];
	if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&priv->bands[IEEE80211_BAND_5GHZ];

	iwl_legacy_leds_init(priv);

	ret = ieee80211_register_hw(priv->hw);
	if (ret) {
		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
		return ret;
	}
	priv->mac80211_registered = 1;

	return 0;
}
2244
2245
2246int iwl4965_mac_start(struct ieee80211_hw *hw)
2247{
2248 struct iwl_priv *priv = hw->priv;
2249 int ret;
2250
2251 IWL_DEBUG_MAC80211(priv, "enter\n");
2252
2253 /* we should be verifying the device is ready to be opened */
2254 mutex_lock(&priv->mutex);
2255 ret = __iwl4965_up(priv);
2256 mutex_unlock(&priv->mutex);
2257
2258 if (ret)
2259 return ret;
2260
2261 if (iwl_legacy_is_rfkill(priv))
2262 goto out;
2263
2264 IWL_DEBUG_INFO(priv, "Start UP work done.\n");
2265
2266 /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
2267 * mac80211 will not be run successfully. */
2268 ret = wait_event_timeout(priv->wait_command_queue,
2269 test_bit(STATUS_READY, &priv->status),
2270 UCODE_READY_TIMEOUT);
2271 if (!ret) {
2272 if (!test_bit(STATUS_READY, &priv->status)) {
2273 IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
2274 jiffies_to_msecs(UCODE_READY_TIMEOUT));
2275 return -ETIMEDOUT;
2276 }
2277 }
2278
2279 iwl4965_led_enable(priv);
2280
2281out:
2282 priv->is_open = 1;
2283 IWL_DEBUG_MAC80211(priv, "leave\n");
2284 return 0;
2285}
2286
/*
 * mac80211 stop callback: mark the interface closed, bring the device
 * down and flush pending deferred work.  No-op (silent early return) if
 * the interface was never marked open.
 */
void iwl4965_mac_stop(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!priv->is_open)
		return;

	priv->is_open = 0;

	iwl4965_down(priv);

	flush_workqueue(priv->workqueue);

	/* User space software may expect getting rfkill changes
	 * even if interface is down */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_legacy_enable_rfkill_int(priv);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
2309
/*
 * mac80211 TX entry point.  Hands the frame to iwl4965_tx_skb() and
 * frees the skb here when queueing fails (non-zero return), since the
 * frame is dropped in that case.
 */
void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MACDUMP(priv, "enter\n");

	/* rate lookup stays inside the debug macro so it is only
	 * evaluated when this debug class is enabled */
	IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
		     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);

	if (iwl4965_tx_skb(priv, skb))
		dev_kfree_skb_any(skb);

	IWL_DEBUG_MACDUMP(priv, "leave\n");
}
2324
2325void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
2326 struct ieee80211_vif *vif,
2327 struct ieee80211_key_conf *keyconf,
2328 struct ieee80211_sta *sta,
2329 u32 iv32, u16 *phase1key)
2330{
2331 struct iwl_priv *priv = hw->priv;
2332 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2333
2334 IWL_DEBUG_MAC80211(priv, "enter\n");
2335
2336 iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
2337 iv32, phase1key);
2338
2339 IWL_DEBUG_MAC80211(priv, "leave\n");
2340}
2341
2342int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2343 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
2344 struct ieee80211_key_conf *key)
2345{
2346 struct iwl_priv *priv = hw->priv;
2347 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2348 struct iwl_rxon_context *ctx = vif_priv->ctx;
2349 int ret;
2350 u8 sta_id;
2351 bool is_default_wep_key = false;
2352
2353 IWL_DEBUG_MAC80211(priv, "enter\n");
2354
2355 if (priv->cfg->mod_params->sw_crypto) {
2356 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
2357 return -EOPNOTSUPP;
2358 }
2359
2360 sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
2361 if (sta_id == IWL_INVALID_STATION)
2362 return -EINVAL;
2363
2364 mutex_lock(&priv->mutex);
2365 iwl_legacy_scan_cancel_timeout(priv, 100);
2366
2367 /*
2368 * If we are getting WEP group key and we didn't receive any key mapping
2369 * so far, we are in legacy wep mode (group key only), otherwise we are
2370 * in 1X mode.
2371 * In legacy wep mode, we use another host command to the uCode.
2372 */
2373 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
2374 key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
2375 !sta) {
2376 if (cmd == SET_KEY)
2377 is_default_wep_key = !ctx->key_mapping_keys;
2378 else
2379 is_default_wep_key =
2380 (key->hw_key_idx == HW_KEY_DEFAULT);
2381 }
2382
2383 switch (cmd) {
2384 case SET_KEY:
2385 if (is_default_wep_key)
2386 ret = iwl4965_set_default_wep_key(priv,
2387 vif_priv->ctx, key);
2388 else
2389 ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx,
2390 key, sta_id);
2391
2392 IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
2393 break;
2394 case DISABLE_KEY:
2395 if (is_default_wep_key)
2396 ret = iwl4965_remove_default_wep_key(priv, ctx, key);
2397 else
2398 ret = iwl4965_remove_dynamic_key(priv, ctx,
2399 key, sta_id);
2400
2401 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
2402 break;
2403 default:
2404 ret = -EINVAL;
2405 }
2406
2407 mutex_unlock(&priv->mutex);
2408 IWL_DEBUG_MAC80211(priv, "leave\n");
2409
2410 return ret;
2411}
2412
/*
 * mac80211 A-MPDU (aggregation) state callback.
 *
 * Requires an 11n-capable SKU (-EACCES otherwise).  RX_STOP/TX_STOP
 * results are overridden to 0 while a driver shutdown is pending, so
 * teardown reports success on a device that is going away anyway.
 * Unhandled actions fall through with the initial -EINVAL.
 */
int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    enum ieee80211_ampdu_mlme_action action,
			    struct ieee80211_sta *sta, u16 tid, u16 *ssn,
			    u8 buf_size)
{
	struct iwl_priv *priv = hw->priv;
	int ret = -EINVAL;

	IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
		     sta->addr, tid);

	if (!(priv->cfg->sku & IWL_SKU_N))
		return -EACCES;

	mutex_lock(&priv->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		IWL_DEBUG_HT(priv, "start Rx\n");
		ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		IWL_DEBUG_HT(priv, "stop Rx\n");
		ret = iwl4965_sta_rx_agg_stop(priv, sta, tid);
		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_START:
		IWL_DEBUG_HT(priv, "start Tx\n");
		ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn);
		break;
	case IEEE80211_AMPDU_TX_STOP:
		IWL_DEBUG_HT(priv, "stop Tx\n");
		ret = iwl4965_tx_agg_stop(priv, vif, sta, tid);
		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = 0;
		break;
	}
	mutex_unlock(&priv->mutex);

	return ret;
}
2459
/*
 * mac80211 sta_add callback: create a driver station table entry for
 * @sta and initialize rate scaling for it.
 *
 * NOTE(review): is_ap is true when our own vif operates in STATION mode,
 * i.e. presumably the peer being added is our AP — confirm against
 * iwl_legacy_add_station_common()'s expectations.
 */
int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
		       struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	int ret;
	u8 sta_id;

	IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
			sta->addr);
	mutex_lock(&priv->mutex);
	IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
			sta->addr);
	sta_priv->common.sta_id = IWL_INVALID_STATION;

	atomic_set(&sta_priv->pending_frames, 0);

	ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr,
				     is_ap, sta, &sta_id);
	if (ret) {
		IWL_ERR(priv, "Unable to add station %pM (%d)\n",
			sta->addr, ret);
		/* Should we return success if return code is EEXIST ? */
		mutex_unlock(&priv->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Initialize rate scaling */
	IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
		       sta->addr);
	iwl4965_rs_rate_init(priv, sta, sta_id);
	mutex_unlock(&priv->mutex);

	return 0;
}
2500
/*
 * mac80211 channel_switch callback (BSS context only).
 *
 * Validates driver state (no rfkill, not exiting/scanning, no switch
 * already pending, context associated, hook available, channel actually
 * changing and valid), copies the HT40 configuration and new channel
 * into the staging RXON under priv->lock, then invokes the device
 * specific set_channel_switch hook.  On immediate hook failure the
 * pending flag is cleared and mac80211 is told the switch failed.
 */
void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
			       struct ieee80211_channel_switch *ch_switch)
{
	struct iwl_priv *priv = hw->priv;
	const struct iwl_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = ch_switch->channel;
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;

	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	u16 ch;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	mutex_lock(&priv->mutex);

	if (iwl_legacy_is_rfkill(priv))
		goto out;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
	    test_bit(STATUS_SCANNING, &priv->status) ||
	    test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
		goto out;

	if (!iwl_legacy_is_associated_ctx(ctx))
		goto out;

	if (!priv->cfg->ops->lib->set_channel_switch)
		goto out;

	ch = channel->hw_value;
	/* nothing to do if we are already on the target channel */
	if (le16_to_cpu(ctx->active.channel) == ch)
		goto out;

	ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
	if (!iwl_legacy_is_channel_valid(ch_info)) {
		IWL_DEBUG_MAC80211(priv, "invalid channel\n");
		goto out;
	}

	/* priv->lock protects the staging RXON / HT config updates */
	spin_lock_irq(&priv->lock);

	priv->current_ht_config.smps = conf->smps_mode;

	/* Configure HT40 channels */
	ctx->ht.enabled = conf_is_ht(conf);
	if (ctx->ht.enabled) {
		if (conf_is_ht40_minus(conf)) {
			ctx->ht.extension_chan_offset =
				IEEE80211_HT_PARAM_CHA_SEC_BELOW;
			ctx->ht.is_40mhz = true;
		} else if (conf_is_ht40_plus(conf)) {
			ctx->ht.extension_chan_offset =
				IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
			ctx->ht.is_40mhz = true;
		} else {
			ctx->ht.extension_chan_offset =
				IEEE80211_HT_PARAM_CHA_SEC_NONE;
			ctx->ht.is_40mhz = false;
		}
	} else
		ctx->ht.is_40mhz = false;

	/* a channel change invalidates the staged flags */
	if ((le16_to_cpu(ctx->staging.channel) != ch))
		ctx->staging.flags = 0;

	iwl_legacy_set_rxon_channel(priv, channel, ctx);
	iwl_legacy_set_rxon_ht(priv, ht_conf);
	iwl_legacy_set_flags_for_band(priv, ctx, channel->band, ctx->vif);

	spin_unlock_irq(&priv->lock);

	iwl_legacy_set_rate(priv);
	/*
	 * at this point, staging_rxon has the
	 * configuration for channel switch
	 */
	set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
	priv->switch_channel = cpu_to_le16(ch);
	if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
		clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
		priv->switch_channel = 0;
		ieee80211_chswitch_done(ctx->vif, false);
	}

out:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");
}
2590
/*
 * mac80211 configure_filter callback.
 *
 * Translates the requested FIF_* flags into RXON filter bits on every
 * context's staging RXON.  The change is not committed to the device
 * here (a scan may be in progress); it takes effect with the next RXON
 * commit.  *total_flags is clamped to the filters the device honours.
 */
void iwl4965_configure_filter(struct ieee80211_hw *hw,
			     unsigned int changed_flags,
			     unsigned int *total_flags,
			     u64 multicast)
{
	struct iwl_priv *priv = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;
	struct iwl_rxon_context *ctx;

/* accumulate RXON bits to set (filter_or) or clear (filter_nand) */
#define CHK(test, flag)	do { \
	if (*total_flags & (test)) \
		filter_or |= (flag); \
	else \
		filter_nand |= (flag); \
	} while (0)

	IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
			changed_flags, *total_flags);

	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&priv->mutex);

	for_each_context(priv, ctx) {
		ctx->staging.filter_flags &= ~filter_nand;
		ctx->staging.filter_flags |= filter_or;

		/*
		 * Not committing directly because hardware can perform a scan,
		 * but we'll eventually commit the filter flags change anyway.
		 */
	}

	mutex_unlock(&priv->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in iwl_legacy_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
2640
2641/*****************************************************************************
2642 *
2643 * driver setup and teardown
2644 *
2645 *****************************************************************************/
2646
2647static void iwl4965_bg_txpower_work(struct work_struct *work)
2648{
2649 struct iwl_priv *priv = container_of(work, struct iwl_priv,
2650 txpower_work);
2651
2652 mutex_lock(&priv->mutex);
2653
2654 /* If a scan happened to start before we got here
2655 * then just return; the statistics notification will
2656 * kick off another scheduled work to compensate for
2657 * any temperature delta we missed here. */
2658 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2659 test_bit(STATUS_SCANNING, &priv->status))
2660 goto out;
2661
2662 /* Regardless of if we are associated, we must reconfigure the
2663 * TX power since frames can be sent on non-radar channels while
2664 * not associated */
2665 priv->cfg->ops->lib->send_tx_power(priv);
2666
2667 /* Update last_temperature to keep is_calib_needed from running
2668 * when it isn't needed... */
2669 priv->last_temperature = priv->temperature;
2670out:
2671 mutex_unlock(&priv->mutex);
2672}
2673
/*
 * Create priv->workqueue and register all deferred work items, the
 * scan deferred work, the statistics/watchdog timers and the IRQ
 * tasklet.  Counterpart: iwl4965_cancel_deferred_work().
 *
 * NOTE(review): the create_singlethread_workqueue() result is not
 * checked — a NULL workqueue here would oops on first queue_work();
 * confirm whether the caller can tolerate that.
 */
static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
{
	priv->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&priv->wait_command_queue);

	INIT_WORK(&priv->restart, iwl4965_bg_restart);
	INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
	INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work);
	INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
	INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);

	iwl_legacy_setup_scan_deferred_work(priv);

	INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);

	init_timer(&priv->statistics_periodic);
	priv->statistics_periodic.data = (unsigned long)priv;
	priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;

	init_timer(&priv->watchdog);
	priv->watchdog.data = (unsigned long)priv;
	priv->watchdog.function = iwl_legacy_bg_watchdog;

	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
		iwl4965_irq_tasklet, (unsigned long)priv);
}
2701
/*
 * Cancel the deferred work registered in iwl4965_setup_deferred_work().
 *
 * NOTE(review): alive_start uses cancel_delayed_work() (non-_sync),
 * unlike init_alive_start — confirm the handler cannot still be running
 * when this returns.  The watchdog timer is not deleted here either.
 */
static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
{
	cancel_work_sync(&priv->txpower_work);
	cancel_delayed_work_sync(&priv->init_alive_start);
	cancel_delayed_work(&priv->alive_start);
	cancel_work_sync(&priv->run_time_calib_work);

	iwl_legacy_cancel_scan_deferred_work(priv);

	del_timer_sync(&priv->statistics_periodic);
}
2713
2714static void iwl4965_init_hw_rates(struct iwl_priv *priv,
2715 struct ieee80211_rate *rates)
2716{
2717 int i;
2718
2719 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
2720 rates[i].bitrate = iwlegacy_rates[i].ieee * 5;
2721 rates[i].hw_value = i; /* Rate scaling will work on indexes */
2722 rates[i].hw_value_short = i;
2723 rates[i].flags = 0;
2724 if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
2725 /*
2726 * If CCK != 1M then set short preamble rate flag.
2727 */
2728 rates[i].flags |=
2729 (iwlegacy_rates[i].plcp == IWL_RATE_1M_PLCP) ?
2730 0 : IEEE80211_RATE_SHORT_PREAMBLE;
2731 }
2732 }
2733}
2734/*
2735 * Acquire priv->lock before calling this function !
2736 */
2737void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
2738{
2739 iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
2740 (index & 0xff) | (txq_id << 8));
2741 iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
2742}
2743
2744void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
2745 struct iwl_tx_queue *txq,
2746 int tx_fifo_id, int scd_retry)
2747{
2748 int txq_id = txq->q.id;
2749
2750 /* Find out whether to activate Tx queue */
2751 int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
2752
2753 /* Set up and activate */
2754 iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
2755 (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2756 (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
2757 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
2758 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
2759 IWL49_SCD_QUEUE_STTS_REG_MSK);
2760
2761 txq->sched_retry = scd_retry;
2762
2763 IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
2764 active ? "Activate" : "Deactivate",
2765 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
2766}
2767
2768
/*
 * One-time driver state initialization: locks, free-frame list, default
 * band/mode settings, RXON chain selection, scan parameters and the
 * regulatory channel / geo tables.
 *
 * Returns 0 on success or a negative errno; on failure anything
 * allocated here is released again via the error ladder.
 */
static int iwl4965_init_drv(struct iwl_priv *priv)
{
	int ret;

	spin_lock_init(&priv->sta_lock);
	spin_lock_init(&priv->hcmd_lock);

	INIT_LIST_HEAD(&priv->free_frames);

	mutex_init(&priv->mutex);

	priv->ieee_channels = NULL;
	priv->ieee_rates = NULL;
	priv->band = IEEE80211_BAND_2GHZ;

	priv->iw_mode = NL80211_IFTYPE_STATION;
	priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
	priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;

	/* initialize force reset */
	priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;

	/* Choose which receivers/antennas to use */
	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv,
				&priv->contexts[IWL_RXON_CTX_BSS]);

	iwl_legacy_init_scan_params(priv);

	ret = iwl_legacy_init_channel_map(priv);
	if (ret) {
		IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
		goto err;
	}

	ret = iwl_legacy_init_geos(priv);
	if (ret) {
		IWL_ERR(priv, "initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	iwl4965_init_hw_rates(priv, priv->ieee_rates);

	return 0;

err_free_channel_map:
	iwl_legacy_free_channel_map(priv);
err:
	return ret;
}
2818
/*
 * Counterpart to iwl4965_init_drv(): release calibration results, the
 * geo and channel tables, and the scan command buffer.
 */
static void iwl4965_uninit_drv(struct iwl_priv *priv)
{
	iwl4965_calib_free_results(priv);
	iwl_legacy_free_geos(priv);
	iwl_legacy_free_channel_map(priv);
	kfree(priv->scan_cmd);	/* kfree(NULL) is a no-op */
}
2826
/*
 * Cache the hardware revision registers and the PCI revision ID in priv
 * for later use and logging.
 */
static void iwl4965_hw_detect(struct iwl_priv *priv)
{
	priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
	priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
	priv->rev_id = priv->pci_dev->revision;
	IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
}
2834
2835static int iwl4965_set_hw_params(struct iwl_priv *priv)
2836{
2837 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2838 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
2839 if (priv->cfg->mod_params->amsdu_size_8K)
2840 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
2841 else
2842 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
2843
2844 priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
2845
2846 if (priv->cfg->mod_params->disable_11n)
2847 priv->cfg->sku &= ~IWL_SKU_N;
2848
2849 /* Device-specific setup */
2850 return priv->cfg->ops->lib->set_hw_params(priv);
2851}
2852
/* BSS context access-category → TX FIFO map (indexed VO, VI, BE, BK —
 * presumably mac80211 AC order; confirm against consumers). */
static const u8 iwl4965_bss_ac_to_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
};
2859
/* BSS context access-category → TX queue map (identity mapping). */
static const u8 iwl4965_bss_ac_to_queue[] = {
	0, 1, 2, 3,
};
2863
2864static int
2865iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2866{
2867 int err = 0, i;
2868 struct iwl_priv *priv;
2869 struct ieee80211_hw *hw;
2870 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
2871 unsigned long flags;
2872 u16 pci_cmd;
2873
2874 /************************
2875 * 1. Allocating HW data
2876 ************************/
2877
2878 hw = iwl_legacy_alloc_all(cfg);
2879 if (!hw) {
2880 err = -ENOMEM;
2881 goto out;
2882 }
2883 priv = hw->priv;
2884 /* At this point both hw and priv are allocated. */
2885
2886 /*
2887 * The default context is always valid,
2888 * more may be discovered when firmware
2889 * is loaded.
2890 */
2891 priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
2892
2893 for (i = 0; i < NUM_IWL_RXON_CTX; i++)
2894 priv->contexts[i].ctxid = i;
2895
2896 priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
2897 priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
2898 priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
2899 priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
2900 priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
2901 priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
2902 priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
2903 priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
2904 priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo;
2905 priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue;
2906 priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
2907 BIT(NL80211_IFTYPE_ADHOC);
2908 priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
2909 BIT(NL80211_IFTYPE_STATION);
2910 priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
2911 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
2912 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
2913 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
2914
2915 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1);
2916
2917 SET_IEEE80211_DEV(hw, &pdev->dev);
2918
2919 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
2920 priv->cfg = cfg;
2921 priv->pci_dev = pdev;
2922 priv->inta_mask = CSR_INI_SET_MASK;
2923
2924 if (iwl_legacy_alloc_traffic_mem(priv))
2925 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
2926
2927 /**************************
2928 * 2. Initializing PCI bus
2929 **************************/
2930 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
2931 PCIE_LINK_STATE_CLKPM);
2932
2933 if (pci_enable_device(pdev)) {
2934 err = -ENODEV;
2935 goto out_ieee80211_free_hw;
2936 }
2937
2938 pci_set_master(pdev);
2939
2940 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
2941 if (!err)
2942 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
2943 if (err) {
2944 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2945 if (!err)
2946 err = pci_set_consistent_dma_mask(pdev,
2947 DMA_BIT_MASK(32));
2948 /* both attempts failed: */
2949 if (err) {
2950 IWL_WARN(priv, "No suitable DMA available.\n");
2951 goto out_pci_disable_device;
2952 }
2953 }
2954
2955 err = pci_request_regions(pdev, DRV_NAME);
2956 if (err)
2957 goto out_pci_disable_device;
2958
2959 pci_set_drvdata(pdev, priv);
2960
2961
2962 /***********************
2963 * 3. Read REV register
2964 ***********************/
2965 priv->hw_base = pci_iomap(pdev, 0, 0);
2966 if (!priv->hw_base) {
2967 err = -ENODEV;
2968 goto out_pci_release_regions;
2969 }
2970
2971 IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
2972 (unsigned long long) pci_resource_len(pdev, 0));
2973 IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
2974
2975 /* these spin locks will be used in apm_ops.init and EEPROM access
2976 * we should init now
2977 */
2978 spin_lock_init(&priv->reg_lock);
2979 spin_lock_init(&priv->lock);
2980
2981 /*
2982 * stop and reset the on-board processor just in case it is in a
2983 * strange state ... like being left stranded by a primary kernel
2984 * and this is now the kdump kernel trying to start up
2985 */
2986 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
2987
2988 iwl4965_hw_detect(priv);
2989 IWL_INFO(priv, "Detected %s, REV=0x%X\n",
2990 priv->cfg->name, priv->hw_rev);
2991
2992 /* We disable the RETRY_TIMEOUT register (0x41) to keep
2993 * PCI Tx retries from interfering with C3 CPU state */
2994 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
2995
2996 iwl4965_prepare_card_hw(priv);
2997 if (!priv->hw_ready) {
2998 IWL_WARN(priv, "Failed, HW not ready\n");
2999 goto out_iounmap;
3000 }
3001
3002 /*****************
3003 * 4. Read EEPROM
3004 *****************/
3005 /* Read the EEPROM */
3006 err = iwl_legacy_eeprom_init(priv);
3007 if (err) {
3008 IWL_ERR(priv, "Unable to init EEPROM\n");
3009 goto out_iounmap;
3010 }
3011 err = iwl4965_eeprom_check_version(priv);
3012 if (err)
3013 goto out_free_eeprom;
3014
3015 if (err)
3016 goto out_free_eeprom;
3017
3018 /* extract MAC Address */
3019 iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr);
3020 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
3021 priv->hw->wiphy->addresses = priv->addresses;
3022 priv->hw->wiphy->n_addresses = 1;
3023
3024 /************************
3025 * 5. Setup HW constants
3026 ************************/
3027 if (iwl4965_set_hw_params(priv)) {
3028 IWL_ERR(priv, "failed to set hw parameters\n");
3029 goto out_free_eeprom;
3030 }
3031
3032 /*******************
3033 * 6. Setup priv
3034 *******************/
3035
3036 err = iwl4965_init_drv(priv);
3037 if (err)
3038 goto out_free_eeprom;
3039 /* At this point both hw and priv are initialized. */
3040
3041 /********************
3042 * 7. Setup services
3043 ********************/
3044 spin_lock_irqsave(&priv->lock, flags);
3045 iwl_legacy_disable_interrupts(priv);
3046 spin_unlock_irqrestore(&priv->lock, flags);
3047
3048 pci_enable_msi(priv->pci_dev);
3049
3050 err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
3051 IRQF_SHARED, DRV_NAME, priv);
3052 if (err) {
3053 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
3054 goto out_disable_msi;
3055 }
3056
3057 iwl4965_setup_deferred_work(priv);
3058 iwl4965_setup_rx_handlers(priv);
3059
3060 /*********************************************
3061 * 8. Enable interrupts and read RFKILL state
3062 *********************************************/
3063
3064 /* enable rfkill interrupt: hw bug w/a */
3065 pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
3066 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
3067 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
3068 pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
3069 }
3070
3071 iwl_legacy_enable_rfkill_int(priv);
3072
3073 /* If platform's RF_KILL switch is NOT set to KILL */
3074 if (iwl_read32(priv, CSR_GP_CNTRL) &
3075 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
3076 clear_bit(STATUS_RF_KILL_HW, &priv->status);
3077 else
3078 set_bit(STATUS_RF_KILL_HW, &priv->status);
3079
3080 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
3081 test_bit(STATUS_RF_KILL_HW, &priv->status));
3082
3083 iwl_legacy_power_initialize(priv);
3084
3085 init_completion(&priv->_4965.firmware_loading_complete);
3086
3087 err = iwl4965_request_firmware(priv, true);
3088 if (err)
3089 goto out_destroy_workqueue;
3090
3091 return 0;
3092
3093 out_destroy_workqueue:
3094 destroy_workqueue(priv->workqueue);
3095 priv->workqueue = NULL;
3096 free_irq(priv->pci_dev->irq, priv);
3097 out_disable_msi:
3098 pci_disable_msi(priv->pci_dev);
3099 iwl4965_uninit_drv(priv);
3100 out_free_eeprom:
3101 iwl_legacy_eeprom_free(priv);
3102 out_iounmap:
3103 pci_iounmap(pdev, priv->hw_base);
3104 out_pci_release_regions:
3105 pci_set_drvdata(pdev, NULL);
3106 pci_release_regions(pdev);
3107 out_pci_disable_device:
3108 pci_disable_device(pdev);
3109 out_ieee80211_free_hw:
3110 iwl_legacy_free_traffic_mem(priv);
3111 ieee80211_free_hw(priv->hw);
3112 out:
3113 return err;
3114}
3115
3116static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
3117{
3118 struct iwl_priv *priv = pci_get_drvdata(pdev);
3119 unsigned long flags;
3120
3121 if (!priv)
3122 return;
3123
3124 wait_for_completion(&priv->_4965.firmware_loading_complete);
3125
3126 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
3127
3128 iwl_legacy_dbgfs_unregister(priv);
3129 sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
3130
3131 /* ieee80211_unregister_hw call wil cause iwl_mac_stop to
3132 * to be called and iwl4965_down since we are removing the device
3133 * we need to set STATUS_EXIT_PENDING bit.
3134 */
3135 set_bit(STATUS_EXIT_PENDING, &priv->status);
3136
3137 iwl_legacy_leds_exit(priv);
3138
3139 if (priv->mac80211_registered) {
3140 ieee80211_unregister_hw(priv->hw);
3141 priv->mac80211_registered = 0;
3142 } else {
3143 iwl4965_down(priv);
3144 }
3145
3146 /*
3147 * Make sure device is reset to low power before unloading driver.
3148 * This may be redundant with iwl4965_down(), but there are paths to
3149 * run iwl4965_down() without calling apm_ops.stop(), and there are
3150 * paths to avoid running iwl4965_down() at all before leaving driver.
3151 * This (inexpensive) call *makes sure* device is reset.
3152 */
3153 iwl_legacy_apm_stop(priv);
3154
3155 /* make sure we flush any pending irq or
3156 * tasklet for the driver
3157 */
3158 spin_lock_irqsave(&priv->lock, flags);
3159 iwl_legacy_disable_interrupts(priv);
3160 spin_unlock_irqrestore(&priv->lock, flags);
3161
3162 iwl4965_synchronize_irq(priv);
3163
3164 iwl4965_dealloc_ucode_pci(priv);
3165
3166 if (priv->rxq.bd)
3167 iwl4965_rx_queue_free(priv, &priv->rxq);
3168 iwl4965_hw_txq_ctx_free(priv);
3169
3170 iwl_legacy_eeprom_free(priv);
3171
3172
3173 /*netif_stop_queue(dev); */
3174 flush_workqueue(priv->workqueue);
3175
3176 /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
3177 * priv->workqueue... so we can't take down the workqueue
3178 * until now... */
3179 destroy_workqueue(priv->workqueue);
3180 priv->workqueue = NULL;
3181 iwl_legacy_free_traffic_mem(priv);
3182
3183 free_irq(priv->pci_dev->irq, priv);
3184 pci_disable_msi(priv->pci_dev);
3185 pci_iounmap(pdev, priv->hw_base);
3186 pci_release_regions(pdev);
3187 pci_disable_device(pdev);
3188 pci_set_drvdata(pdev, NULL);
3189
3190 iwl4965_uninit_drv(priv);
3191
3192 dev_kfree_skb(priv->beacon_skb);
3193
3194 ieee80211_free_hw(priv->hw);
3195}
3196
3197/*
3198 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
3199 * must be called under priv->lock and mac access
3200 */
3201void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
3202{
3203 iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask);
3204}
3205
3206/*****************************************************************************
3207 *
3208 * driver and module entry point
3209 *
3210 *****************************************************************************/
3211
3212/* Hardware specific file defines the PCI IDs table for that hardware module */
3213static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = {
3214#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965)
3215 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},
3216 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)},
3217#endif /* CONFIG_IWL4965 */
3218
3219 {0}
3220};
3221MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);
3222
3223static struct pci_driver iwl4965_driver = {
3224 .name = DRV_NAME,
3225 .id_table = iwl4965_hw_card_ids,
3226 .probe = iwl4965_pci_probe,
3227 .remove = __devexit_p(iwl4965_pci_remove),
3228 .driver.pm = IWL_LEGACY_PM_OPS,
3229};
3230
3231static int __init iwl4965_init(void)
3232{
3233
3234 int ret;
3235 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
3236 pr_info(DRV_COPYRIGHT "\n");
3237
3238 ret = iwl4965_rate_control_register();
3239 if (ret) {
3240 pr_err("Unable to register rate control algorithm: %d\n", ret);
3241 return ret;
3242 }
3243
3244 ret = pci_register_driver(&iwl4965_driver);
3245 if (ret) {
3246 pr_err("Unable to initialize PCI module\n");
3247 goto error_register;
3248 }
3249
3250 return ret;
3251
3252error_register:
3253 iwl4965_rate_control_unregister();
3254 return ret;
3255}
3256
3257static void __exit iwl4965_exit(void)
3258{
3259 pci_unregister_driver(&iwl4965_driver);
3260 iwl4965_rate_control_unregister();
3261}
3262
3263module_exit(iwl4965_exit);
3264module_init(iwl4965_init);
3265
3266#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
3267module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
3268MODULE_PARM_DESC(debug, "debug output mask");
3269#endif
3270
3271module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
3272MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
3273module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
3274MODULE_PARM_DESC(queues_num, "number of hw queues.");
3275module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
3276MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
3277module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
3278 int, S_IRUGO);
3279MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
3280module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO);
3281MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlegacy/iwl-prph.h b/drivers/net/wireless/iwlegacy/prph.h
index 30a493003ab0..ffec4b4a248a 100644
--- a/drivers/net/wireless/iwlegacy/iwl-prph.h
+++ b/drivers/net/wireless/iwlegacy/prph.h
@@ -60,8 +60,8 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/ 61 *****************************************************************************/
62 62
63#ifndef __iwl_legacy_prph_h__ 63#ifndef __il_prph_h__
64#define __iwl_legacy_prph_h__ 64#define __il_prph_h__
65 65
66/* 66/*
67 * Registers in this file are internal, not PCI bus memory mapped. 67 * Registers in this file are internal, not PCI bus memory mapped.
@@ -91,9 +91,9 @@
91#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000) 91#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
92#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000) 92#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
93#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000) 93#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
94#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */ 94#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */
95#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000) 95#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000)
96#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */ 96#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
97#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060) 97#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060)
98 98
99#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) 99#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
@@ -120,13 +120,13 @@
120 * 120 *
121 * 1) Initialization -- performs hardware calibration and sets up some 121 * 1) Initialization -- performs hardware calibration and sets up some
122 * internal data, then notifies host via "initialize alive" notification 122 * internal data, then notifies host via "initialize alive" notification
123 * (struct iwl_init_alive_resp) that it has completed all of its work. 123 * (struct il_init_alive_resp) that it has completed all of its work.
124 * After signal from host, it then loads and starts the runtime program. 124 * After signal from host, it then loads and starts the runtime program.
125 * The initialization program must be used when initially setting up the 125 * The initialization program must be used when initially setting up the
126 * NIC after loading the driver. 126 * NIC after loading the driver.
127 * 127 *
128 * 2) Runtime/Protocol -- performs all normal runtime operations. This 128 * 2) Runtime/Protocol -- performs all normal runtime operations. This
129 * notifies host via "alive" notification (struct iwl_alive_resp) that it 129 * notifies host via "alive" notification (struct il_alive_resp) that it
130 * is ready to be used. 130 * is ready to be used.
131 * 131 *
132 * When initializing the NIC, the host driver does the following procedure: 132 * When initializing the NIC, the host driver does the following procedure:
@@ -189,7 +189,7 @@
189 * procedure. 189 * procedure.
190 * 190 *
191 * This save/restore method is mostly for autonomous power management during 191 * This save/restore method is mostly for autonomous power management during
192 * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and 192 * normal operation (result of C_POWER_TBL). Platform suspend/resume and
193 * RFKILL should use complete restarts (with total re-initialization) of uCode, 193 * RFKILL should use complete restarts (with total re-initialization) of uCode,
194 * allowing total shutdown (including BSM memory). 194 * allowing total shutdown (including BSM memory).
195 * 195 *
@@ -202,19 +202,19 @@
202 */ 202 */
203 203
204/* BSM bit fields */ 204/* BSM bit fields */
205#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */ 205#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */
206#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/ 206#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup */
207#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */ 207#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */
208 208
209/* BSM addresses */ 209/* BSM addresses */
210#define BSM_BASE (PRPH_BASE + 0x3400) 210#define BSM_BASE (PRPH_BASE + 0x3400)
211#define BSM_END (PRPH_BASE + 0x3800) 211#define BSM_END (PRPH_BASE + 0x3800)
212 212
213#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */ 213#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */
214#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */ 214#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */
215#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */ 215#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */
216#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */ 216#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */
217#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */ 217#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */
218 218
219/* 219/*
220 * Pointers and size regs for bootstrap load and data SRAM save/restore. 220 * Pointers and size regs for bootstrap load and data SRAM save/restore.
@@ -231,8 +231,7 @@
231 * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1) 231 * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1)
232 */ 232 */
233#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800) 233#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800)
234#define BSM_SRAM_SIZE (1024) /* bytes */ 234#define BSM_SRAM_SIZE (1024) /* bytes */
235
236 235
237/* 3945 Tx scheduler registers */ 236/* 3945 Tx scheduler registers */
238#define ALM_SCD_BASE (PRPH_BASE + 0x2E00) 237#define ALM_SCD_BASE (PRPH_BASE + 0x2E00)
@@ -255,7 +254,7 @@
255 * but one DMA channel may take input from several queues. 254 * but one DMA channel may take input from several queues.
256 * 255 *
257 * Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows 256 * Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows
258 * (cf. default_queue_to_tx_fifo in iwl-4965.c): 257 * (cf. default_queue_to_tx_fifo in 4965.c):
259 * 258 *
260 * 0 -- EDCA BK (background) frames, lowest priority 259 * 0 -- EDCA BK (background) frames, lowest priority
261 * 1 -- EDCA BE (best effort) frames, normal priority 260 * 1 -- EDCA BE (best effort) frames, normal priority
@@ -274,20 +273,20 @@
274 * The driver sets up each queue to work in one of two modes: 273 * The driver sets up each queue to work in one of two modes:
275 * 274 *
276 * 1) Scheduler-Ack, in which the scheduler automatically supports a 275 * 1) Scheduler-Ack, in which the scheduler automatically supports a
277 * block-ack (BA) window of up to 64 TFDs. In this mode, each queue 276 * block-ack (BA) win of up to 64 TFDs. In this mode, each queue
278 * contains TFDs for a unique combination of Recipient Address (RA) 277 * contains TFDs for a unique combination of Recipient Address (RA)
279 * and Traffic Identifier (TID), that is, traffic of a given 278 * and Traffic Identifier (TID), that is, traffic of a given
280 * Quality-Of-Service (QOS) priority, destined for a single station. 279 * Quality-Of-Service (QOS) priority, destined for a single station.
281 * 280 *
282 * In scheduler-ack mode, the scheduler keeps track of the Tx status of 281 * In scheduler-ack mode, the scheduler keeps track of the Tx status of
283 * each frame within the BA window, including whether it's been transmitted, 282 * each frame within the BA win, including whether it's been transmitted,
284 * and whether it's been acknowledged by the receiving station. The device 283 * and whether it's been acknowledged by the receiving station. The device
285 * automatically processes block-acks received from the receiving STA, 284 * automatically processes block-acks received from the receiving STA,
286 * and reschedules un-acked frames to be retransmitted (successful 285 * and reschedules un-acked frames to be retransmitted (successful
287 * Tx completion may end up being out-of-order). 286 * Tx completion may end up being out-of-order).
288 * 287 *
289 * The driver must maintain the queue's Byte Count table in host DRAM 288 * The driver must maintain the queue's Byte Count table in host DRAM
290 * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode. 289 * (struct il4965_sched_queue_byte_cnt_tbl) for this mode.
291 * This mode does not support fragmentation. 290 * This mode does not support fragmentation.
292 * 291 *
293 * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order. 292 * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
@@ -316,34 +315,34 @@
316 */ 315 */
317 316
318/** 317/**
319 * Max Tx window size is the max number of contiguous TFDs that the scheduler 318 * Max Tx win size is the max number of contiguous TFDs that the scheduler
320 * can keep track of at one time when creating block-ack chains of frames. 319 * can keep track of at one time when creating block-ack chains of frames.
321 * Note that "64" matches the number of ack bits in a block-ack packet. 320 * Note that "64" matches the number of ack bits in a block-ack packet.
322 * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize 321 * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
323 * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values. 322 * IL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
324 */ 323 */
325#define SCD_WIN_SIZE 64 324#define SCD_WIN_SIZE 64
326#define SCD_FRAME_LIMIT 64 325#define SCD_FRAME_LIMIT 64
327 326
328/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */ 327/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
329#define IWL49_SCD_START_OFFSET 0xa02c00 328#define IL49_SCD_START_OFFSET 0xa02c00
330 329
331/* 330/*
332 * 4965 tells driver SRAM address for internal scheduler structs via this reg. 331 * 4965 tells driver SRAM address for internal scheduler structs via this reg.
333 * Value is valid only after "Alive" response from uCode. 332 * Value is valid only after "Alive" response from uCode.
334 */ 333 */
335#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0) 334#define IL49_SCD_SRAM_BASE_ADDR (IL49_SCD_START_OFFSET + 0x0)
336 335
337/* 336/*
338 * Driver may need to update queue-empty bits after changing queue's 337 * Driver may need to update queue-empty bits after changing queue's
339 * write and read pointers (indexes) during (re-)initialization (i.e. when 338 * write and read pointers (idxes) during (re-)initialization (i.e. when
340 * scheduler is not tracking what's happening). 339 * scheduler is not tracking what's happening).
341 * Bit fields: 340 * Bit fields:
342 * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit 341 * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit
343 * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty 342 * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty
344 * NOTE: This register is not used by Linux driver. 343 * NOTE: This register is not used by Linux driver.
345 */ 344 */
346#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4) 345#define IL49_SCD_EMPTY_BITS (IL49_SCD_START_OFFSET + 0x4)
347 346
348/* 347/*
349 * Physical base address of array of byte count (BC) circular buffers (CBs). 348 * Physical base address of array of byte count (BC) circular buffers (CBs).
@@ -351,11 +350,11 @@
351 * This register points to BC CB for queue 0, must be on 1024-byte boundary. 350 * This register points to BC CB for queue 0, must be on 1024-byte boundary.
352 * Others are spaced by 1024 bytes. 351 * Others are spaced by 1024 bytes.
353 * Each BC CB is 2 bytes * (256 + 64) = 740 bytes, followed by 384 bytes pad. 352 * Each BC CB is 2 bytes * (256 + 64) = 740 bytes, followed by 384 bytes pad.
354 * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff). 353 * (Index into a queue's BC CB) = (idx into queue's TFD CB) = (SSN & 0xff).
355 * Bit fields: 354 * Bit fields:
356 * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned. 355 * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned.
357 */ 356 */
358#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10) 357#define IL49_SCD_DRAM_BASE_ADDR (IL49_SCD_START_OFFSET + 0x10)
359 358
360/* 359/*
361 * Enables any/all Tx DMA/FIFO channels. 360 * Enables any/all Tx DMA/FIFO channels.
@@ -364,23 +363,23 @@
364 * Bit fields: 363 * Bit fields:
365 * 7- 0: Enable (1), disable (0), one bit for each channel 0-7 364 * 7- 0: Enable (1), disable (0), one bit for each channel 0-7
366 */ 365 */
367#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c) 366#define IL49_SCD_TXFACT (IL49_SCD_START_OFFSET + 0x1c)
368/* 367/*
369 * Queue (x) Write Pointers (indexes, really!), one for each Tx queue. 368 * Queue (x) Write Pointers (idxes, really!), one for each Tx queue.
370 * Initialized and updated by driver as new TFDs are added to queue. 369 * Initialized and updated by driver as new TFDs are added to queue.
371 * NOTE: If using Block Ack, index must correspond to frame's 370 * NOTE: If using Block Ack, idx must correspond to frame's
372 * Start Sequence Number; index = (SSN & 0xff) 371 * Start Sequence Number; idx = (SSN & 0xff)
373 * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses? 372 * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
374 */ 373 */
375#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4) 374#define IL49_SCD_QUEUE_WRPTR(x) (IL49_SCD_START_OFFSET + 0x24 + (x) * 4)
376 375
377/* 376/*
378 * Queue (x) Read Pointers (indexes, really!), one for each Tx queue. 377 * Queue (x) Read Pointers (idxes, really!), one for each Tx queue.
379 * For FIFO mode, index indicates next frame to transmit. 378 * For FIFO mode, idx indicates next frame to transmit.
380 * For Scheduler-ACK mode, index indicates first frame in Tx window. 379 * For Scheduler-ACK mode, idx indicates first frame in Tx win.
381 * Initialized by driver, updated by scheduler. 380 * Initialized by driver, updated by scheduler.
382 */ 381 */
383#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4) 382#define IL49_SCD_QUEUE_RDPTR(x) (IL49_SCD_START_OFFSET + 0x64 + (x) * 4)
384 383
385/* 384/*
386 * Select which queues work in chain mode (1) vs. not (0). 385 * Select which queues work in chain mode (1) vs. not (0).
@@ -391,18 +390,18 @@
391 * NOTE: If driver sets up queue for chain mode, it should be also set up 390 * NOTE: If driver sets up queue for chain mode, it should be also set up
392 * Scheduler-ACK mode as well, via SCD_QUEUE_STATUS_BITS(x). 391 * Scheduler-ACK mode as well, via SCD_QUEUE_STATUS_BITS(x).
393 */ 392 */
394#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0) 393#define IL49_SCD_QUEUECHAIN_SEL (IL49_SCD_START_OFFSET + 0xd0)
395 394
396/* 395/*
397 * Select which queues interrupt driver when scheduler increments 396 * Select which queues interrupt driver when scheduler increments
398 * a queue's read pointer (index). 397 * a queue's read pointer (idx).
399 * Bit fields: 398 * Bit fields:
400 * 31-16: Reserved 399 * 31-16: Reserved
401 * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled 400 * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
402 * NOTE: This functionality is apparently a no-op; driver relies on interrupts 401 * NOTE: This functionality is apparently a no-op; driver relies on interrupts
403 * from Rx queue to read Tx command responses and update Tx queues. 402 * from Rx queue to read Tx command responses and update Tx queues.
404 */ 403 */
405#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4) 404#define IL49_SCD_INTERRUPT_MASK (IL49_SCD_START_OFFSET + 0xe4)
406 405
407/* 406/*
408 * Queue search status registers. One for each queue. 407 * Queue search status registers. One for each queue.
@@ -414,7 +413,7 @@
414 * Driver should init to "1" for aggregation mode, or "0" otherwise. 413 * Driver should init to "1" for aggregation mode, or "0" otherwise.
415 * 7-6: Driver should init to "0" 414 * 7-6: Driver should init to "0"
416 * 5: Window Size Left; indicates whether scheduler can request 415 * 5: Window Size Left; indicates whether scheduler can request
417 * another TFD, based on window size, etc. Driver should init 416 * another TFD, based on win size, etc. Driver should init
418 * this bit to "1" for aggregation mode, or "0" for non-agg. 417 * this bit to "1" for aggregation mode, or "0" for non-agg.
419 * 4-1: Tx FIFO to use (range 0-7). 418 * 4-1: Tx FIFO to use (range 0-7).
420 * 0: Queue is active (1), not active (0). 419 * 0: Queue is active (1), not active (0).
@@ -423,18 +422,18 @@
423 * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled 422 * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled
424 * via SCD_QUEUECHAIN_SEL. 423 * via SCD_QUEUECHAIN_SEL.
425 */ 424 */
426#define IWL49_SCD_QUEUE_STATUS_BITS(x)\ 425#define IL49_SCD_QUEUE_STATUS_BITS(x)\
427 (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4) 426 (IL49_SCD_START_OFFSET + 0x104 + (x) * 4)
428 427
429/* Bit field positions */ 428/* Bit field positions */
430#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0) 429#define IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
431#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1) 430#define IL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
432#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5) 431#define IL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
433#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8) 432#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
434 433
435/* Write masks */ 434/* Write masks */
436#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10) 435#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
437#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00) 436#define IL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
438 437
439/** 438/**
440 * 4965 internal SRAM structures for scheduler, shared with driver ... 439 * 4965 internal SRAM structures for scheduler, shared with driver ...
@@ -460,7 +459,7 @@
460 * each queue's entry as follows: 459 * each queue's entry as follows:
461 * 460 *
462 * LS Dword bit fields: 461 * LS Dword bit fields:
463 * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64. 462 * 0-06: Max Tx win size for Scheduler-ACK. Driver should init to 64.
464 * 463 *
465 * MS Dword bit fields: 464 * MS Dword bit fields:
466 * 16-22: Frame limit. Driver should init to 10 (0xa). 465 * 16-22: Frame limit. Driver should init to 10 (0xa).
@@ -470,14 +469,14 @@
470 * Init must be done after driver receives "Alive" response from 4965 uCode, 469 * Init must be done after driver receives "Alive" response from 4965 uCode,
471 * and when setting up queue for aggregation. 470 * and when setting up queue for aggregation.
472 */ 471 */
473#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380 472#define IL49_SCD_CONTEXT_DATA_OFFSET 0x380
474#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \ 473#define IL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
475 (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8)) 474 (IL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
476 475
477#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0) 476#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
478#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F) 477#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
479#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) 478#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
480#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) 479#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
481 480
482/* 481/*
483 * Tx Status Bitmap 482 * Tx Status Bitmap
@@ -486,7 +485,7 @@
486 * "Alive" notification from uCode. Area is used only by device itself; 485 * "Alive" notification from uCode. Area is used only by device itself;
487 * no other support (besides clearing) is required from driver. 486 * no other support (besides clearing) is required from driver.
488 */ 487 */
489#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400 488#define IL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
490 489
491/* 490/*
492 * RAxTID to queue translation mapping. 491 * RAxTID to queue translation mapping.
@@ -494,7 +493,7 @@
494 * When queue is in Scheduler-ACK mode, frames placed in a that queue must be 493 * When queue is in Scheduler-ACK mode, frames placed in a that queue must be
495 * for only one combination of receiver address (RA) and traffic ID (TID), i.e. 494 * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
496 * one QOS priority level destined for one station (for this wireless link, 495 * one QOS priority level destined for one station (for this wireless link,
497 * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit 496 * not final destination). The SCD_TRANSLATE_TBL area provides 16 16-bit
498 * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK 497 * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK
499 * mode, the device ignores the mapping value. 498 * mode, the device ignores the mapping value.
500 * 499 *
@@ -508,16 +507,16 @@
508 * must read a dword-aligned value from device SRAM, replace the 16-bit map 507 * must read a dword-aligned value from device SRAM, replace the 16-bit map
509 * value of interest, and write the dword value back into device SRAM. 508 * value of interest, and write the dword value back into device SRAM.
510 */ 509 */
511#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500 510#define IL49_SCD_TRANSLATE_TBL_OFFSET 0x500
512 511
513/* Find translation table dword to read/write for given queue */ 512/* Find translation table dword to read/write for given queue */
514#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \ 513#define IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
515 ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc) 514 ((IL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
516 515
517#define IWL_SCD_TXFIFO_POS_TID (0) 516#define IL_SCD_TXFIFO_POS_TID (0)
518#define IWL_SCD_TXFIFO_POS_RA (4) 517#define IL_SCD_TXFIFO_POS_RA (4)
519#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF) 518#define IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
520 519
521/*********************** END TX SCHEDULER *************************************/ 520/*********************** END TX SCHEDULER *************************************/
522 521
523#endif /* __iwl_legacy_prph_h__ */ 522#endif /* __il_prph_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index e6a02e09ee18..a1a95d5f3923 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -790,6 +790,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
790 iwl_rx_reply_tx_agg(priv, tx_resp); 790 iwl_rx_reply_tx_agg(priv, tx_resp);
791 791
792 if (tx_resp->frame_count == 1) { 792 if (tx_resp->frame_count == 1) {
793 IWL_DEBUG_TX_REPLY(priv, "Q %d, ssn %d", txq_id, ssn);
793 __skb_queue_head_init(&skbs); 794 __skb_queue_head_init(&skbs);
794 /*we can free until ssn % q.n_bd not inclusive */ 795 /*we can free until ssn % q.n_bd not inclusive */
795 iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id, 796 iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
@@ -920,11 +921,9 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
920 ba_resp->sta_id); 921 ba_resp->sta_id);
921 IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, " 922 IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
922 "scd_flow = %d, scd_ssn = %d\n", 923 "scd_flow = %d, scd_ssn = %d\n",
923 ba_resp->tid, 924 ba_resp->tid, ba_resp->seq_ctl,
924 ba_resp->seq_ctl,
925 (unsigned long long)le64_to_cpu(ba_resp->bitmap), 925 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
926 ba_resp->scd_flow, 926 scd_flow, ba_resp_scd_ssn);
927 ba_resp->scd_ssn);
928 927
929 /* Mark that the expected block-ack response arrived */ 928 /* Mark that the expected block-ack response arrived */
930 agg->wait_for_ba = false; 929 agg->wait_for_ba = false;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 40ef97bac1aa..44a7bdd7ccfd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -47,20 +47,21 @@ do { \
47} while (0) 47} while (0)
48 48
49#ifdef CONFIG_IWLWIFI_DEBUG 49#ifdef CONFIG_IWLWIFI_DEBUG
50#define IWL_DEBUG(m, level, fmt, args...) \ 50#define IWL_DEBUG(m, level, fmt, ...) \
51do { \ 51do { \
52 if (iwl_get_debug_level((m)->shrd) & (level)) \ 52 if (iwl_get_debug_level((m)->shrd) & (level)) \
53 dev_printk(KERN_ERR, bus(m)->dev, \ 53 dev_err(bus(m)->dev, "%c %s " fmt, \
54 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \ 54 in_interrupt() ? 'I' : 'U', __func__, \
55 __func__ , ## args); \ 55 ##__VA_ARGS__); \
56} while (0) 56} while (0)
57 57
58#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \ 58#define IWL_DEBUG_LIMIT(m, level, fmt, ...) \
59do { \ 59do { \
60 if (iwl_get_debug_level((m)->shrd) & (level) && net_ratelimit())\ 60 if (iwl_get_debug_level((m)->shrd) & (level) && \
61 dev_printk(KERN_ERR, bus(m)->dev, \ 61 net_ratelimit()) \
62 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \ 62 dev_err(bus(m)->dev, "%c %s " fmt, \
63 __func__ , ## args); \ 63 in_interrupt() ? 'I' : 'U', __func__, \
64 ##__VA_ARGS__); \
64} while (0) 65} while (0)
65 66
66#define iwl_print_hex_dump(m, level, p, len) \ 67#define iwl_print_hex_dump(m, level, p, len) \
@@ -70,14 +71,18 @@ do { \
70 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ 71 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
71} while (0) 72} while (0)
72 73
73#define IWL_DEBUG_QUIET_RFKILL(p, fmt, args...) \ 74#define IWL_DEBUG_QUIET_RFKILL(p, fmt, ...) \
74do { \ 75do { \
75 if (!iwl_is_rfkill(p->shrd)) \ 76 if (!iwl_is_rfkill(p->shrd)) \
76 dev_printk(KERN_ERR, bus(p)->dev, "%c %s " fmt, \ 77 dev_err(bus(p)->dev, "%s%c %s " fmt, \
77 (in_interrupt() ? 'I' : 'U'), __func__ , ##args); \ 78 "", \
78 else if (iwl_get_debug_level(p->shrd) & IWL_DL_RADIO) \ 79 in_interrupt() ? 'I' : 'U', __func__, \
79 dev_printk(KERN_ERR, bus(p)->dev, "(RFKILL) %c %s " fmt, \ 80 ##__VA_ARGS__); \
80 (in_interrupt() ? 'I' : 'U'), __func__ , ##args); \ 81 else if (iwl_get_debug_level(p->shrd) & IWL_DL_RADIO) \
82 dev_err(bus(p)->dev, "%s%c %s " fmt, \
83 "(RFKILL) ", \
84 in_interrupt() ? 'I' : 'U', __func__, \
85 ##__VA_ARGS__); \
81} while (0) 86} while (0)
82 87
83#else 88#else
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 3ffa8e62b856..3464cad7e38c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -143,7 +143,7 @@ u32 iwl_read_direct32(struct iwl_bus *bus, u32 reg)
143 143
144 spin_lock_irqsave(&bus->reg_lock, flags); 144 spin_lock_irqsave(&bus->reg_lock, flags);
145 iwl_grab_nic_access(bus); 145 iwl_grab_nic_access(bus);
146 value = iwl_read32(bus(bus), reg); 146 value = iwl_read32(bus, reg);
147 iwl_release_nic_access(bus); 147 iwl_release_nic_access(bus);
148 spin_unlock_irqrestore(&bus->reg_lock, flags); 148 spin_unlock_irqrestore(&bus->reg_lock, flags);
149 149
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
index afaaa2a51b96..5a384b309b09 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
@@ -354,6 +354,11 @@ static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
354 txq->swq_id = (hwq << 2) | ac; 354 txq->swq_id = (hwq << 2) | ac;
355} 355}
356 356
357static inline u8 iwl_get_queue_ac(struct iwl_tx_queue *txq)
358{
359 return txq->swq_id & 0x3;
360}
361
357static inline void iwl_wake_queue(struct iwl_trans *trans, 362static inline void iwl_wake_queue(struct iwl_trans *trans,
358 struct iwl_tx_queue *txq, const char *msg) 363 struct iwl_tx_queue *txq, const char *msg)
359{ 364{
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index 6dba1515023c..79331fb10aa5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -559,7 +559,6 @@ int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
559 tid_data->agg.txq_id = txq_id; 559 tid_data->agg.txq_id = txq_id;
560 iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id); 560 iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);
561 561
562 tid_data = &trans->shrd->tid_data[sta_id][tid];
563 if (tid_data->tfds_in_queue == 0) { 562 if (tid_data->tfds_in_queue == 0) {
564 IWL_DEBUG_TX_QUEUES(trans, "HW queue is empty\n"); 563 IWL_DEBUG_TX_QUEUES(trans, "HW queue is empty\n");
565 tid_data->agg.state = IWL_AGG_ON; 564 tid_data->agg.state = IWL_AGG_ON;
@@ -1121,9 +1120,6 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
1121 return 0; 1120 return 0;
1122 } 1121 }
1123 1122
1124 IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
1125 q->read_ptr, index);
1126
1127 if (WARN_ON(!skb_queue_empty(skbs))) 1123 if (WARN_ON(!skb_queue_empty(skbs)))
1128 return 0; 1124 return 0;
1129 1125
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 93c4f56ac408..5954fdfd60dd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -1351,9 +1351,9 @@ static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
1351 } 1351 }
1352 1352
1353 if (txq->q.read_ptr != tfd_num) { 1353 if (txq->q.read_ptr != tfd_num) {
1354 IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim " 1354 IWL_DEBUG_TX_REPLY(trans, "[Q %d | AC %d] %d -> %d (%d)\n",
1355 "scd_ssn=%d idx=%d txq=%d swq=%d\n", 1355 txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr,
1356 ssn , tfd_num, txq_id, txq->swq_id); 1356 tfd_num, ssn);
1357 freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs); 1357 freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
1358 if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond) 1358 if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond)
1359 iwl_wake_queue(trans, txq, "Packets reclaimed"); 1359 iwl_wake_queue(trans, txq, "Packets reclaimed");
@@ -1516,8 +1516,12 @@ static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
1516 if (time_after(jiffies, timeout)) { 1516 if (time_after(jiffies, timeout)) {
1517 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id, 1517 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id,
1518 hw_params(trans).wd_timeout); 1518 hw_params(trans).wd_timeout);
1519 IWL_ERR(trans, "Current read_ptr %d write_ptr %d\n", 1519 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
1520 q->read_ptr, q->write_ptr); 1520 q->read_ptr, q->write_ptr);
1521 IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
1522 iwl_read_prph(bus(trans), SCD_QUEUE_RDPTR(cnt))
1523 & (TFD_QUEUE_SIZE_MAX - 1),
1524 iwl_read_prph(bus(trans), SCD_QUEUE_WRPTR(cnt)));
1521 return 1; 1525 return 1;
1522 } 1526 }
1523 1527
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 885ddc1c4fed..f955b2d66ed6 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -13,13 +13,14 @@ static void lbs_ethtool_get_drvinfo(struct net_device *dev,
13{ 13{
14 struct lbs_private *priv = dev->ml_priv; 14 struct lbs_private *priv = dev->ml_priv;
15 15
16 snprintf(info->fw_version, 32, "%u.%u.%u.p%u", 16 snprintf(info->fw_version, sizeof(info->fw_version),
17 "%u.%u.%u.p%u",
17 priv->fwrelease >> 24 & 0xff, 18 priv->fwrelease >> 24 & 0xff,
18 priv->fwrelease >> 16 & 0xff, 19 priv->fwrelease >> 16 & 0xff,
19 priv->fwrelease >> 8 & 0xff, 20 priv->fwrelease >> 8 & 0xff,
20 priv->fwrelease & 0xff); 21 priv->fwrelease & 0xff);
21 strcpy(info->driver, "libertas"); 22 strlcpy(info->driver, "libertas", sizeof(info->driver));
22 strcpy(info->version, lbs_driver_version); 23 strlcpy(info->version, lbs_driver_version, sizeof(info->version));
23} 24}
24 25
25/* 26/*
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index 8f2797aa0c60..2a078cea830a 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -10,12 +10,12 @@ config MWIFIEX
10 mwifiex. 10 mwifiex.
11 11
12config MWIFIEX_SDIO 12config MWIFIEX_SDIO
13 tristate "Marvell WiFi-Ex Driver for SD8787" 13 tristate "Marvell WiFi-Ex Driver for SD8787/SD8797"
14 depends on MWIFIEX && MMC 14 depends on MWIFIEX && MMC
15 select FW_LOADER 15 select FW_LOADER
16 ---help--- 16 ---help---
17 This adds support for wireless adapters based on Marvell 17 This adds support for wireless adapters based on Marvell
18 8787 chipset with SDIO interface. 18 8787/8797 chipsets with SDIO interface.
19 19
20 If you choose to build it as a module, it will be called 20 If you choose to build it as a module, it will be called
21 mwifiex_sdio. 21 mwifiex_sdio.
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index f2e6de03805c..1782a77f15dc 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -75,18 +75,32 @@ static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 };
75 * This function maps an index in supported rates table into 75 * This function maps an index in supported rates table into
76 * the corresponding data rate. 76 * the corresponding data rate.
77 */ 77 */
78u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info) 78u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index,
79 u8 ht_info)
79{ 80{
80 u16 mcs_rate[4][8] = { 81 /*
81 {0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e} 82 * For every mcs_rate line, the first 8 bytes are for stream 1x1,
82 , /* LG 40M */ 83 * and all 16 bytes are for stream 2x2.
83 {0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c} 84 */
84 , /* SG 40M */ 85 u16 mcs_rate[4][16] = {
85 {0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82} 86 /* LGI 40M */
86 , /* LG 20M */ 87 { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
87 {0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90} 88 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
88 }; /* SG 20M */ 89
89 90 /* SGI 40M */
91 { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
92 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
93
94 /* LGI 20M */
95 { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
96 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
97
98 /* SGI 20M */
99 { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
100 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
101 };
102 u32 mcs_num_supp =
103 (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) ? 16 : 8;
90 u32 rate; 104 u32 rate;
91 105
92 if (ht_info & BIT(0)) { 106 if (ht_info & BIT(0)) {
@@ -95,7 +109,7 @@ u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info)
95 rate = 0x0D; /* MCS 32 SGI rate */ 109 rate = 0x0D; /* MCS 32 SGI rate */
96 else 110 else
97 rate = 0x0C; /* MCS 32 LGI rate */ 111 rate = 0x0C; /* MCS 32 LGI rate */
98 } else if (index < 8) { 112 } else if (index < mcs_num_supp) {
99 if (ht_info & BIT(1)) { 113 if (ht_info & BIT(1)) {
100 if (ht_info & BIT(2)) 114 if (ht_info & BIT(2))
101 /* SGI, 40M */ 115 /* SGI, 40M */
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 35cb29cbd96e..62b863907698 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -165,6 +165,7 @@ enum MWIFIEX_802_11_WEP_STATUS {
165 165
166#define GET_RXMCSSUPP(DevMCSSupported) (DevMCSSupported & 0x0f) 166#define GET_RXMCSSUPP(DevMCSSupported) (DevMCSSupported & 0x0f)
167#define SETHT_MCS32(x) (x[4] |= 1) 167#define SETHT_MCS32(x) (x[4] |= 1)
168#define HT_STREAM_2X2 0x22
168 169
169#define SET_SECONDARYCHAN(RadioType, SECCHAN) (RadioType |= (SECCHAN << 4)) 170#define SET_SECONDARYCHAN(RadioType, SECCHAN) (RadioType |= (SECCHAN << 4))
170 171
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 30f138b6fa4c..3861a617c0e1 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -775,7 +775,8 @@ struct mwifiex_chan_freq_power *
775struct mwifiex_chan_freq_power *mwifiex_get_cfp_by_band_and_freq_from_cfg80211( 775struct mwifiex_chan_freq_power *mwifiex_get_cfp_by_band_and_freq_from_cfg80211(
776 struct mwifiex_private *priv, 776 struct mwifiex_private *priv,
777 u8 band, u32 freq); 777 u8 band, u32 freq);
778u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info); 778u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index,
779 u8 ht_info);
779u32 mwifiex_find_freq_from_band_chan(u8, u8); 780u32 mwifiex_find_freq_from_band_chan(u8, u8);
780int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask, 781int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask,
781 u8 **buffer); 782 u8 **buffer);
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index ffaf3f3a57df..702452b505c3 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -256,10 +256,13 @@ static int mwifiex_sdio_resume(struct device *dev)
256 256
257/* Device ID for SD8787 */ 257/* Device ID for SD8787 */
258#define SDIO_DEVICE_ID_MARVELL_8787 (0x9119) 258#define SDIO_DEVICE_ID_MARVELL_8787 (0x9119)
259/* Device ID for SD8797 */
260#define SDIO_DEVICE_ID_MARVELL_8797 (0x9129)
259 261
260/* WLAN IDs */ 262/* WLAN IDs */
261static const struct sdio_device_id mwifiex_ids[] = { 263static const struct sdio_device_id mwifiex_ids[] = {
262 {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787)}, 264 {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787)},
265 {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797)},
263 {}, 266 {},
264}; 267};
265 268
@@ -1573,7 +1576,16 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
1573 sdio_set_drvdata(func, card); 1576 sdio_set_drvdata(func, card);
1574 1577
1575 adapter->dev = &func->dev; 1578 adapter->dev = &func->dev;
1576 strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME); 1579
1580 switch (func->device) {
1581 case SDIO_DEVICE_ID_MARVELL_8797:
1582 strcpy(adapter->fw_name, SD8797_DEFAULT_FW_NAME);
1583 break;
1584 case SDIO_DEVICE_ID_MARVELL_8787:
1585 default:
1586 strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME);
1587 break;
1588 }
1577 1589
1578 return 0; 1590 return 0;
1579 1591
@@ -1774,4 +1786,5 @@ MODULE_AUTHOR("Marvell International Ltd.");
1774MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION); 1786MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION);
1775MODULE_VERSION(SDIO_VERSION); 1787MODULE_VERSION(SDIO_VERSION);
1776MODULE_LICENSE("GPL v2"); 1788MODULE_LICENSE("GPL v2");
1777MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin"); 1789MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME);
1790MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 3f711801e58a..a3fb322205b0 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -29,6 +29,7 @@
29#include "main.h" 29#include "main.h"
30 30
31#define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin" 31#define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin"
32#define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin"
32 33
33#define BLOCK_MODE 1 34#define BLOCK_MODE 1
34#define BYTE_MODE 0 35#define BYTE_MODE 0
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 7a16b0c417af..e812db8b695c 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -508,7 +508,7 @@ static int mwifiex_ret_802_11_tx_rate_query(struct mwifiex_private *priv,
508 priv->tx_htinfo = resp->params.tx_rate.ht_info; 508 priv->tx_htinfo = resp->params.tx_rate.ht_info;
509 if (!priv->is_data_rate_auto) 509 if (!priv->is_data_rate_auto)
510 priv->data_rate = 510 priv->data_rate =
511 mwifiex_index_to_data_rate(priv->tx_rate, 511 mwifiex_index_to_data_rate(priv, priv->tx_rate,
512 priv->tx_htinfo); 512 priv->tx_htinfo);
513 513
514 return 0; 514 return 0;
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index ea4a29b7e331..4b6f5539657d 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -832,8 +832,8 @@ int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
832 832
833 if (!ret) { 833 if (!ret) {
834 if (rate->is_rate_auto) 834 if (rate->is_rate_auto)
835 rate->rate = mwifiex_index_to_data_rate(priv->tx_rate, 835 rate->rate = mwifiex_index_to_data_rate(priv,
836 priv->tx_htinfo); 836 priv->tx_rate, priv->tx_htinfo);
837 else 837 else
838 rate->rate = priv->data_rate; 838 rate->rate = priv->data_rate;
839 } else { 839 } else {
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 78d0d6988553..2d5cf5ba319b 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -581,11 +581,7 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
581 struct p54s_priv *priv = dev->priv; 581 struct p54s_priv *priv = dev->priv;
582 unsigned long flags; 582 unsigned long flags;
583 583
584 if (mutex_lock_interruptible(&priv->mutex)) { 584 mutex_lock(&priv->mutex);
585 /* FIXME: how to handle this error? */
586 return;
587 }
588
589 WARN_ON(priv->fw_state != FW_STATE_READY); 585 WARN_ON(priv->fw_state != FW_STATE_READY);
590 586
591 p54spi_power_off(priv); 587 p54spi_power_off(priv);
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 6ed9c323e3cb..42b97bc38477 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -242,7 +242,7 @@ void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb)
242 242
243 skb_unlink(skb, &priv->tx_queue); 243 skb_unlink(skb, &priv->tx_queue);
244 p54_tx_qos_accounting_free(priv, skb); 244 p54_tx_qos_accounting_free(priv, skb);
245 dev_kfree_skb_any(skb); 245 ieee80211_free_txskb(dev, skb);
246} 246}
247EXPORT_SYMBOL_GPL(p54_free_skb); 247EXPORT_SYMBOL_GPL(p54_free_skb);
248 248
@@ -788,7 +788,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
788 &hdr_flags, &aid, &burst_allowed); 788 &hdr_flags, &aid, &burst_allowed);
789 789
790 if (p54_tx_qos_accounting_alloc(priv, skb, queue)) { 790 if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
791 dev_kfree_skb_any(skb); 791 ieee80211_free_txskb(dev, skb);
792 return; 792 return;
793 } 793 }
794 794
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 5d0f61508a2e..8a3cf4fe376f 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -793,8 +793,8 @@ islpci_set_multicast_list(struct net_device *dev)
793static void islpci_ethtool_get_drvinfo(struct net_device *dev, 793static void islpci_ethtool_get_drvinfo(struct net_device *dev,
794 struct ethtool_drvinfo *info) 794 struct ethtool_drvinfo *info)
795{ 795{
796 strcpy(info->driver, DRV_NAME); 796 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
797 strcpy(info->version, DRV_VERSION); 797 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
798} 798}
799 799
800static const struct ethtool_ops islpci_ethtool_ops = { 800static const struct ethtool_ops islpci_ethtool_ops = {
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 620e3c0e88e0..3802c31fefcd 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -244,6 +244,10 @@ enum ndis_80211_power_mode {
244 NDIS_80211_POWER_MODE_FAST_PSP, 244 NDIS_80211_POWER_MODE_FAST_PSP,
245}; 245};
246 246
247enum ndis_80211_pmkid_cand_list_flag_bits {
248 NDIS_80211_PMKID_CAND_PREAUTH = cpu_to_le32(1 << 0)
249};
250
247struct ndis_80211_auth_request { 251struct ndis_80211_auth_request {
248 __le32 length; 252 __le32 length;
249 u8 bssid[6]; 253 u8 bssid[6];
@@ -387,19 +391,17 @@ struct ndis_80211_capability {
387struct ndis_80211_bssid_info { 391struct ndis_80211_bssid_info {
388 u8 bssid[6]; 392 u8 bssid[6];
389 u8 pmkid[16]; 393 u8 pmkid[16];
390}; 394} __packed;
391 395
392struct ndis_80211_pmkid { 396struct ndis_80211_pmkid {
393 __le32 length; 397 __le32 length;
394 __le32 bssid_info_count; 398 __le32 bssid_info_count;
395 struct ndis_80211_bssid_info bssid_info[0]; 399 struct ndis_80211_bssid_info bssid_info[0];
396}; 400} __packed;
397 401
398/* 402/*
399 * private data 403 * private data
400 */ 404 */
401#define NET_TYPE_11FB 0
402
403#define CAP_MODE_80211A 1 405#define CAP_MODE_80211A 1
404#define CAP_MODE_80211B 2 406#define CAP_MODE_80211B 2
405#define CAP_MODE_80211G 4 407#define CAP_MODE_80211G 4
@@ -1347,6 +1349,32 @@ static int set_channel(struct usbnet *usbdev, int channel)
1347 return ret; 1349 return ret;
1348} 1350}
1349 1351
1352static struct ieee80211_channel *get_current_channel(struct usbnet *usbdev,
1353 u16 *beacon_interval)
1354{
1355 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1356 struct ieee80211_channel *channel;
1357 struct ndis_80211_conf config;
1358 int len, ret;
1359
1360 /* Get channel and beacon interval */
1361 len = sizeof(config);
1362 ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
1363 netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n",
1364 __func__, ret);
1365 if (ret < 0)
1366 return NULL;
1367
1368 channel = ieee80211_get_channel(priv->wdev.wiphy,
1369 KHZ_TO_MHZ(le32_to_cpu(config.ds_config)));
1370 if (!channel)
1371 return NULL;
1372
1373 if (beacon_interval)
1374 *beacon_interval = le16_to_cpu(config.beacon_period);
1375 return channel;
1376}
1377
1350/* index must be 0 - N, as per NDIS */ 1378/* index must be 0 - N, as per NDIS */
1351static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len, 1379static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
1352 int index) 1380 int index)
@@ -2650,13 +2678,12 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
2650{ 2678{
2651 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); 2679 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
2652 struct ieee80211_channel *channel; 2680 struct ieee80211_channel *channel;
2653 struct ndis_80211_conf config;
2654 struct ndis_80211_ssid ssid; 2681 struct ndis_80211_ssid ssid;
2655 struct cfg80211_bss *bss; 2682 struct cfg80211_bss *bss;
2656 s32 signal; 2683 s32 signal;
2657 u64 timestamp; 2684 u64 timestamp;
2658 u16 capability; 2685 u16 capability;
2659 u16 beacon_interval; 2686 u16 beacon_interval = 0;
2660 __le32 rssi; 2687 __le32 rssi;
2661 u8 ie_buf[34]; 2688 u8 ie_buf[34];
2662 int len, ret, ie_len; 2689 int len, ret, ie_len;
@@ -2681,22 +2708,10 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
2681 } 2708 }
2682 2709
2683 /* Get channel and beacon interval */ 2710 /* Get channel and beacon interval */
2684 len = sizeof(config); 2711 channel = get_current_channel(usbdev, &beacon_interval);
2685 ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len); 2712 if (!channel) {
2686 netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n", 2713 netdev_warn(usbdev->net, "%s(): could not get channel.\n",
2687 __func__, ret); 2714 __func__);
2688 if (ret >= 0) {
2689 beacon_interval = le16_to_cpu(config.beacon_period);
2690 channel = ieee80211_get_channel(priv->wdev.wiphy,
2691 KHZ_TO_MHZ(le32_to_cpu(config.ds_config)));
2692 if (!channel) {
2693 netdev_warn(usbdev->net, "%s(): could not get channel."
2694 "\n", __func__);
2695 return;
2696 }
2697 } else {
2698 netdev_warn(usbdev->net, "%s(): could not get configuration.\n",
2699 __func__);
2700 return; 2715 return;
2701 } 2716 }
2702 2717
@@ -2841,8 +2856,9 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
2841 req_ie_len, resp_ie, 2856 req_ie_len, resp_ie,
2842 resp_ie_len, 0, GFP_KERNEL); 2857 resp_ie_len, 0, GFP_KERNEL);
2843 else 2858 else
2844 cfg80211_roamed(usbdev->net, NULL, bssid, 2859 cfg80211_roamed(usbdev->net,
2845 req_ie, req_ie_len, 2860 get_current_channel(usbdev, NULL),
2861 bssid, req_ie, req_ie_len,
2846 resp_ie, resp_ie_len, GFP_KERNEL); 2862 resp_ie, resp_ie_len, GFP_KERNEL);
2847 } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC) 2863 } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
2848 cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL); 2864 cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL);
@@ -3008,25 +3024,13 @@ static void rndis_wlan_pmkid_cand_list_indication(struct usbnet *usbdev,
3008 for (i = 0; i < le32_to_cpu(cand_list->num_candidates); i++) { 3024 for (i = 0; i < le32_to_cpu(cand_list->num_candidates); i++) {
3009 struct ndis_80211_pmkid_candidate *cand = 3025 struct ndis_80211_pmkid_candidate *cand =
3010 &cand_list->candidate_list[i]; 3026 &cand_list->candidate_list[i];
3027 bool preauth = !!(cand->flags & NDIS_80211_PMKID_CAND_PREAUTH);
3011 3028
3012 netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, bssid: %pM\n", 3029 netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, preauth: %d, bssid: %pM\n",
3013 i, le32_to_cpu(cand->flags), cand->bssid); 3030 i, le32_to_cpu(cand->flags), preauth, cand->bssid);
3014
3015#if 0
3016 struct iw_pmkid_cand pcand;
3017 union iwreq_data wrqu;
3018 3031
3019 memset(&pcand, 0, sizeof(pcand)); 3032 cfg80211_pmksa_candidate_notify(usbdev->net, i, cand->bssid,
3020 if (le32_to_cpu(cand->flags) & 0x01) 3033 preauth, GFP_ATOMIC);
3021 pcand.flags |= IW_PMKID_CAND_PREAUTH;
3022 pcand.index = i;
3023 memcpy(pcand.bssid.sa_data, cand->bssid, ETH_ALEN);
3024
3025 memset(&wrqu, 0, sizeof(wrqu));
3026 wrqu.data.length = sizeof(pcand);
3027 wireless_send_event(usbdev->net, IWEVPMKIDCAND, &wrqu,
3028 (u8 *)&pcand);
3029#endif
3030 } 3034 }
3031} 3035}
3032 3036
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index b4ce93436d2e..a13ecfce4825 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -345,9 +345,9 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
345 if (is_valid_ether_addr(rtlefuse->dev_addr)) { 345 if (is_valid_ether_addr(rtlefuse->dev_addr)) {
346 SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr); 346 SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr);
347 } else { 347 } else {
348 u8 rtlmac[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 }; 348 u8 rtlmac1[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
349 get_random_bytes((rtlmac + (ETH_ALEN - 1)), 1); 349 get_random_bytes((rtlmac1 + (ETH_ALEN - 1)), 1);
350 SET_IEEE80211_PERM_ADDR(hw, rtlmac); 350 SET_IEEE80211_PERM_ADDR(hw, rtlmac1);
351 } 351 }
352 352
353} 353}
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 4ae905983d0d..f66b5757f6b9 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -76,7 +76,7 @@ enum ap_peer {
76 SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val) 76 SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val)
77 77
78#define SET_80211_PS_POLL_AID(_hdr, _val) \ 78#define SET_80211_PS_POLL_AID(_hdr, _val) \
79 (*(u16 *)((u8 *)(_hdr) + 2) = le16_to_cpu(_val)) 79 (*(u16 *)((u8 *)(_hdr) + 2) = _val)
80#define SET_80211_PS_POLL_BSSID(_hdr, _val) \ 80#define SET_80211_PS_POLL_BSSID(_hdr, _val) \
81 memcpy(((u8 *)(_hdr)) + 4, (u8 *)(_val), ETH_ALEN) 81 memcpy(((u8 *)(_hdr)) + 4, (u8 *)(_val), ETH_ALEN)
82#define SET_80211_PS_POLL_TA(_hdr, _val) \ 82#define SET_80211_PS_POLL_TA(_hdr, _val) \
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index eb61061821e4..b6683a247b51 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -890,9 +890,6 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
890 if (rtlpriv->rtlhal.earlymode_enable) 890 if (rtlpriv->rtlhal.earlymode_enable)
891 tasklet_schedule(&rtlpriv->works.irq_tasklet); 891 tasklet_schedule(&rtlpriv->works.irq_tasklet);
892 892
893 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
894 return IRQ_HANDLED;
895
896done: 893done:
897 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); 894 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
898 return IRQ_HANDLED; 895 return IRQ_HANDLED;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 950c65a15b8a..fa393dfe136c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -73,6 +73,34 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
73 } 73 }
74} 74}
75 75
76static void rtl_block_fw_writeN(struct ieee80211_hw *hw, const u8 *buffer,
77 u32 size)
78{
79 struct rtl_priv *rtlpriv = rtl_priv(hw);
80 u32 blockSize = REALTEK_USB_VENQT_MAX_BUF_SIZE - 20;
81 u8 *bufferPtr = (u8 *) buffer;
82 u32 i, offset, blockCount, remainSize;
83
84 blockCount = size / blockSize;
85 remainSize = size % blockSize;
86
87 for (i = 0; i < blockCount; i++) {
88 offset = i * blockSize;
89 rtlpriv->io.writeN_sync(rtlpriv,
90 (FW_8192C_START_ADDRESS + offset),
91 (void *)(bufferPtr + offset),
92 blockSize);
93 }
94
95 if (remainSize) {
96 offset = blockCount * blockSize;
97 rtlpriv->io.writeN_sync(rtlpriv,
98 (FW_8192C_START_ADDRESS + offset),
99 (void *)(bufferPtr + offset),
100 remainSize);
101 }
102}
103
76static void _rtl92c_fw_block_write(struct ieee80211_hw *hw, 104static void _rtl92c_fw_block_write(struct ieee80211_hw *hw,
77 const u8 *buffer, u32 size) 105 const u8 *buffer, u32 size)
78{ 106{
@@ -81,23 +109,30 @@ static void _rtl92c_fw_block_write(struct ieee80211_hw *hw,
81 u8 *bufferPtr = (u8 *) buffer; 109 u8 *bufferPtr = (u8 *) buffer;
82 u32 *pu4BytePtr = (u32 *) buffer; 110 u32 *pu4BytePtr = (u32 *) buffer;
83 u32 i, offset, blockCount, remainSize; 111 u32 i, offset, blockCount, remainSize;
112 u32 data;
84 113
114 if (rtlpriv->io.writeN_sync) {
115 rtl_block_fw_writeN(hw, buffer, size);
116 return;
117 }
85 blockCount = size / blockSize; 118 blockCount = size / blockSize;
86 remainSize = size % blockSize; 119 remainSize = size % blockSize;
120 if (remainSize) {
121 /* the last word is < 4 bytes - pad it with zeros */
122 for (i = 0; i < 4 - remainSize; i++)
123 *(bufferPtr + size + i) = 0;
124 blockCount++;
125 }
87 126
88 for (i = 0; i < blockCount; i++) { 127 for (i = 0; i < blockCount; i++) {
89 offset = i * blockSize; 128 offset = i * blockSize;
129 /* for big-endian platforms, the firmware data need to be byte
130 * swapped as it was read as a byte string and will be written
131 * as 32-bit dwords and byte swapped when written
132 */
133 data = le32_to_cpu(*(__le32 *)(pu4BytePtr + i));
90 rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset), 134 rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
91 *(pu4BytePtr + i)); 135 data);
92 }
93
94 if (remainSize) {
95 offset = blockCount * blockSize;
96 bufferPtr += offset;
97 for (i = 0; i < remainSize; i++) {
98 rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
99 offset + i), *(bufferPtr + i));
100 }
101 } 136 }
102} 137}
103 138
@@ -238,8 +273,9 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
238 if (IS_FW_HEADER_EXIST(pfwheader)) { 273 if (IS_FW_HEADER_EXIST(pfwheader)) {
239 RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, 274 RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
240 ("Firmware Version(%d), Signature(%#x),Size(%d)\n", 275 ("Firmware Version(%d), Signature(%#x),Size(%d)\n",
241 pfwheader->version, pfwheader->signature, 276 le16_to_cpu(pfwheader->version),
242 (uint)sizeof(struct rtl92c_firmware_header))); 277 le16_to_cpu(pfwheader->signature),
278 (uint)sizeof(struct rtl92c_firmware_header)));
243 279
244 pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header); 280 pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
245 fwsize = fwsize - sizeof(struct rtl92c_firmware_header); 281 fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
index 3d5823c12621..cec5a3a1cc53 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
@@ -32,32 +32,32 @@
32 32
33#define FW_8192C_SIZE 0x3000 33#define FW_8192C_SIZE 0x3000
34#define FW_8192C_START_ADDRESS 0x1000 34#define FW_8192C_START_ADDRESS 0x1000
35#define FW_8192C_END_ADDRESS 0x3FFF 35#define FW_8192C_END_ADDRESS 0x1FFF
36#define FW_8192C_PAGE_SIZE 4096 36#define FW_8192C_PAGE_SIZE 4096
37#define FW_8192C_POLLING_DELAY 5 37#define FW_8192C_POLLING_DELAY 5
38#define FW_8192C_POLLING_TIMEOUT_COUNT 100 38#define FW_8192C_POLLING_TIMEOUT_COUNT 100
39 39
40#define IS_FW_HEADER_EXIST(_pfwhdr) \ 40#define IS_FW_HEADER_EXIST(_pfwhdr) \
41 ((_pfwhdr->signature&0xFFF0) == 0x92C0 ||\ 41 ((le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x92C0 ||\
42 (_pfwhdr->signature&0xFFF0) == 0x88C0) 42 (le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x88C0)
43 43
44struct rtl92c_firmware_header { 44struct rtl92c_firmware_header {
45 u16 signature; 45 __le16 signature;
46 u8 category; 46 u8 category;
47 u8 function; 47 u8 function;
48 u16 version; 48 __le16 version;
49 u8 subversion; 49 u8 subversion;
50 u8 rsvd1; 50 u8 rsvd1;
51 u8 month; 51 u8 month;
52 u8 date; 52 u8 date;
53 u8 hour; 53 u8 hour;
54 u8 minute; 54 u8 minute;
55 u16 ramcodeSize; 55 __le16 ramcodeSize;
56 u16 rsvd2; 56 __le16 rsvd2;
57 u32 svnindex; 57 __le32 svnindex;
58 u32 rsvd3; 58 __le32 rsvd3;
59 u32 rsvd4; 59 __le32 rsvd4;
60 u32 rsvd5; 60 __le32 rsvd5;
61}; 61};
62 62
63enum rtl8192c_h2c_cmd { 63enum rtl8192c_h2c_cmd {
@@ -94,5 +94,6 @@ void rtl92c_firmware_selfreset(struct ieee80211_hw *hw);
94void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); 94void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
95void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); 95void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
96void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); 96void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
97void usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, void *data, u16 len);
97 98
98#endif 99#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 814c05df51e8..4ed973a3aa17 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -498,7 +498,7 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
498 } 498 }
499 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"), 499 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"),
500 hwinfo, HWSET_MAX_SIZE); 500 hwinfo, HWSET_MAX_SIZE);
501 eeprom_id = *((u16 *)&hwinfo[0]); 501 eeprom_id = le16_to_cpu(*((__le16 *)&hwinfo[0]));
502 if (eeprom_id != RTL8190_EEPROM_ID) { 502 if (eeprom_id != RTL8190_EEPROM_ID) {
503 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 503 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
504 ("EEPROM ID(%#x) is invalid!!\n", eeprom_id)); 504 ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
@@ -516,13 +516,14 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
516 pr_info("MAC address: %pM\n", rtlefuse->dev_addr); 516 pr_info("MAC address: %pM\n", rtlefuse->dev_addr);
517 _rtl92cu_read_txpower_info_from_hwpg(hw, 517 _rtl92cu_read_txpower_info_from_hwpg(hw,
518 rtlefuse->autoload_failflag, hwinfo); 518 rtlefuse->autoload_failflag, hwinfo);
519 rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID]; 519 rtlefuse->eeprom_vid = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VID]);
520 rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID]; 520 rtlefuse->eeprom_did = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_DID]);
521 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, 521 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
522 (" VID = 0x%02x PID = 0x%02x\n", 522 (" VID = 0x%02x PID = 0x%02x\n",
523 rtlefuse->eeprom_vid, rtlefuse->eeprom_did)); 523 rtlefuse->eeprom_vid, rtlefuse->eeprom_did));
524 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; 524 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
525 rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION]; 525 rtlefuse->eeprom_version =
526 le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VERSION]);
526 rtlefuse->txpwr_fromeprom = true; 527 rtlefuse->txpwr_fromeprom = true;
527 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID]; 528 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
528 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, 529 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 060a06f4a885..9e0c8fcdf90f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -84,6 +84,7 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
84 } 84 }
85 } 85 }
86 rtlhal->version = (enum version_8192c)chip_version; 86 rtlhal->version = (enum version_8192c)chip_version;
87 pr_info("rtl8192cu: Chip version 0x%x\n", chip_version);
87 switch (rtlhal->version) { 88 switch (rtlhal->version) {
88 case VERSION_NORMAL_TSMC_CHIP_92C_1T2R: 89 case VERSION_NORMAL_TSMC_CHIP_92C_1T2R:
89 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, 90 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index bc33b147f44f..b3cc7b949992 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -491,7 +491,7 @@ static void _rtl_tx_desc_checksum(u8 *txdesc)
491 SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0); 491 SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0);
492 for (index = 0; index < 16; index++) 492 for (index = 0; index < 16; index++)
493 checksum = checksum ^ (*(ptr + index)); 493 checksum = checksum ^ (*(ptr + index));
494 SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum); 494 SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, cpu_to_le16(checksum));
495} 495}
496 496
497void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, 497void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 54cb8a60514d..e956fa71d040 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -34,13 +34,14 @@
34#include "usb.h" 34#include "usb.h"
35#include "base.h" 35#include "base.h"
36#include "ps.h" 36#include "ps.h"
37#include "rtl8192c/fw_common.h"
37 38
38#define REALTEK_USB_VENQT_READ 0xC0 39#define REALTEK_USB_VENQT_READ 0xC0
39#define REALTEK_USB_VENQT_WRITE 0x40 40#define REALTEK_USB_VENQT_WRITE 0x40
40#define REALTEK_USB_VENQT_CMD_REQ 0x05 41#define REALTEK_USB_VENQT_CMD_REQ 0x05
41#define REALTEK_USB_VENQT_CMD_IDX 0x00 42#define REALTEK_USB_VENQT_CMD_IDX 0x00
42 43
43#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254 44#define MAX_USBCTRL_VENDORREQ_TIMES 10
44 45
45static void usbctrl_async_callback(struct urb *urb) 46static void usbctrl_async_callback(struct urb *urb)
46{ 47{
@@ -82,6 +83,7 @@ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
82 dr->wValue = cpu_to_le16(value); 83 dr->wValue = cpu_to_le16(value);
83 dr->wIndex = cpu_to_le16(index); 84 dr->wIndex = cpu_to_le16(index);
84 dr->wLength = cpu_to_le16(len); 85 dr->wLength = cpu_to_le16(len);
86 /* data are already in little-endian order */
85 memcpy(buf, pdata, len); 87 memcpy(buf, pdata, len);
86 usb_fill_control_urb(urb, udev, pipe, 88 usb_fill_control_urb(urb, udev, pipe,
87 (unsigned char *)dr, buf, len, 89 (unsigned char *)dr, buf, len,
@@ -100,16 +102,28 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
100 unsigned int pipe; 102 unsigned int pipe;
101 int status; 103 int status;
102 u8 reqtype; 104 u8 reqtype;
105 int vendorreq_times = 0;
106 static int count;
103 107
104 pipe = usb_rcvctrlpipe(udev, 0); /* read_in */ 108 pipe = usb_rcvctrlpipe(udev, 0); /* read_in */
105 reqtype = REALTEK_USB_VENQT_READ; 109 reqtype = REALTEK_USB_VENQT_READ;
106 110
107 status = usb_control_msg(udev, pipe, request, reqtype, value, index, 111 do {
108 pdata, len, 0); /* max. timeout */ 112 status = usb_control_msg(udev, pipe, request, reqtype, value,
113 index, pdata, len, 0); /*max. timeout*/
114 if (status < 0) {
115 /* firmware download is checksumed, don't retry */
116 if ((value >= FW_8192C_START_ADDRESS &&
117 value <= FW_8192C_END_ADDRESS))
118 break;
119 } else {
120 break;
121 }
122 } while (++vendorreq_times < MAX_USBCTRL_VENDORREQ_TIMES);
109 123
110 if (status < 0) 124 if (status < 0 && count++ < 4)
111 pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n", 125 pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n",
112 value, status, *(u32 *)pdata); 126 value, status, le32_to_cpu(*(u32 *)pdata));
113 return status; 127 return status;
114} 128}
115 129
@@ -129,7 +143,7 @@ static u32 _usb_read_sync(struct usb_device *udev, u32 addr, u16 len)
129 143
130 wvalue = (u16)addr; 144 wvalue = (u16)addr;
131 _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len); 145 _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
132 ret = *data; 146 ret = le32_to_cpu(*data);
133 kfree(data); 147 kfree(data);
134 return ret; 148 return ret;
135} 149}
@@ -161,12 +175,12 @@ static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val,
161 u8 request; 175 u8 request;
162 u16 wvalue; 176 u16 wvalue;
163 u16 index; 177 u16 index;
164 u32 data; 178 __le32 data;
165 179
166 request = REALTEK_USB_VENQT_CMD_REQ; 180 request = REALTEK_USB_VENQT_CMD_REQ;
167 index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */ 181 index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
168 wvalue = (u16)(addr&0x0000ffff); 182 wvalue = (u16)(addr&0x0000ffff);
169 data = val; 183 data = cpu_to_le32(val);
170 _usbctrl_vendorreq_async_write(udev, request, wvalue, index, &data, 184 _usbctrl_vendorreq_async_write(udev, request, wvalue, index, &data,
171 len); 185 len);
172} 186}
@@ -192,6 +206,30 @@ static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val)
192 _usb_write_async(to_usb_device(dev), addr, val, 4); 206 _usb_write_async(to_usb_device(dev), addr, val, 4);
193} 207}
194 208
209static void _usb_writeN_sync(struct rtl_priv *rtlpriv, u32 addr, void *data,
210 u16 len)
211{
212 struct device *dev = rtlpriv->io.dev;
213 struct usb_device *udev = to_usb_device(dev);
214 u8 request = REALTEK_USB_VENQT_CMD_REQ;
215 u8 reqtype = REALTEK_USB_VENQT_WRITE;
216 u16 wvalue;
217 u16 index = REALTEK_USB_VENQT_CMD_IDX;
218 int pipe = usb_sndctrlpipe(udev, 0); /* write_out */
219 u8 *buffer;
220 dma_addr_t dma_addr;
221
222 wvalue = (u16)(addr&0x0000ffff);
223 buffer = usb_alloc_coherent(udev, (size_t)len, GFP_ATOMIC, &dma_addr);
224 if (!buffer)
225 return;
226 memcpy(buffer, data, len);
227 usb_control_msg(udev, pipe, request, reqtype, wvalue,
228 index, buffer, len, 50);
229
230 usb_free_coherent(udev, (size_t)len, buffer, dma_addr);
231}
232
195static void _rtl_usb_io_handler_init(struct device *dev, 233static void _rtl_usb_io_handler_init(struct device *dev,
196 struct ieee80211_hw *hw) 234 struct ieee80211_hw *hw)
197{ 235{
@@ -205,6 +243,7 @@ static void _rtl_usb_io_handler_init(struct device *dev,
205 rtlpriv->io.read8_sync = _usb_read8_sync; 243 rtlpriv->io.read8_sync = _usb_read8_sync;
206 rtlpriv->io.read16_sync = _usb_read16_sync; 244 rtlpriv->io.read16_sync = _usb_read16_sync;
207 rtlpriv->io.read32_sync = _usb_read32_sync; 245 rtlpriv->io.read32_sync = _usb_read32_sync;
246 rtlpriv->io.writeN_sync = _usb_writeN_sync;
208} 247}
209 248
210static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw) 249static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 713c7ddba8eb..f3c132b55d42 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -63,6 +63,7 @@
63#define AC_MAX 4 63#define AC_MAX 4
64#define QOS_QUEUE_NUM 4 64#define QOS_QUEUE_NUM 4
65#define RTL_MAC80211_NUM_QUEUE 5 65#define RTL_MAC80211_NUM_QUEUE 5
66#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254
66 67
67#define QBSS_LOAD_SIZE 5 68#define QBSS_LOAD_SIZE 5
68#define MAX_WMMELE_LENGTH 64 69#define MAX_WMMELE_LENGTH 64
@@ -943,8 +944,10 @@ struct rtl_io {
943 unsigned long pci_base_addr; /*device I/O address */ 944 unsigned long pci_base_addr; /*device I/O address */
944 945
945 void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val); 946 void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val);
946 void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, __le16 val); 947 void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val);
947 void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, __le32 val); 948 void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val);
949 void (*writeN_sync) (struct rtl_priv *rtlpriv, u32 addr, void *buf,
950 u16 len);
948 951
949 u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr); 952 u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr);
950 u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr); 953 u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr);
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index f9261c253735..97bfebfcce90 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -1169,6 +1169,21 @@ enum nl80211_commands {
1169 * @NL80211_ATTR_PROBE_RESP: Probe Response template data. Contains the entire 1169 * @NL80211_ATTR_PROBE_RESP: Probe Response template data. Contains the entire
1170 * probe-response frame. The DA field in the 802.11 header is zero-ed out, 1170 * probe-response frame. The DA field in the 802.11 header is zero-ed out,
1171 * to be filled by the FW. 1171 * to be filled by the FW.
1172 * @NL80211_ATTR_DISABLE_HT: Force HT capable interfaces to disable
1173 * this feature. Currently, only supported in mac80211 drivers.
1174 * @NL80211_ATTR_HT_CAPABILITY_MASK: Specify which bits of the
1175 * ATTR_HT_CAPABILITY to which attention should be paid.
1176 * Currently, only mac80211 NICs support this feature.
1177 * The values that may be configured are:
1178 * MCS rates, MAX-AMSDU, HT-20-40 and HT_CAP_SGI_40
1179 * AMPDU density and AMPDU factor.
1180 * All values are treated as suggestions and may be ignored
1181 * by the driver as required. The actual values may be seen in
1182 * the station debugfs ht_caps file.
1183 *
1184 * @NL80211_ATTR_DFS_REGION: region for regulatory rules which this country
1185 * abides to when initiating radiation on DFS channels. A country maps
1186 * to one DFS region.
1172 * 1187 *
1173 * @NL80211_ATTR_MAX: highest attribute number currently defined 1188 * @NL80211_ATTR_MAX: highest attribute number currently defined
1174 * @__NL80211_ATTR_AFTER_LAST: internal use 1189 * @__NL80211_ATTR_AFTER_LAST: internal use
@@ -1408,6 +1423,11 @@ enum nl80211_attrs {
1408 1423
1409 NL80211_ATTR_PROBE_RESP, 1424 NL80211_ATTR_PROBE_RESP,
1410 1425
1426 NL80211_ATTR_DFS_REGION,
1427
1428 NL80211_ATTR_DISABLE_HT,
1429 NL80211_ATTR_HT_CAPABILITY_MASK,
1430
1411 /* add attributes here, update the policy in nl80211.c */ 1431 /* add attributes here, update the policy in nl80211.c */
1412 1432
1413 __NL80211_ATTR_AFTER_LAST, 1433 __NL80211_ATTR_AFTER_LAST,
@@ -1917,6 +1937,21 @@ enum nl80211_reg_rule_flags {
1917}; 1937};
1918 1938
1919/** 1939/**
1940 * enum nl80211_dfs_regions - regulatory DFS regions
1941 *
1942 * @NL80211_DFS_UNSET: Country has no DFS master region specified
1943 * @NL80211_DFS_FCC_: Country follows DFS master rules from FCC
1944 * @NL80211_DFS_FCC_: Country follows DFS master rules from ETSI
1945 * @NL80211_DFS_JP_: Country follows DFS master rules from JP/MKK/Telec
1946 */
1947enum nl80211_dfs_regions {
1948 NL80211_DFS_UNSET = 0,
1949 NL80211_DFS_FCC = 1,
1950 NL80211_DFS_ETSI = 2,
1951 NL80211_DFS_JP = 3,
1952};
1953
1954/**
1920 * enum nl80211_survey_info - survey information 1955 * enum nl80211_survey_info - survey information
1921 * 1956 *
1922 * These attribute types are used with %NL80211_ATTR_SURVEY_INFO 1957 * These attribute types are used with %NL80211_ATTR_SURVEY_INFO
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 8d7ba0961d3e..d5e18913f293 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1044,6 +1044,15 @@ struct cfg80211_auth_request {
1044}; 1044};
1045 1045
1046/** 1046/**
1047 * enum cfg80211_assoc_req_flags - Over-ride default behaviour in association.
1048 *
1049 * @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n)
1050 */
1051enum cfg80211_assoc_req_flags {
1052 ASSOC_REQ_DISABLE_HT = BIT(0),
1053};
1054
1055/**
1047 * struct cfg80211_assoc_request - (Re)Association request data 1056 * struct cfg80211_assoc_request - (Re)Association request data
1048 * 1057 *
1049 * This structure provides information needed to complete IEEE 802.11 1058 * This structure provides information needed to complete IEEE 802.11
@@ -1054,6 +1063,10 @@ struct cfg80211_auth_request {
1054 * @use_mfp: Use management frame protection (IEEE 802.11w) in this association 1063 * @use_mfp: Use management frame protection (IEEE 802.11w) in this association
1055 * @crypto: crypto settings 1064 * @crypto: crypto settings
1056 * @prev_bssid: previous BSSID, if not %NULL use reassociate frame 1065 * @prev_bssid: previous BSSID, if not %NULL use reassociate frame
1066 * @flags: See &enum cfg80211_assoc_req_flags
1067 * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
1068 * will be used in ht_capa. Un-supported values will be ignored.
1069 * @ht_capa_mask: The bits of ht_capa which are to be used.
1057 */ 1070 */
1058struct cfg80211_assoc_request { 1071struct cfg80211_assoc_request {
1059 struct cfg80211_bss *bss; 1072 struct cfg80211_bss *bss;
@@ -1061,6 +1074,9 @@ struct cfg80211_assoc_request {
1061 size_t ie_len; 1074 size_t ie_len;
1062 struct cfg80211_crypto_settings crypto; 1075 struct cfg80211_crypto_settings crypto;
1063 bool use_mfp; 1076 bool use_mfp;
1077 u32 flags;
1078 struct ieee80211_ht_cap ht_capa;
1079 struct ieee80211_ht_cap ht_capa_mask;
1064}; 1080};
1065 1081
1066/** 1082/**
@@ -1159,6 +1175,10 @@ struct cfg80211_ibss_params {
1159 * @key_len: length of WEP key for shared key authentication 1175 * @key_len: length of WEP key for shared key authentication
1160 * @key_idx: index of WEP key for shared key authentication 1176 * @key_idx: index of WEP key for shared key authentication
1161 * @key: WEP key for shared key authentication 1177 * @key: WEP key for shared key authentication
1178 * @flags: See &enum cfg80211_assoc_req_flags
1179 * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
1180 * will be used in ht_capa. Un-supported values will be ignored.
1181 * @ht_capa_mask: The bits of ht_capa which are to be used.
1162 */ 1182 */
1163struct cfg80211_connect_params { 1183struct cfg80211_connect_params {
1164 struct ieee80211_channel *channel; 1184 struct ieee80211_channel *channel;
@@ -1172,6 +1192,9 @@ struct cfg80211_connect_params {
1172 struct cfg80211_crypto_settings crypto; 1192 struct cfg80211_crypto_settings crypto;
1173 const u8 *key; 1193 const u8 *key;
1174 u8 key_len, key_idx; 1194 u8 key_len, key_idx;
1195 u32 flags;
1196 struct ieee80211_ht_cap ht_capa;
1197 struct ieee80211_ht_cap ht_capa_mask;
1175}; 1198};
1176 1199
1177/** 1200/**
@@ -1700,6 +1723,8 @@ struct cfg80211_ops {
1700 * cfg80211_report_obss_beacon(). 1723 * cfg80211_report_obss_beacon().
1701 * @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD: When operating as an AP, the device 1724 * @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD: When operating as an AP, the device
1702 * responds to probe-requests in hardware. 1725 * responds to probe-requests in hardware.
1726 * @WIPHY_FLAG_OFFCHAN_TX: Device supports direct off-channel TX.
1727 * @WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL: Device supports remain-on-channel call.
1703 */ 1728 */
1704enum wiphy_flags { 1729enum wiphy_flags {
1705 WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0), 1730 WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0),
@@ -1721,6 +1746,8 @@ enum wiphy_flags {
1721 WIPHY_FLAG_HAVE_AP_SME = BIT(17), 1746 WIPHY_FLAG_HAVE_AP_SME = BIT(17),
1722 WIPHY_FLAG_REPORTS_OBSS = BIT(18), 1747 WIPHY_FLAG_REPORTS_OBSS = BIT(18),
1723 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD = BIT(19), 1748 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD = BIT(19),
1749 WIPHY_FLAG_OFFCHAN_TX = BIT(20),
1750 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21),
1724}; 1751};
1725 1752
1726/** 1753/**
@@ -1934,6 +1961,8 @@ struct wiphy_wowlan_support {
1934 * @wowlan: WoWLAN support information 1961 * @wowlan: WoWLAN support information
1935 * 1962 *
1936 * @ap_sme_capa: AP SME capabilities, flags from &enum nl80211_ap_sme_features. 1963 * @ap_sme_capa: AP SME capabilities, flags from &enum nl80211_ap_sme_features.
1964 * @ht_capa_mod_mask: Specify what ht_cap values can be over-ridden.
1965 * If null, then none can be over-ridden.
1937 */ 1966 */
1938struct wiphy { 1967struct wiphy {
1939 /* assign these fields before you register the wiphy */ 1968 /* assign these fields before you register the wiphy */
@@ -2023,6 +2052,8 @@ struct wiphy {
2023 /* dir in debugfs: ieee80211/<wiphyname> */ 2052 /* dir in debugfs: ieee80211/<wiphyname> */
2024 struct dentry *debugfsdir; 2053 struct dentry *debugfsdir;
2025 2054
2055 const struct ieee80211_ht_cap *ht_capa_mod_mask;
2056
2026#ifdef CONFIG_NET_NS 2057#ifdef CONFIG_NET_NS
2027 /* the network namespace this phy lives in currently */ 2058 /* the network namespace this phy lives in currently */
2028 struct net *_net; 2059 struct net *_net;
@@ -2387,69 +2418,6 @@ extern int ieee80211_radiotap_iterator_next(
2387extern const unsigned char rfc1042_header[6]; 2418extern const unsigned char rfc1042_header[6];
2388extern const unsigned char bridge_tunnel_header[6]; 2419extern const unsigned char bridge_tunnel_header[6];
2389 2420
2390/* Parsed Information Elements */
2391struct ieee802_11_elems {
2392 u8 *ie_start;
2393 size_t total_len;
2394
2395 /* pointers to IEs */
2396 u8 *ssid;
2397 u8 *supp_rates;
2398 u8 *fh_params;
2399 u8 *ds_params;
2400 u8 *cf_params;
2401 struct ieee80211_tim_ie *tim;
2402 u8 *ibss_params;
2403 u8 *challenge;
2404 u8 *wpa;
2405 u8 *rsn;
2406 u8 *erp_info;
2407 u8 *ext_supp_rates;
2408 u8 *wmm_info;
2409 u8 *wmm_param;
2410 struct ieee80211_ht_cap *ht_cap_elem;
2411 struct ieee80211_ht_info *ht_info_elem;
2412 struct ieee80211_meshconf_ie *mesh_config;
2413 u8 *mesh_id;
2414 u8 *peering;
2415 u8 *preq;
2416 u8 *prep;
2417 u8 *perr;
2418 struct ieee80211_rann_ie *rann;
2419 u8 *ch_switch_elem;
2420 u8 *country_elem;
2421 u8 *pwr_constr_elem;
2422 u8 *quiet_elem; /* first quite element */
2423 u8 *timeout_int;
2424
2425 /* length of them, respectively */
2426 u8 ssid_len;
2427 u8 supp_rates_len;
2428 u8 fh_params_len;
2429 u8 ds_params_len;
2430 u8 cf_params_len;
2431 u8 tim_len;
2432 u8 ibss_params_len;
2433 u8 challenge_len;
2434 u8 wpa_len;
2435 u8 rsn_len;
2436 u8 erp_info_len;
2437 u8 ext_supp_rates_len;
2438 u8 wmm_info_len;
2439 u8 wmm_param_len;
2440 u8 mesh_id_len;
2441 u8 peering_len;
2442 u8 preq_len;
2443 u8 prep_len;
2444 u8 perr_len;
2445 u8 ch_switch_elem_len;
2446 u8 country_elem_len;
2447 u8 pwr_constr_elem_len;
2448 u8 quiet_elem_len;
2449 u8 num_of_quiet_elem; /* can be more the one */
2450 u8 timeout_int_len;
2451};
2452
2453/** 2421/**
2454 * ieee80211_get_hdrlen_from_skb - get header length from data 2422 * ieee80211_get_hdrlen_from_skb - get header length from data
2455 * 2423 *
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 0756049ae76d..5b5c8a7e26d7 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1760,11 +1760,21 @@ enum ieee80211_frame_release_type {
1760 * skb contains the buffer starting from the IEEE 802.11 header. 1760 * skb contains the buffer starting from the IEEE 802.11 header.
1761 * The low-level driver should send the frame out based on 1761 * The low-level driver should send the frame out based on
1762 * configuration in the TX control data. This handler should, 1762 * configuration in the TX control data. This handler should,
1763 * preferably, never fail and stop queues appropriately, more 1763 * preferably, never fail and stop queues appropriately.
1764 * importantly, however, it must never fail for A-MPDU-queues. 1764 * This must be implemented if @tx_frags is not.
1765 * This function should return NETDEV_TX_OK except in very 1765 * Must be atomic.
1766 * limited cases. 1766 *
1767 * Must be implemented and atomic. 1767 * @tx_frags: Called to transmit multiple fragments of a single MSDU.
1768 * This handler must consume all fragments, sending out some of
1769 * them only is useless and it can't ask for some of them to be
1770 * queued again. If the frame is not fragmented the queue has a
1771 * single SKB only. To avoid issues with the networking stack
1772 * when TX status is reported the frames should be removed from
1773 * the skb queue.
1774 * If this is used, the tx_info @vif and @sta pointers will be
1775 * invalid -- you must not use them in that case.
1776 * This must be implemented if @tx isn't.
1777 * Must be atomic.
1768 * 1778 *
1769 * @start: Called before the first netdevice attached to the hardware 1779 * @start: Called before the first netdevice attached to the hardware
1770 * is enabled. This should turn on the hardware and must turn on 1780 * is enabled. This should turn on the hardware and must turn on
@@ -2101,6 +2111,8 @@ enum ieee80211_frame_release_type {
2101 */ 2111 */
2102struct ieee80211_ops { 2112struct ieee80211_ops {
2103 void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb); 2113 void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
2114 void (*tx_frags)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2115 struct ieee80211_sta *sta, struct sk_buff_head *skbs);
2104 int (*start)(struct ieee80211_hw *hw); 2116 int (*start)(struct ieee80211_hw *hw);
2105 void (*stop)(struct ieee80211_hw *hw); 2117 void (*stop)(struct ieee80211_hw *hw);
2106#ifdef CONFIG_PM 2118#ifdef CONFIG_PM
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index cdbe67139343..b61eb6c9df14 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -34,30 +34,30 @@
34#define NCI_MAX_NUM_CONN 10 34#define NCI_MAX_NUM_CONN 10
35 35
36/* NCI Status Codes */ 36/* NCI Status Codes */
37#define NCI_STATUS_OK 0x00 37#define NCI_STATUS_OK 0x00
38#define NCI_STATUS_REJECTED 0x01 38#define NCI_STATUS_REJECTED 0x01
39#define NCI_STATUS_RF_FRAME_CORRUPTED 0x02 39#define NCI_STATUS_RF_FRAME_CORRUPTED 0x02
40#define NCI_STATUS_FAILED 0x03 40#define NCI_STATUS_FAILED 0x03
41#define NCI_STATUS_NOT_INITIALIZED 0x04 41#define NCI_STATUS_NOT_INITIALIZED 0x04
42#define NCI_STATUS_SYNTAX_ERROR 0x05 42#define NCI_STATUS_SYNTAX_ERROR 0x05
43#define NCI_STATUS_SEMANTIC_ERROR 0x06 43#define NCI_STATUS_SEMANTIC_ERROR 0x06
44#define NCI_STATUS_UNKNOWN_GID 0x07 44#define NCI_STATUS_UNKNOWN_GID 0x07
45#define NCI_STATUS_UNKNOWN_OID 0x08 45#define NCI_STATUS_UNKNOWN_OID 0x08
46#define NCI_STATUS_INVALID_PARAM 0x09 46#define NCI_STATUS_INVALID_PARAM 0x09
47#define NCI_STATUS_MESSAGE_SIZE_EXCEEDED 0x0a 47#define NCI_STATUS_MESSAGE_SIZE_EXCEEDED 0x0a
48/* Discovery Specific Status Codes */ 48/* Discovery Specific Status Codes */
49#define NCI_STATUS_DISCOVERY_ALREADY_STARTED 0xa0 49#define NCI_STATUS_DISCOVERY_ALREADY_STARTED 0xa0
50#define NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED 0xa1 50#define NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED 0xa1
51#define NCI_STATUS_DISCOVERY_TEAR_DOWN 0xa2 51#define NCI_STATUS_DISCOVERY_TEAR_DOWN 0xa2
52/* RF Interface Specific Status Codes */ 52/* RF Interface Specific Status Codes */
53#define NCI_STATUS_RF_TRANSMISSION_ERROR 0xb0 53#define NCI_STATUS_RF_TRANSMISSION_ERROR 0xb0
54#define NCI_STATUS_RF_PROTOCOL_ERROR 0xb1 54#define NCI_STATUS_RF_PROTOCOL_ERROR 0xb1
55#define NCI_STATUS_RF_TIMEOUT_ERROR 0xb2 55#define NCI_STATUS_RF_TIMEOUT_ERROR 0xb2
56/* NFCEE Interface Specific Status Codes */ 56/* NFCEE Interface Specific Status Codes */
57#define NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED 0xc0 57#define NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED 0xc0
58#define NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED 0xc1 58#define NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED 0xc1
59#define NCI_STATUS_NFCEE_TRANSMISSION_ERROR 0xc2 59#define NCI_STATUS_NFCEE_TRANSMISSION_ERROR 0xc2
60#define NCI_STATUS_NFCEE_PROTOCOL_ERROR 0xc3 60#define NCI_STATUS_NFCEE_PROTOCOL_ERROR 0xc3
61#define NCI_STATUS_NFCEE_TIMEOUT_ERROR 0xc4 61#define NCI_STATUS_NFCEE_TIMEOUT_ERROR 0xc4
62 62
63/* NCI RF Technology and Mode */ 63/* NCI RF Technology and Mode */
@@ -97,9 +97,9 @@
97 97
98/* NCI RF Interfaces */ 98/* NCI RF Interfaces */
99#define NCI_RF_INTERFACE_NFCEE_DIRECT 0x00 99#define NCI_RF_INTERFACE_NFCEE_DIRECT 0x00
100#define NCI_RF_INTERFACE_FRAME 0x01 100#define NCI_RF_INTERFACE_FRAME 0x01
101#define NCI_RF_INTERFACE_ISO_DEP 0x02 101#define NCI_RF_INTERFACE_ISO_DEP 0x02
102#define NCI_RF_INTERFACE_NFC_DEP 0x03 102#define NCI_RF_INTERFACE_NFC_DEP 0x03
103 103
104/* NCI Reset types */ 104/* NCI Reset types */
105#define NCI_RESET_TYPE_KEEP_CONFIG 0x00 105#define NCI_RESET_TYPE_KEEP_CONFIG 0x00
@@ -118,22 +118,22 @@
118 118
119/* NCI Discovery Types */ 119/* NCI Discovery Types */
120#define NCI_DISCOVERY_TYPE_POLL_A_PASSIVE 0x00 120#define NCI_DISCOVERY_TYPE_POLL_A_PASSIVE 0x00
121#define NCI_DISCOVERY_TYPE_POLL_B_PASSIVE 0x01 121#define NCI_DISCOVERY_TYPE_POLL_B_PASSIVE 0x01
122#define NCI_DISCOVERY_TYPE_POLL_F_PASSIVE 0x02 122#define NCI_DISCOVERY_TYPE_POLL_F_PASSIVE 0x02
123#define NCI_DISCOVERY_TYPE_POLL_A_ACTIVE 0x03 123#define NCI_DISCOVERY_TYPE_POLL_A_ACTIVE 0x03
124#define NCI_DISCOVERY_TYPE_POLL_F_ACTIVE 0x05 124#define NCI_DISCOVERY_TYPE_POLL_F_ACTIVE 0x05
125#define NCI_DISCOVERY_TYPE_WAKEUP_A_ACTIVE 0x09 125#define NCI_DISCOVERY_TYPE_WAKEUP_A_ACTIVE 0x09
126#define NCI_DISCOVERY_TYPE_LISTEN_A_PASSIVE 0x80 126#define NCI_DISCOVERY_TYPE_LISTEN_A_PASSIVE 0x80
127#define NCI_DISCOVERY_TYPE_LISTEN_B_PASSIVE 0x81 127#define NCI_DISCOVERY_TYPE_LISTEN_B_PASSIVE 0x81
128#define NCI_DISCOVERY_TYPE_LISTEN_F_PASSIVE 0x82 128#define NCI_DISCOVERY_TYPE_LISTEN_F_PASSIVE 0x82
129#define NCI_DISCOVERY_TYPE_LISTEN_A_ACTIVE 0x83 129#define NCI_DISCOVERY_TYPE_LISTEN_A_ACTIVE 0x83
130#define NCI_DISCOVERY_TYPE_LISTEN_F_ACTIVE 0x85 130#define NCI_DISCOVERY_TYPE_LISTEN_F_ACTIVE 0x85
131 131
132/* NCI Deactivation Type */ 132/* NCI Deactivation Type */
133#define NCI_DEACTIVATE_TYPE_IDLE_MODE 0x00 133#define NCI_DEACTIVATE_TYPE_IDLE_MODE 0x00
134#define NCI_DEACTIVATE_TYPE_SLEEP_MODE 0x01 134#define NCI_DEACTIVATE_TYPE_SLEEP_MODE 0x01
135#define NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE 0x02 135#define NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE 0x02
136#define NCI_DEACTIVATE_TYPE_DISCOVERY 0x03 136#define NCI_DEACTIVATE_TYPE_DISCOVERY 0x03
137 137
138/* Message Type (MT) */ 138/* Message Type (MT) */
139#define NCI_MT_DATA_PKT 0x00 139#define NCI_MT_DATA_PKT 0x00
@@ -165,10 +165,10 @@
165#define nci_conn_id(hdr) (__u8)(((hdr)[0])&0x0f) 165#define nci_conn_id(hdr) (__u8)(((hdr)[0])&0x0f)
166 166
167/* GID values */ 167/* GID values */
168#define NCI_GID_CORE 0x0 168#define NCI_GID_CORE 0x0
169#define NCI_GID_RF_MGMT 0x1 169#define NCI_GID_RF_MGMT 0x1
170#define NCI_GID_NFCEE_MGMT 0x2 170#define NCI_GID_NFCEE_MGMT 0x2
171#define NCI_GID_PROPRIETARY 0xf 171#define NCI_GID_PROPRIETARY 0xf
172 172
173/* ---- NCI Packet structures ---- */ 173/* ---- NCI Packet structures ---- */
174#define NCI_CTRL_HDR_SIZE 3 174#define NCI_CTRL_HDR_SIZE 3
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index eb7d3c2d4274..a5f79933e211 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -48,6 +48,10 @@ enum environment_cap {
48 * 99 - built by driver but a specific alpha2 cannot be determined 48 * 99 - built by driver but a specific alpha2 cannot be determined
49 * 98 - result of an intersection between two regulatory domains 49 * 98 - result of an intersection between two regulatory domains
50 * 97 - regulatory domain has not yet been configured 50 * 97 - regulatory domain has not yet been configured
51 * @dfs_region: If CRDA responded with a regulatory domain that requires
52 * DFS master operation on a known DFS region (NL80211_DFS_*),
53 * dfs_region represents that region. Drivers can use this and the
54 * @alpha2 to adjust their device's DFS parameters as required.
51 * @intersect: indicates whether the wireless core should intersect 55 * @intersect: indicates whether the wireless core should intersect
52 * the requested regulatory domain with the presently set regulatory 56 * the requested regulatory domain with the presently set regulatory
53 * domain. 57 * domain.
@@ -67,6 +71,7 @@ struct regulatory_request {
67 int wiphy_idx; 71 int wiphy_idx;
68 enum nl80211_reg_initiator initiator; 72 enum nl80211_reg_initiator initiator;
69 char alpha2[2]; 73 char alpha2[2];
74 u8 dfs_region;
70 bool intersect; 75 bool intersect;
71 bool processed; 76 bool processed;
72 enum environment_cap country_ie_env; 77 enum environment_cap country_ie_env;
@@ -93,6 +98,7 @@ struct ieee80211_reg_rule {
93struct ieee80211_regdomain { 98struct ieee80211_regdomain {
94 u32 n_reg_rules; 99 u32 n_reg_rules;
95 char alpha2[2]; 100 char alpha2[2];
101 u8 dfs_region;
96 struct ieee80211_reg_rule reg_rules[]; 102 struct ieee80211_reg_rule reg_rules[];
97}; 103};
98 104
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 1063a7e57d62..2577c45069e5 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -832,7 +832,7 @@ static void sta_apply_parameters(struct ieee80211_local *local,
832 } 832 }
833 833
834 if (params->ht_capa) 834 if (params->ht_capa)
835 ieee80211_ht_cap_ie_to_sta_ht_cap(sband, 835 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
836 params->ht_capa, 836 params->ht_capa,
837 &sta->sta.ht_cap); 837 &sta->sta.ht_cap);
838 838
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 3110cbdc501b..2406b3e7393f 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -63,10 +63,10 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
63 test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : "" 63 test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
64 64
65 int res = scnprintf(buf, sizeof(buf), 65 int res = scnprintf(buf, sizeof(buf),
66 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", 66 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
67 TEST(AUTH), TEST(ASSOC), TEST(PS_STA), 67 TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
68 TEST(PS_DRIVER), TEST(AUTHORIZED), 68 TEST(PS_DRIVER), TEST(AUTHORIZED),
69 TEST(SHORT_PREAMBLE), TEST(ASSOC_AP), 69 TEST(SHORT_PREAMBLE),
70 TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT), 70 TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT),
71 TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL), 71 TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL),
72 TEST(UAPSD), TEST(SP), TEST(TDLS_PEER), 72 TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index b12ed52732c8..49cc5e0e8a6a 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -15,6 +15,14 @@ static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
15 local->ops->tx(&local->hw, skb); 15 local->ops->tx(&local->hw, skb);
16} 16}
17 17
18static inline void drv_tx_frags(struct ieee80211_local *local,
19 struct ieee80211_vif *vif,
20 struct ieee80211_sta *sta,
21 struct sk_buff_head *skbs)
22{
23 local->ops->tx_frags(&local->hw, vif, sta, skbs);
24}
25
18static inline int drv_start(struct ieee80211_local *local) 26static inline int drv_start(struct ieee80211_local *local)
19{ 27{
20 int ret; 28 int ret;
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index d06975098aad..810cfbea6ad1 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -19,7 +19,82 @@
19#include "ieee80211_i.h" 19#include "ieee80211_i.h"
20#include "rate.h" 20#include "rate.h"
21 21
22void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband, 22bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata)
23{
24 const __le16 flg = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40);
25 if ((sdata->u.mgd.ht_capa_mask.cap_info & flg) &&
26 !(sdata->u.mgd.ht_capa.cap_info & flg))
27 return true;
28 return false;
29}
30
31void __check_htcap_disable(struct ieee80211_sub_if_data *sdata,
32 struct ieee80211_sta_ht_cap *ht_cap,
33 u16 flag)
34{
35 __le16 le_flag = cpu_to_le16(flag);
36 if (sdata->u.mgd.ht_capa_mask.cap_info & le_flag) {
37 if (!(sdata->u.mgd.ht_capa.cap_info & le_flag))
38 ht_cap->cap &= ~flag;
39 }
40}
41
42void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
43 struct ieee80211_sta_ht_cap *ht_cap)
44{
45 u8 *scaps = (u8 *)(&sdata->u.mgd.ht_capa.mcs.rx_mask);
46 u8 *smask = (u8 *)(&sdata->u.mgd.ht_capa_mask.mcs.rx_mask);
47 int i;
48
49 if (sdata->vif.type != NL80211_IFTYPE_STATION) {
50 WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION);
51 return;
52 }
53
54 /* NOTE: If you add more over-rides here, update register_hw
55 * ht_capa_mod_msk logic in main.c as well.
56 * And, if this method can ever change ht_cap.ht_supported, fix
57 * the check in ieee80211_add_ht_ie.
58 */
59
60 /* check for HT over-rides, MCS rates first. */
61 for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
62 u8 m = smask[i];
63 ht_cap->mcs.rx_mask[i] &= ~m; /* turn off all masked bits */
64 /* Add back rates that are supported */
65 ht_cap->mcs.rx_mask[i] |= (m & scaps[i]);
66 }
67
68 /* Force removal of HT-40 capabilities? */
69 __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SUP_WIDTH_20_40);
70 __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SGI_40);
71
72 /* Allow user to disable the max-AMSDU bit. */
73 __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_MAX_AMSDU);
74
75 /* Allow user to decrease AMPDU factor */
76 if (sdata->u.mgd.ht_capa_mask.ampdu_params_info &
77 IEEE80211_HT_AMPDU_PARM_FACTOR) {
78 u8 n = sdata->u.mgd.ht_capa.ampdu_params_info
79 & IEEE80211_HT_AMPDU_PARM_FACTOR;
80 if (n < ht_cap->ampdu_factor)
81 ht_cap->ampdu_factor = n;
82 }
83
84 /* Allow the user to increase AMPDU density. */
85 if (sdata->u.mgd.ht_capa_mask.ampdu_params_info &
86 IEEE80211_HT_AMPDU_PARM_DENSITY) {
87 u8 n = (sdata->u.mgd.ht_capa.ampdu_params_info &
88 IEEE80211_HT_AMPDU_PARM_DENSITY)
89 >> IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT;
90 if (n > ht_cap->ampdu_density)
91 ht_cap->ampdu_density = n;
92 }
93}
94
95
96void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
97 struct ieee80211_supported_band *sband,
23 struct ieee80211_ht_cap *ht_cap_ie, 98 struct ieee80211_ht_cap *ht_cap_ie,
24 struct ieee80211_sta_ht_cap *ht_cap) 99 struct ieee80211_sta_ht_cap *ht_cap)
25{ 100{
@@ -103,6 +178,12 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
103 /* handle MCS rate 32 too */ 178 /* handle MCS rate 32 too */
104 if (sband->ht_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1) 179 if (sband->ht_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1)
105 ht_cap->mcs.rx_mask[32/8] |= 1; 180 ht_cap->mcs.rx_mask[32/8] |= 1;
181
182 /*
183 * If user has specified capability over-rides, take care
184 * of that here.
185 */
186 ieee80211_apply_htcap_overrides(sdata, ht_cap);
106} 187}
107 188
108void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, bool tx) 189void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, bool tx)
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 068cc92d16aa..762243e469df 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -142,6 +142,7 @@ typedef unsigned __bitwise__ ieee80211_tx_result;
142 142
143struct ieee80211_tx_data { 143struct ieee80211_tx_data {
144 struct sk_buff *skb; 144 struct sk_buff *skb;
145 struct sk_buff_head skbs;
145 struct ieee80211_local *local; 146 struct ieee80211_local *local;
146 struct ieee80211_sub_if_data *sdata; 147 struct ieee80211_sub_if_data *sdata;
147 struct sta_info *sta; 148 struct sta_info *sta;
@@ -448,6 +449,9 @@ struct ieee80211_if_managed {
448 */ 449 */
449 int rssi_min_thold, rssi_max_thold; 450 int rssi_min_thold, rssi_max_thold;
450 int last_ave_beacon_signal; 451 int last_ave_beacon_signal;
452
453 struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */
454 struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */
451}; 455};
452 456
453struct ieee80211_if_ibss { 457struct ieee80211_if_ibss {
@@ -1039,6 +1043,69 @@ struct ieee80211_ra_tid {
1039 u16 tid; 1043 u16 tid;
1040}; 1044};
1041 1045
1046/* Parsed Information Elements */
1047struct ieee802_11_elems {
1048 u8 *ie_start;
1049 size_t total_len;
1050
1051 /* pointers to IEs */
1052 u8 *ssid;
1053 u8 *supp_rates;
1054 u8 *fh_params;
1055 u8 *ds_params;
1056 u8 *cf_params;
1057 struct ieee80211_tim_ie *tim;
1058 u8 *ibss_params;
1059 u8 *challenge;
1060 u8 *wpa;
1061 u8 *rsn;
1062 u8 *erp_info;
1063 u8 *ext_supp_rates;
1064 u8 *wmm_info;
1065 u8 *wmm_param;
1066 struct ieee80211_ht_cap *ht_cap_elem;
1067 struct ieee80211_ht_info *ht_info_elem;
1068 struct ieee80211_meshconf_ie *mesh_config;
1069 u8 *mesh_id;
1070 u8 *peering;
1071 u8 *preq;
1072 u8 *prep;
1073 u8 *perr;
1074 struct ieee80211_rann_ie *rann;
1075 u8 *ch_switch_elem;
1076 u8 *country_elem;
1077 u8 *pwr_constr_elem;
1078 u8 *quiet_elem; /* first quite element */
1079 u8 *timeout_int;
1080
1081 /* length of them, respectively */
1082 u8 ssid_len;
1083 u8 supp_rates_len;
1084 u8 fh_params_len;
1085 u8 ds_params_len;
1086 u8 cf_params_len;
1087 u8 tim_len;
1088 u8 ibss_params_len;
1089 u8 challenge_len;
1090 u8 wpa_len;
1091 u8 rsn_len;
1092 u8 erp_info_len;
1093 u8 ext_supp_rates_len;
1094 u8 wmm_info_len;
1095 u8 wmm_param_len;
1096 u8 mesh_id_len;
1097 u8 peering_len;
1098 u8 preq_len;
1099 u8 prep_len;
1100 u8 perr_len;
1101 u8 ch_switch_elem_len;
1102 u8 country_elem_len;
1103 u8 pwr_constr_elem_len;
1104 u8 quiet_elem_len;
1105 u8 num_of_quiet_elem; /* can be more the one */
1106 u8 timeout_int_len;
1107};
1108
1042static inline struct ieee80211_local *hw_to_local( 1109static inline struct ieee80211_local *hw_to_local(
1043 struct ieee80211_hw *hw) 1110 struct ieee80211_hw *hw)
1044{ 1111{
@@ -1188,7 +1255,11 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1188 struct net_device *dev); 1255 struct net_device *dev);
1189 1256
1190/* HT */ 1257/* HT */
1191void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband, 1258bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata);
1259void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
1260 struct ieee80211_sta_ht_cap *ht_cap);
1261void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
1262 struct ieee80211_supported_band *sband,
1192 struct ieee80211_ht_cap *ht_cap_ie, 1263 struct ieee80211_ht_cap *ht_cap_ie,
1193 struct ieee80211_sta_ht_cap *ht_cap); 1264 struct ieee80211_sta_ht_cap *ht_cap);
1194void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, 1265void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
@@ -1343,7 +1414,7 @@ void ieee80211_recalc_smps(struct ieee80211_local *local);
1343size_t ieee80211_ie_split(const u8 *ies, size_t ielen, 1414size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
1344 const u8 *ids, int n_ids, size_t offset); 1415 const u8 *ids, int n_ids, size_t offset);
1345size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset); 1416size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset);
1346u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_supported_band *sband, 1417u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
1347 u16 cap); 1418 u16 cap);
1348u8 *ieee80211_ie_build_ht_info(u8 *pos, 1419u8 *ieee80211_ie_build_ht_info(u8 *pos,
1349 struct ieee80211_sta_ht_cap *ht_cap, 1420 struct ieee80211_sta_ht_cap *ht_cap,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 12a6d4bb5d37..b34ca0cbdf6c 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -474,7 +474,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
474 RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL); 474 RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL);
475 synchronize_rcu(); 475 synchronize_rcu();
476 kfree(old_beacon); 476 kfree(old_beacon);
477 kfree(old_probe_resp); 477 kfree_skb(old_probe_resp);
478 478
479 /* down all dependent devices, that is VLANs */ 479 /* down all dependent devices, that is VLANs */
480 list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans, 480 list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index e323d4e6647b..dddedfad5404 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -558,6 +558,19 @@ ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
558 }, 558 },
559}; 559};
560 560
561static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
562 .ampdu_params_info = IEEE80211_HT_AMPDU_PARM_FACTOR |
563 IEEE80211_HT_AMPDU_PARM_DENSITY,
564
565 .cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
566 IEEE80211_HT_CAP_MAX_AMSDU |
567 IEEE80211_HT_CAP_SGI_40),
568 .mcs = {
569 .rx_mask = { 0xff, 0xff, 0xff, 0xff, 0xff,
570 0xff, 0xff, 0xff, 0xff, 0xff, },
571 },
572};
573
561struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, 574struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
562 const struct ieee80211_ops *ops) 575 const struct ieee80211_ops *ops)
563{ 576{
@@ -594,7 +607,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
594 wiphy->flags |= WIPHY_FLAG_NETNS_OK | 607 wiphy->flags |= WIPHY_FLAG_NETNS_OK |
595 WIPHY_FLAG_4ADDR_AP | 608 WIPHY_FLAG_4ADDR_AP |
596 WIPHY_FLAG_4ADDR_STATION | 609 WIPHY_FLAG_4ADDR_STATION |
597 WIPHY_FLAG_REPORTS_OBSS; 610 WIPHY_FLAG_REPORTS_OBSS |
611 WIPHY_FLAG_OFFCHAN_TX |
612 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
598 613
599 wiphy->features = NL80211_FEATURE_SK_TX_STATUS; 614 wiphy->features = NL80211_FEATURE_SK_TX_STATUS;
600 615
@@ -609,7 +624,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
609 624
610 local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN); 625 local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
611 626
612 BUG_ON(!ops->tx); 627 BUG_ON(!ops->tx && !ops->tx_frags);
613 BUG_ON(!ops->start); 628 BUG_ON(!ops->start);
614 BUG_ON(!ops->stop); 629 BUG_ON(!ops->stop);
615 BUG_ON(!ops->config); 630 BUG_ON(!ops->config);
@@ -629,6 +644,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
629 local->user_power_level = -1; 644 local->user_power_level = -1;
630 local->uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES; 645 local->uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES;
631 local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN; 646 local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;
647 wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask;
632 648
633 INIT_LIST_HEAD(&local->interfaces); 649 INIT_LIST_HEAD(&local->interfaces);
634 650
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index b3a125f60347..ee82d2f7f114 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -366,7 +366,7 @@ int mesh_add_ht_cap_ie(struct sk_buff *skb,
366 return -ENOMEM; 366 return -ENOMEM;
367 367
368 pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_cap)); 368 pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_cap));
369 ieee80211_ie_build_ht_cap(pos, sband, sband->ht_cap.cap); 369 ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, sband->ht_cap.cap);
370 370
371 return 0; 371 return 0;
372} 372}
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index a7afb2d32def..ce3db2735d7c 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -867,10 +867,11 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
867 return; 867 return;
868 } 868 }
869 869
870 spin_lock_bh(&mpath->state_lock); 870 spin_lock(&mpath->state_lock);
871 if (mpath->flags & MESH_PATH_REQ_QUEUED) { 871 if (mpath->flags & MESH_PATH_REQ_QUEUED) {
872 spin_unlock_bh(&mpath->state_lock); 872 spin_unlock(&mpath->state_lock);
873 spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); 873 spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
874 kfree(preq_node);
874 return; 875 return;
875 } 876 }
876 877
@@ -878,7 +879,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
878 preq_node->flags = flags; 879 preq_node->flags = flags;
879 880
880 mpath->flags |= MESH_PATH_REQ_QUEUED; 881 mpath->flags |= MESH_PATH_REQ_QUEUED;
881 spin_unlock_bh(&mpath->state_lock); 882 spin_unlock(&mpath->state_lock);
882 883
883 list_add_tail(&preq_node->list, &ifmsh->preq_queue.list); 884 list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
884 ++ifmsh->preq_queue_len; 885 ++ifmsh->preq_queue_len;
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 4fc23d1b9c3a..7bd2a76aef0e 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -69,8 +69,6 @@ static inline struct mesh_table *resize_dereference_mpp_paths(void)
69 lockdep_is_held(&pathtbl_resize_lock)); 69 lockdep_is_held(&pathtbl_resize_lock));
70} 70}
71 71
72static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath);
73
74/* 72/*
75 * CAREFUL -- "tbl" must not be an expression, 73 * CAREFUL -- "tbl" must not be an expression,
76 * in particular not an rcu_dereference(), since 74 * in particular not an rcu_dereference(), since
@@ -420,21 +418,18 @@ static void mesh_gate_node_reclaim(struct rcu_head *rp)
420} 418}
421 419
422/** 420/**
423 * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates 421 * mesh_path_add_gate - add the given mpath to a mesh gate to our path table
424 * @mesh_tbl: table which contains known_gates list 422 * @mpath: gate path to add to table
425 * @mpath: mpath to known mesh gate
426 *
427 * Returns: 0 on success
428 *
429 */ 423 */
430static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath) 424int mesh_path_add_gate(struct mesh_path *mpath)
431{ 425{
426 struct mesh_table *tbl;
432 struct mpath_node *gate, *new_gate; 427 struct mpath_node *gate, *new_gate;
433 struct hlist_node *n; 428 struct hlist_node *n;
434 int err; 429 int err;
435 430
436 rcu_read_lock(); 431 rcu_read_lock();
437 tbl = rcu_dereference(tbl); 432 tbl = rcu_dereference(mesh_paths);
438 433
439 hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list) 434 hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
440 if (gate->mpath == mpath) { 435 if (gate->mpath == mpath) {
@@ -478,8 +473,6 @@ static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
478 struct mpath_node *gate; 473 struct mpath_node *gate;
479 struct hlist_node *p, *q; 474 struct hlist_node *p, *q;
480 475
481 tbl = rcu_dereference(tbl);
482
483 hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list) 476 hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
484 if (gate->mpath == mpath) { 477 if (gate->mpath == mpath) {
485 spin_lock_bh(&tbl->gates_lock); 478 spin_lock_bh(&tbl->gates_lock);
@@ -498,16 +491,6 @@ static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
498} 491}
499 492
500/** 493/**
501 *
502 * mesh_path_add_gate - add the given mpath to a mesh gate to our path table
503 * @mpath: gate path to add to table
504 */
505int mesh_path_add_gate(struct mesh_path *mpath)
506{
507 return mesh_gate_add(mesh_paths, mpath);
508}
509
510/**
511 * mesh_gate_num - number of gates known to this interface 494 * mesh_gate_num - number of gates known to this interface
512 * @sdata: subif data 495 * @sdata: subif data
513 */ 496 */
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 0140e88a8220..7314372b12ba 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -101,7 +101,8 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
101 set_sta_flag(sta, WLAN_STA_WME); 101 set_sta_flag(sta, WLAN_STA_WME);
102 sta->sta.supp_rates[local->hw.conf.channel->band] = rates; 102 sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
103 if (elems->ht_cap_elem) 103 if (elems->ht_cap_elem)
104 ieee80211_ht_cap_ie_to_sta_ht_cap(sband, elems->ht_cap_elem, 104 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
105 elems->ht_cap_elem,
105 &sta->sta.ht_cap); 106 &sta->sta.ht_cap);
106 rate_control_rate_init(sta); 107 rate_control_rate_init(sta);
107 108
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index f9ec15b3fe09..09019d135942 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -209,6 +209,7 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
209 channel_type = NL80211_CHAN_HT20; 209 channel_type = NL80211_CHAN_HT20;
210 210
211 if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) && 211 if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) &&
212 !ieee80111_cfg_override_disables_ht40(sdata) &&
212 (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) && 213 (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
213 (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) { 214 (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) {
214 switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { 215 switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
@@ -1120,6 +1121,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1120 1121
1121 /* on the next assoc, re-program HT parameters */ 1122 /* on the next assoc, re-program HT parameters */
1122 sdata->ht_opmode_valid = false; 1123 sdata->ht_opmode_valid = false;
1124 memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa));
1125 memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
1123 1126
1124 local->power_constr_level = 0; 1127 local->power_constr_level = 0;
1125 1128
@@ -1359,9 +1362,6 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
1359 ieee80211_set_disassoc(sdata, true, true); 1362 ieee80211_set_disassoc(sdata, true, true);
1360 mutex_unlock(&ifmgd->mtx); 1363 mutex_unlock(&ifmgd->mtx);
1361 1364
1362 mutex_lock(&local->mtx);
1363 ieee80211_recalc_idle(local);
1364 mutex_unlock(&local->mtx);
1365 /* 1365 /*
1366 * must be outside lock due to cfg80211, 1366 * must be outside lock due to cfg80211,
1367 * but that's not a problem. 1367 * but that's not a problem.
@@ -1370,6 +1370,10 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
1370 IEEE80211_STYPE_DEAUTH, 1370 IEEE80211_STYPE_DEAUTH,
1371 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, 1371 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
1372 NULL, true); 1372 NULL, true);
1373
1374 mutex_lock(&local->mtx);
1375 ieee80211_recalc_idle(local);
1376 mutex_unlock(&local->mtx);
1373} 1377}
1374 1378
1375void ieee80211_beacon_connection_loss_work(struct work_struct *work) 1379void ieee80211_beacon_connection_loss_work(struct work_struct *work)
@@ -1575,7 +1579,6 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1575 1579
1576 set_sta_flag(sta, WLAN_STA_AUTH); 1580 set_sta_flag(sta, WLAN_STA_AUTH);
1577 set_sta_flag(sta, WLAN_STA_ASSOC); 1581 set_sta_flag(sta, WLAN_STA_ASSOC);
1578 set_sta_flag(sta, WLAN_STA_ASSOC_AP);
1579 if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT)) 1582 if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
1580 set_sta_flag(sta, WLAN_STA_AUTHORIZED); 1583 set_sta_flag(sta, WLAN_STA_AUTHORIZED);
1581 1584
@@ -1613,7 +1616,7 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1613 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; 1616 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
1614 1617
1615 if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) 1618 if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
1616 ieee80211_ht_cap_ie_to_sta_ht_cap(sband, 1619 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
1617 elems.ht_cap_elem, &sta->sta.ht_cap); 1620 elems.ht_cap_elem, &sta->sta.ht_cap);
1618 1621
1619 ap_ht_cap_flags = sta->sta.ht_cap.cap; 1622 ap_ht_cap_flags = sta->sta.ht_cap.cap;
@@ -1982,7 +1985,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1982 1985
1983 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1986 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1984 1987
1985 ieee80211_ht_cap_ie_to_sta_ht_cap(sband, 1988 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
1986 elems.ht_cap_elem, &sta->sta.ht_cap); 1989 elems.ht_cap_elem, &sta->sta.ht_cap);
1987 1990
1988 ap_ht_cap_flags = sta->sta.ht_cap.cap; 1991 ap_ht_cap_flags = sta->sta.ht_cap.cap;
@@ -2136,9 +2139,6 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
2136 2139
2137 ieee80211_set_disassoc(sdata, true, true); 2140 ieee80211_set_disassoc(sdata, true, true);
2138 mutex_unlock(&ifmgd->mtx); 2141 mutex_unlock(&ifmgd->mtx);
2139 mutex_lock(&local->mtx);
2140 ieee80211_recalc_idle(local);
2141 mutex_unlock(&local->mtx);
2142 /* 2142 /*
2143 * must be outside lock due to cfg80211, 2143 * must be outside lock due to cfg80211,
2144 * but that's not a problem. 2144 * but that's not a problem.
@@ -2146,6 +2146,11 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
2146 ieee80211_send_deauth_disassoc(sdata, bssid, 2146 ieee80211_send_deauth_disassoc(sdata, bssid,
2147 IEEE80211_STYPE_DEAUTH, reason, 2147 IEEE80211_STYPE_DEAUTH, reason,
2148 NULL, true); 2148 NULL, true);
2149
2150 mutex_lock(&local->mtx);
2151 ieee80211_recalc_idle(local);
2152 mutex_unlock(&local->mtx);
2153
2149 mutex_lock(&ifmgd->mtx); 2154 mutex_lock(&ifmgd->mtx);
2150} 2155}
2151 2156
@@ -2640,6 +2645,13 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2640 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 2645 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
2641 2646
2642 2647
2648 if (req->flags & ASSOC_REQ_DISABLE_HT)
2649 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
2650
2651 memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
2652 memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
2653 sizeof(ifmgd->ht_capa_mask));
2654
2643 if (req->ie && req->ie_len) { 2655 if (req->ie && req->ie_len) {
2644 memcpy(wk->ie, req->ie, req->ie_len); 2656 memcpy(wk->ie, req->ie, req->ie_len);
2645 wk->ie_len = req->ie_len; 2657 wk->ie_len = req->ie_len;
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 3d414411a96e..ebd8cccac8f2 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -212,8 +212,6 @@ static void ieee80211_hw_roc_start(struct work_struct *work)
212 return; 212 return;
213 } 213 }
214 214
215 ieee80211_recalc_idle(local);
216
217 if (local->hw_roc_skb) { 215 if (local->hw_roc_skb) {
218 sdata = IEEE80211_DEV_TO_SUB_IF(local->hw_roc_dev); 216 sdata = IEEE80211_DEV_TO_SUB_IF(local->hw_roc_dev);
219 ieee80211_tx_skb(sdata, local->hw_roc_skb); 217 ieee80211_tx_skb(sdata, local->hw_roc_skb);
@@ -227,6 +225,8 @@ static void ieee80211_hw_roc_start(struct work_struct *work)
227 GFP_KERNEL); 225 GFP_KERNEL);
228 } 226 }
229 227
228 ieee80211_recalc_idle(local);
229
230 mutex_unlock(&local->mtx); 230 mutex_unlock(&local->mtx);
231} 231}
232 232
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index cdb28535716b..ff5f7b84e825 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -36,8 +36,17 @@
36/* Transmit duration for the raw data part of an average sized packet */ 36/* Transmit duration for the raw data part of an average sized packet */
37#define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps))) 37#define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))
38 38
39/*
40 * Define group sort order: HT40 -> SGI -> #streams
41 */
42#define GROUP_IDX(_streams, _sgi, _ht40) \
43 MINSTREL_MAX_STREAMS * 2 * _ht40 + \
44 MINSTREL_MAX_STREAMS * _sgi + \
45 _streams - 1
46
39/* MCS rate information for an MCS group */ 47/* MCS rate information for an MCS group */
40#define MCS_GROUP(_streams, _sgi, _ht40) { \ 48#define MCS_GROUP(_streams, _sgi, _ht40) \
49 [GROUP_IDX(_streams, _sgi, _ht40)] = { \
41 .streams = _streams, \ 50 .streams = _streams, \
42 .flags = \ 51 .flags = \
43 (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \ 52 (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \
@@ -58,6 +67,9 @@
58 * To enable sufficiently targeted rate sampling, MCS rates are divided into 67 * To enable sufficiently targeted rate sampling, MCS rates are divided into
59 * groups, based on the number of streams and flags (HT40, SGI) that they 68 * groups, based on the number of streams and flags (HT40, SGI) that they
60 * use. 69 * use.
70 *
71 * Sortorder has to be fixed for GROUP_IDX macro to be applicable:
72 * HT40 -> SGI -> #streams
61 */ 73 */
62const struct mcs_group minstrel_mcs_groups[] = { 74const struct mcs_group minstrel_mcs_groups[] = {
63 MCS_GROUP(1, 0, 0), 75 MCS_GROUP(1, 0, 0),
@@ -102,21 +114,9 @@ minstrel_ewma(int old, int new, int weight)
102static int 114static int
103minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate) 115minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
104{ 116{
105 int streams = (rate->idx / MCS_GROUP_RATES) + 1; 117 return GROUP_IDX((rate->idx / MCS_GROUP_RATES) + 1,
106 u32 flags = IEEE80211_TX_RC_SHORT_GI | IEEE80211_TX_RC_40_MHZ_WIDTH; 118 !!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
107 int i; 119 !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
108
109 for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
110 if (minstrel_mcs_groups[i].streams != streams)
111 continue;
112 if (minstrel_mcs_groups[i].flags != (rate->flags & flags))
113 continue;
114
115 return i;
116 }
117
118 WARN_ON(1);
119 return 0;
120} 120}
121 121
122static inline struct minstrel_rate_stats * 122static inline struct minstrel_rate_stats *
@@ -130,7 +130,7 @@ minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
130 * Recalculate success probabilities and counters for a rate using EWMA 130 * Recalculate success probabilities and counters for a rate using EWMA
131 */ 131 */
132static void 132static void
133minstrel_calc_rate_ewma(struct minstrel_priv *mp, struct minstrel_rate_stats *mr) 133minstrel_calc_rate_ewma(struct minstrel_rate_stats *mr)
134{ 134{
135 if (unlikely(mr->attempts > 0)) { 135 if (unlikely(mr->attempts > 0)) {
136 mr->sample_skipped = 0; 136 mr->sample_skipped = 0;
@@ -156,8 +156,7 @@ minstrel_calc_rate_ewma(struct minstrel_priv *mp, struct minstrel_rate_stats *mr
156 * the expected number of retransmissions and their expected length 156 * the expected number of retransmissions and their expected length
157 */ 157 */
158static void 158static void
159minstrel_ht_calc_tp(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, 159minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
160 int group, int rate)
161{ 160{
162 struct minstrel_rate_stats *mr; 161 struct minstrel_rate_stats *mr;
163 unsigned int usecs; 162 unsigned int usecs;
@@ -226,8 +225,8 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
226 mr = &mg->rates[i]; 225 mr = &mg->rates[i];
227 mr->retry_updated = false; 226 mr->retry_updated = false;
228 index = MCS_GROUP_RATES * group + i; 227 index = MCS_GROUP_RATES * group + i;
229 minstrel_calc_rate_ewma(mp, mr); 228 minstrel_calc_rate_ewma(mr);
230 minstrel_ht_calc_tp(mp, mi, group, i); 229 minstrel_ht_calc_tp(mi, group, i);
231 230
232 if (!mr->cur_tp) 231 if (!mr->cur_tp)
233 continue; 232 continue;
@@ -300,10 +299,10 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
300static bool 299static bool
301minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate) 300minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate)
302{ 301{
303 if (!rate->count) 302 if (rate->idx < 0)
304 return false; 303 return false;
305 304
306 if (rate->idx < 0) 305 if (!rate->count)
307 return false; 306 return false;
308 307
309 return !!(rate->flags & IEEE80211_TX_RC_MCS); 308 return !!(rate->flags & IEEE80211_TX_RC_MCS);
@@ -357,7 +356,7 @@ minstrel_downgrade_rate(struct minstrel_ht_sta *mi, unsigned int *idx,
357} 356}
358 357
359static void 358static void
360minstrel_aggr_check(struct minstrel_priv *mp, struct ieee80211_sta *pubsta, struct sk_buff *skb) 359minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
361{ 360{
362 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 361 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
363 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 362 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
@@ -455,7 +454,7 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
455 if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) { 454 if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
456 minstrel_ht_update_stats(mp, mi); 455 minstrel_ht_update_stats(mp, mi);
457 if (!(info->flags & IEEE80211_TX_CTL_AMPDU)) 456 if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
458 minstrel_aggr_check(mp, sta, skb); 457 minstrel_aggr_check(sta, skb);
459 } 458 }
460} 459}
461 460
@@ -515,7 +514,6 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
515static void 514static void
516minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, 515minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
517 struct ieee80211_tx_rate *rate, int index, 516 struct ieee80211_tx_rate *rate, int index,
518 struct ieee80211_tx_rate_control *txrc,
519 bool sample, bool rtscts) 517 bool sample, bool rtscts)
520{ 518{
521 const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; 519 const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
@@ -628,11 +626,11 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
628 if (sample_idx >= 0) { 626 if (sample_idx >= 0) {
629 sample = true; 627 sample = true;
630 minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx, 628 minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx,
631 txrc, true, false); 629 true, false);
632 info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; 630 info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
633 } else { 631 } else {
634 minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate, 632 minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate,
635 txrc, false, false); 633 false, false);
636 } 634 }
637 635
638 if (mp->hw->max_rates >= 3) { 636 if (mp->hw->max_rates >= 3) {
@@ -643,13 +641,13 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
643 */ 641 */
644 if (sample_idx >= 0) 642 if (sample_idx >= 0)
645 minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate, 643 minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate,
646 txrc, false, false); 644 false, false);
647 else 645 else
648 minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2, 646 minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2,
649 txrc, false, true); 647 false, true);
650 648
651 minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate, 649 minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate,
652 txrc, false, !sample); 650 false, !sample);
653 651
654 ar[3].count = 0; 652 ar[3].count = 0;
655 ar[3].idx = -1; 653 ar[3].idx = -1;
@@ -660,7 +658,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
660 * max_tp_rate -> max_prob_rate by default. 658 * max_tp_rate -> max_prob_rate by default.
661 */ 659 */
662 minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_prob_rate, 660 minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_prob_rate,
663 txrc, false, !sample); 661 false, !sample);
664 662
665 ar[2].count = 0; 663 ar[2].count = 0;
666 ar[2].idx = -1; 664 ar[2].idx = -1;
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index c5923ab8a070..6280e8bca49d 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -30,7 +30,6 @@
30 * when virtual port control is not in use. 30 * when virtual port control is not in use.
31 * @WLAN_STA_SHORT_PREAMBLE: Station is capable of receiving short-preamble 31 * @WLAN_STA_SHORT_PREAMBLE: Station is capable of receiving short-preamble
32 * frames. 32 * frames.
33 * @WLAN_STA_ASSOC_AP: We're associated to that station, it is an AP.
34 * @WLAN_STA_WME: Station is a QoS-STA. 33 * @WLAN_STA_WME: Station is a QoS-STA.
35 * @WLAN_STA_WDS: Station is one of our WDS peers. 34 * @WLAN_STA_WDS: Station is one of our WDS peers.
36 * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the 35 * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the
@@ -60,7 +59,6 @@ enum ieee80211_sta_info_flags {
60 WLAN_STA_PS_STA, 59 WLAN_STA_PS_STA,
61 WLAN_STA_AUTHORIZED, 60 WLAN_STA_AUTHORIZED,
62 WLAN_STA_SHORT_PREAMBLE, 61 WLAN_STA_SHORT_PREAMBLE,
63 WLAN_STA_ASSOC_AP,
64 WLAN_STA_WME, 62 WLAN_STA_WME,
65 WLAN_STA_WDS, 63 WLAN_STA_WDS,
66 WLAN_STA_CLEAR_PS_FILT, 64 WLAN_STA_CLEAR_PS_FILT,
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index f044963feb9a..8d31933abe6a 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -36,7 +36,8 @@
36 36
37/* misc utils */ 37/* misc utils */
38 38
39static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, 39static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
40 struct sk_buff *skb, int group_addr,
40 int next_frag_len) 41 int next_frag_len)
41{ 42{
42 int rate, mrate, erp, dur, i; 43 int rate, mrate, erp, dur, i;
@@ -44,7 +45,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
44 struct ieee80211_local *local = tx->local; 45 struct ieee80211_local *local = tx->local;
45 struct ieee80211_supported_band *sband; 46 struct ieee80211_supported_band *sband;
46 struct ieee80211_hdr *hdr; 47 struct ieee80211_hdr *hdr;
47 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 48 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
48 49
49 /* assume HW handles this */ 50 /* assume HW handles this */
50 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS) 51 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS)
@@ -76,7 +77,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
76 * at the highest possible rate belonging to the PHY rates in the 77 * at the highest possible rate belonging to the PHY rates in the
77 * BSSBasicRateSet 78 * BSSBasicRateSet
78 */ 79 */
79 hdr = (struct ieee80211_hdr *)tx->skb->data; 80 hdr = (struct ieee80211_hdr *)skb->data;
80 if (ieee80211_is_ctl(hdr->frame_control)) { 81 if (ieee80211_is_ctl(hdr->frame_control)) {
81 /* TODO: These control frames are not currently sent by 82 /* TODO: These control frames are not currently sent by
82 * mac80211, but should they be implemented, this function 83 * mac80211, but should they be implemented, this function
@@ -572,8 +573,6 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
572 switch (tx->key->conf.cipher) { 573 switch (tx->key->conf.cipher) {
573 case WLAN_CIPHER_SUITE_WEP40: 574 case WLAN_CIPHER_SUITE_WEP40:
574 case WLAN_CIPHER_SUITE_WEP104: 575 case WLAN_CIPHER_SUITE_WEP104:
575 if (ieee80211_is_auth(hdr->frame_control))
576 break;
577 case WLAN_CIPHER_SUITE_TKIP: 576 case WLAN_CIPHER_SUITE_TKIP:
578 if (!ieee80211_is_data_present(hdr->frame_control)) 577 if (!ieee80211_is_data_present(hdr->frame_control))
579 tx->key = NULL; 578 tx->key = NULL;
@@ -844,11 +843,13 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
844 return TX_CONTINUE; 843 return TX_CONTINUE;
845} 844}
846 845
847static int ieee80211_fragment(struct ieee80211_local *local, 846static int ieee80211_fragment(struct ieee80211_tx_data *tx,
848 struct sk_buff *skb, int hdrlen, 847 struct sk_buff *skb, int hdrlen,
849 int frag_threshold) 848 int frag_threshold)
850{ 849{
851 struct sk_buff *tail = skb, *tmp; 850 struct ieee80211_local *local = tx->local;
851 struct ieee80211_tx_info *info;
852 struct sk_buff *tmp;
852 int per_fragm = frag_threshold - hdrlen - FCS_LEN; 853 int per_fragm = frag_threshold - hdrlen - FCS_LEN;
853 int pos = hdrlen + per_fragm; 854 int pos = hdrlen + per_fragm;
854 int rem = skb->len - hdrlen - per_fragm; 855 int rem = skb->len - hdrlen - per_fragm;
@@ -856,6 +857,8 @@ static int ieee80211_fragment(struct ieee80211_local *local,
856 if (WARN_ON(rem < 0)) 857 if (WARN_ON(rem < 0))
857 return -EINVAL; 858 return -EINVAL;
858 859
860 /* first fragment was already added to queue by caller */
861
859 while (rem) { 862 while (rem) {
860 int fraglen = per_fragm; 863 int fraglen = per_fragm;
861 864
@@ -868,12 +871,21 @@ static int ieee80211_fragment(struct ieee80211_local *local,
868 IEEE80211_ENCRYPT_TAILROOM); 871 IEEE80211_ENCRYPT_TAILROOM);
869 if (!tmp) 872 if (!tmp)
870 return -ENOMEM; 873 return -ENOMEM;
871 tail->next = tmp; 874
872 tail = tmp; 875 __skb_queue_tail(&tx->skbs, tmp);
876
873 skb_reserve(tmp, local->tx_headroom + 877 skb_reserve(tmp, local->tx_headroom +
874 IEEE80211_ENCRYPT_HEADROOM); 878 IEEE80211_ENCRYPT_HEADROOM);
875 /* copy control information */ 879 /* copy control information */
876 memcpy(tmp->cb, skb->cb, sizeof(tmp->cb)); 880 memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
881
882 info = IEEE80211_SKB_CB(tmp);
883 info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
884 IEEE80211_TX_CTL_FIRST_FRAGMENT);
885
886 if (rem)
887 info->flags |= IEEE80211_TX_CTL_MORE_FRAMES;
888
877 skb_copy_queue_mapping(tmp, skb); 889 skb_copy_queue_mapping(tmp, skb);
878 tmp->priority = skb->priority; 890 tmp->priority = skb->priority;
879 tmp->dev = skb->dev; 891 tmp->dev = skb->dev;
@@ -885,6 +897,7 @@ static int ieee80211_fragment(struct ieee80211_local *local,
885 pos += fraglen; 897 pos += fraglen;
886 } 898 }
887 899
900 /* adjust first fragment's length */
888 skb->len = hdrlen + per_fragm; 901 skb->len = hdrlen + per_fragm;
889 return 0; 902 return 0;
890} 903}
@@ -899,6 +912,10 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
899 int hdrlen; 912 int hdrlen;
900 int fragnum; 913 int fragnum;
901 914
915 /* no matter what happens, tx->skb moves to tx->skbs */
916 __skb_queue_tail(&tx->skbs, skb);
917 tx->skb = NULL;
918
902 if (info->flags & IEEE80211_TX_CTL_DONTFRAG) 919 if (info->flags & IEEE80211_TX_CTL_DONTFRAG)
903 return TX_CONTINUE; 920 return TX_CONTINUE;
904 921
@@ -927,21 +944,21 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
927 * of the fragments then we will simply pretend to accept the skb 944 * of the fragments then we will simply pretend to accept the skb
928 * but store it away as pending. 945 * but store it away as pending.
929 */ 946 */
930 if (ieee80211_fragment(tx->local, skb, hdrlen, frag_threshold)) 947 if (ieee80211_fragment(tx, skb, hdrlen, frag_threshold))
931 return TX_DROP; 948 return TX_DROP;
932 949
933 /* update duration/seq/flags of fragments */ 950 /* update duration/seq/flags of fragments */
934 fragnum = 0; 951 fragnum = 0;
935 do { 952
953 skb_queue_walk(&tx->skbs, skb) {
936 int next_len; 954 int next_len;
937 const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); 955 const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
938 956
939 hdr = (void *)skb->data; 957 hdr = (void *)skb->data;
940 info = IEEE80211_SKB_CB(skb); 958 info = IEEE80211_SKB_CB(skb);
941 959
942 if (skb->next) { 960 if (!skb_queue_is_last(&tx->skbs, skb)) {
943 hdr->frame_control |= morefrags; 961 hdr->frame_control |= morefrags;
944 next_len = skb->next->len;
945 /* 962 /*
946 * No multi-rate retries for fragmented frames, that 963 * No multi-rate retries for fragmented frames, that
947 * would completely throw off the NAV at other STAs. 964 * would completely throw off the NAV at other STAs.
@@ -956,10 +973,9 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
956 hdr->frame_control &= ~morefrags; 973 hdr->frame_control &= ~morefrags;
957 next_len = 0; 974 next_len = 0;
958 } 975 }
959 hdr->duration_id = ieee80211_duration(tx, 0, next_len);
960 hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG); 976 hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
961 fragnum++; 977 fragnum++;
962 } while ((skb = skb->next)); 978 }
963 979
964 return TX_CONTINUE; 980 return TX_CONTINUE;
965} 981}
@@ -967,16 +983,16 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
967static ieee80211_tx_result debug_noinline 983static ieee80211_tx_result debug_noinline
968ieee80211_tx_h_stats(struct ieee80211_tx_data *tx) 984ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
969{ 985{
970 struct sk_buff *skb = tx->skb; 986 struct sk_buff *skb;
971 987
972 if (!tx->sta) 988 if (!tx->sta)
973 return TX_CONTINUE; 989 return TX_CONTINUE;
974 990
975 tx->sta->tx_packets++; 991 tx->sta->tx_packets++;
976 do { 992 skb_queue_walk(&tx->skbs, skb) {
977 tx->sta->tx_fragments++; 993 tx->sta->tx_fragments++;
978 tx->sta->tx_bytes += skb->len; 994 tx->sta->tx_bytes += skb->len;
979 } while ((skb = skb->next)); 995 }
980 996
981 return TX_CONTINUE; 997 return TX_CONTINUE;
982} 998}
@@ -1015,21 +1031,25 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
1015static ieee80211_tx_result debug_noinline 1031static ieee80211_tx_result debug_noinline
1016ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx) 1032ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
1017{ 1033{
1018 struct sk_buff *skb = tx->skb; 1034 struct sk_buff *skb;
1019 struct ieee80211_hdr *hdr; 1035 struct ieee80211_hdr *hdr;
1020 int next_len; 1036 int next_len;
1021 bool group_addr; 1037 bool group_addr;
1022 1038
1023 do { 1039 skb_queue_walk(&tx->skbs, skb) {
1024 hdr = (void *) skb->data; 1040 hdr = (void *) skb->data;
1025 if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) 1041 if (unlikely(ieee80211_is_pspoll(hdr->frame_control)))
1026 break; /* must not overwrite AID */ 1042 break; /* must not overwrite AID */
1027 next_len = skb->next ? skb->next->len : 0; 1043 if (!skb_queue_is_last(&tx->skbs, skb)) {
1044 struct sk_buff *next = skb_queue_next(&tx->skbs, skb);
1045 next_len = next->len;
1046 } else
1047 next_len = 0;
1028 group_addr = is_multicast_ether_addr(hdr->addr1); 1048 group_addr = is_multicast_ether_addr(hdr->addr1);
1029 1049
1030 hdr->duration_id = 1050 hdr->duration_id =
1031 ieee80211_duration(tx, group_addr, next_len); 1051 ieee80211_duration(tx, skb, group_addr, next_len);
1032 } while ((skb = skb->next)); 1052 }
1033 1053
1034 return TX_CONTINUE; 1054 return TX_CONTINUE;
1035} 1055}
@@ -1108,6 +1128,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1108 tx->local = local; 1128 tx->local = local;
1109 tx->sdata = sdata; 1129 tx->sdata = sdata;
1110 tx->channel = local->hw.conf.channel; 1130 tx->channel = local->hw.conf.channel;
1131 __skb_queue_head_init(&tx->skbs);
1111 1132
1112 /* 1133 /*
1113 * If this flag is set to true anywhere, and we get here, 1134 * If this flag is set to true anywhere, and we get here,
@@ -1180,22 +1201,18 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1180 return TX_CONTINUE; 1201 return TX_CONTINUE;
1181} 1202}
1182 1203
1183/* 1204static bool ieee80211_tx_frags(struct ieee80211_local *local,
1184 * Returns false if the frame couldn't be transmitted but was queued instead. 1205 struct ieee80211_vif *vif,
1185 */ 1206 struct ieee80211_sta *sta,
1186static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp, 1207 struct sk_buff_head *skbs,
1187 struct sta_info *sta, bool txpending) 1208 bool txpending)
1188{ 1209{
1189 struct sk_buff *skb = *skbp, *next; 1210 struct sk_buff *skb, *tmp;
1190 struct ieee80211_tx_info *info; 1211 struct ieee80211_tx_info *info;
1191 struct ieee80211_sub_if_data *sdata;
1192 unsigned long flags; 1212 unsigned long flags;
1193 int len;
1194 bool fragm = false;
1195 1213
1196 while (skb) { 1214 skb_queue_walk_safe(skbs, skb, tmp) {
1197 int q = skb_get_queue_mapping(skb); 1215 int q = skb_get_queue_mapping(skb);
1198 __le16 fc;
1199 1216
1200 spin_lock_irqsave(&local->queue_stop_reason_lock, flags); 1217 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
1201 if (local->queue_stop_reasons[q] || 1218 if (local->queue_stop_reasons[q] ||
@@ -1205,24 +1222,10 @@ static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp,
1205 * transmission from the tx-pending tasklet when the 1222 * transmission from the tx-pending tasklet when the
1206 * queue is woken again. 1223 * queue is woken again.
1207 */ 1224 */
1208 1225 if (txpending)
1209 do { 1226 skb_queue_splice(skbs, &local->pending[q]);
1210 next = skb->next; 1227 else
1211 skb->next = NULL; 1228 skb_queue_splice_tail(skbs, &local->pending[q]);
1212 /*
1213 * NB: If txpending is true, next must already
1214 * be NULL since we must've gone through this
1215 * loop before already; therefore we can just
1216 * queue the frame to the head without worrying
1217 * about reordering of fragments.
1218 */
1219 if (unlikely(txpending))
1220 __skb_queue_head(&local->pending[q],
1221 skb);
1222 else
1223 __skb_queue_tail(&local->pending[q],
1224 skb);
1225 } while ((skb = next));
1226 1229
1227 spin_unlock_irqrestore(&local->queue_stop_reason_lock, 1230 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
1228 flags); 1231 flags);
@@ -1231,47 +1234,72 @@ static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp,
1231 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 1234 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
1232 1235
1233 info = IEEE80211_SKB_CB(skb); 1236 info = IEEE80211_SKB_CB(skb);
1237 info->control.vif = vif;
1238 info->control.sta = sta;
1234 1239
1235 if (fragm) 1240 __skb_unlink(skb, skbs);
1236 info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT | 1241 drv_tx(local, skb);
1237 IEEE80211_TX_CTL_FIRST_FRAGMENT); 1242 }
1238
1239 next = skb->next;
1240 len = skb->len;
1241 1243
1242 if (next) 1244 return true;
1243 info->flags |= IEEE80211_TX_CTL_MORE_FRAMES; 1245}
1244 1246
1245 sdata = vif_to_sdata(info->control.vif); 1247/*
1248 * Returns false if the frame couldn't be transmitted but was queued instead.
1249 */
1250static bool __ieee80211_tx(struct ieee80211_local *local,
1251 struct sk_buff_head *skbs, int led_len,
1252 struct sta_info *sta, bool txpending)
1253{
1254 struct ieee80211_tx_info *info;
1255 struct ieee80211_sub_if_data *sdata;
1256 struct ieee80211_vif *vif;
1257 struct ieee80211_sta *pubsta;
1258 struct sk_buff *skb;
1259 bool result = true;
1260 __le16 fc;
1246 1261
1247 switch (sdata->vif.type) { 1262 if (WARN_ON(skb_queue_empty(skbs)))
1248 case NL80211_IFTYPE_MONITOR: 1263 return true;
1249 info->control.vif = NULL;
1250 break;
1251 case NL80211_IFTYPE_AP_VLAN:
1252 info->control.vif = &container_of(sdata->bss,
1253 struct ieee80211_sub_if_data, u.ap)->vif;
1254 break;
1255 default:
1256 /* keep */
1257 break;
1258 }
1259 1264
1260 if (sta && sta->uploaded) 1265 skb = skb_peek(skbs);
1261 info->control.sta = &sta->sta; 1266 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
1262 else 1267 info = IEEE80211_SKB_CB(skb);
1263 info->control.sta = NULL; 1268 sdata = vif_to_sdata(info->control.vif);
1269 if (sta && !sta->uploaded)
1270 sta = NULL;
1264 1271
1265 fc = ((struct ieee80211_hdr *)skb->data)->frame_control; 1272 if (sta)
1266 drv_tx(local, skb); 1273 pubsta = &sta->sta;
1274 else
1275 pubsta = NULL;
1267 1276
1268 ieee80211_tpt_led_trig_tx(local, fc, len); 1277 switch (sdata->vif.type) {
1269 *skbp = skb = next; 1278 case NL80211_IFTYPE_MONITOR:
1270 ieee80211_led_tx(local, 1); 1279 sdata = NULL;
1271 fragm = true; 1280 vif = NULL;
1281 break;
1282 case NL80211_IFTYPE_AP_VLAN:
1283 sdata = container_of(sdata->bss,
1284 struct ieee80211_sub_if_data, u.ap);
1285 /* fall through */
1286 default:
1287 vif = &sdata->vif;
1288 break;
1272 } 1289 }
1273 1290
1274 return true; 1291 if (local->ops->tx_frags)
1292 drv_tx_frags(local, vif, pubsta, skbs);
1293 else
1294 result = ieee80211_tx_frags(local, vif, pubsta, skbs,
1295 txpending);
1296
1297 ieee80211_tpt_led_trig_tx(local, fc, led_len);
1298 ieee80211_led_tx(local, 1);
1299
1300 WARN_ON(!skb_queue_empty(skbs));
1301
1302 return result;
1275} 1303}
1276 1304
1277/* 1305/*
@@ -1280,8 +1308,7 @@ static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp,
1280 */ 1308 */
1281static int invoke_tx_handlers(struct ieee80211_tx_data *tx) 1309static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1282{ 1310{
1283 struct sk_buff *skb = tx->skb; 1311 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
1284 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1285 ieee80211_tx_result res = TX_DROP; 1312 ieee80211_tx_result res = TX_DROP;
1286 1313
1287#define CALL_TXH(txh) \ 1314#define CALL_TXH(txh) \
@@ -1315,13 +1342,10 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1315 txh_done: 1342 txh_done:
1316 if (unlikely(res == TX_DROP)) { 1343 if (unlikely(res == TX_DROP)) {
1317 I802_DEBUG_INC(tx->local->tx_handlers_drop); 1344 I802_DEBUG_INC(tx->local->tx_handlers_drop);
1318 while (skb) { 1345 if (tx->skb)
1319 struct sk_buff *next; 1346 dev_kfree_skb(tx->skb);
1320 1347 else
1321 next = skb->next; 1348 __skb_queue_purge(&tx->skbs);
1322 dev_kfree_skb(skb);
1323 skb = next;
1324 }
1325 return -1; 1349 return -1;
1326 } else if (unlikely(res == TX_QUEUED)) { 1350 } else if (unlikely(res == TX_QUEUED)) {
1327 I802_DEBUG_INC(tx->local->tx_handlers_queued); 1351 I802_DEBUG_INC(tx->local->tx_handlers_queued);
@@ -1342,6 +1366,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
1342 ieee80211_tx_result res_prepare; 1366 ieee80211_tx_result res_prepare;
1343 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1367 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1344 bool result = true; 1368 bool result = true;
1369 int led_len;
1345 1370
1346 if (unlikely(skb->len < 10)) { 1371 if (unlikely(skb->len < 10)) {
1347 dev_kfree_skb(skb); 1372 dev_kfree_skb(skb);
@@ -1351,6 +1376,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
1351 rcu_read_lock(); 1376 rcu_read_lock();
1352 1377
1353 /* initialises tx */ 1378 /* initialises tx */
1379 led_len = skb->len;
1354 res_prepare = ieee80211_tx_prepare(sdata, &tx, skb); 1380 res_prepare = ieee80211_tx_prepare(sdata, &tx, skb);
1355 1381
1356 if (unlikely(res_prepare == TX_DROP)) { 1382 if (unlikely(res_prepare == TX_DROP)) {
@@ -1364,7 +1390,8 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
1364 info->band = tx.channel->band; 1390 info->band = tx.channel->band;
1365 1391
1366 if (!invoke_tx_handlers(&tx)) 1392 if (!invoke_tx_handlers(&tx))
1367 result = __ieee80211_tx(local, &tx.skb, tx.sta, txpending); 1393 result = __ieee80211_tx(local, &tx.skbs, led_len,
1394 tx.sta, txpending);
1368 out: 1395 out:
1369 rcu_read_unlock(); 1396 rcu_read_unlock();
1370 return result; 1397 return result;
@@ -2112,10 +2139,15 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
2112 if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) { 2139 if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
2113 result = ieee80211_tx(sdata, skb, true); 2140 result = ieee80211_tx(sdata, skb, true);
2114 } else { 2141 } else {
2142 struct sk_buff_head skbs;
2143
2144 __skb_queue_head_init(&skbs);
2145 __skb_queue_tail(&skbs, skb);
2146
2115 hdr = (struct ieee80211_hdr *)skb->data; 2147 hdr = (struct ieee80211_hdr *)skb->data;
2116 sta = sta_info_get(sdata, hdr->addr1); 2148 sta = sta_info_get(sdata, hdr->addr1);
2117 2149
2118 result = __ieee80211_tx(local, &skb, sta, true); 2150 result = __ieee80211_tx(local, &skbs, skb->len, sta, true);
2119 } 2151 }
2120 2152
2121 return result; 2153 return result;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 3a00814699f0..3b9b492e9403 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -20,6 +20,7 @@
20#include <linux/etherdevice.h> 20#include <linux/etherdevice.h>
21#include <linux/if_arp.h> 21#include <linux/if_arp.h>
22#include <linux/bitmap.h> 22#include <linux/bitmap.h>
23#include <linux/crc32.h>
23#include <net/net_namespace.h> 24#include <net/net_namespace.h>
24#include <net/cfg80211.h> 25#include <net/cfg80211.h>
25#include <net/rtnetlink.h> 26#include <net/rtnetlink.h>
@@ -96,13 +97,13 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
96 97
97void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx) 98void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
98{ 99{
99 struct sk_buff *skb = tx->skb; 100 struct sk_buff *skb;
100 struct ieee80211_hdr *hdr; 101 struct ieee80211_hdr *hdr;
101 102
102 do { 103 skb_queue_walk(&tx->skbs, skb) {
103 hdr = (struct ieee80211_hdr *) skb->data; 104 hdr = (struct ieee80211_hdr *) skb->data;
104 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); 105 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
105 } while ((skb = skb->next)); 106 }
106} 107}
107 108
108int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, 109int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
@@ -564,6 +565,172 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
564} 565}
565EXPORT_SYMBOL(ieee80211_queue_delayed_work); 566EXPORT_SYMBOL(ieee80211_queue_delayed_work);
566 567
568u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
569 struct ieee802_11_elems *elems,
570 u64 filter, u32 crc)
571{
572 size_t left = len;
573 u8 *pos = start;
574 bool calc_crc = filter != 0;
575
576 memset(elems, 0, sizeof(*elems));
577 elems->ie_start = start;
578 elems->total_len = len;
579
580 while (left >= 2) {
581 u8 id, elen;
582
583 id = *pos++;
584 elen = *pos++;
585 left -= 2;
586
587 if (elen > left)
588 break;
589
590 if (calc_crc && id < 64 && (filter & (1ULL << id)))
591 crc = crc32_be(crc, pos - 2, elen + 2);
592
593 switch (id) {
594 case WLAN_EID_SSID:
595 elems->ssid = pos;
596 elems->ssid_len = elen;
597 break;
598 case WLAN_EID_SUPP_RATES:
599 elems->supp_rates = pos;
600 elems->supp_rates_len = elen;
601 break;
602 case WLAN_EID_FH_PARAMS:
603 elems->fh_params = pos;
604 elems->fh_params_len = elen;
605 break;
606 case WLAN_EID_DS_PARAMS:
607 elems->ds_params = pos;
608 elems->ds_params_len = elen;
609 break;
610 case WLAN_EID_CF_PARAMS:
611 elems->cf_params = pos;
612 elems->cf_params_len = elen;
613 break;
614 case WLAN_EID_TIM:
615 if (elen >= sizeof(struct ieee80211_tim_ie)) {
616 elems->tim = (void *)pos;
617 elems->tim_len = elen;
618 }
619 break;
620 case WLAN_EID_IBSS_PARAMS:
621 elems->ibss_params = pos;
622 elems->ibss_params_len = elen;
623 break;
624 case WLAN_EID_CHALLENGE:
625 elems->challenge = pos;
626 elems->challenge_len = elen;
627 break;
628 case WLAN_EID_VENDOR_SPECIFIC:
629 if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
630 pos[2] == 0xf2) {
631 /* Microsoft OUI (00:50:F2) */
632
633 if (calc_crc)
634 crc = crc32_be(crc, pos - 2, elen + 2);
635
636 if (pos[3] == 1) {
637 /* OUI Type 1 - WPA IE */
638 elems->wpa = pos;
639 elems->wpa_len = elen;
640 } else if (elen >= 5 && pos[3] == 2) {
641 /* OUI Type 2 - WMM IE */
642 if (pos[4] == 0) {
643 elems->wmm_info = pos;
644 elems->wmm_info_len = elen;
645 } else if (pos[4] == 1) {
646 elems->wmm_param = pos;
647 elems->wmm_param_len = elen;
648 }
649 }
650 }
651 break;
652 case WLAN_EID_RSN:
653 elems->rsn = pos;
654 elems->rsn_len = elen;
655 break;
656 case WLAN_EID_ERP_INFO:
657 elems->erp_info = pos;
658 elems->erp_info_len = elen;
659 break;
660 case WLAN_EID_EXT_SUPP_RATES:
661 elems->ext_supp_rates = pos;
662 elems->ext_supp_rates_len = elen;
663 break;
664 case WLAN_EID_HT_CAPABILITY:
665 if (elen >= sizeof(struct ieee80211_ht_cap))
666 elems->ht_cap_elem = (void *)pos;
667 break;
668 case WLAN_EID_HT_INFORMATION:
669 if (elen >= sizeof(struct ieee80211_ht_info))
670 elems->ht_info_elem = (void *)pos;
671 break;
672 case WLAN_EID_MESH_ID:
673 elems->mesh_id = pos;
674 elems->mesh_id_len = elen;
675 break;
676 case WLAN_EID_MESH_CONFIG:
677 if (elen >= sizeof(struct ieee80211_meshconf_ie))
678 elems->mesh_config = (void *)pos;
679 break;
680 case WLAN_EID_PEER_MGMT:
681 elems->peering = pos;
682 elems->peering_len = elen;
683 break;
684 case WLAN_EID_PREQ:
685 elems->preq = pos;
686 elems->preq_len = elen;
687 break;
688 case WLAN_EID_PREP:
689 elems->prep = pos;
690 elems->prep_len = elen;
691 break;
692 case WLAN_EID_PERR:
693 elems->perr = pos;
694 elems->perr_len = elen;
695 break;
696 case WLAN_EID_RANN:
697 if (elen >= sizeof(struct ieee80211_rann_ie))
698 elems->rann = (void *)pos;
699 break;
700 case WLAN_EID_CHANNEL_SWITCH:
701 elems->ch_switch_elem = pos;
702 elems->ch_switch_elem_len = elen;
703 break;
704 case WLAN_EID_QUIET:
705 if (!elems->quiet_elem) {
706 elems->quiet_elem = pos;
707 elems->quiet_elem_len = elen;
708 }
709 elems->num_of_quiet_elem++;
710 break;
711 case WLAN_EID_COUNTRY:
712 elems->country_elem = pos;
713 elems->country_elem_len = elen;
714 break;
715 case WLAN_EID_PWR_CONSTRAINT:
716 elems->pwr_constr_elem = pos;
717 elems->pwr_constr_elem_len = elen;
718 break;
719 case WLAN_EID_TIMEOUT_INTERVAL:
720 elems->timeout_int = pos;
721 elems->timeout_int_len = elen;
722 break;
723 default:
724 break;
725 }
726
727 left -= elen;
728 pos += elen;
729 }
730
731 return crc;
732}
733
567void ieee802_11_parse_elems(u8 *start, size_t len, 734void ieee802_11_parse_elems(u8 *start, size_t len,
568 struct ieee802_11_elems *elems) 735 struct ieee802_11_elems *elems)
569{ 736{
@@ -813,7 +980,8 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
813 } 980 }
814 981
815 if (sband->ht_cap.ht_supported) 982 if (sband->ht_cap.ht_supported)
816 pos = ieee80211_ie_build_ht_cap(pos, sband, sband->ht_cap.cap); 983 pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
984 sband->ht_cap.cap);
817 985
818 /* 986 /*
819 * If adding more here, adjust code in main.c 987 * If adding more here, adjust code in main.c
@@ -1356,7 +1524,7 @@ void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif)
1356} 1524}
1357EXPORT_SYMBOL(ieee80211_disable_rssi_reports); 1525EXPORT_SYMBOL(ieee80211_disable_rssi_reports);
1358 1526
1359u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_supported_band *sband, 1527u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
1360 u16 cap) 1528 u16 cap)
1361{ 1529{
1362 __le16 tmp; 1530 __le16 tmp;
@@ -1371,13 +1539,13 @@ u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_supported_band *sband,
1371 pos += sizeof(u16); 1539 pos += sizeof(u16);
1372 1540
1373 /* AMPDU parameters */ 1541 /* AMPDU parameters */
1374 *pos++ = sband->ht_cap.ampdu_factor | 1542 *pos++ = ht_cap->ampdu_factor |
1375 (sband->ht_cap.ampdu_density << 1543 (ht_cap->ampdu_density <<
1376 IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT); 1544 IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
1377 1545
1378 /* MCS set */ 1546 /* MCS set */
1379 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs)); 1547 memcpy(pos, &ht_cap->mcs, sizeof(ht_cap->mcs));
1380 pos += sizeof(sband->ht_cap.mcs); 1548 pos += sizeof(ht_cap->mcs);
1381 1549
1382 /* extended capabilities */ 1550 /* extended capabilities */
1383 pos += sizeof(__le16); 1551 pos += sizeof(__le16);
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index a1c6bfd55f0f..68ad351479df 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -330,13 +330,12 @@ ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx)
330 330
331 ieee80211_tx_set_protected(tx); 331 ieee80211_tx_set_protected(tx);
332 332
333 skb = tx->skb; 333 skb_queue_walk(&tx->skbs, skb) {
334 do {
335 if (wep_encrypt_skb(tx, skb) < 0) { 334 if (wep_encrypt_skb(tx, skb) < 0) {
336 I802_DEBUG_INC(tx->local->tx_handlers_drop_wep); 335 I802_DEBUG_INC(tx->local->tx_handlers_drop_wep);
337 return TX_DROP; 336 return TX_DROP;
338 } 337 }
339 } while ((skb = skb->next)); 338 }
340 339
341 return TX_CONTINUE; 340 return TX_CONTINUE;
342} 341}
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index 3dd5a89e99a7..6884a2d986dc 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -94,7 +94,8 @@ static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
94 94
95/* frame sending functions */ 95/* frame sending functions */
96 96
97static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie, 97static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
98 struct sk_buff *skb, const u8 *ht_info_ie,
98 struct ieee80211_supported_band *sband, 99 struct ieee80211_supported_band *sband,
99 struct ieee80211_channel *channel, 100 struct ieee80211_channel *channel,
100 enum ieee80211_smps_mode smps) 101 enum ieee80211_smps_mode smps)
@@ -102,7 +103,10 @@ static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
102 struct ieee80211_ht_info *ht_info; 103 struct ieee80211_ht_info *ht_info;
103 u8 *pos; 104 u8 *pos;
104 u32 flags = channel->flags; 105 u32 flags = channel->flags;
105 u16 cap = sband->ht_cap.cap; 106 u16 cap;
107 struct ieee80211_sta_ht_cap ht_cap;
108
109 BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap));
106 110
107 if (!sband->ht_cap.ht_supported) 111 if (!sband->ht_cap.ht_supported)
108 return; 112 return;
@@ -113,9 +117,13 @@ static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
113 if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info)) 117 if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info))
114 return; 118 return;
115 119
120 memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
121 ieee80211_apply_htcap_overrides(sdata, &ht_cap);
122
116 ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2); 123 ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2);
117 124
118 /* determine capability flags */ 125 /* determine capability flags */
126 cap = ht_cap.cap;
119 127
120 switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { 128 switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
121 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 129 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
@@ -154,7 +162,7 @@ static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
154 162
155 /* reserve and fill IE */ 163 /* reserve and fill IE */
156 pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2); 164 pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
157 ieee80211_ie_build_ht_cap(pos, sband, cap); 165 ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
158} 166}
159 167
160static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata, 168static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
@@ -329,7 +337,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
329 337
330 if (wk->assoc.use_11n && wk->assoc.wmm_used && 338 if (wk->assoc.use_11n && wk->assoc.wmm_used &&
331 local->hw.queues >= 4) 339 local->hw.queues >= 4)
332 ieee80211_add_ht_ie(skb, wk->assoc.ht_information_ie, 340 ieee80211_add_ht_ie(sdata, skb, wk->assoc.ht_information_ie,
333 sband, wk->chan, wk->assoc.smps); 341 sband, wk->chan, wk->assoc.smps);
334 342
335 /* if present, add any custom non-vendor IEs that go after HT */ 343 /* if present, add any custom non-vendor IEs that go after HT */
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 106e15a4649f..93aab0715e8a 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -223,14 +223,14 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
223ieee80211_tx_result 223ieee80211_tx_result
224ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx) 224ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx)
225{ 225{
226 struct sk_buff *skb = tx->skb; 226 struct sk_buff *skb;
227 227
228 ieee80211_tx_set_protected(tx); 228 ieee80211_tx_set_protected(tx);
229 229
230 do { 230 skb_queue_walk(&tx->skbs, skb) {
231 if (tkip_encrypt_skb(tx, skb) < 0) 231 if (tkip_encrypt_skb(tx, skb) < 0)
232 return TX_DROP; 232 return TX_DROP;
233 } while ((skb = skb->next)); 233 }
234 234
235 return TX_CONTINUE; 235 return TX_CONTINUE;
236} 236}
@@ -449,14 +449,14 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
449ieee80211_tx_result 449ieee80211_tx_result
450ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx) 450ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx)
451{ 451{
452 struct sk_buff *skb = tx->skb; 452 struct sk_buff *skb;
453 453
454 ieee80211_tx_set_protected(tx); 454 ieee80211_tx_set_protected(tx);
455 455
456 do { 456 skb_queue_walk(&tx->skbs, skb) {
457 if (ccmp_encrypt_skb(tx, skb) < 0) 457 if (ccmp_encrypt_skb(tx, skb) < 0)
458 return TX_DROP; 458 return TX_DROP;
459 } while ((skb = skb->next)); 459 }
460 460
461 return TX_CONTINUE; 461 return TX_CONTINUE;
462} 462}
@@ -554,15 +554,22 @@ static inline void bip_ipn_swap(u8 *d, const u8 *s)
554ieee80211_tx_result 554ieee80211_tx_result
555ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx) 555ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx)
556{ 556{
557 struct sk_buff *skb = tx->skb; 557 struct sk_buff *skb;
558 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 558 struct ieee80211_tx_info *info;
559 struct ieee80211_key *key = tx->key; 559 struct ieee80211_key *key = tx->key;
560 struct ieee80211_mmie *mmie; 560 struct ieee80211_mmie *mmie;
561 u8 aad[20]; 561 u8 aad[20];
562 u64 pn64; 562 u64 pn64;
563 563
564 if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
565 return TX_DROP;
566
567 skb = skb_peek(&tx->skbs);
568
569 info = IEEE80211_SKB_CB(skb);
570
564 if (info->control.hw_key) 571 if (info->control.hw_key)
565 return 0; 572 return TX_CONTINUE;
566 573
567 if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) 574 if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
568 return TX_DROP; 575 return TX_DROP;
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 1c7d4df5418c..fb08c28fc90a 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -341,13 +341,17 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
341 const u8 *bssid, const u8 *prev_bssid, 341 const u8 *bssid, const u8 *prev_bssid,
342 const u8 *ssid, int ssid_len, 342 const u8 *ssid, int ssid_len,
343 const u8 *ie, int ie_len, bool use_mfp, 343 const u8 *ie, int ie_len, bool use_mfp,
344 struct cfg80211_crypto_settings *crypt); 344 struct cfg80211_crypto_settings *crypt,
345 u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
346 struct ieee80211_ht_cap *ht_capa_mask);
345int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, 347int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
346 struct net_device *dev, struct ieee80211_channel *chan, 348 struct net_device *dev, struct ieee80211_channel *chan,
347 const u8 *bssid, const u8 *prev_bssid, 349 const u8 *bssid, const u8 *prev_bssid,
348 const u8 *ssid, int ssid_len, 350 const u8 *ssid, int ssid_len,
349 const u8 *ie, int ie_len, bool use_mfp, 351 const u8 *ie, int ie_len, bool use_mfp,
350 struct cfg80211_crypto_settings *crypt); 352 struct cfg80211_crypto_settings *crypt,
353 u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
354 struct ieee80211_ht_cap *ht_capa_mask);
351int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, 355int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
352 struct net_device *dev, const u8 *bssid, 356 struct net_device *dev, const u8 *bssid,
353 const u8 *ie, int ie_len, u16 reason, 357 const u8 *ie, int ie_len, u16 reason,
@@ -379,6 +383,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
379 bool channel_type_valid, unsigned int wait, 383 bool channel_type_valid, unsigned int wait,
380 const u8 *buf, size_t len, bool no_cck, 384 const u8 *buf, size_t len, bool no_cck,
381 bool dont_wait_for_ack, u64 *cookie); 385 bool dont_wait_for_ack, u64 *cookie);
386void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa,
387 const struct ieee80211_ht_cap *ht_capa_mask);
382 388
383/* SME */ 389/* SME */
384int __cfg80211_connect(struct cfg80211_registered_device *rdev, 390int __cfg80211_connect(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 6c1bafd508c8..438dfc105b4a 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -501,13 +501,32 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
501 return err; 501 return err;
502} 502}
503 503
504/* Do a logical ht_capa &= ht_capa_mask. */
505void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa,
506 const struct ieee80211_ht_cap *ht_capa_mask)
507{
508 int i;
509 u8 *p1, *p2;
510 if (!ht_capa_mask) {
511 memset(ht_capa, 0, sizeof(*ht_capa));
512 return;
513 }
514
515 p1 = (u8*)(ht_capa);
516 p2 = (u8*)(ht_capa_mask);
517 for (i = 0; i<sizeof(*ht_capa); i++)
518 p1[i] &= p2[i];
519}
520
504int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, 521int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
505 struct net_device *dev, 522 struct net_device *dev,
506 struct ieee80211_channel *chan, 523 struct ieee80211_channel *chan,
507 const u8 *bssid, const u8 *prev_bssid, 524 const u8 *bssid, const u8 *prev_bssid,
508 const u8 *ssid, int ssid_len, 525 const u8 *ssid, int ssid_len,
509 const u8 *ie, int ie_len, bool use_mfp, 526 const u8 *ie, int ie_len, bool use_mfp,
510 struct cfg80211_crypto_settings *crypt) 527 struct cfg80211_crypto_settings *crypt,
528 u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
529 struct ieee80211_ht_cap *ht_capa_mask)
511{ 530{
512 struct wireless_dev *wdev = dev->ieee80211_ptr; 531 struct wireless_dev *wdev = dev->ieee80211_ptr;
513 struct cfg80211_assoc_request req; 532 struct cfg80211_assoc_request req;
@@ -537,6 +556,15 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
537 memcpy(&req.crypto, crypt, sizeof(req.crypto)); 556 memcpy(&req.crypto, crypt, sizeof(req.crypto));
538 req.use_mfp = use_mfp; 557 req.use_mfp = use_mfp;
539 req.prev_bssid = prev_bssid; 558 req.prev_bssid = prev_bssid;
559 req.flags = assoc_flags;
560 if (ht_capa)
561 memcpy(&req.ht_capa, ht_capa, sizeof(req.ht_capa));
562 if (ht_capa_mask)
563 memcpy(&req.ht_capa_mask, ht_capa_mask,
564 sizeof(req.ht_capa_mask));
565 cfg80211_oper_and_ht_capa(&req.ht_capa_mask,
566 rdev->wiphy.ht_capa_mod_mask);
567
540 req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, 568 req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
541 WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); 569 WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
542 if (!req.bss) { 570 if (!req.bss) {
@@ -574,14 +602,17 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
574 const u8 *bssid, const u8 *prev_bssid, 602 const u8 *bssid, const u8 *prev_bssid,
575 const u8 *ssid, int ssid_len, 603 const u8 *ssid, int ssid_len,
576 const u8 *ie, int ie_len, bool use_mfp, 604 const u8 *ie, int ie_len, bool use_mfp,
577 struct cfg80211_crypto_settings *crypt) 605 struct cfg80211_crypto_settings *crypt,
606 u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
607 struct ieee80211_ht_cap *ht_capa_mask)
578{ 608{
579 struct wireless_dev *wdev = dev->ieee80211_ptr; 609 struct wireless_dev *wdev = dev->ieee80211_ptr;
580 int err; 610 int err;
581 611
582 wdev_lock(wdev); 612 wdev_lock(wdev);
583 err = __cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid, 613 err = __cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid,
584 ssid, ssid_len, ie, ie_len, use_mfp, crypt); 614 ssid, ssid_len, ie, ie_len, use_mfp, crypt,
615 assoc_flags, ht_capa, ht_capa_mask);
585 wdev_unlock(wdev); 616 wdev_unlock(wdev);
586 617
587 return err; 618 return err;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 6bc7c4b32fa5..a1cabde7cb5f 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -199,6 +199,11 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
199 [NL80211_ATTR_DONT_WAIT_FOR_ACK] = { .type = NLA_FLAG }, 199 [NL80211_ATTR_DONT_WAIT_FOR_ACK] = { .type = NLA_FLAG },
200 [NL80211_ATTR_PROBE_RESP] = { .type = NLA_BINARY, 200 [NL80211_ATTR_PROBE_RESP] = { .type = NLA_BINARY,
201 .len = IEEE80211_MAX_DATA_LEN }, 201 .len = IEEE80211_MAX_DATA_LEN },
202 [NL80211_ATTR_DFS_REGION] = { .type = NLA_U8 },
203 [NL80211_ATTR_DISABLE_HT] = { .type = NLA_FLAG },
204 [NL80211_ATTR_HT_CAPABILITY_MASK] = {
205 .len = NL80211_HT_CAPABILITY_LEN
206 },
202}; 207};
203 208
204/* policy for the key attributes */ 209/* policy for the key attributes */
@@ -881,7 +886,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
881 CMD(set_pmksa, SET_PMKSA); 886 CMD(set_pmksa, SET_PMKSA);
882 CMD(del_pmksa, DEL_PMKSA); 887 CMD(del_pmksa, DEL_PMKSA);
883 CMD(flush_pmksa, FLUSH_PMKSA); 888 CMD(flush_pmksa, FLUSH_PMKSA);
884 CMD(remain_on_channel, REMAIN_ON_CHANNEL); 889 if (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
890 CMD(remain_on_channel, REMAIN_ON_CHANNEL);
885 CMD(set_bitrate_mask, SET_TX_BITRATE_MASK); 891 CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
886 CMD(mgmt_tx, FRAME); 892 CMD(mgmt_tx, FRAME);
887 CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL); 893 CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL);
@@ -903,6 +909,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
903 NLA_PUT_U32(msg, i, NL80211_CMD_REGISTER_BEACONS); 909 NLA_PUT_U32(msg, i, NL80211_CMD_REGISTER_BEACONS);
904 } 910 }
905 911
912#ifdef CONFIG_NL80211_TESTMODE
913 CMD(testmode_cmd, TESTMODE);
914#endif
915
906#undef CMD 916#undef CMD
907 917
908 if (dev->ops->connect || dev->ops->auth) { 918 if (dev->ops->connect || dev->ops->auth) {
@@ -917,11 +927,12 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
917 927
918 nla_nest_end(msg, nl_cmds); 928 nla_nest_end(msg, nl_cmds);
919 929
920 if (dev->ops->remain_on_channel) 930 if (dev->ops->remain_on_channel &&
931 dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
921 NLA_PUT_U32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION, 932 NLA_PUT_U32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
922 dev->wiphy.max_remain_on_channel_duration); 933 dev->wiphy.max_remain_on_channel_duration);
923 934
924 if (dev->ops->mgmt_tx_cancel_wait) 935 if (dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX)
925 NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK); 936 NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK);
926 937
927 if (mgmt_stypes) { 938 if (mgmt_stypes) {
@@ -1025,6 +1036,11 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
1025 1036
1026 NLA_PUT_U32(msg, NL80211_ATTR_FEATURE_FLAGS, dev->wiphy.features); 1037 NLA_PUT_U32(msg, NL80211_ATTR_FEATURE_FLAGS, dev->wiphy.features);
1027 1038
1039 if (dev->wiphy.ht_capa_mod_mask)
1040 NLA_PUT(msg, NL80211_ATTR_HT_CAPABILITY_MASK,
1041 sizeof(*dev->wiphy.ht_capa_mod_mask),
1042 dev->wiphy.ht_capa_mod_mask);
1043
1028 return genlmsg_end(msg, hdr); 1044 return genlmsg_end(msg, hdr);
1029 1045
1030 nla_put_failure: 1046 nla_put_failure:
@@ -2478,26 +2494,34 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
2478/* 2494/*
2479 * Get vlan interface making sure it is running and on the right wiphy. 2495 * Get vlan interface making sure it is running and on the right wiphy.
2480 */ 2496 */
2481static int get_vlan(struct genl_info *info, 2497static struct net_device *get_vlan(struct genl_info *info,
2482 struct cfg80211_registered_device *rdev, 2498 struct cfg80211_registered_device *rdev)
2483 struct net_device **vlan)
2484{ 2499{
2485 struct nlattr *vlanattr = info->attrs[NL80211_ATTR_STA_VLAN]; 2500 struct nlattr *vlanattr = info->attrs[NL80211_ATTR_STA_VLAN];
2486 *vlan = NULL; 2501 struct net_device *v;
2487 2502 int ret;
2488 if (vlanattr) { 2503
2489 *vlan = dev_get_by_index(genl_info_net(info), 2504 if (!vlanattr)
2490 nla_get_u32(vlanattr)); 2505 return NULL;
2491 if (!*vlan) 2506
2492 return -ENODEV; 2507 v = dev_get_by_index(genl_info_net(info), nla_get_u32(vlanattr));
2493 if (!(*vlan)->ieee80211_ptr) 2508 if (!v)
2494 return -EINVAL; 2509 return ERR_PTR(-ENODEV);
2495 if ((*vlan)->ieee80211_ptr->wiphy != &rdev->wiphy) 2510
2496 return -EINVAL; 2511 if (!v->ieee80211_ptr || v->ieee80211_ptr->wiphy != &rdev->wiphy) {
2497 if (!netif_running(*vlan)) 2512 ret = -EINVAL;
2498 return -ENETDOWN; 2513 goto error;
2499 } 2514 }
2500 return 0; 2515
2516 if (!netif_running(v)) {
2517 ret = -ENETDOWN;
2518 goto error;
2519 }
2520
2521 return v;
2522 error:
2523 dev_put(v);
2524 return ERR_PTR(ret);
2501} 2525}
2502 2526
2503static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) 2527static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
@@ -2547,9 +2571,9 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
2547 params.plink_state = 2571 params.plink_state =
2548 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]); 2572 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]);
2549 2573
2550 err = get_vlan(info, rdev, &params.vlan); 2574 params.vlan = get_vlan(info, rdev);
2551 if (err) 2575 if (IS_ERR(params.vlan))
2552 goto out; 2576 return PTR_ERR(params.vlan);
2553 2577
2554 /* validate settings */ 2578 /* validate settings */
2555 err = 0; 2579 err = 0;
@@ -2717,9 +2741,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
2717 (rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP))) 2741 (rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)))
2718 return -EINVAL; 2742 return -EINVAL;
2719 2743
2720 err = get_vlan(info, rdev, &params.vlan); 2744 params.vlan = get_vlan(info, rdev);
2721 if (err) 2745 if (IS_ERR(params.vlan))
2722 goto out; 2746 return PTR_ERR(params.vlan);
2723 2747
2724 /* validate settings */ 2748 /* validate settings */
2725 err = 0; 2749 err = 0;
@@ -3382,6 +3406,9 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
3382 3406
3383 NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2, 3407 NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2,
3384 cfg80211_regdomain->alpha2); 3408 cfg80211_regdomain->alpha2);
3409 if (cfg80211_regdomain->dfs_region)
3410 NLA_PUT_U8(msg, NL80211_ATTR_DFS_REGION,
3411 cfg80211_regdomain->dfs_region);
3385 3412
3386 nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES); 3413 nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES);
3387 if (!nl_reg_rules) 3414 if (!nl_reg_rules)
@@ -3440,6 +3467,7 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
3440 char *alpha2 = NULL; 3467 char *alpha2 = NULL;
3441 int rem_reg_rules = 0, r = 0; 3468 int rem_reg_rules = 0, r = 0;
3442 u32 num_rules = 0, rule_idx = 0, size_of_regd; 3469 u32 num_rules = 0, rule_idx = 0, size_of_regd;
3470 u8 dfs_region = 0;
3443 struct ieee80211_regdomain *rd = NULL; 3471 struct ieee80211_regdomain *rd = NULL;
3444 3472
3445 if (!info->attrs[NL80211_ATTR_REG_ALPHA2]) 3473 if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
@@ -3450,6 +3478,9 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
3450 3478
3451 alpha2 = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]); 3479 alpha2 = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
3452 3480
3481 if (info->attrs[NL80211_ATTR_DFS_REGION])
3482 dfs_region = nla_get_u8(info->attrs[NL80211_ATTR_DFS_REGION]);
3483
3453 nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES], 3484 nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
3454 rem_reg_rules) { 3485 rem_reg_rules) {
3455 num_rules++; 3486 num_rules++;
@@ -3477,6 +3508,13 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
3477 rd->alpha2[0] = alpha2[0]; 3508 rd->alpha2[0] = alpha2[0];
3478 rd->alpha2[1] = alpha2[1]; 3509 rd->alpha2[1] = alpha2[1];
3479 3510
3511 /*
3512 * Disable DFS master mode if the DFS region was
3513 * not supported or known on this kernel.
3514 */
3515 if (reg_supported_dfs_region(dfs_region))
3516 rd->dfs_region = dfs_region;
3517
3480 nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES], 3518 nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
3481 rem_reg_rules) { 3519 rem_reg_rules) {
3482 nla_parse(tb, NL80211_REG_RULE_ATTR_MAX, 3520 nla_parse(tb, NL80211_REG_RULE_ATTR_MAX,
@@ -4384,6 +4422,9 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
4384 const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL; 4422 const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL;
4385 int err, ssid_len, ie_len = 0; 4423 int err, ssid_len, ie_len = 0;
4386 bool use_mfp = false; 4424 bool use_mfp = false;
4425 u32 flags = 0;
4426 struct ieee80211_ht_cap *ht_capa = NULL;
4427 struct ieee80211_ht_cap *ht_capa_mask = NULL;
4387 4428
4388 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) 4429 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
4389 return -EINVAL; 4430 return -EINVAL;
@@ -4427,11 +4468,25 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
4427 if (info->attrs[NL80211_ATTR_PREV_BSSID]) 4468 if (info->attrs[NL80211_ATTR_PREV_BSSID])
4428 prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]); 4469 prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]);
4429 4470
4471 if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT]))
4472 flags |= ASSOC_REQ_DISABLE_HT;
4473
4474 if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
4475 ht_capa_mask =
4476 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]);
4477
4478 if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) {
4479 if (!ht_capa_mask)
4480 return -EINVAL;
4481 ht_capa = nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
4482 }
4483
4430 err = nl80211_crypto_settings(rdev, info, &crypto, 1); 4484 err = nl80211_crypto_settings(rdev, info, &crypto, 1);
4431 if (!err) 4485 if (!err)
4432 err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid, 4486 err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid,
4433 ssid, ssid_len, ie, ie_len, use_mfp, 4487 ssid, ssid_len, ie, ie_len, use_mfp,
4434 &crypto); 4488 &crypto, flags, ht_capa,
4489 ht_capa_mask);
4435 4490
4436 return err; 4491 return err;
4437} 4492}
@@ -4921,6 +4976,22 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
4921 return PTR_ERR(connkeys); 4976 return PTR_ERR(connkeys);
4922 } 4977 }
4923 4978
4979 if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT]))
4980 connect.flags |= ASSOC_REQ_DISABLE_HT;
4981
4982 if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
4983 memcpy(&connect.ht_capa_mask,
4984 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]),
4985 sizeof(connect.ht_capa_mask));
4986
4987 if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) {
4988 if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
4989 return -EINVAL;
4990 memcpy(&connect.ht_capa,
4991 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]),
4992 sizeof(connect.ht_capa));
4993 }
4994
4924 err = cfg80211_connect(rdev, dev, &connect, connkeys); 4995 err = cfg80211_connect(rdev, dev, &connect, connkeys);
4925 if (err) 4996 if (err)
4926 kfree(connkeys); 4997 kfree(connkeys);
@@ -5108,7 +5179,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
5108 duration > rdev->wiphy.max_remain_on_channel_duration) 5179 duration > rdev->wiphy.max_remain_on_channel_duration)
5109 return -EINVAL; 5180 return -EINVAL;
5110 5181
5111 if (!rdev->ops->remain_on_channel) 5182 if (!rdev->ops->remain_on_channel ||
5183 !(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL))
5112 return -EOPNOTSUPP; 5184 return -EOPNOTSUPP;
5113 5185
5114 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { 5186 if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
@@ -5321,7 +5393,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
5321 return -EOPNOTSUPP; 5393 return -EOPNOTSUPP;
5322 5394
5323 if (info->attrs[NL80211_ATTR_DURATION]) { 5395 if (info->attrs[NL80211_ATTR_DURATION]) {
5324 if (!rdev->ops->mgmt_tx_cancel_wait) 5396 if (!(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX))
5325 return -EINVAL; 5397 return -EINVAL;
5326 wait = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]); 5398 wait = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]);
5327 } 5399 }
@@ -5339,6 +5411,9 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
5339 5411
5340 offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK]; 5412 offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK];
5341 5413
5414 if (offchan && !(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX))
5415 return -EINVAL;
5416
5342 no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]); 5417 no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
5343 5418
5344 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); 5419 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 77e926738014..76b35df39623 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1123,6 +1123,8 @@ static void wiphy_update_regulatory(struct wiphy *wiphy,
1123 if (ignore_reg_update(wiphy, initiator)) 1123 if (ignore_reg_update(wiphy, initiator))
1124 return; 1124 return;
1125 1125
1126 last_request->dfs_region = cfg80211_regdomain->dfs_region;
1127
1126 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 1128 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
1127 if (wiphy->bands[band]) 1129 if (wiphy->bands[band])
1128 handle_band(wiphy, band, initiator); 1130 handle_band(wiphy, band, initiator);
@@ -1948,6 +1950,42 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
1948 } 1950 }
1949} 1951}
1950 1952
1953bool reg_supported_dfs_region(u8 dfs_region)
1954{
1955 switch (dfs_region) {
1956 case NL80211_DFS_UNSET:
1957 case NL80211_DFS_FCC:
1958 case NL80211_DFS_ETSI:
1959 case NL80211_DFS_JP:
1960 return true;
1961 default:
1962 REG_DBG_PRINT("Ignoring uknown DFS master region: %d\n",
1963 dfs_region);
1964 return false;
1965 }
1966}
1967
1968static void print_dfs_region(u8 dfs_region)
1969{
1970 if (!dfs_region)
1971 return;
1972
1973 switch (dfs_region) {
1974 case NL80211_DFS_FCC:
1975 pr_info(" DFS Master region FCC");
1976 break;
1977 case NL80211_DFS_ETSI:
1978 pr_info(" DFS Master region ETSI");
1979 break;
1980 case NL80211_DFS_JP:
1981 pr_info(" DFS Master region JP");
1982 break;
1983 default:
1984 pr_info(" DFS Master region Uknown");
1985 break;
1986 }
1987}
1988
1951static void print_regdomain(const struct ieee80211_regdomain *rd) 1989static void print_regdomain(const struct ieee80211_regdomain *rd)
1952{ 1990{
1953 1991
@@ -1975,6 +2013,7 @@ static void print_regdomain(const struct ieee80211_regdomain *rd)
1975 pr_info("Regulatory domain changed to country: %c%c\n", 2013 pr_info("Regulatory domain changed to country: %c%c\n",
1976 rd->alpha2[0], rd->alpha2[1]); 2014 rd->alpha2[0], rd->alpha2[1]);
1977 } 2015 }
2016 print_dfs_region(rd->dfs_region);
1978 print_rd_rules(rd); 2017 print_rd_rules(rd);
1979} 2018}
1980 2019
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index 4a56799d868d..786e414afd91 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -5,6 +5,7 @@ extern const struct ieee80211_regdomain *cfg80211_regdomain;
5 5
6bool is_world_regdom(const char *alpha2); 6bool is_world_regdom(const char *alpha2);
7bool reg_is_valid_request(const char *alpha2); 7bool reg_is_valid_request(const char *alpha2);
8bool reg_supported_dfs_region(u8 dfs_region);
8 9
9int regulatory_hint_user(const char *alpha2); 10int regulatory_hint_user(const char *alpha2);
10 11
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 0acfdc9beacf..f0c900ce2fb9 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -190,7 +190,9 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
190 prev_bssid, 190 prev_bssid,
191 params->ssid, params->ssid_len, 191 params->ssid, params->ssid_len,
192 params->ie, params->ie_len, 192 params->ie, params->ie_len,
193 false, &params->crypto); 193 false, &params->crypto,
194 params->flags, &params->ht_capa,
195 &params->ht_capa_mask);
194 if (err) 196 if (err)
195 __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, 197 __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
196 NULL, 0, 198 NULL, 0,
@@ -774,6 +776,9 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
774 wdev->connect_keys = NULL; 776 wdev->connect_keys = NULL;
775 } 777 }
776 778
779 cfg80211_oper_and_ht_capa(&connect->ht_capa_mask,
780 rdev->wiphy.ht_capa_mod_mask);
781
777 if (connkeys && connkeys->def >= 0) { 782 if (connkeys && connkeys->def >= 0) {
778 int idx; 783 int idx;
779 u32 cipher; 784 u32 cipher;
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 4dde429441d2..9c601d59b77a 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -7,7 +7,6 @@
7#include <linux/bitops.h> 7#include <linux/bitops.h>
8#include <linux/etherdevice.h> 8#include <linux/etherdevice.h>
9#include <linux/slab.h> 9#include <linux/slab.h>
10#include <linux/crc32.h>
11#include <net/cfg80211.h> 10#include <net/cfg80211.h>
12#include <net/ip.h> 11#include <net/ip.h>
13#include "core.h" 12#include "core.h"
@@ -240,17 +239,6 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
240 return 0; 239 return 0;
241} 240}
242 241
243/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
244/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
245const unsigned char rfc1042_header[] __aligned(2) =
246 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
247EXPORT_SYMBOL(rfc1042_header);
248
249/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
250const unsigned char bridge_tunnel_header[] __aligned(2) =
251 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
252EXPORT_SYMBOL(bridge_tunnel_header);
253
254unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc) 242unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc)
255{ 243{
256 unsigned int hdrlen = 24; 244 unsigned int hdrlen = 24;
@@ -1051,169 +1039,13 @@ int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
1051 return 0; 1039 return 0;
1052} 1040}
1053 1041
1054u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, 1042/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
1055 struct ieee802_11_elems *elems, 1043/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
1056 u64 filter, u32 crc) 1044const unsigned char rfc1042_header[] __aligned(2) =
1057{ 1045 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
1058 size_t left = len; 1046EXPORT_SYMBOL(rfc1042_header);
1059 u8 *pos = start;
1060 bool calc_crc = filter != 0;
1061
1062 memset(elems, 0, sizeof(*elems));
1063 elems->ie_start = start;
1064 elems->total_len = len;
1065
1066 while (left >= 2) {
1067 u8 id, elen;
1068
1069 id = *pos++;
1070 elen = *pos++;
1071 left -= 2;
1072
1073 if (elen > left)
1074 break;
1075
1076 if (calc_crc && id < 64 && (filter & (1ULL << id)))
1077 crc = crc32_be(crc, pos - 2, elen + 2);
1078
1079 switch (id) {
1080 case WLAN_EID_SSID:
1081 elems->ssid = pos;
1082 elems->ssid_len = elen;
1083 break;
1084 case WLAN_EID_SUPP_RATES:
1085 elems->supp_rates = pos;
1086 elems->supp_rates_len = elen;
1087 break;
1088 case WLAN_EID_FH_PARAMS:
1089 elems->fh_params = pos;
1090 elems->fh_params_len = elen;
1091 break;
1092 case WLAN_EID_DS_PARAMS:
1093 elems->ds_params = pos;
1094 elems->ds_params_len = elen;
1095 break;
1096 case WLAN_EID_CF_PARAMS:
1097 elems->cf_params = pos;
1098 elems->cf_params_len = elen;
1099 break;
1100 case WLAN_EID_TIM:
1101 if (elen >= sizeof(struct ieee80211_tim_ie)) {
1102 elems->tim = (void *)pos;
1103 elems->tim_len = elen;
1104 }
1105 break;
1106 case WLAN_EID_IBSS_PARAMS:
1107 elems->ibss_params = pos;
1108 elems->ibss_params_len = elen;
1109 break;
1110 case WLAN_EID_CHALLENGE:
1111 elems->challenge = pos;
1112 elems->challenge_len = elen;
1113 break;
1114 case WLAN_EID_VENDOR_SPECIFIC:
1115 if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
1116 pos[2] == 0xf2) {
1117 /* Microsoft OUI (00:50:F2) */
1118
1119 if (calc_crc)
1120 crc = crc32_be(crc, pos - 2, elen + 2);
1121
1122 if (pos[3] == 1) {
1123 /* OUI Type 1 - WPA IE */
1124 elems->wpa = pos;
1125 elems->wpa_len = elen;
1126 } else if (elen >= 5 && pos[3] == 2) {
1127 /* OUI Type 2 - WMM IE */
1128 if (pos[4] == 0) {
1129 elems->wmm_info = pos;
1130 elems->wmm_info_len = elen;
1131 } else if (pos[4] == 1) {
1132 elems->wmm_param = pos;
1133 elems->wmm_param_len = elen;
1134 }
1135 }
1136 }
1137 break;
1138 case WLAN_EID_RSN:
1139 elems->rsn = pos;
1140 elems->rsn_len = elen;
1141 break;
1142 case WLAN_EID_ERP_INFO:
1143 elems->erp_info = pos;
1144 elems->erp_info_len = elen;
1145 break;
1146 case WLAN_EID_EXT_SUPP_RATES:
1147 elems->ext_supp_rates = pos;
1148 elems->ext_supp_rates_len = elen;
1149 break;
1150 case WLAN_EID_HT_CAPABILITY:
1151 if (elen >= sizeof(struct ieee80211_ht_cap))
1152 elems->ht_cap_elem = (void *)pos;
1153 break;
1154 case WLAN_EID_HT_INFORMATION:
1155 if (elen >= sizeof(struct ieee80211_ht_info))
1156 elems->ht_info_elem = (void *)pos;
1157 break;
1158 case WLAN_EID_MESH_ID:
1159 elems->mesh_id = pos;
1160 elems->mesh_id_len = elen;
1161 break;
1162 case WLAN_EID_MESH_CONFIG:
1163 if (elen >= sizeof(struct ieee80211_meshconf_ie))
1164 elems->mesh_config = (void *)pos;
1165 break;
1166 case WLAN_EID_PEER_MGMT:
1167 elems->peering = pos;
1168 elems->peering_len = elen;
1169 break;
1170 case WLAN_EID_PREQ:
1171 elems->preq = pos;
1172 elems->preq_len = elen;
1173 break;
1174 case WLAN_EID_PREP:
1175 elems->prep = pos;
1176 elems->prep_len = elen;
1177 break;
1178 case WLAN_EID_PERR:
1179 elems->perr = pos;
1180 elems->perr_len = elen;
1181 break;
1182 case WLAN_EID_RANN:
1183 if (elen >= sizeof(struct ieee80211_rann_ie))
1184 elems->rann = (void *)pos;
1185 break;
1186 case WLAN_EID_CHANNEL_SWITCH:
1187 elems->ch_switch_elem = pos;
1188 elems->ch_switch_elem_len = elen;
1189 break;
1190 case WLAN_EID_QUIET:
1191 if (!elems->quiet_elem) {
1192 elems->quiet_elem = pos;
1193 elems->quiet_elem_len = elen;
1194 }
1195 elems->num_of_quiet_elem++;
1196 break;
1197 case WLAN_EID_COUNTRY:
1198 elems->country_elem = pos;
1199 elems->country_elem_len = elen;
1200 break;
1201 case WLAN_EID_PWR_CONSTRAINT:
1202 elems->pwr_constr_elem = pos;
1203 elems->pwr_constr_elem_len = elen;
1204 break;
1205 case WLAN_EID_TIMEOUT_INTERVAL:
1206 elems->timeout_int = pos;
1207 elems->timeout_int_len = elen;
1208 break;
1209 default:
1210 break;
1211 }
1212
1213 left -= elen;
1214 pos += elen;
1215 }
1216 1047
1217 return crc; 1048/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
1218} 1049const unsigned char bridge_tunnel_header[] __aligned(2) =
1219EXPORT_SYMBOL(ieee802_11_parse_elems_crc); 1050 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
1051EXPORT_SYMBOL(bridge_tunnel_header);