diff options
Diffstat (limited to 'drivers/net/wireless/iwlwifi/dvm/tx.c')
-rw-r--r-- | drivers/net/wireless/iwlwifi/dvm/tx.c | 1385 |
1 files changed, 1385 insertions, 0 deletions
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c new file mode 100644 index 00000000000..52f2cae080c --- /dev/null +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c | |||
@@ -0,0 +1,1385 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * GPL LICENSE SUMMARY | ||
4 | * | ||
5 | * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of version 2 of the GNU General Public License as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, | ||
19 | * USA | ||
20 | * | ||
21 | * The full GNU General Public License is included in this distribution | ||
22 | * in the file called LICENSE.GPL. | ||
23 | * | ||
24 | * Contact Information: | ||
25 | * Intel Linux Wireless <ilw@linux.intel.com> | ||
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
27 | * | ||
28 | *****************************************************************************/ | ||
29 | |||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/module.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/sched.h> | ||
34 | #include <linux/ieee80211.h> | ||
35 | #include "iwl-io.h" | ||
36 | #include "iwl-trans.h" | ||
37 | #include "iwl-agn-hw.h" | ||
38 | #include "dev.h" | ||
39 | #include "agn.h" | ||
40 | |||
/*
 * Map a QoS TID (array index, 0..7) to its mac80211 access category,
 * following the standard 802.11 UP-to-AC mapping: TIDs 1/2 -> BK,
 * 0/3 -> BE, 4/5 -> VI, 6/7 -> VO.
 */
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
};
51 | |||
52 | static void iwlagn_tx_cmd_protection(struct iwl_priv *priv, | ||
53 | struct ieee80211_tx_info *info, | ||
54 | __le16 fc, __le32 *tx_flags) | ||
55 | { | ||
56 | if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS || | ||
57 | info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT || | ||
58 | info->flags & IEEE80211_TX_CTL_AMPDU) | ||
59 | *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK; | ||
60 | } | ||
61 | |||
/*
 * handle build REPLY_TX command notification.
 *
 * Fills the non-rate fields of the TX command from the frame header
 * and mac80211 TX info: ACK/TSF/BA-response flags, station id, TID,
 * sequence-control handling, protection flags and the power-management
 * frame timeout.
 */
static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
				      struct sk_buff *skb,
				      struct iwl_tx_cmd *tx_cmd,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_hdr *hdr, u8 sta_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* Request an ACK unless mac80211 marked the frame no-ack */
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK_MSK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK_MSK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF_MSK;
	else if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
	else if (info->band == IEEE80211_BAND_2GHZ &&
		 priv->cfg->bt_params &&
		 priv->cfg->bt_params->advanced_bt_coexist &&
		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
		 ieee80211_is_reassoc_req(fc) ||
		 skb->protocol == cpu_to_be16(ETH_P_PAE)))
		/*
		 * With advanced BT coexistence on 2.4 GHz, let critical
		 * connection-setup frames (auth/assoc/EAPOL) ignore BT
		 * priority so association can complete.
		 */
		tx_flags |= TX_CMD_FLG_IGNORE_BT;


	tx_cmd->sta_id = sta_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		/* QoS data: take the TID from the QoS control field and
		 * let the uCode own the sequence counter */
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	}

	iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		/* keep the device awake longer for (re)assoc requests */
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
126 | |||
/*
 * Fill the rate-related fields of the TX command: RTS/data retry
 * limits, initial rate index and rate_n_flags.  Data frames (and BAR)
 * delegate rate selection to the uCode station table via
 * TX_CMD_FLG_STA_RATE_MSK; other frames get an explicit legacy rate
 * and a toggled management TX antenna.
 */
static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
				     struct iwl_tx_cmd *tx_cmd,
				     struct ieee80211_tx_info *info,
				     __le16 fc)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	if (priv->wowlan) {
		/* while in WoWLAN, keep retries to a minimum */
		rts_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
		data_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
	} else {
		/* Set retry limit on RTS packets */
		rts_retry_limit = IWLAGN_RTS_DFAULT_RETRY_LIMIT;

		/* Set retry limit on DATA packets and Probe Responses*/
		if (ieee80211_is_probe_resp(fc)) {
			data_retry_limit = IWLAGN_MGMT_DFAULT_RETRY_LIMIT;
			rts_retry_limit =
				min(data_retry_limit, rts_retry_limit);
		} else if (ieee80211_is_back_req(fc))
			data_retry_limit = IWLAGN_BAR_DFAULT_RETRY_LIMIT;
		else
			data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
	}

	tx_cmd->data_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
		if (priv->tm_fixed_rate) {
			/*
			 * rate overwrite by testmode
			 * we not only send lq command to change rate
			 * we also re-enforce per data pkt base.
			 */
			tx_cmd->tx_flags &= ~TX_CMD_FLG_STA_RATE_MSK;
			memcpy(&tx_cmd->rate_n_flags, &priv->tm_fixed_rate,
			       sizeof(tx_cmd->rate_n_flags));
		}
#endif
		return;
	} else if (ieee80211_is_back_req(fc))
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;

	/**
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate. Thus, we use the lowest supported rate for
	 * this band. Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
	    (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(&priv->bands[info->band],
					     info->control.sta);
	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up antennas */
	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist &&
	    priv->bt_full_concurrent) {
		/* operated as 1x1 in full concurrency mode */
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
				first_antenna(priv->hw_params.valid_tx_ant));
	} else
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
						priv->hw_params.valid_tx_ant);
	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
218 | |||
/*
 * Program the TX command security fields from the mac80211 hardware
 * key: set sec_ctl for the cipher and copy the key material into the
 * command.  For TKIP the per-packet phase-2 key is derived here; for
 * WEP the key index is encoded in sec_ctl and the key bytes are placed
 * at offset 3 of the key field.
 */
static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
					 struct ieee80211_tx_info *info,
					 struct iwl_tx_cmd *tx_cmd,
					 struct sk_buff *skb_frag)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		/* unsupported cipher: leave sec_ctl untouched, just log */
		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}
259 | |||
260 | /** | ||
261 | * iwl_sta_id_or_broadcast - return sta_id or broadcast sta | ||
262 | * @context: the current context | ||
263 | * @sta: mac80211 station | ||
264 | * | ||
265 | * In certain circumstances mac80211 passes a station pointer | ||
266 | * that may be %NULL, for example during TX or key setup. In | ||
267 | * that case, we need to use the broadcast station, so this | ||
268 | * inline wraps that pattern. | ||
269 | */ | ||
270 | static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context, | ||
271 | struct ieee80211_sta *sta) | ||
272 | { | ||
273 | int sta_id; | ||
274 | |||
275 | if (!sta) | ||
276 | return context->bcast_sta_id; | ||
277 | |||
278 | sta_id = iwl_sta_id(sta); | ||
279 | |||
280 | /* | ||
281 | * mac80211 should not be passing a partially | ||
282 | * initialised station! | ||
283 | */ | ||
284 | WARN_ON(sta_id == IWL_INVALID_STATION); | ||
285 | |||
286 | return sta_id; | ||
287 | } | ||
288 | |||
/*
 * start REPLY_TX command process
 *
 * Builds a device TX command for @skb and hands it to the transport.
 * Returns 0 on success, -1 when the frame is dropped (rfkill, invalid
 * station, command allocation failure, bad aggregation state, or the
 * transport refusing the frame).
 */
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_device_cmd *dev_cmd = NULL;
	struct iwl_tx_cmd *tx_cmd;
	__le16 fc;
	u8 hdr_len;
	u16 len, seq_number = 0;
	u8 sta_id, tid = IWL_MAX_TID_COUNT;
	bool is_agg = false;
	int txq_id;

	/* default to the BSS context unless the vif says otherwise */
	if (info->control.vif)
		ctx = iwl_rxon_ctx_from_vif(info->control.vif);

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock_priv;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	/*
	 * Append the stored NoA attribute to probe responses when P2P
	 * NoA data is present; on expansion failure the frame is sent
	 * unmodified (best effort).
	 */
	if (unlikely(ieee80211_is_probe_resp(fc))) {
		struct iwl_wipan_noa_data *noa_data =
			rcu_dereference(priv->noa_data);

		if (noa_data &&
		    pskb_expand_head(skb, 0, noa_data->length,
				     GFP_ATOMIC) == 0) {
			memcpy(skb_put(skb, noa_data->length),
			       noa_data->data, noa_data->length);
			/* head may have been reallocated */
			hdr = (struct ieee80211_hdr *)skb->data;
		}
	}

	hdr_len = ieee80211_hdrlen(fc);

	/* For management frames use broadcast id to do not break aggregation */
	if (!ieee80211_is_data(fc))
		sta_id = ctx->bcast_sta_id;
	else {
		/* Find index into station table for destination station */
		sta_id = iwl_sta_id_or_broadcast(ctx, info->control.sta);
		if (sta_id == IWL_INVALID_STATION) {
			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
				       hdr->addr1);
			goto drop_unlock_priv;
		}
	}

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	if (info->control.sta)
		sta_priv = (void *)info->control.sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 *
		 * FIXME: If we get two non-bufferable frames one
		 * after the other, we might only send out one of
		 * them because this is racy.
		 */
		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		is_agg = true;

	dev_cmd = kmem_cache_alloc(iwl_tx_cmd_pool, GFP_ATOMIC);

	if (unlikely(!dev_cmd))
		goto drop_unlock_priv;

	memset(dev_cmd, 0, sizeof(*dev_cmd));
	tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb);

	/* TODO need this for burst mode later on */
	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);

	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);

	memset(&info->status, 0, sizeof(info->status));

	/* stash context and command so the TX response can find them */
	info->driver_data[0] = ctx;
	info->driver_data[1] = dev_cmd;

	spin_lock(&priv->sta_lock);

	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		u8 *qc = NULL;
		struct iwl_tid_data *tid_data;
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
			goto drop_unlock_sta;
		tid_data = &priv->tid_data[sta_id][tid];

		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    tid_data->agg.state != IWL_AGG_ON) {
			IWL_ERR(priv, "TX_CTL_AMPDU while not in AGG:"
				" Tx flags = 0x%08x, agg.state = %d",
				info->flags, tid_data->agg.state);
			IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d",
				sta_id, tid, SEQ_TO_SN(tid_data->seq_number));
			goto drop_unlock_sta;
		}

		/* We can receive packets from the stack in IWL_AGG_{ON,OFF}
		 * only. Check this here.
		 */
		if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
			      tid_data->agg.state != IWL_AGG_OFF,
			      "Tx while agg.state = %d", tid_data->agg.state))
			goto drop_unlock_sta;

		/* assign the driver-maintained sequence number to the frame */
		seq_number = tid_data->seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
	}

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	if (is_agg)
		txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
	else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/*
		 * Send this frame after DTIM -- there's a special queue
		 * reserved for this for contexts that support AP mode.
		 */
		txq_id = ctx->mcast_queue;

		/*
		 * The microcode will clear the more data
		 * bit in the last frame it transmits.
		 */
		hdr->frame_control |=
			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		txq_id = IWL_AUX_QUEUE;
	else
		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];

	WARN_ON_ONCE(!is_agg && txq_id != info->hw_queue);
	WARN_ON_ONCE(is_agg &&
		     priv->queue_to_mac80211[txq_id] != info->hw_queue);

	if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id))
		goto drop_unlock_sta;

	/* only advance the sequence counter once the frame is queued */
	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc) &&
	    !ieee80211_has_morefrags(fc))
		priv->tid_data[sta_id][tid].seq_number = seq_number;

	spin_unlock(&priv->sta_lock);

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
	 * increase the counter because the ucode will stop
	 * aggregation queues when their respective station
	 * goes to sleep.
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		iwl_scan_offchannel_skb(priv);

	return 0;

drop_unlock_sta:
	if (dev_cmd)
		kmem_cache_free(iwl_tx_cmd_pool, dev_cmd);
	spin_unlock(&priv->sta_lock);
drop_unlock_priv:
	return -1;
}
500 | |||
501 | static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq) | ||
502 | { | ||
503 | int q; | ||
504 | |||
505 | for (q = IWLAGN_FIRST_AMPDU_QUEUE; | ||
506 | q < priv->cfg->base_params->num_of_queues; q++) { | ||
507 | if (!test_and_set_bit(q, priv->agg_q_alloc)) { | ||
508 | priv->queue_to_mac80211[q] = mq; | ||
509 | return q; | ||
510 | } | ||
511 | } | ||
512 | |||
513 | return -ENOSPC; | ||
514 | } | ||
515 | |||
/*
 * Release a hardware aggregation queue reserved by
 * iwlagn_alloc_agg_txq(): clear its allocation bit and invalidate its
 * hw-queue -> mac80211-queue mapping.
 */
static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
{
	clear_bit(q, priv->agg_q_alloc);
	priv->queue_to_mac80211[q] = IWL_INVALID_MAC80211_QUEUE;
}
521 | |||
/*
 * Stop the TX aggregation session for @sta/@tid.
 *
 * Depending on the session state this either turns aggregation off
 * right away, or — when frames for the session are still pending in
 * the HW queue — moves the state to IWL_EMPTYING_HW_QUEUE_DELBA and
 * lets the reclaim path finish the teardown later.
 */
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_tid_data *tid_data;
	int sta_id, txq_id;
	enum iwl_agg_state agg_state;

	sta_id = iwl_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_bh(&priv->sta_lock);

	tid_data = &priv->tid_data[sta_id][tid];
	txq_id = priv->tid_data[sta_id][tid].agg.txq_id;

	switch (priv->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_STARTING:
		/*
		 * This can happen when the session is stopped before
		 * we receive ADDBA response
		 */
		IWL_DEBUG_HT(priv, "AGG stop before AGG became operational\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		/* already off (or mid-teardown): nothing to do */
		IWL_WARN(priv, "Stopping AGG while state not ON "
			 "or starting for %d on %d (%d)\n", sta_id, tid,
			 priv->tid_data[sta_id][tid].agg.state);
		spin_unlock_bh(&priv->sta_lock);
		return 0;
	}

	tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);

	/* There are still packets for this RA / TID in the HW */
	if (!test_bit(txq_id, priv->agg_q_alloc)) {
		IWL_DEBUG_TX_QUEUES(priv,
			"stopping AGG on STA/TID %d/%d but hwq %d not used\n",
			sta_id, tid, txq_id);
	} else if (tid_data->agg.ssn != tid_data->next_reclaimed) {
		/* defer teardown until the reclaim path drains the queue */
		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
				    "next_recl = %d\n",
				    tid_data->agg.ssn,
				    tid_data->next_reclaimed);
		priv->tid_data[sta_id][tid].agg.state =
			IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_bh(&priv->sta_lock);
		return 0;
	}

	IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
			    tid_data->agg.ssn);
turn_off:
	/* remember the state so we can decide below whether the
	 * transport was ever told about this session */
	agg_state = priv->tid_data[sta_id][tid].agg.state;
	priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;

	spin_unlock_bh(&priv->sta_lock);

	if (test_bit(txq_id, priv->agg_q_alloc)) {
		/*
		 * If the transport didn't know that we wanted to start
		 * agreggation, don't tell it that we want to stop them.
		 * This can happen when we don't get the addBA response on
		 * time, or we hadn't time to drain the AC queues.
		 */
		if (agg_state == IWL_AGG_ON)
			iwl_trans_tx_agg_disable(priv->trans, txq_id);
		else
			IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
					    agg_state);
		iwlagn_dealloc_agg_txq(priv, txq_id);
	}

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
613 | |||
614 | int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, | ||
615 | struct ieee80211_sta *sta, u16 tid, u16 *ssn) | ||
616 | { | ||
617 | struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); | ||
618 | struct iwl_tid_data *tid_data; | ||
619 | int sta_id, txq_id, ret; | ||
620 | |||
621 | IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n", | ||
622 | sta->addr, tid); | ||
623 | |||
624 | sta_id = iwl_sta_id(sta); | ||
625 | if (sta_id == IWL_INVALID_STATION) { | ||
626 | IWL_ERR(priv, "Start AGG on invalid station\n"); | ||
627 | return -ENXIO; | ||
628 | } | ||
629 | if (unlikely(tid >= IWL_MAX_TID_COUNT)) | ||
630 | return -EINVAL; | ||
631 | |||
632 | if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) { | ||
633 | IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); | ||
634 | return -ENXIO; | ||
635 | } | ||
636 | |||
637 | txq_id = iwlagn_alloc_agg_txq(priv, ctx->ac_to_queue[tid_to_ac[tid]]); | ||
638 | if (txq_id < 0) { | ||
639 | IWL_DEBUG_TX_QUEUES(priv, | ||
640 | "No free aggregation queue for %pM/%d\n", | ||
641 | sta->addr, tid); | ||
642 | return txq_id; | ||
643 | } | ||
644 | |||
645 | ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid); | ||
646 | if (ret) | ||
647 | return ret; | ||
648 | |||
649 | spin_lock_bh(&priv->sta_lock); | ||
650 | tid_data = &priv->tid_data[sta_id][tid]; | ||
651 | tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number); | ||
652 | tid_data->agg.txq_id = txq_id; | ||
653 | |||
654 | *ssn = tid_data->agg.ssn; | ||
655 | |||
656 | if (*ssn == tid_data->next_reclaimed) { | ||
657 | IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n", | ||
658 | tid_data->agg.ssn); | ||
659 | tid_data->agg.state = IWL_AGG_STARTING; | ||
660 | ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); | ||
661 | } else { | ||
662 | IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, " | ||
663 | "next_reclaimed = %d\n", | ||
664 | tid_data->agg.ssn, | ||
665 | tid_data->next_reclaimed); | ||
666 | tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; | ||
667 | } | ||
668 | spin_unlock_bh(&priv->sta_lock); | ||
669 | |||
670 | return ret; | ||
671 | } | ||
672 | |||
/*
 * Make a TX aggregation session operational after the ADDBA handshake:
 * mark the session IWL_AGG_ON, configure the transport aggregation
 * queue, clamp the per-station aggregation buffer size and push an
 * updated link quality command to the uCode.
 */
int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid, u8 buf_size)
{
	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	int q, fifo;
	u16 ssn;

	/* never exceed our own aggregation frame limit */
	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_bh(&priv->sta_lock);
	ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
	q = priv->tid_data[sta_priv->sta_id][tid].agg.txq_id;
	priv->tid_data[sta_priv->sta_id][tid].agg.state = IWL_AGG_ON;
	spin_unlock_bh(&priv->sta_lock);

	fifo = ctx->ac_to_fifo[tid_to_ac[tid]];

	iwl_trans_tx_agg_setup(priv->trans, q, fifo,
			       sta_priv->sta_id, tid,
			       buf_size, ssn);

	/*
	 * If the limit is 0, then it wasn't initialised yet,
	 * use the default. We can do that since we take the
	 * minimum below, and we don't want to go above our
	 * default due to hardware restrictions.
	 */
	if (sta_priv->max_agg_bufsize == 0)
		sta_priv->max_agg_bufsize =
			LINK_QUAL_AGG_FRAME_LIMIT_DEF;

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	sta_priv->max_agg_bufsize =
		min(sta_priv->max_agg_bufsize, buf_size);

	if (priv->hw_params.use_rts_for_aggregation) {
		/*
		 * switch to RTS/CTS if it is the prefer protection
		 * method for HT traffic
		 */

		sta_priv->lq_sta.lq.general_params.flags |=
			LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
	}
	priv->agg_tids_count++;
	IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
		     priv->agg_tids_count);

	sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
		sta_priv->max_agg_bufsize;

	IWL_DEBUG_HT(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_send_lq_cmd(priv, ctx,
			       &sta_priv->lq_sta.lq, CMD_ASYNC, false);
}
737 | |||
/*
 * Called with sta_lock held after frames for @sta_id/@tid have been
 * reclaimed.  If an aggregation start (ADDBA) or stop (DELBA) was
 * deferred until the HW queue drained, and the queue is now empty
 * (ssn == next_reclaimed), continue that flow here.
 */
static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
{
	struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid];
	enum iwl_rxon_context_id ctx;
	struct ieee80211_vif *vif;
	u8 *addr;

	lockdep_assert_held(&priv->sta_lock);

	addr = priv->stations[sta_id].sta.sta.addr;
	ctx = priv->stations[sta_id].ctxid;
	vif = priv->contexts[ctx].vif;

	switch (priv->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* There are no packets for this RA / TID in the HW any more */
		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
			IWL_DEBUG_TX_QUEUES(priv,
				"Can continue DELBA flow ssn = next_recl ="
				" %d", tid_data->next_reclaimed);
			/* finish the deferred teardown */
			iwl_trans_tx_agg_disable(priv->trans,
						 tid_data->agg.txq_id);
			iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* There are no packets for this RA / TID in the HW any more */
		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
			IWL_DEBUG_TX_QUEUES(priv,
				"Can continue ADDBA flow ssn = next_recl ="
				" %d", tid_data->next_reclaimed);
			/* finish the deferred session start */
			tid_data->agg.state = IWL_AGG_STARTING;
			ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
		}
		break;
	default:
		break;
	}
}
779 | |||
780 | static void iwlagn_non_agg_tx_status(struct iwl_priv *priv, | ||
781 | struct iwl_rxon_context *ctx, | ||
782 | const u8 *addr1) | ||
783 | { | ||
784 | struct ieee80211_sta *sta; | ||
785 | struct iwl_station_priv *sta_priv; | ||
786 | |||
787 | rcu_read_lock(); | ||
788 | sta = ieee80211_find_sta(ctx->vif, addr1); | ||
789 | if (sta) { | ||
790 | sta_priv = (void *)sta->drv_priv; | ||
791 | /* avoid atomic ops if this isn't a client */ | ||
792 | if (sta_priv->client && | ||
793 | atomic_dec_return(&sta_priv->pending_frames) == 0) | ||
794 | ieee80211_sta_block_awake(priv->hw, sta, false); | ||
795 | } | ||
796 | rcu_read_unlock(); | ||
797 | } | ||
798 | |||
799 | /** | ||
800 | * translate ucode response to mac80211 tx status control values | ||
801 | */ | ||
802 | static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, | ||
803 | struct ieee80211_tx_info *info) | ||
804 | { | ||
805 | struct ieee80211_tx_rate *r = &info->status.rates[0]; | ||
806 | |||
807 | info->status.antenna = | ||
808 | ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); | ||
809 | if (rate_n_flags & RATE_MCS_HT_MSK) | ||
810 | r->flags |= IEEE80211_TX_RC_MCS; | ||
811 | if (rate_n_flags & RATE_MCS_GF_MSK) | ||
812 | r->flags |= IEEE80211_TX_RC_GREEN_FIELD; | ||
813 | if (rate_n_flags & RATE_MCS_HT40_MSK) | ||
814 | r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; | ||
815 | if (rate_n_flags & RATE_MCS_DUP_MSK) | ||
816 | r->flags |= IEEE80211_TX_RC_DUP_DATA; | ||
817 | if (rate_n_flags & RATE_MCS_SGI_MSK) | ||
818 | r->flags |= IEEE80211_TX_RC_SHORT_GI; | ||
819 | r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band); | ||
820 | } | ||
821 | |||
#ifdef CONFIG_IWLWIFI_DEBUG
/*
 * iwl_get_tx_fail_reason - map a uCode TX status code to a readable string
 *
 * Debug helper: only the status bits under TX_STATUS_MSK are examined.
 * Returns "SUCCESS" for a successful transmission and "UNKNOWN" for any
 * unrecognized status value.
 */
const char *iwl_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	case TX_STATUS_POSTPONE_DELAY:
		return "DELAY";
	case TX_STATUS_POSTPONE_FEW_BYTES:
		return "FEW_BYTES";
	case TX_STATUS_POSTPONE_BT_PRIO:
		return "BT_PRIO";
	case TX_STATUS_POSTPONE_QUIET_PERIOD:
		return "QUIET_PERIOD";
	case TX_STATUS_POSTPONE_CALC_TTAK:
		return "CALC_TTAK";
	case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
		return "INTERNAL_CROSSED_RETRY";
	case TX_STATUS_FAIL_SHORT_LIMIT:
		return "SHORT_LIMIT";
	case TX_STATUS_FAIL_LONG_LIMIT:
		return "LONG_LIMIT";
	case TX_STATUS_FAIL_FIFO_UNDERRUN:
		return "FIFO_UNDERRUN";
	case TX_STATUS_FAIL_DRAIN_FLOW:
		return "DRAIN_FLOW";
	case TX_STATUS_FAIL_RFKILL_FLUSH:
		return "RFKILL_FLUSH";
	case TX_STATUS_FAIL_LIFE_EXPIRE:
		return "LIFE_EXPIRE";
	case TX_STATUS_FAIL_DEST_PS:
		return "DEST_PS";
	case TX_STATUS_FAIL_HOST_ABORTED:
		return "HOST_ABORTED";
	case TX_STATUS_FAIL_BT_RETRY:
		return "BT_RETRY";
	case TX_STATUS_FAIL_STA_INVALID:
		return "STA_INVALID";
	case TX_STATUS_FAIL_FRAG_DROPPED:
		return "FRAG_DROPPED";
	case TX_STATUS_FAIL_TID_DISABLE:
		return "TID_DISABLE";
	case TX_STATUS_FAIL_FIFO_FLUSHED:
		return "FIFO_FLUSHED";
	case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
		return "INSUFFICIENT_CF_POLL";
	case TX_STATUS_FAIL_PASSIVE_NO_RX:
		return "PASSIVE_NO_RX";
	case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
		return "NO_BEACON_ON_RADAR";
	}

	return "UNKNOWN";
}
#endif /* CONFIG_IWLWIFI_DEBUG */
861 | |||
/*
 * iwlagn_count_agg_tx_err_status - bump the statistics counter that
 * matches an aggregation frame status reported by the uCode.
 *
 * Only the state bits selected by AGG_TX_STATUS_MSK are examined; a
 * state with no dedicated counter is accumulated in the "unknown"
 * bucket. Counters live in priv->reply_agg_tx_stats (debugfs stats).
 */
static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
{
	/* strip the try-count bits, keep only the frame state */
	status &= AGG_TX_STATUS_MSK;

	switch (status) {
	case AGG_TX_STATE_UNDERRUN_MSK:
		priv->reply_agg_tx_stats.underrun++;
		break;
	case AGG_TX_STATE_BT_PRIO_MSK:
		priv->reply_agg_tx_stats.bt_prio++;
		break;
	case AGG_TX_STATE_FEW_BYTES_MSK:
		priv->reply_agg_tx_stats.few_bytes++;
		break;
	case AGG_TX_STATE_ABORT_MSK:
		priv->reply_agg_tx_stats.abort++;
		break;
	case AGG_TX_STATE_LAST_SENT_TTL_MSK:
		priv->reply_agg_tx_stats.last_sent_ttl++;
		break;
	case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
		priv->reply_agg_tx_stats.last_sent_try++;
		break;
	case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
		priv->reply_agg_tx_stats.last_sent_bt_kill++;
		break;
	case AGG_TX_STATE_SCD_QUERY_MSK:
		priv->reply_agg_tx_stats.scd_query++;
		break;
	case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
		priv->reply_agg_tx_stats.bad_crc32++;
		break;
	case AGG_TX_STATE_RESPONSE_MSK:
		priv->reply_agg_tx_stats.response++;
		break;
	case AGG_TX_STATE_DUMP_TX_MSK:
		priv->reply_agg_tx_stats.dump_tx++;
		break;
	case AGG_TX_STATE_DELAY_TX_MSK:
		priv->reply_agg_tx_stats.delay_tx++;
		break;
	default:
		priv->reply_agg_tx_stats.unknown++;
		break;
	}
}
908 | |||
909 | static void iwl_rx_reply_tx_agg(struct iwl_priv *priv, | ||
910 | struct iwlagn_tx_resp *tx_resp) | ||
911 | { | ||
912 | struct agg_tx_status *frame_status = &tx_resp->status; | ||
913 | int tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >> | ||
914 | IWLAGN_TX_RES_TID_POS; | ||
915 | int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >> | ||
916 | IWLAGN_TX_RES_RA_POS; | ||
917 | struct iwl_ht_agg *agg = &priv->tid_data[sta_id][tid].agg; | ||
918 | u32 status = le16_to_cpu(tx_resp->status.status); | ||
919 | int i; | ||
920 | |||
921 | WARN_ON(tid == IWL_TID_NON_QOS); | ||
922 | |||
923 | if (agg->wait_for_ba) | ||
924 | IWL_DEBUG_TX_REPLY(priv, | ||
925 | "got tx response w/o block-ack\n"); | ||
926 | |||
927 | agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); | ||
928 | agg->wait_for_ba = (tx_resp->frame_count > 1); | ||
929 | |||
930 | /* | ||
931 | * If the BT kill count is non-zero, we'll get this | ||
932 | * notification again. | ||
933 | */ | ||
934 | if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 && | ||
935 | priv->cfg->bt_params && | ||
936 | priv->cfg->bt_params->advanced_bt_coexist) { | ||
937 | IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n"); | ||
938 | } | ||
939 | |||
940 | if (tx_resp->frame_count == 1) | ||
941 | return; | ||
942 | |||
943 | /* Construct bit-map of pending frames within Tx window */ | ||
944 | for (i = 0; i < tx_resp->frame_count; i++) { | ||
945 | u16 fstatus = le16_to_cpu(frame_status[i].status); | ||
946 | |||
947 | if (status & AGG_TX_STATUS_MSK) | ||
948 | iwlagn_count_agg_tx_err_status(priv, fstatus); | ||
949 | |||
950 | if (status & (AGG_TX_STATE_FEW_BYTES_MSK | | ||
951 | AGG_TX_STATE_ABORT_MSK)) | ||
952 | continue; | ||
953 | |||
954 | IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), " | ||
955 | "try-count (0x%08x)\n", | ||
956 | iwl_get_agg_tx_fail_reason(fstatus), | ||
957 | fstatus & AGG_TX_STATUS_MSK, | ||
958 | fstatus & AGG_TX_TRY_MSK); | ||
959 | } | ||
960 | } | ||
961 | |||
#ifdef CONFIG_IWLWIFI_DEBUG
/*
 * iwl_get_agg_tx_fail_reason - map an aggregation frame state to a string
 *
 * Debug helper: only the state bits under AGG_TX_STATUS_MSK are
 * examined. Returns "SUCCESS" for a transmitted frame and "UNKNOWN"
 * for any unrecognized state.
 */
const char *iwl_get_agg_tx_fail_reason(u16 status)
{
	switch (status & AGG_TX_STATUS_MSK) {
	case AGG_TX_STATE_TRANSMITTED:
		return "SUCCESS";
	case AGG_TX_STATE_UNDERRUN_MSK:
		return "UNDERRUN_MSK";
	case AGG_TX_STATE_BT_PRIO_MSK:
		return "BT_PRIO_MSK";
	case AGG_TX_STATE_FEW_BYTES_MSK:
		return "FEW_BYTES_MSK";
	case AGG_TX_STATE_ABORT_MSK:
		return "ABORT_MSK";
	case AGG_TX_STATE_LAST_SENT_TTL_MSK:
		return "LAST_SENT_TTL_MSK";
	case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
		return "LAST_SENT_TRY_CNT_MSK";
	case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
		return "LAST_SENT_BT_KILL_MSK";
	case AGG_TX_STATE_SCD_QUERY_MSK:
		return "SCD_QUERY_MSK";
	case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
		return "TEST_BAD_CRC32_MSK";
	case AGG_TX_STATE_RESPONSE_MSK:
		return "RESPONSE_MSK";
	case AGG_TX_STATE_DUMP_TX_MSK:
		return "DUMP_TX_MSK";
	case AGG_TX_STATE_DELAY_TX_MSK:
		return "DELAY_TX_MSK";
	}

	return "UNKNOWN";
}
#endif /* CONFIG_IWLWIFI_DEBUG */
988 | |||
989 | static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp) | ||
990 | { | ||
991 | return le32_to_cpup((__le32 *)&tx_resp->status + | ||
992 | tx_resp->frame_count) & MAX_SN; | ||
993 | } | ||
994 | |||
/*
 * iwlagn_count_tx_err_status - bump the statistics counter matching a
 * (non-aggregation) TX status reported by the uCode.
 *
 * Only the bits under TX_STATUS_MSK are examined; unrecognized codes
 * are accumulated in the "unknown" bucket. Counters live in
 * priv->reply_tx_stats (debugfs stats).
 */
static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
{
	status &= TX_STATUS_MSK;

	switch (status) {
	case TX_STATUS_POSTPONE_DELAY:
		priv->reply_tx_stats.pp_delay++;
		break;
	case TX_STATUS_POSTPONE_FEW_BYTES:
		priv->reply_tx_stats.pp_few_bytes++;
		break;
	case TX_STATUS_POSTPONE_BT_PRIO:
		priv->reply_tx_stats.pp_bt_prio++;
		break;
	case TX_STATUS_POSTPONE_QUIET_PERIOD:
		priv->reply_tx_stats.pp_quiet_period++;
		break;
	case TX_STATUS_POSTPONE_CALC_TTAK:
		priv->reply_tx_stats.pp_calc_ttak++;
		break;
	case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
		priv->reply_tx_stats.int_crossed_retry++;
		break;
	case TX_STATUS_FAIL_SHORT_LIMIT:
		priv->reply_tx_stats.short_limit++;
		break;
	case TX_STATUS_FAIL_LONG_LIMIT:
		priv->reply_tx_stats.long_limit++;
		break;
	case TX_STATUS_FAIL_FIFO_UNDERRUN:
		priv->reply_tx_stats.fifo_underrun++;
		break;
	case TX_STATUS_FAIL_DRAIN_FLOW:
		priv->reply_tx_stats.drain_flow++;
		break;
	case TX_STATUS_FAIL_RFKILL_FLUSH:
		priv->reply_tx_stats.rfkill_flush++;
		break;
	case TX_STATUS_FAIL_LIFE_EXPIRE:
		priv->reply_tx_stats.life_expire++;
		break;
	case TX_STATUS_FAIL_DEST_PS:
		priv->reply_tx_stats.dest_ps++;
		break;
	case TX_STATUS_FAIL_HOST_ABORTED:
		priv->reply_tx_stats.host_abort++;
		break;
	case TX_STATUS_FAIL_BT_RETRY:
		priv->reply_tx_stats.bt_retry++;
		break;
	case TX_STATUS_FAIL_STA_INVALID:
		priv->reply_tx_stats.sta_invalid++;
		break;
	case TX_STATUS_FAIL_FRAG_DROPPED:
		priv->reply_tx_stats.frag_drop++;
		break;
	case TX_STATUS_FAIL_TID_DISABLE:
		priv->reply_tx_stats.tid_disable++;
		break;
	case TX_STATUS_FAIL_FIFO_FLUSHED:
		priv->reply_tx_stats.fifo_flush++;
		break;
	case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
		priv->reply_tx_stats.insuff_cf_poll++;
		break;
	case TX_STATUS_FAIL_PASSIVE_NO_RX:
		/* NOTE(review): counter name differs from the status
		 * code; presumably historical — confirm before renaming */
		priv->reply_tx_stats.fail_hw_drop++;
		break;
	case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
		priv->reply_tx_stats.sta_color_mismatch++;
		break;
	default:
		priv->reply_tx_stats.unknown++;
		break;
	}
}
1071 | |||
1072 | static void iwlagn_set_tx_status(struct iwl_priv *priv, | ||
1073 | struct ieee80211_tx_info *info, | ||
1074 | struct iwlagn_tx_resp *tx_resp, | ||
1075 | bool is_agg) | ||
1076 | { | ||
1077 | u16 status = le16_to_cpu(tx_resp->status.status); | ||
1078 | |||
1079 | info->status.rates[0].count = tx_resp->failure_frame + 1; | ||
1080 | if (is_agg) | ||
1081 | info->flags &= ~IEEE80211_TX_CTL_AMPDU; | ||
1082 | info->flags |= iwl_tx_status_to_mac80211(status); | ||
1083 | iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags), | ||
1084 | info); | ||
1085 | if (!iwl_is_tx_success(status)) | ||
1086 | iwlagn_count_tx_err_status(priv, status); | ||
1087 | } | ||
1088 | |||
1089 | static void iwl_check_abort_status(struct iwl_priv *priv, | ||
1090 | u8 frame_count, u32 status) | ||
1091 | { | ||
1092 | if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) { | ||
1093 | IWL_ERR(priv, "Tx flush command to flush out all frames\n"); | ||
1094 | if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
1095 | queue_work(priv->workqueue, &priv->tx_flush); | ||
1096 | } | ||
1097 | } | ||
1098 | |||
1099 | static int iwl_reclaim(struct iwl_priv *priv, int sta_id, int tid, | ||
1100 | int txq_id, int ssn, struct sk_buff_head *skbs) | ||
1101 | { | ||
1102 | if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE && | ||
1103 | tid != IWL_TID_NON_QOS && | ||
1104 | txq_id != priv->tid_data[sta_id][tid].agg.txq_id)) { | ||
1105 | /* | ||
1106 | * FIXME: this is a uCode bug which need to be addressed, | ||
1107 | * log the information and return for now. | ||
1108 | * Since it is can possibly happen very often and in order | ||
1109 | * not to fill the syslog, don't use IWL_ERR or IWL_WARN | ||
1110 | */ | ||
1111 | IWL_DEBUG_TX_QUEUES(priv, | ||
1112 | "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n", | ||
1113 | txq_id, sta_id, tid, | ||
1114 | priv->tid_data[sta_id][tid].agg.txq_id); | ||
1115 | return 1; | ||
1116 | } | ||
1117 | |||
1118 | iwl_trans_reclaim(priv->trans, txq_id, ssn, skbs); | ||
1119 | return 0; | ||
1120 | } | ||
1121 | |||
/*
 * iwlagn_rx_reply_tx - handler for REPLY_TX notifications from the uCode
 *
 * Processes the TX response for a single command: updates aggregation
 * state (for AMPDU queues), reclaims the transmitted skbs from the
 * transport, fills in their mac80211 TX status and finally reports
 * them with ieee80211_tx_status() after dropping sta_lock.
 * Always returns 0.
 */
int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
			       struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	/* queue and index this response refers to, encoded in the
	 * command sequence number */
	int txq_id = SEQ_TO_QUEUE(sequence);
	int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence);
	struct iwlagn_tx_resp *tx_resp = (void *)pkt->data;
	struct ieee80211_hdr *hdr;
	u32 status = le16_to_cpu(tx_resp->status.status);
	u16 ssn = iwlagn_get_scd_ssn(tx_resp);
	int tid;
	int sta_id;
	int freed;
	struct ieee80211_tx_info *info;
	struct sk_buff_head skbs;
	struct sk_buff *skb;
	struct iwl_rxon_context *ctx;
	bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
	bool is_offchannel_skb;

	/* station id and TID are packed into the ra_tid field */
	tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;

	spin_lock(&priv->sta_lock);

	if (is_agg)
		iwl_rx_reply_tx_agg(priv, tx_resp);

	__skb_queue_head_init(&skbs);

	is_offchannel_skb = false;

	if (tx_resp->frame_count == 1) {
		/* next frame to reclaim is the one after this response's
		 * seq_ctl (+0x10 advances the IEEE80211 sequence number) */
		u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
		next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10);

		if (is_agg) {
			/* If this is an aggregation queue, we can rely on the
			 * ssn since the wifi sequence number corresponds to
			 * the index in the TFD ring (%256).
			 * The seq_ctl is the sequence control of the packet
			 * to which this Tx response relates. But if there is a
			 * hole in the bitmap of the BA we received, this Tx
			 * response may allow to reclaim the hole and all the
			 * subsequent packets that were already acked.
			 * In that case, seq_ctl != ssn, and the next packet
			 * to be reclaimed will be ssn and not seq_ctl.
			 */
			next_reclaimed = ssn;
		}

		if (tid != IWL_TID_NON_QOS) {
			priv->tid_data[sta_id][tid].next_reclaimed =
				next_reclaimed;
			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
					   next_reclaimed);
		}

		/*we can free until ssn % q.n_bd not inclusive */
		WARN_ON(iwl_reclaim(priv, sta_id, tid, txq_id, ssn, &skbs));
		iwlagn_check_ratid_empty(priv, sta_id, tid);
		freed = 0;

		/* process frames */
		skb_queue_walk(&skbs, skb) {
			hdr = (struct ieee80211_hdr *)skb->data;

			if (!ieee80211_is_data_qos(hdr->frame_control))
				priv->last_seq_ctl = tx_resp->seq_ctl;

			info = IEEE80211_SKB_CB(skb);
			ctx = info->driver_data[0];
			/* driver_data[1] holds the TX command allocated
			 * from iwl_tx_cmd_pool at TX time; release it */
			kmem_cache_free(iwl_tx_cmd_pool,
					(info->driver_data[1]));

			memset(&info->status, 0, sizeof(info->status));

			if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
			    iwl_is_associated_ctx(ctx) && ctx->vif &&
			    ctx->vif->type == NL80211_IFTYPE_STATION) {
				/* block and stop all queues */
				priv->passive_no_rx = true;
				IWL_DEBUG_TX_QUEUES(priv, "stop all queues: "
						    "passive channel");
				ieee80211_stop_queues(priv->hw);

				IWL_DEBUG_TX_REPLY(priv,
					   "TXQ %d status %s (0x%08x) "
					   "rate_n_flags 0x%x retries %d\n",
					   txq_id,
					   iwl_get_tx_fail_reason(status),
					   status,
					   le32_to_cpu(tx_resp->rate_n_flags),
					   tx_resp->failure_frame);

				IWL_DEBUG_TX_REPLY(priv,
					   "FrameCnt = %d, idx=%d\n",
					   tx_resp->frame_count, cmd_index);
			}

			/* check if BAR is needed */
			if (is_agg && !iwl_is_tx_success(status))
				info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
			iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
				     tx_resp, is_agg);
			if (!is_agg)
				iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);

			is_offchannel_skb =
				(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
			freed++;
		}

		WARN_ON(!is_agg && freed != 1);

		/*
		 * An offchannel frame can be send only on the AUX queue, where
		 * there is no aggregation (and reordering) so it only is single
		 * skb is expected to be processed.
		 */
		WARN_ON(is_offchannel_skb && freed != 1);
	}

	iwl_check_abort_status(priv, tx_resp->frame_count, status);
	spin_unlock(&priv->sta_lock);

	/* report the skbs to mac80211 outside of sta_lock */
	while (!skb_queue_empty(&skbs)) {
		skb = __skb_dequeue(&skbs);
		ieee80211_tx_status(priv->hw, skb);
	}

	if (is_offchannel_skb)
		iwl_scan_offchannel_skb_status(priv);

	return 0;
}
1261 | |||
1262 | /** | ||
1263 | * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA | ||
1264 | * | ||
1265 | * Handles block-acknowledge notification from device, which reports success | ||
1266 | * of frames sent via aggregation. | ||
1267 | */ | ||
1268 | int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, | ||
1269 | struct iwl_rx_cmd_buffer *rxb, | ||
1270 | struct iwl_device_cmd *cmd) | ||
1271 | { | ||
1272 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
1273 | struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data; | ||
1274 | struct iwl_ht_agg *agg; | ||
1275 | struct sk_buff_head reclaimed_skbs; | ||
1276 | struct ieee80211_tx_info *info; | ||
1277 | struct ieee80211_hdr *hdr; | ||
1278 | struct sk_buff *skb; | ||
1279 | int sta_id; | ||
1280 | int tid; | ||
1281 | int freed; | ||
1282 | |||
1283 | /* "flow" corresponds to Tx queue */ | ||
1284 | u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); | ||
1285 | |||
1286 | /* "ssn" is start of block-ack Tx window, corresponds to index | ||
1287 | * (in Tx queue's circular buffer) of first TFD/frame in window */ | ||
1288 | u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); | ||
1289 | |||
1290 | if (scd_flow >= priv->cfg->base_params->num_of_queues) { | ||
1291 | IWL_ERR(priv, | ||
1292 | "BUG_ON scd_flow is bigger than number of queues\n"); | ||
1293 | return 0; | ||
1294 | } | ||
1295 | |||
1296 | sta_id = ba_resp->sta_id; | ||
1297 | tid = ba_resp->tid; | ||
1298 | agg = &priv->tid_data[sta_id][tid].agg; | ||
1299 | |||
1300 | spin_lock(&priv->sta_lock); | ||
1301 | |||
1302 | if (unlikely(!agg->wait_for_ba)) { | ||
1303 | if (unlikely(ba_resp->bitmap)) | ||
1304 | IWL_ERR(priv, "Received BA when not expected\n"); | ||
1305 | spin_unlock(&priv->sta_lock); | ||
1306 | return 0; | ||
1307 | } | ||
1308 | |||
1309 | __skb_queue_head_init(&reclaimed_skbs); | ||
1310 | |||
1311 | /* Release all TFDs before the SSN, i.e. all TFDs in front of | ||
1312 | * block-ack window (we assume that they've been successfully | ||
1313 | * transmitted ... if not, it's too late anyway). */ | ||
1314 | if (iwl_reclaim(priv, sta_id, tid, scd_flow, | ||
1315 | ba_resp_scd_ssn, &reclaimed_skbs)) { | ||
1316 | spin_unlock(&priv->sta_lock); | ||
1317 | return 0; | ||
1318 | } | ||
1319 | |||
1320 | IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, " | ||
1321 | "sta_id = %d\n", | ||
1322 | agg->wait_for_ba, | ||
1323 | (u8 *) &ba_resp->sta_addr_lo32, | ||
1324 | ba_resp->sta_id); | ||
1325 | IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, " | ||
1326 | "scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n", | ||
1327 | ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl), | ||
1328 | (unsigned long long)le64_to_cpu(ba_resp->bitmap), | ||
1329 | scd_flow, ba_resp_scd_ssn, ba_resp->txed, | ||
1330 | ba_resp->txed_2_done); | ||
1331 | |||
1332 | /* Mark that the expected block-ack response arrived */ | ||
1333 | agg->wait_for_ba = false; | ||
1334 | |||
1335 | /* Sanity check values reported by uCode */ | ||
1336 | if (ba_resp->txed_2_done > ba_resp->txed) { | ||
1337 | IWL_DEBUG_TX_REPLY(priv, | ||
1338 | "bogus sent(%d) and ack(%d) count\n", | ||
1339 | ba_resp->txed, ba_resp->txed_2_done); | ||
1340 | /* | ||
1341 | * set txed_2_done = txed, | ||
1342 | * so it won't impact rate scale | ||
1343 | */ | ||
1344 | ba_resp->txed = ba_resp->txed_2_done; | ||
1345 | } | ||
1346 | |||
1347 | priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn; | ||
1348 | |||
1349 | iwlagn_check_ratid_empty(priv, sta_id, tid); | ||
1350 | freed = 0; | ||
1351 | |||
1352 | skb_queue_walk(&reclaimed_skbs, skb) { | ||
1353 | hdr = (struct ieee80211_hdr *)skb->data; | ||
1354 | |||
1355 | if (ieee80211_is_data_qos(hdr->frame_control)) | ||
1356 | freed++; | ||
1357 | else | ||
1358 | WARN_ON_ONCE(1); | ||
1359 | |||
1360 | info = IEEE80211_SKB_CB(skb); | ||
1361 | kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1])); | ||
1362 | |||
1363 | if (freed == 1) { | ||
1364 | /* this is the first skb we deliver in this batch */ | ||
1365 | /* put the rate scaling data there */ | ||
1366 | info = IEEE80211_SKB_CB(skb); | ||
1367 | memset(&info->status, 0, sizeof(info->status)); | ||
1368 | info->flags |= IEEE80211_TX_STAT_ACK; | ||
1369 | info->flags |= IEEE80211_TX_STAT_AMPDU; | ||
1370 | info->status.ampdu_ack_len = ba_resp->txed_2_done; | ||
1371 | info->status.ampdu_len = ba_resp->txed; | ||
1372 | iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, | ||
1373 | info); | ||
1374 | } | ||
1375 | } | ||
1376 | |||
1377 | spin_unlock(&priv->sta_lock); | ||
1378 | |||
1379 | while (!skb_queue_empty(&reclaimed_skbs)) { | ||
1380 | skb = __skb_dequeue(&reclaimed_skbs); | ||
1381 | ieee80211_tx_status(priv->hw, skb); | ||
1382 | } | ||
1383 | |||
1384 | return 0; | ||
1385 | } | ||