Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-agn-tx.c')
 drivers/net/wireless/iwlwifi/iwl-agn-tx.c | 546 ++++++++++++++++++-----------
 1 file changed, 334 insertions(+), 212 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 69155aa448fb..4974cd7837cb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -2,7 +2,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -67,22 +67,16 @@
  */
 
 static const u8 tid_to_ac[] = {
-	/* this matches the mac80211 numbers */
-	2, 3, 3, 2, 1, 1, 0, 0
+	IEEE80211_AC_BE,
+	IEEE80211_AC_BK,
+	IEEE80211_AC_BK,
+	IEEE80211_AC_BE,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_VO,
+	IEEE80211_AC_VO
 };
 
-static const u8 ac_to_fifo[] = {
-	IWL_TX_FIFO_VO,
-	IWL_TX_FIFO_VI,
-	IWL_TX_FIFO_BE,
-	IWL_TX_FIFO_BK,
-};
-
-static inline int get_fifo_from_ac(u8 ac)
-{
-	return ac_to_fifo[ac];
-}
-
 static inline int get_ac_from_tid(u16 tid)
 {
 	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
@@ -92,10 +86,10 @@ static inline int get_ac_from_tid(u16 tid)
 	return -EINVAL;
 }
 
-static inline int get_fifo_from_tid(u16 tid)
+static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
 {
 	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
-		return get_fifo_from_ac(tid_to_ac[tid]);
+		return ctx->ac_to_fifo[tid_to_ac[tid]];
 
 	/* no support for TIDs 8-15 yet */
 	return -EINVAL;
@@ -104,9 +98,9 @@ static inline int get_fifo_from_tid(u16 tid)
 /**
  * iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
  */
-void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
-				    struct iwl_tx_queue *txq,
-				    u16 byte_cnt)
+static void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
+					   struct iwl_tx_queue *txq,
+					   u16 byte_cnt)
 {
 	struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
 	int write_ptr = txq->q.write_ptr;
@@ -118,21 +112,19 @@ void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
 
 	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
 
-	if (txq_id != IWL_CMD_QUEUE_NUM) {
-		sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
-		sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
-
-		switch (sec_ctl & TX_CMD_SEC_MSK) {
-		case TX_CMD_SEC_CCM:
-			len += CCMP_MIC_LEN;
-			break;
-		case TX_CMD_SEC_TKIP:
-			len += TKIP_ICV_LEN;
-			break;
-		case TX_CMD_SEC_WEP:
-			len += WEP_IV_LEN + WEP_ICV_LEN;
-			break;
-		}
-	}
+	sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
+	sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
+
+	switch (sec_ctl & TX_CMD_SEC_MSK) {
+	case TX_CMD_SEC_CCM:
+		len += CCMP_MIC_LEN;
+		break;
+	case TX_CMD_SEC_TKIP:
+		len += TKIP_ICV_LEN;
+		break;
+	case TX_CMD_SEC_WEP:
+		len += WEP_IV_LEN + WEP_ICV_LEN;
+		break;
+	}
 
 	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
@@ -144,8 +136,8 @@ void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
 	tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
 }
 
-void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
-				   struct iwl_tx_queue *txq)
+static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
+					  struct iwl_tx_queue *txq)
 {
 	struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
 	int txq_id = txq->q.id;
@@ -155,7 +147,7 @@ void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
 
 	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
 
-	if (txq_id != IWL_CMD_QUEUE_NUM)
+	if (txq_id != priv->cmd_queue)
 		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
 
 	bc_ent = cpu_to_le16(1 | (sta_id << 12));
@@ -228,30 +220,46 @@ void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
 		       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
 }
 
-int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
-			  int tx_fifo, int sta_id, int tid, u16 ssn_idx)
+static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id, int tid)
 {
-	unsigned long flags;
-	u16 ra_tid;
-	int ret;
-
 	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
-	    (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
-	     <= txq_id)) {
+	    (IWLAGN_FIRST_AMPDU_QUEUE +
+	     priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
 		IWL_WARN(priv,
 			"queue number out of range: %d, must be %d to %d\n",
 			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
 			IWLAGN_FIRST_AMPDU_QUEUE +
-			priv->cfg->num_of_ampdu_queues - 1);
+			priv->cfg->base_params->num_of_ampdu_queues - 1);
 		return -EINVAL;
 	}
 
-	ra_tid = BUILD_RAxTID(sta_id, tid);
-
 	/* Modify device's station table to Tx this TID */
-	ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
-	if (ret)
-		return ret;
+	return iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
+}
+
+void iwlagn_txq_agg_queue_setup(struct iwl_priv *priv,
+				struct ieee80211_sta *sta,
+				int tid, int frame_limit)
+{
+	int sta_id, tx_fifo, txq_id, ssn_idx;
+	u16 ra_tid;
+	unsigned long flags;
+	struct iwl_tid_data *tid_data;
+
+	sta_id = iwl_sta_id(sta);
+	if (WARN_ON(sta_id == IWL_INVALID_STATION))
+		return;
+	if (WARN_ON(tid >= MAX_TID_COUNT))
+		return;
+
+	spin_lock_irqsave(&priv->sta_lock, flags);
+	tid_data = &priv->stations[sta_id].tid[tid];
+	ssn_idx = SEQ_TO_SN(tid_data->seq_number);
+	txq_id = tid_data->agg.txq_id;
+	tx_fifo = tid_data->agg.tx_fifo;
+	spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+	ra_tid = BUILD_RAxTID(sta_id, tid);
 
 	spin_lock_irqsave(&priv->lock, flags);
 
@@ -277,10 +285,10 @@ int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
 	iwl_write_targ_mem(priv, priv->scd_base_addr +
 			IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
 			sizeof(u32),
-			((SCD_WIN_SIZE <<
+			((frame_limit <<
 			IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
 			IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
-			((SCD_FRAME_LIMIT <<
+			((frame_limit <<
 			IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
 			IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
 
@@ -290,21 +298,19 @@ int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
 	iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
 
 	spin_unlock_irqrestore(&priv->lock, flags);
-
-	return 0;
 }
 
-int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
-			   u16 ssn_idx, u8 tx_fifo)
+static int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
+				  u16 ssn_idx, u8 tx_fifo)
 {
 	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
-	    (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
-	     <= txq_id)) {
+	    (IWLAGN_FIRST_AMPDU_QUEUE +
+	     priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
 		IWL_ERR(priv,
 			"queue number out of range: %d, must be %d to %d\n",
 			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
 			IWLAGN_FIRST_AMPDU_QUEUE +
-			priv->cfg->num_of_ampdu_queues - 1);
+			priv->cfg->base_params->num_of_ampdu_queues - 1);
 		return -EINVAL;
 	}
 
@@ -333,19 +339,15 @@ void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask)
 	iwl_write_prph(priv, IWLAGN_SCD_TXFACT, mask);
 }
 
-static inline int get_queue_from_ac(u16 ac)
-{
-	return ac;
-}
-
 /*
  * handle build REPLY_TX command notification.
  */
 static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
+				      struct sk_buff *skb,
 				      struct iwl_tx_cmd *tx_cmd,
 				      struct ieee80211_tx_info *info,
 				      struct ieee80211_hdr *hdr,
 				      u8 std_id)
 {
 	__le16 fc = hdr->frame_control;
 	__le32 tx_flags = tx_cmd->tx_flags;
@@ -365,6 +367,13 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
 
 	if (ieee80211_is_back_req(fc))
 		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
+	else if (info->band == IEEE80211_BAND_2GHZ &&
+		 priv->cfg->bt_params &&
+		 priv->cfg->bt_params->advanced_bt_coexist &&
+		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
+		  ieee80211_is_reassoc_req(fc) ||
+		  skb->protocol == cpu_to_be16(ETH_P_PAE)))
+		tx_flags |= TX_CMD_FLG_IGNORE_BT;
 
 
 	tx_cmd->sta_id = std_id;
@@ -454,7 +463,14 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
 		rate_flags |= RATE_MCS_CCK_MSK;
 
 	/* Set up antennas */
-	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
+	if (priv->cfg->bt_params &&
+	    priv->cfg->bt_params->advanced_bt_coexist &&
+	    priv->bt_full_concurrent) {
+		/* operated as 1x1 in full concurrency mode */
+		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
+				first_antenna(priv->hw_params.valid_tx_ant));
+	} else
+		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
 					      priv->hw_params.valid_tx_ant);
 	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
 
@@ -470,8 +486,8 @@ static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
 {
 	struct ieee80211_key_conf *keyconf = info->control.hw_key;
 
-	switch (keyconf->alg) {
-	case ALG_CCMP:
+	switch (keyconf->cipher) {
+	case WLAN_CIPHER_SUITE_CCMP:
 		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
 		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
 		if (info->flags & IEEE80211_TX_CTL_AMPDU)
@@ -479,20 +495,20 @@ static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
 		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
 		break;
 
-	case ALG_TKIP:
+	case WLAN_CIPHER_SUITE_TKIP:
 		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
 		ieee80211_get_tkip_key(keyconf, skb_frag,
 			IEEE80211_TKIP_P2_KEY, tx_cmd->key);
 		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
 		break;
 
-	case ALG_WEP:
+	case WLAN_CIPHER_SUITE_WEP104:
+		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+		/* fall through */
+	case WLAN_CIPHER_SUITE_WEP40:
 		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
 			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
 
-		if (keyconf->keylen == WEP_KEY_LEN_128)
-			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
-
 		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
 
 		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
@@ -500,7 +516,7 @@ static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
 		break;
 
 	default:
-		IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
+		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
 		break;
 	}
 }
@@ -519,11 +535,12 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	struct iwl_device_cmd *out_cmd;
 	struct iwl_cmd_meta *out_meta;
 	struct iwl_tx_cmd *tx_cmd;
-	int swq_id, txq_id;
-	dma_addr_t phys_addr;
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+	int txq_id;
+	dma_addr_t phys_addr = 0;
 	dma_addr_t txcmd_phys;
 	dma_addr_t scratch_phys;
-	u16 len, len_org, firstlen, secondlen;
+	u16 len, firstlen, secondlen;
 	u16 seq_number = 0;
 	__le16 fc;
 	u8 hdr_len;
@@ -532,11 +549,22 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	u8 tid = 0;
 	u8 *qc = NULL;
 	unsigned long flags;
+	bool is_agg = false;
+
+	/*
+	 * If the frame needs to go out off-channel, then
+	 * we'll have put the PAN context to that channel,
+	 * so make the frame go out there.
+	 */
+	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
+		ctx = &priv->contexts[IWL_RXON_CTX_PAN];
+	else if (info->control.vif)
+		ctx = iwl_rxon_ctx_from_vif(info->control.vif);
 
 	spin_lock_irqsave(&priv->lock, flags);
 	if (iwl_is_rfkill(priv)) {
 		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
-		goto drop_unlock;
+		goto drop_unlock_priv;
 	}
 
 	fc = hdr->frame_control;
@@ -552,12 +580,17 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	hdr_len = ieee80211_hdrlen(fc);
 
-	/* Find index into station table for destination station */
-	sta_id = iwl_sta_id_or_broadcast(priv, info->control.sta);
-	if (sta_id == IWL_INVALID_STATION) {
-		IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
-			       hdr->addr1);
-		goto drop_unlock;
+	/* For management frames use broadcast id to do not break aggregation */
+	if (!ieee80211_is_data(fc))
+		sta_id = ctx->bcast_sta_id;
+	else {
+		/* Find index into station table for destination station */
+		sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
+		if (sta_id == IWL_INVALID_STATION) {
+			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
+				       hdr->addr1);
+			goto drop_unlock_priv;
+		}
 	}
 
 	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
@@ -565,9 +598,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	if (sta)
 		sta_priv = (void *)sta->drv_priv;
 
-	if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
-	    sta_priv->asleep) {
-		WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
+	if (sta_priv && sta_priv->asleep &&
+	    (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
 		/*
 		 * This sends an asynchronous command to the device,
 		 * but we can rely on it being processed before the
@@ -580,7 +612,20 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
 	}
 
-	txq_id = get_queue_from_ac(skb_get_queue_mapping(skb));
+	/*
+	 * Send this frame after DTIM -- there's a special queue
+	 * reserved for this for contexts that support AP mode.
+	 */
+	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+		txq_id = ctx->mcast_queue;
+		/*
+		 * The microcode will clear the more data
+		 * bit in the last frame it transmits.
+		 */
+		hdr->frame_control |=
+			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+	} else
+		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
 
 	/* irqs already disabled/saved above when locking priv->lock */
 	spin_lock(&priv->sta_lock);
@@ -588,10 +633,10 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	if (ieee80211_is_data_qos(fc)) {
 		qc = ieee80211_get_qos_ctl(hdr);
 		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
-		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
-			spin_unlock(&priv->sta_lock);
-			goto drop_unlock;
-		}
+
+		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT))
+			goto drop_unlock_sta;
+
 		seq_number = priv->stations[sta_id].tid[tid].seq_number;
 		seq_number &= IEEE80211_SCTL_SEQ;
 		hdr->seq_ctrl = hdr->seq_ctrl &
@@ -602,29 +647,20 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
 	    priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
 		txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
+		is_agg = true;
 	}
 	}
 
 	txq = &priv->txq[txq_id];
-	swq_id = txq->swq_id;
 	q = &txq->q;
 
-	if (unlikely(iwl_queue_space(q) < q->high_mark)) {
-		spin_unlock(&priv->sta_lock);
-		goto drop_unlock;
-	}
+	if (unlikely(iwl_queue_space(q) < q->high_mark))
+		goto drop_unlock_sta;
 
-	if (ieee80211_is_data_qos(fc)) {
-		priv->stations[sta_id].tid[tid].tfds_in_queue++;
-		if (!ieee80211_has_morefrags(fc))
-			priv->stations[sta_id].tid[tid].seq_number = seq_number;
-	}
-
-	spin_unlock(&priv->sta_lock);
-
 	/* Set up driver data for this TFD */
 	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
 	txq->txb[q->write_ptr].skb = skb;
+	txq->txb[q->write_ptr].ctx = ctx;
 
 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
 	out_cmd = txq->cmd[q->write_ptr];
@@ -655,7 +691,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
 
 	/* TODO need this for burst mode later on */
-	iwlagn_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
+	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
 	iwl_dbg_log_tx_data_frame(priv, len, hdr);
 
 	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
@@ -672,30 +708,21 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	 */
 	len = sizeof(struct iwl_tx_cmd) +
 		sizeof(struct iwl_cmd_header) + hdr_len;
-
-	len_org = len;
-	firstlen = len = (len + 3) & ~3;
-
-	if (len_org != len)
-		len_org = 1;
-	else
-		len_org = 0;
+	firstlen = (len + 3) & ~3;
 
 	/* Tell NIC about any 2-byte padding after MAC header */
-	if (len_org)
+	if (firstlen != len)
 		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
 
 	/* Physical address of this Tx command's header (not MAC header!),
 	 * within command buffer array. */
 	txcmd_phys = pci_map_single(priv->pci_dev,
-				    &out_cmd->hdr, len,
+				    &out_cmd->hdr, firstlen,
 				    PCI_DMA_BIDIRECTIONAL);
+	if (unlikely(pci_dma_mapping_error(priv->pci_dev, txcmd_phys)))
+		goto drop_unlock_sta;
 	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
-	dma_unmap_len_set(out_meta, len, len);
-	/* Add buffer containing Tx command and MAC(!) header to TFD's
-	 * first entry */
-	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-						   txcmd_phys, len, 1, 0);
+	dma_unmap_len_set(out_meta, len, firstlen);
 
 	if (!ieee80211_has_morefrags(hdr->frame_control)) {
 		txq->need_update = 1;
@@ -706,23 +733,39 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	/* Set up TFD's 2nd entry to point directly to remainder of skb,
 	 * if any (802.11 null frames have no payload). */
-	secondlen = len = skb->len - hdr_len;
-	if (len) {
+	secondlen = skb->len - hdr_len;
+	if (secondlen > 0) {
 		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
-					   len, PCI_DMA_TODEVICE);
-		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-							   phys_addr, len,
-							   0, 0);
+					   secondlen, PCI_DMA_TODEVICE);
+		if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) {
+			pci_unmap_single(priv->pci_dev,
+					 dma_unmap_addr(out_meta, mapping),
+					 dma_unmap_len(out_meta, len),
+					 PCI_DMA_BIDIRECTIONAL);
+			goto drop_unlock_sta;
+		}
 	}
 
+	if (ieee80211_is_data_qos(fc)) {
+		priv->stations[sta_id].tid[tid].tfds_in_queue++;
+		if (!ieee80211_has_morefrags(fc))
+			priv->stations[sta_id].tid[tid].seq_number = seq_number;
+	}
+
+	spin_unlock(&priv->sta_lock);
+
+	/* Attach buffers to TFD */
+	iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
+	if (secondlen > 0)
+		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
+					     secondlen, 0);
+
 	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
 				offsetof(struct iwl_tx_cmd, scratch);
 
-	len = sizeof(struct iwl_tx_cmd) +
-		sizeof(struct iwl_cmd_header) + hdr_len;
 	/* take back ownership of DMA buffer to enable update */
 	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
-				    len, PCI_DMA_BIDIRECTIONAL);
+				    firstlen, PCI_DMA_BIDIRECTIONAL);
 	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
 	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
 
@@ -734,11 +777,11 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	/* Set up entry for this TFD in Tx byte-count array */
 	if (info->flags & IEEE80211_TX_CTL_AMPDU)
-		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
-						     le16_to_cpu(tx_cmd->len));
+		iwlagn_txq_update_byte_cnt_tbl(priv, txq,
+					       le16_to_cpu(tx_cmd->len));
 
 	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
-				       len, PCI_DMA_BIDIRECTIONAL);
+				       firstlen, PCI_DMA_BIDIRECTIONAL);
 
 	trace_iwlwifi_dev_tx(priv,
 			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
@@ -758,8 +801,14 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	 * whether or not we should update the write pointer.
 	 */
 
-	/* avoid atomic ops if it isn't an associated client */
-	if (sta_priv && sta_priv->client)
+	/*
+	 * Avoid atomic ops if it isn't an associated client.
+	 * Also, if this is a packet for aggregation, don't
+	 * increase the counter because the ucode will stop
+	 * aggregation queues when their respective station
+	 * goes to sleep.
+	 */
+	if (sta_priv && sta_priv->client && !is_agg)
 		atomic_inc(&sta_priv->pending_frames);
 
 	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
@@ -769,13 +818,15 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 			iwl_txq_update_write_ptr(priv, txq);
 			spin_unlock_irqrestore(&priv->lock, flags);
 		} else {
-			iwl_stop_queue(priv, txq->swq_id);
+			iwl_stop_queue(priv, txq);
 		}
 	}
 
 	return 0;
 
-drop_unlock:
+drop_unlock_sta:
+	spin_unlock(&priv->sta_lock);
+drop_unlock_priv:
 	spin_unlock_irqrestore(&priv->lock, flags);
 	return -1;
 }
@@ -813,7 +864,7 @@ void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv)
 	/* Tx queues */
 	if (priv->txq) {
 		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
-			if (txq_id == IWL_CMD_QUEUE_NUM)
+			if (txq_id == priv->cmd_queue)
 				iwl_cmd_queue_free(priv);
 			else
 				iwl_tx_queue_free(priv, txq_id);
@@ -863,16 +914,16 @@ int iwlagn_txq_ctx_alloc(struct iwl_priv *priv)
 	spin_lock_irqsave(&priv->lock, flags);
 
 	/* Turn off all Tx DMA fifos */
-	priv->cfg->ops->lib->txq_set_sched(priv, 0);
+	iwlagn_txq_set_sched(priv, 0);
 
 	/* Tell NIC where to find the "keep warm" buffer */
 	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	/* Alloc and init all Tx queues, including the command queue (#4) */
+	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
 	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
-		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
+		slots_num = (txq_id == priv->cmd_queue) ?
 					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
 		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
 				       txq_id);
@@ -901,7 +952,7 @@ void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
 	spin_lock_irqsave(&priv->lock, flags);
 
 	/* Turn off all Tx DMA fifos */
-	priv->cfg->ops->lib->txq_set_sched(priv, 0);
+	iwlagn_txq_set_sched(priv, 0);
 
 	/* Tell NIC where to find the "keep warm" buffer */
 	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
@@ -910,7 +961,7 @@ void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
 
 	/* Alloc and init all Tx queues, including the command queue (#4) */
 	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
-		slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
+		slots_num = txq_id == priv->cmd_queue ?
 			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
 		iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
 	}
@@ -921,13 +972,13 @@ void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
  */
 void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
 {
-	int ch;
+	int ch, txq_id;
 	unsigned long flags;
 
 	/* Turn off all Tx DMA fifos */
 	spin_lock_irqsave(&priv->lock, flags);
 
-	priv->cfg->ops->lib->txq_set_sched(priv, 0);
+	iwlagn_txq_set_sched(priv, 0);
 
 	/* Stop each Tx DMA channel, and wait for it to be idle */
 	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
@@ -940,6 +991,16 @@ void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
 			    iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
 	}
 	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (!priv->txq)
+		return;
+
+	/* Unmap DMA from host system and free skb's */
+	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+		if (txq_id == priv->cmd_queue)
+			iwl_cmd_queue_unmap(priv);
+		else
+			iwl_tx_queue_unmap(priv, txq_id);
 }
 
 /*
@@ -968,7 +1029,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
 	unsigned long flags;
 	struct iwl_tid_data *tid_data;
 
-	tx_fifo = get_fifo_from_tid(tid);
+	tx_fifo = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
 	if (unlikely(tx_fifo < 0))
 		return tx_fifo;
 
@@ -998,11 +1059,11 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
 	tid_data = &priv->stations[sta_id].tid[tid];
 	*ssn = SEQ_TO_SN(tid_data->seq_number);
 	tid_data->agg.txq_id = txq_id;
-	priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(get_ac_from_tid(tid), txq_id);
+	tid_data->agg.tx_fifo = tx_fifo;
+	iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id);
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 
-	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
-						  sta_id, tid, *ssn);
+	ret = iwlagn_txq_agg_enable(priv, txq_id, sta_id, tid);
 	if (ret)
 		return ret;
 
@@ -1024,12 +1085,12 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
 int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 		       struct ieee80211_sta *sta, u16 tid)
 {
-	int tx_fifo_id, txq_id, sta_id, ssn = -1;
+	int tx_fifo_id, txq_id, sta_id, ssn;
 	struct iwl_tid_data *tid_data;
 	int write_ptr, read_ptr;
 	unsigned long flags;
 
-	tx_fifo_id = get_fifo_from_tid(tid);
+	tx_fifo_id = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
 	if (unlikely(tx_fifo_id < 0))
 		return tx_fifo_id;
 
@@ -1042,21 +1103,26 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 
 	spin_lock_irqsave(&priv->sta_lock, flags);
 
-	if (priv->stations[sta_id].tid[tid].agg.state ==
-				IWL_EMPTYING_HW_QUEUE_ADDBA) {
-		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
-		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-		priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
-		spin_unlock_irqrestore(&priv->sta_lock, flags);
-		return 0;
-	}
-
-	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
-		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
-
 	tid_data = &priv->stations[sta_id].tid[tid];
 	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
 	txq_id = tid_data->agg.txq_id;
+
+	switch (priv->stations[sta_id].tid[tid].agg.state) {
+	case IWL_EMPTYING_HW_QUEUE_ADDBA:
+		/*
+		 * This can happen if the peer stops aggregation
+		 * again before we've had a chance to drain the
+		 * queue we selected previously, i.e. before the
+		 * session was really started completely.
+		 */
+		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
+		goto turn_off;
+	case IWL_AGG_ON:
+		break;
+	default:
+		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
+	}
+
 	write_ptr = priv->txq[txq_id].q.write_ptr;
 	read_ptr = priv->txq[txq_id].q.read_ptr;
 
@@ -1070,6 +1136,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 	}
 
 	IWL_DEBUG_HT(priv, "HW queue is empty\n");
+turn_off:
 	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
 
 	/* do not restore/save irqs */
@@ -1083,8 +1150,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 	 * to deactivate the uCode queue, just return "success" to allow
 	 * mac80211 to clean up it own data.
 	 */
-	priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
-						   tx_fifo_id);
+	iwlagn_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
@@ -1098,6 +1164,9 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
 	struct iwl_queue *q = &priv->txq[txq_id].q;
 	u8 *addr = priv->stations[sta_id].sta.sta.addr;
 	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
+	struct iwl_rxon_context *ctx;
+
+	ctx = &priv->contexts[priv->stations[sta_id].ctxid];
 
 	lockdep_assert_held(&priv->sta_lock);
 
@@ -1108,12 +1177,11 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
 		if ((txq_id == tid_data->agg.txq_id) &&
 		    (q->read_ptr == q->write_ptr)) {
 			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
-			int tx_fifo = get_fifo_from_tid(tid);
+			int tx_fifo = get_fifo_from_tid(ctx, tid);
 			IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
-			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
-							     ssn, tx_fifo);
+			iwlagn_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
 			tid_data->agg.state = IWL_AGG_OFF;
-			ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
+			ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
 		}
 		break;
 	case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -1121,7 +1189,7 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
 		if (tid_data->tfds_in_queue == 0) {
 			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
 			tid_data->agg.state = IWL_AGG_ON;
-			ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
+			ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
 		}
 		break;
 	}
@@ -1129,14 +1197,15 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
 	return 0;
 }
 
-static void iwlagn_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
+static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
+				     struct iwl_rxon_context *ctx,
+				     const u8 *addr1)
 {
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 	struct ieee80211_sta *sta;
 	struct iwl_station_priv *sta_priv;
 
 	rcu_read_lock();
-	sta = ieee80211_find_sta(priv->vif, hdr->addr1);
+	sta = ieee80211_find_sta(ctx->vif, addr1);
 	if (sta) {
 		sta_priv = (void *)sta->drv_priv;
 		/* avoid atomic ops if this isn't a client */
@@ -1145,8 +1214,17 @@ static void iwlagn_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
 			ieee80211_sta_block_awake(priv->hw, sta, false);
 	}
 	rcu_read_unlock();
+}
+
+static void iwlagn_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
+			     bool is_agg)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
+
+	if (!is_agg)
+		iwlagn_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
 
-	ieee80211_tx_status_irqsafe(priv->hw, skb);
+	ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
 }
 
 int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
@@ -1169,17 +1247,21 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
 		tx_info = &txq->txb[txq->q.read_ptr];
-		iwlagn_tx_status(priv, tx_info->skb);
+
+		if (WARN_ON_ONCE(tx_info->skb == NULL))
+			continue;
 
 		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
-		if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+		if (ieee80211_is_data_qos(hdr->frame_control))
 			nfreed++;
+
+		iwlagn_tx_status(priv, tx_info,
+				 txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
 		tx_info->skb = NULL;
 
-		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
-			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
+		iwlagn_txq_inval_byte_cnt_tbl(priv, txq);
 
-		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+		iwlagn_txq_free_tfd(priv, txq);
 	}
 	return nfreed;
 }
@@ -1195,15 +1277,15 @@ static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
 			struct iwl_compressed_ba_resp *ba_resp)
 
 {
-	int i, sh, ack;
+	int sh;
 	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
 	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
-	u64 bitmap, sent_bitmap;
-	int successes = 0;
 	struct ieee80211_tx_info *info;
+	u64 bitmap, sent_bitmap;
 
 	if (unlikely(!agg->wait_for_ba)) {
-		IWL_ERR(priv, "Received BA when not expected\n");
+		if (unlikely(ba_resp->bitmap))
+			IWL_ERR(priv, "Received BA when not expected\n");
 		return -EINVAL;
 	}
 
@@ -1213,44 +1295,44 @@ static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
 
 	/* Calculate shift to align block-ack bits with our Tx window bits */
 	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
-	if (sh < 0) /* tbw something is wrong with indices */
+	if (sh < 0)
 		sh += 0x100;
 
-	/* don't use 64-bit values for now */
+	/*
+	 * Check for success or failure according to the
+	 * transmitted bitmap and block-ack bitmap
+	 */
 	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
+	sent_bitmap = bitmap & agg->bitmap;
 
-	if (agg->frame_count > (64 - sh)) {
-		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
-		return -1;
+	/* Sanity check values reported by uCode */
+	if (ba_resp->txed_2_done > ba_resp->txed) {
+		IWL_DEBUG_TX_REPLY(priv,
+			"bogus sent(%d) and ack(%d) count\n",
+			ba_resp->txed, ba_resp->txed_2_done);
+		/*
+		 * set txed_2_done = txed,
+		 * so it won't impact rate scale
+		 */
+		ba_resp->txed = ba_resp->txed_2_done;
 	}
+	IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
+			ba_resp->txed, ba_resp->txed_2_done);
 
-	/* check for success or failure according to the
-	 * transmitted bitmap and block-ack bitmap */
-	sent_bitmap = bitmap & agg->bitmap;
-
-	/* For each frame attempted in aggregation,
-	 * update driver's record of tx frame's status. */
-	i = 0;
-	while (sent_bitmap) {
-		ack = sent_bitmap & 1ULL;
-		successes += ack;
-		IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
-			ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
-			agg->start_idx + i);
+	/* Find the first ACKed frame to store the TX status */
+	while (sent_bitmap && !(sent_bitmap & 1)) {
+		agg->start_idx = (agg->start_idx + 1) & 0xff;
 		sent_bitmap >>= 1;
-		++i;
 	}
 
 	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
 	memset(&info->status, 0, sizeof(info->status));
 	info->flags |= IEEE80211_TX_STAT_ACK;
 	info->flags |= IEEE80211_TX_STAT_AMPDU;
-	info->status.ampdu_ack_len = successes;
-	info->status.ampdu_len = agg->frame_count;
+	info->status.ampdu_ack_len = ba_resp->txed_2_done;
+	info->status.ampdu_len = ba_resp->txed;
 	iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
 
-	IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);
-
 	return 0;
 }
 
@@ -1360,10 +1442,50 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
 		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
 		    priv->mac80211_registered &&
 		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
-			iwl_wake_queue(priv, txq->swq_id);
+			iwl_wake_queue(priv, txq);
 
 		iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
 	}
 
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 }
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+const char *iwl_get_tx_fail_reason(u32 status)
+{
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+
+	switch (status & TX_STATUS_MSK) {
+	case TX_STATUS_SUCCESS:
+		return "SUCCESS";
+	TX_STATUS_POSTPONE(DELAY);
+	TX_STATUS_POSTPONE(FEW_BYTES);
+	TX_STATUS_POSTPONE(BT_PRIO);
+	TX_STATUS_POSTPONE(QUIET_PERIOD);
+	TX_STATUS_POSTPONE(CALC_TTAK);
+	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+	TX_STATUS_FAIL(SHORT_LIMIT);
+	TX_STATUS_FAIL(LONG_LIMIT);
+	TX_STATUS_FAIL(FIFO_UNDERRUN);
+	TX_STATUS_FAIL(DRAIN_FLOW);
+	TX_STATUS_FAIL(RFKILL_FLUSH);
+	TX_STATUS_FAIL(LIFE_EXPIRE);
+	TX_STATUS_FAIL(DEST_PS);
+	TX_STATUS_FAIL(HOST_ABORTED);
+	TX_STATUS_FAIL(BT_RETRY);
+	TX_STATUS_FAIL(STA_INVALID);
+	TX_STATUS_FAIL(FRAG_DROPPED);
+	TX_STATUS_FAIL(TID_DISABLE);
+	TX_STATUS_FAIL(FIFO_FLUSHED);
+	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
+	TX_STATUS_FAIL(PASSIVE_NO_RX);
+	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
+	}
+
+	return "UNKNOWN";
+
+#undef TX_STATUS_FAIL
+#undef TX_STATUS_POSTPONE
+}
+#endif /* CONFIG_IWLWIFI_DEBUG */