author		Wey-Yi Guy <wey-yi.w.guy@intel.com>	2010-03-17 16:34:34 -0400
committer	Reinette Chatre <reinette.chatre@intel.com>	2010-03-25 14:19:27 -0400
commit		74bcdb33e99f49ef5202dd2f8109945b4570edc2 (patch)
tree		0c1be859e04b0c460abd9cbf1f40571362c82956 /drivers/net/wireless
parent		348ee7cd57831c47373dd157f138c558daaf129d (diff)
iwlwifi: move agn only tx functions from iwlcore to iwlagn
Identify the tx functions used only by the agn driver and move them from
iwlcore to iwlagn.
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Diffstat (limited to 'drivers/net/wireless')
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-4965.c		7
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-agn-lib.c	136
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-agn-tx.c	1022
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-agn.c		14
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-agn.h		17
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-core.c		51
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-core.h		12
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-devtrace.c	1
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-rx.c		80
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-sta.c		1
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-tx.c		1029
11 files changed, 1185 insertions(+), 1185 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 3949133d9ee2..cee31cdf0a22 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -46,6 +46,7 @@
 #include "iwl-calib.h"
 #include "iwl-sta.h"
 #include "iwl-agn-led.h"
+#include "iwl-agn.h"
 
 static int iwl4965_send_tx_power(struct iwl_priv *priv);
 static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
@@ -2020,7 +2021,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
 		index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
 		IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
			   "%d index %d\n", scd_ssn , index);
-		freed = iwl_tx_queue_reclaim(priv, txq_id, index);
+		freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
 		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
 
 		if (priv->mac80211_registered &&
@@ -2046,7 +2047,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
			   le32_to_cpu(tx_resp->rate_n_flags),
			   tx_resp->failure_frame);
 
-		freed = iwl_tx_queue_reclaim(priv, txq_id, index);
+		freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
 		if (qc && likely(sta_id != IWL_INVALID_STATION))
 			priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
 
@@ -2056,7 +2057,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
 	}
 
 	if (qc && likely(sta_id != IWL_INVALID_STATION))
-		iwl_txq_check_empty(priv, sta_id, tid, txq_id);
+		iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
 
 	if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
 		IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
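The hunks above only rename the reclaim calls, but they lean on the driver's ring-index arithmetic: the firmware reports a scheduler SSN, and iwl_queue_dec_wrap() turns it into the last index to reclaim in a circular buffer of n_bd descriptors. A minimal standalone model of that wrap arithmetic (the helper bodies here are assumptions based on standard ring-buffer math; only the call sites appear in this patch):

#include <stdio.h>

/* Hypothetical stand-ins for the driver's iwl_queue_inc_wrap() and
 * iwl_queue_dec_wrap(): step an index forward or back in a ring of
 * n_bd buffer descriptors. */
static int queue_inc_wrap(int index, int n_bd)
{
	return (index == n_bd - 1) ? 0 : index + 1;
}

static int queue_dec_wrap(int index, int n_bd)
{
	return (index == 0) ? n_bd - 1 : index - 1;
}

int main(void)
{
	int n_bd = 256;		/* descriptors per Tx queue (illustrative) */
	int scd_ssn = 0x103;	/* scheduler SSN reported by the firmware */

	/* Index just before the SSN, as in iwl4965_rx_reply_tx() */
	int index = queue_dec_wrap(scd_ssn & 0xff, n_bd);

	printf("scd_ssn 0x%x -> reclaim through index %d\n", scd_ssn, index);
	printf("inc_wrap(255) = %d\n", queue_inc_wrap(255, n_bd));
	return 0;
}

Everything between the queue's read pointer and this index can then be handed back to mac80211, which is what the renamed iwlagn_tx_queue_reclaim() does.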
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index c826b7f54256..6f9d52d04464 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -208,7 +208,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
			   "scd_ssn=%d idx=%d txq=%d swq=%d\n",
			   scd_ssn , index, txq_id, txq->swq_id);
 
-		freed = iwl_tx_queue_reclaim(priv, txq_id, index);
+		freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
 		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
 
 		if (priv->mac80211_registered &&
@@ -236,7 +236,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
			   le32_to_cpu(tx_resp->rate_n_flags),
			   tx_resp->failure_frame);
 
-		freed = iwl_tx_queue_reclaim(priv, txq_id, index);
+		freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
 		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
 
 		if (priv->mac80211_registered &&
@@ -244,7 +244,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
 			iwl_wake_queue(priv, txq_id);
 	}
 
-	iwl_txq_check_empty(priv, sta_id, tid, txq_id);
+	iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
 
 	if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
 		IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
@@ -381,3 +381,133 @@ struct iwl_mod_params iwlagn_mod_params = {
 	.restart_fw = 1,
 	/* the rest are 0 by default */
 };
+
+void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+	unsigned long flags;
+	int i;
+	spin_lock_irqsave(&rxq->lock, flags);
+	INIT_LIST_HEAD(&rxq->rx_free);
+	INIT_LIST_HEAD(&rxq->rx_used);
+	/* Fill the rx_used queue with _all_ of the Rx buffers */
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+		/* In the reset function, these buffers may have been allocated
+		 * to an SKB, so we need to unmap and free potential storage */
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			__iwl_free_pages(priv, rxq->pool[i].page);
+			rxq->pool[i].page = NULL;
+		}
+		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+	}
+
+	/* Set us so that we have processed and used all buffers, but have
+	 * not restocked the Rx queue with fresh buffers */
+	rxq->read = rxq->write = 0;
+	rxq->write_actual = 0;
+	rxq->free_count = 0;
+	spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+	u32 rb_size;
+	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+	u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
+
+	if (!priv->cfg->use_isr_legacy)
+		rb_timeout = RX_RB_TIMEOUT;
+
+	if (priv->cfg->mod_params->amsdu_size_8K)
+		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+	else
+		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+	/* Stop Rx DMA */
+	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+	/* Reset driver's Rx queue write index */
+	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+	/* Tell device where to find RBD circular buffer in DRAM */
+	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+			   (u32)(rxq->dma_addr >> 8));
+
+	/* Tell device where in DRAM to update its Rx status */
+	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+			   rxq->rb_stts_dma >> 4);
+
+	/* Enable Rx DMA
+	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
+	 *      the credit mechanism in 5000 HW RX FIFO
+	 * Direct rx interrupts to hosts
+	 * Rx buffer size 4 or 8k
+	 * RB timeout 0x10
+	 * 256 RBDs
+	 */
+	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
+			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
+			   rb_size|
+			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
+			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+	/* Set interrupt coalescing timer to default (2048 usecs) */
+	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
+	return 0;
+}
+
+int iwlagn_hw_nic_init(struct iwl_priv *priv)
+{
+	unsigned long flags;
+	struct iwl_rx_queue *rxq = &priv->rxq;
+	int ret;
+
+	/* nic_init */
+	spin_lock_irqsave(&priv->lock, flags);
+	priv->cfg->ops->lib->apm_ops.init(priv);
+
+	/* Set interrupt coalescing calibration timer to default (512 usecs) */
+	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
+
+	priv->cfg->ops->lib->apm_ops.config(priv);
+
+	/* Allocate the RX queue, or reset if it is already allocated */
+	if (!rxq->bd) {
+		ret = iwl_rx_queue_alloc(priv);
+		if (ret) {
+			IWL_ERR(priv, "Unable to initialize Rx queue\n");
+			return -ENOMEM;
+		}
+	} else
+		iwlagn_rx_queue_reset(priv, rxq);
+
+	iwl_rx_replenish(priv);
+
+	iwlagn_rx_init(priv, rxq);
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	rxq->need_update = 1;
+	iwl_rx_queue_update_write_ptr(priv, rxq);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* Allocate and init all Tx and Command queues */
+	ret = iwlagn_txq_ctx_reset(priv);
+	if (ret)
+		return ret;
+
+	set_bit(STATUS_INIT, &priv->status);
+
+	return 0;
+}
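iwlagn_rx_queue_reset() above frees any page still attached to a pool entry, parks every buffer on the rx_used list, and rewinds read/write so the queue looks fully consumed but not yet restocked. A user-space sketch of that bookkeeping (the types, list handling, and page allocation are simplified stand-ins, not the driver's kernel APIs):

#include <stdio.h>
#include <stdlib.h>

#define RX_QUEUE_SIZE	256
#define RX_FREE_BUFFERS	64
#define POOL_SIZE	(RX_FREE_BUFFERS + RX_QUEUE_SIZE)

struct rx_buf {
	void *page;			/* NULL when no storage is attached */
};

struct rx_queue {
	struct rx_buf pool[POOL_SIZE];
	int rx_used[POOL_SIZE];		/* indices parked on the "used" list */
	int n_used;
	int read, write, free_count;
};

/* Mirrors the reset logic: release any storage still attached to a pool
 * entry, park every buffer on rx_used, and mark the ring as fully
 * consumed but not yet restocked. */
static void rx_queue_reset(struct rx_queue *rxq)
{
	int i;

	rxq->n_used = 0;
	for (i = 0; i < POOL_SIZE; i++) {
		free(rxq->pool[i].page);	/* stands in for unmap + free */
		rxq->pool[i].page = NULL;
		rxq->rx_used[rxq->n_used++] = i;
	}
	rxq->read = rxq->write = 0;
	rxq->free_count = 0;
}

int main(void)
{
	static struct rx_queue rxq;		/* zero-initialized */

	rxq.pool[3].page = malloc(4096);	/* pretend one buffer was in flight */
	rx_queue_reset(&rxq);
	printf("rx_used holds %d buffers, read=%d write=%d free_count=%d\n",
	       rxq.n_used, rxq.read, rxq.write, rxq.free_count);
	return 0;
}

After this reset, iwl_rx_replenish() can reattach fresh pages and restock the ring from a known-empty state, which is why iwlagn_hw_nic_init() calls the two back to back.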
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 43bdd9d9df4d..4e4b70d10804 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -36,8 +36,61 @@
 #include "iwl-core.h"
 #include "iwl-sta.h"
 #include "iwl-io.h"
+#include "iwl-helpers.h"
 #include "iwl-agn-hw.h"
 
+/*
+ * mac80211 queues, ACs, hardware queues, FIFOs.
+ *
+ * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
+ *
+ * Mac80211 uses the following numbers, which we get as from it
+ * by way of skb_get_queue_mapping(skb):
+ *
+ *	VO	0
+ *	VI	1
+ *	BE	2
+ *	BK	3
+ *
+ *
+ * Regular (not A-MPDU) frames are put into hardware queues corresponding
+ * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
+ * own queue per aggregation session (RA/TID combination), such queues are
+ * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
+ * order to map frames to the right queue, we also need an AC->hw queue
+ * mapping. This is implemented here.
+ *
+ * Due to the way hw queues are set up (by the hw specific modules like
+ * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
+ * mapping.
+ */
+
+static const u8 tid_to_ac[] = {
+	/* this matches the mac80211 numbers */
+	2, 3, 3, 2, 1, 1, 0, 0
+};
+
+static const u8 ac_to_fifo[] = {
+	IWL_TX_FIFO_VO,
+	IWL_TX_FIFO_VI,
+	IWL_TX_FIFO_BE,
+	IWL_TX_FIFO_BK,
+};
+
+static inline int get_fifo_from_ac(u8 ac)
+{
+	return ac_to_fifo[ac];
+}
+
+static inline int get_fifo_from_tid(u16 tid)
+{
+	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+		return get_fifo_from_ac(tid_to_ac[tid]);
+
+	/* no support for TIDs 8-15 yet */
+	return -EINVAL;
+}
+
 /**
  * iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
  */
@@ -266,3 +319,972 @@ void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask)
 {
 	iwl_write_prph(priv, IWL50_SCD_TXFACT, mask);
 }
+
+static inline int get_queue_from_ac(u16 ac)
+{
+	return ac;
+}
+
+/*
+ * handle build REPLY_TX command notification.
+ */
+static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
+				      struct iwl_tx_cmd *tx_cmd,
+				      struct ieee80211_tx_info *info,
+				      struct ieee80211_hdr *hdr,
+				      u8 std_id)
+{
+	__le16 fc = hdr->frame_control;
+	__le32 tx_flags = tx_cmd->tx_flags;
+
+	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+		tx_flags |= TX_CMD_FLG_ACK_MSK;
+		if (ieee80211_is_mgmt(fc))
+			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+		if (ieee80211_is_probe_resp(fc) &&
+		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+			tx_flags |= TX_CMD_FLG_TSF_MSK;
+	} else {
+		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+	}
+
+	if (ieee80211_is_back_req(fc))
+		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
+
+
+	tx_cmd->sta_id = std_id;
+	if (ieee80211_has_morefrags(fc))
+		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+	if (ieee80211_is_data_qos(fc)) {
+		u8 *qc = ieee80211_get_qos_ctl(hdr);
+		tx_cmd->tid_tspec = qc[0] & 0xf;
+		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+	} else {
+		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+	}
+
+	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
+
+	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
+		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+
+	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+	if (ieee80211_is_mgmt(fc)) {
+		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+		else
+			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+	} else {
+		tx_cmd->timeout.pm_frame_timeout = 0;
+	}
+
+	tx_cmd->driver_txop = 0;
+	tx_cmd->tx_flags = tx_flags;
+	tx_cmd->next_frame_len = 0;
+}
+
+#define RTS_DFAULT_RETRY_LIMIT		60
+
+static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
+				     struct iwl_tx_cmd *tx_cmd,
+				     struct ieee80211_tx_info *info,
+				     __le16 fc)
+{
+	u32 rate_flags;
+	int rate_idx;
+	u8 rts_retry_limit;
+	u8 data_retry_limit;
+	u8 rate_plcp;
+
+	/* Set retry limit on DATA packets and Probe Responses*/
+	if (ieee80211_is_probe_resp(fc))
+		data_retry_limit = 3;
+	else
+		data_retry_limit = IWL_DEFAULT_TX_RETRY;
+	tx_cmd->data_retry_limit = data_retry_limit;
+
+	/* Set retry limit on RTS packets */
+	rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
+	if (data_retry_limit < rts_retry_limit)
+		rts_retry_limit = data_retry_limit;
+	tx_cmd->rts_retry_limit = rts_retry_limit;
+
+	/* DATA packets will use the uCode station table for rate/antenna
+	 * selection */
+	if (ieee80211_is_data(fc)) {
+		tx_cmd->initial_rate_index = 0;
+		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
+		return;
+	}
+
+	/**
+	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
+	 * not really a TX rate. Thus, we use the lowest supported rate for
+	 * this band. Also use the lowest supported rate if the stored rate
+	 * index is invalid.
+	 */
+	rate_idx = info->control.rates[0].idx;
+	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
+			(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
+		rate_idx = rate_lowest_index(&priv->bands[info->band],
+				info->control.sta);
+	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
+	if (info->band == IEEE80211_BAND_5GHZ)
+		rate_idx += IWL_FIRST_OFDM_RATE;
+	/* Get PLCP rate for tx_cmd->rate_n_flags */
+	rate_plcp = iwl_rates[rate_idx].plcp;
+	/* Zero out flags for this packet */
+	rate_flags = 0;
+
+	/* Set CCK flag as needed */
+	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
+		rate_flags |= RATE_MCS_CCK_MSK;
+
+	/* Set up RTS and CTS flags for certain packets */
+	switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+	case cpu_to_le16(IEEE80211_STYPE_AUTH):
+	case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+	case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+	case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+		if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
+			tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+			tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* Set up antennas */
+	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
+	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
+
+	/* Set the rate in the TX cmd */
+	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
+}
+
+static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
+				      struct ieee80211_tx_info *info,
+				      struct iwl_tx_cmd *tx_cmd,
+				      struct sk_buff *skb_frag,
+				      int sta_id)
+{
+	struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+	switch (keyconf->alg) {
+	case ALG_CCMP:
+		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+		if (info->flags & IEEE80211_TX_CTL_AMPDU)
+			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
+		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
+		break;
+
+	case ALG_TKIP:
+		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+		ieee80211_get_tkip_key(keyconf, skb_frag,
+			IEEE80211_TKIP_P2_KEY, tx_cmd->key);
+		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
+		break;
+
+	case ALG_WEP:
+		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
+			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
+
+		if (keyconf->keylen == WEP_KEY_LEN_128)
+			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+
+		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+
+		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
+			     "with key %d\n", keyconf->keyidx);
+		break;
+
+	default:
+		IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
+		break;
+	}
+}
+
+/*
+ * start REPLY_TX command process
+ */
+int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_sta *sta = info->control.sta;
+	struct iwl_station_priv *sta_priv = NULL;
+	struct iwl_tx_queue *txq;
+	struct iwl_queue *q;
+	struct iwl_device_cmd *out_cmd;
+	struct iwl_cmd_meta *out_meta;
+	struct iwl_tx_cmd *tx_cmd;
+	int swq_id, txq_id;
+	dma_addr_t phys_addr;
+	dma_addr_t txcmd_phys;
+	dma_addr_t scratch_phys;
+	u16 len, len_org, firstlen, secondlen;
+	u16 seq_number = 0;
+	__le16 fc;
+	u8 hdr_len;
+	u8 sta_id;
+	u8 wait_write_ptr = 0;
+	u8 tid = 0;
+	u8 *qc = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	if (iwl_is_rfkill(priv)) {
+		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
+		goto drop_unlock;
+	}
+
+	fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+	if (ieee80211_is_auth(fc))
+		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
+	else if (ieee80211_is_assoc_req(fc))
+		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
+	else if (ieee80211_is_reassoc_req(fc))
+		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
+#endif
+
+	hdr_len = ieee80211_hdrlen(fc);
+
+	/* Find (or create) index into station table for destination station */
+	if (info->flags & IEEE80211_TX_CTL_INJECTED)
+		sta_id = priv->hw_params.bcast_sta_id;
+	else
+		sta_id = iwl_get_sta_id(priv, hdr);
+	if (sta_id == IWL_INVALID_STATION) {
+		IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
+			       hdr->addr1);
+		goto drop_unlock;
+	}
+
+	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
+
+	if (sta)
+		sta_priv = (void *)sta->drv_priv;
+
+	if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
+	    sta_priv->asleep) {
+		WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
+		/*
+		 * This sends an asynchronous command to the device,
+		 * but we can rely on it being processed before the
+		 * next frame is processed -- and the next frame to
+		 * this station is the one that will consume this
+		 * counter.
+		 * For now set the counter to just 1 since we do not
+		 * support uAPSD yet.
+		 */
+		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
+	}
+
+	txq_id = get_queue_from_ac(skb_get_queue_mapping(skb));
+	if (ieee80211_is_data_qos(fc)) {
+		qc = ieee80211_get_qos_ctl(hdr);
+		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+		if (unlikely(tid >= MAX_TID_COUNT))
+			goto drop_unlock;
+		seq_number = priv->stations[sta_id].tid[tid].seq_number;
+		seq_number &= IEEE80211_SCTL_SEQ;
+		hdr->seq_ctrl = hdr->seq_ctrl &
+				cpu_to_le16(IEEE80211_SCTL_FRAG);
+		hdr->seq_ctrl |= cpu_to_le16(seq_number);
+		seq_number += 0x10;
+		/* aggregation is on for this <sta,tid> */
+		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+		    priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
+			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
+		}
+	}
+
+	txq = &priv->txq[txq_id];
+	swq_id = txq->swq_id;
+	q = &txq->q;
+
+	if (unlikely(iwl_queue_space(q) < q->high_mark))
+		goto drop_unlock;
+
+	if (ieee80211_is_data_qos(fc))
+		priv->stations[sta_id].tid[tid].tfds_in_queue++;
+
+	/* Set up driver data for this TFD */
+	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
+	txq->txb[q->write_ptr].skb[0] = skb;
+
+	/* Set up first empty entry in queue's array of Tx/cmd buffers */
+	out_cmd = txq->cmd[q->write_ptr];
+	out_meta = &txq->meta[q->write_ptr];
+	tx_cmd = &out_cmd->cmd.tx;
+	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
+
+	/*
+	 * Set up the Tx-command (not MAC!) header.
+	 * Store the chosen Tx queue and TFD index within the sequence field;
+	 * after Tx, uCode's Tx response will return this value so driver can
+	 * locate the frame within the tx queue and do post-tx processing.
+	 */
+	out_cmd->hdr.cmd = REPLY_TX;
+	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+				INDEX_TO_SEQ(q->write_ptr)));
+
+	/* Copy MAC header from skb into command buffer */
+	memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+
+	/* Total # bytes to be transmitted */
+	len = (u16)skb->len;
+	tx_cmd->len = cpu_to_le16(len);
+
+	if (info->control.hw_key)
+		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
+
+	/* TODO need this for burst mode later on */
+	iwlagn_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
+	iwl_dbg_log_tx_data_frame(priv, len, hdr);
+
+	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
+
+	iwl_update_stats(priv, true, fc, len);
+	/*
+	 * Use the first empty entry in this queue's command buffer array
+	 * to contain the Tx command and MAC header concatenated together
+	 * (payload data will be in another buffer).
+	 * Size of this varies, due to varying MAC header length.
+	 * If end is not dword aligned, we'll have 2 extra bytes at the end
+	 * of the MAC header (device reads on dword boundaries).
+	 * We'll tell device about this padding later.
+	 */
+	len = sizeof(struct iwl_tx_cmd) +
+		sizeof(struct iwl_cmd_header) + hdr_len;
+
+	len_org = len;
+	firstlen = len = (len + 3) & ~3;
+
+	if (len_org != len)
+		len_org = 1;
+	else
+		len_org = 0;
+
+	/* Tell NIC about any 2-byte padding after MAC header */
+	if (len_org)
+		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
+	/* Physical address of this Tx command's header (not MAC header!),
+	 * within command buffer array. */
+	txcmd_phys = pci_map_single(priv->pci_dev,
+				    &out_cmd->hdr, len,
+				    PCI_DMA_BIDIRECTIONAL);
+	pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
+	pci_unmap_len_set(out_meta, len, len);
+	/* Add buffer containing Tx command and MAC(!) header to TFD's
+	 * first entry */
+	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+						   txcmd_phys, len, 1, 0);
+
+	if (!ieee80211_has_morefrags(hdr->frame_control)) {
+		txq->need_update = 1;
+		if (qc)
+			priv->stations[sta_id].tid[tid].seq_number = seq_number;
+	} else {
+		wait_write_ptr = 1;
+		txq->need_update = 0;
+	}
+
+	/* Set up TFD's 2nd entry to point directly to remainder of skb,
+	 * if any (802.11 null frames have no payload). */
+	secondlen = len = skb->len - hdr_len;
+	if (len) {
+		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
+					   len, PCI_DMA_TODEVICE);
+		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+							   phys_addr, len,
+							   0, 0);
+	}
+
+	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
+				offsetof(struct iwl_tx_cmd, scratch);
+
+	len = sizeof(struct iwl_tx_cmd) +
+		sizeof(struct iwl_cmd_header) + hdr_len;
+	/* take back ownership of DMA buffer to enable update */
+	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
+				    len, PCI_DMA_BIDIRECTIONAL);
+	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
+
+	IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
+		     le16_to_cpu(out_cmd->hdr.sequence));
+	IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
+	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
+	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
+
+	/* Set up entry for this TFD in Tx byte-count array */
+	if (info->flags & IEEE80211_TX_CTL_AMPDU)
+		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
+						     le16_to_cpu(tx_cmd->len));
+
+	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
+				       len, PCI_DMA_BIDIRECTIONAL);
+
+	trace_iwlwifi_dev_tx(priv,
+			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
+			     sizeof(struct iwl_tfd),
+			     &out_cmd->hdr, firstlen,
+			     skb->data + hdr_len, secondlen);
+
+	/* Tell device the write index *just past* this latest filled TFD */
+	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
+	iwl_txq_update_write_ptr(priv, txq);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/*
+	 * At this point the frame is "transmitted" successfully
+	 * and we will get a TX status notification eventually,
+	 * regardless of the value of ret. "ret" only indicates
+	 * whether or not we should update the write pointer.
+	 */
+
+	/* avoid atomic ops if it isn't an associated client */
+	if (sta_priv && sta_priv->client)
+		atomic_inc(&sta_priv->pending_frames);
+
+	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
+		if (wait_write_ptr) {
+			spin_lock_irqsave(&priv->lock, flags);
+			txq->need_update = 1;
+			iwl_txq_update_write_ptr(priv, txq);
+			spin_unlock_irqrestore(&priv->lock, flags);
+		} else {
+			iwl_stop_queue(priv, txq->swq_id);
+		}
+	}
+
+	return 0;
+
+drop_unlock:
+	spin_unlock_irqrestore(&priv->lock, flags);
+	return -1;
+}
+
+static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
+				    struct iwl_dma_ptr *ptr, size_t size)
+{
+	ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
+				       GFP_KERNEL);
+	if (!ptr->addr)
+		return -ENOMEM;
+	ptr->size = size;
+	return 0;
+}
+
+static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
+				    struct iwl_dma_ptr *ptr)
+{
+	if (unlikely(!ptr->addr))
+		return;
+
+	dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
+	memset(ptr, 0, sizeof(*ptr));
+}
+
+/**
+ * iwlagn_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv)
+{
+	int txq_id;
+
+	/* Tx queues */
+	if (priv->txq) {
+		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
+		     txq_id++)
+			if (txq_id == IWL_CMD_QUEUE_NUM)
+				iwl_cmd_queue_free(priv);
+			else
+				iwl_tx_queue_free(priv, txq_id);
+	}
+	iwlagn_free_dma_ptr(priv, &priv->kw);
+
+	iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
+
+	/* free tx queue structure */
+	iwl_free_txq_mem(priv);
+}
+
+/**
+ * iwlagn_txq_ctx_reset - Reset TX queue context
+ *			 Destroys all DMA structures and initialize them again
+ *
+ * @param priv
+ * @return error code
+ */
+int iwlagn_txq_ctx_reset(struct iwl_priv *priv)
+{
+	int ret = 0;
+	int txq_id, slots_num;
+	unsigned long flags;
+
+	/* Free all tx/cmd queues and keep-warm buffer */
+	iwlagn_hw_txq_ctx_free(priv);
+
+	ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
+				priv->hw_params.scd_bc_tbls_size);
+	if (ret) {
+		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
+		goto error_bc_tbls;
+	}
+	/* Alloc keep-warm buffer */
+	ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
+	if (ret) {
+		IWL_ERR(priv, "Keep Warm allocation failed\n");
+		goto error_kw;
+	}
+
+	/* allocate tx queue structure */
+	ret = iwl_alloc_txq_mem(priv);
+	if (ret)
+		goto error;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	/* Turn off all Tx DMA fifos */
+	priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+	/* Tell NIC where to find the "keep warm" buffer */
+	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* Alloc and init all Tx queues, including the command queue (#4) */
+	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
+					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
+				       txq_id);
+		if (ret) {
+			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
+			goto error;
+		}
+	}
+
+	return ret;
+
+ error:
+	iwlagn_hw_txq_ctx_free(priv);
+	iwlagn_free_dma_ptr(priv, &priv->kw);
+ error_kw:
+	iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
+ error_bc_tbls:
+	return ret;
+}
+
+/**
+ * iwlagn_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
+ */
+void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
+{
+	int ch;
+	unsigned long flags;
+
+	/* Turn off all Tx DMA fifos */
+	spin_lock_irqsave(&priv->lock, flags);
+
+	priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+	/* Stop each Tx DMA channel, and wait for it to be idle */
+	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
+		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+		iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
+				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
+				    1000);
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* Deallocate memory for all Tx queues */
+	iwlagn_hw_txq_ctx_free(priv);
+}
+
+/*
+ * Find first available (lowest unused) Tx Queue, mark it "active".
+ * Called only when finding queue for aggregation.
+ * Should never return anything < 7, because they should already
+ * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
+ */
+static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv)
+{
+	int txq_id;
+
+	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
+			return txq_id;
+	return -1;
+}
+
+int iwlagn_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
+{
+	int sta_id;
+	int tx_fifo;
+	int txq_id;
+	int ret;
+	unsigned long flags;
+	struct iwl_tid_data *tid_data;
+
+	tx_fifo = get_fifo_from_tid(tid);
+	if (unlikely(tx_fifo < 0))
+		return tx_fifo;
+
+	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
+			__func__, ra, tid);
+
+	sta_id = iwl_find_station(priv, ra);
+	if (sta_id == IWL_INVALID_STATION) {
+		IWL_ERR(priv, "Start AGG on invalid station\n");
+		return -ENXIO;
+	}
+	if (unlikely(tid >= MAX_TID_COUNT))
+		return -EINVAL;
+
+	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
+		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
+		return -ENXIO;
+	}
+
+	txq_id = iwlagn_txq_ctx_activate_free(priv);
+	if (txq_id == -1) {
+		IWL_ERR(priv, "No free aggregation queue available\n");
+		return -ENXIO;
+	}
+
+	spin_lock_irqsave(&priv->sta_lock, flags);
+	tid_data = &priv->stations[sta_id].tid[tid];
+	*ssn = SEQ_TO_SN(tid_data->seq_number);
+	tid_data->agg.txq_id = txq_id;
+	priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
+	spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
+						  sta_id, tid, *ssn);
+	if (ret)
+		return ret;
+
+	if (tid_data->tfds_in_queue == 0) {
+		IWL_DEBUG_HT(priv, "HW queue is empty\n");
+		tid_data->agg.state = IWL_AGG_ON;
+		ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid);
+	} else {
+		IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
+			     tid_data->tfds_in_queue);
+		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+	}
+	return ret;
+}
+
+int iwlagn_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
+{
+	int tx_fifo_id, txq_id, sta_id, ssn = -1;
+	struct iwl_tid_data *tid_data;
+	int write_ptr, read_ptr;
+	unsigned long flags;
+
+	if (!ra) {
+		IWL_ERR(priv, "ra = NULL\n");
+		return -EINVAL;
+	}
+
+	tx_fifo_id = get_fifo_from_tid(tid);
+	if (unlikely(tx_fifo_id < 0))
+		return tx_fifo_id;
+
+	sta_id = iwl_find_station(priv, ra);
+
+	if (sta_id == IWL_INVALID_STATION) {
+		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
+		return -ENXIO;
+	}
+
+	if (priv->stations[sta_id].tid[tid].agg.state ==
+				IWL_EMPTYING_HW_QUEUE_ADDBA) {
+		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
+		ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
+		priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
+		return 0;
+	}
+
+	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
+		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
+
+	tid_data = &priv->stations[sta_id].tid[tid];
+	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
+	txq_id = tid_data->agg.txq_id;
+	write_ptr = priv->txq[txq_id].q.write_ptr;
+	read_ptr = priv->txq[txq_id].q.read_ptr;
+
+	/* The queue is not empty */
+	if (write_ptr != read_ptr) {
+		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
+		priv->stations[sta_id].tid[tid].agg.state =
+				IWL_EMPTYING_HW_QUEUE_DELBA;
+		return 0;
+	}
+
+	IWL_DEBUG_HT(priv, "HW queue is empty\n");
+	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	/*
+	 * the only reason this call can fail is queue number out of range,
+	 * which can happen if uCode is reloaded and all the station
+	 * information are lost. if it is outside the range, there is no need
+	 * to deactivate the uCode queue, just return "success" to allow
+	 *  mac80211 to clean up it own data.
+	 */
+	priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
+						   tx_fifo_id);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
+
+	return 0;
+}
+
+int iwlagn_txq_check_empty(struct iwl_priv *priv,
+			   int sta_id, u8 tid, int txq_id)
+{
+	struct iwl_queue *q = &priv->txq[txq_id].q;
+	u8 *addr = priv->stations[sta_id].sta.sta.addr;
+	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
+
+	switch (priv->stations[sta_id].tid[tid].agg.state) {
+	case IWL_EMPTYING_HW_QUEUE_DELBA:
+		/* We are reclaiming the last packet of the */
+		/* aggregated HW queue */
+		if ((txq_id  == tid_data->agg.txq_id) &&
+		    (q->read_ptr == q->write_ptr)) {
+			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
+			int tx_fifo = get_fifo_from_tid(tid);
+			IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
+			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
+							     ssn, tx_fifo);
+			tid_data->agg.state = IWL_AGG_OFF;
+			ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
+		}
+		break;
+	case IWL_EMPTYING_HW_QUEUE_ADDBA:
+		/* We are reclaiming the last packet of the queue */
+		if (tid_data->tfds_in_queue == 0) {
+			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
+			tid_data->agg.state = IWL_AGG_ON;
+			ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
+		}
+		break;
+	}
+	return 0;
+}
+
+static void iwlagn_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	struct ieee80211_sta *sta;
+	struct iwl_station_priv *sta_priv;
+
+	sta = ieee80211_find_sta(priv->vif, hdr->addr1);
+	if (sta) {
+		sta_priv = (void *)sta->drv_priv;
+		/* avoid atomic ops if this isn't a client */
+		if (sta_priv->client &&
+		    atomic_dec_return(&sta_priv->pending_frames) == 0)
+			ieee80211_sta_block_awake(priv->hw, sta, false);
+	}
+
+	ieee80211_tx_status_irqsafe(priv->hw, skb);
+}
+
+int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
+{
+	struct iwl_tx_queue *txq = &priv->txq[txq_id];
+	struct iwl_queue *q = &txq->q;
+	struct iwl_tx_info *tx_info;
+	int nfreed = 0;
+	struct ieee80211_hdr *hdr;
+
+	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
+		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
+			  "is out of range [0-%d] %d %d.\n", txq_id,
+			  index, q->n_bd, q->write_ptr, q->read_ptr);
+		return 0;
+	}
+
+	for (index = iwl_queue_inc_wrap(index, q->n_bd);
+	     q->read_ptr != index;
+	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+		tx_info = &txq->txb[txq->q.read_ptr];
+		iwlagn_tx_status(priv, tx_info->skb[0]);
+
+		hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
+		if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+			nfreed++;
+		tx_info->skb[0] = NULL;
+
+		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
+			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
+
+		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+	}
+	return nfreed;
+}
+
+/**
+ * iwlagn_tx_status_reply_compressed_ba - Update tx status from block-ack
+ *
+ * Go through block-ack's bitmap of ACK'd frames, update driver's record of
+ * ACK vs. not.  This gets sent to mac80211, then to rate scaling algo.
+ */
+static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
+				 struct iwl_ht_agg *agg,
+				 struct iwl_compressed_ba_resp *ba_resp)
+
+{
+	int i, sh, ack;
+	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
+	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+	u64 bitmap;
+	int successes = 0;
+	struct ieee80211_tx_info *info;
+
+	if (unlikely(!agg->wait_for_ba)) {
+		IWL_ERR(priv, "Received BA when not expected\n");
+		return -EINVAL;
+	}
+
+	/* Mark that the expected block-ack response arrived */
+	agg->wait_for_ba = 0;
+	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
+
+	/* Calculate shift to align block-ack bits with our Tx window bits */
+	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
+	if (sh < 0) /* tbw something is wrong with indices */
+		sh += 0x100;
+
+	/* don't use 64-bit values for now */
+	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
+
+	if (agg->frame_count > (64 - sh)) {
+		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
+		return -1;
+	}
+
+	/* check for success or failure according to the
+	 * transmitted bitmap and block-ack bitmap */
+	bitmap &= agg->bitmap;
+
+	/* For each frame attempted in aggregation,
+	 * update driver's record of tx frame's status. */
+	for (i = 0; i < agg->frame_count ; i++) {
+		ack = bitmap & (1ULL << i);
+		successes += !!ack;
+		IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
+			ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
+			agg->start_idx + i);
+	}
+
+	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
+	memset(&info->status, 0, sizeof(info->status));
+	info->flags |= IEEE80211_TX_STAT_ACK;
+	info->flags |= IEEE80211_TX_STAT_AMPDU;
+	info->status.ampdu_ack_map = successes;
+	info->status.ampdu_ack_len = agg->frame_count;
+	iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
+
+	IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);
+
+	return 0;
+}
+
+/**
+ * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
+ *
+ * Handles block-acknowledge notification from device, which reports success
+ * of frames sent via aggregation.
+ */
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+				   struct iwl_rx_mem_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
+	struct iwl_tx_queue *txq = NULL;
+	struct iwl_ht_agg *agg;
+	int index;
+	int sta_id;
+	int tid;
+
+	/* "flow" corresponds to Tx queue */
+	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+
+	/* "ssn" is start of block-ack Tx window, corresponds to index
+	 * (in Tx queue's circular buffer) of first TFD/frame in window */
+	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
+
+	if (scd_flow >= priv->hw_params.max_txq_num) {
+		IWL_ERR(priv,
+			"BUG_ON scd_flow is bigger than number of queues\n");
+		return;
+	}
+
+	txq = &priv->txq[scd_flow];
+	sta_id = ba_resp->sta_id;
+	tid = ba_resp->tid;
+	agg = &priv->stations[sta_id].tid[tid].agg;
+
+	/* Find index just before block-ack window */
+	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
+
+	/* TODO: Need to get this copy more safely - now good for debug */
+
+	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
+			   "sta_id = %d\n",
+			   agg->wait_for_ba,
+			   (u8 *) &ba_resp->sta_addr_lo32,
+			   ba_resp->sta_id);
+	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
+			   "%d, scd_ssn = %d\n",
+			   ba_resp->tid,
+			   ba_resp->seq_ctl,
+			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
+			   ba_resp->scd_flow,
+			   ba_resp->scd_ssn);
+	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx \n",
+			   agg->start_idx,
+			   (unsigned long long)agg->bitmap);
+
+	/* Update driver's record of ACK vs. not for each frame in window */
+	iwlagn_tx_status_reply_compressed_ba(priv, agg, ba_resp);
+
+	/* Release all TFDs before the SSN, i.e. all TFDs in front of
+	 * block-ack window (we assume that they've been successfully
+	 * transmitted ... if not, it's too late anyway). */
+	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
+		/* calculate mac80211 ampdu sw queue to wake */
+		int freed = iwlagn_tx_queue_reclaim(priv, scd_flow, index);
+		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+
+		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
+		    priv->mac80211_registered &&
+		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
+			iwl_wake_queue(priv, txq->swq_id);
+
+		iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
+	}
+}
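The block-ack path just added aligns the firmware's 64-bit ACK bitmap with the driver's Tx window (shifting by the index difference, wrapping over the 256-entry ring) and then tests one bit per attempted frame. A standalone sketch of that bit manipulation, with made-up window values:

#include <stdio.h>
#include <stdint.h>

/* Counts ACKed frames the way iwlagn_tx_status_reply_compressed_ba()
 * does: shift the firmware bitmap so bit 0 lines up with the first
 * frame of the aggregate, mask with the frames actually sent, then
 * test one bit per frame. */
static int count_ba_successes(uint64_t fw_bitmap, uint64_t sent_bitmap,
			      int start_idx, int window_start, int frame_count)
{
	int sh = start_idx - window_start;
	int i, successes = 0;

	if (sh < 0)		/* indices wrapped around the 256-entry ring */
		sh += 0x100;
	if (frame_count > 64 - sh)
		return -1;	/* more frames than the bitmap can describe */

	fw_bitmap = (fw_bitmap >> sh) & sent_bitmap;
	for (i = 0; i < frame_count; i++)
		successes += !!(fw_bitmap & (1ULL << i));
	return successes;
}

int main(void)
{
	/* 5 frames sent; the firmware ACKed all but the third (i = 2),
	 * and its bitmap is offset by a window shift of 2 */
	uint64_t sent = 0x1f, acked = (uint64_t)0x1b << 2;

	printf("successes = %d of 5\n",
	       count_ba_successes(acked, sent, 10, 8, 5));
	return 0;
}

The success count and frame count then feed mac80211's ampdu_ack_map/ampdu_ack_len, which is what the rate-scaling algorithm consumes.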
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 680e897b92db..a57933b8f6fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -887,7 +887,7 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
 	priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy;
 	priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx;
 	/* block ack */
-	priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl_rx_reply_compressed_ba;
+	priv->rx_handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
 	/* Set up hardware specific Rx handlers */
 	priv->cfg->ops->lib->rx_handler_setup(priv);
 }
@@ -2249,7 +2249,7 @@ static void __iwl_down(struct iwl_priv *priv)
 	/* device going down, Stop using ICT table */
 	iwl_disable_ict(priv);
 
-	iwl_txq_ctx_stop(priv);
+	iwlagn_txq_ctx_stop(priv);
 	iwl_rxq_stop(priv);
 
 	/* Power-down device's busmaster DMA clocks */
@@ -2371,7 +2371,7 @@ static int __iwl_up(struct iwl_priv *priv)
 
 	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
 
-	ret = iwl_hw_nic_init(priv);
+	ret = iwlagn_hw_nic_init(priv);
 	if (ret) {
 		IWL_ERR(priv, "Unable to init nic\n");
 		return ret;
@@ -2782,7 +2782,7 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
 		     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
 
-	if (iwl_tx_skb(priv, skb))
+	if (iwlagn_tx_skb(priv, skb))
 		dev_kfree_skb_any(skb);
 
 	IWL_DEBUG_MACDUMP(priv, "leave\n");
@@ -2970,7 +2970,7 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
 		return ret;
 	case IEEE80211_AMPDU_TX_START:
 		IWL_DEBUG_HT(priv, "start Tx\n");
-		ret = iwl_tx_agg_start(priv, sta->addr, tid, ssn);
+		ret = iwlagn_tx_agg_start(priv, sta->addr, tid, ssn);
 		if (ret == 0) {
 			priv->_agn.agg_tids_count++;
 			IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
@@ -2979,7 +2979,7 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
 		return ret;
 	case IEEE80211_AMPDU_TX_STOP:
 		IWL_DEBUG_HT(priv, "stop Tx\n");
-		ret = iwl_tx_agg_stop(priv, sta->addr, tid);
+		ret = iwlagn_tx_agg_stop(priv, sta->addr, tid);
 		if ((ret == 0) && (priv->_agn.agg_tids_count > 0)) {
 			priv->_agn.agg_tids_count--;
 			IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
@@ -3738,7 +3738,7 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
 
 	if (priv->rxq.bd)
 		iwl_rx_queue_free(priv, &priv->rxq);
-	iwl_hw_txq_ctx_free(priv);
+	iwlagn_hw_txq_ctx_free(priv);
 
 	iwl_eeprom_free(priv);
 
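Most of the iwl-agn.c hunks swap entries in dispatch tables, e.g. pointing rx_handlers[REPLY_COMPRESSED_BA] at the relocated handler. A toy model of that table-driven RX dispatch (the command ID values and handler name here are illustrative stand-ins, not the driver's definitions):

#include <stdio.h>

enum { REPLY_TX = 0x1c, REPLY_COMPRESSED_BA = 0xc5, REPLY_MAX = 0xff };

struct rx_packet {
	unsigned char cmd;	/* notification type from the device */
};

typedef void (*rx_handler_t)(struct rx_packet *pkt);

static rx_handler_t rx_handlers[REPLY_MAX];

static void handle_compressed_ba(struct rx_packet *pkt)
{
	printf("block-ack notification, cmd 0x%x\n", pkt->cmd);
}

int main(void)
{
	struct rx_packet pkt = { .cmd = REPLY_COMPRESSED_BA };

	/* setup, analogous to iwl_setup_rx_handlers() */
	rx_handlers[REPLY_COMPRESSED_BA] = handle_compressed_ba;

	/* dispatch, analogous to the driver's RX interrupt path */
	if (rx_handlers[pkt.cmd])
		rx_handlers[pkt.cmd](&pkt);
	return 0;
}

Because dispatch is indirect, relocating a handler only requires updating the table entry; the interrupt path itself is untouched by this patch.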
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h index ad9b47e1a817..c75a767872f7 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn.h | |||
@@ -113,5 +113,22 @@ void iwlagn_temperature(struct iwl_priv *priv); | |||
113 | u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv); | 113 | u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv); |
114 | const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv, | 114 | const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv, |
115 | size_t offset); | 115 | size_t offset); |
116 | void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); | ||
117 | int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq); | ||
118 | int iwlagn_hw_nic_init(struct iwl_priv *priv); | ||
119 | |||
120 | /* tx */ | ||
121 | int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb); | ||
122 | int iwlagn_tx_agg_start(struct iwl_priv *priv, | ||
123 | const u8 *ra, u16 tid, u16 *ssn); | ||
124 | int iwlagn_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid); | ||
125 | int iwlagn_txq_check_empty(struct iwl_priv *priv, | ||
126 | int sta_id, u8 tid, int txq_id); | ||
127 | void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, | ||
128 | struct iwl_rx_mem_buffer *rxb); | ||
129 | int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index); | ||
130 | void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv); | ||
131 | int iwlagn_txq_ctx_reset(struct iwl_priv *priv); | ||
132 | void iwlagn_txq_ctx_stop(struct iwl_priv *priv); | ||
116 | 133 | ||
117 | #endif /* __iwl_agn_h__ */ | 134 | #endif /* __iwl_agn_h__ */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index 5180fb24cd38..0dc41d84dc15 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c | |||
@@ -265,57 +265,6 @@ void iwl_hw_detect(struct iwl_priv *priv) | |||
265 | } | 265 | } |
266 | EXPORT_SYMBOL(iwl_hw_detect); | 266 | EXPORT_SYMBOL(iwl_hw_detect); |
267 | 267 | ||
268 | int iwl_hw_nic_init(struct iwl_priv *priv) | ||
269 | { | ||
270 | unsigned long flags; | ||
271 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
272 | int ret; | ||
273 | |||
274 | /* nic_init */ | ||
275 | spin_lock_irqsave(&priv->lock, flags); | ||
276 | priv->cfg->ops->lib->apm_ops.init(priv); | ||
277 | |||
278 | /* Set interrupt coalescing calibration timer to default (512 usecs) */ | ||
279 | iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); | ||
280 | |||
281 | spin_unlock_irqrestore(&priv->lock, flags); | ||
282 | |||
283 | ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN); | ||
284 | |||
285 | priv->cfg->ops->lib->apm_ops.config(priv); | ||
286 | |||
287 | /* Allocate the RX queue, or reset if it is already allocated */ | ||
288 | if (!rxq->bd) { | ||
289 | ret = iwl_rx_queue_alloc(priv); | ||
290 | if (ret) { | ||
291 | IWL_ERR(priv, "Unable to initialize Rx queue\n"); | ||
292 | return -ENOMEM; | ||
293 | } | ||
294 | } else | ||
295 | iwl_rx_queue_reset(priv, rxq); | ||
296 | |||
297 | iwl_rx_replenish(priv); | ||
298 | |||
299 | iwl_rx_init(priv, rxq); | ||
300 | |||
301 | spin_lock_irqsave(&priv->lock, flags); | ||
302 | |||
303 | rxq->need_update = 1; | ||
304 | iwl_rx_queue_update_write_ptr(priv, rxq); | ||
305 | |||
306 | spin_unlock_irqrestore(&priv->lock, flags); | ||
307 | |||
308 | /* Allocate and init all Tx and Command queues */ | ||
309 | ret = iwl_txq_ctx_reset(priv); | ||
310 | if (ret) | ||
311 | return ret; | ||
312 | |||
313 | set_bit(STATUS_INIT, &priv->status); | ||
314 | |||
315 | return 0; | ||
316 | } | ||
317 | EXPORT_SYMBOL(iwl_hw_nic_init); | ||
318 | |||
319 | /* | 268 | /* |
320 | * QoS support | 269 | * QoS support |
321 | */ | 270 | */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index b3e698b576e1..446d27bd4301 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h | |||
@@ -336,7 +336,6 @@ void iwl_irq_handle_error(struct iwl_priv *priv); | |||
336 | void iwl_configure_filter(struct ieee80211_hw *hw, | 336 | void iwl_configure_filter(struct ieee80211_hw *hw, |
337 | unsigned int changed_flags, | 337 | unsigned int changed_flags, |
338 | unsigned int *total_flags, u64 multicast); | 338 | unsigned int *total_flags, u64 multicast); |
339 | int iwl_hw_nic_init(struct iwl_priv *priv); | ||
340 | int iwl_set_hw_params(struct iwl_priv *priv); | 339 | int iwl_set_hw_params(struct iwl_priv *priv); |
341 | bool iwl_is_monitor_mode(struct iwl_priv *priv); | 340 | bool iwl_is_monitor_mode(struct iwl_priv *priv); |
342 | void iwl_post_associate(struct iwl_priv *priv); | 341 | void iwl_post_associate(struct iwl_priv *priv); |
@@ -426,15 +425,12 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv); | |||
426 | void iwl_rx_handle(struct iwl_priv *priv); | 425 | void iwl_rx_handle(struct iwl_priv *priv); |
427 | void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, | 426 | void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, |
428 | struct iwl_rx_queue *q); | 427 | struct iwl_rx_queue *q); |
429 | void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); | ||
430 | void iwl_rx_replenish(struct iwl_priv *priv); | 428 | void iwl_rx_replenish(struct iwl_priv *priv); |
431 | void iwl_rx_replenish_now(struct iwl_priv *priv); | 429 | void iwl_rx_replenish_now(struct iwl_priv *priv); |
432 | int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq); | ||
433 | void iwl_rx_queue_restock(struct iwl_priv *priv); | 430 | void iwl_rx_queue_restock(struct iwl_priv *priv); |
434 | int iwl_rx_queue_space(const struct iwl_rx_queue *q); | 431 | int iwl_rx_queue_space(const struct iwl_rx_queue *q); |
435 | void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority); | 432 | void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority); |
436 | void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); | 433 | void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); |
437 | int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index); | ||
438 | /* Handlers */ | 434 | /* Handlers */ |
439 | void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, | 435 | void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, |
440 | struct iwl_rx_mem_buffer *rxb); | 436 | struct iwl_rx_mem_buffer *rxb); |
@@ -455,13 +451,10 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); | |||
455 | /***************************************************** | 451 | /***************************************************** |
456 | * TX | 452 | * TX |
457 | ******************************************************/ | 453 | ******************************************************/ |
458 | int iwl_txq_ctx_reset(struct iwl_priv *priv); | ||
459 | void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); | 454 | void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); |
460 | int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, | 455 | int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, |
461 | struct iwl_tx_queue *txq, | 456 | struct iwl_tx_queue *txq, |
462 | dma_addr_t addr, u16 len, u8 reset, u8 pad); | 457 | dma_addr_t addr, u16 len, u8 reset, u8 pad); |
463 | int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb); | ||
464 | void iwl_hw_txq_ctx_free(struct iwl_priv *priv); | ||
465 | int iwl_hw_tx_queue_init(struct iwl_priv *priv, | 458 | int iwl_hw_tx_queue_init(struct iwl_priv *priv, |
466 | struct iwl_tx_queue *txq); | 459 | struct iwl_tx_queue *txq); |
467 | void iwl_free_tfds_in_queue(struct iwl_priv *priv, | 460 | void iwl_free_tfds_in_queue(struct iwl_priv *priv, |
@@ -470,9 +463,6 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); | |||
470 | int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, | 463 | int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, |
471 | int slots_num, u32 txq_id); | 464 | int slots_num, u32 txq_id); |
472 | void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); | 465 | void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); |
473 | int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn); | ||
474 | int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid); | ||
475 | int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id); | ||
476 | /***************************************************** | 466 | /***************************************************** |
477 | * TX power | 467 | * TX power |
478 | ****************************************************/ | 468 | ****************************************************/ |
@@ -689,8 +679,6 @@ extern void iwl_rx_reply_rx(struct iwl_priv *priv, | |||
689 | struct iwl_rx_mem_buffer *rxb); | 679 | struct iwl_rx_mem_buffer *rxb); |
690 | extern void iwl_rx_reply_rx_phy(struct iwl_priv *priv, | 680 | extern void iwl_rx_reply_rx_phy(struct iwl_priv *priv, |
691 | struct iwl_rx_mem_buffer *rxb); | 681 | struct iwl_rx_mem_buffer *rxb); |
692 | void iwl_rx_reply_compressed_ba(struct iwl_priv *priv, | ||
693 | struct iwl_rx_mem_buffer *rxb); | ||
694 | void iwl_apm_stop(struct iwl_priv *priv); | 682 | void iwl_apm_stop(struct iwl_priv *priv); |
695 | int iwl_apm_init(struct iwl_priv *priv); | 683 | int iwl_apm_init(struct iwl_priv *priv); |
696 | 684 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c index 36580d8d8b8d..f469aa92316a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c | |||
@@ -35,6 +35,7 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite8); | |||
35 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32); | 35 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32); |
36 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32); | 36 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32); |
37 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx); | 37 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx); |
38 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_tx); | ||
38 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event); | 39 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event); |
39 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error); | 40 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error); |
40 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event); | 41 | EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c index de453f25f1c9..b75c3ccd1c43 100644 --- a/drivers/net/wireless/iwlwifi/iwl-rx.c +++ b/drivers/net/wireless/iwlwifi/iwl-rx.c | |||
@@ -395,86 +395,6 @@ err_bd: | |||
395 | } | 395 | } |
396 | EXPORT_SYMBOL(iwl_rx_queue_alloc); | 396 | EXPORT_SYMBOL(iwl_rx_queue_alloc); |
397 | 397 | ||
398 | void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | ||
399 | { | ||
400 | unsigned long flags; | ||
401 | int i; | ||
402 | spin_lock_irqsave(&rxq->lock, flags); | ||
403 | INIT_LIST_HEAD(&rxq->rx_free); | ||
404 | INIT_LIST_HEAD(&rxq->rx_used); | ||
405 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | ||
406 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { | ||
407 | /* In the reset function, these buffers may have been allocated | ||
408 | * to an SKB, so we need to unmap and free potential storage */ | ||
409 | if (rxq->pool[i].page != NULL) { | ||
410 | pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, | ||
411 | PAGE_SIZE << priv->hw_params.rx_page_order, | ||
412 | PCI_DMA_FROMDEVICE); | ||
413 | __iwl_free_pages(priv, rxq->pool[i].page); | ||
414 | rxq->pool[i].page = NULL; | ||
415 | } | ||
416 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | ||
417 | } | ||
418 | |||
419 | /* Set us so that we have processed and used all buffers, but have | ||
420 | * not restocked the Rx queue with fresh buffers */ | ||
421 | rxq->read = rxq->write = 0; | ||
422 | rxq->write_actual = 0; | ||
423 | rxq->free_count = 0; | ||
424 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
425 | } | ||
426 | |||
427 | int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | ||
428 | { | ||
429 | u32 rb_size; | ||
430 | const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ | ||
431 | u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */ | ||
432 | |||
433 | if (!priv->cfg->use_isr_legacy) | ||
434 | rb_timeout = RX_RB_TIMEOUT; | ||
435 | |||
436 | if (priv->cfg->mod_params->amsdu_size_8K) | ||
437 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; | ||
438 | else | ||
439 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; | ||
440 | |||
441 | /* Stop Rx DMA */ | ||
442 | iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); | ||
443 | |||
444 | /* Reset driver's Rx queue write index */ | ||
445 | iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); | ||
446 | |||
447 | /* Tell device where to find RBD circular buffer in DRAM */ | ||
448 | iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG, | ||
449 | (u32)(rxq->dma_addr >> 8)); | ||
450 | |||
451 | /* Tell device where in DRAM to update its Rx status */ | ||
452 | iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, | ||
453 | rxq->rb_stts_dma >> 4); | ||
454 | |||
455 | /* Enable Rx DMA | ||
456 | * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in | ||
457 | * the credit mechanism in 5000 HW RX FIFO | ||
458 | * Direct rx interrupts to hosts | ||
459 | * Rx buffer size 4 or 8k | ||
460 | * RB timeout 0x10 | ||
461 | * 256 RBDs | ||
462 | */ | ||
463 | iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, | ||
464 | FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | | ||
465 | FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | | ||
466 | FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | | ||
467 | FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK | | ||
468 | rb_size | | ||
469 | (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) | | ||
470 | (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); | ||
471 | |||
472 | /* Set interrupt coalescing timer to default (2048 usecs) */ | ||
473 | iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); | ||
474 | |||
475 | return 0; | ||
476 | } | ||
477 | |||
478 | int iwl_rxq_stop(struct iwl_priv *priv) | 398 | int iwl_rxq_stop(struct iwl_priv *priv) |
479 | { | 399 | { |
480 | 400 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c index d401b6f226f9..9c28ad7a2de1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c | |||
@@ -1395,6 +1395,7 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt) | |||
1395 | 1395 | ||
1396 | iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); | 1396 | iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); |
1397 | } | 1397 | } |
1398 | EXPORT_SYMBOL(iwl_sta_modify_sleep_tx_count); | ||
1398 | 1399 | ||
1399 | int iwl_mac_sta_remove(struct ieee80211_hw *hw, | 1400 | int iwl_mac_sta_remove(struct ieee80211_hw *hw, |
1400 | struct ieee80211_vif *vif, | 1401 | struct ieee80211_vif *vif, |
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index 34c983833a8f..91f9c89b1b6d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c | |||
@@ -37,84 +37,6 @@ | |||
37 | #include "iwl-io.h" | 37 | #include "iwl-io.h" |
38 | #include "iwl-helpers.h" | 38 | #include "iwl-helpers.h" |
39 | 39 | ||
40 | /* | ||
41 | * mac80211 queues, ACs, hardware queues, FIFOs. | ||
42 | * | ||
43 | * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues | ||
44 | * | ||
45 | * Mac80211 uses the following numbers, which we get from it | ||
46 | * by way of skb_get_queue_mapping(skb): | ||
47 | * | ||
48 | * VO 0 | ||
49 | * VI 1 | ||
50 | * BE 2 | ||
51 | * BK 3 | ||
52 | * | ||
53 | * | ||
54 | * Regular (not A-MPDU) frames are put into hardware queues corresponding | ||
55 | * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their | ||
56 | * own queue per aggregation session (RA/TID combination), such queues are | ||
57 | * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In | ||
58 | * order to map frames to the right queue, we also need an AC->hw queue | ||
59 | * mapping. This is implemented here. | ||
60 | * | ||
61 | * Due to the way hw queues are set up (by the hw specific modules like | ||
62 | * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity | ||
63 | * mapping. | ||
64 | */ | ||
65 | |||
66 | static const u8 tid_to_ac[] = { | ||
67 | /* this matches the mac80211 numbers */ | ||
68 | 2, 3, 3, 2, 1, 1, 0, 0 | ||
69 | }; | ||
70 | |||
71 | static const u8 ac_to_fifo[] = { | ||
72 | IWL_TX_FIFO_VO, | ||
73 | IWL_TX_FIFO_VI, | ||
74 | IWL_TX_FIFO_BE, | ||
75 | IWL_TX_FIFO_BK, | ||
76 | }; | ||
77 | |||
78 | static inline int get_fifo_from_ac(u8 ac) | ||
79 | { | ||
80 | return ac_to_fifo[ac]; | ||
81 | } | ||
82 | |||
83 | static inline int get_queue_from_ac(u16 ac) | ||
84 | { | ||
85 | return ac; | ||
86 | } | ||
87 | |||
88 | static inline int get_fifo_from_tid(u16 tid) | ||
89 | { | ||
90 | if (likely(tid < ARRAY_SIZE(tid_to_ac))) | ||
91 | return get_fifo_from_ac(tid_to_ac[tid]); | ||
92 | |||
93 | /* no support for TIDs 8-15 yet */ | ||
94 | return -EINVAL; | ||
95 | } | ||
96 | |||
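To make the TID -> AC -> FIFO mapping above concrete, here is a minimal standalone sketch that mirrors the two tables. The IWL_TX_FIFO_* values are illustrative stand-ins; the real constants live in iwl-prph.h.

    #include <stdio.h>

    /* assumed values; see iwl-prph.h for the authoritative ones */
    enum { IWL_TX_FIFO_VO = 3, IWL_TX_FIFO_VI = 2,
           IWL_TX_FIFO_BE = 1, IWL_TX_FIFO_BK = 0 };

    static const unsigned char tid_to_ac[] = { 2, 3, 3, 2, 1, 1, 0, 0 };
    static const unsigned char ac_to_fifo[] = {
        IWL_TX_FIFO_VO, IWL_TX_FIFO_VI, IWL_TX_FIFO_BE, IWL_TX_FIFO_BK,
    };

    int main(void)
    {
        unsigned int tid;

        /* TIDs 0-7 resolve through an AC; TIDs 8-15 are unsupported */
        for (tid = 0; tid < 16; tid++) {
            if (tid < sizeof(tid_to_ac))
                printf("tid %2u -> ac %u -> fifo %u\n", tid,
                       tid_to_ac[tid], ac_to_fifo[tid_to_ac[tid]]);
            else
                printf("tid %2u -> -EINVAL (no support yet)\n", tid);
        }
        return 0;
    }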
97 | static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv, | ||
98 | struct iwl_dma_ptr *ptr, size_t size) | ||
99 | { | ||
100 | ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma, | ||
101 | GFP_KERNEL); | ||
102 | if (!ptr->addr) | ||
103 | return -ENOMEM; | ||
104 | ptr->size = size; | ||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | static inline void iwl_free_dma_ptr(struct iwl_priv *priv, | ||
109 | struct iwl_dma_ptr *ptr) | ||
110 | { | ||
111 | if (unlikely(!ptr->addr)) | ||
112 | return; | ||
113 | |||
114 | dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma); | ||
115 | memset(ptr, 0, sizeof(*ptr)); | ||
116 | } | ||
117 | |||
118 | /** | 40 | /** |
119 | * iwl_txq_update_write_ptr - Send new write index to hardware | 41 | * iwl_txq_update_write_ptr - Send new write index to hardware |
120 | */ | 42 | */ |
@@ -448,579 +370,6 @@ out_free_arrays: | |||
448 | } | 370 | } |
449 | EXPORT_SYMBOL(iwl_tx_queue_init); | 371 | EXPORT_SYMBOL(iwl_tx_queue_init); |
450 | 372 | ||
451 | /** | ||
452 | * iwl_hw_txq_ctx_free - Free TXQ Context | ||
453 | * | ||
454 | * Destroy all TX DMA queues and structures | ||
455 | */ | ||
456 | void iwl_hw_txq_ctx_free(struct iwl_priv *priv) | ||
457 | { | ||
458 | int txq_id; | ||
459 | |||
460 | /* Tx queues */ | ||
461 | if (priv->txq) { | ||
462 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; | ||
463 | txq_id++) | ||
464 | if (txq_id == IWL_CMD_QUEUE_NUM) | ||
465 | iwl_cmd_queue_free(priv); | ||
466 | else | ||
467 | iwl_tx_queue_free(priv, txq_id); | ||
468 | } | ||
469 | iwl_free_dma_ptr(priv, &priv->kw); | ||
470 | |||
471 | iwl_free_dma_ptr(priv, &priv->scd_bc_tbls); | ||
472 | |||
473 | /* free tx queue structure */ | ||
474 | iwl_free_txq_mem(priv); | ||
475 | } | ||
476 | EXPORT_SYMBOL(iwl_hw_txq_ctx_free); | ||
477 | |||
478 | /** | ||
479 | * iwl_txq_ctx_reset - Reset TX queue context | ||
480 | * Destroys all DMA structures and initializes them again | ||
481 | * | ||
482 | * @param priv | ||
483 | * @return error code | ||
484 | */ | ||
485 | int iwl_txq_ctx_reset(struct iwl_priv *priv) | ||
486 | { | ||
487 | int ret = 0; | ||
488 | int txq_id, slots_num; | ||
489 | unsigned long flags; | ||
490 | |||
491 | /* Free all tx/cmd queues and keep-warm buffer */ | ||
492 | iwl_hw_txq_ctx_free(priv); | ||
493 | |||
494 | ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls, | ||
495 | priv->hw_params.scd_bc_tbls_size); | ||
496 | if (ret) { | ||
497 | IWL_ERR(priv, "Scheduler BC Table allocation failed\n"); | ||
498 | goto error_bc_tbls; | ||
499 | } | ||
500 | /* Alloc keep-warm buffer */ | ||
501 | ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE); | ||
502 | if (ret) { | ||
503 | IWL_ERR(priv, "Keep Warm allocation failed\n"); | ||
504 | goto error_kw; | ||
505 | } | ||
506 | |||
507 | /* allocate tx queue structure */ | ||
508 | ret = iwl_alloc_txq_mem(priv); | ||
509 | if (ret) | ||
510 | goto error; | ||
511 | |||
512 | spin_lock_irqsave(&priv->lock, flags); | ||
513 | |||
514 | /* Turn off all Tx DMA fifos */ | ||
515 | priv->cfg->ops->lib->txq_set_sched(priv, 0); | ||
516 | |||
517 | /* Tell NIC where to find the "keep warm" buffer */ | ||
518 | iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); | ||
519 | |||
520 | spin_unlock_irqrestore(&priv->lock, flags); | ||
521 | |||
522 | /* Alloc and init all Tx queues, including the command queue (#4) */ | ||
523 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { | ||
524 | slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? | ||
525 | TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | ||
526 | ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, | ||
527 | txq_id); | ||
528 | if (ret) { | ||
529 | IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); | ||
530 | goto error; | ||
531 | } | ||
532 | } | ||
533 | |||
534 | return ret; | ||
535 | |||
536 | error: | ||
537 | iwl_hw_txq_ctx_free(priv); | ||
538 | iwl_free_dma_ptr(priv, &priv->kw); | ||
539 | error_kw: | ||
540 | iwl_free_dma_ptr(priv, &priv->scd_bc_tbls); | ||
541 | error_bc_tbls: | ||
542 | return ret; | ||
543 | } | ||
544 | |||
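The reset path above follows the kernel's goto-unwind error-handling idiom: each allocation gets a matching label, and a failure jumps past only the cleanups for resources not yet acquired. A condensed, self-contained sketch of the same shape; the malloc() calls are placeholders standing in for iwl_alloc_dma_ptr() and friends, not driver API.

    #include <stdlib.h>

    struct ctx { void *bc_tbls, *kw, *txq; };

    static int ctx_setup(struct ctx *c)
    {
        c->bc_tbls = malloc(64);
        if (!c->bc_tbls)
            goto error_bc_tbls;     /* nothing to undo yet */

        c->kw = malloc(64);
        if (!c->kw)
            goto error_kw;          /* undo bc_tbls only */

        c->txq = malloc(64);
        if (!c->txq)
            goto error;             /* undo kw, then bc_tbls */

        return 0;

    error:
        free(c->kw);
    error_kw:
        free(c->bc_tbls);
    error_bc_tbls:
        return -1;                  /* -ENOMEM in the kernel version */
    }

    int main(void)
    {
        struct ctx c = { 0 };

        if (ctx_setup(&c) == 0) {   /* success: release in reverse order */
            free(c.txq);
            free(c.kw);
            free(c.bc_tbls);
        }
        return 0;
    }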
545 | /** | ||
546 | * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory | ||
547 | */ | ||
548 | void iwl_txq_ctx_stop(struct iwl_priv *priv) | ||
549 | { | ||
550 | int ch; | ||
551 | unsigned long flags; | ||
552 | |||
553 | /* Turn off all Tx DMA fifos */ | ||
554 | spin_lock_irqsave(&priv->lock, flags); | ||
555 | |||
556 | priv->cfg->ops->lib->txq_set_sched(priv, 0); | ||
557 | |||
558 | /* Stop each Tx DMA channel, and wait for it to be idle */ | ||
559 | for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) { | ||
560 | iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); | ||
561 | iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG, | ||
562 | FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), | ||
563 | 1000); | ||
564 | } | ||
565 | spin_unlock_irqrestore(&priv->lock, flags); | ||
566 | |||
567 | /* Deallocate memory for all Tx queues */ | ||
568 | iwl_hw_txq_ctx_free(priv); | ||
569 | } | ||
570 | EXPORT_SYMBOL(iwl_txq_ctx_stop); | ||
571 | |||
572 | /* | ||
573 | * handle build REPLY_TX command notification. | ||
574 | */ | ||
575 | static void iwl_tx_cmd_build_basic(struct iwl_priv *priv, | ||
576 | struct iwl_tx_cmd *tx_cmd, | ||
577 | struct ieee80211_tx_info *info, | ||
578 | struct ieee80211_hdr *hdr, | ||
579 | u8 sta_id) | ||
580 | { | ||
581 | __le16 fc = hdr->frame_control; | ||
582 | __le32 tx_flags = tx_cmd->tx_flags; | ||
583 | |||
584 | tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; | ||
585 | if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { | ||
586 | tx_flags |= TX_CMD_FLG_ACK_MSK; | ||
587 | if (ieee80211_is_mgmt(fc)) | ||
588 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
589 | if (ieee80211_is_probe_resp(fc) && | ||
590 | !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) | ||
591 | tx_flags |= TX_CMD_FLG_TSF_MSK; | ||
592 | } else { | ||
593 | tx_flags &= (~TX_CMD_FLG_ACK_MSK); | ||
594 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
595 | } | ||
596 | |||
597 | if (ieee80211_is_back_req(fc)) | ||
598 | tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; | ||
599 | |||
600 | |||
601 | tx_cmd->sta_id = sta_id; | ||
602 | if (ieee80211_has_morefrags(fc)) | ||
603 | tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; | ||
604 | |||
605 | if (ieee80211_is_data_qos(fc)) { | ||
606 | u8 *qc = ieee80211_get_qos_ctl(hdr); | ||
607 | tx_cmd->tid_tspec = qc[0] & 0xf; | ||
608 | tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; | ||
609 | } else { | ||
610 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
611 | } | ||
612 | |||
613 | priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags); | ||
614 | |||
615 | if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK)) | ||
616 | tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; | ||
617 | |||
618 | tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); | ||
619 | if (ieee80211_is_mgmt(fc)) { | ||
620 | if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) | ||
621 | tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); | ||
622 | else | ||
623 | tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); | ||
624 | } else { | ||
625 | tx_cmd->timeout.pm_frame_timeout = 0; | ||
626 | } | ||
627 | |||
628 | tx_cmd->driver_txop = 0; | ||
629 | tx_cmd->tx_flags = tx_flags; | ||
630 | tx_cmd->next_frame_len = 0; | ||
631 | } | ||
632 | |||
633 | #define RTS_DEFAULT_RETRY_LIMIT 60 | ||
634 | |||
635 | static void iwl_tx_cmd_build_rate(struct iwl_priv *priv, | ||
636 | struct iwl_tx_cmd *tx_cmd, | ||
637 | struct ieee80211_tx_info *info, | ||
638 | __le16 fc) | ||
639 | { | ||
640 | u32 rate_flags; | ||
641 | int rate_idx; | ||
642 | u8 rts_retry_limit; | ||
643 | u8 data_retry_limit; | ||
644 | u8 rate_plcp; | ||
645 | |||
646 | /* Set retry limit on DATA packets and Probe Responses */ | ||
647 | if (ieee80211_is_probe_resp(fc)) | ||
648 | data_retry_limit = 3; | ||
649 | else | ||
650 | data_retry_limit = IWL_DEFAULT_TX_RETRY; | ||
651 | tx_cmd->data_retry_limit = data_retry_limit; | ||
652 | |||
653 | /* Set retry limit on RTS packets */ | ||
654 | rts_retry_limit = RTS_DEFAULT_RETRY_LIMIT; | ||
655 | if (data_retry_limit < rts_retry_limit) | ||
656 | rts_retry_limit = data_retry_limit; | ||
657 | tx_cmd->rts_retry_limit = rts_retry_limit; | ||
658 | |||
659 | /* DATA packets will use the uCode station table for rate/antenna | ||
660 | * selection */ | ||
661 | if (ieee80211_is_data(fc)) { | ||
662 | tx_cmd->initial_rate_index = 0; | ||
663 | tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; | ||
664 | return; | ||
665 | } | ||
666 | |||
667 | /** | ||
668 | * If the current TX rate stored in mac80211 has the MCS bit set, it's | ||
669 | * not really a TX rate. Thus, we use the lowest supported rate for | ||
670 | * this band. Also use the lowest supported rate if the stored rate | ||
671 | * index is invalid. | ||
672 | */ | ||
673 | rate_idx = info->control.rates[0].idx; | ||
674 | if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS || | ||
675 | (rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY)) | ||
676 | rate_idx = rate_lowest_index(&priv->bands[info->band], | ||
677 | info->control.sta); | ||
678 | /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ | ||
679 | if (info->band == IEEE80211_BAND_5GHZ) | ||
680 | rate_idx += IWL_FIRST_OFDM_RATE; | ||
681 | /* Get PLCP rate for tx_cmd->rate_n_flags */ | ||
682 | rate_plcp = iwl_rates[rate_idx].plcp; | ||
683 | /* Zero out flags for this packet */ | ||
684 | rate_flags = 0; | ||
685 | |||
686 | /* Set CCK flag as needed */ | ||
687 | if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) | ||
688 | rate_flags |= RATE_MCS_CCK_MSK; | ||
689 | |||
690 | /* Set up RTS and CTS flags for certain packets */ | ||
691 | switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { | ||
692 | case cpu_to_le16(IEEE80211_STYPE_AUTH): | ||
693 | case cpu_to_le16(IEEE80211_STYPE_DEAUTH): | ||
694 | case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): | ||
695 | case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): | ||
696 | if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) { | ||
697 | tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK; | ||
698 | tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK; | ||
699 | } | ||
700 | break; | ||
701 | default: | ||
702 | break; | ||
703 | } | ||
704 | |||
705 | /* Set up antennas */ | ||
706 | priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant); | ||
707 | rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant); | ||
708 | |||
709 | /* Set the rate in the TX cmd */ | ||
710 | tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags); | ||
711 | } | ||
712 | |||
713 | static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv, | ||
714 | struct ieee80211_tx_info *info, | ||
715 | struct iwl_tx_cmd *tx_cmd, | ||
716 | struct sk_buff *skb_frag, | ||
717 | int sta_id) | ||
718 | { | ||
719 | struct ieee80211_key_conf *keyconf = info->control.hw_key; | ||
720 | |||
721 | switch (keyconf->alg) { | ||
722 | case ALG_CCMP: | ||
723 | tx_cmd->sec_ctl = TX_CMD_SEC_CCM; | ||
724 | memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); | ||
725 | if (info->flags & IEEE80211_TX_CTL_AMPDU) | ||
726 | tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK; | ||
727 | IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); | ||
728 | break; | ||
729 | |||
730 | case ALG_TKIP: | ||
731 | tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; | ||
732 | ieee80211_get_tkip_key(keyconf, skb_frag, | ||
733 | IEEE80211_TKIP_P2_KEY, tx_cmd->key); | ||
734 | IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n"); | ||
735 | break; | ||
736 | |||
737 | case ALG_WEP: | ||
738 | tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP | | ||
739 | (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT); | ||
740 | |||
741 | if (keyconf->keylen == WEP_KEY_LEN_128) | ||
742 | tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; | ||
743 | |||
744 | memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); | ||
745 | |||
746 | IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " | ||
747 | "with key %d\n", keyconf->keyidx); | ||
748 | break; | ||
749 | |||
750 | default: | ||
751 | IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg); | ||
752 | break; | ||
753 | } | ||
754 | } | ||
755 | |||
756 | /* | ||
757 | * start REPLY_TX command process | ||
758 | */ | ||
759 | int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) | ||
760 | { | ||
761 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
762 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
763 | struct ieee80211_sta *sta = info->control.sta; | ||
764 | struct iwl_station_priv *sta_priv = NULL; | ||
765 | struct iwl_tx_queue *txq; | ||
766 | struct iwl_queue *q; | ||
767 | struct iwl_device_cmd *out_cmd; | ||
768 | struct iwl_cmd_meta *out_meta; | ||
769 | struct iwl_tx_cmd *tx_cmd; | ||
770 | int swq_id, txq_id; | ||
771 | dma_addr_t phys_addr; | ||
772 | dma_addr_t txcmd_phys; | ||
773 | dma_addr_t scratch_phys; | ||
774 | u16 len, len_org, firstlen, secondlen; | ||
775 | u16 seq_number = 0; | ||
776 | __le16 fc; | ||
777 | u8 hdr_len; | ||
778 | u8 sta_id; | ||
779 | u8 wait_write_ptr = 0; | ||
780 | u8 tid = 0; | ||
781 | u8 *qc = NULL; | ||
782 | unsigned long flags; | ||
783 | |||
784 | spin_lock_irqsave(&priv->lock, flags); | ||
785 | if (iwl_is_rfkill(priv)) { | ||
786 | IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); | ||
787 | goto drop_unlock; | ||
788 | } | ||
789 | |||
790 | fc = hdr->frame_control; | ||
791 | |||
792 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
793 | if (ieee80211_is_auth(fc)) | ||
794 | IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); | ||
795 | else if (ieee80211_is_assoc_req(fc)) | ||
796 | IWL_DEBUG_TX(priv, "Sending ASSOC frame\n"); | ||
797 | else if (ieee80211_is_reassoc_req(fc)) | ||
798 | IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); | ||
799 | #endif | ||
800 | |||
801 | hdr_len = ieee80211_hdrlen(fc); | ||
802 | |||
803 | /* Find (or create) index into station table for destination station */ | ||
804 | if (info->flags & IEEE80211_TX_CTL_INJECTED) | ||
805 | sta_id = priv->hw_params.bcast_sta_id; | ||
806 | else | ||
807 | sta_id = iwl_get_sta_id(priv, hdr); | ||
808 | if (sta_id == IWL_INVALID_STATION) { | ||
809 | IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", | ||
810 | hdr->addr1); | ||
811 | goto drop_unlock; | ||
812 | } | ||
813 | |||
814 | IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); | ||
815 | |||
816 | if (sta) | ||
817 | sta_priv = (void *)sta->drv_priv; | ||
818 | |||
819 | if (sta_priv && sta_id != priv->hw_params.bcast_sta_id && | ||
820 | sta_priv->asleep) { | ||
821 | WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)); | ||
822 | /* | ||
823 | * This sends an asynchronous command to the device, | ||
824 | * but we can rely on it being processed before the | ||
825 | * next frame is processed -- and the next frame to | ||
826 | * this station is the one that will consume this | ||
827 | * counter. | ||
828 | * For now set the counter to just 1 since we do not | ||
829 | * support uAPSD yet. | ||
830 | */ | ||
831 | iwl_sta_modify_sleep_tx_count(priv, sta_id, 1); | ||
832 | } | ||
833 | |||
834 | txq_id = get_queue_from_ac(skb_get_queue_mapping(skb)); | ||
835 | if (ieee80211_is_data_qos(fc)) { | ||
836 | qc = ieee80211_get_qos_ctl(hdr); | ||
837 | tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; | ||
838 | if (unlikely(tid >= MAX_TID_COUNT)) | ||
839 | goto drop_unlock; | ||
840 | seq_number = priv->stations[sta_id].tid[tid].seq_number; | ||
841 | seq_number &= IEEE80211_SCTL_SEQ; | ||
842 | hdr->seq_ctrl = hdr->seq_ctrl & | ||
843 | cpu_to_le16(IEEE80211_SCTL_FRAG); | ||
844 | hdr->seq_ctrl |= cpu_to_le16(seq_number); | ||
845 | seq_number += 0x10; | ||
846 | /* aggregation is on for this <sta,tid> */ | ||
847 | if (info->flags & IEEE80211_TX_CTL_AMPDU && | ||
848 | priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) { | ||
849 | txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; | ||
850 | } | ||
851 | } | ||
852 | |||
853 | txq = &priv->txq[txq_id]; | ||
854 | swq_id = txq->swq_id; | ||
855 | q = &txq->q; | ||
856 | |||
857 | if (unlikely(iwl_queue_space(q) < q->high_mark)) | ||
858 | goto drop_unlock; | ||
859 | |||
860 | if (ieee80211_is_data_qos(fc)) | ||
861 | priv->stations[sta_id].tid[tid].tfds_in_queue++; | ||
862 | |||
863 | /* Set up driver data for this TFD */ | ||
864 | memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); | ||
865 | txq->txb[q->write_ptr].skb[0] = skb; | ||
866 | |||
867 | /* Set up first empty entry in queue's array of Tx/cmd buffers */ | ||
868 | out_cmd = txq->cmd[q->write_ptr]; | ||
869 | out_meta = &txq->meta[q->write_ptr]; | ||
870 | tx_cmd = &out_cmd->cmd.tx; | ||
871 | memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); | ||
872 | memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd)); | ||
873 | |||
874 | /* | ||
875 | * Set up the Tx-command (not MAC!) header. | ||
876 | * Store the chosen Tx queue and TFD index within the sequence field; | ||
877 | * after Tx, uCode's Tx response will return this value so driver can | ||
878 | * locate the frame within the tx queue and do post-tx processing. | ||
879 | */ | ||
880 | out_cmd->hdr.cmd = REPLY_TX; | ||
881 | out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | | ||
882 | INDEX_TO_SEQ(q->write_ptr))); | ||
883 | |||
884 | /* Copy MAC header from skb into command buffer */ | ||
885 | memcpy(tx_cmd->hdr, hdr, hdr_len); | ||
886 | |||
887 | |||
888 | /* Total # bytes to be transmitted */ | ||
889 | len = (u16)skb->len; | ||
890 | tx_cmd->len = cpu_to_le16(len); | ||
891 | |||
892 | if (info->control.hw_key) | ||
893 | iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id); | ||
894 | |||
895 | /* TODO need this for burst mode later on */ | ||
896 | iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id); | ||
897 | iwl_dbg_log_tx_data_frame(priv, len, hdr); | ||
898 | |||
899 | iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc); | ||
900 | |||
901 | iwl_update_stats(priv, true, fc, len); | ||
902 | /* | ||
903 | * Use the first empty entry in this queue's command buffer array | ||
904 | * to contain the Tx command and MAC header concatenated together | ||
905 | * (payload data will be in another buffer). | ||
906 | * Size of this varies, due to varying MAC header length. | ||
907 | * If end is not dword aligned, we'll have 2 extra bytes at the end | ||
908 | * of the MAC header (device reads on dword boundaries). | ||
909 | * We'll tell device about this padding later. | ||
910 | */ | ||
911 | len = sizeof(struct iwl_tx_cmd) + | ||
912 | sizeof(struct iwl_cmd_header) + hdr_len; | ||
913 | |||
914 | len_org = len; | ||
915 | firstlen = len = (len + 3) & ~3; | ||
916 | |||
917 | if (len_org != len) | ||
918 | len_org = 1; | ||
919 | else | ||
920 | len_org = 0; | ||
921 | |||
922 | /* Tell NIC about any 2-byte padding after MAC header */ | ||
923 | if (len_org) | ||
924 | tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; | ||
925 | |||
926 | /* Physical address of this Tx command's header (not MAC header!), | ||
927 | * within command buffer array. */ | ||
928 | txcmd_phys = pci_map_single(priv->pci_dev, | ||
929 | &out_cmd->hdr, len, | ||
930 | PCI_DMA_BIDIRECTIONAL); | ||
931 | pci_unmap_addr_set(out_meta, mapping, txcmd_phys); | ||
932 | pci_unmap_len_set(out_meta, len, len); | ||
933 | /* Add buffer containing Tx command and MAC(!) header to TFD's | ||
934 | * first entry */ | ||
935 | priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, | ||
936 | txcmd_phys, len, 1, 0); | ||
937 | |||
938 | if (!ieee80211_has_morefrags(hdr->frame_control)) { | ||
939 | txq->need_update = 1; | ||
940 | if (qc) | ||
941 | priv->stations[sta_id].tid[tid].seq_number = seq_number; | ||
942 | } else { | ||
943 | wait_write_ptr = 1; | ||
944 | txq->need_update = 0; | ||
945 | } | ||
946 | |||
947 | /* Set up TFD's 2nd entry to point directly to remainder of skb, | ||
948 | * if any (802.11 null frames have no payload). */ | ||
949 | secondlen = len = skb->len - hdr_len; | ||
950 | if (len) { | ||
951 | phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, | ||
952 | len, PCI_DMA_TODEVICE); | ||
953 | priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, | ||
954 | phys_addr, len, | ||
955 | 0, 0); | ||
956 | } | ||
957 | |||
958 | scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + | ||
959 | offsetof(struct iwl_tx_cmd, scratch); | ||
960 | |||
961 | len = sizeof(struct iwl_tx_cmd) + | ||
962 | sizeof(struct iwl_cmd_header) + hdr_len; | ||
963 | /* take back ownership of DMA buffer to enable update */ | ||
964 | pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys, | ||
965 | len, PCI_DMA_BIDIRECTIONAL); | ||
966 | tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); | ||
967 | tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); | ||
968 | |||
969 | IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n", | ||
970 | le16_to_cpu(out_cmd->hdr.sequence)); | ||
971 | IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags)); | ||
972 | iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd)); | ||
973 | iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); | ||
974 | |||
975 | /* Set up entry for this TFD in Tx byte-count array */ | ||
976 | if (info->flags & IEEE80211_TX_CTL_AMPDU) | ||
977 | priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, | ||
978 | le16_to_cpu(tx_cmd->len)); | ||
979 | |||
980 | pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, | ||
981 | len, PCI_DMA_BIDIRECTIONAL); | ||
982 | |||
983 | trace_iwlwifi_dev_tx(priv, | ||
984 | &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr], | ||
985 | sizeof(struct iwl_tfd), | ||
986 | &out_cmd->hdr, firstlen, | ||
987 | skb->data + hdr_len, secondlen); | ||
988 | |||
989 | /* Tell device the write index *just past* this latest filled TFD */ | ||
990 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); | ||
991 | iwl_txq_update_write_ptr(priv, txq); | ||
992 | spin_unlock_irqrestore(&priv->lock, flags); | ||
993 | |||
994 | /* | ||
995 | * At this point the frame is "transmitted" successfully | ||
996 | * and we will get a TX status notification eventually, | ||
997 | * regardless of the value of ret. "ret" only indicates | ||
998 | * whether or not we should update the write pointer. | ||
999 | */ | ||
1000 | |||
1001 | /* avoid atomic ops if it isn't an associated client */ | ||
1002 | if (sta_priv && sta_priv->client) | ||
1003 | atomic_inc(&sta_priv->pending_frames); | ||
1004 | |||
1005 | if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) { | ||
1006 | if (wait_write_ptr) { | ||
1007 | spin_lock_irqsave(&priv->lock, flags); | ||
1008 | txq->need_update = 1; | ||
1009 | iwl_txq_update_write_ptr(priv, txq); | ||
1010 | spin_unlock_irqrestore(&priv->lock, flags); | ||
1011 | } else { | ||
1012 | iwl_stop_queue(priv, txq->swq_id); | ||
1013 | } | ||
1014 | } | ||
1015 | |||
1016 | return 0; | ||
1017 | |||
1018 | drop_unlock: | ||
1019 | spin_unlock_irqrestore(&priv->lock, flags); | ||
1020 | return -1; | ||
1021 | } | ||
1022 | EXPORT_SYMBOL(iwl_tx_skb); | ||
1023 | |||
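Two details in iwl_tx_skb() reward a worked example: the dword alignment of the Tx-command-plus-MAC-header chunk (with any 2-byte pad flagged via TX_CMD_FLG_MH_PAD_MSK) and the queue/index packing in the command sequence field. In this sketch the QUEUE_TO_SEQ()/INDEX_TO_SEQ() layouts and the struct sizes are assumptions for illustration; the authoritative macro definitions live in iwl-commands.h.

    #include <stdio.h>

    /* assumed layouts: queue in bits 8-12, TFD index in bits 0-7 */
    #define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
    #define INDEX_TO_SEQ(i) ((i) & 0xff)

    int main(void)
    {
        /* e.g. 24-byte 802.11 header plus 2-byte QoS control */
        unsigned int hdr_len = 26;
        unsigned int tx_cmd_len = 36, cmd_hdr_len = 4;  /* illustrative sizes */
        unsigned int len = tx_cmd_len + cmd_hdr_len + hdr_len;  /* 66 */
        unsigned int firstlen = (len + 3) & ~3u;                /* 68 */

        printf("len=%u firstlen=%u pad=%s\n", len, firstlen,
               firstlen != len ? "yes (set TX_CMD_FLG_MH_PAD_MSK)" : "no");

        /* queue 7, write pointer 42 -> sequence 0x072a */
        printf("sequence=0x%04x\n", QUEUE_TO_SEQ(7) | INDEX_TO_SEQ(42));
        return 0;
    }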
1024 | /*************** HOST COMMAND QUEUE FUNCTIONS *****/ | 373 | /*************** HOST COMMAND QUEUE FUNCTIONS *****/ |
1025 | 374 | ||
1026 | /** | 375 | /** |
@@ -1146,61 +495,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | |||
1146 | return idx; | 495 | return idx; |
1147 | } | 496 | } |
1148 | 497 | ||
1149 | static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb) | ||
1150 | { | ||
1151 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
1152 | struct ieee80211_sta *sta; | ||
1153 | struct iwl_station_priv *sta_priv; | ||
1154 | |||
1155 | sta = ieee80211_find_sta(priv->vif, hdr->addr1); | ||
1156 | if (sta) { | ||
1157 | sta_priv = (void *)sta->drv_priv; | ||
1158 | /* avoid atomic ops if this isn't a client */ | ||
1159 | if (sta_priv->client && | ||
1160 | atomic_dec_return(&sta_priv->pending_frames) == 0) | ||
1161 | ieee80211_sta_block_awake(priv->hw, sta, false); | ||
1162 | } | ||
1163 | |||
1164 | ieee80211_tx_status_irqsafe(priv->hw, skb); | ||
1165 | } | ||
1166 | |||
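iwl_tx_status() pairs with the atomic_inc() in iwl_tx_skb(): each frame queued for an associated client bumps pending_frames, and the station is unblocked only when the counter drains back to zero. A minimal sketch of that pattern using C11 atomics, with the mac80211 call stubbed out:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int pending_frames;

    static void sta_block_awake(int block)  /* stand-in for the mac80211 call */
    {
        printf("station %s\n", block ? "blocked" : "awake");
    }

    static void tx_enqueue(void)
    {
        atomic_fetch_add(&pending_frames, 1);
    }

    static void tx_complete(void)
    {
        /* wake the station only when the last in-flight frame completes */
        if (atomic_fetch_sub(&pending_frames, 1) == 1)
            sta_block_awake(0);
    }

    int main(void)
    {
        tx_enqueue();
        tx_enqueue();
        tx_complete();  /* counter 2 -> 1: still pending */
        tx_complete();  /* counter 1 -> 0: unblock */
        return 0;
    }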
1167 | int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) | ||
1168 | { | ||
1169 | struct iwl_tx_queue *txq = &priv->txq[txq_id]; | ||
1170 | struct iwl_queue *q = &txq->q; | ||
1171 | struct iwl_tx_info *tx_info; | ||
1172 | int nfreed = 0; | ||
1173 | struct ieee80211_hdr *hdr; | ||
1174 | |||
1175 | if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) { | ||
1176 | IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " | ||
1177 | "is out of range [0-%d] %d %d.\n", txq_id, | ||
1178 | index, q->n_bd, q->write_ptr, q->read_ptr); | ||
1179 | return 0; | ||
1180 | } | ||
1181 | |||
1182 | for (index = iwl_queue_inc_wrap(index, q->n_bd); | ||
1183 | q->read_ptr != index; | ||
1184 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | ||
1185 | |||
1186 | tx_info = &txq->txb[txq->q.read_ptr]; | ||
1187 | iwl_tx_status(priv, tx_info->skb[0]); | ||
1188 | |||
1189 | hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data; | ||
1190 | if (hdr && ieee80211_is_data_qos(hdr->frame_control)) | ||
1191 | nfreed++; | ||
1192 | tx_info->skb[0] = NULL; | ||
1193 | |||
1194 | if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl) | ||
1195 | priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq); | ||
1196 | |||
1197 | priv->cfg->ops->lib->txq_free_tfd(priv, txq); | ||
1198 | } | ||
1199 | return nfreed; | ||
1200 | } | ||
1201 | EXPORT_SYMBOL(iwl_tx_queue_reclaim); | ||
1202 | |||
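The reclaim loop above advances read_ptr through the circular buffer until it reaches the entry just past the last completed TFD; iwl_queue_inc_wrap() is essentially an increment modulo the ring size. A toy version of the walk, assuming a 256-entry power-of-two ring (note the driver counts only QoS data frames toward nfreed, which this sketch glosses over):

    #include <stdio.h>

    #define N_BD 256    /* ring size, as in the driver's TFD queues */

    static int inc_wrap(int idx)
    {
        return (idx + 1) & (N_BD - 1);  /* valid because N_BD is a power of 2 */
    }

    int main(void)
    {
        int read_ptr = 250, last_done = 3;  /* window wraps past the end */
        int index = inc_wrap(last_done);    /* first entry to keep */
        int nfreed = 0;

        for (; read_ptr != index; read_ptr = inc_wrap(read_ptr))
            nfreed++;   /* the driver frees the TFD and its skb here */

        printf("reclaimed %d entries, read_ptr now %d\n", nfreed, read_ptr);
        return 0;   /* prints: reclaimed 10 entries, read_ptr now 4 */
    }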
1203 | |||
1204 | /** | 498 | /** |
1205 | * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd | 499 | * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd |
1206 | * | 500 | * |
@@ -1292,329 +586,6 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |||
1292 | } | 586 | } |
1293 | EXPORT_SYMBOL(iwl_tx_cmd_complete); | 587 | EXPORT_SYMBOL(iwl_tx_cmd_complete); |
1294 | 588 | ||
1295 | /* | ||
1296 | * Find first available (lowest unused) Tx Queue, mark it "active". | ||
1297 | * Called only when finding queue for aggregation. | ||
1298 | * Should never return anything < 7, because they should already | ||
1299 | * be in use as EDCA AC (0-3), Command (4), reserved (5, 6) | ||
1300 | */ | ||
1301 | static int iwl_txq_ctx_activate_free(struct iwl_priv *priv) | ||
1302 | { | ||
1303 | int txq_id; | ||
1304 | |||
1305 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) | ||
1306 | if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk)) | ||
1307 | return txq_id; | ||
1308 | return -1; | ||
1309 | } | ||
1310 | |||
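iwl_txq_ctx_activate_free() is a first-fit allocator over a queue bitmap: test_and_set_bit() atomically claims the lowest clear bit. A userspace sketch of the same search; since it is single-threaded, a plain bitmask operation stands in for the kernel's atomic primitive.

    #include <stdio.h>

    #define MAX_TXQ 16

    static unsigned long txq_active_msk = 0x7f; /* queues 0-6 already in use */

    /* non-atomic stand-in for the kernel's test_and_set_bit() */
    static int test_and_set_bit(int nr, unsigned long *addr)
    {
        int old = (int)((*addr >> nr) & 1);

        *addr |= 1UL << nr;
        return old;
    }

    static int activate_free_txq(void)
    {
        int txq_id;

        for (txq_id = 0; txq_id < MAX_TXQ; txq_id++)
            if (!test_and_set_bit(txq_id, &txq_active_msk))
                return txq_id;  /* claimed for aggregation */
        return -1;
    }

    int main(void)
    {
        printf("first free queue: %d\n", activate_free_txq());  /* 7 */
        return 0;
    }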
1311 | int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn) | ||
1312 | { | ||
1313 | int sta_id; | ||
1314 | int tx_fifo; | ||
1315 | int txq_id; | ||
1316 | int ret; | ||
1317 | unsigned long flags; | ||
1318 | struct iwl_tid_data *tid_data; | ||
1319 | |||
1320 | tx_fifo = get_fifo_from_tid(tid); | ||
1321 | if (unlikely(tx_fifo < 0)) | ||
1322 | return tx_fifo; | ||
1323 | |||
1324 | IWL_WARN(priv, "%s on ra = %pM tid = %d\n", | ||
1325 | __func__, ra, tid); | ||
1326 | |||
1327 | sta_id = iwl_find_station(priv, ra); | ||
1328 | if (sta_id == IWL_INVALID_STATION) { | ||
1329 | IWL_ERR(priv, "Start AGG on invalid station\n"); | ||
1330 | return -ENXIO; | ||
1331 | } | ||
1332 | if (unlikely(tid >= MAX_TID_COUNT)) | ||
1333 | return -EINVAL; | ||
1334 | |||
1335 | if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { | ||
1336 | IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); | ||
1337 | return -ENXIO; | ||
1338 | } | ||
1339 | |||
1340 | txq_id = iwl_txq_ctx_activate_free(priv); | ||
1341 | if (txq_id == -1) { | ||
1342 | IWL_ERR(priv, "No free aggregation queue available\n"); | ||
1343 | return -ENXIO; | ||
1344 | } | ||
1345 | |||
1346 | spin_lock_irqsave(&priv->sta_lock, flags); | ||
1347 | tid_data = &priv->stations[sta_id].tid[tid]; | ||
1348 | *ssn = SEQ_TO_SN(tid_data->seq_number); | ||
1349 | tid_data->agg.txq_id = txq_id; | ||
1350 | priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id); | ||
1351 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
1352 | |||
1353 | ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo, | ||
1354 | sta_id, tid, *ssn); | ||
1355 | if (ret) | ||
1356 | return ret; | ||
1357 | |||
1358 | if (tid_data->tfds_in_queue == 0) { | ||
1359 | IWL_DEBUG_HT(priv, "HW queue is empty\n"); | ||
1360 | tid_data->agg.state = IWL_AGG_ON; | ||
1361 | ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid); | ||
1362 | } else { | ||
1363 | IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n", | ||
1364 | tid_data->tfds_in_queue); | ||
1365 | tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; | ||
1366 | } | ||
1367 | return ret; | ||
1368 | } | ||
1369 | EXPORT_SYMBOL(iwl_tx_agg_start); | ||
1370 | |||
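The driver keeps tid_data->seq_number in over-the-air seq_ctrl format (sequence number in bits 4-15, fragment number in bits 0-3), which is why iwl_tx_skb() steps it by 0x10 per MSDU and why SEQ_TO_SN() shifts right by four when deriving the starting SSN above. A quick sanity check, assuming SEQ_TO_SN(x) = (x & 0xfff0) >> 4 as in iwl-dev.h:

    #include <stdio.h>

    #define IEEE80211_SCTL_SEQ 0xfff0
    #define SEQ_TO_SN(seq)     (((seq) & IEEE80211_SCTL_SEQ) >> 4) /* assumed */

    int main(void)
    {
        unsigned int seq_number = 0;
        int i;

        for (i = 0; i < 3; i++) {
            printf("seq_ctrl=0x%04x -> ssn=%u\n",
                   seq_number, SEQ_TO_SN(seq_number));
            seq_number += 0x10; /* one MSDU, as in iwl_tx_skb() */
        }
        return 0;
    }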
1372 | int iwl_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid) | ||
1372 | { | ||
1373 | int tx_fifo_id, txq_id, sta_id, ssn = -1; | ||
1374 | struct iwl_tid_data *tid_data; | ||
1375 | int write_ptr, read_ptr; | ||
1376 | unsigned long flags; | ||
1377 | |||
1378 | if (!ra) { | ||
1379 | IWL_ERR(priv, "ra = NULL\n"); | ||
1380 | return -EINVAL; | ||
1381 | } | ||
1382 | |||
1383 | tx_fifo_id = get_fifo_from_tid(tid); | ||
1384 | if (unlikely(tx_fifo_id < 0)) | ||
1385 | return tx_fifo_id; | ||
1386 | |||
1387 | sta_id = iwl_find_station(priv, ra); | ||
1388 | |||
1389 | if (sta_id == IWL_INVALID_STATION) { | ||
1390 | IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); | ||
1391 | return -ENXIO; | ||
1392 | } | ||
1393 | |||
1394 | if (priv->stations[sta_id].tid[tid].agg.state == | ||
1395 | IWL_EMPTYING_HW_QUEUE_ADDBA) { | ||
1396 | IWL_DEBUG_HT(priv, "AGG stop before setup done\n"); | ||
1397 | ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid); | ||
1398 | priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; | ||
1399 | return 0; | ||
1400 | } | ||
1401 | |||
1402 | if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON) | ||
1403 | IWL_WARN(priv, "Stopping AGG while state not ON or starting\n"); | ||
1404 | |||
1405 | tid_data = &priv->stations[sta_id].tid[tid]; | ||
1406 | ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; | ||
1407 | txq_id = tid_data->agg.txq_id; | ||
1408 | write_ptr = priv->txq[txq_id].q.write_ptr; | ||
1409 | read_ptr = priv->txq[txq_id].q.read_ptr; | ||
1410 | |||
1411 | /* The queue is not empty */ | ||
1412 | if (write_ptr != read_ptr) { | ||
1413 | IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n"); | ||
1414 | priv->stations[sta_id].tid[tid].agg.state = | ||
1415 | IWL_EMPTYING_HW_QUEUE_DELBA; | ||
1416 | return 0; | ||
1417 | } | ||
1418 | |||
1419 | IWL_DEBUG_HT(priv, "HW queue is empty\n"); | ||
1420 | priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; | ||
1421 | |||
1422 | spin_lock_irqsave(&priv->lock, flags); | ||
1423 | /* | ||
1424 | * The only reason this call can fail is a queue number out of range, | ||
1425 | * which can happen if the uCode is reloaded and all the station | ||
1426 | * information is lost. If it is out of range, there is no need | ||
1427 | * to deactivate the uCode queue; just return "success" to let | ||
1428 | * mac80211 clean up its own data. | ||
1429 | */ | ||
1430 | priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn, | ||
1431 | tx_fifo_id); | ||
1432 | spin_unlock_irqrestore(&priv->lock, flags); | ||
1433 | |||
1434 | ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid); | ||
1435 | |||
1436 | return 0; | ||
1437 | } | ||
1438 | EXPORT_SYMBOL(iwl_tx_agg_stop); | ||
1439 | |||
1440 | int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id) | ||
1441 | { | ||
1442 | struct iwl_queue *q = &priv->txq[txq_id].q; | ||
1443 | u8 *addr = priv->stations[sta_id].sta.sta.addr; | ||
1444 | struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid]; | ||
1445 | |||
1446 | switch (priv->stations[sta_id].tid[tid].agg.state) { | ||
1447 | case IWL_EMPTYING_HW_QUEUE_DELBA: | ||
1448 | /* We are reclaiming the last packet of the | ||
1449 | * aggregated HW queue */ | ||
1450 | if ((txq_id == tid_data->agg.txq_id) && | ||
1451 | (q->read_ptr == q->write_ptr)) { | ||
1452 | u16 ssn = SEQ_TO_SN(tid_data->seq_number); | ||
1453 | int tx_fifo = get_fifo_from_tid(tid); | ||
1454 | IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n"); | ||
1455 | priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, | ||
1456 | ssn, tx_fifo); | ||
1457 | tid_data->agg.state = IWL_AGG_OFF; | ||
1458 | ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid); | ||
1459 | } | ||
1460 | break; | ||
1461 | case IWL_EMPTYING_HW_QUEUE_ADDBA: | ||
1462 | /* We are reclaiming the last packet of the queue */ | ||
1463 | if (tid_data->tfds_in_queue == 0) { | ||
1464 | IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n"); | ||
1465 | tid_data->agg.state = IWL_AGG_ON; | ||
1466 | ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid); | ||
1467 | } | ||
1468 | break; | ||
1469 | } | ||
1470 | return 0; | ||
1471 | } | ||
1472 | EXPORT_SYMBOL(iwl_txq_check_empty); | ||
1473 | |||
1474 | /** | ||
1475 | * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack | ||
1476 | * | ||
1477 | * Go through block-ack's bitmap of ACK'd frames, update driver's record of | ||
1478 | * ACK vs. not. This gets sent to mac80211, then to rate scaling algo. | ||
1479 | */ | ||
1480 | static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv, | ||
1481 | struct iwl_ht_agg *agg, | ||
1482 | struct iwl_compressed_ba_resp *ba_resp) | ||
1483 | |||
1484 | { | ||
1485 | int i, sh, ack; | ||
1486 | u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl); | ||
1487 | u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); | ||
1488 | u64 bitmap; | ||
1489 | int successes = 0; | ||
1490 | struct ieee80211_tx_info *info; | ||
1491 | |||
1492 | if (unlikely(!agg->wait_for_ba)) { | ||
1493 | IWL_ERR(priv, "Received BA when not expected\n"); | ||
1494 | return -EINVAL; | ||
1495 | } | ||
1496 | |||
1497 | /* Mark that the expected block-ack response arrived */ | ||
1498 | agg->wait_for_ba = 0; | ||
1499 | IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl); | ||
1500 | |||
1501 | /* Calculate shift to align block-ack bits with our Tx window bits */ | ||
1502 | sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4); | ||
1503 | if (sh < 0) /* shouldn't happen: something is wrong with the indices */ | ||
1504 | sh += 0x100; | ||
1505 | |||
1506 | /* don't use 64-bit values for now */ | ||
1507 | bitmap = le64_to_cpu(ba_resp->bitmap) >> sh; | ||
1508 | |||
1509 | if (agg->frame_count > (64 - sh)) { | ||
1510 | IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size"); | ||
1511 | return -1; | ||
1512 | } | ||
1513 | |||
1514 | /* check for success or failure according to the | ||
1515 | * transmitted bitmap and block-ack bitmap */ | ||
1516 | bitmap &= agg->bitmap; | ||
1517 | |||
1518 | /* For each frame attempted in aggregation, | ||
1519 | * update driver's record of tx frame's status. */ | ||
1520 | for (i = 0; i < agg->frame_count ; i++) { | ||
1521 | ack = bitmap & (1ULL << i); | ||
1522 | successes += !!ack; | ||
1523 | IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n", | ||
1524 | ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff, | ||
1525 | agg->start_idx + i); | ||
1526 | } | ||
1527 | |||
1528 | info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]); | ||
1529 | memset(&info->status, 0, sizeof(info->status)); | ||
1530 | info->flags |= IEEE80211_TX_STAT_ACK; | ||
1531 | info->flags |= IEEE80211_TX_STAT_AMPDU; | ||
1532 | info->status.ampdu_ack_map = successes; | ||
1533 | info->status.ampdu_ack_len = agg->frame_count; | ||
1534 | iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info); | ||
1535 | |||
1536 | IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap); | ||
1537 | |||
1538 | return 0; | ||
1539 | } | ||
1540 | |||
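The shift computed above aligns the block-ack bitmap, indexed from the sequence number in ba_resp->seq_ctl, with the driver's own Tx window, indexed from agg->start_idx. A worked example with a 5-frame aggregate using the same arithmetic; SEQ_TO_INDEX() is assumed to take the low byte of the sequence number, matching its iwlwifi definition.

    #include <stdio.h>
    #include <stdint.h>

    #define SEQ_TO_INDEX(sn) ((sn) & 0xff)  /* assumed definition */

    int main(void)
    {
        int start_idx = 10;                 /* first frame of the aggregate */
        unsigned int seq_ctl = 8 << 4;      /* BA window starts two frames back */
        int frame_count = 5, i, successes = 0;

        int sh = start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
        if (sh < 0)
            sh += 0x100;                    /* indices wrapped around the ring */

        /* bits 2..6 of the reported bitmap cover our five frames */
        uint64_t ba_bitmap = 0x5cULL;       /* 0b1011100 */
        uint64_t bitmap = ba_bitmap >> sh;

        /* agg->bitmap marks which of our frames were actually transmitted */
        uint64_t agg_bitmap = 0x1f;         /* all five attempted */
        bitmap &= agg_bitmap;

        for (i = 0; i < frame_count; i++)
            successes += !!(bitmap & (1ULL << i));

        printf("sh=%d acked=%d of %d\n", sh, successes, frame_count);
        return 0;   /* prints: sh=2 acked=4 of 5 */
    }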
1541 | /** | ||
1542 | * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA | ||
1543 | * | ||
1544 | * Handles block-acknowledge notification from device, which reports success | ||
1545 | * of frames sent via aggregation. | ||
1546 | */ | ||
1547 | void iwl_rx_reply_compressed_ba(struct iwl_priv *priv, | ||
1548 | struct iwl_rx_mem_buffer *rxb) | ||
1549 | { | ||
1550 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
1551 | struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; | ||
1552 | struct iwl_tx_queue *txq = NULL; | ||
1553 | struct iwl_ht_agg *agg; | ||
1554 | int index; | ||
1555 | int sta_id; | ||
1556 | int tid; | ||
1557 | |||
1558 | /* "flow" corresponds to Tx queue */ | ||
1559 | u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); | ||
1560 | |||
1561 | /* "ssn" is start of block-ack Tx window, corresponds to index | ||
1562 | * (in Tx queue's circular buffer) of first TFD/frame in window */ | ||
1563 | u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); | ||
1564 | |||
1565 | if (scd_flow >= priv->hw_params.max_txq_num) { | ||
1566 | IWL_ERR(priv, | ||
1567 | "BUG_ON scd_flow is bigger than number of queues\n"); | ||
1568 | return; | ||
1569 | } | ||
1570 | |||
1571 | txq = &priv->txq[scd_flow]; | ||
1572 | sta_id = ba_resp->sta_id; | ||
1573 | tid = ba_resp->tid; | ||
1574 | agg = &priv->stations[sta_id].tid[tid].agg; | ||
1575 | |||
1576 | /* Find index just before block-ack window */ | ||
1577 | index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); | ||
1578 | |||
1579 | /* TODO: Need to get this copy more safely - now good for debug */ | ||
1580 | |||
1581 | IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, " | ||
1582 | "sta_id = %d\n", | ||
1583 | agg->wait_for_ba, | ||
1584 | (u8 *) &ba_resp->sta_addr_lo32, | ||
1585 | ba_resp->sta_id); | ||
1586 | IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = " | ||
1587 | "%d, scd_ssn = %d\n", | ||
1588 | ba_resp->tid, | ||
1589 | ba_resp->seq_ctl, | ||
1590 | (unsigned long long)le64_to_cpu(ba_resp->bitmap), | ||
1591 | ba_resp->scd_flow, | ||
1592 | ba_resp->scd_ssn); | ||
1593 | IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx \n", | ||
1594 | agg->start_idx, | ||
1595 | (unsigned long long)agg->bitmap); | ||
1596 | |||
1597 | /* Update driver's record of ACK vs. not for each frame in window */ | ||
1598 | iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp); | ||
1599 | |||
1600 | /* Release all TFDs before the SSN, i.e. all TFDs in front of | ||
1601 | * block-ack window (we assume that they've been successfully | ||
1602 | * transmitted ... if not, it's too late anyway). */ | ||
1603 | if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { | ||
1604 | /* calculate mac80211 ampdu sw queue to wake */ | ||
1605 | int freed = iwl_tx_queue_reclaim(priv, scd_flow, index); | ||
1606 | iwl_free_tfds_in_queue(priv, sta_id, tid, freed); | ||
1607 | |||
1608 | if ((iwl_queue_space(&txq->q) > txq->q.low_mark) && | ||
1609 | priv->mac80211_registered && | ||
1610 | (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) | ||
1611 | iwl_wake_queue(priv, txq->swq_id); | ||
1612 | |||
1613 | iwl_txq_check_empty(priv, sta_id, tid, scd_flow); | ||
1614 | } | ||
1615 | } | ||
1616 | EXPORT_SYMBOL(iwl_rx_reply_compressed_ba); | ||
1617 | |||
1618 | #ifdef CONFIG_IWLWIFI_DEBUG | 589 | #ifdef CONFIG_IWLWIFI_DEBUG |
1619 | #define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x | 590 | #define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x |
1620 | 591 | ||