Diffstat (limited to 'drivers/net/wireless')
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c      |   3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.c    |  13
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-rs.c  |  55
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-calib.c   |  12
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c    |  11
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.h    |   5
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c      | 107
7 files changed, 151 insertions, 55 deletions
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 67ca4e5a6017..115e1aeedb59 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1532,8 +1532,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
 	all_wiphys_idle = ath9k_all_wiphys_idle(sc);
 	ath9k_set_wiphy_idle(aphy, idle);
 
-	if (!idle && all_wiphys_idle)
-		enable_radio = true;
+	enable_radio = (!idle && all_wiphys_idle);
 
 	/*
 	 * After we unlock here its possible another wiphy
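The ath9k hunk collapses a conditional flag-set into a direct boolean assignment. The two forms are only equivalent if enable_radio starts out false, which the hunk does not show but the rewrite relies on; a minimal sketch of that assumption:

#include <stdbool.h>
#include <stdio.h>

/* Before: the flag is only ever raised.  After: it is assigned directly.
 * Equivalent provided the flag's initial value is false (assumed here). */
static bool old_way(bool idle, bool all_wiphys_idle)
{
	bool enable_radio = false;	/* assumed initial value */

	if (!idle && all_wiphys_idle)
		enable_radio = true;
	return enable_radio;
}

static bool new_way(bool idle, bool all_wiphys_idle)
{
	return !idle && all_wiphys_idle;
}

int main(void)
{
	for (int idle = 0; idle <= 1; idle++)
		for (int all = 0; all <= 1; all++)
			printf("idle=%d all=%d old=%d new=%d\n", idle, all,
			       old_way(idle, all), new_way(idle, all));
	return 0;
}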
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 83c52a682622..8972166386cb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2015,7 +2015,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
 			IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
 					   "%d index %d\n", scd_ssn , index);
 			freed = iwl_tx_queue_reclaim(priv, txq_id, index);
-			iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+			if (qc)
+				iwl_free_tfds_in_queue(priv, sta_id,
+						       tid, freed);
 
 			if (priv->mac80211_registered &&
 			    (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
@@ -2041,14 +2043,17 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
 			   tx_resp->failure_frame);
 
 		freed = iwl_tx_queue_reclaim(priv, txq_id, index);
-		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+		if (qc && likely(sta_id != IWL_INVALID_STATION))
+			iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
+		else if (sta_id == IWL_INVALID_STATION)
+			IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
 
 		if (priv->mac80211_registered &&
 		    (iwl_queue_space(&txq->q) > txq->q.low_mark))
 			iwl_wake_queue(priv, txq_id);
 	}
-
-	iwl_txq_check_empty(priv, sta_id, tid, txq_id);
+	if (qc && likely(sta_id != IWL_INVALID_STATION))
+		iwl_txq_check_empty(priv, sta_id, tid, txq_id);
 
 	if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
 		IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
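These hunks make the per-station/per-TID bookkeeping conditional: it only runs when the frame carried a QoS control field (qc is non-NULL) and the station lookup succeeded. A small, self-contained sketch of that guard pattern follows; the types and names are hypothetical stand-ins, not the driver's:

#include <stdbool.h>
#include <stdio.h>

#define INVALID_STATION 255	/* stands in for IWL_INVALID_STATION */

struct tx_status {
	const unsigned char *qc;	/* NULL for non-QoS (e.g. management) frames */
	int sta_id;
	int tid;
};

/* Hypothetical bookkeeping hook; the real driver updates per-TID counters. */
static void free_tfds(int sta_id, int tid, int freed)
{
	printf("free %d TFDs for sta %d tid %d\n", freed, sta_id, tid);
}

static void handle_reply_tx(const struct tx_status *s, int freed)
{
	/* Mirror of the patch: only touch per-station/per-TID state when the
	 * frame was a QoS frame and the station is known. */
	if (s->qc && s->sta_id != INVALID_STATION)
		free_tfds(s->sta_id, s->tid, freed);
	else if (s->sta_id == INVALID_STATION)
		printf("Station not known\n");
}

int main(void)
{
	unsigned char qc[2] = { 0, 0 };
	struct tx_status qos = { qc, 3, 0 }, mgmt = { NULL, 3, 0 };

	handle_reply_tx(&qos, 1);	/* updates bookkeeping */
	handle_reply_tx(&mgmt, 1);	/* skipped: no QoS control field */
	return 0;
}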
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 35f819ac87a3..1460116d329f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -346,6 +346,17 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
 	       !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
 }
 
+/*
+ * Static function to get the expected throughput from an iwl_scale_tbl_info
+ * that wraps a NULL pointer check
+ */
+static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
+{
+	if (tbl->expected_tpt)
+		return tbl->expected_tpt[rs_index];
+	return 0;
+}
+
 /**
  * rs_collect_tx_data - Update the success/failure sliding window
  *
@@ -353,19 +364,21 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
  * at this rate. window->data contains the bitmask of successful
  * packets.
  */
-static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
-			      int scale_index, s32 tpt, int attempts,
-			      int successes)
+static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
+			      int scale_index, int attempts, int successes)
 {
 	struct iwl_rate_scale_data *window = NULL;
 	static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
-	s32 fail_count;
+	s32 fail_count, tpt;
 
 	if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
 		return -EINVAL;
 
 	/* Select window for current tx bit rate */
-	window = &(windows[scale_index]);
+	window = &(tbl->win[scale_index]);
+
+	/* Get expected throughput */
+	tpt = get_expected_tpt(tbl, scale_index);
 
 	/*
 	 * Keep track of only the latest 62 tx frame attempts in this rate's
@@ -739,16 +752,6 @@ static bool table_type_matches(struct iwl_scale_tbl_info *a,
 	return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
 		(a->is_SGI == b->is_SGI);
 }
-/*
- * Static function to get the expected throughput from an iwl_scale_tbl_info
- * that wraps a NULL pointer check
- */
-static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
-{
-	if (tbl->expected_tpt)
-		return tbl->expected_tpt[rs_index];
-	return 0;
-}
 
 /*
  * mac80211 sends us Tx status
@@ -765,12 +768,10 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct iwl_priv *priv = (struct iwl_priv *)priv_r;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct iwl_rate_scale_data *window = NULL;
 	enum mac80211_rate_control_flags mac_flags;
 	u32 tx_rate;
 	struct iwl_scale_tbl_info tbl_type;
-	struct iwl_scale_tbl_info *curr_tbl, *other_tbl;
-	s32 tpt = 0;
+	struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
 
 	IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
 
@@ -853,7 +854,6 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
 		IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n");
 		return;
 	}
-	window = (struct iwl_rate_scale_data *)&(curr_tbl->win[0]);
 
 	/*
 	 * Updating the frame history depends on whether packets were
@@ -866,8 +866,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
 		tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
 		rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
 				&rs_index);
-		tpt = get_expected_tpt(curr_tbl, rs_index);
-		rs_collect_tx_data(window, rs_index, tpt,
+		rs_collect_tx_data(curr_tbl, rs_index,
 				   info->status.ampdu_ack_len,
 				   info->status.ampdu_ack_map);
 
@@ -897,19 +896,13 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
 		 * table as active/search.
 		 */
 		if (table_type_matches(&tbl_type, curr_tbl))
-			tpt = get_expected_tpt(curr_tbl, rs_index);
+			tmp_tbl = curr_tbl;
 		else if (table_type_matches(&tbl_type, other_tbl))
-			tpt = get_expected_tpt(other_tbl, rs_index);
+			tmp_tbl = other_tbl;
 		else
 			continue;
-
-		/* Constants mean 1 transmission, 0 successes */
-		if (i < retries)
-			rs_collect_tx_data(window, rs_index, tpt, 1,
-					   0);
-		else
-			rs_collect_tx_data(window, rs_index, tpt, 1,
-					   legacy_success);
+		rs_collect_tx_data(tmp_tbl, rs_index, 1,
+				   i < retries ? 0 : legacy_success);
 	}
 
 	/* Update success/fail counts if not searching for new mode */
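The rate-scaling refactor changes rs_collect_tx_data() to take the whole scale table and fetch both the window and the expected throughput itself through get_expected_tpt(), rather than having callers pass a pre-fetched window pointer and tpt value separately. A simplified, self-contained model of the resulting shape, with hypothetical struct and field names roughly mirroring iwl_scale_tbl_info:

#include <stdio.h>

#define RATE_COUNT 4

struct window {
	int counter;		/* attempts */
	int success_counter;
	int average_tpt;
};

struct scale_tbl {
	const int *expected_tpt;	/* may be NULL before the table is set up */
	struct window win[RATE_COUNT];
};

/* NULL-safe lookup, as in the patch's get_expected_tpt() */
static int get_expected_tpt(const struct scale_tbl *tbl, int rs_index)
{
	if (tbl->expected_tpt)
		return tbl->expected_tpt[rs_index];
	return 0;
}

/* After the refactor the collector picks its own window and its own expected
 * throughput from the same table, so a caller can no longer pair a window
 * from one table with a tpt value taken from another. */
static int collect_tx_data(struct scale_tbl *tbl, int scale_index,
			   int attempts, int successes)
{
	struct window *w;
	int tpt;

	if (scale_index < 0 || scale_index >= RATE_COUNT)
		return -1;

	w = &tbl->win[scale_index];
	tpt = get_expected_tpt(tbl, scale_index);

	w->counter += attempts;
	w->success_counter += successes;
	w->average_tpt = w->counter ? (tpt * w->success_counter) / w->counter : 0;
	return 0;
}

int main(void)
{
	static const int tpt_table[RATE_COUNT] = { 10, 20, 40, 80 };
	struct scale_tbl tbl = { .expected_tpt = tpt_table };

	collect_tx_data(&tbl, 2, 3, 2);	/* 3 attempts, 2 successes at index 2 */
	printf("avg tpt at index 2: %d\n", tbl.win[2].average_tpt);
	return 0;
}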
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index de3b3f403d1f..8b516c5ff0bb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -808,6 +808,18 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
 		}
 	}
 
+	/*
+	 * The above algorithm sometimes fails when the ucode
+	 * reports 0 for all chains. It's not clear why that
+	 * happens to start with, but it is then causing trouble
+	 * because this can make us enable more chains than the
+	 * hardware really has.
+	 *
+	 * To be safe, simply mask out any chains that we know
+	 * are not on the device.
+	 */
+	active_chains &= priv->hw_params.valid_rx_ant;
+
 	num_tx_chains = 0;
 	for (i = 0; i < NUM_RX_CHAINS; i++) {
 		/* loops on all the bits of
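active_chains and valid_rx_ant are per-antenna bitmasks, so the added line is a plain bitwise AND that drops any chain the calibration picked but the device does not actually have. A tiny illustration (the mask values below are made up for the demo):

#include <stdio.h>

int main(void)
{
	/* One bit per RX chain, e.g. A=0x1, B=0x2, C=0x4 (illustrative values) */
	unsigned char active_chains = 0x7;	/* calibration decided A, B and C */
	unsigned char valid_rx_ant  = 0x3;	/* device only has chains A and B */

	/* The line added to iwl_chain_noise_calibration(): never enable a
	 * chain the hardware does not physically have. */
	active_chains &= valid_rx_ant;

	printf("active_chains = 0x%x\n", active_chains);	/* prints 0x3 */
	return 0;
}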
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index db050b811232..3352f7086632 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -308,10 +308,13 @@ int iwl_hw_nic_init(struct iwl_priv *priv)
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	/* Allocate and init all Tx and Command queues */
-	ret = iwl_txq_ctx_reset(priv);
-	if (ret)
-		return ret;
+	/* Allocate or reset and init all Tx and Command queues */
+	if (!priv->txq) {
+		ret = iwl_txq_ctx_alloc(priv);
+		if (ret)
+			return ret;
+	} else
+		iwl_txq_ctx_reset(priv);
 
 	set_bit(STATUS_INIT, &priv->status);
 
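With this change the NIC init path allocates the Tx queue context only on first use and merely re-initializes it on later (re)starts, instead of tearing everything down and reallocating each time. A minimal sketch of that allocate-once/reset-afterwards pattern, using hypothetical names rather than the driver's real structures:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct txq {
	int read_ptr;
	int write_ptr;
};

struct dev_priv {
	struct txq *txq;	/* NULL until the first initialization */
	int num_queues;
};

/* Allocate the queue array once (stand-in for iwl_txq_ctx_alloc). */
static int txq_ctx_alloc(struct dev_priv *priv)
{
	priv->txq = calloc(priv->num_queues, sizeof(*priv->txq));
	return priv->txq ? 0 : -1;
}

/* Re-initialize the existing queues in place (stand-in for iwl_txq_ctx_reset). */
static void txq_ctx_reset(struct dev_priv *priv)
{
	memset(priv->txq, 0, priv->num_queues * sizeof(*priv->txq));
}

/* Mirrors the new iwl_hw_nic_init() logic: allocate on the first call,
 * only reset on every later call. */
static int nic_init(struct dev_priv *priv)
{
	if (!priv->txq) {
		if (txq_ctx_alloc(priv))
			return -1;
	} else
		txq_ctx_reset(priv);
	return 0;
}

int main(void)
{
	struct dev_priv priv = { .txq = NULL, .num_queues = 5 };

	nic_init(&priv);	/* first bring-up: allocates */
	nic_init(&priv);	/* e.g. after resume: only resets */
	printf("txq %p stays valid across restarts\n", (void *)priv.txq);
	free(priv.txq);
	return 0;
}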
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 4ef7739f9e8e..732590f5fe30 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -442,7 +442,8 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
 /*****************************************************
  * TX
  ******************************************************/
-int iwl_txq_ctx_reset(struct iwl_priv *priv);
+int iwl_txq_ctx_alloc(struct iwl_priv *priv);
+void iwl_txq_ctx_reset(struct iwl_priv *priv);
 void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
 int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
 				 struct iwl_tx_queue *txq,
@@ -456,6 +457,8 @@ void iwl_free_tfds_in_queue(struct iwl_priv *priv,
 void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
 int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 		      int slots_num, u32 txq_id);
+void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+			int slots_num, u32 txq_id);
 void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
 int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn);
 int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index f0b7e6cfbe4f..8dd0c036d547 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -194,10 +194,34 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
 	struct iwl_queue *q = &txq->q;
 	struct device *dev = &priv->pci_dev->dev;
 	int i;
+	bool huge = false;
 
 	if (q->n_bd == 0)
 		return;
 
+	for (; q->read_ptr != q->write_ptr;
+	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+		/* we have no way to tell if it is a huge cmd ATM */
+		i = get_cmd_index(q, q->read_ptr, 0);
+
+		if (txq->meta[i].flags & CMD_SIZE_HUGE) {
+			huge = true;
+			continue;
+		}
+
+		pci_unmap_single(priv->pci_dev,
+				 pci_unmap_addr(&txq->meta[i], mapping),
+				 pci_unmap_len(&txq->meta[i], len),
+				 PCI_DMA_BIDIRECTIONAL);
+	}
+	if (huge) {
+		i = q->n_window;
+		pci_unmap_single(priv->pci_dev,
+				 pci_unmap_addr(&txq->meta[i], mapping),
+				 pci_unmap_len(&txq->meta[i], len),
+				 PCI_DMA_BIDIRECTIONAL);
+	}
+
 	/* De-alloc array of command/tx buffers */
 	for (i = 0; i <= TFD_CMD_SLOTS; i++)
 		kfree(txq->cmd[i]);
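The new loop in iwl_cmd_queue_free() walks the outstanding command ring and unmaps each slot's DMA buffer, except that a "huge" command does not live in its own slot but in the single spare entry at index n_window, so it is only flagged during the walk and unmapped once at the end. A simplified user-space model of that walk (no real DMA, hypothetical names):

#include <stdbool.h>
#include <stdio.h>

#define N_WINDOW  4		/* command slots visible to the driver */
#define FLAG_HUGE 0x1

struct meta {
	int flags;
	bool mapped;		/* stand-in for a live DMA mapping */
};

/* One extra entry past the window holds the single "huge" buffer. */
static struct meta metas[N_WINDOW + 1];

static void unmap(struct meta *m)
{
	m->mapped = false;	/* stand-in for pci_unmap_single() */
}

/* Mirrors the cleanup added to iwl_cmd_queue_free(): ordinary slots are
 * unmapped in place, a huge command is remembered and its buffer unmapped
 * once from the spare slot at index N_WINDOW. */
static void cmd_queue_free(int read_ptr, int write_ptr)
{
	bool huge = false;

	for (; read_ptr != write_ptr; read_ptr = (read_ptr + 1) % N_WINDOW) {
		if (metas[read_ptr].flags & FLAG_HUGE) {
			huge = true;
			continue;
		}
		unmap(&metas[read_ptr]);
	}
	if (huge)
		unmap(&metas[N_WINDOW]);
}

int main(void)
{
	metas[0].mapped = true;		/* ordinary command, mapped in its slot   */
	metas[1].flags = FLAG_HUGE;	/* huge command: flag in its own slot ... */
	metas[N_WINDOW].mapped = true;	/* ... but the mapping in the spare one   */

	cmd_queue_free(0, 2);
	printf("slot0 mapped=%d spare mapped=%d\n",
	       metas[0].mapped, metas[N_WINDOW].mapped);	/* both 0 now */
	return 0;
}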
@@ -410,6 +434,26 @@ out_free_arrays:
 }
 EXPORT_SYMBOL(iwl_tx_queue_init);
 
+void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+			int slots_num, u32 txq_id)
+{
+	int actual_slots = slots_num;
+
+	if (txq_id == IWL_CMD_QUEUE_NUM)
+		actual_slots++;
+
+	memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
+
+	txq->need_update = 0;
+
+	/* Initialize queue's high/low-water marks, and head/tail indexes */
+	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+	/* Tell device where to find queue */
+	priv->cfg->ops->lib->txq_init(priv, txq);
+}
+EXPORT_SYMBOL(iwl_tx_queue_reset);
+
 /**
  * iwl_hw_txq_ctx_free - Free TXQ Context
  *
@@ -421,8 +465,7 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
 
 	/* Tx queues */
 	if (priv->txq) {
-		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
-		     txq_id++)
+		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
 			if (txq_id == IWL_CMD_QUEUE_NUM)
 				iwl_cmd_queue_free(priv);
 			else
@@ -438,15 +481,15 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
 EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
 
 /**
- * iwl_txq_ctx_reset - Reset TX queue context
- * Destroys all DMA structures and initialize them again
+ * iwl_txq_ctx_alloc - allocate TX queue context
+ * Allocate all Tx DMA structures and initialize them
  *
  * @param priv
  * @return error code
  */
-int iwl_txq_ctx_reset(struct iwl_priv *priv)
+int iwl_txq_ctx_alloc(struct iwl_priv *priv)
 {
-	int ret = 0;
+	int ret;
 	int txq_id, slots_num;
 	unsigned long flags;
 
@@ -504,8 +547,31 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
 	return ret;
 }
 
+void iwl_txq_ctx_reset(struct iwl_priv *priv)
+{
+	int txq_id, slots_num;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	/* Turn off all Tx DMA fifos */
+	priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+	/* Tell NIC where to find the "keep warm" buffer */
+	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* Alloc and init all Tx queues, including the command queue (#4) */
+	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+		slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
+			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+		iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
+	}
+}
+
 /**
- * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
+ * iwl_txq_ctx_stop - Stop all Tx DMA channels
 */
 void iwl_txq_ctx_stop(struct iwl_priv *priv)
 {
@@ -525,9 +591,6 @@ void iwl_txq_ctx_stop(struct iwl_priv *priv)
 			    1000);
 	}
 	spin_unlock_irqrestore(&priv->lock, flags);
-
-	/* Deallocate memory for all Tx queues */
-	iwl_hw_txq_ctx_free(priv);
 }
 EXPORT_SYMBOL(iwl_txq_ctx_stop);
 
@@ -1050,6 +1113,14 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 
 	spin_lock_irqsave(&priv->hcmd_lock, flags);
 
+	/* If this is a huge cmd, mark the huge flag also on the meta.flags
+	 * of the _original_ cmd. This is used for DMA mapping clean up.
+	 */
+	if (cmd->flags & CMD_SIZE_HUGE) {
+		idx = get_cmd_index(q, q->write_ptr, 0);
+		txq->meta[idx].flags = CMD_SIZE_HUGE;
+	}
+
 	idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
 	out_cmd = txq->cmd[idx];
 	out_meta = &txq->meta[idx];
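The enqueue hunk deliberately computes two different indexes: the CMD_SIZE_HUGE marker goes to the command's ordinary ring slot (index computed with the huge flag off), while the payload and its DMA mapping go to the slot returned when the huge flag is on, i.e. the spare entry past the window. A small sketch of how such an index helper behaves; the names and the simplified helper are illustrative, not the driver's exact code:

#include <stdio.h>

#define N_WINDOW  4
#define FLAG_HUGE 0x1

struct meta { int flags; };

/* Simplified stand-in for the driver's get_cmd_index(): a huge command is
 * stored in the single spare slot past the window, an ordinary one in the
 * slot selected by the ring pointer. */
static int get_cmd_index(int ptr, int huge)
{
	if (huge)
		return N_WINDOW;
	return ptr % N_WINDOW;
}

int main(void)
{
	struct meta metas[N_WINDOW + 1] = { { 0 } };
	int write_ptr = 1;
	int is_huge = 1;	/* pretend we queue a huge (e.g. scan) command */
	int idx;

	/* As in the patched iwl_enqueue_hcmd(): remember on the *original*
	 * slot that this entry is huge, so the cleanup path can tell. */
	if (is_huge) {
		idx = get_cmd_index(write_ptr, 0);
		metas[idx].flags = FLAG_HUGE;
	}

	/* The payload and its DMA mapping then use the huge/spare slot. */
	idx = get_cmd_index(write_ptr, is_huge);
	printf("flag slot = %d, payload slot = %d\n",
	       get_cmd_index(write_ptr, 0), idx);	/* prints 1 and 4 */
	return 0;
}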
@@ -1227,6 +1298,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
 	struct iwl_device_cmd *cmd;
 	struct iwl_cmd_meta *meta;
+	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
 
 	/* If a Tx command is being handled and it isn't in the actual
 	 * command queue then there a command routing bug has been introduced
@@ -1240,9 +1312,17 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 		return;
 	}
 
-	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
-	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
-	meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];
+	/* If this is a huge cmd, clear the huge flag on the meta.flags
+	 * of the _original_ cmd. So that iwl_cmd_queue_free won't unmap
+	 * the DMA buffer for the scan (huge) command.
+	 */
+	if (huge) {
+		cmd_index = get_cmd_index(&txq->q, index, 0);
+		txq->meta[cmd_index].flags = 0;
+	}
+	cmd_index = get_cmd_index(&txq->q, index, huge);
+	cmd = txq->cmd[cmd_index];
+	meta = &txq->meta[cmd_index];
 
 	pci_unmap_single(priv->pci_dev,
 			 pci_unmap_addr(meta, mapping),
@@ -1264,6 +1344,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 			       get_cmd_string(cmd->hdr.cmd));
 		wake_up_interruptible(&priv->wait_command_queue);
 	}
+	meta->flags = 0;
 }
 EXPORT_SYMBOL(iwl_tx_cmd_complete);
 
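Together with the enqueue-side change, this gives the CMD_SIZE_HUGE marker a clear lifetime: it is set on the command's original slot when the command is queued and cleared (along with the completed entry's own meta->flags) when the response arrives, so a later iwl_cmd_queue_free() only unmaps buffers that are still outstanding. A compact model of that round trip, again with hypothetical names:

#include <stdio.h>

#define N_WINDOW  4
#define FLAG_HUGE 0x1

struct meta {
	int flags;
	int mapped;	/* stand-in for a live DMA mapping */
};

static struct meta metas[N_WINDOW + 1];	/* last entry is the huge/spare slot */

static int get_cmd_index(int ptr, int huge)
{
	return huge ? N_WINDOW : ptr % N_WINDOW;
}

/* Queue a command: mark the original slot if huge, map the payload slot. */
static void enqueue(int ptr, int huge)
{
	if (huge)
		metas[get_cmd_index(ptr, 0)].flags = FLAG_HUGE;
	metas[get_cmd_index(ptr, huge)].mapped = 1;
}

/* Complete a command: clear the huge marker so the free path will not try
 * to unmap this buffer again, unmap it here, and clear the entry's flags. */
static void complete(int ptr, int huge)
{
	struct meta *m;

	if (huge)
		metas[get_cmd_index(ptr, 0)].flags = 0;
	m = &metas[get_cmd_index(ptr, huge)];
	m->mapped = 0;
	m->flags = 0;
}

int main(void)
{
	enqueue(1, 1);	/* huge command at ring position 1 */
	printf("after enqueue:  slot1.flags=%d spare.mapped=%d\n",
	       metas[1].flags, metas[N_WINDOW].mapped);
	complete(1, 1);
	printf("after complete: slot1.flags=%d spare.mapped=%d\n",
	       metas[1].flags, metas[N_WINDOW].mapped);
	return 0;
}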