author    Johannes Berg <johannes.berg@intel.com>  2012-12-27 15:43:48 -0500
committer Johannes Berg <johannes.berg@intel.com>  2013-02-05 08:39:12 -0500
commit    2bfb50924c7e92362ac937aef2ab56bc7bd3ca52 (patch)
tree      26518070c19eb9cfb38ab48cda31fe16c1235433 /drivers/net/wireless/iwlwifi/mvm/tx.c
parent    c9f7a8ab7792b48259af6e94706a5d02dd74caef (diff)
iwlwifi: use threaded interrupt handler
With new transports coming up, move to threaded interrupt handling now. This has the advantage that we can use the same locking scheme with all the different transports we may need to implement.

Note that the TX path obviously still runs in a tasklet, so some spin_lock() calls need to change to spin_lock_bh() calls to properly lock out the TX path.

In my test on a Calpella platform this has no impact on throughput or latency.

Also add lockdep annotations to catch lockups that would result from sending synchronous commands, or taking locks that interact with them, from the irq thread.

Reviewed-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
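[Editor's note] For context, a minimal sketch of the split this commit relies on, using the generic request_threaded_irq() API. The my_trans structure and all handler names here are hypothetical illustrations, not the actual iwlwifi code (which lives in the pcie transport):

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    /* Hypothetical per-device context; the real iwlwifi state lives
     * in the pcie transport and has different names. */
    struct my_trans {
            struct pci_dev *pdev;
    };

    /* Hard-irq half: runs in interrupt context. It should only ack
     * and mask the device interrupt, then ask for the thread to be
     * woken. */
    static irqreturn_t my_isr(int irq, void *data)
    {
            /* ... read and mask the interrupt cause registers ... */
            return IRQ_WAKE_THREAD;
    }

    /* Threaded half: runs in process context, so it may sleep, and
     * the same locking scheme works for every transport. */
    static irqreturn_t my_irq_thread(int irq, void *data)
    {
            /* ... handle RX, firmware notifications, etc. ... */
            return IRQ_HANDLED;
    }

    static int my_setup_irq(struct my_trans *trans)
    {
            /* IRQF_SHARED: legacy PCI interrupt lines may be shared. */
            return request_threaded_irq(trans->pdev->irq, my_isr,
                                        my_irq_thread, IRQF_SHARED,
                                        "my-wifi", trans);
    }

The hard-irq half stays minimal and atomic; the work that previously had to fit interrupt-context constraints moves to the thread, which runs in process context and may sleep.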
Diffstat (limited to 'drivers/net/wireless/iwlwifi/mvm/tx.c')
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c  |  12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index cada8efe0cca..6b67ce3f679c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -620,7 +620,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 			seq_ctl = le16_to_cpu(hdr->seq_ctrl);
 		}
 
-		ieee80211_tx_status(mvm->hw, skb);
+		ieee80211_tx_status_ni(mvm->hw, skb);
 	}
 
 	if (txq_id >= IWL_FIRST_AMPDU_QUEUE) {
@@ -663,12 +663,12 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 		struct iwl_mvm_tid_data *tid_data =
 			&mvmsta->tid_data[tid];
 
-		spin_lock(&mvmsta->lock);
+		spin_lock_bh(&mvmsta->lock);
 		tid_data->next_reclaimed = next_reclaimed;
 		IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n",
 				   next_reclaimed);
 		iwl_mvm_check_ratid_empty(mvm, sta, tid);
-		spin_unlock(&mvmsta->lock);
+		spin_unlock_bh(&mvmsta->lock);
 	}
 
 #ifdef CONFIG_PM_SLEEP
@@ -832,7 +832,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
 		return 0;
 	}
 
-	spin_lock(&mvmsta->lock);
+	spin_lock_bh(&mvmsta->lock);
 
 	__skb_queue_head_init(&reclaimed_skbs);
 
@@ -886,13 +886,13 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
 		}
 	}
 
-	spin_unlock(&mvmsta->lock);
+	spin_unlock_bh(&mvmsta->lock);
 
 	rcu_read_unlock();
 
 	while (!skb_queue_empty(&reclaimed_skbs)) {
 		skb = __skb_dequeue(&reclaimed_skbs);
-		ieee80211_tx_status(mvm->hw, skb);
+		ieee80211_tx_status_ni(mvm->hw, skb);
 	}
 
 	return 0;
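[Editor's note] Why both substitutions above are needed: the reclaim path now runs in the irq thread, i.e. in process context, while the TX path still runs in a tasklet (softirq). A plain spin_lock() taken in process context can deadlock if the tasklet fires on the same CPU and spins on the lock already held, so the thread must use the _bh variants, which disable bottom halves. Likewise, ieee80211_tx_status_ni() is mac80211's process-context wrapper that calls ieee80211_tx_status() under local_bh_disable()/local_bh_enable(). A minimal sketch of the pattern, with hypothetical tid_state/reclaim_update/tx_path_update names:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* Hypothetical state shared between the TX tasklet (softirq
     * context) and the threaded irq handler (process context). */
    struct tid_state {
            spinlock_t lock;
            u16 next_reclaimed;
    };

    /* Process context (irq thread): spin_lock_bh() disables bottom
     * halves, so the TX tasklet cannot run on this CPU and spin
     * forever on a lock we already hold. */
    static void reclaim_update(struct tid_state *st, u16 next)
    {
            spin_lock_bh(&st->lock);
            st->next_reclaimed = next;
            spin_unlock_bh(&st->lock);
    }

    /* Softirq context (the TX tasklet): a plain spin_lock() is
     * enough here, since one softirq cannot be preempted by another
     * on the same CPU. */
    static void tx_path_update(struct tid_state *st)
    {
            spin_lock(&st->lock);
            /* ... queue the frame, advance per-TID sequence state ... */
            spin_unlock(&st->lock);
    }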