author     Emmanuel Grumbach <emmanuel.grumbach@intel.com>    2012-11-14 16:32:57 -0500
committer  Johannes Berg <johannes.berg@intel.com>            2012-11-19 09:04:41 -0500
commit     f6d497cdff2f4a8ee4387e2c01a1d107b5a25b02 (patch)
tree       b36afcd9d76d796e5e780cde8203ebee75586346 /drivers/net/wireless/iwlwifi
parent     f02831be962c7be68c72110fa779e916ab1a8cdd (diff)
iwlwifi: merge 2 functions in reclaim flow
One is just a wrapper of the second, so squash them.

Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
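The structural effect of the squash is that the exported function now takes the queue spin lock itself and funnels every early exit through a single "out:" label so the lock is always released. A minimal, self-contained sketch of that pattern (hypothetical names, a pthread mutex standing in for the kernel spinlock; illustration only, not the driver code):

	#include <pthread.h>

	#define RING_SIZE 256	/* power of two, like n_bd in the driver */

	struct ring {
		pthread_mutex_t lock;
		int read_ptr;
	};

	/* After the squash: one function locks, reclaims, and has one exit path. */
	static void ring_reclaim(struct ring *r, int index)
	{
		pthread_mutex_lock(&r->lock);

		if (r->read_ptr == index)	/* nothing to free: bail out early, like the patch does */
			goto out;

		while (r->read_ptr != index) {	/* free up to, but not including, index */
			/* ... release the buffer at read_ptr ... */
			r->read_ptr = (r->read_ptr + 1) & (RING_SIZE - 1);
		}
	out:
		pthread_mutex_unlock(&r->lock);	/* single unlock for all paths */
	}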
Diffstat (limited to 'drivers/net/wireless/iwlwifi')
-rw-r--r--    drivers/net/wireless/iwlwifi/pcie/tx.c    53
1 file changed, 20 insertions(+), 33 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 4c03b8288c5..ba2a78c150b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -892,39 +892,45 @@ static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie,
 }
 
 /* Frees buffers until index _not_ inclusive */
-static int iwl_pcie_txq_reclaim(struct iwl_trans *trans, int txq_id, int index,
-				struct sk_buff_head *skbs)
+void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+			    struct sk_buff_head *skbs)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+	/* n_bd is usually 256 => n_bd - 1 = 0xff */
+	int tfd_num = ssn & (txq->q.n_bd - 1);
 	struct iwl_queue *q = &txq->q;
 	int last_to_free;
-	int freed = 0;
 
 	/* This function is not meant to release cmd queue*/
 	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
-		return 0;
+		return;
 
-	lockdep_assert_held(&txq->lock);
+	spin_lock(&txq->lock);
+
+	if (txq->q.read_ptr == tfd_num)
+		goto out;
+
+	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
+			   txq_id, txq->q.read_ptr, tfd_num, ssn);
 
 	/*Since we free until index _not_ inclusive, the one before index is
 	 * the last we will free. This one must be used */
-	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);
+	last_to_free = iwl_queue_dec_wrap(tfd_num, q->n_bd);
 
-	if ((index >= q->n_bd) ||
-	    (iwl_queue_used(q, last_to_free) == 0)) {
+	if (iwl_queue_used(q, last_to_free) == 0) {
 		IWL_ERR(trans,
 			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
 			__func__, txq_id, last_to_free, q->n_bd,
 			q->write_ptr, q->read_ptr);
-		return 0;
+		goto out;
 	}
 
 	if (WARN_ON(!skb_queue_empty(skbs)))
-		return 0;
+		goto out;
 
 	for (;
-	     q->read_ptr != index;
+	     q->read_ptr != tfd_num;
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
 		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
@@ -937,32 +943,13 @@ static int iwl_pcie_txq_reclaim(struct iwl_trans *trans, int txq_id, int index,
 		iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
 
 		iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
-		freed++;
 	}
 
 	iwl_pcie_txq_progress(trans_pcie, txq);
 
-	return freed;
-}
-
-void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
-			    struct sk_buff_head *skbs)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
-	/* n_bd is usually 256 => n_bd - 1 = 0xff */
-	int tfd_num = ssn & (txq->q.n_bd - 1);
-
-	spin_lock(&txq->lock);
-
-	if (txq->q.read_ptr != tfd_num) {
-		IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
-				   txq_id, txq->q.read_ptr, tfd_num, ssn);
-		iwl_pcie_txq_reclaim(trans, txq_id, tfd_num, skbs);
-		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
-			iwl_wake_queue(trans, txq);
-	}
-
+	if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+		iwl_wake_queue(trans, txq);
+out:
 	spin_unlock(&txq->lock);
 }
 
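For readers less familiar with the driver's transport split: iwl_trans_pcie_reclaim() is the PCIe implementation of the transport "reclaim" operation, so after this patch the merged function is what higher layers reach through the transport API. Roughly how that indirection looks (paraphrased and simplified from iwl-trans.h and pcie/trans.c of this era; not part of this patch):

	/* iwl-trans.h: thin inline that upper layers call */
	static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
					     int ssn, struct sk_buff_head *skbs)
	{
		trans->ops->reclaim(trans, queue, ssn, skbs);
	}

	/* pcie/trans.c: the PCIe ops table points the op at the merged function */
	static const struct iwl_trans_ops trans_ops_pcie = {
		/* ... */
		.reclaim = iwl_trans_pcie_reclaim,
		/* ... */
	};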