Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-tx.c')
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-tx.c	| 64
1 file changed, 33 insertions(+), 31 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 6eff3d4d0616..10701b8eef23 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -60,7 +60,8 @@ static const u16 default_tid_to_tx_fifo[] = {
 static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
 				    struct iwl_dma_ptr *ptr, size_t size)
 {
-	ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
+	ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
+				       GFP_KERNEL);
 	if (!ptr->addr)
 		return -ENOMEM;
 	ptr->size = size;
@@ -73,21 +74,20 @@ static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
 	if (unlikely(!ptr->addr))
 		return;
 
-	pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
+	dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
 	memset(ptr, 0, sizeof(*ptr));
 }
 
 /**
  * iwl_txq_update_write_ptr - Send new write index to hardware
  */
-int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 {
 	u32 reg = 0;
-	int ret = 0;
 	int txq_id = txq->q.id;
 
 	if (txq->need_update == 0)
-		return ret;
+		return;
 
 	/* if we're trying to save power */
 	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
@@ -101,7 +101,7 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 				       txq_id, reg);
 			iwl_set_bit(priv, CSR_GP_CNTRL,
 				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-			return ret;
+			return;
 		}
 
 		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
@@ -114,8 +114,6 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 			txq->q.write_ptr | (txq_id << 8));
 
 	txq->need_update = 0;
-
-	return ret;
 }
 EXPORT_SYMBOL(iwl_txq_update_write_ptr);
 
@@ -146,7 +144,7 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 {
 	struct iwl_tx_queue *txq = &priv->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
-	struct pci_dev *dev = priv->pci_dev;
+	struct device *dev = &priv->pci_dev->dev;
 	int i;
 
 	if (q->n_bd == 0)
@@ -163,8 +161,8 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 
 	/* De-alloc circular buffer of TFDs */
 	if (txq->q.n_bd)
-		pci_free_consistent(dev, priv->hw_params.tfd_size *
-				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+		dma_free_coherent(dev, priv->hw_params.tfd_size *
+				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
 
 	/* De-alloc array of per-TFD driver data */
 	kfree(txq->txb);
@@ -193,7 +191,7 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
 {
 	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
 	struct iwl_queue *q = &txq->q;
-	struct pci_dev *dev = priv->pci_dev;
+	struct device *dev = &priv->pci_dev->dev;
 	int i;
 
 	if (q->n_bd == 0)
@@ -205,8 +203,8 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
 
 	/* De-alloc circular buffer of TFDs */
 	if (txq->q.n_bd)
-		pci_free_consistent(dev, priv->hw_params.tfd_size *
-				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+		dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
+				  txq->tfds, txq->q.dma_addr);
 
 	/* deallocate arrays */
 	kfree(txq->cmd);
@@ -297,7 +295,7 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
 static int iwl_tx_queue_alloc(struct iwl_priv *priv,
 			      struct iwl_tx_queue *txq, u32 id)
 {
-	struct pci_dev *dev = priv->pci_dev;
+	struct device *dev = &priv->pci_dev->dev;
 	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
 
 	/* Driver private data, only for Tx (not command) queues,
@@ -316,8 +314,8 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
 
 	/* Circular buffer of transmit frame descriptors (TFDs),
 	 * shared with device */
-	txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);
-
+	txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
+				       GFP_KERNEL);
 	if (!txq->tfds) {
 		IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
 		goto error;
@@ -745,7 +743,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	u8 tid = 0;
 	u8 *qc = NULL;
 	unsigned long flags;
-	int ret;
 
 	spin_lock_irqsave(&priv->lock, flags);
 	if (iwl_is_rfkill(priv)) {
@@ -820,8 +817,10 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 			hdr->seq_ctrl |= cpu_to_le16(seq_number);
 			seq_number += 0x10;
 			/* aggregation is on for this <sta,tid> */
-			if (info->flags & IEEE80211_TX_CTL_AMPDU)
+			if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+			    priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
 				txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
+			}
 		}
 
 		txq = &priv->txq[txq_id];
@@ -963,7 +962,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	/* Tell device the write index *just past* this latest filled TFD */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
-	ret = iwl_txq_update_write_ptr(priv, txq);
+	iwl_txq_update_write_ptr(priv, txq);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	/*
@@ -977,9 +976,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	if (sta_priv && sta_priv->client)
 		atomic_inc(&sta_priv->pending_frames);
 
-	if (ret)
-		return ret;
-
 	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
 		if (wait_write_ptr) {
 			spin_lock_irqsave(&priv->lock, flags);
@@ -1018,7 +1014,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	struct iwl_cmd_meta *out_meta;
 	dma_addr_t phys_addr;
 	unsigned long flags;
-	int len, ret;
+	int len;
 	u32 idx;
 	u16 fix_size;
 
@@ -1115,10 +1111,10 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 
 	/* Increment and update queue's write index */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
-	ret = iwl_txq_update_write_ptr(priv, txq);
+	iwl_txq_update_write_ptr(priv, txq);
 
 	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-	return ret ? ret : idx;
+	return idx;
 }
 
 static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
@@ -1260,6 +1256,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 
 	if (!(meta->flags & CMD_ASYNC)) {
 		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+		IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n",
+			       get_cmd_string(cmd->hdr.cmd));
 		wake_up_interruptible(&priv->wait_command_queue);
 	}
 }
@@ -1346,7 +1344,7 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
 {
 	int tx_fifo_id, txq_id, sta_id, ssn = -1;
 	struct iwl_tid_data *tid_data;
-	int ret, write_ptr, read_ptr;
+	int write_ptr, read_ptr;
 	unsigned long flags;
 
 	if (!ra) {
@@ -1398,13 +1396,17 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
 	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
 
 	spin_lock_irqsave(&priv->lock, flags);
-	ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
+	/*
+	 * the only reason this call can fail is queue number out of range,
+	 * which can happen if uCode is reloaded and all the station
+	 * information are lost. if it is outside the range, there is no need
+	 * to deactivate the uCode queue, just return "success" to allow
+	 * mac80211 to clean up it own data.
+	 */
+	priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
 						   tx_fifo_id);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	if (ret)
-		return ret;
-
 	ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
 
 	return 0;
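
Editor's note: two threads run through this patch. The deprecated pci_*_consistent() DMA helpers are replaced by the generic dma_*_coherent() API, and iwl_txq_update_write_ptr() is changed to return void so its callers no longer thread a `ret` value through paths that cannot usefully act on it. On the first point, pci_alloc_consistent() was a thin PCI wrapper that always allocated with GFP_ATOMIC, while dma_alloc_coherent() takes the gfp flags explicitly, which is why the new call sites can pass GFP_KERNEL from process context. A minimal before/after sketch of that conversion, using hypothetical names (struct my_dma_buf and the my_buf_* helpers are illustrative, not from iwl-tx.c):

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>

struct my_dma_buf {		/* hypothetical; mirrors struct iwl_dma_ptr */
	void *addr;
	dma_addr_t dma;
	size_t size;
};

/* Before: PCI-specific API; gfp flags are implicit (GFP_ATOMIC). */
static int my_buf_alloc_old(struct pci_dev *pdev, struct my_dma_buf *b,
			    size_t size)
{
	b->addr = pci_alloc_consistent(pdev, size, &b->dma);
	if (!b->addr)
		return -ENOMEM;
	b->size = size;
	return 0;
}

/* After: generic DMA API on &pdev->dev; the caller states its context. */
static int my_buf_alloc_new(struct pci_dev *pdev, struct my_dma_buf *b,
			    size_t size)
{
	b->addr = dma_alloc_coherent(&pdev->dev, size, &b->dma, GFP_KERNEL);
	if (!b->addr)
		return -ENOMEM;
	b->size = size;
	return 0;
}

/* The free side is the same substitution in either variant. */
static void my_buf_free(struct pci_dev *pdev, struct my_dma_buf *b)
{
	dma_free_coherent(&pdev->dev, b->size, b->addr, b->dma);
	memset(b, 0, sizeof(*b));
}

The free path follows mechanically: pci_free_consistent(pdev, ...) becomes dma_free_coherent(&pdev->dev, ...), exactly as in iwl_free_dma_ptr() and the queue teardown paths above.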