aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/wireless/iwlwifi/iwl-tx.c
diff options
context:
space:
mode:
authorEmmanuel Grumbach <emmanuel.grumbach@intel.com>2011-07-08 11:46:10 -0400
committerJohn W. Linville <linville@tuxdriver.com>2011-07-11 15:02:00 -0400
commit1359ca4f305a1680ea6a1347a43bea76c352097c (patch)
treebcd25bd2b13668ea5ed95c15a6bb5593bcac5362 /drivers/net/wireless/iwlwifi/iwl-tx.c
parentafaf6b5742f85bab46232faae97fdd1493061173 (diff)
iwlagn: add an API to free the TX context
Tx free functions move to the transport layer. Unify the functions that deal with tx queues and the cmd queue. Since the CMD queue is not fully allocated, but uses the q->n_bd / q->window trick, the release flow of the TX queue and CMD queue was different. iwlagn_txq_free_tfd now receives the index of the TFD to be freed, which allows unifying the release flow for all the queues. Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com> Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com> Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-tx.c')
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c108
1 file changed, 6 insertions, 102 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index dc8f63f81cfa..36b643a385be 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -157,14 +157,15 @@ static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
157 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] 157 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
158 * @priv - driver private data 158 * @priv - driver private data
159 * @txq - tx queue 159 * @txq - tx queue
160 * @index - the index of the TFD to be freed
160 * 161 *
161 * Does NOT advance any TFD circular buffer read/write indexes 162 * Does NOT advance any TFD circular buffer read/write indexes
162 * Does NOT free the TFD itself (which is within circular buffer) 163 * Does NOT free the TFD itself (which is within circular buffer)
163 */ 164 */
164void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) 165void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
166 int index)
165{ 167{
166 struct iwl_tfd *tfd_tmp = txq->tfds; 168 struct iwl_tfd *tfd_tmp = txq->tfds;
167 int index = txq->q.read_ptr;
168 169
169 iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index], 170 iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index],
170 DMA_TO_DEVICE); 171 DMA_TO_DEVICE);
@@ -173,12 +174,12 @@ void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
173 if (txq->txb) { 174 if (txq->txb) {
174 struct sk_buff *skb; 175 struct sk_buff *skb;
175 176
176 skb = txq->txb[txq->q.read_ptr].skb; 177 skb = txq->txb[index].skb;
177 178
178 /* can be called from irqs-disabled context */ 179 /* can be called from irqs-disabled context */
179 if (skb) { 180 if (skb) {
180 dev_kfree_skb_any(skb); 181 dev_kfree_skb_any(skb);
181 txq->txb[txq->q.read_ptr].skb = NULL; 182 txq->txb[index].skb = NULL;
182 } 183 }
183 } 184 }
184} 185}
@@ -232,108 +233,11 @@ void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
232 return; 233 return;
233 234
234 while (q->write_ptr != q->read_ptr) { 235 while (q->write_ptr != q->read_ptr) {
235 iwlagn_txq_free_tfd(priv, txq); 236 iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
236 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); 237 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
237 } 238 }
238} 239}
239 240
240/**
241 * iwl_tx_queue_free - Deallocate DMA queue.
242 * @txq: Transmit queue to deallocate.
243 *
244 * Empty queue by removing and destroying all BD's.
245 * Free all buffers.
246 * 0-fill, but do not free "txq" descriptor structure.
247 */
248void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
249{
250 struct iwl_tx_queue *txq = &priv->txq[txq_id];
251 struct device *dev = priv->bus.dev;
252 int i;
253
254 iwl_tx_queue_unmap(priv, txq_id);
255
256 /* De-alloc array of command/tx buffers */
257 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
258 kfree(txq->cmd[i]);
259
260 /* De-alloc circular buffer of TFDs */
261 if (txq->q.n_bd)
262 dma_free_coherent(dev, priv->hw_params.tfd_size *
263 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
264
265 /* De-alloc array of per-TFD driver data */
266 kfree(txq->txb);
267 txq->txb = NULL;
268
269 /* deallocate arrays */
270 kfree(txq->cmd);
271 kfree(txq->meta);
272 txq->cmd = NULL;
273 txq->meta = NULL;
274
275 /* 0-fill queue descriptor structure */
276 memset(txq, 0, sizeof(*txq));
277}
278
279/**
280 * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
281 */
282void iwl_cmd_queue_unmap(struct iwl_priv *priv)
283{
284 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
285 struct iwl_queue *q = &txq->q;
286 int i;
287
288 if (q->n_bd == 0)
289 return;
290
291 while (q->read_ptr != q->write_ptr) {
292 i = get_cmd_index(q, q->read_ptr);
293
294 iwlagn_unmap_tfd(priv, &txq->meta[i], &txq->tfds[i],
295 DMA_BIDIRECTIONAL);
296 txq->meta[i].flags = 0;
297
298 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
299 }
300}
301
302/**
303 * iwl_cmd_queue_free - Deallocate DMA queue.
304 * @txq: Transmit queue to deallocate.
305 *
306 * Empty queue by removing and destroying all BD's.
307 * Free all buffers.
308 * 0-fill, but do not free "txq" descriptor structure.
309 */
310void iwl_cmd_queue_free(struct iwl_priv *priv)
311{
312 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
313 struct device *dev = priv->bus.dev;
314 int i;
315
316 iwl_cmd_queue_unmap(priv);
317
318 /* De-alloc array of command/tx buffers */
319 for (i = 0; i < TFD_CMD_SLOTS; i++)
320 kfree(txq->cmd[i]);
321
322 /* De-alloc circular buffer of TFDs */
323 if (txq->q.n_bd)
324 dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
325 txq->tfds, txq->q.dma_addr);
326
327 /* deallocate arrays */
328 kfree(txq->cmd);
329 kfree(txq->meta);
330 txq->cmd = NULL;
331 txq->meta = NULL;
332
333 /* 0-fill queue descriptor structure */
334 memset(txq, 0, sizeof(*txq));
335}
336
337/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** 241/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
338 * DMA services 242 * DMA services
339 * 243 *