Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-trans.c')
-rw-r--r-- | drivers/net/wireless/iwlwifi/iwl-trans.c | 1170 |
1 files changed, 1170 insertions, 0 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c
new file mode 100644
index 00000000000..32eb4fe0432
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.c
@@ -0,0 +1,1170 @@
1 | /****************************************************************************** | ||
2 | * | ||
3 | * This file is provided under a dual BSD/GPLv2 license. When using or | ||
4 | * redistributing this file, you may do so under either license. | ||
5 | * | ||
6 | * GPL LICENSE SUMMARY | ||
7 | * | ||
8 | * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of version 2 of the GNU General Public License as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but | ||
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
17 | * General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, | ||
22 | * USA | ||
23 | * | ||
24 | * The full GNU General Public License is included in this distribution | ||
25 | * in the file called LICENSE.GPL. | ||
26 | * | ||
27 | * Contact Information: | ||
28 | * Intel Linux Wireless <ilw@linux.intel.com> | ||
29 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
30 | * | ||
31 | * BSD LICENSE | ||
32 | * | ||
33 | * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. | ||
34 | * All rights reserved. | ||
35 | * | ||
36 | * Redistribution and use in source and binary forms, with or without | ||
37 | * modification, are permitted provided that the following conditions | ||
38 | * are met: | ||
39 | * | ||
40 | * * Redistributions of source code must retain the above copyright | ||
41 | * notice, this list of conditions and the following disclaimer. | ||
42 | * * Redistributions in binary form must reproduce the above copyright | ||
43 | * notice, this list of conditions and the following disclaimer in | ||
44 | * the documentation and/or other materials provided with the | ||
45 | * distribution. | ||
46 | * * Neither the name Intel Corporation nor the names of its | ||
47 | * contributors may be used to endorse or promote products derived | ||
48 | * from this software without specific prior written permission. | ||
49 | * | ||
50 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
51 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
52 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
53 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
54 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
55 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
56 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
57 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
58 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
59 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
60 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
61 | * | ||
62 | *****************************************************************************/ | ||
63 | #include "iwl-dev.h" | ||
64 | #include "iwl-trans.h" | ||
65 | #include "iwl-core.h" | ||
66 | #include "iwl-helpers.h" | ||
67 | #include "iwl-trans-int-pcie.h" | ||
68 | /* TODO: remove unneeded includes once the transport layer tx_free is here */ | ||
69 | #include "iwl-agn.h" | ||
70 | #include "iwl-core.h" | ||
71 | |||
72 | static int iwl_trans_rx_alloc(struct iwl_priv *priv) | ||
73 | { | ||
74 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
75 | struct device *dev = priv->bus->dev; | ||
76 | |||
77 | memset(&priv->rxq, 0, sizeof(priv->rxq)); | ||
78 | |||
79 | spin_lock_init(&rxq->lock); | ||
80 | INIT_LIST_HEAD(&rxq->rx_free); | ||
81 | INIT_LIST_HEAD(&rxq->rx_used); | ||
82 | |||
83 | if (WARN_ON(rxq->bd || rxq->rb_stts)) | ||
84 | return -EINVAL; | ||
85 | |||
86 | /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */ | ||
87 | rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, | ||
88 | &rxq->bd_dma, GFP_KERNEL); | ||
89 | if (!rxq->bd) | ||
90 | goto err_bd; | ||
91 | memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE); | ||
92 | |||
93 | /* Allocate the driver's pointer to receive buffer status */ | ||
94 | rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts), | ||
95 | &rxq->rb_stts_dma, GFP_KERNEL); | ||
96 | if (!rxq->rb_stts) | ||
97 | goto err_rb_stts; | ||
98 | memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); | ||
99 | |||
100 | return 0; | ||
101 | |||
102 | err_rb_stts: | ||
103 | dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, | ||
104 | rxq->bd, rxq->bd_dma); | ||
105 | memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma)); | ||
106 | rxq->bd = NULL; | ||
107 | err_bd: | ||
108 | return -ENOMEM; | ||
109 | } | ||
110 | |||
111 | static void iwl_trans_rxq_free_rx_bufs(struct iwl_priv *priv) | ||
112 | { | ||
113 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
114 | int i; | ||
115 | |||
116 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | ||
117 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { | ||
118 | /* In the reset function, these buffers may have been allocated | ||
119 | * to an SKB, so we need to unmap and free potential storage */ | ||
120 | if (rxq->pool[i].page != NULL) { | ||
121 | dma_unmap_page(priv->bus->dev, rxq->pool[i].page_dma, | ||
122 | PAGE_SIZE << priv->hw_params.rx_page_order, | ||
123 | DMA_FROM_DEVICE); | ||
124 | __iwl_free_pages(priv, rxq->pool[i].page); | ||
125 | rxq->pool[i].page = NULL; | ||
126 | } | ||
127 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | ||
128 | } | ||
129 | } | ||
130 | |||
131 | static void iwl_trans_rx_hw_init(struct iwl_priv *priv, | ||
132 | struct iwl_rx_queue *rxq) | ||
133 | { | ||
134 | u32 rb_size; | ||
135 | const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ | ||
136 | u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */ | ||
137 | |||
138 | rb_timeout = RX_RB_TIMEOUT; | ||
139 | |||
140 | if (iwlagn_mod_params.amsdu_size_8K) | ||
141 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; | ||
142 | else | ||
143 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; | ||
144 | |||
145 | /* Stop Rx DMA */ | ||
146 | iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); | ||
147 | |||
148 | /* Reset driver's Rx queue write index */ | ||
149 | iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); | ||
150 | |||
151 | /* Tell device where to find RBD circular buffer in DRAM */ | ||
152 | iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG, | ||
153 | (u32)(rxq->bd_dma >> 8)); | ||
154 | |||
155 | /* Tell device where in DRAM to update its Rx status */ | ||
156 | iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, | ||
157 | rxq->rb_stts_dma >> 4); | ||
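	/*
	 * Note on the shifts (added explanation, not in the original source):
	 * the two writes above pass the DMA addresses shifted right by 8 and
	 * 4 bits, which suggests the device registers hold the RBD base in
	 * 256-byte units and the status-buffer pointer in 16-byte units, so
	 * both buffers allocated in iwl_trans_rx_alloc() need at least that
	 * alignment.
	 */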
158 | |||
159 | /* Enable Rx DMA | ||
160 | * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of a HW bug in | ||
161 | * the credit mechanism of the 5000 HW RX FIFO | ||
162 | * Direct rx interrupts to the host | ||
163 | * Rx buffer size 4 or 8k | ||
164 | * RB timeout 0x10 | ||
165 | * 256 RBDs | ||
166 | */ | ||
167 | iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, | ||
168 | FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | | ||
169 | FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | | ||
170 | FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | | ||
171 | FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK | | ||
172 | rb_size | | ||
173 | (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) | | ||
174 | (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); | ||
175 | |||
176 | /* Set interrupt coalescing timer to default (2048 usecs) */ | ||
177 | iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); | ||
178 | } | ||
179 | |||
180 | static int iwl_rx_init(struct iwl_priv *priv) | ||
181 | { | ||
182 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
183 | int i, err; | ||
184 | unsigned long flags; | ||
185 | |||
186 | if (!rxq->bd) { | ||
187 | err = iwl_trans_rx_alloc(priv); | ||
188 | if (err) | ||
189 | return err; | ||
190 | } | ||
191 | |||
192 | spin_lock_irqsave(&rxq->lock, flags); | ||
193 | INIT_LIST_HEAD(&rxq->rx_free); | ||
194 | INIT_LIST_HEAD(&rxq->rx_used); | ||
195 | |||
196 | iwl_trans_rxq_free_rx_bufs(priv); | ||
197 | |||
198 | for (i = 0; i < RX_QUEUE_SIZE; i++) | ||
199 | rxq->queue[i] = NULL; | ||
200 | |||
201 | /* Set us so that we have processed and used all buffers, but have | ||
202 | * not restocked the Rx queue with fresh buffers */ | ||
203 | rxq->read = rxq->write = 0; | ||
204 | rxq->write_actual = 0; | ||
205 | rxq->free_count = 0; | ||
206 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
207 | |||
208 | iwlagn_rx_replenish(priv); | ||
209 | |||
210 | iwl_trans_rx_hw_init(priv, rxq); | ||
211 | |||
212 | spin_lock_irqsave(&priv->lock, flags); | ||
213 | rxq->need_update = 1; | ||
214 | iwl_rx_queue_update_write_ptr(priv, rxq); | ||
215 | spin_unlock_irqrestore(&priv->lock, flags); | ||
216 | |||
217 | return 0; | ||
218 | } | ||
219 | |||
220 | static void iwl_trans_rx_free(struct iwl_priv *priv) | ||
221 | { | ||
222 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
223 | unsigned long flags; | ||
224 | |||
225 | /* If rxq->bd is NULL, it means that nothing has been allocated, | ||
226 | * exit now */ | ||
227 | if (!rxq->bd) { | ||
228 | IWL_DEBUG_INFO(priv, "Free NULL rx context\n"); | ||
229 | return; | ||
230 | } | ||
231 | |||
232 | spin_lock_irqsave(&rxq->lock, flags); | ||
233 | iwl_trans_rxq_free_rx_bufs(priv); | ||
234 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
235 | |||
236 | dma_free_coherent(priv->bus->dev, sizeof(__le32) * RX_QUEUE_SIZE, | ||
237 | rxq->bd, rxq->bd_dma); | ||
238 | memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma)); | ||
239 | rxq->bd = NULL; | ||
240 | |||
241 | if (rxq->rb_stts) | ||
242 | dma_free_coherent(priv->bus->dev, | ||
243 | sizeof(struct iwl_rb_status), | ||
244 | rxq->rb_stts, rxq->rb_stts_dma); | ||
245 | else | ||
246 | IWL_DEBUG_INFO(priv, "Free rxq->rb_stts which is NULL\n"); | ||
247 | memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma)); | ||
248 | rxq->rb_stts = NULL; | ||
249 | } | ||
250 | |||
251 | static int iwl_trans_rx_stop(struct iwl_priv *priv) | ||
252 | { | ||
253 | |||
254 | /* stop Rx DMA */ | ||
255 | iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); | ||
256 | return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG, | ||
257 | FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); | ||
258 | } | ||
259 | |||
260 | static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv, | ||
261 | struct iwl_dma_ptr *ptr, size_t size) | ||
262 | { | ||
263 | if (WARN_ON(ptr->addr)) | ||
264 | return -EINVAL; | ||
265 | |||
266 | ptr->addr = dma_alloc_coherent(priv->bus->dev, size, | ||
267 | &ptr->dma, GFP_KERNEL); | ||
268 | if (!ptr->addr) | ||
269 | return -ENOMEM; | ||
270 | ptr->size = size; | ||
271 | return 0; | ||
272 | } | ||
273 | |||
274 | static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv, | ||
275 | struct iwl_dma_ptr *ptr) | ||
276 | { | ||
277 | if (unlikely(!ptr->addr)) | ||
278 | return; | ||
279 | |||
280 | dma_free_coherent(priv->bus->dev, ptr->size, ptr->addr, ptr->dma); | ||
281 | memset(ptr, 0, sizeof(*ptr)); | ||
282 | } | ||
283 | |||
284 | static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq, | ||
285 | int slots_num, u32 txq_id) | ||
286 | { | ||
287 | size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; | ||
288 | int i; | ||
289 | |||
290 | if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds)) | ||
291 | return -EINVAL; | ||
292 | |||
293 | txq->q.n_window = slots_num; | ||
294 | |||
295 | txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num, | ||
296 | GFP_KERNEL); | ||
297 | txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num, | ||
298 | GFP_KERNEL); | ||
299 | |||
300 | if (!txq->meta || !txq->cmd) | ||
301 | goto error; | ||
302 | |||
303 | for (i = 0; i < slots_num; i++) { | ||
304 | txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd), | ||
305 | GFP_KERNEL); | ||
306 | if (!txq->cmd[i]) | ||
307 | goto error; | ||
308 | } | ||
309 | |||
310 | /* Alloc driver data array and TFD circular buffer */ | ||
311 | /* Driver private data, only for Tx (not command) queues, | ||
312 | * not shared with device. */ | ||
313 | if (txq_id != priv->cmd_queue) { | ||
314 | txq->txb = kzalloc(sizeof(txq->txb[0]) * | ||
315 | TFD_QUEUE_SIZE_MAX, GFP_KERNEL); | ||
316 | if (!txq->txb) { | ||
317 | IWL_ERR(priv, "kmalloc for auxiliary BD " | ||
318 | "structures failed\n"); | ||
319 | goto error; | ||
320 | } | ||
321 | } else { | ||
322 | txq->txb = NULL; | ||
323 | } | ||
324 | |||
325 | /* Circular buffer of transmit frame descriptors (TFDs), | ||
326 | * shared with device */ | ||
327 | txq->tfds = dma_alloc_coherent(priv->bus->dev, tfd_sz, &txq->q.dma_addr, | ||
328 | GFP_KERNEL); | ||
329 | if (!txq->tfds) { | ||
330 | IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz); | ||
331 | goto error; | ||
332 | } | ||
333 | txq->q.id = txq_id; | ||
334 | |||
335 | return 0; | ||
336 | error: | ||
337 | kfree(txq->txb); | ||
338 | txq->txb = NULL; | ||
339 | /* since txq->cmd has been zeroed, | ||
340 | * all non-allocated cmd[i] will be NULL */ | ||
341 | if (txq->cmd) | ||
342 | for (i = 0; i < slots_num; i++) | ||
343 | kfree(txq->cmd[i]); | ||
344 | kfree(txq->meta); | ||
345 | kfree(txq->cmd); | ||
346 | txq->meta = NULL; | ||
347 | txq->cmd = NULL; | ||
348 | |||
349 | return -ENOMEM; | ||
350 | |||
351 | } | ||
352 | |||
353 | static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, | ||
354 | int slots_num, u32 txq_id) | ||
355 | { | ||
356 | int ret; | ||
357 | |||
358 | txq->need_update = 0; | ||
359 | memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num); | ||
360 | |||
361 | /* | ||
362 | * For the default queues 0-3, set up the swq_id | ||
363 | * already -- all others need to get one later | ||
364 | * (if they need one at all). | ||
365 | */ | ||
366 | if (txq_id < 4) | ||
367 | iwl_set_swq_id(txq, txq_id, txq_id); | ||
368 | |||
369 | /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise | ||
370 | * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ | ||
371 | BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); | ||
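	/*
	 * Quick worked example of the check above (added for illustration):
	 * for a power of two such as 256 (0x100), 256 & 255 == 0x100 & 0x0ff
	 * == 0, so the BUILD_BUG_ON does not trigger; a non-power-of-two such
	 * as 320 (0b101000000) ANDed with 319 (0b100111111) is non-zero, so
	 * the build fails.
	 */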
372 | |||
373 | /* Initialize queue's high/low-water marks, and head/tail indexes */ | ||
374 | ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, | ||
375 | txq_id); | ||
376 | if (ret) | ||
377 | return ret; | ||
378 | |||
379 | /* | ||
380 | * Tell nic where to find circular buffer of Tx Frame Descriptors for | ||
381 | * given Tx queue, and enable the DMA channel used for that queue. | ||
382 | * Circular buffer (TFD queue in DRAM) physical base address */ | ||
383 | iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id), | ||
384 | txq->q.dma_addr >> 8); | ||
385 | |||
386 | return 0; | ||
387 | } | ||
388 | |||
389 | /** | ||
390 | * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's | ||
391 | */ | ||
392 | static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id) | ||
393 | { | ||
394 | struct iwl_tx_queue *txq = &priv->txq[txq_id]; | ||
395 | struct iwl_queue *q = &txq->q; | ||
396 | |||
397 | if (!q->n_bd) | ||
398 | return; | ||
399 | |||
400 | while (q->write_ptr != q->read_ptr) { | ||
401 | /* The read_ptr needs to be bounded by q->n_window */ | ||
402 | iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr)); | ||
403 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); | ||
404 | } | ||
405 | } | ||
406 | |||
407 | /** | ||
408 | * iwl_tx_queue_free - Deallocate DMA queue. | ||
409 | * @txq: Transmit queue to deallocate. | ||
410 | * | ||
411 | * Empty queue by removing and destroying all BD's. | ||
412 | * Free all buffers. | ||
413 | * 0-fill, but do not free "txq" descriptor structure. | ||
414 | */ | ||
415 | static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id) | ||
416 | { | ||
417 | struct iwl_tx_queue *txq = &priv->txq[txq_id]; | ||
418 | struct device *dev = priv->bus->dev; | ||
419 | int i; | ||
420 | if (WARN_ON(!txq)) | ||
421 | return; | ||
422 | |||
423 | iwl_tx_queue_unmap(priv, txq_id); | ||
424 | |||
425 | /* De-alloc array of command/tx buffers */ | ||
426 | for (i = 0; i < txq->q.n_window; i++) | ||
427 | kfree(txq->cmd[i]); | ||
428 | |||
429 | /* De-alloc circular buffer of TFDs */ | ||
430 | if (txq->q.n_bd) { | ||
431 | dma_free_coherent(dev, priv->hw_params.tfd_size * | ||
432 | txq->q.n_bd, txq->tfds, txq->q.dma_addr); | ||
433 | memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr)); | ||
434 | } | ||
435 | |||
436 | /* De-alloc array of per-TFD driver data */ | ||
437 | kfree(txq->txb); | ||
438 | txq->txb = NULL; | ||
439 | |||
440 | /* deallocate arrays */ | ||
441 | kfree(txq->cmd); | ||
442 | kfree(txq->meta); | ||
443 | txq->cmd = NULL; | ||
444 | txq->meta = NULL; | ||
445 | |||
446 | /* 0-fill queue descriptor structure */ | ||
447 | memset(txq, 0, sizeof(*txq)); | ||
448 | } | ||
449 | |||
450 | /** | ||
451 | * iwl_trans_tx_free - Free TXQ Context | ||
452 | * | ||
453 | * Destroy all TX DMA queues and structures | ||
454 | */ | ||
455 | static void iwl_trans_tx_free(struct iwl_priv *priv) | ||
456 | { | ||
457 | int txq_id; | ||
458 | |||
459 | /* Tx queues */ | ||
460 | if (priv->txq) { | ||
461 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) | ||
462 | iwl_tx_queue_free(priv, txq_id); | ||
463 | } | ||
464 | |||
465 | kfree(priv->txq); | ||
466 | priv->txq = NULL; | ||
467 | |||
468 | iwlagn_free_dma_ptr(priv, &priv->kw); | ||
469 | |||
470 | iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls); | ||
471 | } | ||
472 | |||
473 | /** | ||
474 | * iwl_trans_tx_alloc - allocate TX context | ||
475 | * Allocate all Tx DMA structures and initialize them | ||
476 | * | ||
477 | * @priv: the driver's private data | ||
478 | * Returns 0 on success, or an error code on failure | ||
479 | */ | ||
480 | static int iwl_trans_tx_alloc(struct iwl_priv *priv) | ||
481 | { | ||
482 | int ret; | ||
483 | int txq_id, slots_num; | ||
484 | |||
485 | /* It is not allowed to alloc twice, so warn when this happens. | ||
486 | * We cannot rely on the previous allocation, so free and fail */ | ||
487 | if (WARN_ON(priv->txq)) { | ||
488 | ret = -EINVAL; | ||
489 | goto error; | ||
490 | } | ||
491 | |||
492 | ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls, | ||
493 | priv->hw_params.scd_bc_tbls_size); | ||
494 | if (ret) { | ||
495 | IWL_ERR(priv, "Scheduler BC Table allocation failed\n"); | ||
496 | goto error; | ||
497 | } | ||
498 | |||
499 | /* Alloc keep-warm buffer */ | ||
500 | ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE); | ||
501 | if (ret) { | ||
502 | IWL_ERR(priv, "Keep Warm allocation failed\n"); | ||
503 | goto error; | ||
504 | } | ||
505 | |||
506 | priv->txq = kzalloc(sizeof(struct iwl_tx_queue) * | ||
507 | priv->cfg->base_params->num_of_queues, GFP_KERNEL); | ||
508 | if (!priv->txq) { | ||
509 | IWL_ERR(priv, "Not enough memory for txq\n"); | ||
510 | ret = -ENOMEM; | ||
511 | goto error; | ||
512 | } | ||
513 | |||
514 | /* Alloc and init all Tx queues, including the command queue (#4/#9) */ | ||
515 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { | ||
516 | slots_num = (txq_id == priv->cmd_queue) ? | ||
517 | TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | ||
518 | ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num, | ||
519 | txq_id); | ||
520 | if (ret) { | ||
521 | IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id); | ||
522 | goto error; | ||
523 | } | ||
524 | } | ||
525 | |||
526 | return 0; | ||
527 | |||
528 | error: | ||
529 | trans_tx_free(&priv->trans); | ||
530 | |||
531 | return ret; | ||
532 | } | ||
533 | static int iwl_tx_init(struct iwl_priv *priv) | ||
534 | { | ||
535 | int ret; | ||
536 | int txq_id, slots_num; | ||
537 | unsigned long flags; | ||
538 | bool alloc = false; | ||
539 | |||
540 | if (!priv->txq) { | ||
541 | ret = iwl_trans_tx_alloc(priv); | ||
542 | if (ret) | ||
543 | goto error; | ||
544 | alloc = true; | ||
545 | } | ||
546 | |||
547 | spin_lock_irqsave(&priv->lock, flags); | ||
548 | |||
549 | /* Turn off all Tx DMA fifos */ | ||
550 | iwl_write_prph(priv, SCD_TXFACT, 0); | ||
551 | |||
552 | /* Tell NIC where to find the "keep warm" buffer */ | ||
553 | iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); | ||
554 | |||
555 | spin_unlock_irqrestore(&priv->lock, flags); | ||
556 | |||
557 | /* Alloc and init all Tx queues, including the command queue (#4/#9) */ | ||
558 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { | ||
559 | slots_num = (txq_id == priv->cmd_queue) ? | ||
560 | TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | ||
561 | ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num, | ||
562 | txq_id); | ||
563 | if (ret) { | ||
564 | IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); | ||
565 | goto error; | ||
566 | } | ||
567 | } | ||
568 | |||
569 | return 0; | ||
570 | error: | ||
571 | /* Upon error, free only if we allocated something */ | ||
572 | if (alloc) | ||
573 | trans_tx_free(&priv->trans); | ||
574 | return ret; | ||
575 | } | ||
576 | |||
577 | static void iwl_set_pwr_vmain(struct iwl_priv *priv) | ||
578 | { | ||
579 | /* | ||
580 | * (for documentation purposes) | ||
581 | * to set power to V_AUX, do: | ||
582 | |||
583 | if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) | ||
584 | iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, | ||
585 | APMG_PS_CTRL_VAL_PWR_SRC_VAUX, | ||
586 | ~APMG_PS_CTRL_MSK_PWR_SRC); | ||
587 | */ | ||
588 | |||
589 | iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, | ||
590 | APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, | ||
591 | ~APMG_PS_CTRL_MSK_PWR_SRC); | ||
592 | } | ||
593 | |||
594 | static int iwl_nic_init(struct iwl_priv *priv) | ||
595 | { | ||
596 | unsigned long flags; | ||
597 | |||
598 | /* nic_init */ | ||
599 | spin_lock_irqsave(&priv->lock, flags); | ||
600 | iwl_apm_init(priv); | ||
601 | |||
602 | /* Set interrupt coalescing calibration timer to default (512 usecs) */ | ||
603 | iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); | ||
604 | |||
605 | spin_unlock_irqrestore(&priv->lock, flags); | ||
606 | |||
607 | iwl_set_pwr_vmain(priv); | ||
608 | |||
609 | priv->cfg->lib->nic_config(priv); | ||
610 | |||
611 | /* Allocate the RX queue, or reset if it is already allocated */ | ||
612 | iwl_rx_init(priv); | ||
613 | |||
614 | /* Allocate or reset and init all Tx and Command queues */ | ||
615 | if (iwl_tx_init(priv)) | ||
616 | return -ENOMEM; | ||
617 | |||
618 | if (priv->cfg->base_params->shadow_reg_enable) { | ||
619 | /* enable shadow regs in HW */ | ||
620 | iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL, | ||
621 | 0x800FFFFF); | ||
622 | } | ||
623 | |||
624 | set_bit(STATUS_INIT, &priv->status); | ||
625 | |||
626 | return 0; | ||
627 | } | ||
628 | |||
629 | #define HW_READY_TIMEOUT (50) | ||
630 | |||
631 | /* Note: returns poll_bit return value, which is >= 0 if success */ | ||
632 | static int iwl_set_hw_ready(struct iwl_priv *priv) | ||
633 | { | ||
634 | int ret; | ||
635 | |||
636 | iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, | ||
637 | CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); | ||
638 | |||
639 | /* See if we got it */ | ||
640 | ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, | ||
641 | CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, | ||
642 | CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, | ||
643 | HW_READY_TIMEOUT); | ||
644 | |||
645 | IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : ""); | ||
646 | return ret; | ||
647 | } | ||
648 | |||
649 | /* Note: returns standard 0/-ERROR code */ | ||
650 | static int iwl_trans_prepare_card_hw(struct iwl_priv *priv) | ||
651 | { | ||
652 | int ret; | ||
653 | |||
654 | IWL_DEBUG_INFO(priv, "iwl_trans_prepare_card_hw enter\n"); | ||
655 | |||
656 | ret = iwl_set_hw_ready(priv); | ||
657 | if (ret >= 0) | ||
658 | return 0; | ||
659 | |||
660 | /* If HW is not ready, prepare the conditions to check again */ | ||
661 | iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, | ||
662 | CSR_HW_IF_CONFIG_REG_PREPARE); | ||
663 | |||
664 | ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, | ||
665 | ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, | ||
666 | CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); | ||
667 | |||
668 | if (ret < 0) | ||
669 | return ret; | ||
670 | |||
671 | /* HW should be ready by now, check again. */ | ||
672 | ret = iwl_set_hw_ready(priv); | ||
673 | if (ret >= 0) | ||
674 | return 0; | ||
675 | return ret; | ||
676 | } | ||
677 | |||
678 | static int iwl_trans_start_device(struct iwl_priv *priv) | ||
679 | { | ||
680 | int ret; | ||
681 | |||
682 | priv->ucode_owner = IWL_OWNERSHIP_DRIVER; | ||
683 | |||
684 | if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) && | ||
685 | iwl_trans_prepare_card_hw(priv)) { | ||
686 | IWL_WARN(priv, "Exit HW not ready\n"); | ||
687 | return -EIO; | ||
688 | } | ||
689 | |||
690 | /* If platform's RF_KILL switch is NOT set to KILL */ | ||
691 | if (iwl_read32(priv, CSR_GP_CNTRL) & | ||
692 | CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) | ||
693 | clear_bit(STATUS_RF_KILL_HW, &priv->status); | ||
694 | else | ||
695 | set_bit(STATUS_RF_KILL_HW, &priv->status); | ||
696 | |||
697 | if (iwl_is_rfkill(priv)) { | ||
698 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, true); | ||
699 | iwl_enable_interrupts(priv); | ||
700 | return -ERFKILL; | ||
701 | } | ||
702 | |||
703 | iwl_write32(priv, CSR_INT, 0xFFFFFFFF); | ||
704 | |||
705 | ret = iwl_nic_init(priv); | ||
706 | if (ret) { | ||
707 | IWL_ERR(priv, "Unable to init nic\n"); | ||
708 | return ret; | ||
709 | } | ||
710 | |||
711 | /* make sure rfkill handshake bits are cleared */ | ||
712 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
713 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, | ||
714 | CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); | ||
715 | |||
716 | /* clear (again), then enable host interrupts */ | ||
717 | iwl_write32(priv, CSR_INT, 0xFFFFFFFF); | ||
718 | iwl_enable_interrupts(priv); | ||
719 | |||
720 | /* really make sure rfkill handshake bits are cleared */ | ||
721 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
722 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
723 | |||
724 | return 0; | ||
725 | } | ||
726 | |||
727 | /* | ||
728 | * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask | ||
729 | * must be called under priv->lock and mac access | ||
730 | */ | ||
731 | static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask) | ||
732 | { | ||
733 | iwl_write_prph(priv, SCD_TXFACT, mask); | ||
734 | } | ||
735 | |||
736 | #define IWL_AC_UNSET -1 | ||
737 | |||
738 | struct queue_to_fifo_ac { | ||
739 | s8 fifo, ac; | ||
740 | }; | ||
741 | |||
742 | static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = { | ||
743 | { IWL_TX_FIFO_VO, IEEE80211_AC_VO, }, | ||
744 | { IWL_TX_FIFO_VI, IEEE80211_AC_VI, }, | ||
745 | { IWL_TX_FIFO_BE, IEEE80211_AC_BE, }, | ||
746 | { IWL_TX_FIFO_BK, IEEE80211_AC_BK, }, | ||
747 | { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, }, | ||
748 | { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, }, | ||
749 | { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, }, | ||
750 | { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, }, | ||
751 | { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, }, | ||
752 | { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, }, | ||
753 | }; | ||
754 | |||
755 | static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = { | ||
756 | { IWL_TX_FIFO_VO, IEEE80211_AC_VO, }, | ||
757 | { IWL_TX_FIFO_VI, IEEE80211_AC_VI, }, | ||
758 | { IWL_TX_FIFO_BE, IEEE80211_AC_BE, }, | ||
759 | { IWL_TX_FIFO_BK, IEEE80211_AC_BK, }, | ||
760 | { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, }, | ||
761 | { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, }, | ||
762 | { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, }, | ||
763 | { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, }, | ||
764 | { IWL_TX_FIFO_BE_IPAN, 2, }, | ||
765 | { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, }, | ||
766 | }; | ||
767 | static void iwl_trans_tx_start(struct iwl_priv *priv) | ||
768 | { | ||
769 | const struct queue_to_fifo_ac *queue_to_fifo; | ||
770 | struct iwl_rxon_context *ctx; | ||
771 | u32 a; | ||
772 | unsigned long flags; | ||
773 | int i, chan; | ||
774 | u32 reg_val; | ||
775 | |||
776 | spin_lock_irqsave(&priv->lock, flags); | ||
777 | |||
778 | priv->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR); | ||
779 | a = priv->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND; | ||
780 | /* reset context data memory */ | ||
781 | for (; a < priv->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND; | ||
782 | a += 4) | ||
783 | iwl_write_targ_mem(priv, a, 0); | ||
784 | /* reset tx status memory */ | ||
785 | for (; a < priv->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND; | ||
786 | a += 4) | ||
787 | iwl_write_targ_mem(priv, a, 0); | ||
788 | for (; a < priv->scd_base_addr + | ||
789 | SCD_TRANS_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4) | ||
790 | iwl_write_targ_mem(priv, a, 0); | ||
791 | |||
792 | iwl_write_prph(priv, SCD_DRAM_BASE_ADDR, | ||
793 | priv->scd_bc_tbls.dma >> 10); | ||
794 | |||
795 | /* Enable DMA channel */ | ||
796 | for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++) | ||
797 | iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan), | ||
798 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | | ||
799 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); | ||
800 | |||
801 | /* Update FH chicken bits */ | ||
802 | reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG); | ||
803 | iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG, | ||
804 | reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); | ||
805 | |||
806 | iwl_write_prph(priv, SCD_QUEUECHAIN_SEL, | ||
807 | SCD_QUEUECHAIN_SEL_ALL(priv)); | ||
808 | iwl_write_prph(priv, SCD_AGGR_SEL, 0); | ||
809 | |||
810 | /* initiate the queues */ | ||
811 | for (i = 0; i < priv->hw_params.max_txq_num; i++) { | ||
812 | iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0); | ||
813 | iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8)); | ||
814 | iwl_write_targ_mem(priv, priv->scd_base_addr + | ||
815 | SCD_CONTEXT_QUEUE_OFFSET(i), 0); | ||
816 | iwl_write_targ_mem(priv, priv->scd_base_addr + | ||
817 | SCD_CONTEXT_QUEUE_OFFSET(i) + | ||
818 | sizeof(u32), | ||
819 | ((SCD_WIN_SIZE << | ||
820 | SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & | ||
821 | SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | | ||
822 | ((SCD_FRAME_LIMIT << | ||
823 | SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & | ||
824 | SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); | ||
825 | } | ||
826 | |||
827 | iwl_write_prph(priv, SCD_INTERRUPT_MASK, | ||
828 | IWL_MASK(0, priv->hw_params.max_txq_num)); | ||
829 | |||
830 | /* Activate all Tx DMA/FIFO channels */ | ||
831 | iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7)); | ||
832 | |||
833 | /* map queues to FIFOs */ | ||
834 | if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)) | ||
835 | queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo; | ||
836 | else | ||
837 | queue_to_fifo = iwlagn_default_queue_to_tx_fifo; | ||
838 | |||
839 | iwl_trans_set_wr_ptrs(priv, priv->cmd_queue, 0); | ||
840 | |||
841 | /* make sure all queues are not stopped */ | ||
842 | memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped)); | ||
843 | for (i = 0; i < 4; i++) | ||
844 | atomic_set(&priv->queue_stop_count[i], 0); | ||
845 | for_each_context(priv, ctx) | ||
846 | ctx->last_tx_rejected = false; | ||
847 | |||
848 | /* reset to 0 to enable all the queues first */ | ||
849 | priv->txq_ctx_active_msk = 0; | ||
850 | |||
851 | BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10); | ||
852 | BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10); | ||
853 | |||
854 | for (i = 0; i < 10; i++) { | ||
855 | int fifo = queue_to_fifo[i].fifo; | ||
856 | int ac = queue_to_fifo[i].ac; | ||
857 | |||
858 | iwl_txq_ctx_activate(priv, i); | ||
859 | |||
860 | if (fifo == IWL_TX_FIFO_UNUSED) | ||
861 | continue; | ||
862 | |||
863 | if (ac != IWL_AC_UNSET) | ||
864 | iwl_set_swq_id(&priv->txq[i], ac, i); | ||
865 | iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0); | ||
866 | } | ||
867 | |||
868 | spin_unlock_irqrestore(&priv->lock, flags); | ||
869 | |||
870 | /* Enable L1-Active */ | ||
871 | iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG, | ||
872 | APMG_PCIDEV_STT_VAL_L1_ACT_DIS); | ||
873 | } | ||
874 | |||
875 | /** | ||
876 | * iwl_trans_tx_stop - Stop all Tx DMA channels | ||
877 | */ | ||
878 | static int iwl_trans_tx_stop(struct iwl_priv *priv) | ||
879 | { | ||
880 | int ch, txq_id; | ||
881 | unsigned long flags; | ||
882 | |||
883 | /* Turn off all Tx DMA fifos */ | ||
884 | spin_lock_irqsave(&priv->lock, flags); | ||
885 | |||
886 | iwl_trans_txq_set_sched(priv, 0); | ||
887 | |||
888 | /* Stop each Tx DMA channel, and wait for it to be idle */ | ||
889 | for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { | ||
890 | iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); | ||
891 | if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG, | ||
892 | FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), | ||
893 | 1000)) | ||
894 | IWL_ERR(priv, "Failing on timeout while stopping" | ||
895 | " DMA channel %d [0x%08x]", ch, | ||
896 | iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG)); | ||
897 | } | ||
898 | spin_unlock_irqrestore(&priv->lock, flags); | ||
899 | |||
900 | if (!priv->txq) { | ||
901 | IWL_WARN(priv, "Stopping tx queues that aren't allocated..."); | ||
902 | return 0; | ||
903 | } | ||
904 | |||
905 | /* Unmap DMA from host system and free skb's */ | ||
906 | for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) | ||
907 | iwl_tx_queue_unmap(priv, txq_id); | ||
908 | |||
909 | return 0; | ||
910 | } | ||
911 | |||
912 | static void iwl_trans_stop_device(struct iwl_priv *priv) | ||
913 | { | ||
914 | unsigned long flags; | ||
915 | |||
916 | /* stop and reset the on-board processor */ | ||
917 | iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); | ||
918 | |||
919 | /* tell the device to stop sending interrupts */ | ||
920 | spin_lock_irqsave(&priv->lock, flags); | ||
921 | iwl_disable_interrupts(priv); | ||
922 | spin_unlock_irqrestore(&priv->lock, flags); | ||
923 | trans_sync_irq(&priv->trans); | ||
924 | |||
925 | /* device going down, Stop using ICT table */ | ||
926 | iwl_disable_ict(priv); | ||
927 | |||
928 | /* | ||
929 | * If a HW restart happens during firmware loading, | ||
930 | * then the firmware loading might call this function | ||
931 | * and later it might be called again due to the | ||
932 | * restart. So don't process again if the device is | ||
933 | * already dead. | ||
934 | */ | ||
935 | if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) { | ||
936 | iwl_trans_tx_stop(priv); | ||
937 | iwl_trans_rx_stop(priv); | ||
938 | |||
939 | /* Power-down device's busmaster DMA clocks */ | ||
940 | iwl_write_prph(priv, APMG_CLK_DIS_REG, | ||
941 | APMG_CLK_VAL_DMA_CLK_RQT); | ||
942 | udelay(5); | ||
943 | } | ||
944 | |||
945 | /* Make sure (redundant) we've released our request to stay awake */ | ||
946 | iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | ||
947 | |||
948 | /* Stop the device, and put it in low power state */ | ||
949 | iwl_apm_stop(priv); | ||
950 | } | ||
951 | |||
952 | static struct iwl_tx_cmd *iwl_trans_get_tx_cmd(struct iwl_priv *priv, | ||
953 | int txq_id) | ||
954 | { | ||
955 | struct iwl_tx_queue *txq = &priv->txq[txq_id]; | ||
956 | struct iwl_queue *q = &txq->q; | ||
957 | struct iwl_device_cmd *dev_cmd; | ||
958 | |||
959 | if (unlikely(iwl_queue_space(q) < q->high_mark)) | ||
960 | return NULL; | ||
961 | |||
962 | /* | ||
963 | * Set up the Tx-command (not MAC!) header. | ||
964 | * Store the chosen Tx queue and TFD index within the sequence field; | ||
965 | * after Tx, uCode's Tx response will return this value so driver can | ||
966 | * locate the frame within the tx queue and do post-tx processing. | ||
967 | */ | ||
968 | dev_cmd = txq->cmd[q->write_ptr]; | ||
969 | memset(dev_cmd, 0, sizeof(*dev_cmd)); | ||
970 | dev_cmd->hdr.cmd = REPLY_TX; | ||
971 | dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | | ||
972 | INDEX_TO_SEQ(q->write_ptr))); | ||
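	/*
	 * Presumably QUEUE_TO_SEQ()/INDEX_TO_SEQ() pack the queue number into
	 * the upper bits of the 16-bit sequence field and the TFD index
	 * (q->write_ptr) into the lower bits; the Tx response handler can
	 * then recover both with the matching SEQ_TO_QUEUE()/SEQ_TO_INDEX()
	 * helpers, as the comment above describes.
	 */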
973 | return &dev_cmd->cmd.tx; | ||
974 | } | ||
975 | |||
976 | static int iwl_trans_tx(struct iwl_priv *priv, struct sk_buff *skb, | ||
977 | struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu, | ||
978 | struct iwl_rxon_context *ctx) | ||
979 | { | ||
980 | struct iwl_tx_queue *txq = &priv->txq[txq_id]; | ||
981 | struct iwl_queue *q = &txq->q; | ||
982 | struct iwl_device_cmd *dev_cmd = txq->cmd[q->write_ptr]; | ||
983 | struct iwl_cmd_meta *out_meta; | ||
984 | |||
985 | dma_addr_t phys_addr = 0; | ||
986 | dma_addr_t txcmd_phys; | ||
987 | dma_addr_t scratch_phys; | ||
988 | u16 len, firstlen, secondlen; | ||
989 | u8 wait_write_ptr = 0; | ||
990 | u8 hdr_len = ieee80211_hdrlen(fc); | ||
991 | |||
992 | /* Set up driver data for this TFD */ | ||
993 | memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); | ||
994 | txq->txb[q->write_ptr].skb = skb; | ||
995 | txq->txb[q->write_ptr].ctx = ctx; | ||
996 | |||
997 | /* Set up first empty entry in queue's array of Tx/cmd buffers */ | ||
998 | out_meta = &txq->meta[q->write_ptr]; | ||
999 | |||
1000 | /* | ||
1001 | * Use the first empty entry in this queue's command buffer array | ||
1002 | * to contain the Tx command and MAC header concatenated together | ||
1003 | * (payload data will be in another buffer). | ||
1004 | * Size of this varies, due to varying MAC header length. | ||
1005 | * If end is not dword aligned, we'll have 2 extra bytes at the end | ||
1006 | * of the MAC header (device reads on dword boundaries). | ||
1007 | * We'll tell device about this padding later. | ||
1008 | */ | ||
1009 | len = sizeof(struct iwl_tx_cmd) + | ||
1010 | sizeof(struct iwl_cmd_header) + hdr_len; | ||
1011 | firstlen = (len + 3) & ~3; | ||
1012 | |||
1013 | /* Tell NIC about any 2-byte padding after MAC header */ | ||
1014 | if (firstlen != len) | ||
1015 | tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; | ||
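	/*
	 * Worked example of the alignment above (illustrative numbers only):
	 * if sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	 * hdr_len came to, say, 50 bytes, then firstlen = (50 + 3) & ~3 = 52;
	 * since firstlen != len, TX_CMD_FLG_MH_PAD_MSK is set so the device
	 * knows to skip the two padding bytes when it reads the MAC header on
	 * dword boundaries.
	 */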
1016 | |||
1017 | /* Physical address of this Tx command's header (not MAC header!), | ||
1018 | * within command buffer array. */ | ||
1019 | txcmd_phys = dma_map_single(priv->bus->dev, | ||
1020 | &dev_cmd->hdr, firstlen, | ||
1021 | DMA_BIDIRECTIONAL); | ||
1022 | if (unlikely(dma_mapping_error(priv->bus->dev, txcmd_phys))) | ||
1023 | return -1; | ||
1024 | dma_unmap_addr_set(out_meta, mapping, txcmd_phys); | ||
1025 | dma_unmap_len_set(out_meta, len, firstlen); | ||
1026 | |||
1027 | if (!ieee80211_has_morefrags(fc)) { | ||
1028 | txq->need_update = 1; | ||
1029 | } else { | ||
1030 | wait_write_ptr = 1; | ||
1031 | txq->need_update = 0; | ||
1032 | } | ||
1033 | |||
1034 | /* Set up TFD's 2nd entry to point directly to remainder of skb, | ||
1035 | * if any (802.11 null frames have no payload). */ | ||
1036 | secondlen = skb->len - hdr_len; | ||
1037 | if (secondlen > 0) { | ||
1038 | phys_addr = dma_map_single(priv->bus->dev, skb->data + hdr_len, | ||
1039 | secondlen, DMA_TO_DEVICE); | ||
1040 | if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) { | ||
1041 | dma_unmap_single(priv->bus->dev, | ||
1042 | dma_unmap_addr(out_meta, mapping), | ||
1043 | dma_unmap_len(out_meta, len), | ||
1044 | DMA_BIDIRECTIONAL); | ||
1045 | return -1; | ||
1046 | } | ||
1047 | } | ||
1048 | |||
1049 | /* Attach buffers to TFD */ | ||
1050 | iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1); | ||
1051 | if (secondlen > 0) | ||
1052 | iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, | ||
1053 | secondlen, 0); | ||
1054 | |||
1055 | scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + | ||
1056 | offsetof(struct iwl_tx_cmd, scratch); | ||
1057 | |||
1058 | /* take back ownership of DMA buffer to enable update */ | ||
1059 | dma_sync_single_for_cpu(priv->bus->dev, txcmd_phys, firstlen, | ||
1060 | DMA_BIDIRECTIONAL); | ||
1061 | tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); | ||
1062 | tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); | ||
1063 | |||
1064 | IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n", | ||
1065 | le16_to_cpu(dev_cmd->hdr.sequence)); | ||
1066 | IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); | ||
1067 | iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd)); | ||
1068 | iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); | ||
1069 | |||
1070 | /* Set up entry for this TFD in Tx byte-count array */ | ||
1071 | iwl_trans_txq_update_byte_cnt_tbl(priv, txq, le16_to_cpu(tx_cmd->len)); | ||
1072 | |||
1073 | dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen, | ||
1074 | DMA_BIDIRECTIONAL); | ||
1075 | |||
1076 | trace_iwlwifi_dev_tx(priv, | ||
1077 | &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr], | ||
1078 | sizeof(struct iwl_tfd), | ||
1079 | &dev_cmd->hdr, firstlen, | ||
1080 | skb->data + hdr_len, secondlen); | ||
1081 | |||
1082 | /* Tell device the write index *just past* this latest filled TFD */ | ||
1083 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); | ||
1084 | iwl_txq_update_write_ptr(priv, txq); | ||
1085 | |||
1086 | /* | ||
1087 | * At this point the frame is "transmitted" successfully | ||
1088 | * and we will get a TX status notification eventually, | ||
1089 | * regardless of the value of ret. "ret" only indicates | ||
1090 | * whether or not we should update the write pointer. | ||
1091 | */ | ||
1092 | if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) { | ||
1093 | if (wait_write_ptr) { | ||
1094 | txq->need_update = 1; | ||
1095 | iwl_txq_update_write_ptr(priv, txq); | ||
1096 | } else { | ||
1097 | iwl_stop_queue(priv, txq); | ||
1098 | } | ||
1099 | } | ||
1100 | return 0; | ||
1101 | } | ||
1102 | |||
1103 | static void iwl_trans_kick_nic(struct iwl_priv *priv) | ||
1104 | { | ||
1105 | /* Remove all resets to allow NIC to operate */ | ||
1106 | iwl_write32(priv, CSR_RESET, 0); | ||
1107 | } | ||
1108 | |||
1109 | static void iwl_trans_sync_irq(struct iwl_priv *priv) | ||
1110 | { | ||
1111 | /* wait to make sure we flush the pending tasklet */ | ||
1112 | synchronize_irq(priv->bus->irq); | ||
1113 | tasklet_kill(&priv->irq_tasklet); | ||
1114 | } | ||
1115 | |||
1116 | static void iwl_trans_free(struct iwl_priv *priv) | ||
1117 | { | ||
1118 | free_irq(priv->bus->irq, priv); | ||
1119 | iwl_free_isr_ict(priv); | ||
1120 | } | ||
1121 | |||
1122 | static const struct iwl_trans_ops trans_ops = { | ||
1123 | .start_device = iwl_trans_start_device, | ||
1124 | .prepare_card_hw = iwl_trans_prepare_card_hw, | ||
1125 | .stop_device = iwl_trans_stop_device, | ||
1126 | |||
1127 | .tx_start = iwl_trans_tx_start, | ||
1128 | |||
1129 | .rx_free = iwl_trans_rx_free, | ||
1130 | .tx_free = iwl_trans_tx_free, | ||
1131 | |||
1132 | .send_cmd = iwl_send_cmd, | ||
1133 | .send_cmd_pdu = iwl_send_cmd_pdu, | ||
1134 | |||
1135 | .get_tx_cmd = iwl_trans_get_tx_cmd, | ||
1136 | .tx = iwl_trans_tx, | ||
1137 | |||
1138 | .txq_agg_disable = iwl_trans_txq_agg_disable, | ||
1139 | .txq_agg_setup = iwl_trans_txq_agg_setup, | ||
1140 | |||
1141 | .kick_nic = iwl_trans_kick_nic, | ||
1142 | |||
1143 | .sync_irq = iwl_trans_sync_irq, | ||
1144 | .free = iwl_trans_free, | ||
1145 | }; | ||
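The ops table above is only ever reached through thin trans_*() wrappers (this file itself calls trans_tx_free() and trans_sync_irq() on &priv->trans). As a rough, non-authoritative sketch of what such a wrapper presumably looks like in iwl-trans.h, given the .ops/.priv assignments made in iwl_trans_register() below:

/* Sketch only -- the real declarations live in iwl-trans.h. */
static inline void trans_tx_free(struct iwl_trans *trans)
{
	/* dispatch through the ops table filled in by iwl_trans_register() */
	trans->ops->tx_free(trans->priv);
}

static inline void trans_sync_irq(struct iwl_trans *trans)
{
	trans->ops->sync_irq(trans->priv);
}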
1146 | |||
1147 | int iwl_trans_register(struct iwl_trans *trans, struct iwl_priv *priv) | ||
1148 | { | ||
1149 | int err; | ||
1150 | |||
1151 | priv->trans.ops = &trans_ops; | ||
1152 | priv->trans.priv = priv; | ||
1153 | |||
1154 | tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) | ||
1155 | iwl_irq_tasklet, (unsigned long)priv); | ||
1156 | |||
1157 | iwl_alloc_isr_ict(priv); | ||
1158 | |||
1159 | err = request_irq(priv->bus->irq, iwl_isr_ict, IRQF_SHARED, | ||
1160 | DRV_NAME, priv); | ||
1161 | if (err) { | ||
1162 | IWL_ERR(priv, "Error allocating IRQ %d\n", priv->bus->irq); | ||
1163 | iwl_free_isr_ict(priv); | ||
1164 | return err; | ||
1165 | } | ||
1166 | |||
1167 | INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish); | ||
1168 | |||
1169 | return 0; | ||
1170 | } | ||