Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie/tx.c')
-rw-r--r-- | drivers/net/wireless/iwlwifi/pcie/tx.c | 969
1 file changed, 969 insertions(+), 0 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
new file mode 100644
index 000000000000..6baf8deef519
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -0,0 +1,969 @@
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved. | ||
4 | * | ||
5 | * Portions of this file are derived from the ipw3945 project, as well | ||
6 | * as portions of the ieee80211 subsystem header files. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of version 2 of the GNU General Public License as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., | ||
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA | ||
20 | * | ||
21 | * The full GNU General Public License is included in this distribution in the | ||
22 | * file called LICENSE. | ||
23 | * | ||
24 | * Contact Information: | ||
25 | * Intel Linux Wireless <ilw@linux.intel.com> | ||
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
27 | * | ||
28 | *****************************************************************************/ | ||
29 | #include <linux/etherdevice.h> | ||
30 | #include <linux/slab.h> | ||
31 | #include <linux/sched.h> | ||
32 | |||
33 | #include "iwl-debug.h" | ||
34 | #include "iwl-csr.h" | ||
35 | #include "iwl-prph.h" | ||
36 | #include "iwl-io.h" | ||
37 | #include "iwl-op-mode.h" | ||
38 | #include "internal.h" | ||
39 | /* FIXME: need to abstract out TX command (once we know what it looks like) */ | ||
40 | #include "dvm/commands.h" | ||
41 | |||
42 | #define IWL_TX_CRC_SIZE 4 | ||
43 | #define IWL_TX_DELIMITER_SIZE 4 | ||
44 | |||
45 | /** | ||
46 | * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array | ||
47 | */ | ||
48 | void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, | ||
49 | struct iwl_tx_queue *txq, | ||
50 | u16 byte_cnt) | ||
51 | { | ||
52 | struct iwlagn_scd_bc_tbl *scd_bc_tbl; | ||
53 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
54 | int write_ptr = txq->q.write_ptr; | ||
55 | int txq_id = txq->q.id; | ||
56 | u8 sec_ctl = 0; | ||
57 | u8 sta_id = 0; | ||
58 | u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; | ||
59 | __le16 bc_ent; | ||
60 | struct iwl_tx_cmd *tx_cmd = | ||
61 | (void *) txq->entries[txq->q.write_ptr].cmd->payload; | ||
62 | |||
63 | scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; | ||
64 | |||
65 | WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); | ||
66 | |||
67 | sta_id = tx_cmd->sta_id; | ||
68 | sec_ctl = tx_cmd->sec_ctl; | ||
69 | |||
70 | switch (sec_ctl & TX_CMD_SEC_MSK) { | ||
71 | case TX_CMD_SEC_CCM: | ||
72 | len += CCMP_MIC_LEN; | ||
73 | break; | ||
74 | case TX_CMD_SEC_TKIP: | ||
75 | len += TKIP_ICV_LEN; | ||
76 | break; | ||
77 | case TX_CMD_SEC_WEP: | ||
78 | len += WEP_IV_LEN + WEP_ICV_LEN; | ||
79 | break; | ||
80 | } | ||
81 | |||
82 | bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12)); | ||
83 | |||
84 | scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; | ||
85 | |||
86 | if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) | ||
87 | scd_bc_tbl[txq_id]. | ||
88 | tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; | ||
89 | } | ||
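/*
 * Worked example (editor's addition, illustrative only; assumes CCMP_MIC_LEN
 * is 8 bytes): the byte-count entry packs a 12-bit length and a 4-bit
 * station id into one little-endian 16-bit word. For a 100-byte frame from
 * station 3 protected with CCMP:
 *
 *	len    = 100 + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE + CCMP_MIC_LEN
 *	       = 100 + 4 + 4 + 8 = 116
 *	bc_ent = cpu_to_le16((116 & 0xFFF) | (3 << 12)) = cpu_to_le16(0x3074)
 *
 * Entries with write_ptr < TFD_QUEUE_SIZE_BC_DUP are mirrored at
 * TFD_QUEUE_SIZE_MAX + write_ptr so the scheduler can read a contiguous
 * window of byte counts across the ring's wrap point.
 */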
90 | |||
91 | /** | ||
92 | * iwl_txq_update_write_ptr - Send new write index to hardware | ||
93 | */ | ||
94 | void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq) | ||
95 | { | ||
96 | u32 reg = 0; | ||
97 | int txq_id = txq->q.id; | ||
98 | |||
99 | if (txq->need_update == 0) | ||
100 | return; | ||
101 | |||
102 | if (trans->cfg->base_params->shadow_reg_enable) { | ||
103 | /* shadow register enabled */ | ||
104 | iwl_write32(trans, HBUS_TARG_WRPTR, | ||
105 | txq->q.write_ptr | (txq_id << 8)); | ||
106 | } else { | ||
107 | struct iwl_trans_pcie *trans_pcie = | ||
108 | IWL_TRANS_GET_PCIE_TRANS(trans); | ||
109 | /* if we're trying to save power */ | ||
110 | if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) { | ||
111 | /* wake up nic if it's powered down ... | ||
112 | * uCode will wake up, and interrupt us again, so next | ||
113 | * time we'll skip this part. */ | ||
114 | reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); | ||
115 | |||
116 | if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { | ||
117 | IWL_DEBUG_INFO(trans, | ||
118 | "Tx queue %d requesting wakeup," | ||
119 | " GP1 = 0x%x\n", txq_id, reg); | ||
120 | iwl_set_bit(trans, CSR_GP_CNTRL, | ||
121 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | ||
122 | return; | ||
123 | } | ||
124 | |||
125 | iwl_write_direct32(trans, HBUS_TARG_WRPTR, | ||
126 | txq->q.write_ptr | (txq_id << 8)); | ||
127 | |||
128 | /* | ||
129 | * else not in power-save mode, | ||
130 | * uCode will never sleep when we're | ||
131 | * trying to tx (during RFKILL, we're not trying to tx). | ||
132 | */ | ||
133 | } else | ||
134 | iwl_write32(trans, HBUS_TARG_WRPTR, | ||
135 | txq->q.write_ptr | (txq_id << 8)); | ||
136 | } | ||
137 | txq->need_update = 0; | ||
138 | } | ||
139 | |||
140 | static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) | ||
141 | { | ||
142 | struct iwl_tfd_tb *tb = &tfd->tbs[idx]; | ||
143 | |||
144 | dma_addr_t addr = get_unaligned_le32(&tb->lo); | ||
145 | if (sizeof(dma_addr_t) > sizeof(u32)) | ||
146 | addr |= | ||
147 | ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16; | ||
148 | |||
149 | return addr; | ||
150 | } | ||
151 | |||
152 | static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx) | ||
153 | { | ||
154 | struct iwl_tfd_tb *tb = &tfd->tbs[idx]; | ||
155 | |||
156 | return le16_to_cpu(tb->hi_n_len) >> 4; | ||
157 | } | ||
158 | |||
159 | static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, | ||
160 | dma_addr_t addr, u16 len) | ||
161 | { | ||
162 | struct iwl_tfd_tb *tb = &tfd->tbs[idx]; | ||
163 | u16 hi_n_len = len << 4; | ||
164 | |||
165 | put_unaligned_le32(addr, &tb->lo); | ||
166 | if (sizeof(dma_addr_t) > sizeof(u32)) | ||
167 | hi_n_len |= ((addr >> 16) >> 16) & 0xF; | ||
168 | |||
169 | tb->hi_n_len = cpu_to_le16(hi_n_len); | ||
170 | |||
171 | tfd->num_tbs = idx + 1; | ||
172 | } | ||
173 | |||
174 | static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd) | ||
175 | { | ||
176 | return tfd->num_tbs & 0x1f; | ||
177 | } | ||
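/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * each TB packs a 36-bit DMA address plus a 12-bit length into 48 bits.
 * Taking, for example, addr = 0x8_1234_5678 and len = 200 (0x0C8):
 *
 *	put_unaligned_le32(0x12345678, &tb->lo);       low 32 address bits
 *	tb->hi_n_len = cpu_to_le16((200 << 4) | 0x8);  0x0C88: length in bits
 *	                                               4..15, address bits
 *	                                               32..35 in bits 0..3
 *
 * The double ">> 16" shifts avoid an out-of-range ">> 32" when dma_addr_t
 * is only 32 bits wide.
 */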
178 | |||
179 | static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta, | ||
180 | struct iwl_tfd *tfd, enum dma_data_direction dma_dir) | ||
181 | { | ||
182 | int i; | ||
183 | int num_tbs; | ||
184 | |||
185 | /* Sanity check on number of chunks */ | ||
186 | num_tbs = iwl_tfd_get_num_tbs(tfd); | ||
187 | |||
188 | if (num_tbs >= IWL_NUM_OF_TBS) { | ||
189 | IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); | ||
190 | /* @todo issue fatal error, it is quite a serious situation */ | ||
191 | return; | ||
192 | } | ||
193 | |||
194 | /* Unmap tx_cmd */ | ||
195 | if (num_tbs) | ||
196 | dma_unmap_single(trans->dev, | ||
197 | dma_unmap_addr(meta, mapping), | ||
198 | dma_unmap_len(meta, len), | ||
199 | DMA_BIDIRECTIONAL); | ||
200 | |||
201 | /* Unmap chunks, if any. */ | ||
202 | for (i = 1; i < num_tbs; i++) | ||
203 | dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i), | ||
204 | iwl_tfd_tb_get_len(tfd, i), dma_dir); | ||
205 | |||
206 | tfd->num_tbs = 0; | ||
207 | } | ||
208 | |||
209 | /** | ||
210 | * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] | ||
211 | * @trans - transport private data | ||
212 | * @txq - tx queue | ||
213 | * @dma_dir - the direction of the DMA mapping | ||
214 | * | ||
215 | * Does NOT advance any TFD circular buffer read/write indexes | ||
216 | * Does NOT free the TFD itself (which is within circular buffer) | ||
217 | */ | ||
218 | void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, | ||
219 | enum dma_data_direction dma_dir) | ||
220 | { | ||
221 | struct iwl_tfd *tfd_tmp = txq->tfds; | ||
222 | |||
223 | /* rd_ptr is bounded by n_bd and idx is bounded by n_window */ | ||
224 | int rd_ptr = txq->q.read_ptr; | ||
225 | int idx = get_cmd_index(&txq->q, rd_ptr); | ||
226 | |||
227 | lockdep_assert_held(&txq->lock); | ||
228 | |||
229 | /* We have only q->n_window txq->entries, but we use q->n_bd tfds */ | ||
230 | iwl_unmap_tfd(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr], | ||
231 | dma_dir); | ||
232 | |||
233 | /* free SKB */ | ||
234 | if (txq->entries) { | ||
235 | struct sk_buff *skb; | ||
236 | |||
237 | skb = txq->entries[idx].skb; | ||
238 | |||
239 | /* Can be called from irqs-disabled context. | ||
240 | * If skb is not NULL, it means that the whole queue is being | ||
241 | * freed and that the queue is not empty - free the skb. | ||
242 | */ | ||
243 | if (skb) { | ||
244 | iwl_op_mode_free_skb(trans->op_mode, skb); | ||
245 | txq->entries[idx].skb = NULL; | ||
246 | } | ||
247 | } | ||
248 | } | ||
249 | |||
250 | int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans, | ||
251 | struct iwl_tx_queue *txq, | ||
252 | dma_addr_t addr, u16 len, | ||
253 | u8 reset) | ||
254 | { | ||
255 | struct iwl_queue *q; | ||
256 | struct iwl_tfd *tfd, *tfd_tmp; | ||
257 | u32 num_tbs; | ||
258 | |||
259 | q = &txq->q; | ||
260 | tfd_tmp = txq->tfds; | ||
261 | tfd = &tfd_tmp[q->write_ptr]; | ||
262 | |||
263 | if (reset) | ||
264 | memset(tfd, 0, sizeof(*tfd)); | ||
265 | |||
266 | num_tbs = iwl_tfd_get_num_tbs(tfd); | ||
267 | |||
268 | /* Each TFD can point to a maximum of 20 Tx buffers */ | ||
269 | if (num_tbs >= IWL_NUM_OF_TBS) { | ||
270 | IWL_ERR(trans, "Error can not send more than %d chunks\n", | ||
271 | IWL_NUM_OF_TBS); | ||
272 | return -EINVAL; | ||
273 | } | ||
274 | |||
275 | if (WARN_ON(addr & ~DMA_BIT_MASK(36))) | ||
276 | return -EINVAL; | ||
277 | |||
278 | if (unlikely(addr & ~IWL_TX_DMA_MASK)) | ||
279 | IWL_ERR(trans, "Unaligned address = %llx\n", | ||
280 | (unsigned long long)addr); | ||
281 | |||
282 | iwl_tfd_set_tb(tfd, num_tbs, addr, len); | ||
283 | |||
284 | return 0; | ||
285 | } | ||
286 | |||
287 | /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** | ||
288 | * DMA services | ||
289 | * | ||
290 | * Theory of operation | ||
291 | * | ||
292 | * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer | ||
293 | * of buffer descriptors, each of which points to one or more data buffers for | ||
294 | * the device to read from or fill. Driver and device exchange status of each | ||
295 | * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty | ||
296 | * entries in each circular buffer, to protect against confusing empty and full | ||
297 | * queue states. | ||
298 | * | ||
299 | * The device reads or writes the data in the queues via the device's several | ||
300 | * DMA/FIFO channels. Each queue is mapped to a single DMA channel. | ||
301 | * | ||
302 | * For a Tx queue, there are low-mark and high-mark limits. If, after queuing | ||
303 | * a packet for Tx, free space becomes < low mark, the Tx queue is stopped. When | ||
304 | * reclaiming packets (on the 'tx done' IRQ), if free space becomes > high mark, | ||
305 | * the Tx queue is resumed. | ||
306 | * | ||
307 | ***************************************************/ | ||
308 | |||
309 | int iwl_queue_space(const struct iwl_queue *q) | ||
310 | { | ||
311 | int s = q->read_ptr - q->write_ptr; | ||
312 | |||
313 | if (q->read_ptr > q->write_ptr) | ||
314 | s -= q->n_bd; | ||
315 | |||
316 | if (s <= 0) | ||
317 | s += q->n_window; | ||
318 | /* keep some reserve to not confuse empty and full situations */ | ||
319 | s -= 2; | ||
320 | if (s < 0) | ||
321 | s = 0; | ||
322 | return s; | ||
323 | } | ||
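/*
 * Worked example (editor's addition, illustrative only): with n_bd = 256,
 * n_window = 64, read_ptr = 250 and write_ptr = 5, there are
 * (5 - 250) mod 256 = 11 descriptors in flight, so the computation runs
 *
 *	s = 250 - 5 = 245  ->  245 - 256 = -11  ->  -11 + 64 = 53  ->  53 - 2 = 51
 *
 * i.e. 51 of the 64 window slots may still be filled; the 2-slot reserve
 * keeps a completely full ring from looking identical to an empty one.
 */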
324 | |||
325 | /** | ||
326 | * iwl_queue_init - Initialize queue's high/low-water and read/write indexes | ||
327 | */ | ||
328 | int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id) | ||
329 | { | ||
330 | q->n_bd = count; | ||
331 | q->n_window = slots_num; | ||
332 | q->id = id; | ||
333 | |||
334 | /* count must be power-of-two size, otherwise iwl_queue_inc_wrap | ||
335 | * and iwl_queue_dec_wrap are broken. */ | ||
336 | if (WARN_ON(!is_power_of_2(count))) | ||
337 | return -EINVAL; | ||
338 | |||
339 | /* slots_num must be power-of-two size, otherwise | ||
340 | * get_cmd_index is broken. */ | ||
341 | if (WARN_ON(!is_power_of_2(slots_num))) | ||
342 | return -EINVAL; | ||
343 | |||
344 | q->low_mark = q->n_window / 4; | ||
345 | if (q->low_mark < 4) | ||
346 | q->low_mark = 4; | ||
347 | |||
348 | q->high_mark = q->n_window / 8; | ||
349 | if (q->high_mark < 2) | ||
350 | q->high_mark = 2; | ||
351 | |||
352 | q->write_ptr = q->read_ptr = 0; | ||
353 | |||
354 | return 0; | ||
355 | } | ||
356 | |||
357 | static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, | ||
358 | struct iwl_tx_queue *txq) | ||
359 | { | ||
360 | struct iwl_trans_pcie *trans_pcie = | ||
361 | IWL_TRANS_GET_PCIE_TRANS(trans); | ||
362 | struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; | ||
363 | int txq_id = txq->q.id; | ||
364 | int read_ptr = txq->q.read_ptr; | ||
365 | u8 sta_id = 0; | ||
366 | __le16 bc_ent; | ||
367 | struct iwl_tx_cmd *tx_cmd = | ||
368 | (void *)txq->entries[txq->q.read_ptr].cmd->payload; | ||
369 | |||
370 | WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); | ||
371 | |||
372 | if (txq_id != trans_pcie->cmd_queue) | ||
373 | sta_id = tx_cmd->sta_id; | ||
374 | |||
375 | bc_ent = cpu_to_le16(1 | (sta_id << 12)); | ||
376 | scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; | ||
377 | |||
378 | if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) | ||
379 | scd_bc_tbl[txq_id]. | ||
380 | tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; | ||
381 | } | ||
382 | |||
383 | static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, | ||
384 | u16 txq_id) | ||
385 | { | ||
386 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
387 | u32 tbl_dw_addr; | ||
388 | u32 tbl_dw; | ||
389 | u16 scd_q2ratid; | ||
390 | |||
391 | scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; | ||
392 | |||
393 | tbl_dw_addr = trans_pcie->scd_base_addr + | ||
394 | SCD_TRANS_TBL_OFFSET_QUEUE(txq_id); | ||
395 | |||
396 | tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr); | ||
397 | |||
398 | if (txq_id & 0x1) | ||
399 | tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); | ||
400 | else | ||
401 | tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); | ||
402 | |||
403 | iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw); | ||
404 | |||
405 | return 0; | ||
406 | } | ||
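/*
 * Illustrative sketch (editor's addition; the 0x0123 value is hypothetical):
 * the translation table packs two queues into each 32-bit word, odd queue
 * ids in the upper halfword and even ids in the lower one. For txq_id = 11
 * and scd_q2ratid = 0x0123, the read-modify-write above therefore keeps the
 * even neighbour's mapping intact:
 *
 *	tbl_dw = (0x0123 << 16) | (tbl_dw & 0x0000FFFF);
 */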
407 | |||
408 | static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id) | ||
409 | { | ||
410 | /* Simply stop the queue, but don't change any configuration; | ||
411 | * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ | ||
412 | iwl_write_prph(trans, | ||
413 | SCD_QUEUE_STATUS_BITS(txq_id), | ||
414 | (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)| | ||
415 | (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); | ||
416 | } | ||
417 | |||
418 | void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, | ||
419 | int sta_id, int tid, int frame_limit, u16 ssn) | ||
420 | { | ||
421 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
422 | |||
423 | if (test_and_set_bit(txq_id, trans_pcie->queue_used)) | ||
424 | WARN_ONCE(1, "queue %d already used - expect issues", txq_id); | ||
425 | |||
426 | /* Stop this Tx queue before configuring it */ | ||
427 | iwl_txq_set_inactive(trans, txq_id); | ||
428 | |||
429 | /* Set this queue as a chain-building queue unless it is CMD queue */ | ||
430 | if (txq_id != trans_pcie->cmd_queue) | ||
431 | iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id)); | ||
432 | |||
433 | /* If this queue is mapped to a certain station: it is an AGG queue */ | ||
434 | if (sta_id != IWL_INVALID_STATION) { | ||
435 | u16 ra_tid = BUILD_RAxTID(sta_id, tid); | ||
436 | |||
437 | /* Map receiver-address / traffic-ID to this queue */ | ||
438 | iwl_txq_set_ratid_map(trans, ra_tid, txq_id); | ||
439 | |||
440 | /* enable aggregations for the queue */ | ||
441 | iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); | ||
442 | } else { | ||
443 | /* | ||
444 | * disable aggregations for the queue, this will also make the | ||
445 | * ra_tid mapping configuration irrelevant since it is now a | ||
446 | * non-AGG queue. | ||
447 | */ | ||
448 | iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); | ||
449 | } | ||
450 | |||
451 | /* Place first TFD at index corresponding to start sequence number. | ||
452 | * Assumes that ssn_idx is valid (!= 0xFFF) */ | ||
453 | trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff); | ||
454 | trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff); | ||
455 | |||
456 | iwl_write_direct32(trans, HBUS_TARG_WRPTR, | ||
457 | (ssn & 0xff) | (txq_id << 8)); | ||
458 | iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); | ||
459 | |||
460 | /* Set up Tx window size and frame limit for this queue */ | ||
461 | iwl_write_targ_mem(trans, trans_pcie->scd_base_addr + | ||
462 | SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); | ||
463 | iwl_write_targ_mem(trans, trans_pcie->scd_base_addr + | ||
464 | SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), | ||
465 | ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & | ||
466 | SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | | ||
467 | ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & | ||
468 | SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); | ||
469 | |||
470 | /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ | ||
471 | iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), | ||
472 | (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | | ||
473 | (fifo << SCD_QUEUE_STTS_REG_POS_TXF) | | ||
474 | (1 << SCD_QUEUE_STTS_REG_POS_WSL) | | ||
475 | SCD_QUEUE_STTS_REG_MSK); | ||
476 | IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n", | ||
477 | txq_id, fifo, ssn & 0xff); | ||
478 | } | ||
479 | |||
480 | void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) | ||
481 | { | ||
482 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
483 | u16 rd_ptr, wr_ptr; | ||
484 | int n_bd = trans_pcie->txq[txq_id].q.n_bd; | ||
485 | |||
486 | if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { | ||
487 | WARN_ONCE(1, "queue %d not used", txq_id); | ||
488 | return; | ||
489 | } | ||
490 | |||
491 | rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1); | ||
492 | wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)); | ||
493 | |||
494 | WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]", | ||
495 | txq_id, rd_ptr, wr_ptr); | ||
496 | |||
497 | iwl_txq_set_inactive(trans, txq_id); | ||
498 | IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); | ||
499 | } | ||
500 | |||
501 | /*************** HOST COMMAND QUEUE FUNCTIONS *****/ | ||
502 | |||
503 | /** | ||
504 | * iwl_enqueue_hcmd - enqueue a uCode command | ||
505 | * @trans: transport private data | ||
506 | * @cmd: a pointer to the ucode command structure | ||
507 | * | ||
508 | * The function returns a value < 0 to indicate that the operation | ||
509 | * failed. On success, it returns the index (> 0) of the command in the | ||
510 | * command queue. | ||
511 | */ | ||
512 | static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | ||
513 | { | ||
514 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
515 | struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; | ||
516 | struct iwl_queue *q = &txq->q; | ||
517 | struct iwl_device_cmd *out_cmd; | ||
518 | struct iwl_cmd_meta *out_meta; | ||
519 | dma_addr_t phys_addr; | ||
520 | u32 idx; | ||
521 | u16 copy_size, cmd_size; | ||
522 | bool had_nocopy = false; | ||
523 | int i; | ||
524 | u8 *cmd_dest; | ||
525 | #ifdef CONFIG_IWLWIFI_DEVICE_TRACING | ||
526 | const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {}; | ||
527 | int trace_lens[IWL_MAX_CMD_TFDS + 1] = {}; | ||
528 | int trace_idx; | ||
529 | #endif | ||
530 | |||
531 | copy_size = sizeof(out_cmd->hdr); | ||
532 | cmd_size = sizeof(out_cmd->hdr); | ||
533 | |||
534 | /* need one for the header if the first is NOCOPY */ | ||
535 | BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1); | ||
536 | |||
537 | for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { | ||
538 | if (!cmd->len[i]) | ||
539 | continue; | ||
540 | if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { | ||
541 | had_nocopy = true; | ||
542 | } else { | ||
543 | /* NOCOPY must not be followed by normal! */ | ||
544 | if (WARN_ON(had_nocopy)) | ||
545 | return -EINVAL; | ||
546 | copy_size += cmd->len[i]; | ||
547 | } | ||
548 | cmd_size += cmd->len[i]; | ||
549 | } | ||
550 | |||
551 | /* | ||
552 | * If any of the command structures end up being larger than | ||
553 | * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically | ||
554 | * allocated into separate TFDs, then we will need to | ||
555 | * increase the size of the buffers. | ||
556 | */ | ||
557 | if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE)) | ||
558 | return -EINVAL; | ||
559 | |||
560 | spin_lock_bh(&txq->lock); | ||
561 | |||
562 | if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { | ||
563 | spin_unlock_bh(&txq->lock); | ||
564 | |||
565 | IWL_ERR(trans, "No space in command queue\n"); | ||
566 | iwl_op_mode_cmd_queue_full(trans->op_mode); | ||
567 | return -ENOSPC; | ||
568 | } | ||
569 | |||
570 | idx = get_cmd_index(q, q->write_ptr); | ||
571 | out_cmd = txq->entries[idx].cmd; | ||
572 | out_meta = &txq->entries[idx].meta; | ||
573 | |||
574 | memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ | ||
575 | if (cmd->flags & CMD_WANT_SKB) | ||
576 | out_meta->source = cmd; | ||
577 | |||
578 | /* set up the header */ | ||
579 | |||
580 | out_cmd->hdr.cmd = cmd->id; | ||
581 | out_cmd->hdr.flags = 0; | ||
582 | out_cmd->hdr.sequence = | ||
583 | cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | | ||
584 | INDEX_TO_SEQ(q->write_ptr)); | ||
585 | |||
586 | /* and copy the data that needs to be copied */ | ||
587 | |||
588 | cmd_dest = out_cmd->payload; | ||
589 | for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { | ||
590 | if (!cmd->len[i]) | ||
591 | continue; | ||
592 | if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) | ||
593 | break; | ||
594 | memcpy(cmd_dest, cmd->data[i], cmd->len[i]); | ||
595 | cmd_dest += cmd->len[i]; | ||
596 | } | ||
597 | |||
598 | IWL_DEBUG_HC(trans, | ||
599 | "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", | ||
600 | trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd), | ||
601 | out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), | ||
602 | cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); | ||
603 | |||
604 | phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size, | ||
605 | DMA_BIDIRECTIONAL); | ||
606 | if (unlikely(dma_mapping_error(trans->dev, phys_addr))) { | ||
607 | idx = -ENOMEM; | ||
608 | goto out; | ||
609 | } | ||
610 | |||
611 | dma_unmap_addr_set(out_meta, mapping, phys_addr); | ||
612 | dma_unmap_len_set(out_meta, len, copy_size); | ||
613 | |||
614 | iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1); | ||
615 | #ifdef CONFIG_IWLWIFI_DEVICE_TRACING | ||
616 | trace_bufs[0] = &out_cmd->hdr; | ||
617 | trace_lens[0] = copy_size; | ||
618 | trace_idx = 1; | ||
619 | #endif | ||
620 | |||
621 | for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { | ||
622 | if (!cmd->len[i]) | ||
623 | continue; | ||
624 | if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)) | ||
625 | continue; | ||
626 | phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i], | ||
627 | cmd->len[i], DMA_BIDIRECTIONAL); | ||
628 | if (dma_mapping_error(trans->dev, phys_addr)) { | ||
629 | iwl_unmap_tfd(trans, out_meta, | ||
630 | &txq->tfds[q->write_ptr], | ||
631 | DMA_BIDIRECTIONAL); | ||
632 | idx = -ENOMEM; | ||
633 | goto out; | ||
634 | } | ||
635 | |||
636 | iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, | ||
637 | cmd->len[i], 0); | ||
638 | #ifdef CONFIG_IWLWIFI_DEVICE_TRACING | ||
639 | trace_bufs[trace_idx] = cmd->data[i]; | ||
640 | trace_lens[trace_idx] = cmd->len[i]; | ||
641 | trace_idx++; | ||
642 | #endif | ||
643 | } | ||
644 | |||
645 | out_meta->flags = cmd->flags; | ||
646 | |||
647 | txq->need_update = 1; | ||
648 | |||
649 | /* check that tracing gets all possible blocks */ | ||
650 | BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3); | ||
651 | #ifdef CONFIG_IWLWIFI_DEVICE_TRACING | ||
652 | trace_iwlwifi_dev_hcmd(trans->dev, cmd->flags, | ||
653 | trace_bufs[0], trace_lens[0], | ||
654 | trace_bufs[1], trace_lens[1], | ||
655 | trace_bufs[2], trace_lens[2]); | ||
656 | #endif | ||
657 | |||
658 | /* start timer if queue currently empty */ | ||
659 | if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout) | ||
660 | mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); | ||
661 | |||
662 | /* Increment and update queue's write index */ | ||
663 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); | ||
664 | iwl_txq_update_write_ptr(trans, txq); | ||
665 | |||
666 | out: | ||
667 | spin_unlock_bh(&txq->lock); | ||
668 | return idx; | ||
669 | } | ||
670 | |||
671 | static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie, | ||
672 | struct iwl_tx_queue *txq) | ||
673 | { | ||
674 | if (!trans_pcie->wd_timeout) | ||
675 | return; | ||
676 | |||
677 | /* | ||
678 | * if empty delete timer, otherwise move timer forward | ||
679 | * since we're making progress on this queue | ||
680 | */ | ||
681 | if (txq->q.read_ptr == txq->q.write_ptr) | ||
682 | del_timer(&txq->stuck_timer); | ||
683 | else | ||
684 | mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); | ||
685 | } | ||
686 | |||
687 | /** | ||
688 | * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd | ||
689 | * | ||
690 | * When the FW advances the 'R' index, all entries between the old and new 'R' | ||
691 | * index need to be reclaimed. As a result, some free space forms. If there is | ||
692 | * enough free space (> low mark), wake the stack that feeds us. | ||
693 | */ | ||
694 | static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id, | ||
695 | int idx) | ||
696 | { | ||
697 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
698 | struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; | ||
699 | struct iwl_queue *q = &txq->q; | ||
700 | int nfreed = 0; | ||
701 | |||
702 | lockdep_assert_held(&txq->lock); | ||
703 | |||
704 | if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) { | ||
705 | IWL_ERR(trans, | ||
706 | "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", | ||
707 | __func__, txq_id, idx, q->n_bd, | ||
708 | q->write_ptr, q->read_ptr); | ||
709 | return; | ||
710 | } | ||
711 | |||
712 | for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; | ||
713 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | ||
714 | |||
715 | if (nfreed++ > 0) { | ||
716 | IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", | ||
717 | idx, q->write_ptr, q->read_ptr); | ||
718 | iwl_op_mode_nic_error(trans->op_mode); | ||
719 | } | ||
720 | |||
721 | } | ||
722 | |||
723 | iwl_queue_progress(trans_pcie, txq); | ||
724 | } | ||
725 | |||
726 | /** | ||
727 | * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them | ||
728 | * @rxb: Rx buffer to reclaim | ||
729 | * @handler_status: return value of the handler of the command | ||
730 | * (put in setup_rx_handlers) | ||
731 | * | ||
732 | * If an Rx buffer has an async callback associated with it the callback | ||
733 | * will be executed. The attached skb (if present) will only be freed | ||
734 | * if the callback returns 1 | ||
735 | */ | ||
736 | void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb, | ||
737 | int handler_status) | ||
738 | { | ||
739 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
740 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); | ||
741 | int txq_id = SEQ_TO_QUEUE(sequence); | ||
742 | int index = SEQ_TO_INDEX(sequence); | ||
743 | int cmd_index; | ||
744 | struct iwl_device_cmd *cmd; | ||
745 | struct iwl_cmd_meta *meta; | ||
746 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
747 | struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; | ||
748 | |||
749 | /* If a Tx command is being handled and it isn't in the actual | ||
750 | * command queue, then a command routing bug has been introduced | ||
751 | * in the queue management code. */ | ||
752 | if (WARN(txq_id != trans_pcie->cmd_queue, | ||
753 | "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", | ||
754 | txq_id, trans_pcie->cmd_queue, sequence, | ||
755 | trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr, | ||
756 | trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) { | ||
757 | iwl_print_hex_error(trans, pkt, 32); | ||
758 | return; | ||
759 | } | ||
760 | |||
761 | spin_lock(&txq->lock); | ||
762 | |||
763 | cmd_index = get_cmd_index(&txq->q, index); | ||
764 | cmd = txq->entries[cmd_index].cmd; | ||
765 | meta = &txq->entries[cmd_index].meta; | ||
766 | |||
767 | iwl_unmap_tfd(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); | ||
768 | |||
769 | /* Input error checking is done when commands are added to queue. */ | ||
770 | if (meta->flags & CMD_WANT_SKB) { | ||
771 | struct page *p = rxb_steal_page(rxb); | ||
772 | |||
773 | meta->source->resp_pkt = pkt; | ||
774 | meta->source->_rx_page_addr = (unsigned long)page_address(p); | ||
775 | meta->source->_rx_page_order = trans_pcie->rx_page_order; | ||
776 | meta->source->handler_status = handler_status; | ||
777 | } | ||
778 | |||
779 | iwl_hcmd_queue_reclaim(trans, txq_id, index); | ||
780 | |||
781 | if (!(meta->flags & CMD_ASYNC)) { | ||
782 | if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { | ||
783 | IWL_WARN(trans, | ||
784 | "HCMD_ACTIVE already clear for command %s\n", | ||
785 | trans_pcie_get_cmd_string(trans_pcie, | ||
786 | cmd->hdr.cmd)); | ||
787 | } | ||
788 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); | ||
789 | IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", | ||
790 | trans_pcie_get_cmd_string(trans_pcie, | ||
791 | cmd->hdr.cmd)); | ||
792 | wake_up(&trans->wait_command_queue); | ||
793 | } | ||
794 | |||
795 | meta->flags = 0; | ||
796 | |||
797 | spin_unlock(&txq->lock); | ||
798 | } | ||
799 | |||
800 | #define HOST_COMPLETE_TIMEOUT (2 * HZ) | ||
801 | |||
802 | static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | ||
803 | { | ||
804 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
805 | int ret; | ||
806 | |||
807 | /* An asynchronous command can not expect an SKB to be set. */ | ||
808 | if (WARN_ON(cmd->flags & CMD_WANT_SKB)) | ||
809 | return -EINVAL; | ||
810 | |||
811 | |||
812 | ret = iwl_enqueue_hcmd(trans, cmd); | ||
813 | if (ret < 0) { | ||
814 | IWL_ERR(trans, | ||
815 | "Error sending %s: enqueue_hcmd failed: %d\n", | ||
816 | trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret); | ||
817 | return ret; | ||
818 | } | ||
819 | return 0; | ||
820 | } | ||
821 | |||
822 | static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | ||
823 | { | ||
824 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
825 | int cmd_idx; | ||
826 | int ret; | ||
827 | |||
828 | IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", | ||
829 | trans_pcie_get_cmd_string(trans_pcie, cmd->id)); | ||
830 | |||
831 | if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE, | ||
832 | &trans_pcie->status))) { | ||
833 | IWL_ERR(trans, "Command %s: a command is already active!\n", | ||
834 | trans_pcie_get_cmd_string(trans_pcie, cmd->id)); | ||
835 | return -EIO; | ||
836 | } | ||
837 | |||
838 | IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", | ||
839 | trans_pcie_get_cmd_string(trans_pcie, cmd->id)); | ||
840 | |||
841 | cmd_idx = iwl_enqueue_hcmd(trans, cmd); | ||
842 | if (cmd_idx < 0) { | ||
843 | ret = cmd_idx; | ||
844 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); | ||
845 | IWL_ERR(trans, | ||
846 | "Error sending %s: enqueue_hcmd failed: %d\n", | ||
847 | trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret); | ||
848 | return ret; | ||
849 | } | ||
850 | |||
851 | ret = wait_event_timeout(trans->wait_command_queue, | ||
852 | !test_bit(STATUS_HCMD_ACTIVE, | ||
853 | &trans_pcie->status), | ||
854 | HOST_COMPLETE_TIMEOUT); | ||
855 | if (!ret) { | ||
856 | if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { | ||
857 | struct iwl_tx_queue *txq = | ||
858 | &trans_pcie->txq[trans_pcie->cmd_queue]; | ||
859 | struct iwl_queue *q = &txq->q; | ||
860 | |||
861 | IWL_ERR(trans, | ||
862 | "Error sending %s: time out after %dms.\n", | ||
863 | trans_pcie_get_cmd_string(trans_pcie, cmd->id), | ||
864 | jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); | ||
865 | |||
866 | IWL_ERR(trans, | ||
867 | "Current CMD queue read_ptr %d write_ptr %d\n", | ||
868 | q->read_ptr, q->write_ptr); | ||
869 | |||
870 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); | ||
871 | IWL_DEBUG_INFO(trans, | ||
872 | "Clearing HCMD_ACTIVE for command %s\n", | ||
873 | trans_pcie_get_cmd_string(trans_pcie, | ||
874 | cmd->id)); | ||
875 | ret = -ETIMEDOUT; | ||
876 | goto cancel; | ||
877 | } | ||
878 | } | ||
879 | |||
880 | if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { | ||
881 | IWL_ERR(trans, "Error: Response NULL in '%s'\n", | ||
882 | trans_pcie_get_cmd_string(trans_pcie, cmd->id)); | ||
883 | ret = -EIO; | ||
884 | goto cancel; | ||
885 | } | ||
886 | |||
887 | return 0; | ||
888 | |||
889 | cancel: | ||
890 | if (cmd->flags & CMD_WANT_SKB) { | ||
891 | /* | ||
892 | * Cancel the CMD_WANT_SKB flag for the cmd in the | ||
893 | * TX cmd queue. Otherwise in case the cmd comes | ||
894 | * in later, it will possibly set an invalid | ||
895 | * address (cmd->meta.source). | ||
896 | */ | ||
897 | trans_pcie->txq[trans_pcie->cmd_queue]. | ||
898 | entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; | ||
899 | } | ||
900 | |||
901 | if (cmd->resp_pkt) { | ||
902 | iwl_free_resp(cmd); | ||
903 | cmd->resp_pkt = NULL; | ||
904 | } | ||
905 | |||
906 | return ret; | ||
907 | } | ||
908 | |||
909 | int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | ||
910 | { | ||
911 | if (cmd->flags & CMD_ASYNC) | ||
912 | return iwl_send_cmd_async(trans, cmd); | ||
913 | |||
914 | return iwl_send_cmd_sync(trans, cmd); | ||
915 | } | ||
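/*
 * Usage sketch (editor's addition, illustrative only; REPLY_EXAMPLE and
 * "payload" are hypothetical): a caller fills in an iwl_host_cmd and submits
 * it; without CMD_ASYNC the call blocks until the firmware responds or the
 * HOST_COMPLETE_TIMEOUT expires.
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = REPLY_EXAMPLE,
 *		.flags = CMD_WANT_SKB,
 *		.data = { &payload, },
 *		.len = { sizeof(payload), },
 *	};
 *
 *	ret = iwl_trans_pcie_send_cmd(trans, &cmd);
 *	if (!ret && cmd.resp_pkt)
 *		iwl_free_resp(&cmd);
 *
 * CMD_WANT_SKB callers own the response packet and must release it with
 * iwl_free_resp(); asynchronous callers must not set CMD_WANT_SKB, as the
 * WARN_ON in iwl_send_cmd_async() above enforces.
 */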
916 | |||
917 | /* Frees buffers until index _not_ inclusive */ | ||
918 | int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, | ||
919 | struct sk_buff_head *skbs) | ||
920 | { | ||
921 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
922 | struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; | ||
923 | struct iwl_queue *q = &txq->q; | ||
924 | int last_to_free; | ||
925 | int freed = 0; | ||
926 | |||
927 | /* This function is not meant to release the cmd queue */ | ||
928 | if (WARN_ON(txq_id == trans_pcie->cmd_queue)) | ||
929 | return 0; | ||
930 | |||
931 | lockdep_assert_held(&txq->lock); | ||
932 | |||
933 | /* Since we free until index _not_ inclusive, the one before index is | ||
934 | * the last we will free. This one must be used */ | ||
935 | last_to_free = iwl_queue_dec_wrap(index, q->n_bd); | ||
936 | |||
937 | if ((index >= q->n_bd) || | ||
938 | (iwl_queue_used(q, last_to_free) == 0)) { | ||
939 | IWL_ERR(trans, | ||
940 | "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", | ||
941 | __func__, txq_id, last_to_free, q->n_bd, | ||
942 | q->write_ptr, q->read_ptr); | ||
943 | return 0; | ||
944 | } | ||
945 | |||
946 | if (WARN_ON(!skb_queue_empty(skbs))) | ||
947 | return 0; | ||
948 | |||
949 | for (; | ||
950 | q->read_ptr != index; | ||
951 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | ||
952 | |||
953 | if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL)) | ||
954 | continue; | ||
955 | |||
956 | __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb); | ||
957 | |||
958 | txq->entries[txq->q.read_ptr].skb = NULL; | ||
959 | |||
960 | iwlagn_txq_inval_byte_cnt_tbl(trans, txq); | ||
961 | |||
962 | iwl_txq_free_tfd(trans, txq, DMA_TO_DEVICE); | ||
963 | freed++; | ||
964 | } | ||
965 | |||
966 | iwl_queue_progress(trans_pcie, txq); | ||
967 | |||
968 | return freed; | ||
969 | } | ||
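/*
 * Usage sketch (editor's addition, illustrative only; txq, txq_id and
 * scd_ssn are hypothetical): a TX-done handler collects the reclaimed
 * frames under the queue lock and then hands every skb back to the stack,
 * e.g. via ieee80211_tx_status().
 *
 *	struct sk_buff_head skbs;
 *	int freed;
 *
 *	__skb_queue_head_init(&skbs);
 *	spin_lock(&txq->lock);
 *	freed = iwl_tx_queue_reclaim(trans, txq_id, scd_ssn, &skbs);
 *	spin_unlock(&txq->lock);
 */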