author		Emmanuel Grumbach <emmanuel.grumbach@intel.com>	2012-11-14 07:44:18 -0500
committer	Johannes Berg <johannes.berg@intel.com>		2012-11-19 09:04:20 -0500
commit		f02831be962c7be68c72110fa779e916ab1a8cdd (patch)
tree		08d768adea7c0d03381e5c896765049cd42dddb2 /drivers/net
parent		7afe3705cd4e2a5490140cc15a15b3ea7a10b889 (diff)
iwlwifi: continue clean up - pcie/tx.c
Rename static functions. Functions were moved from trans.c to tx.c; a few could be made static, others had to be exported. Functions that implement the transport API are prefixed by iwl_trans_pcie_, the others by iwl_pcie_.

Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
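As a quick illustration of that naming convention, here are a few of the prototypes this patch adds to pcie/internal.h (the grouping into "transport API" and "internal helper" is an annotation on the patch, not part of the commit message):

	/* transport API entry points, hooked up via struct iwl_trans_ops */
	int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
			      struct iwl_device_cmd *dev_cmd, int txq_id);
	void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
				    struct sk_buff_head *skbs);
	int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	/* PCIe-internal helpers, called only from within pcie/ */
	int iwl_pcie_tx_init(struct iwl_trans *trans);
	int iwl_pcie_tx_stop(struct iwl_trans *trans);
	void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);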
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/internal.h	|   32
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/trans.c	|  701
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/tx.c		| 1095
3 files changed, 913 insertions(+), 915 deletions(-)
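For orientation, the net effect on the PCIe transport ops table (taken from the trans.c hunk near the end of this page, abridged to the members this patch touches) is roughly:

	static const struct iwl_trans_ops trans_ops_pcie = {
		/* ... other ops unchanged ... */
		.send_cmd	= iwl_trans_pcie_send_hcmd,
		.tx		= iwl_trans_pcie_tx,
		.reclaim	= iwl_trans_pcie_reclaim,
		.txq_disable	= iwl_trans_pcie_txq_disable,
		.txq_enable	= iwl_trans_pcie_txq_enable,
		/* ... */
	};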
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index a71a78237b62..0d61e91c0a6f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -315,6 +315,10 @@ iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
 			    trans_specific);
 }
 
+/*
+ * Convention: trans API functions: iwl_trans_pcie_XXX
+ * Other functions: iwl_pcie_XXX
+ */
 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 				       const struct pci_device_id *ent,
 				       const struct iwl_cfg *cfg);
@@ -341,25 +345,21 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
 /*****************************************************
 * TX / HCMD
 ******************************************************/
+int iwl_pcie_tx_init(struct iwl_trans *trans);
+void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
+int iwl_pcie_tx_stop(struct iwl_trans *trans);
+void iwl_pcie_tx_free(struct iwl_trans *trans);
+void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
+			       int sta_id, int tid, int frame_limit, u16 ssn);
+void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
+int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
+		      struct iwl_device_cmd *dev_cmd, int txq_id);
 void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
-int iwl_pcie_tx_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
-			  dma_addr_t addr, u16 len, u8 reset);
-int iwl_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
+int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 			    struct iwl_rx_cmd_buffer *rxb, int handler_status);
-void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
-				      struct iwl_txq *txq, u16 byte_cnt);
-void iwl_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
-			 int sta_id, int tid, int frame_limit, u16 ssn);
-void iwl_pcie_txq_disable(struct iwl_trans *trans, int queue);
-void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
-			   enum dma_data_direction dma_dir);
-int iwl_pcie_txq_reclaim(struct iwl_trans *trans, int txq_id, int index,
-			 struct sk_buff_head *skbs);
-void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id);
-int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
-int iwl_queue_space(const struct iwl_queue *q);
-
+void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+			    struct sk_buff_head *skbs);
 /*****************************************************
 * Error handling
 ******************************************************/
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 8a5b5af968ad..19c11e3b5481 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -74,392 +74,6 @@
74#include "iwl-prph.h" 74#include "iwl-prph.h"
75#include "iwl-agn-hw.h" 75#include "iwl-agn-hw.h"
76#include "internal.h" 76#include "internal.h"
77/* FIXME: need to abstract out TX command (once we know what it looks like) */
78#include "dvm/commands.h"
79
80#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \
81 (((1<<trans->cfg->base_params->num_of_queues) - 1) &\
82 (~(1<<(trans_pcie)->cmd_queue)))
83
84static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
85 struct iwl_dma_ptr *ptr, size_t size)
86{
87 if (WARN_ON(ptr->addr))
88 return -EINVAL;
89
90 ptr->addr = dma_alloc_coherent(trans->dev, size,
91 &ptr->dma, GFP_KERNEL);
92 if (!ptr->addr)
93 return -ENOMEM;
94 ptr->size = size;
95 return 0;
96}
97
98static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
99 struct iwl_dma_ptr *ptr)
100{
101 if (unlikely(!ptr->addr))
102 return;
103
104 dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
105 memset(ptr, 0, sizeof(*ptr));
106}
107
108static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
109{
110 struct iwl_txq *txq = (void *)data;
111 struct iwl_queue *q = &txq->q;
112 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
113 struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
114 u32 scd_sram_addr = trans_pcie->scd_base_addr +
115 SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
116 u8 buf[16];
117 int i;
118
119 spin_lock(&txq->lock);
120 /* check if triggered erroneously */
121 if (txq->q.read_ptr == txq->q.write_ptr) {
122 spin_unlock(&txq->lock);
123 return;
124 }
125 spin_unlock(&txq->lock);
126
127 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
128 jiffies_to_msecs(trans_pcie->wd_timeout));
129 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
130 txq->q.read_ptr, txq->q.write_ptr);
131
132 iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
133
134 iwl_print_hex_error(trans, buf, sizeof(buf));
135
136 for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
137 IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
138 iwl_read_direct32(trans, FH_TX_TRB_REG(i)));
139
140 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
141 u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
142 u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
143 bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
144 u32 tbl_dw =
145 iwl_read_targ_mem(trans,
146 trans_pcie->scd_base_addr +
147 SCD_TRANS_TBL_OFFSET_QUEUE(i));
148
149 if (i & 0x1)
150 tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
151 else
152 tbl_dw = tbl_dw & 0x0000FFFF;
153
154 IWL_ERR(trans,
155 "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
156 i, active ? "" : "in", fifo, tbl_dw,
157 iwl_read_prph(trans,
158 SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
159 iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
160 }
161
162 for (i = q->read_ptr; i != q->write_ptr;
163 i = iwl_queue_inc_wrap(i, q->n_bd)) {
164 struct iwl_tx_cmd *tx_cmd =
165 (struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
166 IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
167 get_unaligned_le32(&tx_cmd->scratch));
168 }
169
170 iwl_op_mode_nic_error(trans->op_mode);
171}
172
173static int iwl_trans_txq_alloc(struct iwl_trans *trans,
174 struct iwl_txq *txq, int slots_num,
175 u32 txq_id)
176{
177 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
178 size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
179 int i;
180
181 if (WARN_ON(txq->entries || txq->tfds))
182 return -EINVAL;
183
184 setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer,
185 (unsigned long)txq);
186 txq->trans_pcie = trans_pcie;
187
188 txq->q.n_window = slots_num;
189
190 txq->entries = kcalloc(slots_num,
191 sizeof(struct iwl_pcie_txq_entry),
192 GFP_KERNEL);
193
194 if (!txq->entries)
195 goto error;
196
197 if (txq_id == trans_pcie->cmd_queue)
198 for (i = 0; i < slots_num; i++) {
199 txq->entries[i].cmd =
200 kmalloc(sizeof(struct iwl_device_cmd),
201 GFP_KERNEL);
202 if (!txq->entries[i].cmd)
203 goto error;
204 }
205
206 /* Circular buffer of transmit frame descriptors (TFDs),
207 * shared with device */
208 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
209 &txq->q.dma_addr, GFP_KERNEL);
210 if (!txq->tfds) {
211 IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
212 goto error;
213 }
214 txq->q.id = txq_id;
215
216 return 0;
217error:
218 if (txq->entries && txq_id == trans_pcie->cmd_queue)
219 for (i = 0; i < slots_num; i++)
220 kfree(txq->entries[i].cmd);
221 kfree(txq->entries);
222 txq->entries = NULL;
223
224 return -ENOMEM;
225
226}
227
228static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
229 int slots_num, u32 txq_id)
230{
231 int ret;
232
233 txq->need_update = 0;
234
235 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
236 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
237 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
238
239 /* Initialize queue's high/low-water marks, and head/tail indexes */
240 ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
241 txq_id);
242 if (ret)
243 return ret;
244
245 spin_lock_init(&txq->lock);
246
247 /*
248 * Tell nic where to find circular buffer of Tx Frame Descriptors for
249 * given Tx queue, and enable the DMA channel used for that queue.
250 * Circular buffer (TFD queue in DRAM) physical base address */
251 iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
252 txq->q.dma_addr >> 8);
253
254 return 0;
255}
256
257/*
258 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
259 */
260void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
261{
262 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
263 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
264 struct iwl_queue *q = &txq->q;
265 enum dma_data_direction dma_dir;
266
267 if (!q->n_bd)
268 return;
269
270 /* In the command queue, all the TBs are mapped as BIDI
271 * so unmap them as such.
272 */
273 if (txq_id == trans_pcie->cmd_queue)
274 dma_dir = DMA_BIDIRECTIONAL;
275 else
276 dma_dir = DMA_TO_DEVICE;
277
278 spin_lock_bh(&txq->lock);
279 while (q->write_ptr != q->read_ptr) {
280 iwl_pcie_txq_free_tfd(trans, txq, dma_dir);
281 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
282 }
283 spin_unlock_bh(&txq->lock);
284}
285
286/*
287 * iwl_txq_free - Deallocate DMA queue.
288 * @txq: Transmit queue to deallocate.
289 *
290 * Empty queue by removing and destroying all BD's.
291 * Free all buffers.
292 * 0-fill, but do not free "txq" descriptor structure.
293 */
294static void iwl_txq_free(struct iwl_trans *trans, int txq_id)
295{
296 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
297 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
298 struct device *dev = trans->dev;
299 int i;
300
301 if (WARN_ON(!txq))
302 return;
303
304 iwl_pcie_txq_unmap(trans, txq_id);
305
306 /* De-alloc array of command/tx buffers */
307 if (txq_id == trans_pcie->cmd_queue)
308 for (i = 0; i < txq->q.n_window; i++) {
309 kfree(txq->entries[i].cmd);
310 kfree(txq->entries[i].copy_cmd);
311 kfree(txq->entries[i].free_buf);
312 }
313
314 /* De-alloc circular buffer of TFDs */
315 if (txq->q.n_bd) {
316 dma_free_coherent(dev, sizeof(struct iwl_tfd) *
317 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
318 memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
319 }
320
321 kfree(txq->entries);
322 txq->entries = NULL;
323
324 del_timer_sync(&txq->stuck_timer);
325
326 /* 0-fill queue descriptor structure */
327 memset(txq, 0, sizeof(*txq));
328}
329
330/*
331 * iwl_trans_tx_free - Free TXQ Context
332 *
333 * Destroy all TX DMA queues and structures
334 */
335static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
336{
337 int txq_id;
338 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
339
340 /* Tx queues */
341 if (trans_pcie->txq) {
342 for (txq_id = 0;
343 txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
344 iwl_txq_free(trans, txq_id);
345 }
346
347 kfree(trans_pcie->txq);
348 trans_pcie->txq = NULL;
349
350 iwlagn_free_dma_ptr(trans, &trans_pcie->kw);
351
352 iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
353}
354
355/*
356 * iwl_trans_tx_alloc - allocate TX context
357 * Allocate all Tx DMA structures and initialize them
358 */
359static int iwl_trans_tx_alloc(struct iwl_trans *trans)
360{
361 int ret;
362 int txq_id, slots_num;
363 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
364
365 u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
366 sizeof(struct iwlagn_scd_bc_tbl);
367
368 /*It is not allowed to alloc twice, so warn when this happens.
369 * We cannot rely on the previous allocation, so free and fail */
370 if (WARN_ON(trans_pcie->txq)) {
371 ret = -EINVAL;
372 goto error;
373 }
374
375 ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
376 scd_bc_tbls_size);
377 if (ret) {
378 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
379 goto error;
380 }
381
382 /* Alloc keep-warm buffer */
383 ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
384 if (ret) {
385 IWL_ERR(trans, "Keep Warm allocation failed\n");
386 goto error;
387 }
388
389 trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
390 sizeof(struct iwl_txq), GFP_KERNEL);
391 if (!trans_pcie->txq) {
392 IWL_ERR(trans, "Not enough memory for txq\n");
393 ret = ENOMEM;
394 goto error;
395 }
396
397 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
398 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
399 txq_id++) {
400 slots_num = (txq_id == trans_pcie->cmd_queue) ?
401 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
402 ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
403 slots_num, txq_id);
404 if (ret) {
405 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
406 goto error;
407 }
408 }
409
410 return 0;
411
412error:
413 iwl_trans_pcie_tx_free(trans);
414
415 return ret;
416}
417static int iwl_tx_init(struct iwl_trans *trans)
418{
419 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
420 int ret;
421 int txq_id, slots_num;
422 unsigned long flags;
423 bool alloc = false;
424
425 if (!trans_pcie->txq) {
426 ret = iwl_trans_tx_alloc(trans);
427 if (ret)
428 goto error;
429 alloc = true;
430 }
431
432 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
433
434 /* Turn off all Tx DMA fifos */
435 iwl_write_prph(trans, SCD_TXFACT, 0);
436
437 /* Tell NIC where to find the "keep warm" buffer */
438 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
439 trans_pcie->kw.dma >> 4);
440
441 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
442
443 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
444 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
445 txq_id++) {
446 slots_num = (txq_id == trans_pcie->cmd_queue) ?
447 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
448 ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
449 slots_num, txq_id);
450 if (ret) {
451 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
452 goto error;
453 }
454 }
455
456 return 0;
457error:
458 /*Upon error, free only if we allocated something */
459 if (alloc)
460 iwl_trans_pcie_tx_free(trans);
461 return ret;
462}
463 77
464static void iwl_pcie_set_pwr_vmain(struct iwl_trans *trans) 78static void iwl_pcie_set_pwr_vmain(struct iwl_trans *trans)
465{ 79{
@@ -659,7 +273,7 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
 	iwl_pcie_rx_init(trans);
 
 	/* Allocate or reset and init all Tx and Command queues */
-	if (iwl_tx_init(trans))
+	if (iwl_pcie_tx_init(trans))
 		return -ENOMEM;
 
 	if (trans->cfg->base_params->shadow_reg_enable) {
@@ -874,126 +488,10 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
874 return iwl_pcie_load_given_ucode(trans, fw); 488 return iwl_pcie_load_given_ucode(trans, fw);
875} 489}
876 490
877/*
878 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
879 */
880static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
881{
882 struct iwl_trans_pcie __maybe_unused *trans_pcie =
883 IWL_TRANS_GET_PCIE_TRANS(trans);
884
885 iwl_write_prph(trans, SCD_TXFACT, mask);
886}
887
888static void iwl_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
889{
890 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
891 u32 a;
892 int chan;
893 u32 reg_val;
894
895 /* make sure all queue are not stopped/used */
896 memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
897 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
898
899 trans_pcie->scd_base_addr =
900 iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
901
902 WARN_ON(scd_base_addr != 0 &&
903 scd_base_addr != trans_pcie->scd_base_addr);
904
905 a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
906 /* reset conext data memory */
907 for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
908 a += 4)
909 iwl_write_targ_mem(trans, a, 0);
910 /* reset tx status memory */
911 for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
912 a += 4)
913 iwl_write_targ_mem(trans, a, 0);
914 for (; a < trans_pcie->scd_base_addr +
915 SCD_TRANS_TBL_OFFSET_QUEUE(
916 trans->cfg->base_params->num_of_queues);
917 a += 4)
918 iwl_write_targ_mem(trans, a, 0);
919
920 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
921 trans_pcie->scd_bc_tbls.dma >> 10);
922
923 /* The chain extension of the SCD doesn't work well. This feature is
924 * enabled by default by the HW, so we need to disable it manually.
925 */
926 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
927
928 iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
929 trans_pcie->cmd_fifo);
930
931 /* Activate all Tx DMA/FIFO channels */
932 iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
933
934 /* Enable DMA channel */
935 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
936 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
937 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
938 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
939
940 /* Update FH chicken bits */
941 reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
942 iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
943 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
944
945 /* Enable L1-Active */
946 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
947 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
948}
949
950static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) 491static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
951{ 492{
952 iwl_pcie_reset_ict(trans); 493 iwl_pcie_reset_ict(trans);
953 iwl_tx_start(trans, scd_addr); 494 iwl_pcie_tx_start(trans, scd_addr);
954}
955
956/*
957 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
958 */
959static int iwl_trans_tx_stop(struct iwl_trans *trans)
960{
961 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
962 int ch, txq_id, ret;
963 unsigned long flags;
964
965 /* Turn off all Tx DMA fifos */
966 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
967
968 iwl_trans_txq_set_sched(trans, 0);
969
970 /* Stop each Tx DMA channel, and wait for it to be idle */
971 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
972 iwl_write_direct32(trans,
973 FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
974 ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
975 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
976 if (ret < 0)
977 IWL_ERR(trans,
978 "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
979 ch,
980 iwl_read_direct32(trans,
981 FH_TSSR_TX_STATUS_REG));
982 }
983 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
984
985 if (!trans_pcie->txq) {
986 IWL_WARN(trans,
987 "Stopping tx queues that aren't allocated...\n");
988 return 0;
989 }
990
991 /* Unmap DMA from host system and free skb's */
992 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
993 txq_id++)
994 iwl_pcie_txq_unmap(trans, txq_id);
995
996 return 0;
997} 495}
998 496
999static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) 497static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
@@ -1017,7 +515,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
 	 * already dead.
 	 */
 	if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
-		iwl_trans_tx_stop(trans);
+		iwl_pcie_tx_stop(trans);
 		iwl_pcie_rx_stop(trans);
 
 		/* Power-down device's busmaster DMA clocks */
@@ -1070,170 +568,6 @@ static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
1070 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 568 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1071} 569}
1072 570
1073static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1074 struct iwl_device_cmd *dev_cmd, int txq_id)
1075{
1076 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1077 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1078 struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
1079 struct iwl_cmd_meta *out_meta;
1080 struct iwl_txq *txq;
1081 struct iwl_queue *q;
1082 dma_addr_t phys_addr = 0;
1083 dma_addr_t txcmd_phys;
1084 dma_addr_t scratch_phys;
1085 u16 len, firstlen, secondlen;
1086 u8 wait_write_ptr = 0;
1087 __le16 fc = hdr->frame_control;
1088 u8 hdr_len = ieee80211_hdrlen(fc);
1089 u16 __maybe_unused wifi_seq;
1090
1091 txq = &trans_pcie->txq[txq_id];
1092 q = &txq->q;
1093
1094 if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
1095 WARN_ON_ONCE(1);
1096 return -EINVAL;
1097 }
1098
1099 spin_lock(&txq->lock);
1100
1101 /* In AGG mode, the index in the ring must correspond to the WiFi
1102 * sequence number. This is a HW requirements to help the SCD to parse
1103 * the BA.
1104 * Check here that the packets are in the right place on the ring.
1105 */
1106#ifdef CONFIG_IWLWIFI_DEBUG
1107 wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
1108 WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
1109 ((wifi_seq & 0xff) != q->write_ptr),
1110 "Q: %d WiFi Seq %d tfdNum %d",
1111 txq_id, wifi_seq, q->write_ptr);
1112#endif
1113
1114 /* Set up driver data for this TFD */
1115 txq->entries[q->write_ptr].skb = skb;
1116 txq->entries[q->write_ptr].cmd = dev_cmd;
1117
1118 dev_cmd->hdr.cmd = REPLY_TX;
1119 dev_cmd->hdr.sequence =
1120 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
1121 INDEX_TO_SEQ(q->write_ptr)));
1122
1123 /* Set up first empty entry in queue's array of Tx/cmd buffers */
1124 out_meta = &txq->entries[q->write_ptr].meta;
1125
1126 /*
1127 * Use the first empty entry in this queue's command buffer array
1128 * to contain the Tx command and MAC header concatenated together
1129 * (payload data will be in another buffer).
1130 * Size of this varies, due to varying MAC header length.
1131 * If end is not dword aligned, we'll have 2 extra bytes at the end
1132 * of the MAC header (device reads on dword boundaries).
1133 * We'll tell device about this padding later.
1134 */
1135 len = sizeof(struct iwl_tx_cmd) +
1136 sizeof(struct iwl_cmd_header) + hdr_len;
1137 firstlen = (len + 3) & ~3;
1138
1139 /* Tell NIC about any 2-byte padding after MAC header */
1140 if (firstlen != len)
1141 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1142
1143 /* Physical address of this Tx command's header (not MAC header!),
1144 * within command buffer array. */
1145 txcmd_phys = dma_map_single(trans->dev,
1146 &dev_cmd->hdr, firstlen,
1147 DMA_BIDIRECTIONAL);
1148 if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
1149 goto out_err;
1150 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1151 dma_unmap_len_set(out_meta, len, firstlen);
1152
1153 if (!ieee80211_has_morefrags(fc)) {
1154 txq->need_update = 1;
1155 } else {
1156 wait_write_ptr = 1;
1157 txq->need_update = 0;
1158 }
1159
1160 /* Set up TFD's 2nd entry to point directly to remainder of skb,
1161 * if any (802.11 null frames have no payload). */
1162 secondlen = skb->len - hdr_len;
1163 if (secondlen > 0) {
1164 phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
1165 secondlen, DMA_TO_DEVICE);
1166 if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
1167 dma_unmap_single(trans->dev,
1168 dma_unmap_addr(out_meta, mapping),
1169 dma_unmap_len(out_meta, len),
1170 DMA_BIDIRECTIONAL);
1171 goto out_err;
1172 }
1173 }
1174
1175 /* Attach buffers to TFD */
1176 iwl_pcie_tx_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
1177 if (secondlen > 0)
1178 iwl_pcie_tx_build_tfd(trans, txq, phys_addr, secondlen, 0);
1179
1180 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
1181 offsetof(struct iwl_tx_cmd, scratch);
1182
1183 /* take back ownership of DMA buffer to enable update */
1184 dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
1185 DMA_BIDIRECTIONAL);
1186 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1187 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1188
1189 IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
1190 le16_to_cpu(dev_cmd->hdr.sequence));
1191 IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
1192
1193 /* Set up entry for this TFD in Tx byte-count array */
1194 iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
1195
1196 dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
1197 DMA_BIDIRECTIONAL);
1198
1199 trace_iwlwifi_dev_tx(trans->dev, skb,
1200 &txq->tfds[txq->q.write_ptr],
1201 sizeof(struct iwl_tfd),
1202 &dev_cmd->hdr, firstlen,
1203 skb->data + hdr_len, secondlen);
1204 trace_iwlwifi_dev_tx_data(trans->dev, skb,
1205 skb->data + hdr_len, secondlen);
1206
1207 /* start timer if queue currently empty */
1208 if (txq->need_update && q->read_ptr == q->write_ptr &&
1209 trans_pcie->wd_timeout)
1210 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
1211
1212 /* Tell device the write index *just past* this latest filled TFD */
1213 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
1214 iwl_pcie_txq_inc_wr_ptr(trans, txq);
1215
1216 /*
1217 * At this point the frame is "transmitted" successfully
1218 * and we will get a TX status notification eventually,
1219 * regardless of the value of ret. "ret" only indicates
1220 * whether or not we should update the write pointer.
1221 */
1222 if (iwl_queue_space(q) < q->high_mark) {
1223 if (wait_write_ptr) {
1224 txq->need_update = 1;
1225 iwl_pcie_txq_inc_wr_ptr(trans, txq);
1226 } else {
1227 iwl_stop_queue(trans, txq);
1228 }
1229 }
1230 spin_unlock(&txq->lock);
1231 return 0;
1232 out_err:
1233 spin_unlock(&txq->lock);
1234 return -1;
1235}
1236
1237static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) 571static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1238{ 572{
1239 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 573 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1319,27 +653,6 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
 	}
 }
 
-static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
-				   struct sk_buff_head *skbs)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
-	/* n_bd is usually 256 => n_bd - 1 = 0xff */
-	int tfd_num = ssn & (txq->q.n_bd - 1);
-
-	spin_lock(&txq->lock);
-
-	if (txq->q.read_ptr != tfd_num) {
-		IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
-				   txq_id, txq->q.read_ptr, tfd_num, ssn);
-		iwl_pcie_txq_reclaim(trans, txq_id, tfd_num, skbs);
-		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
-			iwl_wake_queue(trans, txq);
-	}
-
-	spin_unlock(&txq->lock);
-}
-
 static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
 {
 	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
@@ -1386,7 +699,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	iwl_trans_pcie_tx_free(trans);
+	iwl_pcie_tx_free(trans);
 	iwl_pcie_rx_free(trans);
 
 	if (trans_pcie->irq_requested == true) {
@@ -1892,13 +1205,13 @@ static const struct iwl_trans_ops trans_ops_pcie = {
 
 	.wowlan_suspend = iwl_trans_pcie_wowlan_suspend,
 
-	.send_cmd = iwl_pcie_send_cmd,
+	.send_cmd = iwl_trans_pcie_send_hcmd,
 
 	.tx = iwl_trans_pcie_tx,
 	.reclaim = iwl_trans_pcie_reclaim,
 
-	.txq_disable = iwl_pcie_txq_disable,
-	.txq_enable = iwl_pcie_txq_enable,
+	.txq_disable = iwl_trans_pcie_txq_disable,
+	.txq_enable = iwl_trans_pcie_txq_enable,
 
 	.dbgfs_register = iwl_trans_pcie_dbgfs_register,
 
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index eac0481a9c71..4c03b8288c58 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -42,11 +42,171 @@
42#define IWL_TX_CRC_SIZE 4 42#define IWL_TX_CRC_SIZE 4
43#define IWL_TX_DELIMITER_SIZE 4 43#define IWL_TX_DELIMITER_SIZE 4
44 44
45/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
46 * DMA services
47 *
48 * Theory of operation
49 *
50 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
51 * of buffer descriptors, each of which points to one or more data buffers for
52 * the device to read from or fill. Driver and device exchange status of each
53 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
54 * entries in each circular buffer, to protect against confusing empty and full
55 * queue states.
56 *
57 * The device reads or writes the data in the queues via the device's several
58 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
59 *
60 * For Tx queue, there are low mark and high mark limits. If, after queuing
61 * the packet for Tx, free space become < low mark, Tx queue stopped. When
62 * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
63 * Tx queue resumed.
64 *
65 ***************************************************/
66static int iwl_queue_space(const struct iwl_queue *q)
67{
68 int s = q->read_ptr - q->write_ptr;
69
70 if (q->read_ptr > q->write_ptr)
71 s -= q->n_bd;
72
73 if (s <= 0)
74 s += q->n_window;
75 /* keep some reserve to not confuse empty and full situations */
76 s -= 2;
77 if (s < 0)
78 s = 0;
79 return s;
80}
81
82/*
83 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
84 */
85static int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
86{
87 q->n_bd = count;
88 q->n_window = slots_num;
89 q->id = id;
90
91 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
92 * and iwl_queue_dec_wrap are broken. */
93 if (WARN_ON(!is_power_of_2(count)))
94 return -EINVAL;
95
96 /* slots_num must be power-of-two size, otherwise
97 * get_cmd_index is broken. */
98 if (WARN_ON(!is_power_of_2(slots_num)))
99 return -EINVAL;
100
101 q->low_mark = q->n_window / 4;
102 if (q->low_mark < 4)
103 q->low_mark = 4;
104
105 q->high_mark = q->n_window / 8;
106 if (q->high_mark < 2)
107 q->high_mark = 2;
108
109 q->write_ptr = 0;
110 q->read_ptr = 0;
111
112 return 0;
113}
114
115
116static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
117 struct iwl_dma_ptr *ptr, size_t size)
118{
119 if (WARN_ON(ptr->addr))
120 return -EINVAL;
121
122 ptr->addr = dma_alloc_coherent(trans->dev, size,
123 &ptr->dma, GFP_KERNEL);
124 if (!ptr->addr)
125 return -ENOMEM;
126 ptr->size = size;
127 return 0;
128}
129
130static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
131 struct iwl_dma_ptr *ptr)
132{
133 if (unlikely(!ptr->addr))
134 return;
135
136 dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
137 memset(ptr, 0, sizeof(*ptr));
138}
139
140static void iwl_pcie_txq_stuck_timer(unsigned long data)
141{
142 struct iwl_txq *txq = (void *)data;
143 struct iwl_queue *q = &txq->q;
144 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
145 struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
146 u32 scd_sram_addr = trans_pcie->scd_base_addr +
147 SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
148 u8 buf[16];
149 int i;
150
151 spin_lock(&txq->lock);
152 /* check if triggered erroneously */
153 if (txq->q.read_ptr == txq->q.write_ptr) {
154 spin_unlock(&txq->lock);
155 return;
156 }
157 spin_unlock(&txq->lock);
158
159 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
160 jiffies_to_msecs(trans_pcie->wd_timeout));
161 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
162 txq->q.read_ptr, txq->q.write_ptr);
163
164 iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
165
166 iwl_print_hex_error(trans, buf, sizeof(buf));
167
168 for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
169 IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
170 iwl_read_direct32(trans, FH_TX_TRB_REG(i)));
171
172 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
173 u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
174 u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
175 bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
176 u32 tbl_dw =
177 iwl_read_targ_mem(trans,
178 trans_pcie->scd_base_addr +
179 SCD_TRANS_TBL_OFFSET_QUEUE(i));
180
181 if (i & 0x1)
182 tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
183 else
184 tbl_dw = tbl_dw & 0x0000FFFF;
185
186 IWL_ERR(trans,
187 "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
188 i, active ? "" : "in", fifo, tbl_dw,
189 iwl_read_prph(trans,
190 SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
191 iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
192 }
193
194 for (i = q->read_ptr; i != q->write_ptr;
195 i = iwl_queue_inc_wrap(i, q->n_bd)) {
196 struct iwl_tx_cmd *tx_cmd =
197 (struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
198 IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
199 get_unaligned_le32(&tx_cmd->scratch));
200 }
201
202 iwl_op_mode_nic_error(trans->op_mode);
203}
204
45/* 205/*
46 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 206 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
47 */ 207 */
48void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, 208static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
49 struct iwl_txq *txq, u16 byte_cnt) 209 struct iwl_txq *txq, u16 byte_cnt)
50{ 210{
51 struct iwlagn_scd_bc_tbl *scd_bc_tbl; 211 struct iwlagn_scd_bc_tbl *scd_bc_tbl;
52 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 212 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -87,6 +247,32 @@ void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 	tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
 }
 
+static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
+					    struct iwl_txq *txq)
+{
+	struct iwl_trans_pcie *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
+	int txq_id = txq->q.id;
+	int read_ptr = txq->q.read_ptr;
+	u8 sta_id = 0;
+	__le16 bc_ent;
+	struct iwl_tx_cmd *tx_cmd =
+		(void *)txq->entries[txq->q.read_ptr].cmd->payload;
+
+	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
+
+	if (txq_id != trans_pcie->cmd_queue)
+		sta_id = tx_cmd->sta_id;
+
+	bc_ent = cpu_to_le16(1 | (sta_id << 12));
+	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+
+	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
+		scd_bc_tbl[txq_id].
+			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
+}
+
 /*
  * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
  */
@@ -136,7 +322,7 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
 	txq->need_update = 0;
 }
 
-static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
 {
 	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
 
@@ -148,15 +334,15 @@ static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
 	return addr;
 }
 
-static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
 {
 	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
 
 	return le16_to_cpu(tb->hi_n_len) >> 4;
 }
 
-static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
-				  dma_addr_t addr, u16 len)
+static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
+				       dma_addr_t addr, u16 len)
 {
 	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
 	u16 hi_n_len = len << 4;
@@ -170,19 +356,20 @@ static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
 	tfd->num_tbs = idx + 1;
 }
 
-static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
+static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
 {
 	return tfd->num_tbs & 0x1f;
 }
 
-static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
-			  struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
+static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
+			       struct iwl_cmd_meta *meta, struct iwl_tfd *tfd,
+			       enum dma_data_direction dma_dir)
 {
 	int i;
 	int num_tbs;
 
 	/* Sanity check on number of chunks */
-	num_tbs = iwl_tfd_get_num_tbs(tfd);
+	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
 
 	if (num_tbs >= IWL_NUM_OF_TBS) {
 		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
@@ -199,8 +386,8 @@ static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
 
 	/* Unmap chunks, if any. */
 	for (i = 1; i < num_tbs; i++)
-		dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
-				 iwl_tfd_tb_get_len(tfd, i), dma_dir);
+		dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
+				 iwl_pcie_tfd_tb_get_len(tfd, i), dma_dir);
 
 	tfd->num_tbs = 0;
 }
@@ -214,8 +401,8 @@ static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
  * Does NOT advance any TFD circular buffer read/write indexes
  * Does NOT free the TFD itself (which is within circular buffer)
  */
-void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
-			   enum dma_data_direction dma_dir)
+static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+				  enum dma_data_direction dma_dir)
 {
 	struct iwl_tfd *tfd_tmp = txq->tfds;
 
@@ -226,8 +413,8 @@ void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
 	lockdep_assert_held(&txq->lock);
 
 	/* We have only q->n_window txq->entries, but we use q->n_bd tfds */
-	iwl_unmap_tfd(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
-		      dma_dir);
+	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
+			   dma_dir);
 
 	/* free SKB */
 	if (txq->entries) {
@@ -246,8 +433,8 @@ void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
 	}
 }
 
-int iwl_pcie_tx_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
-			  dma_addr_t addr, u16 len, u8 reset)
+static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+				  dma_addr_t addr, u16 len, u8 reset)
 {
 	struct iwl_queue *q;
 	struct iwl_tfd *tfd, *tfd_tmp;
@@ -260,7 +447,7 @@ int iwl_pcie_tx_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
 	if (reset)
 		memset(tfd, 0, sizeof(*tfd));
 
-	num_tbs = iwl_tfd_get_num_tbs(tfd);
+	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
 
 	/* Each TFD can point to a maximum 20 Tx buffers */
 	if (num_tbs >= IWL_NUM_OF_TBS) {
@@ -276,108 +463,547 @@ int iwl_pcie_tx_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
276 IWL_ERR(trans, "Unaligned address = %llx\n", 463 IWL_ERR(trans, "Unaligned address = %llx\n",
277 (unsigned long long)addr); 464 (unsigned long long)addr);
278 465
279 iwl_tfd_set_tb(tfd, num_tbs, addr, len); 466 iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
280 467
281 return 0; 468 return 0;
282} 469}
283 470
284/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** 471static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
285 * DMA services 472 struct iwl_txq *txq, int slots_num,
286 * 473 u32 txq_id)
287 * Theory of operation 474{
288 * 475 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
289 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer 476 size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
290 * of buffer descriptors, each of which points to one or more data buffers for 477 int i;
291 * the device to read from or fill. Driver and device exchange status of each 478
292 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty 479 if (WARN_ON(txq->entries || txq->tfds))
293 * entries in each circular buffer, to protect against confusing empty and full 480 return -EINVAL;
294 * queue states. 481
295 * 482 setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
296 * The device reads or writes the data in the queues via the device's several 483 (unsigned long)txq);
297 * DMA/FIFO channels. Each queue is mapped to a single DMA channel. 484 txq->trans_pcie = trans_pcie;
298 * 485
299 * For Tx queue, there are low mark and high mark limits. If, after queuing 486 txq->q.n_window = slots_num;
300 * the packet for Tx, free space become < low mark, Tx queue stopped. When 487
301 * reclaiming packets (on 'tx done IRQ), if free space become > high mark, 488 txq->entries = kcalloc(slots_num,
302 * Tx queue resumed. 489 sizeof(struct iwl_pcie_txq_entry),
490 GFP_KERNEL);
491
492 if (!txq->entries)
493 goto error;
494
495 if (txq_id == trans_pcie->cmd_queue)
496 for (i = 0; i < slots_num; i++) {
497 txq->entries[i].cmd =
498 kmalloc(sizeof(struct iwl_device_cmd),
499 GFP_KERNEL);
500 if (!txq->entries[i].cmd)
501 goto error;
502 }
503
504 /* Circular buffer of transmit frame descriptors (TFDs),
505 * shared with device */
506 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
507 &txq->q.dma_addr, GFP_KERNEL);
508 if (!txq->tfds) {
509 IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
510 goto error;
511 }
512 txq->q.id = txq_id;
513
514 return 0;
515error:
516 if (txq->entries && txq_id == trans_pcie->cmd_queue)
517 for (i = 0; i < slots_num; i++)
518 kfree(txq->entries[i].cmd);
519 kfree(txq->entries);
520 txq->entries = NULL;
521
522 return -ENOMEM;
523
524}
525
526static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
527 int slots_num, u32 txq_id)
528{
529 int ret;
530
531 txq->need_update = 0;
532
533 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
534 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
535 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
536
537 /* Initialize queue's high/low-water marks, and head/tail indexes */
538 ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
539 txq_id);
540 if (ret)
541 return ret;
542
543 spin_lock_init(&txq->lock);
544
545 /*
546 * Tell nic where to find circular buffer of Tx Frame Descriptors for
547 * given Tx queue, and enable the DMA channel used for that queue.
548 * Circular buffer (TFD queue in DRAM) physical base address */
549 iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
550 txq->q.dma_addr >> 8);
551
552 return 0;
553}
554
555/*
556 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
557 */
558static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
559{
560 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
561 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
562 struct iwl_queue *q = &txq->q;
563 enum dma_data_direction dma_dir;
564
565 if (!q->n_bd)
566 return;
567
568 /* In the command queue, all the TBs are mapped as BIDI
569 * so unmap them as such.
570 */
571 if (txq_id == trans_pcie->cmd_queue)
572 dma_dir = DMA_BIDIRECTIONAL;
573 else
574 dma_dir = DMA_TO_DEVICE;
575
576 spin_lock_bh(&txq->lock);
577 while (q->write_ptr != q->read_ptr) {
578 iwl_pcie_txq_free_tfd(trans, txq, dma_dir);
579 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
580 }
581 spin_unlock_bh(&txq->lock);
582}
583
584/*
585 * iwl_pcie_txq_free - Deallocate DMA queue.
586 * @txq: Transmit queue to deallocate.
303 * 587 *
304 ***************************************************/ 588 * Empty queue by removing and destroying all BD's.
589 * Free all buffers.
590 * 0-fill, but do not free "txq" descriptor structure.
591 */
592static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
593{
594 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
595 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
596 struct device *dev = trans->dev;
597 int i;
305 598
306int iwl_queue_space(const struct iwl_queue *q) 599 if (WARN_ON(!txq))
600 return;
601
602 iwl_pcie_txq_unmap(trans, txq_id);
603
604 /* De-alloc array of command/tx buffers */
605 if (txq_id == trans_pcie->cmd_queue)
606 for (i = 0; i < txq->q.n_window; i++) {
607 kfree(txq->entries[i].cmd);
608 kfree(txq->entries[i].copy_cmd);
609 kfree(txq->entries[i].free_buf);
610 }
611
612 /* De-alloc circular buffer of TFDs */
613 if (txq->q.n_bd) {
614 dma_free_coherent(dev, sizeof(struct iwl_tfd) *
615 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
616 memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
617 }
618
619 kfree(txq->entries);
620 txq->entries = NULL;
621
622 del_timer_sync(&txq->stuck_timer);
623
624 /* 0-fill queue descriptor structure */
625 memset(txq, 0, sizeof(*txq));
626}
627
628/*
629 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
630 */
631static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask)
307{ 632{
308 int s = q->read_ptr - q->write_ptr; 633 struct iwl_trans_pcie __maybe_unused *trans_pcie =
634 IWL_TRANS_GET_PCIE_TRANS(trans);
309 635
310 if (q->read_ptr > q->write_ptr) 636 iwl_write_prph(trans, SCD_TXFACT, mask);
311 s -= q->n_bd; 637}
312 638
313 if (s <= 0) 639void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
314 s += q->n_window; 640{
315 /* keep some reserve to not confuse empty and full situations */ 641 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
316 s -= 2; 642 u32 a;
317 if (s < 0) 643 int chan;
318 s = 0; 644 u32 reg_val;
319 return s; 645
646 /* make sure all queue are not stopped/used */
647 memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
648 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
649
650 trans_pcie->scd_base_addr =
651 iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
652
653 WARN_ON(scd_base_addr != 0 &&
654 scd_base_addr != trans_pcie->scd_base_addr);
655
656 a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
657 /* reset conext data memory */
658 for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
659 a += 4)
660 iwl_write_targ_mem(trans, a, 0);
661 /* reset tx status memory */
662 for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
663 a += 4)
664 iwl_write_targ_mem(trans, a, 0);
665 for (; a < trans_pcie->scd_base_addr +
666 SCD_TRANS_TBL_OFFSET_QUEUE(
667 trans->cfg->base_params->num_of_queues);
668 a += 4)
669 iwl_write_targ_mem(trans, a, 0);
670
671 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
672 trans_pcie->scd_bc_tbls.dma >> 10);
673
674 /* The chain extension of the SCD doesn't work well. This feature is
675 * enabled by default by the HW, so we need to disable it manually.
676 */
677 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
678
679 iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
680 trans_pcie->cmd_fifo);
681
682 /* Activate all Tx DMA/FIFO channels */
683 iwl_pcie_txq_set_sched(trans, IWL_MASK(0, 7));
684
685 /* Enable DMA channel */
686 for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
687 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
688 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
689 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
690
691 /* Update FH chicken bits */
692 reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
693 iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
694 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
695
696 /* Enable L1-Active */
697 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
698 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
320} 699}
321 700
322/* 701/*
323 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes 702 * iwl_pcie_tx_stop - Stop all Tx DMA channels
324 */ 703 */
325int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id) 704int iwl_pcie_tx_stop(struct iwl_trans *trans)
326{ 705{
327 q->n_bd = count; 706 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
328 q->n_window = slots_num; 707 int ch, txq_id, ret;
329 q->id = id; 708 unsigned long flags;
330 709
331 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap 710 /* Turn off all Tx DMA fifos */
332 * and iwl_queue_dec_wrap are broken. */ 711 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
333 if (WARN_ON(!is_power_of_2(count)))
334 return -EINVAL;
335 712
336 /* slots_num must be power-of-two size, otherwise 713 iwl_pcie_txq_set_sched(trans, 0);
337 * get_cmd_index is broken. */
338 if (WARN_ON(!is_power_of_2(slots_num)))
339 return -EINVAL;
340 714
341 q->low_mark = q->n_window / 4; 715 /* Stop each Tx DMA channel, and wait for it to be idle */
342 if (q->low_mark < 4) 716 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
343 q->low_mark = 4; 717 iwl_write_direct32(trans,
718 FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
719 ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
720 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
721 if (ret < 0)
722 IWL_ERR(trans,
723 "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
724 ch,
725 iwl_read_direct32(trans,
726 FH_TSSR_TX_STATUS_REG));
727 }
728 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
344 729
345 q->high_mark = q->n_window / 8; 730 if (!trans_pcie->txq) {
346 if (q->high_mark < 2) 731 IWL_WARN(trans,
347 q->high_mark = 2; 732 "Stopping tx queues that aren't allocated...\n");
733 return 0;
734 }
348 735
349 q->write_ptr = q->read_ptr = 0; 736 /* Unmap DMA from host system and free skb's */
737 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
738 txq_id++)
739 iwl_pcie_txq_unmap(trans, txq_id);
350 740
351 return 0; 741 return 0;
352} 742}
353 743
354static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, 744/*
355 struct iwl_txq *txq) 745 * iwl_trans_tx_free - Free TXQ Context
746 *
747 * Destroy all TX DMA queues and structures
748 */
749void iwl_pcie_tx_free(struct iwl_trans *trans)
356{ 750{
357 struct iwl_trans_pcie *trans_pcie = 751 int txq_id;
358 IWL_TRANS_GET_PCIE_TRANS(trans); 752 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
359 struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
360 int txq_id = txq->q.id;
361 int read_ptr = txq->q.read_ptr;
362 u8 sta_id = 0;
363 __le16 bc_ent;
364 struct iwl_tx_cmd *tx_cmd =
365 (void *)txq->entries[txq->q.read_ptr].cmd->payload;
366 753
367 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); 754 /* Tx queues */
755 if (trans_pcie->txq) {
756 for (txq_id = 0;
757 txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
758 iwl_pcie_txq_free(trans, txq_id);
759 }
368 760
369 if (txq_id != trans_pcie->cmd_queue) 761 kfree(trans_pcie->txq);
370 sta_id = tx_cmd->sta_id; 762 trans_pcie->txq = NULL;
371 763
372 bc_ent = cpu_to_le16(1 | (sta_id << 12)); 764 iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
373 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
374 765
375 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) 766 iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
376 scd_bc_tbl[txq_id]. 767}
377 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; 768
769/*
770 * iwl_pcie_tx_alloc - allocate TX context
771 * Allocate all Tx DMA structures and initialize them
772 */
773static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
774{
775 int ret;
776 int txq_id, slots_num;
777 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
778
779 u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
780 sizeof(struct iwlagn_scd_bc_tbl);
781
782 /*It is not allowed to alloc twice, so warn when this happens.
783 * We cannot rely on the previous allocation, so free and fail */
784 if (WARN_ON(trans_pcie->txq)) {
785 ret = -EINVAL;
786 goto error;
787 }
788
789 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
790 scd_bc_tbls_size);
791 if (ret) {
792 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
793 goto error;
794 }
795
796 /* Alloc keep-warm buffer */
797 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
798 if (ret) {
799 IWL_ERR(trans, "Keep Warm allocation failed\n");
800 goto error;
801 }
802
803 trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
804 sizeof(struct iwl_txq), GFP_KERNEL);
805 if (!trans_pcie->txq) {
806 IWL_ERR(trans, "Not enough memory for txq\n");
807 ret = ENOMEM;
808 goto error;
809 }
810
811 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
812 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
813 txq_id++) {
814 slots_num = (txq_id == trans_pcie->cmd_queue) ?
815 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
816 ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
817 slots_num, txq_id);
818 if (ret) {
819 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
820 goto error;
821 }
822 }
823
824 return 0;
825
826error:
827 iwl_pcie_tx_free(trans);
828
829 return ret;
830}
831int iwl_pcie_tx_init(struct iwl_trans *trans)
832{
833 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
834 int ret;
835 int txq_id, slots_num;
836 unsigned long flags;
837 bool alloc = false;
838
839 if (!trans_pcie->txq) {
840 ret = iwl_pcie_tx_alloc(trans);
841 if (ret)
842 goto error;
843 alloc = true;
844 }
845
846 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
847
848 /* Turn off all Tx DMA fifos */
849 iwl_write_prph(trans, SCD_TXFACT, 0);
850
851 /* Tell NIC where to find the "keep warm" buffer */
852 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
853 trans_pcie->kw.dma >> 4);
854
855 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
856
857 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
858 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
859 txq_id++) {
860 slots_num = (txq_id == trans_pcie->cmd_queue) ?
861 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
862 ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
863 slots_num, txq_id);
864 if (ret) {
865 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
866 goto error;
867 }
868 }
869
870 return 0;
871error:
872	 /* Upon error, free only if we allocated something. */
873 if (alloc)
874 iwl_pcie_tx_free(trans);
875 return ret;
876}
877
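iwl_pcie_tx_init allocates lazily (only when trans_pcie->txq is still NULL) and remembers whether this call did the allocation, so a later failure frees only state it created and re-initialization after a restart neither leaks nor double-allocates. A compact sketch of that pattern, with hypothetical names and plain malloc in place of the DMA allocations:

/* Illustrative sketch: allocate on first call, and on failure free only what
 * this call allocated. Not driver code. */
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

static void *sketch_state;

static void sketch_free_all(void) { free(sketch_state); sketch_state = NULL; }

static int sketch_hw_init(void) { return 0; /* would program registers here */ }

static int sketch_init(void)
{
    bool allocated_here = false;
    int ret;

    if (!sketch_state) {
        sketch_state = malloc(256);
        if (!sketch_state)
            return -ENOMEM;
        allocated_here = true;
    }

    ret = sketch_hw_init();        /* turn off DMA FIFOs, set keep-warm addr, ... */
    if (ret && allocated_here)
        sketch_free_all();         /* don't free caller-owned state on re-init */
    return ret;
}

int main(void) { return sketch_init() ? 1 : 0; }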
878static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie,
879 struct iwl_txq *txq)
880{
881 if (!trans_pcie->wd_timeout)
882 return;
883
884 /*
885 * if empty delete timer, otherwise move timer forward
886 * since we're making progress on this queue
887 */
888 if (txq->q.read_ptr == txq->q.write_ptr)
889 del_timer(&txq->stuck_timer);
890 else
891 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
892}
893
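iwl_pcie_txq_progress encodes the stuck-queue watchdog policy: an empty queue cannot be stuck, so the timer is dropped, while any other progress pushes the deadline out by wd_timeout. The model below uses a hypothetical sketch_timer in place of the kernel timer API to show just that decision.

/* Illustrative model of the watchdog decision: drained queue stops the timer,
 * progress re-arms it. Timer type and helpers are stand-ins, not kernel APIs. */
#include <stdbool.h>

struct sketch_timer { bool armed; unsigned long expires; };

static void sketch_del_timer(struct sketch_timer *t) { t->armed = false; }
static void sketch_mod_timer(struct sketch_timer *t, unsigned long when)
{
    t->armed = true;
    t->expires = when;
}

static void sketch_queue_progress(struct sketch_timer *wd, unsigned long now,
                                  unsigned long wd_timeout,
                                  int read_ptr, int write_ptr)
{
    if (!wd_timeout)
        return;                                  /* watchdog disabled */
    if (read_ptr == write_ptr)
        sketch_del_timer(wd);                    /* empty: nothing can be stuck */
    else
        sketch_mod_timer(wd, now + wd_timeout);  /* progress: push deadline out */
}

int main(void)
{
    struct sketch_timer wd = { false, 0 };

    sketch_queue_progress(&wd, 1000, 250, 3, 7);  /* busy: re-armed to 1250 */
    sketch_queue_progress(&wd, 1100, 250, 7, 7);  /* drained: timer dropped */
    return wd.armed;                              /* 0 */
}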
894/* Frees buffers until index _not_ inclusive */
895static int iwl_pcie_txq_reclaim(struct iwl_trans *trans, int txq_id, int index,
896 struct sk_buff_head *skbs)
897{
898 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
899 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
900 struct iwl_queue *q = &txq->q;
901 int last_to_free;
902 int freed = 0;
903
904	 /* This function is not meant to release cmd queue */
905 if (WARN_ON(txq_id == trans_pcie->cmd_queue))
906 return 0;
907
908 lockdep_assert_held(&txq->lock);
909
910	 /* Since we free until index _not_ inclusive, the one before index is
911	 * the last we will free. This one must be a used entry. */
912 last_to_free = iwl_queue_dec_wrap(index, q->n_bd);
913
914 if ((index >= q->n_bd) ||
915 (iwl_queue_used(q, last_to_free) == 0)) {
916 IWL_ERR(trans,
917 "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
918 __func__, txq_id, last_to_free, q->n_bd,
919 q->write_ptr, q->read_ptr);
920 return 0;
921 }
922
923 if (WARN_ON(!skb_queue_empty(skbs)))
924 return 0;
925
926 for (;
927 q->read_ptr != index;
928 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
929
930 if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
931 continue;
932
933 __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);
934
935 txq->entries[txq->q.read_ptr].skb = NULL;
936
937 iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
938
939 iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
940 freed++;
941 }
942
943 iwl_pcie_txq_progress(trans_pcie, txq);
944
945 return freed;
378} 946}
379 947
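The reclaim walk above frees every entry from read_ptr up to, but not including, the given index, advancing with wrap-around; the driver additionally checks that the last entry it would free (one before index) is still in use before walking. The sketch below models only that arithmetic, using a modulo increment as a stand-in for iwl_queue_inc_wrap/iwl_queue_dec_wrap and a 256-entry ring as an assumed size.

/* Illustrative sketch of the "free until index, not inclusive" walk.
 * Modulo wrap is a simplification; not driver code. */
#include <stdio.h>

static int sketch_inc_wrap(int i, int n_bd) { return (i + 1) % n_bd; }
static int sketch_dec_wrap(int i, int n_bd) { return (i + n_bd - 1) % n_bd; }

/* Free entries from *read_ptr up to, but not including, index. */
static int sketch_reclaim(int *read_ptr, int index, int n_bd)
{
    int freed = 0;

    while (*read_ptr != index) {
        /* the driver hands the skb back and unmaps the TFD here */
        *read_ptr = sketch_inc_wrap(*read_ptr, n_bd);
        freed++;
    }
    return freed;
}

int main(void)
{
    int rp = 250;

    /* reclaiming up to index 4 frees 250..255 and 0..3; index 4 itself stays */
    printf("last freed: %d\n", sketch_dec_wrap(4, 256));
    printf("freed %d, read_ptr now %d\n", sketch_reclaim(&rp, 4, 256), rp);
    return 0;
}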
380static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, 948void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
949 struct sk_buff_head *skbs)
950{
951 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
952 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
953 /* n_bd is usually 256 => n_bd - 1 = 0xff */
954 int tfd_num = ssn & (txq->q.n_bd - 1);
955
956 spin_lock(&txq->lock);
957
958 if (txq->q.read_ptr != tfd_num) {
959 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
960 txq_id, txq->q.read_ptr, tfd_num, ssn);
961 iwl_pcie_txq_reclaim(trans, txq_id, tfd_num, skbs);
962 if (iwl_queue_space(&txq->q) > txq->q.low_mark)
963 iwl_wake_queue(trans, txq);
964 }
965
966 spin_unlock(&txq->lock);
967}
968
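The tfd_num computation above maps the 12-bit 802.11 start sequence number onto the TFD ring by masking with n_bd - 1, which works because n_bd is a power of two (usually 256, as the comment notes). A tiny sketch of that mapping:

/* Illustrative sketch: SSN to ring slot by masking. Assumes a power-of-two
 * ring size; not driver code. */
#include <stdio.h>

static int sketch_ssn_to_tfd(int ssn, int n_bd) { return ssn & (n_bd - 1); }

int main(void)
{
    /* 0x135 and 0x35 land on the same slot once the high SSN bits are masked */
    printf("ssn 0x135 -> slot %d, ssn 0x35 -> slot %d\n",
           sketch_ssn_to_tfd(0x135, 256), sketch_ssn_to_tfd(0x35, 256));
    return 0;
}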
969/*
970 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
971 *
972 * When FW advances 'R' index, all entries between old and new 'R' index
973 * need to be reclaimed. As a result, some free space forms. If there is
974 * enough free space (> low mark), wake the stack that feeds us.
975 */
976static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
977{
978 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
979 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
980 struct iwl_queue *q = &txq->q;
981 int nfreed = 0;
982
983 lockdep_assert_held(&txq->lock);
984
985 if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
986 IWL_ERR(trans,
987 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
988 __func__, txq_id, idx, q->n_bd,
989 q->write_ptr, q->read_ptr);
990 return;
991 }
992
993 for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
994 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
995
996 if (nfreed++ > 0) {
997 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
998 idx, q->write_ptr, q->read_ptr);
999 iwl_op_mode_nic_error(trans->op_mode);
1000 }
1001 }
1002
1003 iwl_pcie_txq_progress(trans_pcie, txq);
1004}
1005
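iwl_pcie_cmdq_reclaim normally frees exactly one entry per completed command; having to free a second one means the firmware skipped a response, which the driver reports and treats as a NIC error. The model below shows only that detection logic; the helper names are hypothetical and the error path is reduced to a printf.

/* Illustrative model of command-queue reclaim: more than one freed entry per
 * completion indicates a skipped host-command response. Not driver code. */
#include <stdio.h>

static int sketch_inc_wrap(int i, int n_bd) { return (i + 1) % n_bd; }

static void sketch_cmdq_reclaim(int *read_ptr, int idx, int n_bd)
{
    int nfreed = 0;

    for (idx = sketch_inc_wrap(idx, n_bd); *read_ptr != idx;
         *read_ptr = sketch_inc_wrap(*read_ptr, n_bd)) {
        if (nfreed++ > 0)
            printf("HCMD skipped near index %d\n", idx);  /* driver: nic_error */
    }
}

int main(void)
{
    int rp = 7;

    sketch_cmdq_reclaim(&rp, 8, 32);   /* read_ptr lagged: frees 7 and 8, warns once */
    printf("read_ptr now %d\n", rp);   /* 9, one past the completed index */
    return 0;
}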
1006static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
381 u16 txq_id) 1007 u16 txq_id)
382{ 1008{
383 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1009 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -402,7 +1028,8 @@ static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
402 return 0; 1028 return 0;
403} 1029}
404 1030
405static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id) 1031static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans,
1032 u16 txq_id)
406{ 1033{
407 /* Simply stop the queue, but don't change any configuration; 1034 /* Simply stop the queue, but don't change any configuration;
408 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ 1035 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
@@ -412,8 +1039,8 @@ static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id)
412 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); 1039 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
413} 1040}
414 1041
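The comment above describes a write-enable-mask register: each control bit is paired with an enable bit, and a write only takes effect for bits whose enable bit is also set, so the queue can be marked inactive without disturbing the rest of its configuration. The sketch below models that pattern with made-up bit positions; it is not the real SCD register layout.

/* Illustrative sketch of the write-enable-mask pattern. Bit positions are
 * hypothetical; not the real register layout. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_POS_ACTIVE  0   /* hypothetical position of the ACTIVE bit */
#define SKETCH_POS_WR_EN   4   /* hypothetical position of its write-enable bit */

/* Clear ACTIVE without touching anything else: enable bit set, value bit 0. */
static uint32_t sketch_set_inactive_val(void)
{
    return (0u << SKETCH_POS_ACTIVE) | (1u << SKETCH_POS_WR_EN);
}

int main(void)
{
    printf("register write = 0x%08x\n", (unsigned)sketch_set_inactive_val());
    return 0;
}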
415void iwl_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, 1042void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
416 int sta_id, int tid, int frame_limit, u16 ssn) 1043 int sta_id, int tid, int frame_limit, u16 ssn)
417{ 1044{
418 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1045 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
419 1046
@@ -421,7 +1048,7 @@ void iwl_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
421 WARN_ONCE(1, "queue %d already used - expect issues", txq_id); 1048 WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
422 1049
423 /* Stop this Tx queue before configuring it */ 1050 /* Stop this Tx queue before configuring it */
424 iwl_txq_set_inactive(trans, txq_id); 1051 iwl_pcie_txq_set_inactive(trans, txq_id);
425 1052
426 /* Set this queue as a chain-building queue unless it is CMD queue */ 1053 /* Set this queue as a chain-building queue unless it is CMD queue */
427 if (txq_id != trans_pcie->cmd_queue) 1054 if (txq_id != trans_pcie->cmd_queue)
@@ -432,7 +1059,7 @@ void iwl_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
432 u16 ra_tid = BUILD_RAxTID(sta_id, tid); 1059 u16 ra_tid = BUILD_RAxTID(sta_id, tid);
433 1060
434 /* Map receiver-address / traffic-ID to this queue */ 1061 /* Map receiver-address / traffic-ID to this queue */
435 iwl_txq_set_ratid_map(trans, ra_tid, txq_id); 1062 iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
436 1063
437 /* enable aggregations for the queue */ 1064 /* enable aggregations for the queue */
438 iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); 1065 iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
@@ -474,7 +1101,7 @@ void iwl_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
474 txq_id, fifo, ssn & 0xff); 1101 txq_id, fifo, ssn & 0xff);
475} 1102}
476 1103
477void iwl_pcie_txq_disable(struct iwl_trans *trans, int txq_id) 1104void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
478{ 1105{
479 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1106 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
480 u32 stts_addr = trans_pcie->scd_base_addr + 1107 u32 stts_addr = trans_pcie->scd_base_addr +
@@ -486,7 +1113,7 @@ void iwl_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
486 return; 1113 return;
487 } 1114 }
488 1115
489 iwl_txq_set_inactive(trans, txq_id); 1116 iwl_pcie_txq_set_inactive(trans, txq_id);
490 1117
491 _iwl_write_targ_mem_dwords(trans, stts_addr, 1118 _iwl_write_targ_mem_dwords(trans, stts_addr,
492 zero_val, ARRAY_SIZE(zero_val)); 1119 zero_val, ARRAY_SIZE(zero_val));
@@ -499,7 +1126,7 @@ void iwl_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
499/*************** HOST COMMAND QUEUE FUNCTIONS *****/ 1126/*************** HOST COMMAND QUEUE FUNCTIONS *****/
500 1127
501/* 1128/*
502 * iwl_enqueue_hcmd - enqueue a uCode command 1129 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
503 * @priv: device private data pointer 1130 * @priv: device private data pointer
504 * @cmd: a pointer to the ucode command structure 1131 * @cmd: a pointer to the ucode command structure
505 * 1132 *
@@ -507,7 +1134,8 @@ void iwl_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
507 * failed. On success, it returns the index (> 0) of the command in the 1134 * failed. On success, it returns the index (> 0) of the command in the
508 * command queue. 1135 * command queue.
509 */ 1136 */
510static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) 1137static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1138 struct iwl_host_cmd *cmd)
511{ 1139{
512 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1140 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
513 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; 1141 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
@@ -650,7 +1278,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
650 dma_unmap_addr_set(out_meta, mapping, phys_addr); 1278 dma_unmap_addr_set(out_meta, mapping, phys_addr);
651 dma_unmap_len_set(out_meta, len, copy_size); 1279 dma_unmap_len_set(out_meta, len, copy_size);
652 1280
653 iwl_pcie_tx_build_tfd(trans, txq, phys_addr, copy_size, 1); 1281 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);
654 1282
655 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 1283 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
656 const void *data = cmd->data[i]; 1284 const void *data = cmd->data[i];
@@ -665,14 +1293,14 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
665 phys_addr = dma_map_single(trans->dev, (void *)data, 1293 phys_addr = dma_map_single(trans->dev, (void *)data,
666 cmd->len[i], DMA_BIDIRECTIONAL); 1294 cmd->len[i], DMA_BIDIRECTIONAL);
667 if (dma_mapping_error(trans->dev, phys_addr)) { 1295 if (dma_mapping_error(trans->dev, phys_addr)) {
668 iwl_unmap_tfd(trans, out_meta, 1296 iwl_pcie_tfd_unmap(trans, out_meta,
669 &txq->tfds[q->write_ptr], 1297 &txq->tfds[q->write_ptr],
670 DMA_BIDIRECTIONAL); 1298 DMA_BIDIRECTIONAL);
671 idx = -ENOMEM; 1299 idx = -ENOMEM;
672 goto out; 1300 goto out;
673 } 1301 }
674 1302
675 iwl_pcie_tx_build_tfd(trans, txq, phys_addr, cmd->len[i], 0); 1303 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmd->len[i], 0);
676 } 1304 }
677 1305
678 out_meta->flags = cmd->flags; 1306 out_meta->flags = cmd->flags;
@@ -701,61 +1329,6 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
701 return idx; 1329 return idx;
702} 1330}
703 1331
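The enqueue path above assembles the command TFD as a list of (address, length) pairs: the first entry covers the copied header-plus-payload chunk, and each remaining data fragment that is mapped separately is appended after it. The descriptor layout in the sketch below is a simplified stand-in, not the real TFD format, and the entry limit is an assumption.

/* Illustrative sketch of building a scatter descriptor from buffer pairs.
 * Simplified layout; not the real TFD. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_MAX_TBS 20   /* assumed per-descriptor entry limit */

struct sketch_tfd {
    int num_tbs;
    struct { uint64_t addr; uint16_t len; } tbs[SKETCH_MAX_TBS];
};

/* Plays the role of attaching one buffer to the descriptor. */
static int sketch_build_tfd(struct sketch_tfd *tfd, uint64_t addr, uint16_t len)
{
    if (tfd->num_tbs >= SKETCH_MAX_TBS)
        return -1;
    tfd->tbs[tfd->num_tbs].addr = addr;
    tfd->tbs[tfd->num_tbs].len = len;
    tfd->num_tbs++;
    return 0;
}

int main(void)
{
    struct sketch_tfd tfd = { 0 };

    sketch_build_tfd(&tfd, 0x1000, 128);  /* copied command chunk */
    sketch_build_tfd(&tfd, 0x8000, 512);  /* one separately mapped fragment */
    printf("descriptor has %d buffers\n", tfd.num_tbs);
    return 0;
}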
704static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie,
705 struct iwl_txq *txq)
706{
707 if (!trans_pcie->wd_timeout)
708 return;
709
710 /*
711 * if empty delete timer, otherwise move timer forward
712 * since we're making progress on this queue
713 */
714 if (txq->q.read_ptr == txq->q.write_ptr)
715 del_timer(&txq->stuck_timer);
716 else
717 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
718}
719
720/*
721 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
722 *
723 * When FW advances 'R' index, all entries between old and new 'R' index
724 * need to be reclaimed. As result, some free space forms. If there is
725 * enough free space (> low mark), wake the stack that feeds us.
726 */
727static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
728 int idx)
729{
730 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
731 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
732 struct iwl_queue *q = &txq->q;
733 int nfreed = 0;
734
735 lockdep_assert_held(&txq->lock);
736
737 if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
738 IWL_ERR(trans,
739 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
740 __func__, txq_id, idx, q->n_bd,
741 q->write_ptr, q->read_ptr);
742 return;
743 }
744
745 for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
746 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
747
748 if (nfreed++ > 0) {
749 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
750 idx, q->write_ptr, q->read_ptr);
751 iwl_op_mode_nic_error(trans->op_mode);
752 }
753
754 }
755
756 iwl_queue_progress(trans_pcie, txq);
757}
758
759/* 1332/*
760 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them 1333 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
761 * @rxb: Rx buffer to reclaim 1334 * @rxb: Rx buffer to reclaim
@@ -797,7 +1370,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
797 cmd = txq->entries[cmd_index].cmd; 1370 cmd = txq->entries[cmd_index].cmd;
798 meta = &txq->entries[cmd_index].meta; 1371 meta = &txq->entries[cmd_index].meta;
799 1372
800 iwl_unmap_tfd(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); 1373 iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
801 1374
802 /* Input error checking is done when commands are added to queue. */ 1375 /* Input error checking is done when commands are added to queue. */
803 if (meta->flags & CMD_WANT_SKB) { 1376 if (meta->flags & CMD_WANT_SKB) {
@@ -809,7 +1382,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
809 meta->source->handler_status = handler_status; 1382 meta->source->handler_status = handler_status;
810 } 1383 }
811 1384
812 iwl_hcmd_queue_reclaim(trans, txq_id, index); 1385 iwl_pcie_cmdq_reclaim(trans, txq_id, index);
813 1386
814 if (!(meta->flags & CMD_ASYNC)) { 1387 if (!(meta->flags & CMD_ASYNC)) {
815 if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { 1388 if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
@@ -830,7 +1403,8 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
830 1403
831#define HOST_COMPLETE_TIMEOUT (2 * HZ) 1404#define HOST_COMPLETE_TIMEOUT (2 * HZ)
832 1405
833static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd) 1406static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
1407 struct iwl_host_cmd *cmd)
834{ 1408{
835 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1409 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
836 int ret; 1410 int ret;
@@ -840,7 +1414,7 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
840 return -EINVAL; 1414 return -EINVAL;
841 1415
842 1416
843 ret = iwl_enqueue_hcmd(trans, cmd); 1417 ret = iwl_pcie_enqueue_hcmd(trans, cmd);
844 if (ret < 0) { 1418 if (ret < 0) {
845 IWL_ERR(trans, 1419 IWL_ERR(trans,
846 "Error sending %s: enqueue_hcmd failed: %d\n", 1420 "Error sending %s: enqueue_hcmd failed: %d\n",
@@ -850,7 +1424,8 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
850 return 0; 1424 return 0;
851} 1425}
852 1426
853static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) 1427static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
1428 struct iwl_host_cmd *cmd)
854{ 1429{
855 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1430 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
856 int cmd_idx; 1431 int cmd_idx;
@@ -869,7 +1444,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
869 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", 1444 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
870 get_cmd_string(trans_pcie, cmd->id)); 1445 get_cmd_string(trans_pcie, cmd->id));
871 1446
872 cmd_idx = iwl_enqueue_hcmd(trans, cmd); 1447 cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
873 if (cmd_idx < 0) { 1448 if (cmd_idx < 0) {
874 ret = cmd_idx; 1449 ret = cmd_idx;
875 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); 1450 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
@@ -949,7 +1524,7 @@ cancel:
949 return ret; 1524 return ret;
950} 1525}
951 1526
952int iwl_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) 1527int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
953{ 1528{
954 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1529 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
955 1530
@@ -960,62 +1535,172 @@ int iwl_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
960 return -ERFKILL; 1535 return -ERFKILL;
961 1536
962 if (cmd->flags & CMD_ASYNC) 1537 if (cmd->flags & CMD_ASYNC)
963 return iwl_send_cmd_async(trans, cmd); 1538 return iwl_pcie_send_hcmd_async(trans, cmd);
964 1539
965 /* We still can fail on RFKILL that can be asserted while we wait */ 1540 /* We still can fail on RFKILL that can be asserted while we wait */
966 return iwl_send_cmd_sync(trans, cmd); 1541 return iwl_pcie_send_hcmd_sync(trans, cmd);
967} 1542}
968 1543
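iwl_trans_pcie_send_hcmd above splits on CMD_ASYNC: asynchronous commands are only enqueued, while synchronous ones are enqueued and then waited on, with HOST_COMPLETE_TIMEOUT (2*HZ) bounding the wait. The sketch below shows only that dispatch shape; the flag value, wait helper, and timeout are stand-ins.

/* Illustrative sketch of the sync/async host-command dispatch. Stand-in
 * values and helpers; not driver code. */
#include <stdio.h>

#define SKETCH_CMD_ASYNC  0x1        /* stand-in for CMD_ASYNC */
#define SKETCH_TIMEOUT_MS 2000       /* stand-in for HOST_COMPLETE_TIMEOUT */

static int sketch_enqueue(int cmd_id) { (void)cmd_id; return 1; /* cmd index */ }
static int sketch_wait_done(int idx, int timeout_ms)
{
    (void)idx; (void)timeout_ms;
    return 0;   /* always completes here; the driver can time out instead */
}

static int sketch_send_cmd(int cmd_id, unsigned flags)
{
    int idx = sketch_enqueue(cmd_id);

    if (idx < 0)
        return idx;                   /* enqueue failed */
    if (flags & SKETCH_CMD_ASYNC)
        return 0;                     /* fire and forget */
    return sketch_wait_done(idx, SKETCH_TIMEOUT_MS);
}

int main(void)
{
    printf("sync: %d, async: %d\n",
           sketch_send_cmd(0x10, 0), sketch_send_cmd(0x10, SKETCH_CMD_ASYNC));
    return 0;
}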
969/* Frees buffers until index _not_ inclusive */ 1544int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
970int iwl_pcie_txq_reclaim(struct iwl_trans *trans, int txq_id, int index, 1545 struct iwl_device_cmd *dev_cmd, int txq_id)
971 struct sk_buff_head *skbs)
972{ 1546{
973 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1547 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
974 struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 1548 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
975 struct iwl_queue *q = &txq->q; 1549 struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
976 int last_to_free; 1550 struct iwl_cmd_meta *out_meta;
977 int freed = 0; 1551 struct iwl_txq *txq;
1552 struct iwl_queue *q;
1553 dma_addr_t phys_addr = 0;
1554 dma_addr_t txcmd_phys;
1555 dma_addr_t scratch_phys;
1556 u16 len, firstlen, secondlen;
1557 u8 wait_write_ptr = 0;
1558 __le16 fc = hdr->frame_control;
1559 u8 hdr_len = ieee80211_hdrlen(fc);
1560 u16 __maybe_unused wifi_seq;
1561
1562 txq = &trans_pcie->txq[txq_id];
1563 q = &txq->q;
978 1564
979 /* This function is not meant to release cmd queue*/ 1565 if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
980 if (WARN_ON(txq_id == trans_pcie->cmd_queue)) 1566 WARN_ON_ONCE(1);
981 return 0; 1567 return -EINVAL;
1568 }
982 1569
983 lockdep_assert_held(&txq->lock); 1570 spin_lock(&txq->lock);
984 1571
985 /*Since we free until index _not_ inclusive, the one before index is 1572 /* In AGG mode, the index in the ring must correspond to the WiFi
986 * the last we will free. This one must be used */ 1573 * sequence number. This is a HW requirement to help the SCD to parse
987 last_to_free = iwl_queue_dec_wrap(index, q->n_bd); 1574 * the BA.
1575 * Check here that the packets are in the right place on the ring.
1576 */
1577#ifdef CONFIG_IWLWIFI_DEBUG
1578 wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
1579 WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
1580 ((wifi_seq & 0xff) != q->write_ptr),
1581 "Q: %d WiFi Seq %d tfdNum %d",
1582 txq_id, wifi_seq, q->write_ptr);
1583#endif
1584
1585 /* Set up driver data for this TFD */
1586 txq->entries[q->write_ptr].skb = skb;
1587 txq->entries[q->write_ptr].cmd = dev_cmd;
1588
1589 dev_cmd->hdr.cmd = REPLY_TX;
1590 dev_cmd->hdr.sequence =
1591 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
1592 INDEX_TO_SEQ(q->write_ptr)));
1593
1594 /* Set up first empty entry in queue's array of Tx/cmd buffers */
1595 out_meta = &txq->entries[q->write_ptr].meta;
988 1596
989 if ((index >= q->n_bd) || 1597 /*
990 (iwl_queue_used(q, last_to_free) == 0)) { 1598 * Use the first empty entry in this queue's command buffer array
991 IWL_ERR(trans, 1599 * to contain the Tx command and MAC header concatenated together
992 "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", 1600 * (payload data will be in another buffer).
993 __func__, txq_id, last_to_free, q->n_bd, 1601 * Size of this varies, due to varying MAC header length.
994 q->write_ptr, q->read_ptr); 1602 * If end is not dword aligned, we'll have 2 extra bytes at the end
995 return 0; 1603 * of the MAC header (device reads on dword boundaries).
1604 * We'll tell device about this padding later.
1605 */
1606 len = sizeof(struct iwl_tx_cmd) +
1607 sizeof(struct iwl_cmd_header) + hdr_len;
1608 firstlen = (len + 3) & ~3;
1609
1610 /* Tell NIC about any 2-byte padding after MAC header */
1611 if (firstlen != len)
1612 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1613
1614 /* Physical address of this Tx command's header (not MAC header!),
1615 * within command buffer array. */
1616 txcmd_phys = dma_map_single(trans->dev,
1617 &dev_cmd->hdr, firstlen,
1618 DMA_BIDIRECTIONAL);
1619 if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
1620 goto out_err;
1621 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1622 dma_unmap_len_set(out_meta, len, firstlen);
1623
1624 if (!ieee80211_has_morefrags(fc)) {
1625 txq->need_update = 1;
1626 } else {
1627 wait_write_ptr = 1;
1628 txq->need_update = 0;
996 } 1629 }
997 1630
998 if (WARN_ON(!skb_queue_empty(skbs))) 1631 /* Set up TFD's 2nd entry to point directly to remainder of skb,
999 return 0; 1632 * if any (802.11 null frames have no payload). */
1633 secondlen = skb->len - hdr_len;
1634 if (secondlen > 0) {
1635 phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
1636 secondlen, DMA_TO_DEVICE);
1637 if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
1638 dma_unmap_single(trans->dev,
1639 dma_unmap_addr(out_meta, mapping),
1640 dma_unmap_len(out_meta, len),
1641 DMA_BIDIRECTIONAL);
1642 goto out_err;
1643 }
1644 }
1000 1645
1001 for (; 1646 /* Attach buffers to TFD */
1002 q->read_ptr != index; 1647 iwl_pcie_txq_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
1003 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 1648 if (secondlen > 0)
1649 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, secondlen, 0);
1004 1650
1005 if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL)) 1651 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
1006 continue; 1652 offsetof(struct iwl_tx_cmd, scratch);
1007 1653
1008 __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb); 1654 /* take back ownership of DMA buffer to enable update */
1655 dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
1656 DMA_BIDIRECTIONAL);
1657 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1658 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1009 1659
1010 txq->entries[txq->q.read_ptr].skb = NULL; 1660 IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
1661 le16_to_cpu(dev_cmd->hdr.sequence));
1662 IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
1011 1663
1012 iwlagn_txq_inval_byte_cnt_tbl(trans, txq); 1664 /* Set up entry for this TFD in Tx byte-count array */
1665 iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
1013 1666
1014 iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE); 1667 dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
1015 freed++; 1668 DMA_BIDIRECTIONAL);
1016 }
1017 1669
1018 iwl_queue_progress(trans_pcie, txq); 1670 trace_iwlwifi_dev_tx(trans->dev, skb,
1671 &txq->tfds[txq->q.write_ptr],
1672 sizeof(struct iwl_tfd),
1673 &dev_cmd->hdr, firstlen,
1674 skb->data + hdr_len, secondlen);
1675 trace_iwlwifi_dev_tx_data(trans->dev, skb,
1676 skb->data + hdr_len, secondlen);
1019 1677
1020 return freed; 1678 /* start timer if queue currently empty */
1679 if (txq->need_update && q->read_ptr == q->write_ptr &&
1680 trans_pcie->wd_timeout)
1681 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
1682
1683 /* Tell device the write index *just past* this latest filled TFD */
1684 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
1685 iwl_pcie_txq_inc_wr_ptr(trans, txq);
1686
1687 /*
1688 * At this point the frame is "transmitted" successfully
1689 * and we will get a TX status notification eventually,
1690 * regardless of the value of ret. "ret" only indicates
1691 * whether or not we should update the write pointer.
1692 */
1693 if (iwl_queue_space(q) < q->high_mark) {
1694 if (wait_write_ptr) {
1695 txq->need_update = 1;
1696 iwl_pcie_txq_inc_wr_ptr(trans, txq);
1697 } else {
1698 iwl_stop_queue(trans, txq);
1699 }
1700 }
1701 spin_unlock(&txq->lock);
1702 return 0;
1703out_err:
1704 spin_unlock(&txq->lock);
1705 return -1;
1021} 1706}
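The first-buffer sizing in iwl_trans_pcie_tx rounds the Tx command plus MAC header up to a dword boundary, and when padding was added it sets a "MH pad" flag so the device knows to skip the extra bytes. The sketch below reproduces only that arithmetic; the sizes and flag value are assumptions for the demo.

/* Illustrative sketch of the dword-aligned first buffer and padding flag.
 * Sizes and flag value are assumed; not driver code. */
#include <stdio.h>

#define SKETCH_MH_PAD_FLAG 0x1       /* stand-in for TX_CMD_FLG_MH_PAD_MSK */

static unsigned sketch_firstlen(unsigned tx_cmd_len, unsigned hdr_len,
                                unsigned *tx_flags)
{
    unsigned len = tx_cmd_len + hdr_len;
    unsigned firstlen = (len + 3) & ~3u;     /* round up to 4-byte boundary */

    if (firstlen != len)
        *tx_flags |= SKETCH_MH_PAD_FLAG;     /* tell the device about padding */
    return firstlen;
}

int main(void)
{
    unsigned flags = 0;

    /* e.g. a 26-byte QoS data header on top of an assumed 40-byte command */
    printf("firstlen=%u flags=0x%x\n", sketch_firstlen(40, 26, &flags), flags);
    return 0;
}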