path: root/drivers/net/wireless/iwlwifi/pcie/trans.c
author		Emmanuel Grumbach <emmanuel.grumbach@intel.com>	2012-11-14 07:44:18 -0500
committer	Johannes Berg <johannes.berg@intel.com>	2012-11-19 09:04:20 -0500
commit		f02831be962c7be68c72110fa779e916ab1a8cdd (patch)
tree		08d768adea7c0d03381e5c896765049cd42dddb2 /drivers/net/wireless/iwlwifi/pcie/trans.c
parent		7afe3705cd4e2a5490140cc15a15b3ea7a10b889 (diff)
iwlwifi: continue clean up - pcie/tx.c
Rename static functions. Functions were moved from trans.c to tx.c; a few could be made static, others had to be exported. Functions that implement the transport API are prefixed by iwl_trans_pcie_, the others by iwl_pcie_.

Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
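The naming split described above can be read off the hunks below. As an illustration only (signatures abbreviated from the removed code shown in this diff; the post-move declarations are assumed rather than quoted from the real pcie/internal.h), the PCIe-internal Tx helpers now exported from tx.c take the iwl_pcie_ prefix, while the callbacks wired into struct iwl_trans_ops keep the iwl_trans_pcie_ prefix:

/* Sketch of the naming convention only -- not the actual header contents. */
#include <linux/types.h>

struct iwl_trans;
struct iwl_device_cmd;
struct sk_buff;
struct sk_buff_head;

/* PCIe-internal helpers (formerly static in trans.c, e.g. iwl_tx_init,
 * iwl_tx_start): now exported from tx.c for use within pcie/, hence the
 * iwl_pcie_ prefix. */
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);

/* Transport-API implementations (hooked into struct iwl_trans_ops):
 * these keep the iwl_trans_pcie_ prefix. */
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);

The trans_ops_pcie hunk at the end of this diff shows the same split applied to .send_cmd, .txq_disable and .txq_enable.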
Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie/trans.c')
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/trans.c	701
1 file changed, 7 insertions(+), 694 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 8a5b5af968ad..19c11e3b5481 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -74,392 +74,6 @@
#include "iwl-prph.h"
#include "iwl-agn-hw.h"
#include "internal.h"
77/* FIXME: need to abstract out TX command (once we know what it looks like) */
78#include "dvm/commands.h"
79
80#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \
81 (((1<<trans->cfg->base_params->num_of_queues) - 1) &\
82 (~(1<<(trans_pcie)->cmd_queue)))
83
84static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
85 struct iwl_dma_ptr *ptr, size_t size)
86{
87 if (WARN_ON(ptr->addr))
88 return -EINVAL;
89
90 ptr->addr = dma_alloc_coherent(trans->dev, size,
91 &ptr->dma, GFP_KERNEL);
92 if (!ptr->addr)
93 return -ENOMEM;
94 ptr->size = size;
95 return 0;
96}
97
98static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
99 struct iwl_dma_ptr *ptr)
100{
101 if (unlikely(!ptr->addr))
102 return;
103
104 dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
105 memset(ptr, 0, sizeof(*ptr));
106}
107
108static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
109{
110 struct iwl_txq *txq = (void *)data;
111 struct iwl_queue *q = &txq->q;
112 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
113 struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
114 u32 scd_sram_addr = trans_pcie->scd_base_addr +
115 SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
116 u8 buf[16];
117 int i;
118
119 spin_lock(&txq->lock);
120 /* check if triggered erroneously */
121 if (txq->q.read_ptr == txq->q.write_ptr) {
122 spin_unlock(&txq->lock);
123 return;
124 }
125 spin_unlock(&txq->lock);
126
127 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
128 jiffies_to_msecs(trans_pcie->wd_timeout));
129 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
130 txq->q.read_ptr, txq->q.write_ptr);
131
132 iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
133
134 iwl_print_hex_error(trans, buf, sizeof(buf));
135
136 for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
137 IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
138 iwl_read_direct32(trans, FH_TX_TRB_REG(i)));
139
140 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
141 u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
142 u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
143 bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
144 u32 tbl_dw =
145 iwl_read_targ_mem(trans,
146 trans_pcie->scd_base_addr +
147 SCD_TRANS_TBL_OFFSET_QUEUE(i));
148
149 if (i & 0x1)
150 tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
151 else
152 tbl_dw = tbl_dw & 0x0000FFFF;
153
154 IWL_ERR(trans,
155 "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
156 i, active ? "" : "in", fifo, tbl_dw,
157 iwl_read_prph(trans,
158 SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
159 iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
160 }
161
162 for (i = q->read_ptr; i != q->write_ptr;
163 i = iwl_queue_inc_wrap(i, q->n_bd)) {
164 struct iwl_tx_cmd *tx_cmd =
165 (struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
166 IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
167 get_unaligned_le32(&tx_cmd->scratch));
168 }
169
170 iwl_op_mode_nic_error(trans->op_mode);
171}
172
173static int iwl_trans_txq_alloc(struct iwl_trans *trans,
174 struct iwl_txq *txq, int slots_num,
175 u32 txq_id)
176{
177 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
178 size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
179 int i;
180
181 if (WARN_ON(txq->entries || txq->tfds))
182 return -EINVAL;
183
184 setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer,
185 (unsigned long)txq);
186 txq->trans_pcie = trans_pcie;
187
188 txq->q.n_window = slots_num;
189
190 txq->entries = kcalloc(slots_num,
191 sizeof(struct iwl_pcie_txq_entry),
192 GFP_KERNEL);
193
194 if (!txq->entries)
195 goto error;
196
197 if (txq_id == trans_pcie->cmd_queue)
198 for (i = 0; i < slots_num; i++) {
199 txq->entries[i].cmd =
200 kmalloc(sizeof(struct iwl_device_cmd),
201 GFP_KERNEL);
202 if (!txq->entries[i].cmd)
203 goto error;
204 }
205
206 /* Circular buffer of transmit frame descriptors (TFDs),
207 * shared with device */
208 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
209 &txq->q.dma_addr, GFP_KERNEL);
210 if (!txq->tfds) {
211 IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
212 goto error;
213 }
214 txq->q.id = txq_id;
215
216 return 0;
217error:
218 if (txq->entries && txq_id == trans_pcie->cmd_queue)
219 for (i = 0; i < slots_num; i++)
220 kfree(txq->entries[i].cmd);
221 kfree(txq->entries);
222 txq->entries = NULL;
223
224 return -ENOMEM;
225
226}
227
228static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
229 int slots_num, u32 txq_id)
230{
231 int ret;
232
233 txq->need_update = 0;
234
235 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
236 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
237 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
238
239 /* Initialize queue's high/low-water marks, and head/tail indexes */
240 ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
241 txq_id);
242 if (ret)
243 return ret;
244
245 spin_lock_init(&txq->lock);
246
247 /*
248 * Tell nic where to find circular buffer of Tx Frame Descriptors for
249 * given Tx queue, and enable the DMA channel used for that queue.
250 * Circular buffer (TFD queue in DRAM) physical base address */
251 iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
252 txq->q.dma_addr >> 8);
253
254 return 0;
255}
256
257/*
258 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
259 */
260void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
261{
262 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
263 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
264 struct iwl_queue *q = &txq->q;
265 enum dma_data_direction dma_dir;
266
267 if (!q->n_bd)
268 return;
269
270 /* In the command queue, all the TBs are mapped as BIDI
271 * so unmap them as such.
272 */
273 if (txq_id == trans_pcie->cmd_queue)
274 dma_dir = DMA_BIDIRECTIONAL;
275 else
276 dma_dir = DMA_TO_DEVICE;
277
278 spin_lock_bh(&txq->lock);
279 while (q->write_ptr != q->read_ptr) {
280 iwl_pcie_txq_free_tfd(trans, txq, dma_dir);
281 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
282 }
283 spin_unlock_bh(&txq->lock);
284}
285
286/*
287 * iwl_txq_free - Deallocate DMA queue.
288 * @txq: Transmit queue to deallocate.
289 *
290 * Empty queue by removing and destroying all BD's.
291 * Free all buffers.
292 * 0-fill, but do not free "txq" descriptor structure.
293 */
294static void iwl_txq_free(struct iwl_trans *trans, int txq_id)
295{
296 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
297 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
298 struct device *dev = trans->dev;
299 int i;
300
301 if (WARN_ON(!txq))
302 return;
303
304 iwl_pcie_txq_unmap(trans, txq_id);
305
306 /* De-alloc array of command/tx buffers */
307 if (txq_id == trans_pcie->cmd_queue)
308 for (i = 0; i < txq->q.n_window; i++) {
309 kfree(txq->entries[i].cmd);
310 kfree(txq->entries[i].copy_cmd);
311 kfree(txq->entries[i].free_buf);
312 }
313
314 /* De-alloc circular buffer of TFDs */
315 if (txq->q.n_bd) {
316 dma_free_coherent(dev, sizeof(struct iwl_tfd) *
317 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
318 memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
319 }
320
321 kfree(txq->entries);
322 txq->entries = NULL;
323
324 del_timer_sync(&txq->stuck_timer);
325
326 /* 0-fill queue descriptor structure */
327 memset(txq, 0, sizeof(*txq));
328}
329
330/*
331 * iwl_trans_tx_free - Free TXQ Context
332 *
333 * Destroy all TX DMA queues and structures
334 */
335static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
336{
337 int txq_id;
338 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
339
340 /* Tx queues */
341 if (trans_pcie->txq) {
342 for (txq_id = 0;
343 txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
344 iwl_txq_free(trans, txq_id);
345 }
346
347 kfree(trans_pcie->txq);
348 trans_pcie->txq = NULL;
349
350 iwlagn_free_dma_ptr(trans, &trans_pcie->kw);
351
352 iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
353}
354
355/*
356 * iwl_trans_tx_alloc - allocate TX context
357 * Allocate all Tx DMA structures and initialize them
358 */
359static int iwl_trans_tx_alloc(struct iwl_trans *trans)
360{
361 int ret;
362 int txq_id, slots_num;
363 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
364
365 u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
366 sizeof(struct iwlagn_scd_bc_tbl);
367
368 /*It is not allowed to alloc twice, so warn when this happens.
369 * We cannot rely on the previous allocation, so free and fail */
370 if (WARN_ON(trans_pcie->txq)) {
371 ret = -EINVAL;
372 goto error;
373 }
374
375 ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
376 scd_bc_tbls_size);
377 if (ret) {
378 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
379 goto error;
380 }
381
382 /* Alloc keep-warm buffer */
383 ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
384 if (ret) {
385 IWL_ERR(trans, "Keep Warm allocation failed\n");
386 goto error;
387 }
388
389 trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
390 sizeof(struct iwl_txq), GFP_KERNEL);
391 if (!trans_pcie->txq) {
392 IWL_ERR(trans, "Not enough memory for txq\n");
393 ret = ENOMEM;
394 goto error;
395 }
396
397 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
398 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
399 txq_id++) {
400 slots_num = (txq_id == trans_pcie->cmd_queue) ?
401 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
402 ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
403 slots_num, txq_id);
404 if (ret) {
405 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
406 goto error;
407 }
408 }
409
410 return 0;
411
412error:
413 iwl_trans_pcie_tx_free(trans);
414
415 return ret;
416}
417static int iwl_tx_init(struct iwl_trans *trans)
418{
419 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
420 int ret;
421 int txq_id, slots_num;
422 unsigned long flags;
423 bool alloc = false;
424
425 if (!trans_pcie->txq) {
426 ret = iwl_trans_tx_alloc(trans);
427 if (ret)
428 goto error;
429 alloc = true;
430 }
431
432 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
433
434 /* Turn off all Tx DMA fifos */
435 iwl_write_prph(trans, SCD_TXFACT, 0);
436
437 /* Tell NIC where to find the "keep warm" buffer */
438 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
439 trans_pcie->kw.dma >> 4);
440
441 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
442
443 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
444 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
445 txq_id++) {
446 slots_num = (txq_id == trans_pcie->cmd_queue) ?
447 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
448 ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
449 slots_num, txq_id);
450 if (ret) {
451 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
452 goto error;
453 }
454 }
455
456 return 0;
457error:
458 /*Upon error, free only if we allocated something */
459 if (alloc)
460 iwl_trans_pcie_tx_free(trans);
461 return ret;
462}

static void iwl_pcie_set_pwr_vmain(struct iwl_trans *trans)
{
@@ -659,7 +273,7 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
	iwl_pcie_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
-	if (iwl_tx_init(trans))
+	if (iwl_pcie_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
@@ -874,126 +488,10 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
	return iwl_pcie_load_given_ucode(trans, fw);
}

877/*
878 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
879 */
880static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
881{
882 struct iwl_trans_pcie __maybe_unused *trans_pcie =
883 IWL_TRANS_GET_PCIE_TRANS(trans);
884
885 iwl_write_prph(trans, SCD_TXFACT, mask);
886}
887
888static void iwl_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
889{
890 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
891 u32 a;
892 int chan;
893 u32 reg_val;
894
895 /* make sure all queue are not stopped/used */
896 memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
897 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
898
899 trans_pcie->scd_base_addr =
900 iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
901
902 WARN_ON(scd_base_addr != 0 &&
903 scd_base_addr != trans_pcie->scd_base_addr);
904
905 a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
906 /* reset conext data memory */
907 for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
908 a += 4)
909 iwl_write_targ_mem(trans, a, 0);
910 /* reset tx status memory */
911 for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
912 a += 4)
913 iwl_write_targ_mem(trans, a, 0);
914 for (; a < trans_pcie->scd_base_addr +
915 SCD_TRANS_TBL_OFFSET_QUEUE(
916 trans->cfg->base_params->num_of_queues);
917 a += 4)
918 iwl_write_targ_mem(trans, a, 0);
919
920 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
921 trans_pcie->scd_bc_tbls.dma >> 10);
922
923 /* The chain extension of the SCD doesn't work well. This feature is
924 * enabled by default by the HW, so we need to disable it manually.
925 */
926 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
927
928 iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
929 trans_pcie->cmd_fifo);
930
931 /* Activate all Tx DMA/FIFO channels */
932 iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
933
934 /* Enable DMA channel */
935 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
936 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
937 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
938 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
939
940 /* Update FH chicken bits */
941 reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
942 iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
943 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
944
945 /* Enable L1-Active */
946 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
947 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
948}
949
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
-	iwl_tx_start(trans, scd_addr);
+	iwl_pcie_tx_start(trans, scd_addr);
954}
955
956/*
957 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
958 */
959static int iwl_trans_tx_stop(struct iwl_trans *trans)
960{
961 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
962 int ch, txq_id, ret;
963 unsigned long flags;
964
965 /* Turn off all Tx DMA fifos */
966 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
967
968 iwl_trans_txq_set_sched(trans, 0);
969
970 /* Stop each Tx DMA channel, and wait for it to be idle */
971 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
972 iwl_write_direct32(trans,
973 FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
974 ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
975 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
976 if (ret < 0)
977 IWL_ERR(trans,
978 "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
979 ch,
980 iwl_read_direct32(trans,
981 FH_TSSR_TX_STATUS_REG));
982 }
983 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
984
985 if (!trans_pcie->txq) {
986 IWL_WARN(trans,
987 "Stopping tx queues that aren't allocated...\n");
988 return 0;
989 }
990
991 /* Unmap DMA from host system and free skb's */
992 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
993 txq_id++)
994 iwl_pcie_txq_unmap(trans, txq_id);
995
996 return 0;
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
@@ -1017,7 +515,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
-		iwl_trans_tx_stop(trans);
+		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
@@ -1070,170 +568,6 @@ static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

1073static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1074 struct iwl_device_cmd *dev_cmd, int txq_id)
1075{
1076 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1077 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1078 struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
1079 struct iwl_cmd_meta *out_meta;
1080 struct iwl_txq *txq;
1081 struct iwl_queue *q;
1082 dma_addr_t phys_addr = 0;
1083 dma_addr_t txcmd_phys;
1084 dma_addr_t scratch_phys;
1085 u16 len, firstlen, secondlen;
1086 u8 wait_write_ptr = 0;
1087 __le16 fc = hdr->frame_control;
1088 u8 hdr_len = ieee80211_hdrlen(fc);
1089 u16 __maybe_unused wifi_seq;
1090
1091 txq = &trans_pcie->txq[txq_id];
1092 q = &txq->q;
1093
1094 if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
1095 WARN_ON_ONCE(1);
1096 return -EINVAL;
1097 }
1098
1099 spin_lock(&txq->lock);
1100
1101 /* In AGG mode, the index in the ring must correspond to the WiFi
1102 * sequence number. This is a HW requirements to help the SCD to parse
1103 * the BA.
1104 * Check here that the packets are in the right place on the ring.
1105 */
1106#ifdef CONFIG_IWLWIFI_DEBUG
1107 wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
1108 WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
1109 ((wifi_seq & 0xff) != q->write_ptr),
1110 "Q: %d WiFi Seq %d tfdNum %d",
1111 txq_id, wifi_seq, q->write_ptr);
1112#endif
1113
1114 /* Set up driver data for this TFD */
1115 txq->entries[q->write_ptr].skb = skb;
1116 txq->entries[q->write_ptr].cmd = dev_cmd;
1117
1118 dev_cmd->hdr.cmd = REPLY_TX;
1119 dev_cmd->hdr.sequence =
1120 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
1121 INDEX_TO_SEQ(q->write_ptr)));
1122
1123 /* Set up first empty entry in queue's array of Tx/cmd buffers */
1124 out_meta = &txq->entries[q->write_ptr].meta;
1125
1126 /*
1127 * Use the first empty entry in this queue's command buffer array
1128 * to contain the Tx command and MAC header concatenated together
1129 * (payload data will be in another buffer).
1130 * Size of this varies, due to varying MAC header length.
1131 * If end is not dword aligned, we'll have 2 extra bytes at the end
1132 * of the MAC header (device reads on dword boundaries).
1133 * We'll tell device about this padding later.
1134 */
1135 len = sizeof(struct iwl_tx_cmd) +
1136 sizeof(struct iwl_cmd_header) + hdr_len;
1137 firstlen = (len + 3) & ~3;
1138
1139 /* Tell NIC about any 2-byte padding after MAC header */
1140 if (firstlen != len)
1141 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1142
1143 /* Physical address of this Tx command's header (not MAC header!),
1144 * within command buffer array. */
1145 txcmd_phys = dma_map_single(trans->dev,
1146 &dev_cmd->hdr, firstlen,
1147 DMA_BIDIRECTIONAL);
1148 if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
1149 goto out_err;
1150 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1151 dma_unmap_len_set(out_meta, len, firstlen);
1152
1153 if (!ieee80211_has_morefrags(fc)) {
1154 txq->need_update = 1;
1155 } else {
1156 wait_write_ptr = 1;
1157 txq->need_update = 0;
1158 }
1159
1160 /* Set up TFD's 2nd entry to point directly to remainder of skb,
1161 * if any (802.11 null frames have no payload). */
1162 secondlen = skb->len - hdr_len;
1163 if (secondlen > 0) {
1164 phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
1165 secondlen, DMA_TO_DEVICE);
1166 if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
1167 dma_unmap_single(trans->dev,
1168 dma_unmap_addr(out_meta, mapping),
1169 dma_unmap_len(out_meta, len),
1170 DMA_BIDIRECTIONAL);
1171 goto out_err;
1172 }
1173 }
1174
1175 /* Attach buffers to TFD */
1176 iwl_pcie_tx_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
1177 if (secondlen > 0)
1178 iwl_pcie_tx_build_tfd(trans, txq, phys_addr, secondlen, 0);
1179
1180 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
1181 offsetof(struct iwl_tx_cmd, scratch);
1182
1183 /* take back ownership of DMA buffer to enable update */
1184 dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
1185 DMA_BIDIRECTIONAL);
1186 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1187 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1188
1189 IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
1190 le16_to_cpu(dev_cmd->hdr.sequence));
1191 IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
1192
1193 /* Set up entry for this TFD in Tx byte-count array */
1194 iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
1195
1196 dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
1197 DMA_BIDIRECTIONAL);
1198
1199 trace_iwlwifi_dev_tx(trans->dev, skb,
1200 &txq->tfds[txq->q.write_ptr],
1201 sizeof(struct iwl_tfd),
1202 &dev_cmd->hdr, firstlen,
1203 skb->data + hdr_len, secondlen);
1204 trace_iwlwifi_dev_tx_data(trans->dev, skb,
1205 skb->data + hdr_len, secondlen);
1206
1207 /* start timer if queue currently empty */
1208 if (txq->need_update && q->read_ptr == q->write_ptr &&
1209 trans_pcie->wd_timeout)
1210 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
1211
1212 /* Tell device the write index *just past* this latest filled TFD */
1213 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
1214 iwl_pcie_txq_inc_wr_ptr(trans, txq);
1215
1216 /*
1217 * At this point the frame is "transmitted" successfully
1218 * and we will get a TX status notification eventually,
1219 * regardless of the value of ret. "ret" only indicates
1220 * whether or not we should update the write pointer.
1221 */
1222 if (iwl_queue_space(q) < q->high_mark) {
1223 if (wait_write_ptr) {
1224 txq->need_update = 1;
1225 iwl_pcie_txq_inc_wr_ptr(trans, txq);
1226 } else {
1227 iwl_stop_queue(trans, txq);
1228 }
1229 }
1230 spin_unlock(&txq->lock);
1231 return 0;
1232 out_err:
1233 spin_unlock(&txq->lock);
1234 return -1;
1235}
1236
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1319,27 +653,6 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
	}
}

1322static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1323 struct sk_buff_head *skbs)
1324{
1325 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1326 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
1327 /* n_bd is usually 256 => n_bd - 1 = 0xff */
1328 int tfd_num = ssn & (txq->q.n_bd - 1);
1329
1330 spin_lock(&txq->lock);
1331
1332 if (txq->q.read_ptr != tfd_num) {
1333 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
1334 txq_id, txq->q.read_ptr, tfd_num, ssn);
1335 iwl_pcie_txq_reclaim(trans, txq_id, tfd_num, skbs);
1336 if (iwl_queue_space(&txq->q) > txq->q.low_mark)
1337 iwl_wake_queue(trans, txq);
1338 }
1339
1340 spin_unlock(&txq->lock);
1341}
1342
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
@@ -1386,7 +699,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

-	iwl_trans_pcie_tx_free(trans);
+	iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	if (trans_pcie->irq_requested == true) {
@@ -1892,13 +1205,13 @@ static const struct iwl_trans_ops trans_ops_pcie = {

	.wowlan_suspend = iwl_trans_pcie_wowlan_suspend,

-	.send_cmd = iwl_pcie_send_cmd,
+	.send_cmd = iwl_trans_pcie_send_hcmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

-	.txq_disable = iwl_pcie_txq_disable,
-	.txq_enable = iwl_pcie_txq_enable,
+	.txq_disable = iwl_trans_pcie_txq_disable,
+	.txq_enable = iwl_trans_pcie_txq_enable,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,
