author		Sara Sharon <sara.sharon@intel.com>	2016-06-26 06:17:56 -0400
committer	Luca Coelho <luciano.coelho@intel.com>	2016-09-16 02:10:21 -0400
commit		6983ba6951139c99f0692c94f83d8d75ea559bcc (patch)
tree		474b72204352c91f3e2d59041834753a2d4af8df
parent		8aacf4b73fe87bc8fbe75a83862f411b52b7f272 (diff)
iwlwifi: pcie: assign and access a000 TFD & TBs
The previous patch introduced the new formats. This patch
allocates the new structures and adjusts the code accordingly.
Signed-off-by: Sara Sharon <sara.sharon@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/pcie/internal.h	|  19
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/pcie/trans.c		|  16
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/pcie/tx.c		| 141
3 files changed, 120 insertions(+), 56 deletions(-)
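A note for readers before the diff: the core pattern throughout is that txq->tfds becomes an untyped buffer holding either legacy TFDs or the larger a000-family TFH descriptors, so every access indexes by the runtime tfd_size and dispatches on use_tfh before casting. A minimal userspace sketch of that pattern follows (struct layouts abbreviated, names hypothetical except where they mirror the patch, byte-order conversion elided; the kernel code does the byte indexing on void * via the GCC extension, while the sketch casts to char * for portability):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Abbreviated stand-ins for the two descriptor layouts. */
struct legacy_tfd { uint8_t num_tbs; /* TBs would follow */ };
struct tfh_tfd { uint16_t num_tbs; /* le16 on the device; conversion elided */ };

struct ring {
	int use_tfh;		/* mirrors trans->cfg->use_tfh */
	size_t tfd_size;	/* mirrors trans_pcie->tfd_size */
	void *tfds;		/* mirrors txq->tfds: untyped, sized at runtime */
};

/* Same shape as iwl_pcie_get_tfd(): index by bytes, not by struct type. */
static void *get_tfd(const struct ring *r, int idx)
{
	return (char *)r->tfds + r->tfd_size * idx;
}

/* Same shape as iwl_pcie_tfd_get_num_tbs(): dispatch on format, then cast. */
static unsigned int get_num_tbs(const struct ring *r, void *tfd)
{
	if (r->use_tfh)
		return ((struct tfh_tfd *)tfd)->num_tbs & 0x1f;
	return ((struct legacy_tfd *)tfd)->num_tbs & 0x1f;
}

int main(void)
{
	struct ring r = { .use_tfh = 1, .tfd_size = sizeof(struct tfh_tfd) };

	r.tfds = calloc(8, r.tfd_size);	/* dma_alloc_coherent() in the driver */
	((struct tfh_tfd *)get_tfd(&r, 3))->num_tbs = 2;
	printf("num_tbs at slot 3: %u\n", get_num_tbs(&r, get_tfd(&r, 3)));
	free(r.tfds);
	return 0;
}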
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index b9dc82b981e1..d185692676fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -280,7 +280,7 @@ struct iwl_pcie_first_tb_buf {
  */
 struct iwl_txq {
 	struct iwl_queue q;
-	struct iwl_tfd *tfds;
+	void *tfds;
 	struct iwl_pcie_first_tb_buf *first_tb_bufs;
 	dma_addr_t first_tb_dma;
 	struct iwl_pcie_txq_entry *entries;
@@ -393,6 +393,7 @@ struct iwl_trans_pcie {
 	u8 n_no_reclaim_cmds;
 	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
 	u8 max_tbs;
+	u16 tfd_size;
 
 	enum iwl_amsdu_size rx_buf_size;
 	bool bc_table_dword;
@@ -489,9 +490,21 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 			    struct sk_buff_head *skbs);
 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
 
-static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *tfd,
+					  u8 idx)
 {
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+	struct iwl_tfd *tfd_fh;
+	struct iwl_tfd_tb *tb;
+
+	if (trans->cfg->use_tfh) {
+		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+		struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx];
+
+		return le16_to_cpu(tb->tb_len);
+	}
+
+	tfd_fh = (void *)tfd;
+	tb = &tfd_fh->tbs[idx];
 
 	return le16_to_cpu(tb->hi_n_len) >> 4;
 }
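The only subtle part of the legacy accessor above is the TB encoding: the legacy iwl_tfd_tb holds a 32-bit lo word plus a 16-bit hi_n_len whose low nibble carries DMA address bits 32-35 and whose upper 12 bits carry the buffer length, which is why iwl_pcie_tfd_tb_get_len() ends in >> 4, while the TFH format simply stores a 64-bit address and a 16-bit tb_len. A hedged round-trip demo of the legacy packing (userspace sketch, byte-order conversion elided, helper names hypothetical):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Legacy TB packing: a 36-bit DMA address split across lo (bits 0-31) and
 * the low nibble of hi_n_len (bits 32-35); the upper 12 bits of hi_n_len
 * carry the buffer length (so len must fit in 12 bits). */
static void legacy_pack(uint64_t addr, uint16_t len,
			uint32_t *lo, uint16_t *hi_n_len)
{
	*lo = (uint32_t)addr;
	*hi_n_len = (uint16_t)((len << 4) | ((addr >> 32) & 0xF));
}

static uint64_t legacy_addr(uint32_t lo, uint16_t hi_n_len)
{
	return (uint64_t)lo | ((uint64_t)(hi_n_len & 0xF) << 32);
}

static uint16_t legacy_len(uint16_t hi_n_len)
{
	return hi_n_len >> 4;	/* what iwl_pcie_tfd_tb_get_len() computes */
}

int main(void)
{
	uint32_t lo;
	uint16_t hnl;

	legacy_pack(0xF23456789ULL, 1500, &lo, &hnl);
	assert(legacy_addr(lo, hnl) == 0xF23456789ULL);
	assert(legacy_len(hnl) == 1500);
	printf("36-bit address and 12-bit length round-trip OK\n");
	return 0;
}

Note that the driver writes the high-nibble extraction as ((addr >> 16) >> 16) rather than (addr >> 32), so the shift stays well-defined even when dma_addr_t is only 32 bits wide.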
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 21b1be11100a..e908bb8e10b6 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2437,15 +2437,14 @@ err:
 }
 #endif /*CONFIG_IWLWIFI_DEBUGFS */
 
-static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans,
-				     struct iwl_tfd *tfd)
+static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 cmdlen = 0;
 	int i;
 
 	for (i = 0; i < trans_pcie->max_tbs; i++)
-		cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);
+		cmdlen += iwl_pcie_tfd_tb_get_len(trans, tfd, i);
 
 	return cmdlen;
 }
@@ -2733,7 +2732,8 @@ static struct iwl_trans_dump_data
 		u8 idx = get_cmd_index(&cmdq->q, ptr);
 		u32 caplen, cmdlen;
 
-		cmdlen = iwl_trans_pcie_get_cmdlen(trans, &cmdq->tfds[ptr]);
+		cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds +
+						   trans_pcie->tfd_size * ptr);
 		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
 
 		if (cmdlen) {
@@ -2876,10 +2876,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	else
 		addr_size = 36;
 
-	if (cfg->use_tfh)
+	if (cfg->use_tfh) {
 		trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
-	else
+		trans_pcie->tfd_size = sizeof(struct iwl_tfh_tb);
+
+	} else {
 		trans_pcie->max_tbs = IWL_NUM_OF_TBS;
+		trans_pcie->tfd_size = sizeof(struct iwl_tfd);
+	}
 	trans->max_skb_frags = IWL_PCIE_MAX_FRAGS(trans_pcie);
 
 	pci_set_master(pdev);
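With tfd_size now chosen at probe time, the debugfs dump above walks the command queue by byte offset (cmdq->tfds + trans_pcie->tfd_size * ptr; arithmetic on void * relies on the GCC extension, standard in kernel code, that treats it as byte-sized). A small sketch of the equivalent walk plus the cmdlen summation, with a toy descriptor and calloc standing in for the DMA ring (all names hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define QUEUE_SIZE 4	/* stands in for TFD_QUEUE_SIZE_MAX */
#define MAX_TBS    3	/* stands in for trans_pcie->max_tbs */

/* Toy descriptor: fixed-size length slots, as if read from real TBs. */
struct toy_tfd { uint16_t tb_len[MAX_TBS]; };

/* Same shape as iwl_trans_pcie_get_cmdlen(): sum every TB length. */
static uint32_t get_cmdlen(const struct toy_tfd *tfd)
{
	uint32_t cmdlen = 0;
	int i;

	for (i = 0; i < MAX_TBS; i++)
		cmdlen += tfd->tb_len[i];
	return cmdlen;
}

int main(void)
{
	size_t tfd_size = sizeof(struct toy_tfd);	/* runtime in the driver */
	char *ring = calloc(QUEUE_SIZE, tfd_size);	/* the descriptor ring */

	/* Write one descriptor at slot 2 via byte-offset indexing, as the
	 * dump code now does with cmdq->tfds + tfd_size * ptr. */
	struct toy_tfd *tfd = (struct toy_tfd *)(ring + tfd_size * 2);

	tfd->tb_len[0] = 20;
	tfd->tb_len[1] = 1480;
	printf("cmdlen at slot 2: %u\n", (unsigned int)get_cmdlen(tfd));
	free(ring);
	return 0;
}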
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 57a657a17e1e..f893ee111b46 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -312,11 +312,30 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
 	}
 }
 
-static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
+				     struct iwl_txq *txq, int idx)
 {
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+	return txq->tfds + trans_pcie->tfd_size * idx;
+}
+
+static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
+						  void *tfd, u8 idx)
+{
+	struct iwl_tfd *tfd_fh;
+	struct iwl_tfd_tb *tb;
+	dma_addr_t addr;
+
+	if (trans->cfg->use_tfh) {
+		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+		struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx];
+
+		return (dma_addr_t)(le64_to_cpu(tb->addr));
+	}
+
+	tfd_fh = (void *)tfd;
+	tb = &tfd_fh->tbs[idx];
+	addr = get_unaligned_le32(&tb->lo);
 
-	dma_addr_t addr = get_unaligned_le32(&tb->lo);
 	if (sizeof(dma_addr_t) > sizeof(u32))
 		addr |=
 		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
@@ -324,35 +343,57 @@ static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
 	return addr;
 }
 
-static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
-				       dma_addr_t addr, u16 len)
+static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
+				       u8 idx, dma_addr_t addr, u16 len)
 {
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-	u16 hi_n_len = len << 4;
+	if (trans->cfg->use_tfh) {
+		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+		struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx];
 
-	put_unaligned_le32(addr, &tb->lo);
-	if (sizeof(dma_addr_t) > sizeof(u32))
-		hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+		put_unaligned_le64(addr, &tb->addr);
+		tb->tb_len = cpu_to_le16(len);
+
+		tfd_fh->num_tbs = cpu_to_le16(idx + 1);
+	} else {
+		struct iwl_tfd *tfd_fh = (void *)tfd;
+		struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];
 
-	tb->hi_n_len = cpu_to_le16(hi_n_len);
+		u16 hi_n_len = len << 4;
 
-	tfd->num_tbs = idx + 1;
+		put_unaligned_le32(addr, &tb->lo);
+		if (sizeof(dma_addr_t) > sizeof(u32))
+			hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+		tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+		tfd_fh->num_tbs = idx + 1;
+	}
 }
 
-static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
+static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *tfd)
 {
-	return tfd->num_tbs & 0x1f;
+	struct iwl_tfd *tfd_fh;
+
+	if (trans->cfg->use_tfh) {
+		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+
+		return le16_to_cpu(tfd_fh->num_tbs) & 0x1f;
+	}
+
+	tfd_fh = (void *)tfd;
+	return tfd_fh->num_tbs & 0x1f;
 }
 
 static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 			       struct iwl_cmd_meta *meta,
-			       struct iwl_tfd *tfd)
+			       struct iwl_txq *txq, int index)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int i, num_tbs;
+	void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index);
 
 	/* Sanity check on number of chunks */
-	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
+	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
 
 	if (num_tbs >= trans_pcie->max_tbs) {
 		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
@@ -365,16 +406,28 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 	for (i = 1; i < num_tbs; i++) {
 		if (meta->tbs & BIT(i))
 			dma_unmap_page(trans->dev,
-				       iwl_pcie_tfd_tb_get_addr(tfd, i),
-				       iwl_pcie_tfd_tb_get_len(tfd, i),
+				       iwl_pcie_tfd_tb_get_addr(trans, tfd, i),
+				       iwl_pcie_tfd_tb_get_len(trans, tfd, i),
 				       DMA_TO_DEVICE);
 		else
 			dma_unmap_single(trans->dev,
-					 iwl_pcie_tfd_tb_get_addr(tfd, i),
-					 iwl_pcie_tfd_tb_get_len(tfd, i),
+					 iwl_pcie_tfd_tb_get_addr(trans, tfd,
+								  i),
+					 iwl_pcie_tfd_tb_get_len(trans, tfd,
+								 i),
 					 DMA_TO_DEVICE);
 	}
-	tfd->num_tbs = 0;
+
+	if (trans->cfg->use_tfh) {
+		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+
+		tfd_fh->num_tbs = 0;
+	} else {
+		struct iwl_tfd *tfd_fh = (void *)tfd;
+
+		tfd_fh->num_tbs = 0;
+	}
+
 }
 
 /*
@@ -388,8 +441,6 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
  */
 static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 {
-	struct iwl_tfd *tfd_tmp = txq->tfds;
-
 	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
 	 * idx is bounded by n_window
 	 */
@@ -401,7 +452,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 	/* We have only q->n_window txq->entries, but we use
 	 * TFD_QUEUE_SIZE_MAX tfds
 	 */
-	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
+	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
 
 	/* free SKB */
 	if (txq->entries) {
@@ -425,19 +476,18 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_queue *q;
-	struct iwl_tfd *tfd, *tfd_tmp;
+	void *tfd;
 	u32 num_tbs;
 
 	q = &txq->q;
-	tfd_tmp = txq->tfds;
-	tfd = &tfd_tmp[q->write_ptr];
+	tfd = txq->tfds + trans_pcie->tfd_size * q->write_ptr;
 
 	if (reset)
-		memset(tfd, 0, sizeof(*tfd));
+		memset(tfd, 0, trans_pcie->tfd_size);
 
-	num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
+	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
 
-	/* Each TFD can point to a maximum 20 Tx buffers */
+	/* Each TFD can point to a maximum max_tbs Tx buffers */
 	if (num_tbs >= trans_pcie->max_tbs) {
 		IWL_ERR(trans, "Error can not send more than %d chunks\n",
 			trans_pcie->max_tbs);
@@ -448,7 +498,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
 		 "Unaligned address = %llx\n", (unsigned long long)addr))
 		return -EINVAL;
 
-	iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
+	iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);
 
 	return num_tbs;
 }
@@ -458,7 +508,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
 			     u32 txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
+	size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX;
 	size_t tb0_buf_sz;
 	int i;
 
@@ -672,7 +722,7 @@
 	/* De-alloc circular buffer of TFDs */
 	if (txq->tfds) {
 		dma_free_coherent(dev,
-				  sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
+				  trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
 				  txq->tfds, txq->q.dma_addr);
 		txq->q.dma_addr = 0;
 		txq->tfds = NULL;
@@ -1616,8 +1666,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 					   copy_size - tb0_size,
 					   DMA_TO_DEVICE);
 		if (dma_mapping_error(trans->dev, phys_addr)) {
-			iwl_pcie_tfd_unmap(trans, out_meta,
-					   &txq->tfds[q->write_ptr]);
+			iwl_pcie_tfd_unmap(trans, out_meta, txq, q->write_ptr);
 			idx = -ENOMEM;
 			goto out;
 		}
@@ -1640,8 +1689,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		phys_addr = dma_map_single(trans->dev, (void *)data,
 					   cmdlen[i], DMA_TO_DEVICE);
 		if (dma_mapping_error(trans->dev, phys_addr)) {
-			iwl_pcie_tfd_unmap(trans, out_meta,
-					   &txq->tfds[q->write_ptr]);
+			iwl_pcie_tfd_unmap(trans, out_meta, txq, q->write_ptr);
 			idx = -ENOMEM;
 			goto out;
 		}
@@ -1721,7 +1769,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 	meta = &txq->entries[cmd_index].meta;
 	cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
 
-	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
+	iwl_pcie_tfd_unmap(trans, meta, txq, index);
 
 	/* Input error checking is done when commands are added to queue. */
 	if (meta->flags & CMD_WANT_SKB) {
@@ -1919,6 +1967,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 			       struct iwl_cmd_meta *out_meta,
 			       struct iwl_device_cmd *dev_cmd, u16 tb1_len)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_queue *q = &txq->q;
 	u16 tb2_len;
 	int i;
@@ -1934,8 +1983,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 						     skb->data + hdr_len,
 						     tb2_len, DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
-			iwl_pcie_tfd_unmap(trans, out_meta,
-					   &txq->tfds[q->write_ptr]);
+			iwl_pcie_tfd_unmap(trans, out_meta, txq, q->write_ptr);
 			return -EINVAL;
 		}
 		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
@@ -1954,8 +2002,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 					   skb_frag_size(frag), DMA_TO_DEVICE);
 
 		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
-			iwl_pcie_tfd_unmap(trans, out_meta,
-					   &txq->tfds[q->write_ptr]);
+			iwl_pcie_tfd_unmap(trans, out_meta, txq, q->write_ptr);
 			return -EINVAL;
 		}
 		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
@@ -1965,8 +2012,8 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 	}
 
 	trace_iwlwifi_dev_tx(trans->dev, skb,
-			     &txq->tfds[txq->q.write_ptr],
-			     sizeof(struct iwl_tfd),
+			     iwl_pcie_get_tfd(trans_pcie, txq, q->write_ptr),
+			     trans_pcie->tfd_size,
 			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
 			     skb->data + hdr_len, tb2_len);
 	trace_iwlwifi_dev_tx_data(trans->dev, skb,
@@ -2041,8 +2088,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 			IEEE80211_CCMP_HDR_LEN : 0;
 
 	trace_iwlwifi_dev_tx(trans->dev, skb,
-			     &txq->tfds[txq->q.write_ptr],
-			     sizeof(struct iwl_tfd),
+			     iwl_pcie_get_tfd(trans_pcie, txq, q->write_ptr),
+			     trans_pcie->tfd_size,
 			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
 			     NULL, 0);
 
@@ -2198,7 +2245,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 	return 0;
 
 out_unmap:
-	iwl_pcie_tfd_unmap(trans, out_meta, &txq->tfds[q->write_ptr]);
+	iwl_pcie_tfd_unmap(trans, out_meta, txq, q->write_ptr);
 	return ret;
 }
 #else /* CONFIG_INET */