author		Emmanuel Grumbach <emmanuel.grumbach@intel.com>	2012-11-14 05:39:52 -0500
committer	Johannes Berg <johannes.berg@intel.com>	2012-11-19 09:01:35 -0500
commit		990aa6d7b28d26bf22171410b49f191e8e9b09fc (patch)
tree		442c1d51b039ec3ea42b41ddd40cab8ad4612e5f /drivers/net/wireless/iwlwifi/pcie/tx.c
parent		b55e57f53f8740a2d1432e4963372d303b798530 (diff)
iwlwifi: rename functions in transport layer
1) s/tx_queue/txq
for the sake of consistency.
2) s/rx_queue/rxq
for the sake of consistency.
3) Make all functions begin with iwl_pcie_
iwl_queue_init and iwl_queue_space are exceptions
since they are not PCIE-specific, although they live
in the pcie subdir.
4) s/trans_pcie_get_cmd_string/get_cmd_string
it is much shorter and is used in debug prints,
which are long lines.
5) s/iwl_bg_rx_replenish/iwl_pcie_rx_replenish_work
this better emphasizes that it is a work item
6) remove invalid kernel-doc markers
pcie/tx.c and pcie/trans.c still need to be cleaned up.
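As a sketch of the resulting convention (illustrative call sites only,
not lines taken verbatim from this patch; the function names are the
ones renamed here):

	/* before: mixed naming in the PCIE transport code */
	iwl_txq_update_write_ptr(trans, txq);
	iwl_trans_pcie_txq_disable(trans, txq_id);

	/* after: PCIE-specific helpers uniformly carry the iwl_pcie_ prefix */
	iwl_pcie_txq_inc_wr_ptr(trans, txq);
	iwl_pcie_txq_disable(trans, txq_id);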
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie/tx.c')
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/tx.c	108
1 file changed, 50 insertions(+), 58 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index dcc7e1256e39..eac0481a9c71 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -42,12 +42,11 @@
 #define IWL_TX_CRC_SIZE 4
 #define IWL_TX_DELIMITER_SIZE 4
 
-/**
- * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+/*
+ * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
  */
-void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
-				       struct iwl_tx_queue *txq,
-				       u16 byte_cnt)
+void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
+				      struct iwl_txq *txq, u16 byte_cnt)
 {
 	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -88,10 +87,10 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 	tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
 }
 
-/**
- * iwl_txq_update_write_ptr - Send new write index to hardware
+/*
+ * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
  */
-void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
+void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
 {
 	u32 reg = 0;
 	int txq_id = txq->q.id;
@@ -206,8 +205,8 @@ static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
 	tfd->num_tbs = 0;
 }
 
-/**
- * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+/*
+ * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
  * @trans - transport private data
  * @txq - tx queue
  * @dma_dir - the direction of the DMA mapping
@@ -215,8 +214,8 @@ static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
  * Does NOT advance any TFD circular buffer read/write indexes
  * Does NOT free the TFD itself (which is within circular buffer)
  */
-void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
-		      enum dma_data_direction dma_dir)
+void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+			   enum dma_data_direction dma_dir)
 {
 	struct iwl_tfd *tfd_tmp = txq->tfds;
 
@@ -247,10 +246,8 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
 	}
 }
 
-int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
-				 struct iwl_tx_queue *txq,
-				 dma_addr_t addr, u16 len,
-				 u8 reset)
+int iwl_pcie_tx_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+			  dma_addr_t addr, u16 len, u8 reset)
 {
 	struct iwl_queue *q;
 	struct iwl_tfd *tfd, *tfd_tmp;
@@ -322,7 +319,7 @@ int iwl_queue_space(const struct iwl_queue *q)
 	return s;
 }
 
-/**
+/*
  * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
  */
 int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
@@ -355,7 +352,7 @@ int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
 }
 
 static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
-					  struct iwl_tx_queue *txq)
+					  struct iwl_txq *txq)
 {
 	struct iwl_trans_pcie *trans_pcie =
 		IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -415,8 +412,8 @@ static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id)
 		    (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
 }
 
-void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
-			       int sta_id, int tid, int frame_limit, u16 ssn)
+void iwl_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
+			 int sta_id, int tid, int frame_limit, u16 ssn)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -477,7 +474,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
 			    txq_id, fifo, ssn & 0xff);
 }
 
-void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
+void iwl_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 stts_addr = trans_pcie->scd_base_addr +
@@ -494,14 +491,14 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
 	_iwl_write_targ_mem_dwords(trans, stts_addr,
 				   zero_val, ARRAY_SIZE(zero_val));
 
-	iwl_tx_queue_unmap(trans, txq_id);
+	iwl_pcie_txq_unmap(trans, txq_id);
 
 	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
 }
 
 /*************** HOST COMMAND QUEUE FUNCTIONS *****/
 
-/**
+/*
  * iwl_enqueue_hcmd - enqueue a uCode command
  * @priv: device private data point
  * @cmd: a point to the ucode command structure
@@ -513,7 +510,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
 static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
 	struct iwl_queue *q = &txq->q;
 	struct iwl_device_cmd *out_cmd;
 	struct iwl_cmd_meta *out_meta;
@@ -576,8 +573,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 	 */
 	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
 		 "Command %s (%#x) is too large (%d bytes)\n",
-		 trans_pcie_get_cmd_string(trans_pcie, cmd->id),
-		 cmd->id, copy_size)) {
+		 get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) {
 		idx = -EINVAL;
 		goto free_dup_buf;
 	}
@@ -640,7 +636,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 
 	IWL_DEBUG_HC(trans,
 		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
-		     trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
+		     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
 		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
 		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
 
@@ -654,7 +650,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 	dma_unmap_addr_set(out_meta, mapping, phys_addr);
 	dma_unmap_len_set(out_meta, len, copy_size);
 
-	iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
+	iwl_pcie_tx_build_tfd(trans, txq, phys_addr, copy_size, 1);
 
 	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
 		const void *data = cmd->data[i];
@@ -676,8 +672,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 			goto out;
 		}
 
-		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
-					     cmd->len[i], 0);
+		iwl_pcie_tx_build_tfd(trans, txq, phys_addr, cmd->len[i], 0);
 	}
 
 	out_meta->flags = cmd->flags;
@@ -696,7 +691,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 
 	/* Increment and update queue's write index */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
-	iwl_txq_update_write_ptr(trans, txq);
+	iwl_pcie_txq_inc_wr_ptr(trans, txq);
 
 out:
 	spin_unlock_bh(&txq->lock);
@@ -707,7 +702,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 }
 
 static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie,
-				      struct iwl_tx_queue *txq)
+				      struct iwl_txq *txq)
 {
 	if (!trans_pcie->wd_timeout)
 		return;
@@ -722,7 +717,7 @@ static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie,
 		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
 }
 
-/**
+/*
  * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
@@ -733,7 +728,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
 				   int idx)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
 	int nfreed = 0;
 
@@ -761,8 +756,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
 	iwl_queue_progress(trans_pcie, txq);
 }
 
-/**
- * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
+/*
+ * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
  * @rxb: Rx buffer to reclaim
  * @handler_status: return value of the handler of the command
  *	(put in setup_rx_handlers)
@@ -771,8 +766,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
  * will be executed. The attached skb (if present) will only be freed
  * if the callback returns 1
  */
-void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
-			 int handler_status)
+void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
+			    struct iwl_rx_cmd_buffer *rxb, int handler_status)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -782,7 +777,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
 	struct iwl_device_cmd *cmd;
 	struct iwl_cmd_meta *meta;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
 
 	/* If a Tx command is being handled and it isn't in the actual
 	 * command queue then there a command routing bug has been introduced
@@ -820,13 +815,11 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
 		if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
 			IWL_WARN(trans,
 				 "HCMD_ACTIVE already clear for command %s\n",
-				 trans_pcie_get_cmd_string(trans_pcie,
-							   cmd->hdr.cmd));
+				 get_cmd_string(trans_pcie, cmd->hdr.cmd));
 		}
 		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
 		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
-			       trans_pcie_get_cmd_string(trans_pcie,
-							 cmd->hdr.cmd));
+			       get_cmd_string(trans_pcie, cmd->hdr.cmd));
 		wake_up(&trans_pcie->wait_command_queue);
 	}
 
@@ -851,7 +844,7 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 	if (ret < 0) {
 		IWL_ERR(trans,
 			"Error sending %s: enqueue_hcmd failed: %d\n",
-			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
+			get_cmd_string(trans_pcie, cmd->id), ret);
 		return ret;
 	}
 	return 0;
@@ -864,17 +857,17 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 	int ret;
 
 	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
-		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+		       get_cmd_string(trans_pcie, cmd->id));
 
 	if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
 				     &trans_pcie->status))) {
 		IWL_ERR(trans, "Command %s: a command is already active!\n",
-			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+			get_cmd_string(trans_pcie, cmd->id));
 		return -EIO;
 	}
 
 	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
-		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+		       get_cmd_string(trans_pcie, cmd->id));
 
 	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
 	if (cmd_idx < 0) {
@@ -882,7 +875,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
 		IWL_ERR(trans,
 			"Error sending %s: enqueue_hcmd failed: %d\n",
-			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
+			get_cmd_string(trans_pcie, cmd->id), ret);
 		return ret;
 	}
 
@@ -892,13 +885,13 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 					HOST_COMPLETE_TIMEOUT);
 	if (!ret) {
 		if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
-			struct iwl_tx_queue *txq =
+			struct iwl_txq *txq =
 				&trans_pcie->txq[trans_pcie->cmd_queue];
 			struct iwl_queue *q = &txq->q;
 
 			IWL_ERR(trans,
 				"Error sending %s: time out after %dms.\n",
-				trans_pcie_get_cmd_string(trans_pcie, cmd->id),
+				get_cmd_string(trans_pcie, cmd->id),
 				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
 
 			IWL_ERR(trans,
@@ -908,8 +901,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 			clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
 			IWL_DEBUG_INFO(trans,
 				       "Clearing HCMD_ACTIVE for command %s\n",
-				       trans_pcie_get_cmd_string(trans_pcie,
-								 cmd->id));
+				       get_cmd_string(trans_pcie, cmd->id));
 			ret = -ETIMEDOUT;
 			goto cancel;
 		}
@@ -917,7 +909,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 
 	if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) {
 		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
-			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+			get_cmd_string(trans_pcie, cmd->id));
 		ret = -EIO;
 		goto cancel;
 	}
@@ -930,7 +922,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 
 	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
 		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
-			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
+			get_cmd_string(trans_pcie, cmd->id));
 		ret = -EIO;
 		goto cancel;
 	}
@@ -957,7 +949,7 @@ cancel:
 	return ret;
 }
 
-int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+int iwl_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -975,11 +967,11 @@ int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 }
 
 /* Frees buffers until index _not_ inclusive */
-int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
+int iwl_pcie_txq_reclaim(struct iwl_trans *trans, int txq_id, int index,
 			 struct sk_buff_head *skbs)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
 	int last_to_free;
 	int freed = 0;
@@ -1019,7 +1011,7 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
 
 		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
 
-		iwl_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
+		iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
 		freed++;
 	}
 