diff options
author | Johannes Berg <johannes.berg@intel.com> | 2014-04-24 03:57:40 -0400 |
---|---|---|
committer | Emmanuel Grumbach <emmanuel.grumbach@intel.com> | 2014-05-06 14:39:05 -0400 |
commit | 83f32a4b4aa73f36ecc799e22174fe78ed5cb2af (patch) | |
tree | 15977b0b4d3020ba66e3598fc4353a1670937a48 /drivers/net/wireless | |
parent | 6d6e68f8396269608cb1580a3b14be72069bb5f3 (diff) |
iwlwifi: pcie: get rid of q->n_bd
This variable always tracks a constant value (256) so there's
no need to have it. Removing it simplifies code generation,
reducing the .text size (by about 240 bytes on x86-64).
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Diffstat (limited to 'drivers/net/wireless')
-rw-r--r-- | drivers/net/wireless/iwlwifi/pcie/internal.h | 17 | ||||
-rw-r--r-- | drivers/net/wireless/iwlwifi/pcie/rx.c | 2 | ||||
-rw-r--r-- | drivers/net/wireless/iwlwifi/pcie/trans.c | 4 | ||||
-rw-r--r-- | drivers/net/wireless/iwlwifi/pcie/tx.c | 75 |
4 files changed, 45 insertions, 53 deletions
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index 1b95d856dfd5..ab21aee0a51d 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h | |||
@@ -117,21 +117,19 @@ struct iwl_dma_ptr { | |||
117 | /** | 117 | /** |
118 | * iwl_queue_inc_wrap - increment queue index, wrap back to beginning | 118 | * iwl_queue_inc_wrap - increment queue index, wrap back to beginning |
119 | * @index -- current index | 119 | * @index -- current index |
120 | * @n_bd -- total number of entries in queue (must be power of 2) | ||
121 | */ | 120 | */ |
122 | static inline int iwl_queue_inc_wrap(int index, int n_bd) | 121 | static inline int iwl_queue_inc_wrap(int index) |
123 | { | 122 | { |
124 | return ++index & (n_bd - 1); | 123 | return ++index & (TFD_QUEUE_SIZE_MAX - 1); |
125 | } | 124 | } |
126 | 125 | ||
127 | /** | 126 | /** |
128 | * iwl_queue_dec_wrap - decrement queue index, wrap back to end | 127 | * iwl_queue_dec_wrap - decrement queue index, wrap back to end |
129 | * @index -- current index | 128 | * @index -- current index |
130 | * @n_bd -- total number of entries in queue (must be power of 2) | ||
131 | */ | 129 | */ |
132 | static inline int iwl_queue_dec_wrap(int index, int n_bd) | 130 | static inline int iwl_queue_dec_wrap(int index) |
133 | { | 131 | { |
134 | return --index & (n_bd - 1); | 132 | return --index & (TFD_QUEUE_SIZE_MAX - 1); |
135 | } | 133 | } |
136 | 134 | ||
137 | struct iwl_cmd_meta { | 135 | struct iwl_cmd_meta { |
@@ -145,13 +143,13 @@ struct iwl_cmd_meta { | |||
145 | * | 143 | * |
146 | * Contains common data for Rx and Tx queues. | 144 | * Contains common data for Rx and Tx queues. |
147 | * | 145 | * |
148 | * Note the difference between n_bd and n_window: the hardware | 146 | * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware |
149 | * always assumes 256 descriptors, so n_bd is always 256 (unless | 147 | * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless |
150 | * there might be HW changes in the future). For the normal TX | 148 | * there might be HW changes in the future). For the normal TX |
151 | * queues, n_window, which is the size of the software queue data | 149 | * queues, n_window, which is the size of the software queue data |
152 | * is also 256; however, for the command queue, n_window is only | 150 | * is also 256; however, for the command queue, n_window is only |
153 | * 32 since we don't need so many commands pending. Since the HW | 151 | * 32 since we don't need so many commands pending. Since the HW |
154 | * still uses 256 BDs for DMA though, n_bd stays 256. As a result, | 152 | * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result, |
155 | * the software buffers (in the variables @meta, @txb in struct | 153 | * the software buffers (in the variables @meta, @txb in struct |
156 | * iwl_txq) only have 32 entries, while the HW buffers (@tfds in | 154 | * iwl_txq) only have 32 entries, while the HW buffers (@tfds in |
157 | * the same struct) have 256. | 155 | * the same struct) have 256. |
@@ -162,7 +160,6 @@ struct iwl_cmd_meta { | |||
162 | * data is a window overlayed over the HW queue. | 160 | * data is a window overlayed over the HW queue. |
163 | */ | 161 | */ |
164 | struct iwl_queue { | 162 | struct iwl_queue { |
165 | int n_bd; /* number of BDs in this queue */ | ||
166 | int write_ptr; /* 1-st empty entry (index) host_w*/ | 163 | int write_ptr; /* 1-st empty entry (index) host_w*/ |
167 | int read_ptr; /* last used entry (index) host_r*/ | 164 | int read_ptr; /* last used entry (index) host_r*/ |
168 | /* use for monitoring and recovering the stuck queue */ | 165 | /* use for monitoring and recovering the stuck queue */ |
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index 4a26a082a1ba..a2698e5e062c 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c | |||
@@ -850,7 +850,7 @@ static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans) | |||
850 | trans_pcie->ict_index, read); | 850 | trans_pcie->ict_index, read); |
851 | trans_pcie->ict_tbl[trans_pcie->ict_index] = 0; | 851 | trans_pcie->ict_tbl[trans_pcie->ict_index] = 0; |
852 | trans_pcie->ict_index = | 852 | trans_pcie->ict_index = |
853 | iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT); | 853 | ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1)); |
854 | 854 | ||
855 | read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); | 855 | read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); |
856 | trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, | 856 | trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, |
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index f98ef1e62eb9..a6f86220e0aa 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c | |||
@@ -1337,8 +1337,8 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm) | |||
1337 | IWL_ERR(trans, | 1337 | IWL_ERR(trans, |
1338 | "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", | 1338 | "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", |
1339 | cnt, active ? "" : "in", fifo, tbl_dw, | 1339 | cnt, active ? "" : "in", fifo, tbl_dw, |
1340 | iwl_read_prph(trans, | 1340 | iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) & |
1341 | SCD_QUEUE_RDPTR(cnt)) & (txq->q.n_bd - 1), | 1341 | (TFD_QUEUE_SIZE_MAX - 1), |
1342 | iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt))); | 1342 | iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt))); |
1343 | } | 1343 | } |
1344 | 1344 | ||
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 16ebc4a9514d..93709fe28d76 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c | |||
@@ -70,20 +70,20 @@ static int iwl_queue_space(const struct iwl_queue *q) | |||
70 | 70 | ||
71 | /* | 71 | /* |
72 | * To avoid ambiguity between empty and completely full queues, there | 72 | * To avoid ambiguity between empty and completely full queues, there |
73 | * should always be less than q->n_bd elements in the queue. | 73 | * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue. |
74 | * If q->n_window is smaller than q->n_bd, there is no need to reserve | 74 | * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need |
75 | * any queue entries for this purpose. | 75 | * to reserve any queue entries for this purpose. |
76 | */ | 76 | */ |
77 | if (q->n_window < q->n_bd) | 77 | if (q->n_window < TFD_QUEUE_SIZE_MAX) |
78 | max = q->n_window; | 78 | max = q->n_window; |
79 | else | 79 | else |
80 | max = q->n_bd - 1; | 80 | max = TFD_QUEUE_SIZE_MAX - 1; |
81 | 81 | ||
82 | /* | 82 | /* |
83 | * q->n_bd is a power of 2, so the following is equivalent to modulo by | 83 | * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to |
84 | * q->n_bd and is well defined for negative dividends. | 84 | * modulo by TFD_QUEUE_SIZE_MAX and is well defined. |
85 | */ | 85 | */ |
86 | used = (q->write_ptr - q->read_ptr) & (q->n_bd - 1); | 86 | used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1); |
87 | 87 | ||
88 | if (WARN_ON(used > max)) | 88 | if (WARN_ON(used > max)) |
89 | return 0; | 89 | return 0; |
@@ -94,17 +94,11 @@ static int iwl_queue_space(const struct iwl_queue *q) | |||
94 | /* | 94 | /* |
95 | * iwl_queue_init - Initialize queue's high/low-water and read/write indexes | 95 | * iwl_queue_init - Initialize queue's high/low-water and read/write indexes |
96 | */ | 96 | */ |
97 | static int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id) | 97 | static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id) |
98 | { | 98 | { |
99 | q->n_bd = count; | ||
100 | q->n_window = slots_num; | 99 | q->n_window = slots_num; |
101 | q->id = id; | 100 | q->id = id; |
102 | 101 | ||
103 | /* count must be power-of-two size, otherwise iwl_queue_inc_wrap | ||
104 | * and iwl_queue_dec_wrap are broken. */ | ||
105 | if (WARN_ON(!is_power_of_2(count))) | ||
106 | return -EINVAL; | ||
107 | |||
108 | /* slots_num must be power-of-two size, otherwise | 102 | /* slots_num must be power-of-two size, otherwise |
109 | * get_cmd_index is broken. */ | 103 | * get_cmd_index is broken. */ |
110 | if (WARN_ON(!is_power_of_2(slots_num))) | 104 | if (WARN_ON(!is_power_of_2(slots_num))) |
@@ -197,13 +191,13 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data) | |||
197 | IWL_ERR(trans, | 191 | IWL_ERR(trans, |
198 | "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", | 192 | "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", |
199 | i, active ? "" : "in", fifo, tbl_dw, | 193 | i, active ? "" : "in", fifo, tbl_dw, |
200 | iwl_read_prph(trans, | 194 | iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) & |
201 | SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1), | 195 | (TFD_QUEUE_SIZE_MAX - 1), |
202 | iwl_read_prph(trans, SCD_QUEUE_WRPTR(i))); | 196 | iwl_read_prph(trans, SCD_QUEUE_WRPTR(i))); |
203 | } | 197 | } |
204 | 198 | ||
205 | for (i = q->read_ptr; i != q->write_ptr; | 199 | for (i = q->read_ptr; i != q->write_ptr; |
206 | i = iwl_queue_inc_wrap(i, q->n_bd)) | 200 | i = iwl_queue_inc_wrap(i)) |
207 | IWL_ERR(trans, "scratch %d = 0x%08x\n", i, | 201 | IWL_ERR(trans, "scratch %d = 0x%08x\n", i, |
208 | le32_to_cpu(txq->scratchbufs[i].scratch)); | 202 | le32_to_cpu(txq->scratchbufs[i].scratch)); |
209 | 203 | ||
@@ -425,13 +419,17 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) | |||
425 | { | 419 | { |
426 | struct iwl_tfd *tfd_tmp = txq->tfds; | 420 | struct iwl_tfd *tfd_tmp = txq->tfds; |
427 | 421 | ||
428 | /* rd_ptr is bounded by n_bd and idx is bounded by n_window */ | 422 | /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and |
423 | * idx is bounded by n_window | ||
424 | */ | ||
429 | int rd_ptr = txq->q.read_ptr; | 425 | int rd_ptr = txq->q.read_ptr; |
430 | int idx = get_cmd_index(&txq->q, rd_ptr); | 426 | int idx = get_cmd_index(&txq->q, rd_ptr); |
431 | 427 | ||
432 | lockdep_assert_held(&txq->lock); | 428 | lockdep_assert_held(&txq->lock); |
433 | 429 | ||
434 | /* We have only q->n_window txq->entries, but we use q->n_bd tfds */ | 430 | /* We have only q->n_window txq->entries, but we use |
431 | * TFD_QUEUE_SIZE_MAX tfds | ||
432 | */ | ||
435 | iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]); | 433 | iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]); |
436 | 434 | ||
437 | /* free SKB */ | 435 | /* free SKB */ |
@@ -565,8 +563,7 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, | |||
565 | BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); | 563 | BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); |
566 | 564 | ||
567 | /* Initialize queue's high/low-water marks, and head/tail indexes */ | 565 | /* Initialize queue's high/low-water marks, and head/tail indexes */ |
568 | ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num, | 566 | ret = iwl_queue_init(&txq->q, slots_num, txq_id); |
569 | txq_id); | ||
570 | if (ret) | 567 | if (ret) |
571 | return ret; | 568 | return ret; |
572 | 569 | ||
@@ -591,15 +588,12 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) | |||
591 | struct iwl_txq *txq = &trans_pcie->txq[txq_id]; | 588 | struct iwl_txq *txq = &trans_pcie->txq[txq_id]; |
592 | struct iwl_queue *q = &txq->q; | 589 | struct iwl_queue *q = &txq->q; |
593 | 590 | ||
594 | if (!q->n_bd) | ||
595 | return; | ||
596 | |||
597 | spin_lock_bh(&txq->lock); | 591 | spin_lock_bh(&txq->lock); |
598 | while (q->write_ptr != q->read_ptr) { | 592 | while (q->write_ptr != q->read_ptr) { |
599 | IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", | 593 | IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", |
600 | txq_id, q->read_ptr); | 594 | txq_id, q->read_ptr); |
601 | iwl_pcie_txq_free_tfd(trans, txq); | 595 | iwl_pcie_txq_free_tfd(trans, txq); |
602 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); | 596 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr); |
603 | } | 597 | } |
604 | txq->active = false; | 598 | txq->active = false; |
605 | spin_unlock_bh(&txq->lock); | 599 | spin_unlock_bh(&txq->lock); |
@@ -636,10 +630,12 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) | |||
636 | } | 630 | } |
637 | 631 | ||
638 | /* De-alloc circular buffer of TFDs */ | 632 | /* De-alloc circular buffer of TFDs */ |
639 | if (txq->q.n_bd) { | 633 | if (txq->tfds) { |
640 | dma_free_coherent(dev, sizeof(struct iwl_tfd) * | 634 | dma_free_coherent(dev, |
641 | txq->q.n_bd, txq->tfds, txq->q.dma_addr); | 635 | sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX, |
636 | txq->tfds, txq->q.dma_addr); | ||
642 | txq->q.dma_addr = 0; | 637 | txq->q.dma_addr = 0; |
638 | txq->tfds = NULL; | ||
643 | 639 | ||
644 | dma_free_coherent(dev, | 640 | dma_free_coherent(dev, |
645 | sizeof(*txq->scratchbufs) * txq->q.n_window, | 641 | sizeof(*txq->scratchbufs) * txq->q.n_window, |
@@ -948,8 +944,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, | |||
948 | { | 944 | { |
949 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 945 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
950 | struct iwl_txq *txq = &trans_pcie->txq[txq_id]; | 946 | struct iwl_txq *txq = &trans_pcie->txq[txq_id]; |
951 | /* n_bd is usually 256 => n_bd - 1 = 0xff */ | 947 | int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1); |
952 | int tfd_num = ssn & (txq->q.n_bd - 1); | ||
953 | struct iwl_queue *q = &txq->q; | 948 | struct iwl_queue *q = &txq->q; |
954 | int last_to_free; | 949 | int last_to_free; |
955 | 950 | ||
@@ -973,12 +968,12 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, | |||
973 | 968 | ||
974 | /*Since we free until index _not_ inclusive, the one before index is | 969 | /*Since we free until index _not_ inclusive, the one before index is |
975 | * the last we will free. This one must be used */ | 970 | * the last we will free. This one must be used */ |
976 | last_to_free = iwl_queue_dec_wrap(tfd_num, q->n_bd); | 971 | last_to_free = iwl_queue_dec_wrap(tfd_num); |
977 | 972 | ||
978 | if (!iwl_queue_used(q, last_to_free)) { | 973 | if (!iwl_queue_used(q, last_to_free)) { |
979 | IWL_ERR(trans, | 974 | IWL_ERR(trans, |
980 | "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", | 975 | "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", |
981 | __func__, txq_id, last_to_free, q->n_bd, | 976 | __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX, |
982 | q->write_ptr, q->read_ptr); | 977 | q->write_ptr, q->read_ptr); |
983 | goto out; | 978 | goto out; |
984 | } | 979 | } |
@@ -988,7 +983,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, | |||
988 | 983 | ||
989 | for (; | 984 | for (; |
990 | q->read_ptr != tfd_num; | 985 | q->read_ptr != tfd_num; |
991 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | 986 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) { |
992 | 987 | ||
993 | if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL)) | 988 | if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL)) |
994 | continue; | 989 | continue; |
@@ -1027,16 +1022,16 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) | |||
1027 | 1022 | ||
1028 | lockdep_assert_held(&txq->lock); | 1023 | lockdep_assert_held(&txq->lock); |
1029 | 1024 | ||
1030 | if ((idx >= q->n_bd) || (!iwl_queue_used(q, idx))) { | 1025 | if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) { |
1031 | IWL_ERR(trans, | 1026 | IWL_ERR(trans, |
1032 | "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", | 1027 | "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", |
1033 | __func__, txq_id, idx, q->n_bd, | 1028 | __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX, |
1034 | q->write_ptr, q->read_ptr); | 1029 | q->write_ptr, q->read_ptr); |
1035 | return; | 1030 | return; |
1036 | } | 1031 | } |
1037 | 1032 | ||
1038 | for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; | 1033 | for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx; |
1039 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | 1034 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) { |
1040 | 1035 | ||
1041 | if (nfreed++ > 0) { | 1036 | if (nfreed++ > 0) { |
1042 | IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", | 1037 | IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", |
@@ -1445,7 +1440,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, | |||
1445 | } | 1440 | } |
1446 | 1441 | ||
1447 | /* Increment and update queue's write index */ | 1442 | /* Increment and update queue's write index */ |
1448 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); | 1443 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr); |
1449 | iwl_pcie_txq_inc_wr_ptr(trans, txq); | 1444 | iwl_pcie_txq_inc_wr_ptr(trans, txq); |
1450 | 1445 | ||
1451 | spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); | 1446 | spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); |
@@ -1788,7 +1783,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, | |||
1788 | mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); | 1783 | mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); |
1789 | 1784 | ||
1790 | /* Tell device the write index *just past* this latest filled TFD */ | 1785 | /* Tell device the write index *just past* this latest filled TFD */ |
1791 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); | 1786 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr); |
1792 | if (!wait_write_ptr) | 1787 | if (!wait_write_ptr) |
1793 | iwl_pcie_txq_inc_wr_ptr(trans, txq); | 1788 | iwl_pcie_txq_inc_wr_ptr(trans, txq); |
1794 | 1789 | ||