about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    Emmanuel Grumbach <emmanuel.grumbach@intel.com>    2011-08-26 02:11:32 -0400
committer John W. Linville <linville@tuxdriver.com>          2011-08-29 15:33:01 -0400
commit    8ad71bef4a9d8173cbcfbb2f796b08d33d4ca01b (patch)
tree      bce072b23b431864bda8f2d9efa3baad90309905
parent    e20d434170c3a7f388d5e916825499c9c0738606 (diff)
iwlagn: move tx queues to transport layer
This finalizes the move of the data path to the transport layer.

Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h42
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h50
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c53
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.c105
6 files changed, 136 insertions(+), 139 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 009c35a8d20b..f8a4bcf0a34b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -742,7 +742,6 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
742 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 742 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
743 int txq_id = SEQ_TO_QUEUE(sequence); 743 int txq_id = SEQ_TO_QUEUE(sequence);
744 int cmd_index = SEQ_TO_INDEX(sequence); 744 int cmd_index = SEQ_TO_INDEX(sequence);
745 struct iwl_tx_queue *txq = &priv->txq[txq_id];
746 struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; 745 struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
747 struct ieee80211_hdr *hdr; 746 struct ieee80211_hdr *hdr;
748 u32 status = le16_to_cpu(tx_resp->status.status); 747 u32 status = le16_to_cpu(tx_resp->status.status);
@@ -755,17 +754,7 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
755 struct sk_buff_head skbs; 754 struct sk_buff_head skbs;
756 struct sk_buff *skb; 755 struct sk_buff *skb;
757 struct iwl_rxon_context *ctx; 756 struct iwl_rxon_context *ctx;
758 757 bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
759 if ((cmd_index >= txq->q.n_bd) ||
760 (iwl_queue_used(&txq->q, cmd_index) == 0)) {
761 IWL_ERR(priv, "%s: Read index for DMA queue txq_id (%d) "
762 "cmd_index %d is out of range [0-%d] %d %d\n",
763 __func__, txq_id, cmd_index, txq->q.n_bd,
764 txq->q.write_ptr, txq->q.read_ptr);
765 return;
766 }
767
768 txq->time_stamp = jiffies;
769 758
770 tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >> 759 tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
771 IWLAGN_TX_RES_TID_POS; 760 IWLAGN_TX_RES_TID_POS;
@@ -774,12 +763,10 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
774 763
775 spin_lock_irqsave(&priv->shrd->sta_lock, flags); 764 spin_lock_irqsave(&priv->shrd->sta_lock, flags);
776 765
777 if (txq->sched_retry) 766 if (is_agg)
778 iwl_rx_reply_tx_agg(priv, tx_resp); 767 iwl_rx_reply_tx_agg(priv, tx_resp);
779 768
780 if (tx_resp->frame_count == 1) { 769 if (tx_resp->frame_count == 1) {
781 bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
782
783 __skb_queue_head_init(&skbs); 770 __skb_queue_head_init(&skbs);
784 /*we can free until ssn % q.n_bd not inclusive */ 771 /*we can free until ssn % q.n_bd not inclusive */
785 iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id, 772 iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
@@ -850,14 +837,12 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
850{ 837{
851 struct iwl_rx_packet *pkt = rxb_addr(rxb); 838 struct iwl_rx_packet *pkt = rxb_addr(rxb);
852 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; 839 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
853 struct iwl_tx_queue *txq = NULL;
854 struct iwl_ht_agg *agg; 840 struct iwl_ht_agg *agg;
855 struct sk_buff_head reclaimed_skbs; 841 struct sk_buff_head reclaimed_skbs;
856 struct ieee80211_tx_info *info; 842 struct ieee80211_tx_info *info;
857 struct ieee80211_hdr *hdr; 843 struct ieee80211_hdr *hdr;
858 struct sk_buff *skb; 844 struct sk_buff *skb;
859 unsigned long flags; 845 unsigned long flags;
860 int index;
861 int sta_id; 846 int sta_id;
862 int tid; 847 int tid;
863 int freed; 848 int freed;
@@ -875,14 +860,10 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
875 return; 860 return;
876 } 861 }
877 862
878 txq = &priv->txq[scd_flow];
879 sta_id = ba_resp->sta_id; 863 sta_id = ba_resp->sta_id;
880 tid = ba_resp->tid; 864 tid = ba_resp->tid;
881 agg = &priv->shrd->tid_data[sta_id][tid].agg; 865 agg = &priv->shrd->tid_data[sta_id][tid].agg;
882 866
883 /* Find index of block-ack window */
884 index = ba_resp_scd_ssn & (txq->q.n_bd - 1);
885
886 spin_lock_irqsave(&priv->shrd->sta_lock, flags); 867 spin_lock_irqsave(&priv->shrd->sta_lock, flags);
887 868
888 if (unlikely(agg->txq_id != scd_flow)) { 869 if (unlikely(agg->txq_id != scd_flow)) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 08e8e1bf4830..33a829ad7e2a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -574,19 +574,6 @@ struct iwl_sensitivity_ranges {
574 ****************************************************************************/ 574 ****************************************************************************/
575extern void iwl_update_chain_flags(struct iwl_priv *priv); 575extern void iwl_update_chain_flags(struct iwl_priv *priv);
576extern const u8 iwl_bcast_addr[ETH_ALEN]; 576extern const u8 iwl_bcast_addr[ETH_ALEN];
577extern int iwl_queue_space(const struct iwl_queue *q);
578static inline int iwl_queue_used(const struct iwl_queue *q, int i)
579{
580 return q->write_ptr >= q->read_ptr ?
581 (i >= q->read_ptr && i < q->write_ptr) :
582 !(i < q->read_ptr && i >= q->write_ptr);
583}
584
585
586static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
587{
588 return index & (q->n_window - 1);
589}
590 577
591#define IWL_OPERATION_MODE_AUTO 0 578#define IWL_OPERATION_MODE_AUTO 0
592#define IWL_OPERATION_MODE_HT_ONLY 1 579#define IWL_OPERATION_MODE_HT_ONLY 1
@@ -1156,10 +1143,6 @@ struct iwl_priv {
1156 1143
1157 int activity_timer_active; 1144 int activity_timer_active;
1158 1145
1159 /* Tx DMA processing queues */
1160 struct iwl_tx_queue *txq;
1161 unsigned long txq_ctx_active_msk;
1162
1163 /* counts mgmt, ctl, and data packets */ 1146 /* counts mgmt, ctl, and data packets */
1164 struct traffic_stats tx_stats; 1147 struct traffic_stats tx_stats;
1165 struct traffic_stats rx_stats; 1148 struct traffic_stats rx_stats;
@@ -1172,12 +1155,6 @@ struct iwl_priv {
1172 struct iwl_station_entry stations[IWLAGN_STATION_COUNT]; 1155 struct iwl_station_entry stations[IWLAGN_STATION_COUNT];
1173 unsigned long ucode_key_table; 1156 unsigned long ucode_key_table;
1174 1157
1175 /* queue refcounts */
1176#define IWL_MAX_HW_QUEUES 32
1177 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
1178 /* for each AC */
1179 atomic_t queue_stop_count[4];
1180
1181 /* Indication if ieee80211_ops->open has been called */ 1158 /* Indication if ieee80211_ops->open has been called */
1182 u8 is_open; 1159 u8 is_open;
1183 1160
@@ -1334,27 +1311,8 @@ struct iwl_priv {
1334 bool have_rekey_data; 1311 bool have_rekey_data;
1335}; /*iwl_priv */ 1312}; /*iwl_priv */
1336 1313
1337static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
1338{
1339 set_bit(txq_id, &priv->txq_ctx_active_msk);
1340}
1341
1342static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
1343{
1344 clear_bit(txq_id, &priv->txq_ctx_active_msk);
1345}
1346
1347extern struct iwl_mod_params iwlagn_mod_params; 1314extern struct iwl_mod_params iwlagn_mod_params;
1348 1315
1349static inline struct ieee80211_hdr *iwl_tx_queue_get_hdr(struct iwl_priv *priv,
1350 int txq_id, int idx)
1351{
1352 if (priv->txq[txq_id].skbs[idx])
1353 return (struct ieee80211_hdr *)priv->txq[txq_id].
1354 skbs[idx]->data;
1355 return NULL;
1356}
1357
1358static inline struct iwl_rxon_context * 1316static inline struct iwl_rxon_context *
1359iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif) 1317iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
1360{ 1318{
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h b/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h
index 255b326bf0e9..ec4e73737681 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h
@@ -125,6 +125,10 @@ struct iwl_dma_ptr {
125 * @ac_to_fifo: to what fifo is a specifc AC mapped ? 125 * @ac_to_fifo: to what fifo is a specifc AC mapped ?
126 * @ac_to_queue: to what tx queue is a specifc AC mapped ? 126 * @ac_to_queue: to what tx queue is a specifc AC mapped ?
127 * @mcast_queue: 127 * @mcast_queue:
128 * @txq: Tx DMA processing queues
129 * @txq_ctx_active_msk: what queue is active
 130 * @queue_stopped: tracks what queue is stopped
 131 * @queue_stop_count: tracks what SW queue is stopped
128 */ 132 */
129struct iwl_trans_pcie { 133struct iwl_trans_pcie {
130 struct iwl_rx_queue rxq; 134 struct iwl_rx_queue rxq;
@@ -150,6 +154,12 @@ struct iwl_trans_pcie {
150 const u8 *ac_to_fifo[NUM_IWL_RXON_CTX]; 154 const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
151 const u8 *ac_to_queue[NUM_IWL_RXON_CTX]; 155 const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
152 u8 mcast_queue[NUM_IWL_RXON_CTX]; 156 u8 mcast_queue[NUM_IWL_RXON_CTX];
157
158 struct iwl_tx_queue *txq;
159 unsigned long txq_ctx_active_msk;
160#define IWL_MAX_HW_QUEUES 32
161 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
162 atomic_t queue_stop_count[4];
153}; 163};
154 164
155#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \ 165#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
@@ -207,6 +217,7 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
207 int index); 217 int index);
208int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, 218int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
209 struct sk_buff_head *skbs); 219 struct sk_buff_head *skbs);
220int iwl_queue_space(const struct iwl_queue *q);
210 221
211/***************************************************** 222/*****************************************************
212* Error handling 223* Error handling
@@ -216,6 +227,9 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
216int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display); 227int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
217void iwl_dump_csr(struct iwl_trans *trans); 228void iwl_dump_csr(struct iwl_trans *trans);
218 229
230/*****************************************************
231* Helpers
232******************************************************/
219static inline void iwl_disable_interrupts(struct iwl_trans *trans) 233static inline void iwl_disable_interrupts(struct iwl_trans *trans)
220{ 234{
221 clear_bit(STATUS_INT_ENABLED, &trans->shrd->status); 235 clear_bit(STATUS_INT_ENABLED, &trans->shrd->status);
@@ -265,12 +279,14 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
265 u8 queue = txq->swq_id; 279 u8 queue = txq->swq_id;
266 u8 ac = queue & 3; 280 u8 ac = queue & 3;
267 u8 hwq = (queue >> 2) & 0x1f; 281 u8 hwq = (queue >> 2) & 0x1f;
282 struct iwl_trans_pcie *trans_pcie =
283 IWL_TRANS_GET_PCIE_TRANS(trans);
268 284
269 if (unlikely(!trans->shrd->mac80211_registered)) 285 if (unlikely(!trans->shrd->mac80211_registered))
270 return; 286 return;
271 287
272 if (test_and_clear_bit(hwq, priv(trans)->queue_stopped)) 288 if (test_and_clear_bit(hwq, trans_pcie->queue_stopped))
273 if (atomic_dec_return(&priv(trans)->queue_stop_count[ac]) <= 0) 289 if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0)
274 ieee80211_wake_queue(trans->shrd->hw, ac); 290 ieee80211_wake_queue(trans->shrd->hw, ac);
275} 291}
276 292
@@ -280,12 +296,14 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
280 u8 queue = txq->swq_id; 296 u8 queue = txq->swq_id;
281 u8 ac = queue & 3; 297 u8 ac = queue & 3;
282 u8 hwq = (queue >> 2) & 0x1f; 298 u8 hwq = (queue >> 2) & 0x1f;
299 struct iwl_trans_pcie *trans_pcie =
300 IWL_TRANS_GET_PCIE_TRANS(trans);
283 301
284 if (unlikely(!trans->shrd->mac80211_registered)) 302 if (unlikely(!trans->shrd->mac80211_registered))
285 return; 303 return;
286 304
287 if (!test_and_set_bit(hwq, priv(trans)->queue_stopped)) 305 if (!test_and_set_bit(hwq, trans_pcie->queue_stopped))
288 if (atomic_inc_return(&priv(trans)->queue_stop_count[ac]) > 0) 306 if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0)
289 ieee80211_stop_queue(trans->shrd->hw, ac); 307 ieee80211_stop_queue(trans->shrd->hw, ac);
290} 308}
291 309
@@ -301,4 +319,28 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
301 319
302#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue 320#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
303 321
322static inline void iwl_txq_ctx_activate(struct iwl_trans_pcie *trans_pcie,
323 int txq_id)
324{
325 set_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
326}
327
328static inline void iwl_txq_ctx_deactivate(struct iwl_trans_pcie *trans_pcie,
329 int txq_id)
330{
331 clear_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
332}
333
334static inline int iwl_queue_used(const struct iwl_queue *q, int i)
335{
336 return q->write_ptr >= q->read_ptr ?
337 (i >= q->read_ptr && i < q->write_ptr) :
338 !(i < q->read_ptr && i >= q->write_ptr);
339}
340
341static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
342{
343 return index & (q->n_window - 1);
344}
345
304#endif /* __iwl_trans_int_pcie_h__ */ 346#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
index a0699c0ef4f8..2d0ddb8d422d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
@@ -1032,7 +1032,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
1032 iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq); 1032 iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
1033 for (i = 0; i < hw_params(trans).max_txq_num; i++) 1033 for (i = 0; i < hw_params(trans).max_txq_num; i++)
1034 iwl_txq_update_write_ptr(trans, 1034 iwl_txq_update_write_ptr(trans,
1035 &priv(trans)->txq[i]); 1035 &trans_pcie->txq[i]);
1036 1036
1037 isr_stats->wakeup++; 1037 isr_stats->wakeup++;
1038 1038
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
index 28c606cade3c..5dd6a6d1dfd7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
@@ -407,9 +407,10 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
407 struct iwl_tx_queue *txq, 407 struct iwl_tx_queue *txq,
408 int tx_fifo_id, int scd_retry) 408 int tx_fifo_id, int scd_retry)
409{ 409{
410 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
410 int txq_id = txq->q.id; 411 int txq_id = txq->q.id;
411 int active = 412 int active =
412 test_bit(txq_id, &priv(trans)->txq_ctx_active_msk) ? 1 : 0; 413 test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;
413 414
414 iwl_write_prph(bus(trans), SCD_QUEUE_STATUS_BITS(txq_id), 415 iwl_write_prph(bus(trans), SCD_QUEUE_STATUS_BITS(txq_id),
415 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) | 416 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
@@ -482,8 +483,8 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
482 483
483 /* Place first TFD at index corresponding to start sequence number. 484 /* Place first TFD at index corresponding to start sequence number.
484 * Assumes that ssn_idx is valid (!= 0xFFF) */ 485 * Assumes that ssn_idx is valid (!= 0xFFF) */
485 priv(trans)->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); 486 trans_pcie->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
486 priv(trans)->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); 487 trans_pcie->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
487 iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx); 488 iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);
488 489
489 /* Set up Tx window size and frame limit for this queue */ 490 /* Set up Tx window size and frame limit for this queue */
@@ -500,11 +501,11 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
500 iwl_set_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id)); 501 iwl_set_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
501 502
502 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ 503 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
503 iwl_trans_tx_queue_set_status(trans, &priv(trans)->txq[txq_id], 504 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
504 tx_fifo, 1); 505 tx_fifo, 1);
505 506
506 priv(trans)->txq[txq_id].sta_id = sta_id; 507 trans_pcie->txq[txq_id].sta_id = sta_id;
507 priv(trans)->txq[txq_id].tid = tid; 508 trans_pcie->txq[txq_id].tid = tid;
508 509
509 spin_unlock_irqrestore(&trans->shrd->lock, flags); 510 spin_unlock_irqrestore(&trans->shrd->lock, flags);
510} 511}
@@ -517,11 +518,12 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
517 */ 518 */
518static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans) 519static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
519{ 520{
521 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
520 int txq_id; 522 int txq_id;
521 523
522 for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) 524 for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
523 if (!test_and_set_bit(txq_id, 525 if (!test_and_set_bit(txq_id,
524 &priv(trans)->txq_ctx_active_msk)) 526 &trans_pcie->txq_ctx_active_msk))
525 return txq_id; 527 return txq_id;
526 return -1; 528 return -1;
527} 529}
@@ -530,6 +532,7 @@ int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
530 enum iwl_rxon_context_id ctx, int sta_id, 532 enum iwl_rxon_context_id ctx, int sta_id,
531 int tid, u16 *ssn) 533 int tid, u16 *ssn)
532{ 534{
535 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
533 struct iwl_tid_data *tid_data; 536 struct iwl_tid_data *tid_data;
534 unsigned long flags; 537 unsigned long flags;
535 u16 txq_id; 538 u16 txq_id;
@@ -545,7 +548,7 @@ int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
545 tid_data = &trans->shrd->tid_data[sta_id][tid]; 548 tid_data = &trans->shrd->tid_data[sta_id][tid];
546 *ssn = SEQ_TO_SN(tid_data->seq_number); 549 *ssn = SEQ_TO_SN(tid_data->seq_number);
547 tid_data->agg.txq_id = txq_id; 550 tid_data->agg.txq_id = txq_id;
548 iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id); 551 iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);
549 552
550 tid_data = &trans->shrd->tid_data[sta_id][tid]; 553 tid_data = &trans->shrd->tid_data[sta_id][tid];
551 if (tid_data->tfds_in_queue == 0) { 554 if (tid_data->tfds_in_queue == 0) {
@@ -564,24 +567,26 @@ int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
564 567
565void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id) 568void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
566{ 569{
570 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
567 iwlagn_tx_queue_stop_scheduler(trans, txq_id); 571 iwlagn_tx_queue_stop_scheduler(trans, txq_id);
568 572
569 iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id)); 573 iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
570 574
571 priv(trans)->txq[txq_id].q.read_ptr = 0; 575 trans_pcie->txq[txq_id].q.read_ptr = 0;
572 priv(trans)->txq[txq_id].q.write_ptr = 0; 576 trans_pcie->txq[txq_id].q.write_ptr = 0;
573 /* supposes that ssn_idx is valid (!= 0xFFF) */ 577 /* supposes that ssn_idx is valid (!= 0xFFF) */
574 iwl_trans_set_wr_ptrs(trans, txq_id, 0); 578 iwl_trans_set_wr_ptrs(trans, txq_id, 0);
575 579
576 iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id)); 580 iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
577 iwl_txq_ctx_deactivate(priv(trans), txq_id); 581 iwl_txq_ctx_deactivate(trans_pcie, txq_id);
578 iwl_trans_tx_queue_set_status(trans, &priv(trans)->txq[txq_id], 0, 0); 582 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
579} 583}
580 584
581int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, 585int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
582 enum iwl_rxon_context_id ctx, int sta_id, 586 enum iwl_rxon_context_id ctx, int sta_id,
583 int tid) 587 int tid)
584{ 588{
589 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
585 unsigned long flags; 590 unsigned long flags;
586 int read_ptr, write_ptr; 591 int read_ptr, write_ptr;
587 struct iwl_tid_data *tid_data; 592 struct iwl_tid_data *tid_data;
@@ -621,8 +626,8 @@ int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
621 "or starting\n"); 626 "or starting\n");
622 } 627 }
623 628
624 write_ptr = priv(trans)->txq[txq_id].q.write_ptr; 629 write_ptr = trans_pcie->txq[txq_id].q.write_ptr;
625 read_ptr = priv(trans)->txq[txq_id].q.read_ptr; 630 read_ptr = trans_pcie->txq[txq_id].q.read_ptr;
626 631
627 /* The queue is not empty */ 632 /* The queue is not empty */
628 if (write_ptr != read_ptr) { 633 if (write_ptr != read_ptr) {
@@ -663,7 +668,8 @@ turn_off:
663 */ 668 */
664static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) 669static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
665{ 670{
666 struct iwl_tx_queue *txq = &priv(trans)->txq[trans->shrd->cmd_queue]; 671 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
672 struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
667 struct iwl_queue *q = &txq->q; 673 struct iwl_queue *q = &txq->q;
668 struct iwl_device_cmd *out_cmd; 674 struct iwl_device_cmd *out_cmd;
669 struct iwl_cmd_meta *out_meta; 675 struct iwl_cmd_meta *out_meta;
@@ -852,7 +858,9 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
852 */ 858 */
853static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx) 859static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
854{ 860{
855 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 861 struct iwl_trans_pcie *trans_pcie =
862 IWL_TRANS_GET_PCIE_TRANS(trans(priv));
863 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
856 struct iwl_queue *q = &txq->q; 864 struct iwl_queue *q = &txq->q;
857 int nfreed = 0; 865 int nfreed = 0;
858 866
@@ -893,7 +901,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
893 struct iwl_device_cmd *cmd; 901 struct iwl_device_cmd *cmd;
894 struct iwl_cmd_meta *meta; 902 struct iwl_cmd_meta *meta;
895 struct iwl_trans *trans = trans(priv); 903 struct iwl_trans *trans = trans(priv);
896 struct iwl_tx_queue *txq = &priv->txq[trans->shrd->cmd_queue]; 904 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
905 struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
897 unsigned long flags; 906 unsigned long flags;
898 907
899 /* If a Tx command is being handled and it isn't in the actual 908 /* If a Tx command is being handled and it isn't in the actual
@@ -902,8 +911,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
902 if (WARN(txq_id != trans->shrd->cmd_queue, 911 if (WARN(txq_id != trans->shrd->cmd_queue,
903 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", 912 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
904 txq_id, trans->shrd->cmd_queue, sequence, 913 txq_id, trans->shrd->cmd_queue, sequence,
905 priv->txq[trans->shrd->cmd_queue].q.read_ptr, 914 trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr,
906 priv->txq[trans->shrd->cmd_queue].q.write_ptr)) { 915 trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) {
907 iwl_print_hex_error(priv, pkt, 32); 916 iwl_print_hex_error(priv, pkt, 32);
908 return; 917 return;
909 } 918 }
@@ -1072,6 +1081,7 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
1072 1081
1073static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) 1082static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
1074{ 1083{
1084 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1075 int cmd_idx; 1085 int cmd_idx;
1076 int ret; 1086 int ret;
1077 1087
@@ -1144,7 +1154,7 @@ cancel:
1144 * in later, it will possibly set an invalid 1154 * in later, it will possibly set an invalid
1145 * address (cmd->meta.source). 1155 * address (cmd->meta.source).
1146 */ 1156 */
1147 priv(trans)->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &= 1157 trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
1148 ~CMD_WANT_SKB; 1158 ~CMD_WANT_SKB;
1149 } 1159 }
1150fail: 1160fail:
@@ -1181,7 +1191,8 @@ int iwl_trans_pcie_send_cmd_pdu(struct iwl_trans *trans, u8 id, u32 flags,
1181int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, 1191int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
1182 struct sk_buff_head *skbs) 1192 struct sk_buff_head *skbs)
1183{ 1193{
1184 struct iwl_tx_queue *txq = &priv(trans)->txq[txq_id]; 1194 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1195 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
1185 struct iwl_queue *q = &txq->q; 1196 struct iwl_queue *q = &txq->q;
1186 int last_to_free; 1197 int last_to_free;
1187 int freed = 0; 1198 int freed = 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c
index cce57d53f618..cec13adb018e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.c
@@ -409,8 +409,8 @@ static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
409 */ 409 */
410static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id) 410static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
411{ 411{
412 struct iwl_priv *priv = priv(trans); 412 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
413 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 413 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
414 struct iwl_queue *q = &txq->q; 414 struct iwl_queue *q = &txq->q;
415 415
416 if (!q->n_bd) 416 if (!q->n_bd)
@@ -433,8 +433,8 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
433 */ 433 */
434static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id) 434static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
435{ 435{
436 struct iwl_priv *priv = priv(trans); 436 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
437 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 437 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
438 struct device *dev = bus(trans)->dev; 438 struct device *dev = bus(trans)->dev;
439 int i; 439 int i;
440 if (WARN_ON(!txq)) 440 if (WARN_ON(!txq))
@@ -477,19 +477,17 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
477static void iwl_trans_pcie_tx_free(struct iwl_trans *trans) 477static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
478{ 478{
479 int txq_id; 479 int txq_id;
480 struct iwl_trans_pcie *trans_pcie = 480 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
481 IWL_TRANS_GET_PCIE_TRANS(trans);
482 struct iwl_priv *priv = priv(trans);
483 481
484 /* Tx queues */ 482 /* Tx queues */
485 if (priv->txq) { 483 if (trans_pcie->txq) {
486 for (txq_id = 0; 484 for (txq_id = 0;
487 txq_id < hw_params(trans).max_txq_num; txq_id++) 485 txq_id < hw_params(trans).max_txq_num; txq_id++)
488 iwl_tx_queue_free(trans, txq_id); 486 iwl_tx_queue_free(trans, txq_id);
489 } 487 }
490 488
491 kfree(priv->txq); 489 kfree(trans_pcie->txq);
492 priv->txq = NULL; 490 trans_pcie->txq = NULL;
493 491
494 iwlagn_free_dma_ptr(trans, &trans_pcie->kw); 492 iwlagn_free_dma_ptr(trans, &trans_pcie->kw);
495 493
@@ -507,16 +505,14 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
507{ 505{
508 int ret; 506 int ret;
509 int txq_id, slots_num; 507 int txq_id, slots_num;
510 struct iwl_priv *priv = priv(trans); 508 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
511 struct iwl_trans_pcie *trans_pcie =
512 IWL_TRANS_GET_PCIE_TRANS(trans);
513 509
514 u16 scd_bc_tbls_size = hw_params(trans).max_txq_num * 510 u16 scd_bc_tbls_size = hw_params(trans).max_txq_num *
515 sizeof(struct iwlagn_scd_bc_tbl); 511 sizeof(struct iwlagn_scd_bc_tbl);
516 512
517 /*It is not allowed to alloc twice, so warn when this happens. 513 /*It is not allowed to alloc twice, so warn when this happens.
518 * We cannot rely on the previous allocation, so free and fail */ 514 * We cannot rely on the previous allocation, so free and fail */
519 if (WARN_ON(priv->txq)) { 515 if (WARN_ON(trans_pcie->txq)) {
520 ret = -EINVAL; 516 ret = -EINVAL;
521 goto error; 517 goto error;
522 } 518 }
@@ -535,9 +531,9 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
535 goto error; 531 goto error;
536 } 532 }
537 533
538 priv->txq = kzalloc(sizeof(struct iwl_tx_queue) * 534 trans_pcie->txq = kzalloc(sizeof(struct iwl_tx_queue) *
539 hw_params(trans).max_txq_num, GFP_KERNEL); 535 hw_params(trans).max_txq_num, GFP_KERNEL);
540 if (!priv->txq) { 536 if (!trans_pcie->txq) {
541 IWL_ERR(trans, "Not enough memory for txq\n"); 537 IWL_ERR(trans, "Not enough memory for txq\n");
542 ret = ENOMEM; 538 ret = ENOMEM;
543 goto error; 539 goto error;
@@ -547,8 +543,8 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
547 for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) { 543 for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
548 slots_num = (txq_id == trans->shrd->cmd_queue) ? 544 slots_num = (txq_id == trans->shrd->cmd_queue) ?
549 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 545 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
550 ret = iwl_trans_txq_alloc(trans, &priv->txq[txq_id], slots_num, 546 ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
551 txq_id); 547 slots_num, txq_id);
552 if (ret) { 548 if (ret) {
553 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); 549 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
554 goto error; 550 goto error;
@@ -568,11 +564,9 @@ static int iwl_tx_init(struct iwl_trans *trans)
568 int txq_id, slots_num; 564 int txq_id, slots_num;
569 unsigned long flags; 565 unsigned long flags;
570 bool alloc = false; 566 bool alloc = false;
571 struct iwl_priv *priv = priv(trans); 567 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
572 struct iwl_trans_pcie *trans_pcie =
573 IWL_TRANS_GET_PCIE_TRANS(trans);
574 568
575 if (!priv->txq) { 569 if (!trans_pcie->txq) {
576 ret = iwl_trans_tx_alloc(trans); 570 ret = iwl_trans_tx_alloc(trans);
577 if (ret) 571 if (ret)
578 goto error; 572 goto error;
@@ -594,8 +588,8 @@ static int iwl_tx_init(struct iwl_trans *trans)
594 for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) { 588 for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
595 slots_num = (txq_id == trans->shrd->cmd_queue) ? 589 slots_num = (txq_id == trans->shrd->cmd_queue) ?
596 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 590 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
597 ret = iwl_trans_txq_init(trans, &priv->txq[txq_id], slots_num, 591 ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
598 txq_id); 592 slots_num, txq_id);
599 if (ret) { 593 if (ret) {
600 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); 594 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
601 goto error; 595 goto error;
@@ -916,14 +910,15 @@ static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
916 iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0); 910 iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0);
917 911
918 /* make sure all queue are not stopped */ 912 /* make sure all queue are not stopped */
919 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped)); 913 memset(&trans_pcie->queue_stopped[0], 0,
914 sizeof(trans_pcie->queue_stopped));
920 for (i = 0; i < 4; i++) 915 for (i = 0; i < 4; i++)
921 atomic_set(&priv->queue_stop_count[i], 0); 916 atomic_set(&trans_pcie->queue_stop_count[i], 0);
922 for_each_context(priv, ctx) 917 for_each_context(priv, ctx)
923 ctx->last_tx_rejected = false; 918 ctx->last_tx_rejected = false;
924 919
925 /* reset to 0 to enable all the queue first */ 920 /* reset to 0 to enable all the queue first */
926 priv->txq_ctx_active_msk = 0; 921 trans_pcie->txq_ctx_active_msk = 0;
927 922
928 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) < 923 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) <
929 IWLAGN_FIRST_AMPDU_QUEUE); 924 IWLAGN_FIRST_AMPDU_QUEUE);
@@ -934,14 +929,15 @@ static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
934 int fifo = queue_to_fifo[i].fifo; 929 int fifo = queue_to_fifo[i].fifo;
935 int ac = queue_to_fifo[i].ac; 930 int ac = queue_to_fifo[i].ac;
936 931
937 iwl_txq_ctx_activate(priv, i); 932 iwl_txq_ctx_activate(trans_pcie, i);
938 933
939 if (fifo == IWL_TX_FIFO_UNUSED) 934 if (fifo == IWL_TX_FIFO_UNUSED)
940 continue; 935 continue;
941 936
942 if (ac != IWL_AC_UNSET) 937 if (ac != IWL_AC_UNSET)
943 iwl_set_swq_id(&priv->txq[i], ac, i); 938 iwl_set_swq_id(&trans_pcie->txq[i], ac, i);
944 iwl_trans_tx_queue_set_status(trans, &priv->txq[i], fifo, 0); 939 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
940 fifo, 0);
945 } 941 }
946 942
947 spin_unlock_irqrestore(&trans->shrd->lock, flags); 943 spin_unlock_irqrestore(&trans->shrd->lock, flags);
@@ -958,7 +954,7 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
958{ 954{
959 int ch, txq_id; 955 int ch, txq_id;
960 unsigned long flags; 956 unsigned long flags;
961 struct iwl_priv *priv = priv(trans); 957 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
962 958
963 /* Turn off all Tx DMA fifos */ 959 /* Turn off all Tx DMA fifos */
964 spin_lock_irqsave(&trans->shrd->lock, flags); 960 spin_lock_irqsave(&trans->shrd->lock, flags);
@@ -979,7 +975,7 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
979 } 975 }
980 spin_unlock_irqrestore(&trans->shrd->lock, flags); 976 spin_unlock_irqrestore(&trans->shrd->lock, flags);
981 977
982 if (!priv->txq) { 978 if (!trans_pcie->txq) {
983 IWL_WARN(trans, "Stopping tx queues that aren't allocated..."); 979 IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
984 return 0; 980 return 0;
985 } 981 }
@@ -1108,7 +1104,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1108 } 1104 }
1109 } 1105 }
1110 1106
1111 txq = &priv(trans)->txq[txq_id]; 1107 txq = &trans_pcie->txq[txq_id];
1112 q = &txq->q; 1108 q = &txq->q;
1113 1109
1114 /* Set up driver data for this TFD */ 1110 /* Set up driver data for this TFD */
@@ -1268,7 +1264,8 @@ static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
1268static int iwlagn_txq_check_empty(struct iwl_trans *trans, 1264static int iwlagn_txq_check_empty(struct iwl_trans *trans,
1269 int sta_id, u8 tid, int txq_id) 1265 int sta_id, u8 tid, int txq_id)
1270{ 1266{
1271 struct iwl_queue *q = &priv(trans)->txq[txq_id].q; 1267 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1268 struct iwl_queue *q = &trans_pcie->txq[txq_id].q;
1272 struct iwl_tid_data *tid_data = &trans->shrd->tid_data[sta_id][tid]; 1269 struct iwl_tid_data *tid_data = &trans->shrd->tid_data[sta_id][tid];
1273 1270
1274 lockdep_assert_held(&trans->shrd->sta_lock); 1271 lockdep_assert_held(&trans->shrd->sta_lock);
@@ -1286,7 +1283,7 @@ static int iwlagn_txq_check_empty(struct iwl_trans *trans,
1286 iwl_stop_tx_ba_trans_ready(priv(trans), 1283 iwl_stop_tx_ba_trans_ready(priv(trans),
1287 NUM_IWL_RXON_CTX, 1284 NUM_IWL_RXON_CTX,
1288 sta_id, tid); 1285 sta_id, tid);
1289 iwl_wake_queue(trans, &priv(trans)->txq[txq_id]); 1286 iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
1290 } 1287 }
1291 break; 1288 break;
1292 case IWL_EMPTYING_HW_QUEUE_ADDBA: 1289 case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -1324,13 +1321,16 @@ static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
1324 int txq_id, int ssn, u32 status, 1321 int txq_id, int ssn, u32 status,
1325 struct sk_buff_head *skbs) 1322 struct sk_buff_head *skbs)
1326{ 1323{
1327 struct iwl_tx_queue *txq = &priv(trans)->txq[txq_id]; 1324 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1325 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
1328 /* n_bd is usually 256 => n_bd - 1 = 0xff */ 1326 /* n_bd is usually 256 => n_bd - 1 = 0xff */
1329 int tfd_num = ssn & (txq->q.n_bd - 1); 1327 int tfd_num = ssn & (txq->q.n_bd - 1);
1330 int freed = 0; 1328 int freed = 0;
1331 u8 agg_state; 1329 u8 agg_state;
1332 bool cond; 1330 bool cond;
1333 1331
1332 txq->time_stamp = jiffies;
1333
1334 if (txq->sched_retry) { 1334 if (txq->sched_retry) {
1335 agg_state = 1335 agg_state =
1336 trans->shrd->tid_data[txq->sta_id][txq->tid].agg.state; 1336 trans->shrd->tid_data[txq->sta_id][txq->tid].agg.state;
@@ -1421,9 +1421,9 @@ static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
1421 txq_id = trans_pcie->ac_to_queue[ctx][ac]; 1421 txq_id = trans_pcie->ac_to_queue[ctx][ac];
1422 IWL_DEBUG_INFO(trans, "Queue Status: Q[%d] %s\n", 1422 IWL_DEBUG_INFO(trans, "Queue Status: Q[%d] %s\n",
1423 ac, 1423 ac,
1424 (atomic_read(&priv(trans)->queue_stop_count[ac]) > 0) 1424 (atomic_read(&trans_pcie->queue_stop_count[ac]) > 0)
1425 ? "stopped" : "awake"); 1425 ? "stopped" : "awake");
1426 iwl_wake_queue(trans, &priv(trans)->txq[txq_id]); 1426 iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
1427 } 1427 }
1428} 1428}
1429 1429
@@ -1448,13 +1448,16 @@ static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
1448 1448
1449static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id) 1449static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id)
1450{ 1450{
1451 iwl_stop_queue(trans, &priv(trans)->txq[txq_id]); 1451 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1452
1453 iwl_stop_queue(trans, &trans_pcie->txq[txq_id]);
1452} 1454}
1453 1455
1454#define IWL_FLUSH_WAIT_MS 2000 1456#define IWL_FLUSH_WAIT_MS 2000
1455 1457
1456static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans) 1458static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
1457{ 1459{
1460 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1458 struct iwl_tx_queue *txq; 1461 struct iwl_tx_queue *txq;
1459 struct iwl_queue *q; 1462 struct iwl_queue *q;
1460 int cnt; 1463 int cnt;
@@ -1465,7 +1468,7 @@ static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
1465 for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) { 1468 for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
1466 if (cnt == trans->shrd->cmd_queue) 1469 if (cnt == trans->shrd->cmd_queue)
1467 continue; 1470 continue;
1468 txq = &priv(trans)->txq[cnt]; 1471 txq = &trans_pcie->txq[cnt];
1469 q = &txq->q; 1472 q = &txq->q;
1470 while (q->read_ptr != q->write_ptr && !time_after(jiffies, 1473 while (q->read_ptr != q->write_ptr && !time_after(jiffies,
1471 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) 1474 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
@@ -1486,7 +1489,8 @@ static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
1486 */ 1489 */
1487static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt) 1490static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
1488{ 1491{
1489 struct iwl_tx_queue *txq = &priv(trans)->txq[cnt]; 1492 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1493 struct iwl_tx_queue *txq = &trans_pcie->txq[cnt];
1490 struct iwl_queue *q = &txq->q; 1494 struct iwl_queue *q = &txq->q;
1491 unsigned long timeout; 1495 unsigned long timeout;
1492 1496
@@ -1578,7 +1582,7 @@ static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
1578 const u8 *ptr; 1582 const u8 *ptr;
1579 ssize_t ret; 1583 ssize_t ret;
1580 1584
1581 if (!priv->txq) { 1585 if (!trans_pcie->txq) {
1582 IWL_ERR(trans, "txq not ready\n"); 1586 IWL_ERR(trans, "txq not ready\n");
1583 return -EAGAIN; 1587 return -EAGAIN;
1584 } 1588 }
@@ -1589,7 +1593,7 @@ static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
1589 } 1593 }
1590 pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n"); 1594 pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
1591 for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) { 1595 for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
1592 txq = &priv->txq[cnt]; 1596 txq = &trans_pcie->txq[cnt];
1593 q = &txq->q; 1597 q = &txq->q;
1594 pos += scnprintf(buf + pos, bufsz - pos, 1598 pos += scnprintf(buf + pos, bufsz - pos,
1595 "q[%d]: read_ptr: %u, write_ptr: %u\n", 1599 "q[%d]: read_ptr: %u, write_ptr: %u\n",
@@ -1666,9 +1670,10 @@ static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
1666 1670
1667static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, 1671static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1668 char __user *user_buf, 1672 char __user *user_buf,
1669 size_t count, loff_t *ppos) { 1673 size_t count, loff_t *ppos)
1670 1674{
1671 struct iwl_trans *trans = file->private_data; 1675 struct iwl_trans *trans = file->private_data;
1676 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1672 struct iwl_priv *priv = priv(trans); 1677 struct iwl_priv *priv = priv(trans);
1673 struct iwl_tx_queue *txq; 1678 struct iwl_tx_queue *txq;
1674 struct iwl_queue *q; 1679 struct iwl_queue *q;
@@ -1678,7 +1683,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1678 int ret; 1683 int ret;
1679 const size_t bufsz = sizeof(char) * 64 * hw_params(trans).max_txq_num; 1684 const size_t bufsz = sizeof(char) * 64 * hw_params(trans).max_txq_num;
1680 1685
1681 if (!priv->txq) { 1686 if (!trans_pcie->txq) {
1682 IWL_ERR(priv, "txq not ready\n"); 1687 IWL_ERR(priv, "txq not ready\n");
1683 return -EAGAIN; 1688 return -EAGAIN;
1684 } 1689 }
@@ -1687,21 +1692,21 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1687 return -ENOMEM; 1692 return -ENOMEM;
1688 1693
1689 for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) { 1694 for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
1690 txq = &priv->txq[cnt]; 1695 txq = &trans_pcie->txq[cnt];
1691 q = &txq->q; 1696 q = &txq->q;
1692 pos += scnprintf(buf + pos, bufsz - pos, 1697 pos += scnprintf(buf + pos, bufsz - pos,
1693 "hwq %.2d: read=%u write=%u stop=%d" 1698 "hwq %.2d: read=%u write=%u stop=%d"
1694 " swq_id=%#.2x (ac %d/hwq %d)\n", 1699 " swq_id=%#.2x (ac %d/hwq %d)\n",
1695 cnt, q->read_ptr, q->write_ptr, 1700 cnt, q->read_ptr, q->write_ptr,
1696 !!test_bit(cnt, priv->queue_stopped), 1701 !!test_bit(cnt, trans_pcie->queue_stopped),
1697 txq->swq_id, txq->swq_id & 3, 1702 txq->swq_id, txq->swq_id & 3,
1698 (txq->swq_id >> 2) & 0x1f); 1703 (txq->swq_id >> 2) & 0x1f);
1699 if (cnt >= 4) 1704 if (cnt >= 4)
1700 continue; 1705 continue;
1701 /* for the ACs, display the stop count too */ 1706 /* for the ACs, display the stop count too */
1702 pos += scnprintf(buf + pos, bufsz - pos, 1707 pos += scnprintf(buf + pos, bufsz - pos,
1703 " stop-count: %d\n", 1708 " stop-count: %d\n",
1704 atomic_read(&priv->queue_stop_count[cnt])); 1709 atomic_read(&trans_pcie->queue_stop_count[cnt]));
1705 } 1710 }
1706 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1711 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1707 kfree(buf); 1712 kfree(buf);