diff options
author | David S. Miller <davem@davemloft.net> | 2015-02-09 15:07:20 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-02-09 15:13:58 -0500 |
commit | c8ac18f2006b2926ce375c01646b2f487d1c33b2 (patch) | |
tree | 29e53fe6b19cf0cae4372353624a1dd8f0589824 /drivers/net/wireless/iwlwifi/pcie/tx.c | |
parent | 93c1af6ca94c1e763efba76a127b5c135e3d23a6 (diff) | |
parent | d53071143aa5a7cb37cf7db8101042e700b5413f (diff) |
Merge tag 'wireless-drivers-next-for-davem-2015-02-07' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next
Major changes:
iwlwifi:
* more work for new devices (4165 / 8260)
* cleanups / improvements in rate control
* fixes for TDLS
* major statistics work from Johannes - more to come
* improvements for the fw error dump infrastructure
* usual amount of small fixes here and there (scan, D0i3 etc...)
* add support for beamforming
* enable stuck queue detection for iwlmvm
* a few fixes for EBS scan
* fixes for various failure paths
* improvements for TDLS Offchannel
wil6210:
* performance tuning
* some AP features
brcm80211:
* rework some code in SDIO part of the brcmfmac driver related to
suspend/resume that were found doing stress testing
* in PCIe part scheduling of worker thread needed to be relaxed
* minor fixes and exposing firmware revision information to
user-space, ie. ethtool.
mwifiex:
* enhancements for changing virtual interface handling
* remove coupling between netdev and FW supported interface
combination, now conversion from any type of supported interface
types to any other type is possible
* DFS support in AP mode
ath9k:
* fix calibration issues on some boards
* Wake-on-WLAN improvements
ath10k:
* add support for qca6174 hardware
* enable RX batching to reduce CPU load
Conflicts:
drivers/net/wireless/rtlwifi/pci.c
Conflict resolution is to get rid of the 'end' label and keep
the rest.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie/tx.c')
-rw-r--r-- | drivers/net/wireless/iwlwifi/pcie/tx.c | 58 |
1 files changed, 32 insertions, 26 deletions
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index d40cd4a67d6e..af0bce736358 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c | |||
@@ -147,7 +147,6 @@ static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, | |||
147 | static void iwl_pcie_txq_stuck_timer(unsigned long data) | 147 | static void iwl_pcie_txq_stuck_timer(unsigned long data) |
148 | { | 148 | { |
149 | struct iwl_txq *txq = (void *)data; | 149 | struct iwl_txq *txq = (void *)data; |
150 | struct iwl_queue *q = &txq->q; | ||
151 | struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; | 150 | struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; |
152 | struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); | 151 | struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); |
153 | u32 scd_sram_addr = trans_pcie->scd_base_addr + | 152 | u32 scd_sram_addr = trans_pcie->scd_base_addr + |
@@ -164,7 +163,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data) | |||
164 | spin_unlock(&txq->lock); | 163 | spin_unlock(&txq->lock); |
165 | 164 | ||
166 | IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id, | 165 | IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id, |
167 | jiffies_to_msecs(trans_pcie->wd_timeout)); | 166 | jiffies_to_msecs(txq->wd_timeout)); |
168 | IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", | 167 | IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", |
169 | txq->q.read_ptr, txq->q.write_ptr); | 168 | txq->q.read_ptr, txq->q.write_ptr); |
170 | 169 | ||
@@ -198,11 +197,6 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data) | |||
198 | iwl_read_prph(trans, SCD_QUEUE_WRPTR(i))); | 197 | iwl_read_prph(trans, SCD_QUEUE_WRPTR(i))); |
199 | } | 198 | } |
200 | 199 | ||
201 | for (i = q->read_ptr; i != q->write_ptr; | ||
202 | i = iwl_queue_inc_wrap(i)) | ||
203 | IWL_ERR(trans, "scratch %d = 0x%08x\n", i, | ||
204 | le32_to_cpu(txq->scratchbufs[i].scratch)); | ||
205 | |||
206 | iwl_force_nmi(trans); | 200 | iwl_force_nmi(trans); |
207 | } | 201 | } |
208 | 202 | ||
@@ -680,7 +674,8 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) | |||
680 | iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); | 674 | iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); |
681 | 675 | ||
682 | iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, | 676 | iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, |
683 | trans_pcie->cmd_fifo); | 677 | trans_pcie->cmd_fifo, |
678 | trans_pcie->cmd_q_wdg_timeout); | ||
684 | 679 | ||
685 | /* Activate all Tx DMA/FIFO channels */ | 680 | /* Activate all Tx DMA/FIFO channels */ |
686 | iwl_scd_activate_fifos(trans); | 681 | iwl_scd_activate_fifos(trans); |
@@ -722,7 +717,12 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) | |||
722 | iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, | 717 | iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, |
723 | trans_pcie->kw.dma >> 4); | 718 | trans_pcie->kw.dma >> 4); |
724 | 719 | ||
725 | iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr); | 720 | /* |
721 | * Send 0 as the scd_base_addr since the device may have be reset | ||
722 | * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will | ||
723 | * contain garbage. | ||
724 | */ | ||
725 | iwl_pcie_tx_start(trans, 0); | ||
726 | } | 726 | } |
727 | 727 | ||
728 | /* | 728 | /* |
@@ -898,6 +898,10 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) | |||
898 | } | 898 | } |
899 | } | 899 | } |
900 | 900 | ||
901 | if (trans->cfg->base_params->num_of_queues > 20) | ||
902 | iwl_set_bits_prph(trans, SCD_GP_CTRL, | ||
903 | SCD_GP_CTRL_ENABLE_31_QUEUES); | ||
904 | |||
901 | return 0; | 905 | return 0; |
902 | error: | 906 | error: |
903 | /*Upon error, free only if we allocated something */ | 907 | /*Upon error, free only if we allocated something */ |
@@ -906,10 +910,9 @@ error: | |||
906 | return ret; | 910 | return ret; |
907 | } | 911 | } |
908 | 912 | ||
909 | static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie, | 913 | static inline void iwl_pcie_txq_progress(struct iwl_txq *txq) |
910 | struct iwl_txq *txq) | ||
911 | { | 914 | { |
912 | if (!trans_pcie->wd_timeout) | 915 | if (!txq->wd_timeout) |
913 | return; | 916 | return; |
914 | 917 | ||
915 | /* | 918 | /* |
@@ -919,7 +922,7 @@ static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie, | |||
919 | if (txq->q.read_ptr == txq->q.write_ptr) | 922 | if (txq->q.read_ptr == txq->q.write_ptr) |
920 | del_timer(&txq->stuck_timer); | 923 | del_timer(&txq->stuck_timer); |
921 | else | 924 | else |
922 | mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); | 925 | mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); |
923 | } | 926 | } |
924 | 927 | ||
925 | /* Frees buffers until index _not_ inclusive */ | 928 | /* Frees buffers until index _not_ inclusive */ |
@@ -981,7 +984,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, | |||
981 | iwl_pcie_txq_free_tfd(trans, txq); | 984 | iwl_pcie_txq_free_tfd(trans, txq); |
982 | } | 985 | } |
983 | 986 | ||
984 | iwl_pcie_txq_progress(trans_pcie, txq); | 987 | iwl_pcie_txq_progress(txq); |
985 | 988 | ||
986 | if (iwl_queue_space(&txq->q) > txq->q.low_mark) | 989 | if (iwl_queue_space(&txq->q) > txq->q.low_mark) |
987 | iwl_wake_queue(trans, txq); | 990 | iwl_wake_queue(trans, txq); |
@@ -1109,7 +1112,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) | |||
1109 | spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); | 1112 | spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); |
1110 | } | 1113 | } |
1111 | 1114 | ||
1112 | iwl_pcie_txq_progress(trans_pcie, txq); | 1115 | iwl_pcie_txq_progress(txq); |
1113 | } | 1116 | } |
1114 | 1117 | ||
1115 | static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, | 1118 | static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, |
@@ -1142,14 +1145,18 @@ static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, | |||
1142 | #define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) | 1145 | #define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) |
1143 | 1146 | ||
1144 | void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, | 1147 | void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, |
1145 | const struct iwl_trans_txq_scd_cfg *cfg) | 1148 | const struct iwl_trans_txq_scd_cfg *cfg, |
1149 | unsigned int wdg_timeout) | ||
1146 | { | 1150 | { |
1147 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1151 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1152 | struct iwl_txq *txq = &trans_pcie->txq[txq_id]; | ||
1148 | int fifo = -1; | 1153 | int fifo = -1; |
1149 | 1154 | ||
1150 | if (test_and_set_bit(txq_id, trans_pcie->queue_used)) | 1155 | if (test_and_set_bit(txq_id, trans_pcie->queue_used)) |
1151 | WARN_ONCE(1, "queue %d already used - expect issues", txq_id); | 1156 | WARN_ONCE(1, "queue %d already used - expect issues", txq_id); |
1152 | 1157 | ||
1158 | txq->wd_timeout = msecs_to_jiffies(wdg_timeout); | ||
1159 | |||
1153 | if (cfg) { | 1160 | if (cfg) { |
1154 | fifo = cfg->fifo; | 1161 | fifo = cfg->fifo; |
1155 | 1162 | ||
@@ -1173,7 +1180,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, | |||
1173 | 1180 | ||
1174 | /* enable aggregations for the queue */ | 1181 | /* enable aggregations for the queue */ |
1175 | iwl_scd_txq_enable_agg(trans, txq_id); | 1182 | iwl_scd_txq_enable_agg(trans, txq_id); |
1176 | trans_pcie->txq[txq_id].ampdu = true; | 1183 | txq->ampdu = true; |
1177 | } else { | 1184 | } else { |
1178 | /* | 1185 | /* |
1179 | * disable aggregations for the queue, this will also | 1186 | * disable aggregations for the queue, this will also |
@@ -1182,14 +1189,14 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, | |||
1182 | */ | 1189 | */ |
1183 | iwl_scd_txq_disable_agg(trans, txq_id); | 1190 | iwl_scd_txq_disable_agg(trans, txq_id); |
1184 | 1191 | ||
1185 | ssn = trans_pcie->txq[txq_id].q.read_ptr; | 1192 | ssn = txq->q.read_ptr; |
1186 | } | 1193 | } |
1187 | } | 1194 | } |
1188 | 1195 | ||
1189 | /* Place first TFD at index corresponding to start sequence number. | 1196 | /* Place first TFD at index corresponding to start sequence number. |
1190 | * Assumes that ssn_idx is valid (!= 0xFFF) */ | 1197 | * Assumes that ssn_idx is valid (!= 0xFFF) */ |
1191 | trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff); | 1198 | txq->q.read_ptr = (ssn & 0xff); |
1192 | trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff); | 1199 | txq->q.write_ptr = (ssn & 0xff); |
1193 | iwl_write_direct32(trans, HBUS_TARG_WRPTR, | 1200 | iwl_write_direct32(trans, HBUS_TARG_WRPTR, |
1194 | (ssn & 0xff) | (txq_id << 8)); | 1201 | (ssn & 0xff) | (txq_id << 8)); |
1195 | 1202 | ||
@@ -1230,7 +1237,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, | |||
1230 | txq_id, ssn & 0xff); | 1237 | txq_id, ssn & 0xff); |
1231 | } | 1238 | } |
1232 | 1239 | ||
1233 | trans_pcie->txq[txq_id].active = true; | 1240 | txq->active = true; |
1234 | } | 1241 | } |
1235 | 1242 | ||
1236 | void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, | 1243 | void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, |
@@ -1495,8 +1502,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, | |||
1495 | trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr); | 1502 | trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr); |
1496 | 1503 | ||
1497 | /* start timer if queue currently empty */ | 1504 | /* start timer if queue currently empty */ |
1498 | if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout) | 1505 | if (q->read_ptr == q->write_ptr && txq->wd_timeout) |
1499 | mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); | 1506 | mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); |
1500 | 1507 | ||
1501 | spin_lock_irqsave(&trans_pcie->reg_lock, flags); | 1508 | spin_lock_irqsave(&trans_pcie->reg_lock, flags); |
1502 | ret = iwl_pcie_set_cmd_in_flight(trans, cmd); | 1509 | ret = iwl_pcie_set_cmd_in_flight(trans, cmd); |
@@ -1846,9 +1853,8 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, | |||
1846 | 1853 | ||
1847 | /* start timer if queue currently empty */ | 1854 | /* start timer if queue currently empty */ |
1848 | if (q->read_ptr == q->write_ptr) { | 1855 | if (q->read_ptr == q->write_ptr) { |
1849 | if (txq->need_update && trans_pcie->wd_timeout) | 1856 | if (txq->wd_timeout) |
1850 | mod_timer(&txq->stuck_timer, | 1857 | mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); |
1851 | jiffies + trans_pcie->wd_timeout); | ||
1852 | IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id); | 1858 | IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id); |
1853 | iwl_trans_pcie_ref(trans); | 1859 | iwl_trans_pcie_ref(trans); |
1854 | } | 1860 | } |