author     John W. Linville <linville@tuxdriver.com>    2012-11-21 14:38:49 -0500
committer  John W. Linville <linville@tuxdriver.com>    2012-11-21 14:38:49 -0500
commit     1e60896fe07307baa5f3ca1a220dfa9792657352 (patch)
tree       b68ba91a72b1b421d37914e4201fc672f12d86ae /drivers/net
parent     ad66786718989c20c91e855baa40371e01daf0a1 (diff)
parent     eea54c8ec971d4759c541dba351477dafc39ce54 (diff)
Merge branch 'for-john' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next
Conflicts:
drivers/net/wireless/iwlwifi/pcie/trans.c
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/mac80211.c      9
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/main.c          2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-config.h        2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fh.h            2
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c          1
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h   104
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c         357
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c     1018
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c        1146
9 files changed, 1299 insertions, 1342 deletions
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index bf189f115413..c862c0906349 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -168,8 +168,13 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, | |||
168 | hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | | 168 | hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | |
169 | IEEE80211_HW_SUPPORTS_STATIC_SMPS; | 169 | IEEE80211_HW_SUPPORTS_STATIC_SMPS; |
170 | 170 | ||
171 | /* enable 11w if the uCode advertise */ | 171 | /* |
172 | if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP) | 172 | * Enable 11w if advertised by firmware and software crypto |
173 | * is not enabled (as the firmware will interpret some mgmt | ||
174 | * packets, so enabling it with software crypto isn't safe) | ||
175 | */ | ||
176 | if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP && | ||
177 | !iwlwifi_mod_params.sw_crypto) | ||
173 | hw->flags |= IEEE80211_HW_MFP_CAPABLE; | 178 | hw->flags |= IEEE80211_HW_MFP_CAPABLE; |
174 | 179 | ||
175 | hw->sta_data_size = sizeof(struct iwl_station_priv); | 180 | hw->sta_data_size = sizeof(struct iwl_station_priv); |
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 37bb4575ad8d..e3a07c916812 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -1191,8 +1191,6 @@ static void iwl_option_config(struct iwl_priv *priv) | |||
1191 | 1191 | ||
1192 | static int iwl_eeprom_init_hw_params(struct iwl_priv *priv) | 1192 | static int iwl_eeprom_init_hw_params(struct iwl_priv *priv) |
1193 | { | 1193 | { |
1194 | priv->eeprom_data->sku = priv->eeprom_data->sku; | ||
1195 | |||
1196 | if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE && | 1194 | if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE && |
1197 | !priv->cfg->ht_params) { | 1195 | !priv->cfg->ht_params) { |
1198 | IWL_ERR(priv, "Invalid 11n configuration\n"); | 1196 | IWL_ERR(priv, "Invalid 11n configuration\n"); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 87f465a49df1..196266aa5a9d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -150,7 +150,7 @@ enum iwl_led_mode { | |||
150 | struct iwl_base_params { | 150 | struct iwl_base_params { |
151 | int eeprom_size; | 151 | int eeprom_size; |
152 | int num_of_queues; /* def: HW dependent */ | 152 | int num_of_queues; /* def: HW dependent */ |
153 | /* for iwl_apm_init() */ | 153 | /* for iwl_pcie_apm_init() */ |
154 | u32 pll_cfg_val; | 154 | u32 pll_cfg_val; |
155 | 155 | ||
156 | const u16 max_ll_items; | 156 | const u16 max_ll_items; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 806046641747..ec48563d3c6a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -267,7 +267,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) | |||
267 | 267 | ||
268 | #define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20) | 268 | #define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20) |
269 | #define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4) | 269 | #define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4) |
270 | #define RX_RB_TIMEOUT (0x10) | 270 | #define RX_RB_TIMEOUT (0x11) |
271 | 271 | ||
272 | #define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000) | 272 | #define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000) |
273 | #define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000) | 273 | #define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000) |
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 2a4675396707..956fe6c370bc 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -69,7 +69,6 @@ | |||
69 | 69 | ||
70 | #include "iwl-trans.h" | 70 | #include "iwl-trans.h" |
71 | #include "iwl-drv.h" | 71 | #include "iwl-drv.h" |
72 | #include "iwl-trans.h" | ||
73 | 72 | ||
74 | #include "cfg.h" | 73 | #include "cfg.h" |
75 | #include "internal.h" | 74 | #include "internal.h" |
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 1f065c630d43..d91d2e8c62f5 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -73,7 +73,7 @@ struct isr_statistics { | |||
73 | }; | 73 | }; |
74 | 74 | ||
75 | /** | 75 | /** |
76 | * struct iwl_rx_queue - Rx queue | 76 | * struct iwl_rxq - Rx queue |
77 | * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) | 77 | * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) |
78 | * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) | 78 | * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) |
79 | * @pool: | 79 | * @pool: |
@@ -91,7 +91,7 @@ struct isr_statistics { | |||
91 | * | 91 | * |
92 | * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers | 92 | * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers |
93 | */ | 93 | */ |
94 | struct iwl_rx_queue { | 94 | struct iwl_rxq { |
95 | __le32 *bd; | 95 | __le32 *bd; |
96 | dma_addr_t bd_dma; | 96 | dma_addr_t bd_dma; |
97 | struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; | 97 | struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; |
@@ -157,8 +157,8 @@ struct iwl_cmd_meta { | |||
157 | * 32 since we don't need so many commands pending. Since the HW | 157 | * 32 since we don't need so many commands pending. Since the HW |
158 | * still uses 256 BDs for DMA though, n_bd stays 256. As a result, | 158 | * still uses 256 BDs for DMA though, n_bd stays 256. As a result, |
159 | * the software buffers (in the variables @meta, @txb in struct | 159 | * the software buffers (in the variables @meta, @txb in struct |
160 | * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds | 160 | * iwl_txq) only have 32 entries, while the HW buffers (@tfds in |
161 | * in the same struct) have 256. | 161 | * the same struct) have 256. |
162 | * This means that we end up with the following: | 162 | * This means that we end up with the following: |
163 | * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 | | 163 | * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 | |
164 | * SW entries: | 0 | ... | 31 | | 164 | * SW entries: | 0 | ... | 31 | |
@@ -182,7 +182,7 @@ struct iwl_queue { | |||
182 | #define TFD_TX_CMD_SLOTS 256 | 182 | #define TFD_TX_CMD_SLOTS 256 |
183 | #define TFD_CMD_SLOTS 32 | 183 | #define TFD_CMD_SLOTS 32 |
184 | 184 | ||
185 | struct iwl_pcie_tx_queue_entry { | 185 | struct iwl_pcie_txq_entry { |
186 | struct iwl_device_cmd *cmd; | 186 | struct iwl_device_cmd *cmd; |
187 | struct iwl_device_cmd *copy_cmd; | 187 | struct iwl_device_cmd *copy_cmd; |
188 | struct sk_buff *skb; | 188 | struct sk_buff *skb; |
@@ -192,7 +192,7 @@ struct iwl_pcie_tx_queue_entry { | |||
192 | }; | 192 | }; |
193 | 193 | ||
194 | /** | 194 | /** |
195 | * struct iwl_tx_queue - Tx Queue for DMA | 195 | * struct iwl_txq - Tx Queue for DMA |
196 | * @q: generic Rx/Tx queue descriptor | 196 | * @q: generic Rx/Tx queue descriptor |
197 | * @tfds: transmit frame descriptors (DMA memory) | 197 | * @tfds: transmit frame descriptors (DMA memory) |
198 | * @entries: transmit entries (driver state) | 198 | * @entries: transmit entries (driver state) |
@@ -205,10 +205,10 @@ struct iwl_pcie_tx_queue_entry { | |||
205 | * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame | 205 | * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame |
206 | * descriptors) and required locking structures. | 206 | * descriptors) and required locking structures. |
207 | */ | 207 | */ |
208 | struct iwl_tx_queue { | 208 | struct iwl_txq { |
209 | struct iwl_queue q; | 209 | struct iwl_queue q; |
210 | struct iwl_tfd *tfds; | 210 | struct iwl_tfd *tfds; |
211 | struct iwl_pcie_tx_queue_entry *entries; | 211 | struct iwl_pcie_txq_entry *entries; |
212 | spinlock_t lock; | 212 | spinlock_t lock; |
213 | struct timer_list stuck_timer; | 213 | struct timer_list stuck_timer; |
214 | struct iwl_trans_pcie *trans_pcie; | 214 | struct iwl_trans_pcie *trans_pcie; |
@@ -238,7 +238,7 @@ struct iwl_tx_queue { | |||
238 | * @wd_timeout: queue watchdog timeout (jiffies) | 238 | * @wd_timeout: queue watchdog timeout (jiffies) |
239 | */ | 239 | */ |
240 | struct iwl_trans_pcie { | 240 | struct iwl_trans_pcie { |
241 | struct iwl_rx_queue rxq; | 241 | struct iwl_rxq rxq; |
242 | struct work_struct rx_replenish; | 242 | struct work_struct rx_replenish; |
243 | struct iwl_trans *trans; | 243 | struct iwl_trans *trans; |
244 | struct iwl_drv *drv; | 244 | struct iwl_drv *drv; |
@@ -260,7 +260,7 @@ struct iwl_trans_pcie { | |||
260 | struct iwl_dma_ptr scd_bc_tbls; | 260 | struct iwl_dma_ptr scd_bc_tbls; |
261 | struct iwl_dma_ptr kw; | 261 | struct iwl_dma_ptr kw; |
262 | 262 | ||
263 | struct iwl_tx_queue *txq; | 263 | struct iwl_txq *txq; |
264 | unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; | 264 | unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; |
265 | unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; | 265 | unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; |
266 | 266 | ||
@@ -287,10 +287,16 @@ struct iwl_trans_pcie { | |||
287 | unsigned long wd_timeout; | 287 | unsigned long wd_timeout; |
288 | }; | 288 | }; |
289 | 289 | ||
290 | /***************************************************** | 290 | /** |
291 | * DRIVER STATUS FUNCTIONS | 291 | * enum iwl_pcie_status: status of the PCIe transport |
292 | ******************************************************/ | 292 | * @STATUS_HCMD_ACTIVE: a SYNC command is being processed |
293 | enum { | 293 | * @STATUS_DEVICE_ENABLED: APM is enabled |
294 | * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up) | ||
295 | * @STATUS_INT_ENABLED: interrupts are enabled | ||
296 | * @STATUS_RFKILL: the HW RFkill switch is in KILL position | ||
297 | * @STATUS_FW_ERROR: the fw is in error state | ||
298 | */ | ||
299 | enum iwl_pcie_status { | ||
294 | STATUS_HCMD_ACTIVE, | 300 | STATUS_HCMD_ACTIVE, |
295 | STATUS_DEVICE_ENABLED, | 301 | STATUS_DEVICE_ENABLED, |
296 | STATUS_TPOWER_PMI, | 302 | STATUS_TPOWER_PMI, |
@@ -309,6 +315,10 @@ iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie) | |||
309 | trans_specific); | 315 | trans_specific); |
310 | } | 316 | } |
311 | 317 | ||
318 | /* | ||
319 | * Convention: trans API functions: iwl_trans_pcie_XXX | ||
320 | * Other functions: iwl_pcie_XXX | ||
321 | */ | ||
312 | struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | 322 | struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, |
313 | const struct pci_device_id *ent, | 323 | const struct pci_device_id *ent, |
314 | const struct iwl_cfg *cfg); | 324 | const struct iwl_cfg *cfg); |
@@ -317,51 +327,43 @@ void iwl_trans_pcie_free(struct iwl_trans *trans); | |||
317 | /***************************************************** | 327 | /***************************************************** |
318 | * RX | 328 | * RX |
319 | ******************************************************/ | 329 | ******************************************************/ |
320 | void iwl_bg_rx_replenish(struct work_struct *data); | 330 | int iwl_pcie_rx_init(struct iwl_trans *trans); |
321 | void iwl_irq_tasklet(struct iwl_trans *trans); | 331 | void iwl_pcie_tasklet(struct iwl_trans *trans); |
322 | void iwl_rx_replenish(struct iwl_trans *trans); | 332 | int iwl_pcie_rx_stop(struct iwl_trans *trans); |
323 | void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans, | 333 | void iwl_pcie_rx_free(struct iwl_trans *trans); |
324 | struct iwl_rx_queue *q); | ||
325 | 334 | ||
326 | /***************************************************** | 335 | /***************************************************** |
327 | * ICT | 336 | * ICT - interrupt handling |
328 | ******************************************************/ | 337 | ******************************************************/ |
329 | void iwl_reset_ict(struct iwl_trans *trans); | 338 | irqreturn_t iwl_pcie_isr_ict(int irq, void *data); |
330 | void iwl_disable_ict(struct iwl_trans *trans); | 339 | int iwl_pcie_alloc_ict(struct iwl_trans *trans); |
331 | int iwl_alloc_isr_ict(struct iwl_trans *trans); | 340 | void iwl_pcie_free_ict(struct iwl_trans *trans); |
332 | void iwl_free_isr_ict(struct iwl_trans *trans); | 341 | void iwl_pcie_reset_ict(struct iwl_trans *trans); |
333 | irqreturn_t iwl_isr_ict(int irq, void *data); | 342 | void iwl_pcie_disable_ict(struct iwl_trans *trans); |
334 | 343 | ||
335 | /***************************************************** | 344 | /***************************************************** |
336 | * TX / HCMD | 345 | * TX / HCMD |
337 | ******************************************************/ | 346 | ******************************************************/ |
338 | void iwl_txq_update_write_ptr(struct iwl_trans *trans, | 347 | int iwl_pcie_tx_init(struct iwl_trans *trans); |
339 | struct iwl_tx_queue *txq); | 348 | void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr); |
340 | int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans, | 349 | int iwl_pcie_tx_stop(struct iwl_trans *trans); |
341 | struct iwl_tx_queue *txq, | 350 | void iwl_pcie_tx_free(struct iwl_trans *trans); |
342 | dma_addr_t addr, u16 len, u8 reset); | ||
343 | int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id); | ||
344 | int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); | ||
345 | void iwl_tx_cmd_complete(struct iwl_trans *trans, | ||
346 | struct iwl_rx_cmd_buffer *rxb, int handler_status); | ||
347 | void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, | ||
348 | struct iwl_tx_queue *txq, | ||
349 | u16 byte_cnt); | ||
350 | void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, | 351 | void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, |
351 | int sta_id, int tid, int frame_limit, u16 ssn); | 352 | int sta_id, int tid, int frame_limit, u16 ssn); |
352 | void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue); | 353 | void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue); |
353 | void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, | 354 | int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, |
354 | enum dma_data_direction dma_dir); | 355 | struct iwl_device_cmd *dev_cmd, int txq_id); |
355 | int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, | 356 | void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq); |
356 | struct sk_buff_head *skbs); | 357 | int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); |
357 | void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id); | 358 | void iwl_pcie_hcmd_complete(struct iwl_trans *trans, |
358 | int iwl_queue_space(const struct iwl_queue *q); | 359 | struct iwl_rx_cmd_buffer *rxb, int handler_status); |
359 | 360 | void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, | |
361 | struct sk_buff_head *skbs); | ||
360 | /***************************************************** | 362 | /***************************************************** |
361 | * Error handling | 363 | * Error handling |
362 | ******************************************************/ | 364 | ******************************************************/ |
363 | int iwl_dump_fh(struct iwl_trans *trans, char **buf); | 365 | int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf); |
364 | void iwl_dump_csr(struct iwl_trans *trans); | 366 | void iwl_pcie_dump_csr(struct iwl_trans *trans); |
365 | 367 | ||
366 | /***************************************************** | 368 | /***************************************************** |
367 | * Helpers | 369 | * Helpers |
@@ -397,7 +399,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) | |||
397 | } | 399 | } |
398 | 400 | ||
399 | static inline void iwl_wake_queue(struct iwl_trans *trans, | 401 | static inline void iwl_wake_queue(struct iwl_trans *trans, |
400 | struct iwl_tx_queue *txq) | 402 | struct iwl_txq *txq) |
401 | { | 403 | { |
402 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 404 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
403 | 405 | ||
@@ -408,7 +410,7 @@ static inline void iwl_wake_queue(struct iwl_trans *trans, | |||
408 | } | 410 | } |
409 | 411 | ||
410 | static inline void iwl_stop_queue(struct iwl_trans *trans, | 412 | static inline void iwl_stop_queue(struct iwl_trans *trans, |
411 | struct iwl_tx_queue *txq) | 413 | struct iwl_txq *txq) |
412 | { | 414 | { |
413 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 415 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
414 | 416 | ||
@@ -420,7 +422,7 @@ static inline void iwl_stop_queue(struct iwl_trans *trans, | |||
420 | txq->q.id); | 422 | txq->q.id); |
421 | } | 423 | } |
422 | 424 | ||
423 | static inline int iwl_queue_used(const struct iwl_queue *q, int i) | 425 | static inline bool iwl_queue_used(const struct iwl_queue *q, int i) |
424 | { | 426 | { |
425 | return q->write_ptr >= q->read_ptr ? | 427 | return q->write_ptr >= q->read_ptr ? |
426 | (i >= q->read_ptr && i < q->write_ptr) : | 428 | (i >= q->read_ptr && i < q->write_ptr) : |
@@ -432,8 +434,8 @@ static inline u8 get_cmd_index(struct iwl_queue *q, u32 index) | |||
432 | return index & (q->n_window - 1); | 434 | return index & (q->n_window - 1); |
433 | } | 435 | } |
434 | 436 | ||
435 | static inline const char * | 437 | static inline const char *get_cmd_string(struct iwl_trans_pcie *trans_pcie, |
436 | trans_pcie_get_cmd_string(struct iwl_trans_pcie *trans_pcie, u8 cmd) | 438 | u8 cmd) |
437 | { | 439 | { |
438 | if (!trans_pcie->command_names || !trans_pcie->command_names[cmd]) | 440 | if (!trans_pcie->command_names || !trans_pcie->command_names[cmd]) |
439 | return "UNKNOWN"; | 441 | return "UNKNOWN"; |
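The inline helpers kept at the bottom of internal.h encode the driver's two ring-buffer conventions: iwl_queue_used() tests whether slot i lies between read_ptr and write_ptr while allowing for wrap-around, and get_cmd_index() masks a 256-entry hardware index down to the 32-entry software command window described in the kernel-doc earlier in this file. A minimal user-space sketch of those two operations (the toy_* names and main() are illustrative stand-ins, not driver code):

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct iwl_queue: just the two ring pointers. */
struct toy_queue {
	int write_ptr;   /* next slot the driver will fill */
	int read_ptr;    /* oldest slot not yet reclaimed */
};

/* Same wrap-around occupancy test as iwl_queue_used() above. */
static bool toy_queue_used(const struct toy_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
	       (i >= q->read_ptr && i < q->write_ptr) :
	       !(i < q->read_ptr && i >= q->write_ptr);
}

/* Same masking as get_cmd_index(): n_window must be a power of two. */
static int toy_cmd_index(int n_window, int index)
{
	return index & (n_window - 1);
}

int main(void)
{
	/* A wrapped queue: the writer has lapped the end of the 256-entry ring. */
	struct toy_queue q = { .read_ptr = 250, .write_ptr = 5 };

	printf("slot 252 used: %d\n", toy_queue_used(&q, 252));  /* 1 */
	printf("slot   2 used: %d\n", toy_queue_used(&q, 2));    /* 1 */
	printf("slot 100 used: %d\n", toy_queue_used(&q, 100));  /* 0 */
	printf("HW cmd index 200 -> SW slot %d\n", toy_cmd_index(32, 200)); /* 8 */
	return 0;
}
```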
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 323079769567..bb32510fdd62 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -76,7 +76,7 @@ | |||
76 | * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When | 76 | * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When |
77 | * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled | 77 | * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled |
78 | * to replenish the iwl->rxq->rx_free. | 78 | * to replenish the iwl->rxq->rx_free. |
79 | * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the | 79 | * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the |
80 | * iwl->rxq is replenished and the READ INDEX is updated (updating the | 80 | * iwl->rxq is replenished and the READ INDEX is updated (updating the |
81 | * 'processed' and 'read' driver indexes as well) | 81 | * 'processed' and 'read' driver indexes as well) |
82 | * + A received packet is processed and handed to the kernel network stack, | 82 | * + A received packet is processed and handed to the kernel network stack, |
@@ -89,28 +89,28 @@ | |||
89 | * | 89 | * |
90 | * Driver sequence: | 90 | * Driver sequence: |
91 | * | 91 | * |
92 | * iwl_rx_queue_alloc() Allocates rx_free | 92 | * iwl_rxq_alloc() Allocates rx_free |
93 | * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls | 93 | * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls |
94 | * iwl_rx_queue_restock | 94 | * iwl_pcie_rxq_restock |
95 | * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx | 95 | * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx |
96 | * queue, updates firmware pointers, and updates | 96 | * queue, updates firmware pointers, and updates |
97 | * the WRITE index. If insufficient rx_free buffers | 97 | * the WRITE index. If insufficient rx_free buffers |
98 | * are available, schedules iwl_rx_replenish | 98 | * are available, schedules iwl_pcie_rx_replenish |
99 | * | 99 | * |
100 | * -- enable interrupts -- | 100 | * -- enable interrupts -- |
101 | * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the | 101 | * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the |
102 | * READ INDEX, detaching the SKB from the pool. | 102 | * READ INDEX, detaching the SKB from the pool. |
103 | * Moves the packet buffer from queue to rx_used. | 103 | * Moves the packet buffer from queue to rx_used. |
104 | * Calls iwl_rx_queue_restock to refill any empty | 104 | * Calls iwl_pcie_rxq_restock to refill any empty |
105 | * slots. | 105 | * slots. |
106 | * ... | 106 | * ... |
107 | * | 107 | * |
108 | */ | 108 | */ |
109 | 109 | ||
110 | /** | 110 | /* |
111 | * iwl_rx_queue_space - Return number of free slots available in queue. | 111 | * iwl_rxq_space - Return number of free slots available in queue. |
112 | */ | 112 | */ |
113 | static int iwl_rx_queue_space(const struct iwl_rx_queue *q) | 113 | static int iwl_rxq_space(const struct iwl_rxq *q) |
114 | { | 114 | { |
115 | int s = q->read - q->write; | 115 | int s = q->read - q->write; |
116 | if (s <= 0) | 116 | if (s <= 0) |
@@ -122,11 +122,28 @@ static int iwl_rx_queue_space(const struct iwl_rx_queue *q) | |||
122 | return s; | 122 | return s; |
123 | } | 123 | } |
124 | 124 | ||
125 | /** | 125 | /* |
126 | * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue | 126 | * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr |
127 | */ | ||
128 | static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) | ||
129 | { | ||
130 | return cpu_to_le32((u32)(dma_addr >> 8)); | ||
131 | } | ||
132 | |||
133 | /* | ||
134 | * iwl_pcie_rx_stop - stops the Rx DMA | ||
135 | */ | ||
136 | int iwl_pcie_rx_stop(struct iwl_trans *trans) | ||
137 | { | ||
138 | iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); | ||
139 | return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG, | ||
140 | FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); | ||
141 | } | ||
142 | |||
143 | /* | ||
144 | * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue | ||
127 | */ | 145 | */ |
128 | void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans, | 146 | static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q) |
129 | struct iwl_rx_queue *q) | ||
130 | { | 147 | { |
131 | unsigned long flags; | 148 | unsigned long flags; |
132 | u32 reg; | 149 | u32 reg; |
@@ -176,16 +193,8 @@ void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans, | |||
176 | spin_unlock_irqrestore(&q->lock, flags); | 193 | spin_unlock_irqrestore(&q->lock, flags); |
177 | } | 194 | } |
178 | 195 | ||
179 | /** | 196 | /* |
180 | * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr | 197 | * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool |
181 | */ | ||
182 | static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr) | ||
183 | { | ||
184 | return cpu_to_le32((u32)(dma_addr >> 8)); | ||
185 | } | ||
186 | |||
187 | /** | ||
188 | * iwl_rx_queue_restock - refill RX queue from pre-allocated pool | ||
189 | * | 198 | * |
190 | * If there are slots in the RX queue that need to be restocked, | 199 | * If there are slots in the RX queue that need to be restocked, |
191 | * and we have free pre-allocated buffers, fill the ranks as much | 200 | * and we have free pre-allocated buffers, fill the ranks as much |
@@ -195,10 +204,10 @@ static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr) | |||
195 | * also updates the memory address in the firmware to reference the new | 204 | * also updates the memory address in the firmware to reference the new |
196 | * target buffer. | 205 | * target buffer. |
197 | */ | 206 | */ |
198 | static void iwl_rx_queue_restock(struct iwl_trans *trans) | 207 | static void iwl_pcie_rxq_restock(struct iwl_trans *trans) |
199 | { | 208 | { |
200 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 209 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
201 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; | 210 | struct iwl_rxq *rxq = &trans_pcie->rxq; |
202 | struct iwl_rx_mem_buffer *rxb; | 211 | struct iwl_rx_mem_buffer *rxb; |
203 | unsigned long flags; | 212 | unsigned long flags; |
204 | 213 | ||
@@ -214,7 +223,7 @@ static void iwl_rx_queue_restock(struct iwl_trans *trans) | |||
214 | return; | 223 | return; |
215 | 224 | ||
216 | spin_lock_irqsave(&rxq->lock, flags); | 225 | spin_lock_irqsave(&rxq->lock, flags); |
217 | while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { | 226 | while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) { |
218 | /* The overwritten rxb must be a used one */ | 227 | /* The overwritten rxb must be a used one */ |
219 | rxb = rxq->queue[rxq->write]; | 228 | rxb = rxq->queue[rxq->write]; |
220 | BUG_ON(rxb && rxb->page); | 229 | BUG_ON(rxb && rxb->page); |
@@ -225,7 +234,7 @@ static void iwl_rx_queue_restock(struct iwl_trans *trans) | |||
225 | list_del(&rxb->list); | 234 | list_del(&rxb->list); |
226 | 235 | ||
227 | /* Point to Rx buffer via next RBD in circular buffer */ | 236 | /* Point to Rx buffer via next RBD in circular buffer */ |
228 | rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(rxb->page_dma); | 237 | rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); |
229 | rxq->queue[rxq->write] = rxb; | 238 | rxq->queue[rxq->write] = rxb; |
230 | rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; | 239 | rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; |
231 | rxq->free_count--; | 240 | rxq->free_count--; |
@@ -242,23 +251,23 @@ static void iwl_rx_queue_restock(struct iwl_trans *trans) | |||
242 | spin_lock_irqsave(&rxq->lock, flags); | 251 | spin_lock_irqsave(&rxq->lock, flags); |
243 | rxq->need_update = 1; | 252 | rxq->need_update = 1; |
244 | spin_unlock_irqrestore(&rxq->lock, flags); | 253 | spin_unlock_irqrestore(&rxq->lock, flags); |
245 | iwl_rx_queue_update_write_ptr(trans, rxq); | 254 | iwl_pcie_rxq_inc_wr_ptr(trans, rxq); |
246 | } | 255 | } |
247 | } | 256 | } |
248 | 257 | ||
249 | /* | 258 | /* |
250 | * iwl_rx_allocate - allocate a page for each used RBD | 259 | * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD |
251 | * | 260 | * |
252 | * A used RBD is an Rx buffer that has been given to the stack. To use it again | 261 | * A used RBD is an Rx buffer that has been given to the stack. To use it again |
253 | * a page must be allocated and the RBD must point to the page. This function | 262 | * a page must be allocated and the RBD must point to the page. This function |
254 | * doesn't change the HW pointer but handles the list of pages that is used by | 263 | * doesn't change the HW pointer but handles the list of pages that is used by |
255 | * iwl_rx_queue_restock. The latter function will update the HW to use the newly | 264 | * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly |
256 | * allocated buffers. | 265 | * allocated buffers. |
257 | */ | 266 | */ |
258 | static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority) | 267 | static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) |
259 | { | 268 | { |
260 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 269 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
261 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; | 270 | struct iwl_rxq *rxq = &trans_pcie->rxq; |
262 | struct iwl_rx_mem_buffer *rxb; | 271 | struct iwl_rx_mem_buffer *rxb; |
263 | struct page *page; | 272 | struct page *page; |
264 | unsigned long flags; | 273 | unsigned long flags; |
@@ -340,47 +349,227 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority) | |||
340 | } | 349 | } |
341 | } | 350 | } |
342 | 351 | ||
352 | static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) | ||
353 | { | ||
354 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
355 | struct iwl_rxq *rxq = &trans_pcie->rxq; | ||
356 | int i; | ||
357 | |||
358 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | ||
359 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { | ||
360 | /* In the reset function, these buffers may have been allocated | ||
361 | * to an SKB, so we need to unmap and free potential storage */ | ||
362 | if (rxq->pool[i].page != NULL) { | ||
363 | dma_unmap_page(trans->dev, rxq->pool[i].page_dma, | ||
364 | PAGE_SIZE << trans_pcie->rx_page_order, | ||
365 | DMA_FROM_DEVICE); | ||
366 | __free_pages(rxq->pool[i].page, | ||
367 | trans_pcie->rx_page_order); | ||
368 | rxq->pool[i].page = NULL; | ||
369 | } | ||
370 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | ||
371 | } | ||
372 | } | ||
373 | |||
343 | /* | 374 | /* |
344 | * iwl_rx_replenish - Move all used buffers from rx_used to rx_free | 375 | * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free |
345 | * | 376 | * |
346 | * When moving to rx_free an page is allocated for the slot. | 377 | * When moving to rx_free an page is allocated for the slot. |
347 | * | 378 | * |
348 | * Also restock the Rx queue via iwl_rx_queue_restock. | 379 | * Also restock the Rx queue via iwl_pcie_rxq_restock. |
349 | * This is called as a scheduled work item (except for during initialization) | 380 | * This is called as a scheduled work item (except for during initialization) |
350 | */ | 381 | */ |
351 | void iwl_rx_replenish(struct iwl_trans *trans) | 382 | static void iwl_pcie_rx_replenish(struct iwl_trans *trans) |
352 | { | 383 | { |
353 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 384 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
354 | unsigned long flags; | 385 | unsigned long flags; |
355 | 386 | ||
356 | iwl_rx_allocate(trans, GFP_KERNEL); | 387 | iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL); |
357 | 388 | ||
358 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | 389 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); |
359 | iwl_rx_queue_restock(trans); | 390 | iwl_pcie_rxq_restock(trans); |
360 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | 391 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); |
361 | } | 392 | } |
362 | 393 | ||
363 | static void iwl_rx_replenish_now(struct iwl_trans *trans) | 394 | static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans) |
364 | { | 395 | { |
365 | iwl_rx_allocate(trans, GFP_ATOMIC); | 396 | iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC); |
366 | 397 | ||
367 | iwl_rx_queue_restock(trans); | 398 | iwl_pcie_rxq_restock(trans); |
368 | } | 399 | } |
369 | 400 | ||
370 | void iwl_bg_rx_replenish(struct work_struct *data) | 401 | static void iwl_pcie_rx_replenish_work(struct work_struct *data) |
371 | { | 402 | { |
372 | struct iwl_trans_pcie *trans_pcie = | 403 | struct iwl_trans_pcie *trans_pcie = |
373 | container_of(data, struct iwl_trans_pcie, rx_replenish); | 404 | container_of(data, struct iwl_trans_pcie, rx_replenish); |
374 | 405 | ||
375 | iwl_rx_replenish(trans_pcie->trans); | 406 | iwl_pcie_rx_replenish(trans_pcie->trans); |
407 | } | ||
408 | |||
409 | static int iwl_pcie_rx_alloc(struct iwl_trans *trans) | ||
410 | { | ||
411 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
412 | struct iwl_rxq *rxq = &trans_pcie->rxq; | ||
413 | struct device *dev = trans->dev; | ||
414 | |||
415 | memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); | ||
416 | |||
417 | spin_lock_init(&rxq->lock); | ||
418 | |||
419 | if (WARN_ON(rxq->bd || rxq->rb_stts)) | ||
420 | return -EINVAL; | ||
421 | |||
422 | /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */ | ||
423 | rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, | ||
424 | &rxq->bd_dma, GFP_KERNEL); | ||
425 | if (!rxq->bd) | ||
426 | goto err_bd; | ||
427 | |||
428 | /*Allocate the driver's pointer to receive buffer status */ | ||
429 | rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), | ||
430 | &rxq->rb_stts_dma, GFP_KERNEL); | ||
431 | if (!rxq->rb_stts) | ||
432 | goto err_rb_stts; | ||
433 | |||
434 | return 0; | ||
435 | |||
436 | err_rb_stts: | ||
437 | dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, | ||
438 | rxq->bd, rxq->bd_dma); | ||
439 | memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma)); | ||
440 | rxq->bd = NULL; | ||
441 | err_bd: | ||
442 | return -ENOMEM; | ||
376 | } | 443 | } |
377 | 444 | ||
378 | static void iwl_rx_handle_rxbuf(struct iwl_trans *trans, | 445 | static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) |
446 | { | ||
447 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
448 | u32 rb_size; | ||
449 | const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ | ||
450 | |||
451 | if (trans_pcie->rx_buf_size_8k) | ||
452 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; | ||
453 | else | ||
454 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; | ||
455 | |||
456 | /* Stop Rx DMA */ | ||
457 | iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); | ||
458 | |||
459 | /* Reset driver's Rx queue write index */ | ||
460 | iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); | ||
461 | |||
462 | /* Tell device where to find RBD circular buffer in DRAM */ | ||
463 | iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG, | ||
464 | (u32)(rxq->bd_dma >> 8)); | ||
465 | |||
466 | /* Tell device where in DRAM to update its Rx status */ | ||
467 | iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG, | ||
468 | rxq->rb_stts_dma >> 4); | ||
469 | |||
470 | /* Enable Rx DMA | ||
471 | * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in | ||
472 | * the credit mechanism in 5000 HW RX FIFO | ||
473 | * Direct rx interrupts to hosts | ||
474 | * Rx buffer size 4 or 8k | ||
475 | * RB timeout 0x10 | ||
476 | * 256 RBDs | ||
477 | */ | ||
478 | iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, | ||
479 | FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | | ||
480 | FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | | ||
481 | FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | | ||
482 | rb_size| | ||
483 | (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| | ||
484 | (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); | ||
485 | |||
486 | /* Set interrupt coalescing timer to default (2048 usecs) */ | ||
487 | iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); | ||
488 | } | ||
489 | |||
490 | int iwl_pcie_rx_init(struct iwl_trans *trans) | ||
491 | { | ||
492 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
493 | struct iwl_rxq *rxq = &trans_pcie->rxq; | ||
494 | |||
495 | int i, err; | ||
496 | unsigned long flags; | ||
497 | |||
498 | if (!rxq->bd) { | ||
499 | err = iwl_pcie_rx_alloc(trans); | ||
500 | if (err) | ||
501 | return err; | ||
502 | } | ||
503 | |||
504 | spin_lock_irqsave(&rxq->lock, flags); | ||
505 | INIT_LIST_HEAD(&rxq->rx_free); | ||
506 | INIT_LIST_HEAD(&rxq->rx_used); | ||
507 | |||
508 | INIT_WORK(&trans_pcie->rx_replenish, | ||
509 | iwl_pcie_rx_replenish_work); | ||
510 | |||
511 | iwl_pcie_rxq_free_rbs(trans); | ||
512 | |||
513 | for (i = 0; i < RX_QUEUE_SIZE; i++) | ||
514 | rxq->queue[i] = NULL; | ||
515 | |||
516 | /* Set us so that we have processed and used all buffers, but have | ||
517 | * not restocked the Rx queue with fresh buffers */ | ||
518 | rxq->read = rxq->write = 0; | ||
519 | rxq->write_actual = 0; | ||
520 | rxq->free_count = 0; | ||
521 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
522 | |||
523 | iwl_pcie_rx_replenish(trans); | ||
524 | |||
525 | iwl_pcie_rx_hw_init(trans, rxq); | ||
526 | |||
527 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | ||
528 | rxq->need_update = 1; | ||
529 | iwl_pcie_rxq_inc_wr_ptr(trans, rxq); | ||
530 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
531 | |||
532 | return 0; | ||
533 | } | ||
534 | |||
535 | void iwl_pcie_rx_free(struct iwl_trans *trans) | ||
536 | { | ||
537 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
538 | struct iwl_rxq *rxq = &trans_pcie->rxq; | ||
539 | unsigned long flags; | ||
540 | |||
541 | /*if rxq->bd is NULL, it means that nothing has been allocated, | ||
542 | * exit now */ | ||
543 | if (!rxq->bd) { | ||
544 | IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); | ||
545 | return; | ||
546 | } | ||
547 | |||
548 | spin_lock_irqsave(&rxq->lock, flags); | ||
549 | iwl_pcie_rxq_free_rbs(trans); | ||
550 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
551 | |||
552 | dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE, | ||
553 | rxq->bd, rxq->bd_dma); | ||
554 | memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma)); | ||
555 | rxq->bd = NULL; | ||
556 | |||
557 | if (rxq->rb_stts) | ||
558 | dma_free_coherent(trans->dev, | ||
559 | sizeof(struct iwl_rb_status), | ||
560 | rxq->rb_stts, rxq->rb_stts_dma); | ||
561 | else | ||
562 | IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n"); | ||
563 | memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma)); | ||
564 | rxq->rb_stts = NULL; | ||
565 | } | ||
566 | |||
567 | static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, | ||
379 | struct iwl_rx_mem_buffer *rxb) | 568 | struct iwl_rx_mem_buffer *rxb) |
380 | { | 569 | { |
381 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 570 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
382 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; | 571 | struct iwl_rxq *rxq = &trans_pcie->rxq; |
383 | struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; | 572 | struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; |
384 | unsigned long flags; | 573 | unsigned long flags; |
385 | bool page_stolen = false; | 574 | bool page_stolen = false; |
386 | int max_len = PAGE_SIZE << trans_pcie->rx_page_order; | 575 | int max_len = PAGE_SIZE << trans_pcie->rx_page_order; |
@@ -410,8 +599,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans, | |||
410 | break; | 599 | break; |
411 | 600 | ||
412 | IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n", | 601 | IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n", |
413 | rxcb._offset, | 602 | rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd), |
414 | trans_pcie_get_cmd_string(trans_pcie, pkt->hdr.cmd), | ||
415 | pkt->hdr.cmd); | 603 | pkt->hdr.cmd); |
416 | 604 | ||
417 | len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; | 605 | len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; |
@@ -443,7 +631,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans, | |||
443 | cmd_index = get_cmd_index(&txq->q, index); | 631 | cmd_index = get_cmd_index(&txq->q, index); |
444 | 632 | ||
445 | if (reclaim) { | 633 | if (reclaim) { |
446 | struct iwl_pcie_tx_queue_entry *ent; | 634 | struct iwl_pcie_txq_entry *ent; |
447 | ent = &txq->entries[cmd_index]; | 635 | ent = &txq->entries[cmd_index]; |
448 | cmd = ent->copy_cmd; | 636 | cmd = ent->copy_cmd; |
449 | WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD); | 637 | WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD); |
@@ -473,7 +661,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans, | |||
473 | * iwl_trans_send_cmd() | 661 | * iwl_trans_send_cmd() |
474 | * as we reclaim the driver command queue */ | 662 | * as we reclaim the driver command queue */ |
475 | if (!rxcb._page_stolen) | 663 | if (!rxcb._page_stolen) |
476 | iwl_tx_cmd_complete(trans, &rxcb, err); | 664 | iwl_pcie_hcmd_complete(trans, &rxcb, err); |
477 | else | 665 | else |
478 | IWL_WARN(trans, "Claim null rxb?\n"); | 666 | IWL_WARN(trans, "Claim null rxb?\n"); |
479 | } | 667 | } |
@@ -515,17 +703,13 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans, | |||
515 | spin_unlock_irqrestore(&rxq->lock, flags); | 703 | spin_unlock_irqrestore(&rxq->lock, flags); |
516 | } | 704 | } |
517 | 705 | ||
518 | /** | 706 | /* |
519 | * iwl_rx_handle - Main entry function for receiving responses from uCode | 707 | * iwl_pcie_rx_handle - Main entry function for receiving responses from fw |
520 | * | ||
521 | * Uses the priv->rx_handlers callback function array to invoke | ||
522 | * the appropriate handlers, including command responses, | ||
523 | * frame-received notifications, and other notifications. | ||
524 | */ | 708 | */ |
525 | static void iwl_rx_handle(struct iwl_trans *trans) | 709 | static void iwl_pcie_rx_handle(struct iwl_trans *trans) |
526 | { | 710 | { |
527 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 711 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
528 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; | 712 | struct iwl_rxq *rxq = &trans_pcie->rxq; |
529 | u32 r, i; | 713 | u32 r, i; |
530 | u8 fill_rx = 0; | 714 | u8 fill_rx = 0; |
531 | u32 count = 8; | 715 | u32 count = 8; |
@@ -556,7 +740,7 @@ static void iwl_rx_handle(struct iwl_trans *trans) | |||
556 | 740 | ||
557 | IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n", | 741 | IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n", |
558 | r, i, rxb); | 742 | r, i, rxb); |
559 | iwl_rx_handle_rxbuf(trans, rxb); | 743 | iwl_pcie_rx_handle_rb(trans, rxb); |
560 | 744 | ||
561 | i = (i + 1) & RX_QUEUE_MASK; | 745 | i = (i + 1) & RX_QUEUE_MASK; |
562 | /* If there are a lot of unused frames, | 746 | /* If there are a lot of unused frames, |
@@ -565,7 +749,7 @@ static void iwl_rx_handle(struct iwl_trans *trans) | |||
565 | count++; | 749 | count++; |
566 | if (count >= 8) { | 750 | if (count >= 8) { |
567 | rxq->read = i; | 751 | rxq->read = i; |
568 | iwl_rx_replenish_now(trans); | 752 | iwl_pcie_rx_replenish_now(trans); |
569 | count = 0; | 753 | count = 0; |
570 | } | 754 | } |
571 | } | 755 | } |
@@ -574,15 +758,15 @@ static void iwl_rx_handle(struct iwl_trans *trans) | |||
574 | /* Backtrack one entry */ | 758 | /* Backtrack one entry */ |
575 | rxq->read = i; | 759 | rxq->read = i; |
576 | if (fill_rx) | 760 | if (fill_rx) |
577 | iwl_rx_replenish_now(trans); | 761 | iwl_pcie_rx_replenish_now(trans); |
578 | else | 762 | else |
579 | iwl_rx_queue_restock(trans); | 763 | iwl_pcie_rxq_restock(trans); |
580 | } | 764 | } |
581 | 765 | ||
582 | /** | 766 | /* |
583 | * iwl_irq_handle_error - called for HW or SW error interrupt from card | 767 | * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card |
584 | */ | 768 | */ |
585 | static void iwl_irq_handle_error(struct iwl_trans *trans) | 769 | static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) |
586 | { | 770 | { |
587 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 771 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
588 | 772 | ||
@@ -598,8 +782,8 @@ static void iwl_irq_handle_error(struct iwl_trans *trans) | |||
598 | return; | 782 | return; |
599 | } | 783 | } |
600 | 784 | ||
601 | iwl_dump_csr(trans); | 785 | iwl_pcie_dump_csr(trans); |
602 | iwl_dump_fh(trans, NULL); | 786 | iwl_pcie_dump_fh(trans, NULL); |
603 | 787 | ||
604 | set_bit(STATUS_FW_ERROR, &trans_pcie->status); | 788 | set_bit(STATUS_FW_ERROR, &trans_pcie->status); |
605 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); | 789 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); |
@@ -608,8 +792,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans) | |||
608 | iwl_op_mode_nic_error(trans->op_mode); | 792 | iwl_op_mode_nic_error(trans->op_mode); |
609 | } | 793 | } |
610 | 794 | ||
611 | /* tasklet for iwlagn interrupt */ | 795 | void iwl_pcie_tasklet(struct iwl_trans *trans) |
612 | void iwl_irq_tasklet(struct iwl_trans *trans) | ||
613 | { | 796 | { |
614 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 797 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
615 | struct isr_statistics *isr_stats = &trans_pcie->isr_stats; | 798 | struct isr_statistics *isr_stats = &trans_pcie->isr_stats; |
@@ -661,7 +844,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans) | |||
661 | iwl_disable_interrupts(trans); | 844 | iwl_disable_interrupts(trans); |
662 | 845 | ||
663 | isr_stats->hw++; | 846 | isr_stats->hw++; |
664 | iwl_irq_handle_error(trans); | 847 | iwl_pcie_irq_handle_error(trans); |
665 | 848 | ||
666 | handled |= CSR_INT_BIT_HW_ERR; | 849 | handled |= CSR_INT_BIT_HW_ERR; |
667 | 850 | ||
@@ -724,17 +907,16 @@ void iwl_irq_tasklet(struct iwl_trans *trans) | |||
724 | IWL_ERR(trans, "Microcode SW error detected. " | 907 | IWL_ERR(trans, "Microcode SW error detected. " |
725 | " Restarting 0x%X.\n", inta); | 908 | " Restarting 0x%X.\n", inta); |
726 | isr_stats->sw++; | 909 | isr_stats->sw++; |
727 | iwl_irq_handle_error(trans); | 910 | iwl_pcie_irq_handle_error(trans); |
728 | handled |= CSR_INT_BIT_SW_ERR; | 911 | handled |= CSR_INT_BIT_SW_ERR; |
729 | } | 912 | } |
730 | 913 | ||
731 | /* uCode wakes up after power-down sleep */ | 914 | /* uCode wakes up after power-down sleep */ |
732 | if (inta & CSR_INT_BIT_WAKEUP) { | 915 | if (inta & CSR_INT_BIT_WAKEUP) { |
733 | IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); | 916 | IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); |
734 | iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq); | 917 | iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq); |
735 | for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) | 918 | for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) |
736 | iwl_txq_update_write_ptr(trans, | 919 | iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]); |
737 | &trans_pcie->txq[i]); | ||
738 | 920 | ||
739 | isr_stats->wakeup++; | 921 | isr_stats->wakeup++; |
740 | 922 | ||
@@ -772,7 +954,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans) | |||
772 | iwl_write8(trans, CSR_INT_PERIODIC_REG, | 954 | iwl_write8(trans, CSR_INT_PERIODIC_REG, |
773 | CSR_INT_PERIODIC_DIS); | 955 | CSR_INT_PERIODIC_DIS); |
774 | 956 | ||
775 | iwl_rx_handle(trans); | 957 | iwl_pcie_rx_handle(trans); |
776 | 958 | ||
777 | /* | 959 | /* |
778 | * Enable periodic interrupt in 8 msec only if we received | 960 | * Enable periodic interrupt in 8 msec only if we received |
@@ -830,7 +1012,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans) | |||
830 | #define ICT_COUNT (ICT_SIZE / sizeof(u32)) | 1012 | #define ICT_COUNT (ICT_SIZE / sizeof(u32)) |
831 | 1013 | ||
832 | /* Free dram table */ | 1014 | /* Free dram table */ |
833 | void iwl_free_isr_ict(struct iwl_trans *trans) | 1015 | void iwl_pcie_free_ict(struct iwl_trans *trans) |
834 | { | 1016 | { |
835 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1017 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
836 | 1018 | ||
@@ -843,13 +1025,12 @@ void iwl_free_isr_ict(struct iwl_trans *trans) | |||
843 | } | 1025 | } |
844 | } | 1026 | } |
845 | 1027 | ||
846 | |||
847 | /* | 1028 | /* |
848 | * allocate dram shared table, it is an aligned memory | 1029 | * allocate dram shared table, it is an aligned memory |
849 | * block of ICT_SIZE. | 1030 | * block of ICT_SIZE. |
850 | * also reset all data related to ICT table interrupt. | 1031 | * also reset all data related to ICT table interrupt. |
851 | */ | 1032 | */ |
852 | int iwl_alloc_isr_ict(struct iwl_trans *trans) | 1033 | int iwl_pcie_alloc_ict(struct iwl_trans *trans) |
853 | { | 1034 | { |
854 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1035 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
855 | 1036 | ||
@@ -862,7 +1043,7 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans) | |||
862 | 1043 | ||
863 | /* just an API sanity check ... it is guaranteed to be aligned */ | 1044 | /* just an API sanity check ... it is guaranteed to be aligned */ |
864 | if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { | 1045 | if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { |
865 | iwl_free_isr_ict(trans); | 1046 | iwl_pcie_free_ict(trans); |
866 | return -EINVAL; | 1047 | return -EINVAL; |
867 | } | 1048 | } |
868 | 1049 | ||
@@ -883,7 +1064,7 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans) | |||
883 | /* Device is going up inform it about using ICT interrupt table, | 1064 | /* Device is going up inform it about using ICT interrupt table, |
884 | * also we need to tell the driver to start using ICT interrupt. | 1065 | * also we need to tell the driver to start using ICT interrupt. |
885 | */ | 1066 | */ |
886 | void iwl_reset_ict(struct iwl_trans *trans) | 1067 | void iwl_pcie_reset_ict(struct iwl_trans *trans) |
887 | { | 1068 | { |
888 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1069 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
889 | u32 val; | 1070 | u32 val; |
@@ -913,7 +1094,7 @@ void iwl_reset_ict(struct iwl_trans *trans) | |||
913 | } | 1094 | } |
914 | 1095 | ||
915 | /* Device is going down disable ict interrupt usage */ | 1096 | /* Device is going down disable ict interrupt usage */ |
916 | void iwl_disable_ict(struct iwl_trans *trans) | 1097 | void iwl_pcie_disable_ict(struct iwl_trans *trans) |
917 | { | 1098 | { |
918 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1099 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
919 | unsigned long flags; | 1100 | unsigned long flags; |
@@ -924,7 +1105,7 @@ void iwl_disable_ict(struct iwl_trans *trans) | |||
924 | } | 1105 | } |
925 | 1106 | ||
926 | /* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */ | 1107 | /* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */ |
927 | static irqreturn_t iwl_isr(int irq, void *data) | 1108 | static irqreturn_t iwl_pcie_isr(int irq, void *data) |
928 | { | 1109 | { |
929 | struct iwl_trans *trans = data; | 1110 | struct iwl_trans *trans = data; |
930 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1111 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
@@ -971,7 +1152,7 @@ static irqreturn_t iwl_isr(int irq, void *data) | |||
971 | #endif | 1152 | #endif |
972 | 1153 | ||
973 | trans_pcie->inta |= inta; | 1154 | trans_pcie->inta |= inta; |
974 | /* iwl_irq_tasklet() will service interrupts and re-enable them */ | 1155 | /* iwl_pcie_tasklet() will service interrupts and re-enable them */ |
975 | if (likely(inta)) | 1156 | if (likely(inta)) |
976 | tasklet_schedule(&trans_pcie->irq_tasklet); | 1157 | tasklet_schedule(&trans_pcie->irq_tasklet); |
977 | else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) && | 1158 | else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) && |
@@ -996,7 +1177,7 @@ none: | |||
996 | * the interrupt we need to service, driver will set the entries back to 0 and | 1177 | * the interrupt we need to service, driver will set the entries back to 0 and |
997 | * set index. | 1178 | * set index. |
998 | */ | 1179 | */ |
999 | irqreturn_t iwl_isr_ict(int irq, void *data) | 1180 | irqreturn_t iwl_pcie_isr_ict(int irq, void *data) |
1000 | { | 1181 | { |
1001 | struct iwl_trans *trans = data; | 1182 | struct iwl_trans *trans = data; |
1002 | struct iwl_trans_pcie *trans_pcie; | 1183 | struct iwl_trans_pcie *trans_pcie; |
@@ -1016,14 +1197,13 @@ irqreturn_t iwl_isr_ict(int irq, void *data) | |||
1016 | * use legacy interrupt. | 1197 | * use legacy interrupt. |
1017 | */ | 1198 | */ |
1018 | if (unlikely(!trans_pcie->use_ict)) { | 1199 | if (unlikely(!trans_pcie->use_ict)) { |
1019 | irqreturn_t ret = iwl_isr(irq, data); | 1200 | irqreturn_t ret = iwl_pcie_isr(irq, data); |
1020 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | 1201 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); |
1021 | return ret; | 1202 | return ret; |
1022 | } | 1203 | } |
1023 | 1204 | ||
1024 | trace_iwlwifi_dev_irq(trans->dev); | 1205 | trace_iwlwifi_dev_irq(trans->dev); |
1025 | 1206 | ||
1026 | |||
1027 | /* Disable (but don't clear!) interrupts here to avoid | 1207 | /* Disable (but don't clear!) interrupts here to avoid |
1028 | * back-to-back ISRs and sporadic interrupts from our NIC. | 1208 | * back-to-back ISRs and sporadic interrupts from our NIC. |
1029 | * If we have something to service, the tasklet will re-enable ints. | 1209 | * If we have something to service, the tasklet will re-enable ints. |
@@ -1032,7 +1212,6 @@ irqreturn_t iwl_isr_ict(int irq, void *data) | |||
1032 | inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */ | 1212 | inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */ |
1033 | iwl_write32(trans, CSR_INT_MASK, 0x00000000); | 1213 | iwl_write32(trans, CSR_INT_MASK, 0x00000000); |
1034 | 1214 | ||
1035 | |||
1036 | /* Ignore interrupt if there's nothing in NIC to service. | 1215 | /* Ignore interrupt if there's nothing in NIC to service. |
1037 | * This may be due to IRQ shared with another device, | 1216 | * This may be due to IRQ shared with another device, |
1038 | * or due to sporadic interrupts thrown from our NIC. */ | 1217 | * or due to sporadic interrupts thrown from our NIC. */ |
@@ -1081,7 +1260,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data) | |||
1081 | inta &= trans_pcie->inta_mask; | 1260 | inta &= trans_pcie->inta_mask; |
1082 | trans_pcie->inta |= inta; | 1261 | trans_pcie->inta |= inta; |
1083 | 1262 | ||
1084 | /* iwl_irq_tasklet() will service interrupts and re-enable them */ | 1263 | /* iwl_pcie_tasklet() will service interrupts and re-enable them */ |
1085 | if (likely(inta)) | 1264 | if (likely(inta)) |
1086 | tasklet_schedule(&trans_pcie->irq_tasklet); | 1265 | tasklet_schedule(&trans_pcie->irq_tasklet); |
1087 | else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) && | 1266 | else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) && |
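One detail of the Rx path above worth spelling out: the receive-buffer descriptors handed to the device are not full DMA addresses. iwl_pcie_dma_addr2rbd_ptr() stores the address shifted right by 8 bits in a 32-bit word, which works because the receive buffers are page-backed and therefore at least 256-byte aligned. A small stand-alone sketch of that encoding, assuming the same shift; the toy_* names are illustrative and the byte-swap done by cpu_to_le32() in the real helper is omitted:

```c
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Same encoding as iwl_pcie_dma_addr2rbd_ptr(): the RBD keeps bits 8..39
 * of the buffer address, so the buffer must be 256-byte aligned (page
 * allocations always are).  Endianness handling is left out for clarity.
 */
static uint32_t toy_dma_addr2rbd_ptr(uint64_t dma_addr)
{
	assert((dma_addr & 0xff) == 0);   /* alignment the encoding relies on */
	return (uint32_t)(dma_addr >> 8);
}

/* Recover the address the device will actually DMA into. */
static uint64_t toy_rbd_ptr2dma_addr(uint32_t rbd)
{
	return (uint64_t)rbd << 8;
}

int main(void)
{
	uint64_t page = 0x12345000;               /* a 4 KiB-aligned Rx page */
	uint32_t rbd = toy_dma_addr2rbd_ptr(page);

	printf("RBD word: 0x%08" PRIx32 "\n", rbd);                     /* 0x00123450 */
	printf("decoded : 0x%" PRIx64 "\n", toy_rbd_ptr2dma_addr(rbd)); /* 0x12345000 */
	return 0;
}
```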
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index f21bf661931d..f6c21e7edaf2 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -74,585 +74,8 @@ | |||
74 | #include "iwl-prph.h" | 74 | #include "iwl-prph.h" |
75 | #include "iwl-agn-hw.h" | 75 | #include "iwl-agn-hw.h" |
76 | #include "internal.h" | 76 | #include "internal.h" |
77 | /* FIXME: need to abstract out TX command (once we know what it looks like) */ | ||
78 | #include "dvm/commands.h" | ||
79 | 77 | ||
80 | #define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \ | 78 | static void iwl_pcie_set_pwr_vmain(struct iwl_trans *trans) |
81 | (((1<<trans->cfg->base_params->num_of_queues) - 1) &\ | ||
82 | (~(1<<(trans_pcie)->cmd_queue))) | ||
83 | |||
84 | static int iwl_trans_rx_alloc(struct iwl_trans *trans) | ||
85 | { | ||
86 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
87 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; | ||
88 | struct device *dev = trans->dev; | ||
89 | |||
90 | memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); | ||
91 | |||
92 | spin_lock_init(&rxq->lock); | ||
93 | |||
94 | if (WARN_ON(rxq->bd || rxq->rb_stts)) | ||
95 | return -EINVAL; | ||
96 | |||
97 | /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */ | ||
98 | rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, | ||
99 | &rxq->bd_dma, GFP_KERNEL); | ||
100 | if (!rxq->bd) | ||
101 | goto err_bd; | ||
102 | |||
103 | /* Allocate the driver's pointer to receive buffer status */ | ||
104 | rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), | ||
105 | &rxq->rb_stts_dma, GFP_KERNEL); | ||
106 | if (!rxq->rb_stts) | ||
107 | goto err_rb_stts; | ||
108 | |||
109 | return 0; | ||
110 | |||
111 | err_rb_stts: | ||
112 | dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, | ||
113 | rxq->bd, rxq->bd_dma); | ||
114 | memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma)); | ||
115 | rxq->bd = NULL; | ||
116 | err_bd: | ||
117 | return -ENOMEM; | ||
118 | } | ||
119 | |||
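iwl_trans_rx_alloc() follows the usual allocate-then-unwind shape: if the second DMA allocation fails, the first is released and its pointer zeroed before returning -ENOMEM. The same shape as a self-contained userspace sketch, with plain calloc() standing in for dma_zalloc_coherent():

    #include <stdlib.h>

    struct rxq_bufs {
            void *bd;        /* stands in for the RBD circular buffer */
            void *rb_stts;   /* stands in for the Rx status block */
    };

    static int rxq_alloc(struct rxq_bufs *r, size_t bd_sz, size_t stts_sz)
    {
            r->bd = calloc(1, bd_sz);
            if (!r->bd)
                    goto err_bd;

            r->rb_stts = calloc(1, stts_sz);
            if (!r->rb_stts)
                    goto err_rb_stts;

            return 0;

    err_rb_stts:
            free(r->bd);
            r->bd = NULL;
    err_bd:
            return -1;       /* the driver returns -ENOMEM here */
    }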
120 | static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans) | ||
121 | { | ||
122 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
123 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; | ||
124 | int i; | ||
125 | |||
126 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | ||
127 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { | ||
128 | /* In the reset function, these buffers may have been allocated | ||
129 | * to an SKB, so we need to unmap and free potential storage */ | ||
130 | if (rxq->pool[i].page != NULL) { | ||
131 | dma_unmap_page(trans->dev, rxq->pool[i].page_dma, | ||
132 | PAGE_SIZE << trans_pcie->rx_page_order, | ||
133 | DMA_FROM_DEVICE); | ||
134 | __free_pages(rxq->pool[i].page, | ||
135 | trans_pcie->rx_page_order); | ||
136 | rxq->pool[i].page = NULL; | ||
137 | } | ||
138 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | ||
139 | } | ||
140 | } | ||
141 | |||
142 | static void iwl_trans_rx_hw_init(struct iwl_trans *trans, | ||
143 | struct iwl_rx_queue *rxq) | ||
144 | { | ||
145 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
146 | u32 rb_size; | ||
147 | const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ | ||
148 | u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */ | ||
149 | |||
150 | if (trans_pcie->rx_buf_size_8k) | ||
151 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; | ||
152 | else | ||
153 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; | ||
154 | |||
155 | /* Stop Rx DMA */ | ||
156 | iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); | ||
157 | |||
158 | /* Reset driver's Rx queue write index */ | ||
159 | iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); | ||
160 | |||
161 | /* Tell device where to find RBD circular buffer in DRAM */ | ||
162 | iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG, | ||
163 | (u32)(rxq->bd_dma >> 8)); | ||
164 | |||
165 | /* Tell device where in DRAM to update its Rx status */ | ||
166 | iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG, | ||
167 | rxq->rb_stts_dma >> 4); | ||
168 | |||
169 | /* Enable Rx DMA | ||
170 | * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in | ||
171 | * the credit mechanism in 5000 HW RX FIFO | ||
172 | * Direct rx interrupts to hosts | ||
173 | * Rx buffer size 4 or 8k | ||
174 | * RB timeout 0x10 | ||
175 | * 256 RBDs | ||
176 | */ | ||
177 | iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, | ||
178 | FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | | ||
179 | FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | | ||
180 | FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | | ||
181 | rb_size| | ||
182 | (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| | ||
183 | (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); | ||
184 | |||
185 | /* Set interrupt coalescing timer to default (2048 usecs) */ | ||
186 | iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); | ||
187 | } | ||
188 | |||
189 | static int iwl_rx_init(struct iwl_trans *trans) | ||
190 | { | ||
191 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
192 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; | ||
193 | |||
194 | int i, err; | ||
195 | unsigned long flags; | ||
196 | |||
197 | if (!rxq->bd) { | ||
198 | err = iwl_trans_rx_alloc(trans); | ||
199 | if (err) | ||
200 | return err; | ||
201 | } | ||
202 | |||
203 | spin_lock_irqsave(&rxq->lock, flags); | ||
204 | INIT_LIST_HEAD(&rxq->rx_free); | ||
205 | INIT_LIST_HEAD(&rxq->rx_used); | ||
206 | |||
207 | iwl_trans_rxq_free_rx_bufs(trans); | ||
208 | |||
209 | for (i = 0; i < RX_QUEUE_SIZE; i++) | ||
210 | rxq->queue[i] = NULL; | ||
211 | |||
212 | /* Set us so that we have processed and used all buffers, but have | ||
213 | * not restocked the Rx queue with fresh buffers */ | ||
214 | rxq->read = rxq->write = 0; | ||
215 | rxq->write_actual = 0; | ||
216 | rxq->free_count = 0; | ||
217 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
218 | |||
219 | iwl_rx_replenish(trans); | ||
220 | |||
221 | iwl_trans_rx_hw_init(trans, rxq); | ||
222 | |||
223 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | ||
224 | rxq->need_update = 1; | ||
225 | iwl_rx_queue_update_write_ptr(trans, rxq); | ||
226 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
227 | |||
228 | return 0; | ||
229 | } | ||
230 | |||
231 | static void iwl_trans_pcie_rx_free(struct iwl_trans *trans) | ||
232 | { | ||
233 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
234 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; | ||
235 | unsigned long flags; | ||
236 | |||
237 | /* if rxq->bd is NULL, it means that nothing has been allocated, | ||
238 | * exit now */ | ||
239 | if (!rxq->bd) { | ||
240 | IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); | ||
241 | return; | ||
242 | } | ||
243 | |||
244 | spin_lock_irqsave(&rxq->lock, flags); | ||
245 | iwl_trans_rxq_free_rx_bufs(trans); | ||
246 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
247 | |||
248 | dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE, | ||
249 | rxq->bd, rxq->bd_dma); | ||
250 | memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma)); | ||
251 | rxq->bd = NULL; | ||
252 | |||
253 | if (rxq->rb_stts) | ||
254 | dma_free_coherent(trans->dev, | ||
255 | sizeof(struct iwl_rb_status), | ||
256 | rxq->rb_stts, rxq->rb_stts_dma); | ||
257 | else | ||
258 | IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n"); | ||
259 | memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma)); | ||
260 | rxq->rb_stts = NULL; | ||
261 | } | ||
262 | |||
263 | static int iwl_trans_rx_stop(struct iwl_trans *trans) | ||
264 | { | ||
265 | |||
266 | /* stop Rx DMA */ | ||
267 | iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); | ||
268 | return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG, | ||
269 | FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); | ||
270 | } | ||
271 | |||
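iwl_trans_rx_stop() writes 0 to the channel-0 config register and then polls the FH RX status register until the channel reports idle, with a 1000 us budget. The generic poll-with-timeout idiom it relies on, reduced to a sketch (read_status() and udelay_us() are hypothetical stand-ins for the register read and delay):

    /* Returns elapsed microseconds on success, -1 on timeout -- the same
     * contract as iwl_poll_direct_bit().  Both helpers below are stubs. */
    extern unsigned int read_status(void);   /* hypothetical register read */
    extern void udelay_us(unsigned int us);  /* hypothetical busy wait */

    static int poll_bit(unsigned int mask, unsigned int timeout_us)
    {
            unsigned int t = 0;

            do {
                    if ((read_status() & mask) == mask)
                            return t;
                    udelay_us(10);
                    t += 10;
            } while (t < timeout_us);

            return -1;
    }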
272 | static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans, | ||
273 | struct iwl_dma_ptr *ptr, size_t size) | ||
274 | { | ||
275 | if (WARN_ON(ptr->addr)) | ||
276 | return -EINVAL; | ||
277 | |||
278 | ptr->addr = dma_alloc_coherent(trans->dev, size, | ||
279 | &ptr->dma, GFP_KERNEL); | ||
280 | if (!ptr->addr) | ||
281 | return -ENOMEM; | ||
282 | ptr->size = size; | ||
283 | return 0; | ||
284 | } | ||
285 | |||
286 | static void iwlagn_free_dma_ptr(struct iwl_trans *trans, | ||
287 | struct iwl_dma_ptr *ptr) | ||
288 | { | ||
289 | if (unlikely(!ptr->addr)) | ||
290 | return; | ||
291 | |||
292 | dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma); | ||
293 | memset(ptr, 0, sizeof(*ptr)); | ||
294 | } | ||
295 | |||
296 | static void iwl_trans_pcie_queue_stuck_timer(unsigned long data) | ||
297 | { | ||
298 | struct iwl_tx_queue *txq = (void *)data; | ||
299 | struct iwl_queue *q = &txq->q; | ||
300 | struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; | ||
301 | struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); | ||
302 | u32 scd_sram_addr = trans_pcie->scd_base_addr + | ||
303 | SCD_TX_STTS_QUEUE_OFFSET(txq->q.id); | ||
304 | u8 buf[16]; | ||
305 | int i; | ||
306 | |||
307 | spin_lock(&txq->lock); | ||
308 | /* check if triggered erroneously */ | ||
309 | if (txq->q.read_ptr == txq->q.write_ptr) { | ||
310 | spin_unlock(&txq->lock); | ||
311 | return; | ||
312 | } | ||
313 | spin_unlock(&txq->lock); | ||
314 | |||
315 | IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id, | ||
316 | jiffies_to_msecs(trans_pcie->wd_timeout)); | ||
317 | IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", | ||
318 | txq->q.read_ptr, txq->q.write_ptr); | ||
319 | |||
320 | iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); | ||
321 | |||
322 | iwl_print_hex_error(trans, buf, sizeof(buf)); | ||
323 | |||
324 | for (i = 0; i < FH_TCSR_CHNL_NUM; i++) | ||
325 | IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i, | ||
326 | iwl_read_direct32(trans, FH_TX_TRB_REG(i))); | ||
327 | |||
328 | for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { | ||
329 | u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i)); | ||
330 | u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; | ||
331 | bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); | ||
332 | u32 tbl_dw = | ||
333 | iwl_read_targ_mem(trans, | ||
334 | trans_pcie->scd_base_addr + | ||
335 | SCD_TRANS_TBL_OFFSET_QUEUE(i)); | ||
336 | |||
337 | if (i & 0x1) | ||
338 | tbl_dw = (tbl_dw & 0xFFFF0000) >> 16; | ||
339 | else | ||
340 | tbl_dw = tbl_dw & 0x0000FFFF; | ||
341 | |||
342 | IWL_ERR(trans, | ||
343 | "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", | ||
344 | i, active ? "" : "in", fifo, tbl_dw, | ||
345 | iwl_read_prph(trans, | ||
346 | SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1), | ||
347 | iwl_read_prph(trans, SCD_QUEUE_WRPTR(i))); | ||
348 | } | ||
349 | |||
350 | for (i = q->read_ptr; i != q->write_ptr; | ||
351 | i = iwl_queue_inc_wrap(i, q->n_bd)) { | ||
352 | struct iwl_tx_cmd *tx_cmd = | ||
353 | (struct iwl_tx_cmd *)txq->entries[i].cmd->payload; | ||
354 | IWL_ERR(trans, "scratch %d = 0x%08x\n", i, | ||
355 | get_unaligned_le32(&tx_cmd->scratch)); | ||
356 | } | ||
357 | |||
358 | iwl_op_mode_nic_error(trans->op_mode); | ||
359 | } | ||
360 | |||
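In the dump loop of the stuck-queue handler above, each 32-bit word of the scheduler translation table packs two queues' ra_tid entries: even-numbered queues sit in the low halfword, odd-numbered ones in the high halfword. A quick standalone illustration of the extraction:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t tbl_dw = 0xBEEF1234;   /* example packed word */

            for (int i = 0; i < 2; i++) {
                    uint32_t ra_tid = (i & 0x1) ? (tbl_dw & 0xFFFF0000) >> 16
                                                : tbl_dw & 0x0000FFFF;
                    printf("queue %d -> ra_tid 0x%04x\n", i, (unsigned)ra_tid);
            }
            return 0;
    }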
361 | static int iwl_trans_txq_alloc(struct iwl_trans *trans, | ||
362 | struct iwl_tx_queue *txq, int slots_num, | ||
363 | u32 txq_id) | ||
364 | { | ||
365 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
366 | size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX; | ||
367 | int i; | ||
368 | |||
369 | if (WARN_ON(txq->entries || txq->tfds)) | ||
370 | return -EINVAL; | ||
371 | |||
372 | setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer, | ||
373 | (unsigned long)txq); | ||
374 | txq->trans_pcie = trans_pcie; | ||
375 | |||
376 | txq->q.n_window = slots_num; | ||
377 | |||
378 | txq->entries = kcalloc(slots_num, | ||
379 | sizeof(struct iwl_pcie_tx_queue_entry), | ||
380 | GFP_KERNEL); | ||
381 | |||
382 | if (!txq->entries) | ||
383 | goto error; | ||
384 | |||
385 | if (txq_id == trans_pcie->cmd_queue) | ||
386 | for (i = 0; i < slots_num; i++) { | ||
387 | txq->entries[i].cmd = | ||
388 | kmalloc(sizeof(struct iwl_device_cmd), | ||
389 | GFP_KERNEL); | ||
390 | if (!txq->entries[i].cmd) | ||
391 | goto error; | ||
392 | } | ||
393 | |||
394 | /* Circular buffer of transmit frame descriptors (TFDs), | ||
395 | * shared with device */ | ||
396 | txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, | ||
397 | &txq->q.dma_addr, GFP_KERNEL); | ||
398 | if (!txq->tfds) { | ||
399 | IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz); | ||
400 | goto error; | ||
401 | } | ||
402 | txq->q.id = txq_id; | ||
403 | |||
404 | return 0; | ||
405 | error: | ||
406 | if (txq->entries && txq_id == trans_pcie->cmd_queue) | ||
407 | for (i = 0; i < slots_num; i++) | ||
408 | kfree(txq->entries[i].cmd); | ||
409 | kfree(txq->entries); | ||
410 | txq->entries = NULL; | ||
411 | |||
412 | return -ENOMEM; | ||
413 | |||
414 | } | ||
415 | |||
416 | static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq, | ||
417 | int slots_num, u32 txq_id) | ||
418 | { | ||
419 | int ret; | ||
420 | |||
421 | txq->need_update = 0; | ||
422 | |||
423 | /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise | ||
424 | * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ | ||
425 | BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); | ||
426 | |||
427 | /* Initialize queue's high/low-water marks, and head/tail indexes */ | ||
428 | ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num, | ||
429 | txq_id); | ||
430 | if (ret) | ||
431 | return ret; | ||
432 | |||
433 | spin_lock_init(&txq->lock); | ||
434 | |||
435 | /* | ||
436 | * Tell nic where to find circular buffer of Tx Frame Descriptors for | ||
437 | * given Tx queue, and enable the DMA channel used for that queue. | ||
438 | * Circular buffer (TFD queue in DRAM) physical base address */ | ||
439 | iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id), | ||
440 | txq->q.dma_addr >> 8); | ||
441 | |||
442 | return 0; | ||
443 | } | ||
444 | |||
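The BUILD_BUG_ON in iwl_trans_txq_init() enforces that TFD_QUEUE_SIZE_MAX is a power of two because the ring-index helpers wrap by masking rather than by modulo. A sketch of that assumption (the real iwl_queue_inc_wrap/dec_wrap live in the tx code and may be spelled slightly differently):

    #include <assert.h>
    #include <stdio.h>

    #define QUEUE_SIZE 256   /* must be a power of two */

    static int queue_inc_wrap(int index, int n_bd)
    {
            return ++index & (n_bd - 1);   /* only correct when n_bd is 2^k */
    }

    int main(void)
    {
            /* compile-time BUILD_BUG_ON in the driver; runtime assert here */
            assert((QUEUE_SIZE & (QUEUE_SIZE - 1)) == 0);

            printf("%d\n", queue_inc_wrap(255, QUEUE_SIZE));   /* wraps to 0 */
            return 0;
    }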
445 | /* | ||
446 | * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's | ||
447 | */ | ||
448 | void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id) | ||
449 | { | ||
450 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
451 | struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; | ||
452 | struct iwl_queue *q = &txq->q; | ||
453 | enum dma_data_direction dma_dir; | ||
454 | |||
455 | if (!q->n_bd) | ||
456 | return; | ||
457 | |||
458 | /* In the command queue, all the TBs are mapped as BIDI | ||
459 | * so unmap them as such. | ||
460 | */ | ||
461 | if (txq_id == trans_pcie->cmd_queue) | ||
462 | dma_dir = DMA_BIDIRECTIONAL; | ||
463 | else | ||
464 | dma_dir = DMA_TO_DEVICE; | ||
465 | |||
466 | spin_lock_bh(&txq->lock); | ||
467 | while (q->write_ptr != q->read_ptr) { | ||
468 | iwl_txq_free_tfd(trans, txq, dma_dir); | ||
469 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); | ||
470 | } | ||
471 | spin_unlock_bh(&txq->lock); | ||
472 | } | ||
473 | |||
474 | /** | ||
475 | * iwl_tx_queue_free - Deallocate DMA queue. | ||
476 | * @txq: Transmit queue to deallocate. | ||
477 | * | ||
478 | * Empty queue by removing and destroying all BD's. | ||
479 | * Free all buffers. | ||
480 | * 0-fill, but do not free "txq" descriptor structure. | ||
481 | */ | ||
482 | static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id) | ||
483 | { | ||
484 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
485 | struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; | ||
486 | struct device *dev = trans->dev; | ||
487 | int i; | ||
488 | |||
489 | if (WARN_ON(!txq)) | ||
490 | return; | ||
491 | |||
492 | iwl_tx_queue_unmap(trans, txq_id); | ||
493 | |||
494 | /* De-alloc array of command/tx buffers */ | ||
495 | if (txq_id == trans_pcie->cmd_queue) | ||
496 | for (i = 0; i < txq->q.n_window; i++) { | ||
497 | kfree(txq->entries[i].cmd); | ||
498 | kfree(txq->entries[i].copy_cmd); | ||
499 | kfree(txq->entries[i].free_buf); | ||
500 | } | ||
501 | |||
502 | /* De-alloc circular buffer of TFDs */ | ||
503 | if (txq->q.n_bd) { | ||
504 | dma_free_coherent(dev, sizeof(struct iwl_tfd) * | ||
505 | txq->q.n_bd, txq->tfds, txq->q.dma_addr); | ||
506 | memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr)); | ||
507 | } | ||
508 | |||
509 | kfree(txq->entries); | ||
510 | txq->entries = NULL; | ||
511 | |||
512 | del_timer_sync(&txq->stuck_timer); | ||
513 | |||
514 | /* 0-fill queue descriptor structure */ | ||
515 | memset(txq, 0, sizeof(*txq)); | ||
516 | } | ||
517 | |||
518 | /** | ||
519 | * iwl_trans_tx_free - Free TXQ Context | ||
520 | * | ||
521 | * Destroy all TX DMA queues and structures | ||
522 | */ | ||
523 | static void iwl_trans_pcie_tx_free(struct iwl_trans *trans) | ||
524 | { | ||
525 | int txq_id; | ||
526 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
527 | |||
528 | /* Tx queues */ | ||
529 | if (trans_pcie->txq) { | ||
530 | for (txq_id = 0; | ||
531 | txq_id < trans->cfg->base_params->num_of_queues; txq_id++) | ||
532 | iwl_tx_queue_free(trans, txq_id); | ||
533 | } | ||
534 | |||
535 | kfree(trans_pcie->txq); | ||
536 | trans_pcie->txq = NULL; | ||
537 | |||
538 | iwlagn_free_dma_ptr(trans, &trans_pcie->kw); | ||
539 | |||
540 | iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls); | ||
541 | } | ||
542 | |||
543 | /** | ||
544 | * iwl_trans_tx_alloc - allocate TX context | ||
545 | * Allocate all Tx DMA structures and initialize them | ||
546 | * | ||
547 | * @param trans | ||
548 | * @return error code | ||
549 | */ | ||
550 | static int iwl_trans_tx_alloc(struct iwl_trans *trans) | ||
551 | { | ||
552 | int ret; | ||
553 | int txq_id, slots_num; | ||
554 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
555 | |||
556 | u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues * | ||
557 | sizeof(struct iwlagn_scd_bc_tbl); | ||
558 | |||
559 | /* It is not allowed to alloc twice, so warn when this happens. | ||
560 | * We cannot rely on the previous allocation, so free and fail */ | ||
561 | if (WARN_ON(trans_pcie->txq)) { | ||
562 | ret = -EINVAL; | ||
563 | goto error; | ||
564 | } | ||
565 | |||
566 | ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls, | ||
567 | scd_bc_tbls_size); | ||
568 | if (ret) { | ||
569 | IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); | ||
570 | goto error; | ||
571 | } | ||
572 | |||
573 | /* Alloc keep-warm buffer */ | ||
574 | ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE); | ||
575 | if (ret) { | ||
576 | IWL_ERR(trans, "Keep Warm allocation failed\n"); | ||
577 | goto error; | ||
578 | } | ||
579 | |||
580 | trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues, | ||
581 | sizeof(struct iwl_tx_queue), GFP_KERNEL); | ||
582 | if (!trans_pcie->txq) { | ||
583 | IWL_ERR(trans, "Not enough memory for txq\n"); | ||
584 | ret = -ENOMEM; | ||
585 | goto error; | ||
586 | } | ||
587 | |||
588 | /* Alloc and init all Tx queues, including the command queue (#4/#9) */ | ||
589 | for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; | ||
590 | txq_id++) { | ||
591 | slots_num = (txq_id == trans_pcie->cmd_queue) ? | ||
592 | TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | ||
593 | ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id], | ||
594 | slots_num, txq_id); | ||
595 | if (ret) { | ||
596 | IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); | ||
597 | goto error; | ||
598 | } | ||
599 | } | ||
600 | |||
601 | return 0; | ||
602 | |||
603 | error: | ||
604 | iwl_trans_pcie_tx_free(trans); | ||
605 | |||
606 | return ret; | ||
607 | } | ||
608 | static int iwl_tx_init(struct iwl_trans *trans) | ||
609 | { | ||
610 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
611 | int ret; | ||
612 | int txq_id, slots_num; | ||
613 | unsigned long flags; | ||
614 | bool alloc = false; | ||
615 | |||
616 | if (!trans_pcie->txq) { | ||
617 | ret = iwl_trans_tx_alloc(trans); | ||
618 | if (ret) | ||
619 | goto error; | ||
620 | alloc = true; | ||
621 | } | ||
622 | |||
623 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | ||
624 | |||
625 | /* Turn off all Tx DMA fifos */ | ||
626 | iwl_write_prph(trans, SCD_TXFACT, 0); | ||
627 | |||
628 | /* Tell NIC where to find the "keep warm" buffer */ | ||
629 | iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, | ||
630 | trans_pcie->kw.dma >> 4); | ||
631 | |||
632 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
633 | |||
634 | /* Alloc and init all Tx queues, including the command queue (#4/#9) */ | ||
635 | for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; | ||
636 | txq_id++) { | ||
637 | slots_num = (txq_id == trans_pcie->cmd_queue) ? | ||
638 | TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | ||
639 | ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id], | ||
640 | slots_num, txq_id); | ||
641 | if (ret) { | ||
642 | IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); | ||
643 | goto error; | ||
644 | } | ||
645 | } | ||
646 | |||
647 | return 0; | ||
648 | error: | ||
649 | /* Upon error, free only if we allocated something */ | ||
650 | if (alloc) | ||
651 | iwl_trans_pcie_tx_free(trans); | ||
652 | return ret; | ||
653 | } | ||
654 | |||
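iwl_tx_init() allocates the TX context only on its first call and, if a later step fails, frees only what this invocation allocated (tracked by the alloc flag), so a re-init after a firmware restart never tears down queues it did not create. A minimal sketch of the flag pattern (hw_queue_init() is a hypothetical stand-in for the per-queue init loop):

    #include <stdlib.h>

    extern int hw_queue_init(void);   /* hypothetical hardware init step */

    static void *txq_ctx;             /* stands in for trans_pcie->txq */

    static int tx_init(void)
    {
            int ret, alloc = 0;

            if (!txq_ctx) {
                    txq_ctx = calloc(1, 4096);
                    if (!txq_ctx)
                            return -1;          /* -ENOMEM in the driver */
                    alloc = 1;
            }

            ret = hw_queue_init();
            if (!ret)
                    return 0;

            if (alloc) {                        /* free only what we allocated */
                    free(txq_ctx);
                    txq_ctx = NULL;
            }
            return ret;
    }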
655 | static void iwl_set_pwr_vmain(struct iwl_trans *trans) | ||
656 | { | 79 | { |
657 | /* | 80 | /* |
658 | * (for documentation purposes) | 81 | * (for documentation purposes) |
@@ -674,18 +97,11 @@ static void iwl_set_pwr_vmain(struct iwl_trans *trans) | |||
674 | #define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01 | 97 | #define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01 |
675 | #define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02 | 98 | #define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02 |
676 | 99 | ||
677 | static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans) | 100 | static void iwl_pcie_apm_config(struct iwl_trans *trans) |
678 | { | 101 | { |
679 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 102 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
680 | u16 pci_lnk_ctl; | 103 | u16 lctl; |
681 | 104 | ||
682 | pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, | ||
683 | &pci_lnk_ctl); | ||
684 | return pci_lnk_ctl; | ||
685 | } | ||
686 | |||
687 | static void iwl_apm_config(struct iwl_trans *trans) | ||
688 | { | ||
689 | /* | 105 | /* |
690 | * HW bug W/A for instability in PCIe bus L0S->L1 transition. | 106 | * HW bug W/A for instability in PCIe bus L0S->L1 transition. |
691 | * Check if BIOS (or OS) enabled L1-ASPM on this device. | 107 | * Check if BIOS (or OS) enabled L1-ASPM on this device. |
@@ -694,7 +110,7 @@ static void iwl_apm_config(struct iwl_trans *trans) | |||
694 | * If not (unlikely), enable L0S, so there is at least some | 110 | * If not (unlikely), enable L0S, so there is at least some |
695 | * power savings, even without L1. | 111 | * power savings, even without L1. |
696 | */ | 112 | */ |
697 | u16 lctl = iwl_pciexp_link_ctrl(trans); | 113 | pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl); |
698 | 114 | ||
699 | if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == | 115 | if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == |
700 | PCI_CFG_LINK_CTRL_VAL_L1_EN) { | 116 | PCI_CFG_LINK_CTRL_VAL_L1_EN) { |
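The rename to iwl_pcie_apm_config() also inlines the link-control read: pcie_capability_read_word() fetches PCI_EXP_LNKCTL, and the L1 bit decides whether L0S gets disabled. Expressed with the generic ASPM definitions from pci_regs.h rather than the driver's private PCI_CFG_LINK_CTRL_VAL_* constants (which carry the same bit values), the check is roughly:

    /* Sketch only: assumes a valid struct pci_dev *pdev and that
     * PCI_EXP_LNKCTL_ASPM_L1 (0x2) matches PCI_CFG_LINK_CTRL_VAL_L1_EN. */
    #include <linux/pci.h>

    static bool aspm_l1_enabled(struct pci_dev *pdev)
    {
            u16 lctl;

            pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lctl);
            return (lctl & PCI_EXP_LNKCTL_ASPM_L1) == PCI_EXP_LNKCTL_ASPM_L1;
    }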
@@ -711,10 +127,10 @@ static void iwl_apm_config(struct iwl_trans *trans) | |||
711 | 127 | ||
712 | /* | 128 | /* |
713 | * Start up NIC's basic functionality after it has been reset | 129 | * Start up NIC's basic functionality after it has been reset |
714 | * (e.g. after platform boot, or shutdown via iwl_apm_stop()) | 130 | * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop()) |
715 | * NOTE: This does not load uCode nor start the embedded processor | 131 | * NOTE: This does not load uCode nor start the embedded processor |
716 | */ | 132 | */ |
717 | static int iwl_apm_init(struct iwl_trans *trans) | 133 | static int iwl_pcie_apm_init(struct iwl_trans *trans) |
718 | { | 134 | { |
719 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 135 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
720 | int ret = 0; | 136 | int ret = 0; |
@@ -746,7 +162,7 @@ static int iwl_apm_init(struct iwl_trans *trans) | |||
746 | iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, | 162 | iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, |
747 | CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); | 163 | CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); |
748 | 164 | ||
749 | iwl_apm_config(trans); | 165 | iwl_pcie_apm_config(trans); |
750 | 166 | ||
751 | /* Configure analog phase-lock-loop before activating to D0A */ | 167 | /* Configure analog phase-lock-loop before activating to D0A */ |
752 | if (trans->cfg->base_params->pll_cfg_val) | 168 | if (trans->cfg->base_params->pll_cfg_val) |
@@ -792,7 +208,7 @@ out: | |||
792 | return ret; | 208 | return ret; |
793 | } | 209 | } |
794 | 210 | ||
795 | static int iwl_apm_stop_master(struct iwl_trans *trans) | 211 | static int iwl_pcie_apm_stop_master(struct iwl_trans *trans) |
796 | { | 212 | { |
797 | int ret = 0; | 213 | int ret = 0; |
798 | 214 | ||
@@ -810,7 +226,7 @@ static int iwl_apm_stop_master(struct iwl_trans *trans) | |||
810 | return ret; | 226 | return ret; |
811 | } | 227 | } |
812 | 228 | ||
813 | static void iwl_apm_stop(struct iwl_trans *trans) | 229 | static void iwl_pcie_apm_stop(struct iwl_trans *trans) |
814 | { | 230 | { |
815 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 231 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
816 | IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n"); | 232 | IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n"); |
@@ -818,7 +234,7 @@ static void iwl_apm_stop(struct iwl_trans *trans) | |||
818 | clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status); | 234 | clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status); |
819 | 235 | ||
820 | /* Stop device's DMA activity */ | 236 | /* Stop device's DMA activity */ |
821 | iwl_apm_stop_master(trans); | 237 | iwl_pcie_apm_stop_master(trans); |
822 | 238 | ||
823 | /* Reset the entire device */ | 239 | /* Reset the entire device */ |
824 | iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); | 240 | iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); |
@@ -833,29 +249,29 @@ static void iwl_apm_stop(struct iwl_trans *trans) | |||
833 | CSR_GP_CNTRL_REG_FLAG_INIT_DONE); | 249 | CSR_GP_CNTRL_REG_FLAG_INIT_DONE); |
834 | } | 250 | } |
835 | 251 | ||
836 | static int iwl_nic_init(struct iwl_trans *trans) | 252 | static int iwl_pcie_nic_init(struct iwl_trans *trans) |
837 | { | 253 | { |
838 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 254 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
839 | unsigned long flags; | 255 | unsigned long flags; |
840 | 256 | ||
841 | /* nic_init */ | 257 | /* nic_init */ |
842 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | 258 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); |
843 | iwl_apm_init(trans); | 259 | iwl_pcie_apm_init(trans); |
844 | 260 | ||
845 | /* Set interrupt coalescing calibration timer to default (512 usecs) */ | 261 | /* Set interrupt coalescing calibration timer to default (512 usecs) */ |
846 | iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); | 262 | iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); |
847 | 263 | ||
848 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | 264 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); |
849 | 265 | ||
850 | iwl_set_pwr_vmain(trans); | 266 | iwl_pcie_set_pwr_vmain(trans); |
851 | 267 | ||
852 | iwl_op_mode_nic_config(trans->op_mode); | 268 | iwl_op_mode_nic_config(trans->op_mode); |
853 | 269 | ||
854 | /* Allocate the RX queue, or reset if it is already allocated */ | 270 | /* Allocate the RX queue, or reset if it is already allocated */ |
855 | iwl_rx_init(trans); | 271 | iwl_pcie_rx_init(trans); |
856 | 272 | ||
857 | /* Allocate or reset and init all Tx and Command queues */ | 273 | /* Allocate or reset and init all Tx and Command queues */ |
858 | if (iwl_tx_init(trans)) | 274 | if (iwl_pcie_tx_init(trans)) |
859 | return -ENOMEM; | 275 | return -ENOMEM; |
860 | 276 | ||
861 | if (trans->cfg->base_params->shadow_reg_enable) { | 277 | if (trans->cfg->base_params->shadow_reg_enable) { |
@@ -870,7 +286,7 @@ static int iwl_nic_init(struct iwl_trans *trans) | |||
870 | #define HW_READY_TIMEOUT (50) | 286 | #define HW_READY_TIMEOUT (50) |
871 | 287 | ||
872 | /* Note: returns poll_bit return value, which is >= 0 if success */ | 288 | /* Note: returns poll_bit return value, which is >= 0 if success */ |
873 | static int iwl_set_hw_ready(struct iwl_trans *trans) | 289 | static int iwl_pcie_set_hw_ready(struct iwl_trans *trans) |
874 | { | 290 | { |
875 | int ret; | 291 | int ret; |
876 | 292 | ||
@@ -888,14 +304,14 @@ static int iwl_set_hw_ready(struct iwl_trans *trans) | |||
888 | } | 304 | } |
889 | 305 | ||
890 | /* Note: returns standard 0/-ERROR code */ | 306 | /* Note: returns standard 0/-ERROR code */ |
891 | static int iwl_prepare_card_hw(struct iwl_trans *trans) | 307 | static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) |
892 | { | 308 | { |
893 | int ret; | 309 | int ret; |
894 | int t = 0; | 310 | int t = 0; |
895 | 311 | ||
896 | IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n"); | 312 | IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n"); |
897 | 313 | ||
898 | ret = iwl_set_hw_ready(trans); | 314 | ret = iwl_pcie_set_hw_ready(trans); |
899 | /* If the card is ready, exit 0 */ | 315 | /* If the card is ready, exit 0 */ |
900 | if (ret >= 0) | 316 | if (ret >= 0) |
901 | return 0; | 317 | return 0; |
@@ -905,7 +321,7 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans) | |||
905 | CSR_HW_IF_CONFIG_REG_PREPARE); | 321 | CSR_HW_IF_CONFIG_REG_PREPARE); |
906 | 322 | ||
907 | do { | 323 | do { |
908 | ret = iwl_set_hw_ready(trans); | 324 | ret = iwl_pcie_set_hw_ready(trans); |
909 | if (ret >= 0) | 325 | if (ret >= 0) |
910 | return 0; | 326 | return 0; |
911 | 327 | ||
@@ -919,7 +335,7 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans) | |||
919 | /* | 335 | /* |
920 | * ucode | 336 | * ucode |
921 | */ | 337 | */ |
922 | static int iwl_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr, | 338 | static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr, |
923 | dma_addr_t phy_addr, u32 byte_cnt) | 339 | dma_addr_t phy_addr, u32 byte_cnt) |
924 | { | 340 | { |
925 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 341 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
@@ -966,7 +382,7 @@ static int iwl_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr, | |||
966 | return 0; | 382 | return 0; |
967 | } | 383 | } |
968 | 384 | ||
969 | static int iwl_load_section(struct iwl_trans *trans, u8 section_num, | 385 | static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num, |
970 | const struct fw_desc *section) | 386 | const struct fw_desc *section) |
971 | { | 387 | { |
972 | u8 *v_addr; | 388 | u8 *v_addr; |
@@ -987,8 +403,9 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num, | |||
987 | copy_size = min_t(u32, PAGE_SIZE, section->len - offset); | 403 | copy_size = min_t(u32, PAGE_SIZE, section->len - offset); |
988 | 404 | ||
989 | memcpy(v_addr, (u8 *)section->data + offset, copy_size); | 405 | memcpy(v_addr, (u8 *)section->data + offset, copy_size); |
990 | ret = iwl_load_firmware_chunk(trans, section->offset + offset, | 406 | ret = iwl_pcie_load_firmware_chunk(trans, |
991 | p_addr, copy_size); | 407 | section->offset + offset, |
408 | p_addr, copy_size); | ||
992 | if (ret) { | 409 | if (ret) { |
993 | IWL_ERR(trans, | 410 | IWL_ERR(trans, |
994 | "Could not load the [%d] uCode section\n", | 411 | "Could not load the [%d] uCode section\n", |
@@ -1001,7 +418,7 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num, | |||
1001 | return ret; | 418 | return ret; |
1002 | } | 419 | } |
1003 | 420 | ||
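iwl_pcie_load_firmware_chunk() can only push one page-sized bounce buffer at a time, so iwl_pcie_load_section() walks the firmware section in PAGE_SIZE steps, copying min(PAGE_SIZE, remaining) on each pass. The chunking arithmetic as a standalone sketch:

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SZ 4096u   /* stand-in for PAGE_SIZE */

    static void load_section(const unsigned char *data, unsigned int len)
    {
            unsigned char bounce[PAGE_SZ];

            for (unsigned int offset = 0; offset < len; offset += PAGE_SZ) {
                    unsigned int copy = len - offset < PAGE_SZ ?
                                        len - offset : PAGE_SZ;

                    memcpy(bounce, data + offset, copy);
                    /* the driver now DMAs 'copy' bytes to dst + offset */
                    printf("chunk at +%u, %u bytes\n", offset, copy);
            }
    }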
1004 | static int iwl_load_given_ucode(struct iwl_trans *trans, | 421 | static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, |
1005 | const struct fw_img *image) | 422 | const struct fw_img *image) |
1006 | { | 423 | { |
1007 | int i, ret = 0; | 424 | int i, ret = 0; |
@@ -1010,7 +427,7 @@ static int iwl_load_given_ucode(struct iwl_trans *trans, | |||
1010 | if (!image->sec[i].data) | 427 | if (!image->sec[i].data) |
1011 | break; | 428 | break; |
1012 | 429 | ||
1013 | ret = iwl_load_section(trans, i, &image->sec[i]); | 430 | ret = iwl_pcie_load_section(trans, i, &image->sec[i]); |
1014 | if (ret) | 431 | if (ret) |
1015 | return ret; | 432 | return ret; |
1016 | } | 433 | } |
@@ -1029,7 +446,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, | |||
1029 | bool hw_rfkill; | 446 | bool hw_rfkill; |
1030 | 447 | ||
1031 | /* This may fail if AMT took ownership of the device */ | 448 | /* This may fail if AMT took ownership of the device */ |
1032 | if (iwl_prepare_card_hw(trans)) { | 449 | if (iwl_pcie_prepare_card_hw(trans)) { |
1033 | IWL_WARN(trans, "Exit HW not ready\n"); | 450 | IWL_WARN(trans, "Exit HW not ready\n"); |
1034 | return -EIO; | 451 | return -EIO; |
1035 | } | 452 | } |
@@ -1046,7 +463,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, | |||
1046 | 463 | ||
1047 | iwl_write32(trans, CSR_INT, 0xFFFFFFFF); | 464 | iwl_write32(trans, CSR_INT, 0xFFFFFFFF); |
1048 | 465 | ||
1049 | ret = iwl_nic_init(trans); | 466 | ret = iwl_pcie_nic_init(trans); |
1050 | if (ret) { | 467 | if (ret) { |
1051 | IWL_ERR(trans, "Unable to init nic\n"); | 468 | IWL_ERR(trans, "Unable to init nic\n"); |
1052 | return ret; | 469 | return ret; |
@@ -1066,129 +483,13 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, | |||
1066 | iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | 483 | iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); |
1067 | 484 | ||
1068 | /* Load the given image to the HW */ | 485 | /* Load the given image to the HW */ |
1069 | return iwl_load_given_ucode(trans, fw); | 486 | return iwl_pcie_load_given_ucode(trans, fw); |
1070 | } | ||
1071 | |||
1072 | /* | ||
1073 | * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask | ||
1074 | */ | ||
1075 | static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask) | ||
1076 | { | ||
1077 | struct iwl_trans_pcie __maybe_unused *trans_pcie = | ||
1078 | IWL_TRANS_GET_PCIE_TRANS(trans); | ||
1079 | |||
1080 | iwl_write_prph(trans, SCD_TXFACT, mask); | ||
1081 | } | ||
1082 | |||
1083 | static void iwl_tx_start(struct iwl_trans *trans, u32 scd_base_addr) | ||
1084 | { | ||
1085 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
1086 | u32 a; | ||
1087 | int chan; | ||
1088 | u32 reg_val; | ||
1089 | |||
1090 | /* make sure all queues are not stopped/used */ | ||
1091 | memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); | ||
1092 | memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); | ||
1093 | |||
1094 | trans_pcie->scd_base_addr = | ||
1095 | iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); | ||
1096 | |||
1097 | WARN_ON(scd_base_addr != 0 && | ||
1098 | scd_base_addr != trans_pcie->scd_base_addr); | ||
1099 | |||
1100 | a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND; | ||
1101 | /* reset context data memory */ | ||
1102 | for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND; | ||
1103 | a += 4) | ||
1104 | iwl_write_targ_mem(trans, a, 0); | ||
1105 | /* reset tx status memory */ | ||
1106 | for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND; | ||
1107 | a += 4) | ||
1108 | iwl_write_targ_mem(trans, a, 0); | ||
1109 | for (; a < trans_pcie->scd_base_addr + | ||
1110 | SCD_TRANS_TBL_OFFSET_QUEUE( | ||
1111 | trans->cfg->base_params->num_of_queues); | ||
1112 | a += 4) | ||
1113 | iwl_write_targ_mem(trans, a, 0); | ||
1114 | |||
1115 | iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, | ||
1116 | trans_pcie->scd_bc_tbls.dma >> 10); | ||
1117 | |||
1118 | /* The chain extension of the SCD doesn't work well. This feature is | ||
1119 | * enabled by default by the HW, so we need to disable it manually. | ||
1120 | */ | ||
1121 | iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); | ||
1122 | |||
1123 | iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, | ||
1124 | trans_pcie->cmd_fifo); | ||
1125 | |||
1126 | /* Activate all Tx DMA/FIFO channels */ | ||
1127 | iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7)); | ||
1128 | |||
1129 | /* Enable DMA channel */ | ||
1130 | for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++) | ||
1131 | iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), | ||
1132 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | | ||
1133 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); | ||
1134 | |||
1135 | /* Update FH chicken bits */ | ||
1136 | reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG); | ||
1137 | iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG, | ||
1138 | reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); | ||
1139 | |||
1140 | /* Enable L1-Active */ | ||
1141 | iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, | ||
1142 | APMG_PCIDEV_STT_VAL_L1_ACT_DIS); | ||
1143 | } | 487 | } |
1144 | 488 | ||
1145 | static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) | 489 | static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) |
1146 | { | 490 | { |
1147 | iwl_reset_ict(trans); | 491 | iwl_pcie_reset_ict(trans); |
1148 | iwl_tx_start(trans, scd_addr); | 492 | iwl_pcie_tx_start(trans, scd_addr); |
1149 | } | ||
1150 | |||
1151 | /** | ||
1152 | * iwlagn_txq_ctx_stop - Stop all Tx DMA channels | ||
1153 | */ | ||
1154 | static int iwl_trans_tx_stop(struct iwl_trans *trans) | ||
1155 | { | ||
1156 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
1157 | int ch, txq_id, ret; | ||
1158 | unsigned long flags; | ||
1159 | |||
1160 | /* Turn off all Tx DMA fifos */ | ||
1161 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | ||
1162 | |||
1163 | iwl_trans_txq_set_sched(trans, 0); | ||
1164 | |||
1165 | /* Stop each Tx DMA channel, and wait for it to be idle */ | ||
1166 | for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { | ||
1167 | iwl_write_direct32(trans, | ||
1168 | FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); | ||
1169 | ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG, | ||
1170 | FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000); | ||
1171 | if (ret < 0) | ||
1172 | IWL_ERR(trans, | ||
1173 | "Failing on timeout while stopping DMA channel %d [0x%08x]\n", | ||
1174 | ch, | ||
1175 | iwl_read_direct32(trans, | ||
1176 | FH_TSSR_TX_STATUS_REG)); | ||
1177 | } | ||
1178 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
1179 | |||
1180 | if (!trans_pcie->txq) { | ||
1181 | IWL_WARN(trans, | ||
1182 | "Stopping tx queues that aren't allocated...\n"); | ||
1183 | return 0; | ||
1184 | } | ||
1185 | |||
1186 | /* Unmap DMA from host system and free skb's */ | ||
1187 | for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; | ||
1188 | txq_id++) | ||
1189 | iwl_tx_queue_unmap(trans, txq_id); | ||
1190 | |||
1191 | return 0; | ||
1192 | } | 493 | } |
1193 | 494 | ||
1194 | static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) | 495 | static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) |
@@ -1202,7 +503,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) | |||
1202 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | 503 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); |
1203 | 504 | ||
1204 | /* device going down, Stop using ICT table */ | 505 | /* device going down, Stop using ICT table */ |
1205 | iwl_disable_ict(trans); | 506 | iwl_pcie_disable_ict(trans); |
1206 | 507 | ||
1207 | /* | 508 | /* |
1208 | * If a HW restart happens during firmware loading, | 509 | * If a HW restart happens during firmware loading, |
@@ -1212,8 +513,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) | |||
1212 | * already dead. | 513 | * already dead. |
1213 | */ | 514 | */ |
1214 | if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) { | 515 | if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) { |
1215 | iwl_trans_tx_stop(trans); | 516 | iwl_pcie_tx_stop(trans); |
1216 | iwl_trans_rx_stop(trans); | 517 | iwl_pcie_rx_stop(trans); |
1217 | 518 | ||
1218 | /* Power-down device's busmaster DMA clocks */ | 519 | /* Power-down device's busmaster DMA clocks */ |
1219 | iwl_write_prph(trans, APMG_CLK_DIS_REG, | 520 | iwl_write_prph(trans, APMG_CLK_DIS_REG, |
@@ -1226,7 +527,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) | |||
1226 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | 527 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); |
1227 | 528 | ||
1228 | /* Stop the device, and put it in low power state */ | 529 | /* Stop the device, and put it in low power state */ |
1229 | iwl_apm_stop(trans); | 530 | iwl_pcie_apm_stop(trans); |
1230 | 531 | ||
1231 | /* Upon stop, the APM issues an interrupt if HW RF kill is set. | 532 | /* Upon stop, the APM issues an interrupt if HW RF kill is set. |
1232 | * Clean again the interrupt here | 533 | * Clean again the interrupt here |
@@ -1265,171 +566,6 @@ static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans) | |||
1265 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | 566 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); |
1266 | } | 567 | } |
1267 | 568 | ||
1268 | static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, | ||
1269 | struct iwl_device_cmd *dev_cmd, int txq_id) | ||
1270 | { | ||
1271 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
1272 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
1273 | struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload; | ||
1274 | struct iwl_cmd_meta *out_meta; | ||
1275 | struct iwl_tx_queue *txq; | ||
1276 | struct iwl_queue *q; | ||
1277 | dma_addr_t phys_addr = 0; | ||
1278 | dma_addr_t txcmd_phys; | ||
1279 | dma_addr_t scratch_phys; | ||
1280 | u16 len, firstlen, secondlen; | ||
1281 | u8 wait_write_ptr = 0; | ||
1282 | __le16 fc = hdr->frame_control; | ||
1283 | u8 hdr_len = ieee80211_hdrlen(fc); | ||
1284 | u16 __maybe_unused wifi_seq; | ||
1285 | |||
1286 | txq = &trans_pcie->txq[txq_id]; | ||
1287 | q = &txq->q; | ||
1288 | |||
1289 | if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) { | ||
1290 | WARN_ON_ONCE(1); | ||
1291 | return -EINVAL; | ||
1292 | } | ||
1293 | |||
1294 | spin_lock(&txq->lock); | ||
1295 | |||
1296 | /* In AGG mode, the index in the ring must correspond to the WiFi | ||
1297 | * sequence number. This is a HW requirement to help the SCD to parse | ||
1298 | * the BA. | ||
1299 | * Check here that the packets are in the right place on the ring. | ||
1300 | */ | ||
1301 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1302 | wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); | ||
1303 | WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) && | ||
1304 | ((wifi_seq & 0xff) != q->write_ptr), | ||
1305 | "Q: %d WiFi Seq %d tfdNum %d", | ||
1306 | txq_id, wifi_seq, q->write_ptr); | ||
1307 | #endif | ||
1308 | |||
1309 | /* Set up driver data for this TFD */ | ||
1310 | txq->entries[q->write_ptr].skb = skb; | ||
1311 | txq->entries[q->write_ptr].cmd = dev_cmd; | ||
1312 | |||
1313 | dev_cmd->hdr.cmd = REPLY_TX; | ||
1314 | dev_cmd->hdr.sequence = | ||
1315 | cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | | ||
1316 | INDEX_TO_SEQ(q->write_ptr))); | ||
1317 | |||
1318 | /* Set up first empty entry in queue's array of Tx/cmd buffers */ | ||
1319 | out_meta = &txq->entries[q->write_ptr].meta; | ||
1320 | |||
1321 | /* | ||
1322 | * Use the first empty entry in this queue's command buffer array | ||
1323 | * to contain the Tx command and MAC header concatenated together | ||
1324 | * (payload data will be in another buffer). | ||
1325 | * Size of this varies, due to varying MAC header length. | ||
1326 | * If end is not dword aligned, we'll have 2 extra bytes at the end | ||
1327 | * of the MAC header (device reads on dword boundaries). | ||
1328 | * We'll tell device about this padding later. | ||
1329 | */ | ||
1330 | len = sizeof(struct iwl_tx_cmd) + | ||
1331 | sizeof(struct iwl_cmd_header) + hdr_len; | ||
1332 | firstlen = (len + 3) & ~3; | ||
1333 | |||
1334 | /* Tell NIC about any 2-byte padding after MAC header */ | ||
1335 | if (firstlen != len) | ||
1336 | tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; | ||
1337 | |||
1338 | /* Physical address of this Tx command's header (not MAC header!), | ||
1339 | * within command buffer array. */ | ||
1340 | txcmd_phys = dma_map_single(trans->dev, | ||
1341 | &dev_cmd->hdr, firstlen, | ||
1342 | DMA_BIDIRECTIONAL); | ||
1343 | if (unlikely(dma_mapping_error(trans->dev, txcmd_phys))) | ||
1344 | goto out_err; | ||
1345 | dma_unmap_addr_set(out_meta, mapping, txcmd_phys); | ||
1346 | dma_unmap_len_set(out_meta, len, firstlen); | ||
1347 | |||
1348 | if (!ieee80211_has_morefrags(fc)) { | ||
1349 | txq->need_update = 1; | ||
1350 | } else { | ||
1351 | wait_write_ptr = 1; | ||
1352 | txq->need_update = 0; | ||
1353 | } | ||
1354 | |||
1355 | /* Set up TFD's 2nd entry to point directly to remainder of skb, | ||
1356 | * if any (802.11 null frames have no payload). */ | ||
1357 | secondlen = skb->len - hdr_len; | ||
1358 | if (secondlen > 0) { | ||
1359 | phys_addr = dma_map_single(trans->dev, skb->data + hdr_len, | ||
1360 | secondlen, DMA_TO_DEVICE); | ||
1361 | if (unlikely(dma_mapping_error(trans->dev, phys_addr))) { | ||
1362 | dma_unmap_single(trans->dev, | ||
1363 | dma_unmap_addr(out_meta, mapping), | ||
1364 | dma_unmap_len(out_meta, len), | ||
1365 | DMA_BIDIRECTIONAL); | ||
1366 | goto out_err; | ||
1367 | } | ||
1368 | } | ||
1369 | |||
1370 | /* Attach buffers to TFD */ | ||
1371 | iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1); | ||
1372 | if (secondlen > 0) | ||
1373 | iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, | ||
1374 | secondlen, 0); | ||
1375 | |||
1376 | scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + | ||
1377 | offsetof(struct iwl_tx_cmd, scratch); | ||
1378 | |||
1379 | /* take back ownership of DMA buffer to enable update */ | ||
1380 | dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen, | ||
1381 | DMA_BIDIRECTIONAL); | ||
1382 | tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); | ||
1383 | tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); | ||
1384 | |||
1385 | IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n", | ||
1386 | le16_to_cpu(dev_cmd->hdr.sequence)); | ||
1387 | IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); | ||
1388 | |||
1389 | /* Set up entry for this TFD in Tx byte-count array */ | ||
1390 | iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len)); | ||
1391 | |||
1392 | dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen, | ||
1393 | DMA_BIDIRECTIONAL); | ||
1394 | |||
1395 | trace_iwlwifi_dev_tx(trans->dev, skb, | ||
1396 | &txq->tfds[txq->q.write_ptr], | ||
1397 | sizeof(struct iwl_tfd), | ||
1398 | &dev_cmd->hdr, firstlen, | ||
1399 | skb->data + hdr_len, secondlen); | ||
1400 | trace_iwlwifi_dev_tx_data(trans->dev, skb, | ||
1401 | skb->data + hdr_len, secondlen); | ||
1402 | |||
1403 | /* start timer if queue currently empty */ | ||
1404 | if (txq->need_update && q->read_ptr == q->write_ptr && | ||
1405 | trans_pcie->wd_timeout) | ||
1406 | mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); | ||
1407 | |||
1408 | /* Tell device the write index *just past* this latest filled TFD */ | ||
1409 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); | ||
1410 | iwl_txq_update_write_ptr(trans, txq); | ||
1411 | |||
1412 | /* | ||
1413 | * At this point the frame is "transmitted" successfully | ||
1414 | * and we will get a TX status notification eventually, | ||
1415 | * regardless of the value of ret. "ret" only indicates | ||
1416 | * whether or not we should update the write pointer. | ||
1417 | */ | ||
1418 | if (iwl_queue_space(q) < q->high_mark) { | ||
1419 | if (wait_write_ptr) { | ||
1420 | txq->need_update = 1; | ||
1421 | iwl_txq_update_write_ptr(trans, txq); | ||
1422 | } else { | ||
1423 | iwl_stop_queue(trans, txq); | ||
1424 | } | ||
1425 | } | ||
1426 | spin_unlock(&txq->lock); | ||
1427 | return 0; | ||
1428 | out_err: | ||
1429 | spin_unlock(&txq->lock); | ||
1430 | return -1; | ||
1431 | } | ||
1432 | |||
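In the TX path that moves out of this file, the TX command plus MAC header is rounded up to a dword boundary before being DMA-mapped, and TX_CMD_FLG_MH_PAD_MSK tells the firmware when two pad bytes were inserted. The rounding itself, with example sizes:

    #include <stdio.h>

    int main(void)
    {
            unsigned int len = 26 + 40;                /* example header + cmd sizes */
            unsigned int firstlen = (len + 3) & ~3u;   /* round up to 4 bytes */

            printf("len=%u firstlen=%u pad=%u\n", len, firstlen, firstlen - len);
            /* a non-zero pad is when TX_CMD_FLG_MH_PAD_MSK gets set */
            return 0;
    }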
1433 | static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) | 569 | static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) |
1434 | { | 570 | { |
1435 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 571 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
@@ -1440,29 +576,28 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) | |||
1440 | 576 | ||
1441 | if (!trans_pcie->irq_requested) { | 577 | if (!trans_pcie->irq_requested) { |
1442 | tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long)) | 578 | tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long)) |
1443 | iwl_irq_tasklet, (unsigned long)trans); | 579 | iwl_pcie_tasklet, (unsigned long)trans); |
1444 | 580 | ||
1445 | iwl_alloc_isr_ict(trans); | 581 | iwl_pcie_alloc_ict(trans); |
1446 | 582 | ||
1447 | err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED, | 583 | err = request_irq(trans_pcie->irq, iwl_pcie_isr_ict, |
1448 | DRV_NAME, trans); | 584 | IRQF_SHARED, DRV_NAME, trans); |
1449 | if (err) { | 585 | if (err) { |
1450 | IWL_ERR(trans, "Error allocating IRQ %d\n", | 586 | IWL_ERR(trans, "Error allocating IRQ %d\n", |
1451 | trans_pcie->irq); | 587 | trans_pcie->irq); |
1452 | goto error; | 588 | goto error; |
1453 | } | 589 | } |
1454 | 590 | ||
1455 | INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish); | ||
1456 | trans_pcie->irq_requested = true; | 591 | trans_pcie->irq_requested = true; |
1457 | } | 592 | } |
1458 | 593 | ||
1459 | err = iwl_prepare_card_hw(trans); | 594 | err = iwl_pcie_prepare_card_hw(trans); |
1460 | if (err) { | 595 | if (err) { |
1461 | IWL_ERR(trans, "Error while preparing HW: %d\n", err); | 596 | IWL_ERR(trans, "Error while preparing HW: %d\n", err); |
1462 | goto err_free_irq; | 597 | goto err_free_irq; |
1463 | } | 598 | } |
1464 | 599 | ||
1465 | iwl_apm_init(trans); | 600 | iwl_pcie_apm_init(trans); |
1466 | 601 | ||
1467 | /* From now on, the op_mode will be kept updated about RF kill state */ | 602 | /* From now on, the op_mode will be kept updated about RF kill state */ |
1468 | iwl_enable_rfkill_int(trans); | 603 | iwl_enable_rfkill_int(trans); |
@@ -1476,7 +611,7 @@ err_free_irq: | |||
1476 | trans_pcie->irq_requested = false; | 611 | trans_pcie->irq_requested = false; |
1477 | free_irq(trans_pcie->irq, trans); | 612 | free_irq(trans_pcie->irq, trans); |
1478 | error: | 613 | error: |
1479 | iwl_free_isr_ict(trans); | 614 | iwl_pcie_free_ict(trans); |
1480 | tasklet_kill(&trans_pcie->irq_tasklet); | 615 | tasklet_kill(&trans_pcie->irq_tasklet); |
1481 | return err; | 616 | return err; |
1482 | } | 617 | } |
@@ -1492,7 +627,7 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans, | |||
1492 | iwl_disable_interrupts(trans); | 627 | iwl_disable_interrupts(trans); |
1493 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | 628 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); |
1494 | 629 | ||
1495 | iwl_apm_stop(trans); | 630 | iwl_pcie_apm_stop(trans); |
1496 | 631 | ||
1497 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | 632 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); |
1498 | iwl_disable_interrupts(trans); | 633 | iwl_disable_interrupts(trans); |
@@ -1516,27 +651,6 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans, | |||
1516 | } | 651 | } |
1517 | } | 652 | } |
1518 | 653 | ||
1519 | static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, | ||
1520 | struct sk_buff_head *skbs) | ||
1521 | { | ||
1522 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
1523 | struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; | ||
1524 | /* n_bd is usually 256 => n_bd - 1 = 0xff */ | ||
1525 | int tfd_num = ssn & (txq->q.n_bd - 1); | ||
1526 | |||
1527 | spin_lock(&txq->lock); | ||
1528 | |||
1529 | if (txq->q.read_ptr != tfd_num) { | ||
1530 | IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", | ||
1531 | txq_id, txq->q.read_ptr, tfd_num, ssn); | ||
1532 | iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs); | ||
1533 | if (iwl_queue_space(&txq->q) > txq->q.low_mark) | ||
1534 | iwl_wake_queue(trans, txq); | ||
1535 | } | ||
1536 | |||
1537 | spin_unlock(&txq->lock); | ||
1538 | } | ||
1539 | |||
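iwl_trans_pcie_reclaim(), also moving to the tx code, converts the caller's starting sequence number into a ring index by masking with n_bd - 1; with the usual 256-entry ring that keeps only the low byte, as the comment notes. Quick illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int n_bd = 256;        /* ring size, power of two */
            unsigned int ssn  = 0x5a7;      /* example sequence number */
            unsigned int tfd_num = ssn & (n_bd - 1);

            printf("ssn 0x%03x -> tfd %u\n", ssn, tfd_num);   /* tfd 167 */
            return 0;
    }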
1540 | static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) | 654 | static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) |
1541 | { | 655 | { |
1542 | writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); | 656 | writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); |
@@ -1583,12 +697,12 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) | |||
1583 | { | 697 | { |
1584 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 698 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1585 | 699 | ||
1586 | iwl_trans_pcie_tx_free(trans); | 700 | iwl_pcie_tx_free(trans); |
1587 | iwl_trans_pcie_rx_free(trans); | 701 | iwl_pcie_rx_free(trans); |
1588 | 702 | ||
1589 | if (trans_pcie->irq_requested == true) { | 703 | if (trans_pcie->irq_requested == true) { |
1590 | free_irq(trans_pcie->irq, trans); | 704 | free_irq(trans_pcie->irq, trans); |
1591 | iwl_free_isr_ict(trans); | 705 | iwl_pcie_free_ict(trans); |
1592 | } | 706 | } |
1593 | 707 | ||
1594 | pci_disable_msi(trans_pcie->pci_dev); | 708 | pci_disable_msi(trans_pcie->pci_dev); |
@@ -1634,10 +748,10 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans) | |||
1634 | 748 | ||
1635 | #define IWL_FLUSH_WAIT_MS 2000 | 749 | #define IWL_FLUSH_WAIT_MS 2000 |
1636 | 750 | ||
1637 | static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans) | 751 | static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans) |
1638 | { | 752 | { |
1639 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 753 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1640 | struct iwl_tx_queue *txq; | 754 | struct iwl_txq *txq; |
1641 | struct iwl_queue *q; | 755 | struct iwl_queue *q; |
1642 | int cnt; | 756 | int cnt; |
1643 | unsigned long now = jiffies; | 757 | unsigned long now = jiffies; |
@@ -1681,7 +795,7 @@ static const char *get_fh_string(int cmd) | |||
1681 | #undef IWL_CMD | 795 | #undef IWL_CMD |
1682 | } | 796 | } |
1683 | 797 | ||
1684 | int iwl_dump_fh(struct iwl_trans *trans, char **buf) | 798 | int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf) |
1685 | { | 799 | { |
1686 | int i; | 800 | int i; |
1687 | static const u32 fh_tbl[] = { | 801 | static const u32 fh_tbl[] = { |
@@ -1760,7 +874,7 @@ static const char *get_csr_string(int cmd) | |||
1760 | #undef IWL_CMD | 874 | #undef IWL_CMD |
1761 | } | 875 | } |
1762 | 876 | ||
1763 | void iwl_dump_csr(struct iwl_trans *trans) | 877 | void iwl_pcie_dump_csr(struct iwl_trans *trans) |
1764 | { | 878 | { |
1765 | int i; | 879 | int i; |
1766 | static const u32 csr_tbl[] = { | 880 | static const u32 csr_tbl[] = { |
@@ -1817,7 +931,6 @@ static ssize_t iwl_dbgfs_##name##_write(struct file *file, \ | |||
1817 | const char __user *user_buf, \ | 931 | const char __user *user_buf, \ |
1818 | size_t count, loff_t *ppos); | 932 | size_t count, loff_t *ppos); |
1819 | 933 | ||
1820 | |||
1821 | #define DEBUGFS_READ_FILE_OPS(name) \ | 934 | #define DEBUGFS_READ_FILE_OPS(name) \ |
1822 | DEBUGFS_READ_FUNC(name); \ | 935 | DEBUGFS_READ_FUNC(name); \ |
1823 | static const struct file_operations iwl_dbgfs_##name##_ops = { \ | 936 | static const struct file_operations iwl_dbgfs_##name##_ops = { \ |
@@ -1850,7 +963,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, | |||
1850 | { | 963 | { |
1851 | struct iwl_trans *trans = file->private_data; | 964 | struct iwl_trans *trans = file->private_data; |
1852 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 965 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1853 | struct iwl_tx_queue *txq; | 966 | struct iwl_txq *txq; |
1854 | struct iwl_queue *q; | 967 | struct iwl_queue *q; |
1855 | char *buf; | 968 | char *buf; |
1856 | int pos = 0; | 969 | int pos = 0; |
@@ -1887,7 +1000,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, | |||
1887 | { | 1000 | { |
1888 | struct iwl_trans *trans = file->private_data; | 1001 | struct iwl_trans *trans = file->private_data; |
1889 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1002 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
1890 | struct iwl_rx_queue *rxq = &trans_pcie->rxq; | 1003 | struct iwl_rxq *rxq = &trans_pcie->rxq; |
1891 | char buf[256]; | 1004 | char buf[256]; |
1892 | int pos = 0; | 1005 | int pos = 0; |
1893 | const size_t bufsz = sizeof(buf); | 1006 | const size_t bufsz = sizeof(buf); |
@@ -2006,7 +1119,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file, | |||
2006 | if (sscanf(buf, "%d", &csr) != 1) | 1119 | if (sscanf(buf, "%d", &csr) != 1) |
2007 | return -EFAULT; | 1120 | return -EFAULT; |
2008 | 1121 | ||
2009 | iwl_dump_csr(trans); | 1122 | iwl_pcie_dump_csr(trans); |
2010 | 1123 | ||
2011 | return count; | 1124 | return count; |
2012 | } | 1125 | } |
@@ -2020,7 +1133,7 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, | |||
2020 | int pos = 0; | 1133 | int pos = 0; |
2021 | ssize_t ret = -EFAULT; | 1134 | ssize_t ret = -EFAULT; |
2022 | 1135 | ||
2023 | ret = pos = iwl_dump_fh(trans, &buf); | 1136 | ret = pos = iwl_pcie_dump_fh(trans, &buf); |
2024 | if (buf) { | 1137 | if (buf) { |
2025 | ret = simple_read_from_buffer(user_buf, | 1138 | ret = simple_read_from_buffer(user_buf, |
2026 | count, ppos, buf, pos); | 1139 | count, ppos, buf, pos); |
@@ -2089,7 +1202,7 @@ static const struct iwl_trans_ops trans_ops_pcie = { | |||
2089 | 1202 | ||
2090 | .wowlan_suspend = iwl_trans_pcie_wowlan_suspend, | 1203 | .wowlan_suspend = iwl_trans_pcie_wowlan_suspend, |
2091 | 1204 | ||
2092 | .send_cmd = iwl_trans_pcie_send_cmd, | 1205 | .send_cmd = iwl_trans_pcie_send_hcmd, |
2093 | 1206 | ||
2094 | .tx = iwl_trans_pcie_tx, | 1207 | .tx = iwl_trans_pcie_tx, |
2095 | .reclaim = iwl_trans_pcie_reclaim, | 1208 | .reclaim = iwl_trans_pcie_reclaim, |
@@ -2099,7 +1212,7 @@ static const struct iwl_trans_ops trans_ops_pcie = { | |||
2099 | 1212 | ||
2100 | .dbgfs_register = iwl_trans_pcie_dbgfs_register, | 1213 | .dbgfs_register = iwl_trans_pcie_dbgfs_register, |
2101 | 1214 | ||
2102 | .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty, | 1215 | .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty, |
2103 | 1216 | ||
2104 | #ifdef CONFIG_PM_SLEEP | 1217 | #ifdef CONFIG_PM_SLEEP |
2105 | .suspend = iwl_trans_pcie_suspend, | 1218 | .suspend = iwl_trans_pcie_suspend, |
@@ -2124,7 +1237,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
2124 | trans = kzalloc(sizeof(struct iwl_trans) + | 1237 | trans = kzalloc(sizeof(struct iwl_trans) + |
2125 | sizeof(struct iwl_trans_pcie), GFP_KERNEL); | 1238 | sizeof(struct iwl_trans_pcie), GFP_KERNEL); |
2126 | 1239 | ||
2127 | if (WARN_ON(!trans)) | 1240 | if (!trans) |
2128 | return NULL; | 1241 | return NULL; |
2129 | 1242 | ||
2130 | trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1243 | trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
@@ -2180,8 +1293,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
2180 | pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); | 1293 | pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); |
2181 | 1294 | ||
2182 | err = pci_enable_msi(pdev); | 1295 | err = pci_enable_msi(pdev); |
2183 | if (err) | 1296 | if (err) { |
2184 | dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err); | 1297 | dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err); |
1298 | /* enable rfkill interrupt: hw bug w/a */ | ||
1299 | pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); | ||
1300 | if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { | ||
1301 | pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; | ||
1302 | pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); | ||
1303 | } | ||
1304 | } | ||
2185 | 1305 | ||
2186 | trans->dev = &pdev->dev; | 1306 | trans->dev = &pdev->dev; |
2187 | trans_pcie->irq = pdev->irq; | 1307 | trans_pcie->irq = pdev->irq; |
@@ -2191,14 +1311,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
2191 | snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), | 1311 | snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), |
2192 | "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); | 1312 | "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); |
2193 | 1313 | ||
2194 | /* TODO: Move this away, not needed if not MSI */ | ||
2195 | /* enable rfkill interrupt: hw bug w/a */ | ||
2196 | pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); | ||
2197 | if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { | ||
2198 | pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; | ||
2199 | pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); | ||
2200 | } | ||
2201 | |||
2202 | /* Initialize the wait queue for commands */ | 1314 | /* Initialize the wait queue for commands */ |
2203 | init_waitqueue_head(&trans_pcie->wait_command_queue); | 1315 | init_waitqueue_head(&trans_pcie->wait_command_queue); |
2204 | spin_lock_init(&trans->reg_lock); | 1316 | spin_lock_init(&trans->reg_lock); |
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index dcc7e1256e39..6c5b867c353a 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c | |||
@@ -42,12 +42,170 @@ | |||
42 | #define IWL_TX_CRC_SIZE 4 | 42 | #define IWL_TX_CRC_SIZE 4 |
43 | #define IWL_TX_DELIMITER_SIZE 4 | 43 | #define IWL_TX_DELIMITER_SIZE 4 |
44 | 44 | ||
45 | /** | 45 | /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** |
46 | * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array | 46 | * DMA services |
47 | * | ||
48 | * Theory of operation | ||
49 | * | ||
50 | * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer | ||
51 | * of buffer descriptors, each of which points to one or more data buffers for | ||
52 | * the device to read from or fill. Driver and device exchange status of each | ||
53 | * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty | ||
54 | * entries in each circular buffer, to protect against confusing empty and full | ||
55 | * queue states. | ||
56 | * | ||
57 | * The device reads or writes the data in the queues via the device's several | ||
58 | * DMA/FIFO channels. Each queue is mapped to a single DMA channel. | ||
59 | * | ||
60 | * For Tx queue, there are low mark and high mark limits. If, after queuing | ||
61 | * the packet for Tx, free space become < low mark, Tx queue stopped. When | ||
62 | * reclaiming packets (on 'tx done IRQ), if free space become > high mark, | ||
63 | * Tx queue resumed. | ||
64 | * | ||
65 | ***************************************************/ | ||
66 | static int iwl_queue_space(const struct iwl_queue *q) | ||
67 | { | ||
68 | int s = q->read_ptr - q->write_ptr; | ||
69 | |||
70 | if (q->read_ptr > q->write_ptr) | ||
71 | s -= q->n_bd; | ||
72 | |||
73 | if (s <= 0) | ||
74 | s += q->n_window; | ||
75 | /* keep some reserve to not confuse empty and full situations */ | ||
76 | s -= 2; | ||
77 | if (s < 0) | ||
78 | s = 0; | ||
79 | return s; | ||
80 | } | ||
81 | |||
82 | /* | ||
83 | * iwl_queue_init - Initialize queue's high/low-water and read/write indexes | ||
84 | */ | ||
85 | static int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id) | ||
86 | { | ||
87 | q->n_bd = count; | ||
88 | q->n_window = slots_num; | ||
89 | q->id = id; | ||
90 | |||
91 | /* count must be power-of-two size, otherwise iwl_queue_inc_wrap | ||
92 | * and iwl_queue_dec_wrap are broken. */ | ||
93 | if (WARN_ON(!is_power_of_2(count))) | ||
94 | return -EINVAL; | ||
95 | |||
96 | /* slots_num must be power-of-two size, otherwise | ||
97 | * get_cmd_index is broken. */ | ||
98 | if (WARN_ON(!is_power_of_2(slots_num))) | ||
99 | return -EINVAL; | ||
100 | |||
101 | q->low_mark = q->n_window / 4; | ||
102 | if (q->low_mark < 4) | ||
103 | q->low_mark = 4; | ||
104 | |||
105 | q->high_mark = q->n_window / 8; | ||
106 | if (q->high_mark < 2) | ||
107 | q->high_mark = 2; | ||
108 | |||
109 | q->write_ptr = 0; | ||
110 | q->read_ptr = 0; | ||
111 | |||
112 | return 0; | ||
113 | } | ||
114 | |||
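The theory-of-operation block moved to the top of tx.c, together with iwl_queue_init() above, describes the ring bookkeeping: n_bd buffer descriptors, independent read and write pointers, a two-entry reserve so a full ring never looks empty, and low/high watermarks derived from the n_window slots. The stand-alone sketch below models that accounting in plain C; it is an illustration of the scheme using a simplified, consistent n_bd-based space calculation, not the driver code.

#include <stdio.h>

#define N_BD 256                 /* descriptors in the ring (power of two) */

struct toy_queue {
        int read_ptr;            /* next descriptor the device will consume */
        int write_ptr;           /* next descriptor the driver will fill */
};

/* advance an index with wrap-around, like iwl_queue_inc_wrap() */
static int toy_inc_wrap(int idx)
{
        return (idx + 1) & (N_BD - 1);
}

/* free entries, keeping a reserve of 2 so "full" never looks like "empty" */
static int toy_space(const struct toy_queue *q)
{
        int used = (q->write_ptr - q->read_ptr) & (N_BD - 1);
        int space = N_BD - used - 2;

        return space < 0 ? 0 : space;
}

int main(void)
{
        struct toy_queue q = { .read_ptr = 250, .write_ptr = 4 };

        /* the write pointer has already wrapped past the end of the ring */
        printf("used=%d space=%d\n",
               (q.write_ptr - q.read_ptr) & (N_BD - 1), toy_space(&q));

        q.write_ptr = toy_inc_wrap(q.write_ptr);   /* queue one more frame */
        printf("space after tx=%d\n", toy_space(&q));
        return 0;
}

For example, with slots_num = 256 the marks computed by iwl_queue_init() come out to low_mark = 64 and high_mark = 32; the clamps to 4 and 2 only matter for very small windows.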
115 | static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, | ||
116 | struct iwl_dma_ptr *ptr, size_t size) | ||
117 | { | ||
118 | if (WARN_ON(ptr->addr)) | ||
119 | return -EINVAL; | ||
120 | |||
121 | ptr->addr = dma_alloc_coherent(trans->dev, size, | ||
122 | &ptr->dma, GFP_KERNEL); | ||
123 | if (!ptr->addr) | ||
124 | return -ENOMEM; | ||
125 | ptr->size = size; | ||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, | ||
130 | struct iwl_dma_ptr *ptr) | ||
131 | { | ||
132 | if (unlikely(!ptr->addr)) | ||
133 | return; | ||
134 | |||
135 | dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma); | ||
136 | memset(ptr, 0, sizeof(*ptr)); | ||
137 | } | ||
138 | |||
139 | static void iwl_pcie_txq_stuck_timer(unsigned long data) | ||
140 | { | ||
141 | struct iwl_txq *txq = (void *)data; | ||
142 | struct iwl_queue *q = &txq->q; | ||
143 | struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; | ||
144 | struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); | ||
145 | u32 scd_sram_addr = trans_pcie->scd_base_addr + | ||
146 | SCD_TX_STTS_QUEUE_OFFSET(txq->q.id); | ||
147 | u8 buf[16]; | ||
148 | int i; | ||
149 | |||
150 | spin_lock(&txq->lock); | ||
151 | /* check if triggered erroneously */ | ||
152 | if (txq->q.read_ptr == txq->q.write_ptr) { | ||
153 | spin_unlock(&txq->lock); | ||
154 | return; | ||
155 | } | ||
156 | spin_unlock(&txq->lock); | ||
157 | |||
158 | IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id, | ||
159 | jiffies_to_msecs(trans_pcie->wd_timeout)); | ||
160 | IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", | ||
161 | txq->q.read_ptr, txq->q.write_ptr); | ||
162 | |||
163 | iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); | ||
164 | |||
165 | iwl_print_hex_error(trans, buf, sizeof(buf)); | ||
166 | |||
167 | for (i = 0; i < FH_TCSR_CHNL_NUM; i++) | ||
168 | IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i, | ||
169 | iwl_read_direct32(trans, FH_TX_TRB_REG(i))); | ||
170 | |||
171 | for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { | ||
172 | u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i)); | ||
173 | u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; | ||
174 | bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); | ||
175 | u32 tbl_dw = | ||
176 | iwl_read_targ_mem(trans, | ||
177 | trans_pcie->scd_base_addr + | ||
178 | SCD_TRANS_TBL_OFFSET_QUEUE(i)); | ||
179 | |||
180 | if (i & 0x1) | ||
181 | tbl_dw = (tbl_dw & 0xFFFF0000) >> 16; | ||
182 | else | ||
183 | tbl_dw = tbl_dw & 0x0000FFFF; | ||
184 | |||
185 | IWL_ERR(trans, | ||
186 | "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", | ||
187 | i, active ? "" : "in", fifo, tbl_dw, | ||
188 | iwl_read_prph(trans, | ||
189 | SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1), | ||
190 | iwl_read_prph(trans, SCD_QUEUE_WRPTR(i))); | ||
191 | } | ||
192 | |||
193 | for (i = q->read_ptr; i != q->write_ptr; | ||
194 | i = iwl_queue_inc_wrap(i, q->n_bd)) { | ||
195 | struct iwl_tx_cmd *tx_cmd = | ||
196 | (struct iwl_tx_cmd *)txq->entries[i].cmd->payload; | ||
197 | IWL_ERR(trans, "scratch %d = 0x%08x\n", i, | ||
198 | get_unaligned_le32(&tx_cmd->scratch)); | ||
199 | } | ||
200 | |||
201 | iwl_op_mode_nic_error(trans->op_mode); | ||
202 | } | ||
203 | |||
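In the stuck-timer dump above, each word read from the SCD translation table appears to hold the ra_tid mapping for two queues, and the handler picks the upper or lower 16 bits depending on whether the queue index is odd. A tiny illustration of that halfword selection, using a made-up table word rather than real register contents:

#include <stdint.h>
#include <stdio.h>

/* pick the 16-bit ra_tid slot for queue i out of a shared 32-bit word,
 * mirroring the (i & 0x1) test in iwl_pcie_txq_stuck_timer() */
static uint16_t ra_tid_for_queue(uint32_t tbl_dw, int i)
{
        return (i & 0x1) ? (uint16_t)(tbl_dw >> 16) : (uint16_t)(tbl_dw & 0xFFFF);
}

int main(void)
{
        uint32_t tbl_dw = 0xABCD1234;   /* made-up table word for two queues */

        printf("q4 -> 0x%04x, q5 -> 0x%04x\n",
               ra_tid_for_queue(tbl_dw, 4), ra_tid_for_queue(tbl_dw, 5));
        return 0;
}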
204 | /* | ||
205 | * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array | ||
47 | */ | 206 | */ |
48 | void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, | 207 | static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, |
49 | struct iwl_tx_queue *txq, | 208 | struct iwl_txq *txq, u16 byte_cnt) |
50 | u16 byte_cnt) | ||
51 | { | 209 | { |
52 | struct iwlagn_scd_bc_tbl *scd_bc_tbl; | 210 | struct iwlagn_scd_bc_tbl *scd_bc_tbl; |
53 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 211 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
@@ -88,10 +246,36 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, | |||
88 | tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; | 246 | tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; |
89 | } | 247 | } |
90 | 248 | ||
91 | /** | 249 | static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, |
92 | * iwl_txq_update_write_ptr - Send new write index to hardware | 250 | struct iwl_txq *txq) |
251 | { | ||
252 | struct iwl_trans_pcie *trans_pcie = | ||
253 | IWL_TRANS_GET_PCIE_TRANS(trans); | ||
254 | struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; | ||
255 | int txq_id = txq->q.id; | ||
256 | int read_ptr = txq->q.read_ptr; | ||
257 | u8 sta_id = 0; | ||
258 | __le16 bc_ent; | ||
259 | struct iwl_tx_cmd *tx_cmd = | ||
260 | (void *)txq->entries[txq->q.read_ptr].cmd->payload; | ||
261 | |||
262 | WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); | ||
263 | |||
264 | if (txq_id != trans_pcie->cmd_queue) | ||
265 | sta_id = tx_cmd->sta_id; | ||
266 | |||
267 | bc_ent = cpu_to_le16(1 | (sta_id << 12)); | ||
268 | scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; | ||
269 | |||
270 | if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) | ||
271 | scd_bc_tbl[txq_id]. | ||
272 | tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; | ||
273 | } | ||
274 | |||
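Both iwl_pcie_txq_update_byte_cnt_tbl() and the relocated iwl_pcie_txq_inval_byte_cnt_tbl() write the entry at the TFD index and, for the first TFD_QUEUE_SIZE_BC_DUP indexes, a duplicate at TFD_QUEUE_SIZE_MAX + index, presumably so the scheduler can read a contiguous window across the wrap point. The sketch below models only that mirroring, with the entry encoding reduced to the visible invalidation value (1 | sta_id << 12); Q_MAX and Q_DUP are stand-ins, not the driver's constants:

#include <stdint.h>
#include <stdio.h>

#define Q_MAX 256   /* stand-in for TFD_QUEUE_SIZE_MAX */
#define Q_DUP  64   /* stand-in for TFD_QUEUE_SIZE_BC_DUP */

/* byte-count table: Q_MAX real slots plus Q_DUP mirrored ones */
static uint16_t tfd_offset[Q_MAX + Q_DUP];

/* write one entry and keep its mirror (if any) in sync, as the driver does */
static void bc_tbl_set(int idx, uint16_t sta_id, uint16_t val12)
{
        uint16_t bc_ent = (uint16_t)(val12 | (sta_id << 12));

        tfd_offset[idx] = bc_ent;
        if (idx < Q_DUP)
                tfd_offset[Q_MAX + idx] = bc_ent;
}

int main(void)
{
        bc_tbl_set(3, 5, 1);     /* index 3 is mirrored at Q_MAX + 3 */
        bc_tbl_set(100, 5, 1);   /* index 100 has no mirror */
        printf("[3]=0x%04x [259]=0x%04x [100]=0x%04x [356]=0x%04x\n",
               tfd_offset[3], tfd_offset[Q_MAX + 3],
               tfd_offset[100], tfd_offset[Q_MAX + 100]);
        return 0;
}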
275 | /* | ||
276 | * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware | ||
93 | */ | 277 | */ |
94 | void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq) | 278 | void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) |
95 | { | 279 | { |
96 | u32 reg = 0; | 280 | u32 reg = 0; |
97 | int txq_id = txq->q.id; | 281 | int txq_id = txq->q.id; |
@@ -137,7 +321,7 @@ void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq) | |||
137 | txq->need_update = 0; | 321 | txq->need_update = 0; |
138 | } | 322 | } |
139 | 323 | ||
140 | static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) | 324 | static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) |
141 | { | 325 | { |
142 | struct iwl_tfd_tb *tb = &tfd->tbs[idx]; | 326 | struct iwl_tfd_tb *tb = &tfd->tbs[idx]; |
143 | 327 | ||
@@ -149,15 +333,15 @@ static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) | |||
149 | return addr; | 333 | return addr; |
150 | } | 334 | } |
151 | 335 | ||
152 | static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx) | 336 | static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx) |
153 | { | 337 | { |
154 | struct iwl_tfd_tb *tb = &tfd->tbs[idx]; | 338 | struct iwl_tfd_tb *tb = &tfd->tbs[idx]; |
155 | 339 | ||
156 | return le16_to_cpu(tb->hi_n_len) >> 4; | 340 | return le16_to_cpu(tb->hi_n_len) >> 4; |
157 | } | 341 | } |
158 | 342 | ||
159 | static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, | 343 | static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, |
160 | dma_addr_t addr, u16 len) | 344 | dma_addr_t addr, u16 len) |
161 | { | 345 | { |
162 | struct iwl_tfd_tb *tb = &tfd->tbs[idx]; | 346 | struct iwl_tfd_tb *tb = &tfd->tbs[idx]; |
163 | u16 hi_n_len = len << 4; | 347 | u16 hi_n_len = len << 4; |
@@ -171,19 +355,20 @@ static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, | |||
171 | tfd->num_tbs = idx + 1; | 355 | tfd->num_tbs = idx + 1; |
172 | } | 356 | } |
173 | 357 | ||
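The TB accessors pack a DMA address and a length into a 32-bit lo word plus the 16-bit hi_n_len field: the length occupies the upper 12 bits (len << 4 on store, >> 4 on load), and the low nibble is assumed here to carry address bits 35:32, since the address-reassembly half of iwl_pcie_tfd_tb_get_addr() falls outside the hunk. A pack/unpack sketch under that assumption, with the driver's little-endian conversions dropped for brevity:

#include <stdint.h>
#include <stdio.h>

struct toy_tb {
        uint32_t lo;        /* low 32 bits of the DMA address */
        uint16_t hi_n_len;  /* [15:4] length, [3:0] address bits 35:32 (assumed) */
};

static void toy_tb_set(struct toy_tb *tb, uint64_t addr, uint16_t len)
{
        tb->lo = (uint32_t)addr;
        tb->hi_n_len = (uint16_t)((len << 4) | ((addr >> 32) & 0xF));
}

static uint64_t toy_tb_addr(const struct toy_tb *tb)
{
        return (uint64_t)tb->lo | ((uint64_t)(tb->hi_n_len & 0xF) << 32);
}

static uint16_t toy_tb_len(const struct toy_tb *tb)
{
        return tb->hi_n_len >> 4;
}

int main(void)
{
        struct toy_tb tb;

        toy_tb_set(&tb, 0x3ABCD1234ULL, 128);   /* 36-bit address, 128 bytes */
        printf("addr=0x%llx len=%u\n",
               (unsigned long long)toy_tb_addr(&tb), toy_tb_len(&tb));
        return 0;
}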
174 | static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd) | 358 | static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd) |
175 | { | 359 | { |
176 | return tfd->num_tbs & 0x1f; | 360 | return tfd->num_tbs & 0x1f; |
177 | } | 361 | } |
178 | 362 | ||
179 | static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta, | 363 | static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, |
180 | struct iwl_tfd *tfd, enum dma_data_direction dma_dir) | 364 | struct iwl_cmd_meta *meta, struct iwl_tfd *tfd, |
365 | enum dma_data_direction dma_dir) | ||
181 | { | 366 | { |
182 | int i; | 367 | int i; |
183 | int num_tbs; | 368 | int num_tbs; |
184 | 369 | ||
185 | /* Sanity check on number of chunks */ | 370 | /* Sanity check on number of chunks */ |
186 | num_tbs = iwl_tfd_get_num_tbs(tfd); | 371 | num_tbs = iwl_pcie_tfd_get_num_tbs(tfd); |
187 | 372 | ||
188 | if (num_tbs >= IWL_NUM_OF_TBS) { | 373 | if (num_tbs >= IWL_NUM_OF_TBS) { |
189 | IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); | 374 | IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); |
@@ -200,14 +385,14 @@ static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta, | |||
200 | 385 | ||
201 | /* Unmap chunks, if any. */ | 386 | /* Unmap chunks, if any. */ |
202 | for (i = 1; i < num_tbs; i++) | 387 | for (i = 1; i < num_tbs; i++) |
203 | dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i), | 388 | dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i), |
204 | iwl_tfd_tb_get_len(tfd, i), dma_dir); | 389 | iwl_pcie_tfd_tb_get_len(tfd, i), dma_dir); |
205 | 390 | ||
206 | tfd->num_tbs = 0; | 391 | tfd->num_tbs = 0; |
207 | } | 392 | } |
208 | 393 | ||
209 | /** | 394 | /* |
210 | * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] | 395 | * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] |
211 | * @trans - transport private data | 396 | * @trans - transport private data |
212 | * @txq - tx queue | 397 | * @txq - tx queue |
213 | * @dma_dir - the direction of the DMA mapping | 398 | * @dma_dir - the direction of the DMA mapping |
@@ -215,8 +400,8 @@ static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta, | |||
215 | * Does NOT advance any TFD circular buffer read/write indexes | 400 | * Does NOT advance any TFD circular buffer read/write indexes |
216 | * Does NOT free the TFD itself (which is within circular buffer) | 401 | * Does NOT free the TFD itself (which is within circular buffer) |
217 | */ | 402 | */ |
218 | void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, | 403 | static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq, |
219 | enum dma_data_direction dma_dir) | 404 | enum dma_data_direction dma_dir) |
220 | { | 405 | { |
221 | struct iwl_tfd *tfd_tmp = txq->tfds; | 406 | struct iwl_tfd *tfd_tmp = txq->tfds; |
222 | 407 | ||
@@ -227,8 +412,8 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, | |||
227 | lockdep_assert_held(&txq->lock); | 412 | lockdep_assert_held(&txq->lock); |
228 | 413 | ||
229 | /* We have only q->n_window txq->entries, but we use q->n_bd tfds */ | 414 | /* We have only q->n_window txq->entries, but we use q->n_bd tfds */ |
230 | iwl_unmap_tfd(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr], | 415 | iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr], |
231 | dma_dir); | 416 | dma_dir); |
232 | 417 | ||
233 | /* free SKB */ | 418 | /* free SKB */ |
234 | if (txq->entries) { | 419 | if (txq->entries) { |
@@ -247,10 +432,8 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, | |||
247 | } | 432 | } |
248 | } | 433 | } |
249 | 434 | ||
250 | int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans, | 435 | static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, |
251 | struct iwl_tx_queue *txq, | 436 | dma_addr_t addr, u16 len, u8 reset) |
252 | dma_addr_t addr, u16 len, | ||
253 | u8 reset) | ||
254 | { | 437 | { |
255 | struct iwl_queue *q; | 438 | struct iwl_queue *q; |
256 | struct iwl_tfd *tfd, *tfd_tmp; | 439 | struct iwl_tfd *tfd, *tfd_tmp; |
@@ -263,7 +446,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans, | |||
263 | if (reset) | 446 | if (reset) |
264 | memset(tfd, 0, sizeof(*tfd)); | 447 | memset(tfd, 0, sizeof(*tfd)); |
265 | 448 | ||
266 | num_tbs = iwl_tfd_get_num_tbs(tfd); | 449 | num_tbs = iwl_pcie_tfd_get_num_tbs(tfd); |
267 | 450 | ||
268 | /* Each TFD can point to a maximum 20 Tx buffers */ | 451 | /* Each TFD can point to a maximum 20 Tx buffers */ |
269 | if (num_tbs >= IWL_NUM_OF_TBS) { | 452 | if (num_tbs >= IWL_NUM_OF_TBS) { |
@@ -279,108 +462,534 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans, | |||
279 | IWL_ERR(trans, "Unaligned address = %llx\n", | 462 | IWL_ERR(trans, "Unaligned address = %llx\n", |
280 | (unsigned long long)addr); | 463 | (unsigned long long)addr); |
281 | 464 | ||
282 | iwl_tfd_set_tb(tfd, num_tbs, addr, len); | 465 | iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len); |
283 | 466 | ||
284 | return 0; | 467 | return 0; |
285 | } | 468 | } |
286 | 469 | ||
287 | /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** | 470 | static int iwl_pcie_txq_alloc(struct iwl_trans *trans, |
288 | * DMA services | 471 | struct iwl_txq *txq, int slots_num, |
289 | * | 472 | u32 txq_id) |
290 | * Theory of operation | 473 | { |
291 | * | 474 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
292 | * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer | 475 | size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX; |
293 | * of buffer descriptors, each of which points to one or more data buffers for | 476 | int i; |
294 | * the device to read from or fill. Driver and device exchange status of each | 477 | |
295 | * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty | 478 | if (WARN_ON(txq->entries || txq->tfds)) |
296 | * entries in each circular buffer, to protect against confusing empty and full | 479 | return -EINVAL; |
297 | * queue states. | 480 | |
298 | * | 481 | setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, |
299 | * The device reads or writes the data in the queues via the device's several | 482 | (unsigned long)txq); |
300 | * DMA/FIFO channels. Each queue is mapped to a single DMA channel. | 483 | txq->trans_pcie = trans_pcie; |
301 | * | 484 | |
302 | * For Tx queue, there are low mark and high mark limits. If, after queuing | 485 | txq->q.n_window = slots_num; |
303 | * the packet for Tx, free space become < low mark, Tx queue stopped. When | 486 | |
304 | * reclaiming packets (on 'tx done IRQ), if free space become > high mark, | 487 | txq->entries = kcalloc(slots_num, |
305 | * Tx queue resumed. | 488 | sizeof(struct iwl_pcie_txq_entry), |
489 | GFP_KERNEL); | ||
490 | |||
491 | if (!txq->entries) | ||
492 | goto error; | ||
493 | |||
494 | if (txq_id == trans_pcie->cmd_queue) | ||
495 | for (i = 0; i < slots_num; i++) { | ||
496 | txq->entries[i].cmd = | ||
497 | kmalloc(sizeof(struct iwl_device_cmd), | ||
498 | GFP_KERNEL); | ||
499 | if (!txq->entries[i].cmd) | ||
500 | goto error; | ||
501 | } | ||
502 | |||
503 | /* Circular buffer of transmit frame descriptors (TFDs), | ||
504 | * shared with device */ | ||
505 | txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, | ||
506 | &txq->q.dma_addr, GFP_KERNEL); | ||
507 | if (!txq->tfds) { | ||
508 | IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz); | ||
509 | goto error; | ||
510 | } | ||
511 | txq->q.id = txq_id; | ||
512 | |||
513 | return 0; | ||
514 | error: | ||
515 | if (txq->entries && txq_id == trans_pcie->cmd_queue) | ||
516 | for (i = 0; i < slots_num; i++) | ||
517 | kfree(txq->entries[i].cmd); | ||
518 | kfree(txq->entries); | ||
519 | txq->entries = NULL; | ||
520 | |||
521 | return -ENOMEM; | ||
522 | |||
523 | } | ||
524 | |||
525 | static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, | ||
526 | int slots_num, u32 txq_id) | ||
527 | { | ||
528 | int ret; | ||
529 | |||
530 | txq->need_update = 0; | ||
531 | |||
532 | /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise | ||
533 | * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ | ||
534 | BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); | ||
535 | |||
536 | /* Initialize queue's high/low-water marks, and head/tail indexes */ | ||
537 | ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num, | ||
538 | txq_id); | ||
539 | if (ret) | ||
540 | return ret; | ||
541 | |||
542 | spin_lock_init(&txq->lock); | ||
543 | |||
544 | /* | ||
545 | * Tell nic where to find circular buffer of Tx Frame Descriptors for | ||
546 | * given Tx queue, and enable the DMA channel used for that queue. | ||
547 | * Circular buffer (TFD queue in DRAM) physical base address */ | ||
548 | iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id), | ||
549 | txq->q.dma_addr >> 8); | ||
550 | |||
551 | return 0; | ||
552 | } | ||
553 | |||
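iwl_pcie_txq_init() programs FH_MEM_CBBC_QUEUE with the ring's bus address shifted right by 8, which suggests the register stores the TFD base in 256-byte units and that the ring must be 256-byte aligned; that alignment requirement is an inference from the shift, not something stated in the hunk. A minimal encode/decode sketch under that reading:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* encode a TFD ring base for a CBBC-style register that keeps 256-byte units */
static uint32_t cbbc_encode(uint64_t dma_addr)
{
        assert((dma_addr & 0xFF) == 0);   /* ring assumed 256-byte aligned */
        return (uint32_t)(dma_addr >> 8);
}

static uint64_t cbbc_decode(uint32_t reg)
{
        return (uint64_t)reg << 8;
}

int main(void)
{
        uint64_t base = 0x12345600;
        uint32_t reg = cbbc_encode(base);

        printf("reg=0x%08x base back=0x%llx\n", reg,
               (unsigned long long)cbbc_decode(reg));
        return 0;
}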
554 | /* | ||
555 | * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's | ||
556 | */ | ||
557 | static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) | ||
558 | { | ||
559 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
560 | struct iwl_txq *txq = &trans_pcie->txq[txq_id]; | ||
561 | struct iwl_queue *q = &txq->q; | ||
562 | enum dma_data_direction dma_dir; | ||
563 | |||
564 | if (!q->n_bd) | ||
565 | return; | ||
566 | |||
567 | /* In the command queue, all the TBs are mapped as BIDI | ||
568 | * so unmap them as such. | ||
569 | */ | ||
570 | if (txq_id == trans_pcie->cmd_queue) | ||
571 | dma_dir = DMA_BIDIRECTIONAL; | ||
572 | else | ||
573 | dma_dir = DMA_TO_DEVICE; | ||
574 | |||
575 | spin_lock_bh(&txq->lock); | ||
576 | while (q->write_ptr != q->read_ptr) { | ||
577 | iwl_pcie_txq_free_tfd(trans, txq, dma_dir); | ||
578 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); | ||
579 | } | ||
580 | spin_unlock_bh(&txq->lock); | ||
581 | } | ||
582 | |||
583 | /* | ||
584 | * iwl_pcie_txq_free - Deallocate DMA queue. | ||
585 | * @txq: Transmit queue to deallocate. | ||
306 | * | 586 | * |
307 | ***************************************************/ | 587 | * Empty queue by removing and destroying all BD's. |
588 | * Free all buffers. | ||
589 | * 0-fill, but do not free "txq" descriptor structure. | ||
590 | */ | ||
591 | static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) | ||
592 | { | ||
593 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
594 | struct iwl_txq *txq = &trans_pcie->txq[txq_id]; | ||
595 | struct device *dev = trans->dev; | ||
596 | int i; | ||
597 | |||
598 | if (WARN_ON(!txq)) | ||
599 | return; | ||
600 | |||
601 | iwl_pcie_txq_unmap(trans, txq_id); | ||
602 | |||
603 | /* De-alloc array of command/tx buffers */ | ||
604 | if (txq_id == trans_pcie->cmd_queue) | ||
605 | for (i = 0; i < txq->q.n_window; i++) { | ||
606 | kfree(txq->entries[i].cmd); | ||
607 | kfree(txq->entries[i].copy_cmd); | ||
608 | kfree(txq->entries[i].free_buf); | ||
609 | } | ||
610 | |||
611 | /* De-alloc circular buffer of TFDs */ | ||
612 | if (txq->q.n_bd) { | ||
613 | dma_free_coherent(dev, sizeof(struct iwl_tfd) * | ||
614 | txq->q.n_bd, txq->tfds, txq->q.dma_addr); | ||
615 | memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr)); | ||
616 | } | ||
617 | |||
618 | kfree(txq->entries); | ||
619 | txq->entries = NULL; | ||
308 | 620 | ||
309 | int iwl_queue_space(const struct iwl_queue *q) | 621 | del_timer_sync(&txq->stuck_timer); |
622 | |||
623 | /* 0-fill queue descriptor structure */ | ||
624 | memset(txq, 0, sizeof(*txq)); | ||
625 | } | ||
626 | |||
627 | /* | ||
628 | * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask | ||
629 | */ | ||
630 | static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask) | ||
310 | { | 631 | { |
311 | int s = q->read_ptr - q->write_ptr; | 632 | struct iwl_trans_pcie __maybe_unused *trans_pcie = |
633 | IWL_TRANS_GET_PCIE_TRANS(trans); | ||
312 | 634 | ||
313 | if (q->read_ptr > q->write_ptr) | 635 | iwl_write_prph(trans, SCD_TXFACT, mask); |
314 | s -= q->n_bd; | 636 | } |
315 | 637 | ||
316 | if (s <= 0) | 638 | void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) |
317 | s += q->n_window; | 639 | { |
318 | /* keep some reserve to not confuse empty and full situations */ | 640 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
319 | s -= 2; | 641 | u32 a; |
320 | if (s < 0) | 642 | int chan; |
321 | s = 0; | 643 | u32 reg_val; |
322 | return s; | 644 | |
645 | /* make sure all queue are not stopped/used */ | ||
646 | memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); | ||
647 | memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); | ||
648 | |||
649 | trans_pcie->scd_base_addr = | ||
650 | iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); | ||
651 | |||
652 | WARN_ON(scd_base_addr != 0 && | ||
653 | scd_base_addr != trans_pcie->scd_base_addr); | ||
654 | |||
655 | a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND; | ||
656 | /* reset context data memory */ | ||
657 | for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND; | ||
658 | a += 4) | ||
659 | iwl_write_targ_mem(trans, a, 0); | ||
660 | /* reset tx status memory */ | ||
661 | for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND; | ||
662 | a += 4) | ||
663 | iwl_write_targ_mem(trans, a, 0); | ||
664 | for (; a < trans_pcie->scd_base_addr + | ||
665 | SCD_TRANS_TBL_OFFSET_QUEUE( | ||
666 | trans->cfg->base_params->num_of_queues); | ||
667 | a += 4) | ||
668 | iwl_write_targ_mem(trans, a, 0); | ||
669 | |||
670 | iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, | ||
671 | trans_pcie->scd_bc_tbls.dma >> 10); | ||
672 | |||
673 | /* The chain extension of the SCD doesn't work well. This feature is | ||
674 | * enabled by default by the HW, so we need to disable it manually. | ||
675 | */ | ||
676 | iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); | ||
677 | |||
678 | iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, | ||
679 | trans_pcie->cmd_fifo); | ||
680 | |||
681 | /* Activate all Tx DMA/FIFO channels */ | ||
682 | iwl_pcie_txq_set_sched(trans, IWL_MASK(0, 7)); | ||
683 | |||
684 | /* Enable DMA channel */ | ||
685 | for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++) | ||
686 | iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), | ||
687 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | | ||
688 | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); | ||
689 | |||
690 | /* Update FH chicken bits */ | ||
691 | reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG); | ||
692 | iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG, | ||
693 | reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); | ||
694 | |||
695 | /* Enable L1-Active */ | ||
696 | iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, | ||
697 | APMG_PCIDEV_STT_VAL_L1_ACT_DIS); | ||
323 | } | 698 | } |
324 | 699 | ||
325 | /** | 700 | /* |
326 | * iwl_queue_init - Initialize queue's high/low-water and read/write indexes | 701 | * iwl_pcie_tx_stop - Stop all Tx DMA channels |
327 | */ | 702 | */ |
328 | int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id) | 703 | int iwl_pcie_tx_stop(struct iwl_trans *trans) |
329 | { | 704 | { |
330 | q->n_bd = count; | 705 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
331 | q->n_window = slots_num; | 706 | int ch, txq_id, ret; |
332 | q->id = id; | 707 | unsigned long flags; |
333 | 708 | ||
334 | /* count must be power-of-two size, otherwise iwl_queue_inc_wrap | 709 | /* Turn off all Tx DMA fifos */ |
335 | * and iwl_queue_dec_wrap are broken. */ | 710 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); |
336 | if (WARN_ON(!is_power_of_2(count))) | ||
337 | return -EINVAL; | ||
338 | 711 | ||
339 | /* slots_num must be power-of-two size, otherwise | 712 | iwl_pcie_txq_set_sched(trans, 0); |
340 | * get_cmd_index is broken. */ | ||
341 | if (WARN_ON(!is_power_of_2(slots_num))) | ||
342 | return -EINVAL; | ||
343 | 713 | ||
344 | q->low_mark = q->n_window / 4; | 714 | /* Stop each Tx DMA channel, and wait for it to be idle */ |
345 | if (q->low_mark < 4) | 715 | for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { |
346 | q->low_mark = 4; | 716 | iwl_write_direct32(trans, |
717 | FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); | ||
718 | ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG, | ||
719 | FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000); | ||
720 | if (ret < 0) | ||
721 | IWL_ERR(trans, | ||
722 | "Failing on timeout while stopping DMA channel %d [0x%08x]\n", | ||
723 | ch, | ||
724 | iwl_read_direct32(trans, | ||
725 | FH_TSSR_TX_STATUS_REG)); | ||
726 | } | ||
727 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
347 | 728 | ||
348 | q->high_mark = q->n_window / 8; | 729 | if (!trans_pcie->txq) { |
349 | if (q->high_mark < 2) | 730 | IWL_WARN(trans, |
350 | q->high_mark = 2; | 731 | "Stopping tx queues that aren't allocated...\n"); |
732 | return 0; | ||
733 | } | ||
351 | 734 | ||
352 | q->write_ptr = q->read_ptr = 0; | 735 | /* Unmap DMA from host system and free skb's */ |
736 | for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; | ||
737 | txq_id++) | ||
738 | iwl_pcie_txq_unmap(trans, txq_id); | ||
353 | 739 | ||
354 | return 0; | 740 | return 0; |
355 | } | 741 | } |
356 | 742 | ||
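iwl_pcie_tx_stop() clears each channel's TX config register and then polls FH_TSSR_TX_STATUS_REG until the per-channel idle bit appears or the budget runs out. The sketch below models that poll-until-bit-set-or-timeout shape against a fake register read; it is a user-space stand-in for iwl_poll_direct_bit(), with the real microsecond budget and delays omitted:

#include <stdint.h>
#include <stdio.h>

/* fake MMIO read: the channel goes idle after a few polls */
static uint32_t fake_status_read(void)
{
        static int polls;
        return (++polls >= 3) ? 0x1 : 0x0;   /* bit 0 = "channel idle" */
}

/*
 * Poll a status word until (val & mask) == mask or the budget runs out.
 * Returns the number of polls used, or -1 on timeout, the same shape as
 * a poll_bit() helper minus the real register access and delays.
 */
static int poll_bit(uint32_t (*read)(void), uint32_t mask, int max_polls)
{
        int i;

        for (i = 0; i < max_polls; i++)
                if ((read() & mask) == mask)
                        return i;
        return -1;
}

int main(void)
{
        int ret = poll_bit(fake_status_read, 0x1, 1000);

        if (ret < 0)
                printf("timed out waiting for DMA channel idle\n");
        else
                printf("channel idle after %d polls\n", ret);
        return 0;
}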
357 | static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, | 743 | /* |
358 | struct iwl_tx_queue *txq) | 744 | * iwl_trans_tx_free - Free TXQ Context |
745 | * | ||
746 | * Destroy all TX DMA queues and structures | ||
747 | */ | ||
748 | void iwl_pcie_tx_free(struct iwl_trans *trans) | ||
359 | { | 749 | { |
360 | struct iwl_trans_pcie *trans_pcie = | 750 | int txq_id; |
361 | IWL_TRANS_GET_PCIE_TRANS(trans); | 751 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
362 | struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; | ||
363 | int txq_id = txq->q.id; | ||
364 | int read_ptr = txq->q.read_ptr; | ||
365 | u8 sta_id = 0; | ||
366 | __le16 bc_ent; | ||
367 | struct iwl_tx_cmd *tx_cmd = | ||
368 | (void *)txq->entries[txq->q.read_ptr].cmd->payload; | ||
369 | 752 | ||
370 | WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); | 753 | /* Tx queues */ |
754 | if (trans_pcie->txq) { | ||
755 | for (txq_id = 0; | ||
756 | txq_id < trans->cfg->base_params->num_of_queues; txq_id++) | ||
757 | iwl_pcie_txq_free(trans, txq_id); | ||
758 | } | ||
371 | 759 | ||
372 | if (txq_id != trans_pcie->cmd_queue) | 760 | kfree(trans_pcie->txq); |
373 | sta_id = tx_cmd->sta_id; | 761 | trans_pcie->txq = NULL; |
374 | 762 | ||
375 | bc_ent = cpu_to_le16(1 | (sta_id << 12)); | 763 | iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); |
376 | scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; | ||
377 | 764 | ||
378 | if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) | 765 | iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls); |
379 | scd_bc_tbl[txq_id]. | 766 | } |
380 | tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; | 767 | |
768 | /* | ||
769 | * iwl_pcie_tx_alloc - allocate TX context | ||
770 | * Allocate all Tx DMA structures and initialize them | ||
771 | */ | ||
772 | static int iwl_pcie_tx_alloc(struct iwl_trans *trans) | ||
773 | { | ||
774 | int ret; | ||
775 | int txq_id, slots_num; | ||
776 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
777 | |||
778 | u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues * | ||
779 | sizeof(struct iwlagn_scd_bc_tbl); | ||
780 | |||
781 | /*It is not allowed to alloc twice, so warn when this happens. | ||
782 | * We cannot rely on the previous allocation, so free and fail */ | ||
783 | if (WARN_ON(trans_pcie->txq)) { | ||
784 | ret = -EINVAL; | ||
785 | goto error; | ||
786 | } | ||
787 | |||
788 | ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls, | ||
789 | scd_bc_tbls_size); | ||
790 | if (ret) { | ||
791 | IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); | ||
792 | goto error; | ||
793 | } | ||
794 | |||
795 | /* Alloc keep-warm buffer */ | ||
796 | ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE); | ||
797 | if (ret) { | ||
798 | IWL_ERR(trans, "Keep Warm allocation failed\n"); | ||
799 | goto error; | ||
800 | } | ||
801 | |||
802 | trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues, | ||
803 | sizeof(struct iwl_txq), GFP_KERNEL); | ||
804 | if (!trans_pcie->txq) { | ||
805 | IWL_ERR(trans, "Not enough memory for txq\n"); | ||
806 | ret = -ENOMEM; | ||
807 | goto error; | ||
808 | } | ||
809 | |||
810 | /* Alloc and init all Tx queues, including the command queue (#4/#9) */ | ||
811 | for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; | ||
812 | txq_id++) { | ||
813 | slots_num = (txq_id == trans_pcie->cmd_queue) ? | ||
814 | TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | ||
815 | ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id], | ||
816 | slots_num, txq_id); | ||
817 | if (ret) { | ||
818 | IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); | ||
819 | goto error; | ||
820 | } | ||
821 | } | ||
822 | |||
823 | return 0; | ||
824 | |||
825 | error: | ||
826 | iwl_pcie_tx_free(trans); | ||
827 | |||
828 | return ret; | ||
829 | } | ||
830 | int iwl_pcie_tx_init(struct iwl_trans *trans) | ||
831 | { | ||
832 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
833 | int ret; | ||
834 | int txq_id, slots_num; | ||
835 | unsigned long flags; | ||
836 | bool alloc = false; | ||
837 | |||
838 | if (!trans_pcie->txq) { | ||
839 | ret = iwl_pcie_tx_alloc(trans); | ||
840 | if (ret) | ||
841 | goto error; | ||
842 | alloc = true; | ||
843 | } | ||
844 | |||
845 | spin_lock_irqsave(&trans_pcie->irq_lock, flags); | ||
846 | |||
847 | /* Turn off all Tx DMA fifos */ | ||
848 | iwl_write_prph(trans, SCD_TXFACT, 0); | ||
849 | |||
850 | /* Tell NIC where to find the "keep warm" buffer */ | ||
851 | iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, | ||
852 | trans_pcie->kw.dma >> 4); | ||
853 | |||
854 | spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); | ||
855 | |||
856 | /* Alloc and init all Tx queues, including the command queue (#4/#9) */ | ||
857 | for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; | ||
858 | txq_id++) { | ||
859 | slots_num = (txq_id == trans_pcie->cmd_queue) ? | ||
860 | TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | ||
861 | ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id], | ||
862 | slots_num, txq_id); | ||
863 | if (ret) { | ||
864 | IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); | ||
865 | goto error; | ||
866 | } | ||
867 | } | ||
868 | |||
869 | return 0; | ||
870 | error: | ||
871 | /*Upon error, free only if we allocated something */ | ||
872 | if (alloc) | ||
873 | iwl_pcie_tx_free(trans); | ||
874 | return ret; | ||
875 | } | ||
876 | |||
877 | static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie, | ||
878 | struct iwl_txq *txq) | ||
879 | { | ||
880 | if (!trans_pcie->wd_timeout) | ||
881 | return; | ||
882 | |||
883 | /* | ||
884 | * if empty delete timer, otherwise move timer forward | ||
885 | * since we're making progress on this queue | ||
886 | */ | ||
887 | if (txq->q.read_ptr == txq->q.write_ptr) | ||
888 | del_timer(&txq->stuck_timer); | ||
889 | else | ||
890 | mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); | ||
891 | } | ||
892 | |||
893 | /* Frees buffers until index _not_ inclusive */ | ||
894 | void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, | ||
895 | struct sk_buff_head *skbs) | ||
896 | { | ||
897 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
898 | struct iwl_txq *txq = &trans_pcie->txq[txq_id]; | ||
899 | /* n_bd is usually 256 => n_bd - 1 = 0xff */ | ||
900 | int tfd_num = ssn & (txq->q.n_bd - 1); | ||
901 | struct iwl_queue *q = &txq->q; | ||
902 | int last_to_free; | ||
903 | |||
904 | /* This function is not meant to release cmd queue*/ | ||
905 | if (WARN_ON(txq_id == trans_pcie->cmd_queue)) | ||
906 | return; | ||
907 | |||
908 | spin_lock(&txq->lock); | ||
909 | |||
910 | if (txq->q.read_ptr == tfd_num) | ||
911 | goto out; | ||
912 | |||
913 | IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", | ||
914 | txq_id, txq->q.read_ptr, tfd_num, ssn); | ||
915 | |||
916 | /*Since we free until index _not_ inclusive, the one before index is | ||
917 | * the last we will free. This one must be used */ | ||
918 | last_to_free = iwl_queue_dec_wrap(tfd_num, q->n_bd); | ||
919 | |||
920 | if (!iwl_queue_used(q, last_to_free)) { | ||
921 | IWL_ERR(trans, | ||
922 | "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", | ||
923 | __func__, txq_id, last_to_free, q->n_bd, | ||
924 | q->write_ptr, q->read_ptr); | ||
925 | goto out; | ||
926 | } | ||
927 | |||
928 | if (WARN_ON(!skb_queue_empty(skbs))) | ||
929 | goto out; | ||
930 | |||
931 | for (; | ||
932 | q->read_ptr != tfd_num; | ||
933 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | ||
934 | |||
935 | if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL)) | ||
936 | continue; | ||
937 | |||
938 | __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb); | ||
939 | |||
940 | txq->entries[txq->q.read_ptr].skb = NULL; | ||
941 | |||
942 | iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); | ||
943 | |||
944 | iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE); | ||
945 | } | ||
946 | |||
947 | iwl_pcie_txq_progress(trans_pcie, txq); | ||
948 | |||
949 | if (iwl_queue_space(&txq->q) > txq->q.low_mark) | ||
950 | iwl_wake_queue(trans, txq); | ||
951 | out: | ||
952 | spin_unlock(&txq->lock); | ||
381 | } | 953 | } |
382 | 954 | ||
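iwl_trans_pcie_reclaim() frees entries from the current read pointer up to, but not including, the TFD index derived from the sequence number (ssn & (n_bd - 1)); last_to_free is therefore the slot just before it, computed with a wrapping decrement. A small worked model of that walk with concrete numbers:

#include <stdio.h>

#define N_BD 256

static int inc_wrap(int i) { return (i + 1) & (N_BD - 1); }
static int dec_wrap(int i) { return (i - 1) & (N_BD - 1); }

int main(void)
{
        int read_ptr = 252;
        int ssn = 515;                      /* firmware sequence number */
        int tfd_num = ssn & (N_BD - 1);     /* 515 & 255 = 3 */
        int last_to_free = dec_wrap(tfd_num);
        int freed = 0;

        printf("freeing %d..%d, not including %d\n",
               read_ptr, last_to_free, tfd_num);

        /* same loop shape as the reclaim path: stop when read_ptr == tfd_num */
        for (; read_ptr != tfd_num; read_ptr = inc_wrap(read_ptr))
                freed++;

        printf("freed %d entries, read_ptr now %d\n", freed, read_ptr);
        return 0;
}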
383 | static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, | 955 | /* |
956 | * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd | ||
957 | * | ||
958 | * When FW advances 'R' index, all entries between old and new 'R' index | ||
959 | * need to be reclaimed. As result, some free space forms. If there is | ||
960 | * enough free space (> low mark), wake the stack that feeds us. | ||
961 | */ | ||
962 | static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) | ||
963 | { | ||
964 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
965 | struct iwl_txq *txq = &trans_pcie->txq[txq_id]; | ||
966 | struct iwl_queue *q = &txq->q; | ||
967 | int nfreed = 0; | ||
968 | |||
969 | lockdep_assert_held(&txq->lock); | ||
970 | |||
971 | if ((idx >= q->n_bd) || (!iwl_queue_used(q, idx))) { | ||
972 | IWL_ERR(trans, | ||
973 | "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", | ||
974 | __func__, txq_id, idx, q->n_bd, | ||
975 | q->write_ptr, q->read_ptr); | ||
976 | return; | ||
977 | } | ||
978 | |||
979 | for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; | ||
980 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | ||
981 | |||
982 | if (nfreed++ > 0) { | ||
983 | IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", | ||
984 | idx, q->write_ptr, q->read_ptr); | ||
985 | iwl_op_mode_nic_error(trans->op_mode); | ||
986 | } | ||
987 | } | ||
988 | |||
989 | iwl_pcie_txq_progress(trans_pcie, txq); | ||
990 | } | ||
991 | |||
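iwl_pcie_cmdq_reclaim() does the same walk for the command queue, but the loop starts at inc_wrap(idx), so the completed command's own slot is reclaimed as well, and reclaiming more than one entry means an earlier command response was skipped, which the driver escalates to iwl_op_mode_nic_error(). The toy below reproduces that inclusive variant, with read_ptr deliberately lagging two slots behind to show the skipped-entry case; the ring size is illustrative:

#include <stdio.h>

#define N_BD 32   /* illustrative ring size (power of two) */

static int inc_wrap(int i) { return (i + 1) & (N_BD - 1); }

int main(void)
{
        int read_ptr = 5;
        int idx = 7;        /* index of the command the firmware completed */
        int nfreed = 0;
        int i;

        /* stop once read_ptr has moved past idx: slots 5, 6 and 7 are reclaimed */
        for (i = inc_wrap(idx); read_ptr != i; read_ptr = inc_wrap(read_ptr))
                if (nfreed++ > 0)
                        printf("entry skipped before idx %d\n", idx);

        printf("reclaimed %d entries, read_ptr now %d\n", nfreed, read_ptr);
        return 0;
}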
992 | static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, | ||
384 | u16 txq_id) | 993 | u16 txq_id) |
385 | { | 994 | { |
386 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 995 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
@@ -405,7 +1014,8 @@ static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, | |||
405 | return 0; | 1014 | return 0; |
406 | } | 1015 | } |
407 | 1016 | ||
408 | static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id) | 1017 | static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans, |
1018 | u16 txq_id) | ||
409 | { | 1019 | { |
410 | /* Simply stop the queue, but don't change any configuration; | 1020 | /* Simply stop the queue, but don't change any configuration; |
411 | * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ | 1021 | * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ |
@@ -424,7 +1034,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, | |||
424 | WARN_ONCE(1, "queue %d already used - expect issues", txq_id); | 1034 | WARN_ONCE(1, "queue %d already used - expect issues", txq_id); |
425 | 1035 | ||
426 | /* Stop this Tx queue before configuring it */ | 1036 | /* Stop this Tx queue before configuring it */ |
427 | iwl_txq_set_inactive(trans, txq_id); | 1037 | iwl_pcie_txq_set_inactive(trans, txq_id); |
428 | 1038 | ||
429 | /* Set this queue as a chain-building queue unless it is CMD queue */ | 1039 | /* Set this queue as a chain-building queue unless it is CMD queue */ |
430 | if (txq_id != trans_pcie->cmd_queue) | 1040 | if (txq_id != trans_pcie->cmd_queue) |
@@ -435,7 +1045,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, | |||
435 | u16 ra_tid = BUILD_RAxTID(sta_id, tid); | 1045 | u16 ra_tid = BUILD_RAxTID(sta_id, tid); |
436 | 1046 | ||
437 | /* Map receiver-address / traffic-ID to this queue */ | 1047 | /* Map receiver-address / traffic-ID to this queue */ |
438 | iwl_txq_set_ratid_map(trans, ra_tid, txq_id); | 1048 | iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id); |
439 | 1049 | ||
440 | /* enable aggregations for the queue */ | 1050 | /* enable aggregations for the queue */ |
441 | iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); | 1051 | iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); |
@@ -489,20 +1099,20 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) | |||
489 | return; | 1099 | return; |
490 | } | 1100 | } |
491 | 1101 | ||
492 | iwl_txq_set_inactive(trans, txq_id); | 1102 | iwl_pcie_txq_set_inactive(trans, txq_id); |
493 | 1103 | ||
494 | _iwl_write_targ_mem_dwords(trans, stts_addr, | 1104 | _iwl_write_targ_mem_dwords(trans, stts_addr, |
495 | zero_val, ARRAY_SIZE(zero_val)); | 1105 | zero_val, ARRAY_SIZE(zero_val)); |
496 | 1106 | ||
497 | iwl_tx_queue_unmap(trans, txq_id); | 1107 | iwl_pcie_txq_unmap(trans, txq_id); |
498 | 1108 | ||
499 | IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); | 1109 | IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); |
500 | } | 1110 | } |
501 | 1111 | ||
502 | /*************** HOST COMMAND QUEUE FUNCTIONS *****/ | 1112 | /*************** HOST COMMAND QUEUE FUNCTIONS *****/ |
503 | 1113 | ||
504 | /** | 1114 | /* |
505 | * iwl_enqueue_hcmd - enqueue a uCode command | 1115 | * iwl_pcie_enqueue_hcmd - enqueue a uCode command |
506 | * @priv: device private data pointer | 1116 | * @priv: device private data pointer |
507 | * @cmd: a pointer to the ucode command structure | 1117 | * @cmd: a pointer to the ucode command structure |
508 | * | 1118 | * |
@@ -510,10 +1120,11 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) | |||
510 | * failed. On success, it returns the index (> 0) of the command in the | 1120 | * failed. On success, it returns the index (> 0) of the command in the |
511 | * command queue. | 1121 | * command queue. |
512 | */ | 1122 | */ |
513 | static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | 1123 | static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, |
1124 | struct iwl_host_cmd *cmd) | ||
514 | { | 1125 | { |
515 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1126 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
516 | struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; | 1127 | struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; |
517 | struct iwl_queue *q = &txq->q; | 1128 | struct iwl_queue *q = &txq->q; |
518 | struct iwl_device_cmd *out_cmd; | 1129 | struct iwl_device_cmd *out_cmd; |
519 | struct iwl_cmd_meta *out_meta; | 1130 | struct iwl_cmd_meta *out_meta; |
@@ -576,8 +1187,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
576 | */ | 1187 | */ |
577 | if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, | 1188 | if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, |
578 | "Command %s (%#x) is too large (%d bytes)\n", | 1189 | "Command %s (%#x) is too large (%d bytes)\n", |
579 | trans_pcie_get_cmd_string(trans_pcie, cmd->id), | 1190 | get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) { |
580 | cmd->id, copy_size)) { | ||
581 | idx = -EINVAL; | 1191 | idx = -EINVAL; |
582 | goto free_dup_buf; | 1192 | goto free_dup_buf; |
583 | } | 1193 | } |
@@ -640,7 +1250,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
640 | 1250 | ||
641 | IWL_DEBUG_HC(trans, | 1251 | IWL_DEBUG_HC(trans, |
642 | "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", | 1252 | "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", |
643 | trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd), | 1253 | get_cmd_string(trans_pcie, out_cmd->hdr.cmd), |
644 | out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), | 1254 | out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), |
645 | cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); | 1255 | cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); |
646 | 1256 | ||
@@ -654,7 +1264,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
654 | dma_unmap_addr_set(out_meta, mapping, phys_addr); | 1264 | dma_unmap_addr_set(out_meta, mapping, phys_addr); |
655 | dma_unmap_len_set(out_meta, len, copy_size); | 1265 | dma_unmap_len_set(out_meta, len, copy_size); |
656 | 1266 | ||
657 | iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1); | 1267 | iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1); |
658 | 1268 | ||
659 | for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { | 1269 | for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { |
660 | const void *data = cmd->data[i]; | 1270 | const void *data = cmd->data[i]; |
@@ -669,15 +1279,14 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
669 | phys_addr = dma_map_single(trans->dev, (void *)data, | 1279 | phys_addr = dma_map_single(trans->dev, (void *)data, |
670 | cmd->len[i], DMA_BIDIRECTIONAL); | 1280 | cmd->len[i], DMA_BIDIRECTIONAL); |
671 | if (dma_mapping_error(trans->dev, phys_addr)) { | 1281 | if (dma_mapping_error(trans->dev, phys_addr)) { |
672 | iwl_unmap_tfd(trans, out_meta, | 1282 | iwl_pcie_tfd_unmap(trans, out_meta, |
673 | &txq->tfds[q->write_ptr], | 1283 | &txq->tfds[q->write_ptr], |
674 | DMA_BIDIRECTIONAL); | 1284 | DMA_BIDIRECTIONAL); |
675 | idx = -ENOMEM; | 1285 | idx = -ENOMEM; |
676 | goto out; | 1286 | goto out; |
677 | } | 1287 | } |
678 | 1288 | ||
679 | iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, | 1289 | iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmd->len[i], 0); |
680 | cmd->len[i], 0); | ||
681 | } | 1290 | } |
682 | 1291 | ||
683 | out_meta->flags = cmd->flags; | 1292 | out_meta->flags = cmd->flags; |
@@ -696,7 +1305,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
696 | 1305 | ||
697 | /* Increment and update queue's write index */ | 1306 | /* Increment and update queue's write index */ |
698 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); | 1307 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); |
699 | iwl_txq_update_write_ptr(trans, txq); | 1308 | iwl_pcie_txq_inc_wr_ptr(trans, txq); |
700 | 1309 | ||
701 | out: | 1310 | out: |
702 | spin_unlock_bh(&txq->lock); | 1311 | spin_unlock_bh(&txq->lock); |
@@ -706,63 +1315,8 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
706 | return idx; | 1315 | return idx; |
707 | } | 1316 | } |
708 | 1317 | ||
709 | static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie, | 1318 | /* |
710 | struct iwl_tx_queue *txq) | 1319 | * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them |
711 | { | ||
712 | if (!trans_pcie->wd_timeout) | ||
713 | return; | ||
714 | |||
715 | /* | ||
716 | * if empty delete timer, otherwise move timer forward | ||
717 | * since we're making progress on this queue | ||
718 | */ | ||
719 | if (txq->q.read_ptr == txq->q.write_ptr) | ||
720 | del_timer(&txq->stuck_timer); | ||
721 | else | ||
722 | mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); | ||
723 | } | ||
724 | |||
725 | /** | ||
726 | * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd | ||
727 | * | ||
728 | * When FW advances 'R' index, all entries between old and new 'R' index | ||
729 | * need to be reclaimed. As result, some free space forms. If there is | ||
730 | * enough free space (> low mark), wake the stack that feeds us. | ||
731 | */ | ||
732 | static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id, | ||
733 | int idx) | ||
734 | { | ||
735 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
736 | struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; | ||
737 | struct iwl_queue *q = &txq->q; | ||
738 | int nfreed = 0; | ||
739 | |||
740 | lockdep_assert_held(&txq->lock); | ||
741 | |||
742 | if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) { | ||
743 | IWL_ERR(trans, | ||
744 | "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", | ||
745 | __func__, txq_id, idx, q->n_bd, | ||
746 | q->write_ptr, q->read_ptr); | ||
747 | return; | ||
748 | } | ||
749 | |||
750 | for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; | ||
751 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | ||
752 | |||
753 | if (nfreed++ > 0) { | ||
754 | IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", | ||
755 | idx, q->write_ptr, q->read_ptr); | ||
756 | iwl_op_mode_nic_error(trans->op_mode); | ||
757 | } | ||
758 | |||
759 | } | ||
760 | |||
761 | iwl_queue_progress(trans_pcie, txq); | ||
762 | } | ||
763 | |||
764 | /** | ||
765 | * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them | ||
766 | * @rxb: Rx buffer to reclaim | 1320 | * @rxb: Rx buffer to reclaim |
767 | * @handler_status: return value of the handler of the command | 1321 | * @handler_status: return value of the handler of the command |
768 | * (put in setup_rx_handlers) | 1322 | * (put in setup_rx_handlers) |
@@ -771,8 +1325,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id, | |||
771 | * will be executed. The attached skb (if present) will only be freed | 1325 | * will be executed. The attached skb (if present) will only be freed |
772 | * if the callback returns 1 | 1326 | * if the callback returns 1 |
773 | */ | 1327 | */ |
774 | void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb, | 1328 | void iwl_pcie_hcmd_complete(struct iwl_trans *trans, |
775 | int handler_status) | 1329 | struct iwl_rx_cmd_buffer *rxb, int handler_status) |
776 | { | 1330 | { |
777 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | 1331 | struct iwl_rx_packet *pkt = rxb_addr(rxb); |
778 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); | 1332 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); |
@@ -782,7 +1336,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb, | |||
782 | struct iwl_device_cmd *cmd; | 1336 | struct iwl_device_cmd *cmd; |
783 | struct iwl_cmd_meta *meta; | 1337 | struct iwl_cmd_meta *meta; |
784 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1338 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
785 | struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; | 1339 | struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; |
786 | 1340 | ||
787 | /* If a Tx command is being handled and it isn't in the actual | 1341 | /* If a Tx command is being handled and it isn't in the actual |
788 | * command queue then there a command routing bug has been introduced | 1342 | * command queue then there a command routing bug has been introduced |
@@ -802,7 +1356,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb, | |||
802 | cmd = txq->entries[cmd_index].cmd; | 1356 | cmd = txq->entries[cmd_index].cmd; |
803 | meta = &txq->entries[cmd_index].meta; | 1357 | meta = &txq->entries[cmd_index].meta; |
804 | 1358 | ||
805 | iwl_unmap_tfd(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); | 1359 | iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); |
806 | 1360 | ||
807 | /* Input error checking is done when commands are added to queue. */ | 1361 | /* Input error checking is done when commands are added to queue. */ |
808 | if (meta->flags & CMD_WANT_SKB) { | 1362 | if (meta->flags & CMD_WANT_SKB) { |
@@ -814,19 +1368,17 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb, | |||
814 | meta->source->handler_status = handler_status; | 1368 | meta->source->handler_status = handler_status; |
815 | } | 1369 | } |
816 | 1370 | ||
817 | iwl_hcmd_queue_reclaim(trans, txq_id, index); | 1371 | iwl_pcie_cmdq_reclaim(trans, txq_id, index); |
818 | 1372 | ||
819 | if (!(meta->flags & CMD_ASYNC)) { | 1373 | if (!(meta->flags & CMD_ASYNC)) { |
820 | if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { | 1374 | if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { |
821 | IWL_WARN(trans, | 1375 | IWL_WARN(trans, |
822 | "HCMD_ACTIVE already clear for command %s\n", | 1376 | "HCMD_ACTIVE already clear for command %s\n", |
823 | trans_pcie_get_cmd_string(trans_pcie, | 1377 | get_cmd_string(trans_pcie, cmd->hdr.cmd)); |
824 | cmd->hdr.cmd)); | ||
825 | } | 1378 | } |
826 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); | 1379 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); |
827 | IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", | 1380 | IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", |
828 | trans_pcie_get_cmd_string(trans_pcie, | 1381 | get_cmd_string(trans_pcie, cmd->hdr.cmd)); |
829 | cmd->hdr.cmd)); | ||
830 | wake_up(&trans_pcie->wait_command_queue); | 1382 | wake_up(&trans_pcie->wait_command_queue); |
831 | } | 1383 | } |
832 | 1384 | ||
@@ -837,7 +1389,8 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb, | |||
837 | 1389 | ||
838 | #define HOST_COMPLETE_TIMEOUT (2 * HZ) | 1390 | #define HOST_COMPLETE_TIMEOUT (2 * HZ) |
839 | 1391 | ||
840 | static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | 1392 | static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans, |
1393 | struct iwl_host_cmd *cmd) | ||
841 | { | 1394 | { |
842 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1395 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
843 | int ret; | 1396 | int ret; |
@@ -846,43 +1399,43 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
846 | if (WARN_ON(cmd->flags & CMD_WANT_SKB)) | 1399 | if (WARN_ON(cmd->flags & CMD_WANT_SKB)) |
847 | return -EINVAL; | 1400 | return -EINVAL; |
848 | 1401 | ||
849 | 1402 | ret = iwl_pcie_enqueue_hcmd(trans, cmd); | |
850 | ret = iwl_enqueue_hcmd(trans, cmd); | ||
851 | if (ret < 0) { | 1403 | if (ret < 0) { |
852 | IWL_ERR(trans, | 1404 | IWL_ERR(trans, |
853 | "Error sending %s: enqueue_hcmd failed: %d\n", | 1405 | "Error sending %s: enqueue_hcmd failed: %d\n", |
854 | trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret); | 1406 | get_cmd_string(trans_pcie, cmd->id), ret); |
855 | return ret; | 1407 | return ret; |
856 | } | 1408 | } |
857 | return 0; | 1409 | return 0; |
858 | } | 1410 | } |
859 | 1411 | ||
860 | static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | 1412 | static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, |
1413 | struct iwl_host_cmd *cmd) | ||
861 | { | 1414 | { |
862 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1415 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
863 | int cmd_idx; | 1416 | int cmd_idx; |
864 | int ret; | 1417 | int ret; |
865 | 1418 | ||
866 | IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", | 1419 | IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", |
867 | trans_pcie_get_cmd_string(trans_pcie, cmd->id)); | 1420 | get_cmd_string(trans_pcie, cmd->id)); |
868 | 1421 | ||
869 | if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE, | 1422 | if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE, |
870 | &trans_pcie->status))) { | 1423 | &trans_pcie->status))) { |
871 | IWL_ERR(trans, "Command %s: a command is already active!\n", | 1424 | IWL_ERR(trans, "Command %s: a command is already active!\n", |
872 | trans_pcie_get_cmd_string(trans_pcie, cmd->id)); | 1425 | get_cmd_string(trans_pcie, cmd->id)); |
873 | return -EIO; | 1426 | return -EIO; |
874 | } | 1427 | } |
875 | 1428 | ||
876 | IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", | 1429 | IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", |
877 | trans_pcie_get_cmd_string(trans_pcie, cmd->id)); | 1430 | get_cmd_string(trans_pcie, cmd->id)); |
878 | 1431 | ||
879 | cmd_idx = iwl_enqueue_hcmd(trans, cmd); | 1432 | cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); |
880 | if (cmd_idx < 0) { | 1433 | if (cmd_idx < 0) { |
881 | ret = cmd_idx; | 1434 | ret = cmd_idx; |
882 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); | 1435 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); |
883 | IWL_ERR(trans, | 1436 | IWL_ERR(trans, |
884 | "Error sending %s: enqueue_hcmd failed: %d\n", | 1437 | "Error sending %s: enqueue_hcmd failed: %d\n", |
885 | trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret); | 1438 | get_cmd_string(trans_pcie, cmd->id), ret); |
886 | return ret; | 1439 | return ret; |
887 | } | 1440 | } |
888 | 1441 | ||
@@ -892,13 +1445,13 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
892 | HOST_COMPLETE_TIMEOUT); | 1445 | HOST_COMPLETE_TIMEOUT); |
893 | if (!ret) { | 1446 | if (!ret) { |
894 | if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { | 1447 | if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { |
895 | struct iwl_tx_queue *txq = | 1448 | struct iwl_txq *txq = |
896 | &trans_pcie->txq[trans_pcie->cmd_queue]; | 1449 | &trans_pcie->txq[trans_pcie->cmd_queue]; |
897 | struct iwl_queue *q = &txq->q; | 1450 | struct iwl_queue *q = &txq->q; |
898 | 1451 | ||
899 | IWL_ERR(trans, | 1452 | IWL_ERR(trans, |
900 | "Error sending %s: time out after %dms.\n", | 1453 | "Error sending %s: time out after %dms.\n", |
901 | trans_pcie_get_cmd_string(trans_pcie, cmd->id), | 1454 | get_cmd_string(trans_pcie, cmd->id), |
902 | jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); | 1455 | jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); |
903 | 1456 | ||
904 | IWL_ERR(trans, | 1457 | IWL_ERR(trans, |
@@ -908,8 +1461,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
908 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); | 1461 | clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); |
909 | IWL_DEBUG_INFO(trans, | 1462 | IWL_DEBUG_INFO(trans, |
910 | "Clearing HCMD_ACTIVE for command %s\n", | 1463 | "Clearing HCMD_ACTIVE for command %s\n", |
911 | trans_pcie_get_cmd_string(trans_pcie, | 1464 | get_cmd_string(trans_pcie, cmd->id)); |
912 | cmd->id)); | ||
913 | ret = -ETIMEDOUT; | 1465 | ret = -ETIMEDOUT; |
914 | goto cancel; | 1466 | goto cancel; |
915 | } | 1467 | } |
@@ -917,7 +1469,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
917 | 1469 | ||
918 | if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) { | 1470 | if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) { |
919 | IWL_ERR(trans, "FW error in SYNC CMD %s\n", | 1471 | IWL_ERR(trans, "FW error in SYNC CMD %s\n", |
920 | trans_pcie_get_cmd_string(trans_pcie, cmd->id)); | 1472 | get_cmd_string(trans_pcie, cmd->id)); |
921 | ret = -EIO; | 1473 | ret = -EIO; |
922 | goto cancel; | 1474 | goto cancel; |
923 | } | 1475 | } |
@@ -930,7 +1482,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
930 | 1482 | ||
931 | if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { | 1483 | if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { |
932 | IWL_ERR(trans, "Error: Response NULL in '%s'\n", | 1484 | IWL_ERR(trans, "Error: Response NULL in '%s'\n", |
933 | trans_pcie_get_cmd_string(trans_pcie, cmd->id)); | 1485 | get_cmd_string(trans_pcie, cmd->id)); |
934 | ret = -EIO; | 1486 | ret = -EIO; |
935 | goto cancel; | 1487 | goto cancel; |
936 | } | 1488 | } |
@@ -957,7 +1509,7 @@ cancel: | |||
957 | return ret; | 1509 | return ret; |
958 | } | 1510 | } |
959 | 1511 | ||
960 | int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | 1512 | int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) |
961 | { | 1513 | { |
962 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1514 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
963 | 1515 | ||
@@ -968,62 +1520,172 @@ int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) | |||
968 | return -ERFKILL; | 1520 | return -ERFKILL; |
969 | 1521 | ||
970 | if (cmd->flags & CMD_ASYNC) | 1522 | if (cmd->flags & CMD_ASYNC) |
971 | return iwl_send_cmd_async(trans, cmd); | 1523 | return iwl_pcie_send_hcmd_async(trans, cmd); |
972 | 1524 | ||
973 | /* We still can fail on RFKILL that can be asserted while we wait */ | 1525 | /* We still can fail on RFKILL that can be asserted while we wait */ |
974 | return iwl_send_cmd_sync(trans, cmd); | 1526 | return iwl_pcie_send_hcmd_sync(trans, cmd); |
975 | } | 1527 | } |
976 | 1528 | ||
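Taken together, iwl_pcie_send_hcmd_sync() above and iwl_pcie_hcmd_complete() earlier in this file implement a simple handshake: the sender sets STATUS_HCMD_ACTIVE, enqueues the command, and sleeps on wait_command_queue; the completion path clears the bit and wakes the sleeper, with a 2 * HZ timeout as the backstop. A condensed sketch of that pattern follows; the names hcmd_status, hcmd_wq, sync_send() and on_complete() are placeholders invented for the sketch, not driver symbols.

    /* Hedged sketch of the sync-command handshake; placeholder names only. */
    #include <linux/wait.h>
    #include <linux/bitops.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    #define MY_HCMD_ACTIVE  0               /* stand-in for STATUS_HCMD_ACTIVE */
    #define MY_TIMEOUT      (2 * HZ)        /* mirrors HOST_COMPLETE_TIMEOUT */

    static unsigned long hcmd_status;
    static DECLARE_WAIT_QUEUE_HEAD(hcmd_wq);

    static int sync_send(void)
    {
            if (test_and_set_bit(MY_HCMD_ACTIVE, &hcmd_status))
                    return -EIO;            /* a command is already in flight */

            /* ... enqueue the command here ... */

            if (!wait_event_timeout(hcmd_wq,
                                    !test_bit(MY_HCMD_ACTIVE, &hcmd_status),
                                    MY_TIMEOUT)) {
                    clear_bit(MY_HCMD_ACTIVE, &hcmd_status);
                    return -ETIMEDOUT;
            }
            return 0;
    }

    static void on_complete(void)           /* called from the response path */
    {
            clear_bit(MY_HCMD_ACTIVE, &hcmd_status);
            wake_up(&hcmd_wq);
    }

As in the driver, the timeout branch clears the bit itself before returning -ETIMEDOUT, so a late completion cannot wake a sender that has already given up.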
977 | /* Frees buffers until index _not_ inclusive */ | 1529 | int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, |
978 | int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, | 1530 | struct iwl_device_cmd *dev_cmd, int txq_id) |
979 | struct sk_buff_head *skbs) | ||
980 | { | 1531 | { |
981 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1532 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
982 | struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; | 1533 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
983 | struct iwl_queue *q = &txq->q; | 1534 | struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; |
984 | int last_to_free; | 1535 | struct iwl_cmd_meta *out_meta; |
985 | int freed = 0; | 1536 | struct iwl_txq *txq; |
1537 | struct iwl_queue *q; | ||
1538 | dma_addr_t phys_addr = 0; | ||
1539 | dma_addr_t txcmd_phys; | ||
1540 | dma_addr_t scratch_phys; | ||
1541 | u16 len, firstlen, secondlen; | ||
1542 | u8 wait_write_ptr = 0; | ||
1543 | __le16 fc = hdr->frame_control; | ||
1544 | u8 hdr_len = ieee80211_hdrlen(fc); | ||
1545 | u16 __maybe_unused wifi_seq; | ||
1546 | |||
1547 | txq = &trans_pcie->txq[txq_id]; | ||
1548 | q = &txq->q; | ||
986 | 1549 | ||
987 | /* This function is not meant to release cmd queue*/ | 1550 | if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) { |
988 | if (WARN_ON(txq_id == trans_pcie->cmd_queue)) | 1551 | WARN_ON_ONCE(1); |
989 | return 0; | 1552 | return -EINVAL; |
1553 | } | ||
990 | 1554 | ||
991 | lockdep_assert_held(&txq->lock); | 1555 | spin_lock(&txq->lock); |
992 | 1556 | ||
993 | /*Since we free until index _not_ inclusive, the one before index is | 1557 | /* In AGG mode, the index in the ring must correspond to the WiFi |
994 | * the last we will free. This one must be used */ | 1558 | * sequence number. This is a HW requirement to help the SCD to parse |
995 | last_to_free = iwl_queue_dec_wrap(index, q->n_bd); | 1559 | * the BA. |
1560 | * Check here that the packets are in the right place on the ring. | ||
1561 | */ | ||
1562 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1563 | wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); | ||
1564 | WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) && | ||
1565 | ((wifi_seq & 0xff) != q->write_ptr), | ||
1566 | "Q: %d WiFi Seq %d tfdNum %d", | ||
1567 | txq_id, wifi_seq, q->write_ptr); | ||
1568 | #endif | ||
1569 | |||
1570 | /* Set up driver data for this TFD */ | ||
1571 | txq->entries[q->write_ptr].skb = skb; | ||
1572 | txq->entries[q->write_ptr].cmd = dev_cmd; | ||
1573 | |||
1574 | dev_cmd->hdr.cmd = REPLY_TX; | ||
1575 | dev_cmd->hdr.sequence = | ||
1576 | cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | | ||
1577 | INDEX_TO_SEQ(q->write_ptr))); | ||
1578 | |||
1579 | /* Set up first empty entry in queue's array of Tx/cmd buffers */ | ||
1580 | out_meta = &txq->entries[q->write_ptr].meta; | ||
996 | 1581 | ||
997 | if ((index >= q->n_bd) || | 1582 | /* |
998 | (iwl_queue_used(q, last_to_free) == 0)) { | 1583 | * Use the first empty entry in this queue's command buffer array |
999 | IWL_ERR(trans, | 1584 | * to contain the Tx command and MAC header concatenated together |
1000 | "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", | 1585 | * (payload data will be in another buffer). |
1001 | __func__, txq_id, last_to_free, q->n_bd, | 1586 | * Size of this varies, due to varying MAC header length. |
1002 | q->write_ptr, q->read_ptr); | 1587 | * If end is not dword aligned, we'll have 2 extra bytes at the end |
1003 | return 0; | 1588 | * of the MAC header (device reads on dword boundaries). |
1589 | * We'll tell device about this padding later. | ||
1590 | */ | ||
1591 | len = sizeof(struct iwl_tx_cmd) + | ||
1592 | sizeof(struct iwl_cmd_header) + hdr_len; | ||
1593 | firstlen = (len + 3) & ~3; | ||
1594 | |||
1595 | /* Tell NIC about any 2-byte padding after MAC header */ | ||
1596 | if (firstlen != len) | ||
1597 | tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; | ||
1598 | |||
1599 | /* Physical address of this Tx command's header (not MAC header!), | ||
1600 | * within command buffer array. */ | ||
1601 | txcmd_phys = dma_map_single(trans->dev, | ||
1602 | &dev_cmd->hdr, firstlen, | ||
1603 | DMA_BIDIRECTIONAL); | ||
1604 | if (unlikely(dma_mapping_error(trans->dev, txcmd_phys))) | ||
1605 | goto out_err; | ||
1606 | dma_unmap_addr_set(out_meta, mapping, txcmd_phys); | ||
1607 | dma_unmap_len_set(out_meta, len, firstlen); | ||
1608 | |||
1609 | if (!ieee80211_has_morefrags(fc)) { | ||
1610 | txq->need_update = 1; | ||
1611 | } else { | ||
1612 | wait_write_ptr = 1; | ||
1613 | txq->need_update = 0; | ||
1004 | } | 1614 | } |
1005 | 1615 | ||
1006 | if (WARN_ON(!skb_queue_empty(skbs))) | 1616 | /* Set up TFD's 2nd entry to point directly to remainder of skb, |
1007 | return 0; | 1617 | * if any (802.11 null frames have no payload). */ |
1618 | secondlen = skb->len - hdr_len; | ||
1619 | if (secondlen > 0) { | ||
1620 | phys_addr = dma_map_single(trans->dev, skb->data + hdr_len, | ||
1621 | secondlen, DMA_TO_DEVICE); | ||
1622 | if (unlikely(dma_mapping_error(trans->dev, phys_addr))) { | ||
1623 | dma_unmap_single(trans->dev, | ||
1624 | dma_unmap_addr(out_meta, mapping), | ||
1625 | dma_unmap_len(out_meta, len), | ||
1626 | DMA_BIDIRECTIONAL); | ||
1627 | goto out_err; | ||
1628 | } | ||
1629 | } | ||
1008 | 1630 | ||
1009 | for (; | 1631 | /* Attach buffers to TFD */ |
1010 | q->read_ptr != index; | 1632 | iwl_pcie_txq_build_tfd(trans, txq, txcmd_phys, firstlen, 1); |
1011 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { | 1633 | if (secondlen > 0) |
1634 | iwl_pcie_txq_build_tfd(trans, txq, phys_addr, secondlen, 0); | ||
1012 | 1635 | ||
1013 | if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL)) | 1636 | scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + |
1014 | continue; | 1637 | offsetof(struct iwl_tx_cmd, scratch); |
1015 | 1638 | ||
1016 | __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb); | 1639 | /* take back ownership of DMA buffer to enable update */ |
1640 | dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen, | ||
1641 | DMA_BIDIRECTIONAL); | ||
1642 | tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); | ||
1643 | tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); | ||
1017 | 1644 | ||
1018 | txq->entries[txq->q.read_ptr].skb = NULL; | 1645 | IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n", |
1646 | le16_to_cpu(dev_cmd->hdr.sequence)); | ||
1647 | IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); | ||
1019 | 1648 | ||
1020 | iwlagn_txq_inval_byte_cnt_tbl(trans, txq); | 1649 | /* Set up entry for this TFD in Tx byte-count array */ |
1650 | iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len)); | ||
1021 | 1651 | ||
1022 | iwl_txq_free_tfd(trans, txq, DMA_TO_DEVICE); | 1652 | dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen, |
1023 | freed++; | 1653 | DMA_BIDIRECTIONAL); |
1024 | } | 1654 | |
1655 | trace_iwlwifi_dev_tx(trans->dev, skb, | ||
1656 | &txq->tfds[txq->q.write_ptr], | ||
1657 | sizeof(struct iwl_tfd), | ||
1658 | &dev_cmd->hdr, firstlen, | ||
1659 | skb->data + hdr_len, secondlen); | ||
1660 | trace_iwlwifi_dev_tx_data(trans->dev, skb, | ||
1661 | skb->data + hdr_len, secondlen); | ||
1662 | |||
1663 | /* start timer if queue currently empty */ | ||
1664 | if (txq->need_update && q->read_ptr == q->write_ptr && | ||
1665 | trans_pcie->wd_timeout) | ||
1666 | mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); | ||
1025 | 1667 | ||
1026 | iwl_queue_progress(trans_pcie, txq); | 1668 | /* Tell device the write index *just past* this latest filled TFD */ |
1669 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); | ||
1670 | iwl_pcie_txq_inc_wr_ptr(trans, txq); | ||
1027 | 1671 | ||
1028 | return freed; | 1672 | /* |
1673 | * At this point the frame is "transmitted" successfully | ||
1674 | * and we will get a TX status notification eventually, | ||
1675 | * regardless of what happens below. The queue-space check only | ||
1676 | * decides whether or not we should update the write pointer. | ||
1677 | */ | ||
1678 | if (iwl_queue_space(q) < q->high_mark) { | ||
1679 | if (wait_write_ptr) { | ||
1680 | txq->need_update = 1; | ||
1681 | iwl_pcie_txq_inc_wr_ptr(trans, txq); | ||
1682 | } else { | ||
1683 | iwl_stop_queue(trans, txq); | ||
1684 | } | ||
1685 | } | ||
1686 | spin_unlock(&txq->lock); | ||
1687 | return 0; | ||
1688 | out_err: | ||
1689 | spin_unlock(&txq->lock); | ||
1690 | return -1; | ||
1029 | } | 1691 | } |
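In iwl_trans_pcie_tx() above, the device command header and the MAC header are mapped as a single first buffer whose length is rounded up to a dword boundary; when the rounding adds two bytes, TX_CMD_FLG_MH_PAD_MSK tells the device to skip them. A small stand-alone illustration of that arithmetic (the length value is hypothetical, chosen only to produce a 2-byte pad):

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical size of iwl_tx_cmd + iwl_cmd_header + MAC header */
            unsigned int len = 158;
            unsigned int firstlen = (len + 3) & ~3;   /* round up to 4 bytes */

            printf("len=%u firstlen=%u pad=%u%s\n",
                   len, firstlen, firstlen - len,
                   firstlen != len ? " -> set TX_CMD_FLG_MH_PAD_MSK" : "");
            return 0;
    }

The pad works out to either 0 or 2 bytes, matching the "2 extra bytes" described by the in-code comment above.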