Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie')
 -rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h |  88
 -rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c       | 148
 -rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c    | 116
 -rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c       | 108
 4 files changed, 218 insertions(+), 242 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index ebf3aa0fedf2..d058ddaebd93 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -73,7 +73,7 @@ struct isr_statistics {
 };
 
 /**
- * struct iwl_rx_queue - Rx queue
+ * struct iwl_rxq - Rx queue
  * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
  * @pool:
@@ -91,7 +91,7 @@ struct isr_statistics {
  *
  * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
  */
-struct iwl_rx_queue {
+struct iwl_rxq {
 	__le32 *bd;
 	dma_addr_t bd_dma;
 	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
@@ -157,8 +157,8 @@ struct iwl_cmd_meta {
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
 * the software buffers (in the variables @meta, @txb in struct
- * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds
- * in the same struct) have 256.
+ * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
+ * the same struct) have 256.
 * This means that we end up with the following:
 * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 * SW entries: | 0 | ... | 31 |
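The HW-to-SW folding described in this comment is easiest to see numerically. A minimal user-space sketch (not driver code; it assumes only that the SW window size TFD_CMD_SLOTS is a power of two, as in the header above, mirroring the masking done by get_cmd_index()):

#include <stdio.h>

#define TFD_CMD_SLOTS 32	/* SW window on the command queue */

/* fold a 0..255 HW TFD index into the 32-entry SW window */
static unsigned int cmd_index(unsigned int hw_index)
{
	return hw_index & (TFD_CMD_SLOTS - 1);
}

int main(void)
{
	/* HW entries N * 32 .. N * 32 + 31 all map onto SW entries 0..31 */
	printf("%u %u %u\n", cmd_index(0), cmd_index(96), cmd_index(255));
	/* prints: 0 0 31 */
	return 0;
}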
@@ -182,7 +182,7 @@ struct iwl_queue {
 #define TFD_TX_CMD_SLOTS 256
 #define TFD_CMD_SLOTS 32
 
-struct iwl_pcie_tx_queue_entry {
+struct iwl_pcie_txq_entry {
 	struct iwl_device_cmd *cmd;
 	struct iwl_device_cmd *copy_cmd;
 	struct sk_buff *skb;
@@ -192,7 +192,7 @@ struct iwl_pcie_tx_queue_entry {
 };
 
 /**
- * struct iwl_tx_queue - Tx Queue for DMA
+ * struct iwl_txq - Tx Queue for DMA
  * @q: generic Rx/Tx queue descriptor
  * @tfds: transmit frame descriptors (DMA memory)
  * @entries: transmit entries (driver state)
@@ -205,10 +205,10 @@ struct iwl_pcie_tx_queue_entry {
  * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
  * descriptors) and required locking structures.
  */
-struct iwl_tx_queue {
+struct iwl_txq {
 	struct iwl_queue q;
 	struct iwl_tfd *tfds;
-	struct iwl_pcie_tx_queue_entry *entries;
+	struct iwl_pcie_txq_entry *entries;
 	spinlock_t lock;
 	struct timer_list stuck_timer;
 	struct iwl_trans_pcie *trans_pcie;
@@ -238,7 +238,7 @@ struct iwl_tx_queue {
  * @wd_timeout: queue watchdog timeout (jiffies)
  */
 struct iwl_trans_pcie {
-	struct iwl_rx_queue rxq;
+	struct iwl_rxq rxq;
 	struct work_struct rx_replenish;
 	struct iwl_trans *trans;
 	struct iwl_drv *drv;
@@ -260,7 +260,7 @@ struct iwl_trans_pcie {
 	struct iwl_dma_ptr scd_bc_tbls;
 	struct iwl_dma_ptr kw;
 
-	struct iwl_tx_queue *txq;
+	struct iwl_txq *txq;
 	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
 	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
 
@@ -323,51 +323,47 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
 /*****************************************************
 * RX
 ******************************************************/
-void iwl_bg_rx_replenish(struct work_struct *data);
-void iwl_irq_tasklet(struct iwl_trans *trans);
-void iwl_rx_replenish(struct iwl_trans *trans);
-void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
-				   struct iwl_rx_queue *q);
+void iwl_pcie_rx_replenish_work(struct work_struct *data);
+void iwl_pcie_rx_replenish(struct iwl_trans *trans);
+void iwl_pcie_tasklet(struct iwl_trans *trans);
+void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q);
 
 /*****************************************************
-* ICT
+* ICT - interrupt handling
 ******************************************************/
-void iwl_reset_ict(struct iwl_trans *trans);
-void iwl_disable_ict(struct iwl_trans *trans);
-int iwl_alloc_isr_ict(struct iwl_trans *trans);
-void iwl_free_isr_ict(struct iwl_trans *trans);
-irqreturn_t iwl_isr_ict(int irq, void *data);
+irqreturn_t iwl_pcie_isr_ict(int irq, void *data);
+int iwl_pcie_alloc_ict(struct iwl_trans *trans);
+void iwl_pcie_free_ict(struct iwl_trans *trans);
+void iwl_pcie_reset_ict(struct iwl_trans *trans);
+void iwl_pcie_disable_ict(struct iwl_trans *trans);
 
 /*****************************************************
 * TX / HCMD
 ******************************************************/
-void iwl_txq_update_write_ptr(struct iwl_trans *trans,
-			      struct iwl_tx_queue *txq);
-int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
-				 struct iwl_tx_queue *txq,
-				 dma_addr_t addr, u16 len, u8 reset);
-int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
-int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
-void iwl_tx_cmd_complete(struct iwl_trans *trans,
-			 struct iwl_rx_cmd_buffer *rxb, int handler_status);
-void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
-				       struct iwl_tx_queue *txq,
-				       u16 byte_cnt);
-void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
-			       int sta_id, int tid, int frame_limit, u16 ssn);
-void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
-void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
-		      enum dma_data_direction dma_dir);
-int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
+void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
+int iwl_pcie_tx_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+			  dma_addr_t addr, u16 len, u8 reset);
+int iwl_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
+void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
+			    struct iwl_rx_cmd_buffer *rxb, int handler_status);
+void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
+				      struct iwl_txq *txq, u16 byte_cnt);
+void iwl_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
+			 int sta_id, int tid, int frame_limit, u16 ssn);
+void iwl_pcie_txq_disable(struct iwl_trans *trans, int queue);
+void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+			   enum dma_data_direction dma_dir);
+int iwl_pcie_txq_reclaim(struct iwl_trans *trans, int txq_id, int index,
 			 struct sk_buff_head *skbs);
-void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id);
+void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id);
+int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
 int iwl_queue_space(const struct iwl_queue *q);
 
 /*****************************************************
 * Error handling
 ******************************************************/
-int iwl_dump_fh(struct iwl_trans *trans, char **buf);
-void iwl_dump_csr(struct iwl_trans *trans);
+int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf);
+void iwl_pcie_dump_csr(struct iwl_trans *trans);
 
 /*****************************************************
 * Helpers
@@ -403,7 +399,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
 }
 
 static inline void iwl_wake_queue(struct iwl_trans *trans,
-				  struct iwl_tx_queue *txq)
+				  struct iwl_txq *txq)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -414,7 +410,7 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
 }
 
 static inline void iwl_stop_queue(struct iwl_trans *trans,
-				  struct iwl_tx_queue *txq)
+				  struct iwl_txq *txq)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -438,8 +434,8 @@ static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
 	return index & (q->n_window - 1);
 }
 
-static inline const char *
-trans_pcie_get_cmd_string(struct iwl_trans_pcie *trans_pcie, u8 cmd)
+static inline const char *get_cmd_string(struct iwl_trans_pcie *trans_pcie,
+					 u8 cmd)
 {
 	if (!trans_pcie->command_names || !trans_pcie->command_names[cmd])
 		return "UNKNOWN";
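The renamed helper keeps the same fallback behaviour. A self-contained sketch of that lookup pattern (the table entries here are placeholders, not the driver's real command_names array):

#include <stdio.h>

typedef unsigned char u8;

/* sparse name table indexed by the 8-bit command id; gaps stay NULL */
static const char *const command_names[256] = {
	[0x10] = "REPLY_RXON",	/* hypothetical example entries */
	[0x1c] = "REPLY_TX",
};

static const char *cmd_name(u8 cmd)
{
	if (!command_names[cmd])
		return "UNKNOWN";
	return command_names[cmd];
}

int main(void)
{
	/* prints: REPLY_TX UNKNOWN */
	printf("%s %s\n", cmd_name(0x1c), cmd_name(0x42));
	return 0;
}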
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 11a93eddc84f..087d022bc93a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -76,7 +76,7 @@
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
- * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well)
 * + A received packet is processed and handed to the kernel network stack,
@@ -89,28 +89,28 @@
 *
 * Driver sequence:
 *
- * iwl_rx_queue_alloc()    Allocates rx_free
- * iwl_rx_replenish()      Replenishes rx_free list from rx_used, and calls
- *                         iwl_rx_queue_restock
- * iwl_rx_queue_restock()  Moves available buffers from rx_free into Rx
+ * iwl_rxq_alloc()         Allocates rx_free
+ * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
+ *                         iwl_pcie_rxq_restock
+ * iwl_pcie_rxq_restock()  Moves available buffers from rx_free into Rx
 *                         queue, updates firmware pointers, and updates
 *                         the WRITE index. If insufficient rx_free buffers
- *                         are available, schedules iwl_rx_replenish
+ *                         are available, schedules iwl_pcie_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()          Detach iwl_rx_mem_buffers from pool up to the
 *                         READ INDEX, detaching the SKB from the pool.
 *                         Moves the packet buffer from queue to rx_used.
- *                         Calls iwl_rx_queue_restock to refill any empty
+ *                         Calls iwl_pcie_rxq_restock to refill any empty
 *                         slots.
 * ...
 *
 */
 
-/**
- * iwl_rx_queue_space - Return number of free slots available in queue.
+/*
+ * iwl_rxq_space - Return number of free slots available in queue.
 */
-static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
+static int iwl_rxq_space(const struct iwl_rxq *q)
 {
 	int s = q->read - q->write;
 	if (s <= 0)
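A standalone sketch of this ring-space computation (illustrative, not the driver body: it assumes the 256-slot RX_QUEUE_SIZE and that one slot is kept back as a guard so a full ring is distinguishable from an empty one):

#include <stdio.h>

#define RX_QUEUE_SIZE 256

static int rxq_space(int read, int write)
{
	int s = read - write;

	if (s <= 0)
		s += RX_QUEUE_SIZE;	/* un-wrap the circular distance */
	s--;				/* keep one slot as a guard */
	if (s < 0)
		s = 0;
	return s;
}

int main(void)
{
	printf("%d\n", rxq_space(0, 0));	/* empty ring -> 255, never 256 */
	printf("%d\n", rxq_space(10, 9));	/* write just behind read -> 0 free */
	return 0;
}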
@@ -122,11 +122,10 @@ static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
 	return s;
 }
 
-/**
- * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
+/*
+ * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
-void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
-				   struct iwl_rx_queue *q)
+void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
 {
 	unsigned long flags;
 	u32 reg;
@@ -176,7 +175,7 @@ void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
-/**
+/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
 static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
@@ -184,8 +183,8 @@ static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
 	return cpu_to_le32((u32)(dma_addr >> 8));
 }
 
-/**
- * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
+/*
+ * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
@@ -195,10 +194,10 @@ static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
-static void iwl_rx_queue_restock(struct iwl_trans *trans)
+static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rx_mem_buffer *rxb;
 	unsigned long flags;
 
@@ -214,7 +213,7 @@ static void iwl_rx_queue_restock(struct iwl_trans *trans)
 		return;
 
 	spin_lock_irqsave(&rxq->lock, flags);
-	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
 		/* The overwritten rxb must be a used one */
 		rxb = rxq->queue[rxq->write];
 		BUG_ON(rxb && rxb->page);
@@ -242,23 +241,23 @@ static void iwl_rx_queue_restock(struct iwl_trans *trans)
 		spin_lock_irqsave(&rxq->lock, flags);
 		rxq->need_update = 1;
 		spin_unlock_irqrestore(&rxq->lock, flags);
-		iwl_rx_queue_update_write_ptr(trans, rxq);
+		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
 	}
 }
 
 /*
- * iwl_rx_allocate - allocate a page for each used RBD
+ * iwl_pcie_rx_allocate - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
- * iwl_rx_queue_restock. The latter function will update the HW to use the newly
+ * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
-static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
+static void iwl_pcie_rx_allocate(struct iwl_trans *trans, gfp_t priority)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rx_mem_buffer *rxb;
 	struct page *page;
 	unsigned long flags;
@@ -333,46 +332,46 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
 }
 
 /*
- * iwl_rx_replenish - Move all used buffers from rx_used to rx_free
+ * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
 *
 * When moving to rx_free an page is allocated for the slot.
 *
- * Also restock the Rx queue via iwl_rx_queue_restock.
+ * Also restock the Rx queue via iwl_pcie_rxq_restock.
 * This is called as a scheduled work item (except for during initialization)
 */
-void iwl_rx_replenish(struct iwl_trans *trans)
+void iwl_pcie_rx_replenish(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	unsigned long flags;
 
-	iwl_rx_allocate(trans, GFP_KERNEL);
+	iwl_pcie_rx_allocate(trans, GFP_KERNEL);
 
 	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-	iwl_rx_queue_restock(trans);
+	iwl_pcie_rxq_restock(trans);
 	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 }
 
-static void iwl_rx_replenish_now(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
 {
-	iwl_rx_allocate(trans, GFP_ATOMIC);
+	iwl_pcie_rx_allocate(trans, GFP_ATOMIC);
 
-	iwl_rx_queue_restock(trans);
+	iwl_pcie_rxq_restock(trans);
 }
 
-void iwl_bg_rx_replenish(struct work_struct *data)
+void iwl_pcie_rx_replenish_work(struct work_struct *data)
 {
 	struct iwl_trans_pcie *trans_pcie =
 	    container_of(data, struct iwl_trans_pcie, rx_replenish);
 
-	iwl_rx_replenish(trans_pcie->trans);
+	iwl_pcie_rx_replenish(trans_pcie->trans);
 }
 
-static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
-				struct iwl_rx_mem_buffer *rxb)
+static void iwl_pcie_rx_handle_rxbuf(struct iwl_trans *trans,
+				     struct iwl_rx_mem_buffer *rxb)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
-	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
 	unsigned long flags;
 	bool page_stolen = false;
 	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
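The replenish split above (GFP_KERNEL from the work item, GFP_ATOMIC from the interrupt path) is a common deferred-work pattern. A minimal sketch of that pattern, with names borrowed from the patch rather than from a buildable module:

#include <linux/workqueue.h>

struct pcie_demo {
	struct work_struct rx_replenish;
	/* ... ring state ... */
};

/* runs in process context, so GFP_KERNEL allocations may sleep here */
static void demo_replenish_work(struct work_struct *data)
{
	struct pcie_demo *p = container_of(data, struct pcie_demo,
					   rx_replenish);

	/* allocate pages with GFP_KERNEL, then restock the ring */
	(void)p;
}

/* once at setup:  INIT_WORK(&p->rx_replenish, demo_replenish_work);
 * from the ISR:   schedule_work(&p->rx_replenish);   (never sleeps)
 */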
@@ -402,8 +401,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 			break;
 
 		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
-			     rxcb._offset,
-			     trans_pcie_get_cmd_string(trans_pcie, pkt->hdr.cmd),
+			     rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
 			     pkt->hdr.cmd);
 
 		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
@@ -435,7 +433,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 		cmd_index = get_cmd_index(&txq->q, index);
 
 		if (reclaim) {
-			struct iwl_pcie_tx_queue_entry *ent;
+			struct iwl_pcie_txq_entry *ent;
 			ent = &txq->entries[cmd_index];
 			cmd = ent->copy_cmd;
 			WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
@@ -465,7 +463,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 		 * iwl_trans_send_cmd()
 		 * as we reclaim the driver command queue */
 		if (!rxcb._page_stolen)
-			iwl_tx_cmd_complete(trans, &rxcb, err);
+			iwl_pcie_hcmd_complete(trans, &rxcb, err);
 		else
 			IWL_WARN(trans, "Claim null rxb?\n");
 	}
@@ -496,17 +494,13 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 	spin_unlock_irqrestore(&rxq->lock, flags);
 }
 
-/**
- * iwl_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
+/*
+ * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
-static void iwl_rx_handle(struct iwl_trans *trans)
+static void iwl_pcie_rx_handle(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	u32 r, i;
 	u8 fill_rx = 0;
 	u32 count = 8;
@@ -537,7 +531,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
 
 		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
 			     r, i, rxb);
-		iwl_rx_handle_rxbuf(trans, rxb);
+		iwl_pcie_rx_handle_rxbuf(trans, rxb);
 
 		i = (i + 1) & RX_QUEUE_MASK;
 		/* If there are a lot of unused frames,
@@ -546,7 +540,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
 			count++;
 			if (count >= 8) {
 				rxq->read = i;
-				iwl_rx_replenish_now(trans);
+				iwl_pcie_rx_replenish_now(trans);
 				count = 0;
 			}
 		}
@@ -555,15 +549,15 @@ static void iwl_rx_handle(struct iwl_trans *trans)
 	/* Backtrack one entry */
 	rxq->read = i;
 	if (fill_rx)
-		iwl_rx_replenish_now(trans);
+		iwl_pcie_rx_replenish_now(trans);
 	else
-		iwl_rx_queue_restock(trans);
+		iwl_pcie_rxq_restock(trans);
 }
 
-/**
- * iwl_irq_handle_error - called for HW or SW error interrupt from card
+/*
+ * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
-static void iwl_irq_handle_error(struct iwl_trans *trans)
+static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -579,8 +573,8 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
 		return;
 	}
 
-	iwl_dump_csr(trans);
-	iwl_dump_fh(trans, NULL);
+	iwl_pcie_dump_csr(trans);
+	iwl_pcie_dump_fh(trans, NULL);
 
 	set_bit(STATUS_FW_ERROR, &trans_pcie->status);
 	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
@@ -590,7 +584,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
 }
 
 /* tasklet for iwlagn interrupt */
-void iwl_irq_tasklet(struct iwl_trans *trans)
+void iwl_pcie_tasklet(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
@@ -642,7 +636,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
 		iwl_disable_interrupts(trans);
 
 		isr_stats->hw++;
-		iwl_irq_handle_error(trans);
+		iwl_pcie_irq_handle_error(trans);
 
 		handled |= CSR_INT_BIT_HW_ERR;
 
@@ -705,17 +699,16 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
 		IWL_ERR(trans, "Microcode SW error detected. "
 			" Restarting 0x%X.\n", inta);
 		isr_stats->sw++;
-		iwl_irq_handle_error(trans);
+		iwl_pcie_irq_handle_error(trans);
 		handled |= CSR_INT_BIT_SW_ERR;
 	}
 
 	/* uCode wakes up after power-down sleep */
 	if (inta & CSR_INT_BIT_WAKEUP) {
 		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
-		iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
+		iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
 		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
-			iwl_txq_update_write_ptr(trans,
-						 &trans_pcie->txq[i]);
+			iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);
 
 		isr_stats->wakeup++;
 
@@ -753,7 +746,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
 			iwl_write8(trans, CSR_INT_PERIODIC_REG,
 				   CSR_INT_PERIODIC_DIS);
 
-			iwl_rx_handle(trans);
+			iwl_pcie_rx_handle(trans);
 
 			/*
 			 * Enable periodic interrupt in 8 msec only if we received
@@ -811,7 +804,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
 #define ICT_COUNT	(ICT_SIZE / sizeof(u32))
 
 /* Free dram table */
-void iwl_free_isr_ict(struct iwl_trans *trans)
+void iwl_pcie_free_ict(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -824,13 +817,12 @@ void iwl_free_isr_ict(struct iwl_trans *trans)
 	}
 }
 
-
 /*
 * allocate dram shared table, it is an aligned memory
 * block of ICT_SIZE.
 * also reset all data related to ICT table interrupt.
 */
-int iwl_alloc_isr_ict(struct iwl_trans *trans)
+int iwl_pcie_alloc_ict(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -843,7 +835,7 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
 
 	/* just an API sanity check ... it is guaranteed to be aligned */
 	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
-		iwl_free_isr_ict(trans);
+		iwl_pcie_free_ict(trans);
 		return -EINVAL;
 	}
 
@@ -864,7 +856,7 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
 /* Device is going up inform it about using ICT interrupt table,
 * also we need to tell the driver to start using ICT interrupt.
 */
-void iwl_reset_ict(struct iwl_trans *trans)
+void iwl_pcie_reset_ict(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 val;
@@ -894,7 +886,7 @@ void iwl_reset_ict(struct iwl_trans *trans)
 }
 
 /* Device is going down disable ict interrupt usage */
-void iwl_disable_ict(struct iwl_trans *trans)
+void iwl_pcie_disable_ict(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	unsigned long flags;
@@ -905,7 +897,7 @@ void iwl_disable_ict(struct iwl_trans *trans)
 }
 
 /* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
-static irqreturn_t iwl_isr(int irq, void *data)
+static irqreturn_t iwl_pcie_isr(int irq, void *data)
 {
 	struct iwl_trans *trans = data;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -952,7 +944,7 @@ static irqreturn_t iwl_isr(int irq, void *data)
 #endif
 
 	trans_pcie->inta |= inta;
-	/* iwl_irq_tasklet() will service interrupts and re-enable them */
+	/* iwl_pcie_tasklet() will service interrupts and re-enable them */
 	if (likely(inta))
 		tasklet_schedule(&trans_pcie->irq_tasklet);
 	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
@@ -977,7 +969,7 @@ none:
 * the interrupt we need to service, driver will set the entries back to 0 and
 * set index.
 */
-irqreturn_t iwl_isr_ict(int irq, void *data)
+irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
 {
 	struct iwl_trans *trans = data;
 	struct iwl_trans_pcie *trans_pcie;
@@ -997,7 +989,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
 	 * use legacy interrupt.
 	 */
 	if (unlikely(!trans_pcie->use_ict)) {
-		irqreturn_t ret = iwl_isr(irq, data);
+		irqreturn_t ret = iwl_pcie_isr(irq, data);
 		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 		return ret;
 	}
@@ -1062,7 +1054,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
 	inta &= trans_pcie->inta_mask;
 	trans_pcie->inta |= inta;
 
-	/* iwl_irq_tasklet() will service interrupts and re-enable them */
+	/* iwl_pcie_tasklet() will service interrupts and re-enable them */
 	if (likely(inta))
 		tasklet_schedule(&trans_pcie->irq_tasklet);
 	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
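The ICT comment above describes the key trick: the NIC DMA-writes non-zero "interrupt cause" words into a shared table, so the ISR can avoid a slow register read. A rough, illustrative model of the drain loop (not the device's real table or register layout):

#include <linux/types.h>

/* walk the shared table from 'index' until the first zero entry,
 * OR-ing the cause bits together and handing slots back to the HW */
static u32 ict_drain(u32 *ict_tbl, u32 *index, u32 count)
{
	u32 inta = 0;

	while (ict_tbl[*index]) {
		inta |= ict_tbl[*index];	/* accumulate causes */
		ict_tbl[*index] = 0;		/* mark slot consumed */
		*index = (*index + 1) % count;
	}
	return inta;
}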
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 7eb5f483f77d..1eed9882b7b8 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -84,7 +84,7 @@
 static int iwl_trans_rx_alloc(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct device *dev = trans->dev;
 
 	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
@@ -120,7 +120,7 @@ err_bd:
 static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	int i;
 
 	/* Fill the rx_used queue with _all_ of the Rx buffers */
@@ -139,8 +139,7 @@ static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
 	}
 }
 
-static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
-				 struct iwl_rx_queue *rxq)
+static void iwl_trans_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 rb_size;
@@ -189,7 +188,7 @@ static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
 static int iwl_rx_init(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
 
 	int i, err;
 	unsigned long flags;
@@ -216,13 +215,13 @@ static int iwl_rx_init(struct iwl_trans *trans)
 	rxq->free_count = 0;
 	spin_unlock_irqrestore(&rxq->lock, flags);
 
-	iwl_rx_replenish(trans);
+	iwl_pcie_rx_replenish(trans);
 
 	iwl_trans_rx_hw_init(trans, rxq);
 
 	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
 	rxq->need_update = 1;
-	iwl_rx_queue_update_write_ptr(trans, rxq);
+	iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
 	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 
 	return 0;
@@ -231,7 +230,7 @@ static int iwl_rx_init(struct iwl_trans *trans)
 static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	unsigned long flags;
 
 	/*if rxq->bd is NULL, it means that nothing has been allocated,
@@ -295,7 +294,7 @@ static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
 
 static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
 {
-	struct iwl_tx_queue *txq = (void *)data;
+	struct iwl_txq *txq = (void *)data;
 	struct iwl_queue *q = &txq->q;
 	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
 	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
@@ -359,7 +358,7 @@ static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
 }
 
 static int iwl_trans_txq_alloc(struct iwl_trans *trans,
-			       struct iwl_tx_queue *txq, int slots_num,
+			       struct iwl_txq *txq, int slots_num,
 			       u32 txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -376,7 +375,7 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans,
 	txq->q.n_window = slots_num;
 
 	txq->entries = kcalloc(slots_num,
-			       sizeof(struct iwl_pcie_tx_queue_entry),
+			       sizeof(struct iwl_pcie_txq_entry),
 			       GFP_KERNEL);
 
 	if (!txq->entries)
@@ -413,7 +412,7 @@ error:
 
 }
 
-static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
+static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
 			      int slots_num, u32 txq_id)
 {
 	int ret;
@@ -443,12 +442,12 @@ static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
 }
 
 /*
- * iwl_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
+ * iwl_pcie_txq_unmap -  Unmap any remaining DMA mappings and free skb's
 */
-void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
+void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
 	enum dma_data_direction dma_dir;
 
@@ -465,31 +464,31 @@ void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
 
 	spin_lock_bh(&txq->lock);
 	while (q->write_ptr != q->read_ptr) {
-		iwl_txq_free_tfd(trans, txq, dma_dir);
+		iwl_pcie_txq_free_tfd(trans, txq, dma_dir);
 		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
 	}
 	spin_unlock_bh(&txq->lock);
 }
 
-/**
- * iwl_tx_queue_free - Deallocate DMA queue.
+/*
+ * iwl_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
-static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
+static void iwl_txq_free(struct iwl_trans *trans, int txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
 	struct device *dev = trans->dev;
 	int i;
 
 	if (WARN_ON(!txq))
 		return;
 
-	iwl_tx_queue_unmap(trans, txq_id);
+	iwl_pcie_txq_unmap(trans, txq_id);
 
 	/* De-alloc array of command/tx buffers */
 	if (txq_id == trans_pcie->cmd_queue)
@@ -515,7 +514,7 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
 	memset(txq, 0, sizeof(*txq));
 }
 
-/**
+/*
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
@@ -529,7 +528,7 @@ static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
 	if (trans_pcie->txq) {
 		for (txq_id = 0;
 		     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
-			iwl_tx_queue_free(trans, txq_id);
+			iwl_txq_free(trans, txq_id);
 	}
 
 	kfree(trans_pcie->txq);
@@ -540,12 +539,9 @@ static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
 	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
 }
 
-/**
+/*
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
- *
- * @param priv
- * @return error code
 */
 static int iwl_trans_tx_alloc(struct iwl_trans *trans)
 {
@@ -578,7 +574,7 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
 	}
 
 	trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
-				  sizeof(struct iwl_tx_queue), GFP_KERNEL);
+				  sizeof(struct iwl_txq), GFP_KERNEL);
 	if (!trans_pcie->txq) {
 		IWL_ERR(trans, "Not enough memory for txq\n");
 		ret = ENOMEM;
@@ -1146,11 +1142,11 @@ static void iwl_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
 
 static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
 {
-	iwl_reset_ict(trans);
+	iwl_pcie_reset_ict(trans);
 	iwl_tx_start(trans, scd_addr);
 }
 
-/**
+/*
 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
 */
 static int iwl_trans_tx_stop(struct iwl_trans *trans)
@@ -1188,7 +1184,7 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
 	/* Unmap DMA from host system and free skb's */
 	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
 	     txq_id++)
-		iwl_tx_queue_unmap(trans, txq_id);
+		iwl_pcie_txq_unmap(trans, txq_id);
 
 	return 0;
 }
@@ -1204,7 +1200,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
 	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 
 	/* device going down, Stop using ICT table */
-	iwl_disable_ict(trans);
+	iwl_pcie_disable_ict(trans);
 
 	/*
 	 * If a HW restart happens during firmware loading,
@@ -1274,7 +1270,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
 	struct iwl_cmd_meta *out_meta;
-	struct iwl_tx_queue *txq;
+	struct iwl_txq *txq;
 	struct iwl_queue *q;
 	dma_addr_t phys_addr = 0;
 	dma_addr_t txcmd_phys;
@@ -1370,10 +1366,9 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	}
 
 	/* Attach buffers to TFD */
-	iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
+	iwl_pcie_tx_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
 	if (secondlen > 0)
-		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
-					     secondlen, 0);
+		iwl_pcie_tx_build_tfd(trans, txq, phys_addr, secondlen, 0);
 
 	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
 		       offsetof(struct iwl_tx_cmd, scratch);
@@ -1389,7 +1384,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
 
 	/* Set up entry for this TFD in Tx byte-count array */
-	iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
+	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 
 	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
 				   DMA_BIDIRECTIONAL);
@@ -1409,7 +1404,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
 	/* Tell device the write index *just past* this latest filled TFD */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
-	iwl_txq_update_write_ptr(trans, txq);
+	iwl_pcie_txq_inc_wr_ptr(trans, txq);
 
 	/*
 	 * At this point the frame is "transmitted" successfully
@@ -1420,7 +1415,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	if (iwl_queue_space(q) < q->high_mark) {
 		if (wait_write_ptr) {
 			txq->need_update = 1;
-			iwl_txq_update_write_ptr(trans, txq);
+			iwl_pcie_txq_inc_wr_ptr(trans, txq);
 		} else {
 			iwl_stop_queue(trans, txq);
 		}
@@ -1442,19 +1437,20 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 
 	if (!trans_pcie->irq_requested) {
 		tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
-			     iwl_irq_tasklet, (unsigned long)trans);
+			     iwl_pcie_tasklet, (unsigned long)trans);
 
-		iwl_alloc_isr_ict(trans);
+		iwl_pcie_alloc_ict(trans);
 
-		err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
-				  DRV_NAME, trans);
+		err = request_irq(trans_pcie->irq, iwl_pcie_isr_ict,
+				  IRQF_SHARED, DRV_NAME, trans);
 		if (err) {
 			IWL_ERR(trans, "Error allocating IRQ %d\n",
 				trans_pcie->irq);
 			goto error;
 		}
 
-		INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
+		INIT_WORK(&trans_pcie->rx_replenish,
+			  iwl_pcie_rx_replenish_work);
 		trans_pcie->irq_requested = true;
 	}
 
@@ -1478,7 +1474,7 @@ err_free_irq:
 	trans_pcie->irq_requested = false;
 	free_irq(trans_pcie->irq, trans);
 error:
-	iwl_free_isr_ict(trans);
+	iwl_pcie_free_ict(trans);
 	tasklet_kill(&trans_pcie->irq_tasklet);
 	return err;
 }
@@ -1522,7 +1518,7 @@ static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 			  struct sk_buff_head *skbs)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
 	/* n_bd is usually 256 => n_bd - 1 = 0xff */
 	int tfd_num = ssn & (txq->q.n_bd - 1);
 
@@ -1531,7 +1527,7 @@ static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 	if (txq->q.read_ptr != tfd_num) {
 		IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
 				   txq_id, txq->q.read_ptr, tfd_num, ssn);
-		iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
+		iwl_pcie_txq_reclaim(trans, txq_id, tfd_num, skbs);
 		if (iwl_queue_space(&txq->q) > txq->q.low_mark)
 			iwl_wake_queue(trans, txq);
 	}
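A worked example of the reclaim index math shown above: with n_bd = 256, the sequence number ssn is folded into a TFD ring index by masking with n_bd - 1 = 0xff (illustrative, standalone C):

#include <stdio.h>

int main(void)
{
	int n_bd = 256;
	int ssn = 0x123;		/* example sequence number */
	int tfd_num = ssn & (n_bd - 1);	/* -> 0x23 */

	printf("ssn 0x%x -> tfd_num 0x%x\n", ssn, tfd_num);
	return 0;
}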
@@ -1590,7 +1586,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
 
 	if (trans_pcie->irq_requested == true) {
 		free_irq(trans_pcie->irq, trans);
-		iwl_free_isr_ict(trans);
+		iwl_pcie_free_ict(trans);
 	}
 
 	pci_disable_msi(trans_pcie->pci_dev);
@@ -1636,10 +1632,10 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans)
 
 #define IWL_FLUSH_WAIT_MS	2000
 
-static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
+static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_tx_queue *txq;
+	struct iwl_txq *txq;
 	struct iwl_queue *q;
 	int cnt;
 	unsigned long now = jiffies;
@@ -1683,7 +1679,7 @@ static const char *get_fh_string(int cmd)
 #undef IWL_CMD
 }
 
-int iwl_dump_fh(struct iwl_trans *trans, char **buf)
+int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf)
 {
 	int i;
 	static const u32 fh_tbl[] = {
@@ -1762,7 +1758,7 @@ static const char *get_csr_string(int cmd)
 #undef IWL_CMD
 }
 
-void iwl_dump_csr(struct iwl_trans *trans)
+void iwl_pcie_dump_csr(struct iwl_trans *trans)
 {
 	int i;
 	static const u32 csr_tbl[] = {
@@ -1852,7 +1848,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1852{ 1848{
1853 struct iwl_trans *trans = file->private_data; 1849 struct iwl_trans *trans = file->private_data;
1854 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1850 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1855 struct iwl_tx_queue *txq; 1851 struct iwl_txq *txq;
1856 struct iwl_queue *q; 1852 struct iwl_queue *q;
1857 char *buf; 1853 char *buf;
1858 int pos = 0; 1854 int pos = 0;
@@ -1889,7 +1885,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1889{ 1885{
1890 struct iwl_trans *trans = file->private_data; 1886 struct iwl_trans *trans = file->private_data;
1891 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1887 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1892 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 1888 struct iwl_rxq *rxq = &trans_pcie->rxq;
1893 char buf[256]; 1889 char buf[256];
1894 int pos = 0; 1890 int pos = 0;
1895 const size_t bufsz = sizeof(buf); 1891 const size_t bufsz = sizeof(buf);
@@ -2008,7 +2004,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
2008 if (sscanf(buf, "%d", &csr) != 1) 2004 if (sscanf(buf, "%d", &csr) != 1)
2009 return -EFAULT; 2005 return -EFAULT;
2010 2006
2011 iwl_dump_csr(trans); 2007 iwl_pcie_dump_csr(trans);
2012 2008
2013 return count; 2009 return count;
2014} 2010}
@@ -2022,7 +2018,7 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
2022 int pos = 0; 2018 int pos = 0;
2023 ssize_t ret = -EFAULT; 2019 ssize_t ret = -EFAULT;
2024 2020
2025 ret = pos = iwl_dump_fh(trans, &buf); 2021 ret = pos = iwl_pcie_dump_fh(trans, &buf);
2026 if (buf) { 2022 if (buf) {
2027 ret = simple_read_from_buffer(user_buf, 2023 ret = simple_read_from_buffer(user_buf,
2028 count, ppos, buf, pos); 2024 count, ppos, buf, pos);
@@ -2091,17 +2087,17 @@ static const struct iwl_trans_ops trans_ops_pcie = {
2091 2087
2092 .wowlan_suspend = iwl_trans_pcie_wowlan_suspend, 2088 .wowlan_suspend = iwl_trans_pcie_wowlan_suspend,
2093 2089
2094 .send_cmd = iwl_trans_pcie_send_cmd, 2090 .send_cmd = iwl_pcie_send_cmd,
2095 2091
2096 .tx = iwl_trans_pcie_tx, 2092 .tx = iwl_trans_pcie_tx,
2097 .reclaim = iwl_trans_pcie_reclaim, 2093 .reclaim = iwl_trans_pcie_reclaim,
2098 2094
2099 .txq_disable = iwl_trans_pcie_txq_disable, 2095 .txq_disable = iwl_pcie_txq_disable,
2100 .txq_enable = iwl_trans_pcie_txq_enable, 2096 .txq_enable = iwl_pcie_txq_enable,
2101 2097
2102 .dbgfs_register = iwl_trans_pcie_dbgfs_register, 2098 .dbgfs_register = iwl_trans_pcie_dbgfs_register,
2103 2099
2104 .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty, 2100 .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
2105 2101
2106#ifdef CONFIG_PM_SLEEP 2102#ifdef CONFIG_PM_SLEEP
2107 .suspend = iwl_trans_pcie_suspend, 2103 .suspend = iwl_trans_pcie_suspend,
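All of these renames land in trans_ops_pcie, a function-pointer table that binds the generic transport interface to the PCIe implementations. A toy version of that dispatch pattern, with a two-method interface standing in for the real send_cmd/tx/reclaim/txq_enable set:

    #include <stdio.h>

    struct trans;

    struct trans_ops {
            int  (*send_cmd)(struct trans *t, int id);
            void (*txq_disable)(struct trans *t, int txq_id);
    };

    struct trans { const struct trans_ops *ops; };

    static int pcie_send_cmd(struct trans *t, int id)
    { (void)t; printf("cmd %d\n", id); return 0; }

    static void pcie_txq_disable(struct trans *t, int txq_id)
    { (void)t; printf("disable txq %d\n", txq_id); }

    static const struct trans_ops trans_ops_pcie = {
            .send_cmd    = pcie_send_cmd,
            .txq_disable = pcie_txq_disable,
    };

    int main(void)
    {
            struct trans t = { .ops = &trans_ops_pcie };

            return t.ops->send_cmd(&t, 7);  /* dispatch through the table */
    }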
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index dcc7e1256e39..eac0481a9c71 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -42,12 +42,11 @@
42#define IWL_TX_CRC_SIZE 4 42#define IWL_TX_CRC_SIZE 4
43#define IWL_TX_DELIMITER_SIZE 4 43#define IWL_TX_DELIMITER_SIZE 4
44 44
45/** 45/*
46 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 46 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
47 */ 47 */
48void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, 48void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
49 struct iwl_tx_queue *txq, 49 struct iwl_txq *txq, u16 byte_cnt)
50 u16 byte_cnt)
51{ 50{
52 struct iwlagn_scd_bc_tbl *scd_bc_tbl; 51 struct iwlagn_scd_bc_tbl *scd_bc_tbl;
53 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 52 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -88,10 +87,10 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
88 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; 87 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
89} 88}
90 89
91/** 90/*
92 * iwl_txq_update_write_ptr - Send new write index to hardware 91 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
93 */ 92 */
94void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq) 93void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
95{ 94{
96 u32 reg = 0; 95 u32 reg = 0;
97 int txq_id = txq->q.id; 96 int txq_id = txq->q.id;
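The tfd_offset write at TFD_QUEUE_SIZE_MAX + write_ptr above mirrors entries from the start of the ring past its end, so the scheduler can read byte counts across the wrap point. A sketch of that mirroring; the constants follow the driver's, and the guard shown here sits in context this hunk omits:

    #include <stdint.h>

    #define TFD_QUEUE_SIZE_MAX    256
    #define TFD_QUEUE_SIZE_BC_DUP  64

    static void set_byte_cnt(uint16_t *tfd_offset, int write_ptr,
                             uint16_t bc_ent)
    {
            tfd_offset[write_ptr] = bc_ent;
            /* duplicate early entries past the end of the ring */
            if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
                    tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
    }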
@@ -206,8 +205,8 @@ static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
206 tfd->num_tbs = 0; 205 tfd->num_tbs = 0;
207} 206}
208 207
209/** 208/*
210 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] 209 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
211 * @trans - transport private data 210 * @trans - transport private data
212 * @txq - tx queue 211 * @txq - tx queue
213 * @dma_dir - the direction of the DMA mapping 212 * @dma_dir - the direction of the DMA mapping
@@ -215,8 +214,8 @@ static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
215 * Does NOT advance any TFD circular buffer read/write indexes 214 * Does NOT advance any TFD circular buffer read/write indexes
216 * Does NOT free the TFD itself (which is within circular buffer) 215 * Does NOT free the TFD itself (which is within circular buffer)
217 */ 216 */
218void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, 217void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
219 enum dma_data_direction dma_dir) 218 enum dma_data_direction dma_dir)
220{ 219{
221 struct iwl_tfd *tfd_tmp = txq->tfds; 220 struct iwl_tfd *tfd_tmp = txq->tfds;
222 221
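As the comment says, the free path only unmaps the chunks a TFD references; it never moves the ring indexes. A simplified model of that walk, with a toy tb/tfd layout and unmap_tb() standing in for the real DMA unmap:

    #include <stdint.h>

    #define MAX_TBS 20                      /* IWL_NUM_OF_TBS in the driver */

    struct tb  { uint64_t addr; uint16_t len; };
    struct tfd { uint8_t num_tbs; struct tb tbs[MAX_TBS]; };

    static void unmap_tb(struct tb *tb)     /* stub for dma_unmap_single() */
    {
            tb->addr = 0;
            tb->len  = 0;
    }

    static void free_tfd(struct tfd *tfd)
    {
            int num_tbs = tfd->num_tbs;

            if (num_tbs > MAX_TBS)
                    return;         /* corrupt descriptor; the driver warns */
            for (int i = 0; i < num_tbs; i++)
                    unmap_tb(&tfd->tbs[i]);
            tfd->num_tbs = 0;       /* mark the slot reusable */
    }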
@@ -247,10 +246,8 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
247 } 246 }
248} 247}
249 248
250int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans, 249int iwl_pcie_tx_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
251 struct iwl_tx_queue *txq, 250 dma_addr_t addr, u16 len, u8 reset)
252 dma_addr_t addr, u16 len,
253 u8 reset)
254{ 251{
255 struct iwl_queue *q; 252 struct iwl_queue *q;
256 struct iwl_tfd *tfd, *tfd_tmp; 253 struct iwl_tfd *tfd, *tfd_tmp;
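iwl_pcie_tx_build_tfd is the inverse operation: append one DMA address/length pair as the next tb entry, optionally zeroing the descriptor first via the reset flag. A sketch using the same toy layout as the previous block (MAX_TBS stands in for IWL_NUM_OF_TBS):

    #include <stdint.h>
    #include <string.h>

    #define MAX_TBS 20

    struct tb  { uint64_t addr; uint16_t len; };
    struct tfd { uint8_t num_tbs; struct tb tbs[MAX_TBS]; };

    static int build_tfd(struct tfd *tfd, uint64_t addr, uint16_t len,
                         int reset)
    {
            if (reset)
                    memset(tfd, 0, sizeof(*tfd));   /* first chunk starts clean */
            if (tfd->num_tbs >= MAX_TBS)
                    return -1;                      /* too many chunks: -EINVAL */
            tfd->tbs[tfd->num_tbs].addr = addr;
            tfd->tbs[tfd->num_tbs].len  = len;
            return tfd->num_tbs++;                  /* index of the new entry */
    }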
@@ -322,7 +319,7 @@ int iwl_queue_space(const struct iwl_queue *q)
322 return s; 319 return s;
323} 320}
324 321
325/** 322/*
326 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes 323 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
327 */ 324 */
328int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id) 325int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
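iwl_queue_space and iwl_queue_init manage a classic ring: free space is derived from the read/write pointers, minus a small reserve so a full ring never looks empty. A simplified sketch, assuming a single power-of-two ring size (the driver additionally distinguishes the HW depth n_bd from the SW window n_window):

    struct queue { int n_window; int read_ptr, write_ptr; };

    static int queue_space(const struct queue *q)
    {
            int used  = (q->write_ptr - q->read_ptr) & (q->n_window - 1);
            int space = q->n_window - used - 2; /* reserve: full != empty */

            return space > 0 ? space : 0;
    }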
@@ -355,7 +352,7 @@ int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
355} 352}
356 353
357static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, 354static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
358 struct iwl_tx_queue *txq) 355 struct iwl_txq *txq)
359{ 356{
360 struct iwl_trans_pcie *trans_pcie = 357 struct iwl_trans_pcie *trans_pcie =
361 IWL_TRANS_GET_PCIE_TRANS(trans); 358 IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -415,8 +412,8 @@ static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id)
415 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); 412 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
416} 413}
417 414
418void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, 415void iwl_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
419 int sta_id, int tid, int frame_limit, u16 ssn) 416 int sta_id, int tid, int frame_limit, u16 ssn)
420{ 417{
421 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 418 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
422 419
@@ -477,7 +474,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
477 txq_id, fifo, ssn & 0xff); 474 txq_id, fifo, ssn & 0xff);
478} 475}
479 476
480void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) 477void iwl_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
481{ 478{
482 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 479 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
483 u32 stts_addr = trans_pcie->scd_base_addr + 480 u32 stts_addr = trans_pcie->scd_base_addr +
@@ -494,14 +491,14 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
494 _iwl_write_targ_mem_dwords(trans, stts_addr, 491 _iwl_write_targ_mem_dwords(trans, stts_addr,
495 zero_val, ARRAY_SIZE(zero_val)); 492 zero_val, ARRAY_SIZE(zero_val));
496 493
497 iwl_tx_queue_unmap(trans, txq_id); 494 iwl_pcie_txq_unmap(trans, txq_id);
498 495
499 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); 496 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
500} 497}
501 498
502/*************** HOST COMMAND QUEUE FUNCTIONS *****/ 499/*************** HOST COMMAND QUEUE FUNCTIONS *****/
503 500
504/** 501/*
505 * iwl_enqueue_hcmd - enqueue a uCode command 502 * iwl_enqueue_hcmd - enqueue a uCode command
506 * @priv: device private data pointer 503 * @priv: device private data pointer
507 * @cmd: a pointer to the ucode command structure 504 * @cmd: a pointer to the ucode command structure
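The hunks that follow show iwl_enqueue_hcmd reserving the next command-queue slot, copying the command in, DMA-mapping it, attaching it to a TFD and finally publishing the new write pointer. A heavily condensed outline of that order of operations, with every helper a hypothetical stub:

    #include <stddef.h>

    struct cmd { int id; const void *data; size_t len; };

    /* stand-ins for spin_lock_bh(), the copy/DMA-map path and build_tfd */
    static void lock_txq(void)                       { }
    static void unlock_txq(void)                     { }
    static int  copy_map_attach(const struct cmd *c) { (void)c; return 0; }
    static void publish_write_ptr(void)              { }

    static int enqueue_hcmd(const struct cmd *c)
    {
            int idx;

            lock_txq();
            idx = copy_map_attach(c);       /* copy into slot, map, attach TFD */
            if (idx >= 0)
                    publish_write_ptr();    /* inc write_ptr, ring doorbell */
            unlock_txq();
            return idx;                     /* slot index, or negative errno */
    }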
@@ -513,7 +510,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
513static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) 510static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
514{ 511{
515 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 512 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
516 struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; 513 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
517 struct iwl_queue *q = &txq->q; 514 struct iwl_queue *q = &txq->q;
518 struct iwl_device_cmd *out_cmd; 515 struct iwl_device_cmd *out_cmd;
519 struct iwl_cmd_meta *out_meta; 516 struct iwl_cmd_meta *out_meta;
@@ -576,8 +573,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
576 */ 573 */
577 if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, 574 if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
578 "Command %s (%#x) is too large (%d bytes)\n", 575 "Command %s (%#x) is too large (%d bytes)\n",
579 trans_pcie_get_cmd_string(trans_pcie, cmd->id), 576 get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) {
580 cmd->id, copy_size)) {
581 idx = -EINVAL; 577 idx = -EINVAL;
582 goto free_dup_buf; 578 goto free_dup_buf;
583 } 579 }
@@ -640,7 +636,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
640 636
641 IWL_DEBUG_HC(trans, 637 IWL_DEBUG_HC(trans,
642 "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", 638 "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
643 trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd), 639 get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
644 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), 640 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
645 cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); 641 cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
646 642
@@ -654,7 +650,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
654 dma_unmap_addr_set(out_meta, mapping, phys_addr); 650 dma_unmap_addr_set(out_meta, mapping, phys_addr);
655 dma_unmap_len_set(out_meta, len, copy_size); 651 dma_unmap_len_set(out_meta, len, copy_size);
656 652
657 iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1); 653 iwl_pcie_tx_build_tfd(trans, txq, phys_addr, copy_size, 1);
658 654
659 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 655 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
660 const void *data = cmd->data[i]; 656 const void *data = cmd->data[i];
@@ -676,8 +672,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
676 goto out; 672 goto out;
677 } 673 }
678 674
679 iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, 675 iwl_pcie_tx_build_tfd(trans, txq, phys_addr, cmd->len[i], 0);
680 cmd->len[i], 0);
681 } 676 }
682 677
683 out_meta->flags = cmd->flags; 678 out_meta->flags = cmd->flags;
@@ -696,7 +691,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
696 691
697 /* Increment and update queue's write index */ 692 /* Increment and update queue's write index */
698 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 693 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
699 iwl_txq_update_write_ptr(trans, txq); 694 iwl_pcie_txq_inc_wr_ptr(trans, txq);
700 695
701 out: 696 out:
702 spin_unlock_bh(&txq->lock); 697 spin_unlock_bh(&txq->lock);
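Publishing a new entry is always these two steps: wrap-increment write_ptr, then write the index to the hardware doorbell. A sketch; the fake_doorbell store stands in for the driver's write of write_ptr | (txq_id << 8) to the target write-pointer register:

    #include <stdint.h>

    static int queue_inc_wrap(int index, int n_bd)
    {
            return ++index == n_bd ? 0 : index;
    }

    static volatile uint32_t fake_doorbell;         /* stands in for MMIO */

    static void txq_inc_wr_ptr(int *write_ptr, int n_bd, int txq_id)
    {
            *write_ptr = queue_inc_wrap(*write_ptr, n_bd);
            /* low byte is the index, upper bits select the queue */
            fake_doorbell = (uint32_t)(*write_ptr | (txq_id << 8));
    }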
@@ -707,7 +702,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
707} 702}
708 703
709static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie, 704static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie,
710 struct iwl_tx_queue *txq) 705 struct iwl_txq *txq)
711{ 706{
712 if (!trans_pcie->wd_timeout) 707 if (!trans_pcie->wd_timeout)
713 return; 708 return;
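iwl_queue_progress implements the stuck-queue watchdog: an empty queue stops the timer, a non-empty one pushes the deadline out by wd_timeout. A sketch with timer_stop()/timer_kick() as stand-ins for del_timer()/mod_timer():

    struct wq { int read_ptr, write_ptr; long wd_timeout; };

    static void timer_stop(void)            { }
    static void timer_kick(long timeout_ms) { (void)timeout_ms; }

    static void queue_progress(struct wq *q)
    {
            if (!q->wd_timeout)
                    return;                 /* watchdog disabled */
            if (q->read_ptr == q->write_ptr)
                    timer_stop();           /* empty: nothing can be stuck */
            else
                    timer_kick(q->wd_timeout);
    }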
@@ -722,7 +717,7 @@ static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie,
722 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); 717 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
723} 718}
724 719
725/** 720/*
726 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd 721 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
727 * 722 *
728 * When FW advances 'R' index, all entries between old and new 'R' index 723 * When FW advances 'R' index, all entries between old and new 'R' index
@@ -733,7 +728,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
733 int idx) 728 int idx)
734{ 729{
735 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 730 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
736 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; 731 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
737 struct iwl_queue *q = &txq->q; 732 struct iwl_queue *q = &txq->q;
738 int nfreed = 0; 733 int nfreed = 0;
739 734
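For the command queue, reclaim just advances read_ptr (with wrap) until it passes the index the firmware reported complete; freeing more than one entry means driver and firmware disagree about what was in flight, which the driver treats as an error. Modelled on the hunk above, not copied from it:

    static int inc_wrap(int i, int n_bd)
    {
            return ++i == n_bd ? 0 : i;
    }

    static int hcmd_reclaim(int *read_ptr, int idx, int n_bd)
    {
            int nfreed = 0;

            for (idx = inc_wrap(idx, n_bd); *read_ptr != idx;
                 *read_ptr = inc_wrap(*read_ptr, n_bd)) {
                    if (nfreed++ > 0)
                            return -1;  /* only one cmd in flight is legal */
            }
            return nfreed;
    }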
@@ -761,8 +756,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
761 iwl_queue_progress(trans_pcie, txq); 756 iwl_queue_progress(trans_pcie, txq);
762} 757}
763 758
764/** 759/*
765 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them 760 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
766 * @rxb: Rx buffer to reclaim 761 * @rxb: Rx buffer to reclaim
767 * @handler_status: return value of the handler of the command 762 * @handler_status: return value of the handler of the command
768 * (put in setup_rx_handlers) 763 * (put in setup_rx_handlers)
@@ -771,8 +766,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
771 * will be executed. The attached skb (if present) will only be freed 766 * will be executed. The attached skb (if present) will only be freed
772 * if the callback returns 1 767 * if the callback returns 1
773 */ 768 */
774void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb, 769void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
775 int handler_status) 770 struct iwl_rx_cmd_buffer *rxb, int handler_status)
776{ 771{
777 struct iwl_rx_packet *pkt = rxb_addr(rxb); 772 struct iwl_rx_packet *pkt = rxb_addr(rxb);
778 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 773 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -782,7 +777,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
782 struct iwl_device_cmd *cmd; 777 struct iwl_device_cmd *cmd;
783 struct iwl_cmd_meta *meta; 778 struct iwl_cmd_meta *meta;
784 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 779 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
785 struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; 780 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
786 781
787 /* If a Tx command is being handled and it isn't in the actual 782 /* If a Tx command is being handled and it isn't in the actual
788 * command queue then a command routing bug has been introduced 783 * command queue then a command routing bug has been introduced
@@ -820,13 +815,11 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
820 if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { 815 if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
821 IWL_WARN(trans, 816 IWL_WARN(trans,
822 "HCMD_ACTIVE already clear for command %s\n", 817 "HCMD_ACTIVE already clear for command %s\n",
823 trans_pcie_get_cmd_string(trans_pcie, 818 get_cmd_string(trans_pcie, cmd->hdr.cmd));
824 cmd->hdr.cmd));
825 } 819 }
826 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); 820 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
827 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", 821 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
828 trans_pcie_get_cmd_string(trans_pcie, 822 get_cmd_string(trans_pcie, cmd->hdr.cmd));
829 cmd->hdr.cmd));
830 wake_up(&trans_pcie->wait_command_queue); 823 wake_up(&trans_pcie->wait_command_queue);
831 } 824 }
832 825
@@ -851,7 +844,7 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
851 if (ret < 0) { 844 if (ret < 0) {
852 IWL_ERR(trans, 845 IWL_ERR(trans,
853 "Error sending %s: enqueue_hcmd failed: %d\n", 846 "Error sending %s: enqueue_hcmd failed: %d\n",
854 trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret); 847 get_cmd_string(trans_pcie, cmd->id), ret);
855 return ret; 848 return ret;
856 } 849 }
857 return 0; 850 return 0;
@@ -864,17 +857,17 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
864 int ret; 857 int ret;
865 858
866 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", 859 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
867 trans_pcie_get_cmd_string(trans_pcie, cmd->id)); 860 get_cmd_string(trans_pcie, cmd->id));
868 861
869 if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE, 862 if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
870 &trans_pcie->status))) { 863 &trans_pcie->status))) {
871 IWL_ERR(trans, "Command %s: a command is already active!\n", 864 IWL_ERR(trans, "Command %s: a command is already active!\n",
872 trans_pcie_get_cmd_string(trans_pcie, cmd->id)); 865 get_cmd_string(trans_pcie, cmd->id));
873 return -EIO; 866 return -EIO;
874 } 867 }
875 868
876 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", 869 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
877 trans_pcie_get_cmd_string(trans_pcie, cmd->id)); 870 get_cmd_string(trans_pcie, cmd->id));
878 871
879 cmd_idx = iwl_enqueue_hcmd(trans, cmd); 872 cmd_idx = iwl_enqueue_hcmd(trans, cmd);
880 if (cmd_idx < 0) { 873 if (cmd_idx < 0) {
@@ -882,7 +875,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
882 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); 875 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
883 IWL_ERR(trans, 876 IWL_ERR(trans,
884 "Error sending %s: enqueue_hcmd failed: %d\n", 877 "Error sending %s: enqueue_hcmd failed: %d\n",
885 trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret); 878 get_cmd_string(trans_pcie, cmd->id), ret);
886 return ret; 879 return ret;
887 } 880 }
888 881
@@ -892,13 +885,13 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
892 HOST_COMPLETE_TIMEOUT); 885 HOST_COMPLETE_TIMEOUT);
893 if (!ret) { 886 if (!ret) {
894 if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { 887 if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
895 struct iwl_tx_queue *txq = 888 struct iwl_txq *txq =
896 &trans_pcie->txq[trans_pcie->cmd_queue]; 889 &trans_pcie->txq[trans_pcie->cmd_queue];
897 struct iwl_queue *q = &txq->q; 890 struct iwl_queue *q = &txq->q;
898 891
899 IWL_ERR(trans, 892 IWL_ERR(trans,
900 "Error sending %s: time out after %dms.\n", 893 "Error sending %s: time out after %dms.\n",
901 trans_pcie_get_cmd_string(trans_pcie, cmd->id), 894 get_cmd_string(trans_pcie, cmd->id),
902 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); 895 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
903 896
904 IWL_ERR(trans, 897 IWL_ERR(trans,
@@ -908,8 +901,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
908 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); 901 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
909 IWL_DEBUG_INFO(trans, 902 IWL_DEBUG_INFO(trans,
910 "Clearing HCMD_ACTIVE for command %s\n", 903 "Clearing HCMD_ACTIVE for command %s\n",
911 trans_pcie_get_cmd_string(trans_pcie, 904 get_cmd_string(trans_pcie, cmd->id));
912 cmd->id));
913 ret = -ETIMEDOUT; 905 ret = -ETIMEDOUT;
914 goto cancel; 906 goto cancel;
915 } 907 }
@@ -917,7 +909,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
917 909
918 if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) { 910 if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) {
919 IWL_ERR(trans, "FW error in SYNC CMD %s\n", 911 IWL_ERR(trans, "FW error in SYNC CMD %s\n",
920 trans_pcie_get_cmd_string(trans_pcie, cmd->id)); 912 get_cmd_string(trans_pcie, cmd->id));
921 ret = -EIO; 913 ret = -EIO;
922 goto cancel; 914 goto cancel;
923 } 915 }
@@ -930,7 +922,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
930 922
931 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { 923 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
932 IWL_ERR(trans, "Error: Response NULL in '%s'\n", 924 IWL_ERR(trans, "Error: Response NULL in '%s'\n",
933 trans_pcie_get_cmd_string(trans_pcie, cmd->id)); 925 get_cmd_string(trans_pcie, cmd->id));
934 ret = -EIO; 926 ret = -EIO;
935 goto cancel; 927 goto cancel;
936 } 928 }
@@ -957,7 +949,7 @@ cancel:
957 return ret; 949 return ret;
958} 950}
959 951
960int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) 952int iwl_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
961{ 953{
962 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 954 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
963 955
@@ -975,11 +967,11 @@ int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
975} 967}
976 968
977/* Frees buffers until index _not_ inclusive */ 969/* Frees buffers until index _not_ inclusive */
978int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, 970int iwl_pcie_txq_reclaim(struct iwl_trans *trans, int txq_id, int index,
979 struct sk_buff_head *skbs) 971 struct sk_buff_head *skbs)
980{ 972{
981 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 973 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
982 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; 974 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
983 struct iwl_queue *q = &txq->q; 975 struct iwl_queue *q = &txq->q;
984 int last_to_free; 976 int last_to_free;
985 int freed = 0; 977 int freed = 0;
@@ -1019,7 +1011,7 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
1019 1011
1020 iwlagn_txq_inval_byte_cnt_tbl(trans, txq); 1012 iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
1021 1013
1022 iwl_txq_free_tfd(trans, txq, DMA_TO_DEVICE); 1014 iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
1023 freed++; 1015 freed++;
1024 } 1016 }
1025 1017