author     Emmanuel Grumbach <emmanuel.grumbach@intel.com>   2012-11-14 05:39:52 -0500
committer  Johannes Berg <johannes.berg@intel.com>           2012-11-19 09:01:35 -0500
commit     990aa6d7b28d26bf22171410b49f191e8e9b09fc (patch)
tree       442c1d51b039ec3ea42b41ddd40cab8ad4612e5f /drivers/net/wireless/iwlwifi/pcie/rx.c
parent     b55e57f53f8740a2d1432e4963372d303b798530 (diff)
iwlwifi: rename functions in transport layer
1) s/tx_queue/txq
for the sake of consistency.
2) s/rx_queue/rxq
for the sake of consistency.
3) Make all functions begin with iwl_pcie_
iwl_queue_init and iwl_queue_space are an exception
since they are not PCIE specific, although they live
in the pcie subdir.
4) s/trans_pcie_get_cmd_string/get_cmd_string
it is much shorter and is used in debug prints,
which are long lines.
5) s/iwl_bg_rx_replenish/iwl_pcie_rx_replenish_work
this better emphasizes that it is a work item
(see the wiring sketch at the end of this page)
6) remove invalid kernel-doc markers
(see the sketch after the sign-offs below)
pcie/tx.c and pcie/trans.c still need to be cleaned up.
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
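
A note on item 6: in kernel style, a comment opened with "/**" is a kernel-doc comment that scripts/kernel-doc parses, so it must start with the function name and describe every parameter in @name: lines; a comment that does not follow that format is an invalid kernel-doc marker and should open with a plain "/*". A minimal sketch of the distinction, reusing two functions from this patch (the bare prototypes and the @-parameter descriptions are illustrative, not quoted from the source):

/**
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 * @trans: the transport, used for register access
 * @q: the RX queue whose WRITE index is pushed to the device
 *
 * Valid kernel-doc: double-star opener, "name - summary" line,
 * and one @-line per parameter.
 */
void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q);

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 *
 * Plain single-star opener: ignored by kernel-doc, no @-lines needed.
 */
static int iwl_rxq_space(const struct iwl_rxq *q);
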
Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie/rx.c')
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c  148
1 file changed, 70 insertions(+), 78 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 11a93eddc84f..087d022bc93a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -76,7 +76,7 @@
  * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
  * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
  * to replenish the iwl->rxq->rx_free.
- * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
  * iwl->rxq is replenished and the READ INDEX is updated (updating the
  * 'processed' and 'read' driver indexes as well)
  * + A received packet is processed and handed to the kernel network stack,
@@ -89,28 +89,28 @@
  *
  * Driver sequence:
  *
- * iwl_rx_queue_alloc()   Allocates rx_free
- * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
- *                        iwl_rx_queue_restock
- * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
+ * iwl_rxq_alloc()        Allocates rx_free
+ * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
+ *                        iwl_pcie_rxq_restock
+ * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
  *                        queue, updates firmware pointers, and updates
  *                        the WRITE index. If insufficient rx_free buffers
- *                        are available, schedules iwl_rx_replenish
+ *                        are available, schedules iwl_pcie_rx_replenish
  *
  * -- enable interrupts --
  * ISR - iwl_rx()         Detach iwl_rx_mem_buffers from pool up to the
  *                        READ INDEX, detaching the SKB from the pool.
  *                        Moves the packet buffer from queue to rx_used.
- *                        Calls iwl_rx_queue_restock to refill any empty
+ *                        Calls iwl_pcie_rxq_restock to refill any empty
  *                        slots.
  * ...
  *
  */
 
-/**
- * iwl_rx_queue_space - Return number of free slots available in queue.
+/*
+ * iwl_rxq_space - Return number of free slots available in queue.
  */
-static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
+static int iwl_rxq_space(const struct iwl_rxq *q)
 {
         int s = q->read - q->write;
         if (s <= 0)
@@ -122,11 +122,10 @@ static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
         return s;
 }
 
-/**
- * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
+/*
+ * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
  */
-void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
-                                   struct iwl_rx_queue *q)
+void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
 {
         unsigned long flags;
         u32 reg;
@@ -176,7 +175,7 @@ void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
         spin_unlock_irqrestore(&q->lock, flags);
 }
 
-/**
+/*
  * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
  */
 static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
@@ -184,8 +183,8 @@ static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
         return cpu_to_le32((u32)(dma_addr >> 8));
 }
 
-/**
- * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
+/*
+ * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
  *
  * If there are slots in the RX queue that need to be restocked,
  * and we have free pre-allocated buffers, fill the ranks as much
@@ -195,10 +194,10 @@ static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
  * also updates the memory address in the firmware to reference the new
  * target buffer.
  */
-static void iwl_rx_queue_restock(struct iwl_trans *trans)
+static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-        struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+        struct iwl_rxq *rxq = &trans_pcie->rxq;
         struct iwl_rx_mem_buffer *rxb;
         unsigned long flags;
 
@@ -214,7 +213,7 @@ static void iwl_rx_queue_restock(struct iwl_trans *trans)
                 return;
 
         spin_lock_irqsave(&rxq->lock, flags);
-        while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+        while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
                 /* The overwritten rxb must be a used one */
                 rxb = rxq->queue[rxq->write];
                 BUG_ON(rxb && rxb->page);
@@ -242,23 +241,23 @@ static void iwl_rx_queue_restock(struct iwl_trans *trans)
                 spin_lock_irqsave(&rxq->lock, flags);
                 rxq->need_update = 1;
                 spin_unlock_irqrestore(&rxq->lock, flags);
-                iwl_rx_queue_update_write_ptr(trans, rxq);
+                iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
         }
 }
 
 /*
- * iwl_rx_allocate - allocate a page for each used RBD
+ * iwl_pcie_rx_allocate - allocate a page for each used RBD
  *
  * A used RBD is an Rx buffer that has been given to the stack. To use it again
  * a page must be allocated and the RBD must point to the page. This function
  * doesn't change the HW pointer but handles the list of pages that is used by
- * iwl_rx_queue_restock. The latter function will update the HW to use the newly
+ * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
  * allocated buffers.
  */
-static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
+static void iwl_pcie_rx_allocate(struct iwl_trans *trans, gfp_t priority)
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-        struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+        struct iwl_rxq *rxq = &trans_pcie->rxq;
         struct iwl_rx_mem_buffer *rxb;
         struct page *page;
         unsigned long flags;
@@ -333,46 +332,46 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
 }
 
 /*
- * iwl_rx_replenish - Move all used buffers from rx_used to rx_free
+ * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
  *
  * When moving to rx_free an page is allocated for the slot.
  *
- * Also restock the Rx queue via iwl_rx_queue_restock.
+ * Also restock the Rx queue via iwl_pcie_rxq_restock.
  * This is called as a scheduled work item (except for during initialization)
  */
-void iwl_rx_replenish(struct iwl_trans *trans)
+void iwl_pcie_rx_replenish(struct iwl_trans *trans)
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
         unsigned long flags;
 
-        iwl_rx_allocate(trans, GFP_KERNEL);
+        iwl_pcie_rx_allocate(trans, GFP_KERNEL);
 
         spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-        iwl_rx_queue_restock(trans);
+        iwl_pcie_rxq_restock(trans);
         spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 }
 
-static void iwl_rx_replenish_now(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
 {
-        iwl_rx_allocate(trans, GFP_ATOMIC);
+        iwl_pcie_rx_allocate(trans, GFP_ATOMIC);
 
-        iwl_rx_queue_restock(trans);
+        iwl_pcie_rxq_restock(trans);
 }
 
-void iwl_bg_rx_replenish(struct work_struct *data)
+void iwl_pcie_rx_replenish_work(struct work_struct *data)
 {
         struct iwl_trans_pcie *trans_pcie =
             container_of(data, struct iwl_trans_pcie, rx_replenish);
 
-        iwl_rx_replenish(trans_pcie->trans);
+        iwl_pcie_rx_replenish(trans_pcie->trans);
 }
 
-static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
+static void iwl_pcie_rx_handle_rxbuf(struct iwl_trans *trans,
                                 struct iwl_rx_mem_buffer *rxb)
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-        struct iwl_rx_queue *rxq = &trans_pcie->rxq;
-        struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
+        struct iwl_rxq *rxq = &trans_pcie->rxq;
+        struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
         unsigned long flags;
         bool page_stolen = false;
         int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
@@ -402,8 +401,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
                         break;
 
                 IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
-                        rxcb._offset,
-                        trans_pcie_get_cmd_string(trans_pcie, pkt->hdr.cmd),
+                        rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
                         pkt->hdr.cmd);
 
                 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
@@ -435,7 +433,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
                 cmd_index = get_cmd_index(&txq->q, index);
 
                 if (reclaim) {
-                        struct iwl_pcie_tx_queue_entry *ent;
+                        struct iwl_pcie_txq_entry *ent;
                         ent = &txq->entries[cmd_index];
                         cmd = ent->copy_cmd;
                         WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
@@ -465,7 +463,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
                  * iwl_trans_send_cmd()
                  * as we reclaim the driver command queue */
                 if (!rxcb._page_stolen)
-                        iwl_tx_cmd_complete(trans, &rxcb, err);
+                        iwl_pcie_hcmd_complete(trans, &rxcb, err);
                 else
                         IWL_WARN(trans, "Claim null rxb?\n");
         }
@@ -496,17 +494,13 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
         spin_unlock_irqrestore(&rxq->lock, flags);
 }
 
-/**
- * iwl_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
+/*
+ * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
  */
-static void iwl_rx_handle(struct iwl_trans *trans)
+static void iwl_pcie_rx_handle(struct iwl_trans *trans)
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-        struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+        struct iwl_rxq *rxq = &trans_pcie->rxq;
         u32 r, i;
         u8 fill_rx = 0;
         u32 count = 8;
@@ -537,7 +531,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
 
                 IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
                              r, i, rxb);
-                iwl_rx_handle_rxbuf(trans, rxb);
+                iwl_pcie_rx_handle_rxbuf(trans, rxb);
 
                 i = (i + 1) & RX_QUEUE_MASK;
                 /* If there are a lot of unused frames,
@@ -546,7 +540,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
                         count++;
                         if (count >= 8) {
                                 rxq->read = i;
-                                iwl_rx_replenish_now(trans);
+                                iwl_pcie_rx_replenish_now(trans);
                                 count = 0;
                         }
                 }
@@ -555,15 +549,15 @@ static void iwl_rx_handle(struct iwl_trans *trans)
         /* Backtrack one entry */
         rxq->read = i;
         if (fill_rx)
-                iwl_rx_replenish_now(trans);
+                iwl_pcie_rx_replenish_now(trans);
         else
-                iwl_rx_queue_restock(trans);
+                iwl_pcie_rxq_restock(trans);
 }
 
-/**
- * iwl_irq_handle_error - called for HW or SW error interrupt from card
+/*
+ * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
  */
-static void iwl_irq_handle_error(struct iwl_trans *trans)
+static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -579,8 +573,8 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
                 return;
         }
 
-        iwl_dump_csr(trans);
-        iwl_dump_fh(trans, NULL);
+        iwl_pcie_dump_csr(trans);
+        iwl_pcie_dump_fh(trans, NULL);
 
         set_bit(STATUS_FW_ERROR, &trans_pcie->status);
         clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
@@ -590,7 +584,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
 }
 
 /* tasklet for iwlagn interrupt */
-void iwl_irq_tasklet(struct iwl_trans *trans)
+void iwl_pcie_tasklet(struct iwl_trans *trans)
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
         struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
@@ -642,7 +636,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
                 iwl_disable_interrupts(trans);
 
                 isr_stats->hw++;
-                iwl_irq_handle_error(trans);
+                iwl_pcie_irq_handle_error(trans);
 
                 handled |= CSR_INT_BIT_HW_ERR;
 
@@ -705,17 +699,16 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
                 IWL_ERR(trans, "Microcode SW error detected. "
                         " Restarting 0x%X.\n", inta);
                 isr_stats->sw++;
-                iwl_irq_handle_error(trans);
+                iwl_pcie_irq_handle_error(trans);
                 handled |= CSR_INT_BIT_SW_ERR;
         }
 
         /* uCode wakes up after power-down sleep */
         if (inta & CSR_INT_BIT_WAKEUP) {
                 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
-                iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
+                iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
                 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
-                        iwl_txq_update_write_ptr(trans,
-                                                 &trans_pcie->txq[i]);
+                        iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);
 
                 isr_stats->wakeup++;
 
@@ -753,7 +746,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
                         iwl_write8(trans, CSR_INT_PERIODIC_REG,
                                     CSR_INT_PERIODIC_DIS);
 
-                iwl_rx_handle(trans);
+                iwl_pcie_rx_handle(trans);
 
                 /*
                  * Enable periodic interrupt in 8 msec only if we received
@@ -811,7 +804,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
 #define ICT_COUNT        (ICT_SIZE / sizeof(u32))
 
 /* Free dram table */
-void iwl_free_isr_ict(struct iwl_trans *trans)
+void iwl_pcie_free_ict(struct iwl_trans *trans)
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -824,13 +817,12 @@ void iwl_free_isr_ict(struct iwl_trans *trans)
         }
 }
 
-
 /*
  * allocate dram shared table, it is an aligned memory
  * block of ICT_SIZE.
  * also reset all data related to ICT table interrupt.
  */
-int iwl_alloc_isr_ict(struct iwl_trans *trans)
+int iwl_pcie_alloc_ict(struct iwl_trans *trans)
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -843,7 +835,7 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
 
         /* just an API sanity check ... it is guaranteed to be aligned */
         if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
-                iwl_free_isr_ict(trans);
+                iwl_pcie_free_ict(trans);
                 return -EINVAL;
         }
 
@@ -864,7 +856,7 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
 /* Device is going up inform it about using ICT interrupt table,
  * also we need to tell the driver to start using ICT interrupt.
  */
-void iwl_reset_ict(struct iwl_trans *trans)
+void iwl_pcie_reset_ict(struct iwl_trans *trans)
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
         u32 val;
@@ -894,7 +886,7 @@ void iwl_reset_ict(struct iwl_trans *trans)
 }
 
 /* Device is going down disable ict interrupt usage */
-void iwl_disable_ict(struct iwl_trans *trans)
+void iwl_pcie_disable_ict(struct iwl_trans *trans)
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
         unsigned long flags;
@@ -905,7 +897,7 @@ void iwl_disable_ict(struct iwl_trans *trans)
 }
 
 /* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
-static irqreturn_t iwl_isr(int irq, void *data)
+static irqreturn_t iwl_pcie_isr(int irq, void *data)
 {
         struct iwl_trans *trans = data;
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -952,7 +944,7 @@ static irqreturn_t iwl_isr(int irq, void *data)
 #endif
 
         trans_pcie->inta |= inta;
-        /* iwl_irq_tasklet() will service interrupts and re-enable them */
+        /* iwl_pcie_tasklet() will service interrupts and re-enable them */
         if (likely(inta))
                 tasklet_schedule(&trans_pcie->irq_tasklet);
         else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
@@ -977,7 +969,7 @@ none:
  * the interrupt we need to service, driver will set the entries back to 0 and
  * set index.
  */
-irqreturn_t iwl_isr_ict(int irq, void *data)
+irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
 {
         struct iwl_trans *trans = data;
         struct iwl_trans_pcie *trans_pcie;
@@ -997,7 +989,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
          * use legacy interrupt.
          */
         if (unlikely(!trans_pcie->use_ict)) {
-                irqreturn_t ret = iwl_isr(irq, data);
+                irqreturn_t ret = iwl_pcie_isr(irq, data);
                 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
                 return ret;
         }
@@ -1062,7 +1054,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
         inta &= trans_pcie->inta_mask;
         trans_pcie->inta |= inta;
 
-        /* iwl_irq_tasklet() will service interrupts and re-enable them */
+        /* iwl_pcie_tasklet() will service interrupts and re-enable them */
         if (likely(inta))
                 tasklet_schedule(&trans_pcie->irq_tasklet);
         else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
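
A note on item 5 of the commit message: the _work suffix marks iwl_pcie_rx_replenish_work() as a work_struct callback, and its body recovers the transport with container_of() from the rx_replenish work item embedded in struct iwl_trans_pcie. A hedged sketch of how such a callback is typically wired up — INIT_WORK() and schedule_work() are standard kernel workqueue calls, but the actual call sites live in other pcie/ files, so treat this as an assumption about the surrounding code rather than a quote of it:

/* At transport setup (outside rx.c): bind the callback to the work item
 * embedded in struct iwl_trans_pcie.
 */
INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);

/* In the Rx path: once rxq->free_count drops to or below RX_LOW_WATERMARK,
 * defer the GFP_KERNEL page allocation to process context instead of
 * allocating from the interrupt tasklet.
 */
schedule_work(&trans_pcie->rx_replenish);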