author    Emmanuel Grumbach <emmanuel.grumbach@intel.com>  2011-08-26 02:10:51 -0400
committer John W. Linville <linville@tuxdriver.com>  2011-08-29 15:25:33 -0400
commit    5a878bf60b2bb1f1509f49b8b1784e3c9f204c64 (patch)
tree      ba414eec2287d0ac9809a26d37e4ad7dd9dc301d /drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
parent    87e5666c0722d5f4cad3560ab5c180c8bba62b8b (diff)
iwlagn: iwl_rx_queue moves to the iwl_trans_pcie
Since this struct is specific to the PCIe transport, move it to the PCIe-specific transport layer.

Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
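The pattern behind every hunk below is the same: the transport-private state (here the RX queue and its replenish work item) hangs off the generic struct iwl_trans, the PCIe layer recovers it with IWL_TRANS_GET_PCIE_TRANS(), and work handlers recover it from their work_struct with container_of(). A minimal sketch of that layering, assuming field names as used in this diff (the real definitions live in the iwlwifi transport headers and differ in detail):

/*
 * Illustrative sketch only -- struct layouts and the accessor macro are
 * assumptions inferred from how they are used in this patch, not the
 * driver's actual definitions.
 */
#include <linux/workqueue.h>
#include <linux/kernel.h>	/* container_of() */

struct iwl_shared;		/* status bits, lock, workqueue (opaque here) */

struct iwl_rx_queue {
	unsigned int read, write;	/* stand-in for the real RBD state */
};

struct iwl_trans {
	struct iwl_shared *shrd;
	/* transport-specific state is carried at the tail of iwl_trans */
	char trans_specific[0] __aligned(sizeof(void *));
};

struct iwl_trans_pcie {
	struct iwl_rx_queue rxq;	/* what this patch moves in here */
	struct work_struct rx_replenish;
	struct iwl_trans *trans;	/* back-pointer for work handlers */
};

/* Accessor used throughout the diff; the exact definition is an assumption. */
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
	((struct iwl_trans_pcie *)((_iwl_trans)->trans_specific))

/*
 * A work handler receives only its work_struct, so iwl_bg_rx_replenish()
 * below recovers the enclosing iwl_trans_pcie with container_of() and
 * follows the back-pointer to reach the shared state:
 */
static void example_replenish_work(struct work_struct *data)
{
	struct iwl_trans_pcie *trans_pcie =
		container_of(data, struct iwl_trans_pcie, rx_replenish);
	struct iwl_trans *trans = trans_pcie->trans;

	/* ... check trans->shrd->status, refill trans_pcie->rxq ... */
}

Code that still only holds an iwl_priv bridges over with the trans(priv) and priv(trans) helpers, which is exactly what the tasklet hunks at the end of the diff do.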
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c')
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c  114
1 file changed, 62 insertions(+), 52 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
index 6f5edf731542..fb06acf83fc6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
@@ -127,9 +127,10 @@ static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
 /**
  * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
  */
-void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
+void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
 			struct iwl_rx_queue *q)
 {
+	struct iwl_priv *priv = priv(trans);
 	unsigned long flags;
 	u32 reg;
 
@@ -145,11 +146,11 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
 		iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write_actual);
 	} else {
 		/* If power-saving is in use, make sure device is awake */
-		if (test_bit(STATUS_POWER_PMI, &priv->shrd->status)) {
+		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
 			reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
 
 			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-				IWL_DEBUG_INFO(priv,
+				IWL_DEBUG_INFO(trans,
 					"Rx queue requesting wakeup,"
 					" GP1 = 0x%x\n", reg);
 				iwl_set_bit(priv, CSR_GP_CNTRL,
@@ -178,8 +179,7 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
 /**
  * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
  */
-static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
-					  dma_addr_t dma_addr)
+static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
 {
 	return cpu_to_le32((u32)(dma_addr >> 8));
 }
@@ -195,9 +195,12 @@ static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
  * also updates the memory address in the firmware to reference the new
  * target buffer.
  */
-static void iwlagn_rx_queue_restock(struct iwl_priv *priv)
+static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
 {
-	struct iwl_rx_queue *rxq = &priv->rxq;
+	struct iwl_trans_pcie *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
 	struct list_head *element;
 	struct iwl_rx_mem_buffer *rxb;
 	unsigned long flags;
@@ -214,8 +217,7 @@ static void iwlagn_rx_queue_restock(struct iwl_priv *priv)
 		list_del(element);
 
 		/* Point to Rx buffer via next RBD in circular buffer */
-		rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
-							  rxb->page_dma);
+		rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(rxb->page_dma);
 		rxq->queue[rxq->write] = rxb;
 		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
 		rxq->free_count--;
@@ -224,7 +226,7 @@ static void iwlagn_rx_queue_restock(struct iwl_priv *priv)
 	/* If the pre-allocated buffer pool is dropping low, schedule to
 	 * refill it */
 	if (rxq->free_count <= RX_LOW_WATERMARK)
-		queue_work(priv->shrd->workqueue, &priv->rx_replenish);
+		queue_work(trans->shrd->workqueue, &trans_pcie->rx_replenish);
 
 
 	/* If we've added more space for the firmware to place data, tell it.
@@ -233,7 +235,7 @@ static void iwlagn_rx_queue_restock(struct iwl_priv *priv)
 		spin_lock_irqsave(&rxq->lock, flags);
 		rxq->need_update = 1;
 		spin_unlock_irqrestore(&rxq->lock, flags);
-		iwl_rx_queue_update_write_ptr(priv, rxq);
+		iwl_rx_queue_update_write_ptr(trans, rxq);
 	}
 }
 
@@ -245,9 +247,12 @@ static void iwlagn_rx_queue_restock(struct iwl_priv *priv)
 * Also restock the Rx queue via iwl_rx_queue_restock.
 * This is called as a scheduled work item (except for during initialization)
 */
-static void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
+static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
 {
-	struct iwl_rx_queue *rxq = &priv->rxq;
+	struct iwl_trans_pcie *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
 	struct list_head *element;
 	struct iwl_rx_mem_buffer *rxb;
 	struct page *page;
@@ -265,21 +270,21 @@ static void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 		if (rxq->free_count > RX_LOW_WATERMARK)
 			gfp_mask |= __GFP_NOWARN;
 
-		if (hw_params(priv).rx_page_order > 0)
+		if (hw_params(trans).rx_page_order > 0)
 			gfp_mask |= __GFP_COMP;
 
 		/* Alloc a new receive buffer */
 		page = alloc_pages(gfp_mask,
-				  hw_params(priv).rx_page_order);
+				  hw_params(trans).rx_page_order);
 		if (!page) {
 			if (net_ratelimit())
-				IWL_DEBUG_INFO(priv, "alloc_pages failed, "
+				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
 					   "order: %d\n",
-					   hw_params(priv).rx_page_order);
+					   hw_params(trans).rx_page_order);
 
 			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
 			    net_ratelimit())
-				IWL_CRIT(priv, "Failed to alloc_pages with %s."
+				IWL_CRIT(trans, "Failed to alloc_pages with %s."
 					 "Only %u free buffers remaining.\n",
 					 priority == GFP_ATOMIC ?
 					 "GFP_ATOMIC" : "GFP_KERNEL",
@@ -294,7 +299,7 @@ static void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 
 		if (list_empty(&rxq->rx_used)) {
 			spin_unlock_irqrestore(&rxq->lock, flags);
-			__free_pages(page, hw_params(priv).rx_page_order);
+			__free_pages(page, hw_params(trans).rx_page_order);
 			return;
 		}
 		element = rxq->rx_used.next;
@@ -306,8 +311,8 @@ static void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 		BUG_ON(rxb->page);
 		rxb->page = page;
 		/* Get physical address of the RB */
-		rxb->page_dma = dma_map_page(priv->bus->dev, page, 0,
-				PAGE_SIZE << hw_params(priv).rx_page_order,
+		rxb->page_dma = dma_map_page(bus(trans)->dev, page, 0,
+				PAGE_SIZE << hw_params(trans).rx_page_order,
 				DMA_FROM_DEVICE);
 		/* dma address must be no more than 36 bits */
 		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
@@ -323,35 +328,36 @@ static void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 	}
 }
 
-void iwlagn_rx_replenish(struct iwl_priv *priv)
+void iwlagn_rx_replenish(struct iwl_trans *trans)
 {
 	unsigned long flags;
 
-	iwlagn_rx_allocate(priv, GFP_KERNEL);
+	iwlagn_rx_allocate(trans, GFP_KERNEL);
 
-	spin_lock_irqsave(&priv->shrd->lock, flags);
-	iwlagn_rx_queue_restock(priv);
-	spin_unlock_irqrestore(&priv->shrd->lock, flags);
+	spin_lock_irqsave(&trans->shrd->lock, flags);
+	iwlagn_rx_queue_restock(trans);
+	spin_unlock_irqrestore(&trans->shrd->lock, flags);
 }
 
-static void iwlagn_rx_replenish_now(struct iwl_priv *priv)
+static void iwlagn_rx_replenish_now(struct iwl_trans *trans)
 {
-	iwlagn_rx_allocate(priv, GFP_ATOMIC);
+	iwlagn_rx_allocate(trans, GFP_ATOMIC);
 
-	iwlagn_rx_queue_restock(priv);
+	iwlagn_rx_queue_restock(trans);
 }
 
 void iwl_bg_rx_replenish(struct work_struct *data)
 {
-	struct iwl_priv *priv =
-	    container_of(data, struct iwl_priv, rx_replenish);
+	struct iwl_trans_pcie *trans_pcie =
+	    container_of(data, struct iwl_trans_pcie, rx_replenish);
+	struct iwl_trans *trans = trans_pcie->trans;
 
-	if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+	if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
 		return;
 
-	mutex_lock(&priv->shrd->mutex);
-	iwlagn_rx_replenish(priv);
-	mutex_unlock(&priv->shrd->mutex);
+	mutex_lock(&trans->shrd->mutex);
+	iwlagn_rx_replenish(trans);
+	mutex_unlock(&trans->shrd->mutex);
 }
 
 /**
@@ -361,11 +367,13 @@ void iwl_bg_rx_replenish(struct work_struct *data)
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 */
-static void iwl_rx_handle(struct iwl_priv *priv)
+static void iwl_rx_handle(struct iwl_trans *trans)
 {
 	struct iwl_rx_mem_buffer *rxb;
 	struct iwl_rx_packet *pkt;
-	struct iwl_rx_queue *rxq = &priv->rxq;
+	struct iwl_trans_pcie *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
 	u32 r, i;
 	int reclaim;
 	unsigned long flags;
@@ -380,7 +388,7 @@ static void iwl_rx_handle(struct iwl_priv *priv)
 
 	/* Rx interrupt, but nothing sent from uCode */
 	if (i == r)
-		IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
+		IWL_DEBUG_RX(trans, "r = %d, i = %d\n", r, i);
 
 	/* calculate total frames need to be restock after handling RX */
 	total_empty = r - rxq->write_actual;
@@ -405,17 +413,17 @@ static void iwl_rx_handle(struct iwl_priv *priv)
 
 		rxq->queue[i] = NULL;
 
-		dma_unmap_page(priv->bus->dev, rxb->page_dma,
-			       PAGE_SIZE << hw_params(priv).rx_page_order,
+		dma_unmap_page(bus(trans)->dev, rxb->page_dma,
+			       PAGE_SIZE << hw_params(trans).rx_page_order,
 			       DMA_FROM_DEVICE);
 		pkt = rxb_addr(rxb);
 
-		IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
+		IWL_DEBUG_RX(trans, "r = %d, i = %d, %s, 0x%02x\n", r,
 			i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
 
 		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
 		len += sizeof(u32); /* account for status word */
-		trace_iwlwifi_dev_rx(priv, pkt, len);
+		trace_iwlwifi_dev_rx(priv(trans), pkt, len);
 
 		/* Reclaim a command buffer only if this packet is a response
 		 *   to a (driver-originated) command.
@@ -431,7 +439,7 @@ static void iwl_rx_handle(struct iwl_priv *priv)
 			(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
 			(pkt->hdr.cmd != REPLY_TX);
 
-		iwl_rx_dispatch(priv, rxb);
+		iwl_rx_dispatch(priv(trans), rxb);
 
 		/*
 		 * XXX: After here, we should always check rxb->page
@@ -446,9 +454,9 @@ static void iwl_rx_handle(struct iwl_priv *priv)
 			 *   iwl_trans_send_cmd()
 			 * as we reclaim the driver command queue */
 			if (rxb->page)
-				iwl_tx_cmd_complete(priv, rxb);
+				iwl_tx_cmd_complete(priv(trans), rxb);
 			else
-				IWL_WARN(priv, "Claim null rxb?\n");
+				IWL_WARN(trans, "Claim null rxb?\n");
 		}
 
 		/* Reuse the page if possible. For notification packets and
@@ -456,9 +464,9 @@ static void iwl_rx_handle(struct iwl_priv *priv)
 		 * rx_free list for reuse later. */
 		spin_lock_irqsave(&rxq->lock, flags);
 		if (rxb->page != NULL) {
-			rxb->page_dma = dma_map_page(priv->bus->dev, rxb->page,
+			rxb->page_dma = dma_map_page(bus(trans)->dev, rxb->page,
 				0, PAGE_SIZE <<
-				hw_params(priv).rx_page_order,
+				hw_params(trans).rx_page_order,
 				DMA_FROM_DEVICE);
 			list_add_tail(&rxb->list, &rxq->rx_free);
 			rxq->free_count++;
@@ -474,7 +482,7 @@ static void iwl_rx_handle(struct iwl_priv *priv)
 			count++;
 			if (count >= 8) {
 				rxq->read = i;
-				iwlagn_rx_replenish_now(priv);
+				iwlagn_rx_replenish_now(trans);
 				count = 0;
 			}
 		}
@@ -483,9 +491,9 @@ static void iwl_rx_handle(struct iwl_priv *priv)
 	/* Backtrack one entry */
 	rxq->read = i;
 	if (fill_rx)
-		iwlagn_rx_replenish_now(priv);
+		iwlagn_rx_replenish_now(trans);
 	else
-		iwlagn_rx_queue_restock(priv);
+		iwlagn_rx_queue_restock(trans);
 }
 
 /* tasklet for iwlagn interrupt */
@@ -611,8 +619,10 @@ void iwl_irq_tasklet(struct iwl_priv *priv)
 
 	/* uCode wakes up after power-down sleep */
 	if (inta & CSR_INT_BIT_WAKEUP) {
+		struct iwl_trans_pcie *trans_pcie =
+			IWL_TRANS_GET_PCIE_TRANS(trans(priv));
 		IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
-		iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
+		iwl_rx_queue_update_write_ptr(trans(priv), &trans_pcie->rxq);
 		for (i = 0; i < hw_params(priv).max_txq_num; i++)
 			iwl_txq_update_write_ptr(priv, &priv->txq[i]);
 
@@ -650,7 +660,7 @@ void iwl_irq_tasklet(struct iwl_priv *priv)
 		/* Disable periodic interrupt; we use it as just a one-shot. */
 		iwl_write8(priv, CSR_INT_PERIODIC_REG,
 			   CSR_INT_PERIODIC_DIS);
-		iwl_rx_handle(priv);
+		iwl_rx_handle(trans(priv));
 
 		/*
 		 * Enable periodic interrupt in 8 msec only if we received