author		Wey-Yi Guy <wey-yi.w.guy@intel.com>	2010-03-17 16:34:35 -0400
committer	Reinette Chatre <reinette.chatre@intel.com>	2010-03-25 14:19:33 -0400
commit		54b81550dd674466fe7d01629d2aab015c545a1e
tree		ed589d93ecef142f95379170e35e3224fd580fb4 /drivers/net/wireless/iwlwifi/iwl-agn-lib.c
parent		74bcdb33e99f49ef5202dd2f8109945b4570edc2
iwlwifi: move agn only rx functions from iwlcore to iwlagn
Identify the rx functions used only by the agn driver and move them from
iwlcore to iwlagn.
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
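
As context for the diff below: the restocking helper moved in this patch packs each
receive buffer's DMA address into a 32-bit read buffer descriptor (RBD) word by
shifting it right by 8 bits, wraps its write index with RX_QUEUE_MASK, and only
advances the device-visible write pointer in multiples of 8. The standalone sketch
below models just that bookkeeping; it is an illustration, not driver code, and the
value of RX_QUEUE_SIZE and the use of uint64_t in place of dma_addr_t are assumptions
made for the example.

#include <stdint.h>
#include <stdio.h>

#define RX_QUEUE_SIZE 256                 /* assumed for the example; real value is in the driver headers */
#define RX_QUEUE_MASK (RX_QUEUE_SIZE - 1)

/* Same packing as iwlagn_dma_addr2rbd_ptr(): a 256-byte-aligned address of at
 * most 36 bits is stored as a 32-bit RBD word. */
static uint32_t dma_addr2rbd_ptr(uint64_t dma_addr)
{
	return (uint32_t)(dma_addr >> 8);
}

int main(void)
{
	uint32_t bd[RX_QUEUE_SIZE];       /* stand-in for rxq->bd            */
	unsigned int write = 0;           /* stand-in for rxq->write         */
	unsigned int write_actual = 0;    /* last value handed to the device */
	uint64_t addr;

	/* Restock ten buffers, wrapping the index with RX_QUEUE_MASK. */
	for (addr = 0x1000; addr < 0x1000 + 10 * 0x100; addr += 0x100) {
		bd[write] = dma_addr2rbd_ptr(addr);
		write = (write + 1) & RX_QUEUE_MASK;
	}

	/* Tell the "device" only when the 8-aligned write pointer has moved. */
	if (write_actual != (write & ~0x7U)) {
		write_actual = write & ~0x7U;
		printf("device write pointer -> %u\n", write_actual);
	}
	return 0;
}

The >> 8 packing is also why the allocation path added below BUG()s when a mapped
page is not 256-byte aligned or its address does not fit in 36 bits: only then does
the shifted value fit the 32-bit RBD slot.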
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-agn-lib.c')
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-agn-lib.c	200
1 file changed, 199 insertions(+), 1 deletion(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 6f9d52d04464..3117382cfd48 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -491,7 +491,7 @@ int iwlagn_hw_nic_init(struct iwl_priv *priv)
         } else
                 iwlagn_rx_queue_reset(priv, rxq);
 
-        iwl_rx_replenish(priv);
+        iwlagn_rx_replenish(priv);
 
         iwlagn_rx_init(priv, rxq);
 
@@ -511,3 +511,201 @@ int iwlagn_hw_nic_init(struct iwl_priv *priv)
 
         return 0;
 }
+
+/**
+ * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
+                                             dma_addr_t dma_addr)
+{
+        return cpu_to_le32((u32)(dma_addr >> 8));
+}
+
+/**
+ * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' index forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+void iwlagn_rx_queue_restock(struct iwl_priv *priv)
+{
+        struct iwl_rx_queue *rxq = &priv->rxq;
+        struct list_head *element;
+        struct iwl_rx_mem_buffer *rxb;
+        unsigned long flags;
+        int write;
+
+        spin_lock_irqsave(&rxq->lock, flags);
+        write = rxq->write & ~0x7;
+        while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+                /* Get next free Rx buffer, remove from free list */
+                element = rxq->rx_free.next;
+                rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+                list_del(element);
+
+                /* Point to Rx buffer via next RBD in circular buffer */
+                rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
+                                                              rxb->page_dma);
+                rxq->queue[rxq->write] = rxb;
+                rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+                rxq->free_count--;
+        }
+        spin_unlock_irqrestore(&rxq->lock, flags);
+        /* If the pre-allocated buffer pool is dropping low, schedule to
+         * refill it */
+        if (rxq->free_count <= RX_LOW_WATERMARK)
+                queue_work(priv->workqueue, &priv->rx_replenish);
+
+
+        /* If we've added more space for the firmware to place data, tell it.
+         * Increment device's write pointer in multiples of 8. */
+        if (rxq->write_actual != (rxq->write & ~0x7)) {
+                spin_lock_irqsave(&rxq->lock, flags);
+                rxq->need_update = 1;
+                spin_unlock_irqrestore(&rxq->lock, flags);
+                iwl_rx_queue_update_write_ptr(priv, rxq);
+        }
+}
+
+/**
+ * iwlagn_rx_replenish - Move all used packet from rx_used to rx_free
+ *
+ * When moving to rx_free an SKB is allocated for the slot.
+ *
+ * Also restock the Rx queue via iwl_rx_queue_restock.
+ * This is called as a scheduled work item (except for during initialization)
+ */
+void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
+{
+        struct iwl_rx_queue *rxq = &priv->rxq;
+        struct list_head *element;
+        struct iwl_rx_mem_buffer *rxb;
+        struct page *page;
+        unsigned long flags;
+        gfp_t gfp_mask = priority;
+
+        while (1) {
+                spin_lock_irqsave(&rxq->lock, flags);
+                if (list_empty(&rxq->rx_used)) {
+                        spin_unlock_irqrestore(&rxq->lock, flags);
+                        return;
+                }
+                spin_unlock_irqrestore(&rxq->lock, flags);
+
+                if (rxq->free_count > RX_LOW_WATERMARK)
+                        gfp_mask |= __GFP_NOWARN;
+
+                if (priv->hw_params.rx_page_order > 0)
+                        gfp_mask |= __GFP_COMP;
+
+                /* Alloc a new receive buffer */
+                page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
+                if (!page) {
+                        if (net_ratelimit())
+                                IWL_DEBUG_INFO(priv, "alloc_pages failed, "
+                                               "order: %d\n",
+                                               priv->hw_params.rx_page_order);
+
+                        if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+                            net_ratelimit())
+                                IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
+                                         priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
+                                         rxq->free_count);
+                        /* We don't reschedule replenish work here -- we will
+                         * call the restock method and if it still needs
+                         * more buffers it will schedule replenish */
+                        return;
+                }
+
+                spin_lock_irqsave(&rxq->lock, flags);
+
+                if (list_empty(&rxq->rx_used)) {
+                        spin_unlock_irqrestore(&rxq->lock, flags);
+                        __free_pages(page, priv->hw_params.rx_page_order);
+                        return;
+                }
+                element = rxq->rx_used.next;
+                rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+                list_del(element);
+
+                spin_unlock_irqrestore(&rxq->lock, flags);
+
+                rxb->page = page;
+                /* Get physical address of the RB */
+                rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
+                                PAGE_SIZE << priv->hw_params.rx_page_order,
+                                PCI_DMA_FROMDEVICE);
+                /* dma address must be no more than 36 bits */
+                BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+                /* and also 256 byte aligned! */
+                BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+                spin_lock_irqsave(&rxq->lock, flags);
+
+                list_add_tail(&rxb->list, &rxq->rx_free);
+                rxq->free_count++;
+                priv->alloc_rxb_page++;
+
+                spin_unlock_irqrestore(&rxq->lock, flags);
+        }
+}
+
+void iwlagn_rx_replenish(struct iwl_priv *priv)
+{
+        unsigned long flags;
+
+        iwlagn_rx_allocate(priv, GFP_KERNEL);
+
+        spin_lock_irqsave(&priv->lock, flags);
+        iwlagn_rx_queue_restock(priv);
+        spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+void iwlagn_rx_replenish_now(struct iwl_priv *priv)
+{
+        iwlagn_rx_allocate(priv, GFP_ATOMIC);
+
+        iwlagn_rx_queue_restock(priv);
+}
+
+/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
+ * If an SKB has been detached, the POOL needs to have its SKB set to NULL
+ * This free routine walks the list of POOL entries and if SKB is set to
+ * non NULL it is unmapped and freed
+ */
+void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
+{
+        int i;
+        for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+                if (rxq->pool[i].page != NULL) {
+                        pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+                                PAGE_SIZE << priv->hw_params.rx_page_order,
+                                PCI_DMA_FROMDEVICE);
+                        __iwl_free_pages(priv, rxq->pool[i].page);
+                        rxq->pool[i].page = NULL;
+                }
+        }
+
+        dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+                          rxq->dma_addr);
+        dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
+                          rxq->rb_stts, rxq->rb_stts_dma);
+        rxq->bd = NULL;
+        rxq->rb_stts = NULL;
+}
+
+int iwlagn_rxq_stop(struct iwl_priv *priv)
+{
+
+        /* stop Rx DMA */
+        iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+        iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
+                            FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+
+        return 0;
+}
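
Taken together, the moved functions implement one buffer cycle: iwlagn_rx_allocate()
attaches pages to buffers sitting on rx_used and moves them to rx_free,
iwlagn_rx_queue_restock() hands rx_free buffers to the device and schedules the
replenish work when the pool runs low, and the two replenish wrappers pick
GFP_KERNEL (process context, as in iwlagn_hw_nic_init() above) or GFP_ATOMIC (from
the RX handling path, which is outside this patch). The sketch below is a rough,
self-contained simulation of that cycle, not driver code; the constants and the
plain counters standing in for the rx_used/rx_free lists are assumptions made for
the illustration.

#include <stdio.h>

#define RX_QUEUE_SIZE     256   /* assumed for the example           */
#define RX_FREE_BUFFERS    32   /* spare buffers beyond the queue    */
#define RX_LOW_WATERMARK    8   /* assumed low-pool threshold        */

static unsigned int used_count = RX_QUEUE_SIZE + RX_FREE_BUFFERS; /* ~rx_used */
static unsigned int free_count;                                   /* ~rx_free */
static unsigned int queued;                       /* buffers owned by the HW */

/* Roughly mirrors iwlagn_rx_allocate(): allocation moves buffers used -> free. */
static void rx_allocate(void)
{
	while (used_count) {
		used_count--;
		free_count++;
	}
}

/* Roughly mirrors iwlagn_rx_queue_restock(): free -> hardware queue; returns
 * whether the replenish work would need to be scheduled again. */
static int rx_queue_restock(void)
{
	while (queued < RX_QUEUE_SIZE && free_count) {
		free_count--;
		queued++;
	}
	return free_count <= RX_LOW_WATERMARK;
}

int main(void)
{
	/* Init path (iwlagn_hw_nic_init): replenish with GFP_KERNEL, then restock. */
	rx_allocate();
	rx_queue_restock();
	printf("after init: queued=%u free=%u used=%u\n", queued, free_count, used_count);

	/* RX path: the handler consumes buffers and would eventually call
	 * iwlagn_rx_replenish_now() (GFP_ATOMIC) to refill them. */
	queued -= 16;
	used_count += 16;
	rx_allocate();
	if (rx_queue_restock())
		printf("pool low: replenish work would be scheduled\n");
	printf("after rx burst: queued=%u free=%u used=%u\n", queued, free_count, used_count);
	return 0;
}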