diff options
author | Emmanuel Grumbach <emmanuel.grumbach@intel.com> | 2011-06-14 03:13:24 -0400 |
---|---|---|
committer | Wey-Yi Guy <wey-yi.w.guy@intel.com> | 2011-07-01 10:57:34 -0400 |
commit | c85eb6196958ae54eba3ff0660d2b5af3d58521a (patch) | |
tree | 9b4e0e85127ff1481f0ebd0614edf48a90816a4c /drivers/net/wireless/iwlwifi/iwl-agn-lib.c | |
parent | 300d0834ebd3f3c57b0063c2fd6bc26d8405626c (diff) |
iwlagn: introduce transport layer and implement rx_init
The transport layer is responsible for all the queues, DMA rings etc...
This is the beginning of the separation of all the code that is tightly
related to HW design into the aforementioned transport layer.
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-agn-lib.c')
-rw-r--r-- | drivers/net/wireless/iwlwifi/iwl-agn-lib.c | 41 |
1 files changed, 1 insertions, 40 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c index efdab6506ae7..3d971142786e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c | |||
@@ -628,38 +628,6 @@ struct iwl_mod_params iwlagn_mod_params = { | |||
628 | /* the rest are 0 by default */ | 628 | /* the rest are 0 by default */ |
629 | }; | 629 | }; |
630 | 630 | ||
631 | void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | ||
632 | { | ||
633 | unsigned long flags; | ||
634 | int i; | ||
635 | spin_lock_irqsave(&rxq->lock, flags); | ||
636 | INIT_LIST_HEAD(&rxq->rx_free); | ||
637 | INIT_LIST_HEAD(&rxq->rx_used); | ||
638 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | ||
639 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { | ||
640 | /* In the reset function, these buffers may have been allocated | ||
641 | * to an SKB, so we need to unmap and free potential storage */ | ||
642 | if (rxq->pool[i].page != NULL) { | ||
643 | dma_unmap_page(priv->bus.dev, rxq->pool[i].page_dma, | ||
644 | PAGE_SIZE << priv->hw_params.rx_page_order, | ||
645 | DMA_FROM_DEVICE); | ||
646 | __iwl_free_pages(priv, rxq->pool[i].page); | ||
647 | rxq->pool[i].page = NULL; | ||
648 | } | ||
649 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | ||
650 | } | ||
651 | |||
652 | for (i = 0; i < RX_QUEUE_SIZE; i++) | ||
653 | rxq->queue[i] = NULL; | ||
654 | |||
655 | /* Set us so that we have processed and used all buffers, but have | ||
656 | * not restocked the Rx queue with fresh buffers */ | ||
657 | rxq->read = rxq->write = 0; | ||
658 | rxq->write_actual = 0; | ||
659 | rxq->free_count = 0; | ||
660 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
661 | } | ||
662 | |||
663 | int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | 631 | int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) |
664 | { | 632 | { |
665 | u32 rb_size; | 633 | u32 rb_size; |
@@ -747,14 +715,7 @@ int iwlagn_hw_nic_init(struct iwl_priv *priv) | |||
747 | priv->cfg->ops->lib->apm_ops.config(priv); | 715 | priv->cfg->ops->lib->apm_ops.config(priv); |
748 | 716 | ||
749 | /* Allocate the RX queue, or reset if it is already allocated */ | 717 | /* Allocate the RX queue, or reset if it is already allocated */ |
750 | if (!rxq->bd) { | 718 | priv->trans.ops->rx_init(priv); |
751 | ret = iwl_rx_queue_alloc(priv); | ||
752 | if (ret) { | ||
753 | IWL_ERR(priv, "Unable to initialize Rx queue\n"); | ||
754 | return -ENOMEM; | ||
755 | } | ||
756 | } else | ||
757 | iwlagn_rx_queue_reset(priv, rxq); | ||
758 | 719 | ||
759 | iwlagn_rx_replenish(priv); | 720 | iwlagn_rx_replenish(priv); |
760 | 721 | ||