path: root/drivers/net/wireless/iwlwifi/iwl-trans.c
author		Emmanuel Grumbach <emmanuel.grumbach@intel.com>	2011-08-26 02:10:39 -0400
committer	John W. Linville <linville@tuxdriver.com>	2011-08-29 15:25:30 -0400
commit		d618912417fbce4f6514fe1cbef7df2e73bdb6c2 (patch)
tree		e14bf2a7a09d898431cd0bac6d8b38318cb6dc48 /drivers/net/wireless/iwlwifi/iwl-trans.c
parent		8f470ce31de1a9dfe6b53e0967eaa7e72b741714 (diff)
iwlagn: hw_params moves to iwl_shared

Since it is used by all the layers, it needs to move to iwl_shared.

Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
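The mechanical change throughout the diff below is priv->hw_params.* becoming hw_params(priv).*. A minimal sketch of what such an accessor could look like, assuming hw_params() is a macro that reaches the field through a shrd pointer into the shared struct; the names and field list here are illustrative, not the exact iwl-shared.h contents:

#include <linux/types.h>

/* Illustrative sketch only -- not the literal iwl-shared.h definition. */
struct iwl_hw_params {
	u16 max_txq_num;	/* number of Tx queues supported by the HW */
	u32 rx_page_order;	/* Rx buffer size, expressed as a page order */
	u16 tfd_size;		/* size of one TFD descriptor */
	/* ... */
};

/* State common to all layers (transport, ucode, mac80211 glue). */
struct iwl_shared {
	struct iwl_hw_params hw_params;
	/* ... */
};

/* Works for any object carrying a 'shrd' pointer, e.g. iwl_priv:
 * hw_params(priv).max_txq_num == priv->shrd->hw_params.max_txq_num */
#define hw_params(obj) ((obj)->shrd->hw_params)

Routing the access through the shared struct is what lets the transport layer (this file) and the upper layers read the same hardware parameters without each keeping its own copy.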
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-trans.c')
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-trans.c	24
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c
index e4a70fed8cf8..92128383cae7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.c
@@ -120,7 +120,7 @@ static void iwl_trans_rxq_free_rx_bufs(struct iwl_priv *priv)
 		 * to an SKB, so we need to unmap and free potential storage */
 		if (rxq->pool[i].page != NULL) {
 			dma_unmap_page(priv->bus->dev, rxq->pool[i].page_dma,
-				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PAGE_SIZE << hw_params(priv).rx_page_order,
 				DMA_FROM_DEVICE);
 			__iwl_free_pages(priv, rxq->pool[i].page);
 			rxq->pool[i].page = NULL;
@@ -285,7 +285,7 @@ static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
 static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 			      int slots_num, u32 txq_id)
 {
-	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
+	size_t tfd_sz = hw_params(priv).tfd_size * TFD_QUEUE_SIZE_MAX;
 	int i;
 
 	if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
@@ -429,7 +429,7 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 
 	/* De-alloc circular buffer of TFDs */
 	if (txq->q.n_bd) {
-		dma_free_coherent(dev, priv->hw_params.tfd_size *
+		dma_free_coherent(dev, hw_params(priv).tfd_size *
 				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
 		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
 	}
@@ -459,7 +459,8 @@ static void iwl_trans_tx_free(struct iwl_priv *priv)
 
 	/* Tx queues */
 	if (priv->txq) {
-		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+		for (txq_id = 0;
+		     txq_id < hw_params(priv).max_txq_num; txq_id++)
 			iwl_tx_queue_free(priv, txq_id);
 	}
 
@@ -491,7 +492,7 @@ static int iwl_trans_tx_alloc(struct iwl_priv *priv)
 	}
 
 	ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
-				   priv->hw_params.scd_bc_tbls_size);
+				   hw_params(priv).scd_bc_tbls_size);
 	if (ret) {
 		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
 		goto error;
@@ -513,7 +514,7 @@ static int iwl_trans_tx_alloc(struct iwl_priv *priv)
 	}
 
 	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
-	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+	for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
 		slots_num = (txq_id == priv->cmd_queue) ?
 					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
 		ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num,
@@ -556,7 +557,7 @@ static int iwl_tx_init(struct iwl_priv *priv)
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
-	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+	for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
 		slots_num = (txq_id == priv->cmd_queue) ?
 					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
 		ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
@@ -789,7 +790,8 @@ static void iwl_trans_tx_start(struct iwl_priv *priv)
 	     a += 4)
 		iwl_write_targ_mem(priv, a, 0);
 	for (; a < priv->scd_base_addr +
-	       SCD_TRANS_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
+	       SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(priv).max_txq_num);
+	     a += 4)
 		iwl_write_targ_mem(priv, a, 0);
 
 	iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
@@ -811,7 +813,7 @@ static void iwl_trans_tx_start(struct iwl_priv *priv)
 	iwl_write_prph(priv, SCD_AGGR_SEL, 0);
 
 	/* initiate the queues */
-	for (i = 0; i < priv->hw_params.max_txq_num; i++) {
+	for (i = 0; i < hw_params(priv).max_txq_num; i++) {
 		iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
 		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
 		iwl_write_targ_mem(priv, priv->scd_base_addr +
@@ -828,7 +830,7 @@ static void iwl_trans_tx_start(struct iwl_priv *priv)
 	}
 
 	iwl_write_prph(priv, SCD_INTERRUPT_MASK,
-			IWL_MASK(0, priv->hw_params.max_txq_num));
+			IWL_MASK(0, hw_params(priv).max_txq_num));
 
 	/* Activate all Tx DMA/FIFO channels */
 	iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));
@@ -908,7 +910,7 @@ static int iwl_trans_tx_stop(struct iwl_priv *priv)
 	}
 
 	/* Unmap DMA from host system and free skb's */
-	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+	for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++)
 		iwl_tx_queue_unmap(priv, txq_id);
 
 	return 0;