aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/wireless/iwlwifi/iwl-trans.c
diff options
context:
space:
mode:
authorEmmanuel Grumbach <emmanuel.grumbach@intel.com>2011-07-07 08:50:10 -0400
committerWey-Yi Guy <wey-yi.w.guy@intel.com>2011-07-16 10:39:42 -0400
commitb3c2ce131c7cd8c53b72b0cc04241cde17ce0c1d (patch)
treedd6e9946011a2179749f4c2b0b0a394e6171bc50 /drivers/net/wireless/iwlwifi/iwl-trans.c
parent1a361cd838173879672cb0f0ebe1e7654d7edff6 (diff)
iwlagn: add tx start API to transport layer
tx start will start the tx queues: basically configure the SCD. Remove the IWLAGN prefix from the SCD defines on the way. Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com> Signed-off-by: Wey-Yi Guy <wey-yi.guy@intel.com>
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-trans.c')
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.c153
1 file changed, 151 insertions, 2 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c
index 8d4555404799..7c748f65c86f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.c
@@ -547,7 +547,7 @@ static int iwl_trans_tx_init(struct iwl_priv *priv)
547 spin_lock_irqsave(&priv->lock, flags); 547 spin_lock_irqsave(&priv->lock, flags);
548 548
549 /* Turn off all Tx DMA fifos */ 549 /* Turn off all Tx DMA fifos */
550 iwl_write_prph(priv, IWLAGN_SCD_TXFACT, 0); 550 iwl_write_prph(priv, SCD_TXFACT, 0);
551 551
552 /* Tell NIC where to find the "keep warm" buffer */ 552 /* Tell NIC where to find the "keep warm" buffer */
553 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); 553 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
@@ -574,6 +574,154 @@ error:
574 return ret; 574 return ret;
575} 575}
576 576
577/*
578 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
579 * must be called under priv->lock and mac access
580 */
581static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask)
582{
583 iwl_write_prph(priv, SCD_TXFACT, mask);
584}
585
/* Marks a queue that has no mac80211 access category attached (e.g. the
 * command queue or an unused slot). */
#define IWL_AC_UNSET -1

/* One entry per HW Tx queue: which HW FIFO it drains into, and which
 * mac80211 access category (if any) is routed to it. */
struct queue_to_fifo_ac {
	s8 fifo, ac;
};

/*
 * Queue -> (FIFO, AC) mapping used when only the BSS context is active.
 * Queues 0-3 carry the four mac80211 ACs, queue 4 is the command FIFO,
 * queues 5-9 are unused.
 */
static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};
604
605static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
606 { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
607 { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
608 { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
609 { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
610 { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
611 { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
612 { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
613 { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
614 { IWL_TX_FIFO_BE_IPAN, 2, },
615 { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
616};
/*
 * iwl_trans_tx_start - bring up the Tx path: program the scheduler (SCD),
 * enable the FH Tx DMA channels and map HW queues to FIFOs/ACs.
 *
 * Called with the device awake; takes priv->lock for the whole register
 * programming sequence.  The register write ordering below follows the
 * hardware bring-up sequence — do not reorder.
 */
static void iwl_trans_tx_start(struct iwl_priv *priv)
{
	const struct queue_to_fifo_ac *queue_to_fifo;
	struct iwl_rxon_context *ctx;
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->lock, flags);

	/* the scheduler keeps its context in device SRAM; learn where */
	priv->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < priv->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	/* reset tx status memory */
	for (; a < priv->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	/* clear the translation tables up to the last usable queue */
	for (; a < priv->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
		iwl_write_targ_mem(priv, a, 0);

	/* point the SCD at the byte-count tables (DMA address, 1KB aligned) */
	iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
		       priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* chained mode for all queues, no aggregation yet */
	iwl_write_prph(priv, SCD_QUEUECHAIN_SEL,
		SCD_QUEUECHAIN_SEL_ALL(priv));
	iwl_write_prph(priv, SCD_AGGR_SEL, 0);

	/* initiate the queues: zero read/write pointers and per-queue
	 * context (window size / frame limit) in SCD SRAM */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {
		iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(priv, SCD_INTERRUPT_MASK,
			IWL_MASK(0, priv->hw_params.max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));

	/* map queues to FIFOs: the IPAN table when any non-BSS context
	 * is configured, the default table otherwise */
	if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	else
		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

	iwlagn_set_wr_ptrs(priv, priv->cmd_queue, 0);

	/* make sure all queue are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)	/* 4 = one counter per mac80211 AC */
		atomic_set(&priv->queue_stop_count[i], 0);
	for_each_context(priv, ctx)
		ctx->last_tx_rejected = false;

	/* reset to 0 to enable all the queue first */
	priv->txq_ctx_active_msk = 0;

	/* the loop below walks exactly 10 entries of either table */
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);

	for (i = 0; i < 10; i++) {
		int fifo = queue_to_fifo[i].fifo;
		int ac = queue_to_fifo[i].ac;

		iwl_txq_ctx_activate(priv, i);

		if (fifo == IWL_TX_FIFO_UNUSED)
			continue;

		/* queues with no AC (command queue) get no swq id */
		if (ac != IWL_AC_UNSET)
			iwl_set_swq_id(&priv->txq[i], ac, i);
		iwlagn_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
724
577/** 725/**
578 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels 726 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
579 */ 727 */
@@ -585,7 +733,7 @@ static int iwl_trans_tx_stop(struct iwl_priv *priv)
585 /* Turn off all Tx DMA fifos */ 733 /* Turn off all Tx DMA fifos */
586 spin_lock_irqsave(&priv->lock, flags); 734 spin_lock_irqsave(&priv->lock, flags);
587 735
588 iwlagn_txq_set_sched(priv, 0); 736 iwl_trans_txq_set_sched(priv, 0);
589 737
590 /* Stop each Tx DMA channel, and wait for it to be idle */ 738 /* Stop each Tx DMA channel, and wait for it to be idle */
591 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { 739 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
@@ -822,6 +970,7 @@ static const struct iwl_trans_ops trans_ops = {
822 .rx_free = iwl_trans_rx_free, 970 .rx_free = iwl_trans_rx_free,
823 971
824 .tx_init = iwl_trans_tx_init, 972 .tx_init = iwl_trans_tx_init,
973 .tx_start = iwl_trans_tx_start,
825 .tx_free = iwl_trans_tx_free, 974 .tx_free = iwl_trans_tx_free,
826 975
827 .stop_device = iwl_trans_stop_device, 976 .stop_device = iwl_trans_stop_device,