path: root/drivers/net/wireless/iwlwifi/pcie/trans.c
author	Emmanuel Grumbach <emmanuel.grumbach@intel.com>	2012-06-21 04:53:44 -0400
committer	Johannes Berg <johannes.berg@intel.com>	2012-06-25 03:37:58 -0400
commit	b04db9ac4f7641332e0133b2fd8f82e6e4553947 (patch)
tree	4d4b0fc6009f865fa3866c9062d2ae76e8e13ada /drivers/net/wireless/iwlwifi/pcie/trans.c
parent	e75dac921d88ac1fa1ad08686ab242556f8b888b (diff)
iwlwifi: configure the queues from the op_mode
Since the op_mode defines the queue mapping, let it do it completely through the API functions.

Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
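For context, a rough sketch of the op_mode side this commit relies on (not part of the diff below): instead of handing the transport a full queue-to-FIFO map, the op_mode now only passes the command queue and its FIFO in struct iwl_trans_config, and enables its own data queues through the transport API. The helper name and the numeric queue/FIFO values below are illustrative assumptions, not taken from this commit.

	/*
	 * Hedged sketch only: an op_mode filling the transport configuration
	 * with the new cmd_fifo field and then enabling a data queue itself.
	 * Queue/FIFO numbers here are placeholders, not the real op_mode values.
	 */
	static void example_op_mode_setup(struct iwl_trans *trans)
	{
		struct iwl_trans_config trans_cfg = {};

		trans_cfg.cmd_queue = 9;	/* assumed command queue index */
		trans_cfg.cmd_fifo = 7;		/* assumed command FIFO number */

		/*
		 * queue_to_fifo/n_queue_to_fifo are gone: only the command
		 * queue mapping is handed to the transport here.
		 */
		iwl_trans_configure(trans, &trans_cfg);

		/* Data queues are enabled later by the op_mode itself, e.g.: */
		iwl_trans_ac_txq_enable(trans, 0 /* queue */, 3 /* fifo */);
	}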
Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie/trans.c')
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/trans.c	22
1 file changed, 4 insertions(+), 18 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 42f369d15f48..bac0eb0d046d 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1059,7 +1059,7 @@ static void iwl_tx_start(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 a;
-	int i, chan;
+	int chan;
 	u32 reg_val;
 
 	/* make sure all queue are not stopped/used */
@@ -1091,12 +1091,8 @@ static void iwl_tx_start(struct iwl_trans *trans)
 	 */
 	iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
 
-	for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
-		int fifo = trans_pcie->setup_q_to_fifo[i];
-
-		iwl_trans_pcie_txq_enable(trans, i, fifo, IWL_INVALID_STATION,
-					  IWL_TID_NON_QOS, SCD_FRAME_LIMIT, 0);
-	}
+	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
+				trans_pcie->cmd_fifo);
 
 	/* Activate all Tx DMA/FIFO channels */
 	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
@@ -1528,6 +1524,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
 	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
+	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
 	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
 		trans_pcie->n_no_reclaim_cmds = 0;
 	else
@@ -1536,17 +1533,6 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
 	memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
 	       trans_pcie->n_no_reclaim_cmds * sizeof(u8));
 
-	trans_pcie->n_q_to_fifo = trans_cfg->n_queue_to_fifo;
-
-	if (WARN_ON(trans_pcie->n_q_to_fifo > IWL_MAX_HW_QUEUES))
-		trans_pcie->n_q_to_fifo = IWL_MAX_HW_QUEUES;
-
-	/* at least the command queue must be mapped */
-	WARN_ON(!trans_pcie->n_q_to_fifo);
-
-	memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo,
-	       trans_pcie->n_q_to_fifo * sizeof(u8));
-
 	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
 	if (trans_pcie->rx_buf_size_8k)
 		trans_pcie->rx_page_order = get_order(8 * 1024);