path: root/drivers/net/wireless/iwlwifi/iwl-5000.c
author     Tomas Winkler <tomas.winkler@intel.com>    2008-10-24 02:48:55 -0400
committer  John W. Linville <linville@tuxdriver.com>  2008-10-31 19:02:27 -0400
commit     127901ab69bbb263fb2b46e850cf20c57ac321d3
tree       35ab1fa324ca430ab486e9bc63cfea41966b1c49
parent     951891c7ef844919d30aac7b1fc7396fd8be23ff
iwlwifi: refactor tx byte count table usage
This patch drops the unreadable usage of IWL_SET/GET_BITS16 in the byte count
table handling. It also cleans up the byte count table code a bit and adds
WARN_ON traps on invalid values. This patch is pure cleanup, no functional
changes.

Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-5000.c')
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c  |  52
1 file changed, 25 insertions(+), 27 deletions(-)
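
[Editor's note] For reference, a minimal userspace sketch of the packing the new code
path uses: the low 12 bits of each byte count table entry carry the frame length
(payload plus CRC and delimiter overhead) and the top 4 bits carry the station id.
Only the bit layout ((len & 0xFFF) | (sta_id << 12)) and the 12-bit WARN_ON limit are
taken from the diff below; the overhead constants and the cpu_to_le16() stand-in are
assumptions so the sketch builds outside the kernel.

/*
 * Sketch of the byte count table entry encoding introduced by this patch.
 * IWL_TX_CRC_SIZE / IWL_TX_DELIMITER_SIZE values and cpu_to_le16_stub()
 * are assumptions for illustration only.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define IWL_TX_CRC_SIZE        4	/* assumed value */
#define IWL_TX_DELIMITER_SIZE  4	/* assumed value */

/* Identity on a little-endian host; stands in for the kernel helper. */
static inline uint16_t cpu_to_le16_stub(uint16_t v) { return v; }

static uint16_t build_bc_ent(uint16_t byte_cnt, uint8_t sta_id)
{
	uint16_t len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;

	/* Mirrors the WARN_ON() the patch adds: the length field is 12 bits
	 * wide, leaving 4 bits for the station id. */
	assert(len <= 0xFFF && sta_id <= 0xF);

	return cpu_to_le16_stub((len & 0xFFF) | (uint16_t)(sta_id << 12));
}

int main(void)
{
	/* A 1500-byte frame for station 3 lands in the 0x3xxx range. */
	printf("bc_ent = 0x%04x\n", (unsigned)build_bc_ent(1500, 3));
	return 0;
}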
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index b4b7e8b2a42..0c9281e9f2a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -723,7 +723,7 @@ static int iwl5000_alive_notify(struct iwl_priv *priv)
 
 	iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR,
 			(priv->shared_phys +
-			 offsetof(struct iwl5000_shared, queues_byte_cnt_tbls)) >> 10);
+			 offsetof(struct iwl5000_shared, queues_bc_tbls)) >> 10);
 	iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL,
 		IWL50_SCD_QUEUECHAIN_SEL_ALL(
 				priv->hw_params.max_txq_num));
@@ -891,15 +891,17 @@ static void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
 					    u16 byte_cnt)
 {
 	struct iwl5000_shared *shared_data = priv->shared_virt;
+	int write_ptr = txq->q.write_ptr;
 	int txq_id = txq->q.id;
 	u8 sec_ctl = 0;
-	u8 sta = 0;
-	int len;
+	u8 sta_id = 0;
+	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
+	__le16 bc_ent;
 
-	len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
+	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
 
 	if (txq_id != IWL_CMD_QUEUE_NUM) {
-		sta = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
+		sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
 		sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
 
 		switch (sec_ctl & TX_CMD_SEC_MSK) {
@@ -915,40 +917,36 @@ static void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
 	}
 	}
 
-	IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
-		       tfd_offset[txq->q.write_ptr], byte_cnt, len);
+	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
 
-	IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
-		       tfd_offset[txq->q.write_ptr], sta_id, sta);
+	shared_data->queues_bc_tbls[txq_id].tfd_offset[write_ptr] = bc_ent;
 
-	if (txq->q.write_ptr < IWL50_MAX_WIN_SIZE) {
-		IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
-			tfd_offset[IWL50_QUEUE_SIZE + txq->q.write_ptr],
-			byte_cnt, len);
-		IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
-			tfd_offset[IWL50_QUEUE_SIZE + txq->q.write_ptr],
-			sta_id, sta);
-	}
+	if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+		shared_data->queues_bc_tbls[txq_id].
+			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
 }
 
 static void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
 					   struct iwl_tx_queue *txq)
 {
-	int txq_id = txq->q.id;
 	struct iwl5000_shared *shared_data = priv->shared_virt;
-	u8 sta = 0;
+	int txq_id = txq->q.id;
+	int read_ptr = txq->q.read_ptr;
+	u8 sta_id = 0;
+	__le16 bc_ent;
+
+	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
 
 	if (txq_id != IWL_CMD_QUEUE_NUM)
-		sta = txq->cmd[txq->q.read_ptr]->cmd.tx.sta_id;
+		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
 
-	shared_data->queues_byte_cnt_tbls[txq_id].tfd_offset[txq->q.read_ptr].
-		val = cpu_to_le16(1 | (sta << 12));
+	bc_ent = cpu_to_le16(1 | (sta_id << 12));
+	shared_data->queues_bc_tbls[txq_id].
+		tfd_offset[read_ptr] = bc_ent;
 
-	if (txq->q.write_ptr < IWL50_MAX_WIN_SIZE) {
-		shared_data->queues_byte_cnt_tbls[txq_id].
-			tfd_offset[IWL50_QUEUE_SIZE + txq->q.read_ptr].
-			val = cpu_to_le16(1 | (sta << 12));
-	}
+	if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+		shared_data->queues_bc_tbls[txq_id].
+			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
 }
 
 static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
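
[Editor's note] Both the update and the invalidate paths guard the second write with
if (write_ptr < TFD_QUEUE_SIZE_BC_DUP): the first entries of each byte count table are
mirrored past the nominal end of the array. A plausible reading, not stated in the
patch itself, is that this lets the scheduler read a short run of entries across the
ring wrap point without special-casing it. A standalone sketch of that mirrored write,
with the array sizes as assumed values:

/*
 * Sketch of the mirrored byte count table update.  TFD_QUEUE_SIZE_MAX and
 * TFD_QUEUE_SIZE_BC_DUP are assumed values, and the struct only models the
 * tfd_offset[] member visible in the diff.
 */
#include <stdint.h>

#define TFD_QUEUE_SIZE_MAX     256	/* assumed ring size */
#define TFD_QUEUE_SIZE_BC_DUP  64	/* assumed duplicate window */

struct bc_tbl_sketch {
	uint16_t tfd_offset[TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP];
};

void bc_tbl_set(struct bc_tbl_sketch *tbl, int write_ptr, uint16_t bc_ent)
{
	tbl->tfd_offset[write_ptr] = bc_ent;

	/* The first TFD_QUEUE_SIZE_BC_DUP entries are duplicated after the
	 * nominal end of the table, matching both the update and the
	 * invalidate paths in the patch. */
	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		tbl->tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}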