author	Tomas Winkler <tomas.winkler@intel.com>	2008-05-29 04:35:12 -0400
committer	John W. Linville <linville@tuxdriver.com>	2008-06-03 15:00:24 -0400
commit	17b889290a184b52ee394c31dd5a52b8c1b3456d (patch)
tree	2e8889673a9f26b1a1afb2291ede6032eeadd1bb /drivers/net/wireless/iwlwifi/iwl-tx.c
parent	a5e8b5056ea8762e67c9fa980c8db48009ed2a67 (diff)
iwlwifi: move tx reclaim flow into iwl-tx
This patch
1. moves the TX reclaim flow into iwl-tx
2. separates the command queue and TX queue reclaim flows
Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
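
For readers unfamiliar with the reclaim pattern this patch consolidates: the driver keeps each TX queue as a circular ring of buffer descriptors and, when the firmware reports a completed index, advances read_ptr (with wrap-around) until it passes that index, freeing each entry on the way. Below is a minimal, self-contained sketch of that loop; the struct, helper names, and queue size are illustrative stand-ins, not the driver's API beyond what the diff further down adds.

    #include <stdio.h>

    /* Illustrative stand-in for the driver's per-queue bookkeeping (struct iwl_queue). */
    struct demo_queue {
            int n_bd;       /* number of buffer descriptors in the ring */
            int read_ptr;   /* oldest un-reclaimed entry */
            int write_ptr;  /* next entry the driver will fill */
    };

    /* Same idea as iwl_queue_inc_wrap(): advance an index, wrapping at n_bd. */
    static int demo_inc_wrap(int index, int n_bd)
    {
            return (index + 1 == n_bd) ? 0 : index + 1;
    }

    /*
     * Reclaim every entry from read_ptr up to and including 'index', the last
     * descriptor the firmware reports as completed; mirrors the loop structure
     * of iwl_tx_queue_reclaim() in the diff below.
     */
    static int demo_reclaim(struct demo_queue *q, int index)
    {
            int nfreed = 0;

            for (index = demo_inc_wrap(index, q->n_bd); q->read_ptr != index;
                 q->read_ptr = demo_inc_wrap(q->read_ptr, q->n_bd)) {
                    /* the real driver hands the skb back to mac80211 and frees the TFD here */
                    nfreed++;
            }
            return nfreed;
    }

    int main(void)
    {
            struct demo_queue q = { .n_bd = 8, .read_ptr = 2, .write_ptr = 6 };

            /* firmware says descriptor 5 is the last one it finished */
            printf("freed %d entries\n", demo_reclaim(&q, 5)); /* frees 2..5 -> 4 entries */
            return 0;
    }

Compiled standalone, this prints "freed 4 entries" for the example ring, matching the read_ptr-through-index-inclusive semantics of iwl_tx_queue_reclaim() in the diff.
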
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-tx.c')
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-tx.c	108
1 file changed, 108 insertions(+), 0 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 885a4c11ac49..b2b2ed29602a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -1060,6 +1060,114 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	return ret ? ret : idx;
 }
 
+int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
+{
+	struct iwl_tx_queue *txq = &priv->txq[txq_id];
+	struct iwl_queue *q = &txq->q;
+	struct iwl_tx_info *tx_info;
+	int nfreed = 0;
+
+	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
+		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
+			  "is out of range [0-%d] %d %d.\n", txq_id,
+			  index, q->n_bd, q->write_ptr, q->read_ptr);
+		return 0;
+	}
+
+	for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
+		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+		tx_info = &txq->txb[txq->q.read_ptr];
+		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
+		tx_info->skb[0] = NULL;
+		iwl_hw_txq_free_tfd(priv, txq);
+
+		nfreed++;
+	}
+	return nfreed;
+}
+EXPORT_SYMBOL(iwl_tx_queue_reclaim);
+
+
+/**
+ * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
+ *
+ * When FW advances 'R' index, all entries between old and new 'R' index
+ * need to be reclaimed. As result, some free space forms. If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
+{
+	struct iwl_tx_queue *txq = &priv->txq[txq_id];
+	struct iwl_queue *q = &txq->q;
+	int nfreed = 0;
+
+	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
+		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
+			  "is out of range [0-%d] %d %d.\n", txq_id,
+			  index, q->n_bd, q->write_ptr, q->read_ptr);
+		return;
+	}
+
+	for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
+		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+		if (nfreed > 1) {
+			IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
+					q->write_ptr, q->read_ptr);
+			queue_work(priv->workqueue, &priv->restart);
+		}
+		nfreed++;
+	}
+}
+
+/**
+ * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
+ * @rxb: Rx buffer to reclaim
+ *
+ * If an Rx buffer has an async callback associated with it the callback
+ * will be executed.  The attached skb (if present) will only be freed
+ * if the callback returns 1
+ */
+void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+	int txq_id = SEQ_TO_QUEUE(sequence);
+	int index = SEQ_TO_INDEX(sequence);
+	int huge = sequence & SEQ_HUGE_FRAME;
+	int cmd_index;
+	struct iwl_cmd *cmd;
+
+	/* If a Tx command is being handled and it isn't in the actual
+	 * command queue then there a command routing bug has been introduced
+	 * in the queue management code. */
+	if (txq_id != IWL_CMD_QUEUE_NUM)
+		IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
+			  txq_id, pkt->hdr.cmd);
+	BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
+
+	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
+	cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
+
+	/* Input error checking is done when commands are added to queue. */
+	if (cmd->meta.flags & CMD_WANT_SKB) {
+		cmd->meta.source->u.skb = rxb->skb;
+		rxb->skb = NULL;
+	} else if (cmd->meta.u.callback &&
+		   !cmd->meta.u.callback(priv, cmd, rxb->skb))
+		rxb->skb = NULL;
+
+	iwl_hcmd_queue_reclaim(priv, txq_id, index);
+
+	if (!(cmd->meta.flags & CMD_ASYNC)) {
+		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+		wake_up_interruptible(&priv->wait_command_queue);
+	}
+}
+EXPORT_SYMBOL(iwl_tx_cmd_complete);
+
+
 #ifdef CONFIG_IWLWIF_DEBUG
 #define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
 
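
A side note on the completion path above: iwl_tx_cmd_complete() recovers the originating queue and slot by unpacking the response's sequence field via SEQ_TO_QUEUE(), SEQ_TO_INDEX(), and SEQ_HUGE_FRAME. The small sketch below illustrates the general bit-packing pattern with made-up field positions; the real masks and shifts live in the iwlwifi headers and may differ.

    #include <stdint.h>
    #include <stdio.h>

    /* Made-up field layout, purely to illustrate the unpacking pattern; the
     * real SEQ_TO_QUEUE()/SEQ_TO_INDEX()/SEQ_HUGE_FRAME definitions are in
     * the iwlwifi headers and may use different bits. */
    #define DEMO_SEQ_TO_INDEX(s)   ((s) & 0xff)          /* slot inside the queue    */
    #define DEMO_SEQ_TO_QUEUE(s)   (((s) >> 8) & 0x1f)   /* originating TX queue     */
    #define DEMO_SEQ_HUGE_FRAME    0x4000                /* oversized command buffer */

    int main(void)
    {
            /* pretend this arrived as pkt->hdr.sequence in a command response */
            uint16_t sequence = (4u << 8) | 17u | DEMO_SEQ_HUGE_FRAME;

            printf("queue %d, index %d, huge=%d\n",
                   DEMO_SEQ_TO_QUEUE(sequence),
                   DEMO_SEQ_TO_INDEX(sequence),
                   !!(sequence & DEMO_SEQ_HUGE_FRAME)); /* queue 4, index 17, huge=1 */
            return 0;
    }
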