Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-tx.c')
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c | 94
1 file changed, 59 insertions(+), 35 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index fb9bcfa6d94..6199bf60d31 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -97,7 +97,8 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
 
 		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-			IWL_DEBUG_INFO(priv, "Requesting wakeup, GP1 = 0x%x\n", reg);
+			IWL_DEBUG_INFO(priv, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
+				       txq_id, reg);
 			iwl_set_bit(priv, CSR_GP_CNTRL,
 				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 			return ret;
@@ -132,7 +133,7 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 	struct iwl_tx_queue *txq = &priv->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
 	struct pci_dev *dev = priv->pci_dev;
-	int i, len;
+	int i;
 
 	if (q->n_bd == 0)
 		return;
@@ -142,8 +143,6 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
 		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
 
-	len = sizeof(struct iwl_device_cmd) * q->n_window;
-
 	/* De-alloc array of command/tx buffers */
 	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
 		kfree(txq->cmd[i]);
@@ -181,14 +180,11 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
 	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
 	struct iwl_queue *q = &txq->q;
 	struct pci_dev *dev = priv->pci_dev;
-	int i, len;
+	int i;
 
 	if (q->n_bd == 0)
 		return;
 
-	len = sizeof(struct iwl_device_cmd) * q->n_window;
-	len += IWL_MAX_SCAN_SIZE;
-
 	/* De-alloc array of command/tx buffers */
 	for (i = 0; i <= TFD_CMD_SLOTS; i++)
 		kfree(txq->cmd[i]);
@@ -370,8 +366,13 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 
 	txq->need_update = 0;
 
-	/* aggregation TX queues will get their ID when aggregation begins */
-	if (txq_id <= IWL_TX_FIFO_AC3)
+	/*
+	 * Aggregation TX queues will get their ID when aggregation begins;
+	 * they overwrite the setting done here. The command FIFO doesn't
+	 * need an swq_id so don't set one to catch errors, all others can
+	 * be set up to the identity mapping.
+	 */
+	if (txq_id != IWL_CMD_QUEUE_NUM)
 		txq->swq_id = txq_id;
 
 	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
@@ -406,15 +407,19 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
 	int txq_id;
 
 	/* Tx queues */
-	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
-		if (txq_id == IWL_CMD_QUEUE_NUM)
-			iwl_cmd_queue_free(priv);
-		else
-			iwl_tx_queue_free(priv, txq_id);
-
+	if (priv->txq)
+		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
+		     txq_id++)
+			if (txq_id == IWL_CMD_QUEUE_NUM)
+				iwl_cmd_queue_free(priv);
+			else
+				iwl_tx_queue_free(priv, txq_id);
 	iwl_free_dma_ptr(priv, &priv->kw);
 
 	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
+
+	/* free tx queue structure */
+	iwl_free_txq_mem(priv);
 }
 EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
 
@@ -446,6 +451,12 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
 		IWL_ERR(priv, "Keep Warm allocation failed\n");
 		goto error_kw;
 	}
+
+	/* allocate tx queue structure */
+	ret = iwl_alloc_txq_mem(priv);
+	if (ret)
+		goto error;
+
 	spin_lock_irqsave(&priv->lock, flags);
 
 	/* Turn off all Tx DMA fifos */
@@ -582,9 +593,7 @@ static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
 	u8 rate_plcp;
 
 	/* Set retry limit on DATA packets and Probe Responses*/
-	if (priv->data_retry_limit != -1)
-		data_retry_limit = priv->data_retry_limit;
-	else if (ieee80211_is_probe_resp(fc))
+	if (ieee80211_is_probe_resp(fc))
 		data_retry_limit = 3;
 	else
 		data_retry_limit = IWL_DEFAULT_TX_RETRY;
@@ -710,7 +719,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	dma_addr_t phys_addr;
 	dma_addr_t txcmd_phys;
 	dma_addr_t scratch_phys;
-	u16 len, len_org;
+	u16 len, len_org, firstlen, secondlen;
 	u16 seq_number = 0;
 	__le16 fc;
 	u8 hdr_len;
@@ -843,7 +852,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 		   sizeof(struct iwl_cmd_header) + hdr_len;
 
 	len_org = len;
-	len = (len + 3) & ~3;
+	firstlen = len = (len + 3) & ~3;
 
 	if (len_org != len)
 		len_org = 1;
@@ -877,7 +886,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	/* Set up TFD's 2nd entry to point directly to remainder of skb,
 	 * if any (802.11 null frames have no payload). */
-	len = skb->len - hdr_len;
+	secondlen = len = skb->len - hdr_len;
 	if (len) {
 		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
 					   len, PCI_DMA_TODEVICE);
@@ -911,6 +920,12 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
 				       len, PCI_DMA_BIDIRECTIONAL);
 
+	trace_iwlwifi_dev_tx(priv,
+			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
+			     sizeof(struct iwl_tfd),
+			     &out_cmd->hdr, firstlen,
+			     skb->data + hdr_len, secondlen);
+
 	/* Tell device the write index *just past* this latest filled TFD */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
 	ret = iwl_txq_update_write_ptr(priv, txq);
@@ -970,13 +985,20 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
 	       !(cmd->flags & CMD_SIZE_HUGE));
 
-	if (iwl_is_rfkill(priv)) {
-		IWL_DEBUG_INFO(priv, "Not sending command - RF KILL\n");
+	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
+		IWL_WARN(priv, "Not sending command - %s KILL\n",
+			 iwl_is_rfkill(priv) ? "RF" : "CT");
 		return -EIO;
 	}
 
 	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
 		IWL_ERR(priv, "No space for Tx\n");
+		if (iwl_within_ct_kill_margin(priv))
+			iwl_tt_enter_ct_kill(priv);
+		else {
+			IWL_ERR(priv, "Restarting adapter due to queue full\n");
+			queue_work(priv->workqueue, &priv->restart);
+		}
 		return -ENOSPC;
 	}
 
@@ -1039,6 +1061,8 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	pci_unmap_addr_set(out_meta, mapping, phys_addr);
 	pci_unmap_len_set(out_meta, len, fix_size);
 
+	trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);
+
 	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
 						   phys_addr, fix_size, 1,
 						   U32_PAD(cmd->len));
@@ -1105,11 +1129,6 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
 		return;
 	}
 
-	pci_unmap_single(priv->pci_dev,
-			 pci_unmap_addr(&txq->meta[cmd_idx], mapping),
-			 pci_unmap_len(&txq->meta[cmd_idx], len),
-			 PCI_DMA_BIDIRECTIONAL);
-
 	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
@@ -1132,7 +1151,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
  */
 void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
 	int txq_id = SEQ_TO_QUEUE(sequence);
 	int index = SEQ_TO_INDEX(sequence);
@@ -1157,12 +1176,17 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
 	meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];
 
+	pci_unmap_single(priv->pci_dev,
+			 pci_unmap_addr(meta, mapping),
+			 pci_unmap_len(meta, len),
+			 PCI_DMA_BIDIRECTIONAL);
+
 	/* Input error checking is done when commands are added to queue. */
 	if (meta->flags & CMD_WANT_SKB) {
-		meta->source->reply_skb = rxb->skb;
-		rxb->skb = NULL;
+		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
+		rxb->page = NULL;
 	} else if (meta->callback)
-		meta->callback(priv, cmd, rxb->skb);
+		meta->callback(priv, cmd, pkt);
 
 	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
 
@@ -1401,7 +1425,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
 
 	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
 	memset(&info->status, 0, sizeof(info->status));
-	info->flags = IEEE80211_TX_STAT_ACK;
+	info->flags |= IEEE80211_TX_STAT_ACK;
 	info->flags |= IEEE80211_TX_STAT_AMPDU;
 	info->status.ampdu_ack_map = successes;
 	info->status.ampdu_ack_len = agg->frame_count;
@@ -1421,7 +1445,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
 void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
 				struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
 	struct iwl_tx_queue *txq = NULL;
 	struct iwl_ht_agg *agg;
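
For reference, the two guards this patch adds at the top of iwl_enqueue_hcmd() can be read in isolation: refuse host commands while RF kill or CT (thermal) kill is asserted, and treat a full command queue as a symptom, entering CT kill when the device is within the thermal margin and restarting the adapter otherwise. The sketch below is a minimal standalone restatement of that flow, not driver code; the iwl_* predicates are stubs with hypothetical return values so the logic compiles and runs on its own.

	#include <stdio.h>
	#include <stdbool.h>
	#include <errno.h>

	/* Stubs standing in for the real driver predicates; the return
	 * values here are hypothetical and exist only so this sketch is
	 * self-contained. */
	static bool iwl_is_rfkill(void)             { return false; }
	static bool iwl_is_ctkill(void)             { return false; }
	static bool iwl_within_ct_kill_margin(void) { return true;  }
	static int  iwl_queue_space(void)           { return 0;     }

	/* Mirrors the guard logic added to iwl_enqueue_hcmd(). */
	static int enqueue_hcmd_guards(bool async)
	{
		if (iwl_is_rfkill() || iwl_is_ctkill()) {
			printf("Not sending command - %s KILL\n",
			       iwl_is_rfkill() ? "RF" : "CT");
			return -EIO;
		}

		if (iwl_queue_space() < (async ? 2 : 1)) {
			printf("No space for Tx\n");
			if (iwl_within_ct_kill_margin())
				printf("-> enter CT kill\n");   /* iwl_tt_enter_ct_kill() */
			else
				printf("-> restart adapter\n"); /* queue_work(&priv->restart) */
			return -ENOSPC;
		}
		return 0;
	}

	int main(void)
	{
		/* With the stub values above this takes the queue-full path
		 * and the CT-kill branch, returning -ENOSPC. */
		printf("guard result: %d\n", enqueue_hcmd_guards(false));
		return 0;
	}

The design point visible in the diff is that a stalled command queue near the CT-kill margin is attributed to overheating, so the driver throttles instead of restarting; only an unexplained queue-full condition triggers the adapter restart.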