Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-tx.c')
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-tx.c	155
1 file changed, 114 insertions(+), 41 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index fb9bcfa6d947..888a8e9fe9ef 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -97,7 +97,8 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
 
 		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-			IWL_DEBUG_INFO(priv, "Requesting wakeup, GP1 = 0x%x\n", reg);
+			IWL_DEBUG_INFO(priv, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
+				       txq_id, reg);
 			iwl_set_bit(priv, CSR_GP_CNTRL,
 				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 			return ret;
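The hunk above only extends a debug message, but it sits in the uCode power-save handshake: while the MAC may be asleep, the driver must not poke the TFD write pointer directly. A paraphrased sketch of the surrounding logic in iwl_txq_update_write_ptr(), reconstructed from context and not part of this diff:

	/* Paraphrased: power-save path of iwl_txq_update_write_ptr() */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			/* MAC asleep: request access, retry the write later */
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return ret;
		}
		/* MAC awake: write the index, tagged with the queue id */
		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
				   txq->q.write_ptr | (txq_id << 8));
	}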
@@ -132,7 +133,7 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 	struct iwl_tx_queue *txq = &priv->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
 	struct pci_dev *dev = priv->pci_dev;
-	int i, len;
+	int i;
 
 	if (q->n_bd == 0)
 		return;
@@ -142,8 +143,6 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
 		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
 
-	len = sizeof(struct iwl_device_cmd) * q->n_window;
-
 	/* De-alloc array of command/tx buffers */
 	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
 		kfree(txq->cmd[i]);
@@ -181,14 +180,11 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
 	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
 	struct iwl_queue *q = &txq->q;
 	struct pci_dev *dev = priv->pci_dev;
-	int i, len;
+	int i;
 
 	if (q->n_bd == 0)
 		return;
 
-	len = sizeof(struct iwl_device_cmd) * q->n_window;
-	len += IWL_MAX_SCAN_SIZE;
-
 	/* De-alloc array of command/tx buffers */
 	for (i = 0; i <= TFD_CMD_SLOTS; i++)
 		kfree(txq->cmd[i]);
@@ -370,8 +366,13 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 
 	txq->need_update = 0;
 
-	/* aggregation TX queues will get their ID when aggregation begins */
-	if (txq_id <= IWL_TX_FIFO_AC3)
+	/*
+	 * Aggregation TX queues will get their ID when aggregation begins;
+	 * they overwrite the setting done here. The command FIFO doesn't
+	 * need an swq_id so don't set one to catch errors, all others can
+	 * be set up to the identity mapping.
+	 */
+	if (txq_id != IWL_CMD_QUEUE_NUM)
 		txq->swq_id = txq_id;
 
 	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
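The rewritten comment spells out the swq_id policy. Illustratively, swq_id is what the driver later hands back to mac80211 when flow-controlling software queues, so the identity mapping keeps data queues one-to-one, aggregation queues overwrite it when their block-ack session starts, and the command queue (which never maps to a mac80211 queue) is deliberately left unset so stray uses stand out:

	/* Illustrative consumer of swq_id; the driver's real wake/stop
	 * wrappers live elsewhere in this driver and may differ. */
	ieee80211_wake_queue(priv->hw, txq->swq_id);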
@@ -406,15 +407,19 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
 	int txq_id;
 
 	/* Tx queues */
-	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
-		if (txq_id == IWL_CMD_QUEUE_NUM)
-			iwl_cmd_queue_free(priv);
-		else
-			iwl_tx_queue_free(priv, txq_id);
-
+	if (priv->txq)
+		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
+		     txq_id++)
+			if (txq_id == IWL_CMD_QUEUE_NUM)
+				iwl_cmd_queue_free(priv);
+			else
+				iwl_tx_queue_free(priv, txq_id);
 	iwl_free_dma_ptr(priv, &priv->kw);
 
 	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
+
+	/* free tx queue structure */
+	iwl_free_txq_mem(priv);
 }
 EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
 
@@ -446,6 +451,12 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
 		IWL_ERR(priv, "Keep Warm allocation failed\n");
 		goto error_kw;
 	}
+
+	/* allocate tx queue structure */
+	ret = iwl_alloc_txq_mem(priv);
+	if (ret)
+		goto error;
+
 	spin_lock_irqsave(&priv->lock, flags);
 
 	/* Turn off all Tx DMA fifos */
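Together with the iwl_free_txq_mem() call in the previous hunk, this converts priv->txq from static storage to a per-device allocation (hence the new NULL check before freeing the queues). The helpers live outside this file; a plausible sketch, assuming the array is sized by the configured queue count — priv->cfg->num_of_queues and the error message are assumptions:

	/* Hedged sketch of the allocation pair referenced by these hunks */
	int iwl_alloc_txq_mem(struct iwl_priv *priv)
	{
		priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
				    priv->cfg->num_of_queues, GFP_KERNEL);
		if (!priv->txq) {
			IWL_ERR(priv, "Not enough memory for txq\n");
			return -ENOMEM;
		}
		return 0;
	}

	void iwl_free_txq_mem(struct iwl_priv *priv)
	{
		kfree(priv->txq);
		priv->txq = NULL;
	}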
@@ -582,9 +593,7 @@ static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
 	u8 rate_plcp;
 
 	/* Set retry limit on DATA packets and Probe Responses*/
-	if (priv->data_retry_limit != -1)
-		data_retry_limit = priv->data_retry_limit;
-	else if (ieee80211_is_probe_resp(fc))
+	if (ieee80211_is_probe_resp(fc))
 		data_retry_limit = 3;
 	else
 		data_retry_limit = IWL_DEFAULT_TX_RETRY;
@@ -701,6 +710,8 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_sta *sta = info->control.sta;
+	struct iwl_station_priv *sta_priv = NULL;
 	struct iwl_tx_queue *txq;
 	struct iwl_queue *q;
 	struct iwl_device_cmd *out_cmd;
@@ -710,7 +721,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	dma_addr_t phys_addr;
 	dma_addr_t txcmd_phys;
 	dma_addr_t scratch_phys;
-	u16 len, len_org;
+	u16 len, len_org, firstlen, secondlen;
 	u16 seq_number = 0;
 	__le16 fc;
 	u8 hdr_len;
@@ -763,6 +774,24 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
 
+	if (sta)
+		sta_priv = (void *)sta->drv_priv;
+
+	if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
+	    sta_priv->asleep) {
+		WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
+		/*
+		 * This sends an asynchronous command to the device,
+		 * but we can rely on it being processed before the
+		 * next frame is processed -- and the next frame to
+		 * this station is the one that will consume this
+		 * counter.
+		 * For now set the counter to just 1 since we do not
+		 * support uAPSD yet.
+		 */
+		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
+	}
+
 	txq_id = skb_get_queue_mapping(skb);
 	if (ieee80211_is_data_qos(fc)) {
 		qc = ieee80211_get_qos_ctl(hdr);
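The sta_priv->asleep flag tested here is maintained from mac80211's station-notify callback elsewhere in the driver. A hypothetical sketch of that counterpart — the handler name and placement are assumptions; only the STA_NOTIFY_SLEEP/STA_NOTIFY_AWAKE semantics are mac80211's:

	/* Hypothetical: where the asleep flag would be flipped */
	static void example_sta_notify(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       enum sta_notify_cmd cmd,
				       struct ieee80211_sta *sta)
	{
		struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;

		switch (cmd) {
		case STA_NOTIFY_SLEEP:
			sta_priv->asleep = true;
			break;
		case STA_NOTIFY_AWAKE:
			sta_priv->asleep = false;
			break;
		default:
			break;
		}
	}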
@@ -843,7 +872,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 		sizeof(struct iwl_cmd_header) + hdr_len;
 
 	len_org = len;
-	len = (len + 3) & ~3;
+	firstlen = len = (len + 3) & ~3;
 
 	if (len_org != len)
 		len_org = 1;
@@ -877,7 +906,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	/* Set up TFD's 2nd entry to point directly to remainder of skb,
 	 * if any (802.11 null frames have no payload). */
-	len = skb->len - hdr_len;
+	secondlen = len = skb->len - hdr_len;
 	if (len) {
 		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
 					   len, PCI_DMA_TODEVICE);
@@ -911,11 +940,28 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
 				       len, PCI_DMA_BIDIRECTIONAL);
 
+	trace_iwlwifi_dev_tx(priv,
+			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
+			     sizeof(struct iwl_tfd),
+			     &out_cmd->hdr, firstlen,
+			     skb->data + hdr_len, secondlen);
+
 	/* Tell device the write index *just past* this latest filled TFD */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
 	ret = iwl_txq_update_write_ptr(priv, txq);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
+	/*
+	 * At this point the frame is "transmitted" successfully
+	 * and we will get a TX status notification eventually,
+	 * regardless of the value of ret. "ret" only indicates
+	 * whether or not we should update the write pointer.
+	 */
+
+	/* avoid atomic ops if it isn't an associated client */
+	if (sta_priv && sta_priv->client)
+		atomic_inc(&sta_priv->pending_frames);
+
 	if (ret)
 		return ret;
 
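The atomic_inc() here is one half of a counter protocol: the new iwl_tx_status() added further down decrements pending_frames for each completed frame and, on reaching zero, lets mac80211 unblock the station. Schematically, condensed from the two hunks (illustrative):

	/* TX path: count every frame queued to an associated client */
	atomic_inc(&sta_priv->pending_frames);
	/* completion path: when the last in-flight frame is reported */
	if (atomic_dec_return(&sta_priv->pending_frames) == 0)
		ieee80211_sta_block_awake(priv->hw, sta, false);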
@@ -970,13 +1016,20 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
 	       !(cmd->flags & CMD_SIZE_HUGE));
 
-	if (iwl_is_rfkill(priv)) {
-		IWL_DEBUG_INFO(priv, "Not sending command - RF KILL\n");
+	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
+		IWL_WARN(priv, "Not sending command - %s KILL\n",
+			 iwl_is_rfkill(priv) ? "RF" : "CT");
 		return -EIO;
 	}
 
 	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
-		IWL_ERR(priv, "No space for Tx\n");
+		IWL_ERR(priv, "No space in command queue\n");
+		if (iwl_within_ct_kill_margin(priv))
+			iwl_tt_enter_ct_kill(priv);
+		else {
+			IWL_ERR(priv, "Restarting adapter due to queue full\n");
+			queue_work(priv->workqueue, &priv->restart);
+		}
 		return -ENOSPC;
 	}
 
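CT kill is the thermal counterpart of RF kill, so a host command must be refused in either state. The helpers are defined elsewhere in the driver; as a hedged sketch, both reduce to status-bit tests (the example_* names are illustrative, and the real iwl_is_rfkill() may also consider the software kill bit):

	/* Illustrative status-bit tests behind the new check */
	static inline int example_is_rfkill(struct iwl_priv *priv)
	{
		return test_bit(STATUS_RF_KILL_HW, &priv->status);
	}

	static inline int example_is_ctkill(struct iwl_priv *priv)
	{
		return test_bit(STATUS_CT_KILL, &priv->status);
	}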
@@ -1039,6 +1092,8 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	pci_unmap_addr_set(out_meta, mapping, phys_addr);
 	pci_unmap_len_set(out_meta, len, fix_size);
 
+	trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);
+
 	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
 						   phys_addr, fix_size, 1,
 						   U32_PAD(cmd->len));
@@ -1051,6 +1106,24 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	return ret ? ret : idx;
 }
 
+static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	struct ieee80211_sta *sta;
+	struct iwl_station_priv *sta_priv;
+
+	sta = ieee80211_find_sta(priv->vif, hdr->addr1);
+	if (sta) {
+		sta_priv = (void *)sta->drv_priv;
+		/* avoid atomic ops if this isn't a client */
+		if (sta_priv->client &&
+		    atomic_dec_return(&sta_priv->pending_frames) == 0)
+			ieee80211_sta_block_awake(priv->hw, sta, false);
+	}
+
+	ieee80211_tx_status_irqsafe(priv->hw, skb);
+}
+
 int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 {
 	struct iwl_tx_queue *txq = &priv->txq[txq_id];
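ieee80211_sta_block_awake(hw, sta, false) releases a block the driver is expected to set when a client dozes off with frames still in flight. A hypothetical sketch of the blocking side — the exact call site is an assumption, but the API semantics are mac80211's:

	/* Hypothetical: on a sleep notification, keep the station blocked
	 * until the frames counted in iwl_tx_skb() have all completed */
	if (atomic_read(&sta_priv->pending_frames) > 0)
		ieee80211_sta_block_awake(hw, sta, true);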
@@ -1070,7 +1143,7 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
 		tx_info = &txq->txb[txq->q.read_ptr];
-		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
+		iwl_tx_status(priv, tx_info->skb[0]);
 		tx_info->skb[0] = NULL;
 
 		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
@@ -1105,11 +1178,6 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
 		return;
 	}
 
-	pci_unmap_single(priv->pci_dev,
-		pci_unmap_addr(&txq->meta[cmd_idx], mapping),
-		pci_unmap_len(&txq->meta[cmd_idx], len),
-		PCI_DMA_BIDIRECTIONAL);
-
 	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
@@ -1132,7 +1200,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
  */
 void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
 	int txq_id = SEQ_TO_QUEUE(sequence);
 	int index = SEQ_TO_INDEX(sequence);
@@ -1157,12 +1225,17 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
 	meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];
 
+	pci_unmap_single(priv->pci_dev,
+			 pci_unmap_addr(meta, mapping),
+			 pci_unmap_len(meta, len),
+			 PCI_DMA_BIDIRECTIONAL);
+
 	/* Input error checking is done when commands are added to queue. */
 	if (meta->flags & CMD_WANT_SKB) {
-		meta->source->reply_skb = rxb->skb;
-		rxb->skb = NULL;
+		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
+		rxb->page = NULL;
 	} else if (meta->callback)
-		meta->callback(priv, cmd, rxb->skb);
+		meta->callback(priv, cmd, pkt);
 
 	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
 
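This hunk is part of the wider conversion of RX buffers from skbs to pages; rxb_addr() yields the kernel virtual address of the buffer page. A sketch of the accessor as presumably defined in a driver header — shown as a function for clarity, though it may well be a macro:

	/* Assumed shape of rxb_addr() under the page-based RX scheme */
	static inline void *example_rxb_addr(struct iwl_rx_mem_buffer *r)
	{
		return page_address(r->page);
	}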
@@ -1240,7 +1313,7 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
 	if (tid_data->tfds_in_queue == 0) {
 		IWL_DEBUG_HT(priv, "HW queue is empty\n");
 		tid_data->agg.state = IWL_AGG_ON;
-		ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
+		ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid);
 	} else {
 		IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
 			     tid_data->tfds_in_queue);
@@ -1305,7 +1378,7 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
 	if (ret)
 		return ret;
 
-	ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);
+	ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
 
 	return 0;
 }
@@ -1329,7 +1402,7 @@ int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
 			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
 							     ssn, tx_fifo);
 			tid_data->agg.state = IWL_AGG_OFF;
-			ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
+			ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
 		}
 		break;
 	case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -1337,7 +1410,7 @@ int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
 		if (tid_data->tfds_in_queue == 0) {
 			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
 			tid_data->agg.state = IWL_AGG_ON;
-			ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
+			ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
 		}
 		break;
 	}
@@ -1401,7 +1474,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
 
 	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
 	memset(&info->status, 0, sizeof(info->status));
-	info->flags = IEEE80211_TX_STAT_ACK;
+	info->flags |= IEEE80211_TX_STAT_ACK;
 	info->flags |= IEEE80211_TX_STAT_AMPDU;
 	info->status.ampdu_ack_map = successes;
 	info->status.ampdu_ack_len = agg->frame_count;
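The switch from '=' to '|=' is deliberate: a plain assignment would wipe every flag already set on the frame's tx_info, while OR-ing in the status bit preserves them. Illustratively:

	info->flags = IEEE80211_TX_STAT_ACK;	/* old: clobbers other flags */
	info->flags |= IEEE80211_TX_STAT_ACK;	/* new: keeps existing bits */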
@@ -1421,7 +1494,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
 void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
 				struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
 	struct iwl_tx_queue *txq = NULL;
 	struct iwl_ht_agg *agg;