author		Tomas Winkler <tomas.winkler@intel.com>	2008-10-14 15:32:48 -0400
committer	John W. Linville <linville@tuxdriver.com>	2008-10-31 19:00:30 -0400
commit		499b1883038a7db2dcf8b64229f8533ce2c8f0fc (patch)
tree		34e5682d4e7e682412c1ffb5c2ff06a991c7ecbd /drivers/net/wireless/iwlwifi/iwl-tx.c
parent		76eff18bdc5feaa53f1be33709b67df02f1d55e9 (diff)
iwlwifi: fix TX cmd dma unmapping
This patch:
1. fixes host command DMA unmapping; this is visible only on platforms
   where DMA unmapping is not a no-op, such as PPC64 (not tested)
   (a sketch of the bookkeeping involved follows the sign-off trailer below)
2. correctly attaches the high-memory part of the host command buffer
3. changes the TFD TB structure: instead of describing a transmit buffer
   (TB) tuple it describes a single TB, which makes the code more readable
   at the price of one unaligned access
   (a sketch of the new packing follows the diffstat below)
4. eliminates the use of IWL_GET/SET_BITS for TFD handling
5. renames the TFD structures to match the HW spec
6. reduces iwl_tx_info size by reserving the first TB for the host command
This patch should not have any visible effect on 32-bit x86.
This patch is a rework of "iwlwifi: fix DMA code and bugs" from
Johannes Berg <johannes@sipsolutions.net>.
Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Reviewed-by: Zhu Yi <yi.zhu@intel.com>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
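The unmapping fix in point 1 works by remembering the DMA mapping of each host command buffer at map time, so the free and reclaim paths can hand pci_unmap_single() exactly the address and length that were mapped. Below is a minimal sketch of that bookkeeping pattern, not the driver's actual structures: the names cmd_meta, map_cmd and unmap_cmd are illustrative, while the pci_unmap_* helpers are the ones the patch itself uses on iwl_cmd_meta.

#include <linux/pci.h>

/* Illustrative container; the driver keeps these fields in iwl_cmd_meta. */
struct cmd_meta {
	DECLARE_PCI_UNMAP_ADDR(mapping)		/* dma_addr_t recorded at map time */
	DECLARE_PCI_UNMAP_LEN(len)		/* mapped length recorded at map time */
};

/* Map a command buffer and remember exactly what was mapped. */
static dma_addr_t map_cmd(struct pci_dev *pdev, void *cmd, size_t size,
			  struct cmd_meta *meta)
{
	dma_addr_t phys = pci_map_single(pdev, cmd, size, PCI_DMA_TODEVICE);

	pci_unmap_addr_set(meta, mapping, phys);
	pci_unmap_len_set(meta, len, size);
	return phys;
}

/* Undo the mapping from the stored state, as the free/reclaim paths now do. */
static void unmap_cmd(struct pci_dev *pdev, struct cmd_meta *meta)
{
	pci_unmap_single(pdev,
			 pci_unmap_addr(meta, mapping),
			 pci_unmap_len(meta, len),
			 PCI_DMA_TODEVICE);
}

In the patch this shows up as pci_unmap_addr_set()/pci_unmap_len_set() right after pci_map_single() in iwl_tx_skb() and iwl_enqueue_hcmd(), with iwl_hw_txq_free_tfd() and iwl_hcmd_queue_reclaim() unmapping the command buffer from that stored state instead of re-deriving it from the TFD.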
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-tx.c')
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-tx.c	231
1 file changed, 124 insertions, 107 deletions
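Point 3 replaces the packed two-buffer tuple with a per-TB descriptor: an unaligned le32 holding the low 32 address bits, plus a 16-bit field whose low 4 bits carry address bits 35:32 and whose high 12 bits carry the length. The following is a standalone user-space sketch of that packing (assumption: little-endian host, so the le16/le32 conversions and the unaligned accessors of the real driver are omitted); the names tfd_tb, tb_set, tb_get_addr and tb_get_len are illustrative, mirroring iwl_tfd_set_tb(), iwl_tfd_tb_get_addr() and iwl_tfd_tb_get_len() in the diff below.

#include <stdint.h>
#include <stdio.h>

/* 6-byte TB descriptor: 36-bit DMA address plus 12-bit length. */
struct tfd_tb {
	uint32_t lo;        /* low 32 bits of the DMA address */
	uint16_t hi_n_len;  /* bits 0..3: address bits 35:32, bits 4..15: length */
} __attribute__((packed));

static void tb_set(struct tfd_tb *tb, uint64_t addr, uint16_t len)
{
	tb->lo = (uint32_t)addr;
	tb->hi_n_len = (uint16_t)((len << 4) | ((addr >> 32) & 0xF));
}

static uint64_t tb_get_addr(const struct tfd_tb *tb)
{
	return (uint64_t)tb->lo | ((uint64_t)(tb->hi_n_len & 0xF) << 32);
}

static uint16_t tb_get_len(const struct tfd_tb *tb)
{
	return tb->hi_n_len >> 4;
}

int main(void)
{
	struct tfd_tb tb;

	tb_set(&tb, 0xBAD5EC7F0ULL, 300);	/* any 36-bit bus address */
	printf("addr=0x%llx len=%u\n",
	       (unsigned long long)tb_get_addr(&tb), tb_get_len(&tb));
	return 0;
}

The patch writes the high nibble as ((addr >> 16) >> 16) rather than addr >> 32 so the shift stays well defined when dma_addr_t is only 32 bits wide; the sketch can use a plain 64-bit shift.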
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index b047fd156c0b..c3656c46f55f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -56,92 +56,112 @@ static const u16 default_tid_to_tx_fifo[] = {
 	IWL_TX_FIFO_AC3
 };
 
+static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+{
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+	dma_addr_t addr = get_unaligned_le32(&tb->lo);
+	if (sizeof(dma_addr_t) > sizeof(u32))
+		addr |=
+		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
+
+	return addr;
+}
+
+static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+{
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+	return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
+				  dma_addr_t addr, u16 len)
+{
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+	u16 hi_n_len = len << 4;
+
+	put_unaligned_le32(addr, &tb->lo);
+	if (sizeof(dma_addr_t) > sizeof(u32))
+		hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+	tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+	tfd->num_tbs = idx + 1;
+}
+
+static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
+{
+	return tfd->num_tbs & 0x1f;
+}
 
 /**
  * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @priv - driver private data
+ * @txq - tx queue
  *
  * Does NOT advance any TFD circular buffer read/write indexes
  * Does NOT free the TFD itself (which is within circular buffer)
  */
-static int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+static void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 {
-	struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0];
-	struct iwl_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
+	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)&txq->tfds[0];
+	struct iwl_tfd *tfd;
 	struct pci_dev *dev = priv->pci_dev;
+	int index = txq->q.read_ptr;
 	int i;
-	int counter = 0;
-	int index, is_odd;
+	int num_tbs;
+
+	tfd = &tfd_tmp[index];
 
 	/* Sanity check on number of chunks */
-	counter = IWL_GET_BITS(*bd, num_tbs);
-	if (counter > MAX_NUM_OF_TBS) {
-		IWL_ERROR("Too many chunks: %i\n", counter);
+	num_tbs = iwl_tfd_get_num_tbs(tfd);
+
+	if (num_tbs >= IWL_NUM_OF_TBS) {
+		IWL_ERROR("Too many chunks: %i\n", num_tbs);
 		/* @todo issue fatal error, it is quite serious situation */
-		return 0;
+		return;
 	}
 
-	/* Unmap chunks, if any.
-	 * TFD info for odd chunks is different format than for even chunks. */
-	for (i = 0; i < counter; i++) {
-		index = i / 2;
-		is_odd = i & 0x1;
-
-		if (is_odd)
-			pci_unmap_single(
-				dev,
-				IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
-				(IWL_GET_BITS(bd->pa[index],
-					      tb2_addr_hi20) << 16),
-				IWL_GET_BITS(bd->pa[index], tb2_len),
+	/* Unmap tx_cmd */
+	if (num_tbs)
+		pci_unmap_single(dev,
+				pci_unmap_addr(&txq->cmd[index]->meta, mapping),
+				pci_unmap_len(&txq->cmd[index]->meta, len),
 				PCI_DMA_TODEVICE);
 
-		else if (i > 0)
-			pci_unmap_single(dev,
-				le32_to_cpu(bd->pa[index].tb1_addr),
-				IWL_GET_BITS(bd->pa[index], tb1_len),
-				PCI_DMA_TODEVICE);
-
-		/* Free SKB, if any, for this chunk */
-		if (txq->txb[txq->q.read_ptr].skb[i]) {
-			struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];
+	/* Unmap chunks, if any. */
+	for (i = 1; i < num_tbs; i++) {
+		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
+				iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
 
-			dev_kfree_skb(skb);
-			txq->txb[txq->q.read_ptr].skb[i] = NULL;
+		if (txq->txb) {
+			dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]);
+			txq->txb[txq->q.read_ptr].skb[i - 1] = NULL;
 		}
 	}
-	return 0;
 }
 
-static int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
-					dma_addr_t addr, u16 len)
+static int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+					struct iwl_tfd *tfd,
+					dma_addr_t addr, u16 len)
 {
-	int index, is_odd;
-	struct iwl_tfd_frame *tfd = ptr;
-	u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);
+
+	u32 num_tbs = iwl_tfd_get_num_tbs(tfd);
 
 	/* Each TFD can point to a maximum 20 Tx buffers */
-	if (num_tbs >= MAX_NUM_OF_TBS) {
+	if (num_tbs >= IWL_NUM_OF_TBS) {
 		IWL_ERROR("Error can not send more than %d chunks\n",
-			  MAX_NUM_OF_TBS);
+			  IWL_NUM_OF_TBS);
 		return -EINVAL;
 	}
 
-	index = num_tbs / 2;
-	is_odd = num_tbs & 0x1;
-
-	if (!is_odd) {
-		tfd->pa[index].tb1_addr = cpu_to_le32(addr);
-		IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
-			     iwl_get_dma_hi_address(addr));
-		IWL_SET_BITS(tfd->pa[index], tb1_len, len);
-	} else {
-		IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
-			     (u32) (addr & 0xffff));
-		IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
-		IWL_SET_BITS(tfd->pa[index], tb2_len, len);
-	}
+	BUG_ON(addr & ~DMA_BIT_MASK(36));
+	if (unlikely(addr & ~IWL_TX_DMA_MASK))
+		IWL_ERROR("Unaligned address = %llx\n",
+			  (unsigned long long)addr);
 
-	IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1);
+	iwl_tfd_set_tb(tfd, num_tbs, addr, len);
 
 	return 0;
 }
@@ -224,8 +244,8 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 
 	/* De-alloc circular buffer of TFDs */
 	if (txq->q.n_bd)
-		pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) *
-				    txq->q.n_bd, txq->bd, txq->q.dma_addr);
+		pci_free_consistent(dev, sizeof(struct iwl_tfd) *
+				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);
 
 	/* De-alloc array of per-TFD driver data */
 	kfree(txq->txb);
@@ -263,8 +283,8 @@ static void iwl_cmd_queue_free(struct iwl_priv *priv)
 
 	/* De-alloc circular buffer of TFDs */
 	if (txq->q.n_bd)
-		pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) *
-				    txq->q.n_bd, txq->bd, txq->q.dma_addr);
+		pci_free_consistent(dev, sizeof(struct iwl_tfd) *
+				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);
 
 	/* 0-fill queue descriptor structure */
 	memset(txq, 0, sizeof(*txq));
@@ -364,13 +384,13 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
 
 	/* Circular buffer of transmit frame descriptors (TFDs),
 	 * shared with device */
-	txq->bd = pci_alloc_consistent(dev,
-			sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
+	txq->tfds = pci_alloc_consistent(dev,
+			sizeof(txq->tfds[0]) * TFD_QUEUE_SIZE_MAX,
 			&txq->q.dma_addr);
 
-	if (!txq->bd) {
+	if (!txq->tfds) {
 		IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
-			  sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
+			  sizeof(txq->tfds[0]) * TFD_QUEUE_SIZE_MAX);
 		goto error;
 	}
 	txq->q.id = id;
@@ -394,15 +414,15 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
 static int iwl_hw_tx_queue_init(struct iwl_priv *priv,
 				struct iwl_tx_queue *txq)
 {
-	int rc;
+	int ret;
 	unsigned long flags;
 	int txq_id = txq->q.id;
 
 	spin_lock_irqsave(&priv->lock, flags);
-	rc = iwl_grab_nic_access(priv);
-	if (rc) {
+	ret = iwl_grab_nic_access(priv);
+	if (ret) {
 		spin_unlock_irqrestore(&priv->lock, flags);
-		return rc;
+		return ret;
 	}
 
 	/* Circular buffer (TFD queue in DRAM) physical base address */
@@ -410,10 +430,10 @@ static int iwl_hw_tx_queue_init(struct iwl_priv *priv,
 			txq->q.dma_addr >> 8);
 
 	/* Enable DMA channel, using same id as for TFD queue */
-	iwl_write_direct32(
-		priv, FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
+	iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
 		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
 		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
+
 	iwl_release_nic_access(priv);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -788,7 +808,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct iwl_tfd_frame *tfd;
+	struct iwl_tfd *tfd;
 	struct iwl_tx_queue *txq;
 	struct iwl_queue *q;
 	struct iwl_cmd *out_cmd;
@@ -882,7 +902,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	spin_lock_irqsave(&priv->lock, flags);
 
 	/* Set up first empty TFD within this queue's circular TFD buffer */
-	tfd = &txq->bd[q->write_ptr];
+	tfd = &txq->tfds[q->write_ptr];
 	memset(tfd, 0, sizeof(*tfd));
 	idx = get_cmd_index(q, q->write_ptr, 0);
 
@@ -931,12 +951,14 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	/* Physical address of this Tx command's header (not MAC header!),
 	 * within command buffer array. */
-	txcmd_phys = pci_map_single(priv->pci_dev, out_cmd,
-			sizeof(struct iwl_cmd), PCI_DMA_TODEVICE);
-	txcmd_phys += offsetof(struct iwl_cmd, hdr);
-
+	txcmd_phys = pci_map_single(priv->pci_dev,
+				    out_cmd, sizeof(struct iwl_cmd),
+				    PCI_DMA_TODEVICE);
+	pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
+	pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
 	/* Add buffer containing Tx command and MAC(!) header to TFD's
 	 * first entry */
+	txcmd_phys += offsetof(struct iwl_cmd, hdr);
 	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
 
 	if (info->control.hw_key)
@@ -969,7 +991,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
 			offsetof(struct iwl_tx_cmd, scratch);
 	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
-	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_address(scratch_phys);
+	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
 
 	if (!ieee80211_has_morefrags(hdr->frame_control)) {
 		txq->need_update = 1;
@@ -1030,7 +1052,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 {
 	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
 	struct iwl_queue *q = &txq->q;
-	struct iwl_tfd_frame *tfd;
+	struct iwl_tfd *tfd;
 	struct iwl_cmd *out_cmd;
 	dma_addr_t phys_addr;
 	unsigned long flags;
@@ -1059,7 +1081,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 
 	spin_lock_irqsave(&priv->hcmd_lock, flags);
 
-	tfd = &txq->bd[q->write_ptr];
+	tfd = &txq->tfds[q->write_ptr];
 	memset(tfd, 0, sizeof(*tfd));
 
 
@@ -1080,9 +1102,13 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
 	len = (idx == TFD_CMD_SLOTS) ?
 			IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
-	phys_addr = pci_map_single(priv->pci_dev, out_cmd, len,
-			PCI_DMA_TODEVICE);
+
+	phys_addr = pci_map_single(priv->pci_dev, out_cmd,
+				   len, PCI_DMA_TODEVICE);
+	pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
+	pci_unmap_len_set(&out_cmd->meta, len, len);
 	phys_addr += offsetof(struct iwl_cmd, hdr);
+
 	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
 
 #ifdef CONFIG_IWLWIFI_DEBUG
@@ -1132,8 +1158,9 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 		return 0;
 	}
 
-	for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
-	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+	for (index = iwl_queue_inc_wrap(index, q->n_bd);
+	     q->read_ptr != index;
+	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
 		tx_info = &txq->txb[txq->q.read_ptr];
 		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
@@ -1157,44 +1184,34 @@ EXPORT_SYMBOL(iwl_tx_queue_reclaim);
  * need to be reclaimed. As result, some free space forms. If there is
  * enough free space (> low mark), wake the stack that feeds us.
  */
-static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
+static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
+				   int idx, int cmd_idx)
 {
 	struct iwl_tx_queue *txq = &priv->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
-	struct iwl_tfd_frame *bd = &txq->bd[index];
-	dma_addr_t dma_addr;
-	int is_odd, buf_len;
 	int nfreed = 0;
 
-	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
+	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
 		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
 			  "is out of range [0-%d] %d %d.\n", txq_id,
-			  index, q->n_bd, q->write_ptr, q->read_ptr);
+			  idx, q->n_bd, q->write_ptr, q->read_ptr);
 		return;
 	}
 
-	for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
-	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+	pci_unmap_single(priv->pci_dev,
+		pci_unmap_addr(&txq->cmd[cmd_idx]->meta, mapping),
+		pci_unmap_len(&txq->cmd[cmd_idx]->meta, len),
+		PCI_DMA_TODEVICE);
+
+	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
-		if (nfreed > 1) {
-			IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
+		if (nfreed++ > 0) {
+			IWL_ERROR("HCMD skipped: index (%d) %d %d\n", idx,
 				  q->write_ptr, q->read_ptr);
 			queue_work(priv->workqueue, &priv->restart);
 		}
-		is_odd = (index/2) & 0x1;
-		if (is_odd) {
-			dma_addr = IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
-				(IWL_GET_BITS(bd->pa[index],
-					      tb2_addr_hi20) << 16);
-			buf_len = IWL_GET_BITS(bd->pa[index], tb2_len);
-		} else {
-			dma_addr = le32_to_cpu(bd->pa[index].tb1_addr);
-			buf_len = IWL_GET_BITS(bd->pa[index], tb1_len);
-		}
 
-		pci_unmap_single(priv->pci_dev, dma_addr, buf_len,
-				 PCI_DMA_TODEVICE);
-		nfreed++;
 	}
 }
 
@@ -1234,7 +1251,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 	    !cmd->meta.u.callback(priv, cmd, rxb->skb))
 		rxb->skb = NULL;
 
-	iwl_hcmd_queue_reclaim(priv, txq_id, index);
+	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
 
 	if (!(cmd->meta.flags & CMD_ASYNC)) {
 		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);