Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-agn-tx.c')
-rw-r--r--   drivers/net/wireless/iwlwifi/iwl-agn-tx.c   1333
1 file changed, 1333 insertions, 0 deletions

diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
new file mode 100644
index 000000000000..3077eac58880
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -0,0 +1,1333 @@
/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>

#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"

/*
 * mac80211 queues, ACs, hardware queues, FIFOs.
 *
 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
 *
 * Mac80211 uses the following numbers, which we get from it
 * by way of skb_get_queue_mapping(skb):
 *
 *	VO	0
 *	VI	1
 *	BE	2
 *	BK	3
 *
 *
 * Regular (not A-MPDU) frames are put into hardware queues corresponding
 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
 * own queue per aggregation session (RA/TID combination); such queues are
 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
 * order to map frames to the right queue, we also need an AC->hw queue
 * mapping. This is implemented here.
 *
 * Due to the way hw queues are set up (by the hw specific modules like
 * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
 * mapping.
 */

static const u8 tid_to_ac[] = {
	/* this matches the mac80211 numbers */
	2, 3, 3, 2, 1, 1, 0, 0
};

static const u8 ac_to_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
};

static inline int get_fifo_from_ac(u8 ac)
{
	return ac_to_fifo[ac];
}

static inline int get_fifo_from_tid(u16 tid)
{
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return get_fifo_from_ac(tid_to_ac[tid]);

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}
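
/*
 * Worked example (follows directly from the two tables above): TID 5 maps
 * to AC 1 (VI) via tid_to_ac[], so get_fifo_from_tid(5) returns
 * IWL_TX_FIFO_VI, while any TID in the 8-15 range yields -EINVAL for now.
 */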

/**
 * iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
				    struct iwl_tx_queue *txq,
				    u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != IWL_CMD_QUEUE_NUM) {
		sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
		sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;

		switch (sec_ctl & TX_CMD_SEC_MSK) {
		case TX_CMD_SEC_CCM:
			len += CCMP_MIC_LEN;
			break;
		case TX_CMD_SEC_TKIP:
			len += TKIP_ICV_LEN;
			break;
		case TX_CMD_SEC_WEP:
			len += WEP_IV_LEN + WEP_ICV_LEN;
			break;
		}
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
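
/*
 * Note on the duplication above (an observation from the code, not the
 * hardware spec): entries for the first TFD_QUEUE_SIZE_BC_DUP slots are
 * mirrored at offset TFD_QUEUE_SIZE_MAX, so the scheduler can read a
 * contiguous run of byte counts even when its window wraps past the end
 * of the circular buffer.
 */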

void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
				   struct iwl_tx_queue *txq)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != IWL_CMD_QUEUE_NUM)
		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = priv->scd_base_addr +
			IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);

	return 0;
}
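
/*
 * As the read-modify-write above shows, the translation table packs two
 * queues' RA/TID mappings into each 32-bit word: odd-numbered queues
 * occupy the high 16 bits, even-numbered queues the low 16 bits.
 */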

static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(priv,
		IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		(1 << IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
			int txq_id, u32 index)
{
	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(txq_id), index);
}

void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
				struct iwl_tx_queue *txq,
				int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;
	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(priv, IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
			(active << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << IWL50_SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << IWL50_SCD_QUEUE_STTS_REG_POS_WSL) |
			IWL50_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}

int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
			  int tx_fifo, int sta_id, int tid, u16 ssn_idx)
{
	unsigned long flags;
	u16 ra_tid;

	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
	     <= txq_id)) {
		IWL_WARN(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			priv->cfg->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	ra_tid = BUILD_RAxTID(sta_id, tid);

	/* Modify device's station table to Tx this TID */
	iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);

	spin_lock_irqsave(&priv->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(priv, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(priv, IWL50_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1 << txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(priv, priv->scd_base_addr +
			IWL50_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((SCD_WIN_SIZE <<
			IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((SCD_FRAME_LIMIT <<
			IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
			   u16 ssn_idx, u8 tx_fifo)
{
	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
	     <= txq_id)) {
		IWL_ERR(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			priv->cfg->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	iwlagn_tx_queue_stop_scheduler(priv, txq_id);

	iwl_clear_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1 << txq_id));

	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	/* assumes that ssn_idx is valid (!= 0xFFF) */
	iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);

	iwl_clear_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(priv, txq_id);
	iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);

	return 0;
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFO mask;
 * must be called with priv->lock held and MAC access enabled
 */
void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_write_prph(priv, IWL50_SCD_TXFACT, mask);
}

static inline int get_queue_from_ac(u16 ac)
{
	return ac;
}

/*
 * handle building the REPLY_TX command notification.
 */
static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
				      struct iwl_tx_cmd *tx_cmd,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_hdr *hdr,
				      u8 sta_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;


	tx_cmd->sta_id = sta_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);

	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}

#define RTS_DEFAULT_RETRY_LIMIT		60

static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
				     struct iwl_tx_cmd *tx_cmd,
				     struct ieee80211_tx_info *info,
				     __le16 fc)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc))
		data_retry_limit = 3;
	else
		data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
	tx_cmd->data_retry_limit = data_retry_limit;

	/* Set retry limit on RTS packets */
	rts_retry_limit = RTS_DEFAULT_RETRY_LIMIT;
	if (data_retry_limit < rts_retry_limit)
		rts_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
		return;
	}

	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate. Thus, we use the lowest supported rate for
	 * this band. Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
	    (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(&priv->bands[info->band],
					     info->control.sta);
	/* For 5 GHz band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up RTS and CTS flags for certain packets */
	switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
	case cpu_to_le16(IEEE80211_STYPE_AUTH):
	case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
	case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
	case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
		if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
			tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
			tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
		}
		break;
	default:
		break;
	}

	/* Set up antennas */
	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
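
/*
 * Illustration of the 5 GHz remap above (relies on the layout of the
 * driver's iwl_rates[] table, which is defined elsewhere): the 5 GHz band
 * has no CCK rates, so mac80211 index 0 is 6 Mbps OFDM; the driver table
 * lists the CCK rates first, so adding IWL_FIRST_OFDM_RATE lines the two
 * numbering schemes up again.
 */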

static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
					 struct ieee80211_tx_info *info,
					 struct iwl_tx_cmd *tx_cmd,
					 struct sk_buff *skb_frag,
					 int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->alg) {
	case ALG_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case ALG_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_key(keyconf, skb_frag,
			IEEE80211_TKIP_P2_KEY, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
		break;

	case ALG_WEP:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		if (keyconf->keylen == WEP_KEY_LEN_128)
			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
		break;
	}
}

/*
 * start REPLY_TX command process
 */
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_cmd *tx_cmd;
	int swq_id, txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, len_org, firstlen, secondlen;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	hdr_len = ieee80211_hdrlen(fc);

	/* Find (or create) index into station table for destination station */
	if (info->flags & IEEE80211_TX_CTL_INJECTED)
		sta_id = priv->hw_params.bcast_sta_id;
	else
		sta_id = iwl_get_sta_id(priv, hdr);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
			       hdr->addr1);
		goto drop_unlock;
	}

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	if (sta)
		sta_priv = (void *)sta->drv_priv;

	if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
	    sta_priv->asleep) {
		WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 */
		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	txq_id = get_queue_from_ac(skb_get_queue_mapping(skb));
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (unlikely(tid >= MAX_TID_COUNT))
			goto drop_unlock;
		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
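		/*
		 * The 802.11 sequence number occupies bits 4-15 of seq_ctrl
		 * (IEEE80211_SCTL_SEQ), so adding 0x10 advances the per-TID
		 * sequence number by one while leaving the fragment number
		 * in bits 0-3 untouched.
		 */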
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
		}
	}

	txq = &priv->txq[txq_id];
	swq_id = txq->swq_id;
	q = &txq->q;

	if (unlikely(iwl_queue_space(q) < q->high_mark))
		goto drop_unlock;

	if (ieee80211_is_data_qos(fc))
		priv->stations[sta_id].tid[tid].tfds_in_queue++;

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb[0] = skb;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[q->write_ptr];
	out_meta = &txq->meta[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);


	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwlagn_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
	iwl_dbg_log_tx_data_frame(priv, len, hdr);

	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);

	iwl_update_stats(priv, true, fc, len);
	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;

	len_org = len;
	firstlen = len = (len + 3) & ~3;

	if (len_org != len)
		len_org = 1;
	else
		len_org = 0;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (len_org)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev,
				    &out_cmd->hdr, len,
				    PCI_DMA_BIDIRECTIONAL);
	pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
	pci_unmap_len_set(out_meta, len, len);
	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   txcmd_phys, len, 1, 0);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
		if (qc)
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
							   phys_addr, len,
							   0, 0);
	}

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	/* take back ownership of DMA buffer to enable update */
	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
				    len, PCI_DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(out_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
						     le16_to_cpu(tx_cmd->len));

	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
				       len, PCI_DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &out_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */

	/* avoid atomic ops if it isn't an associated client */
	if (sta_priv && sta_priv->client)
		atomic_inc(&sta_priv->pending_frames);

	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			iwl_stop_queue(priv, txq->swq_id);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	return -1;
}

static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
					struct iwl_dma_ptr *ptr, size_t size)
{
	ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
				       GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
					struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

/**
 * iwlagn_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	if (priv->txq) {
		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
			if (txq_id == IWL_CMD_QUEUE_NUM)
				iwl_cmd_queue_free(priv);
			else
				iwl_tx_queue_free(priv, txq_id);
	}
	iwlagn_free_dma_ptr(priv, &priv->kw);

	iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);

	/* free tx queue structure */
	iwl_free_txq_mem(priv);
}

/**
 * iwlagn_txq_ctx_alloc - allocate TX queue context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param priv
 * @return error code
 */
int iwlagn_txq_ctx_alloc(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	iwlagn_hw_txq_ctx_free(priv);

	ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				   priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error_kw;
	}

	/* allocate tx queue structure */
	ret = iwl_alloc_txq_mem(priv);
	if (ret)
		goto error;

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
					txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

error:
	iwlagn_hw_txq_ctx_free(priv);
	iwlagn_free_dma_ptr(priv, &priv->kw);
error_kw:
	iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
error_bc_tbls:
	return ret;
}
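
/*
 * A note on the unwind order above (an observation, not a requirement of
 * the API): the error labels run in reverse order of allocation, and
 * although iwlagn_hw_txq_ctx_free() already releases the keep-warm buffer
 * and byte-count tables, freeing them again is harmless because
 * iwlagn_free_dma_ptr() checks ptr->addr and clears the descriptor.
 */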

void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
{
	int txq_id, slots_num;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
	}
}

/**
 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
 */
void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
{
	int ch;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);

	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000);
	}
	spin_unlock_irqrestore(&priv->lock, flags);
}

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
 */
static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv)
{
	int txq_id;

	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

int iwlagn_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	tx_fifo = get_fifo_from_tid(tid);
	if (unlikely(tx_fifo < 0))
		return tx_fifo;

	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
			__func__, ra, tid);

	sta_id = iwl_find_station(priv, ra);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwlagn_txq_ctx_activate_free(priv);
	if (txq_id == -1) {
		IWL_ERR(priv, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
						  sta_id, tid, *ssn);
	if (ret)
		return ret;

	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(priv, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid);
	} else {
		IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	return ret;
}

int iwlagn_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn = -1;
	struct iwl_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	if (!ra) {
		IWL_ERR(priv, "ra = NULL\n");
		return -EINVAL;
	}

	tx_fifo_id = get_fifo_from_tid(tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = iwl_find_station(priv, ra);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	if (priv->stations[sta_id].tid[tid].agg.state ==
				IWL_EMPTYING_HW_QUEUE_ADDBA) {
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
		priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
		return 0;
	}

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;
	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		return 0;
	}

	IWL_DEBUG_HT(priv, "HW queue is empty\n");
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	spin_lock_irqsave(&priv->lock, flags);
	/*
	 * The only reason this call can fail is a queue number out of range,
	 * which can happen if uCode is reloaded and all the station
	 * information is lost. If it is outside the range, there is no need
	 * to deactivate the uCode queue; just return "success" to allow
	 * mac80211 to clean up its own data.
	 */
	priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
					     tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);

	return 0;
}
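
/*
 * If the aggregation queue was still draining above, the state was left at
 * IWL_EMPTYING_HW_QUEUE_DELBA; iwlagn_txq_check_empty() below finishes the
 * teardown once the read pointer catches up with the write pointer.
 */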

int iwlagn_txq_check_empty(struct iwl_priv *priv,
			   int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = get_fifo_from_tid(tid);
			IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
							     ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
		}
		break;
	}
	return 0;
}

static void iwlagn_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_sta *sta;
	struct iwl_station_priv *sta_priv;

	sta = ieee80211_find_sta(priv->vif, hdr->addr1);
	if (sta) {
		sta_priv = (void *)sta->drv_priv;
		/* avoid atomic ops if this isn't a client */
		if (sta_priv->client &&
		    atomic_dec_return(&sta_priv->pending_frames) == 0)
			ieee80211_sta_block_awake(priv->hw, sta, false);
	}

	ieee80211_tx_status_irqsafe(priv->hw, skb);
}

int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		iwlagn_tx_status(priv, tx_info->skb[0]);

		hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
		if (hdr && ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;
		tx_info->skb[0] = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
	}
	return nfreed;
}

/**
 * iwlagn_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
 */
static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
				struct iwl_ht_agg *agg,
				struct iwl_compressed_ba_resp *ba_resp)
{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	u64 bitmap;
	int successes = 0;
	struct ieee80211_tx_info *info;

	if (unlikely(!agg->wait_for_ba)) {
		IWL_ERR(priv, "Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0) /* btw: something is wrong with indices */
		sh += 0x100;
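	/*
	 * The shift is effectively computed modulo 256: TFD indices wrap at
	 * the queue size (cf. the "& 0xff" masks used throughout this file),
	 * so a negative difference is folded back into range.
	 */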

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	if (agg->frame_count > (64 - sh)) {
		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
		return -1;
	}

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	bitmap &= agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	for (i = 0; i < agg->frame_count; i++) {
		ack = bitmap & (1ULL << i);
		successes += !!ack;
		IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
			ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
			agg->start_idx + i);
	}

	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_map = successes;
	info->status.ampdu_ack_len = agg->frame_count;
	iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);

	return 0;
}

/**
 * translate ucode response to mac80211 tx status control values
 */
void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
				 struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->control.rates[0];

	info->antenna_sel_tx =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	if (rate_n_flags & RATE_MCS_HT_MSK)
		r->flags |= IEEE80211_TX_RC_MCS;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
	if (rate_n_flags & RATE_MCS_DUP_MSK)
		r->flags |= IEEE80211_TX_RC_DUP_DATA;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}

/**
 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
				   struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;

	/* Find index just before block-ack window */
	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	/* TODO: Need to get this copy more safely - now good for debug */

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
			   "%d, scd_ssn = %d\n",
			   ba_resp->tid,
			   ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   ba_resp->scd_flow,
			   ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
			   agg->start_idx,
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwlagn_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwlagn_tx_queue_reclaim(priv, scd_flow, index);
		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			iwl_wake_queue(priv, txq->swq_id);

		iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
	}
}