-rw-r--r--   drivers/net/wireless/iwlegacy/4965-mac.c    | 1477
-rw-r--r--   drivers/net/wireless/iwlegacy/Makefile      |    3
-rw-r--r--   drivers/net/wireless/iwlegacy/iwl-4965-rx.c |  215
-rw-r--r--   drivers/net/wireless/iwlegacy/iwl-4965-tx.c | 1371
4 files changed, 1478 insertions(+), 1588 deletions(-)

diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 142d39f94498..b6f96b4b1f5c 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -86,6 +86,1483 @@ MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("iwl4965");
 
+void il4965_rx_missed_beacon_notif(struct il_priv *il,
+				   struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_missed_beacon_notif *missed_beacon;
+
+	missed_beacon = &pkt->u.missed_beacon;
+	if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
+	    il->missed_beacon_threshold) {
+		D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
+			le32_to_cpu(missed_beacon->consecutive_missed_beacons),
+			le32_to_cpu(missed_beacon->total_missed_becons),
+			le32_to_cpu(missed_beacon->num_recvd_beacons),
+			le32_to_cpu(missed_beacon->num_expected_beacons));
+		if (!test_bit(STATUS_SCANNING, &il->status))
+			il4965_init_sensitivity(il);
+	}
+}
+
+/* Calculate noise level, based on measurements during network silence just
+ * before an arriving beacon. This measurement can be done only if we know
+ * exactly when to expect beacons, therefore only when we're associated. */
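+/*
+ * Example (illustrative): with in-band silence readings of 35 and 40 on
+ * two active antennas and 0 on the third, the average silence is
+ * (35 + 40) / 2 = 37, giving a noise estimate of 37 - 107 = -70 dBm.
+ */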
+static void il4965_rx_calc_noise(struct il_priv *il)
+{
+	struct stats_rx_non_phy *rx_info;
+	int num_active_rx = 0;
+	int total_silence = 0;
+	int bcn_silence_a, bcn_silence_b, bcn_silence_c;
+	int last_rx_noise;
+
+	rx_info = &(il->_4965.stats.rx.general);
+	bcn_silence_a =
+	    le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+	bcn_silence_b =
+	    le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+	bcn_silence_c =
+	    le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
+
+	if (bcn_silence_a) {
+		total_silence += bcn_silence_a;
+		num_active_rx++;
+	}
+	if (bcn_silence_b) {
+		total_silence += bcn_silence_b;
+		num_active_rx++;
+	}
+	if (bcn_silence_c) {
+		total_silence += bcn_silence_c;
+		num_active_rx++;
+	}
+
+	/* Average among active antennas */
+	if (num_active_rx)
+		last_rx_noise = (total_silence / num_active_rx) - 107;
+	else
+		last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE;
+
+	D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n",
+		bcn_silence_a, bcn_silence_b, bcn_silence_c, last_rx_noise);
+}
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+/*
+ * Based on the assumption that all stats counters are DWORDs.
+ * FIXME: this function is for debugging only and does not handle
+ * counter roll-over.
+ */
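+/*
+ * Example (illustrative): if a counter read 100 in the previous
+ * notification and reads 130 now, delta = 30 is added to the
+ * accumulated value, and max_delta is raised if the new delta exceeds
+ * it; a counter that did not increase is skipped (see FIXME above).
+ */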
+static void il4965_accumulative_stats(struct il_priv *il,
+				      __le32 *stats)
+{
+	int i, size;
+	__le32 *prev_stats;
+	u32 *accum_stats;
+	u32 *delta, *max_delta;
+	struct stats_general_common *general, *accum_general;
+	struct stats_tx *tx, *accum_tx;
+
+	prev_stats = (__le32 *)&il->_4965.stats;
+	accum_stats = (u32 *)&il->_4965.accum_stats;
+	size = sizeof(struct il_notif_stats);
+	general = &il->_4965.stats.general.common;
+	accum_general = &il->_4965.accum_stats.general.common;
+	tx = &il->_4965.stats.tx;
+	accum_tx = &il->_4965.accum_stats.tx;
+	delta = (u32 *)&il->_4965.delta_stats;
+	max_delta = (u32 *)&il->_4965.max_delta;
+
+	for (i = sizeof(__le32); i < size;
+	     i += sizeof(__le32), stats++, prev_stats++, delta++,
+	     max_delta++, accum_stats++) {
+		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+			*delta = (le32_to_cpu(*stats) -
+				  le32_to_cpu(*prev_stats));
+			*accum_stats += *delta;
+			if (*delta > *max_delta)
+				*max_delta = *delta;
+		}
+	}
+
+	/* reset accumulative stats for "no-counter" type stats */
+	accum_general->temperature = general->temperature;
+	accum_general->ttl_timestamp = general->ttl_timestamp;
+}
+#endif
+
+#define REG_RECALIB_PERIOD (60)
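+/* period in seconds; converted to milliseconds in the mod_timer() call below */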
+
+void il4965_rx_stats(struct il_priv *il,
+		     struct il_rx_buf *rxb)
+{
+	int change;
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+	D_RX("Statistics notification received (%d vs %d).\n",
+	     (int)sizeof(struct il_notif_stats),
+	     le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
+
+	change = ((il->_4965.stats.general.common.temperature !=
+		   pkt->u.stats.general.common.temperature) ||
+		  ((il->_4965.stats.flag &
+		    STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
+		   (pkt->u.stats.flag &
+		    STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+	il4965_accumulative_stats(il, (__le32 *)&pkt->u.stats);
+#endif
+
+	/* TODO: reading some of the stats is unneeded */
+	memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats));
+
+	set_bit(STATUS_STATISTICS, &il->status);
+
+	/* Reschedule the stats timer to occur in
+	 * REG_RECALIB_PERIOD seconds to ensure we get a
+	 * thermal update even if the uCode doesn't give
+	 * us one */
+	mod_timer(&il->stats_periodic, jiffies +
+		  msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
+
+	if (unlikely(!test_bit(STATUS_SCANNING, &il->status)) &&
+	    (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
+		il4965_rx_calc_noise(il);
+		queue_work(il->workqueue, &il->run_time_calib_work);
+	}
+	if (il->cfg->ops->lib->temp_ops.temperature && change)
+		il->cfg->ops->lib->temp_ops.temperature(il);
+}
+
+void il4965_reply_stats(struct il_priv *il,
+			struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+		memset(&il->_4965.accum_stats, 0,
+		       sizeof(struct il_notif_stats));
+		memset(&il->_4965.delta_stats, 0,
+		       sizeof(struct il_notif_stats));
+		memset(&il->_4965.max_delta, 0,
+		       sizeof(struct il_notif_stats));
+#endif
+		D_RX("Statistics have been cleared\n");
+	}
+	il4965_rx_stats(il, rxb);
+}
+
+static const u8 tid_to_ac[] = {
+	IEEE80211_AC_BE,
+	IEEE80211_AC_BK,
+	IEEE80211_AC_BK,
+	IEEE80211_AC_BE,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_VO,
+	IEEE80211_AC_VO
+};
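+/*
+ * Per the 802.11e UP-to-AC mapping above: TIDs 0 and 3 are best effort
+ * (BE), TIDs 1 and 2 background (BK), TIDs 4 and 5 video (VI), and
+ * TIDs 6 and 7 voice (VO).
+ */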
+
+static inline int il4965_get_ac_from_tid(u16 tid)
+{
+	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+		return tid_to_ac[tid];
+
+	/* no support for TIDs 8-15 yet */
+	return -EINVAL;
+}
+
+static inline int
+il4965_get_fifo_from_tid(struct il_rxon_context *ctx, u16 tid)
+{
+	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+		return ctx->ac_to_fifo[tid_to_ac[tid]];
+
+	/* no support for TIDs 8-15 yet */
+	return -EINVAL;
+}
+
+/*
+ * Handle building the REPLY_TX command.
+ */
+static void il4965_tx_cmd_build_basic(struct il_priv *il,
+				      struct sk_buff *skb,
+				      struct il_tx_cmd *tx_cmd,
+				      struct ieee80211_tx_info *info,
+				      struct ieee80211_hdr *hdr,
+				      u8 std_id)
+{
+	__le16 fc = hdr->frame_control;
+	__le32 tx_flags = tx_cmd->tx_flags;
+
+	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+		tx_flags |= TX_CMD_FLG_ACK_MSK;
+		if (ieee80211_is_mgmt(fc))
+			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+		if (ieee80211_is_probe_resp(fc) &&
+		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+			tx_flags |= TX_CMD_FLG_TSF_MSK;
+	} else {
+		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+	}
+
+	if (ieee80211_is_back_req(fc))
+		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
+
+	tx_cmd->sta_id = std_id;
+	if (ieee80211_has_morefrags(fc))
+		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
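+	/*
+	 * Note: the driver itself assigns sequence numbers to QoS data
+	 * frames (see il4965_tx_skb()), which is why the SEQ_CTL flag is
+	 * cleared for them below and set for everything else.
+	 */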
+	if (ieee80211_is_data_qos(fc)) {
+		u8 *qc = ieee80211_get_qos_ctl(hdr);
+		tx_cmd->tid_tspec = qc[0] & 0xf;
+		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+	} else {
+		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+	}
+
+	il_tx_cmd_protection(il, info, fc, &tx_flags);
+
+	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+	if (ieee80211_is_mgmt(fc)) {
+		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+		else
+			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+	} else {
+		tx_cmd->timeout.pm_frame_timeout = 0;
+	}
+
+	tx_cmd->driver_txop = 0;
+	tx_cmd->tx_flags = tx_flags;
+	tx_cmd->next_frame_len = 0;
+}
+
+#define RTS_DFAULT_RETRY_LIMIT 60
+
+static void il4965_tx_cmd_build_rate(struct il_priv *il,
+				     struct il_tx_cmd *tx_cmd,
+				     struct ieee80211_tx_info *info,
+				     __le16 fc)
+{
+	u32 rate_flags;
+	int rate_idx;
+	u8 rts_retry_limit;
+	u8 data_retry_limit;
+	u8 rate_plcp;
+
+	/* Set retry limit on DATA packets and Probe Responses */
+	if (ieee80211_is_probe_resp(fc))
+		data_retry_limit = 3;
+	else
+		data_retry_limit = IL4965_DEFAULT_TX_RETRY;
+	tx_cmd->data_retry_limit = data_retry_limit;
+
+	/* Set retry limit on RTS packets */
+	rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
+	if (data_retry_limit < rts_retry_limit)
+		rts_retry_limit = data_retry_limit;
+	tx_cmd->rts_retry_limit = rts_retry_limit;
+
+	/* DATA packets will use the uCode station table for rate/antenna
+	 * selection */
+	if (ieee80211_is_data(fc)) {
+		tx_cmd->initial_rate_idx = 0;
+		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
+		return;
+	}
+
+	/*
+	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
+	 * not really a TX rate. Thus, we use the lowest supported rate for
+	 * this band. Also use the lowest supported rate if the stored rate
+	 * idx is invalid.
+	 */
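+	/*
+	 * Example (illustrative): on the 5 GHz band mac80211 rate idx 0
+	 * is the band's lowest OFDM rate; the remap below shifts it by
+	 * IL_FIRST_OFDM_RATE so it lands past the CCK-only entries of the
+	 * driver's rate table.
+	 */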
+	rate_idx = info->control.rates[0].idx;
+	if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) ||
+	    rate_idx < 0 || rate_idx > RATE_COUNT_LEGACY)
+		rate_idx = rate_lowest_index(&il->bands[info->band],
+					     info->control.sta);
+	/* For 5 GHz band, remap mac80211 rate indices into driver indices */
+	if (info->band == IEEE80211_BAND_5GHZ)
+		rate_idx += IL_FIRST_OFDM_RATE;
+	/* Get PLCP rate for tx_cmd->rate_n_flags */
+	rate_plcp = il_rates[rate_idx].plcp;
+	/* Zero out flags for this packet */
+	rate_flags = 0;
+
+	/* Set CCK flag as needed */
+	if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE)
+		rate_flags |= RATE_MCS_CCK_MSK;
+
+	/* Set up antennas */
+	il->mgmt_tx_ant = il4965_toggle_tx_ant(il, il->mgmt_tx_ant,
+					       il->hw_params.valid_tx_ant);
+
+	rate_flags |= il4965_ant_idx_to_flags(il->mgmt_tx_ant);
+
+	/* Set the rate in the TX cmd */
+	tx_cmd->rate_n_flags = il4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
+}
+
+static void il4965_tx_cmd_build_hwcrypto(struct il_priv *il,
+					 struct ieee80211_tx_info *info,
+					 struct il_tx_cmd *tx_cmd,
+					 struct sk_buff *skb_frag,
+					 int sta_id)
+{
+	struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+	switch (keyconf->cipher) {
+	case WLAN_CIPHER_SUITE_CCMP:
+		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+		if (info->flags & IEEE80211_TX_CTL_AMPDU)
+			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
+		D_TX("tx_cmd with AES hwcrypto\n");
+		break;
+
+	case WLAN_CIPHER_SUITE_TKIP:
+		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
+		D_TX("tx_cmd with tkip hwcrypto\n");
+		break;
+
+	case WLAN_CIPHER_SUITE_WEP104:
+		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+		/* fall through */
+	case WLAN_CIPHER_SUITE_WEP40:
+		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
+		    (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
+
+		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+
+		D_TX("Configuring packet for WEP encryption "
+		     "with key %d\n", keyconf->keyidx);
+		break;
+
+	default:
+		IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
+		break;
+	}
+}
+
+/*
+ * Start the REPLY_TX command process.
+ */
+int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_sta *sta = info->control.sta;
+	struct il_station_priv *sta_priv = NULL;
+	struct il_tx_queue *txq;
+	struct il_queue *q;
+	struct il_device_cmd *out_cmd;
+	struct il_cmd_meta *out_meta;
+	struct il_tx_cmd *tx_cmd;
+	struct il_rxon_context *ctx = &il->ctx;
+	int txq_id;
+	dma_addr_t phys_addr;
+	dma_addr_t txcmd_phys;
+	dma_addr_t scratch_phys;
+	u16 len, firstlen, secondlen;
+	u16 seq_number = 0;
+	__le16 fc;
+	u8 hdr_len;
+	u8 sta_id;
+	u8 wait_write_ptr = 0;
+	u8 tid = 0;
+	u8 *qc = NULL;
+	unsigned long flags;
+	bool is_agg = false;
+
+	if (info->control.vif)
+		ctx = il_rxon_ctx_from_vif(info->control.vif);
+
+	spin_lock_irqsave(&il->lock, flags);
+	if (il_is_rfkill(il)) {
+		D_DROP("Dropping - RF KILL\n");
+		goto drop_unlock;
+	}
+
+	fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+	if (ieee80211_is_auth(fc))
+		D_TX("Sending AUTH frame\n");
+	else if (ieee80211_is_assoc_req(fc))
+		D_TX("Sending ASSOC frame\n");
+	else if (ieee80211_is_reassoc_req(fc))
+		D_TX("Sending REASSOC frame\n");
+#endif
+
+	hdr_len = ieee80211_hdrlen(fc);
+
+	/* For management frames use the broadcast id so as not to break
+	 * aggregation */
+	if (!ieee80211_is_data(fc))
+		sta_id = ctx->bcast_sta_id;
+	else {
+		/* Find idx into station table for destination station */
+		sta_id = il_sta_id_or_broadcast(il, ctx, info->control.sta);
+
+		if (sta_id == IL_INVALID_STATION) {
+			D_DROP("Dropping - INVALID STATION: %pM\n",
+			       hdr->addr1);
+			goto drop_unlock;
+		}
+	}
+
+	D_TX("station Id %d\n", sta_id);
+
+	if (sta)
+		sta_priv = (void *)sta->drv_priv;
+
+	if (sta_priv && sta_priv->asleep &&
+	    (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
+		/*
+		 * This sends an asynchronous command to the device,
+		 * but we can rely on it being processed before the
+		 * next frame is processed -- and the next frame to
+		 * this station is the one that will consume this
+		 * counter.
+		 * For now set the counter to just 1 since we do not
+		 * support uAPSD yet.
+		 */
+		il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
+	}
+
+	/*
+	 * Send this frame after DTIM -- there's a special queue
+	 * reserved for this for contexts that support AP mode.
+	 */
+	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+		txq_id = ctx->mcast_queue;
+		/*
+		 * The microcode will clear the more data
+		 * bit in the last frame it transmits.
+		 */
+		hdr->frame_control |=
+		    cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+	} else
+		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
+
+	/* irqs already disabled/saved above when locking il->lock */
+	spin_lock(&il->sta_lock);
+
+	if (ieee80211_is_data_qos(fc)) {
+		qc = ieee80211_get_qos_ctl(hdr);
+		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
+			spin_unlock(&il->sta_lock);
+			goto drop_unlock;
+		}
+		seq_number = il->stations[sta_id].tid[tid].seq_number;
+		seq_number &= IEEE80211_SCTL_SEQ;
+		hdr->seq_ctrl = hdr->seq_ctrl &
+		    cpu_to_le16(IEEE80211_SCTL_FRAG);
+		hdr->seq_ctrl |= cpu_to_le16(seq_number);
+		seq_number += 0x10;
+		/* aggregation is on for this <sta,tid> */
+		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+		    il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
+			txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
+			is_agg = true;
+		}
+	}
+
+	txq = &il->txq[txq_id];
+	q = &txq->q;
+
+	if (unlikely(il_queue_space(q) < q->high_mark)) {
+		spin_unlock(&il->sta_lock);
+		goto drop_unlock;
+	}
+
+	if (ieee80211_is_data_qos(fc)) {
+		il->stations[sta_id].tid[tid].tfds_in_queue++;
+		if (!ieee80211_has_morefrags(fc))
+			il->stations[sta_id].tid[tid].seq_number = seq_number;
+	}
+
+	spin_unlock(&il->sta_lock);
+
+	/* Set up driver data for this TFD */
+	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
+	txq->txb[q->write_ptr].skb = skb;
+	txq->txb[q->write_ptr].ctx = ctx;
+
+	/* Set up first empty entry in queue's array of Tx/cmd buffers */
+	out_cmd = txq->cmd[q->write_ptr];
+	out_meta = &txq->meta[q->write_ptr];
+	tx_cmd = &out_cmd->cmd.tx;
+	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+	memset(tx_cmd, 0, sizeof(struct il_tx_cmd));
+
+	/*
+	 * Set up the Tx-command (not MAC!) header.
+	 * Store the chosen Tx queue and TFD idx within the sequence field;
+	 * after Tx, uCode's Tx response will return this value so driver can
+	 * locate the frame within the tx queue and do post-tx processing.
+	 */
+	out_cmd->hdr.cmd = REPLY_TX;
+	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+				IDX_TO_SEQ(q->write_ptr)));
+
+	/* Copy MAC header from skb into command buffer */
+	memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+	/* Total # bytes to be transmitted */
+	len = (u16)skb->len;
+	tx_cmd->len = cpu_to_le16(len);
+
+	if (info->control.hw_key)
+		il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
+
+	/* TODO need this for burst mode later on */
+	il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
+	il_dbg_log_tx_data_frame(il, len, hdr);
+
+	il4965_tx_cmd_build_rate(il, tx_cmd, info, fc);
+
+	il_update_stats(il, true, fc, len);
+	/*
+	 * Use the first empty entry in this queue's command buffer array
+	 * to contain the Tx command and MAC header concatenated together
+	 * (payload data will be in another buffer).
+	 * Size of this varies, due to varying MAC header length.
+	 * If end is not dword aligned, we'll have 2 extra bytes at the end
+	 * of the MAC header (device reads on dword boundaries).
+	 * We'll tell device about this padding later.
+	 */
+	len = sizeof(struct il_tx_cmd) +
+	    sizeof(struct il_cmd_header) + hdr_len;
+	firstlen = (len + 3) & ~3;
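+	/*
+	 * Example (illustrative): len = 50 rounds up to firstlen = 52 and
+	 * the MH_PAD flag below tells the device about the 2 pad bytes;
+	 * len = 52 is already dword aligned and needs no padding.
+	 */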
+
+	/* Tell NIC about any 2-byte padding after MAC header */
+	if (firstlen != len)
+		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
+	/* Physical address of this Tx command's header (not MAC header!),
+	 * within command buffer array. */
+	txcmd_phys = pci_map_single(il->pci_dev,
+				    &out_cmd->hdr, firstlen,
+				    PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+	dma_unmap_len_set(out_meta, len, firstlen);
+	/* Add buffer containing Tx command and MAC(!) header to TFD's
+	 * first entry */
+	il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq,
+						 txcmd_phys, firstlen, 1, 0);
+
+	if (!ieee80211_has_morefrags(hdr->frame_control)) {
+		txq->need_update = 1;
+	} else {
+		wait_write_ptr = 1;
+		txq->need_update = 0;
+	}
+
+	/* Set up TFD's 2nd entry to point directly to remainder of skb,
+	 * if any (802.11 null frames have no payload). */
+	secondlen = skb->len - hdr_len;
+	if (secondlen > 0) {
+		phys_addr = pci_map_single(il->pci_dev, skb->data + hdr_len,
+					   secondlen, PCI_DMA_TODEVICE);
+		il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq,
+							 phys_addr, secondlen,
+							 0, 0);
+	}
+
+	scratch_phys = txcmd_phys + sizeof(struct il_cmd_header) +
+	    offsetof(struct il_tx_cmd, scratch);
+
+	/* take back ownership of DMA buffer to enable update */
+	pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys,
+				    firstlen, PCI_DMA_BIDIRECTIONAL);
+	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+	tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
+
+	D_TX("sequence nr = 0X%x\n",
+	     le16_to_cpu(out_cmd->hdr.sequence));
+	D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+	il_print_hex_dump(il, IL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
+	il_print_hex_dump(il, IL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
+
+	/* Set up entry for this TFD in Tx byte-count array */
+	if (info->flags & IEEE80211_TX_CTL_AMPDU)
+		il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq,
+						le16_to_cpu(tx_cmd->len));
+
+	pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys,
+				       firstlen, PCI_DMA_BIDIRECTIONAL);
+
+	/* Tell device the write idx *just past* this latest filled TFD */
+	q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
+	il_txq_update_write_ptr(il, txq);
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	/*
+	 * At this point the frame is "transmitted" successfully
+	 * and we will get a TX status notification eventually,
+	 * regardless of the value of ret. "ret" only indicates
+	 * whether or not we should update the write pointer.
+	 */
+
+	/*
+	 * Avoid atomic ops if it isn't an associated client.
+	 * Also, if this is a packet for aggregation, don't
+	 * increase the counter because the ucode will stop
+	 * aggregation queues when their respective station
+	 * goes to sleep.
+	 */
+	if (sta_priv && sta_priv->client && !is_agg)
+		atomic_inc(&sta_priv->pending_frames);
+
+	if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
+		if (wait_write_ptr) {
+			spin_lock_irqsave(&il->lock, flags);
+			txq->need_update = 1;
+			il_txq_update_write_ptr(il, txq);
+			spin_unlock_irqrestore(&il->lock, flags);
+		} else {
+			il_stop_queue(il, txq);
+		}
+	}
+
+	return 0;
+
+drop_unlock:
+	spin_unlock_irqrestore(&il->lock, flags);
+	return -1;
+}
+
+static inline int il4965_alloc_dma_ptr(struct il_priv *il,
+				       struct il_dma_ptr *ptr, size_t size)
+{
+	ptr->addr = dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma,
+				       GFP_KERNEL);
+	if (!ptr->addr)
+		return -ENOMEM;
+	ptr->size = size;
+	return 0;
+}
+
+static inline void il4965_free_dma_ptr(struct il_priv *il,
+				       struct il_dma_ptr *ptr)
+{
+	if (unlikely(!ptr->addr))
+		return;
+
+	dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
+	memset(ptr, 0, sizeof(*ptr));
+}
+
+/**
+ * il4965_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void il4965_hw_txq_ctx_free(struct il_priv *il)
+{
+	int txq_id;
+
+	/* Tx queues */
+	if (il->txq) {
+		for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+			if (txq_id == il->cmd_queue)
+				il_cmd_queue_free(il);
+			else
+				il_tx_queue_free(il, txq_id);
+	}
+	il4965_free_dma_ptr(il, &il->kw);
+
+	il4965_free_dma_ptr(il, &il->scd_bc_tbls);
+
+	/* free tx queue structure */
+	il_free_txq_mem(il);
+}
+
+/**
+ * il4965_txq_ctx_alloc - allocate TX queue context
+ * Allocate all Tx DMA structures and initialize them
+ *
+ * @param il
+ * @return error code
+ */
+int il4965_txq_ctx_alloc(struct il_priv *il)
+{
+	int ret;
+	int txq_id, slots_num;
+	unsigned long flags;
+
+	/* Free all tx/cmd queues and keep-warm buffer */
+	il4965_hw_txq_ctx_free(il);
+
+	ret = il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
+				   il->hw_params.scd_bc_tbls_size);
+	if (ret) {
+		IL_ERR("Scheduler BC Table allocation failed\n");
+		goto error_bc_tbls;
+	}
+	/* Alloc keep-warm buffer */
+	ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
+	if (ret) {
+		IL_ERR("Keep Warm allocation failed\n");
+		goto error_kw;
+	}
+
+	/* allocate tx queue structure */
+	ret = il_alloc_txq_mem(il);
+	if (ret)
+		goto error;
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	/* Turn off all Tx DMA fifos */
+	il4965_txq_set_sched(il, 0);
+
+	/* Tell NIC where to find the "keep warm" buffer */
+	il_wr(il, FH_KW_MEM_ADDR_REG, il->kw.dma >> 4);
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
+	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+		slots_num = (txq_id == il->cmd_queue) ?
+		    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+		ret = il_tx_queue_init(il, &il->txq[txq_id], slots_num,
+				       txq_id);
+		if (ret) {
+			IL_ERR("Tx %d queue init failed\n", txq_id);
+			goto error;
+		}
+	}
+
+	return ret;
+
+error:
+	il4965_hw_txq_ctx_free(il);
+	il4965_free_dma_ptr(il, &il->kw);
+error_kw:
+	il4965_free_dma_ptr(il, &il->scd_bc_tbls);
+error_bc_tbls:
+	return ret;
+}
+
+void il4965_txq_ctx_reset(struct il_priv *il)
+{
+	int txq_id, slots_num;
+	unsigned long flags;
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	/* Turn off all Tx DMA fifos */
+	il4965_txq_set_sched(il, 0);
+
+	/* Tell NIC where to find the "keep warm" buffer */
+	il_wr(il, FH_KW_MEM_ADDR_REG, il->kw.dma >> 4);
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	/* Alloc and init all Tx queues, including the command queue (#4) */
+	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+		slots_num = txq_id == il->cmd_queue ?
+		    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+		il_tx_queue_reset(il, &il->txq[txq_id], slots_num, txq_id);
+	}
+}
+
+/**
+ * il4965_txq_ctx_stop - Stop all Tx DMA channels
+ */
+void il4965_txq_ctx_stop(struct il_priv *il)
+{
+	int ch, txq_id;
+	unsigned long flags;
+
+	/* Turn off all Tx DMA fifos */
+	spin_lock_irqsave(&il->lock, flags);
+
+	il4965_txq_set_sched(il, 0);
+
+	/* Stop each Tx DMA channel, and wait for it to be idle */
+	for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
+		il_wr(il, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+		if (il_poll_bit(il, FH_TSSR_TX_STATUS_REG,
+				FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
+				1000))
+			IL_ERR("Failing on timeout while stopping"
+			       " DMA channel %d [0x%08x]", ch,
+			       il_rd(il, FH_TSSR_TX_STATUS_REG));
+	}
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	if (!il->txq)
+		return;
+
+	/* Unmap DMA from host system and free skb's */
+	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+		if (txq_id == il->cmd_queue)
+			il_cmd_queue_unmap(il);
+		else
+			il_tx_queue_unmap(il, txq_id);
+}
+
+/*
+ * Find first available (lowest unused) Tx Queue, mark it "active".
+ * Called only when finding queue for aggregation.
+ * Should never return anything < 7, because they should already
+ * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
+ */
+static int il4965_txq_ctx_activate_free(struct il_priv *il)
+{
+	int txq_id;
+
+	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+		if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk))
+			return txq_id;
+	return -1;
+}
+
+/**
+ * il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
+ */
+static void il4965_tx_queue_stop_scheduler(struct il_priv *il,
+					   u16 txq_id)
+{
+	/* Simply stop the queue, but don't change any configuration;
+	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
+	il_wr_prph(il,
+		   IL49_SCD_QUEUE_STATUS_BITS(txq_id),
+		   (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+		   (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
+
+/**
+ * il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
+ */
+static int il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid,
+				       u16 txq_id)
+{
+	u32 tbl_dw_addr;
+	u32 tbl_dw;
+	u16 scd_q2ratid;
+
+	scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+
+	tbl_dw_addr = il->scd_base_addr +
+	    IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
+
+	tbl_dw = il_read_targ_mem(il, tbl_dw_addr);
+
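+	/*
+	 * Each dword of the translation table holds the RA/TID mapping
+	 * for two queues: odd-numbered queues use the upper 16 bits,
+	 * even-numbered queues the lower 16 bits.
+	 */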
+	if (txq_id & 0x1)
+		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
+	else
+		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
+
+	il_write_targ_mem(il, tbl_dw_addr, tbl_dw);
+
+	return 0;
+}
+
+/**
+ * il4965_txq_agg_enable - Set up & enable aggregation for selected queue
+ *
+ * NOTE: txq_id must be no less than IL49_FIRST_AMPDU_QUEUE,
+ * i.e. it must be one of the higher queues used for aggregation
+ */
+static int il4965_txq_agg_enable(struct il_priv *il, int txq_id,
+				 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
+{
+	unsigned long flags;
+	u16 ra_tid;
+	int ret;
+
+	if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
+	    (IL49_FIRST_AMPDU_QUEUE +
+	     il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+		IL_WARN("queue number out of range: %d, must be %d to %d\n",
+			txq_id, IL49_FIRST_AMPDU_QUEUE,
+			IL49_FIRST_AMPDU_QUEUE +
+			il->cfg->base_params->num_of_ampdu_queues - 1);
+		return -EINVAL;
+	}
+
+	ra_tid = BUILD_RAxTID(sta_id, tid);
+
+	/* Modify device's station table to Tx this TID */
+	ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	/* Stop this Tx queue before configuring it */
+	il4965_tx_queue_stop_scheduler(il, txq_id);
+
+	/* Map receiver-address / traffic-ID to this queue */
+	il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);
+
+	/* Set this queue as a chain-building queue */
+	il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+	/* Place first TFD at idx corresponding to start sequence number.
+	 * Assumes that ssn_idx is valid (!= 0xFFF) */
+	il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+	il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+	il4965_set_wr_ptrs(il, txq_id, ssn_idx);
+
+	/* Set up Tx win size and frame limit for this queue */
+	il_write_targ_mem(il,
+		il->scd_base_addr + IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
+		(SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
+		IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+	il_write_targ_mem(il, il->scd_base_addr +
+		IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
+		(SCD_FRAME_LIMIT << IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
+		& IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+	il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+
+	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
+	il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	return 0;
+}
+
+int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+	int sta_id;
+	int tx_fifo;
+	int txq_id;
+	int ret;
+	unsigned long flags;
+	struct il_tid_data *tid_data;
+
+	tx_fifo = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
+	if (unlikely(tx_fifo < 0))
+		return tx_fifo;
+
+	IL_WARN("%s on ra = %pM tid = %d\n",
+		__func__, sta->addr, tid);
+
+	sta_id = il_sta_id(sta);
+	if (sta_id == IL_INVALID_STATION) {
+		IL_ERR("Start AGG on invalid station\n");
+		return -ENXIO;
+	}
+	if (unlikely(tid >= MAX_TID_COUNT))
+		return -EINVAL;
+
+	if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
+		IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
+		return -ENXIO;
+	}
+
+	txq_id = il4965_txq_ctx_activate_free(il);
+	if (txq_id == -1) {
+		IL_ERR("No free aggregation queue available\n");
+		return -ENXIO;
+	}
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	tid_data = &il->stations[sta_id].tid[tid];
+	*ssn = SEQ_TO_SN(tid_data->seq_number);
+	tid_data->agg.txq_id = txq_id;
+	il_set_swq_id(&il->txq[txq_id],
+		      il4965_get_ac_from_tid(tid), txq_id);
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	ret = il4965_txq_agg_enable(il, txq_id, tx_fifo,
+				    sta_id, tid, *ssn);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	tid_data = &il->stations[sta_id].tid[tid];
+	if (tid_data->tfds_in_queue == 0) {
+		D_HT("HW queue is empty\n");
+		tid_data->agg.state = IL_AGG_ON;
+		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+	} else {
+		D_HT("HW queue is NOT empty: %d packets in HW queue\n",
+		     tid_data->tfds_in_queue);
+		tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
+	}
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+	return ret;
+}
+
+/**
+ * txq_id must be no less than IL49_FIRST_AMPDU_QUEUE
+ * il->lock must be held by the caller
+ */
+static int il4965_txq_agg_disable(struct il_priv *il, u16 txq_id,
+				  u16 ssn_idx, u8 tx_fifo)
+{
+	if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
+	    (IL49_FIRST_AMPDU_QUEUE +
+	     il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+		IL_WARN("queue number out of range: %d, must be %d to %d\n",
+			txq_id, IL49_FIRST_AMPDU_QUEUE,
+			IL49_FIRST_AMPDU_QUEUE +
+			il->cfg->base_params->num_of_ampdu_queues - 1);
+		return -EINVAL;
+	}
+
+	il4965_tx_queue_stop_scheduler(il, txq_id);
+
+	il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+	il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+	il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+	/* supposes that ssn_idx is valid (!= 0xFFF) */
+	il4965_set_wr_ptrs(il, txq_id, ssn_idx);
+
+	il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+	il_txq_ctx_deactivate(il, txq_id);
+	il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);
+
+	return 0;
+}
+
+int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
+		       struct ieee80211_sta *sta, u16 tid)
+{
+	int tx_fifo_id, txq_id, sta_id, ssn;
+	struct il_tid_data *tid_data;
+	int write_ptr, read_ptr;
+	unsigned long flags;
+
+	tx_fifo_id = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
+	if (unlikely(tx_fifo_id < 0))
+		return tx_fifo_id;
+
+	sta_id = il_sta_id(sta);
+
+	if (sta_id == IL_INVALID_STATION) {
+		IL_ERR("Invalid station for AGG tid %d\n", tid);
+		return -ENXIO;
+	}
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+
+	tid_data = &il->stations[sta_id].tid[tid];
+	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
+	txq_id = tid_data->agg.txq_id;
+
+	switch (il->stations[sta_id].tid[tid].agg.state) {
+	case IL_EMPTYING_HW_QUEUE_ADDBA:
+		/*
+		 * This can happen if the peer stops aggregation
+		 * again before we've had a chance to drain the
+		 * queue we selected previously, i.e. before the
+		 * session was really started completely.
+		 */
+		D_HT("AGG stop before setup done\n");
+		goto turn_off;
+	case IL_AGG_ON:
+		break;
+	default:
+		IL_WARN("Stopping AGG while state not ON or starting\n");
+	}
+
+	write_ptr = il->txq[txq_id].q.write_ptr;
+	read_ptr = il->txq[txq_id].q.read_ptr;
+
+	/* The queue is not empty */
+	if (write_ptr != read_ptr) {
+		D_HT("Stopping a non empty AGG HW QUEUE\n");
+		il->stations[sta_id].tid[tid].agg.state =
+		    IL_EMPTYING_HW_QUEUE_DELBA;
+		spin_unlock_irqrestore(&il->sta_lock, flags);
+		return 0;
+	}
+
+	D_HT("HW queue is empty\n");
+turn_off:
+	il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;
+
+	/* do not restore/save irqs */
+	spin_unlock(&il->sta_lock);
+	spin_lock(&il->lock);
+
+	/*
+	 * The only reason this call can fail is a queue number out of
+	 * range, which can happen if the uCode is reloaded and all the
+	 * station information is lost. If it is out of range, there is
+	 * no need to deactivate the uCode queue; just return "success"
+	 * to let mac80211 clean up its own data.
+	 */
+	il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+	return 0;
+}
+
+int il4965_txq_check_empty(struct il_priv *il,
+			   int sta_id, u8 tid, int txq_id)
+{
+	struct il_queue *q = &il->txq[txq_id].q;
+	u8 *addr = il->stations[sta_id].sta.sta.addr;
+	struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];
+	struct il_rxon_context *ctx;
+
+	ctx = &il->ctx;
+
+	lockdep_assert_held(&il->sta_lock);
+
+	switch (il->stations[sta_id].tid[tid].agg.state) {
+	case IL_EMPTYING_HW_QUEUE_DELBA:
+		/* We are reclaiming the last packet of the */
+		/* aggregated HW queue */
+		if (txq_id == tid_data->agg.txq_id &&
+		    q->read_ptr == q->write_ptr) {
+			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
+			int tx_fifo = il4965_get_fifo_from_tid(ctx, tid);
+			D_HT("HW queue empty: continue DELBA flow\n");
+			il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
+			tid_data->agg.state = IL_AGG_OFF;
+			ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+		}
+		break;
+	case IL_EMPTYING_HW_QUEUE_ADDBA:
+		/* We are reclaiming the last packet of the queue */
+		if (tid_data->tfds_in_queue == 0) {
+			D_HT("HW queue empty: continue ADDBA flow\n");
+			tid_data->agg.state = IL_AGG_ON;
+			ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static void il4965_non_agg_tx_status(struct il_priv *il,
+				     struct il_rxon_context *ctx,
+				     const u8 *addr1)
+{
+	struct ieee80211_sta *sta;
+	struct il_station_priv *sta_priv;
+
+	rcu_read_lock();
+	sta = ieee80211_find_sta(ctx->vif, addr1);
+	if (sta) {
+		sta_priv = (void *)sta->drv_priv;
+		/* avoid atomic ops if this isn't a client */
+		if (sta_priv->client &&
+		    atomic_dec_return(&sta_priv->pending_frames) == 0)
+			ieee80211_sta_block_awake(il->hw, sta, false);
+	}
+	rcu_read_unlock();
+}
+
+static void
+il4965_tx_status(struct il_priv *il, struct il_tx_info *tx_info,
+		 bool is_agg)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+
+	if (!is_agg)
+		il4965_non_agg_tx_status(il, tx_info->ctx, hdr->addr1);
+
+	ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
+}
+
+int il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
+{
+	struct il_tx_queue *txq = &il->txq[txq_id];
+	struct il_queue *q = &txq->q;
+	struct il_tx_info *tx_info;
+	int nfreed = 0;
+	struct ieee80211_hdr *hdr;
+
+	if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
+		IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
+		       "is out of range [0-%d] %d %d.\n", txq_id,
+		       idx, q->n_bd, q->write_ptr, q->read_ptr);
+		return 0;
+	}
+
+	for (idx = il_queue_inc_wrap(idx, q->n_bd);
+	     q->read_ptr != idx;
+	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+		tx_info = &txq->txb[txq->q.read_ptr];
+
+		if (WARN_ON_ONCE(tx_info->skb == NULL))
+			continue;
+
+		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+		if (ieee80211_is_data_qos(hdr->frame_control))
+			nfreed++;
+
+		il4965_tx_status(il, tx_info,
+				 txq_id >= IL4965_FIRST_AMPDU_QUEUE);
+		tx_info->skb = NULL;
+
+		il->cfg->ops->lib->txq_free_tfd(il, txq);
+	}
+	return nfreed;
+}
+
+/**
+ * il4965_tx_status_reply_compressed_ba - Update tx status from block-ack
+ *
+ * Go through block-ack's bitmap of ACK'd frames, update driver's record of
+ * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
+ */
+static int il4965_tx_status_reply_compressed_ba(struct il_priv *il,
+					struct il_ht_agg *agg,
+					struct il_compressed_ba_resp *ba_resp)
+{
+	int i, sh, ack;
+	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
+	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+	int successes = 0;
+	struct ieee80211_tx_info *info;
+	u64 bitmap, sent_bitmap;
+
+	if (unlikely(!agg->wait_for_ba)) {
+		if (unlikely(ba_resp->bitmap))
+			IL_ERR("Received BA when not expected\n");
+		return -EINVAL;
+	}
+
+	/* Mark that the expected block-ack response arrived */
+	agg->wait_for_ba = 0;
+	D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
+
+	/* Calculate shift to align block-ack bits with our Tx win bits */
+	sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
+	if (sh < 0) /* tbw something is wrong with indices */
+		sh += 0x100;
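+	/*
+	 * Example (illustrative): start_idx = 5 with a block-ack
+	 * sequence mapping to idx 250 gives sh = -245, which the
+	 * 0x100 (256-entry queue) wrap above corrects to 11.
+	 */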
+
+	if (agg->frame_count > (64 - sh)) {
+		D_TX_REPLY("more frames than bitmap size");
+		return -1;
+	}
+
+	/* don't use 64-bit values for now */
+	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
+
+	/* check for success or failure according to the
+	 * transmitted bitmap and block-ack bitmap */
+	sent_bitmap = bitmap & agg->bitmap;
+
+	/* For each frame attempted in aggregation,
+	 * update driver's record of tx frame's status. */
+	i = 0;
+	while (sent_bitmap) {
+		ack = sent_bitmap & 1ULL;
+		successes += ack;
+		D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
+			   ack ? "ACK" : "NACK", i,
+			   (agg->start_idx + i) & 0xff,
+			   agg->start_idx + i);
+		sent_bitmap >>= 1;
+		++i;
+	}
+
+	D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
+
+	info = IEEE80211_SKB_CB(il->txq[scd_flow].txb[agg->start_idx].skb);
+	memset(&info->status, 0, sizeof(info->status));
+	info->flags |= IEEE80211_TX_STAT_ACK;
+	info->flags |= IEEE80211_TX_STAT_AMPDU;
+	info->status.ampdu_ack_len = successes;
+	info->status.ampdu_len = agg->frame_count;
+	il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);
+
+	return 0;
+}
+
+/**
+ * Translate uCode response to mac80211 tx status control values
+ */
+void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
+				 struct ieee80211_tx_info *info)
+{
+	struct ieee80211_tx_rate *r = &info->control.rates[0];
+
+	info->antenna_sel_tx =
+	    ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+	if (rate_n_flags & RATE_MCS_HT_MSK)
+		r->flags |= IEEE80211_TX_RC_MCS;
+	if (rate_n_flags & RATE_MCS_GF_MSK)
+		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+	if (rate_n_flags & RATE_MCS_HT40_MSK)
+		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+	if (rate_n_flags & RATE_MCS_DUP_MSK)
+		r->flags |= IEEE80211_TX_RC_DUP_DATA;
+	if (rate_n_flags & RATE_MCS_SGI_MSK)
+		r->flags |= IEEE80211_TX_RC_SHORT_GI;
+	r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
+}
+
+/**
+ * il4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
+ *
+ * Handles block-acknowledge notification from device, which reports success
+ * of frames sent via aggregation.
+ */
+void il4965_rx_reply_compressed_ba(struct il_priv *il,
+				   struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
+	struct il_tx_queue *txq = NULL;
+	struct il_ht_agg *agg;
+	int idx;
+	int sta_id;
+	int tid;
+	unsigned long flags;
+
+	/* "flow" corresponds to Tx queue */
+	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+
+	/* "ssn" is start of block-ack Tx win, corresponds to idx
+	 * (in Tx queue's circular buffer) of first TFD/frame in win */
+	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
+
+	if (scd_flow >= il->hw_params.max_txq_num) {
+		IL_ERR("BUG_ON scd_flow is bigger than number of queues\n");
+		return;
+	}
+
+	txq = &il->txq[scd_flow];
+	sta_id = ba_resp->sta_id;
+	tid = ba_resp->tid;
+	agg = &il->stations[sta_id].tid[tid].agg;
+	if (unlikely(agg->txq_id != scd_flow)) {
+		/*
+		 * FIXME: this is a uCode bug which needs to be addressed;
+		 * log the information and return for now.
+		 * Since it can happen very often, and in order not to fill
+		 * the syslog, the logging is not enabled by default.
+		 */
+		D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n",
+			   scd_flow, agg->txq_id);
+		return;
+	}
+
+	/* Find idx just before block-ack win */
+	idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+
+	D_TX_REPLY("REPLY_COMPRESSED_BA [%d] Received from %pM, "
+		   "sta_id = %d\n",
+		   agg->wait_for_ba,
+		   (u8 *)&ba_resp->sta_addr_lo32,
+		   ba_resp->sta_id);
+	D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
+		   "scd_flow = %d, scd_ssn = %d\n",
+		   ba_resp->tid,
+		   ba_resp->seq_ctl,
+		   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
+		   ba_resp->scd_flow,
+		   ba_resp->scd_ssn);
+	D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n",
+		   agg->start_idx,
+		   (unsigned long long)agg->bitmap);
+
+	/* Update driver's record of ACK vs. not for each frame in win */
+	il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);
+
+	/* Release all TFDs before the SSN, i.e. all TFDs in front of
+	 * block-ack win (we assume that they've been successfully
+	 * transmitted ... if not, it's too late anyway). */
+	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
+		/* calculate mac80211 ampdu sw queue to wake */
+		int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
+		il4965_free_tfds_in_queue(il, sta_id, tid, freed);
+
+		if (il_queue_space(&txq->q) > txq->q.low_mark &&
+		    il->mac80211_registered &&
+		    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
+			il_wake_queue(il, txq);
+
+		il4965_txq_check_empty(il, sta_id, tid, scd_flow);
+	}
+
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+const char *il4965_get_tx_fail_reason(u32 status)
+{
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+
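+	/*
+	 * Example: TX_STATUS_FAIL(SHORT_LIMIT) expands to
+	 * case TX_STATUS_FAIL_SHORT_LIMIT: return "SHORT_LIMIT"
+	 * so the switch below maps each status code to its name.
+	 */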
+	switch (status & TX_STATUS_MSK) {
+	case TX_STATUS_SUCCESS:
+		return "SUCCESS";
+	TX_STATUS_POSTPONE(DELAY);
+	TX_STATUS_POSTPONE(FEW_BYTES);
+	TX_STATUS_POSTPONE(QUIET_PERIOD);
+	TX_STATUS_POSTPONE(CALC_TTAK);
+	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+	TX_STATUS_FAIL(SHORT_LIMIT);
+	TX_STATUS_FAIL(LONG_LIMIT);
+	TX_STATUS_FAIL(FIFO_UNDERRUN);
+	TX_STATUS_FAIL(DRAIN_FLOW);
+	TX_STATUS_FAIL(RFKILL_FLUSH);
+	TX_STATUS_FAIL(LIFE_EXPIRE);
+	TX_STATUS_FAIL(DEST_PS);
+	TX_STATUS_FAIL(HOST_ABORTED);
+	TX_STATUS_FAIL(BT_RETRY);
+	TX_STATUS_FAIL(STA_INVALID);
+	TX_STATUS_FAIL(FRAG_DROPPED);
+	TX_STATUS_FAIL(TID_DISABLE);
+	TX_STATUS_FAIL(FIFO_FLUSHED);
+	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
+	TX_STATUS_FAIL(PASSIVE_NO_RX);
+	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
+	}
+
+	return "UNKNOWN";
+
+#undef TX_STATUS_FAIL
+#undef TX_STATUS_POSTPONE
+}
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
 static struct il_link_quality_cmd *
 il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
 {
diff --git a/drivers/net/wireless/iwlegacy/Makefile b/drivers/net/wireless/iwlegacy/Makefile index 0643fda0656c..884470479223 100644 --- a/drivers/net/wireless/iwlegacy/Makefile +++ b/drivers/net/wireless/iwlegacy/Makefile | |||
@@ -9,8 +9,7 @@ iwl-legacy-objs += $(iwl-legacy-m) | |||
9 | # 4965 | 9 | # 4965 |
10 | obj-$(CONFIG_IWL4965) += iwl4965.o | 10 | obj-$(CONFIG_IWL4965) += iwl4965.o |
11 | iwl4965-objs := 4965.o 4965-mac.o iwl-4965-rs.o | 11 | iwl4965-objs := 4965.o 4965-mac.o iwl-4965-rs.o |
12 | iwl4965-objs += iwl-4965-tx.o | 12 | iwl4965-objs += iwl-4965-lib.o iwl-4965-calib.o |
13 | iwl4965-objs += iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o | ||
14 | iwl4965-$(CONFIG_IWLEGACY_DEBUGFS) += iwl-4965-debugfs.o | 13 | iwl4965-$(CONFIG_IWLEGACY_DEBUGFS) += iwl-4965-debugfs.o |
15 | 14 | ||
16 | # 3945 | 15 | # 3945 |
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c deleted file mode 100644 index b32295747851..000000000000 --- a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c +++ /dev/null | |||
@@ -1,215 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * GPL LICENSE SUMMARY | ||
4 | * | ||
5 | * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of version 2 of the GNU General Public License as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, | ||
19 | * USA | ||
20 | * | ||
21 | * The full GNU General Public License is included in this distribution | ||
22 | * in the file called LICENSE.GPL. | ||
23 | * | ||
24 | * Contact Information: | ||
25 | * Intel Linux Wireless <ilw@linux.intel.com> | ||
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
27 | * | ||
28 | *****************************************************************************/ | ||
29 | |||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/module.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/sched.h> | ||
34 | |||
35 | #include "iwl-dev.h" | ||
36 | #include "iwl-core.h" | ||
37 | #include "iwl-4965-calib.h" | ||
38 | #include "iwl-sta.h" | ||
39 | #include "iwl-io.h" | ||
40 | #include "iwl-helpers.h" | ||
41 | #include "iwl-4965-hw.h" | ||
42 | #include "iwl-4965.h" | ||
43 | |||
44 | void il4965_rx_missed_beacon_notif(struct il_priv *il, | ||
45 | struct il_rx_buf *rxb) | ||
46 | |||
47 | { | ||
48 | struct il_rx_pkt *pkt = rxb_addr(rxb); | ||
49 | struct il_missed_beacon_notif *missed_beacon; | ||
50 | |||
51 | missed_beacon = &pkt->u.missed_beacon; | ||
52 | if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) > | ||
53 | il->missed_beacon_threshold) { | ||
54 | D_CALIB( | ||
55 | "missed bcn cnsq %d totl %d rcd %d expctd %d\n", | ||
56 | le32_to_cpu(missed_beacon->consecutive_missed_beacons), | ||
57 | le32_to_cpu(missed_beacon->total_missed_becons), | ||
58 | le32_to_cpu(missed_beacon->num_recvd_beacons), | ||
59 | le32_to_cpu(missed_beacon->num_expected_beacons)); | ||
60 | if (!test_bit(STATUS_SCANNING, &il->status)) | ||
61 | il4965_init_sensitivity(il); | ||
62 | } | ||
63 | } | ||
64 | |||
65 | /* Calculate noise level, based on measurements during network silence just | ||
66 | * before arriving beacon. This measurement can be done only if we know | ||
67 | * exactly when to expect beacons, therefore only when we're associated. */ | ||
68 | static void il4965_rx_calc_noise(struct il_priv *il) | ||
69 | { | ||
70 | struct stats_rx_non_phy *rx_info; | ||
71 | int num_active_rx = 0; | ||
72 | int total_silence = 0; | ||
73 | int bcn_silence_a, bcn_silence_b, bcn_silence_c; | ||
74 | int last_rx_noise; | ||
75 | |||
76 | rx_info = &(il->_4965.stats.rx.general); | ||
77 | bcn_silence_a = | ||
78 | le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; | ||
79 | bcn_silence_b = | ||
80 | le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; | ||
81 | bcn_silence_c = | ||
82 | le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; | ||
83 | |||
84 | if (bcn_silence_a) { | ||
85 | total_silence += bcn_silence_a; | ||
86 | num_active_rx++; | ||
87 | } | ||
88 | if (bcn_silence_b) { | ||
89 | total_silence += bcn_silence_b; | ||
90 | num_active_rx++; | ||
91 | } | ||
92 | if (bcn_silence_c) { | ||
93 | total_silence += bcn_silence_c; | ||
94 | num_active_rx++; | ||
95 | } | ||
96 | |||
97 | /* Average among active antennas */ | ||
98 | if (num_active_rx) | ||
99 | last_rx_noise = (total_silence / num_active_rx) - 107; | ||
100 | else | ||
101 | last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE; | ||
102 | |||
103 | D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", | ||
104 | bcn_silence_a, bcn_silence_b, bcn_silence_c, | ||
105 | last_rx_noise); | ||
106 | } | ||
107 | |||
108 | #ifdef CONFIG_IWLEGACY_DEBUGFS | ||
109 | /* | ||
110 | * based on the assumption of all stats counter are in DWORD | ||
111 | * FIXME: This function is for debugging, do not deal with | ||
112 | * the case of counters roll-over. | ||
113 | */ | ||
114 | static void il4965_accumulative_stats(struct il_priv *il, | ||
115 | __le32 *stats) | ||
116 | { | ||
117 | int i, size; | ||
118 | __le32 *prev_stats; | ||
119 | u32 *accum_stats; | ||
120 | u32 *delta, *max_delta; | ||
121 | struct stats_general_common *general, *accum_general; | ||
122 | struct stats_tx *tx, *accum_tx; | ||
123 | |||
124 | prev_stats = (__le32 *)&il->_4965.stats; | ||
125 | accum_stats = (u32 *)&il->_4965.accum_stats; | ||
126 | size = sizeof(struct il_notif_stats); | ||
127 | general = &il->_4965.stats.general.common; | ||
128 | accum_general = &il->_4965.accum_stats.general.common; | ||
129 | tx = &il->_4965.stats.tx; | ||
130 | accum_tx = &il->_4965.accum_stats.tx; | ||
131 | delta = (u32 *)&il->_4965.delta_stats; | ||
132 | max_delta = (u32 *)&il->_4965.max_delta; | ||
133 | |||
134 | for (i = sizeof(__le32); i < size; | ||
135 | i += sizeof(__le32), stats++, prev_stats++, delta++, | ||
136 | max_delta++, accum_stats++) { | ||
137 | if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) { | ||
138 | *delta = (le32_to_cpu(*stats) - | ||
139 | le32_to_cpu(*prev_stats)); | ||
140 | *accum_stats += *delta; | ||
141 | if (*delta > *max_delta) | ||
142 | *max_delta = *delta; | ||
143 | } | ||
144 | } | ||
145 | |||
146 | /* reset accumulative stats for "no-counter" type stats */ | ||
147 | accum_general->temperature = general->temperature; | ||
148 | accum_general->ttl_timestamp = general->ttl_timestamp; | ||
149 | } | ||
150 | #endif | ||
151 | |||
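
[Editor's note] il4965_accumulative_stats() above treats the whole notification as a flat array of little-endian 32-bit counters and tracks per-field deltas; counters that moved backwards (reset or rollover) are skipped, per the FIXME. A userspace sketch of the same walk, assuming a toy two-counter layout in host byte order:

    #include <stdio.h>
    #include <stdint.h>

    #define N_COUNTERS 2

    /* Toy stand-ins for the stats/accum/delta/max_delta blocks. */
    static uint32_t cur[N_COUNTERS]  = { 110, 40 };  /* latest firmware counters */
    static uint32_t prev[N_COUNTERS] = { 100, 45 };  /* previous snapshot */
    static uint32_t accum[N_COUNTERS], delta[N_COUNTERS], max_delta[N_COUNTERS];

    int main(void)
    {
            /* Like the driver loop: only count forward movement. */
            for (int i = 0; i < N_COUNTERS; i++) {
                    if (cur[i] > prev[i]) {
                            delta[i] = cur[i] - prev[i];
                            accum[i] += delta[i];
                            if (delta[i] > max_delta[i])
                                    max_delta[i] = delta[i];
                    }
            }
            printf("accum[0]=%u accum[1]=%u\n", accum[0], accum[1]); /* 10 0 */
            return 0;
    }
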
152 | #define REG_RECALIB_PERIOD (60) | ||
153 | |||
154 | void il4965_rx_stats(struct il_priv *il, | ||
155 | struct il_rx_buf *rxb) | ||
156 | { | ||
157 | int change; | ||
158 | struct il_rx_pkt *pkt = rxb_addr(rxb); | ||
159 | |||
160 | D_RX( | ||
161 | "Statistics notification received (%d vs %d).\n", | ||
162 | (int)sizeof(struct il_notif_stats), | ||
163 | le32_to_cpu(pkt->len_n_flags) & | ||
164 | FH_RSCSR_FRAME_SIZE_MSK); | ||
165 | |||
166 | change = ((il->_4965.stats.general.common.temperature != | ||
167 | pkt->u.stats.general.common.temperature) || | ||
168 | ((il->_4965.stats.flag & | ||
169 | STATISTICS_REPLY_FLG_HT40_MODE_MSK) != | ||
170 | (pkt->u.stats.flag & | ||
171 | STATISTICS_REPLY_FLG_HT40_MODE_MSK))); | ||
172 | #ifdef CONFIG_IWLEGACY_DEBUGFS | ||
173 | il4965_accumulative_stats(il, (__le32 *)&pkt->u.stats); | ||
174 | #endif | ||
175 | |||
176 | /* TODO: reading some of stats is unneeded */ | ||
177 | memcpy(&il->_4965.stats, &pkt->u.stats, | ||
178 | sizeof(il->_4965.stats)); | ||
179 | |||
180 | set_bit(STATUS_STATISTICS, &il->status); | ||
181 | |||
182 | /* Reschedule the stats timer to occur in | ||
183 | * REG_RECALIB_PERIOD seconds to ensure we get a | ||
184 | * thermal update even if the uCode doesn't give | ||
185 | * us one */ | ||
186 | mod_timer(&il->stats_periodic, jiffies + | ||
187 | msecs_to_jiffies(REG_RECALIB_PERIOD * 1000)); | ||
188 | |||
189 | if (unlikely(!test_bit(STATUS_SCANNING, &il->status)) && | ||
190 | (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) { | ||
191 | il4965_rx_calc_noise(il); | ||
192 | queue_work(il->workqueue, &il->run_time_calib_work); | ||
193 | } | ||
194 | if (il->cfg->ops->lib->temp_ops.temperature && change) | ||
195 | il->cfg->ops->lib->temp_ops.temperature(il); | ||
196 | } | ||
197 | |||
198 | void il4965_reply_stats(struct il_priv *il, | ||
199 | struct il_rx_buf *rxb) | ||
200 | { | ||
201 | struct il_rx_pkt *pkt = rxb_addr(rxb); | ||
202 | |||
203 | if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) { | ||
204 | #ifdef CONFIG_IWLEGACY_DEBUGFS | ||
205 | memset(&il->_4965.accum_stats, 0, | ||
206 | sizeof(struct il_notif_stats)); | ||
207 | memset(&il->_4965.delta_stats, 0, | ||
208 | sizeof(struct il_notif_stats)); | ||
209 | memset(&il->_4965.max_delta, 0, | ||
210 | sizeof(struct il_notif_stats)); | ||
211 | #endif | ||
212 | D_RX("Statistics have been cleared\n"); | ||
213 | } | ||
214 | il4965_rx_stats(il, rxb); | ||
215 | } | ||
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c deleted file mode 100644 index a6fa1c2296ac..000000000000 --- a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c +++ /dev/null | |||
@@ -1,1371 +0,0 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * GPL LICENSE SUMMARY | ||
4 | * | ||
5 | * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of version 2 of the GNU General Public License as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, | ||
19 | * USA | ||
20 | * | ||
21 | * The full GNU General Public License is included in this distribution | ||
22 | * in the file called LICENSE.GPL. | ||
23 | * | ||
24 | * Contact Information: | ||
25 | * Intel Linux Wireless <ilw@linux.intel.com> | ||
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
27 | * | ||
28 | *****************************************************************************/ | ||
29 | |||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/module.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/sched.h> | ||
34 | |||
35 | #include "iwl-dev.h" | ||
36 | #include "iwl-core.h" | ||
37 | #include "iwl-sta.h" | ||
38 | #include "iwl-io.h" | ||
39 | #include "iwl-helpers.h" | ||
40 | #include "iwl-4965-hw.h" | ||
41 | #include "iwl-4965.h" | ||
42 | |||
43 | /* | ||
44 | * mac80211 queues, ACs, hardware queues, FIFOs. | ||
45 | * | ||
46 | * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues | ||
47 | * | ||
48 | * Mac80211 uses the following numbers, which we get from it | ||
49 | * by way of skb_get_queue_mapping(skb): | ||
50 | * | ||
51 | * VO 0 | ||
52 | * VI 1 | ||
53 | * BE 2 | ||
54 | * BK 3 | ||
55 | * | ||
56 | * | ||
57 | * Regular (not A-MPDU) frames are put into hardware queues corresponding | ||
58 | * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their | ||
59 | * own queue per aggregation session (RA/TID combination), such queues are | ||
60 | * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In | ||
61 | * order to map frames to the right queue, we also need an AC->hw queue | ||
62 | * mapping. This is implemented here. | ||
63 | * | ||
64 | * Due to the way hw queues are set up (by the hw specific modules like | ||
65 | * iwl-4965.c), the AC->hw queue mapping is the identity | ||
66 | * mapping. | ||
67 | */ | ||
68 | |||
69 | static const u8 tid_to_ac[] = { | ||
70 | IEEE80211_AC_BE, | ||
71 | IEEE80211_AC_BK, | ||
72 | IEEE80211_AC_BK, | ||
73 | IEEE80211_AC_BE, | ||
74 | IEEE80211_AC_VI, | ||
75 | IEEE80211_AC_VI, | ||
76 | IEEE80211_AC_VO, | ||
77 | IEEE80211_AC_VO | ||
78 | }; | ||
79 | |||
80 | static inline int il4965_get_ac_from_tid(u16 tid) | ||
81 | { | ||
82 | if (likely(tid < ARRAY_SIZE(tid_to_ac))) | ||
83 | return tid_to_ac[tid]; | ||
84 | |||
85 | /* no support for TIDs 8-15 yet */ | ||
86 | return -EINVAL; | ||
87 | } | ||
88 | |||
89 | static inline int | ||
90 | il4965_get_fifo_from_tid(struct il_rxon_context *ctx, u16 tid) | ||
91 | { | ||
92 | if (likely(tid < ARRAY_SIZE(tid_to_ac))) | ||
93 | return ctx->ac_to_fifo[tid_to_ac[tid]]; | ||
94 | |||
95 | /* no support for TIDs 8-15 yet */ | ||
96 | return -EINVAL; | ||
97 | } | ||
98 | |||
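
[Editor's note] Resolving a frame's FIFO is a two-table lookup: the TID indexes tid_to_ac, and the resulting AC indexes the context's ac_to_fifo map. A standalone sketch of the same chain; the ac_to_fifo contents here are invented for illustration:

    #include <stdio.h>

    enum { AC_VO, AC_VI, AC_BE, AC_BK };    /* mac80211 AC numbering */

    static const int tid_to_ac[8] = {
            AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO
    };

    /* Hypothetical AC -> hardware FIFO map for one context. */
    static const int ac_to_fifo[4] = { 3, 2, 1, 0 };

    static int fifo_from_tid(unsigned tid)
    {
            if (tid >= 8)
                    return -1;      /* TIDs 8-15 unsupported, as in the driver */
            return ac_to_fifo[tid_to_ac[tid]];
    }

    int main(void)
    {
            printf("TID 6 -> FIFO %d\n", fifo_from_tid(6)); /* VO -> 3 */
            return 0;
    }
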
99 | /* | ||
100 | * handle build REPLY_TX command notification. | ||
101 | */ | ||
102 | static void il4965_tx_cmd_build_basic(struct il_priv *il, | ||
103 | struct sk_buff *skb, | ||
104 | struct il_tx_cmd *tx_cmd, | ||
105 | struct ieee80211_tx_info *info, | ||
106 | struct ieee80211_hdr *hdr, | ||
107 | u8 std_id) | ||
108 | { | ||
109 | __le16 fc = hdr->frame_control; | ||
110 | __le32 tx_flags = tx_cmd->tx_flags; | ||
111 | |||
112 | tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; | ||
113 | if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { | ||
114 | tx_flags |= TX_CMD_FLG_ACK_MSK; | ||
115 | if (ieee80211_is_mgmt(fc)) | ||
116 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
117 | if (ieee80211_is_probe_resp(fc) && | ||
118 | !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) | ||
119 | tx_flags |= TX_CMD_FLG_TSF_MSK; | ||
120 | } else { | ||
121 | tx_flags &= (~TX_CMD_FLG_ACK_MSK); | ||
122 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
123 | } | ||
124 | |||
125 | if (ieee80211_is_back_req(fc)) | ||
126 | tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; | ||
127 | |||
128 | tx_cmd->sta_id = std_id; | ||
129 | if (ieee80211_has_morefrags(fc)) | ||
130 | tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; | ||
131 | |||
132 | if (ieee80211_is_data_qos(fc)) { | ||
133 | u8 *qc = ieee80211_get_qos_ctl(hdr); | ||
134 | tx_cmd->tid_tspec = qc[0] & 0xf; | ||
135 | tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; | ||
136 | } else { | ||
137 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
138 | } | ||
139 | |||
140 | il_tx_cmd_protection(il, info, fc, &tx_flags); | ||
141 | |||
142 | tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); | ||
143 | if (ieee80211_is_mgmt(fc)) { | ||
144 | if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) | ||
145 | tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); | ||
146 | else | ||
147 | tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); | ||
148 | } else { | ||
149 | tx_cmd->timeout.pm_frame_timeout = 0; | ||
150 | } | ||
151 | |||
152 | tx_cmd->driver_txop = 0; | ||
153 | tx_cmd->tx_flags = tx_flags; | ||
154 | tx_cmd->next_frame_len = 0; | ||
155 | } | ||
156 | |||
157 | #define RTS_DFAULT_RETRY_LIMIT 60 | ||
158 | |||
159 | static void il4965_tx_cmd_build_rate(struct il_priv *il, | ||
160 | struct il_tx_cmd *tx_cmd, | ||
161 | struct ieee80211_tx_info *info, | ||
162 | __le16 fc) | ||
163 | { | ||
164 | u32 rate_flags; | ||
165 | int rate_idx; | ||
166 | u8 rts_retry_limit; | ||
167 | u8 data_retry_limit; | ||
168 | u8 rate_plcp; | ||
169 | |||
170 | /* Set retry limit on DATA packets and Probe Responses */ | ||
171 | if (ieee80211_is_probe_resp(fc)) | ||
172 | data_retry_limit = 3; | ||
173 | else | ||
174 | data_retry_limit = IL4965_DEFAULT_TX_RETRY; | ||
175 | tx_cmd->data_retry_limit = data_retry_limit; | ||
176 | |||
177 | /* Set retry limit on RTS packets */ | ||
178 | rts_retry_limit = RTS_DFAULT_RETRY_LIMIT; | ||
179 | if (data_retry_limit < rts_retry_limit) | ||
180 | rts_retry_limit = data_retry_limit; | ||
181 | tx_cmd->rts_retry_limit = rts_retry_limit; | ||
182 | |||
183 | /* DATA packets will use the uCode station table for rate/antenna | ||
184 | * selection */ | ||
185 | if (ieee80211_is_data(fc)) { | ||
186 | tx_cmd->initial_rate_idx = 0; | ||
187 | tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; | ||
188 | return; | ||
189 | } | ||
190 | |||
191 | /* | ||
192 | * If the current TX rate stored in mac80211 has the MCS bit set, it's | ||
193 | * not really a TX rate. Thus, we use the lowest supported rate for | ||
194 | * this band. Also use the lowest supported rate if the stored rate | ||
195 | * idx is invalid. | ||
196 | */ | ||
197 | rate_idx = info->control.rates[0].idx; | ||
198 | if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || | ||
199 | rate_idx < 0 || rate_idx > RATE_COUNT_LEGACY) | ||
200 | rate_idx = rate_lowest_index(&il->bands[info->band], | ||
201 | info->control.sta); | ||
202 | /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ | ||
203 | if (info->band == IEEE80211_BAND_5GHZ) | ||
204 | rate_idx += IL_FIRST_OFDM_RATE; | ||
205 | /* Get PLCP rate for tx_cmd->rate_n_flags */ | ||
206 | rate_plcp = il_rates[rate_idx].plcp; | ||
207 | /* Zero out flags for this packet */ | ||
208 | rate_flags = 0; | ||
209 | |||
210 | /* Set CCK flag as needed */ | ||
211 | if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE) | ||
212 | rate_flags |= RATE_MCS_CCK_MSK; | ||
213 | |||
214 | /* Set up antennas */ | ||
215 | il->mgmt_tx_ant = il4965_toggle_tx_ant(il, il->mgmt_tx_ant, | ||
216 | il->hw_params.valid_tx_ant); | ||
217 | |||
218 | rate_flags |= il4965_ant_idx_to_flags(il->mgmt_tx_ant); | ||
219 | |||
220 | /* Set the rate in the TX cmd */ | ||
221 | tx_cmd->rate_n_flags = il4965_hw_set_rate_n_flags(rate_plcp, rate_flags); | ||
222 | } | ||
223 | |||
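
[Editor's note] The remap in il4965_tx_cmd_build_rate() exists because the legacy rate table puts the CCK rates first and 5 GHz has no CCK: mac80211's 5 GHz rate indices start at 0, while the driver table's OFDM entries start after the CCK block, so IL_FIRST_OFDM_RATE bridges the two. A toy illustration (table contents invented):

    #include <stdio.h>

    /* Invented driver-style table: 4 CCK rates, then OFDM rates. */
    static const int plcp_table[] = { 10, 20, 55, 110, 60, 90, 120 };
    #define FIRST_OFDM_RATE 4       /* index of the first OFDM entry */

    int main(void)
    {
            int mac80211_idx = 1;   /* second 5 GHz rate as mac80211 sees it */
            /* On 5 GHz, remap into the driver table past the CCK block. */
            int driver_idx = mac80211_idx + FIRST_OFDM_RATE;
            printf("plcp=%d\n", plcp_table[driver_idx]); /* 90 */
            return 0;
    }
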
224 | static void il4965_tx_cmd_build_hwcrypto(struct il_priv *il, | ||
225 | struct ieee80211_tx_info *info, | ||
226 | struct il_tx_cmd *tx_cmd, | ||
227 | struct sk_buff *skb_frag, | ||
228 | int sta_id) | ||
229 | { | ||
230 | struct ieee80211_key_conf *keyconf = info->control.hw_key; | ||
231 | |||
232 | switch (keyconf->cipher) { | ||
233 | case WLAN_CIPHER_SUITE_CCMP: | ||
234 | tx_cmd->sec_ctl = TX_CMD_SEC_CCM; | ||
235 | memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); | ||
236 | if (info->flags & IEEE80211_TX_CTL_AMPDU) | ||
237 | tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK; | ||
238 | D_TX("tx_cmd with AES hwcrypto\n"); | ||
239 | break; | ||
240 | |||
241 | case WLAN_CIPHER_SUITE_TKIP: | ||
242 | tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; | ||
243 | ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key); | ||
244 | D_TX("tx_cmd with tkip hwcrypto\n"); | ||
245 | break; | ||
246 | |||
247 | case WLAN_CIPHER_SUITE_WEP104: | ||
248 | tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; | ||
249 | /* fall through */ | ||
250 | case WLAN_CIPHER_SUITE_WEP40: | ||
251 | tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP | | ||
252 | (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT); | ||
253 | |||
254 | memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); | ||
255 | |||
256 | D_TX("Configuring packet for WEP encryption " | ||
257 | "with key %d\n", keyconf->keyidx); | ||
258 | break; | ||
259 | |||
260 | default: | ||
261 | IL_ERR("Unknown encode cipher %x\n", keyconf->cipher); | ||
262 | break; | ||
263 | } | ||
264 | } | ||
265 | |||
266 | /* | ||
267 | * start REPLY_TX command process | ||
268 | */ | ||
269 | int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb) | ||
270 | { | ||
271 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
272 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
273 | struct ieee80211_sta *sta = info->control.sta; | ||
274 | struct il_station_priv *sta_priv = NULL; | ||
275 | struct il_tx_queue *txq; | ||
276 | struct il_queue *q; | ||
277 | struct il_device_cmd *out_cmd; | ||
278 | struct il_cmd_meta *out_meta; | ||
279 | struct il_tx_cmd *tx_cmd; | ||
280 | struct il_rxon_context *ctx = &il->ctx; | ||
281 | int txq_id; | ||
282 | dma_addr_t phys_addr; | ||
283 | dma_addr_t txcmd_phys; | ||
284 | dma_addr_t scratch_phys; | ||
285 | u16 len, firstlen, secondlen; | ||
286 | u16 seq_number = 0; | ||
287 | __le16 fc; | ||
288 | u8 hdr_len; | ||
289 | u8 sta_id; | ||
290 | u8 wait_write_ptr = 0; | ||
291 | u8 tid = 0; | ||
292 | u8 *qc = NULL; | ||
293 | unsigned long flags; | ||
294 | bool is_agg = false; | ||
295 | |||
296 | if (info->control.vif) | ||
297 | ctx = il_rxon_ctx_from_vif(info->control.vif); | ||
298 | |||
299 | spin_lock_irqsave(&il->lock, flags); | ||
300 | if (il_is_rfkill(il)) { | ||
301 | D_DROP("Dropping - RF KILL\n"); | ||
302 | goto drop_unlock; | ||
303 | } | ||
304 | |||
305 | fc = hdr->frame_control; | ||
306 | |||
307 | #ifdef CONFIG_IWLEGACY_DEBUG | ||
308 | if (ieee80211_is_auth(fc)) | ||
309 | D_TX("Sending AUTH frame\n"); | ||
310 | else if (ieee80211_is_assoc_req(fc)) | ||
311 | D_TX("Sending ASSOC frame\n"); | ||
312 | else if (ieee80211_is_reassoc_req(fc)) | ||
313 | D_TX("Sending REASSOC frame\n"); | ||
314 | #endif | ||
315 | |||
316 | hdr_len = ieee80211_hdrlen(fc); | ||
317 | |||
318 | /* For management frames use the broadcast id, so as not to break aggregation */ | ||
319 | if (!ieee80211_is_data(fc)) | ||
320 | sta_id = ctx->bcast_sta_id; | ||
321 | else { | ||
322 | /* Find idx into station table for destination station */ | ||
323 | sta_id = il_sta_id_or_broadcast(il, ctx, info->control.sta); | ||
324 | |||
325 | if (sta_id == IL_INVALID_STATION) { | ||
326 | D_DROP("Dropping - INVALID STATION: %pM\n", | ||
327 | hdr->addr1); | ||
328 | goto drop_unlock; | ||
329 | } | ||
330 | } | ||
331 | |||
332 | D_TX("station Id %d\n", sta_id); | ||
333 | |||
334 | if (sta) | ||
335 | sta_priv = (void *)sta->drv_priv; | ||
336 | |||
337 | if (sta_priv && sta_priv->asleep && | ||
338 | (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) { | ||
339 | /* | ||
340 | * This sends an asynchronous command to the device, | ||
341 | * but we can rely on it being processed before the | ||
342 | * next frame is processed -- and the next frame to | ||
343 | * this station is the one that will consume this | ||
344 | * counter. | ||
345 | * For now set the counter to just 1 since we do not | ||
346 | * support uAPSD yet. | ||
347 | */ | ||
348 | il4965_sta_modify_sleep_tx_count(il, sta_id, 1); | ||
349 | } | ||
350 | |||
351 | /* | ||
352 | * Send this frame after DTIM -- there's a special queue | ||
353 | * reserved for this for contexts that support AP mode. | ||
354 | */ | ||
355 | if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { | ||
356 | txq_id = ctx->mcast_queue; | ||
357 | /* | ||
358 | * The microcode will clear the more data | ||
359 | * bit in the last frame it transmits. | ||
360 | */ | ||
361 | hdr->frame_control |= | ||
362 | cpu_to_le16(IEEE80211_FCTL_MOREDATA); | ||
363 | } else | ||
364 | txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)]; | ||
365 | |||
366 | /* irqs already disabled/saved above when locking il->lock */ | ||
367 | spin_lock(&il->sta_lock); | ||
368 | |||
369 | if (ieee80211_is_data_qos(fc)) { | ||
370 | qc = ieee80211_get_qos_ctl(hdr); | ||
371 | tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; | ||
372 | if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) { | ||
373 | spin_unlock(&il->sta_lock); | ||
374 | goto drop_unlock; | ||
375 | } | ||
376 | seq_number = il->stations[sta_id].tid[tid].seq_number; | ||
377 | seq_number &= IEEE80211_SCTL_SEQ; | ||
378 | hdr->seq_ctrl = hdr->seq_ctrl & | ||
379 | cpu_to_le16(IEEE80211_SCTL_FRAG); | ||
380 | hdr->seq_ctrl |= cpu_to_le16(seq_number); | ||
381 | seq_number += 0x10; | ||
382 | /* aggregation is on for this <sta,tid> */ | ||
383 | if (info->flags & IEEE80211_TX_CTL_AMPDU && | ||
384 | il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) { | ||
385 | txq_id = il->stations[sta_id].tid[tid].agg.txq_id; | ||
386 | is_agg = true; | ||
387 | } | ||
388 | } | ||
389 | |||
390 | txq = &il->txq[txq_id]; | ||
391 | q = &txq->q; | ||
392 | |||
393 | if (unlikely(il_queue_space(q) < q->high_mark)) { | ||
394 | spin_unlock(&il->sta_lock); | ||
395 | goto drop_unlock; | ||
396 | } | ||
397 | |||
398 | if (ieee80211_is_data_qos(fc)) { | ||
399 | il->stations[sta_id].tid[tid].tfds_in_queue++; | ||
400 | if (!ieee80211_has_morefrags(fc)) | ||
401 | il->stations[sta_id].tid[tid].seq_number = seq_number; | ||
402 | } | ||
403 | |||
404 | spin_unlock(&il->sta_lock); | ||
405 | |||
406 | /* Set up driver data for this TFD */ | ||
407 | memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info)); | ||
408 | txq->txb[q->write_ptr].skb = skb; | ||
409 | txq->txb[q->write_ptr].ctx = ctx; | ||
410 | |||
411 | /* Set up first empty entry in queue's array of Tx/cmd buffers */ | ||
412 | out_cmd = txq->cmd[q->write_ptr]; | ||
413 | out_meta = &txq->meta[q->write_ptr]; | ||
414 | tx_cmd = &out_cmd->cmd.tx; | ||
415 | memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); | ||
416 | memset(tx_cmd, 0, sizeof(struct il_tx_cmd)); | ||
417 | |||
418 | /* | ||
419 | * Set up the Tx-command (not MAC!) header. | ||
420 | * Store the chosen Tx queue and TFD idx within the sequence field; | ||
421 | * after Tx, uCode's Tx response will return this value so driver can | ||
422 | * locate the frame within the tx queue and do post-tx processing. | ||
423 | */ | ||
424 | out_cmd->hdr.cmd = REPLY_TX; | ||
425 | out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | | ||
426 | IDX_TO_SEQ(q->write_ptr))); | ||
427 | |||
428 | /* Copy MAC header from skb into command buffer */ | ||
429 | memcpy(tx_cmd->hdr, hdr, hdr_len); | ||
430 | |||
431 | |||
432 | /* Total # bytes to be transmitted */ | ||
433 | len = (u16)skb->len; | ||
434 | tx_cmd->len = cpu_to_le16(len); | ||
435 | |||
436 | if (info->control.hw_key) | ||
437 | il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id); | ||
438 | |||
439 | /* TODO need this for burst mode later on */ | ||
440 | il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id); | ||
441 | il_dbg_log_tx_data_frame(il, len, hdr); | ||
442 | |||
443 | il4965_tx_cmd_build_rate(il, tx_cmd, info, fc); | ||
444 | |||
445 | il_update_stats(il, true, fc, len); | ||
446 | /* | ||
447 | * Use the first empty entry in this queue's command buffer array | ||
448 | * to contain the Tx command and MAC header concatenated together | ||
449 | * (payload data will be in another buffer). | ||
450 | * Size of this varies, due to varying MAC header length. | ||
451 | * If end is not dword aligned, we'll have 2 extra bytes at the end | ||
452 | * of the MAC header (device reads on dword boundaries). | ||
453 | * We'll tell device about this padding later. | ||
454 | */ | ||
455 | len = sizeof(struct il_tx_cmd) + | ||
456 | sizeof(struct il_cmd_header) + hdr_len; | ||
457 | firstlen = (len + 3) & ~3; | ||
458 | |||
459 | /* Tell NIC about any 2-byte padding after MAC header */ | ||
460 | if (firstlen != len) | ||
461 | tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; | ||
462 | |||
463 | /* Physical address of this Tx command's header (not MAC header!), | ||
464 | * within command buffer array. */ | ||
465 | txcmd_phys = pci_map_single(il->pci_dev, | ||
466 | &out_cmd->hdr, firstlen, | ||
467 | PCI_DMA_BIDIRECTIONAL); | ||
468 | dma_unmap_addr_set(out_meta, mapping, txcmd_phys); | ||
469 | dma_unmap_len_set(out_meta, len, firstlen); | ||
470 | /* Add buffer containing Tx command and MAC(!) header to TFD's | ||
471 | * first entry */ | ||
472 | il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, | ||
473 | txcmd_phys, firstlen, 1, 0); | ||
474 | |||
475 | if (!ieee80211_has_morefrags(hdr->frame_control)) { | ||
476 | txq->need_update = 1; | ||
477 | } else { | ||
478 | wait_write_ptr = 1; | ||
479 | txq->need_update = 0; | ||
480 | } | ||
481 | |||
482 | /* Set up TFD's 2nd entry to point directly to remainder of skb, | ||
483 | * if any (802.11 null frames have no payload). */ | ||
484 | secondlen = skb->len - hdr_len; | ||
485 | if (secondlen > 0) { | ||
486 | phys_addr = pci_map_single(il->pci_dev, skb->data + hdr_len, | ||
487 | secondlen, PCI_DMA_TODEVICE); | ||
488 | il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, | ||
489 | phys_addr, secondlen, | ||
490 | 0, 0); | ||
491 | } | ||
492 | |||
493 | scratch_phys = txcmd_phys + sizeof(struct il_cmd_header) + | ||
494 | offsetof(struct il_tx_cmd, scratch); | ||
495 | |||
496 | /* take back ownership of DMA buffer to enable update */ | ||
497 | pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, | ||
498 | firstlen, PCI_DMA_BIDIRECTIONAL); | ||
499 | tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); | ||
500 | tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys); | ||
501 | |||
502 | D_TX("sequence nr = 0X%x\n", | ||
503 | le16_to_cpu(out_cmd->hdr.sequence)); | ||
504 | D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); | ||
505 | il_print_hex_dump(il, IL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd)); | ||
506 | il_print_hex_dump(il, IL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); | ||
507 | |||
508 | /* Set up entry for this TFD in Tx byte-count array */ | ||
509 | if (info->flags & IEEE80211_TX_CTL_AMPDU) | ||
510 | il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq, | ||
511 | le16_to_cpu(tx_cmd->len)); | ||
512 | |||
513 | pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, | ||
514 | firstlen, PCI_DMA_BIDIRECTIONAL); | ||
515 | |||
516 | /* Tell device the write idx *just past* this latest filled TFD */ | ||
517 | q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd); | ||
518 | il_txq_update_write_ptr(il, txq); | ||
519 | spin_unlock_irqrestore(&il->lock, flags); | ||
520 | |||
521 | /* | ||
522 | * At this point the frame is "transmitted" successfully | ||
523 | * and we will get a TX status notification eventually, | ||
524 | * regardless of the value of ret. "ret" only indicates | ||
525 | * whether or not we should update the write pointer. | ||
526 | */ | ||
527 | |||
528 | /* | ||
529 | * Avoid atomic ops if it isn't an associated client. | ||
530 | * Also, if this is a packet for aggregation, don't | ||
531 | * increase the counter because the ucode will stop | ||
532 | * aggregation queues when their respective station | ||
533 | * goes to sleep. | ||
534 | */ | ||
535 | if (sta_priv && sta_priv->client && !is_agg) | ||
536 | atomic_inc(&sta_priv->pending_frames); | ||
537 | |||
538 | if (il_queue_space(q) < q->high_mark && il->mac80211_registered) { | ||
539 | if (wait_write_ptr) { | ||
540 | spin_lock_irqsave(&il->lock, flags); | ||
541 | txq->need_update = 1; | ||
542 | il_txq_update_write_ptr(il, txq); | ||
543 | spin_unlock_irqrestore(&il->lock, flags); | ||
544 | } else { | ||
545 | il_stop_queue(il, txq); | ||
546 | } | ||
547 | } | ||
548 | |||
549 | return 0; | ||
550 | |||
551 | drop_unlock: | ||
552 | spin_unlock_irqrestore(&il->lock, flags); | ||
553 | return -1; | ||
554 | } | ||
555 | |||
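
[Editor's note] The firstlen computation in il4965_tx_skb() rounds the command header, Tx command, and MAC header up to a 4-byte boundary, because the device reads on dword boundaries; when the round-up adds padding, TX_CMD_FLG_MH_PAD_MSK tells the device so. The arithmetic in isolation (sizes invented for the example):

    #include <stdio.h>

    int main(void)
    {
            /* e.g. command header + tx command + a 26-byte MAC header */
            unsigned len = 26 + 40;
            unsigned firstlen = (len + 3) & ~3u;    /* round up to multiple of 4 */

            printf("len=%u firstlen=%u pad=%u\n", len, firstlen, firstlen - len);
            /* pad != 0 is exactly the condition for setting the MH_PAD flag */
            return 0;
    }
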
556 | static inline int il4965_alloc_dma_ptr(struct il_priv *il, | ||
557 | struct il_dma_ptr *ptr, size_t size) | ||
558 | { | ||
559 | ptr->addr = dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma, | ||
560 | GFP_KERNEL); | ||
561 | if (!ptr->addr) | ||
562 | return -ENOMEM; | ||
563 | ptr->size = size; | ||
564 | return 0; | ||
565 | } | ||
566 | |||
567 | static inline void il4965_free_dma_ptr(struct il_priv *il, | ||
568 | struct il_dma_ptr *ptr) | ||
569 | { | ||
570 | if (unlikely(!ptr->addr)) | ||
571 | return; | ||
572 | |||
573 | dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma); | ||
574 | memset(ptr, 0, sizeof(*ptr)); | ||
575 | } | ||
576 | |||
577 | /** | ||
578 | * il4965_hw_txq_ctx_free - Free TXQ Context | ||
579 | * | ||
580 | * Destroy all TX DMA queues and structures | ||
581 | */ | ||
582 | void il4965_hw_txq_ctx_free(struct il_priv *il) | ||
583 | { | ||
584 | int txq_id; | ||
585 | |||
586 | /* Tx queues */ | ||
587 | if (il->txq) { | ||
588 | for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) | ||
589 | if (txq_id == il->cmd_queue) | ||
590 | il_cmd_queue_free(il); | ||
591 | else | ||
592 | il_tx_queue_free(il, txq_id); | ||
593 | } | ||
594 | il4965_free_dma_ptr(il, &il->kw); | ||
595 | |||
596 | il4965_free_dma_ptr(il, &il->scd_bc_tbls); | ||
597 | |||
598 | /* free tx queue structure */ | ||
599 | il_txq_mem(il); | ||
600 | } | ||
601 | |||
602 | /** | ||
603 | * il4965_txq_ctx_alloc - allocate TX queue context | ||
604 | * Allocate all Tx DMA structures and initialize them | ||
605 | * | ||
606 | * @param il | ||
607 | * @return error code | ||
608 | */ | ||
609 | int il4965_txq_ctx_alloc(struct il_priv *il) | ||
610 | { | ||
611 | int ret; | ||
612 | int txq_id, slots_num; | ||
613 | unsigned long flags; | ||
614 | |||
615 | /* Free all tx/cmd queues and keep-warm buffer */ | ||
616 | il4965_hw_txq_ctx_free(il); | ||
617 | |||
618 | ret = il4965_alloc_dma_ptr(il, &il->scd_bc_tbls, | ||
619 | il->hw_params.scd_bc_tbls_size); | ||
620 | if (ret) { | ||
621 | IL_ERR("Scheduler BC Table allocation failed\n"); | ||
622 | goto error_bc_tbls; | ||
623 | } | ||
624 | /* Alloc keep-warm buffer */ | ||
625 | ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE); | ||
626 | if (ret) { | ||
627 | IL_ERR("Keep Warm allocation failed\n"); | ||
628 | goto error_kw; | ||
629 | } | ||
630 | |||
631 | /* allocate tx queue structure */ | ||
632 | ret = il_alloc_txq_mem(il); | ||
633 | if (ret) | ||
634 | goto error; | ||
635 | |||
636 | spin_lock_irqsave(&il->lock, flags); | ||
637 | |||
638 | /* Turn off all Tx DMA fifos */ | ||
639 | il4965_txq_set_sched(il, 0); | ||
640 | |||
641 | /* Tell NIC where to find the "keep warm" buffer */ | ||
642 | il_wr(il, FH_KW_MEM_ADDR_REG, il->kw.dma >> 4); | ||
643 | |||
644 | spin_unlock_irqrestore(&il->lock, flags); | ||
645 | |||
646 | /* Alloc and init all Tx queues, including the command queue (#4/#9) */ | ||
647 | for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) { | ||
648 | slots_num = (txq_id == il->cmd_queue) ? | ||
649 | TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | ||
650 | ret = il_tx_queue_init(il, | ||
651 | &il->txq[txq_id], slots_num, | ||
652 | txq_id); | ||
653 | if (ret) { | ||
654 | IL_ERR("Tx %d queue init failed\n", txq_id); | ||
655 | goto error; | ||
656 | } | ||
657 | } | ||
658 | |||
659 | return ret; | ||
660 | |||
661 | error: | ||
662 | il4965_hw_txq_ctx_free(il); | ||
663 | il4965_free_dma_ptr(il, &il->kw); | ||
664 | error_kw: | ||
665 | il4965_free_dma_ptr(il, &il->scd_bc_tbls); | ||
666 | error_bc_tbls: | ||
667 | return ret; | ||
668 | } | ||
669 | |||
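
[Editor's note] il4965_txq_ctx_alloc() unwinds its error path in reverse allocation order through labelled gotos, so each failure point frees exactly what was already set up. The shape of the idiom, reduced to two fake resources in a userspace sketch:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            void *bc_tbls = NULL, *kw = NULL;
            int ret = -1;

            bc_tbls = malloc(64);
            if (!bc_tbls)
                    goto error_bc_tbls;
            kw = malloc(64);
            if (!kw)
                    goto error_kw;

            puts("both allocations succeeded");
            free(kw);
            free(bc_tbls);
            return 0;

    error_kw:
            free(bc_tbls);  /* undo the earlier allocation only */
    error_bc_tbls:
            return ret;
    }
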
670 | void il4965_txq_ctx_reset(struct il_priv *il) | ||
671 | { | ||
672 | int txq_id, slots_num; | ||
673 | unsigned long flags; | ||
674 | |||
675 | spin_lock_irqsave(&il->lock, flags); | ||
676 | |||
677 | /* Turn off all Tx DMA fifos */ | ||
678 | il4965_txq_set_sched(il, 0); | ||
679 | |||
680 | /* Tell NIC where to find the "keep warm" buffer */ | ||
681 | il_wr(il, FH_KW_MEM_ADDR_REG, il->kw.dma >> 4); | ||
682 | |||
683 | spin_unlock_irqrestore(&il->lock, flags); | ||
684 | |||
685 | /* Alloc and init all Tx queues, including the command queue (#4) */ | ||
686 | for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) { | ||
687 | slots_num = txq_id == il->cmd_queue ? | ||
688 | TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; | ||
689 | il_tx_queue_reset(il, &il->txq[txq_id], | ||
690 | slots_num, txq_id); | ||
691 | } | ||
692 | } | ||
693 | |||
694 | /** | ||
695 | * il4965_txq_ctx_stop - Stop all Tx DMA channels | ||
696 | */ | ||
697 | void il4965_txq_ctx_stop(struct il_priv *il) | ||
698 | { | ||
699 | int ch, txq_id; | ||
700 | unsigned long flags; | ||
701 | |||
702 | /* Turn off all Tx DMA fifos */ | ||
703 | spin_lock_irqsave(&il->lock, flags); | ||
704 | |||
705 | il4965_txq_set_sched(il, 0); | ||
706 | |||
707 | /* Stop each Tx DMA channel, and wait for it to be idle */ | ||
708 | for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) { | ||
709 | il_wr(il, | ||
710 | FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); | ||
711 | if (il_poll_bit(il, FH_TSSR_TX_STATUS_REG, | ||
712 | FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), | ||
713 | 1000)) | ||
714 | IL_ERR("Timeout while stopping" | ||
715 | " DMA channel %d [0x%08x]", ch, | ||
716 | il_rd(il, | ||
717 | FH_TSSR_TX_STATUS_REG)); | ||
718 | } | ||
719 | spin_unlock_irqrestore(&il->lock, flags); | ||
720 | |||
721 | if (!il->txq) | ||
722 | return; | ||
723 | |||
724 | /* Unmap DMA from host system and free skb's */ | ||
725 | for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) | ||
726 | if (txq_id == il->cmd_queue) | ||
727 | il_cmd_queue_unmap(il); | ||
728 | else | ||
729 | il_tx_queue_unmap(il, txq_id); | ||
730 | } | ||
731 | |||
732 | /* | ||
733 | * Find first available (lowest unused) Tx Queue, mark it "active". | ||
734 | * Called only when finding queue for aggregation. | ||
735 | * Should never return anything < 7, because queues 0-6 should | ||
736 | * already be in use as EDCA ACs (0-3), command (4), reserved (5, 6) | ||
737 | */ | ||
738 | static int il4965_txq_ctx_activate_free(struct il_priv *il) | ||
739 | { | ||
740 | int txq_id; | ||
741 | |||
742 | for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) | ||
743 | if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk)) | ||
744 | return txq_id; | ||
745 | return -1; | ||
746 | } | ||
747 | |||
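
[Editor's note] test_and_set_bit() makes the scan in il4965_txq_ctx_activate_free() race-free: the first caller to flip a clear bit owns that queue. The same idea with a plain, non-atomic, single-threaded bitmap, purely for illustration:

    #include <stdio.h>

    #define MAX_TXQ 16

    static unsigned long active_msk = 0x7f; /* queues 0-6 already in use */

    /* Non-atomic stand-in for test_and_set_bit(). */
    static int test_and_set(int bit, unsigned long *mask)
    {
            int was_set = (*mask >> bit) & 1;
            *mask |= 1UL << bit;
            return was_set;
    }

    int main(void)
    {
            for (int txq_id = 0; txq_id < MAX_TXQ; txq_id++)
                    if (!test_and_set(txq_id, &active_msk)) {
                            printf("claimed queue %d\n", txq_id); /* 7 */
                            return 0;
                    }
            puts("no free queue");
            return 1;
    }
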
748 | /** | ||
749 | * il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration | ||
750 | */ | ||
751 | static void il4965_tx_queue_stop_scheduler(struct il_priv *il, | ||
752 | u16 txq_id) | ||
753 | { | ||
754 | /* Simply stop the queue, but don't change any configuration; | ||
755 | * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ | ||
756 | il_wr_prph(il, | ||
757 | IL49_SCD_QUEUE_STATUS_BITS(txq_id), | ||
758 | (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)| | ||
759 | (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); | ||
760 | } | ||
761 | |||
762 | /** | ||
763 | * il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue | ||
764 | */ | ||
765 | static int il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, | ||
766 | u16 txq_id) | ||
767 | { | ||
768 | u32 tbl_dw_addr; | ||
769 | u32 tbl_dw; | ||
770 | u16 scd_q2ratid; | ||
771 | |||
772 | scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK; | ||
773 | |||
774 | tbl_dw_addr = il->scd_base_addr + | ||
775 | IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); | ||
776 | |||
777 | tbl_dw = il_read_targ_mem(il, tbl_dw_addr); | ||
778 | |||
779 | if (txq_id & 0x1) | ||
780 | tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); | ||
781 | else | ||
782 | tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); | ||
783 | |||
784 | il_write_targ_mem(il, tbl_dw_addr, tbl_dw); | ||
785 | |||
786 | return 0; | ||
787 | } | ||
788 | |||
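
[Editor's note] Each scheduler translate-table dword holds two queues' RA/TID mappings, so il4965_tx_queue_set_q2ratid() does a read-modify-write that keeps the neighbouring queue's half intact: odd queue numbers take the upper 16 bits, even ones the lower. The masking in isolation, with invented values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t tbl_dw = 0xAAAA5555;   /* pretend current table contents */
            uint16_t ra_tid = 0x1234;       /* new mapping to install */
            int txq_id = 7;

            if (txq_id & 0x1)               /* odd queue -> upper halfword */
                    tbl_dw = ((uint32_t)ra_tid << 16) | (tbl_dw & 0x0000FFFF);
            else                            /* even queue -> lower halfword */
                    tbl_dw = ra_tid | (tbl_dw & 0xFFFF0000);

            printf("0x%08X\n", (unsigned)tbl_dw);   /* 0x12345555 */
            return 0;
    }
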
789 | /** | ||
790 | * il4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue | ||
791 | * | ||
792 | * NOTE: txq_id must be at least IL49_FIRST_AMPDU_QUEUE, | ||
793 | * i.e. it must be one of the higher queues used for aggregation | ||
794 | */ | ||
795 | static int il4965_txq_agg_enable(struct il_priv *il, int txq_id, | ||
796 | int tx_fifo, int sta_id, int tid, u16 ssn_idx) | ||
797 | { | ||
798 | unsigned long flags; | ||
799 | u16 ra_tid; | ||
800 | int ret; | ||
801 | |||
802 | if ((IL49_FIRST_AMPDU_QUEUE > txq_id) || | ||
803 | (IL49_FIRST_AMPDU_QUEUE + | ||
804 | il->cfg->base_params->num_of_ampdu_queues <= txq_id)) { | ||
805 | IL_WARN( | ||
806 | "queue number out of range: %d, must be %d to %d\n", | ||
807 | txq_id, IL49_FIRST_AMPDU_QUEUE, | ||
808 | IL49_FIRST_AMPDU_QUEUE + | ||
809 | il->cfg->base_params->num_of_ampdu_queues - 1); | ||
810 | return -EINVAL; | ||
811 | } | ||
812 | |||
813 | ra_tid = BUILD_RAxTID(sta_id, tid); | ||
814 | |||
815 | /* Modify device's station table to Tx this TID */ | ||
816 | ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid); | ||
817 | if (ret) | ||
818 | return ret; | ||
819 | |||
820 | spin_lock_irqsave(&il->lock, flags); | ||
821 | |||
822 | /* Stop this Tx queue before configuring it */ | ||
823 | il4965_tx_queue_stop_scheduler(il, txq_id); | ||
824 | |||
825 | /* Map receiver-address / traffic-ID to this queue */ | ||
826 | il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id); | ||
827 | |||
828 | /* Set this queue as a chain-building queue */ | ||
829 | il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id)); | ||
830 | |||
831 | /* Place first TFD at idx corresponding to start sequence number. | ||
832 | * Assumes that ssn_idx is valid (!= 0xFFF) */ | ||
833 | il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); | ||
834 | il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); | ||
835 | il4965_set_wr_ptrs(il, txq_id, ssn_idx); | ||
836 | |||
837 | /* Set up Tx win size and frame limit for this queue */ | ||
838 | il_write_targ_mem(il, | ||
839 | il->scd_base_addr + IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id), | ||
840 | (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & | ||
841 | IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); | ||
842 | |||
843 | il_write_targ_mem(il, il->scd_base_addr + | ||
844 | IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), | ||
845 | (SCD_FRAME_LIMIT << IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) | ||
846 | & IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); | ||
847 | |||
848 | il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id)); | ||
849 | |||
850 | /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ | ||
851 | il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1); | ||
852 | |||
853 | spin_unlock_irqrestore(&il->lock, flags); | ||
854 | |||
855 | return 0; | ||
856 | } | ||
857 | |||
858 | |||
859 | int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif, | ||
860 | struct ieee80211_sta *sta, u16 tid, u16 *ssn) | ||
861 | { | ||
862 | int sta_id; | ||
863 | int tx_fifo; | ||
864 | int txq_id; | ||
865 | int ret; | ||
866 | unsigned long flags; | ||
867 | struct il_tid_data *tid_data; | ||
868 | |||
869 | tx_fifo = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid); | ||
870 | if (unlikely(tx_fifo < 0)) | ||
871 | return tx_fifo; | ||
872 | |||
873 | IL_WARN("%s on ra = %pM tid = %d\n", | ||
874 | __func__, sta->addr, tid); | ||
875 | |||
876 | sta_id = il_sta_id(sta); | ||
877 | if (sta_id == IL_INVALID_STATION) { | ||
878 | IL_ERR("Start AGG on invalid station\n"); | ||
879 | return -ENXIO; | ||
880 | } | ||
881 | if (unlikely(tid >= MAX_TID_COUNT)) | ||
882 | return -EINVAL; | ||
883 | |||
884 | if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) { | ||
885 | IL_ERR("Start AGG when state is not IL_AGG_OFF !\n"); | ||
886 | return -ENXIO; | ||
887 | } | ||
888 | |||
889 | txq_id = il4965_txq_ctx_activate_free(il); | ||
890 | if (txq_id == -1) { | ||
891 | IL_ERR("No free aggregation queue available\n"); | ||
892 | return -ENXIO; | ||
893 | } | ||
894 | |||
895 | spin_lock_irqsave(&il->sta_lock, flags); | ||
896 | tid_data = &il->stations[sta_id].tid[tid]; | ||
897 | *ssn = SEQ_TO_SN(tid_data->seq_number); | ||
898 | tid_data->agg.txq_id = txq_id; | ||
899 | il_set_swq_id(&il->txq[txq_id], | ||
900 | il4965_get_ac_from_tid(tid), txq_id); | ||
901 | spin_unlock_irqrestore(&il->sta_lock, flags); | ||
902 | |||
903 | ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, | ||
904 | sta_id, tid, *ssn); | ||
905 | if (ret) | ||
906 | return ret; | ||
907 | |||
908 | spin_lock_irqsave(&il->sta_lock, flags); | ||
909 | tid_data = &il->stations[sta_id].tid[tid]; | ||
910 | if (tid_data->tfds_in_queue == 0) { | ||
911 | D_HT("HW queue is empty\n"); | ||
912 | tid_data->agg.state = IL_AGG_ON; | ||
913 | ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); | ||
914 | } else { | ||
915 | D_HT( | ||
916 | "HW queue is NOT empty: %d packets in HW queue\n", | ||
917 | tid_data->tfds_in_queue); | ||
918 | tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA; | ||
919 | } | ||
920 | spin_unlock_irqrestore(&il->sta_lock, flags); | ||
921 | return ret; | ||
922 | } | ||
923 | |||
924 | /** | ||
925 | * txq_id must be at least IL49_FIRST_AMPDU_QUEUE | ||
926 | * il->lock must be held by the caller | ||
927 | */ | ||
928 | static int il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, | ||
929 | u16 ssn_idx, u8 tx_fifo) | ||
930 | { | ||
931 | if ((IL49_FIRST_AMPDU_QUEUE > txq_id) || | ||
932 | (IL49_FIRST_AMPDU_QUEUE + | ||
933 | il->cfg->base_params->num_of_ampdu_queues <= txq_id)) { | ||
934 | IL_WARN( | ||
935 | "queue number out of range: %d, must be %d to %d\n", | ||
936 | txq_id, IL49_FIRST_AMPDU_QUEUE, | ||
937 | IL49_FIRST_AMPDU_QUEUE + | ||
938 | il->cfg->base_params->num_of_ampdu_queues - 1); | ||
939 | return -EINVAL; | ||
940 | } | ||
941 | |||
942 | il4965_tx_queue_stop_scheduler(il, txq_id); | ||
943 | |||
944 | il_clear_bits_prph(il, | ||
945 | IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id)); | ||
946 | |||
947 | il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); | ||
948 | il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); | ||
949 | /* assumes that ssn_idx is valid (!= 0xFFF) */ | ||
950 | il4965_set_wr_ptrs(il, txq_id, ssn_idx); | ||
951 | |||
952 | il_clear_bits_prph(il, | ||
953 | IL49_SCD_INTERRUPT_MASK, (1 << txq_id)); | ||
954 | il_txq_ctx_deactivate(il, txq_id); | ||
955 | il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0); | ||
956 | |||
957 | return 0; | ||
958 | } | ||
959 | |||
960 | int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif, | ||
961 | struct ieee80211_sta *sta, u16 tid) | ||
962 | { | ||
963 | int tx_fifo_id, txq_id, sta_id, ssn; | ||
964 | struct il_tid_data *tid_data; | ||
965 | int write_ptr, read_ptr; | ||
966 | unsigned long flags; | ||
967 | |||
968 | tx_fifo_id = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid); | ||
969 | if (unlikely(tx_fifo_id < 0)) | ||
970 | return tx_fifo_id; | ||
971 | |||
972 | sta_id = il_sta_id(sta); | ||
973 | |||
974 | if (sta_id == IL_INVALID_STATION) { | ||
975 | IL_ERR("Invalid station for AGG tid %d\n", tid); | ||
976 | return -ENXIO; | ||
977 | } | ||
978 | |||
979 | spin_lock_irqsave(&il->sta_lock, flags); | ||
980 | |||
981 | tid_data = &il->stations[sta_id].tid[tid]; | ||
982 | ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; | ||
983 | txq_id = tid_data->agg.txq_id; | ||
984 | |||
985 | switch (il->stations[sta_id].tid[tid].agg.state) { | ||
986 | case IL_EMPTYING_HW_QUEUE_ADDBA: | ||
987 | /* | ||
988 | * This can happen if the peer stops aggregation | ||
989 | * again before we've had a chance to drain the | ||
990 | * queue we selected previously, i.e. before the | ||
991 | * session was really started completely. | ||
992 | */ | ||
993 | D_HT("AGG stop before setup done\n"); | ||
994 | goto turn_off; | ||
995 | case IL_AGG_ON: | ||
996 | break; | ||
997 | default: | ||
998 | IL_WARN("Stopping AGG while state not ON or starting\n"); | ||
999 | } | ||
1000 | |||
1001 | write_ptr = il->txq[txq_id].q.write_ptr; | ||
1002 | read_ptr = il->txq[txq_id].q.read_ptr; | ||
1003 | |||
1004 | /* The queue is not empty */ | ||
1005 | if (write_ptr != read_ptr) { | ||
1006 | D_HT("Stopping a non empty AGG HW QUEUE\n"); | ||
1007 | il->stations[sta_id].tid[tid].agg.state = | ||
1008 | IL_EMPTYING_HW_QUEUE_DELBA; | ||
1009 | spin_unlock_irqrestore(&il->sta_lock, flags); | ||
1010 | return 0; | ||
1011 | } | ||
1012 | |||
1013 | D_HT("HW queue is empty\n"); | ||
1014 | turn_off: | ||
1015 | il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF; | ||
1016 | |||
1017 | /* do not restore/save irqs */ | ||
1018 | spin_unlock(&il->sta_lock); | ||
1019 | spin_lock(&il->lock); | ||
1020 | |||
1021 | /* | ||
1022 | * The only reason this call can fail is a queue number out of | ||
1023 | * range, which can happen if the uCode is reloaded and all the | ||
1024 | * station information is lost. If it is outside the range, there | ||
1025 | * is no need to deactivate the uCode queue; just return "success" | ||
1026 | * to let mac80211 clean up its own data. | ||
1027 | */ | ||
1028 | il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id); | ||
1029 | spin_unlock_irqrestore(&il->lock, flags); | ||
1030 | |||
1031 | ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); | ||
1032 | |||
1033 | return 0; | ||
1034 | } | ||
1035 | |||
1036 | int il4965_txq_check_empty(struct il_priv *il, | ||
1037 | int sta_id, u8 tid, int txq_id) | ||
1038 | { | ||
1039 | struct il_queue *q = &il->txq[txq_id].q; | ||
1040 | u8 *addr = il->stations[sta_id].sta.sta.addr; | ||
1041 | struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid]; | ||
1042 | struct il_rxon_context *ctx; | ||
1043 | |||
1044 | ctx = &il->ctx; | ||
1045 | |||
1046 | lockdep_assert_held(&il->sta_lock); | ||
1047 | |||
1048 | switch (il->stations[sta_id].tid[tid].agg.state) { | ||
1049 | case IL_EMPTYING_HW_QUEUE_DELBA: | ||
1050 | /* We are reclaiming the last packet of the | ||
1051 | * aggregated HW queue */ | ||
1052 | if (txq_id == tid_data->agg.txq_id && | ||
1053 | q->read_ptr == q->write_ptr) { | ||
1054 | u16 ssn = SEQ_TO_SN(tid_data->seq_number); | ||
1055 | int tx_fifo = il4965_get_fifo_from_tid(ctx, tid); | ||
1056 | D_HT( | ||
1057 | "HW queue empty: continue DELBA flow\n"); | ||
1058 | il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo); | ||
1059 | tid_data->agg.state = IL_AGG_OFF; | ||
1060 | ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid); | ||
1061 | } | ||
1062 | break; | ||
1063 | case IL_EMPTYING_HW_QUEUE_ADDBA: | ||
1064 | /* We are reclaiming the last packet of the queue */ | ||
1065 | if (tid_data->tfds_in_queue == 0) { | ||
1066 | D_HT( | ||
1067 | "HW queue empty: continue ADDBA flow\n"); | ||
1068 | tid_data->agg.state = IL_AGG_ON; | ||
1069 | ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid); | ||
1070 | } | ||
1071 | break; | ||
1072 | } | ||
1073 | |||
1074 | return 0; | ||
1075 | } | ||
1076 | |||
1077 | static void il4965_non_agg_tx_status(struct il_priv *il, | ||
1078 | struct il_rxon_context *ctx, | ||
1079 | const u8 *addr1) | ||
1080 | { | ||
1081 | struct ieee80211_sta *sta; | ||
1082 | struct il_station_priv *sta_priv; | ||
1083 | |||
1084 | rcu_read_lock(); | ||
1085 | sta = ieee80211_find_sta(ctx->vif, addr1); | ||
1086 | if (sta) { | ||
1087 | sta_priv = (void *)sta->drv_priv; | ||
1088 | /* avoid atomic ops if this isn't a client */ | ||
1089 | if (sta_priv->client && | ||
1090 | atomic_dec_return(&sta_priv->pending_frames) == 0) | ||
1091 | ieee80211_sta_block_awake(il->hw, sta, false); | ||
1092 | } | ||
1093 | rcu_read_unlock(); | ||
1094 | } | ||
1095 | |||
1096 | static void | ||
1097 | il4965_tx_status(struct il_priv *il, struct il_tx_info *tx_info, | ||
1098 | bool is_agg) | ||
1099 | { | ||
1100 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data; | ||
1101 | |||
1102 | if (!is_agg) | ||
1103 | il4965_non_agg_tx_status(il, tx_info->ctx, hdr->addr1); | ||
1104 | |||
1105 | ieee80211_tx_status_irqsafe(il->hw, tx_info->skb); | ||
1106 | } | ||
1107 | |||
1108 | int il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx) | ||
1109 | { | ||
1110 | struct il_tx_queue *txq = &il->txq[txq_id]; | ||
1111 | struct il_queue *q = &txq->q; | ||
1112 | struct il_tx_info *tx_info; | ||
1113 | int nfreed = 0; | ||
1114 | struct ieee80211_hdr *hdr; | ||
1115 | |||
1116 | if (idx >= q->n_bd || il_queue_used(q, idx) == 0) { | ||
1117 | IL_ERR("Read idx for DMA queue txq id (%d), idx %d, " | ||
1118 | "is out of range [0-%d] %d %d.\n", txq_id, | ||
1119 | idx, q->n_bd, q->write_ptr, q->read_ptr); | ||
1120 | return 0; | ||
1121 | } | ||
1122 | |||
1123 | for (idx = il_queue_inc_wrap(idx, q->n_bd); | ||
1124 | q->read_ptr != idx; | ||
1125 | q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) { | ||
1126 | |||
1127 | tx_info = &txq->txb[txq->q.read_ptr]; | ||
1128 | |||
1129 | if (WARN_ON_ONCE(tx_info->skb == NULL)) | ||
1130 | continue; | ||
1131 | |||
1132 | hdr = (struct ieee80211_hdr *)tx_info->skb->data; | ||
1133 | if (ieee80211_is_data_qos(hdr->frame_control)) | ||
1134 | nfreed++; | ||
1135 | |||
1136 | il4965_tx_status(il, tx_info, | ||
1137 | txq_id >= IL4965_FIRST_AMPDU_QUEUE); | ||
1138 | tx_info->skb = NULL; | ||
1139 | |||
1140 | il->cfg->ops->lib->txq_free_tfd(il, txq); | ||
1141 | } | ||
1142 | return nfreed; | ||
1143 | } | ||
1144 | |||
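
[Editor's note] il4965_tx_queue_reclaim() walks read_ptr forward one TFD at a time, wrapping modulo the ring size, until it has passed the index handed back by the firmware. A ring-walk sketch with a deliberately tiny buffer:

    #include <stdio.h>

    #define N_BD 8  /* ring size; real queues are larger */

    static int inc_wrap(int idx) { return (idx + 1) % N_BD; }

    int main(void)
    {
            int read_ptr = 6, idx = 1;      /* reclaim up to and including idx */
            int freed = 0;

            /* Same loop shape as the driver: advance until just past idx. */
            for (idx = inc_wrap(idx); read_ptr != idx;
                 read_ptr = inc_wrap(read_ptr))
                    freed++;        /* here: release the TFD at read_ptr */

            printf("freed %d entries, read_ptr now %d\n", freed, read_ptr);
            /* prints: freed 4 entries, read_ptr now 2 (entries 6,7,0,1) */
            return 0;
    }
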
1145 | /** | ||
1146 | * il4965_tx_status_reply_compressed_ba - Update tx status from block-ack | ||
1147 | * | ||
1148 | * Go through block-ack's bitmap of ACK'd frames, update driver's record of | ||
1149 | * ACK vs. not. This gets sent to mac80211, then to rate scaling algo. | ||
1150 | */ | ||
1151 | static int il4965_tx_status_reply_compressed_ba(struct il_priv *il, | ||
1152 | struct il_ht_agg *agg, | ||
1153 | struct il_compressed_ba_resp *ba_resp) | ||
1154 | |||
1155 | { | ||
1156 | int i, sh, ack; | ||
1157 | u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl); | ||
1158 | u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); | ||
1159 | int successes = 0; | ||
1160 | struct ieee80211_tx_info *info; | ||
1161 | u64 bitmap, sent_bitmap; | ||
1162 | |||
1163 | if (unlikely(!agg->wait_for_ba)) { | ||
1164 | if (unlikely(ba_resp->bitmap)) | ||
1165 | IL_ERR("Received BA when not expected\n"); | ||
1166 | return -EINVAL; | ||
1167 | } | ||
1168 | |||
1169 | /* Mark that the expected block-ack response arrived */ | ||
1170 | agg->wait_for_ba = 0; | ||
1171 | D_TX_REPLY("BA %d %d\n", agg->start_idx, | ||
1172 | ba_resp->seq_ctl); | ||
1173 | |||
1174 | /* Calculate shift to align block-ack bits with our Tx win bits */ | ||
1175 | sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4); | ||
1176 | if (sh < 0) /* something is wrong with the indices */ | ||
1177 | sh += 0x100; | ||
1178 | |||
1179 | if (agg->frame_count > (64 - sh)) { | ||
1180 | D_TX_REPLY("more frames than bitmap size\n"); | ||
1181 | return -1; | ||
1182 | } | ||
1183 | |||
1184 | /* don't use 64-bit values for now */ | ||
1185 | bitmap = le64_to_cpu(ba_resp->bitmap) >> sh; | ||
1186 | |||
1187 | /* check for success or failure according to the | ||
1188 | * transmitted bitmap and block-ack bitmap */ | ||
1189 | sent_bitmap = bitmap & agg->bitmap; | ||
1190 | |||
1191 | /* For each frame attempted in aggregation, | ||
1192 | * update driver's record of tx frame's status. */ | ||
1193 | i = 0; | ||
1194 | while (sent_bitmap) { | ||
1195 | ack = sent_bitmap & 1ULL; | ||
1196 | successes += ack; | ||
1197 | D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", | ||
1198 | ack ? "ACK" : "NACK", i, | ||
1199 | (agg->start_idx + i) & 0xff, | ||
1200 | agg->start_idx + i); | ||
1201 | sent_bitmap >>= 1; | ||
1202 | ++i; | ||
1203 | } | ||
1204 | |||
1205 | D_TX_REPLY("Bitmap %llx\n", | ||
1206 | (unsigned long long)bitmap); | ||
1207 | |||
1208 | info = IEEE80211_SKB_CB(il->txq[scd_flow].txb[agg->start_idx].skb); | ||
1209 | memset(&info->status, 0, sizeof(info->status)); | ||
1210 | info->flags |= IEEE80211_TX_STAT_ACK; | ||
1211 | info->flags |= IEEE80211_TX_STAT_AMPDU; | ||
1212 | info->status.ampdu_ack_len = successes; | ||
1213 | info->status.ampdu_len = agg->frame_count; | ||
1214 | il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info); | ||
1215 | |||
1216 | return 0; | ||
1217 | } | ||
1218 | |||
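
[Editor's note] The shift in il4965_tx_status_reply_compressed_ba() aligns the firmware's 64-bit ACK bitmap, which is anchored at the BA window's sequence number, with the driver's window starting at start_idx; ANDing with the driver's transmitted bitmap then leaves one bit per attempted frame. A worked example with invented numbers:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            int start_idx = 5;      /* driver's first frame in the window */
            int ba_seq_idx = 3;     /* stands in for SEQ_TO_IDX(seq_ctl >> 4) */
            uint64_t ba_bitmap = 0x5CULL;       /* firmware's ACK bits */
            uint64_t drv_bitmap = 0xFULL;       /* frames the driver sent */

            int sh = start_idx - ba_seq_idx;
            if (sh < 0)             /* indices wrapped around 256 */
                    sh += 0x100;

            uint64_t sent = (ba_bitmap >> sh) & drv_bitmap;

            int successes = 0;
            for (int i = 0; sent; sent >>= 1, i++)
                    if (sent & 1)   /* frame start_idx + i was ACKed */
                            successes++;

            /* 0x5C >> 2 = 0x17; & 0xF = 0x7 -> 3 ACKed frames */
            printf("successes=%d\n", successes);
            return 0;
    }
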
1219 | /** | ||
1220 | * translate ucode response to mac80211 tx status control values | ||
1221 | */ | ||
1222 | void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags, | ||
1223 | struct ieee80211_tx_info *info) | ||
1224 | { | ||
1225 | struct ieee80211_tx_rate *r = &info->control.rates[0]; | ||
1226 | |||
1227 | info->antenna_sel_tx = | ||
1228 | ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); | ||
1229 | if (rate_n_flags & RATE_MCS_HT_MSK) | ||
1230 | r->flags |= IEEE80211_TX_RC_MCS; | ||
1231 | if (rate_n_flags & RATE_MCS_GF_MSK) | ||
1232 | r->flags |= IEEE80211_TX_RC_GREEN_FIELD; | ||
1233 | if (rate_n_flags & RATE_MCS_HT40_MSK) | ||
1234 | r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; | ||
1235 | if (rate_n_flags & RATE_MCS_DUP_MSK) | ||
1236 | r->flags |= IEEE80211_TX_RC_DUP_DATA; | ||
1237 | if (rate_n_flags & RATE_MCS_SGI_MSK) | ||
1238 | r->flags |= IEEE80211_TX_RC_SHORT_GI; | ||
1239 | r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band); | ||
1240 | } | ||
1241 | |||
1242 | /** | ||
1243 | * il4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA | ||
1244 | * | ||
1245 | * Handles block-acknowledge notification from device, which reports success | ||
1246 | * of frames sent via aggregation. | ||
1247 | */ | ||
1248 | void il4965_rx_reply_compressed_ba(struct il_priv *il, | ||
1249 | struct il_rx_buf *rxb) | ||
1250 | { | ||
1251 | struct il_rx_pkt *pkt = rxb_addr(rxb); | ||
1252 | struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; | ||
1253 | struct il_tx_queue *txq = NULL; | ||
1254 | struct il_ht_agg *agg; | ||
1255 | int idx; | ||
1256 | int sta_id; | ||
1257 | int tid; | ||
1258 | unsigned long flags; | ||
1259 | |||
1260 | /* "flow" corresponds to Tx queue */ | ||
1261 | u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); | ||
1262 | |||
1263 | /* "ssn" is start of block-ack Tx win, corresponds to idx | ||
1264 | * (in Tx queue's circular buffer) of first TFD/frame in win */ | ||
1265 | u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); | ||
1266 | |||
1267 | if (scd_flow >= il->hw_params.max_txq_num) { | ||
1268 | IL_ERR("BA scd_flow %u exceeds the number of Tx queues\n", | ||
1269 | scd_flow); | ||
1270 | return; | ||
1271 | } | ||
1272 | |||
1273 | txq = &il->txq[scd_flow]; | ||
1274 | sta_id = ba_resp->sta_id; | ||
1275 | tid = ba_resp->tid; | ||
1276 | agg = &il->stations[sta_id].tid[tid].agg; | ||
1277 | if (unlikely(agg->txq_id != scd_flow)) { | ||
1278 | /* | ||
1279 | * FIXME: this is a uCode bug which needs to be addressed; | ||
1280 | * log the information and return for now. | ||
1281 | * Since it can happen very often, and in order not to | ||
1282 | * flood the syslog, don't enable the logging by default. | ||
1283 | */ | ||
1284 | D_TX_REPLY( | ||
1285 | "BA scd_flow %d does not match txq_id %d\n", | ||
1286 | scd_flow, agg->txq_id); | ||
1287 | return; | ||
1288 | } | ||
1289 | |||
1290 | /* Find idx just before block-ack win */ | ||
1291 | idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); | ||
1292 | |||
1293 | spin_lock_irqsave(&il->sta_lock, flags); | ||
1294 | |||
1295 | D_TX_REPLY("REPLY_COMPRESSED_BA [%d] Received from %pM, " | ||
1296 | "sta_id = %d\n", | ||
1297 | agg->wait_for_ba, | ||
1298 | (u8 *) &ba_resp->sta_addr_lo32, | ||
1299 | ba_resp->sta_id); | ||
1300 | D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, " | ||
1301 | "scd_flow = %d, " | ||
1302 | "scd_ssn = %d\n", | ||
1303 | ba_resp->tid, | ||
1304 | le16_to_cpu(ba_resp->seq_ctl), | ||
1305 | (unsigned long long)le64_to_cpu(ba_resp->bitmap), | ||
1306 | scd_flow, | ||
1307 | ba_resp_scd_ssn); | ||
1308 | D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", | ||
1309 | agg->start_idx, | ||
1310 | (unsigned long long)agg->bitmap); | ||
1311 | |||
1312 | /* Update driver's record of ACK vs. not for each frame in win */ | ||
1313 | il4965_tx_status_reply_compressed_ba(il, agg, ba_resp); | ||
1314 | |||
1315 | /* Release all TFDs before the SSN, i.e. all TFDs in front of | ||
1316 | * block-ack win (we assume that they've been successfully | ||
1317 | * transmitted ... if not, it's too late anyway). */ | ||
1318 | if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { | ||
1319 | /* calculate mac80211 ampdu sw queue to wake */ | ||
1320 | int freed = il4965_tx_queue_reclaim(il, scd_flow, idx); | ||
1321 | il4965_free_tfds_in_queue(il, sta_id, tid, freed); | ||
1322 | |||
1323 | if (il_queue_space(&txq->q) > txq->q.low_mark && | ||
1324 | il->mac80211_registered && | ||
1325 | agg->state != IL_EMPTYING_HW_QUEUE_DELBA) | ||
1326 | il_wake_queue(il, txq); | ||
1327 | |||
1328 | il4965_txq_check_empty(il, sta_id, tid, scd_flow); | ||
1329 | } | ||
1330 | |||
1331 | spin_unlock_irqrestore(&il->sta_lock, flags); | ||
1332 | } | ||
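
/*
 * [Illustrative example -- not driver code.] Sketch of the circular
 * Tx-queue index math used by il_queue_dec_wrap() above, assuming (as
 * the driver does) that the queue size n_bd is a power of two.
 */
#include <stdio.h>

static int ex_queue_dec_wrap(int idx, int n_bd)
{
	/* Masking with n_bd - 1 makes the decrement wrap at zero. */
	return --idx & (n_bd - 1);
}

int main(void)
{
	/* SSN 0 in a 256-entry queue wraps to 255: the slot just before
	 * the start of the block-ack window. */
	printf("%d\n", ex_queue_dec_wrap(0, 256));	/* 255 */
	printf("%d\n", ex_queue_dec_wrap(100, 256));	/* 99 */
	return 0;
}
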
1333 | |||
1334 | #ifdef CONFIG_IWLEGACY_DEBUG | ||
1335 | const char *il4965_get_tx_fail_reason(u32 status) | ||
1336 | { | ||
1337 | #define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x | ||
1338 | #define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x | ||
1339 | |||
1340 | switch (status & TX_STATUS_MSK) { | ||
1341 | case TX_STATUS_SUCCESS: | ||
1342 | return "SUCCESS"; | ||
1343 | TX_STATUS_POSTPONE(DELAY); | ||
1344 | TX_STATUS_POSTPONE(FEW_BYTES); | ||
1345 | TX_STATUS_POSTPONE(QUIET_PERIOD); | ||
1346 | TX_STATUS_POSTPONE(CALC_TTAK); | ||
1347 | TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY); | ||
1348 | TX_STATUS_FAIL(SHORT_LIMIT); | ||
1349 | TX_STATUS_FAIL(LONG_LIMIT); | ||
1350 | TX_STATUS_FAIL(FIFO_UNDERRUN); | ||
1351 | TX_STATUS_FAIL(DRAIN_FLOW); | ||
1352 | TX_STATUS_FAIL(RFKILL_FLUSH); | ||
1353 | TX_STATUS_FAIL(LIFE_EXPIRE); | ||
1354 | TX_STATUS_FAIL(DEST_PS); | ||
1355 | TX_STATUS_FAIL(HOST_ABORTED); | ||
1356 | TX_STATUS_FAIL(BT_RETRY); | ||
1357 | TX_STATUS_FAIL(STA_INVALID); | ||
1358 | TX_STATUS_FAIL(FRAG_DROPPED); | ||
1359 | TX_STATUS_FAIL(TID_DISABLE); | ||
1360 | TX_STATUS_FAIL(FIFO_FLUSHED); | ||
1361 | TX_STATUS_FAIL(INSUFFICIENT_CF_POLL); | ||
1362 | TX_STATUS_FAIL(PASSIVE_NO_RX); | ||
1363 | TX_STATUS_FAIL(NO_BEACON_ON_RADAR); | ||
1364 | } | ||
1365 | |||
1366 | return "UNKNOWN"; | ||
1367 | |||
1368 | #undef TX_STATUS_FAIL | ||
1369 | #undef TX_STATUS_POSTPONE | ||
1370 | } | ||
1371 | #endif /* CONFIG_IWLEGACY_DEBUG */ | ||
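
/*
 * [Illustrative example -- not driver code.] The same token-pasting and
 * stringizing idiom as il4965_get_tx_fail_reason() above, reduced to a
 * stand-alone program with made-up status codes.
 */
#include <stdio.h>

enum { EX_STATUS_OK = 0, EX_STATUS_FAIL_TIMEOUT, EX_STATUS_FAIL_NO_ACK };

static const char *ex_status_name(int status)
{
#define EX_FAIL(x) case EX_STATUS_FAIL_ ## x: return #x
	switch (status) {
	case EX_STATUS_OK:
		return "OK";
	EX_FAIL(TIMEOUT);
	EX_FAIL(NO_ACK);
	}
	return "UNKNOWN";
#undef EX_FAIL
}

int main(void)
{
	printf("%s\n", ex_status_name(EX_STATUS_FAIL_NO_ACK)); /* NO_ACK */
	return 0;
}
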