path: root/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-agn-lib.c')
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-lib.c | 1747
1 files changed, 1311 insertions, 436 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 8fd00a6e5120..f803fb62f8bc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * GPL LICENSE SUMMARY 3 * GPL LICENSE SUMMARY
4 * 4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as 8 * it under the terms of version 2 of the GNU General Public License as
@@ -40,22 +40,203 @@
40#include "iwl-agn.h" 40#include "iwl-agn.h"
41#include "iwl-sta.h" 41#include "iwl-sta.h"
42 42
43static inline u32 iwlagn_get_scd_ssn(struct iwl5000_tx_resp *tx_resp) 43static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
44{ 44{
45 return le32_to_cpup((__le32 *)&tx_resp->status + 45 return le32_to_cpup((__le32 *)&tx_resp->status +
46 tx_resp->frame_count) & MAX_SN; 46 tx_resp->frame_count) & MAX_SN;
47} 47}
48 48
49static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
50{
51 status &= TX_STATUS_MSK;
52
53 switch (status) {
54 case TX_STATUS_POSTPONE_DELAY:
55 priv->_agn.reply_tx_stats.pp_delay++;
56 break;
57 case TX_STATUS_POSTPONE_FEW_BYTES:
58 priv->_agn.reply_tx_stats.pp_few_bytes++;
59 break;
60 case TX_STATUS_POSTPONE_BT_PRIO:
61 priv->_agn.reply_tx_stats.pp_bt_prio++;
62 break;
63 case TX_STATUS_POSTPONE_QUIET_PERIOD:
64 priv->_agn.reply_tx_stats.pp_quiet_period++;
65 break;
66 case TX_STATUS_POSTPONE_CALC_TTAK:
67 priv->_agn.reply_tx_stats.pp_calc_ttak++;
68 break;
69 case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
70 priv->_agn.reply_tx_stats.int_crossed_retry++;
71 break;
72 case TX_STATUS_FAIL_SHORT_LIMIT:
73 priv->_agn.reply_tx_stats.short_limit++;
74 break;
75 case TX_STATUS_FAIL_LONG_LIMIT:
76 priv->_agn.reply_tx_stats.long_limit++;
77 break;
78 case TX_STATUS_FAIL_FIFO_UNDERRUN:
79 priv->_agn.reply_tx_stats.fifo_underrun++;
80 break;
81 case TX_STATUS_FAIL_DRAIN_FLOW:
82 priv->_agn.reply_tx_stats.drain_flow++;
83 break;
84 case TX_STATUS_FAIL_RFKILL_FLUSH:
85 priv->_agn.reply_tx_stats.rfkill_flush++;
86 break;
87 case TX_STATUS_FAIL_LIFE_EXPIRE:
88 priv->_agn.reply_tx_stats.life_expire++;
89 break;
90 case TX_STATUS_FAIL_DEST_PS:
91 priv->_agn.reply_tx_stats.dest_ps++;
92 break;
93 case TX_STATUS_FAIL_HOST_ABORTED:
94 priv->_agn.reply_tx_stats.host_abort++;
95 break;
96 case TX_STATUS_FAIL_BT_RETRY:
97 priv->_agn.reply_tx_stats.bt_retry++;
98 break;
99 case TX_STATUS_FAIL_STA_INVALID:
100 priv->_agn.reply_tx_stats.sta_invalid++;
101 break;
102 case TX_STATUS_FAIL_FRAG_DROPPED:
103 priv->_agn.reply_tx_stats.frag_drop++;
104 break;
105 case TX_STATUS_FAIL_TID_DISABLE:
106 priv->_agn.reply_tx_stats.tid_disable++;
107 break;
108 case TX_STATUS_FAIL_FIFO_FLUSHED:
109 priv->_agn.reply_tx_stats.fifo_flush++;
110 break;
111 case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
112 priv->_agn.reply_tx_stats.insuff_cf_poll++;
113 break;
114 case TX_STATUS_FAIL_PASSIVE_NO_RX:
115 priv->_agn.reply_tx_stats.fail_hw_drop++;
116 break;
117 case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
118 priv->_agn.reply_tx_stats.sta_color_mismatch++;
119 break;
120 default:
121 priv->_agn.reply_tx_stats.unknown++;
122 break;
123 }
124}
125
126static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
127{
128 status &= AGG_TX_STATUS_MSK;
129
130 switch (status) {
131 case AGG_TX_STATE_UNDERRUN_MSK:
132 priv->_agn.reply_agg_tx_stats.underrun++;
133 break;
134 case AGG_TX_STATE_BT_PRIO_MSK:
135 priv->_agn.reply_agg_tx_stats.bt_prio++;
136 break;
137 case AGG_TX_STATE_FEW_BYTES_MSK:
138 priv->_agn.reply_agg_tx_stats.few_bytes++;
139 break;
140 case AGG_TX_STATE_ABORT_MSK:
141 priv->_agn.reply_agg_tx_stats.abort++;
142 break;
143 case AGG_TX_STATE_LAST_SENT_TTL_MSK:
144 priv->_agn.reply_agg_tx_stats.last_sent_ttl++;
145 break;
146 case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
147 priv->_agn.reply_agg_tx_stats.last_sent_try++;
148 break;
149 case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
150 priv->_agn.reply_agg_tx_stats.last_sent_bt_kill++;
151 break;
152 case AGG_TX_STATE_SCD_QUERY_MSK:
153 priv->_agn.reply_agg_tx_stats.scd_query++;
154 break;
155 case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
156 priv->_agn.reply_agg_tx_stats.bad_crc32++;
157 break;
158 case AGG_TX_STATE_RESPONSE_MSK:
159 priv->_agn.reply_agg_tx_stats.response++;
160 break;
161 case AGG_TX_STATE_DUMP_TX_MSK:
162 priv->_agn.reply_agg_tx_stats.dump_tx++;
163 break;
164 case AGG_TX_STATE_DELAY_TX_MSK:
165 priv->_agn.reply_agg_tx_stats.delay_tx++;
166 break;
167 default:
168 priv->_agn.reply_agg_tx_stats.unknown++;
169 break;
170 }
171}
172
173static void iwlagn_set_tx_status(struct iwl_priv *priv,
174 struct ieee80211_tx_info *info,
175 struct iwl_rxon_context *ctx,
176 struct iwlagn_tx_resp *tx_resp,
177 int txq_id, bool is_agg)
178{
179 u16 status = le16_to_cpu(tx_resp->status.status);
180
181 info->status.rates[0].count = tx_resp->failure_frame + 1;
182 if (is_agg)
183 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
184 info->flags |= iwl_tx_status_to_mac80211(status);
185 iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
186 info);
187 if (!iwl_is_tx_success(status))
188 iwlagn_count_tx_err_status(priv, status);
189
190 if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
191 iwl_is_associated_ctx(ctx) && ctx->vif &&
192 ctx->vif->type == NL80211_IFTYPE_STATION) {
193 ctx->last_tx_rejected = true;
194 iwl_stop_queue(priv, &priv->txq[txq_id]);
195 }
196
197 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
198 "0x%x retries %d\n",
199 txq_id,
200 iwl_get_tx_fail_reason(status), status,
201 le32_to_cpu(tx_resp->rate_n_flags),
202 tx_resp->failure_frame);
203}
204
205#ifdef CONFIG_IWLWIFI_DEBUG
206#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x
207
208const char *iwl_get_agg_tx_fail_reason(u16 status)
209{
210 status &= AGG_TX_STATUS_MSK;
211 switch (status) {
212 case AGG_TX_STATE_TRANSMITTED:
213 return "SUCCESS";
214 AGG_TX_STATE_FAIL(UNDERRUN_MSK);
215 AGG_TX_STATE_FAIL(BT_PRIO_MSK);
216 AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
217 AGG_TX_STATE_FAIL(ABORT_MSK);
218 AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
219 AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
220 AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
221 AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
222 AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
223 AGG_TX_STATE_FAIL(RESPONSE_MSK);
224 AGG_TX_STATE_FAIL(DUMP_TX_MSK);
225 AGG_TX_STATE_FAIL(DELAY_TX_MSK);
226 }
227
228 return "UNKNOWN";
229}
230#endif /* CONFIG_IWLWIFI_DEBUG */
231
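
The AGG_TX_STATE_FAIL() helper above keeps each case label and its returned string in sync by combining token pasting (##) with the preprocessor's stringification operator (#x). A minimal standalone sketch of the same idiom, using hypothetical names that are not part of this driver or patch:

	#define NAME_CASE(x) case COLOR_ ## x: return #x

	enum color { COLOR_RED, COLOR_GREEN, COLOR_BLUE };

	static inline const char *color_name(enum color c)
	{
		switch (c) {
		NAME_CASE(RED);		/* expands to: case COLOR_RED: return "RED"; */
		NAME_CASE(GREEN);
		NAME_CASE(BLUE);
		}
		return "UNKNOWN";
	}
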
49static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv, 232static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
50 struct iwl_ht_agg *agg, 233 struct iwl_ht_agg *agg,
51 struct iwl5000_tx_resp *tx_resp, 234 struct iwlagn_tx_resp *tx_resp,
52 int txq_id, u16 start_idx) 235 int txq_id, u16 start_idx)
53{ 236{
54 u16 status; 237 u16 status;
55 struct agg_tx_status *frame_status = &tx_resp->status; 238 struct agg_tx_status *frame_status = &tx_resp->status;
56 struct ieee80211_tx_info *info = NULL;
57 struct ieee80211_hdr *hdr = NULL; 239 struct ieee80211_hdr *hdr = NULL;
58 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
59 int i, sh, idx; 240 int i, sh, idx;
60 u16 seq; 241 u16 seq;
61 242
@@ -64,31 +245,21 @@ static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
64 245
65 agg->frame_count = tx_resp->frame_count; 246 agg->frame_count = tx_resp->frame_count;
66 agg->start_idx = start_idx; 247 agg->start_idx = start_idx;
67 agg->rate_n_flags = rate_n_flags; 248 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
68 agg->bitmap = 0; 249 agg->bitmap = 0;
69 250
70 /* # frames attempted by Tx command */ 251 /* # frames attempted by Tx command */
71 if (agg->frame_count == 1) { 252 if (agg->frame_count == 1) {
253 struct iwl_tx_info *txb;
254
72 /* Only one frame was attempted; no block-ack will arrive */ 255 /* Only one frame was attempted; no block-ack will arrive */
73 status = le16_to_cpu(frame_status[0].status);
74 idx = start_idx; 256 idx = start_idx;
75 257
76 /* FIXME: code repetition */
77 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n", 258 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
78 agg->frame_count, agg->start_idx, idx); 259 agg->frame_count, agg->start_idx, idx);
79 260 txb = &priv->txq[txq_id].txb[idx];
80 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb); 261 iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(txb->skb),
81 info->status.rates[0].count = tx_resp->failure_frame + 1; 262 txb->ctx, tx_resp, txq_id, true);
82 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
83 info->flags |= iwl_tx_status_to_mac80211(status);
84 iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
85
86 /* FIXME: code repetition end */
87
88 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
89 status & 0xff, tx_resp->failure_frame);
90 IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
91
92 agg->wait_for_ba = 0; 263 agg->wait_for_ba = 0;
93 } else { 264 } else {
94 /* Two or more frames were attempted; expect block-ack */ 265 /* Two or more frames were attempted; expect block-ack */
@@ -109,12 +280,20 @@ static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
109 idx = SEQ_TO_INDEX(seq); 280 idx = SEQ_TO_INDEX(seq);
110 txq_id = SEQ_TO_QUEUE(seq); 281 txq_id = SEQ_TO_QUEUE(seq);
111 282
283 if (status & AGG_TX_STATUS_MSK)
284 iwlagn_count_agg_tx_err_status(priv, status);
285
112 if (status & (AGG_TX_STATE_FEW_BYTES_MSK | 286 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
113 AGG_TX_STATE_ABORT_MSK)) 287 AGG_TX_STATE_ABORT_MSK))
114 continue; 288 continue;
115 289
116 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n", 290 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
117 agg->frame_count, txq_id, idx); 291 agg->frame_count, txq_id, idx);
292 IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
293 "try-count (0x%08x)\n",
294 iwl_get_agg_tx_fail_reason(status),
295 status & AGG_TX_STATUS_MSK,
296 status & AGG_TX_TRY_MSK);
118 297
119 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx); 298 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
120 if (!hdr) { 299 if (!hdr) {
@@ -220,8 +399,9 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
220 int index = SEQ_TO_INDEX(sequence); 399 int index = SEQ_TO_INDEX(sequence);
221 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 400 struct iwl_tx_queue *txq = &priv->txq[txq_id];
222 struct ieee80211_tx_info *info; 401 struct ieee80211_tx_info *info;
223 struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; 402 struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
224 u32 status = le16_to_cpu(tx_resp->status.status); 403 struct iwl_tx_info *txb;
404 u32 status = le16_to_cpu(tx_resp->status.status);
225 int tid; 405 int tid;
226 int sta_id; 406 int sta_id;
227 int freed; 407 int freed;
@@ -235,11 +415,15 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
235 return; 415 return;
236 } 416 }
237 417
238 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); 418 txq->time_stamp = jiffies;
419 txb = &txq->txb[txq->q.read_ptr];
420 info = IEEE80211_SKB_CB(txb->skb);
239 memset(&info->status, 0, sizeof(info->status)); 421 memset(&info->status, 0, sizeof(info->status));
240 422
241 tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS; 423 tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
242 sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS; 424 IWLAGN_TX_RES_TID_POS;
425 sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
426 IWLAGN_TX_RES_RA_POS;
243 427
244 spin_lock_irqsave(&priv->sta_lock, flags); 428 spin_lock_irqsave(&priv->sta_lock, flags);
245 if (txq->sched_retry) { 429 if (txq->sched_retry) {
@@ -247,7 +431,15 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
247 struct iwl_ht_agg *agg; 431 struct iwl_ht_agg *agg;
248 432
249 agg = &priv->stations[sta_id].tid[tid].agg; 433 agg = &priv->stations[sta_id].tid[tid].agg;
250 434 /*
435 * If the BT kill count is non-zero, we'll get this
436 * notification again.
437 */
438 if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
439 priv->cfg->bt_params &&
440 priv->cfg->bt_params->advanced_bt_coexist) {
441 IWL_WARN(priv, "receive reply tx with bt_kill\n");
442 }
251 iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index); 443 iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
252 444
253 /* check if BAR is needed */ 445 /* check if BAR is needed */
@@ -265,35 +457,19 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
265 457
266 if (priv->mac80211_registered && 458 if (priv->mac80211_registered &&
267 (iwl_queue_space(&txq->q) > txq->q.low_mark) && 459 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
268 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) { 460 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
269 if (agg->state == IWL_AGG_OFF) 461 iwl_wake_queue(priv, txq);
270 iwl_wake_queue(priv, txq_id);
271 else
272 iwl_wake_queue(priv, txq->swq_id);
273 }
274 } 462 }
275 } else { 463 } else {
276 BUG_ON(txq_id != txq->swq_id); 464 iwlagn_set_tx_status(priv, info, txb->ctx, tx_resp,
277 465 txq_id, false);
278 info->status.rates[0].count = tx_resp->failure_frame + 1;
279 info->flags |= iwl_tx_status_to_mac80211(status);
280 iwlagn_hwrate_to_tx_control(priv,
281 le32_to_cpu(tx_resp->rate_n_flags),
282 info);
283
284 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
285 "0x%x retries %d\n",
286 txq_id,
287 iwl_get_tx_fail_reason(status), status,
288 le32_to_cpu(tx_resp->rate_n_flags),
289 tx_resp->failure_frame);
290
291 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index); 466 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
292 iwl_free_tfds_in_queue(priv, sta_id, tid, freed); 467 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
293 468
294 if (priv->mac80211_registered && 469 if (priv->mac80211_registered &&
295 (iwl_queue_space(&txq->q) > txq->q.low_mark)) 470 iwl_queue_space(&txq->q) > txq->q.low_mark &&
296 iwl_wake_queue(priv, txq_id); 471 status != TX_STATUS_FAIL_PASSIVE_NO_RX)
472 iwl_wake_queue(priv, txq);
297 } 473 }
298 474
299 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id); 475 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
@@ -307,15 +483,20 @@ void iwlagn_rx_handler_setup(struct iwl_priv *priv)
307 /* init calibration handlers */ 483 /* init calibration handlers */
308 priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] = 484 priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
309 iwlagn_rx_calib_result; 485 iwlagn_rx_calib_result;
310 priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
311 iwlagn_rx_calib_complete;
312 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx; 486 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
487
488 /* set up notification wait support */
489 spin_lock_init(&priv->_agn.notif_wait_lock);
490 INIT_LIST_HEAD(&priv->_agn.notif_waits);
491 init_waitqueue_head(&priv->_agn.notif_waitq);
313} 492}
314 493
315void iwlagn_setup_deferred_work(struct iwl_priv *priv) 494void iwlagn_setup_deferred_work(struct iwl_priv *priv)
316{ 495{
317 /* in agn, the tx power calibration is done in uCode */ 496 /*
318 priv->disable_tx_power_cal = 1; 497 * nothing need to be done here anymore
498 * still keep for future use if needed
499 */
319} 500}
320 501
321int iwlagn_hw_valid_rtc_data_addr(u32 addr) 502int iwlagn_hw_valid_rtc_data_addr(u32 addr)
@@ -326,9 +507,13 @@ int iwlagn_hw_valid_rtc_data_addr(u32 addr)
326 507
327int iwlagn_send_tx_power(struct iwl_priv *priv) 508int iwlagn_send_tx_power(struct iwl_priv *priv)
328{ 509{
329 struct iwl5000_tx_power_dbm_cmd tx_power_cmd; 510 struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
330 u8 tx_ant_cfg_cmd; 511 u8 tx_ant_cfg_cmd;
331 512
513 if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
514 "TX Power requested while scanning!\n"))
515 return -EAGAIN;
516
332 /* half dBm need to multiply */ 517 /* half dBm need to multiply */
333 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt); 518 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
334 519
@@ -347,24 +532,22 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
347 */ 532 */
348 tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm; 533 tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
349 } 534 }
350 tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED; 535 tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
351 tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO; 536 tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;
352 537
353 if (IWL_UCODE_API(priv->ucode_ver) == 1) 538 if (IWL_UCODE_API(priv->ucode_ver) == 1)
354 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1; 539 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
355 else 540 else
356 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD; 541 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
357 542
358 return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd, 543 return iwl_send_cmd_pdu(priv, tx_ant_cfg_cmd, sizeof(tx_power_cmd),
359 sizeof(tx_power_cmd), &tx_power_cmd, 544 &tx_power_cmd);
360 NULL);
361} 545}
362 546
363void iwlagn_temperature(struct iwl_priv *priv) 547void iwlagn_temperature(struct iwl_priv *priv)
364{ 548{
365 /* store temperature from statistics (in Celsius) */ 549 /* store temperature from correct statistics (in Celsius) */
366 priv->temperature = 550 priv->temperature = le32_to_cpu(priv->statistics.common.temperature);
367 le32_to_cpu(priv->_agn.statistics.general.common.temperature);
368 iwl_tt_handler(priv); 551 iwl_tt_handler(priv);
369} 552}
370 553
@@ -402,6 +585,12 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
402 case INDIRECT_REGULATORY: 585 case INDIRECT_REGULATORY:
403 offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY); 586 offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
404 break; 587 break;
588 case INDIRECT_TXP_LIMIT:
589 offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
590 break;
591 case INDIRECT_TXP_LIMIT_SIZE:
592 offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
593 break;
405 case INDIRECT_CALIBRATION: 594 case INDIRECT_CALIBRATION:
406 offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION); 595 offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
407 break; 596 break;
@@ -425,13 +614,14 @@ const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
425 size_t offset) 614 size_t offset)
426{ 615{
427 u32 address = eeprom_indirect_address(priv, offset); 616 u32 address = eeprom_indirect_address(priv, offset);
428 BUG_ON(address >= priv->cfg->eeprom_size); 617 BUG_ON(address >= priv->cfg->base_params->eeprom_size);
429 return &priv->eeprom[address]; 618 return &priv->eeprom[address];
430} 619}
431 620
432struct iwl_mod_params iwlagn_mod_params = { 621struct iwl_mod_params iwlagn_mod_params = {
433 .amsdu_size_8K = 1, 622 .amsdu_size_8K = 1,
434 .restart_fw = 1, 623 .restart_fw = 1,
624 .plcp_check = true,
435 /* the rest are 0 by default */ 625 /* the rest are 0 by default */
436}; 626};
437 627
@@ -473,10 +663,9 @@ int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
473 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ 663 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
474 u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */ 664 u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
475 665
476 if (!priv->cfg->use_isr_legacy) 666 rb_timeout = RX_RB_TIMEOUT;
477 rb_timeout = RX_RB_TIMEOUT;
478 667
479 if (priv->cfg->mod_params->amsdu_size_8K) 668 if (iwlagn_mod_params.amsdu_size_8K)
480 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; 669 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
481 else 670 else
482 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; 671 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
@@ -518,6 +707,23 @@ int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
518 return 0; 707 return 0;
519} 708}
520 709
710static void iwlagn_set_pwr_vmain(struct iwl_priv *priv)
711{
712/*
713 * (for documentation purposes)
714 * to set power to V_AUX, do:
715
716 if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
717 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
718 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
719 ~APMG_PS_CTRL_MSK_PWR_SRC);
720 */
721
722 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
723 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
724 ~APMG_PS_CTRL_MSK_PWR_SRC);
725}
726
521int iwlagn_hw_nic_init(struct iwl_priv *priv) 727int iwlagn_hw_nic_init(struct iwl_priv *priv)
522{ 728{
523 unsigned long flags; 729 unsigned long flags;
@@ -533,7 +739,7 @@ int iwlagn_hw_nic_init(struct iwl_priv *priv)
533 739
534 spin_unlock_irqrestore(&priv->lock, flags); 740 spin_unlock_irqrestore(&priv->lock, flags);
535 741
536 ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN); 742 iwlagn_set_pwr_vmain(priv);
537 743
538 priv->cfg->ops->lib->apm_ops.config(priv); 744 priv->cfg->ops->lib->apm_ops.config(priv);
539 745
@@ -566,6 +772,12 @@ int iwlagn_hw_nic_init(struct iwl_priv *priv)
566 } else 772 } else
567 iwlagn_txq_ctx_reset(priv); 773 iwlagn_txq_ctx_reset(priv);
568 774
775 if (priv->cfg->base_params->shadow_reg_enable) {
776 /* enable shadow regs in HW */
777 iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
778 0x800FFFFF);
779 }
780
569 set_bit(STATUS_INIT, &priv->status); 781 set_bit(STATUS_INIT, &priv->status);
570 782
571 return 0; 783 return 0;
@@ -711,7 +923,6 @@ void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
711 923
712 list_add_tail(&rxb->list, &rxq->rx_free); 924 list_add_tail(&rxb->list, &rxq->rx_free);
713 rxq->free_count++; 925 rxq->free_count++;
714 priv->alloc_rxb_page++;
715 926
716 spin_unlock_irqrestore(&rxq->lock, flags); 927 spin_unlock_irqrestore(&rxq->lock, flags);
717 } 928 }
@@ -793,240 +1004,6 @@ int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
793 return -1; 1004 return -1;
794} 1005}
795 1006
796/* Calc max signal level (dBm) among 3 possible receivers */
797static inline int iwlagn_calc_rssi(struct iwl_priv *priv,
798 struct iwl_rx_phy_res *rx_resp)
799{
800 return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
801}
802
803static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
804{
805 u32 decrypt_out = 0;
806
807 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
808 RX_RES_STATUS_STATION_FOUND)
809 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
810 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
811
812 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
813
814 /* packet was not encrypted */
815 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
816 RX_RES_STATUS_SEC_TYPE_NONE)
817 return decrypt_out;
818
819 /* packet was encrypted with unknown alg */
820 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
821 RX_RES_STATUS_SEC_TYPE_ERR)
822 return decrypt_out;
823
824 /* decryption was not done in HW */
825 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
826 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
827 return decrypt_out;
828
829 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
830
831 case RX_RES_STATUS_SEC_TYPE_CCMP:
832 /* alg is CCM: check MIC only */
833 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
834 /* Bad MIC */
835 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
836 else
837 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
838
839 break;
840
841 case RX_RES_STATUS_SEC_TYPE_TKIP:
842 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
843 /* Bad TTAK */
844 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
845 break;
846 }
847 /* fall through if TTAK OK */
848 default:
849 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
850 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
851 else
852 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
853 break;
854 }
855
856 IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
857 decrypt_in, decrypt_out);
858
859 return decrypt_out;
860}
861
862static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
863 struct ieee80211_hdr *hdr,
864 u16 len,
865 u32 ampdu_status,
866 struct iwl_rx_mem_buffer *rxb,
867 struct ieee80211_rx_status *stats)
868{
869 struct sk_buff *skb;
870 __le16 fc = hdr->frame_control;
871
872 /* We only process data packets if the interface is open */
873 if (unlikely(!priv->is_open)) {
874 IWL_DEBUG_DROP_LIMIT(priv,
875 "Dropping packet while interface is not open.\n");
876 return;
877 }
878
879 /* In case of HW accelerated crypto and bad decryption, drop */
880 if (!priv->cfg->mod_params->sw_crypto &&
881 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
882 return;
883
884 skb = dev_alloc_skb(128);
885 if (!skb) {
886 IWL_ERR(priv, "dev_alloc_skb failed\n");
887 return;
888 }
889
890 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
891
892 iwl_update_stats(priv, false, fc, len);
893 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
894
895 ieee80211_rx(priv->hw, skb);
896 priv->alloc_rxb_page--;
897 rxb->page = NULL;
898}
899
900/* Called for REPLY_RX (legacy ABG frames), or
901 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
902void iwlagn_rx_reply_rx(struct iwl_priv *priv,
903 struct iwl_rx_mem_buffer *rxb)
904{
905 struct ieee80211_hdr *header;
906 struct ieee80211_rx_status rx_status;
907 struct iwl_rx_packet *pkt = rxb_addr(rxb);
908 struct iwl_rx_phy_res *phy_res;
909 __le32 rx_pkt_status;
910 struct iwl_rx_mpdu_res_start *amsdu;
911 u32 len;
912 u32 ampdu_status;
913 u32 rate_n_flags;
914
915 /**
916 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
917 * REPLY_RX: physical layer info is in this buffer
918 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
919 * command and cached in priv->last_phy_res
920 *
921 * Here we set up local variables depending on which command is
922 * received.
923 */
924 if (pkt->hdr.cmd == REPLY_RX) {
925 phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
926 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
927 + phy_res->cfg_phy_cnt);
928
929 len = le16_to_cpu(phy_res->byte_count);
930 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
931 phy_res->cfg_phy_cnt + len);
932 ampdu_status = le32_to_cpu(rx_pkt_status);
933 } else {
934 if (!priv->_agn.last_phy_res_valid) {
935 IWL_ERR(priv, "MPDU frame without cached PHY data\n");
936 return;
937 }
938 phy_res = &priv->_agn.last_phy_res;
939 amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
940 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
941 len = le16_to_cpu(amsdu->byte_count);
942 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
943 ampdu_status = iwlagn_translate_rx_status(priv,
944 le32_to_cpu(rx_pkt_status));
945 }
946
947 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
948 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
949 phy_res->cfg_phy_cnt);
950 return;
951 }
952
953 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
954 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
955 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
956 le32_to_cpu(rx_pkt_status));
957 return;
958 }
959
960 /* This will be used in several places later */
961 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
962
963 /* rx_status carries information about the packet to mac80211 */
964 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
965 rx_status.freq =
966 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
967 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
968 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
969 rx_status.rate_idx =
970 iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
971 rx_status.flag = 0;
972
973 /* TSF isn't reliable. In order to allow smooth user experience,
974 * this W/A doesn't propagate it to the mac80211 */
975 /*rx_status.flag |= RX_FLAG_TSFT;*/
976
977 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
978
979 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
980 rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
981
982 iwl_dbg_log_rx_data_frame(priv, len, header);
983 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
984 rx_status.signal, (unsigned long long)rx_status.mactime);
985
986 /*
987 * "antenna number"
988 *
989 * It seems that the antenna field in the phy flags value
990 * is actually a bit field. This is undefined by radiotap,
991 * it wants an actual antenna number but I always get "7"
992 * for most legacy frames I receive indicating that the
993 * same frame was received on all three RX chains.
994 *
995 * I think this field should be removed in favor of a
996 * new 802.11n radiotap field "RX chains" that is defined
997 * as a bitmask.
998 */
999 rx_status.antenna =
1000 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
1001 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
1002
1003 /* set the preamble flag if appropriate */
1004 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
1005 rx_status.flag |= RX_FLAG_SHORTPRE;
1006
1007 /* Set up the HT phy flags */
1008 if (rate_n_flags & RATE_MCS_HT_MSK)
1009 rx_status.flag |= RX_FLAG_HT;
1010 if (rate_n_flags & RATE_MCS_HT40_MSK)
1011 rx_status.flag |= RX_FLAG_40MHZ;
1012 if (rate_n_flags & RATE_MCS_SGI_MSK)
1013 rx_status.flag |= RX_FLAG_SHORT_GI;
1014
1015 iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
1016 rxb, &rx_status);
1017}
1018
1019/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
1020 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
1021void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
1022 struct iwl_rx_mem_buffer *rxb)
1023{
1024 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1025 priv->_agn.last_phy_res_valid = true;
1026 memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
1027 sizeof(struct iwl_rx_phy_res));
1028}
1029
1030static int iwl_get_single_channel_for_scan(struct iwl_priv *priv, 1007static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
1031 struct ieee80211_vif *vif, 1008 struct ieee80211_vif *vif,
1032 enum ieee80211_band band, 1009 enum ieee80211_band band,
@@ -1098,7 +1075,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
1098 if (chan->band != band) 1075 if (chan->band != band)
1099 continue; 1076 continue;
1100 1077
1101 channel = ieee80211_frequency_to_channel(chan->center_freq); 1078 channel = chan->hw_value;
1102 scan_ch->channel = cpu_to_le16(channel); 1079 scan_ch->channel = cpu_to_le16(channel);
1103 1080
1104 ch_info = iwl_get_channel_info(priv, band, channel); 1081 ch_info = iwl_get_channel_info(priv, band, channel);
@@ -1147,15 +1124,26 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
1147 return added; 1124 return added;
1148} 1125}
1149 1126
1150void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) 1127static int iwl_fill_offch_tx(struct iwl_priv *priv, void *data, size_t maxlen)
1128{
1129 struct sk_buff *skb = priv->_agn.offchan_tx_skb;
1130
1131 if (skb->len < maxlen)
1132 maxlen = skb->len;
1133
1134 memcpy(data, skb->data, maxlen);
1135
1136 return maxlen;
1137}
1138
1139int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1151{ 1140{
1152 struct iwl_host_cmd cmd = { 1141 struct iwl_host_cmd cmd = {
1153 .id = REPLY_SCAN_CMD, 1142 .id = REPLY_SCAN_CMD,
1154 .len = sizeof(struct iwl_scan_cmd), 1143 .len = { sizeof(struct iwl_scan_cmd), },
1155 .flags = CMD_SIZE_HUGE,
1156 }; 1144 };
1157 struct iwl_scan_cmd *scan; 1145 struct iwl_scan_cmd *scan;
1158 struct ieee80211_conf *conf = NULL; 1146 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1159 u32 rate_flags = 0; 1147 u32 rate_flags = 0;
1160 u16 cmd_len; 1148 u16 cmd_len;
1161 u16 rx_chain = 0; 1149 u16 rx_chain = 0;
@@ -1167,48 +1155,12 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1167 int chan_mod; 1155 int chan_mod;
1168 u8 active_chains; 1156 u8 active_chains;
1169 u8 scan_tx_antennas = priv->hw_params.valid_tx_ant; 1157 u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
1158 int ret;
1170 1159
1171 conf = ieee80211_get_hw_conf(priv->hw); 1160 lockdep_assert_held(&priv->mutex);
1172
1173 cancel_delayed_work(&priv->scan_check);
1174
1175 if (!iwl_is_ready(priv)) {
1176 IWL_WARN(priv, "request scan called when driver not ready.\n");
1177 goto done;
1178 }
1179
1180 /* Make sure the scan wasn't canceled before this queued work
1181 * was given the chance to run... */
1182 if (!test_bit(STATUS_SCANNING, &priv->status))
1183 goto done;
1184
1185 /* This should never be called or scheduled if there is currently
1186 * a scan active in the hardware. */
1187 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
1188 IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests in parallel. "
1189 "Ignoring second request.\n");
1190 goto done;
1191 }
1192
1193 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
1194 IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n");
1195 goto done;
1196 }
1197
1198 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
1199 IWL_DEBUG_HC(priv, "Scan request while abort pending. Queuing.\n");
1200 goto done;
1201 }
1202
1203 if (iwl_is_rfkill(priv)) {
1204 IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n");
1205 goto done;
1206 }
1207 1161
1208 if (!test_bit(STATUS_READY, &priv->status)) { 1162 if (vif)
1209 IWL_DEBUG_HC(priv, "Scan request while uninitialized. Queuing.\n"); 1163 ctx = iwl_rxon_ctx_from_vif(vif);
1210 goto done;
1211 }
1212 1164
1213 if (!priv->scan_cmd) { 1165 if (!priv->scan_cmd) {
1214 priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) + 1166 priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
@@ -1216,7 +1168,7 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1216 if (!priv->scan_cmd) { 1168 if (!priv->scan_cmd) {
1217 IWL_DEBUG_SCAN(priv, 1169 IWL_DEBUG_SCAN(priv,
1218 "fail to allocate memory for scan\n"); 1170 "fail to allocate memory for scan\n");
1219 goto done; 1171 return -ENOMEM;
1220 } 1172 }
1221 } 1173 }
1222 scan = priv->scan_cmd; 1174 scan = priv->scan_cmd;
@@ -1225,20 +1177,25 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1225 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; 1177 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
1226 scan->quiet_time = IWL_ACTIVE_QUIET_TIME; 1178 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
1227 1179
1228 if (iwl_is_associated(priv)) { 1180 if (priv->scan_type != IWL_SCAN_OFFCH_TX &&
1181 iwl_is_any_associated(priv)) {
1229 u16 interval = 0; 1182 u16 interval = 0;
1230 u32 extra; 1183 u32 extra;
1231 u32 suspend_time = 100; 1184 u32 suspend_time = 100;
1232 u32 scan_suspend_time = 100; 1185 u32 scan_suspend_time = 100;
1233 unsigned long flags;
1234 1186
1235 IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); 1187 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
1236 spin_lock_irqsave(&priv->lock, flags); 1188 switch (priv->scan_type) {
1237 if (priv->is_internal_short_scan) 1189 case IWL_SCAN_OFFCH_TX:
1190 WARN_ON(1);
1191 break;
1192 case IWL_SCAN_RADIO_RESET:
1238 interval = 0; 1193 interval = 0;
1239 else 1194 break;
1195 case IWL_SCAN_NORMAL:
1240 interval = vif->bss_conf.beacon_int; 1196 interval = vif->bss_conf.beacon_int;
1241 spin_unlock_irqrestore(&priv->lock, flags); 1197 break;
1198 }
1242 1199
1243 scan->suspend_time = 0; 1200 scan->suspend_time = 0;
1244 scan->max_out_time = cpu_to_le32(200 * 1024); 1201 scan->max_out_time = cpu_to_le32(200 * 1024);
@@ -1251,38 +1208,52 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1251 scan->suspend_time = cpu_to_le32(scan_suspend_time); 1208 scan->suspend_time = cpu_to_le32(scan_suspend_time);
1252 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n", 1209 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
1253 scan_suspend_time, interval); 1210 scan_suspend_time, interval);
1211 } else if (priv->scan_type == IWL_SCAN_OFFCH_TX) {
1212 scan->suspend_time = 0;
1213 scan->max_out_time =
1214 cpu_to_le32(1024 * priv->_agn.offchan_tx_timeout);
1254 } 1215 }
1255 1216
1256 if (priv->is_internal_short_scan) { 1217 switch (priv->scan_type) {
1218 case IWL_SCAN_RADIO_RESET:
1257 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n"); 1219 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
1258 } else if (priv->scan_request->n_ssids) { 1220 break;
1259 int i, p = 0; 1221 case IWL_SCAN_NORMAL:
1260 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); 1222 if (priv->scan_request->n_ssids) {
1261 for (i = 0; i < priv->scan_request->n_ssids; i++) { 1223 int i, p = 0;
1262 /* always does wildcard anyway */ 1224 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
1263 if (!priv->scan_request->ssids[i].ssid_len) 1225 for (i = 0; i < priv->scan_request->n_ssids; i++) {
1264 continue; 1226 /* always does wildcard anyway */
1265 scan->direct_scan[p].id = WLAN_EID_SSID; 1227 if (!priv->scan_request->ssids[i].ssid_len)
1266 scan->direct_scan[p].len = 1228 continue;
1267 priv->scan_request->ssids[i].ssid_len; 1229 scan->direct_scan[p].id = WLAN_EID_SSID;
1268 memcpy(scan->direct_scan[p].ssid, 1230 scan->direct_scan[p].len =
1269 priv->scan_request->ssids[i].ssid, 1231 priv->scan_request->ssids[i].ssid_len;
1270 priv->scan_request->ssids[i].ssid_len); 1232 memcpy(scan->direct_scan[p].ssid,
1271 n_probes++; 1233 priv->scan_request->ssids[i].ssid,
1272 p++; 1234 priv->scan_request->ssids[i].ssid_len);
1273 } 1235 n_probes++;
1274 is_active = true; 1236 p++;
1275 } else 1237 }
1276 IWL_DEBUG_SCAN(priv, "Start passive scan.\n"); 1238 is_active = true;
1239 } else
1240 IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
1241 break;
1242 case IWL_SCAN_OFFCH_TX:
1243 IWL_DEBUG_SCAN(priv, "Start offchannel TX scan.\n");
1244 break;
1245 }
1277 1246
1278 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; 1247 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
1279 scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id; 1248 scan->tx_cmd.sta_id = ctx->bcast_sta_id;
1280 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 1249 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
1281 1250
1282 switch (priv->scan_band) { 1251 switch (priv->scan_band) {
1283 case IEEE80211_BAND_2GHZ: 1252 case IEEE80211_BAND_2GHZ:
1284 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; 1253 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
1285 chan_mod = le32_to_cpu(priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_MSK) 1254 chan_mod = le32_to_cpu(
1255 priv->contexts[IWL_RXON_CTX_BSS].active.flags &
1256 RXON_FLG_CHANNEL_MODE_MSK)
1286 >> RXON_FLG_CHANNEL_MODE_POS; 1257 >> RXON_FLG_CHANNEL_MODE_POS;
1287 if (chan_mod == CHANNEL_MODE_PURE_40) { 1258 if (chan_mod == CHANNEL_MODE_PURE_40) {
1288 rate = IWL_RATE_6M_PLCP; 1259 rate = IWL_RATE_6M_PLCP;
@@ -1290,42 +1261,61 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1290 rate = IWL_RATE_1M_PLCP; 1261 rate = IWL_RATE_1M_PLCP;
1291 rate_flags = RATE_MCS_CCK_MSK; 1262 rate_flags = RATE_MCS_CCK_MSK;
1292 } 1263 }
1293 scan->good_CRC_th = IWL_GOOD_CRC_TH_DISABLED; 1264 /*
1265 * Internal scans are passive, so we can indiscriminately set
1266 * the BT ignore flag on 2.4 GHz since it applies to TX only.
1267 */
1268 if (priv->cfg->bt_params &&
1269 priv->cfg->bt_params->advanced_bt_coexist)
1270 scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
1294 break; 1271 break;
1295 case IEEE80211_BAND_5GHZ: 1272 case IEEE80211_BAND_5GHZ:
1296 rate = IWL_RATE_6M_PLCP; 1273 rate = IWL_RATE_6M_PLCP;
1297 /*
1298 * If active scanning is requested but a certain channel is
1299 * marked passive, we can do active scanning if we detect
1300 * transmissions.
1301 *
1302 * There is an issue with some firmware versions that triggers
1303 * a sysassert on a "good CRC threshold" of zero (== disabled),
1304 * on a radar channel even though this means that we should NOT
1305 * send probes.
1306 *
1307 * The "good CRC threshold" is the number of frames that we
1308 * need to receive during our dwell time on a channel before
1309 * sending out probes -- setting this to a huge value will
1310 * mean we never reach it, but at the same time work around
1311 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
1312 * here instead of IWL_GOOD_CRC_TH_DISABLED.
1313 */
1314 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
1315 IWL_GOOD_CRC_TH_NEVER;
1316 break; 1274 break;
1317 default: 1275 default:
1318 IWL_WARN(priv, "Invalid scan band count\n"); 1276 IWL_WARN(priv, "Invalid scan band\n");
1319 goto done; 1277 return -EIO;
1320 } 1278 }
1321 1279
1280 /*
1281 * If active scanning is requested but a certain channel is
1282 * marked passive, we can do active scanning if we detect
1283 * transmissions.
1284 *
1285 * There is an issue with some firmware versions that triggers
1286 * a sysassert on a "good CRC threshold" of zero (== disabled),
1287 * on a radar channel even though this means that we should NOT
1288 * send probes.
1289 *
1290 * The "good CRC threshold" is the number of frames that we
1291 * need to receive during our dwell time on a channel before
1292 * sending out probes -- setting this to a huge value will
1293 * mean we never reach it, but at the same time work around
1294 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
1295 * here instead of IWL_GOOD_CRC_TH_DISABLED.
1296 *
1297 * This was fixed in later versions along with some other
1298 * scan changes, and the threshold behaves as a flag in those
1299 * versions.
1300 */
1301 if (priv->new_scan_threshold_behaviour)
1302 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
1303 IWL_GOOD_CRC_TH_DISABLED;
1304 else
1305 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
1306 IWL_GOOD_CRC_TH_NEVER;
1307
1322 band = priv->scan_band; 1308 band = priv->scan_band;
1323 1309
1324 if (priv->cfg->scan_rx_antennas[band]) 1310 if (priv->cfg->scan_rx_antennas[band])
1325 rx_ant = priv->cfg->scan_rx_antennas[band]; 1311 rx_ant = priv->cfg->scan_rx_antennas[band];
1326 1312
1327 if (priv->cfg->scan_tx_antennas[band]) 1313 if (band == IEEE80211_BAND_2GHZ &&
1328 scan_tx_antennas = priv->cfg->scan_tx_antennas[band]; 1314 priv->cfg->bt_params &&
1315 priv->cfg->bt_params->advanced_bt_coexist) {
1316 /* transmit 2.4 GHz probes only on first antenna */
1317 scan_tx_antennas = first_antenna(scan_tx_antennas);
1318 }
1329 1319
1330 priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band], 1320 priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band],
1331 scan_tx_antennas); 1321 scan_tx_antennas);
@@ -1345,73 +1335,118 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1345 1335
1346 rx_ant = first_antenna(active_chains); 1336 rx_ant = first_antenna(active_chains);
1347 } 1337 }
1338 if (priv->cfg->bt_params &&
1339 priv->cfg->bt_params->advanced_bt_coexist &&
1340 priv->bt_full_concurrent) {
1341 /* operated as 1x1 in full concurrency mode */
1342 rx_ant = first_antenna(rx_ant);
1343 }
1344
1348 /* MIMO is not used here, but value is required */ 1345 /* MIMO is not used here, but value is required */
1349 rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS; 1346 rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
1350 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; 1347 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
1351 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; 1348 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
1352 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; 1349 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
1353 scan->rx_chain = cpu_to_le16(rx_chain); 1350 scan->rx_chain = cpu_to_le16(rx_chain);
1354 if (!priv->is_internal_short_scan) { 1351 switch (priv->scan_type) {
1352 case IWL_SCAN_NORMAL:
1355 cmd_len = iwl_fill_probe_req(priv, 1353 cmd_len = iwl_fill_probe_req(priv,
1356 (struct ieee80211_mgmt *)scan->data, 1354 (struct ieee80211_mgmt *)scan->data,
1357 vif->addr, 1355 vif->addr,
1358 priv->scan_request->ie, 1356 priv->scan_request->ie,
1359 priv->scan_request->ie_len, 1357 priv->scan_request->ie_len,
1360 IWL_MAX_SCAN_SIZE - sizeof(*scan)); 1358 IWL_MAX_SCAN_SIZE - sizeof(*scan));
1361 } else { 1359 break;
1360 case IWL_SCAN_RADIO_RESET:
1362 /* use bcast addr, will not be transmitted but must be valid */ 1361 /* use bcast addr, will not be transmitted but must be valid */
1363 cmd_len = iwl_fill_probe_req(priv, 1362 cmd_len = iwl_fill_probe_req(priv,
1364 (struct ieee80211_mgmt *)scan->data, 1363 (struct ieee80211_mgmt *)scan->data,
1365 iwl_bcast_addr, NULL, 0, 1364 iwl_bcast_addr, NULL, 0,
1366 IWL_MAX_SCAN_SIZE - sizeof(*scan)); 1365 IWL_MAX_SCAN_SIZE - sizeof(*scan));
1367 1366 break;
1367 case IWL_SCAN_OFFCH_TX:
1368 cmd_len = iwl_fill_offch_tx(priv, scan->data,
1369 IWL_MAX_SCAN_SIZE
1370 - sizeof(*scan)
1371 - sizeof(struct iwl_scan_channel));
1372 scan->scan_flags |= IWL_SCAN_FLAGS_ACTION_FRAME_TX;
1373 break;
1374 default:
1375 BUG();
1368 } 1376 }
1369 scan->tx_cmd.len = cpu_to_le16(cmd_len); 1377 scan->tx_cmd.len = cpu_to_le16(cmd_len);
1370 1378
1371 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK | 1379 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
1372 RXON_FILTER_BCON_AWARE_MSK); 1380 RXON_FILTER_BCON_AWARE_MSK);
1373 1381
1374 if (priv->is_internal_short_scan) { 1382 switch (priv->scan_type) {
1383 case IWL_SCAN_RADIO_RESET:
1375 scan->channel_count = 1384 scan->channel_count =
1376 iwl_get_single_channel_for_scan(priv, vif, band, 1385 iwl_get_single_channel_for_scan(priv, vif, band,
1377 (void *)&scan->data[le16_to_cpu( 1386 (void *)&scan->data[cmd_len]);
1378 scan->tx_cmd.len)]); 1387 break;
1379 } else { 1388 case IWL_SCAN_NORMAL:
1380 scan->channel_count = 1389 scan->channel_count =
1381 iwl_get_channels_for_scan(priv, vif, band, 1390 iwl_get_channels_for_scan(priv, vif, band,
1382 is_active, n_probes, 1391 is_active, n_probes,
1383 (void *)&scan->data[le16_to_cpu( 1392 (void *)&scan->data[cmd_len]);
1384 scan->tx_cmd.len)]); 1393 break;
1394 case IWL_SCAN_OFFCH_TX: {
1395 struct iwl_scan_channel *scan_ch;
1396
1397 scan->channel_count = 1;
1398
1399 scan_ch = (void *)&scan->data[cmd_len];
1400 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
1401 scan_ch->channel =
1402 cpu_to_le16(priv->_agn.offchan_tx_chan->hw_value);
1403 scan_ch->active_dwell =
1404 cpu_to_le16(priv->_agn.offchan_tx_timeout);
1405 scan_ch->passive_dwell = 0;
1406
1407 /* Set txpower levels to defaults */
1408 scan_ch->dsp_atten = 110;
1409
1410 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
1411 * power level:
1412 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
1413 */
1414 if (priv->_agn.offchan_tx_chan->band == IEEE80211_BAND_5GHZ)
1415 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
1416 else
1417 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
1418 }
1419 break;
1385 } 1420 }
1421
1386 if (scan->channel_count == 0) { 1422 if (scan->channel_count == 0) {
1387 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); 1423 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
1388 goto done; 1424 return -EIO;
1389 } 1425 }
1390 1426
1391 cmd.len += le16_to_cpu(scan->tx_cmd.len) + 1427 cmd.len[0] += le16_to_cpu(scan->tx_cmd.len) +
1392 scan->channel_count * sizeof(struct iwl_scan_channel); 1428 scan->channel_count * sizeof(struct iwl_scan_channel);
1393 cmd.data = scan; 1429 cmd.data[0] = scan;
1394 scan->len = cpu_to_le16(cmd.len); 1430 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
1431 scan->len = cpu_to_le16(cmd.len[0]);
1395 1432
1433 /* set scan bit here for PAN params */
1396 set_bit(STATUS_SCAN_HW, &priv->status); 1434 set_bit(STATUS_SCAN_HW, &priv->status);
1397 if (iwl_send_cmd_sync(priv, &cmd))
1398 goto done;
1399 1435
1400 queue_delayed_work(priv->workqueue, &priv->scan_check, 1436 if (priv->cfg->ops->hcmd->set_pan_params) {
1401 IWL_SCAN_CHECK_WATCHDOG); 1437 ret = priv->cfg->ops->hcmd->set_pan_params(priv);
1402 1438 if (ret)
1403 return; 1439 return ret;
1404 1440 }
1405 done: 1441
1406 /* Cannot perform scan. Make sure we clear scanning 1442 ret = iwl_send_cmd_sync(priv, &cmd);
1407 * bits from status so next scan request can be performed. 1443 if (ret) {
1408 * If we don't clear scanning status bit here all next scan 1444 clear_bit(STATUS_SCAN_HW, &priv->status);
1409 * will fail 1445 if (priv->cfg->ops->hcmd->set_pan_params)
1410 */ 1446 priv->cfg->ops->hcmd->set_pan_params(priv);
1411 clear_bit(STATUS_SCAN_HW, &priv->status); 1447 }
1412 clear_bit(STATUS_SCANNING, &priv->status); 1448
1413 /* inform mac80211 scan aborted */ 1449 return ret;
1414 queue_work(priv->workqueue, &priv->abort_scan);
1415} 1450}
1416 1451
1417int iwlagn_manage_ibss_station(struct iwl_priv *priv, 1452int iwlagn_manage_ibss_station(struct iwl_priv *priv,
@@ -1420,8 +1455,9 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
1420 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 1455 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1421 1456
1422 if (add) 1457 if (add)
1423 return iwl_add_bssid_station(priv, vif->bss_conf.bssid, true, 1458 return iwlagn_add_bssid_station(priv, vif_priv->ctx,
1424 &vif_priv->ibss_bssid_sta_id); 1459 vif->bss_conf.bssid,
1460 &vif_priv->ibss_bssid_sta_id);
1425 return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id, 1461 return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
1426 vif->bss_conf.bssid); 1462 vif->bss_conf.bssid);
1427} 1463}
@@ -1453,7 +1489,7 @@ int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv)
1453 1489
1454 /* waiting for all the tx frames complete might take a while */ 1490 /* waiting for all the tx frames complete might take a while */
1455 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) { 1491 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
1456 if (cnt == IWL_CMD_QUEUE_NUM) 1492 if (cnt == priv->cmd_queue)
1457 continue; 1493 continue;
1458 txq = &priv->txq[cnt]; 1494 txq = &priv->txq[cnt];
1459 q = &txq->q; 1495 q = &txq->q;
@@ -1484,9 +1520,9 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
1484 struct iwl_txfifo_flush_cmd flush_cmd; 1520 struct iwl_txfifo_flush_cmd flush_cmd;
1485 struct iwl_host_cmd cmd = { 1521 struct iwl_host_cmd cmd = {
1486 .id = REPLY_TXFIFO_FLUSH, 1522 .id = REPLY_TXFIFO_FLUSH,
1487 .len = sizeof(struct iwl_txfifo_flush_cmd), 1523 .len = { sizeof(struct iwl_txfifo_flush_cmd), },
1488 .flags = CMD_SYNC, 1524 .flags = CMD_SYNC,
1489 .data = &flush_cmd, 1525 .data = { &flush_cmd, },
1490 }; 1526 };
1491 1527
1492 might_sleep(); 1528 might_sleep();
@@ -1518,3 +1554,842 @@ done:
1518 ieee80211_wake_queues(priv->hw); 1554 ieee80211_wake_queues(priv->hw);
1519 mutex_unlock(&priv->mutex); 1555 mutex_unlock(&priv->mutex);
1520} 1556}
1557
1558/*
1559 * BT coex
1560 */
1561/*
1562 * Macros to access the lookup table.
1563 *
1564 * The lookup table has 7 inputs: bt3_prio, bt3_txrx, bt_rf_act, wifi_req,
 1565 * wifi_prio, wifi_txrx and wifi_sh_ant_req.
1566 *
1567 * It has three outputs: WLAN_ACTIVE, WLAN_KILL and ANT_SWITCH
1568 *
1569 * The format is that "registers" 8 through 11 contain the WLAN_ACTIVE bits
1570 * one after another in 32-bit registers, and "registers" 0 through 7 contain
1571 * the WLAN_KILL and ANT_SWITCH bits interleaved (in that order).
1572 *
1573 * These macros encode that format.
1574 */
1575#define LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, wifi_req, wifi_prio, \
1576 wifi_txrx, wifi_sh_ant_req) \
1577 (bt3_prio | (bt3_txrx << 1) | (bt_rf_act << 2) | (wifi_req << 3) | \
1578 (wifi_prio << 4) | (wifi_txrx << 5) | (wifi_sh_ant_req << 6))
1579
1580#define LUT_PTA_WLAN_ACTIVE_OP(lut, op, val) \
1581 lut[8 + ((val) >> 5)] op (cpu_to_le32(BIT((val) & 0x1f)))
1582#define LUT_TEST_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1583 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1584 (!!(LUT_PTA_WLAN_ACTIVE_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, \
1585 bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
1586 wifi_sh_ant_req))))
1587#define LUT_SET_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1588 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1589 LUT_PTA_WLAN_ACTIVE_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, \
1590 bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
1591 wifi_sh_ant_req))
1592#define LUT_CLEAR_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, \
1593 wifi_req, wifi_prio, wifi_txrx, \
1594 wifi_sh_ant_req) \
1595 LUT_PTA_WLAN_ACTIVE_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, \
1596 bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
1597 wifi_sh_ant_req))
1598
1599#define LUT_WLAN_KILL_OP(lut, op, val) \
1600 lut[(val) >> 4] op (cpu_to_le32(BIT(((val) << 1) & 0x1e)))
1601#define LUT_TEST_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1602 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1603 (!!(LUT_WLAN_KILL_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1604 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))))
1605#define LUT_SET_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1606 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1607 LUT_WLAN_KILL_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1608 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
1609#define LUT_CLEAR_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1610 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1611 LUT_WLAN_KILL_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1612 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
1613
1614#define LUT_ANT_SWITCH_OP(lut, op, val) \
1615 lut[(val) >> 4] op (cpu_to_le32(BIT((((val) << 1) & 0x1e) + 1)))
1616#define LUT_TEST_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1617 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1618 (!!(LUT_ANT_SWITCH_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1619 wifi_req, wifi_prio, wifi_txrx, \
1620 wifi_sh_ant_req))))
1621#define LUT_SET_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1622 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1623 LUT_ANT_SWITCH_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1624 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
1625#define LUT_CLEAR_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1626 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1627 LUT_ANT_SWITCH_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1628 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
1629
1630static const __le32 iwlagn_def_3w_lookup[12] = {
1631 cpu_to_le32(0xaaaaaaaa),
1632 cpu_to_le32(0xaaaaaaaa),
1633 cpu_to_le32(0xaeaaaaaa),
1634 cpu_to_le32(0xaaaaaaaa),
1635 cpu_to_le32(0xcc00ff28),
1636 cpu_to_le32(0x0000aaaa),
1637 cpu_to_le32(0xcc00aaaa),
1638 cpu_to_le32(0x0000aaaa),
1639 cpu_to_le32(0xc0004000),
1640 cpu_to_le32(0x00004000),
1641 cpu_to_le32(0xf0005000),
1642 cpu_to_le32(0xf0005000),
1643};
1644
1645static const __le32 iwlagn_concurrent_lookup[12] = {
1646 cpu_to_le32(0xaaaaaaaa),
1647 cpu_to_le32(0xaaaaaaaa),
1648 cpu_to_le32(0xaaaaaaaa),
1649 cpu_to_le32(0xaaaaaaaa),
1650 cpu_to_le32(0xaaaaaaaa),
1651 cpu_to_le32(0xaaaaaaaa),
1652 cpu_to_le32(0xaaaaaaaa),
1653 cpu_to_le32(0xaaaaaaaa),
1654 cpu_to_le32(0x00000000),
1655 cpu_to_le32(0x00000000),
1656 cpu_to_le32(0x00000000),
1657 cpu_to_le32(0x00000000),
1658};
1659
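
The lookup-table comment above describes how the seven coexistence inputs are packed into a single index; the worked example below shows which word and bit the macros consult for one input combination. This is a sketch for illustration only; the helper name is hypothetical and nothing in it is part of the patch.

	static inline bool iwlagn_lut_example(const __le32 lut[12])
	{
		/*
		 * For bt3_prio=1, bt3_txrx=0, bt_rf_act=1, wifi_req=0,
		 * wifi_prio=1, wifi_txrx=0, wifi_sh_ant_req=0,
		 * LUT_VALUE() = 1 | (1 << 2) | (1 << 4) = 21, so:
		 *  - the WLAN_ACTIVE bit is lut[8 + (21 >> 5)] = lut[8],
		 *    bit 21 & 0x1f = 21;
		 *  - the WLAN_KILL bit is lut[21 >> 4] = lut[1],
		 *    bit (21 << 1) & 0x1e = 10;
		 *  - the ANT_SWITCH bit is the adjacent bit 11 in that word.
		 */
		return LUT_TEST_PTA_WLAN_ACTIVE(lut, 1, 0, 1, 0, 1, 0, 0) &&
		       !LUT_TEST_WLAN_KILL(lut, 1, 0, 1, 0, 1, 0, 0) &&
		       !LUT_TEST_ANT_SWITCH(lut, 1, 0, 1, 0, 1, 0, 0);
	}
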
1660void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
1661{
1662 struct iwl_basic_bt_cmd basic = {
1663 .max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
1664 .bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
1665 .bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
1666 .bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
1667 };
1668 struct iwl6000_bt_cmd bt_cmd_6000;
1669 struct iwl2000_bt_cmd bt_cmd_2000;
1670 int ret;
1671
1672 BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
1673 sizeof(basic.bt3_lookup_table));
1674
1675 if (priv->cfg->bt_params) {
1676 if (priv->cfg->bt_params->bt_session_2) {
1677 bt_cmd_2000.prio_boost = cpu_to_le32(
1678 priv->cfg->bt_params->bt_prio_boost);
1679 bt_cmd_2000.tx_prio_boost = 0;
1680 bt_cmd_2000.rx_prio_boost = 0;
1681 } else {
1682 bt_cmd_6000.prio_boost =
1683 priv->cfg->bt_params->bt_prio_boost;
1684 bt_cmd_6000.tx_prio_boost = 0;
1685 bt_cmd_6000.rx_prio_boost = 0;
1686 }
1687 } else {
1688 IWL_ERR(priv, "failed to construct BT Coex Config\n");
1689 return;
1690 }
1691
1692 basic.kill_ack_mask = priv->kill_ack_mask;
1693 basic.kill_cts_mask = priv->kill_cts_mask;
1694 basic.valid = priv->bt_valid;
1695
1696 /*
1697 * Configure BT coex mode to "no coexistence" when the
1698 * user disabled BT coexistence, we have no interface
1699 * (might be in monitor mode), or the interface is in
1700 * IBSS mode (no proper uCode support for coex then).
1701 */
1702 if (!bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) {
1703 basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
1704 } else {
1705 basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
1706 IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
1707 if (priv->cfg->bt_params &&
1708 priv->cfg->bt_params->bt_sco_disable)
1709 basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
1710
1711 if (priv->bt_ch_announce)
1712 basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
1713 IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", basic.flags);
1714 }
1715 priv->bt_enable_flag = basic.flags;
1716 if (priv->bt_full_concurrent)
1717 memcpy(basic.bt3_lookup_table, iwlagn_concurrent_lookup,
1718 sizeof(iwlagn_concurrent_lookup));
1719 else
1720 memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup,
1721 sizeof(iwlagn_def_3w_lookup));
1722
1723 IWL_DEBUG_INFO(priv, "BT coex %s in %s mode\n",
1724 basic.flags ? "active" : "disabled",
1725 priv->bt_full_concurrent ?
1726 "full concurrency" : "3-wire");
1727
1728 if (priv->cfg->bt_params->bt_session_2) {
1729 memcpy(&bt_cmd_2000.basic, &basic,
1730 sizeof(basic));
1731 ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1732 sizeof(bt_cmd_2000), &bt_cmd_2000);
1733 } else {
1734 memcpy(&bt_cmd_6000.basic, &basic,
1735 sizeof(basic));
1736 ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1737 sizeof(bt_cmd_6000), &bt_cmd_6000);
1738 }
1739 if (ret)
1740 IWL_ERR(priv, "failed to send BT Coex Config\n");
1741
1742}
1743
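/*
 * Rough summary of the mapping implemented by iwlagn_bt_traffic_change_work()
 * below (the switch statement is authoritative):
 *
 *	BT traffic load NONE            -> SMPS automatic (dynamic if BT is up)
 *	BT traffic load LOW             -> SMPS dynamic
 *	BT traffic load HIGH/CONTINUOUS -> SMPS static
 *
 * The SMPS request is only issued for station interfaces, and nothing is done
 * while a hardware scan is in progress; the work is rescheduled when the scan
 * completes.
 */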
1744static void iwlagn_bt_traffic_change_work(struct work_struct *work)
1745{
1746 struct iwl_priv *priv =
1747 container_of(work, struct iwl_priv, bt_traffic_change_work);
1748 struct iwl_rxon_context *ctx;
1749 int smps_request = -1;
1750
1751 if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
1752 /* bt coex disabled */
1753 return;
1754 }
1755
1756	/*
1757	 * Note: bt_traffic_load can be overridden by the scan complete and
1758	 * coex profile notifications. Ignore that here; the only consequence
1759	 * is that the debug print below may not match the actual state.
1760	 */
1761 IWL_DEBUG_INFO(priv, "BT traffic load changes: %d\n",
1762 priv->bt_traffic_load);
1763
1764 switch (priv->bt_traffic_load) {
1765 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1766 if (priv->bt_status)
1767 smps_request = IEEE80211_SMPS_DYNAMIC;
1768 else
1769 smps_request = IEEE80211_SMPS_AUTOMATIC;
1770 break;
1771 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1772 smps_request = IEEE80211_SMPS_DYNAMIC;
1773 break;
1774 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1775 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1776 smps_request = IEEE80211_SMPS_STATIC;
1777 break;
1778 default:
1779 IWL_ERR(priv, "Invalid BT traffic load: %d\n",
1780 priv->bt_traffic_load);
1781 break;
1782 }
1783
1784 mutex_lock(&priv->mutex);
1785
1786	/*
1787	 * We cannot send commands to the firmware while scanning; when the
1788	 * scan completes, this work will be scheduled again. The check is done
1789	 * with the mutex held so that no new scan request can slip in. We check
1790	 * STATUS_SCAN_HW rather than STATUS_SCANNING to avoid a race where this
1791	 * work is queued twice by different notifications but then does nothing.
1792	 */
1793 if (test_bit(STATUS_SCAN_HW, &priv->status))
1794 goto out;
1795
1796 if (priv->cfg->ops->lib->update_chain_flags)
1797 priv->cfg->ops->lib->update_chain_flags(priv);
1798
1799 if (smps_request != -1) {
1800 for_each_context(priv, ctx) {
1801 if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION)
1802 ieee80211_request_smps(ctx->vif, smps_request);
1803 }
1804 }
1805out:
1806 mutex_unlock(&priv->mutex);
1807}
1808
1809static void iwlagn_print_uartmsg(struct iwl_priv *priv,
1810 struct iwl_bt_uart_msg *uart_msg)
1811{
1812 IWL_DEBUG_NOTIF(priv, "Message Type = 0x%X, SSN = 0x%X, "
1813 "Update Req = 0x%X",
1814 (BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
1815 BT_UART_MSG_FRAME1MSGTYPE_POS,
1816 (BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >>
1817 BT_UART_MSG_FRAME1SSN_POS,
1818 (BT_UART_MSG_FRAME1UPDATEREQ_MSK & uart_msg->frame1) >>
1819 BT_UART_MSG_FRAME1UPDATEREQ_POS);
1820
1821 IWL_DEBUG_NOTIF(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
1822 "Chl_SeqN = 0x%X, In band = 0x%X",
1823 (BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
1824 BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
1825 (BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >>
1826 BT_UART_MSG_FRAME2TRAFFICLOAD_POS,
1827 (BT_UART_MSG_FRAME2CHLSEQN_MSK & uart_msg->frame2) >>
1828 BT_UART_MSG_FRAME2CHLSEQN_POS,
1829 (BT_UART_MSG_FRAME2INBAND_MSK & uart_msg->frame2) >>
1830 BT_UART_MSG_FRAME2INBAND_POS);
1831
1832 IWL_DEBUG_NOTIF(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
1833 "ACL = 0x%X, Master = 0x%X, OBEX = 0x%X",
1834 (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
1835 BT_UART_MSG_FRAME3SCOESCO_POS,
1836 (BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >>
1837 BT_UART_MSG_FRAME3SNIFF_POS,
1838 (BT_UART_MSG_FRAME3A2DP_MSK & uart_msg->frame3) >>
1839 BT_UART_MSG_FRAME3A2DP_POS,
1840 (BT_UART_MSG_FRAME3ACL_MSK & uart_msg->frame3) >>
1841 BT_UART_MSG_FRAME3ACL_POS,
1842 (BT_UART_MSG_FRAME3MASTER_MSK & uart_msg->frame3) >>
1843 BT_UART_MSG_FRAME3MASTER_POS,
1844 (BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
1845 BT_UART_MSG_FRAME3OBEX_POS);
1846
1847 IWL_DEBUG_NOTIF(priv, "Idle duration = 0x%X",
1848 (BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
1849 BT_UART_MSG_FRAME4IDLEDURATION_POS);
1850
1851 IWL_DEBUG_NOTIF(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
1852 "eSCO Retransmissions = 0x%X",
1853 (BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
1854 BT_UART_MSG_FRAME5TXACTIVITY_POS,
1855 (BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >>
1856 BT_UART_MSG_FRAME5RXACTIVITY_POS,
1857 (BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
1858 BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);
1859
1860 IWL_DEBUG_NOTIF(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X",
1861 (BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
1862 BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
1863 (BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
1864 BT_UART_MSG_FRAME6DISCOVERABLE_POS);
1865
1866 IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Page = "
1867 "0x%X, Inquiry = 0x%X, Connectable = 0x%X",
1868 (BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
1869 BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
1870 (BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >>
1871 BT_UART_MSG_FRAME7PAGE_POS,
1872 (BT_UART_MSG_FRAME7INQUIRY_MSK & uart_msg->frame7) >>
1873 BT_UART_MSG_FRAME7INQUIRY_POS,
1874 (BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
1875 BT_UART_MSG_FRAME7CONNECTABLE_POS);
1876}
1877
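/*
 * Select the ACK/CTS kill masks from the BT activity reported in the UART
 * message: the SCO-specific masks when an SCO/eSCO link is present, the
 * defaults otherwise. If the masks change, the bt_runtime_config work is
 * queued so an updated BT config command is sent to the uCode.
 */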
1878static void iwlagn_set_kill_msk(struct iwl_priv *priv,
1879 struct iwl_bt_uart_msg *uart_msg)
1880{
1881 u8 kill_msk;
1882 static const __le32 bt_kill_ack_msg[2] = {
1883 IWLAGN_BT_KILL_ACK_MASK_DEFAULT,
1884 IWLAGN_BT_KILL_ACK_CTS_MASK_SCO };
1885 static const __le32 bt_kill_cts_msg[2] = {
1886 IWLAGN_BT_KILL_CTS_MASK_DEFAULT,
1887 IWLAGN_BT_KILL_ACK_CTS_MASK_SCO };
1888
1889 kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3)
1890 ? 1 : 0;
1891 if (priv->kill_ack_mask != bt_kill_ack_msg[kill_msk] ||
1892 priv->kill_cts_mask != bt_kill_cts_msg[kill_msk]) {
1893 priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK;
1894 priv->kill_ack_mask = bt_kill_ack_msg[kill_msk];
1895 priv->bt_valid |= IWLAGN_BT_VALID_KILL_CTS_MASK;
1896 priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];
1897
1898 /* schedule to send runtime bt_config */
1899 queue_work(priv->workqueue, &priv->bt_runtime_config);
1900 }
1901}
1902
1903void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
1904 struct iwl_rx_mem_buffer *rxb)
1905{
1906 unsigned long flags;
1907 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1908 struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
1909 struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;
1910
1911 if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
1912 /* bt coex disabled */
1913 return;
1914 }
1915
1916 IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n");
1917 IWL_DEBUG_NOTIF(priv, " status: %d\n", coex->bt_status);
1918 IWL_DEBUG_NOTIF(priv, " traffic load: %d\n", coex->bt_traffic_load);
1919 IWL_DEBUG_NOTIF(priv, " CI compliance: %d\n",
1920 coex->bt_ci_compliance);
1921 iwlagn_print_uartmsg(priv, uart_msg);
1922
1923 priv->last_bt_traffic_load = priv->bt_traffic_load;
1924 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
1925 if (priv->bt_status != coex->bt_status ||
1926 priv->last_bt_traffic_load != coex->bt_traffic_load) {
1927 if (coex->bt_status) {
1928 /* BT on */
1929 if (!priv->bt_ch_announce)
1930 priv->bt_traffic_load =
1931 IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
1932 else
1933 priv->bt_traffic_load =
1934 coex->bt_traffic_load;
1935 } else {
1936 /* BT off */
1937 priv->bt_traffic_load =
1938 IWL_BT_COEX_TRAFFIC_LOAD_NONE;
1939 }
1940 priv->bt_status = coex->bt_status;
1941 queue_work(priv->workqueue,
1942 &priv->bt_traffic_change_work);
1943 }
1944 }
1945
1946 iwlagn_set_kill_msk(priv, uart_msg);
1947
1948 /* FIXME: based on notification, adjust the prio_boost */
1949
1950 spin_lock_irqsave(&priv->lock, flags);
1951 priv->bt_ci_compliance = coex->bt_ci_compliance;
1952 spin_unlock_irqrestore(&priv->lock, flags);
1953}
1954
1955void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
1956{
1957 iwlagn_rx_handler_setup(priv);
1958 priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] =
1959 iwlagn_bt_coex_profile_notif;
1960}
1961
1962void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv)
1963{
1964 iwlagn_setup_deferred_work(priv);
1965
1966 INIT_WORK(&priv->bt_traffic_change_work,
1967 iwlagn_bt_traffic_change_work);
1968}
1969
1970void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv)
1971{
1972 cancel_work_sync(&priv->bt_traffic_change_work);
1973}
1974
1975static bool is_single_rx_stream(struct iwl_priv *priv)
1976{
1977 return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
1978 priv->current_ht_config.single_chain_sufficient;
1979}
1980
1981#define IWL_NUM_RX_CHAINS_MULTIPLE 3
1982#define IWL_NUM_RX_CHAINS_SINGLE 2
1983#define IWL_NUM_IDLE_CHAINS_DUAL 2
1984#define IWL_NUM_IDLE_CHAINS_SINGLE 1
1985
1986/*
1987 * Determine how many receiver/antenna chains to use.
1988 *
1989 * More chains provide better reception via diversity; fewer chains save
1990 * power at the expense of throughput, though that only matters when we
1991 * are not in powersave to begin with.
1992 *
1993 * MIMO (dual stream) requires at least 2 chains, but works better with 3.
1994 * This does not determine *which* chains to use, just how many.
1995 */
1996static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
1997{
1998 if (priv->cfg->bt_params &&
1999 priv->cfg->bt_params->advanced_bt_coexist &&
2000 (priv->bt_full_concurrent ||
2001 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
2002 /*
2003 * only use chain 'A' in bt high traffic load or
2004 * full concurrency mode
2005 */
2006 return IWL_NUM_RX_CHAINS_SINGLE;
2007 }
2008 /* # of Rx chains to use when expecting MIMO. */
2009 if (is_single_rx_stream(priv))
2010 return IWL_NUM_RX_CHAINS_SINGLE;
2011 else
2012 return IWL_NUM_RX_CHAINS_MULTIPLE;
2013}
2014
2015/*
2016 * When in power-saving mode, use the active count for the idle rx chain
2017 * count, unless the device supports spatial multiplexing power save.
2018 */
2019static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
2020{
2021 /* # Rx chains when idling, depending on SMPS mode */
2022 switch (priv->current_ht_config.smps) {
2023 case IEEE80211_SMPS_STATIC:
2024 case IEEE80211_SMPS_DYNAMIC:
2025 return IWL_NUM_IDLE_CHAINS_SINGLE;
2026 case IEEE80211_SMPS_OFF:
2027 return active_cnt;
2028 default:
2029 WARN(1, "invalid SMPS mode %d",
2030 priv->current_ht_config.smps);
2031 return active_cnt;
2032 }
2033}
2034
2035/* up to 4 chains */
2036static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
2037{
2038 u8 res;
2039 res = (chain_bitmap & BIT(0)) >> 0;
2040 res += (chain_bitmap & BIT(1)) >> 1;
2041 res += (chain_bitmap & BIT(2)) >> 2;
2042 res += (chain_bitmap & BIT(3)) >> 3;
2043 return res;
2044}
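/*
 * Example: a chain bitmap of 0x05 (antennas A and C) yields a count of 2;
 * this is simply a popcount of the low four bits, equivalent to
 * hweight8(chain_bitmap & 0xf).
 */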
2045
2046/**
2047 * iwlagn_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
2048 *
2049 * Selects how many and which Rx receivers/antennas/chains to use.
2050 * Not for use with the scan command; it would put the data in the wrong place.
2051 */
2052void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
2053{
2054 bool is_single = is_single_rx_stream(priv);
2055 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
2056 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
2057 u32 active_chains;
2058 u16 rx_chain;
2059
2060 /* Tell uCode which antennas are actually connected.
2061 * Before first association, we assume all antennas are connected.
2062 * Just after first association, iwl_chain_noise_calibration()
2063 * checks which antennas actually *are* connected. */
2064 if (priv->chain_noise_data.active_chains)
2065 active_chains = priv->chain_noise_data.active_chains;
2066 else
2067 active_chains = priv->hw_params.valid_rx_ant;
2068
2069 if (priv->cfg->bt_params &&
2070 priv->cfg->bt_params->advanced_bt_coexist &&
2071 (priv->bt_full_concurrent ||
2072 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
2073 /*
2074 * only use chain 'A' in bt high traffic load or
2075 * full concurrency mode
2076 */
2077 active_chains = first_antenna(active_chains);
2078 }
2079
2080 rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
2081
2082 /* How many receivers should we use? */
2083 active_rx_cnt = iwl_get_active_rx_chain_count(priv);
2084 idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);
2085
2086
2087	/* correct the rx chain counts according to hw settings
2088	 * and chain noise calibration
2089	 */
2090 valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
2091 if (valid_rx_cnt < active_rx_cnt)
2092 active_rx_cnt = valid_rx_cnt;
2093
2094 if (valid_rx_cnt < idle_rx_cnt)
2095 idle_rx_cnt = valid_rx_cnt;
2096
2097 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
2098 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
2099
2100 ctx->staging.rx_chain = cpu_to_le16(rx_chain);
2101
2102 if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
2103 ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
2104 else
2105 ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
2106
2107 IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
2108 ctx->staging.rx_chain,
2109 active_rx_cnt, idle_rx_cnt);
2110
2111 WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
2112 active_rx_cnt < idle_rx_cnt);
2113}
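/*
 * Worked example (symbolic, not relying on the actual bit positions): with
 * all three antennas connected (active_chains = 0x7), MIMO expected, SMPS
 * off and no BT restriction, the code above ends up with active_rx_cnt = 3
 * and idle_rx_cnt = 3, programs those counts plus the valid mask 0x7 into
 * rx_chain, and sets RXON_RX_CHAIN_MIMO_FORCE_MSK as long as the device is
 * not in power-save (STATUS_POWER_PMI clear).
 */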
2114
2115u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
2116{
2117 int i;
2118 u8 ind = ant;
2119
2120 if (priv->band == IEEE80211_BAND_2GHZ &&
2121 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
2122 return 0;
2123
2124 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
2125 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
2126 if (valid & BIT(ind))
2127 return ind;
2128 }
2129 return ant;
2130}
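/*
 * Example: with valid = 0x3 (antennas A and B usable), successive calls
 * alternate between antenna 0 and antenna 1. On 2.4 GHz with BT traffic
 * load at HIGH or above, the function returns antenna 0 without toggling.
 */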
2131
2132static const char *get_csr_string(int cmd)
2133{
2134 switch (cmd) {
2135 IWL_CMD(CSR_HW_IF_CONFIG_REG);
2136 IWL_CMD(CSR_INT_COALESCING);
2137 IWL_CMD(CSR_INT);
2138 IWL_CMD(CSR_INT_MASK);
2139 IWL_CMD(CSR_FH_INT_STATUS);
2140 IWL_CMD(CSR_GPIO_IN);
2141 IWL_CMD(CSR_RESET);
2142 IWL_CMD(CSR_GP_CNTRL);
2143 IWL_CMD(CSR_HW_REV);
2144 IWL_CMD(CSR_EEPROM_REG);
2145 IWL_CMD(CSR_EEPROM_GP);
2146 IWL_CMD(CSR_OTP_GP_REG);
2147 IWL_CMD(CSR_GIO_REG);
2148 IWL_CMD(CSR_GP_UCODE_REG);
2149 IWL_CMD(CSR_GP_DRIVER_REG);
2150 IWL_CMD(CSR_UCODE_DRV_GP1);
2151 IWL_CMD(CSR_UCODE_DRV_GP2);
2152 IWL_CMD(CSR_LED_REG);
2153 IWL_CMD(CSR_DRAM_INT_TBL_REG);
2154 IWL_CMD(CSR_GIO_CHICKEN_BITS);
2155 IWL_CMD(CSR_ANA_PLL_CFG);
2156 IWL_CMD(CSR_HW_REV_WA_REG);
2157 IWL_CMD(CSR_DBG_HPET_MEM_REG);
2158 default:
2159 return "UNKNOWN";
2160 }
2161}
2162
2163void iwl_dump_csr(struct iwl_priv *priv)
2164{
2165 int i;
2166 static const u32 csr_tbl[] = {
2167 CSR_HW_IF_CONFIG_REG,
2168 CSR_INT_COALESCING,
2169 CSR_INT,
2170 CSR_INT_MASK,
2171 CSR_FH_INT_STATUS,
2172 CSR_GPIO_IN,
2173 CSR_RESET,
2174 CSR_GP_CNTRL,
2175 CSR_HW_REV,
2176 CSR_EEPROM_REG,
2177 CSR_EEPROM_GP,
2178 CSR_OTP_GP_REG,
2179 CSR_GIO_REG,
2180 CSR_GP_UCODE_REG,
2181 CSR_GP_DRIVER_REG,
2182 CSR_UCODE_DRV_GP1,
2183 CSR_UCODE_DRV_GP2,
2184 CSR_LED_REG,
2185 CSR_DRAM_INT_TBL_REG,
2186 CSR_GIO_CHICKEN_BITS,
2187 CSR_ANA_PLL_CFG,
2188 CSR_HW_REV_WA_REG,
2189 CSR_DBG_HPET_MEM_REG
2190 };
2191 IWL_ERR(priv, "CSR values:\n");
2192 IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
2193 "CSR_INT_PERIODIC_REG)\n");
2194 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
2195 IWL_ERR(priv, " %25s: 0X%08x\n",
2196 get_csr_string(csr_tbl[i]),
2197 iwl_read32(priv, csr_tbl[i]));
2198 }
2199}
2200
2201static const char *get_fh_string(int cmd)
2202{
2203 switch (cmd) {
2204 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
2205 IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
2206 IWL_CMD(FH_RSCSR_CHNL0_WPTR);
2207 IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
2208 IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
2209 IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
2210 IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
2211 IWL_CMD(FH_TSSR_TX_STATUS_REG);
2212 IWL_CMD(FH_TSSR_TX_ERROR_REG);
2213 default:
2214 return "UNKNOWN";
2215 }
2216}
2217
2218int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
2219{
2220 int i;
2221#ifdef CONFIG_IWLWIFI_DEBUG
2222 int pos = 0;
2223 size_t bufsz = 0;
2224#endif
2225 static const u32 fh_tbl[] = {
2226 FH_RSCSR_CHNL0_STTS_WPTR_REG,
2227 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
2228 FH_RSCSR_CHNL0_WPTR,
2229 FH_MEM_RCSR_CHNL0_CONFIG_REG,
2230 FH_MEM_RSSR_SHARED_CTRL_REG,
2231 FH_MEM_RSSR_RX_STATUS_REG,
2232 FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
2233 FH_TSSR_TX_STATUS_REG,
2234 FH_TSSR_TX_ERROR_REG
2235 };
2236#ifdef CONFIG_IWLWIFI_DEBUG
2237 if (display) {
2238 bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
2239 *buf = kmalloc(bufsz, GFP_KERNEL);
2240 if (!*buf)
2241 return -ENOMEM;
2242 pos += scnprintf(*buf + pos, bufsz - pos,
2243 "FH register values:\n");
2244 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
2245 pos += scnprintf(*buf + pos, bufsz - pos,
2246 " %34s: 0X%08x\n",
2247 get_fh_string(fh_tbl[i]),
2248 iwl_read_direct32(priv, fh_tbl[i]));
2249 }
2250 return pos;
2251 }
2252#endif
2253 IWL_ERR(priv, "FH register values:\n");
2254 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
2255 IWL_ERR(priv, " %34s: 0X%08x\n",
2256 get_fh_string(fh_tbl[i]),
2257 iwl_read_direct32(priv, fh_tbl[i]));
2258 }
2259 return 0;
2260}
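/*
 * Note: with 'display' set (debug builds only), iwl_dump_fh() returns the
 * number of characters written into a freshly kmalloc()'ed buffer handed
 * back through 'buf'; the caller is then expected to kfree() it. Otherwise
 * the registers are dumped via IWL_ERR and 0 is returned.
 */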
2261
2262/* notification wait support */
2263void iwlagn_init_notification_wait(struct iwl_priv *priv,
2264 struct iwl_notification_wait *wait_entry,
2265 u8 cmd,
2266 void (*fn)(struct iwl_priv *priv,
2267 struct iwl_rx_packet *pkt,
2268 void *data),
2269 void *fn_data)
2270{
2271 wait_entry->fn = fn;
2272 wait_entry->fn_data = fn_data;
2273 wait_entry->cmd = cmd;
2274 wait_entry->triggered = false;
2275 wait_entry->aborted = false;
2276
2277 spin_lock_bh(&priv->_agn.notif_wait_lock);
2278 list_add(&wait_entry->list, &priv->_agn.notif_waits);
2279 spin_unlock_bh(&priv->_agn.notif_wait_lock);
2280}
2281
2282int iwlagn_wait_notification(struct iwl_priv *priv,
2283 struct iwl_notification_wait *wait_entry,
2284 unsigned long timeout)
2285{
2286 int ret;
2287
2288 ret = wait_event_timeout(priv->_agn.notif_waitq,
2289 wait_entry->triggered || wait_entry->aborted,
2290 timeout);
2291
2292 spin_lock_bh(&priv->_agn.notif_wait_lock);
2293 list_del(&wait_entry->list);
2294 spin_unlock_bh(&priv->_agn.notif_wait_lock);
2295
2296 if (wait_entry->aborted)
2297 return -EIO;
2298
2299	/* wait_event_timeout() returns 0 on timeout and >= 1 otherwise */
2300 if (ret <= 0)
2301 return -ETIMEDOUT;
2302 return 0;
2303}
2304
2305void iwlagn_remove_notification(struct iwl_priv *priv,
2306 struct iwl_notification_wait *wait_entry)
2307{
2308 spin_lock_bh(&priv->_agn.notif_wait_lock);
2309 list_del(&wait_entry->list);
2310 spin_unlock_bh(&priv->_agn.notif_wait_lock);
2311}
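/*
 * Minimal usage sketch for the notification wait support above. This is
 * illustrative only; REPLY_EXAMPLE_CMD/REPLY_EXAMPLE_NOTIF and the timeout
 * are placeholders, not identifiers from this driver:
 *
 *	struct iwl_notification_wait wait;
 *	int ret;
 *
 *	iwlagn_init_notification_wait(priv, &wait, REPLY_EXAMPLE_NOTIF,
 *				      NULL, NULL);
 *	ret = iwl_send_cmd_pdu(priv, REPLY_EXAMPLE_CMD, sizeof(cmd), &cmd);
 *	if (ret)
 *		iwlagn_remove_notification(priv, &wait);
 *	else
 *		ret = iwlagn_wait_notification(priv, &wait, HZ / 2);
 */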
2312
2313int iwlagn_start_device(struct iwl_priv *priv)
2314{
2315 int ret;
2316
2317 if (iwl_prepare_card_hw(priv)) {
2318 IWL_WARN(priv, "Exit HW not ready\n");
2319 return -EIO;
2320 }
2321
2322 /* If platform's RF_KILL switch is NOT set to KILL */
2323 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
2324 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2325 else
2326 set_bit(STATUS_RF_KILL_HW, &priv->status);
2327
2328 if (iwl_is_rfkill(priv)) {
2329 wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
2330 iwl_enable_interrupts(priv);
2331 return -ERFKILL;
2332 }
2333
2334 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2335
2336 ret = iwlagn_hw_nic_init(priv);
2337 if (ret) {
2338 IWL_ERR(priv, "Unable to init nic\n");
2339 return ret;
2340 }
2341
2342 /* make sure rfkill handshake bits are cleared */
2343 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2344 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
2345 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2346
2347 /* clear (again), then enable host interrupts */
2348 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2349 iwl_enable_interrupts(priv);
2350
2351 /* really make sure rfkill handshake bits are cleared */
2352 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2353 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2354
2355 return 0;
2356}
2357
2358void iwlagn_stop_device(struct iwl_priv *priv)
2359{
2360 unsigned long flags;
2361
2362 /* stop and reset the on-board processor */
2363 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
2364
2365 /* tell the device to stop sending interrupts */
2366 spin_lock_irqsave(&priv->lock, flags);
2367 iwl_disable_interrupts(priv);
2368 spin_unlock_irqrestore(&priv->lock, flags);
2369 iwl_synchronize_irq(priv);
2370
2371 /* device going down, Stop using ICT table */
2372 iwl_disable_ict(priv);
2373
2374	/*
2375	 * If a HW restart happens during firmware loading,
2376	 * this function may be called once by the firmware
2377	 * loading code and then called again due to the
2378	 * restart. So don't process again if the device
2379	 * is already dead.
2380	 */
2381 if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) {
2382 iwlagn_txq_ctx_stop(priv);
2383 iwlagn_rxq_stop(priv);
2384
2385 /* Power-down device's busmaster DMA clocks */
2386 iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
2387 udelay(5);
2388 }
2389
2390 /* Make sure (redundant) we've released our request to stay awake */
2391 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2392
2393 /* Stop the device, and put it in low power state */
2394 iwl_apm_stop(priv);
2395}