Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-rx.c')
-rw-r--r-- | drivers/net/wireless/iwlwifi/iwl-rx.c | 826
1 file changed, 77 insertions, 749 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index e5eb339107dd..0a5d7cf25196 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -163,197 +163,6 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q
163 | spin_unlock_irqrestore(&q->lock, flags); | 163 | spin_unlock_irqrestore(&q->lock, flags); |
164 | } | 164 | } |
165 | EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr); | 165 | EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr); |
166 | /** | ||
167 | * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr | ||
168 | */ | ||
169 | static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv, | ||
170 | dma_addr_t dma_addr) | ||
171 | { | ||
172 | return cpu_to_le32((u32)(dma_addr >> 8)); | ||
173 | } | ||
174 | |||
175 | /** | ||
176 | * iwl_rx_queue_restock - refill RX queue from pre-allocated pool | ||
177 | * | ||
178 | * If there are slots in the RX queue that need to be restocked, | ||
179 | * and we have free pre-allocated buffers, fill the ranks as much | ||
180 | * as we can, pulling from rx_free. | ||
181 | * | ||
182 | * This moves the 'write' index forward to catch up with 'processed', and | ||
183 | * also updates the memory address in the firmware to reference the new | ||
184 | * target buffer. | ||
185 | */ | ||
186 | void iwl_rx_queue_restock(struct iwl_priv *priv) | ||
187 | { | ||
188 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
189 | struct list_head *element; | ||
190 | struct iwl_rx_mem_buffer *rxb; | ||
191 | unsigned long flags; | ||
192 | int write; | ||
193 | |||
194 | spin_lock_irqsave(&rxq->lock, flags); | ||
195 | write = rxq->write & ~0x7; | ||
196 | while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { | ||
197 | /* Get next free Rx buffer, remove from free list */ | ||
198 | element = rxq->rx_free.next; | ||
199 | rxb = list_entry(element, struct iwl_rx_mem_buffer, list); | ||
200 | list_del(element); | ||
201 | |||
202 | /* Point to Rx buffer via next RBD in circular buffer */ | ||
203 | rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->page_dma); | ||
204 | rxq->queue[rxq->write] = rxb; | ||
205 | rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; | ||
206 | rxq->free_count--; | ||
207 | } | ||
208 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
209 | /* If the pre-allocated buffer pool is dropping low, schedule to | ||
210 | * refill it */ | ||
211 | if (rxq->free_count <= RX_LOW_WATERMARK) | ||
212 | queue_work(priv->workqueue, &priv->rx_replenish); | ||
213 | |||
214 | |||
215 | /* If we've added more space for the firmware to place data, tell it. | ||
216 | * Increment device's write pointer in multiples of 8. */ | ||
217 | if (rxq->write_actual != (rxq->write & ~0x7)) { | ||
218 | spin_lock_irqsave(&rxq->lock, flags); | ||
219 | rxq->need_update = 1; | ||
220 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
221 | iwl_rx_queue_update_write_ptr(priv, rxq); | ||
222 | } | ||
223 | } | ||
224 | EXPORT_SYMBOL(iwl_rx_queue_restock); | ||
225 | |||
226 | |||
227 | /** | ||
228 | * iwl_rx_replenish - Move all used packet from rx_used to rx_free | ||
229 | * | ||
230 | * When moving to rx_free an SKB is allocated for the slot. | ||
231 | * | ||
232 | * Also restock the Rx queue via iwl_rx_queue_restock. | ||
233 | * This is called as a scheduled work item (except for during initialization) | ||
234 | */ | ||
235 | void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority) | ||
236 | { | ||
237 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
238 | struct list_head *element; | ||
239 | struct iwl_rx_mem_buffer *rxb; | ||
240 | struct page *page; | ||
241 | unsigned long flags; | ||
242 | gfp_t gfp_mask = priority; | ||
243 | |||
244 | while (1) { | ||
245 | spin_lock_irqsave(&rxq->lock, flags); | ||
246 | if (list_empty(&rxq->rx_used)) { | ||
247 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
248 | return; | ||
249 | } | ||
250 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
251 | |||
252 | if (rxq->free_count > RX_LOW_WATERMARK) | ||
253 | gfp_mask |= __GFP_NOWARN; | ||
254 | |||
255 | if (priv->hw_params.rx_page_order > 0) | ||
256 | gfp_mask |= __GFP_COMP; | ||
257 | |||
258 | /* Alloc a new receive buffer */ | ||
259 | page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order); | ||
260 | if (!page) { | ||
261 | if (net_ratelimit()) | ||
262 | IWL_DEBUG_INFO(priv, "alloc_pages failed, " | ||
263 | "order: %d\n", | ||
264 | priv->hw_params.rx_page_order); | ||
265 | |||
266 | if ((rxq->free_count <= RX_LOW_WATERMARK) && | ||
267 | net_ratelimit()) | ||
268 | IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n", | ||
269 | priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL", | ||
270 | rxq->free_count); | ||
271 | /* We don't reschedule replenish work here -- we will | ||
272 | * call the restock method and if it still needs | ||
273 | * more buffers it will schedule replenish */ | ||
274 | return; | ||
275 | } | ||
276 | |||
277 | spin_lock_irqsave(&rxq->lock, flags); | ||
278 | |||
279 | if (list_empty(&rxq->rx_used)) { | ||
280 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
281 | __free_pages(page, priv->hw_params.rx_page_order); | ||
282 | return; | ||
283 | } | ||
284 | element = rxq->rx_used.next; | ||
285 | rxb = list_entry(element, struct iwl_rx_mem_buffer, list); | ||
286 | list_del(element); | ||
287 | |||
288 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
289 | |||
290 | rxb->page = page; | ||
291 | /* Get physical address of the RB */ | ||
292 | rxb->page_dma = pci_map_page(priv->pci_dev, page, 0, | ||
293 | PAGE_SIZE << priv->hw_params.rx_page_order, | ||
294 | PCI_DMA_FROMDEVICE); | ||
295 | /* dma address must be no more than 36 bits */ | ||
296 | BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); | ||
297 | /* and also 256 byte aligned! */ | ||
298 | BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); | ||
299 | |||
300 | spin_lock_irqsave(&rxq->lock, flags); | ||
301 | |||
302 | list_add_tail(&rxb->list, &rxq->rx_free); | ||
303 | rxq->free_count++; | ||
304 | priv->alloc_rxb_page++; | ||
305 | |||
306 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
307 | } | ||
308 | } | ||
309 | |||
310 | void iwl_rx_replenish(struct iwl_priv *priv) | ||
311 | { | ||
312 | unsigned long flags; | ||
313 | |||
314 | iwl_rx_allocate(priv, GFP_KERNEL); | ||
315 | |||
316 | spin_lock_irqsave(&priv->lock, flags); | ||
317 | iwl_rx_queue_restock(priv); | ||
318 | spin_unlock_irqrestore(&priv->lock, flags); | ||
319 | } | ||
320 | EXPORT_SYMBOL(iwl_rx_replenish); | ||
321 | |||
322 | void iwl_rx_replenish_now(struct iwl_priv *priv) | ||
323 | { | ||
324 | iwl_rx_allocate(priv, GFP_ATOMIC); | ||
325 | |||
326 | iwl_rx_queue_restock(priv); | ||
327 | } | ||
328 | EXPORT_SYMBOL(iwl_rx_replenish_now); | ||
329 | |||
330 | |||
331 | /* Assumes that the skb field of the buffers in 'pool' is kept accurate. | ||
332 | * If an SKB has been detached, the POOL needs to have its SKB set to NULL | ||
333 | * This free routine walks the list of POOL entries and if SKB is set to | ||
334 | * non NULL it is unmapped and freed | ||
335 | */ | ||
336 | void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | ||
337 | { | ||
338 | int i; | ||
339 | for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { | ||
340 | if (rxq->pool[i].page != NULL) { | ||
341 | pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, | ||
342 | PAGE_SIZE << priv->hw_params.rx_page_order, | ||
343 | PCI_DMA_FROMDEVICE); | ||
344 | __iwl_free_pages(priv, rxq->pool[i].page); | ||
345 | rxq->pool[i].page = NULL; | ||
346 | } | ||
347 | } | ||
348 | |||
349 | dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, | ||
350 | rxq->dma_addr); | ||
351 | dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status), | ||
352 | rxq->rb_stts, rxq->rb_stts_dma); | ||
353 | rxq->bd = NULL; | ||
354 | rxq->rb_stts = NULL; | ||
355 | } | ||
356 | EXPORT_SYMBOL(iwl_rx_queue_free); | ||
357 | 166 | ||
358 | int iwl_rx_queue_alloc(struct iwl_priv *priv) | 167 | int iwl_rx_queue_alloc(struct iwl_priv *priv) |
359 | { | 168 | { |
@@ -396,98 +205,6 @@ err_bd:
396 | } | 205 | } |
397 | EXPORT_SYMBOL(iwl_rx_queue_alloc); | 206 | EXPORT_SYMBOL(iwl_rx_queue_alloc); |
398 | 207 | ||
399 | void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | ||
400 | { | ||
401 | unsigned long flags; | ||
402 | int i; | ||
403 | spin_lock_irqsave(&rxq->lock, flags); | ||
404 | INIT_LIST_HEAD(&rxq->rx_free); | ||
405 | INIT_LIST_HEAD(&rxq->rx_used); | ||
406 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | ||
407 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { | ||
408 | /* In the reset function, these buffers may have been allocated | ||
409 | * to an SKB, so we need to unmap and free potential storage */ | ||
410 | if (rxq->pool[i].page != NULL) { | ||
411 | pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, | ||
412 | PAGE_SIZE << priv->hw_params.rx_page_order, | ||
413 | PCI_DMA_FROMDEVICE); | ||
414 | __iwl_free_pages(priv, rxq->pool[i].page); | ||
415 | rxq->pool[i].page = NULL; | ||
416 | } | ||
417 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | ||
418 | } | ||
419 | |||
420 | /* Set us so that we have processed and used all buffers, but have | ||
421 | * not restocked the Rx queue with fresh buffers */ | ||
422 | rxq->read = rxq->write = 0; | ||
423 | rxq->write_actual = 0; | ||
424 | rxq->free_count = 0; | ||
425 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
426 | } | ||
427 | |||
428 | int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | ||
429 | { | ||
430 | u32 rb_size; | ||
431 | const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ | ||
432 | u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */ | ||
433 | |||
434 | if (!priv->cfg->use_isr_legacy) | ||
435 | rb_timeout = RX_RB_TIMEOUT; | ||
436 | |||
437 | if (priv->cfg->mod_params->amsdu_size_8K) | ||
438 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; | ||
439 | else | ||
440 | rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; | ||
441 | |||
442 | /* Stop Rx DMA */ | ||
443 | iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); | ||
444 | |||
445 | /* Reset driver's Rx queue write index */ | ||
446 | iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); | ||
447 | |||
448 | /* Tell device where to find RBD circular buffer in DRAM */ | ||
449 | iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG, | ||
450 | (u32)(rxq->dma_addr >> 8)); | ||
451 | |||
452 | /* Tell device where in DRAM to update its Rx status */ | ||
453 | iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, | ||
454 | rxq->rb_stts_dma >> 4); | ||
455 | |||
456 | /* Enable Rx DMA | ||
457 | * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in | ||
458 | * the credit mechanism in 5000 HW RX FIFO | ||
459 | * Direct rx interrupts to hosts | ||
460 | * Rx buffer size 4 or 8k | ||
461 | * RB timeout 0x10 | ||
462 | * 256 RBDs | ||
463 | */ | ||
464 | iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, | ||
465 | FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | | ||
466 | FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | | ||
467 | FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | | ||
468 | FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK | | ||
469 | rb_size| | ||
470 | (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| | ||
471 | (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); | ||
472 | |||
473 | /* Set interrupt coalescing timer to default (2048 usecs) */ | ||
474 | iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); | ||
475 | |||
476 | return 0; | ||
477 | } | ||
478 | |||
479 | int iwl_rxq_stop(struct iwl_priv *priv) | ||
480 | { | ||
481 | |||
482 | /* stop Rx DMA */ | ||
483 | iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); | ||
484 | iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG, | ||
485 | FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); | ||
486 | |||
487 | return 0; | ||
488 | } | ||
489 | EXPORT_SYMBOL(iwl_rxq_stop); | ||
490 | |||
491 | void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, | 208 | void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, |
492 | struct iwl_rx_mem_buffer *rxb) | 209 | struct iwl_rx_mem_buffer *rxb) |
493 | 210 | ||
@@ -543,6 +260,7 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
543 | le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; | 260 | le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; |
544 | int bcn_silence_c = | 261 | int bcn_silence_c = |
545 | le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; | 262 | le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; |
263 | int last_rx_noise; | ||
546 | 264 | ||
547 | if (bcn_silence_a) { | 265 | if (bcn_silence_a) { |
548 | total_silence += bcn_silence_a; | 266 | total_silence += bcn_silence_a; |
@@ -559,13 +277,13 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
559 | 277 | ||
560 | /* Average among active antennas */ | 278 | /* Average among active antennas */ |
561 | if (num_active_rx) | 279 | if (num_active_rx) |
562 | priv->last_rx_noise = (total_silence / num_active_rx) - 107; | 280 | last_rx_noise = (total_silence / num_active_rx) - 107; |
563 | else | 281 | else |
564 | priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; | 282 | last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; |
565 | 283 | ||
566 | IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n", | 284 | IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n", |
567 | bcn_silence_a, bcn_silence_b, bcn_silence_c, | 285 | bcn_silence_a, bcn_silence_b, bcn_silence_c, |
568 | priv->last_rx_noise); | 286 | last_rx_noise); |
569 | } | 287 | } |
570 | 288 | ||
571 | #ifdef CONFIG_IWLWIFI_DEBUG | 289 | #ifdef CONFIG_IWLWIFI_DEBUG |
@@ -617,29 +335,20 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
617 | 335 | ||
618 | #define REG_RECALIB_PERIOD (60) | 336 | #define REG_RECALIB_PERIOD (60) |
619 | 337 | ||
620 | #define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n" | 338 | /** |
621 | void iwl_rx_statistics(struct iwl_priv *priv, | 339 | * iwl_good_plcp_health - checks for plcp error. |
622 | struct iwl_rx_mem_buffer *rxb) | 340 | * |
341 | * When the plcp error is exceeding the thresholds, reset the radio | ||
342 | * to improve the throughput. | ||
343 | */ | ||
344 | bool iwl_good_plcp_health(struct iwl_priv *priv, | ||
345 | struct iwl_rx_packet *pkt) | ||
623 | { | 346 | { |
624 | int change; | 347 | bool rc = true; |
625 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
626 | int combined_plcp_delta; | 348 | int combined_plcp_delta; |
627 | unsigned int plcp_msec; | 349 | unsigned int plcp_msec; |
628 | unsigned long plcp_received_jiffies; | 350 | unsigned long plcp_received_jiffies; |
629 | 351 | ||
630 | IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", | ||
631 | (int)sizeof(priv->statistics), | ||
632 | le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); | ||
633 | |||
634 | change = ((priv->statistics.general.temperature != | ||
635 | pkt->u.stats.general.temperature) || | ||
636 | ((priv->statistics.flag & | ||
637 | STATISTICS_REPLY_FLG_HT40_MODE_MSK) != | ||
638 | (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK))); | ||
639 | |||
640 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
641 | iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); | ||
642 | #endif | ||
643 | /* | 352 | /* |
644 | * check for plcp_err and trigger radio reset if it exceeds | 353 | * check for plcp_err and trigger radio reset if it exceeds |
645 | * the plcp error threshold plcp_delta. | 354 | * the plcp error threshold plcp_delta. |
@@ -660,11 +369,11 @@ void iwl_rx_statistics(struct iwl_priv *priv,
660 | le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err)); | 369 | le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err)); |
661 | 370 | ||
662 | if ((combined_plcp_delta > 0) && | 371 | if ((combined_plcp_delta > 0) && |
663 | ((combined_plcp_delta * 100) / plcp_msec) > | 372 | ((combined_plcp_delta * 100) / plcp_msec) > |
664 | priv->cfg->plcp_delta_threshold) { | 373 | priv->cfg->plcp_delta_threshold) { |
665 | /* | 374 | /* |
666 | * if plcp_err exceed the threshold, the following | 375 | * if plcp_err exceed the threshold, |
667 | * data is printed in csv format: | 376 | * the following data is printed in csv format: |
668 | * Text: plcp_err exceeded %d, | 377 | * Text: plcp_err exceeded %d, |
669 | * Received ofdm.plcp_err, | 378 | * Received ofdm.plcp_err, |
670 | * Current ofdm.plcp_err, | 379 | * Current ofdm.plcp_err, |
@@ -673,22 +382,76 @@ void iwl_rx_statistics(struct iwl_priv *priv,
673 | * combined_plcp_delta, | 382 | * combined_plcp_delta, |
674 | * plcp_msec | 383 | * plcp_msec |
675 | */ | 384 | */ |
676 | IWL_DEBUG_RADIO(priv, PLCP_MSG, | 385 | IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, " |
386 | "%u, %u, %u, %u, %d, %u mSecs\n", | ||
677 | priv->cfg->plcp_delta_threshold, | 387 | priv->cfg->plcp_delta_threshold, |
678 | le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err), | 388 | le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err), |
679 | le32_to_cpu(priv->statistics.rx.ofdm.plcp_err), | 389 | le32_to_cpu(priv->statistics.rx.ofdm.plcp_err), |
680 | le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err), | 390 | le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err), |
681 | le32_to_cpu( | 391 | le32_to_cpu( |
682 | priv->statistics.rx.ofdm_ht.plcp_err), | 392 | priv->statistics.rx.ofdm_ht.plcp_err), |
683 | combined_plcp_delta, plcp_msec); | 393 | combined_plcp_delta, plcp_msec); |
394 | rc = false; | ||
395 | } | ||
396 | } | ||
397 | return rc; | ||
398 | } | ||
399 | EXPORT_SYMBOL(iwl_good_plcp_health); | ||
684 | 400 | ||
685 | /* | 401 | void iwl_recover_from_statistics(struct iwl_priv *priv, |
686 | * Reset the RF radio due to the high plcp | 402 | struct iwl_rx_packet *pkt) |
687 | * error rate | 403 | { |
688 | */ | 404 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) |
689 | iwl_force_reset(priv, IWL_RF_RESET); | 405 | return; |
406 | if (iwl_is_associated(priv)) { | ||
407 | if (priv->cfg->ops->lib->check_ack_health) { | ||
408 | if (!priv->cfg->ops->lib->check_ack_health( | ||
409 | priv, pkt)) { | ||
410 | /* | ||
411 | * low ack count detected | ||
412 | * restart Firmware | ||
413 | */ | ||
414 | IWL_ERR(priv, "low ack count detected, " | ||
415 | "restart firmware\n"); | ||
416 | if (!iwl_force_reset(priv, IWL_FW_RESET)) | ||
417 | return; | ||
418 | } | ||
419 | } | ||
420 | if (priv->cfg->ops->lib->check_plcp_health) { | ||
421 | if (!priv->cfg->ops->lib->check_plcp_health( | ||
422 | priv, pkt)) { | ||
423 | /* | ||
424 | * high plcp error detected | ||
425 | * reset Radio | ||
426 | */ | ||
427 | iwl_force_reset(priv, IWL_RF_RESET); | ||
428 | } | ||
690 | } | 429 | } |
691 | } | 430 | } |
431 | } | ||
432 | EXPORT_SYMBOL(iwl_recover_from_statistics); | ||
433 | |||
434 | void iwl_rx_statistics(struct iwl_priv *priv, | ||
435 | struct iwl_rx_mem_buffer *rxb) | ||
436 | { | ||
437 | int change; | ||
438 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
439 | |||
440 | |||
441 | IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", | ||
442 | (int)sizeof(priv->statistics), | ||
443 | le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); | ||
444 | |||
445 | change = ((priv->statistics.general.temperature != | ||
446 | pkt->u.stats.general.temperature) || | ||
447 | ((priv->statistics.flag & | ||
448 | STATISTICS_REPLY_FLG_HT40_MODE_MSK) != | ||
449 | (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK))); | ||
450 | |||
451 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
452 | iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); | ||
453 | #endif | ||
454 | iwl_recover_from_statistics(priv, pkt); | ||
692 | 455 | ||
693 | memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics)); | 456 | memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics)); |
694 | 457 | ||
@@ -731,139 +494,6 @@ void iwl_reply_statistics(struct iwl_priv *priv,
731 | } | 494 | } |
732 | EXPORT_SYMBOL(iwl_reply_statistics); | 495 | EXPORT_SYMBOL(iwl_reply_statistics); |
733 | 496 | ||
734 | /* Calc max signal level (dBm) among 3 possible receivers */ | ||
735 | static inline int iwl_calc_rssi(struct iwl_priv *priv, | ||
736 | struct iwl_rx_phy_res *rx_resp) | ||
737 | { | ||
738 | return priv->cfg->ops->utils->calc_rssi(priv, rx_resp); | ||
739 | } | ||
740 | |||
741 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
742 | /** | ||
743 | * iwl_dbg_report_frame - dump frame to syslog during debug sessions | ||
744 | * | ||
745 | * You may hack this function to show different aspects of received frames, | ||
746 | * including selective frame dumps. | ||
747 | * group100 parameter selects whether to show 1 out of 100 good data frames. | ||
748 | * All beacon and probe response frames are printed. | ||
749 | */ | ||
750 | static void iwl_dbg_report_frame(struct iwl_priv *priv, | ||
751 | struct iwl_rx_phy_res *phy_res, u16 length, | ||
752 | struct ieee80211_hdr *header, int group100) | ||
753 | { | ||
754 | u32 to_us; | ||
755 | u32 print_summary = 0; | ||
756 | u32 print_dump = 0; /* set to 1 to dump all frames' contents */ | ||
757 | u32 hundred = 0; | ||
758 | u32 dataframe = 0; | ||
759 | __le16 fc; | ||
760 | u16 seq_ctl; | ||
761 | u16 channel; | ||
762 | u16 phy_flags; | ||
763 | u32 rate_n_flags; | ||
764 | u32 tsf_low; | ||
765 | int rssi; | ||
766 | |||
767 | if (likely(!(iwl_get_debug_level(priv) & IWL_DL_RX))) | ||
768 | return; | ||
769 | |||
770 | /* MAC header */ | ||
771 | fc = header->frame_control; | ||
772 | seq_ctl = le16_to_cpu(header->seq_ctrl); | ||
773 | |||
774 | /* metadata */ | ||
775 | channel = le16_to_cpu(phy_res->channel); | ||
776 | phy_flags = le16_to_cpu(phy_res->phy_flags); | ||
777 | rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); | ||
778 | |||
779 | /* signal statistics */ | ||
780 | rssi = iwl_calc_rssi(priv, phy_res); | ||
781 | tsf_low = le64_to_cpu(phy_res->timestamp) & 0x0ffffffff; | ||
782 | |||
783 | to_us = !compare_ether_addr(header->addr1, priv->mac_addr); | ||
784 | |||
785 | /* if data frame is to us and all is good, | ||
786 | * (optionally) print summary for only 1 out of every 100 */ | ||
787 | if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) == | ||
788 | cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) { | ||
789 | dataframe = 1; | ||
790 | if (!group100) | ||
791 | print_summary = 1; /* print each frame */ | ||
792 | else if (priv->framecnt_to_us < 100) { | ||
793 | priv->framecnt_to_us++; | ||
794 | print_summary = 0; | ||
795 | } else { | ||
796 | priv->framecnt_to_us = 0; | ||
797 | print_summary = 1; | ||
798 | hundred = 1; | ||
799 | } | ||
800 | } else { | ||
801 | /* print summary for all other frames */ | ||
802 | print_summary = 1; | ||
803 | } | ||
804 | |||
805 | if (print_summary) { | ||
806 | char *title; | ||
807 | int rate_idx; | ||
808 | u32 bitrate; | ||
809 | |||
810 | if (hundred) | ||
811 | title = "100Frames"; | ||
812 | else if (ieee80211_has_retry(fc)) | ||
813 | title = "Retry"; | ||
814 | else if (ieee80211_is_assoc_resp(fc)) | ||
815 | title = "AscRsp"; | ||
816 | else if (ieee80211_is_reassoc_resp(fc)) | ||
817 | title = "RasRsp"; | ||
818 | else if (ieee80211_is_probe_resp(fc)) { | ||
819 | title = "PrbRsp"; | ||
820 | print_dump = 1; /* dump frame contents */ | ||
821 | } else if (ieee80211_is_beacon(fc)) { | ||
822 | title = "Beacon"; | ||
823 | print_dump = 1; /* dump frame contents */ | ||
824 | } else if (ieee80211_is_atim(fc)) | ||
825 | title = "ATIM"; | ||
826 | else if (ieee80211_is_auth(fc)) | ||
827 | title = "Auth"; | ||
828 | else if (ieee80211_is_deauth(fc)) | ||
829 | title = "DeAuth"; | ||
830 | else if (ieee80211_is_disassoc(fc)) | ||
831 | title = "DisAssoc"; | ||
832 | else | ||
833 | title = "Frame"; | ||
834 | |||
835 | rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags); | ||
836 | if (unlikely((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT))) { | ||
837 | bitrate = 0; | ||
838 | WARN_ON_ONCE(1); | ||
839 | } else { | ||
840 | bitrate = iwl_rates[rate_idx].ieee / 2; | ||
841 | } | ||
842 | |||
843 | /* print frame summary. | ||
844 | * MAC addresses show just the last byte (for brevity), | ||
845 | * but you can hack it to show more, if you'd like to. */ | ||
846 | if (dataframe) | ||
847 | IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, " | ||
848 | "len=%u, rssi=%d, chnl=%d, rate=%u, \n", | ||
849 | title, le16_to_cpu(fc), header->addr1[5], | ||
850 | length, rssi, channel, bitrate); | ||
851 | else { | ||
852 | /* src/dst addresses assume managed mode */ | ||
853 | IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, src=0x%02x, " | ||
854 | "len=%u, rssi=%d, tim=%lu usec, " | ||
855 | "phy=0x%02x, chnl=%d\n", | ||
856 | title, le16_to_cpu(fc), header->addr1[5], | ||
857 | header->addr3[5], length, rssi, | ||
858 | tsf_low - priv->scan_start_tsf, | ||
859 | phy_flags, channel); | ||
860 | } | ||
861 | } | ||
862 | if (print_dump) | ||
863 | iwl_print_hex_dump(priv, IWL_DL_RX, header, length); | ||
864 | } | ||
865 | #endif | ||
866 | |||
867 | /* | 497 | /* |
868 | * returns non-zero if packet should be dropped | 498 | * returns non-zero if packet should be dropped |
869 | */ | 499 | */ |
@@ -911,305 +541,3 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
911 | return 0; | 541 | return 0; |
912 | } | 542 | } |
913 | EXPORT_SYMBOL(iwl_set_decrypted_flag); | 543 | EXPORT_SYMBOL(iwl_set_decrypted_flag); |
914 | |||
915 | static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) | ||
916 | { | ||
917 | u32 decrypt_out = 0; | ||
918 | |||
919 | if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) == | ||
920 | RX_RES_STATUS_STATION_FOUND) | ||
921 | decrypt_out |= (RX_RES_STATUS_STATION_FOUND | | ||
922 | RX_RES_STATUS_NO_STATION_INFO_MISMATCH); | ||
923 | |||
924 | decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK); | ||
925 | |||
926 | /* packet was not encrypted */ | ||
927 | if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == | ||
928 | RX_RES_STATUS_SEC_TYPE_NONE) | ||
929 | return decrypt_out; | ||
930 | |||
931 | /* packet was encrypted with unknown alg */ | ||
932 | if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == | ||
933 | RX_RES_STATUS_SEC_TYPE_ERR) | ||
934 | return decrypt_out; | ||
935 | |||
936 | /* decryption was not done in HW */ | ||
937 | if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) != | ||
938 | RX_MPDU_RES_STATUS_DEC_DONE_MSK) | ||
939 | return decrypt_out; | ||
940 | |||
941 | switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) { | ||
942 | |||
943 | case RX_RES_STATUS_SEC_TYPE_CCMP: | ||
944 | /* alg is CCM: check MIC only */ | ||
945 | if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK)) | ||
946 | /* Bad MIC */ | ||
947 | decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; | ||
948 | else | ||
949 | decrypt_out |= RX_RES_STATUS_DECRYPT_OK; | ||
950 | |||
951 | break; | ||
952 | |||
953 | case RX_RES_STATUS_SEC_TYPE_TKIP: | ||
954 | if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) { | ||
955 | /* Bad TTAK */ | ||
956 | decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK; | ||
957 | break; | ||
958 | } | ||
959 | /* fall through if TTAK OK */ | ||
960 | default: | ||
961 | if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK)) | ||
962 | decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; | ||
963 | else | ||
964 | decrypt_out |= RX_RES_STATUS_DECRYPT_OK; | ||
965 | break; | ||
966 | }; | ||
967 | |||
968 | IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n", | ||
969 | decrypt_in, decrypt_out); | ||
970 | |||
971 | return decrypt_out; | ||
972 | } | ||
973 | |||
974 | static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv, | ||
975 | struct ieee80211_hdr *hdr, | ||
976 | u16 len, | ||
977 | u32 ampdu_status, | ||
978 | struct iwl_rx_mem_buffer *rxb, | ||
979 | struct ieee80211_rx_status *stats) | ||
980 | { | ||
981 | struct sk_buff *skb; | ||
982 | int ret = 0; | ||
983 | __le16 fc = hdr->frame_control; | ||
984 | |||
985 | /* We only process data packets if the interface is open */ | ||
986 | if (unlikely(!priv->is_open)) { | ||
987 | IWL_DEBUG_DROP_LIMIT(priv, | ||
988 | "Dropping packet while interface is not open.\n"); | ||
989 | return; | ||
990 | } | ||
991 | |||
992 | /* In case of HW accelerated crypto and bad decryption, drop */ | ||
993 | if (!priv->cfg->mod_params->sw_crypto && | ||
994 | iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats)) | ||
995 | return; | ||
996 | |||
997 | skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC); | ||
998 | if (!skb) { | ||
999 | IWL_ERR(priv, "alloc_skb failed\n"); | ||
1000 | return; | ||
1001 | } | ||
1002 | |||
1003 | skb_reserve(skb, IWL_LINK_HDR_MAX); | ||
1004 | skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len); | ||
1005 | |||
1006 | /* mac80211 currently doesn't support paged SKB. Convert it to | ||
1007 | * linear SKB for management frame and data frame requires | ||
1008 | * software decryption or software defragementation. */ | ||
1009 | if (ieee80211_is_mgmt(fc) || | ||
1010 | ieee80211_has_protected(fc) || | ||
1011 | ieee80211_has_morefrags(fc) || | ||
1012 | le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG || | ||
1013 | (ieee80211_is_data_qos(fc) && | ||
1014 | *ieee80211_get_qos_ctl(hdr) & | ||
1015 | IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)) | ||
1016 | ret = skb_linearize(skb); | ||
1017 | else | ||
1018 | ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ? | ||
1019 | 0 : -ENOMEM; | ||
1020 | |||
1021 | if (ret) { | ||
1022 | kfree_skb(skb); | ||
1023 | goto out; | ||
1024 | } | ||
1025 | |||
1026 | /* | ||
1027 | * XXX: We cannot touch the page and its virtual memory (hdr) after | ||
1028 | * here. It might have already been freed by the above skb change. | ||
1029 | */ | ||
1030 | |||
1031 | iwl_update_stats(priv, false, fc, len); | ||
1032 | memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); | ||
1033 | |||
1034 | ieee80211_rx(priv->hw, skb); | ||
1035 | out: | ||
1036 | priv->alloc_rxb_page--; | ||
1037 | rxb->page = NULL; | ||
1038 | } | ||
1039 | |||
1040 | /* This is necessary only for a number of statistics, see the caller. */ | ||
1041 | static int iwl_is_network_packet(struct iwl_priv *priv, | ||
1042 | struct ieee80211_hdr *header) | ||
1043 | { | ||
1044 | /* Filter incoming packets to determine if they are targeted toward | ||
1045 | * this network, discarding packets coming from ourselves */ | ||
1046 | switch (priv->iw_mode) { | ||
1047 | case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */ | ||
1048 | /* packets to our IBSS update information */ | ||
1049 | return !compare_ether_addr(header->addr3, priv->bssid); | ||
1050 | case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */ | ||
1051 | /* packets to our IBSS update information */ | ||
1052 | return !compare_ether_addr(header->addr2, priv->bssid); | ||
1053 | default: | ||
1054 | return 1; | ||
1055 | } | ||
1056 | } | ||
1057 | |||
1058 | /* Called for REPLY_RX (legacy ABG frames), or | ||
1059 | * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */ | ||
1060 | void iwl_rx_reply_rx(struct iwl_priv *priv, | ||
1061 | struct iwl_rx_mem_buffer *rxb) | ||
1062 | { | ||
1063 | struct ieee80211_hdr *header; | ||
1064 | struct ieee80211_rx_status rx_status; | ||
1065 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
1066 | struct iwl_rx_phy_res *phy_res; | ||
1067 | __le32 rx_pkt_status; | ||
1068 | struct iwl4965_rx_mpdu_res_start *amsdu; | ||
1069 | u32 len; | ||
1070 | u32 ampdu_status; | ||
1071 | u32 rate_n_flags; | ||
1072 | |||
1073 | /** | ||
1074 | * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently. | ||
1075 | * REPLY_RX: physical layer info is in this buffer | ||
1076 | * REPLY_RX_MPDU_CMD: physical layer info was sent in separate | ||
1077 | * command and cached in priv->last_phy_res | ||
1078 | * | ||
1079 | * Here we set up local variables depending on which command is | ||
1080 | * received. | ||
1081 | */ | ||
1082 | if (pkt->hdr.cmd == REPLY_RX) { | ||
1083 | phy_res = (struct iwl_rx_phy_res *)pkt->u.raw; | ||
1084 | header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) | ||
1085 | + phy_res->cfg_phy_cnt); | ||
1086 | |||
1087 | len = le16_to_cpu(phy_res->byte_count); | ||
1088 | rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) + | ||
1089 | phy_res->cfg_phy_cnt + len); | ||
1090 | ampdu_status = le32_to_cpu(rx_pkt_status); | ||
1091 | } else { | ||
1092 | if (!priv->last_phy_res[0]) { | ||
1093 | IWL_ERR(priv, "MPDU frame without cached PHY data\n"); | ||
1094 | return; | ||
1095 | } | ||
1096 | phy_res = (struct iwl_rx_phy_res *)&priv->last_phy_res[1]; | ||
1097 | amsdu = (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw; | ||
1098 | header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu)); | ||
1099 | len = le16_to_cpu(amsdu->byte_count); | ||
1100 | rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len); | ||
1101 | ampdu_status = iwl_translate_rx_status(priv, | ||
1102 | le32_to_cpu(rx_pkt_status)); | ||
1103 | } | ||
1104 | |||
1105 | if ((unlikely(phy_res->cfg_phy_cnt > 20))) { | ||
1106 | IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n", | ||
1107 | phy_res->cfg_phy_cnt); | ||
1108 | return; | ||
1109 | } | ||
1110 | |||
1111 | if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) || | ||
1112 | !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { | ||
1113 | IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", | ||
1114 | le32_to_cpu(rx_pkt_status)); | ||
1115 | return; | ||
1116 | } | ||
1117 | |||
1118 | /* This will be used in several places later */ | ||
1119 | rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); | ||
1120 | |||
1121 | /* rx_status carries information about the packet to mac80211 */ | ||
1122 | rx_status.mactime = le64_to_cpu(phy_res->timestamp); | ||
1123 | rx_status.freq = | ||
1124 | ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel)); | ||
1125 | rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? | ||
1126 | IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; | ||
1127 | rx_status.rate_idx = | ||
1128 | iwl_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); | ||
1129 | rx_status.flag = 0; | ||
1130 | |||
1131 | /* TSF isn't reliable. In order to allow smooth user experience, | ||
1132 | * this W/A doesn't propagate it to the mac80211 */ | ||
1133 | /*rx_status.flag |= RX_FLAG_TSFT;*/ | ||
1134 | |||
1135 | priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); | ||
1136 | |||
1137 | /* Find max signal strength (dBm) among 3 antenna/receiver chains */ | ||
1138 | rx_status.signal = iwl_calc_rssi(priv, phy_res); | ||
1139 | |||
1140 | /* Meaningful noise values are available only from beacon statistics, | ||
1141 | * which are gathered only when associated, and indicate noise | ||
1142 | * only for the associated network channel ... | ||
1143 | * Ignore these noise values while scanning (other channels) */ | ||
1144 | if (iwl_is_associated(priv) && | ||
1145 | !test_bit(STATUS_SCANNING, &priv->status)) { | ||
1146 | rx_status.noise = priv->last_rx_noise; | ||
1147 | } else { | ||
1148 | rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE; | ||
1149 | } | ||
1150 | |||
1151 | /* Reset beacon noise level if not associated. */ | ||
1152 | if (!iwl_is_associated(priv)) | ||
1153 | priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; | ||
1154 | |||
1155 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1156 | /* Set "1" to report good data frames in groups of 100 */ | ||
1157 | if (unlikely(iwl_get_debug_level(priv) & IWL_DL_RX)) | ||
1158 | iwl_dbg_report_frame(priv, phy_res, len, header, 1); | ||
1159 | #endif | ||
1160 | iwl_dbg_log_rx_data_frame(priv, len, header); | ||
1161 | IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, TSF %llu\n", | ||
1162 | rx_status.signal, rx_status.noise, | ||
1163 | (unsigned long long)rx_status.mactime); | ||
1164 | |||
1165 | /* | ||
1166 | * "antenna number" | ||
1167 | * | ||
1168 | * It seems that the antenna field in the phy flags value | ||
1169 | * is actually a bit field. This is undefined by radiotap, | ||
1170 | * it wants an actual antenna number but I always get "7" | ||
1171 | * for most legacy frames I receive indicating that the | ||
1172 | * same frame was received on all three RX chains. | ||
1173 | * | ||
1174 | * I think this field should be removed in favor of a | ||
1175 | * new 802.11n radiotap field "RX chains" that is defined | ||
1176 | * as a bitmask. | ||
1177 | */ | ||
1178 | rx_status.antenna = | ||
1179 | (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) | ||
1180 | >> RX_RES_PHY_FLAGS_ANTENNA_POS; | ||
1181 | |||
1182 | /* set the preamble flag if appropriate */ | ||
1183 | if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) | ||
1184 | rx_status.flag |= RX_FLAG_SHORTPRE; | ||
1185 | |||
1186 | /* Set up the HT phy flags */ | ||
1187 | if (rate_n_flags & RATE_MCS_HT_MSK) | ||
1188 | rx_status.flag |= RX_FLAG_HT; | ||
1189 | if (rate_n_flags & RATE_MCS_HT40_MSK) | ||
1190 | rx_status.flag |= RX_FLAG_40MHZ; | ||
1191 | if (rate_n_flags & RATE_MCS_SGI_MSK) | ||
1192 | rx_status.flag |= RX_FLAG_SHORT_GI; | ||
1193 | |||
1194 | if (iwl_is_network_packet(priv, header)) { | ||
1195 | priv->last_rx_rssi = rx_status.signal; | ||
1196 | priv->last_beacon_time = priv->ucode_beacon_time; | ||
1197 | priv->last_tsf = le64_to_cpu(phy_res->timestamp); | ||
1198 | } | ||
1199 | |||
1200 | iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status, | ||
1201 | rxb, &rx_status); | ||
1202 | } | ||
1203 | EXPORT_SYMBOL(iwl_rx_reply_rx); | ||
1204 | |||
1205 | /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). | ||
1206 | * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */ | ||
1207 | void iwl_rx_reply_rx_phy(struct iwl_priv *priv, | ||
1208 | struct iwl_rx_mem_buffer *rxb) | ||
1209 | { | ||
1210 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
1211 | priv->last_phy_res[0] = 1; | ||
1212 | memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]), | ||
1213 | sizeof(struct iwl_rx_phy_res)); | ||
1214 | } | ||
1215 | EXPORT_SYMBOL(iwl_rx_reply_rx_phy); | ||