author    Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
commit    ada47b5fe13d89735805b566185f4885f5a3f750
tree      644b88f8a71896307d71438e9b3af49126ffb22b /drivers/net/wireless/iwlwifi/iwl-rx.c
parent    43e98717ad40a4ae64545b5ba047c7b86aa44f4f
parent    3280f21d43ee541f97f8cda5792150d2dbec20d5

Merge branch 'wip-2.6.34' into old-private-master

Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-rx.c'):
 drivers/net/wireless/iwlwifi/iwl-rx.c | 382 ++++++++++++++++++++++++---------
 1 file changed, 251 insertions(+), 131 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 493626bcd3ec..e5eb339107dd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -28,6 +28,7 @@
  *****************************************************************************/

 #include <linux/etherdevice.h>
+#include <linux/slab.h>
 #include <net/mac80211.h>
 #include <asm/unaligned.h>
 #include "iwl-eeprom.h"
@@ -123,12 +124,11 @@ EXPORT_SYMBOL(iwl_rx_queue_space);
 /**
  * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
  */
-int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
+void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
 {
	unsigned long flags;
	u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
	u32 reg;
-	int ret = 0;

	spin_lock_irqsave(&q->lock, flags);

@@ -140,6 +140,8 @@ int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+			IWL_DEBUG_INFO(priv, "Rx queue requesting wakeup, GP1 = 0x%x\n",
+				      reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			goto exit_unlock;
@@ -159,7 +161,6 @@ int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
-	return ret;
 }
 EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
 /**
@@ -182,14 +183,13 @@ static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
  * also updates the memory address in the firmware to reference the new
  * target buffer.
  */
-int iwl_rx_queue_restock(struct iwl_priv *priv)
+void iwl_rx_queue_restock(struct iwl_priv *priv)
 {
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;
	int write;
-	int ret = 0;

	spin_lock_irqsave(&rxq->lock, flags);
	write = rxq->write & ~0x7;
@@ -200,7 +200,7 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
-		rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->aligned_dma_addr);
+		rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
@@ -218,10 +218,8 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
-		ret = iwl_rx_queue_update_write_ptr(priv, rxq);
+		iwl_rx_queue_update_write_ptr(priv, rxq);
	}
-
-	return ret;
 }
 EXPORT_SYMBOL(iwl_rx_queue_restock);

@@ -239,8 +237,9 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
-	struct sk_buff *skb;
+	struct page *page;
	unsigned long flags;
+	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
@@ -251,30 +250,35 @@
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
-			priority |= __GFP_NOWARN;
+			gfp_mask |= __GFP_NOWARN;
+
+		if (priv->hw_params.rx_page_order > 0)
+			gfp_mask |= __GFP_COMP;
+
		/* Alloc a new receive buffer */
-		skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
-				priority);
-
-		if (!skb) {
+		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
+		if (!page) {
			if (net_ratelimit())
-				IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
+				IWL_DEBUG_INFO(priv, "alloc_pages failed, "
+					       "order: %d\n",
+					       priv->hw_params.rx_page_order);
+
			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
-				IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
+				IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
-			break;
+			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
-			dev_kfree_skb_any(skb);
+			__free_pages(page, priv->hw_params.rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
@@ -283,24 +287,21 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)

		spin_unlock_irqrestore(&rxq->lock, flags);

-		rxb->skb = skb;
-		/* Get physical address of RB/SKB */
-		rxb->real_dma_addr = pci_map_single(
-					priv->pci_dev,
-					rxb->skb->data,
-					priv->hw_params.rx_buf_size + 256,
-					PCI_DMA_FROMDEVICE);
+		rxb->page = page;
+		/* Get physical address of the RB */
+		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
		/* dma address must be no more than 36 bits */
-		BUG_ON(rxb->real_dma_addr & ~DMA_BIT_MASK(36));
+		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
-		rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256);
-		skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr);
+		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
-		priv->alloc_rxb_skb++;
+		priv->alloc_rxb_page++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
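Note on the two hunks above, the core of the skb-to-paged-RX conversion: the receive buffer is now a (possibly high-order) page instead of a pre-padded skb, so the old ALIGN()/skb_reserve() fixup collapses into a plain BUG_ON. Page allocations are page-aligned and PAGE_SIZE is a multiple of 256, so the 256-byte alignment the DMA engine needs holds by construction. A minimal sketch of the pattern, with an assumed order value and pdev standing in for priv->pci_dev (the driver reads the real order from priv->hw_params.rx_page_order):

	gfp_t gfp_mask = GFP_ATOMIC;
	unsigned int order = 1;		/* assumed: 8 KiB buffers on 4 KiB pages */
	struct page *page;
	dma_addr_t dma;

	if (order > 0)
		gfp_mask |= __GFP_COMP;	/* high-order pages must be compound */

	page = alloc_pages(gfp_mask, order);
	if (page) {
		dma = pci_map_page(pdev, page, 0, PAGE_SIZE << order,
				   PCI_DMA_FROMDEVICE);
		BUG_ON(dma & ~DMA_BIT_MASK(36));	/* device uses 36-bit addresses */
		BUG_ON(dma & DMA_BIT_MASK(8));		/* and needs 256-byte alignment */
	}
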
@@ -336,19 +337,19 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 {
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
-		if (rxq->pool[i].skb != NULL) {
-			pci_unmap_single(priv->pci_dev,
-					 rxq->pool[i].real_dma_addr,
-					 priv->hw_params.rx_buf_size + 256,
-					 PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(rxq->pool[i].skb);
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			__iwl_free_pages(priv, rxq->pool[i].page);
+			rxq->pool[i].page = NULL;
		}
	}

-	pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
-			    rxq->dma_addr);
-	pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
-			    rxq->rb_stts, rxq->rb_stts_dma);
+	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+			  rxq->dma_addr);
+	dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
+			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
 }
@@ -357,7 +358,7 @@ EXPORT_SYMBOL(iwl_rx_queue_free);
 int iwl_rx_queue_alloc(struct iwl_priv *priv)
 {
	struct iwl_rx_queue *rxq = &priv->rxq;
-	struct pci_dev *dev = priv->pci_dev;
+	struct device *dev = &priv->pci_dev->dev;
	int i;

	spin_lock_init(&rxq->lock);
@@ -365,12 +366,13 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
	INIT_LIST_HEAD(&rxq->rx_used);

	/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
-	rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
+	rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr,
+				     GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

-	rxq->rb_stts = pci_alloc_consistent(dev, sizeof(struct iwl_rb_status),
-					&rxq->rb_stts_dma);
+	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
+					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb;

@@ -387,8 +389,8 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
	return 0;

 err_rb:
-	pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
-			    rxq->dma_addr);
+	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+			  rxq->dma_addr);
 err_bd:
	return -ENOMEM;
 }
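Note: the hunks above replace the pci_alloc_consistent()/pci_free_consistent() wrappers with direct dma_alloc_coherent()/dma_free_coherent() calls. The PCI wrappers hard-code GFP_ATOMIC internally; the DMA API takes a struct device plus an explicit gfp_t, which is what lets this sleepable setup path ask for GFP_KERNEL. Side by side:

	/* old wrapper: allocation flags fixed to GFP_ATOMIC */
	rxq->bd = pci_alloc_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE,
				       &rxq->dma_addr);

	/* direct call: the caller chooses the flags */
	rxq->bd = dma_alloc_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE,
				     &rxq->dma_addr, GFP_KERNEL);
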
@@ -405,14 +407,12 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
-		if (rxq->pool[i].skb != NULL) {
-			pci_unmap_single(priv->pci_dev,
-					 rxq->pool[i].real_dma_addr,
-					 priv->hw_params.rx_buf_size + 256,
-					 PCI_DMA_FROMDEVICE);
-			priv->alloc_rxb_skb--;
-			dev_kfree_skb(rxq->pool[i].skb);
-			rxq->pool[i].skb = NULL;
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			__iwl_free_pages(priv, rxq->pool[i].page);
+			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
@@ -470,7 +470,8 @@ int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

-	iwl_write32(priv, CSR_INT_COALESCING, 0x40);
+	/* Set interrupt coalescing timer to default (2048 usecs) */
+	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	return 0;
 }
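Note: the programmed value is unchanged here if IWL_HOST_INT_TIMEOUT_DEF keeps the old 0x40; only the access narrows from 32 to 8 bits, which assumes the coalescing timer occupies the low byte of CSR_INT_COALESCING. Assuming 32-usec timer ticks, which is what the new comment implies:

	/* 0x40 ticks * 32 usec/tick = 2048 usec */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
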
@@ -491,13 +492,14 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
			   struct iwl_rx_mem_buffer *rxb)

 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_missed_beacon_notif *missed_beacon;

	missed_beacon = &pkt->u.missed_beacon;
-	if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
+	if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
+	    priv->missed_beacon_threshold) {
		IWL_DEBUG_CALIB(priv, "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
-			le32_to_cpu(missed_beacon->consequtive_missed_beacons),
+			le32_to_cpu(missed_beacon->consecutive_missed_beacons),
			le32_to_cpu(missed_beacon->total_missed_becons),
			le32_to_cpu(missed_beacon->num_recvd_beacons),
			le32_to_cpu(missed_beacon->num_expected_beacons));
@@ -507,6 +509,24 @@
 }
 EXPORT_SYMBOL(iwl_rx_missed_beacon_notif);

+void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
+					  struct iwl_rx_mem_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
+
+	if (!report->state) {
+		IWL_DEBUG_11H(priv,
+			"Spectrum Measure Notification: Start\n");
+		return;
+	}
+
+	memcpy(&priv->measure_report, report, sizeof(*report));
+	priv->measurement_status |= MEASUREMENT_READY;
+}
+EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif);
+
+

 /* Calculate noise level, based on measurements during network silence just
  * before arriving beacon. This measurement can be done only if we know
@@ -548,13 +568,64 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
			priv->last_rx_noise);
 }

+#ifdef CONFIG_IWLWIFI_DEBUG
+/*
+ *  based on the assumption of all statistics counter are in DWORD
+ *  FIXME: This function is for debugging, do not deal with
+ *  the case of counters roll-over.
+ */
+static void iwl_accumulative_statistics(struct iwl_priv *priv,
+					__le32 *stats)
+{
+	int i;
+	__le32 *prev_stats;
+	u32 *accum_stats;
+	u32 *delta, *max_delta;
+
+	prev_stats = (__le32 *)&priv->statistics;
+	accum_stats = (u32 *)&priv->accum_statistics;
+	delta = (u32 *)&priv->delta_statistics;
+	max_delta = (u32 *)&priv->max_delta;
+
+	for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics);
+	     i += sizeof(__le32), stats++, prev_stats++, delta++,
+	     max_delta++, accum_stats++) {
+		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+			*delta = (le32_to_cpu(*stats) -
+				le32_to_cpu(*prev_stats));
+			*accum_stats += *delta;
+			if (*delta > *max_delta)
+				*max_delta = *delta;
+		}
+	}
+
+	/* reset accumulative statistics for "no-counter" type statistics */
+	priv->accum_statistics.general.temperature =
+		priv->statistics.general.temperature;
+	priv->accum_statistics.general.temperature_m =
+		priv->statistics.general.temperature_m;
+	priv->accum_statistics.general.ttl_timestamp =
+		priv->statistics.general.ttl_timestamp;
+	priv->accum_statistics.tx.tx_power.ant_a =
+		priv->statistics.tx.tx_power.ant_a;
+	priv->accum_statistics.tx.tx_power.ant_b =
+		priv->statistics.tx.tx_power.ant_b;
+	priv->accum_statistics.tx.tx_power.ant_c =
+		priv->statistics.tx.tx_power.ant_c;
+}
+#endif
+
 #define REG_RECALIB_PERIOD (60)

+#define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n"
 void iwl_rx_statistics(struct iwl_priv *priv,
		       struct iwl_rx_mem_buffer *rxb)
 {
	int change;
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	int combined_plcp_delta;
+	unsigned int plcp_msec;
+	unsigned long plcp_received_jiffies;

	IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
		     (int)sizeof(priv->statistics),
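Note: the debug-only helper added above treats struct iwl_notif_statistics as a flat array of little-endian u32 counters and walks it word by word; the loop starts at sizeof(__le32) to skip the leading flag word, and, as the FIXME says, counter wrap-around is ignored. The same idea on a self-contained toy struct (hypothetical, not the driver's layout):

	struct counters { u32 rx_ok, rx_err, tx_ok; };	/* stand-in */

	static void accumulate(struct counters *accum, u32 *max_delta,
			       const struct counters *prev,
			       const struct counters *cur)
	{
		const u32 *p = (const u32 *)prev, *c = (const u32 *)cur;
		u32 *acc = (u32 *)accum;
		size_t i, n = sizeof(*cur) / sizeof(u32);

		for (i = 0; i < n; i++) {
			if (c[i] > p[i]) {	/* wrapped counters are skipped */
				u32 delta = c[i] - p[i];
				acc[i] += delta;
				if (delta > max_delta[i])
					max_delta[i] = delta;
			}
		}
	}

Gauges such as temperature are then overwritten with the latest sample afterwards, since summing a gauge would be meaningless.
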
@@ -566,6 +637,59 @@
	      STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
	      (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));

+#ifdef CONFIG_IWLWIFI_DEBUG
+	iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
+#endif
+	/*
+	 * check for plcp_err and trigger radio reset if it exceeds
+	 * the plcp error threshold plcp_delta.
+	 */
+	plcp_received_jiffies = jiffies;
+	plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
+					(long) priv->plcp_jiffies);
+	priv->plcp_jiffies = plcp_received_jiffies;
+	/*
+	 * check to make sure plcp_msec is not 0 to prevent division
+	 * by zero.
+	 */
+	if (plcp_msec) {
+		combined_plcp_delta =
+			(le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err) -
+			le32_to_cpu(priv->statistics.rx.ofdm.plcp_err)) +
+			(le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err) -
+			le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err));
+
+		if ((combined_plcp_delta > 0) &&
+			((combined_plcp_delta * 100) / plcp_msec) >
+			priv->cfg->plcp_delta_threshold) {
+			/*
+			 * if plcp_err exceed the threshold, the following
+			 * data is printed in csv format:
+			 *    Text: plcp_err exceeded %d,
+			 *    Received ofdm.plcp_err,
+			 *    Current ofdm.plcp_err,
+			 *    Received ofdm_ht.plcp_err,
+			 *    Current ofdm_ht.plcp_err,
+			 *    combined_plcp_delta,
+			 *    plcp_msec
+			 */
+			IWL_DEBUG_RADIO(priv, PLCP_MSG,
+				priv->cfg->plcp_delta_threshold,
+				le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err),
+				le32_to_cpu(priv->statistics.rx.ofdm.plcp_err),
+				le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err),
+				le32_to_cpu(
+					priv->statistics.rx.ofdm_ht.plcp_err),
+				combined_plcp_delta, plcp_msec);
+
+			/*
+			 * Reset the RF radio due to the high plcp
+			 * error rate
+			 */
+			iwl_force_reset(priv, IWL_RF_RESET);
+		}
+	}
+
	memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));

	set_bit(STATUS_STATISTICS, &priv->status);
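Note: the new trigger normalizes the combined OFDM plus OFDM-HT PLCP error delta to errors per 100 ms before comparing it against the per-device plcp_delta_threshold. Worked through with assumed sample values: 150 new errors accumulated over 200 ms gives (150 * 100) / 200 = 75, so with a threshold of 50 the check fires and the RF radio is reset:

	/* assumed sample values, for illustration only */
	int combined_plcp_delta = 150;	/* errors since the last notification */
	unsigned int plcp_msec = 200;	/* ms since the last notification */
	u32 threshold = 50;		/* stand-in for cfg->plcp_delta_threshold */
	bool reset = combined_plcp_delta > 0 &&
		     (combined_plcp_delta * 100) / plcp_msec > threshold;
	/* (150 * 100) / 200 = 75 > 50, so reset == true */
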
@@ -582,54 +706,30 @@
		iwl_rx_calc_noise(priv);
		queue_work(priv->workqueue, &priv->run_time_calib_work);
	}
-
-	iwl_leds_background(priv);
-
	if (priv->cfg->ops->lib->temp_ops.temperature && change)
		priv->cfg->ops->lib->temp_ops.temperature(priv);
 }
 EXPORT_SYMBOL(iwl_rx_statistics);

-#define PERFECT_RSSI (-20) /* dBm */
-#define WORST_RSSI (-95) /* dBm */
-#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
-
-/* Calculate an indication of rx signal quality (a percentage, not dBm!).
- * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
- * about formulas used below. */
-static int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm)
+void iwl_reply_statistics(struct iwl_priv *priv,
+			      struct iwl_rx_mem_buffer *rxb)
 {
-	int sig_qual;
-	int degradation = PERFECT_RSSI - rssi_dbm;
-
-	/* If we get a noise measurement, use signal-to-noise ratio (SNR)
-	 * as indicator; formula is (signal dbm - noise dbm).
-	 * SNR at or above 40 is a great signal (100%).
-	 * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
-	 * Weakest usable signal is usually 10 - 15 dB SNR. */
-	if (noise_dbm) {
-		if (rssi_dbm - noise_dbm >= 40)
-			return 100;
-		else if (rssi_dbm < noise_dbm)
-			return 0;
-		sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
-
-	/* Else use just the signal level.
-	 * This formula is a least squares fit of data points collected and
-	 *   compared with a reference system that had a percentage (%) display
-	 *   for signal quality. */
-	} else
-		sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
-			    (15 * RSSI_RANGE + 62 * degradation)) /
-			    (RSSI_RANGE * RSSI_RANGE);
-
-	if (sig_qual > 100)
-		sig_qual = 100;
-	else if (sig_qual < 1)
-		sig_qual = 0;
-
-	return sig_qual;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
+#ifdef CONFIG_IWLWIFI_DEBUG
+		memset(&priv->accum_statistics, 0,
+			sizeof(struct iwl_notif_statistics));
+		memset(&priv->delta_statistics, 0,
+			sizeof(struct iwl_notif_statistics));
+		memset(&priv->max_delta, 0,
+			sizeof(struct iwl_notif_statistics));
+#endif
+		IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
+	}
+	iwl_rx_statistics(priv, rxb);
 }
+EXPORT_SYMBOL(iwl_reply_statistics);

 /* Calc max signal level (dBm) among 3 possible receivers */
 static inline int iwl_calc_rssi(struct iwl_priv *priv,
@@ -878,6 +978,10 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
					struct iwl_rx_mem_buffer *rxb,
					struct ieee80211_rx_status *stats)
 {
+	struct sk_buff *skb;
+	int ret = 0;
+	__le16 fc = hdr->frame_control;
+
	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
@@ -890,15 +994,47 @@
	    iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
		return;

-	/* Resize SKB from mac header to end of packet */
-	skb_reserve(rxb->skb, (void *)hdr - (void *)rxb->skb->data);
-	skb_put(rxb->skb, len);
+	skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
+	if (!skb) {
+		IWL_ERR(priv, "alloc_skb failed\n");
+		return;
+	}
+
+	skb_reserve(skb, IWL_LINK_HDR_MAX);
+	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+
+	/* mac80211 currently doesn't support paged SKB. Convert it to
+	 * linear SKB for management frame and data frame requires
+	 * software decryption or software defragementation. */
+	if (ieee80211_is_mgmt(fc) ||
+	    ieee80211_has_protected(fc) ||
+	    ieee80211_has_morefrags(fc) ||
+	    le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG ||
+	    (ieee80211_is_data_qos(fc) &&
+	     *ieee80211_get_qos_ctl(hdr) &
+	     IEEE80211_QOS_CONTROL_A_MSDU_PRESENT))
+		ret = skb_linearize(skb);
+	else
+		ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
+			0 : -ENOMEM;
+
+	if (ret) {
+		kfree_skb(skb);
+		goto out;
+	}

-	iwl_update_stats(priv, false, hdr->frame_control, len);
-	memcpy(IEEE80211_SKB_RXCB(rxb->skb), stats, sizeof(*stats));
-	ieee80211_rx_irqsafe(priv->hw, rxb->skb);
-	priv->alloc_rxb_skb--;
-	rxb->skb = NULL;
+	/*
+	 * XXX: We cannot touch the page and its virtual memory (hdr) after
+	 * here. It might have already been freed by the above skb change.
+	 */
+
+	iwl_update_stats(priv, false, fc, len);
+	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+	ieee80211_rx(priv->hw, skb);
+ out:
+	priv->alloc_rxb_page--;
+	rxb->page = NULL;
 }

 /* This is necessary only for a number of statistics, see the caller. */
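Note: with paged RX the hardware buffer is attached to a freshly allocated skb as a page fragment instead of being copied. skb_add_rx_frag() transfers the page reference to the skb, which is why the XXX comment forbids touching hdr afterwards (linearizing may copy the data out and free the page) and why the function ends by clearing rxb->page on every path. Frames that mac80211 must be able to read linearly at this point (management, protected, fragmented, A-MSDU) are fully linearized; everything else only pulls at most IWL_LINK_HDR_MAX header bytes into the linear area. The delivery call can also become ieee80211_rx(), since this handler runs from the driver's RX tasklet rather than hard-IRQ context. Ownership flow, compressed (error handling as in the hunk above):

	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
	/* the skb now owns the page; neither 'hdr' nor the page
	 * may be dereferenced by the driver after this point */
	if (skb_linearize(skb))
		kfree_skb(skb);			/* page freed with the skb */
	else
		ieee80211_rx(priv->hw, skb);	/* page freed once consumed */
	rxb->page = NULL;			/* driver drops its pointer */
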
@@ -926,13 +1062,12 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
 {
	struct ieee80211_hdr *header;
	struct ieee80211_rx_status rx_status;
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_phy_res *phy_res;
	__le32 rx_pkt_status;
	struct iwl4965_rx_mpdu_res_start *amsdu;
	u32 len;
	u32 ampdu_status;
-	u16 fc;
	u32 rate_n_flags;

	/**
@@ -1009,11 +1144,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
	if (iwl_is_associated(priv) &&
	    !test_bit(STATUS_SCANNING, &priv->status)) {
		rx_status.noise = priv->last_rx_noise;
-		rx_status.qual = iwl_calc_sig_qual(rx_status.signal,
-						   rx_status.noise);
	} else {
		rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
-		rx_status.qual = iwl_calc_sig_qual(rx_status.signal, 0);
	}

	/* Reset beacon noise level if not associated. */
@@ -1026,8 +1158,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
	iwl_dbg_report_frame(priv, phy_res, len, header, 1);
 #endif
	iwl_dbg_log_rx_data_frame(priv, len, header);
-	IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, qual %d, TSF %llu\n",
-		rx_status.signal, rx_status.noise, rx_status.qual,
+	IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, TSF %llu\n",
+		rx_status.signal, rx_status.noise,
		(unsigned long long)rx_status.mactime);

	/*
@@ -1065,20 +1197,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
		priv->last_tsf = le64_to_cpu(phy_res->timestamp);
	}

-	fc = le16_to_cpu(header->frame_control);
-	switch (fc & IEEE80211_FCTL_FTYPE) {
-	case IEEE80211_FTYPE_MGMT:
-	case IEEE80211_FTYPE_DATA:
-		if (priv->iw_mode == NL80211_IFTYPE_AP)
-			iwl_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
-					   header->addr2);
-		/* fall through */
-	default:
-		iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
-					rxb, &rx_status);
-		break;
-
-	}
+	iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
+				    rxb, &rx_status);
 }
 EXPORT_SYMBOL(iwl_rx_reply_rx);

@@ -1087,7 +1207,7 @@ EXPORT_SYMBOL(iwl_rx_reply_rx);
 void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
			 struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	priv->last_phy_res[0] = 1;
	memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
	       sizeof(struct iwl_rx_phy_res));