Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-rx.c')
 drivers/net/wireless/iwlwifi/iwl-rx.c | 180
 1 file changed, 129 insertions(+), 51 deletions(-)
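This commit converts the iwl-rx.c receive path from pre-allocated linear skbs to page-backed Rx buffers. For orientation, here is a rough before/after of the struct iwl_rx_mem_buffer fields the diff touches. This sketch is reconstructed from the identifiers used in the hunks below, not copied from the driver headers, so treat the names and layout as illustrative:

    /* Illustrative only -- reconstructed from the identifiers in this diff. */
    struct iwl_rx_mem_buffer {
    #ifdef BEFORE_THIS_PATCH
            struct sk_buff *skb;            /* linear receive buffer */
            dma_addr_t real_dma_addr;       /* from pci_map_single() */
            dma_addr_t aligned_dma_addr;    /* real_dma_addr rounded up to 256 B */
    #else                                   /* after this patch */
            struct page *page;              /* page-backed receive buffer */
            dma_addr_t page_dma;            /* from pci_map_page() */
    #endif
            struct list_head list;
    };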
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 493626bcd3ec..61b3b0e6ed73 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -140,6 +140,8 @@ int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
 		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
 
 		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+			IWL_DEBUG_INFO(priv, "Rx queue requesting wakeup, GP1 = 0x%x\n",
+				       reg);
 			iwl_set_bit(priv, CSR_GP_CNTRL,
 				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 			goto exit_unlock;
@@ -200,7 +202,7 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
 		list_del(element);
 
 		/* Point to Rx buffer via next RBD in circular buffer */
-		rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->aligned_dma_addr);
+		rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->page_dma);
 		rxq->queue[rxq->write] = rxb;
 		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
 		rxq->free_count--;
@@ -239,8 +241,9 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 	struct iwl_rx_queue *rxq = &priv->rxq;
 	struct list_head *element;
 	struct iwl_rx_mem_buffer *rxb;
-	struct sk_buff *skb;
+	struct page *page;
 	unsigned long flags;
+	gfp_t gfp_mask = priority;
 
 	while (1) {
 		spin_lock_irqsave(&rxq->lock, flags);
@@ -251,30 +254,35 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 		spin_unlock_irqrestore(&rxq->lock, flags);
 
 		if (rxq->free_count > RX_LOW_WATERMARK)
-			priority |= __GFP_NOWARN;
-		/* Alloc a new receive buffer */
-		skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
-				priority);
+			gfp_mask |= __GFP_NOWARN;
 
-		if (!skb) {
+		if (priv->hw_params.rx_page_order > 0)
+			gfp_mask |= __GFP_COMP;
+
+		/* Alloc a new receive buffer */
+		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
+		if (!page) {
 			if (net_ratelimit())
-				IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
+				IWL_DEBUG_INFO(priv, "alloc_pages failed, "
+					       "order: %d\n",
+					       priv->hw_params.rx_page_order);
+
 			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
 			    net_ratelimit())
-				IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
+				IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
 					 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
 					 rxq->free_count);
 			/* We don't reschedule replenish work here -- we will
 			 * call the restock method and if it still needs
 			 * more buffers it will schedule replenish */
-			break;
+			return;
 		}
 
 		spin_lock_irqsave(&rxq->lock, flags);
 
 		if (list_empty(&rxq->rx_used)) {
 			spin_unlock_irqrestore(&rxq->lock, flags);
-			dev_kfree_skb_any(skb);
+			__free_pages(page, priv->hw_params.rx_page_order);
 			return;
 		}
 		element = rxq->rx_used.next;
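The allocation hunk above hinges on two GFP adjustments: page-allocator warnings are suppressed while the free pool is still above the low watermark, and __GFP_COMP is requested for any multi-page (order > 0) buffer so the allocator returns a compound page. A minimal sketch of that flag policy, with a hypothetical helper name:

    #include <linux/gfp.h>

    /* Hypothetical helper mirroring the flag policy in iwl_rx_allocate();
     * RX_LOW_WATERMARK is the driver's refill threshold. */
    static gfp_t rx_alloc_gfp(gfp_t priority, unsigned int page_order,
                              unsigned int free_count)
    {
            gfp_t gfp_mask = priority;      /* GFP_ATOMIC or GFP_KERNEL */

            if (free_count > RX_LOW_WATERMARK)
                    gfp_mask |= __GFP_NOWARN; /* failures here are routine */
            if (page_order > 0)
                    gfp_mask |= __GFP_COMP;   /* compound (multi-page) buffer */
            return gfp_mask;
    }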
@@ -283,24 +291,21 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 
 		spin_unlock_irqrestore(&rxq->lock, flags);
 
-		rxb->skb = skb;
-		/* Get physical address of RB/SKB */
-		rxb->real_dma_addr = pci_map_single(
-			priv->pci_dev,
-			rxb->skb->data,
-			priv->hw_params.rx_buf_size + 256,
-			PCI_DMA_FROMDEVICE);
+		rxb->page = page;
+		/* Get physical address of the RB */
+		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
 		/* dma address must be no more than 36 bits */
-		BUG_ON(rxb->real_dma_addr & ~DMA_BIT_MASK(36));
+		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
 		/* and also 256 byte aligned! */
-		rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256);
-		skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr);
+		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
 
 		spin_lock_irqsave(&rxq->lock, flags);
 
 		list_add_tail(&rxb->list, &rxq->rx_free);
 		rxq->free_count++;
-		priv->alloc_rxb_skb++;
+		priv->alloc_rxb_page++;
 
 		spin_unlock_irqrestore(&rxq->lock, flags);
 	}
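The two BUG_ON() checks above encode the device's RBD address rules: the DMA address must fit in 36 bits and must be 256-byte aligned. DMA_BIT_MASK(8) is 0xff, so `addr & DMA_BIT_MASK(8)` is non-zero exactly when the low eight bits are set, i.e. when the address is not 256-byte aligned. Page mappings start on a PAGE_SIZE boundary, so they satisfy this automatically, which is why the old ALIGN()/skb_reserve() fixup could be dropped. As a sketch, with a hypothetical predicate name:

    /* Hypothetical predicate; not a driver symbol. */
    static inline bool iwl_rbd_addr_valid(dma_addr_t addr)
    {
            return !(addr & ~DMA_BIT_MASK(36)) &&  /* fits in 36 bits */
                   !(addr & DMA_BIT_MASK(8));      /* 256-byte aligned */
    }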
@@ -336,12 +341,14 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 {
 	int i;
 	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
-		if (rxq->pool[i].skb != NULL) {
-			pci_unmap_single(priv->pci_dev,
-					 rxq->pool[i].real_dma_addr,
-					 priv->hw_params.rx_buf_size + 256,
-					 PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(rxq->pool[i].skb);
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			__free_pages(rxq->pool[i].page,
+				     priv->hw_params.rx_page_order);
+			rxq->pool[i].page = NULL;
+			priv->alloc_rxb_page--;
 		}
 	}
 
@@ -405,14 +412,14 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
 		/* In the reset function, these buffers may have been allocated
 		 * to an SKB, so we need to unmap and free potential storage */
-		if (rxq->pool[i].skb != NULL) {
-			pci_unmap_single(priv->pci_dev,
-					 rxq->pool[i].real_dma_addr,
-					 priv->hw_params.rx_buf_size + 256,
-					 PCI_DMA_FROMDEVICE);
-			priv->alloc_rxb_skb--;
-			dev_kfree_skb(rxq->pool[i].skb);
-			rxq->pool[i].skb = NULL;
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			priv->alloc_rxb_page--;
+			__free_pages(rxq->pool[i].page,
+				     priv->hw_params.rx_page_order);
+			rxq->pool[i].page = NULL;
 		}
 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
 	}
@@ -491,7 +498,7 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
 			       struct iwl_rx_mem_buffer *rxb)
 
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_missed_beacon_notif *missed_beacon;
 
 	missed_beacon = &pkt->u.missed_beacon;
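This and several later hunks replace `(struct iwl_rx_packet *)rxb->skb->data` with rxb_addr(rxb). The accessor is defined elsewhere in this series; given the page-backed buffer, it presumably reduces to the page's kernel virtual address, along these lines (assumed shape, not quoted from the headers):

    /* Assumed definition of the accessor added elsewhere in this series: */
    #define rxb_addr(r) page_address((r)->page)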
@@ -548,13 +555,51 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
 			priv->last_rx_noise);
 }
 
+#ifdef CONFIG_IWLWIFI_DEBUG
+/*
+ * Accumulate statistics counters. This assumes every counter in the
+ * statistics notification is a DWORD.
+ * FIXME: this is a debugging aid only and does not handle counter
+ * roll-over.
+ */
+static void iwl_accumulative_statistics(struct iwl_priv *priv,
+					__le32 *stats)
+{
+	int i;
+	__le32 *prev_stats;
+	u32 *accum_stats;
+
+	prev_stats = (__le32 *)&priv->statistics;
+	accum_stats = (u32 *)&priv->accum_statistics;
+
+	for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics);
+	     i += sizeof(__le32), stats++, prev_stats++, accum_stats++)
+		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats))
+			*accum_stats += (le32_to_cpu(*stats) -
+					 le32_to_cpu(*prev_stats));
+
+	/* reset accumulative statistics for "no-counter" type statistics */
+	priv->accum_statistics.general.temperature =
+		priv->statistics.general.temperature;
+	priv->accum_statistics.general.temperature_m =
+		priv->statistics.general.temperature_m;
+	priv->accum_statistics.general.ttl_timestamp =
+		priv->statistics.general.ttl_timestamp;
+	priv->accum_statistics.tx.tx_power.ant_a =
+		priv->statistics.tx.tx_power.ant_a;
+	priv->accum_statistics.tx.tx_power.ant_b =
+		priv->statistics.tx.tx_power.ant_b;
+	priv->accum_statistics.tx.tx_power.ant_c =
+		priv->statistics.tx.tx_power.ant_c;
+}
+#endif
+
 #define REG_RECALIB_PERIOD (60)
 
 void iwl_rx_statistics(struct iwl_priv *priv,
 		       struct iwl_rx_mem_buffer *rxb)
 {
 	int change;
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
 	IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
 		     (int)sizeof(priv->statistics),
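The walk in iwl_accumulative_statistics() above treats both snapshots as flat arrays of little-endian 32-bit counters and adds only positive deltas, so a counter reset (e.g. after a uCode restart zeroes the statistics) cannot drive the accumulators backwards. A standalone sketch of the same pattern, with hypothetical names:

    /* Accumulate positive deltas between two counter snapshots viewed as
     * arrays of __le32. All names here are hypothetical. */
    static void accum_le32_deltas(const __le32 *cur, const __le32 *prev,
                                  u32 *accum, size_t n_words)
    {
            size_t i;

            for (i = 0; i < n_words; i++)
                    if (le32_to_cpu(cur[i]) > le32_to_cpu(prev[i]))
                            accum[i] += le32_to_cpu(cur[i]) -
                                        le32_to_cpu(prev[i]);
    }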
@@ -566,6 +611,9 @@ void iwl_rx_statistics(struct iwl_priv *priv,
 		      STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
 		      (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
 
+#ifdef CONFIG_IWLWIFI_DEBUG
+	iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
+#endif
 	memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
 
 	set_bit(STATUS_STATISTICS, &priv->status);
@@ -582,9 +630,6 @@ void iwl_rx_statistics(struct iwl_priv *priv,
 		iwl_rx_calc_noise(priv);
 		queue_work(priv->workqueue, &priv->run_time_calib_work);
 	}
-
-	iwl_leds_background(priv);
-
 	if (priv->cfg->ops->lib->temp_ops.temperature && change)
 		priv->cfg->ops->lib->temp_ops.temperature(priv);
 }
@@ -878,6 +923,10 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
 					struct iwl_rx_mem_buffer *rxb,
 					struct ieee80211_rx_status *stats)
 {
+	struct sk_buff *skb;
+	int ret = 0;
+	__le16 fc = hdr->frame_control;
+
 	/* We only process data packets if the interface is open */
 	if (unlikely(!priv->is_open)) {
 		IWL_DEBUG_DROP_LIMIT(priv,
@@ -890,15 +939,44 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
 	    iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
 		return;
 
-	/* Resize SKB from mac header to end of packet */
-	skb_reserve(rxb->skb, (void *)hdr - (void *)rxb->skb->data);
-	skb_put(rxb->skb, len);
+	skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
+	if (!skb) {
+		IWL_ERR(priv, "alloc_skb failed\n");
+		return;
+	}
+
+	skb_reserve(skb, IWL_LINK_HDR_MAX);
+	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+
+	/* mac80211 doesn't currently support paged SKBs. Convert to a
+	 * linear SKB for management frames and for data frames that
+	 * require software decryption or software defragmentation. */
+	if (ieee80211_is_mgmt(fc) ||
+	    ieee80211_has_protected(fc) ||
+	    ieee80211_has_morefrags(fc) ||
+	    le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
+		ret = skb_linearize(skb);
+	else
+		ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
+			0 : -ENOMEM;
+
+	if (ret) {
+		kfree_skb(skb);
+		goto out;
+	}
+
+	/*
+	 * XXX: We cannot touch the page and its virtual memory (hdr) after
+	 * here. It might have already been freed by the above skb change.
+	 */
+
+	iwl_update_stats(priv, false, fc, len);
+	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
 
-	iwl_update_stats(priv, false, hdr->frame_control, len);
-	memcpy(IEEE80211_SKB_RXCB(rxb->skb), stats, sizeof(*stats));
-	ieee80211_rx_irqsafe(priv->hw, rxb->skb);
-	priv->alloc_rxb_skb--;
-	rxb->skb = NULL;
+	ieee80211_rx(priv->hw, skb);
+out:
+	priv->alloc_rxb_page--;
+	rxb->page = NULL;
 }
 
 /* This is necessary only for a number of statistics, see the caller. */
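The rewritten hand-off above is the heart of the paged-Rx conversion: allocate a small skb holding only headroom, attach the receive page as a fragment with skb_add_rx_frag() (the five-argument form of this kernel era), then either fully linearize or pull just the frame header into the linear area before calling ieee80211_rx(). A condensed sketch of the pattern; hdr_room, page_base and frame are placeholder names, not driver symbols:

    /* Condensed sketch of the paged hand-off used above. */
    static struct sk_buff *build_paged_rx_skb(struct page *page,
                                              void *page_base, void *frame,
                                              int len, int hdr_room)
    {
            struct sk_buff *skb = alloc_skb(hdr_room * 2, GFP_ATOMIC);

            if (!skb)
                    return NULL;
            skb_reserve(skb, hdr_room);

            /* The payload stays in the page; the skb only references it. */
            skb_add_rx_frag(skb, 0, page, frame - page_base, len);

            /* Make at least the frame header visible in the linear area. */
            if (!__pskb_pull_tail(skb, min_t(u16, hdr_room, len))) {
                    kfree_skb(skb);
                    return NULL;
            }
            return skb;
    }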
@@ -926,7 +1004,7 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
 {
 	struct ieee80211_hdr *header;
 	struct ieee80211_rx_status rx_status;
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_rx_phy_res *phy_res;
 	__le32 rx_pkt_status;
 	struct iwl4965_rx_mpdu_res_start *amsdu;
@@ -1087,7 +1165,7 @@ EXPORT_SYMBOL(iwl_rx_reply_rx);
 void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
 			 struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	priv->last_phy_res[0] = 1;
 	memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
 	       sizeof(struct iwl_rx_phy_res));