Diffstat (limited to 'net/mac80211/rx.c')
-rw-r--r-- | net/mac80211/rx.c | 819 |
1 file changed, 520 insertions, 299 deletions
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 28624282c5f3..902b03ee8f60 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -315,6 +315,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, | |||
315 | static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) | 315 | static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) |
316 | { | 316 | { |
317 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; | 317 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; |
318 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); | ||
318 | int tid; | 319 | int tid; |
319 | 320 | ||
320 | /* does the frame have a qos control field? */ | 321 | /* does the frame have a qos control field? */ |
@@ -323,9 +324,7 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) | |||
323 | /* frame has qos control */ | 324 | /* frame has qos control */ |
324 | tid = *qc & IEEE80211_QOS_CTL_TID_MASK; | 325 | tid = *qc & IEEE80211_QOS_CTL_TID_MASK; |
325 | if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT) | 326 | if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT) |
326 | rx->flags |= IEEE80211_RX_AMSDU; | 327 | status->rx_flags |= IEEE80211_RX_AMSDU; |
327 | else | ||
328 | rx->flags &= ~IEEE80211_RX_AMSDU; | ||
329 | } else { | 328 | } else { |
330 | /* | 329 | /* |
331 | * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"): | 330 | * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"): |
@@ -387,26 +386,25 @@ static ieee80211_rx_result debug_noinline | |||
387 | ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx) | 386 | ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx) |
388 | { | 387 | { |
389 | struct ieee80211_local *local = rx->local; | 388 | struct ieee80211_local *local = rx->local; |
389 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); | ||
390 | struct sk_buff *skb = rx->skb; | 390 | struct sk_buff *skb = rx->skb; |
391 | 391 | ||
392 | if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning))) | 392 | if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN))) |
393 | return RX_CONTINUE; | ||
394 | |||
395 | if (test_bit(SCAN_HW_SCANNING, &local->scanning)) | ||
393 | return ieee80211_scan_rx(rx->sdata, skb); | 396 | return ieee80211_scan_rx(rx->sdata, skb); |
394 | 397 | ||
395 | if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning) && | 398 | if (test_bit(SCAN_SW_SCANNING, &local->scanning)) { |
396 | (rx->flags & IEEE80211_RX_IN_SCAN))) { | ||
397 | /* drop all the other packets during a software scan anyway */ | 399 | /* drop all the other packets during a software scan anyway */ |
398 | if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED) | 400 | if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED) |
399 | dev_kfree_skb(skb); | 401 | dev_kfree_skb(skb); |
400 | return RX_QUEUED; | 402 | return RX_QUEUED; |
401 | } | 403 | } |
402 | 404 | ||
403 | if (unlikely(rx->flags & IEEE80211_RX_IN_SCAN)) { | 405 | /* scanning finished during invoking of handlers */ |
404 | /* scanning finished during invoking of handlers */ | 406 | I802_DEBUG_INC(local->rx_handlers_drop_passive_scan); |
405 | I802_DEBUG_INC(local->rx_handlers_drop_passive_scan); | 407 | return RX_DROP_UNUSABLE; |
406 | return RX_DROP_UNUSABLE; | ||
407 | } | ||
408 | |||
409 | return RX_CONTINUE; | ||
410 | } | 408 | } |
411 | 409 | ||
412 | 410 | ||
@@ -538,20 +536,12 @@ static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw, | |||
538 | int index, | 536 | int index, |
539 | struct sk_buff_head *frames) | 537 | struct sk_buff_head *frames) |
540 | { | 538 | { |
541 | struct ieee80211_supported_band *sband; | ||
542 | struct ieee80211_rate *rate = NULL; | ||
543 | struct sk_buff *skb = tid_agg_rx->reorder_buf[index]; | 539 | struct sk_buff *skb = tid_agg_rx->reorder_buf[index]; |
544 | struct ieee80211_rx_status *status; | ||
545 | 540 | ||
546 | if (!skb) | 541 | if (!skb) |
547 | goto no_frame; | 542 | goto no_frame; |
548 | 543 | ||
549 | status = IEEE80211_SKB_RXCB(skb); | 544 | /* release the frame from the reorder ring buffer */ |
550 | |||
551 | /* release the reordered frames to stack */ | ||
552 | sband = hw->wiphy->bands[status->band]; | ||
553 | if (!(status->flag & RX_FLAG_HT)) | ||
554 | rate = &sband->bitrates[status->rate_idx]; | ||
555 | tid_agg_rx->stored_mpdu_num--; | 545 | tid_agg_rx->stored_mpdu_num--; |
556 | tid_agg_rx->reorder_buf[index] = NULL; | 546 | tid_agg_rx->reorder_buf[index] = NULL; |
557 | __skb_queue_tail(frames, skb); | 547 | __skb_queue_tail(frames, skb); |
@@ -580,9 +570,102 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw, | |||
580 | * frames that have not yet been received are assumed to be lost and the skb | 570 | * frames that have not yet been received are assumed to be lost and the skb |
581 | * can be released for processing. This may also release other skb's from the | 571 | * can be released for processing. This may also release other skb's from the |
582 | * reorder buffer if there are no additional gaps between the frames. | 572 | * reorder buffer if there are no additional gaps between the frames. |
573 | * | ||
574 | * Callers must hold tid_agg_rx->reorder_lock. | ||
583 | */ | 575 | */ |
584 | #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10) | 576 | #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10) |
585 | 577 | ||
578 | static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw, | ||
579 | struct tid_ampdu_rx *tid_agg_rx, | ||
580 | struct sk_buff_head *frames) | ||
581 | { | ||
582 | int index, j; | ||
583 | |||
584 | /* release the buffer until next missing frame */ | ||
585 | index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % | ||
586 | tid_agg_rx->buf_size; | ||
587 | if (!tid_agg_rx->reorder_buf[index] && | ||
588 | tid_agg_rx->stored_mpdu_num > 1) { | ||
589 | /* | ||
590 | * No buffers ready to be released, but check whether any | ||
591 | * frames in the reorder buffer have timed out. | ||
592 | */ | ||
593 | int skipped = 1; | ||
594 | for (j = (index + 1) % tid_agg_rx->buf_size; j != index; | ||
595 | j = (j + 1) % tid_agg_rx->buf_size) { | ||
596 | if (!tid_agg_rx->reorder_buf[j]) { | ||
597 | skipped++; | ||
598 | continue; | ||
599 | } | ||
600 | if (!time_after(jiffies, tid_agg_rx->reorder_time[j] + | ||
601 | HT_RX_REORDER_BUF_TIMEOUT)) | ||
602 | goto set_release_timer; | ||
603 | |||
604 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
605 | if (net_ratelimit()) | ||
606 | wiphy_debug(hw->wiphy, | ||
607 | "release an RX reorder frame due to timeout on earlier frames\n"); | ||
608 | #endif | ||
609 | ieee80211_release_reorder_frame(hw, tid_agg_rx, | ||
610 | j, frames); | ||
611 | |||
612 | /* | ||
613 | * Increment the head seq# also for the skipped slots. | ||
614 | */ | ||
615 | tid_agg_rx->head_seq_num = | ||
616 | (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK; | ||
617 | skipped = 0; | ||
618 | } | ||
619 | } else while (tid_agg_rx->reorder_buf[index]) { | ||
620 | ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames); | ||
621 | index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % | ||
622 | tid_agg_rx->buf_size; | ||
623 | } | ||
624 | |||
625 | /* | ||
626 | * Disable the reorder release timer for now. | ||
627 | * | ||
628 | * The current implementation lacks a proper locking scheme | ||
629 | * which would protect vital statistic and debug counters | ||
630 | * from being updated by two different but concurrent BHs. | ||
631 | * | ||
632 | * More information about the topic is available from: | ||
633 | * - thread: http://marc.info/?t=128635927000001 | ||
634 | * | ||
635 | * What was wrong: | ||
636 | * => http://marc.info/?l=linux-wireless&m=128636170811964 | ||
637 | * "Basically the thing is that until your patch, the data | ||
638 | * in the struct didn't actually need locking because it | ||
639 | * was accessed by the RX path only which is not concurrent." | ||
640 | * | ||
641 | * List of what needs to be fixed: | ||
642 | * => http://marc.info/?l=linux-wireless&m=128656352920957 | ||
643 | * | ||
644 | |||
645 | if (tid_agg_rx->stored_mpdu_num) { | ||
646 | j = index = seq_sub(tid_agg_rx->head_seq_num, | ||
647 | tid_agg_rx->ssn) % tid_agg_rx->buf_size; | ||
648 | |||
649 | for (; j != (index - 1) % tid_agg_rx->buf_size; | ||
650 | j = (j + 1) % tid_agg_rx->buf_size) { | ||
651 | if (tid_agg_rx->reorder_buf[j]) | ||
652 | break; | ||
653 | } | ||
654 | |||
655 | set_release_timer: | ||
656 | |||
657 | mod_timer(&tid_agg_rx->reorder_timer, | ||
658 | tid_agg_rx->reorder_time[j] + | ||
659 | HT_RX_REORDER_BUF_TIMEOUT); | ||
660 | } else { | ||
661 | del_timer(&tid_agg_rx->reorder_timer); | ||
662 | } | ||
663 | */ | ||
664 | |||
665 | set_release_timer: | ||
666 | return; | ||
667 | } | ||
668 | |||
586 | /* | 669 | /* |
587 | * As this function belongs to the RX path it must be under | 670 | * As this function belongs to the RX path it must be under |
588 | * rcu_read_lock protection. It returns false if the frame | 671 | * rcu_read_lock protection. It returns false if the frame |
@@ -598,14 +681,16 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, | |||
598 | u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4; | 681 | u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4; |
599 | u16 head_seq_num, buf_size; | 682 | u16 head_seq_num, buf_size; |
600 | int index; | 683 | int index; |
684 | bool ret = true; | ||
601 | 685 | ||
602 | buf_size = tid_agg_rx->buf_size; | 686 | buf_size = tid_agg_rx->buf_size; |
603 | head_seq_num = tid_agg_rx->head_seq_num; | 687 | head_seq_num = tid_agg_rx->head_seq_num; |
604 | 688 | ||
689 | spin_lock(&tid_agg_rx->reorder_lock); | ||
605 | /* frame with out of date sequence number */ | 690 | /* frame with out of date sequence number */ |
606 | if (seq_less(mpdu_seq_num, head_seq_num)) { | 691 | if (seq_less(mpdu_seq_num, head_seq_num)) { |
607 | dev_kfree_skb(skb); | 692 | dev_kfree_skb(skb); |
608 | return true; | 693 | goto out; |
609 | } | 694 | } |
610 | 695 | ||
611 | /* | 696 | /* |
@@ -626,7 +711,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, | |||
626 | /* check if we already stored this frame */ | 711 | /* check if we already stored this frame */ |
627 | if (tid_agg_rx->reorder_buf[index]) { | 712 | if (tid_agg_rx->reorder_buf[index]) { |
628 | dev_kfree_skb(skb); | 713 | dev_kfree_skb(skb); |
629 | return true; | 714 | goto out; |
630 | } | 715 | } |
631 | 716 | ||
632 | /* | 717 | /* |
@@ -636,58 +721,19 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, | |||
636 | if (mpdu_seq_num == tid_agg_rx->head_seq_num && | 721 | if (mpdu_seq_num == tid_agg_rx->head_seq_num && |
637 | tid_agg_rx->stored_mpdu_num == 0) { | 722 | tid_agg_rx->stored_mpdu_num == 0) { |
638 | tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); | 723 | tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); |
639 | return false; | 724 | ret = false; |
725 | goto out; | ||
640 | } | 726 | } |
641 | 727 | ||
642 | /* put the frame in the reordering buffer */ | 728 | /* put the frame in the reordering buffer */ |
643 | tid_agg_rx->reorder_buf[index] = skb; | 729 | tid_agg_rx->reorder_buf[index] = skb; |
644 | tid_agg_rx->reorder_time[index] = jiffies; | 730 | tid_agg_rx->reorder_time[index] = jiffies; |
645 | tid_agg_rx->stored_mpdu_num++; | 731 | tid_agg_rx->stored_mpdu_num++; |
646 | /* release the buffer until next missing frame */ | 732 | ieee80211_sta_reorder_release(hw, tid_agg_rx, frames); |
647 | index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % | ||
648 | tid_agg_rx->buf_size; | ||
649 | if (!tid_agg_rx->reorder_buf[index] && | ||
650 | tid_agg_rx->stored_mpdu_num > 1) { | ||
651 | /* | ||
652 | * No buffers ready to be released, but check whether any | ||
653 | * frames in the reorder buffer have timed out. | ||
654 | */ | ||
655 | int j; | ||
656 | int skipped = 1; | ||
657 | for (j = (index + 1) % tid_agg_rx->buf_size; j != index; | ||
658 | j = (j + 1) % tid_agg_rx->buf_size) { | ||
659 | if (!tid_agg_rx->reorder_buf[j]) { | ||
660 | skipped++; | ||
661 | continue; | ||
662 | } | ||
663 | if (!time_after(jiffies, tid_agg_rx->reorder_time[j] + | ||
664 | HT_RX_REORDER_BUF_TIMEOUT)) | ||
665 | break; | ||
666 | 733 | ||
667 | #ifdef CONFIG_MAC80211_HT_DEBUG | 734 | out: |
668 | if (net_ratelimit()) | 735 | spin_unlock(&tid_agg_rx->reorder_lock); |
669 | printk(KERN_DEBUG "%s: release an RX reorder " | 736 | return ret; |
670 | "frame due to timeout on earlier " | ||
671 | "frames\n", | ||
672 | wiphy_name(hw->wiphy)); | ||
673 | #endif | ||
674 | ieee80211_release_reorder_frame(hw, tid_agg_rx, | ||
675 | j, frames); | ||
676 | |||
677 | /* | ||
678 | * Increment the head seq# also for the skipped slots. | ||
679 | */ | ||
680 | tid_agg_rx->head_seq_num = | ||
681 | (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK; | ||
682 | skipped = 0; | ||
683 | } | ||
684 | } else while (tid_agg_rx->reorder_buf[index]) { | ||
685 | ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames); | ||
686 | index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % | ||
687 | tid_agg_rx->buf_size; | ||
688 | } | ||
689 | |||
690 | return true; | ||
691 | } | 737 | } |
692 | 738 | ||
693 | /* | 739 | /* |
@@ -761,13 +807,14 @@ static ieee80211_rx_result debug_noinline | |||
761 | ieee80211_rx_h_check(struct ieee80211_rx_data *rx) | 807 | ieee80211_rx_h_check(struct ieee80211_rx_data *rx) |
762 | { | 808 | { |
763 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; | 809 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; |
810 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); | ||
764 | 811 | ||
765 | /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */ | 812 | /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */ |
766 | if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) { | 813 | if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) { |
767 | if (unlikely(ieee80211_has_retry(hdr->frame_control) && | 814 | if (unlikely(ieee80211_has_retry(hdr->frame_control) && |
768 | rx->sta->last_seq_ctrl[rx->queue] == | 815 | rx->sta->last_seq_ctrl[rx->queue] == |
769 | hdr->seq_ctrl)) { | 816 | hdr->seq_ctrl)) { |
770 | if (rx->flags & IEEE80211_RX_RA_MATCH) { | 817 | if (status->rx_flags & IEEE80211_RX_RA_MATCH) { |
771 | rx->local->dot11FrameDuplicateCount++; | 818 | rx->local->dot11FrameDuplicateCount++; |
772 | rx->sta->num_duplicates++; | 819 | rx->sta->num_duplicates++; |
773 | } | 820 | } |
@@ -796,11 +843,12 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx) | |||
796 | if (unlikely((ieee80211_is_data(hdr->frame_control) || | 843 | if (unlikely((ieee80211_is_data(hdr->frame_control) || |
797 | ieee80211_is_pspoll(hdr->frame_control)) && | 844 | ieee80211_is_pspoll(hdr->frame_control)) && |
798 | rx->sdata->vif.type != NL80211_IFTYPE_ADHOC && | 845 | rx->sdata->vif.type != NL80211_IFTYPE_ADHOC && |
846 | rx->sdata->vif.type != NL80211_IFTYPE_WDS && | ||
799 | (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) { | 847 | (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) { |
800 | if ((!ieee80211_has_fromds(hdr->frame_control) && | 848 | if ((!ieee80211_has_fromds(hdr->frame_control) && |
801 | !ieee80211_has_tods(hdr->frame_control) && | 849 | !ieee80211_has_tods(hdr->frame_control) && |
802 | ieee80211_is_data(hdr->frame_control)) || | 850 | ieee80211_is_data(hdr->frame_control)) || |
803 | !(rx->flags & IEEE80211_RX_RA_MATCH)) { | 851 | !(status->rx_flags & IEEE80211_RX_RA_MATCH)) { |
804 | /* Drop IBSS frames and frames for other hosts | 852 | /* Drop IBSS frames and frames for other hosts |
805 | * silently. */ | 853 | * silently. */ |
806 | return RX_DROP_MONITOR; | 854 | return RX_DROP_MONITOR; |
@@ -822,7 +870,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
822 | int keyidx; | 870 | int keyidx; |
823 | int hdrlen; | 871 | int hdrlen; |
824 | ieee80211_rx_result result = RX_DROP_UNUSABLE; | 872 | ieee80211_rx_result result = RX_DROP_UNUSABLE; |
825 | struct ieee80211_key *stakey = NULL; | 873 | struct ieee80211_key *sta_ptk = NULL; |
826 | int mmie_keyidx = -1; | 874 | int mmie_keyidx = -1; |
827 | __le16 fc; | 875 | __le16 fc; |
828 | 876 | ||
@@ -857,22 +905,25 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
857 | * No point in finding a key and decrypting if the frame is neither | 905 | * No point in finding a key and decrypting if the frame is neither |
858 | * addressed to us nor a multicast frame. | 906 | * addressed to us nor a multicast frame. |
859 | */ | 907 | */ |
860 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) | 908 | if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) |
861 | return RX_CONTINUE; | 909 | return RX_CONTINUE; |
862 | 910 | ||
863 | /* start without a key */ | 911 | /* start without a key */ |
864 | rx->key = NULL; | 912 | rx->key = NULL; |
865 | 913 | ||
866 | if (rx->sta) | 914 | if (rx->sta) |
867 | stakey = rcu_dereference(rx->sta->key); | 915 | sta_ptk = rcu_dereference(rx->sta->ptk); |
868 | 916 | ||
869 | fc = hdr->frame_control; | 917 | fc = hdr->frame_control; |
870 | 918 | ||
871 | if (!ieee80211_has_protected(fc)) | 919 | if (!ieee80211_has_protected(fc)) |
872 | mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); | 920 | mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); |
873 | 921 | ||
874 | if (!is_multicast_ether_addr(hdr->addr1) && stakey) { | 922 | if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) { |
875 | rx->key = stakey; | 923 | rx->key = sta_ptk; |
924 | if ((status->flag & RX_FLAG_DECRYPTED) && | ||
925 | (status->flag & RX_FLAG_IV_STRIPPED)) | ||
926 | return RX_CONTINUE; | ||
876 | /* Skip decryption if the frame is not protected. */ | 927 | /* Skip decryption if the frame is not protected. */ |
877 | if (!ieee80211_has_protected(fc)) | 928 | if (!ieee80211_has_protected(fc)) |
878 | return RX_CONTINUE; | 929 | return RX_CONTINUE; |
@@ -885,7 +936,10 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
885 | if (mmie_keyidx < NUM_DEFAULT_KEYS || | 936 | if (mmie_keyidx < NUM_DEFAULT_KEYS || |
886 | mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) | 937 | mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) |
887 | return RX_DROP_MONITOR; /* unexpected BIP keyidx */ | 938 | return RX_DROP_MONITOR; /* unexpected BIP keyidx */ |
888 | rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); | 939 | if (rx->sta) |
940 | rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]); | ||
941 | if (!rx->key) | ||
942 | rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); | ||
889 | } else if (!ieee80211_has_protected(fc)) { | 943 | } else if (!ieee80211_has_protected(fc)) { |
890 | /* | 944 | /* |
891 | * The frame was not protected, so skip decryption. However, we | 945 | * The frame was not protected, so skip decryption. However, we |
@@ -928,16 +982,25 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
928 | skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1); | 982 | skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1); |
929 | keyidx = keyid >> 6; | 983 | keyidx = keyid >> 6; |
930 | 984 | ||
931 | rx->key = rcu_dereference(rx->sdata->keys[keyidx]); | 985 | /* check per-station GTK first, if multicast packet */ |
986 | if (is_multicast_ether_addr(hdr->addr1) && rx->sta) | ||
987 | rx->key = rcu_dereference(rx->sta->gtk[keyidx]); | ||
932 | 988 | ||
933 | /* | 989 | /* if not found, try default key */ |
934 | * RSNA-protected unicast frames should always be sent with | 990 | if (!rx->key) { |
935 | * pairwise or station-to-station keys, but for WEP we allow | 991 | rx->key = rcu_dereference(rx->sdata->keys[keyidx]); |
936 | * using a key index as well. | 992 | |
937 | */ | 993 | /* |
938 | if (rx->key && rx->key->conf.alg != ALG_WEP && | 994 | * RSNA-protected unicast frames should always be |
939 | !is_multicast_ether_addr(hdr->addr1)) | 995 | * sent with pairwise or station-to-station keys, |
940 | rx->key = NULL; | 996 | * but for WEP we allow using a key index as well. |
997 | */ | ||
998 | if (rx->key && | ||
999 | rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 && | ||
1000 | rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 && | ||
1001 | !is_multicast_ether_addr(hdr->addr1)) | ||
1002 | rx->key = NULL; | ||
1003 | } | ||
941 | } | 1004 | } |
942 | 1005 | ||
943 | if (rx->key) { | 1006 | if (rx->key) { |
@@ -951,8 +1014,9 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
951 | return RX_DROP_UNUSABLE; | 1014 | return RX_DROP_UNUSABLE; |
952 | /* the hdr variable is invalid now! */ | 1015 | /* the hdr variable is invalid now! */ |
953 | 1016 | ||
954 | switch (rx->key->conf.alg) { | 1017 | switch (rx->key->conf.cipher) { |
955 | case ALG_WEP: | 1018 | case WLAN_CIPHER_SUITE_WEP40: |
1019 | case WLAN_CIPHER_SUITE_WEP104: | ||
956 | /* Check for weak IVs if possible */ | 1020 | /* Check for weak IVs if possible */ |
957 | if (rx->sta && ieee80211_is_data(fc) && | 1021 | if (rx->sta && ieee80211_is_data(fc) && |
958 | (!(status->flag & RX_FLAG_IV_STRIPPED) || | 1022 | (!(status->flag & RX_FLAG_IV_STRIPPED) || |
@@ -962,15 +1026,21 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
962 | 1026 | ||
963 | result = ieee80211_crypto_wep_decrypt(rx); | 1027 | result = ieee80211_crypto_wep_decrypt(rx); |
964 | break; | 1028 | break; |
965 | case ALG_TKIP: | 1029 | case WLAN_CIPHER_SUITE_TKIP: |
966 | result = ieee80211_crypto_tkip_decrypt(rx); | 1030 | result = ieee80211_crypto_tkip_decrypt(rx); |
967 | break; | 1031 | break; |
968 | case ALG_CCMP: | 1032 | case WLAN_CIPHER_SUITE_CCMP: |
969 | result = ieee80211_crypto_ccmp_decrypt(rx); | 1033 | result = ieee80211_crypto_ccmp_decrypt(rx); |
970 | break; | 1034 | break; |
971 | case ALG_AES_CMAC: | 1035 | case WLAN_CIPHER_SUITE_AES_CMAC: |
972 | result = ieee80211_crypto_aes_cmac_decrypt(rx); | 1036 | result = ieee80211_crypto_aes_cmac_decrypt(rx); |
973 | break; | 1037 | break; |
1038 | default: | ||
1039 | /* | ||
1040 | * We can reach here only with HW-only algorithms | ||
1041 | * but why didn't it decrypt the frame?! | ||
1042 | */ | ||
1043 | return RX_DROP_UNUSABLE; | ||
974 | } | 1044 | } |
975 | 1045 | ||
976 | /* either the frame has been decrypted or will be dropped */ | 1046 | /* either the frame has been decrypted or will be dropped */ |
@@ -1079,7 +1149,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) | |||
1079 | sta->last_rx = jiffies; | 1149 | sta->last_rx = jiffies; |
1080 | } | 1150 | } |
1081 | 1151 | ||
1082 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) | 1152 | if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) |
1083 | return RX_CONTINUE; | 1153 | return RX_CONTINUE; |
1084 | 1154 | ||
1085 | if (rx->sdata->vif.type == NL80211_IFTYPE_STATION) | 1155 | if (rx->sdata->vif.type == NL80211_IFTYPE_STATION) |
@@ -1236,6 +1306,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
1236 | unsigned int frag, seq; | 1306 | unsigned int frag, seq; |
1237 | struct ieee80211_fragment_entry *entry; | 1307 | struct ieee80211_fragment_entry *entry; |
1238 | struct sk_buff *skb; | 1308 | struct sk_buff *skb; |
1309 | struct ieee80211_rx_status *status; | ||
1239 | 1310 | ||
1240 | hdr = (struct ieee80211_hdr *)rx->skb->data; | 1311 | hdr = (struct ieee80211_hdr *)rx->skb->data; |
1241 | fc = hdr->frame_control; | 1312 | fc = hdr->frame_control; |
@@ -1265,7 +1336,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
1265 | /* This is the first fragment of a new frame. */ | 1336 | /* This is the first fragment of a new frame. */ |
1266 | entry = ieee80211_reassemble_add(rx->sdata, frag, seq, | 1337 | entry = ieee80211_reassemble_add(rx->sdata, frag, seq, |
1267 | rx->queue, &(rx->skb)); | 1338 | rx->queue, &(rx->skb)); |
1268 | if (rx->key && rx->key->conf.alg == ALG_CCMP && | 1339 | if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP && |
1269 | ieee80211_has_protected(fc)) { | 1340 | ieee80211_has_protected(fc)) { |
1270 | int queue = ieee80211_is_mgmt(fc) ? | 1341 | int queue = ieee80211_is_mgmt(fc) ? |
1271 | NUM_RX_DATA_QUEUES : rx->queue; | 1342 | NUM_RX_DATA_QUEUES : rx->queue; |
@@ -1294,7 +1365,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
1294 | int i; | 1365 | int i; |
1295 | u8 pn[CCMP_PN_LEN], *rpn; | 1366 | u8 pn[CCMP_PN_LEN], *rpn; |
1296 | int queue; | 1367 | int queue; |
1297 | if (!rx->key || rx->key->conf.alg != ALG_CCMP) | 1368 | if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP) |
1298 | return RX_DROP_UNUSABLE; | 1369 | return RX_DROP_UNUSABLE; |
1299 | memcpy(pn, entry->last_pn, CCMP_PN_LEN); | 1370 | memcpy(pn, entry->last_pn, CCMP_PN_LEN); |
1300 | for (i = CCMP_PN_LEN - 1; i >= 0; i--) { | 1371 | for (i = CCMP_PN_LEN - 1; i >= 0; i--) { |
@@ -1335,7 +1406,8 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
1335 | } | 1406 | } |
1336 | 1407 | ||
1337 | /* Complete frame has been reassembled - process it now */ | 1408 | /* Complete frame has been reassembled - process it now */ |
1338 | rx->flags |= IEEE80211_RX_FRAGMENTED; | 1409 | status = IEEE80211_SKB_RXCB(rx->skb); |
1410 | status->rx_flags |= IEEE80211_RX_FRAGMENTED; | ||
1339 | 1411 | ||
1340 | out: | 1412 | out: |
1341 | if (rx->sta) | 1413 | if (rx->sta) |
@@ -1352,9 +1424,10 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx) | |||
1352 | { | 1424 | { |
1353 | struct ieee80211_sub_if_data *sdata = rx->sdata; | 1425 | struct ieee80211_sub_if_data *sdata = rx->sdata; |
1354 | __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control; | 1426 | __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control; |
1427 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); | ||
1355 | 1428 | ||
1356 | if (likely(!rx->sta || !ieee80211_is_pspoll(fc) || | 1429 | if (likely(!rx->sta || !ieee80211_is_pspoll(fc) || |
1357 | !(rx->flags & IEEE80211_RX_RA_MATCH))) | 1430 | !(status->rx_flags & IEEE80211_RX_RA_MATCH))) |
1358 | return RX_CONTINUE; | 1431 | return RX_CONTINUE; |
1359 | 1432 | ||
1360 | if ((sdata->vif.type != NL80211_IFTYPE_AP) && | 1433 | if ((sdata->vif.type != NL80211_IFTYPE_AP) && |
@@ -1492,7 +1565,7 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) | |||
1492 | * Allow EAPOL frames to us/the PAE group address regardless | 1565 | * Allow EAPOL frames to us/the PAE group address regardless |
1493 | * of whether the frame was encrypted or not. | 1566 | * of whether the frame was encrypted or not. |
1494 | */ | 1567 | */ |
1495 | if (ehdr->h_proto == htons(ETH_P_PAE) && | 1568 | if (ehdr->h_proto == rx->sdata->control_port_protocol && |
1496 | (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 || | 1569 | (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 || |
1497 | compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0)) | 1570 | compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0)) |
1498 | return true; | 1571 | return true; |
@@ -1515,6 +1588,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) | |||
1515 | struct sk_buff *skb, *xmit_skb; | 1588 | struct sk_buff *skb, *xmit_skb; |
1516 | struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; | 1589 | struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; |
1517 | struct sta_info *dsta; | 1590 | struct sta_info *dsta; |
1591 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); | ||
1518 | 1592 | ||
1519 | skb = rx->skb; | 1593 | skb = rx->skb; |
1520 | xmit_skb = NULL; | 1594 | xmit_skb = NULL; |
@@ -1522,7 +1596,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) | |||
1522 | if ((sdata->vif.type == NL80211_IFTYPE_AP || | 1596 | if ((sdata->vif.type == NL80211_IFTYPE_AP || |
1523 | sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && | 1597 | sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && |
1524 | !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && | 1598 | !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && |
1525 | (rx->flags & IEEE80211_RX_RA_MATCH) && | 1599 | (status->rx_flags & IEEE80211_RX_RA_MATCH) && |
1526 | (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { | 1600 | (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { |
1527 | if (is_multicast_ether_addr(ehdr->h_dest)) { | 1601 | if (is_multicast_ether_addr(ehdr->h_dest)) { |
1528 | /* | 1602 | /* |
@@ -1599,6 +1673,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) | |||
1599 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 1673 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
1600 | __le16 fc = hdr->frame_control; | 1674 | __le16 fc = hdr->frame_control; |
1601 | struct sk_buff_head frame_list; | 1675 | struct sk_buff_head frame_list; |
1676 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); | ||
1602 | 1677 | ||
1603 | if (unlikely(!ieee80211_is_data(fc))) | 1678 | if (unlikely(!ieee80211_is_data(fc))) |
1604 | return RX_CONTINUE; | 1679 | return RX_CONTINUE; |
@@ -1606,7 +1681,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) | |||
1606 | if (unlikely(!ieee80211_is_data_present(fc))) | 1681 | if (unlikely(!ieee80211_is_data_present(fc))) |
1607 | return RX_DROP_MONITOR; | 1682 | return RX_DROP_MONITOR; |
1608 | 1683 | ||
1609 | if (!(rx->flags & IEEE80211_RX_AMSDU)) | 1684 | if (!(status->rx_flags & IEEE80211_RX_AMSDU)) |
1610 | return RX_CONTINUE; | 1685 | return RX_CONTINUE; |
1611 | 1686 | ||
1612 | if (ieee80211_has_a4(hdr->frame_control) && | 1687 | if (ieee80211_has_a4(hdr->frame_control) && |
@@ -1657,6 +1732,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
1657 | struct sk_buff *skb = rx->skb, *fwd_skb; | 1732 | struct sk_buff *skb = rx->skb, *fwd_skb; |
1658 | struct ieee80211_local *local = rx->local; | 1733 | struct ieee80211_local *local = rx->local; |
1659 | struct ieee80211_sub_if_data *sdata = rx->sdata; | 1734 | struct ieee80211_sub_if_data *sdata = rx->sdata; |
1735 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | ||
1660 | 1736 | ||
1661 | hdr = (struct ieee80211_hdr *) skb->data; | 1737 | hdr = (struct ieee80211_hdr *) skb->data; |
1662 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | 1738 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
@@ -1702,7 +1778,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
1702 | 1778 | ||
1703 | mesh_hdr->ttl--; | 1779 | mesh_hdr->ttl--; |
1704 | 1780 | ||
1705 | if (rx->flags & IEEE80211_RX_RA_MATCH) { | 1781 | if (status->rx_flags & IEEE80211_RX_RA_MATCH) { |
1706 | if (!mesh_hdr->ttl) | 1782 | if (!mesh_hdr->ttl) |
1707 | IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh, | 1783 | IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh, |
1708 | dropped_frames_ttl); | 1784 | dropped_frames_ttl); |
@@ -1909,13 +1985,38 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, | |||
1909 | } | 1985 | } |
1910 | 1986 | ||
1911 | static ieee80211_rx_result debug_noinline | 1987 | static ieee80211_rx_result debug_noinline |
1988 | ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) | ||
1989 | { | ||
1990 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; | ||
1991 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); | ||
1992 | |||
1993 | /* | ||
1994 | * From here on, look only at management frames. | ||
1995 | * Data and control frames are already handled, | ||
1996 | * and unknown (reserved) frames are useless. | ||
1997 | */ | ||
1998 | if (rx->skb->len < 24) | ||
1999 | return RX_DROP_MONITOR; | ||
2000 | |||
2001 | if (!ieee80211_is_mgmt(mgmt->frame_control)) | ||
2002 | return RX_DROP_MONITOR; | ||
2003 | |||
2004 | if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) | ||
2005 | return RX_DROP_MONITOR; | ||
2006 | |||
2007 | if (ieee80211_drop_unencrypted_mgmt(rx)) | ||
2008 | return RX_DROP_UNUSABLE; | ||
2009 | |||
2010 | return RX_CONTINUE; | ||
2011 | } | ||
2012 | |||
2013 | static ieee80211_rx_result debug_noinline | ||
1912 | ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | 2014 | ieee80211_rx_h_action(struct ieee80211_rx_data *rx) |
1913 | { | 2015 | { |
1914 | struct ieee80211_local *local = rx->local; | 2016 | struct ieee80211_local *local = rx->local; |
1915 | struct ieee80211_sub_if_data *sdata = rx->sdata; | 2017 | struct ieee80211_sub_if_data *sdata = rx->sdata; |
1916 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; | 2018 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; |
1917 | struct sk_buff *nskb; | 2019 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); |
1918 | struct ieee80211_rx_status *status; | ||
1919 | int len = rx->skb->len; | 2020 | int len = rx->skb->len; |
1920 | 2021 | ||
1921 | if (!ieee80211_is_action(mgmt->frame_control)) | 2022 | if (!ieee80211_is_action(mgmt->frame_control)) |
@@ -1928,10 +2029,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
1928 | if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) | 2029 | if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) |
1929 | return RX_DROP_UNUSABLE; | 2030 | return RX_DROP_UNUSABLE; |
1930 | 2031 | ||
1931 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) | 2032 | if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) |
1932 | return RX_DROP_UNUSABLE; | ||
1933 | |||
1934 | if (ieee80211_drop_unencrypted_mgmt(rx)) | ||
1935 | return RX_DROP_UNUSABLE; | 2033 | return RX_DROP_UNUSABLE; |
1936 | 2034 | ||
1937 | switch (mgmt->u.action.category) { | 2035 | switch (mgmt->u.action.category) { |
@@ -2024,17 +2122,36 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
2024 | goto queue; | 2122 | goto queue; |
2025 | } | 2123 | } |
2026 | 2124 | ||
2125 | return RX_CONTINUE; | ||
2126 | |||
2027 | invalid: | 2127 | invalid: |
2028 | /* | 2128 | status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM; |
2029 | * For AP mode, hostapd is responsible for handling any action | 2129 | /* will return in the next handlers */ |
2030 | * frames that we didn't handle, including returning unknown | 2130 | return RX_CONTINUE; |
2031 | * ones. For all other modes we will return them to the sender, | 2131 | |
2032 | * setting the 0x80 bit in the action category, as required by | 2132 | handled: |
2033 | * 802.11-2007 7.3.1.11. | 2133 | if (rx->sta) |
2034 | */ | 2134 | rx->sta->rx_packets++; |
2035 | if (sdata->vif.type == NL80211_IFTYPE_AP || | 2135 | dev_kfree_skb(rx->skb); |
2036 | sdata->vif.type == NL80211_IFTYPE_AP_VLAN) | 2136 | return RX_QUEUED; |
2037 | return RX_DROP_MONITOR; | 2137 | |
2138 | queue: | ||
2139 | rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; | ||
2140 | skb_queue_tail(&sdata->skb_queue, rx->skb); | ||
2141 | ieee80211_queue_work(&local->hw, &sdata->work); | ||
2142 | if (rx->sta) | ||
2143 | rx->sta->rx_packets++; | ||
2144 | return RX_QUEUED; | ||
2145 | } | ||
2146 | |||
2147 | static ieee80211_rx_result debug_noinline | ||
2148 | ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) | ||
2149 | { | ||
2150 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); | ||
2151 | |||
2152 | /* skip known-bad action frames and return them in the next handler */ | ||
2153 | if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) | ||
2154 | return RX_CONTINUE; | ||
2038 | 2155 | ||
2039 | /* | 2156 | /* |
2040 | * Getting here means the kernel doesn't know how to handle | 2157 | * Getting here means the kernel doesn't know how to handle |
@@ -2042,12 +2159,46 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
2042 | * so userspace can register for those to know whether ones | 2159 | * so userspace can register for those to know whether ones |
2043 | * it transmitted were processed or returned. | 2160 | * it transmitted were processed or returned. |
2044 | */ | 2161 | */ |
2045 | status = IEEE80211_SKB_RXCB(rx->skb); | ||
2046 | 2162 | ||
2047 | if (cfg80211_rx_action(rx->sdata->dev, status->freq, | 2163 | if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq, |
2048 | rx->skb->data, rx->skb->len, | 2164 | rx->skb->data, rx->skb->len, |
2049 | GFP_ATOMIC)) | 2165 | GFP_ATOMIC)) { |
2050 | goto handled; | 2166 | if (rx->sta) |
2167 | rx->sta->rx_packets++; | ||
2168 | dev_kfree_skb(rx->skb); | ||
2169 | return RX_QUEUED; | ||
2170 | } | ||
2171 | |||
2172 | |||
2173 | return RX_CONTINUE; | ||
2174 | } | ||
2175 | |||
2176 | static ieee80211_rx_result debug_noinline | ||
2177 | ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) | ||
2178 | { | ||
2179 | struct ieee80211_local *local = rx->local; | ||
2180 | struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; | ||
2181 | struct sk_buff *nskb; | ||
2182 | struct ieee80211_sub_if_data *sdata = rx->sdata; | ||
2183 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); | ||
2184 | |||
2185 | if (!ieee80211_is_action(mgmt->frame_control)) | ||
2186 | return RX_CONTINUE; | ||
2187 | |||
2188 | /* | ||
2189 | * For AP mode, hostapd is responsible for handling any action | ||
2190 | * frames that we didn't handle, including returning unknown | ||
2191 | * ones. For all other modes we will return them to the sender, | ||
2192 | * setting the 0x80 bit in the action category, as required by | ||
2193 | * 802.11-2007 7.3.1.11. | ||
2194 | * Newer versions of hostapd shall also use the management frame | ||
2195 | * registration mechanisms, but older ones still use cooked | ||
2196 | * monitor interfaces so push all frames there. | ||
2197 | */ | ||
2198 | if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) && | ||
2199 | (sdata->vif.type == NL80211_IFTYPE_AP || | ||
2200 | sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) | ||
2201 | return RX_DROP_MONITOR; | ||
2051 | 2202 | ||
2052 | /* do not return rejected action frames */ | 2203 | /* do not return rejected action frames */ |
2053 | if (mgmt->u.action.category & 0x80) | 2204 | if (mgmt->u.action.category & 0x80) |
@@ -2066,20 +2217,8 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
2066 | 2217 | ||
2067 | ieee80211_tx_skb(rx->sdata, nskb); | 2218 | ieee80211_tx_skb(rx->sdata, nskb); |
2068 | } | 2219 | } |
2069 | |||
2070 | handled: | ||
2071 | if (rx->sta) | ||
2072 | rx->sta->rx_packets++; | ||
2073 | dev_kfree_skb(rx->skb); | 2220 | dev_kfree_skb(rx->skb); |
2074 | return RX_QUEUED; | 2221 | return RX_QUEUED; |
2075 | |||
2076 | queue: | ||
2077 | rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; | ||
2078 | skb_queue_tail(&sdata->skb_queue, rx->skb); | ||
2079 | ieee80211_queue_work(&local->hw, &sdata->work); | ||
2080 | if (rx->sta) | ||
2081 | rx->sta->rx_packets++; | ||
2082 | return RX_QUEUED; | ||
2083 | } | 2222 | } |
2084 | 2223 | ||
2085 | static ieee80211_rx_result debug_noinline | 2224 | static ieee80211_rx_result debug_noinline |
@@ -2090,15 +2229,6 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) | |||
2090 | struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; | 2229 | struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; |
2091 | __le16 stype; | 2230 | __le16 stype; |
2092 | 2231 | ||
2093 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) | ||
2094 | return RX_DROP_MONITOR; | ||
2095 | |||
2096 | if (rx->skb->len < 24) | ||
2097 | return RX_DROP_MONITOR; | ||
2098 | |||
2099 | if (ieee80211_drop_unencrypted_mgmt(rx)) | ||
2100 | return RX_DROP_UNUSABLE; | ||
2101 | |||
2102 | rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb); | 2232 | rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb); |
2103 | if (rxs != RX_CONTINUE) | 2233 | if (rxs != RX_CONTINUE) |
2104 | return rxs; | 2234 | return rxs; |
@@ -2199,6 +2329,14 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, | |||
2199 | struct net_device *prev_dev = NULL; | 2329 | struct net_device *prev_dev = NULL; |
2200 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | 2330 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); |
2201 | 2331 | ||
2332 | /* | ||
2333 | * If cooked monitor has been processed already, then | ||
2334 | * don't do it again. If not, set the flag. | ||
2335 | */ | ||
2336 | if (rx->flags & IEEE80211_RX_CMNTR) | ||
2337 | goto out_free_skb; | ||
2338 | rx->flags |= IEEE80211_RX_CMNTR; | ||
2339 | |||
2202 | if (skb_headroom(skb) < sizeof(*rthdr) && | 2340 | if (skb_headroom(skb) < sizeof(*rthdr) && |
2203 | pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC)) | 2341 | pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC)) |
2204 | goto out_free_skb; | 2342 | goto out_free_skb; |
@@ -2253,29 +2391,53 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, | |||
2253 | if (prev_dev) { | 2391 | if (prev_dev) { |
2254 | skb->dev = prev_dev; | 2392 | skb->dev = prev_dev; |
2255 | netif_receive_skb(skb); | 2393 | netif_receive_skb(skb); |
2256 | skb = NULL; | 2394 | return; |
2257 | } else | 2395 | } |
2258 | goto out_free_skb; | ||
2259 | |||
2260 | return; | ||
2261 | 2396 | ||
2262 | out_free_skb: | 2397 | out_free_skb: |
2263 | dev_kfree_skb(skb); | 2398 | dev_kfree_skb(skb); |
2264 | } | 2399 | } |
2265 | 2400 | ||
2401 | static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, | ||
2402 | ieee80211_rx_result res) | ||
2403 | { | ||
2404 | switch (res) { | ||
2405 | case RX_DROP_MONITOR: | ||
2406 | I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); | ||
2407 | if (rx->sta) | ||
2408 | rx->sta->rx_dropped++; | ||
2409 | /* fall through */ | ||
2410 | case RX_CONTINUE: { | ||
2411 | struct ieee80211_rate *rate = NULL; | ||
2412 | struct ieee80211_supported_band *sband; | ||
2413 | struct ieee80211_rx_status *status; | ||
2414 | |||
2415 | status = IEEE80211_SKB_RXCB((rx->skb)); | ||
2416 | |||
2417 | sband = rx->local->hw.wiphy->bands[status->band]; | ||
2418 | if (!(status->flag & RX_FLAG_HT)) | ||
2419 | rate = &sband->bitrates[status->rate_idx]; | ||
2420 | |||
2421 | ieee80211_rx_cooked_monitor(rx, rate); | ||
2422 | break; | ||
2423 | } | ||
2424 | case RX_DROP_UNUSABLE: | ||
2425 | I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); | ||
2426 | if (rx->sta) | ||
2427 | rx->sta->rx_dropped++; | ||
2428 | dev_kfree_skb(rx->skb); | ||
2429 | break; | ||
2430 | case RX_QUEUED: | ||
2431 | I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued); | ||
2432 | break; | ||
2433 | } | ||
2434 | } | ||
2266 | 2435 | ||
2267 | static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata, | 2436 | static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx, |
2268 | struct ieee80211_rx_data *rx, | 2437 | struct sk_buff_head *frames) |
2269 | struct sk_buff *skb, | ||
2270 | struct ieee80211_rate *rate) | ||
2271 | { | 2438 | { |
2272 | struct sk_buff_head reorder_release; | ||
2273 | ieee80211_rx_result res = RX_DROP_MONITOR; | 2439 | ieee80211_rx_result res = RX_DROP_MONITOR; |
2274 | 2440 | struct sk_buff *skb; | |
2275 | __skb_queue_head_init(&reorder_release); | ||
2276 | |||
2277 | rx->skb = skb; | ||
2278 | rx->sdata = sdata; | ||
2279 | 2441 | ||
2280 | #define CALL_RXH(rxh) \ | 2442 | #define CALL_RXH(rxh) \ |
2281 | do { \ | 2443 | do { \ |
@@ -2284,23 +2446,14 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata, | |||
2284 | goto rxh_next; \ | 2446 | goto rxh_next; \ |
2285 | } while (0); | 2447 | } while (0); |
2286 | 2448 | ||
2287 | /* | 2449 | while ((skb = __skb_dequeue(frames))) { |
2288 | * NB: the rxh_next label works even if we jump | ||
2289 | * to it from here because then the list will | ||
2290 | * be empty, which is a trivial check | ||
2291 | */ | ||
2292 | CALL_RXH(ieee80211_rx_h_passive_scan) | ||
2293 | CALL_RXH(ieee80211_rx_h_check) | ||
2294 | |||
2295 | ieee80211_rx_reorder_ampdu(rx, &reorder_release); | ||
2296 | |||
2297 | while ((skb = __skb_dequeue(&reorder_release))) { | ||
2298 | /* | 2450 | /* |
2299 | * all the other fields are valid across frames | 2451 | * all the other fields are valid across frames |
2300 | * that belong to an aMPDU since they are on the | 2452 | * that belong to an aMPDU since they are on the |
2301 | * same TID from the same station | 2453 | * same TID from the same station |
2302 | */ | 2454 | */ |
2303 | rx->skb = skb; | 2455 | rx->skb = skb; |
2456 | rx->flags = 0; | ||
2304 | 2457 | ||
2305 | CALL_RXH(ieee80211_rx_h_decrypt) | 2458 | CALL_RXH(ieee80211_rx_h_decrypt) |
2306 | CALL_RXH(ieee80211_rx_h_check_more_data) | 2459 | CALL_RXH(ieee80211_rx_h_check_more_data) |
@@ -2312,50 +2465,92 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata, | |||
2312 | CALL_RXH(ieee80211_rx_h_remove_qos_control) | 2465 | CALL_RXH(ieee80211_rx_h_remove_qos_control) |
2313 | CALL_RXH(ieee80211_rx_h_amsdu) | 2466 | CALL_RXH(ieee80211_rx_h_amsdu) |
2314 | #ifdef CONFIG_MAC80211_MESH | 2467 | #ifdef CONFIG_MAC80211_MESH |
2315 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 2468 | if (ieee80211_vif_is_mesh(&rx->sdata->vif)) |
2316 | CALL_RXH(ieee80211_rx_h_mesh_fwding); | 2469 | CALL_RXH(ieee80211_rx_h_mesh_fwding); |
2317 | #endif | 2470 | #endif |
2318 | CALL_RXH(ieee80211_rx_h_data) | 2471 | CALL_RXH(ieee80211_rx_h_data) |
2319 | 2472 | ||
2320 | /* special treatment -- needs the queue */ | 2473 | /* special treatment -- needs the queue */ |
2321 | res = ieee80211_rx_h_ctrl(rx, &reorder_release); | 2474 | res = ieee80211_rx_h_ctrl(rx, frames); |
2322 | if (res != RX_CONTINUE) | 2475 | if (res != RX_CONTINUE) |
2323 | goto rxh_next; | 2476 | goto rxh_next; |
2324 | 2477 | ||
2478 | CALL_RXH(ieee80211_rx_h_mgmt_check) | ||
2325 | CALL_RXH(ieee80211_rx_h_action) | 2479 | CALL_RXH(ieee80211_rx_h_action) |
2480 | CALL_RXH(ieee80211_rx_h_userspace_mgmt) | ||
2481 | CALL_RXH(ieee80211_rx_h_action_return) | ||
2326 | CALL_RXH(ieee80211_rx_h_mgmt) | 2482 | CALL_RXH(ieee80211_rx_h_mgmt) |
2327 | 2483 | ||
2484 | rxh_next: | ||
2485 | ieee80211_rx_handlers_result(rx, res); | ||
2486 | |||
2328 | #undef CALL_RXH | 2487 | #undef CALL_RXH |
2488 | } | ||
2489 | } | ||
2490 | |||
2491 | static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) | ||
2492 | { | ||
2493 | struct sk_buff_head reorder_release; | ||
2494 | ieee80211_rx_result res = RX_DROP_MONITOR; | ||
2495 | |||
2496 | __skb_queue_head_init(&reorder_release); | ||
2497 | |||
2498 | #define CALL_RXH(rxh) \ | ||
2499 | do { \ | ||
2500 | res = rxh(rx); \ | ||
2501 | if (res != RX_CONTINUE) \ | ||
2502 | goto rxh_next; \ | ||
2503 | } while (0); | ||
2504 | |||
2505 | CALL_RXH(ieee80211_rx_h_passive_scan) | ||
2506 | CALL_RXH(ieee80211_rx_h_check) | ||
2507 | |||
2508 | ieee80211_rx_reorder_ampdu(rx, &reorder_release); | ||
2509 | |||
2510 | ieee80211_rx_handlers(rx, &reorder_release); | ||
2511 | return; | ||
2329 | 2512 | ||
2330 | rxh_next: | 2513 | rxh_next: |
2331 | switch (res) { | 2514 | ieee80211_rx_handlers_result(rx, res); |
2332 | case RX_DROP_MONITOR: | 2515 | |
2333 | I802_DEBUG_INC(sdata->local->rx_handlers_drop); | 2516 | #undef CALL_RXH |
2334 | if (rx->sta) | 2517 | } |
2335 | rx->sta->rx_dropped++; | 2518 | |
2336 | /* fall through */ | 2519 | /* |
2337 | case RX_CONTINUE: | 2520 | * This function makes calls into the RX path. Therefore the |
2338 | ieee80211_rx_cooked_monitor(rx, rate); | 2521 | * caller must hold the sta_info->lock and everything has to |
2339 | break; | 2522 | * be under rcu_read_lock protection as well. |
2340 | case RX_DROP_UNUSABLE: | 2523 | */ |
2341 | I802_DEBUG_INC(sdata->local->rx_handlers_drop); | 2524 | void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) |
2342 | if (rx->sta) | 2525 | { |
2343 | rx->sta->rx_dropped++; | 2526 | struct sk_buff_head frames; |
2344 | dev_kfree_skb(rx->skb); | 2527 | struct ieee80211_rx_data rx = { |
2345 | break; | 2528 | .sta = sta, |
2346 | case RX_QUEUED: | 2529 | .sdata = sta->sdata, |
2347 | I802_DEBUG_INC(sdata->local->rx_handlers_queued); | 2530 | .local = sta->local, |
2348 | break; | 2531 | .queue = tid, |
2349 | } | 2532 | }; |
2350 | } | 2533 | struct tid_ampdu_rx *tid_agg_rx; |
2534 | |||
2535 | tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); | ||
2536 | if (!tid_agg_rx) | ||
2537 | return; | ||
2538 | |||
2539 | __skb_queue_head_init(&frames); | ||
2540 | |||
2541 | spin_lock(&tid_agg_rx->reorder_lock); | ||
2542 | ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx, &frames); | ||
2543 | spin_unlock(&tid_agg_rx->reorder_lock); | ||
2544 | |||
2545 | ieee80211_rx_handlers(&rx, &frames); | ||
2351 | } | 2546 | } |
2352 | 2547 | ||
2353 | /* main receive path */ | 2548 | /* main receive path */ |
2354 | 2549 | ||
2355 | static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | 2550 | static int prepare_for_handlers(struct ieee80211_rx_data *rx, |
2356 | struct ieee80211_rx_data *rx, | ||
2357 | struct ieee80211_hdr *hdr) | 2551 | struct ieee80211_hdr *hdr) |
2358 | { | 2552 | { |
2553 | struct ieee80211_sub_if_data *sdata = rx->sdata; | ||
2359 | struct sk_buff *skb = rx->skb; | 2554 | struct sk_buff *skb = rx->skb; |
2360 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | 2555 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); |
2361 | u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); | 2556 | u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); |
@@ -2369,7 +2564,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
2369 | compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) { | 2564 | compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) { |
2370 | if (!(sdata->dev->flags & IFF_PROMISC)) | 2565 | if (!(sdata->dev->flags & IFF_PROMISC)) |
2371 | return 0; | 2566 | return 0; |
2372 | rx->flags &= ~IEEE80211_RX_RA_MATCH; | 2567 | status->rx_flags &= ~IEEE80211_RX_RA_MATCH; |
2373 | } | 2568 | } |
2374 | break; | 2569 | break; |
2375 | case NL80211_IFTYPE_ADHOC: | 2570 | case NL80211_IFTYPE_ADHOC: |
@@ -2379,15 +2574,15 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
2379 | return 1; | 2574 | return 1; |
2380 | } | 2575 | } |
2381 | else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) { | 2576 | else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) { |
2382 | if (!(rx->flags & IEEE80211_RX_IN_SCAN)) | 2577 | if (!(status->rx_flags & IEEE80211_RX_IN_SCAN)) |
2383 | return 0; | 2578 | return 0; |
2384 | rx->flags &= ~IEEE80211_RX_RA_MATCH; | 2579 | status->rx_flags &= ~IEEE80211_RX_RA_MATCH; |
2385 | } else if (!multicast && | 2580 | } else if (!multicast && |
2386 | compare_ether_addr(sdata->vif.addr, | 2581 | compare_ether_addr(sdata->vif.addr, |
2387 | hdr->addr1) != 0) { | 2582 | hdr->addr1) != 0) { |
2388 | if (!(sdata->dev->flags & IFF_PROMISC)) | 2583 | if (!(sdata->dev->flags & IFF_PROMISC)) |
2389 | return 0; | 2584 | return 0; |
2390 | rx->flags &= ~IEEE80211_RX_RA_MATCH; | 2585 | status->rx_flags &= ~IEEE80211_RX_RA_MATCH; |
2391 | } else if (!rx->sta) { | 2586 | } else if (!rx->sta) { |
2392 | int rate_idx; | 2587 | int rate_idx; |
2393 | if (status->flag & RX_FLAG_HT) | 2588 | if (status->flag & RX_FLAG_HT) |
@@ -2405,7 +2600,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
2405 | if (!(sdata->dev->flags & IFF_PROMISC)) | 2600 | if (!(sdata->dev->flags & IFF_PROMISC)) |
2406 | return 0; | 2601 | return 0; |
2407 | 2602 | ||
2408 | rx->flags &= ~IEEE80211_RX_RA_MATCH; | 2603 | status->rx_flags &= ~IEEE80211_RX_RA_MATCH; |
2409 | } | 2604 | } |
2410 | break; | 2605 | break; |
2411 | case NL80211_IFTYPE_AP_VLAN: | 2606 | case NL80211_IFTYPE_AP_VLAN: |
@@ -2416,9 +2611,9 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
2416 | return 0; | 2611 | return 0; |
2417 | } else if (!ieee80211_bssid_match(bssid, | 2612 | } else if (!ieee80211_bssid_match(bssid, |
2418 | sdata->vif.addr)) { | 2613 | sdata->vif.addr)) { |
2419 | if (!(rx->flags & IEEE80211_RX_IN_SCAN)) | 2614 | if (!(status->rx_flags & IEEE80211_RX_IN_SCAN)) |
2420 | return 0; | 2615 | return 0; |
2421 | rx->flags &= ~IEEE80211_RX_RA_MATCH; | 2616 | status->rx_flags &= ~IEEE80211_RX_RA_MATCH; |
2422 | } | 2617 | } |
2423 | break; | 2618 | break; |
2424 | case NL80211_IFTYPE_WDS: | 2619 | case NL80211_IFTYPE_WDS: |
@@ -2427,9 +2622,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
2427 | if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2)) | 2622 | if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2)) |
2428 | return 0; | 2623 | return 0; |
2429 | break; | 2624 | break; |
2430 | case NL80211_IFTYPE_MONITOR: | 2625 | default: |
2431 | case NL80211_IFTYPE_UNSPECIFIED: | ||
2432 | case __NL80211_IFTYPE_AFTER_LAST: | ||
2433 | /* should never get here */ | 2626 | /* should never get here */ |
2434 | WARN_ON(1); | 2627 | WARN_ON(1); |
2435 | break; | 2628 | break; |
@@ -2439,12 +2632,56 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, | |||
2439 | } | 2632 | } |
2440 | 2633 | ||
2441 | /* | 2634 | /* |
2635 | * This function returns whether or not the SKB | ||
2636 | * was destined for RX processing or not, which, | ||
2637 | * if consume is true, is equivalent to whether | ||
2638 | * or not the skb was consumed. | ||
2639 | */ | ||
2640 | static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, | ||
2641 | struct sk_buff *skb, bool consume) | ||
2642 | { | ||
2643 | struct ieee80211_local *local = rx->local; | ||
2644 | struct ieee80211_sub_if_data *sdata = rx->sdata; | ||
2645 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | ||
2646 | struct ieee80211_hdr *hdr = (void *)skb->data; | ||
2647 | int prepares; | ||
2648 | |||
2649 | rx->skb = skb; | ||
2650 | status->rx_flags |= IEEE80211_RX_RA_MATCH; | ||
2651 | prepares = prepare_for_handlers(rx, hdr); | ||
2652 | |||
2653 | if (!prepares) | ||
2654 | return false; | ||
2655 | |||
2656 | if (status->flag & RX_FLAG_MMIC_ERROR) { | ||
2657 | if (status->rx_flags & IEEE80211_RX_RA_MATCH) | ||
2658 | ieee80211_rx_michael_mic_report(hdr, rx); | ||
2659 | return false; | ||
2660 | } | ||
2661 | |||
2662 | if (!consume) { | ||
2663 | skb = skb_copy(skb, GFP_ATOMIC); | ||
2664 | if (!skb) { | ||
2665 | if (net_ratelimit()) | ||
2666 | wiphy_debug(local->hw.wiphy, | ||
2667 | "failed to copy multicast frame for %s\n", | ||
2668 | sdata->name); | ||
2669 | return true; | ||
2670 | } | ||
2671 | |||
2672 | rx->skb = skb; | ||
2673 | } | ||
2674 | |||
2675 | ieee80211_invoke_rx_handlers(rx); | ||
2676 | return true; | ||
2677 | } | ||
2678 | |||
2679 | /* | ||
2442 | * This is the actual Rx frames handler. as it blongs to Rx path it must | 2680 | * This is the actual Rx frames handler. as it blongs to Rx path it must |
2443 | * be called with rcu_read_lock protection. | 2681 | * be called with rcu_read_lock protection. |
2444 | */ | 2682 | */ |
2445 | static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | 2683 | static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, |
2446 | struct sk_buff *skb, | 2684 | struct sk_buff *skb) |
2447 | struct ieee80211_rate *rate) | ||
2448 | { | 2685 | { |
2449 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | 2686 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); |
2450 | struct ieee80211_local *local = hw_to_local(hw); | 2687 | struct ieee80211_local *local = hw_to_local(hw); |
@@ -2452,11 +2689,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | |||
2452 | struct ieee80211_hdr *hdr; | 2689 | struct ieee80211_hdr *hdr; |
2453 | __le16 fc; | 2690 | __le16 fc; |
2454 | struct ieee80211_rx_data rx; | 2691 | struct ieee80211_rx_data rx; |
2455 | int prepares; | 2692 | struct ieee80211_sub_if_data *prev; |
2456 | struct ieee80211_sub_if_data *prev = NULL; | 2693 | struct sta_info *sta, *tmp, *prev_sta; |
2457 | struct sk_buff *skb_new; | ||
2458 | struct sta_info *sta, *tmp; | ||
2459 | bool found_sta = false; | ||
2460 | int err = 0; | 2694 | int err = 0; |
2461 | 2695 | ||
2462 | fc = ((struct ieee80211_hdr *)skb->data)->frame_control; | 2696 | fc = ((struct ieee80211_hdr *)skb->data)->frame_control; |
@@ -2469,7 +2703,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | |||
2469 | 2703 | ||
2470 | if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) || | 2704 | if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) || |
2471 | test_bit(SCAN_OFF_CHANNEL, &local->scanning))) | 2705 | test_bit(SCAN_OFF_CHANNEL, &local->scanning))) |
2472 | rx.flags |= IEEE80211_RX_IN_SCAN; | 2706 | status->rx_flags |= IEEE80211_RX_IN_SCAN; |
2473 | 2707 | ||
2474 | if (ieee80211_is_mgmt(fc)) | 2708 | if (ieee80211_is_mgmt(fc)) |
2475 | err = skb_linearize(skb); | 2709 | err = skb_linearize(skb); |
@@ -2486,91 +2720,67 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, | |||
2486 | ieee80211_verify_alignment(&rx); | 2720 | ieee80211_verify_alignment(&rx); |
2487 | 2721 | ||
2488 | if (ieee80211_is_data(fc)) { | 2722 | if (ieee80211_is_data(fc)) { |
2723 | prev_sta = NULL; | ||
2724 | |||
2489 | for_each_sta_info(local, hdr->addr2, sta, tmp) { | 2725 | for_each_sta_info(local, hdr->addr2, sta, tmp) { |
2490 | rx.sta = sta; | 2726 | if (!prev_sta) { |
2491 | found_sta = true; | 2727 | prev_sta = sta; |
2492 | rx.sdata = sta->sdata; | ||
2493 | |||
2494 | rx.flags |= IEEE80211_RX_RA_MATCH; | ||
2495 | prepares = prepare_for_handlers(rx.sdata, &rx, hdr); | ||
2496 | if (prepares) { | ||
2497 | if (status->flag & RX_FLAG_MMIC_ERROR) { | ||
2498 | if (rx.flags & IEEE80211_RX_RA_MATCH) | ||
2499 | ieee80211_rx_michael_mic_report(hdr, &rx); | ||
2500 | } else | ||
2501 | prev = rx.sdata; | ||
2502 | } | ||
2503 | } | ||
2504 | } | ||
2505 | if (!found_sta) { | ||
2506 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { | ||
2507 | if (!ieee80211_sdata_running(sdata)) | ||
2508 | continue; | 2728 | continue; |
2729 | } | ||
2509 | 2730 | ||
2510 | if (sdata->vif.type == NL80211_IFTYPE_MONITOR || | 2731 | rx.sta = prev_sta; |
2511 | sdata->vif.type == NL80211_IFTYPE_AP_VLAN) | 2732 | rx.sdata = prev_sta->sdata; |
2512 | continue; | 2733 | ieee80211_prepare_and_rx_handle(&rx, skb, false); |
2513 | 2734 | ||
2514 | /* | 2735 | prev_sta = sta; |
2515 | * frame is destined for this interface, but if it's | 2736 | } |
2516 | * not also for the previous one we handle that after | ||
2517 | * the loop to avoid copying the SKB once too much | ||
2518 | */ | ||
2519 | 2737 | ||
2520 | if (!prev) { | 2738 | if (prev_sta) { |
2521 | prev = sdata; | 2739 | rx.sta = prev_sta; |
2522 | continue; | 2740 | rx.sdata = prev_sta->sdata; |
2523 | } | ||
2524 | 2741 | ||
2525 | rx.sta = sta_info_get_bss(prev, hdr->addr2); | 2742 | if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) |
2743 | return; | ||
2744 | } | ||
2745 | } | ||
2526 | 2746 | ||
2527 | rx.flags |= IEEE80211_RX_RA_MATCH; | 2747 | prev = NULL; |
2528 | prepares = prepare_for_handlers(prev, &rx, hdr); | ||
2529 | 2748 | ||
2530 | if (!prepares) | 2749 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { |
2531 | goto next; | 2750 | if (!ieee80211_sdata_running(sdata)) |
2751 | continue; | ||
2532 | 2752 | ||
2533 | if (status->flag & RX_FLAG_MMIC_ERROR) { | 2753 | if (sdata->vif.type == NL80211_IFTYPE_MONITOR || |
2534 | rx.sdata = prev; | 2754 | sdata->vif.type == NL80211_IFTYPE_AP_VLAN) |
2535 | if (rx.flags & IEEE80211_RX_RA_MATCH) | 2755 | continue; |
2536 | ieee80211_rx_michael_mic_report(hdr, | ||
2537 | &rx); | ||
2538 | goto next; | ||
2539 | } | ||
2540 | 2756 | ||
2541 | /* | 2757 | /* |
2542 | * frame was destined for the previous interface | 2758 | * frame is destined for this interface, but if it's |
2543 | * so invoke RX handlers for it | 2759 | * not also for the previous one we handle that after |
2544 | */ | 2760 | * the loop to avoid copying the SKB once too often |
2761 | */ | ||
2545 | 2762 | ||
2546 | skb_new = skb_copy(skb, GFP_ATOMIC); | 2763 | if (!prev) { |
2547 | if (!skb_new) { | ||
2548 | if (net_ratelimit()) | ||
2549 | printk(KERN_DEBUG "%s: failed to copy " | ||
2550 | "multicast frame for %s\n", | ||
2551 | wiphy_name(local->hw.wiphy), | ||
2552 | prev->name); | ||
2553 | goto next; | ||
2554 | } | ||
2555 | ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate); | ||
2556 | next: | ||
2557 | prev = sdata; | 2764 | prev = sdata; |
2765 | continue; | ||
2558 | } | 2766 | } |
2559 | 2767 | ||
2560 | if (prev) { | 2768 | rx.sta = sta_info_get_bss(prev, hdr->addr2); |
2561 | rx.sta = sta_info_get_bss(prev, hdr->addr2); | 2769 | rx.sdata = prev; |
2770 | ieee80211_prepare_and_rx_handle(&rx, skb, false); | ||
2562 | 2771 | ||
2563 | rx.flags |= IEEE80211_RX_RA_MATCH; | 2772 | prev = sdata; |
2564 | prepares = prepare_for_handlers(prev, &rx, hdr); | 2773 | } |
2565 | 2774 | ||
2566 | if (!prepares) | 2775 | if (prev) { |
2567 | prev = NULL; | 2776 | rx.sta = sta_info_get_bss(prev, hdr->addr2); |
2568 | } | 2777 | rx.sdata = prev; |
2778 | |||
2779 | if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) | ||
2780 | return; | ||
2569 | } | 2781 | } |
2570 | if (prev) | 2782 | |
2571 | ieee80211_invoke_rx_handlers(prev, &rx, skb, rate); | 2783 | dev_kfree_skb(skb); |
2572 | else | ||
2573 | dev_kfree_skb(skb); | ||
2574 | } | 2784 | } |
2575 | 2785 | ||
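Both loops in __ieee80211_rx_handle_packet above use the same "one step behind" pattern: remember the previous matching station or interface, hand it a copy when another match turns up, and let the very last match take the original frame. The sketch below, with hypothetical names and a plain array standing in for the RCU-protected station/interface lists, illustrates why N recipients cost at most N - 1 copies and why the trailing dev_kfree_skb() only runs when nobody consumed the frame.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
        size_t len;
        unsigned char data[64];
};

struct recipient {
        const char *name;
        bool wants_frame;       /* stand-in for prepare_for_handlers() succeeding */
};

/*
 * Returns true if the frame was handled by this recipient.  With consume
 * set, a true return also means ownership of b moved here; with consume
 * clear, the work is done on a private copy and the caller keeps b.
 */
static bool deliver(struct buf *b, const struct recipient *r, bool consume)
{
        if (!r->wants_frame)
                return false;

        if (!consume) {
                struct buf *copy = malloc(sizeof(*copy));

                if (!copy)
                        return false;   /* copy failed: this recipient just misses out */
                memcpy(copy, b, sizeof(*copy));
                b = copy;
        }

        printf("%s handled %zu bytes\n", r->name, b->len);
        free(b);
        return true;
}

static void rx(struct buf *b, const struct recipient *all, size_t n)
{
        const struct recipient *prev = NULL;
        size_t i;

        for (i = 0; i < n; i++) {
                /* hand the previous match a copy, keep the current one for later */
                if (prev)
                        deliver(b, prev, false);
                prev = &all[i];
        }

        /* the last candidate gets the original frame, no extra copy */
        if (prev && deliver(b, prev, true))
                return;

        free(b);        /* mirrors dev_kfree_skb(): nobody consumed the frame */
}

int main(void)
{
        struct recipient ifaces[] = {
                { "wlan0", true },
                { "wlan1", false },
                { "wlan2", true },
        };
        struct buf *b = calloc(1, sizeof(*b));

        if (!b)
                return 1;
        b->len = 42;
        rx(b, ifaces, sizeof(ifaces) / sizeof(ifaces[0]));
        return 0;
}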
2576 | /* | 2786 | /* |
@@ -2611,30 +2821,41 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
2611 | if (WARN_ON(!local->started)) | 2821 | if (WARN_ON(!local->started)) |
2612 | goto drop; | 2822 | goto drop; |
2613 | 2823 | ||
2614 | if (status->flag & RX_FLAG_HT) { | 2824 | if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) { |
2615 | /* | 2825 | /* |
2616 | * rate_idx is MCS index, which can be [0-76] as documented on: | 2826 | * Validate the rate, unless a PLCP error means that |
2617 | * | 2827 | * we probably can't have a valid rate here anyway. |
2618 | * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n | ||
2619 | * | ||
2620 | * Anything else would be some sort of driver or hardware error. | ||
2621 | * The driver should catch hardware errors. | ||
2622 | */ | 2828 | */ |
2623 | if (WARN((status->rate_idx < 0 || | 2829 | |
2624 | status->rate_idx > 76), | 2830 | if (status->flag & RX_FLAG_HT) { |
2625 | "Rate marked as an HT rate but passed " | 2831 | /* |
2626 | "status->rate_idx is not " | 2832 | * rate_idx is MCS index, which can be [0-76] |
2627 | "an MCS index [0-76]: %d (0x%02x)\n", | 2833 | * as documented on: |
2628 | status->rate_idx, | 2834 | * |
2629 | status->rate_idx)) | 2835 | * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n |
2630 | goto drop; | 2836 | * |
2631 | } else { | 2837 | * Anything else would be some sort of driver or |
2632 | if (WARN_ON(status->rate_idx < 0 || | 2838 | * hardware error. The driver should catch hardware |
2633 | status->rate_idx >= sband->n_bitrates)) | 2839 | * errors. |
2634 | goto drop; | 2840 | */ |
2635 | rate = &sband->bitrates[status->rate_idx]; | 2841 | if (WARN((status->rate_idx < 0 || |
2842 | status->rate_idx > 76), | ||
2843 | "Rate marked as an HT rate but passed " | ||
2844 | "status->rate_idx is not " | ||
2845 | "an MCS index [0-76]: %d (0x%02x)\n", | ||
2846 | status->rate_idx, | ||
2847 | status->rate_idx)) | ||
2848 | goto drop; | ||
2849 | } else { | ||
2850 | if (WARN_ON(status->rate_idx < 0 || | ||
2851 | status->rate_idx >= sband->n_bitrates)) | ||
2852 | goto drop; | ||
2853 | rate = &sband->bitrates[status->rate_idx]; | ||
2854 | } | ||
2636 | } | 2855 | } |
2637 | 2856 | ||
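The rate check above distinguishes HT and legacy reporting: with RX_FLAG_HT set, rate_idx is an 802.11n MCS number and anything outside 0-76 indicates a driver or hardware bug, while a legacy rate_idx must index the current band's bitrate table; frames already flagged with a PLCP CRC failure skip the check, since no meaningful rate can be expected for them. A compact sketch of those bounds, using illustrative names rather than the mac80211 structures:

#include <assert.h>
#include <stdbool.h>

#define MAX_MCS_INDEX 76        /* highest MCS defined by 802.11n */

struct band_info {
        int n_bitrates;         /* number of legacy bitrates in this band */
};

static bool rate_idx_valid(bool is_ht, int rate_idx, const struct band_info *band)
{
        if (is_ht)
                return rate_idx >= 0 && rate_idx <= MAX_MCS_INDEX;

        return rate_idx >= 0 && rate_idx < band->n_bitrates;
}

int main(void)
{
        struct band_info band_2ghz = { .n_bitrates = 12 };

        assert(rate_idx_valid(true, 76, &band_2ghz));   /* highest 802.11n MCS */
        assert(!rate_idx_valid(true, 77, &band_2ghz));
        assert(rate_idx_valid(false, 11, &band_2ghz));
        assert(!rate_idx_valid(false, 12, &band_2ghz));
        return 0;
}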
2857 | status->rx_flags = 0; | ||
2858 | |||
2638 | /* | 2859 | /* |
2639 | * key references and virtual interfaces are protected using RCU | 2860 | * key references and virtual interfaces are protected using RCU |
2640 | * and this requires that we are in a read-side RCU section during | 2861 | * and this requires that we are in a read-side RCU section during |
@@ -2654,7 +2875,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
2654 | return; | 2875 | return; |
2655 | } | 2876 | } |
2656 | 2877 | ||
2657 | __ieee80211_rx_handle_packet(hw, skb, rate); | 2878 | __ieee80211_rx_handle_packet(hw, skb); |
2658 | 2879 | ||
2659 | rcu_read_unlock(); | 2880 | rcu_read_unlock(); |
2660 | 2881 | ||