Diffstat (limited to 'net/mac80211/rx.c')
-rw-r--r--   net/mac80211/rx.c   |   190
1 file changed, 107 insertions(+), 83 deletions(-)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index b01e467b76c6..a6701ed87f0d 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -533,10 +533,13 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
 
 static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
                                             struct tid_ampdu_rx *tid_agg_rx,
-                                            int index,
-                                            struct sk_buff_head *frames)
+                                            int index)
 {
+    struct ieee80211_local *local = hw_to_local(hw);
     struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
+    struct ieee80211_rx_status *status;
+
+    lockdep_assert_held(&tid_agg_rx->reorder_lock);
 
     if (!skb)
         goto no_frame;
@@ -544,7 +547,9 @@ static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
     /* release the frame from the reorder ring buffer */
     tid_agg_rx->stored_mpdu_num--;
     tid_agg_rx->reorder_buf[index] = NULL;
-    __skb_queue_tail(frames, skb);
+    status = IEEE80211_SKB_RXCB(skb);
+    status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
+    skb_queue_tail(&local->rx_skb_queue, skb);
 
 no_frame:
     tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
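With this change a frame leaving the reorder buffer is no longer handed back through a caller-owned sk_buff_head. It is tagged IEEE80211_RX_DEFERRED_RELEASE in its receive control block and appended to the device-wide local->rx_skb_queue, which the RX handlers drain later; a hunk further down uses the same flag to keep deferred frames from flipping a station's power-save state. A rough standalone model of the pattern (types and names here are illustrative, not mac80211's):

#include <stdbool.h>
#include <stddef.h>

struct packet {
    struct packet *next;
    bool deferred_release;          /* models IEEE80211_RX_DEFERRED_RELEASE */
};

struct packet_queue {
    struct packet *head, *tail;
};

void queue_tail(struct packet_queue *q, struct packet *p)
{
    p->next = NULL;
    if (q->tail)
        q->tail->next = p;
    else
        q->head = p;
    q->tail = p;
}

/* counterpart of ieee80211_release_reorder_frame(): tag, then queue */
void release_reorder_frame(struct packet_queue *rx_queue, struct packet *p)
{
    p->deferred_release = true;
    queue_tail(rx_queue, p);
}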
@@ -552,15 +557,16 @@ no_frame:
 
 static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
                                              struct tid_ampdu_rx *tid_agg_rx,
-                                             u16 head_seq_num,
-                                             struct sk_buff_head *frames)
+                                             u16 head_seq_num)
 {
     int index;
 
+    lockdep_assert_held(&tid_agg_rx->reorder_lock);
+
     while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
         index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
                                                   tid_agg_rx->buf_size;
-        ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
+        ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
     }
 }
 
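The index arithmetic in these hunks relies on the 12-bit sequence-number helpers visible in the first hunk's context (seq_sub(), seq_inc(), seq_less()): everything is computed modulo 0x1000 and then mapped onto the reorder ring buffer with % buf_size. A standalone sketch of that arithmetic (macro names and the sample values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define SEQ_MODULO 0x1000
#define SEQ_MASK   0x0fff

static uint16_t seq_sub(uint16_t sq1, uint16_t sq2)
{
    return (sq1 - sq2) & SEQ_MASK;
}

static uint16_t seq_inc(uint16_t sq)
{
    return (sq + 1) & SEQ_MASK;
}

static int seq_less(uint16_t sq1, uint16_t sq2)
{
    return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}

int main(void)
{
    uint16_t ssn = 0xffe;          /* starting sequence number of the BA session */
    uint16_t head_seq_num = 0x002; /* already wrapped past 0xfff */
    unsigned int buf_size = 64;    /* reorder buffer slots */

    /* slot that head_seq_num occupies in the reorder ring buffer */
    unsigned int index = seq_sub(head_seq_num, ssn) % buf_size;

    printf("index=%u less=%d inc=0x%03x\n",
           index, seq_less(ssn, head_seq_num), seq_inc(0xfff));
    return 0;
}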
@@ -576,11 +582,12 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
 
 static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
-                                          struct tid_ampdu_rx *tid_agg_rx,
-                                          struct sk_buff_head *frames)
+                                          struct tid_ampdu_rx *tid_agg_rx)
 {
     int index, j;
 
+    lockdep_assert_held(&tid_agg_rx->reorder_lock);
+
     /* release the buffer until next missing frame */
     index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
                                               tid_agg_rx->buf_size;
@@ -606,8 +613,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
             wiphy_debug(hw->wiphy,
                         "release an RX reorder frame due to timeout on earlier frames\n");
 #endif
-            ieee80211_release_reorder_frame(hw, tid_agg_rx,
-                                            j, frames);
+            ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
 
             /*
              * Increment the head seq# also for the skipped slots.
@@ -617,31 +623,11 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
             skipped = 0;
         }
     } else while (tid_agg_rx->reorder_buf[index]) {
-        ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
+        ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
         index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
                                                   tid_agg_rx->buf_size;
     }
 
-    /*
-     * Disable the reorder release timer for now.
-     *
-     * The current implementation lacks a proper locking scheme
-     * which would protect vital statistic and debug counters
-     * from being updated by two different but concurrent BHs.
-     *
-     * More information about the topic is available from:
-     *  - thread: http://marc.info/?t=128635927000001
-     *
-     * What was wrong:
-     *  => http://marc.info/?l=linux-wireless&m=128636170811964
-     *     "Basically the thing is that until your patch, the data
-     *      in the struct didn't actually need locking because it
-     *      was accessed by the RX path only which is not concurrent."
-     *
-     * List of what needs to be fixed:
-     *  => http://marc.info/?l=linux-wireless&m=128656352920957
-     *
-
     if (tid_agg_rx->stored_mpdu_num) {
         j = index = seq_sub(tid_agg_rx->head_seq_num,
                             tid_agg_rx->ssn) % tid_agg_rx->buf_size;
@@ -660,10 +646,6 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
     } else {
         del_timer(&tid_agg_rx->reorder_timer);
     }
-    */
-
-set_release_timer:
-    return;
 }
 
 /*
@@ -673,8 +655,7 @@ set_release_timer:
  */
 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
                                              struct tid_ampdu_rx *tid_agg_rx,
-                                             struct sk_buff *skb,
-                                             struct sk_buff_head *frames)
+                                             struct sk_buff *skb)
 {
     struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
     u16 sc = le16_to_cpu(hdr->seq_ctrl);
@@ -683,10 +664,11 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
     int index;
     bool ret = true;
 
+    spin_lock(&tid_agg_rx->reorder_lock);
+
     buf_size = tid_agg_rx->buf_size;
     head_seq_num = tid_agg_rx->head_seq_num;
 
-    spin_lock(&tid_agg_rx->reorder_lock);
     /* frame with out of date sequence number */
     if (seq_less(mpdu_seq_num, head_seq_num)) {
         dev_kfree_skb(skb);
@@ -700,8 +682,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
     if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
         head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
         /* release stored frames up to new head to stack */
-        ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num,
-                                         frames);
+        ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
     }
 
     /* Now the new frame is always in the range of the reordering buffer */
@@ -729,7 +710,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
     tid_agg_rx->reorder_buf[index] = skb;
     tid_agg_rx->reorder_time[index] = jiffies;
     tid_agg_rx->stored_mpdu_num++;
-    ieee80211_sta_reorder_release(hw, tid_agg_rx, frames);
+    ieee80211_sta_reorder_release(hw, tid_agg_rx);
 
  out:
     spin_unlock(&tid_agg_rx->reorder_lock);
@@ -740,8 +721,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
  * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns
  * true if the MPDU was buffered, false if it should be processed.
  */
-static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
-                                       struct sk_buff_head *frames)
+static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
 {
     struct sk_buff *skb = rx->skb;
     struct ieee80211_local *local = rx->local;
@@ -796,11 +776,11 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
      * sure that we cannot get to it any more before doing
      * anything with it.
      */
-    if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames))
+    if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb))
         return;
 
  dont_reorder:
-    __skb_queue_tail(frames, skb);
+    skb_queue_tail(&local->rx_skb_queue, skb);
 }
 
 static ieee80211_rx_result debug_noinline
@@ -948,12 +928,31 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
          * have been expected.
          */
         struct ieee80211_key *key = NULL;
+        struct ieee80211_sub_if_data *sdata = rx->sdata;
+        int i;
+
         if (ieee80211_is_mgmt(fc) &&
             is_multicast_ether_addr(hdr->addr1) &&
             (key = rcu_dereference(rx->sdata->default_mgmt_key)))
             rx->key = key;
-        else if ((key = rcu_dereference(rx->sdata->default_key)))
-            rx->key = key;
+        else {
+            if (rx->sta) {
+                for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
+                    key = rcu_dereference(rx->sta->gtk[i]);
+                    if (key)
+                        break;
+                }
+            }
+            if (!key) {
+                for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
+                    key = rcu_dereference(sdata->keys[i]);
+                    if (key)
+                        break;
+                }
+            }
+            if (key)
+                rx->key = key;
+        }
         return RX_CONTINUE;
     } else {
         u8 keyid;
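The rewritten key selection applies to management frames that arrive without a usable pairwise key: the interface's default management key is still preferred for multicast management frames, but the old single default_key lookup is replaced by a scan of the station's group keys (rx->sta->gtk[]) and then of the interface's keys (sdata->keys[]). A simplified standalone model of that fallback order (struct and function names are illustrative):

#include <stddef.h>

#define NUM_DEFAULT_KEYS 4

struct key { int id; };

struct station {
    struct key *gtk[NUM_DEFAULT_KEYS];      /* per-station group keys */
};

struct interface {
    struct key *default_mgmt_key;
    struct key *keys[NUM_DEFAULT_KEYS];     /* per-interface keys */
};

/* Returns the key the RX path would fall back to, or NULL if none is set. */
struct key *pick_mgmt_rx_key(struct interface *sdata, struct station *sta,
                             int is_mcast_mgmt)
{
    struct key *key;
    int i;

    if (is_mcast_mgmt && sdata->default_mgmt_key)
        return sdata->default_mgmt_key;

    if (sta) {
        for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
            key = sta->gtk[i];
            if (key)
                return key;
        }
    }

    for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
        key = sdata->keys[i];
        if (key)
            return key;
    }

    return NULL;
}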
@@ -1102,8 +1101,6 @@ static void ap_sta_ps_end(struct sta_info *sta)
 
     atomic_dec(&sdata->bss->num_sta_ps);
 
-    clear_sta_flags(sta, WLAN_STA_PS_STA);
-
 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
     printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
            sdata->name, sta->sta.addr, sta->sta.aid);
@@ -1158,12 +1155,14 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
     sta->rx_fragments++;
     sta->rx_bytes += rx->skb->len;
     sta->last_signal = status->signal;
+    ewma_add(&sta->avg_signal, -status->signal);
 
     /*
      * Change STA power saving mode only at the end of a frame
      * exchange sequence.
      */
     if (!ieee80211_has_morefrags(hdr->frame_control) &&
+        !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
         (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
          rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
         if (test_sta_flags(sta, WLAN_STA_PS_STA)) {
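Besides last_signal, the station now keeps a running average in sta->avg_signal via ewma_add(); the sample is negated, presumably because the kernel's EWMA helpers work on unsigned values while the signal is a negative dBm figure. A minimal standalone model of such an exponentially weighted moving average (the factor and weight values are illustrative, not the ones mac80211 uses):

#include <stdio.h>

/* Minimal EWMA model: the internal value is kept scaled by "factor" to
 * retain fractional precision, new samples are weighted by 1/weight. */
struct ewma {
    unsigned long internal;
    unsigned long factor;
    unsigned long weight;
};

static void ewma_init(struct ewma *avg, unsigned long factor,
                      unsigned long weight)
{
    avg->internal = 0;
    avg->factor = factor;
    avg->weight = weight;
}

static void ewma_add(struct ewma *avg, unsigned long val)
{
    avg->internal = avg->internal ?
        (((avg->internal * (avg->weight - 1)) +
          (val * avg->factor)) / avg->weight) :
        (val * avg->factor);
}

static unsigned long ewma_read(const struct ewma *avg)
{
    return avg->internal / avg->factor;
}

int main(void)
{
    struct ewma avg;
    int signal_dbm[] = { -42, -45, -44, -60, -43 };
    unsigned int i;

    ewma_init(&avg, 1024, 8);
    for (i = 0; i < sizeof(signal_dbm) / sizeof(signal_dbm[0]); i++)
        ewma_add(&avg, (unsigned long)-signal_dbm[i]);  /* store positive */

    printf("average signal: -%lu dBm\n", ewma_read(&avg));
    return 0;
}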
@@ -1515,12 +1514,30 @@ ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
     if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
         if (unlikely(!ieee80211_has_protected(fc) &&
                      ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
-                     rx->key))
+                     rx->key)) {
+            if (ieee80211_is_deauth(fc))
+                cfg80211_send_unprot_deauth(rx->sdata->dev,
+                                            rx->skb->data,
+                                            rx->skb->len);
+            else if (ieee80211_is_disassoc(fc))
+                cfg80211_send_unprot_disassoc(rx->sdata->dev,
+                                              rx->skb->data,
+                                              rx->skb->len);
             return -EACCES;
+        }
         /* BIP does not use Protected field, so need to check MMIE */
         if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
-                     ieee80211_get_mmie_keyidx(rx->skb) < 0))
+                     ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
+            if (ieee80211_is_deauth(fc))
+                cfg80211_send_unprot_deauth(rx->sdata->dev,
+                                            rx->skb->data,
+                                            rx->skb->len);
+            else if (ieee80211_is_disassoc(fc))
+                cfg80211_send_unprot_disassoc(rx->sdata->dev,
+                                              rx->skb->data,
+                                              rx->skb->len);
             return -EACCES;
+        }
         /*
          * When using MFP, Action frames are not allowed prior to
          * having configured keys.
@@ -1788,11 +1805,11 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
 
         fwd_skb = skb_copy(skb, GFP_ATOMIC);
 
-        if (!fwd_skb && net_ratelimit()) {
+        if (!fwd_skb && net_ratelimit())
             printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
                    sdata->name);
+        if (!fwd_skb)
             goto out;
-        }
 
         fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
         memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
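The mesh-forwarding hunk above fixes an error-path bug: previously the goto out sat inside the if (!fwd_skb && net_ratelimit()) block, so when skb_copy() failed while the printk was rate-limited, the function fell through and dereferenced the NULL fwd_skb on the next line. The new shape logs (rate-limited) and then bails out unconditionally. The same pattern in a standalone sketch (names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* stand-in for net_ratelimit(): pretend the log budget is exhausted */
static int ratelimit_ok(void)
{
    return 0;
}

/* Buggy shape: error handling is skipped whenever logging is suppressed. */
int forward_buggy(const char *frame, size_t len)
{
    char *copy = malloc(len);

    if (!copy && ratelimit_ok()) {
        fprintf(stderr, "failed to clone mesh frame\n");
        return -1;              /* only reached when we were allowed to log */
    }
    memcpy(copy, frame, len);   /* NULL dereference when malloc() failed */
    free(copy);
    return 0;
}

/* Fixed shape: logging and error handling are independent. */
int forward_fixed(const char *frame, size_t len)
{
    char *copy = malloc(len);

    if (!copy && ratelimit_ok())
        fprintf(stderr, "failed to clone mesh frame\n");
    if (!copy)
        return -1;
    memcpy(copy, frame, len);
    free(copy);
    return 0;
}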
@@ -1875,9 +1892,8 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
     dev->stats.rx_packets++;
     dev->stats.rx_bytes += rx->skb->len;
 
-    if (ieee80211_is_data(hdr->frame_control) &&
-        !is_multicast_ether_addr(hdr->addr1) &&
-        local->hw.conf.dynamic_ps_timeout > 0 && local->ps_sdata) {
+    if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
+        !is_multicast_ether_addr(((struct ethhdr *)rx->skb->data)->h_dest)) {
         mod_timer(&local->dynamic_ps_timer, jiffies +
                   msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
     }
@@ -1888,7 +1904,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
 }
 
 static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
+ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
 {
     struct ieee80211_local *local = rx->local;
     struct ieee80211_hw *hw = &local->hw;
@@ -1926,9 +1942,11 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
         mod_timer(&tid_agg_rx->session_timer,
                   TU_TO_EXP_TIME(tid_agg_rx->timeout));
 
+        spin_lock(&tid_agg_rx->reorder_lock);
         /* release stored frames up to start of BAR */
-        ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
-                                         frames);
+        ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num);
+        spin_unlock(&tid_agg_rx->reorder_lock);
+
         kfree_skb(skb);
         return RX_QUEUED;
     }
@@ -2119,10 +2137,13 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
         }
         break;
     case WLAN_CATEGORY_MESH_PLINK:
-    case WLAN_CATEGORY_MESH_PATH_SEL:
         if (!ieee80211_vif_is_mesh(&sdata->vif))
             break;
         goto queue;
+    case WLAN_CATEGORY_MESH_PATH_SEL:
+        if (!mesh_path_sel_is_hwmp(sdata))
+            break;
+        goto queue;
     }
 
     return RX_CONTINUE;
@@ -2440,8 +2461,7 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
     }
 }
 
-static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
-                                  struct sk_buff_head *frames)
+static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
 {
     ieee80211_rx_result res = RX_DROP_MONITOR;
     struct sk_buff *skb;
@@ -2453,7 +2473,15 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
             goto rxh_next;  \
     } while (0);
 
-    while ((skb = __skb_dequeue(frames))) {
+    spin_lock(&rx->local->rx_skb_queue.lock);
+    if (rx->local->running_rx_handler)
+        goto unlock;
+
+    rx->local->running_rx_handler = true;
+
+    while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) {
+        spin_unlock(&rx->local->rx_skb_queue.lock);
+
         /*
          * all the other fields are valid across frames
          * that belong to an aMPDU since they are on the
@@ -2476,12 +2504,7 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
         CALL_RXH(ieee80211_rx_h_mesh_fwding);
 #endif
         CALL_RXH(ieee80211_rx_h_data)
-
-        /* special treatment -- needs the queue */
-        res = ieee80211_rx_h_ctrl(rx, frames);
-        if (res != RX_CONTINUE)
-            goto rxh_next;
-
+        CALL_RXH(ieee80211_rx_h_ctrl);
         CALL_RXH(ieee80211_rx_h_mgmt_check)
         CALL_RXH(ieee80211_rx_h_action)
         CALL_RXH(ieee80211_rx_h_userspace_mgmt)
@@ -2490,18 +2513,20 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
 
  rxh_next:
         ieee80211_rx_handlers_result(rx, res);
-
+        spin_lock(&rx->local->rx_skb_queue.lock);
 #undef CALL_RXH
     }
+
+    rx->local->running_rx_handler = false;
+
+ unlock:
+    spin_unlock(&rx->local->rx_skb_queue.lock);
 }
 
 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
 {
-    struct sk_buff_head reorder_release;
     ieee80211_rx_result res = RX_DROP_MONITOR;
 
-    __skb_queue_head_init(&reorder_release);
-
 #define CALL_RXH(rxh) \
     do { \
         res = rxh(rx); \
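Taken together, rx_skb_queue and running_rx_handler serialize the RX handler chain: every path appends frames to local->rx_skb_queue (under the sk_buff_head's built-in lock, which is what rx_skb_queue.lock refers to), but only the caller that finds running_rx_handler clear drains the queue; anyone arriving during a drain leaves its frames for the active drainer, so the handlers never run concurrently and frames still come out in order. A standalone model of the scheme using a pthread mutex (all names here are mine, not mac80211's):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct frame {
    struct frame *next;
    int id;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct frame *queue_head, *queue_tail;
static bool running_rx_handler;

static void enqueue_locked(struct frame *f)
{
    f->next = NULL;
    if (queue_tail)
        queue_tail->next = f;
    else
        queue_head = f;
    queue_tail = f;
}

static struct frame *dequeue_locked(void)
{
    struct frame *f = queue_head;

    if (f) {
        queue_head = f->next;
        if (!queue_head)
            queue_tail = NULL;
    }
    return f;
}

static void process(struct frame *f)
{
    printf("handled frame %d\n", f->id);
}

/* Models the enqueue done by ieee80211_rx_reorder_ampdu() and friends. */
static void rx_enqueue(struct frame *f)
{
    pthread_mutex_lock(&queue_lock);
    enqueue_locked(f);
    pthread_mutex_unlock(&queue_lock);
}

/* Models ieee80211_rx_handlers(): at most one caller drains the queue. */
static void rx_handlers(void)
{
    struct frame *f;

    pthread_mutex_lock(&queue_lock);
    if (running_rx_handler)
        goto unlock;

    running_rx_handler = true;

    while ((f = dequeue_locked())) {
        pthread_mutex_unlock(&queue_lock);
        process(f);             /* handler chain runs outside the lock */
        pthread_mutex_lock(&queue_lock);
    }

    running_rx_handler = false;
unlock:
    pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
    struct frame a = { .id = 1 }, b = { .id = 2 };

    rx_enqueue(&a);
    rx_enqueue(&b);
    rx_handlers();              /* drains both frames in order */
    return 0;
}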
@@ -2512,9 +2537,9 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
     CALL_RXH(ieee80211_rx_h_passive_scan)
     CALL_RXH(ieee80211_rx_h_check)
 
-    ieee80211_rx_reorder_ampdu(rx, &reorder_release);
+    ieee80211_rx_reorder_ampdu(rx);
 
-    ieee80211_rx_handlers(rx, &reorder_release);
+    ieee80211_rx_handlers(rx);
     return;
 
  rxh_next:
@@ -2524,13 +2549,11 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
 }
 
 /*
- * This function makes calls into the RX path. Therefore the
- * caller must hold the sta_info->lock and everything has to
- * be under rcu_read_lock protection as well.
+ * This function makes calls into the RX path, therefore
+ * it has to be invoked under RCU read lock.
  */
 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
 {
-    struct sk_buff_head frames;
     struct ieee80211_rx_data rx = {
         .sta = sta,
         .sdata = sta->sdata,
@@ -2543,13 +2566,11 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
     if (!tid_agg_rx)
         return;
 
-    __skb_queue_head_init(&frames);
-
     spin_lock(&tid_agg_rx->reorder_lock);
-    ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx, &frames);
+    ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx);
     spin_unlock(&tid_agg_rx->reorder_lock);
 
-    ieee80211_rx_handlers(&rx, &frames);
+    ieee80211_rx_handlers(&rx);
 }
 
 /* main receive path */
@@ -2884,6 +2905,9 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
         return;
     }
 
+    ieee80211_tpt_led_trig_rx(local,
+            ((struct ieee80211_hdr *)skb->data)->frame_control,
+            skb->len);
     __ieee80211_rx_handle_packet(hw, skb);
 
     rcu_read_unlock();