Diffstat (limited to 'net/mac80211/rx.c')
-rw-r--r--   net/mac80211/rx.c   195
1 file changed, 119 insertions(+), 76 deletions(-)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index be9abc2e6348..fa0f37e4afe4 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -293,7 +293,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
                         skb2 = skb_clone(skb, GFP_ATOMIC);
                         if (skb2) {
                                 skb2->dev = prev_dev;
-                                netif_rx(skb2);
+                                netif_receive_skb(skb2);
                         }
                 }
 
@@ -304,7 +304,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
 
         if (prev_dev) {
                 skb->dev = prev_dev;
-                netif_rx(skb);
+                netif_receive_skb(skb);
         } else
                 dev_kfree_skb(skb);
 
@@ -719,16 +719,13 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
 
         tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
 
-        spin_lock(&sta->lock);
-
-        if (!sta->ampdu_mlme.tid_active_rx[tid])
-                goto dont_reorder_unlock;
-
-        tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
+        tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
+        if (!tid_agg_rx)
+                goto dont_reorder;
 
         /* qos null data frames are excluded */
         if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
-                goto dont_reorder_unlock;
+                goto dont_reorder;
 
         /* new, potentially un-ordered, ampdu frame - process it */
 
@@ -740,20 +737,22 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
         /* if this mpdu is fragmented - terminate rx aggregation session */
         sc = le16_to_cpu(hdr->seq_ctrl);
         if (sc & IEEE80211_SCTL_FRAG) {
-                spin_unlock(&sta->lock);
-                __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT,
-                                               WLAN_REASON_QSTA_REQUIRE_SETUP);
-                dev_kfree_skb(skb);
+                skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
+                skb_queue_tail(&rx->sdata->skb_queue, skb);
+                ieee80211_queue_work(&local->hw, &rx->sdata->work);
                 return;
         }
 
-        if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames)) {
-                spin_unlock(&sta->lock);
+        /*
+         * No locking needed -- we will only ever process one
+         * RX packet at a time, and thus own tid_agg_rx. All
+         * other code manipulating it needs to (and does) make
+         * sure that we cannot get to it any more before doing
+         * anything with it.
+         */
+        if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames))
                 return;
-        }
 
- dont_reorder_unlock:
-        spin_unlock(&sta->lock);
  dont_reorder:
         __skb_queue_tail(frames, skb);
 }
@@ -825,6 +824,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
         ieee80211_rx_result result = RX_DROP_UNUSABLE;
         struct ieee80211_key *stakey = NULL;
         int mmie_keyidx = -1;
+        __le16 fc;
 
         /*
          * Key selection 101
@@ -866,13 +866,15 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
         if (rx->sta)
                 stakey = rcu_dereference(rx->sta->key);
 
-        if (!ieee80211_has_protected(hdr->frame_control))
+        fc = hdr->frame_control;
+
+        if (!ieee80211_has_protected(fc))
                 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
 
         if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
                 rx->key = stakey;
                 /* Skip decryption if the frame is not protected. */
-                if (!ieee80211_has_protected(hdr->frame_control))
+                if (!ieee80211_has_protected(fc))
                         return RX_CONTINUE;
         } else if (mmie_keyidx >= 0) {
                 /* Broadcast/multicast robust management frame / BIP */
@@ -884,7 +886,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
                     mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
                         return RX_DROP_MONITOR; /* unexpected BIP keyidx */
                 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
-        } else if (!ieee80211_has_protected(hdr->frame_control)) {
+        } else if (!ieee80211_has_protected(fc)) {
                 /*
                  * The frame was not protected, so skip decryption. However, we
                  * need to set rx->key if there is a key that could have been
@@ -892,7 +894,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
                  * have been expected.
                  */
                 struct ieee80211_key *key = NULL;
-                if (ieee80211_is_mgmt(hdr->frame_control) &&
+                if (ieee80211_is_mgmt(fc) &&
                     is_multicast_ether_addr(hdr->addr1) &&
                     (key = rcu_dereference(rx->sdata->default_mgmt_key)))
                         rx->key = key;
@@ -914,7 +916,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
             (status->flag & RX_FLAG_IV_STRIPPED))
                 return RX_CONTINUE;
 
-        hdrlen = ieee80211_hdrlen(hdr->frame_control);
+        hdrlen = ieee80211_hdrlen(fc);
 
         if (rx->skb->len < 8 + hdrlen)
                 return RX_DROP_UNUSABLE; /* TODO: count this? */
@@ -947,19 +949,17 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
 
         if (skb_linearize(rx->skb))
                 return RX_DROP_UNUSABLE;
-
-        hdr = (struct ieee80211_hdr *)rx->skb->data;
-
-        /* Check for weak IVs if possible */
-        if (rx->sta && rx->key->conf.alg == ALG_WEP &&
-            ieee80211_is_data(hdr->frame_control) &&
-            (!(status->flag & RX_FLAG_IV_STRIPPED) ||
-             !(status->flag & RX_FLAG_DECRYPTED)) &&
-            ieee80211_wep_is_weak_iv(rx->skb, rx->key))
-                rx->sta->wep_weak_iv_count++;
+        /* the hdr variable is invalid now! */
 
         switch (rx->key->conf.alg) {
         case ALG_WEP:
+                /* Check for weak IVs if possible */
+                if (rx->sta && ieee80211_is_data(fc) &&
+                    (!(status->flag & RX_FLAG_IV_STRIPPED) ||
+                     !(status->flag & RX_FLAG_DECRYPTED)) &&
+                    ieee80211_wep_is_weak_iv(rx->skb, rx->key))
+                        rx->sta->wep_weak_iv_count++;
+
                 result = ieee80211_crypto_wep_decrypt(rx);
                 break;
         case ALG_TKIP:
@@ -1267,11 +1267,13 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
                                                  rx->queue, &(rx->skb));
                 if (rx->key && rx->key->conf.alg == ALG_CCMP &&
                     ieee80211_has_protected(fc)) {
+                        int queue = ieee80211_is_mgmt(fc) ?
+                                NUM_RX_DATA_QUEUES : rx->queue;
                         /* Store CCMP PN so that we can verify that the next
                          * fragment has a sequential PN value. */
                         entry->ccmp = 1;
                         memcpy(entry->last_pn,
-                               rx->key->u.ccmp.rx_pn[rx->queue],
+                               rx->key->u.ccmp.rx_pn[queue],
                                CCMP_PN_LEN);
                 }
                 return RX_QUEUED;
@@ -1291,6 +1293,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
         if (entry->ccmp) {
                 int i;
                 u8 pn[CCMP_PN_LEN], *rpn;
+                int queue;
                 if (!rx->key || rx->key->conf.alg != ALG_CCMP)
                         return RX_DROP_UNUSABLE;
                 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
@@ -1299,7 +1302,9 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
                         if (pn[i])
                                 break;
                 }
-                rpn = rx->key->u.ccmp.rx_pn[rx->queue];
+                queue = ieee80211_is_mgmt(fc) ?
+                        NUM_RX_DATA_QUEUES : rx->queue;
+                rpn = rx->key->u.ccmp.rx_pn[queue];
                 if (memcmp(pn, rpn, CCMP_PN_LEN))
                         return RX_DROP_UNUSABLE;
                 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
@@ -1573,7 +1578,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
                 /* deliver to local stack */
                 skb->protocol = eth_type_trans(skb, dev);
                 memset(skb->cb, 0, sizeof(skb->cb));
-                netif_rx(skb);
+                netif_receive_skb(skb);
         }
 }
 
@@ -1829,13 +1834,11 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
                                   &bar_data, sizeof(bar_data)))
                         return RX_DROP_MONITOR;
 
-                spin_lock(&rx->sta->lock);
                 tid = le16_to_cpu(bar_data.control) >> 12;
-                if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) {
-                        spin_unlock(&rx->sta->lock);
+
+                tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
+                if (!tid_agg_rx)
                         return RX_DROP_MONITOR;
-                }
-                tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
 
                 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
 
@@ -1848,11 +1851,15 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
                 ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
                                                  frames);
                 kfree_skb(skb);
-                spin_unlock(&rx->sta->lock);
                 return RX_QUEUED;
         }
 
-        return RX_CONTINUE;
+        /*
+         * After this point, we only want management frames,
+         * so we can drop all remaining control frames to
+         * cooked monitor interfaces.
+         */
+        return RX_DROP_MONITOR;
 }
 
 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
@@ -1944,30 +1951,27 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
                 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
                         break;
 
-                if (sdata->vif.type == NL80211_IFTYPE_STATION)
-                        return ieee80211_sta_rx_mgmt(sdata, rx->skb);
-
                 switch (mgmt->u.action.u.addba_req.action_code) {
                 case WLAN_ACTION_ADDBA_REQ:
                         if (len < (IEEE80211_MIN_ACTION_SIZE +
                                    sizeof(mgmt->u.action.u.addba_req)))
-                                return RX_DROP_MONITOR;
-                        ieee80211_process_addba_request(local, rx->sta, mgmt, len);
-                        goto handled;
+                                goto invalid;
+                        break;
                 case WLAN_ACTION_ADDBA_RESP:
                         if (len < (IEEE80211_MIN_ACTION_SIZE +
                                    sizeof(mgmt->u.action.u.addba_resp)))
-                                break;
-                        ieee80211_process_addba_resp(local, rx->sta, mgmt, len);
-                        goto handled;
+                                goto invalid;
+                        break;
                 case WLAN_ACTION_DELBA:
                         if (len < (IEEE80211_MIN_ACTION_SIZE +
                                    sizeof(mgmt->u.action.u.delba)))
-                                break;
-                        ieee80211_process_delba(sdata, rx->sta, mgmt, len);
-                        goto handled;
+                                goto invalid;
+                        break;
+                default:
+                        goto invalid;
                 }
-                break;
+
+                goto queue;
         case WLAN_CATEGORY_SPECTRUM_MGMT:
                 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
                         break;
@@ -1997,7 +2001,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
                         if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
                                 break;
 
-                        return ieee80211_sta_rx_mgmt(sdata, rx->skb);
+                        goto queue;
                 }
                 break;
         case WLAN_CATEGORY_SA_QUERY:
@@ -2015,11 +2019,12 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
                 break;
         case WLAN_CATEGORY_MESH_PLINK:
         case WLAN_CATEGORY_MESH_PATH_SEL:
-                if (ieee80211_vif_is_mesh(&sdata->vif))
-                        return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
-                break;
+                if (!ieee80211_vif_is_mesh(&sdata->vif))
+                        break;
+                goto queue;
         }
 
+ invalid:
         /*
          * For AP mode, hostapd is responsible for handling any action
          * frames that we didn't handle, including returning unknown
@@ -2039,8 +2044,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
          */
         status = IEEE80211_SKB_RXCB(rx->skb);
 
-        if (sdata->vif.type == NL80211_IFTYPE_STATION &&
-            cfg80211_rx_action(rx->sdata->dev, status->freq,
+        if (cfg80211_rx_action(rx->sdata->dev, status->freq,
                                rx->skb->data, rx->skb->len,
                                GFP_ATOMIC))
                 goto handled;
@@ -2052,11 +2056,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
         nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
                                GFP_ATOMIC);
         if (nskb) {
-                struct ieee80211_mgmt *mgmt = (void *)nskb->data;
+                struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
 
-                mgmt->u.action.category |= 0x80;
-                memcpy(mgmt->da, mgmt->sa, ETH_ALEN);
-                memcpy(mgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
+                nmgmt->u.action.category |= 0x80;
+                memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
+                memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
 
                 memset(nskb->cb, 0, sizeof(nskb->cb));
 
@@ -2068,6 +2072,14 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
                 rx->sta->rx_packets++;
         dev_kfree_skb(rx->skb);
         return RX_QUEUED;
+
+ queue:
+        rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
+        skb_queue_tail(&sdata->skb_queue, rx->skb);
+        ieee80211_queue_work(&local->hw, &sdata->work);
+        if (rx->sta)
+                rx->sta->rx_packets++;
+        return RX_QUEUED;
 }
 
 static ieee80211_rx_result debug_noinline
@@ -2075,10 +2087,15 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
 {
         struct ieee80211_sub_if_data *sdata = rx->sdata;
         ieee80211_rx_result rxs;
+        struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
+        __le16 stype;
 
         if (!(rx->flags & IEEE80211_RX_RA_MATCH))
                 return RX_DROP_MONITOR;
 
+        if (rx->skb->len < 24)
+                return RX_DROP_MONITOR;
+
         if (ieee80211_drop_unencrypted_mgmt(rx))
                 return RX_DROP_UNUSABLE;
 
@@ -2086,16 +2103,42 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
         if (rxs != RX_CONTINUE)
                 return rxs;
 
-        if (ieee80211_vif_is_mesh(&sdata->vif))
-                return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
+        stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
 
-        if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
-                return ieee80211_ibss_rx_mgmt(sdata, rx->skb);
+        if (!ieee80211_vif_is_mesh(&sdata->vif) &&
+            sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+            sdata->vif.type != NL80211_IFTYPE_STATION)
+                return RX_DROP_MONITOR;
+
+        switch (stype) {
+        case cpu_to_le16(IEEE80211_STYPE_BEACON):
+        case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
+                /* process for all: mesh, mlme, ibss */
+                break;
+        case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+        case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
+                /* process only for station */
+                if (sdata->vif.type != NL80211_IFTYPE_STATION)
+                        return RX_DROP_MONITOR;
+                break;
+        case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
+        case cpu_to_le16(IEEE80211_STYPE_AUTH):
+                /* process only for ibss */
+                if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
+                        return RX_DROP_MONITOR;
+                break;
+        default:
+                return RX_DROP_MONITOR;
+        }
 
-        if (sdata->vif.type == NL80211_IFTYPE_STATION)
-                return ieee80211_sta_rx_mgmt(sdata, rx->skb);
+        /* queue up frame and kick off work to process it */
+        rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
+        skb_queue_tail(&sdata->skb_queue, rx->skb);
+        ieee80211_queue_work(&rx->local->hw, &sdata->work);
+        if (rx->sta)
+                rx->sta->rx_packets++;
 
-        return RX_DROP_MONITOR;
+        return RX_QUEUED;
 }
 
 static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr,
@@ -2151,7 +2194,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
                 u8 rate_or_pad;
                 __le16 chan_freq;
                 __le16 chan_flags;
-        } __attribute__ ((packed)) *rthdr;
+        } __packed *rthdr;
         struct sk_buff *skb = rx->skb, *skb2;
         struct net_device *prev_dev = NULL;
         struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
@@ -2201,7 +2244,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
                         skb2 = skb_clone(skb, GFP_ATOMIC);
                         if (skb2) {
                                 skb2->dev = prev_dev;
-                                netif_rx(skb2);
+                                netif_receive_skb(skb2);
                         }
                 }
 
@@ -2212,7 +2255,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
 
         if (prev_dev) {
                 skb->dev = prev_dev;
-                netif_rx(skb);
+                netif_receive_skb(skb);
                 skb = NULL;
         } else
                 goto out_free_skb;