author      John W. Linville <linville@tuxdriver.com>    2011-09-19 15:00:16 -0400
committer   John W. Linville <linville@tuxdriver.com>    2011-09-19 15:00:16 -0400
commit      b53d63ecce17c4ddf8636def9f6e8b865c3927f9 (patch)
tree        683ef774fcfb423fa35f61e4326d0ce3f6a7c283 /net/mac80211
parent      765cf9976e937f1cfe9159bf4534967c8bf8eb6d (diff)
parent      12e62d6f7ec475e546b40bece2045da15d6c21ef (diff)
Merge branch 'master' of ssh://infradead/~/public_git/wireless-next into for-davem
Diffstat (limited to 'net/mac80211')
-rw-r--r--   net/mac80211/agg-rx.c               |  19
-rw-r--r--   net/mac80211/agg-tx.c               |  42
-rw-r--r--   net/mac80211/cfg.c                  |  21
-rw-r--r--   net/mac80211/debugfs.c              |   5
-rw-r--r--   net/mac80211/debugfs_netdev.c       |   3
-rw-r--r--   net/mac80211/ht.c                   |   6
-rw-r--r--   net/mac80211/ieee80211_i.h          |   3
-rw-r--r--   net/mac80211/iface.c                |   6
-rw-r--r--   net/mac80211/mesh.c                 |   8
-rw-r--r--   net/mac80211/mesh.h                 |   6
-rw-r--r--   net/mac80211/mesh_hwmp.c            |  31
-rw-r--r--   net/mac80211/mesh_pathtbl.c         | 192
-rw-r--r--   net/mac80211/mesh_plink.c           |   2
-rw-r--r--   net/mac80211/mlme.c                 |  36
-rw-r--r--   net/mac80211/rc80211_minstrel_ht.c  |   3
-rw-r--r--   net/mac80211/rx.c                   |  35
-rw-r--r--   net/mac80211/spectmgmt.c            |   6
-rw-r--r--   net/mac80211/sta_info.c             |  10
-rw-r--r--   net/mac80211/sta_info.h             |   5
-rw-r--r--   net/mac80211/status.c               |  53
-rw-r--r--   net/mac80211/tx.c                   |  26
-rw-r--r--   net/mac80211/util.c                 |  13
-rw-r--r--   net/mac80211/wme.c                  |  16
-rw-r--r--   net/mac80211/wme.h                  |   3
-rw-r--r--   net/mac80211/work.c                 |   6
25 files changed, 329 insertions, 227 deletions
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 9b5bd8cafc20..7c366dfe8da9 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -167,12 +167,8 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d | |||
167 | u16 capab; | 167 | u16 capab; |
168 | 168 | ||
169 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); | 169 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); |
170 | 170 | if (!skb) | |
171 | if (!skb) { | ||
172 | printk(KERN_DEBUG "%s: failed to allocate buffer " | ||
173 | "for addba resp frame\n", sdata->name); | ||
174 | return; | 171 | return; |
175 | } | ||
176 | 172 | ||
177 | skb_reserve(skb, local->hw.extra_tx_headroom); | 173 | skb_reserve(skb, local->hw.extra_tx_headroom); |
178 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | 174 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); |
@@ -279,14 +275,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, | |||
279 | 275 | ||
280 | /* prepare A-MPDU MLME for Rx aggregation */ | 276 | /* prepare A-MPDU MLME for Rx aggregation */ |
281 | tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL); | 277 | tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL); |
282 | if (!tid_agg_rx) { | 278 | if (!tid_agg_rx) |
283 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
284 | if (net_ratelimit()) | ||
285 | printk(KERN_ERR "allocate rx mlme to tid %d failed\n", | ||
286 | tid); | ||
287 | #endif | ||
288 | goto end; | 279 | goto end; |
289 | } | ||
290 | 280 | ||
291 | spin_lock_init(&tid_agg_rx->reorder_lock); | 281 | spin_lock_init(&tid_agg_rx->reorder_lock); |
292 | 282 | ||
@@ -306,11 +296,6 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, | |||
306 | tid_agg_rx->reorder_time = | 296 | tid_agg_rx->reorder_time = |
307 | kcalloc(buf_size, sizeof(unsigned long), GFP_KERNEL); | 297 | kcalloc(buf_size, sizeof(unsigned long), GFP_KERNEL); |
308 | if (!tid_agg_rx->reorder_buf || !tid_agg_rx->reorder_time) { | 298 | if (!tid_agg_rx->reorder_buf || !tid_agg_rx->reorder_time) { |
309 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
310 | if (net_ratelimit()) | ||
311 | printk(KERN_ERR "can not allocate reordering buffer " | ||
312 | "to tid %d\n", tid); | ||
313 | #endif | ||
314 | kfree(tid_agg_rx->reorder_buf); | 299 | kfree(tid_agg_rx->reorder_buf); |
315 | kfree(tid_agg_rx->reorder_time); | 300 | kfree(tid_agg_rx->reorder_time); |
316 | kfree(tid_agg_rx); | 301 | kfree(tid_agg_rx); |
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 018108d1a2fd..3cef5a7281cb 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -68,11 +68,9 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, | |||
68 | 68 | ||
69 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); | 69 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); |
70 | 70 | ||
71 | if (!skb) { | 71 | if (!skb) |
72 | printk(KERN_ERR "%s: failed to allocate buffer " | ||
73 | "for addba request frame\n", sdata->name); | ||
74 | return; | 72 | return; |
75 | } | 73 | |
76 | skb_reserve(skb, local->hw.extra_tx_headroom); | 74 | skb_reserve(skb, local->hw.extra_tx_headroom); |
77 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | 75 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); |
78 | memset(mgmt, 0, 24); | 76 | memset(mgmt, 0, 24); |
@@ -106,19 +104,18 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, | |||
106 | ieee80211_tx_skb(sdata, skb); | 104 | ieee80211_tx_skb(sdata, skb); |
107 | } | 105 | } |
108 | 106 | ||
109 | void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn) | 107 | void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn) |
110 | { | 108 | { |
109 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); | ||
111 | struct ieee80211_local *local = sdata->local; | 110 | struct ieee80211_local *local = sdata->local; |
112 | struct sk_buff *skb; | 111 | struct sk_buff *skb; |
113 | struct ieee80211_bar *bar; | 112 | struct ieee80211_bar *bar; |
114 | u16 bar_control = 0; | 113 | u16 bar_control = 0; |
115 | 114 | ||
116 | skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom); | 115 | skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom); |
117 | if (!skb) { | 116 | if (!skb) |
118 | printk(KERN_ERR "%s: failed to allocate buffer for " | ||
119 | "bar frame\n", sdata->name); | ||
120 | return; | 117 | return; |
121 | } | 118 | |
122 | skb_reserve(skb, local->hw.extra_tx_headroom); | 119 | skb_reserve(skb, local->hw.extra_tx_headroom); |
123 | bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar)); | 120 | bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar)); |
124 | memset(bar, 0, sizeof(*bar)); | 121 | memset(bar, 0, sizeof(*bar)); |
@@ -135,6 +132,7 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1 | |||
135 | IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; | 132 | IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; |
136 | ieee80211_tx_skb(sdata, skb); | 133 | ieee80211_tx_skb(sdata, skb); |
137 | } | 134 | } |
135 | EXPORT_SYMBOL(ieee80211_send_bar); | ||
138 | 136 | ||
139 | void ieee80211_assign_tid_tx(struct sta_info *sta, int tid, | 137 | void ieee80211_assign_tid_tx(struct sta_info *sta, int tid, |
140 | struct tid_ampdu_tx *tid_tx) | 138 | struct tid_ampdu_tx *tid_tx) |
@@ -364,7 +362,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, | |||
364 | return -EINVAL; | 362 | return -EINVAL; |
365 | 363 | ||
366 | if ((tid >= STA_TID_NUM) || | 364 | if ((tid >= STA_TID_NUM) || |
367 | !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) | 365 | !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) || |
366 | (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)) | ||
368 | return -EINVAL; | 367 | return -EINVAL; |
369 | 368 | ||
370 | #ifdef CONFIG_MAC80211_HT_DEBUG | 369 | #ifdef CONFIG_MAC80211_HT_DEBUG |
@@ -413,11 +412,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, | |||
413 | /* prepare A-MPDU MLME for Tx aggregation */ | 412 | /* prepare A-MPDU MLME for Tx aggregation */ |
414 | tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); | 413 | tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); |
415 | if (!tid_tx) { | 414 | if (!tid_tx) { |
416 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
417 | if (net_ratelimit()) | ||
418 | printk(KERN_ERR "allocate tx mlme to tid %d failed\n", | ||
419 | tid); | ||
420 | #endif | ||
421 | ret = -ENOMEM; | 415 | ret = -ENOMEM; |
422 | goto err_unlock_sta; | 416 | goto err_unlock_sta; |
423 | } | 417 | } |
@@ -574,14 +568,9 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, | |||
574 | struct ieee80211_ra_tid *ra_tid; | 568 | struct ieee80211_ra_tid *ra_tid; |
575 | struct sk_buff *skb = dev_alloc_skb(0); | 569 | struct sk_buff *skb = dev_alloc_skb(0); |
576 | 570 | ||
577 | if (unlikely(!skb)) { | 571 | if (unlikely(!skb)) |
578 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
579 | if (net_ratelimit()) | ||
580 | printk(KERN_WARNING "%s: Not enough memory, " | ||
581 | "dropping start BA session", sdata->name); | ||
582 | #endif | ||
583 | return; | 572 | return; |
584 | } | 573 | |
585 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | 574 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; |
586 | memcpy(&ra_tid->ra, ra, ETH_ALEN); | 575 | memcpy(&ra_tid->ra, ra, ETH_ALEN); |
587 | ra_tid->tid = tid; | 576 | ra_tid->tid = tid; |
@@ -727,14 +716,9 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, | |||
727 | struct ieee80211_ra_tid *ra_tid; | 716 | struct ieee80211_ra_tid *ra_tid; |
728 | struct sk_buff *skb = dev_alloc_skb(0); | 717 | struct sk_buff *skb = dev_alloc_skb(0); |
729 | 718 | ||
730 | if (unlikely(!skb)) { | 719 | if (unlikely(!skb)) |
731 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
732 | if (net_ratelimit()) | ||
733 | printk(KERN_WARNING "%s: Not enough memory, " | ||
734 | "dropping stop BA session", sdata->name); | ||
735 | #endif | ||
736 | return; | 720 | return; |
737 | } | 721 | |
738 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | 722 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; |
739 | memcpy(&ra_tid->ra, ra, ETH_ALEN); | 723 | memcpy(&ra_tid->ra, ra, ETH_ALEN); |
740 | ra_tid->tid = tid; | 724 | ra_tid->tid = tid; |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 4baa03b1c251..567e3e54685a 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -455,6 +455,20 @@ static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev, | |||
455 | return ret; | 455 | return ret; |
456 | } | 456 | } |
457 | 457 | ||
458 | static void ieee80211_config_ap_ssid(struct ieee80211_sub_if_data *sdata, | ||
459 | struct beacon_parameters *params) | ||
460 | { | ||
461 | struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; | ||
462 | |||
463 | bss_conf->ssid_len = params->ssid_len; | ||
464 | |||
465 | if (params->ssid_len) | ||
466 | memcpy(bss_conf->ssid, params->ssid, params->ssid_len); | ||
467 | |||
468 | bss_conf->hidden_ssid = | ||
469 | (params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE); | ||
470 | } | ||
471 | |||
458 | /* | 472 | /* |
459 | * This handles both adding a beacon and setting new beacon info | 473 | * This handles both adding a beacon and setting new beacon info |
460 | */ | 474 | */ |
@@ -548,8 +562,11 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata, | |||
548 | 562 | ||
549 | kfree(old); | 563 | kfree(old); |
550 | 564 | ||
565 | ieee80211_config_ap_ssid(sdata, params); | ||
566 | |||
551 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | | 567 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | |
552 | BSS_CHANGED_BEACON); | 568 | BSS_CHANGED_BEACON | |
569 | BSS_CHANGED_SSID); | ||
553 | return 0; | 570 | return 0; |
554 | } | 571 | } |
555 | 572 | ||
@@ -921,7 +938,7 @@ static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev, | |||
921 | if (dst) | 938 | if (dst) |
922 | return mesh_path_del(dst, sdata); | 939 | return mesh_path_del(dst, sdata); |
923 | 940 | ||
924 | mesh_path_flush(sdata); | 941 | mesh_path_flush_by_iface(sdata); |
925 | return 0; | 942 | return 0; |
926 | } | 943 | } |
927 | 944 | ||
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 267ed45ef6a2..c9141168fd43 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -297,6 +297,9 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf, | |||
297 | char *buf = kzalloc(mxln, GFP_KERNEL); | 297 | char *buf = kzalloc(mxln, GFP_KERNEL); |
298 | int sf = 0; /* how many written so far */ | 298 | int sf = 0; /* how many written so far */ |
299 | 299 | ||
300 | if (!buf) | ||
301 | return 0; | ||
302 | |||
300 | sf += snprintf(buf, mxln - sf, "0x%x\n", local->hw.flags); | 303 | sf += snprintf(buf, mxln - sf, "0x%x\n", local->hw.flags); |
301 | if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) | 304 | if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) |
302 | sf += snprintf(buf + sf, mxln - sf, "HAS_RATE_CONTROL\n"); | 305 | sf += snprintf(buf + sf, mxln - sf, "HAS_RATE_CONTROL\n"); |
@@ -347,6 +350,8 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf, | |||
347 | sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n"); | 350 | sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n"); |
348 | if (local->hw.flags & IEEE80211_HW_AP_LINK_PS) | 351 | if (local->hw.flags & IEEE80211_HW_AP_LINK_PS) |
349 | sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n"); | 352 | sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n"); |
353 | if (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW) | ||
354 | sf += snprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n"); | ||
350 | 355 | ||
351 | rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); | 356 | rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); |
352 | kfree(buf); | 357 | kfree(buf); |
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 6e8eab7919e2..dd0462917518 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -340,6 +340,8 @@ IEEE80211_IF_FILE(fwded_mcast, u.mesh.mshstats.fwded_mcast, DEC); | |||
340 | IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC); | 340 | IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC); |
341 | IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC); | 341 | IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC); |
342 | IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC); | 342 | IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC); |
343 | IEEE80211_IF_FILE(dropped_frames_congestion, | ||
344 | u.mesh.mshstats.dropped_frames_congestion, DEC); | ||
343 | IEEE80211_IF_FILE(dropped_frames_no_route, | 345 | IEEE80211_IF_FILE(dropped_frames_no_route, |
344 | u.mesh.mshstats.dropped_frames_no_route, DEC); | 346 | u.mesh.mshstats.dropped_frames_no_route, DEC); |
345 | IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC); | 347 | IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC); |
@@ -463,6 +465,7 @@ static void add_mesh_stats(struct ieee80211_sub_if_data *sdata) | |||
463 | MESHSTATS_ADD(fwded_frames); | 465 | MESHSTATS_ADD(fwded_frames); |
464 | MESHSTATS_ADD(dropped_frames_ttl); | 466 | MESHSTATS_ADD(dropped_frames_ttl); |
465 | MESHSTATS_ADD(dropped_frames_no_route); | 467 | MESHSTATS_ADD(dropped_frames_no_route); |
468 | MESHSTATS_ADD(dropped_frames_congestion); | ||
466 | MESHSTATS_ADD(estab_plinks); | 469 | MESHSTATS_ADD(estab_plinks); |
467 | #undef MESHSTATS_ADD | 470 | #undef MESHSTATS_ADD |
468 | } | 471 | } |
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 7cfc286946c0..2b9b52c69569 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -186,12 +186,8 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, | |||
186 | u16 params; | 186 | u16 params; |
187 | 187 | ||
188 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); | 188 | skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); |
189 | 189 | if (!skb) | |
190 | if (!skb) { | ||
191 | printk(KERN_ERR "%s: failed to allocate buffer " | ||
192 | "for delba frame\n", sdata->name); | ||
193 | return; | 190 | return; |
194 | } | ||
195 | 191 | ||
196 | skb_reserve(skb, local->hw.extra_tx_headroom); | 192 | skb_reserve(skb, local->hw.extra_tx_headroom); |
197 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | 193 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index c204cee1189c..21186e280ceb 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -261,6 +261,7 @@ struct mesh_stats { | |||
261 | __u32 fwded_frames; /* Mesh total forwarded frames */ | 261 | __u32 fwded_frames; /* Mesh total forwarded frames */ |
262 | __u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0*/ | 262 | __u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0*/ |
263 | __u32 dropped_frames_no_route; /* Not transmitted, no route found */ | 263 | __u32 dropped_frames_no_route; /* Not transmitted, no route found */ |
264 | __u32 dropped_frames_congestion;/* Not forwarded due to congestion */ | ||
264 | atomic_t estab_plinks; | 265 | atomic_t estab_plinks; |
265 | }; | 266 | }; |
266 | 267 | ||
@@ -670,6 +671,7 @@ enum queue_stop_reason { | |||
670 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION, | 671 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION, |
671 | IEEE80211_QUEUE_STOP_REASON_SUSPEND, | 672 | IEEE80211_QUEUE_STOP_REASON_SUSPEND, |
672 | IEEE80211_QUEUE_STOP_REASON_SKB_ADD, | 673 | IEEE80211_QUEUE_STOP_REASON_SKB_ADD, |
674 | IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE, | ||
673 | }; | 675 | }; |
674 | 676 | ||
675 | #ifdef CONFIG_MAC80211_LEDS | 677 | #ifdef CONFIG_MAC80211_LEDS |
@@ -1186,7 +1188,6 @@ struct ieee80211_tx_status_rtap_hdr { | |||
1186 | void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband, | 1188 | void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband, |
1187 | struct ieee80211_ht_cap *ht_cap_ie, | 1189 | struct ieee80211_ht_cap *ht_cap_ie, |
1188 | struct ieee80211_sta_ht_cap *ht_cap); | 1190 | struct ieee80211_sta_ht_cap *ht_cap); |
1189 | void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn); | ||
1190 | void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, | 1191 | void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, |
1191 | const u8 *da, u16 tid, | 1192 | const u8 *da, u16 tid, |
1192 | u16 initiator, u16 reason_code); | 1193 | u16 initiator, u16 reason_code); |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index d10dc4df60b6..a33c58f5137c 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1214,6 +1214,9 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata) | |||
1214 | list_del_rcu(&sdata->list); | 1214 | list_del_rcu(&sdata->list); |
1215 | mutex_unlock(&sdata->local->iflist_mtx); | 1215 | mutex_unlock(&sdata->local->iflist_mtx); |
1216 | 1216 | ||
1217 | if (ieee80211_vif_is_mesh(&sdata->vif)) | ||
1218 | mesh_path_flush_by_iface(sdata); | ||
1219 | |||
1217 | synchronize_rcu(); | 1220 | synchronize_rcu(); |
1218 | unregister_netdevice(sdata->dev); | 1221 | unregister_netdevice(sdata->dev); |
1219 | } | 1222 | } |
@@ -1233,6 +1236,9 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local) | |||
1233 | list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { | 1236 | list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { |
1234 | list_del(&sdata->list); | 1237 | list_del(&sdata->list); |
1235 | 1238 | ||
1239 | if (ieee80211_vif_is_mesh(&sdata->vif)) | ||
1240 | mesh_path_flush_by_iface(sdata); | ||
1241 | |||
1236 | unregister_netdevice_queue(sdata->dev, &unreg_list); | 1242 | unregister_netdevice_queue(sdata->dev, &unreg_list); |
1237 | } | 1243 | } |
1238 | mutex_unlock(&local->iflist_mtx); | 1244 | mutex_unlock(&local->iflist_mtx); |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 28ab510e621a..a4225ae69681 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -200,10 +200,9 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr, | |||
200 | } | 200 | } |
201 | 201 | ||
202 | p = kmem_cache_alloc(rm_cache, GFP_ATOMIC); | 202 | p = kmem_cache_alloc(rm_cache, GFP_ATOMIC); |
203 | if (!p) { | 203 | if (!p) |
204 | printk(KERN_DEBUG "o11s: could not allocate RMC entry\n"); | ||
205 | return 0; | 204 | return 0; |
206 | } | 205 | |
207 | p->seqnum = seqnum; | 206 | p->seqnum = seqnum; |
208 | p->exp_time = jiffies + RMC_TIMEOUT; | 207 | p->exp_time = jiffies + RMC_TIMEOUT; |
209 | memcpy(p->sa, sa, ETH_ALEN); | 208 | memcpy(p->sa, sa, ETH_ALEN); |
@@ -464,8 +463,7 @@ int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, | |||
464 | memcpy(hdr->addr3, meshsa, ETH_ALEN); | 463 | memcpy(hdr->addr3, meshsa, ETH_ALEN); |
465 | return 24; | 464 | return 24; |
466 | } else { | 465 | } else { |
467 | *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | | 466 | *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); |
468 | IEEE80211_FCTL_TODS); | ||
469 | /* RA TA DA SA */ | 467 | /* RA TA DA SA */ |
470 | memset(hdr->addr1, 0, ETH_ALEN); /* RA is resolved later */ | 468 | memset(hdr->addr1, 0, ETH_ALEN); /* RA is resolved later */ |
471 | memcpy(hdr->addr2, meshsa, ETH_ALEN); | 469 | memcpy(hdr->addr2, meshsa, ETH_ALEN); |
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 20272072171f..7118e8e8855c 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -80,7 +80,9 @@ enum mesh_deferred_task_flags { | |||
80 | * retry | 80 | * retry |
81 | * @discovery_retries: number of discovery retries | 81 | * @discovery_retries: number of discovery retries |
82 | * @flags: mesh path flags, as specified on &enum mesh_path_flags | 82 | * @flags: mesh path flags, as specified on &enum mesh_path_flags |
83 | * @state_lock: mesh path state lock | 83 | * @state_lock: mesh path state lock used to protect changes to the |
84 | * mpath itself. No need to take this lock when adding or removing | ||
85 | * an mpath to a hash bucket on a path table. | ||
84 | * @is_gate: the destination station of this path is a mesh gate | 86 | * @is_gate: the destination station of this path is a mesh gate |
85 | * | 87 | * |
86 | * | 88 | * |
@@ -238,7 +240,6 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, | |||
238 | struct ieee80211_sub_if_data *sdata); | 240 | struct ieee80211_sub_if_data *sdata); |
239 | void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop); | 241 | void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop); |
240 | void mesh_path_expire(struct ieee80211_sub_if_data *sdata); | 242 | void mesh_path_expire(struct ieee80211_sub_if_data *sdata); |
241 | void mesh_path_flush(struct ieee80211_sub_if_data *sdata); | ||
242 | void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, | 243 | void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, |
243 | struct ieee80211_mgmt *mgmt, size_t len); | 244 | struct ieee80211_mgmt *mgmt, size_t len); |
244 | int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata); | 245 | int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata); |
@@ -275,6 +276,7 @@ void mesh_pathtbl_unregister(void); | |||
275 | int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata); | 276 | int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata); |
276 | void mesh_path_timer(unsigned long data); | 277 | void mesh_path_timer(unsigned long data); |
277 | void mesh_path_flush_by_nexthop(struct sta_info *sta); | 278 | void mesh_path_flush_by_nexthop(struct sta_info *sta); |
279 | void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata); | ||
278 | void mesh_path_discard_frame(struct sk_buff *skb, | 280 | void mesh_path_discard_frame(struct sk_buff *skb, |
279 | struct ieee80211_sub_if_data *sdata); | 281 | struct ieee80211_sub_if_data *sdata); |
280 | void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata); | 282 | void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata); |
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index fd4f76a3e139..6df7913d7ca4 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -8,6 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
11 | #include "wme.h" | ||
11 | #include "mesh.h" | 12 | #include "mesh.h" |
12 | 13 | ||
13 | #ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG | 14 | #ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG |
@@ -202,6 +203,26 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, | |||
202 | return 0; | 203 | return 0; |
203 | } | 204 | } |
204 | 205 | ||
206 | |||
207 | /* Headroom is not adjusted. Caller should ensure that skb has sufficient | ||
208 | * headroom in case the frame is encrypted. */ | ||
209 | static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata, | ||
210 | struct sk_buff *skb) | ||
211 | { | ||
212 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
213 | |||
214 | skb_set_mac_header(skb, 0); | ||
215 | skb_set_network_header(skb, 0); | ||
216 | skb_set_transport_header(skb, 0); | ||
217 | |||
218 | /* Send all internal mgmt frames on VO. Accordingly set TID to 7. */ | ||
219 | skb_set_queue_mapping(skb, IEEE80211_AC_VO); | ||
220 | skb->priority = 7; | ||
221 | |||
222 | info->control.vif = &sdata->vif; | ||
223 | ieee80211_set_qos_hdr(sdata, skb); | ||
224 | } | ||
225 | |||
205 | /** | 226 | /** |
206 | * mesh_send_path error - Sends a PERR mesh management frame | 227 | * mesh_send_path error - Sends a PERR mesh management frame |
207 | * | 228 | * |
@@ -209,6 +230,10 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, | |||
209 | * @target_sn: SN of the broken destination | 230 | * @target_sn: SN of the broken destination |
210 | * @target_rcode: reason code for this PERR | 231 | * @target_rcode: reason code for this PERR |
211 | * @ra: node this frame is addressed to | 232 | * @ra: node this frame is addressed to |
233 | * | ||
234 | * Note: This function may be called with driver locks taken that the driver | ||
235 | * also acquires in the TX path. To avoid a deadlock we don't transmit the | ||
236 | * frame directly but add it to the pending queue instead. | ||
212 | */ | 237 | */ |
213 | int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, | 238 | int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, |
214 | __le16 target_rcode, const u8 *ra, | 239 | __le16 target_rcode, const u8 *ra, |
@@ -222,7 +247,7 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, | |||
222 | 247 | ||
223 | if (!skb) | 248 | if (!skb) |
224 | return -1; | 249 | return -1; |
225 | skb_reserve(skb, local->hw.extra_tx_headroom); | 250 | skb_reserve(skb, local->tx_headroom + local->hw.extra_tx_headroom); |
226 | /* 25 is the size of the common mgmt part (24) plus the size of the | 251 | /* 25 is the size of the common mgmt part (24) plus the size of the |
227 | * common action part (1) | 252 | * common action part (1) |
228 | */ | 253 | */ |
@@ -263,7 +288,9 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, | |||
263 | pos += 4; | 288 | pos += 4; |
264 | memcpy(pos, &target_rcode, 2); | 289 | memcpy(pos, &target_rcode, 2); |
265 | 290 | ||
266 | ieee80211_tx_skb(sdata, skb); | 291 | /* see note in function header */ |
292 | prepare_frame_for_deferred_tx(sdata, skb); | ||
293 | ieee80211_add_pending_skb(local, skb); | ||
267 | return 0; | 294 | return 0; |
268 | } | 295 | } |
269 | 296 | ||
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index f97d17cb073c..7f54c5042235 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
15 | #include <linux/string.h> | 15 | #include <linux/string.h> |
16 | #include <net/mac80211.h> | 16 | #include <net/mac80211.h> |
17 | #include "wme.h" | ||
17 | #include "ieee80211_i.h" | 18 | #include "ieee80211_i.h" |
18 | #include "mesh.h" | 19 | #include "mesh.h" |
19 | 20 | ||
@@ -48,8 +49,10 @@ static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */ | |||
48 | int mesh_paths_generation; | 49 | int mesh_paths_generation; |
49 | 50 | ||
50 | /* This lock will have the grow table function as writer and add / delete nodes | 51 | /* This lock will have the grow table function as writer and add / delete nodes |
51 | * as readers. When reading the table (i.e. doing lookups) we are well protected | 52 | * as readers. RCU provides sufficient protection only when reading the table |
52 | * by RCU | 53 | * (i.e. doing lookups). Adding or adding or removing nodes requires we take |
54 | * the read lock or we risk operating on an old table. The write lock is only | ||
55 | * needed when modifying the number of buckets a table. | ||
53 | */ | 56 | */ |
54 | static DEFINE_RWLOCK(pathtbl_resize_lock); | 57 | static DEFINE_RWLOCK(pathtbl_resize_lock); |
55 | 58 | ||
@@ -210,6 +213,7 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta) | |||
210 | struct ieee80211_hdr *hdr; | 213 | struct ieee80211_hdr *hdr; |
211 | struct sk_buff_head tmpq; | 214 | struct sk_buff_head tmpq; |
212 | unsigned long flags; | 215 | unsigned long flags; |
216 | struct ieee80211_sub_if_data *sdata = mpath->sdata; | ||
213 | 217 | ||
214 | rcu_assign_pointer(mpath->next_hop, sta); | 218 | rcu_assign_pointer(mpath->next_hop, sta); |
215 | 219 | ||
@@ -220,6 +224,8 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta) | |||
220 | while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) { | 224 | while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) { |
221 | hdr = (struct ieee80211_hdr *) skb->data; | 225 | hdr = (struct ieee80211_hdr *) skb->data; |
222 | memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); | 226 | memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); |
227 | skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb)); | ||
228 | ieee80211_set_qos_hdr(sdata, skb); | ||
223 | __skb_queue_tail(&tmpq, skb); | 229 | __skb_queue_tail(&tmpq, skb); |
224 | } | 230 | } |
225 | 231 | ||
@@ -333,25 +339,14 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath, | |||
333 | } | 339 | } |
334 | 340 | ||
335 | 341 | ||
336 | /** | 342 | static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst, |
337 | * mesh_path_lookup - look up a path in the mesh path table | 343 | struct ieee80211_sub_if_data *sdata) |
338 | * @dst: hardware address (ETH_ALEN length) of destination | ||
339 | * @sdata: local subif | ||
340 | * | ||
341 | * Returns: pointer to the mesh path structure, or NULL if not found | ||
342 | * | ||
343 | * Locking: must be called within a read rcu section. | ||
344 | */ | ||
345 | struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) | ||
346 | { | 344 | { |
347 | struct mesh_path *mpath; | 345 | struct mesh_path *mpath; |
348 | struct hlist_node *n; | 346 | struct hlist_node *n; |
349 | struct hlist_head *bucket; | 347 | struct hlist_head *bucket; |
350 | struct mesh_table *tbl; | ||
351 | struct mpath_node *node; | 348 | struct mpath_node *node; |
352 | 349 | ||
353 | tbl = rcu_dereference(mesh_paths); | ||
354 | |||
355 | bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; | 350 | bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; |
356 | hlist_for_each_entry_rcu(node, n, bucket, list) { | 351 | hlist_for_each_entry_rcu(node, n, bucket, list) { |
357 | mpath = node->mpath; | 352 | mpath = node->mpath; |
@@ -359,8 +354,7 @@ struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) | |||
359 | memcmp(dst, mpath->dst, ETH_ALEN) == 0) { | 354 | memcmp(dst, mpath->dst, ETH_ALEN) == 0) { |
360 | if (MPATH_EXPIRED(mpath)) { | 355 | if (MPATH_EXPIRED(mpath)) { |
361 | spin_lock_bh(&mpath->state_lock); | 356 | spin_lock_bh(&mpath->state_lock); |
362 | if (MPATH_EXPIRED(mpath)) | 357 | mpath->flags &= ~MESH_PATH_ACTIVE; |
363 | mpath->flags &= ~MESH_PATH_ACTIVE; | ||
364 | spin_unlock_bh(&mpath->state_lock); | 358 | spin_unlock_bh(&mpath->state_lock); |
365 | } | 359 | } |
366 | return mpath; | 360 | return mpath; |
@@ -369,31 +363,23 @@ struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) | |||
369 | return NULL; | 363 | return NULL; |
370 | } | 364 | } |
371 | 365 | ||
372 | struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) | 366 | /** |
367 | * mesh_path_lookup - look up a path in the mesh path table | ||
368 | * @dst: hardware address (ETH_ALEN length) of destination | ||
369 | * @sdata: local subif | ||
370 | * | ||
371 | * Returns: pointer to the mesh path structure, or NULL if not found | ||
372 | * | ||
373 | * Locking: must be called within a read rcu section. | ||
374 | */ | ||
375 | struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) | ||
373 | { | 376 | { |
374 | struct mesh_path *mpath; | 377 | return path_lookup(rcu_dereference(mesh_paths), dst, sdata); |
375 | struct hlist_node *n; | 378 | } |
376 | struct hlist_head *bucket; | ||
377 | struct mesh_table *tbl; | ||
378 | struct mpath_node *node; | ||
379 | |||
380 | tbl = rcu_dereference(mpp_paths); | ||
381 | 379 | ||
382 | bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; | 380 | struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) |
383 | hlist_for_each_entry_rcu(node, n, bucket, list) { | 381 | { |
384 | mpath = node->mpath; | 382 | return path_lookup(rcu_dereference(mpp_paths), dst, sdata); |
385 | if (mpath->sdata == sdata && | ||
386 | memcmp(dst, mpath->dst, ETH_ALEN) == 0) { | ||
387 | if (MPATH_EXPIRED(mpath)) { | ||
388 | spin_lock_bh(&mpath->state_lock); | ||
389 | if (MPATH_EXPIRED(mpath)) | ||
390 | mpath->flags &= ~MESH_PATH_ACTIVE; | ||
391 | spin_unlock_bh(&mpath->state_lock); | ||
392 | } | ||
393 | return mpath; | ||
394 | } | ||
395 | } | ||
396 | return NULL; | ||
397 | } | 383 | } |
398 | 384 | ||
399 | 385 | ||
@@ -420,8 +406,7 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data | |||
420 | if (j++ == idx) { | 406 | if (j++ == idx) { |
421 | if (MPATH_EXPIRED(node->mpath)) { | 407 | if (MPATH_EXPIRED(node->mpath)) { |
422 | spin_lock_bh(&node->mpath->state_lock); | 408 | spin_lock_bh(&node->mpath->state_lock); |
423 | if (MPATH_EXPIRED(node->mpath)) | 409 | node->mpath->flags &= ~MESH_PATH_ACTIVE; |
424 | node->mpath->flags &= ~MESH_PATH_ACTIVE; | ||
425 | spin_unlock_bh(&node->mpath->state_lock); | 410 | spin_unlock_bh(&node->mpath->state_lock); |
426 | } | 411 | } |
427 | return node->mpath; | 412 | return node->mpath; |
@@ -776,22 +761,47 @@ void mesh_plink_broken(struct sta_info *sta) | |||
776 | tbl = rcu_dereference(mesh_paths); | 761 | tbl = rcu_dereference(mesh_paths); |
777 | for_each_mesh_entry(tbl, p, node, i) { | 762 | for_each_mesh_entry(tbl, p, node, i) { |
778 | mpath = node->mpath; | 763 | mpath = node->mpath; |
779 | spin_lock_bh(&mpath->state_lock); | ||
780 | if (rcu_dereference(mpath->next_hop) == sta && | 764 | if (rcu_dereference(mpath->next_hop) == sta && |
781 | mpath->flags & MESH_PATH_ACTIVE && | 765 | mpath->flags & MESH_PATH_ACTIVE && |
782 | !(mpath->flags & MESH_PATH_FIXED)) { | 766 | !(mpath->flags & MESH_PATH_FIXED)) { |
767 | spin_lock_bh(&mpath->state_lock); | ||
783 | mpath->flags &= ~MESH_PATH_ACTIVE; | 768 | mpath->flags &= ~MESH_PATH_ACTIVE; |
784 | ++mpath->sn; | 769 | ++mpath->sn; |
785 | spin_unlock_bh(&mpath->state_lock); | 770 | spin_unlock_bh(&mpath->state_lock); |
786 | mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, | 771 | mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, |
787 | mpath->dst, cpu_to_le32(mpath->sn), | 772 | mpath->dst, cpu_to_le32(mpath->sn), |
788 | reason, bcast, sdata); | 773 | reason, bcast, sdata); |
789 | } else | 774 | } |
790 | spin_unlock_bh(&mpath->state_lock); | ||
791 | } | 775 | } |
792 | rcu_read_unlock(); | 776 | rcu_read_unlock(); |
793 | } | 777 | } |
794 | 778 | ||
779 | static void mesh_path_node_reclaim(struct rcu_head *rp) | ||
780 | { | ||
781 | struct mpath_node *node = container_of(rp, struct mpath_node, rcu); | ||
782 | struct ieee80211_sub_if_data *sdata = node->mpath->sdata; | ||
783 | |||
784 | del_timer_sync(&node->mpath->timer); | ||
785 | atomic_dec(&sdata->u.mesh.mpaths); | ||
786 | kfree(node->mpath); | ||
787 | kfree(node); | ||
788 | } | ||
789 | |||
790 | /* needs to be called with the corresponding hashwlock taken */ | ||
791 | static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) | ||
792 | { | ||
793 | struct mesh_path *mpath; | ||
794 | mpath = node->mpath; | ||
795 | spin_lock(&mpath->state_lock); | ||
796 | mpath->flags |= MESH_PATH_RESOLVING; | ||
797 | if (mpath->is_gate) | ||
798 | mesh_gate_del(tbl, mpath); | ||
799 | hlist_del_rcu(&node->list); | ||
800 | call_rcu(&node->rcu, mesh_path_node_reclaim); | ||
801 | spin_unlock(&mpath->state_lock); | ||
802 | atomic_dec(&tbl->entries); | ||
803 | } | ||
804 | |||
795 | /** | 805 | /** |
796 | * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches | 806 | * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches |
797 | * | 807 | * |
@@ -812,42 +822,59 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta) | |||
812 | int i; | 822 | int i; |
813 | 823 | ||
814 | rcu_read_lock(); | 824 | rcu_read_lock(); |
815 | tbl = rcu_dereference(mesh_paths); | 825 | read_lock_bh(&pathtbl_resize_lock); |
826 | tbl = resize_dereference_mesh_paths(); | ||
816 | for_each_mesh_entry(tbl, p, node, i) { | 827 | for_each_mesh_entry(tbl, p, node, i) { |
817 | mpath = node->mpath; | 828 | mpath = node->mpath; |
818 | if (rcu_dereference(mpath->next_hop) == sta) | 829 | if (rcu_dereference(mpath->next_hop) == sta) { |
819 | mesh_path_del(mpath->dst, mpath->sdata); | 830 | spin_lock_bh(&tbl->hashwlock[i]); |
831 | __mesh_path_del(tbl, node); | ||
832 | spin_unlock_bh(&tbl->hashwlock[i]); | ||
833 | } | ||
820 | } | 834 | } |
835 | read_unlock_bh(&pathtbl_resize_lock); | ||
821 | rcu_read_unlock(); | 836 | rcu_read_unlock(); |
822 | } | 837 | } |
823 | 838 | ||
824 | void mesh_path_flush(struct ieee80211_sub_if_data *sdata) | 839 | static void table_flush_by_iface(struct mesh_table *tbl, |
840 | struct ieee80211_sub_if_data *sdata) | ||
825 | { | 841 | { |
826 | struct mesh_table *tbl; | ||
827 | struct mesh_path *mpath; | 842 | struct mesh_path *mpath; |
828 | struct mpath_node *node; | 843 | struct mpath_node *node; |
829 | struct hlist_node *p; | 844 | struct hlist_node *p; |
830 | int i; | 845 | int i; |
831 | 846 | ||
832 | rcu_read_lock(); | 847 | WARN_ON(!rcu_read_lock_held()); |
833 | tbl = rcu_dereference(mesh_paths); | ||
834 | for_each_mesh_entry(tbl, p, node, i) { | 848 | for_each_mesh_entry(tbl, p, node, i) { |
835 | mpath = node->mpath; | 849 | mpath = node->mpath; |
836 | if (mpath->sdata == sdata) | 850 | if (mpath->sdata != sdata) |
837 | mesh_path_del(mpath->dst, mpath->sdata); | 851 | continue; |
852 | spin_lock_bh(&tbl->hashwlock[i]); | ||
853 | __mesh_path_del(tbl, node); | ||
854 | spin_unlock_bh(&tbl->hashwlock[i]); | ||
838 | } | 855 | } |
839 | rcu_read_unlock(); | ||
840 | } | 856 | } |
841 | 857 | ||
842 | static void mesh_path_node_reclaim(struct rcu_head *rp) | 858 | /** |
859 | * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface | ||
860 | * | ||
861 | * This function deletes both mesh paths as well as mesh portal paths. | ||
862 | * | ||
863 | * @sdata - interface data to match | ||
864 | * | ||
865 | */ | ||
866 | void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) | ||
843 | { | 867 | { |
844 | struct mpath_node *node = container_of(rp, struct mpath_node, rcu); | 868 | struct mesh_table *tbl; |
845 | struct ieee80211_sub_if_data *sdata = node->mpath->sdata; | ||
846 | 869 | ||
847 | del_timer_sync(&node->mpath->timer); | 870 | rcu_read_lock(); |
848 | atomic_dec(&sdata->u.mesh.mpaths); | 871 | read_lock_bh(&pathtbl_resize_lock); |
849 | kfree(node->mpath); | 872 | tbl = resize_dereference_mesh_paths(); |
850 | kfree(node); | 873 | table_flush_by_iface(tbl, sdata); |
874 | tbl = resize_dereference_mpp_paths(); | ||
875 | table_flush_by_iface(tbl, sdata); | ||
876 | read_unlock_bh(&pathtbl_resize_lock); | ||
877 | rcu_read_unlock(); | ||
851 | } | 878 | } |
852 | 879 | ||
853 | /** | 880 | /** |
@@ -878,14 +905,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) | |||
878 | mpath = node->mpath; | 905 | mpath = node->mpath; |
879 | if (mpath->sdata == sdata && | 906 | if (mpath->sdata == sdata && |
880 | memcmp(addr, mpath->dst, ETH_ALEN) == 0) { | 907 | memcmp(addr, mpath->dst, ETH_ALEN) == 0) { |
881 | spin_lock_bh(&mpath->state_lock); | 908 | __mesh_path_del(tbl, node); |
882 | if (mpath->is_gate) | ||
883 | mesh_gate_del(tbl, mpath); | ||
884 | mpath->flags |= MESH_PATH_RESOLVING; | ||
885 | hlist_del_rcu(&node->list); | ||
886 | call_rcu(&node->rcu, mesh_path_node_reclaim); | ||
887 | atomic_dec(&tbl->entries); | ||
888 | spin_unlock_bh(&mpath->state_lock); | ||
889 | goto enddel; | 909 | goto enddel; |
890 | } | 910 | } |
891 | } | 911 | } |
@@ -991,9 +1011,14 @@ void mesh_path_discard_frame(struct sk_buff *skb, | |||
991 | 1011 | ||
992 | da = hdr->addr3; | 1012 | da = hdr->addr3; |
993 | ra = hdr->addr1; | 1013 | ra = hdr->addr1; |
1014 | rcu_read_lock(); | ||
994 | mpath = mesh_path_lookup(da, sdata); | 1015 | mpath = mesh_path_lookup(da, sdata); |
995 | if (mpath) | 1016 | if (mpath) { |
1017 | spin_lock_bh(&mpath->state_lock); | ||
996 | sn = ++mpath->sn; | 1018 | sn = ++mpath->sn; |
1019 | spin_unlock_bh(&mpath->state_lock); | ||
1020 | } | ||
1021 | rcu_read_unlock(); | ||
997 | mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data, | 1022 | mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data, |
998 | cpu_to_le32(sn), reason, ra, sdata); | 1023 | cpu_to_le32(sn), reason, ra, sdata); |
999 | } | 1024 | } |
@@ -1074,6 +1099,7 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl) | |||
1074 | int mesh_pathtbl_init(void) | 1099 | int mesh_pathtbl_init(void) |
1075 | { | 1100 | { |
1076 | struct mesh_table *tbl_path, *tbl_mpp; | 1101 | struct mesh_table *tbl_path, *tbl_mpp; |
1102 | int ret; | ||
1077 | 1103 | ||
1078 | tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); | 1104 | tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); |
1079 | if (!tbl_path) | 1105 | if (!tbl_path) |
@@ -1082,18 +1108,26 @@ int mesh_pathtbl_init(void) | |||
1082 | tbl_path->copy_node = &mesh_path_node_copy; | 1108 | tbl_path->copy_node = &mesh_path_node_copy; |
1083 | tbl_path->mean_chain_len = MEAN_CHAIN_LEN; | 1109 | tbl_path->mean_chain_len = MEAN_CHAIN_LEN; |
1084 | tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC); | 1110 | tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC); |
1111 | if (!tbl_path->known_gates) { | ||
1112 | ret = -ENOMEM; | ||
1113 | goto free_path; | ||
1114 | } | ||
1085 | INIT_HLIST_HEAD(tbl_path->known_gates); | 1115 | INIT_HLIST_HEAD(tbl_path->known_gates); |
1086 | 1116 | ||
1087 | 1117 | ||
1088 | tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); | 1118 | tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); |
1089 | if (!tbl_mpp) { | 1119 | if (!tbl_mpp) { |
1090 | mesh_table_free(tbl_path, true); | 1120 | ret = -ENOMEM; |
1091 | return -ENOMEM; | 1121 | goto free_path; |
1092 | } | 1122 | } |
1093 | tbl_mpp->free_node = &mesh_path_node_free; | 1123 | tbl_mpp->free_node = &mesh_path_node_free; |
1094 | tbl_mpp->copy_node = &mesh_path_node_copy; | 1124 | tbl_mpp->copy_node = &mesh_path_node_copy; |
1095 | tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN; | 1125 | tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN; |
1096 | tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC); | 1126 | tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC); |
1127 | if (!tbl_mpp->known_gates) { | ||
1128 | ret = -ENOMEM; | ||
1129 | goto free_mpp; | ||
1130 | } | ||
1097 | INIT_HLIST_HEAD(tbl_mpp->known_gates); | 1131 | INIT_HLIST_HEAD(tbl_mpp->known_gates); |
1098 | 1132 | ||
1099 | /* Need no locking since this is during init */ | 1133 | /* Need no locking since this is during init */ |
@@ -1101,6 +1135,12 @@ int mesh_pathtbl_init(void) | |||
1101 | RCU_INIT_POINTER(mpp_paths, tbl_mpp); | 1135 | RCU_INIT_POINTER(mpp_paths, tbl_mpp); |
1102 | 1136 | ||
1103 | return 0; | 1137 | return 0; |
1138 | |||
1139 | free_mpp: | ||
1140 | mesh_table_free(tbl_mpp, true); | ||
1141 | free_path: | ||
1142 | mesh_table_free(tbl_path, true); | ||
1143 | return ret; | ||
1104 | } | 1144 | } |
1105 | 1145 | ||
1106 | void mesh_path_expire(struct ieee80211_sub_if_data *sdata) | 1146 | void mesh_path_expire(struct ieee80211_sub_if_data *sdata) |
@@ -1117,14 +1157,10 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata) | |||
1117 | if (node->mpath->sdata != sdata) | 1157 | if (node->mpath->sdata != sdata) |
1118 | continue; | 1158 | continue; |
1119 | mpath = node->mpath; | 1159 | mpath = node->mpath; |
1120 | spin_lock_bh(&mpath->state_lock); | ||
1121 | if ((!(mpath->flags & MESH_PATH_RESOLVING)) && | 1160 | if ((!(mpath->flags & MESH_PATH_RESOLVING)) && |
1122 | (!(mpath->flags & MESH_PATH_FIXED)) && | 1161 | (!(mpath->flags & MESH_PATH_FIXED)) && |
1123 | time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) { | 1162 | time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) |
1124 | spin_unlock_bh(&mpath->state_lock); | ||
1125 | mesh_path_del(mpath->dst, mpath->sdata); | 1163 | mesh_path_del(mpath->dst, mpath->sdata); |
1126 | } else | ||
1127 | spin_unlock_bh(&mpath->state_lock); | ||
1128 | } | 1164 | } |
1129 | rcu_read_unlock(); | 1165 | rcu_read_unlock(); |
1130 | } | 1166 | } |
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 1a00d0f701c3..4396906175ae 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -88,7 +88,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, | |||
88 | if (!sta) | 88 | if (!sta) |
89 | return NULL; | 89 | return NULL; |
90 | 90 | ||
91 | sta->flags = WLAN_STA_AUTHORIZED | WLAN_STA_AUTH; | 91 | sta->flags = WLAN_STA_AUTHORIZED | WLAN_STA_AUTH | WLAN_STA_WME; |
92 | sta->sta.supp_rates[local->hw.conf.channel->band] = rates; | 92 | sta->sta.supp_rates[local->hw.conf.channel->band] = rates; |
93 | rate_control_rate_init(sta); | 93 | rate_control_rate_init(sta); |
94 | 94 | ||
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 60a6f273cd30..2f92ae2f9706 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -271,11 +271,9 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, | |||
271 | struct ieee80211_mgmt *mgmt; | 271 | struct ieee80211_mgmt *mgmt; |
272 | 272 | ||
273 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); | 273 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); |
274 | if (!skb) { | 274 | if (!skb) |
275 | printk(KERN_DEBUG "%s: failed to allocate buffer for " | ||
276 | "deauth/disassoc frame\n", sdata->name); | ||
277 | return; | 275 | return; |
278 | } | 276 | |
279 | skb_reserve(skb, local->hw.extra_tx_headroom); | 277 | skb_reserve(skb, local->hw.extra_tx_headroom); |
280 | 278 | ||
281 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); | 279 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); |
@@ -354,11 +352,9 @@ static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local, | |||
354 | return; | 352 | return; |
355 | 353 | ||
356 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30); | 354 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30); |
357 | if (!skb) { | 355 | if (!skb) |
358 | printk(KERN_DEBUG "%s: failed to allocate buffer for 4addr " | ||
359 | "nullfunc frame\n", sdata->name); | ||
360 | return; | 356 | return; |
361 | } | 357 | |
362 | skb_reserve(skb, local->hw.extra_tx_headroom); | 358 | skb_reserve(skb, local->hw.extra_tx_headroom); |
363 | 359 | ||
364 | nullfunc = (struct ieee80211_hdr *) skb_put(skb, 30); | 360 | nullfunc = (struct ieee80211_hdr *) skb_put(skb, 30); |
@@ -394,6 +390,9 @@ static void ieee80211_chswitch_work(struct work_struct *work) | |||
394 | /* call "hw_config" only if doing sw channel switch */ | 390 | /* call "hw_config" only if doing sw channel switch */ |
395 | ieee80211_hw_config(sdata->local, | 391 | ieee80211_hw_config(sdata->local, |
396 | IEEE80211_CONF_CHANGE_CHANNEL); | 392 | IEEE80211_CONF_CHANGE_CHANNEL); |
393 | } else { | ||
394 | /* update the device channel directly */ | ||
395 | sdata->local->hw.conf.channel = sdata->local->oper_channel; | ||
397 | } | 396 | } |
398 | 397 | ||
399 | /* XXX: shouldn't really modify cfg80211-owned data! */ | 398 | /* XXX: shouldn't really modify cfg80211-owned data! */ |
@@ -1922,8 +1921,24 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
1922 | 1921 | ||
1923 | rcu_read_unlock(); | 1922 | rcu_read_unlock(); |
1924 | 1923 | ||
1924 | /* | ||
1925 | * Whenever the AP announces the HT mode change that can be | ||
1926 | * 40MHz intolerant or etc., it would be safer to stop tx | ||
1927 | * queues before doing hw config to avoid buffer overflow. | ||
1928 | */ | ||
1929 | ieee80211_stop_queues_by_reason(&sdata->local->hw, | ||
1930 | IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE); | ||
1931 | |||
1932 | /* flush out all packets */ | ||
1933 | synchronize_net(); | ||
1934 | |||
1935 | drv_flush(local, false); | ||
1936 | |||
1925 | changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, | 1937 | changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, |
1926 | bssid, ap_ht_cap_flags); | 1938 | bssid, ap_ht_cap_flags); |
1939 | |||
1940 | ieee80211_wake_queues_by_reason(&sdata->local->hw, | ||
1941 | IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE); | ||
1927 | } | 1942 | } |
1928 | 1943 | ||
1929 | /* Note: country IE parsing is done for us by cfg80211 */ | 1944 | /* Note: country IE parsing is done for us by cfg80211 */ |
@@ -2441,11 +2456,8 @@ static int ieee80211_pre_assoc(struct ieee80211_sub_if_data *sdata, | |||
2441 | int err; | 2456 | int err; |
2442 | 2457 | ||
2443 | sta = sta_info_alloc(sdata, bssid, GFP_KERNEL); | 2458 | sta = sta_info_alloc(sdata, bssid, GFP_KERNEL); |
2444 | if (!sta) { | 2459 | if (!sta) |
2445 | printk(KERN_DEBUG "%s: failed to alloc STA entry for" | ||
2446 | " the AP\n", sdata->name); | ||
2447 | return -ENOMEM; | 2460 | return -ENOMEM; |
2448 | } | ||
2449 | 2461 | ||
2450 | sta->dummy = true; | 2462 | sta->dummy = true; |
2451 | 2463 | ||
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 21588386a302..e19249b0f971 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -452,7 +452,8 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, | |||
452 | 452 | ||
453 | if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) { | 453 | if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) { |
454 | minstrel_ht_update_stats(mp, mi); | 454 | minstrel_ht_update_stats(mp, mi); |
455 | minstrel_aggr_check(mp, sta, skb); | 455 | if (!(info->flags & IEEE80211_TX_CTL_AMPDU)) |
456 | minstrel_aggr_check(mp, sta, skb); | ||
456 | } | 457 | } |
457 | } | 458 | } |
458 | 459 | ||
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index f45fd2fedc24..db46601e50bf 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -476,7 +476,6 @@ static ieee80211_rx_result | |||
476 | ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) | 476 | ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) |
477 | { | 477 | { |
478 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; | 478 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; |
479 | unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); | ||
480 | char *dev_addr = rx->sdata->vif.addr; | 479 | char *dev_addr = rx->sdata->vif.addr; |
481 | 480 | ||
482 | if (ieee80211_is_data(hdr->frame_control)) { | 481 | if (ieee80211_is_data(hdr->frame_control)) { |
@@ -524,14 +523,6 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) | |||
524 | 523 | ||
525 | } | 524 | } |
526 | 525 | ||
527 | #define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l)) | ||
528 | |||
529 | if (ieee80211_is_data(hdr->frame_control) && | ||
530 | is_multicast_ether_addr(hdr->addr1) && | ||
531 | mesh_rmc_check(hdr->addr3, msh_h_get(hdr, hdrlen), rx->sdata)) | ||
532 | return RX_DROP_MONITOR; | ||
533 | #undef msh_h_get | ||
534 | |||
535 | return RX_CONTINUE; | 526 | return RX_CONTINUE; |
536 | } | 527 | } |
537 | 528 | ||
@@ -1840,6 +1831,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
1840 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | 1831 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
1841 | mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); | 1832 | mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); |
1842 | 1833 | ||
1834 | /* frame is in RMC, don't forward */ | ||
1835 | if (ieee80211_is_data(hdr->frame_control) && | ||
1836 | is_multicast_ether_addr(hdr->addr1) && | ||
1837 | mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata)) | ||
1838 | return RX_DROP_MONITOR; | ||
1839 | |||
1843 | if (!ieee80211_is_data(hdr->frame_control)) | 1840 | if (!ieee80211_is_data(hdr->frame_control)) |
1844 | return RX_CONTINUE; | 1841 | return RX_CONTINUE; |
1845 | 1842 | ||
@@ -1847,6 +1844,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
1847 | /* illegal frame */ | 1844 | /* illegal frame */ |
1848 | return RX_DROP_MONITOR; | 1845 | return RX_DROP_MONITOR; |
1849 | 1846 | ||
1847 | if (ieee80211_queue_stopped(&local->hw, skb_get_queue_mapping(skb))) { | ||
1848 | IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, | ||
1849 | dropped_frames_congestion); | ||
1850 | return RX_DROP_MONITOR; | ||
1851 | } | ||
1852 | |||
1850 | if (mesh_hdr->flags & MESH_FLAGS_AE) { | 1853 | if (mesh_hdr->flags & MESH_FLAGS_AE) { |
1851 | struct mesh_path *mppath; | 1854 | struct mesh_path *mppath; |
1852 | char *proxied_addr; | 1855 | char *proxied_addr; |
@@ -1902,13 +1905,13 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
1902 | memset(info, 0, sizeof(*info)); | 1905 | memset(info, 0, sizeof(*info)); |
1903 | info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; | 1906 | info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; |
1904 | info->control.vif = &rx->sdata->vif; | 1907 | info->control.vif = &rx->sdata->vif; |
1905 | skb_set_queue_mapping(skb, | 1908 | if (is_multicast_ether_addr(fwd_hdr->addr1)) { |
1906 | ieee80211_select_queue(rx->sdata, fwd_skb)); | ||
1907 | ieee80211_set_qos_hdr(local, skb); | ||
1908 | if (is_multicast_ether_addr(fwd_hdr->addr1)) | ||
1909 | IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, | 1909 | IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, |
1910 | fwded_mcast); | 1910 | fwded_mcast); |
1911 | else { | 1911 | skb_set_queue_mapping(fwd_skb, |
1912 | ieee80211_select_queue(sdata, fwd_skb)); | ||
1913 | ieee80211_set_qos_hdr(sdata, fwd_skb); | ||
1914 | } else { | ||
1912 | int err; | 1915 | int err; |
1913 | /* | 1916 | /* |
1914 | * Save TA to addr1 to send TA a path error if a | 1917 | * Save TA to addr1 to send TA a path error if a |
@@ -2569,12 +2572,12 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx) | |||
2569 | CALL_RXH(ieee80211_rx_h_ps_poll) | 2572 | CALL_RXH(ieee80211_rx_h_ps_poll) |
2570 | CALL_RXH(ieee80211_rx_h_michael_mic_verify) | 2573 | CALL_RXH(ieee80211_rx_h_michael_mic_verify) |
2571 | /* must be after MMIC verify so header is counted in MPDU mic */ | 2574 | /* must be after MMIC verify so header is counted in MPDU mic */ |
2572 | CALL_RXH(ieee80211_rx_h_remove_qos_control) | ||
2573 | CALL_RXH(ieee80211_rx_h_amsdu) | ||
2574 | #ifdef CONFIG_MAC80211_MESH | 2575 | #ifdef CONFIG_MAC80211_MESH |
2575 | if (ieee80211_vif_is_mesh(&rx->sdata->vif)) | 2576 | if (ieee80211_vif_is_mesh(&rx->sdata->vif)) |
2576 | CALL_RXH(ieee80211_rx_h_mesh_fwding); | 2577 | CALL_RXH(ieee80211_rx_h_mesh_fwding); |
2577 | #endif | 2578 | #endif |
2579 | CALL_RXH(ieee80211_rx_h_remove_qos_control) | ||
2580 | CALL_RXH(ieee80211_rx_h_amsdu) | ||
2578 | CALL_RXH(ieee80211_rx_h_data) | 2581 | CALL_RXH(ieee80211_rx_h_data) |
2579 | CALL_RXH(ieee80211_rx_h_ctrl); | 2582 | CALL_RXH(ieee80211_rx_h_ctrl); |
2580 | CALL_RXH(ieee80211_rx_h_mgmt_check) | 2583 | CALL_RXH(ieee80211_rx_h_mgmt_check) |
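The rx.c hunks above do three related things for mesh forwarding: drop a frame (and bump a dedicated dropped_frames_congestion counter) when the queue the forwarded copy would use is already stopped, set the queue mapping and QoS header per forwarded copy with the sdata-based helpers only on the multicast branch, and run the mesh-forwarding handler ahead of the QoS-control-removal and A-MSDU handlers so it still sees the original QoS and mesh headers. Below is a minimal standalone sketch of the congestion check only; queue_stopped(), struct mesh_stats and mesh_fwd_should_drop() are stand-ins invented here for ieee80211_queue_stopped(), the mesh counters and the in-kernel logic, not mac80211 API.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for ieee80211_queue_stopped() and the mesh interface's
 * dropped_frames_congestion counter. */
struct mesh_stats { unsigned long dropped_frames_congestion; };

static bool queue_stopped(int queue)
{
    return queue == 2;          /* pretend the driver stopped AC 2 */
}

/* Models the new check: if the queue the forwarded copy would use is
 * stopped, count the frame and drop it rather than piling more work
 * behind a congested hardware queue. */
static bool mesh_fwd_should_drop(struct mesh_stats *st, int queue)
{
    if (queue_stopped(queue)) {
        st->dropped_frames_congestion++;
        return true;
    }
    return false;
}

int main(void)
{
    struct mesh_stats st = { 0 };

    for (int q = 0; q < 4; q++)
        printf("queue %d -> %s\n", q,
               mesh_fwd_should_drop(&st, q) ? "drop" : "forward");
    printf("dropped_frames_congestion = %lu\n",
           st.dropped_frames_congestion);
    return 0;
}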
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c index 7733f66ee2c4..578eea3fc04d 100644 --- a/net/mac80211/spectmgmt.c +++ b/net/mac80211/spectmgmt.c | |||
@@ -32,12 +32,8 @@ static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_da | |||
32 | 32 | ||
33 | skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom + | 33 | skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom + |
34 | sizeof(struct ieee80211_msrment_ie)); | 34 | sizeof(struct ieee80211_msrment_ie)); |
35 | 35 | if (!skb) | |
36 | if (!skb) { | ||
37 | printk(KERN_ERR "%s: failed to allocate buffer for " | ||
38 | "measurement report frame\n", sdata->name); | ||
39 | return; | 36 | return; |
40 | } | ||
41 | 37 | ||
42 | skb_reserve(skb, local->hw.extra_tx_headroom); | 38 | skb_reserve(skb, local->hw.extra_tx_headroom); |
43 | msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24); | 39 | msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24); |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 6bc17fb80ee9..695447e988cb 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -691,14 +691,13 @@ void sta_info_clear_tim_bit(struct sta_info *sta) | |||
691 | spin_unlock_irqrestore(&sta->local->sta_lock, flags); | 691 | spin_unlock_irqrestore(&sta->local->sta_lock, flags); |
692 | } | 692 | } |
693 | 693 | ||
694 | static int sta_info_buffer_expired(struct sta_info *sta, | 694 | static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb) |
695 | struct sk_buff *skb) | ||
696 | { | 695 | { |
697 | struct ieee80211_tx_info *info; | 696 | struct ieee80211_tx_info *info; |
698 | int timeout; | 697 | int timeout; |
699 | 698 | ||
700 | if (!skb) | 699 | if (!skb) |
701 | return 0; | 700 | return false; |
702 | 701 | ||
703 | info = IEEE80211_SKB_CB(skb); | 702 | info = IEEE80211_SKB_CB(skb); |
704 | 703 | ||
@@ -718,9 +717,6 @@ static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local, | |||
718 | unsigned long flags; | 717 | unsigned long flags; |
719 | struct sk_buff *skb; | 718 | struct sk_buff *skb; |
720 | 719 | ||
721 | if (skb_queue_empty(&sta->ps_tx_buf)) | ||
722 | return false; | ||
723 | |||
724 | for (;;) { | 720 | for (;;) { |
725 | spin_lock_irqsave(&sta->ps_tx_buf.lock, flags); | 721 | spin_lock_irqsave(&sta->ps_tx_buf.lock, flags); |
726 | skb = skb_peek(&sta->ps_tx_buf); | 722 | skb = skb_peek(&sta->ps_tx_buf); |
@@ -745,7 +741,7 @@ static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local, | |||
745 | sta_info_clear_tim_bit(sta); | 741 | sta_info_clear_tim_bit(sta); |
746 | } | 742 | } |
747 | 743 | ||
748 | return true; | 744 | return !skb_queue_empty(&sta->ps_tx_buf); |
749 | } | 745 | } |
750 | 746 | ||
751 | static int __must_check __sta_info_destroy(struct sta_info *sta) | 747 | static int __must_check __sta_info_destroy(struct sta_info *sta) |
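In sta_info.c, sta_info_buffer_expired() now returns bool, and sta_info_cleanup_expire_buffered() reports whether anything is still queued in ps_tx_buf rather than whether it freed something, so the caller keeps its cleanup timer armed while buffered frames remain; the up-front skb_queue_empty() check goes away because the loop already handles the empty case. Below is a standalone model of that loop, with the ring buffer, the timestamps and EXPIRE_AGE as simplified stand-ins for the sk_buff queue and the jiffies-based expiry test.

#include <stdbool.h>
#include <stdio.h>

#define BUF_MAX    8
#define EXPIRE_AGE 10

struct ps_buf { long tx_time[BUF_MAX]; int head, len; };

static bool buffer_expired(long now, long tx_time)
{
    return now - tx_time > EXPIRE_AGE;
}

/* Returns true while the buffer is non-empty, mirroring the new return
 * value of sta_info_cleanup_expire_buffered(): "keep checking later". */
static bool cleanup_expire_buffered(struct ps_buf *buf, long now)
{
    for (;;) {
        if (buf->len == 0)
            break;
        if (!buffer_expired(now, buf->tx_time[buf->head]))
            break;
        printf("expiring frame queued at t=%ld\n", buf->tx_time[buf->head]);
        buf->head = (buf->head + 1) % BUF_MAX;
        buf->len--;
    }
    return buf->len != 0;
}

int main(void)
{
    struct ps_buf buf = { .tx_time = { 1, 2, 20, 25 }, .head = 0, .len = 4 };

    /* Two old frames expire, two recent ones remain, so the timer is rearmed. */
    printf("rearm timer: %s\n", cleanup_expire_buffered(&buf, 15) ? "yes" : "no");
    return 0;
}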
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index e9eb565506da..56a3d38a2cd1 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -86,6 +86,8 @@ enum ieee80211_sta_info_flags { | |||
86 | * @stop_initiator: initiator of a session stop | 86 | * @stop_initiator: initiator of a session stop |
87 | * @tx_stop: TX DelBA frame when stopping | 87 | * @tx_stop: TX DelBA frame when stopping |
88 | * @buf_size: reorder buffer size at receiver | 88 | * @buf_size: reorder buffer size at receiver |
89 | * @failed_bar_ssn: ssn of the last failed BAR tx attempt | ||
90 | * @bar_pending: BAR needs to be re-sent | ||
89 | * | 91 | * |
90 | * This structure's lifetime is managed by RCU, assignments to | 92 | * This structure's lifetime is managed by RCU, assignments to |
91 | * the array holding it must hold the aggregation mutex. | 93 | * the array holding it must hold the aggregation mutex. |
@@ -106,6 +108,9 @@ struct tid_ampdu_tx { | |||
106 | u8 stop_initiator; | 108 | u8 stop_initiator; |
107 | bool tx_stop; | 109 | bool tx_stop; |
108 | u8 buf_size; | 110 | u8 buf_size; |
111 | |||
112 | u16 failed_bar_ssn; | ||
113 | bool bar_pending; | ||
109 | }; | 114 | }; |
110 | 115 | ||
111 | /** | 116 | /** |
diff --git a/net/mac80211/status.c b/net/mac80211/status.c index e51bd2a1a073..d50358c45ab0 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c | |||
@@ -127,12 +127,32 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, | |||
127 | dev_kfree_skb(skb); | 127 | dev_kfree_skb(skb); |
128 | } | 128 | } |
129 | 129 | ||
130 | static void ieee80211_check_pending_bar(struct sta_info *sta, u8 *addr, u8 tid) | ||
131 | { | ||
132 | struct tid_ampdu_tx *tid_tx; | ||
133 | |||
134 | tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]); | ||
135 | if (!tid_tx || !tid_tx->bar_pending) | ||
136 | return; | ||
137 | |||
138 | tid_tx->bar_pending = false; | ||
139 | ieee80211_send_bar(&sta->sdata->vif, addr, tid, tid_tx->failed_bar_ssn); | ||
140 | } | ||
141 | |||
130 | static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb) | 142 | static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb) |
131 | { | 143 | { |
132 | struct ieee80211_mgmt *mgmt = (void *) skb->data; | 144 | struct ieee80211_mgmt *mgmt = (void *) skb->data; |
133 | struct ieee80211_local *local = sta->local; | 145 | struct ieee80211_local *local = sta->local; |
134 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 146 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
135 | 147 | ||
148 | if (ieee80211_is_data_qos(mgmt->frame_control)) { | ||
149 | struct ieee80211_hdr *hdr = (void *) skb->data; | ||
150 | u8 *qc = ieee80211_get_qos_ctl(hdr); | ||
151 | u16 tid = qc[0] & 0xf; | ||
152 | |||
153 | ieee80211_check_pending_bar(sta, hdr->addr1, tid); | ||
154 | } | ||
155 | |||
136 | if (ieee80211_is_action(mgmt->frame_control) && | 156 | if (ieee80211_is_action(mgmt->frame_control) && |
137 | sdata->vif.type == NL80211_IFTYPE_STATION && | 157 | sdata->vif.type == NL80211_IFTYPE_STATION && |
138 | mgmt->u.action.category == WLAN_CATEGORY_HT && | 158 | mgmt->u.action.category == WLAN_CATEGORY_HT && |
@@ -161,6 +181,18 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb) | |||
161 | } | 181 | } |
162 | } | 182 | } |
163 | 183 | ||
184 | static void ieee80211_set_bar_pending(struct sta_info *sta, u8 tid, u16 ssn) | ||
185 | { | ||
186 | struct tid_ampdu_tx *tid_tx; | ||
187 | |||
188 | tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]); | ||
189 | if (!tid_tx) | ||
190 | return; | ||
191 | |||
192 | tid_tx->failed_bar_ssn = ssn; | ||
193 | tid_tx->bar_pending = true; | ||
194 | } | ||
195 | |||
164 | /* | 196 | /* |
165 | * Use a static threshold for now, best value to be determined | 197 | * Use a static threshold for now, best value to be determined |
166 | * by testing ... | 198 | * by testing ... |
@@ -241,23 +273,28 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
241 | tid = qc[0] & 0xf; | 273 | tid = qc[0] & 0xf; |
242 | ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10) | 274 | ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10) |
243 | & IEEE80211_SCTL_SEQ); | 275 | & IEEE80211_SCTL_SEQ); |
244 | ieee80211_send_bar(sta->sdata, hdr->addr1, | 276 | ieee80211_send_bar(&sta->sdata->vif, hdr->addr1, |
245 | tid, ssn); | 277 | tid, ssn); |
246 | } | 278 | } |
247 | 279 | ||
248 | if (!acked && ieee80211_is_back_req(fc)) { | 280 | if (!acked && ieee80211_is_back_req(fc)) { |
281 | u16 control; | ||
282 | |||
249 | /* | 283 | /* |
250 | * BAR failed, let's tear down the BA session as a | 284 | * BAR failed, store the last SSN and retry sending |
251 | * last resort as some STAs (Intel 5100 on Windows) | 285 | * the BAR when the next unicast transmission on the |
252 | * can get stuck when the BA window isn't flushed | 286 | * same TID succeeds. |
253 | * correctly. | ||
254 | */ | 287 | */ |
255 | bar = (struct ieee80211_bar *) skb->data; | 288 | bar = (struct ieee80211_bar *) skb->data; |
256 | if (!(bar->control & IEEE80211_BAR_CTRL_MULTI_TID)) { | 289 | control = le16_to_cpu(bar->control); |
257 | tid = (bar->control & | 290 | if (!(control & IEEE80211_BAR_CTRL_MULTI_TID)) { |
291 | u16 ssn = le16_to_cpu(bar->start_seq_num); | ||
292 | |||
293 | tid = (control & | ||
258 | IEEE80211_BAR_CTRL_TID_INFO_MASK) >> | 294 | IEEE80211_BAR_CTRL_TID_INFO_MASK) >> |
259 | IEEE80211_BAR_CTRL_TID_INFO_SHIFT; | 295 | IEEE80211_BAR_CTRL_TID_INFO_SHIFT; |
260 | ieee80211_stop_tx_ba_session(&sta->sta, tid); | 296 | |
297 | ieee80211_set_bar_pending(sta, tid, ssn); | ||
261 | } | 298 | } |
262 | } | 299 | } |
263 | 300 | ||
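Taken together with the failed_bar_ssn/bar_pending fields added to struct tid_ampdu_tx above, the status.c hunks replace the old last-resort teardown of the BA session after a failed BAR with a retry: the failed BAR's start sequence number is remembered per TID, and the next acknowledged QoS-data frame on that TID (its TID read from qc[0] & 0xf) triggers a fresh ieee80211_send_bar(), now addressed via the vif. Below is a standalone model of that state machine; the struct, the function names and the printing send_bar() stub are simplifications written for this note, not the kernel's API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Per-TID aggregation TX state, reduced to the two fields this series adds. */
struct tid_tx_state {
    uint16_t failed_bar_ssn;
    bool     bar_pending;
};

/* Stand-in for ieee80211_send_bar(): just record what would go on air. */
static void send_bar(unsigned tid, unsigned ssn)
{
    printf("TX BAR: tid=%u ssn=0x%x\n", tid, ssn);
}

/* TX-status path when a BAR was not acked (the ieee80211_set_bar_pending()
 * side of the change). */
static void bar_tx_failed(struct tid_tx_state *tid_tx, uint16_t ssn)
{
    tid_tx->failed_bar_ssn = ssn;
    tid_tx->bar_pending = true;
}

/* A QoS-data frame on the same TID was acked (the
 * ieee80211_check_pending_bar() side of the change). */
static void qos_frame_acked(struct tid_tx_state *tid_tx, unsigned tid)
{
    if (!tid_tx->bar_pending)
        return;
    tid_tx->bar_pending = false;
    send_bar(tid, tid_tx->failed_bar_ssn);
}

int main(void)
{
    struct tid_tx_state tid_tx = { 0 };

    bar_tx_failed(&tid_tx, 0x0120);  /* BAR with SSN 0x120 was not acked */
    qos_frame_acked(&tid_tx, 5);     /* next acked frame on TID 5 resends it */
    qos_frame_acked(&tid_tx, 5);     /* flag already cleared, nothing more sent */
    return 0;
}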
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 01072639666f..7cd6c28968b2 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -1232,7 +1232,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, | |||
1232 | tx->sta = sta_info_get(sdata, hdr->addr1); | 1232 | tx->sta = sta_info_get(sdata, hdr->addr1); |
1233 | 1233 | ||
1234 | if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) && | 1234 | if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) && |
1235 | (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) { | 1235 | (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) && |
1236 | !(local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)) { | ||
1236 | struct tid_ampdu_tx *tid_tx; | 1237 | struct tid_ampdu_tx *tid_tx; |
1237 | 1238 | ||
1238 | qc = ieee80211_get_qos_ctl(hdr); | 1239 | qc = ieee80211_get_qos_ctl(hdr); |
@@ -1595,7 +1596,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, | |||
1595 | return; | 1596 | return; |
1596 | } | 1597 | } |
1597 | 1598 | ||
1598 | ieee80211_set_qos_hdr(local, skb); | 1599 | ieee80211_set_qos_hdr(sdata, skb); |
1599 | ieee80211_tx(sdata, skb, false); | 1600 | ieee80211_tx(sdata, skb, false); |
1600 | rcu_read_unlock(); | 1601 | rcu_read_unlock(); |
1601 | } | 1602 | } |
@@ -1878,6 +1879,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, | |||
1878 | rcu_read_unlock(); | 1879 | rcu_read_unlock(); |
1879 | } | 1880 | } |
1880 | 1881 | ||
1882 | /* For mesh, the use of the QoS header is mandatory */ | ||
1883 | if (ieee80211_vif_is_mesh(&sdata->vif)) | ||
1884 | sta_flags |= WLAN_STA_WME; | ||
1885 | |||
1881 | /* receiver and we are QoS enabled, use a QoS type frame */ | 1886 | /* receiver and we are QoS enabled, use a QoS type frame */ |
1882 | if ((sta_flags & WLAN_STA_WME) && local->hw.queues >= 4) { | 1887 | if ((sta_flags & WLAN_STA_WME) && local->hw.queues >= 4) { |
1883 | fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); | 1888 | fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); |
@@ -2365,11 +2370,9 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw, | |||
2365 | local = sdata->local; | 2370 | local = sdata->local; |
2366 | 2371 | ||
2367 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll)); | 2372 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll)); |
2368 | if (!skb) { | 2373 | if (!skb) |
2369 | printk(KERN_DEBUG "%s: failed to allocate buffer for " | ||
2370 | "pspoll template\n", sdata->name); | ||
2371 | return NULL; | 2374 | return NULL; |
2372 | } | 2375 | |
2373 | skb_reserve(skb, local->hw.extra_tx_headroom); | 2376 | skb_reserve(skb, local->hw.extra_tx_headroom); |
2374 | 2377 | ||
2375 | pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll)); | 2378 | pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll)); |
@@ -2405,11 +2408,9 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, | |||
2405 | local = sdata->local; | 2408 | local = sdata->local; |
2406 | 2409 | ||
2407 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc)); | 2410 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc)); |
2408 | if (!skb) { | 2411 | if (!skb) |
2409 | printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc " | ||
2410 | "template\n", sdata->name); | ||
2411 | return NULL; | 2412 | return NULL; |
2412 | } | 2413 | |
2413 | skb_reserve(skb, local->hw.extra_tx_headroom); | 2414 | skb_reserve(skb, local->hw.extra_tx_headroom); |
2414 | 2415 | ||
2415 | nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb, | 2416 | nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb, |
@@ -2444,11 +2445,8 @@ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw, | |||
2444 | 2445 | ||
2445 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) + | 2446 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) + |
2446 | ie_ssid_len + ie_len); | 2447 | ie_ssid_len + ie_len); |
2447 | if (!skb) { | 2448 | if (!skb) |
2448 | printk(KERN_DEBUG "%s: failed to allocate buffer for probe " | ||
2449 | "request template\n", sdata->name); | ||
2450 | return NULL; | 2449 | return NULL; |
2451 | } | ||
2452 | 2450 | ||
2453 | skb_reserve(skb, local->hw.extra_tx_headroom); | 2451 | skb_reserve(skb, local->hw.extra_tx_headroom); |
2454 | 2452 | ||
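Beyond the printk cleanups, the tx.c changes do two things: ieee80211_tx_prepare() no longer tries to start an aggregation session in software when the driver advertises IEEE80211_HW_TX_AMPDU_SETUP_IN_HW, and mesh interfaces now force WLAN_STA_WME so every mesh data frame is built as a QoS frame (which the wme.c change below relies on to carry the Mesh Control Present flag). A small standalone model of the reworked flag gate follows; the HW_* constants and should_try_sw_ampdu_start() are illustrative names and values for this sketch, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

/* Hardware capability flags, modelled as plain bits; in mac80211 they come
 * from enum ieee80211_hw_flags. */
#define HW_AMPDU_AGGREGATION     0x1
#define HW_TX_AMPDU_SETUP_IN_HW  0x2

/* Software only considers starting a BA session when the driver aggregates
 * but does not manage session setup itself. */
static bool should_try_sw_ampdu_start(unsigned hw_flags, bool is_qos_data,
                                      bool have_sta)
{
    return have_sta && is_qos_data &&
           (hw_flags & HW_AMPDU_AGGREGATION) &&
           !(hw_flags & HW_TX_AMPDU_SETUP_IN_HW);
}

int main(void)
{
    printf("sw setup: %d\n",
           should_try_sw_ampdu_start(HW_AMPDU_AGGREGATION, true, true));
    printf("hw setup: %d\n",
           should_try_sw_ampdu_start(HW_AMPDU_AGGREGATION |
                                     HW_TX_AMPDU_SETUP_IN_HW, true, true));
    printf("no agg:   %d\n", should_try_sw_ampdu_start(0, true, true));
    return 0;
}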
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index ce916ff6ef08..4b1466d5b6a1 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -707,11 +707,9 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, | |||
707 | 707 | ||
708 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + | 708 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + |
709 | sizeof(*mgmt) + 6 + extra_len); | 709 | sizeof(*mgmt) + 6 + extra_len); |
710 | if (!skb) { | 710 | if (!skb) |
711 | printk(KERN_DEBUG "%s: failed to allocate buffer for auth " | ||
712 | "frame\n", sdata->name); | ||
713 | return; | 711 | return; |
714 | } | 712 | |
715 | skb_reserve(skb, local->hw.extra_tx_headroom); | 713 | skb_reserve(skb, local->hw.extra_tx_headroom); |
716 | 714 | ||
717 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6); | 715 | mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6); |
@@ -864,11 +862,8 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, | |||
864 | 862 | ||
865 | /* FIXME: come up with a proper value */ | 863 | /* FIXME: come up with a proper value */ |
866 | buf = kmalloc(200 + ie_len, GFP_KERNEL); | 864 | buf = kmalloc(200 + ie_len, GFP_KERNEL); |
867 | if (!buf) { | 865 | if (!buf) |
868 | printk(KERN_DEBUG "%s: failed to allocate temporary IE " | ||
869 | "buffer\n", sdata->name); | ||
870 | return NULL; | 866 | return NULL; |
871 | } | ||
872 | 867 | ||
873 | /* | 868 | /* |
874 | * Do not send DS Channel parameter for directed probe requests | 869 | * Do not send DS Channel parameter for directed probe requests |
@@ -1082,6 +1077,8 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1082 | changed |= BSS_CHANGED_IBSS; | 1077 | changed |= BSS_CHANGED_IBSS; |
1083 | /* fall through */ | 1078 | /* fall through */ |
1084 | case NL80211_IFTYPE_AP: | 1079 | case NL80211_IFTYPE_AP: |
1080 | changed |= BSS_CHANGED_SSID; | ||
1081 | /* fall through */ | ||
1085 | case NL80211_IFTYPE_MESH_POINT: | 1082 | case NL80211_IFTYPE_MESH_POINT: |
1086 | changed |= BSS_CHANGED_BEACON | | 1083 | changed |= BSS_CHANGED_BEACON | |
1087 | BSS_CHANGED_BEACON_ENABLED; | 1084 | BSS_CHANGED_BEACON_ENABLED; |
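The util.c hunk adds BSS_CHANGED_SSID to the reconfiguration mask for AP interfaces, and IBSS picks it up as well through the existing fall-through chain. A reduced model of just that chain (the real switch in ieee80211_reconfig() has more cases, and the CHANGED_* bit values here are illustrative):

#include <stdio.h>

#define CHANGED_IBSS            0x01
#define CHANGED_SSID            0x02
#define CHANGED_BEACON          0x04
#define CHANGED_BEACON_ENABLED  0x08

enum iftype { IFTYPE_ADHOC, IFTYPE_AP, IFTYPE_MESH_POINT };

/* Each interface type accumulates the flags of the cases below it. */
static unsigned reconfig_changed(enum iftype type)
{
    unsigned changed = 0;

    switch (type) {
    case IFTYPE_ADHOC:
        changed |= CHANGED_IBSS;
        /* fall through */
    case IFTYPE_AP:
        changed |= CHANGED_SSID;
        /* fall through */
    case IFTYPE_MESH_POINT:
        changed |= CHANGED_BEACON | CHANGED_BEACON_ENABLED;
        break;
    }
    return changed;
}

int main(void)
{
    printf("adhoc: 0x%x\n", reconfig_changed(IFTYPE_ADHOC));      /* 0x0f */
    printf("ap:    0x%x\n", reconfig_changed(IFTYPE_AP));         /* 0x0e */
    printf("mesh:  0x%x\n", reconfig_changed(IFTYPE_MESH_POINT)); /* 0x0c */
    return 0;
}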
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index 7a49532f14cb..971004c9b04f 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c | |||
@@ -83,11 +83,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, | |||
83 | break; | 83 | break; |
84 | #ifdef CONFIG_MAC80211_MESH | 84 | #ifdef CONFIG_MAC80211_MESH |
85 | case NL80211_IFTYPE_MESH_POINT: | 85 | case NL80211_IFTYPE_MESH_POINT: |
86 | /* | 86 | ra = skb->data; |
87 | * XXX: This is clearly broken ... but already was before, | ||
88 | * because ieee80211_fill_mesh_addresses() would clear A1 | ||
89 | * except for multicast addresses. | ||
90 | */ | ||
91 | break; | 87 | break; |
92 | #endif | 88 | #endif |
93 | case NL80211_IFTYPE_STATION: | 89 | case NL80211_IFTYPE_STATION: |
@@ -139,7 +135,8 @@ u16 ieee80211_downgrade_queue(struct ieee80211_local *local, | |||
139 | return ieee802_1d_to_ac[skb->priority]; | 135 | return ieee802_1d_to_ac[skb->priority]; |
140 | } | 136 | } |
141 | 137 | ||
142 | void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb) | 138 | void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, |
139 | struct sk_buff *skb) | ||
143 | { | 140 | { |
144 | struct ieee80211_hdr *hdr = (void *)skb->data; | 141 | struct ieee80211_hdr *hdr = (void *)skb->data; |
145 | 142 | ||
@@ -150,10 +147,11 @@ void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb) | |||
150 | 147 | ||
151 | tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; | 148 | tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; |
152 | 149 | ||
153 | if (unlikely(local->wifi_wme_noack_test)) | 150 | if (unlikely(sdata->local->wifi_wme_noack_test)) |
154 | ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK; | 151 | ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK; |
155 | /* qos header is 2 bytes, second reserved */ | 152 | /* qos header is 2 bytes */ |
156 | *p++ = ack_policy | tid; | 153 | *p++ = ack_policy | tid; |
157 | *p = 0; | 154 | *p = ieee80211_vif_is_mesh(&sdata->vif) ? |
155 | (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8) : 0; | ||
158 | } | 156 | } |
159 | } | 157 | } |
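With ieee80211_set_qos_hdr() now taking the sdata, the second byte of the QoS Control field can be set per interface type: on mesh vifs it carries the Mesh Control Present flag (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8), on everything else it stays zero as before. A standalone sketch of how the two bytes are assembled; the QOS_CTL_* constants are local model values chosen to match the masks used in the hunk, and build_qos_ctl() is not a mac80211 function.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define QOS_CTL_TAG1D_MASK            0x0007
#define QOS_CTL_ACK_POLICY_NOACK      0x0020
#define QOS_CTL_MESH_CONTROL_PRESENT  0x0100  /* bit 8: Mesh Control Present */

/* Byte 0 carries the TID and ack policy; byte 1 flags an appended Mesh
 * Control field when the transmitting vif is a mesh interface. */
static void build_qos_ctl(uint8_t qc[2], unsigned priority, bool noack_test,
                          bool is_mesh_vif)
{
    uint8_t tid = priority & QOS_CTL_TAG1D_MASK;
    uint8_t ack_policy = noack_test ? QOS_CTL_ACK_POLICY_NOACK : 0;

    qc[0] = ack_policy | tid;
    qc[1] = is_mesh_vif ? (QOS_CTL_MESH_CONTROL_PRESENT >> 8) : 0;
}

int main(void)
{
    uint8_t qc[2];

    build_qos_ctl(qc, 5, false, true);            /* mesh vif, TID 5 */
    printf("mesh:    %02x %02x\n", qc[0], qc[1]); /* 05 01 */
    build_qos_ctl(qc, 5, false, false);           /* non-mesh vif */
    printf("station: %02x %02x\n", qc[0], qc[1]); /* 05 00 */
    return 0;
}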
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h index faead6d02026..34e166fbf4d4 100644 --- a/net/mac80211/wme.h +++ b/net/mac80211/wme.h | |||
@@ -17,7 +17,8 @@ extern const int ieee802_1d_to_ac[8]; | |||
17 | 17 | ||
18 | u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, | 18 | u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, |
19 | struct sk_buff *skb); | 19 | struct sk_buff *skb); |
20 | void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb); | 20 | void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, |
21 | struct sk_buff *skb); | ||
21 | u16 ieee80211_downgrade_queue(struct ieee80211_local *local, | 22 | u16 ieee80211_downgrade_queue(struct ieee80211_local *local, |
22 | struct sk_buff *skb); | 23 | struct sk_buff *skb); |
23 | 24 | ||
diff --git a/net/mac80211/work.c b/net/mac80211/work.c index 380b9a7462b6..bac34394c05e 100644 --- a/net/mac80211/work.c +++ b/net/mac80211/work.c | |||
@@ -229,11 +229,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata, | |||
229 | wk->ie_len + /* extra IEs */ | 229 | wk->ie_len + /* extra IEs */ |
230 | 9, /* WMM */ | 230 | 9, /* WMM */ |
231 | GFP_KERNEL); | 231 | GFP_KERNEL); |
232 | if (!skb) { | 232 | if (!skb) |
233 | printk(KERN_DEBUG "%s: failed to allocate buffer for assoc " | ||
234 | "frame\n", sdata->name); | ||
235 | return; | 233 | return; |
236 | } | 234 | |
237 | skb_reserve(skb, local->hw.extra_tx_headroom); | 235 | skb_reserve(skb, local->hw.extra_tx_headroom); |
238 | 236 | ||
239 | capab = WLAN_CAPABILITY_ESS; | 237 | capab = WLAN_CAPABILITY_ESS; |