author     Linus Torvalds <torvalds@linux-foundation.org>  2015-05-27 16:41:13 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-05-27 16:41:13 -0400
commit     8f98bcdf8f9638ee012ff09c6c8732e0fda6018a (patch)
tree       1701863428e97735dc9cafa4740dba179e7820d7 /net
parent     7ffb9e116fb0abe12b29c81d56bb8a9f498ee1a9 (diff)
parent     9302d7bb0c5cd46be5706859301f18c137b2439f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
1) Don't use MMIO on certain iwlwifi devices, otherwise we get a
firmware crash.
2) Don't corrupt the GRO lists of mac80211 contexts by doing sends via
timer interrupt, from Johannes Berg.
3) SKB tailroom is miscalculated in AP_VLAN crypto code, from Michal
Kazior.
4) Fix fw_status memory leak in iwlwifi, from Haim Dreyfuss.
5) Fix use after free in iwl_mvm_d0i3_enable_tx(), from Eliad Peller.
6) JIT'ing of large BPF programs is broken on x86, from Alexei
Starovoitov.
7) EMAC driver ethtool register dump size is miscalculated, from Ivan
Mikhaylov.
8) Fix PHY initial link mode when autonegotiation is disabled in
amd-xgbe, from Tom Lendacky.
9) Fix NULL deref on SOCK_DEAD socket in AF_UNIX and CAIF protocols,
from Mark Salyzyn (a sketch of the re-check pattern follows this list).
10) credit_bytes not initialized properly in xen-netback, from Ross
Lagerwall.
11) Fallback from MSI-X to INTx interrupts not handled properly in mlx4
driver, fix from Benjamin Poirier.
12) Perform ->attach() after binding dev->qdisc in packet scheduler,
otherwise we can crash. From Cong WANG.
13) Don't clobber data in sctp_v4_map_v6(). From Jason Gunthorpe.
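To make fix 9 concrete, here is a simplified sketch of the pattern both the
AF_UNIX and CAIF hunks further down follow (illustrative only, not a drop-in
kernel function): after sleeping with the socket lock released, the receive
path must re-check SOCK_DEAD before dereferencing sk->sk_socket, because a
concurrent close() may have torn the socket down in the meantime.

#include <net/sock.h>

/*
 * Simplified sketch of the SOCK_DEAD re-check pattern used by the
 * af_unix and caif fixes below; the function name and surrounding
 * logic are illustrative only.
 */
static long stream_data_wait(struct sock *sk, long timeo)
{
	for (;;) {
		if (!skb_queue_empty(&sk->sk_receive_queue) || !timeo)
			break;

		release_sock(sk);			/* drop the lock ... */
		timeo = schedule_timeout(timeo);	/* ... and sleep */
		lock_sock(sk);

		/*
		 * A concurrent close() may have marked the socket dead and
		 * detached sk->sk_socket while we slept, so bail out before
		 * touching it below.
		 */
		if (sock_flag(sk, SOCK_DEAD))
			break;

		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}
	return timeo;
}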
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (30 commits)
sctp: Fix mangled IPv4 addresses on a IPv6 listening socket
net_sched: invoke ->attach() after setting dev->qdisc
xen-netfront: properly destroy queues when removing device
mlx4_core: Fix fallback from MSI-X to INTx
xen/netback: Properly initialize credit_bytes
net: netxen: correct sysfs bin attribute return code
tools: bpf_jit_disasm: fix segfault on disabled debugging log output
unix/caif: sk_socket can disappear when state is unlocked
amd-xgbe-phy: Fix initial mode when autoneg is disabled
net: dp83640: fix improper double spin locking.
net: dp83640: reinforce locking rules.
net: dp83640: fix broken calibration routine.
net: stmmac: create one debugfs dir per net-device
net/ibm/emac: fix size of emac dump memory areas
x86: bpf_jit: fix compilation of large bpf programs
net: phy: bcm7xxx: Fix 7425 PHY ID and flags
iwlwifi: mvm: avoid use-after-free on iwl_mvm_d0i3_enable_tx()
iwlwifi: mvm: clean net-detect info if device was reset during suspend
iwlwifi: mvm: take the UCODE_DOWN reference when resuming
iwlwifi: mvm: BT Coex - duplicate the command if sent ASYNC
...
Diffstat (limited to 'net')
 net/caif/caif_socket.c     |  8
 net/mac80211/cfg.c         | 59
 net/mac80211/ieee80211_i.h |  9
 net/mac80211/iface.c       |  6
 net/mac80211/key.c         | 82
 net/mac80211/key.h         |  1
 net/mac80211/rx.c          |  5
 net/mac80211/util.c        |  3
 net/sched/sch_api.c        | 10
 net/unix/af_unix.c         |  8
 10 files changed, 122 insertions(+), 69 deletions(-)
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 4ec0c803aef1..112ad784838a 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -330,6 +330,10 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
 		release_sock(sk);
 		timeo = schedule_timeout(timeo);
 		lock_sock(sk);
+
+		if (sock_flag(sk, SOCK_DEAD))
+			break;
+
 		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
 	}
 
@@ -373,6 +377,10 @@ static int caif_stream_recvmsg(struct socket *sock, struct msghdr *msg,
 		struct sk_buff *skb;
 
 		lock_sock(sk);
+		if (sock_flag(sk, SOCK_DEAD)) {
+			err = -ECONNRESET;
+			goto unlock;
+		}
 		skb = skb_dequeue(&sk->sk_receive_queue);
 		caif_check_flow_release(sk);
 
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 265e42721a66..ff347a0eebd4 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2495,51 +2495,22 @@ static bool ieee80211_coalesce_started_roc(struct ieee80211_local *local,
 					   struct ieee80211_roc_work *new_roc,
 					   struct ieee80211_roc_work *cur_roc)
 {
-	unsigned long j = jiffies;
-	unsigned long cur_roc_end = cur_roc->hw_start_time +
-				    msecs_to_jiffies(cur_roc->duration);
-	struct ieee80211_roc_work *next_roc;
-	int new_dur;
+	unsigned long now = jiffies;
+	unsigned long remaining = cur_roc->hw_start_time +
+				  msecs_to_jiffies(cur_roc->duration) -
+				  now;
 
 	if (WARN_ON(!cur_roc->started || !cur_roc->hw_begun))
 		return false;
 
-	if (time_after(j + IEEE80211_ROC_MIN_LEFT, cur_roc_end))
+	/* if it doesn't fit entirely, schedule a new one */
+	if (new_roc->duration > jiffies_to_msecs(remaining))
 		return false;
 
 	ieee80211_handle_roc_started(new_roc);
 
-	new_dur = new_roc->duration - jiffies_to_msecs(cur_roc_end - j);
-
-	/* cur_roc is long enough - add new_roc to the dependents list. */
-	if (new_dur <= 0) {
-		list_add_tail(&new_roc->list, &cur_roc->dependents);
-		return true;
-	}
-
-	new_roc->duration = new_dur;
-
-	/*
-	 * if cur_roc was already coalesced before, we might
-	 * want to extend the next roc instead of adding
-	 * a new one.
-	 */
-	next_roc = list_entry(cur_roc->list.next,
-			      struct ieee80211_roc_work, list);
-	if (&next_roc->list != &local->roc_list &&
-	    next_roc->chan == new_roc->chan &&
-	    next_roc->sdata == new_roc->sdata &&
-	    !WARN_ON(next_roc->started)) {
-		list_add_tail(&new_roc->list, &next_roc->dependents);
-		next_roc->duration = max(next_roc->duration,
-					 new_roc->duration);
-		next_roc->type = max(next_roc->type, new_roc->type);
-		return true;
-	}
-
-	/* add right after cur_roc */
-	list_add(&new_roc->list, &cur_roc->list);
-
+	/* add to dependents so we send the expired event properly */
+	list_add_tail(&new_roc->list, &cur_roc->dependents);
 	return true;
 }
 
@@ -2652,17 +2623,9 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
 			 * In the offloaded ROC case, if it hasn't begun, add
 			 * this new one to the dependent list to be handled
 			 * when the master one begins. If it has begun,
-			 * check that there's still a minimum time left and
-			 * if so, start this one, transmitting the frame, but
-			 * add it to the list directly after this one with
-			 * a reduced time so we'll ask the driver to execute
-			 * it right after finishing the previous one, in the
-			 * hope that it'll also be executed right afterwards,
-			 * effectively extending the old one.
-			 * If there's no minimum time left, just add it to the
-			 * normal list.
-			 * TODO: the ROC type is ignored here, assuming that it
-			 * is better to immediately use the current ROC.
+			 * check if it fits entirely within the existing one,
+			 * in which case it will just be dependent as well.
+			 * Otherwise, schedule it by itself.
 			 */
 			if (!tmp->hw_begun) {
 				list_add_tail(&roc->list, &tmp->dependents);
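The heart of the cfg.c change above is a simple duration test: a new
remain-on-channel request is coalesced onto the already-running offloaded ROC
only if it fits entirely within the time that ROC still has left; otherwise it
is scheduled on its own. A standalone illustration of that jiffies arithmetic
(a hypothetical helper, not part of the patch):

#include <linux/jiffies.h>

/* Illustrative only: same fit test as ieee80211_coalesce_started_roc() */
static bool roc_fits_in_current(unsigned long hw_start_time,
				int cur_duration_ms, int new_duration_ms)
{
	unsigned long now = jiffies;
	unsigned long end = hw_start_time + msecs_to_jiffies(cur_duration_ms);

	/* current ROC already over (or about to be): nothing to share */
	if (time_after_eq(now, end))
		return false;

	/* coalesce only if the new request fits in what is left */
	return new_duration_ms <= jiffies_to_msecs(end - now);
}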
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index ab46ab4a7249..c0a9187bc3a9 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -205,6 +205,8 @@ enum ieee80211_packet_rx_flags {
  * @IEEE80211_RX_CMNTR: received on cooked monitor already
  * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
  *	to cfg80211_report_obss_beacon().
+ * @IEEE80211_RX_REORDER_TIMER: this frame is released by the
+ *	reorder buffer timeout timer, not the normal RX path
  *
  * These flags are used across handling multiple interfaces
  * for a single frame.
@@ -212,6 +214,7 @@ enum ieee80211_packet_rx_flags {
 enum ieee80211_rx_flags {
 	IEEE80211_RX_CMNTR = BIT(0),
 	IEEE80211_RX_BEACON_REPORTED = BIT(1),
+	IEEE80211_RX_REORDER_TIMER = BIT(2),
 };
 
 struct ieee80211_rx_data {
@@ -325,12 +328,6 @@ struct mesh_preq_queue {
 	u8 flags;
 };
 
-#if HZ/100 == 0
-#define IEEE80211_ROC_MIN_LEFT 1
-#else
-#define IEEE80211_ROC_MIN_LEFT (HZ/100)
-#endif
-
 struct ieee80211_roc_work {
 	struct list_head list;
 	struct list_head dependents;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index bab5c63c0bad..84cef600c573 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -522,6 +522,12 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 		memcpy(sdata->vif.hw_queue, master->vif.hw_queue,
 		       sizeof(sdata->vif.hw_queue));
 		sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef;
+
+		mutex_lock(&local->key_mtx);
+		sdata->crypto_tx_tailroom_needed_cnt +=
+			master->crypto_tx_tailroom_needed_cnt;
+		mutex_unlock(&local->key_mtx);
+
 		break;
 		}
 	case NL80211_IFTYPE_AP:
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 2291cd730091..a907f2d5c12d 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -58,6 +58,22 @@ static void assert_key_lock(struct ieee80211_local *local)
 	lockdep_assert_held(&local->key_mtx);
 }
 
+static void
+update_vlan_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta)
+{
+	struct ieee80211_sub_if_data *vlan;
+
+	if (sdata->vif.type != NL80211_IFTYPE_AP)
+		return;
+
+	mutex_lock(&sdata->local->mtx);
+
+	list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+		vlan->crypto_tx_tailroom_needed_cnt += delta;
+
+	mutex_unlock(&sdata->local->mtx);
+}
+
 static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
 {
 	/*
@@ -79,6 +95,8 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
 	 * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net
 	 */
 
+	update_vlan_tailroom_need_count(sdata, 1);
+
 	if (!sdata->crypto_tx_tailroom_needed_cnt++) {
 		/*
 		 * Flush all XMIT packets currently using HW encryption or no
@@ -88,6 +106,15 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
 	}
 }
 
+static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata,
+					 int delta)
+{
+	WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta);
+
+	update_vlan_tailroom_need_count(sdata, -delta);
+	sdata->crypto_tx_tailroom_needed_cnt -= delta;
+}
+
 static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 {
 	struct ieee80211_sub_if_data *sdata;
@@ -144,7 +171,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 
 	if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
 	      (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
-		sdata->crypto_tx_tailroom_needed_cnt--;
+		decrease_tailroom_need_count(sdata, 1);
 
 	WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
 		(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV));
@@ -541,7 +568,7 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key,
 			schedule_delayed_work(&sdata->dec_tailroom_needed_wk,
 					      HZ/2);
 		} else {
-			sdata->crypto_tx_tailroom_needed_cnt--;
+			decrease_tailroom_need_count(sdata, 1);
 		}
 	}
 
@@ -631,6 +658,7 @@ void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom)
 void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_key *key;
+	struct ieee80211_sub_if_data *vlan;
 
 	ASSERT_RTNL();
 
@@ -639,7 +667,14 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
 
 	mutex_lock(&sdata->local->key_mtx);
 
-	sdata->crypto_tx_tailroom_needed_cnt = 0;
+	WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
+		     sdata->crypto_tx_tailroom_pending_dec);
+
+	if (sdata->vif.type == NL80211_IFTYPE_AP) {
+		list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+			WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt ||
+				     vlan->crypto_tx_tailroom_pending_dec);
+	}
 
 	list_for_each_entry(key, &sdata->key_list, list) {
 		increment_tailroom_need_count(sdata);
@@ -649,6 +684,22 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
 	mutex_unlock(&sdata->local->key_mtx);
 }
 
+void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_sub_if_data *vlan;
+
+	mutex_lock(&sdata->local->key_mtx);
+
+	sdata->crypto_tx_tailroom_needed_cnt = 0;
+
+	if (sdata->vif.type == NL80211_IFTYPE_AP) {
+		list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+			vlan->crypto_tx_tailroom_needed_cnt = 0;
+	}
+
+	mutex_unlock(&sdata->local->key_mtx);
+}
+
 void ieee80211_iter_keys(struct ieee80211_hw *hw,
 			 struct ieee80211_vif *vif,
 			 void (*iter)(struct ieee80211_hw *hw,
@@ -688,8 +739,8 @@ static void ieee80211_free_keys_iface(struct ieee80211_sub_if_data *sdata,
 {
 	struct ieee80211_key *key, *tmp;
 
-	sdata->crypto_tx_tailroom_needed_cnt -=
-		sdata->crypto_tx_tailroom_pending_dec;
+	decrease_tailroom_need_count(sdata,
+				     sdata->crypto_tx_tailroom_pending_dec);
 	sdata->crypto_tx_tailroom_pending_dec = 0;
 
 	ieee80211_debugfs_key_remove_mgmt_default(sdata);
@@ -709,6 +760,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_sub_if_data *vlan;
+	struct ieee80211_sub_if_data *master;
 	struct ieee80211_key *key, *tmp;
 	LIST_HEAD(keys);
 
@@ -728,8 +780,20 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
 	list_for_each_entry_safe(key, tmp, &keys, list)
 		__ieee80211_key_destroy(key, false);
 
-	WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
-		     sdata->crypto_tx_tailroom_pending_dec);
+	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+		if (sdata->bss) {
+			master = container_of(sdata->bss,
+					      struct ieee80211_sub_if_data,
+					      u.ap);
+
+			WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt !=
+				     master->crypto_tx_tailroom_needed_cnt);
+		}
+	} else {
+		WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
+			     sdata->crypto_tx_tailroom_pending_dec);
+	}
+
 	if (sdata->vif.type == NL80211_IFTYPE_AP) {
 		list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
 			WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt ||
@@ -793,8 +857,8 @@ void ieee80211_delayed_tailroom_dec(struct work_struct *wk)
 	 */
 
 	mutex_lock(&sdata->local->key_mtx);
-	sdata->crypto_tx_tailroom_needed_cnt -=
-		sdata->crypto_tx_tailroom_pending_dec;
+	decrease_tailroom_need_count(sdata,
+				     sdata->crypto_tx_tailroom_pending_dec);
 	sdata->crypto_tx_tailroom_pending_dec = 0;
 	mutex_unlock(&sdata->local->key_mtx);
 }
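The reason the AP_VLAN counters must mirror the AP in the key.c/iface.c
changes above: the TX path reserves skb tailroom per transmitting interface,
but frames sent on an AP_VLAN can use keys installed on the AP interface, so a
VLAN whose own counter stayed at zero would skip the reservation and software
crypto could write past the end of the skb. A rough sketch of that consumer
side is shown below (a hypothetical helper; the real check lives in mac80211's
TX skb-resize path, and struct ieee80211_sub_if_data comes from mac80211's
internal ieee80211_i.h):

#include <linux/skbuff.h>
/* assumes mac80211's internal ieee80211_i.h for ieee80211_sub_if_data */

/*
 * Hypothetical, simplified consumer of crypto_tx_tailroom_needed_cnt:
 * if any key on this interface may need software MMIC/ICV tailroom,
 * make sure the skb has room at the end before handing it on.
 */
static int tx_reserve_crypto_tailroom(struct ieee80211_sub_if_data *sdata,
				      struct sk_buff *skb, int tailroom)
{
	if (!sdata->crypto_tx_tailroom_needed_cnt)
		return 0;	/* all keys offloaded, nothing to reserve */

	if (skb_tailroom(skb) >= tailroom)
		return 0;

	/* reallocate with extra tailroom for the software MIC/ICV */
	return pskb_expand_head(skb, 0, tailroom - skb_tailroom(skb),
				GFP_ATOMIC);
}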
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index c5a31835be0e..96557dd1e77d 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -161,6 +161,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
 void ieee80211_free_sta_keys(struct ieee80211_local *local,
 			     struct sta_info *sta);
 void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata);
+void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata);
 
 #define key_mtx_dereference(local, ref) \
 	rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx)))
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 260eed45b6d2..5793f75c5ffd 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2121,7 +2121,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
 		/* deliver to local stack */
 		skb->protocol = eth_type_trans(skb, dev);
 		memset(skb->cb, 0, sizeof(skb->cb));
-		if (rx->local->napi)
+		if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) &&
+		    rx->local->napi)
 			napi_gro_receive(rx->local->napi, skb);
 		else
 			netif_receive_skb(skb);
@@ -3231,7 +3232,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
 		/* This is OK -- must be QoS data frame */
 		.security_idx = tid,
 		.seqno_idx = tid,
-		.flags = 0,
+		.flags = IEEE80211_RX_REORDER_TIMER,
 	};
 	struct tid_ampdu_rx *tid_agg_rx;
 
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 79412f16b61d..b864ebc6ab8f 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2023,6 +2023,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
 
 	/* add back keys */
 	list_for_each_entry(sdata, &local->interfaces, list)
+		ieee80211_reset_crypto_tx_tailroom(sdata);
+
+	list_for_each_entry(sdata, &local->interfaces, list)
 		if (ieee80211_sdata_running(sdata))
 			ieee80211_enable_keys(sdata);
 
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index ad9eed70bc8f..1e1c89e51a11 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -815,10 +815,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 		if (dev->flags & IFF_UP)
 			dev_deactivate(dev);
 
-		if (new && new->ops->attach) {
-			new->ops->attach(new);
-			num_q = 0;
-		}
+		if (new && new->ops->attach)
+			goto skip;
 
 		for (i = 0; i < num_q; i++) {
 			struct netdev_queue *dev_queue = dev_ingress_queue(dev);
@@ -834,12 +832,16 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 				qdisc_destroy(old);
 		}
 
+skip:
 		if (!ingress) {
 			notify_and_destroy(net, skb, n, classid,
 					   dev->qdisc, new);
 			if (new && !new->ops->attach)
 				atomic_inc(&new->refcnt);
 			dev->qdisc = new ? : &noop_qdisc;
+
+			if (new && new->ops->attach)
+				new->ops->attach(new);
 		} else {
 			notify_and_destroy(net, skb, n, classid, old, new);
 		}
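The net effect of the sch_api.c change above: when the new root qdisc provides
->attach() (as mq-style qdiscs do), qdisc_graft() now publishes it in
dev->qdisc first and only then invokes ->attach(), rather than attaching
before dev->qdisc was updated. An illustrative skeleton of the new ordering (a
made-up helper name, with locking, ingress handling and refcounting omitted):

#include <linux/netdevice.h>
#include <net/sch_generic.h>

/* Illustrative skeleton only; not the real qdisc_graft(). */
static void graft_root_qdisc(struct net_device *dev, struct Qdisc *new)
{
	unsigned int i;

	/* qdiscs with ->attach() (e.g. mq) graft their own children later */
	if (!(new && new->ops->attach)) {
		for (i = 0; i < dev->num_tx_queues; i++) {
			struct Qdisc *old;

			old = dev_graft_qdisc(netdev_get_tx_queue(dev, i), new);
			qdisc_destroy(old);
		}
	}

	/* publish the new root before ->attach() looks at the device */
	dev->qdisc = new ? : &noop_qdisc;

	if (new && new->ops->attach)
		new->ops->attach(new);
}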
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 5266ea7b922b..06430598cf51 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1880,6 +1880,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
 		unix_state_unlock(sk);
 		timeo = freezable_schedule_timeout(timeo);
 		unix_state_lock(sk);
+
+		if (sock_flag(sk, SOCK_DEAD))
+			break;
+
 		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
 	}
 
@@ -1939,6 +1943,10 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
 		struct sk_buff *skb, *last;
 
 		unix_state_lock(sk);
+		if (sock_flag(sk, SOCK_DEAD)) {
+			err = -ECONNRESET;
+			goto unlock;
+		}
 		last = skb = skb_peek(&sk->sk_receive_queue);
 again:
 		if (skb == NULL) {