-rw-r--r--  drivers/net/phy/phy.c               | 29
-rw-r--r--  include/linux/tcp.h                 |  2
-rw-r--r--  include/net/inet_connection_sock.h  |  5
-rw-r--r--  net/ipv4/route.c                    |  4
-rw-r--r--  net/ipv4/tcp.c                      | 11
-rw-r--r--  net/ipv4/tcp_fastopen.c             |  4
-rw-r--r--  net/ipv4/tcp_input.c                | 19
-rw-r--r--  net/ipv6/ip6_fib.c                  | 39
-rw-r--r--  net/ipv6/route.c                    | 14
-rw-r--r--  net/ipv6/udp.c                      |  4
-rw-r--r--  net/mac80211/wep.c                  |  6
-rw-r--r--  net/sched/cls_api.c                 |  5
12 files changed, 107 insertions(+), 35 deletions(-)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 710696d1af97..47cd578052fc 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -465,7 +465,7 @@ int phy_start_aneg(struct phy_device *phydev)
         if (err < 0)
                 goto out_unlock;
 
-        if (phydev->state != PHY_HALTED && phydev->state != PHY_RESUMING) {
+        if (phydev->state != PHY_HALTED) {
                 if (AUTONEG_ENABLE == phydev->autoneg) {
                         phydev->state = PHY_AN;
                         phydev->link_timeout = PHY_AN_TIMEOUT;
@@ -742,6 +742,9 @@ EXPORT_SYMBOL(phy_stop);
  */
 void phy_start(struct phy_device *phydev)
 {
+        bool do_resume = false;
+        int err = 0;
+
         mutex_lock(&phydev->lock);
 
         switch (phydev->state) {
@@ -752,11 +755,22 @@ void phy_start(struct phy_device *phydev)
                 phydev->state = PHY_UP;
                 break;
         case PHY_HALTED:
+                /* make sure interrupts are re-enabled for the PHY */
+                err = phy_enable_interrupts(phydev);
+                if (err < 0)
+                        break;
+
                 phydev->state = PHY_RESUMING;
+                do_resume = true;
+                break;
         default:
                 break;
         }
         mutex_unlock(&phydev->lock);
+
+        /* if phy was suspended, bring the physical link up again */
+        if (do_resume)
+                phy_resume(phydev);
 }
 EXPORT_SYMBOL(phy_start);
 
@@ -769,7 +783,7 @@ void phy_state_machine(struct work_struct *work)
         struct delayed_work *dwork = to_delayed_work(work);
         struct phy_device *phydev =
                         container_of(dwork, struct phy_device, state_queue);
-        bool needs_aneg = false, do_suspend = false, do_resume = false;
+        bool needs_aneg = false, do_suspend = false;
         int err = 0;
 
         mutex_lock(&phydev->lock);
@@ -888,14 +902,6 @@ void phy_state_machine(struct work_struct *work)
                 }
                 break;
         case PHY_RESUMING:
-                err = phy_clear_interrupt(phydev);
-                if (err)
-                        break;
-
-                err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
-                if (err)
-                        break;
-
                 if (AUTONEG_ENABLE == phydev->autoneg) {
                         err = phy_aneg_done(phydev);
                         if (err < 0)
@@ -933,7 +939,6 @@ void phy_state_machine(struct work_struct *work)
                         }
                         phydev->adjust_link(phydev->attached_dev);
                 }
-                do_resume = true;
                 break;
         }
 
@@ -943,8 +948,6 @@ void phy_state_machine(struct work_struct *work)
                 err = phy_start_aneg(phydev);
         else if (do_suspend)
                 phy_suspend(phydev);
-        else if (do_resume)
-                phy_resume(phydev);
 
         if (err < 0)
                 phy_error(phydev);
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 3b2911502a8c..e8bbf403618f 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -158,6 +158,8 @@ struct tcp_sock {
                                  * sum(delta(snd_una)), or how many bytes
                                  * were acked.
                                  */
+        struct u64_stats_sync syncp;   /* protects 64bit vars (cf tcp_get_info()) */
+
         u32     snd_una;        /* First byte we want an ack for */
         u32     snd_sml;        /* Last byte of the most recently transmitted small packet */
         u32     rcv_tstamp;     /* timestamp of last received ACK (for keepalives) */
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 48a815823587..497bc14cdb85 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -129,9 +129,10 @@ struct inet_connection_sock {
 
                 u32               probe_timestamp;
         } icsk_mtup;
-        u32                       icsk_ca_priv[16];
         u32                       icsk_user_timeout;
-#define ICSK_CA_PRIV_SIZE       (16 * sizeof(u32))
+
+        u64                       icsk_ca_priv[64 / sizeof(u64)];
+#define ICSK_CA_PRIV_SIZE       (8 * sizeof(u64))
 };
 
 #define ICSK_TIME_RETRANS       1       /* Retransmit timer */
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index bff62fc87b8e..f45f2a12f37b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -902,6 +902,10 @@ static int ip_error(struct sk_buff *skb)
         bool send;
         int code;
 
+        /* IP on this device is disabled. */
+        if (!in_dev)
+                goto out;
+
         net = dev_net(rt->dst.dev);
         if (!IN_DEV_FORWARD(in_dev)) {
                 switch (rt->dst.error) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 46efa03d2b11..f1377f2a0472 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -402,6 +402,7 @@ void tcp_init_sock(struct sock *sk)
         tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
         tp->snd_cwnd_clamp = ~0;
         tp->mss_cache = TCP_MSS_DEFAULT;
+        u64_stats_init(&tp->syncp);
 
         tp->reordering = sysctl_tcp_reordering;
         tcp_enable_early_retrans(tp);
@@ -2598,6 +2599,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
         const struct tcp_sock *tp = tcp_sk(sk);
         const struct inet_connection_sock *icsk = inet_csk(sk);
         u32 now = tcp_time_stamp;
+        unsigned int start;
         u32 rate;
 
         memset(info, 0, sizeof(*info));
@@ -2665,10 +2667,11 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
         rate = READ_ONCE(sk->sk_max_pacing_rate);
         info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;
 
-        spin_lock_bh(&sk->sk_lock.slock);
-        info->tcpi_bytes_acked = tp->bytes_acked;
-        info->tcpi_bytes_received = tp->bytes_received;
-        spin_unlock_bh(&sk->sk_lock.slock);
+        do {
+                start = u64_stats_fetch_begin_irq(&tp->syncp);
+                info->tcpi_bytes_acked = tp->bytes_acked;
+                info->tcpi_bytes_received = tp->bytes_received;
+        } while (u64_stats_fetch_retry_irq(&tp->syncp, start));
 }
 EXPORT_SYMBOL_GPL(tcp_get_info);
 
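The tcp.c and tcp_input.c hunks in this series pair u64_stats_update_begin()/u64_stats_update_end() on the writer side with a u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() retry loop on the reader side, so tcpi_bytes_acked and tcpi_bytes_received can be read without taking the socket spinlock. A minimal sketch of that pairing follows; the example_counters struct and the example_* helpers are illustrative only and are not part of the patch.

#include <linux/u64_stats_sync.h>

struct example_counters {
        u64 bytes;                      /* 64-bit counter; reads may tear on 32-bit */
        struct u64_stats_sync syncp;    /* seqcount on 32-bit, compiled away on 64-bit */
};

/* Writer side: called under the counter's normal update serialization. */
static void example_add(struct example_counters *c, u32 delta)
{
        u64_stats_update_begin(&c->syncp);
        c->bytes += delta;
        u64_stats_update_end(&c->syncp);
}

/* Reader side: lockless; retries the copy if a writer raced with it. */
static u64 example_read(const struct example_counters *c)
{
        unsigned int start;
        u64 val;

        do {
                start = u64_stats_fetch_begin_irq(&c->syncp);
                val = c->bytes;
        } while (u64_stats_fetch_retry_irq(&c->syncp, start));

        return val;
}

As the tcp_fastopen.c comment below notes, a writer that cannot change the upper 32 bits (the counter is still 0) may skip the update_begin/update_end pair.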
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 3c673d5e6cff..46b087a27503 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -206,6 +206,10 @@ static bool tcp_fastopen_create_child(struct sock *sk,
                 skb_set_owner_r(skb2, child);
                 __skb_queue_tail(&child->sk_receive_queue, skb2);
                 tp->syn_data_acked = 1;
+
+                /* u64_stats_update_begin(&tp->syncp) not needed here,
+                 * as we certainly are not changing upper 32bit value (0)
+                 */
                 tp->bytes_received = end_seq - TCP_SKB_CB(skb)->seq - 1;
         } else {
                 end_seq = TCP_SKB_CB(skb)->seq + 1;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index bc790ea9960f..c9ab964189a0 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2698,16 +2698,21 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
         struct tcp_sock *tp = tcp_sk(sk);
         bool recovered = !before(tp->snd_una, tp->high_seq);
 
+        if ((flag & FLAG_SND_UNA_ADVANCED) &&
+            tcp_try_undo_loss(sk, false))
+                return;
+
         if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
                 /* Step 3.b. A timeout is spurious if not all data are
                  * lost, i.e., never-retransmitted data are (s)acked.
                  */
-                if (tcp_try_undo_loss(sk, flag & FLAG_ORIG_SACK_ACKED))
+                if ((flag & FLAG_ORIG_SACK_ACKED) &&
+                    tcp_try_undo_loss(sk, true))
                         return;
 
-                if (after(tp->snd_nxt, tp->high_seq) &&
-                    (flag & FLAG_DATA_SACKED || is_dupack)) {
-                        tp->frto = 0; /* Loss was real: 2nd part of step 3.a */
+                if (after(tp->snd_nxt, tp->high_seq)) {
+                        if (flag & FLAG_DATA_SACKED || is_dupack)
+                                tp->frto = 0; /* Step 3.a. loss was real */
                 } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
                         tp->high_seq = tp->snd_nxt;
                         __tcp_push_pending_frames(sk, tcp_current_mss(sk),
@@ -2732,8 +2737,6 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
                 else if (flag & FLAG_SND_UNA_ADVANCED)
                         tcp_reset_reno_sack(tp);
         }
-        if (tcp_try_undo_loss(sk, false))
-                return;
         tcp_xmit_retransmit_queue(sk);
 }
 
@@ -3283,7 +3286,9 @@ static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
 {
         u32 delta = ack - tp->snd_una;
 
+        u64_stats_update_begin(&tp->syncp);
         tp->bytes_acked += delta;
+        u64_stats_update_end(&tp->syncp);
         tp->snd_una = ack;
 }
 
@@ -3292,7 +3297,9 @@ static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
 {
         u32 delta = seq - tp->rcv_nxt;
 
+        u64_stats_update_begin(&tp->syncp);
         tp->bytes_received += delta;
+        u64_stats_update_end(&tp->syncp);
         tp->rcv_nxt = seq;
 }
 
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 96dbffff5a24..bde57b113009 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -693,6 +693,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 {
         struct rt6_info *iter = NULL;
         struct rt6_info **ins;
+        struct rt6_info **fallback_ins = NULL;
         int replace = (info->nlh &&
                        (info->nlh->nlmsg_flags & NLM_F_REPLACE));
         int add = (!info->nlh ||
@@ -716,8 +717,13 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
                             (info->nlh->nlmsg_flags & NLM_F_EXCL))
                                 return -EEXIST;
                         if (replace) {
-                                found++;
-                                break;
+                                if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) {
+                                        found++;
+                                        break;
+                                }
+                                if (rt_can_ecmp)
+                                        fallback_ins = fallback_ins ?: ins;
+                                goto next_iter;
                         }
 
                         if (iter->dst.dev == rt->dst.dev &&
@@ -753,9 +759,17 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
                 if (iter->rt6i_metric > rt->rt6i_metric)
                         break;
 
+next_iter:
                 ins = &iter->dst.rt6_next;
         }
 
+        if (fallback_ins && !found) {
+                /* No ECMP-able route found, replace first non-ECMP one */
+                ins = fallback_ins;
+                iter = *ins;
+                found++;
+        }
+
         /* Reset round-robin state, if necessary */
         if (ins == &fn->leaf)
                 fn->rr_ptr = NULL;
@@ -815,6 +829,8 @@ add:
                 }
 
         } else {
+                int nsiblings;
+
                 if (!found) {
                         if (add)
                                 goto add;
@@ -835,8 +851,27 @@ add:
                         info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
                         fn->fn_flags |= RTN_RTINFO;
                 }
+                nsiblings = iter->rt6i_nsiblings;
                 fib6_purge_rt(iter, fn, info->nl_net);
                 rt6_release(iter);
+
+                if (nsiblings) {
+                        /* Replacing an ECMP route, remove all siblings */
+                        ins = &rt->dst.rt6_next;
+                        iter = *ins;
+                        while (iter) {
+                                if (rt6_qualify_for_ecmp(iter)) {
+                                        *ins = iter->dst.rt6_next;
+                                        fib6_purge_rt(iter, fn, info->nl_net);
+                                        rt6_release(iter);
+                                        nsiblings--;
+                                } else {
+                                        ins = &iter->dst.rt6_next;
+                                }
+                                iter = *ins;
+                        }
+                        WARN_ON(nsiblings != 0);
+                }
         }
 
         return 0;
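The sibling-removal loop added to fib6_add_rt2node() above walks the rt6_next chain through a pointer-to-pointer (ins), so a matching sibling can be unlinked without keeping a separate "previous" pointer. A small, self-contained userspace sketch of that idiom, with illustrative names that are not kernel code:

#include <stdlib.h>

struct node {
        int qualifies;          /* stand-in for rt6_qualify_for_ecmp() */
        struct node *next;
};

/* Unlink and free every node that qualifies, keeping the rest. */
static void remove_qualifying(struct node **ins)
{
        struct node *iter = *ins;

        while (iter) {
                if (iter->qualifies) {
                        *ins = iter->next;      /* bypass the node in place */
                        free(iter);
                } else {
                        ins = &iter->next;      /* advance to the next link field */
                }
                iter = *ins;
        }
}

Here ins always points at the link field that would have to be rewritten if the current entry were removed, which mirrors how the new nsiblings cleanup rewires the list.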
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index d3588885f097..c73ae5039e46 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2504,9 +2504,9 @@ static int ip6_route_multipath(struct fib6_config *cfg, int add)
         int attrlen;
         int err = 0, last_err = 0;
 
+        remaining = cfg->fc_mp_len;
 beginning:
         rtnh = (struct rtnexthop *)cfg->fc_mp;
-        remaining = cfg->fc_mp_len;
 
         /* Parse a Multipath Entry */
         while (rtnh_ok(rtnh, remaining)) {
@@ -2536,15 +2536,19 @@ beginning:
                                  * next hops that have been already added.
                                  */
                                 add = 0;
+                                remaining = cfg->fc_mp_len - remaining;
                                 goto beginning;
                         }
                 }
                 /* Because each route is added like a single route we remove
-                 * this flag after the first nexthop (if there is a collision,
-                 * we have already fail to add the first nexthop:
-                 * fib6_add_rt2node() has reject it).
+                 * these flags after the first nexthop: if there is a collision,
+                 * we have already failed to add the first nexthop:
+                 * fib6_add_rt2node() has rejected it; when replacing, old
+                 * nexthops have been replaced by first new, the rest should
+                 * be added to it.
                  */
-                cfg->fc_nlinfo.nlh->nlmsg_flags &= ~NLM_F_EXCL;
+                cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
+                                                     NLM_F_REPLACE);
                 rtnh = rtnh_next(rtnh, &remaining);
         }
 
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 3477c919fcc8..c2ec41617a35 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -731,7 +731,9 @@ static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
             (inet->inet_dport && inet->inet_dport != rmt_port) ||
             (!ipv6_addr_any(&sk->sk_v6_daddr) &&
              !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
-            (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+            (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) ||
+            (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
+             !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
                 return false;
         if (!inet6_mc_check(sk, loc_addr, rmt_addr))
                 return false;
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index a4220e92f0cc..efa3f48f1ec5 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -98,8 +98,7 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
 
         hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
 
-        if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN ||
-                    skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
+        if (WARN_ON(skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
                 return NULL;
 
         hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -167,6 +166,9 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local,
         size_t len;
         u8 rc4key[3 + WLAN_KEY_LEN_WEP104];
 
+        if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN))
+                return -1;
+
         iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx);
         if (!iv)
                 return -1;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index b6ef9a04de06..a75864d93142 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -81,6 +81,11 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
         struct tcf_proto_ops *t;
         int rc = -ENOENT;
 
+        /* Wait for outstanding call_rcu()s, if any, from a
+         * tcf_proto_ops's destroy() handler.
+         */
+        rcu_barrier();
+
         write_lock(&cls_mod_lock);
         list_for_each_entry(t, &tcf_proto_base, head) {
                 if (t == ops) {
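The rcu_barrier() added above closes a window where a classifier's destroy() path has queued call_rcu() callbacks that have not yet run when the ops are unregistered, typically just before module unload. A minimal sketch of the general pattern, with illustrative example_* names that are not part of the patch:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct example_filter {
        struct rcu_head rcu;
        /* ... filter state ... */
};

static void example_filter_free(struct rcu_head *head)
{
        kfree(container_of(head, struct example_filter, rcu));
}

/* Destroy path: defer the actual free past an RCU grace period. */
static void example_destroy(struct example_filter *f)
{
        call_rcu(&f->rcu, example_filter_free);
}

/* Unregister path: once no new call_rcu()s can be queued, rcu_barrier()
 * waits for all previously queued callbacks (whose code may live in the
 * module being removed) to finish before the ops go away.
 */
static void example_unregister(void)
{
        rcu_barrier();
        /* now safe to drop the ops and allow module unload */
}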