Diffstat (limited to 'net')
-rw-r--r--  net/batman-adv/gateway_client.c      6
-rw-r--r--  net/batman-adv/translation-table.c   1
-rw-r--r--  net/core/dev.c                      11
-rw-r--r--  net/core/dst.c                      10
-rw-r--r--  net/core/sock.c                      1
-rw-r--r--  net/ipv4/fib_trie.c                  2
-rw-r--r--  net/ipv4/ip_output.c                 8
-rw-r--r--  net/ipv4/route.c                     4
-rw-r--r--  net/ipv4/tcp.c                       4
-rw-r--r--  net/ipv4/tcp_cong.c                  3
-rw-r--r--  net/ipv4/tcp_input.c                 4
-rw-r--r--  net/ipv4/tcp_ipv4.c                 14
-rw-r--r--  net/ipv4/tcp_metrics.c              12
-rw-r--r--  net/ipv4/tcp_minisocks.c             2
-rw-r--r--  net/ipv4/tcp_output.c               23
-rw-r--r--  net/ipv4/udp.c                       2
-rw-r--r--  net/ipv6/tcp_ipv6.c                 28
-rw-r--r--  net/llc/llc_station.c                6
-rw-r--r--  net/mac80211/mesh.c                  3
-rw-r--r--  net/mac80211/mlme.c                  2
-rw-r--r--  net/mac80211/scan.c                  3
-rw-r--r--  net/packet/af_packet.c               3
-rw-r--r--  net/sched/act_gact.c                14
-rw-r--r--  net/sched/act_ipt.c                  7
-rw-r--r--  net/sched/act_pedit.c                5
-rw-r--r--  net/sched/act_simple.c               5
-rw-r--r--  net/sched/sch_qfq.c                 95
-rw-r--r--  net/wireless/core.c                  5
-rw-r--r--  net/wireless/core.h                  1
-rw-r--r--  net/wireless/reg.c                  19
-rw-r--r--  net/wireless/util.c                  2
-rw-r--r--  net/xfrm/xfrm_state.c               21
32 files changed, 246 insertions, 80 deletions
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index b421cc49d2cd..fc866f2e4528 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -200,11 +200,11 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
 	if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
 		goto out;
 
-	if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect))
-		goto out;
-
 	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
+	if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect) && curr_gw)
+		goto out;
+
 	next_gw = batadv_gw_get_best_gw_node(bat_priv);
 
 	if (curr_gw == next_gw)
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index a438f4b582fc..99dd8f75b3ff 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -197,6 +197,7 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
 del:
 		list_del(&entry->list);
 		kfree(entry);
+		kfree(tt_change_node);
 		event_removed = true;
 		goto unlock;
 	}
diff --git a/net/core/dev.c b/net/core/dev.c
index 0cb3fe8d8e72..a39354ee1432 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1055,6 +1055,8 @@ rollback:
  */
 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
 {
+	char *new_ifalias;
+
 	ASSERT_RTNL();
 
 	if (len >= IFALIASZ)
@@ -1068,9 +1070,10 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
 		return 0;
 	}
 
-	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
-	if (!dev->ifalias)
+	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
+	if (!new_ifalias)
 		return -ENOMEM;
+	dev->ifalias = new_ifalias;
 
 	strlcpy(dev->ifalias, alias, len+1);
 	return len;
@@ -2134,6 +2137,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
 	__be16 protocol = skb->protocol;
 	netdev_features_t features = skb->dev->features;
 
+	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
+		features &= ~NETIF_F_GSO_MASK;
+
 	if (protocol == htons(ETH_P_8021Q)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
@@ -5986,6 +5992,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 	dev_net_set(dev, &init_net);
 
 	dev->gso_max_size = GSO_MAX_SIZE;
+	dev->gso_max_segs = GSO_MAX_SEGS;
 
 	INIT_LIST_HEAD(&dev->napi_list);
 	INIT_LIST_HEAD(&dev->unreg_list);
diff --git a/net/core/dst.c b/net/core/dst.c
index 069d51d29414..56d63612e1e4 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -149,7 +149,15 @@ int dst_discard(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(dst_discard);
 
-const u32 dst_default_metrics[RTAX_MAX];
+const u32 dst_default_metrics[RTAX_MAX + 1] = {
+	/* This initializer is needed to force linker to place this variable
+	 * into const section. Otherwise it might end into bss section.
+	 * We really want to avoid false sharing on this variable, and catch
+	 * any writes on it.
+	 */
+	[RTAX_MAX] = 0xdeadbeef,
+};
+
 
 void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
 		int initial_ref, int initial_obsolete, unsigned short flags)
diff --git a/net/core/sock.c b/net/core/sock.c
index 6b654b3ddfda..8f67ced8d6a8 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1458,6 +1458,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 		} else {
 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
 			sk->sk_gso_max_size = dst->dev->gso_max_size;
+			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
 		}
 	}
 }
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index f0cdb30921c0..57bd978483e1 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -367,7 +367,7 @@ static void __leaf_free_rcu(struct rcu_head *head)
 
 static inline void free_leaf(struct leaf *l)
 {
-	call_rcu_bh(&l->rcu, __leaf_free_rcu);
+	call_rcu(&l->rcu, __leaf_free_rcu);
 }
 
 static inline void free_leaf_info(struct leaf_info *leaf)
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index ba39a52d18c1..147ccc3e93db 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -197,7 +197,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
 	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
 	if (unlikely(!neigh))
 		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
-	if (neigh) {
+	if (!IS_ERR(neigh)) {
 		int res = dst_neigh_output(dst, neigh, skb);
 
 		rcu_read_unlock_bh();
@@ -1366,9 +1366,8 @@ out:
 	return skb;
 }
 
-int ip_send_skb(struct sk_buff *skb)
+int ip_send_skb(struct net *net, struct sk_buff *skb)
 {
-	struct net *net = sock_net(skb->sk);
 	int err;
 
 	err = ip_local_out(skb);
@@ -1391,7 +1390,7 @@ int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
 		return 0;
 
 	/* Netfilter gets whole the not fragmented skb. */
-	return ip_send_skb(skb);
+	return ip_send_skb(sock_net(sk), skb);
 }
 
 /*
@@ -1536,6 +1535,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
 			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
 								arg->csum));
 		nskb->ip_summed = CHECKSUM_NONE;
+		skb_orphan(nskb);
 		skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
 		ip_push_pending_frames(sk, &fl4);
 	}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index c035251beb07..e4ba974f143c 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -70,7 +70,6 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
-#include <linux/bootmem.h>
 #include <linux/string.h>
 #include <linux/socket.h>
 #include <linux/sockios.h>
@@ -80,7 +79,6 @@
 #include <linux/netdevice.h>
 #include <linux/proc_fs.h>
 #include <linux/init.h>
-#include <linux/workqueue.h>
 #include <linux/skbuff.h>
 #include <linux/inetdevice.h>
 #include <linux/igmp.h>
@@ -88,11 +86,9 @@
 #include <linux/mroute.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/random.h>
-#include <linux/jhash.h>
 #include <linux/rcupdate.h>
 #include <linux/times.h>
 #include <linux/slab.h>
-#include <linux/prefetch.h>
 #include <net/dst.h>
 #include <net/net_namespace.h>
 #include <net/protocol.h>
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e7e6eeae49c0..2109ff4a1daf 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -811,7 +811,9 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 			     old_size_goal + mss_now > xmit_size_goal)) {
 			xmit_size_goal = old_size_goal;
 		} else {
-			tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
+			tp->xmit_size_goal_segs =
+				min_t(u16, xmit_size_goal / mss_now,
+				      sk->sk_gso_max_segs);
 			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
 		}
 	}
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 4d4db16e336e..1432cdb0644c 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -291,7 +291,8 @@ bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
 	left = tp->snd_cwnd - in_flight;
 	if (sk_can_gso(sk) &&
 	    left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
-	    left * tp->mss_cache < sk->sk_gso_max_size)
+	    left * tp->mss_cache < sk->sk_gso_max_size &&
+	    left < sk->sk_gso_max_segs)
 		return true;
 	return left <= tcp_max_tso_deferred_mss(tp);
 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2fd2bc9e3c64..85308b90df80 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5392,6 +5392,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
+	if (unlikely(sk->sk_rx_dst == NULL))
+		inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
 	/*
 	 *	Header prediction.
 	 *	The code loosely follows the one in the famous
@@ -5605,7 +5607,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
 	tcp_set_state(sk, TCP_ESTABLISHED);
 
 	if (skb != NULL) {
-		inet_sk_rx_dst_set(sk, skb);
+		icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
 		security_inet_conn_established(sk, skb);
 	}
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 42b2a6a73092..767823764016 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1627,9 +1627,6 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 				sk->sk_rx_dst = NULL;
 			}
 		}
-		if (unlikely(sk->sk_rx_dst == NULL))
-			inet_sk_rx_dst_set(sk, skb);
-
 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
 			rsk = sk;
 			goto reset;
@@ -1872,10 +1869,21 @@ static struct timewait_sock_ops tcp_timewait_sock_ops = {
 	.twsk_destructor= tcp_twsk_destructor,
 };
 
+void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+
+	dst_hold(dst);
+	sk->sk_rx_dst = dst;
+	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+}
+EXPORT_SYMBOL(inet_sk_rx_dst_set);
+
 const struct inet_connection_sock_af_ops ipv4_specific = {
 	.queue_xmit	   = ip_queue_xmit,
 	.send_check	   = tcp_v4_send_check,
 	.rebuild_header	   = inet_sk_rebuild_header,
+	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
 	.conn_request	   = tcp_v4_conn_request,
 	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
 	.net_header_len	   = sizeof(struct iphdr),
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 2288a6399e1e..0abe67bb4d3a 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -731,6 +731,18 @@ static int __net_init tcp_net_metrics_init(struct net *net)
 
 static void __net_exit tcp_net_metrics_exit(struct net *net)
 {
+	unsigned int i;
+
+	for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) {
+		struct tcp_metrics_block *tm, *next;
+
+		tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
+		while (tm) {
+			next = rcu_dereference_protected(tm->tcpm_next, 1);
+			kfree(tm);
+			tm = next;
+		}
+	}
 	kfree(net->ipv4.tcp_metrics_hash);
 }
 
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 232a90c3ec86..d9c9dcef2de3 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -387,7 +387,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 		struct tcp_sock *oldtp = tcp_sk(sk);
 		struct tcp_cookie_values *oldcvp = oldtp->cookie_values;
 
-		inet_sk_rx_dst_set(newsk, skb);
+		newicsk->icsk_af_ops->sk_rx_dst_set(newsk, skb);
 
 		/* TCP Cookie Transactions require space for the cookie pair,
 		 * as it differs for each connection. There is no need to
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3f1bcff0b10b..20dfd892c86f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -940,7 +940,7 @@ void __init tcp_tasklet_init(void)
  * We cant xmit new skbs from this context, as we might already
  * hold qdisc lock.
  */
-void tcp_wfree(struct sk_buff *skb)
+static void tcp_wfree(struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -1522,21 +1522,21 @@ static void tcp_cwnd_validate(struct sock *sk)
  * when we would be allowed to send the split-due-to-Nagle skb fully.
  */
 static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
-					unsigned int mss_now, unsigned int cwnd)
+					unsigned int mss_now, unsigned int max_segs)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
-	u32 needed, window, cwnd_len;
+	u32 needed, window, max_len;
 
 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
-	cwnd_len = mss_now * cwnd;
+	max_len = mss_now * max_segs;
 
-	if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
-		return cwnd_len;
+	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
+		return max_len;
 
 	needed = min(skb->len, window);
 
-	if (cwnd_len <= needed)
-		return cwnd_len;
+	if (max_len <= needed)
+		return max_len;
 
 	return needed - needed % mss_now;
 }
@@ -1765,7 +1765,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 	limit = min(send_win, cong_win);
 
 	/* If a full-sized TSO skb can be sent, do it. */
-	if (limit >= sk->sk_gso_max_size)
+	if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
+			   sk->sk_gso_max_segs * tp->mss_cache))
 		goto send_now;
 
 	/* Middle in queue won't get any more data, full sendable already? */
@@ -1999,7 +2000,9 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		limit = mss_now;
 		if (tso_segs > 1 && !tcp_urg_mode(tp))
 			limit = tcp_mss_split_point(sk, skb, mss_now,
-						    cwnd_quota);
+						    min_t(unsigned int,
+							  cwnd_quota,
+							  sk->sk_gso_max_segs));
 
 		if (skb->len > limit &&
 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index b4c3582a991f..6f6d1aca3c3d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -758,7 +758,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
 		uh->check = CSUM_MANGLED_0;
 
 send:
-	err = ip_send_skb(skb);
+	err = ip_send_skb(sock_net(sk), skb);
 	if (err) {
 		if (err == -ENOBUFS && !inet->recverr) {
 			UDP_INC_STATS_USER(sock_net(sk),
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index c66b90f71c9b..bb9ce2b2f377 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1447,7 +1447,17 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+		struct dst_entry *dst = sk->sk_rx_dst;
+
 		sock_rps_save_rxhash(sk, skb);
+		if (dst) {
+			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
+			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
+				dst_release(dst);
+				sk->sk_rx_dst = NULL;
+			}
+		}
+
 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
 			goto reset;
 		if (opt_skb)
@@ -1705,9 +1715,9 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
 		struct dst_entry *dst = sk->sk_rx_dst;
 		struct inet_sock *icsk = inet_sk(sk);
 		if (dst)
-			dst = dst_check(dst, 0);
+			dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
 		if (dst &&
-		    icsk->rx_dst_ifindex == inet6_iif(skb))
+		    icsk->rx_dst_ifindex == skb->skb_iif)
 			skb_dst_set_noref(skb, dst);
 	}
 }
@@ -1719,10 +1729,23 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = {
 	.twsk_destructor= tcp_twsk_destructor,
 };
 
+static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+	const struct rt6_info *rt = (const struct rt6_info *)dst;
+
+	dst_hold(dst);
+	sk->sk_rx_dst = dst;
+	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+	if (rt->rt6i_node)
+		inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
+}
+
 static const struct inet_connection_sock_af_ops ipv6_specific = {
 	.queue_xmit	   = inet6_csk_xmit,
 	.send_check	   = tcp_v6_send_check,
 	.rebuild_header	   = inet6_sk_rebuild_header,
+	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
 	.conn_request	   = tcp_v6_conn_request,
 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
 	.net_header_len	   = sizeof(struct ipv6hdr),
@@ -1754,6 +1777,7 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
 	.queue_xmit	   = ip_queue_xmit,
 	.send_check	   = tcp_v4_send_check,
 	.rebuild_header	   = inet_sk_rebuild_header,
+	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
 	.conn_request	   = tcp_v6_conn_request,
 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
 	.net_header_len	   = sizeof(struct iphdr),
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index 39a8d8924b9c..6828e39ec2ec 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -268,7 +268,7 @@ static int llc_station_ac_send_null_dsap_xid_c(struct sk_buff *skb)
 out:
 	return rc;
 free:
-	kfree_skb(skb);
+	kfree_skb(nskb);
 	goto out;
 }
 
@@ -293,7 +293,7 @@ static int llc_station_ac_send_xid_r(struct sk_buff *skb)
 out:
 	return rc;
 free:
-	kfree_skb(skb);
+	kfree_skb(nskb);
 	goto out;
 }
 
@@ -322,7 +322,7 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb)
 out:
 	return rc;
 free:
-	kfree_skb(skb);
+	kfree_skb(nskb);
 	goto out;
 }
 
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 6fac18c0423f..85572353a7e3 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -622,6 +622,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
 
 	del_timer_sync(&sdata->u.mesh.housekeeping_timer);
 	del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
+	del_timer_sync(&sdata->u.mesh.mesh_path_timer);
 	/*
 	 * If the timer fired while we waited for it, it will have
 	 * requeued the work. Now the work will be running again
@@ -634,6 +635,8 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
 	local->fif_other_bss--;
 	atomic_dec(&local->iff_allmultis);
 	ieee80211_configure_filter(local);
+
+	sdata->u.mesh.timers_running = 0;
 }
 
 static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index cef0c9e79aba..a4a5acdbaa4d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1430,6 +1430,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 	del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
 	del_timer_sync(&sdata->u.mgd.timer);
 	del_timer_sync(&sdata->u.mgd.chswitch_timer);
+
+	sdata->u.mgd.timers_running = 0;
 }
 
 void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index bcaee5d12839..839dd9737989 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -299,7 +299,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
 	if (local->scan_req != local->int_scan_req)
 		cfg80211_scan_done(local->scan_req, aborted);
 	local->scan_req = NULL;
-	local->scan_sdata = NULL;
+	rcu_assign_pointer(local->scan_sdata, NULL);
 
 	local->scanning = 0;
 	local->scan_channel = NULL;
@@ -984,7 +984,6 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata)
 			kfree(local->sched_scan_ies.ie[i]);
 
 		drv_sched_scan_stop(local, sdata);
-		rcu_assign_pointer(local->sched_scan_sdata, NULL);
 	}
 out:
 	mutex_unlock(&local->mtx);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ceaca7c134a0..8ac890a1a4c0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1079,7 +1079,7 @@ static void *packet_current_rx_frame(struct packet_sock *po,
 	default:
 		WARN(1, "TPACKET version not supported\n");
 		BUG();
-		return 0;
+		return NULL;
 	}
 }
 
@@ -1936,7 +1936,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
 
 	if (likely(po->tx_ring.pg_vec)) {
 		ph = skb_shinfo(skb)->destructor_arg;
-		BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
 		BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
 		atomic_dec(&po->tx_ring.pending);
 		__packet_set_status(po, ph, TP_STATUS_AVAILABLE);
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index f10fb8256442..05d60859d8e3 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -67,6 +67,9 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
 	struct tcf_common *pc;
 	int ret = 0;
 	int err;
+#ifdef CONFIG_GACT_PROB
+	struct tc_gact_p *p_parm = NULL;
+#endif
 
 	if (nla == NULL)
 		return -EINVAL;
@@ -82,6 +85,12 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
 #ifndef CONFIG_GACT_PROB
 	if (tb[TCA_GACT_PROB] != NULL)
 		return -EOPNOTSUPP;
+#else
+	if (tb[TCA_GACT_PROB]) {
+		p_parm = nla_data(tb[TCA_GACT_PROB]);
+		if (p_parm->ptype >= MAX_RAND)
+			return -EINVAL;
+	}
 #endif
 
 	pc = tcf_hash_check(parm->index, a, bind, &gact_hash_info);
@@ -103,8 +112,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
 	spin_lock_bh(&gact->tcf_lock);
 	gact->tcf_action = parm->action;
 #ifdef CONFIG_GACT_PROB
-	if (tb[TCA_GACT_PROB] != NULL) {
-		struct tc_gact_p *p_parm = nla_data(tb[TCA_GACT_PROB]);
+	if (p_parm) {
 		gact->tcfg_paction = p_parm->paction;
 		gact->tcfg_pval = p_parm->pval;
 		gact->tcfg_ptype = p_parm->ptype;
@@ -133,7 +141,7 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
 
 	spin_lock(&gact->tcf_lock);
 #ifdef CONFIG_GACT_PROB
-	if (gact->tcfg_ptype && gact_rand[gact->tcfg_ptype] != NULL)
+	if (gact->tcfg_ptype)
 		action = gact_rand[gact->tcfg_ptype](gact);
 	else
 		action = gact->tcf_action;
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 60e281ad0f07..58fb3c7aab9e 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -185,7 +185,12 @@ err3:
 err2:
 	kfree(tname);
 err1:
-	kfree(pc);
+	if (ret == ACT_P_CREATED) {
+		if (est)
+			gen_kill_estimator(&pc->tcfc_bstats,
+					   &pc->tcfc_rate_est);
+		kfree_rcu(pc, tcfc_rcu);
+	}
 	return err;
 }
 
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 26aa2f6ce257..45c53ab067a6 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -74,7 +74,10 @@ static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est,
 	p = to_pedit(pc);
 	keys = kmalloc(ksize, GFP_KERNEL);
 	if (keys == NULL) {
-		kfree(pc);
+		if (est)
+			gen_kill_estimator(&pc->tcfc_bstats,
+					   &pc->tcfc_rate_est);
+		kfree_rcu(pc, tcfc_rcu);
 		return -ENOMEM;
 	}
 	ret = ACT_P_CREATED;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 3922f2a2821b..3714f60f0b3c 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -131,7 +131,10 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
 	d = to_defact(pc);
 	ret = alloc_defdata(d, defdata);
 	if (ret < 0) {
-		kfree(pc);
+		if (est)
+			gen_kill_estimator(&pc->tcfc_bstats,
+					   &pc->tcfc_rate_est);
+		kfree_rcu(pc, tcfc_rcu);
 		return ret;
 	}
 	d->tcf_action = parm->action;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 9af01f3df18c..e4723d31fdd5 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -203,6 +203,34 @@ out:
 	return index;
 }
 
+/* Length of the next packet (0 if the queue is empty). */
+static unsigned int qdisc_peek_len(struct Qdisc *sch)
+{
+	struct sk_buff *skb;
+
+	skb = sch->ops->peek(sch);
+	return skb ? qdisc_pkt_len(skb) : 0;
+}
+
+static void qfq_deactivate_class(struct qfq_sched *, struct qfq_class *);
+static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
+			       unsigned int len);
+
+static void qfq_update_class_params(struct qfq_sched *q, struct qfq_class *cl,
+				    u32 lmax, u32 inv_w, int delta_w)
+{
+	int i;
+
+	/* update qfq-specific data */
+	cl->lmax = lmax;
+	cl->inv_w = inv_w;
+	i = qfq_calc_index(cl->inv_w, cl->lmax);
+
+	cl->grp = &q->groups[i];
+
+	q->wsum += delta_w;
+}
+
 static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 			    struct nlattr **tca, unsigned long *arg)
 {
@@ -250,6 +278,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 		lmax = 1UL << QFQ_MTU_SHIFT;
 
 	if (cl != NULL) {
+		bool need_reactivation = false;
+
 		if (tca[TCA_RATE]) {
 			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
 						    qdisc_root_sleeping_lock(sch),
@@ -258,12 +288,29 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 				return err;
 		}
 
-		if (inv_w != cl->inv_w) {
-			sch_tree_lock(sch);
-			q->wsum += delta_w;
-			cl->inv_w = inv_w;
-			sch_tree_unlock(sch);
+		if (lmax == cl->lmax && inv_w == cl->inv_w)
+			return 0; /* nothing to update */
+
+		i = qfq_calc_index(inv_w, lmax);
+		sch_tree_lock(sch);
+		if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) {
+			/*
+			 * shift cl->F back, to not charge the
+			 * class for the not-yet-served head
+			 * packet
+			 */
+			cl->F = cl->S;
+			/* remove class from its slot in the old group */
+			qfq_deactivate_class(q, cl);
+			need_reactivation = true;
 		}
+
+		qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
+
+		if (need_reactivation) /* activate in new group */
+			qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
+		sch_tree_unlock(sch);
+
 		return 0;
 	}
 
@@ -273,11 +320,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
 	cl->refcnt = 1;
 	cl->common.classid = classid;
-	cl->lmax = lmax;
-	cl->inv_w = inv_w;
-	i = qfq_calc_index(cl->inv_w, cl->lmax);
 
-	cl->grp = &q->groups[i];
+	qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
 
 	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
 				      &pfifo_qdisc_ops, classid);
@@ -294,7 +338,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 			return err;
 		}
 	}
-	q->wsum += weight;
 
 	sch_tree_lock(sch);
 	qdisc_class_hash_insert(&q->clhash, &cl->common);
@@ -711,15 +754,6 @@ static void qfq_update_eligible(struct qfq_sched *q, u64 old_V)
 	}
 }
 
-/* What is length of next packet in queue (0 if queue is empty) */
-static unsigned int qdisc_peek_len(struct Qdisc *sch)
-{
-	struct sk_buff *skb;
-
-	skb = sch->ops->peek(sch);
-	return skb ? qdisc_pkt_len(skb) : 0;
-}
-
 /*
  * Updates the class, returns true if also the group needs to be updated.
  */
@@ -843,11 +877,8 @@ static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
 static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
-	struct qfq_group *grp;
 	struct qfq_class *cl;
 	int err;
-	u64 roundedS;
-	int s;
 
 	cl = qfq_classify(skb, sch, &err);
 	if (cl == NULL) {
@@ -876,11 +907,25 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return err;
 
 	/* If reach this point, queue q was idle */
-	grp = cl->grp;
+	qfq_activate_class(q, cl, qdisc_pkt_len(skb));
+
+	return err;
+}
+
+/*
+ * Handle class switch from idle to backlogged.
+ */
+static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
+			       unsigned int pkt_len)
+{
+	struct qfq_group *grp = cl->grp;
+	u64 roundedS;
+	int s;
+
 	qfq_update_start(q, cl);
 
 	/* compute new finish time and rounded start. */
-	cl->F = cl->S + (u64)qdisc_pkt_len(skb) * cl->inv_w;
+	cl->F = cl->S + (u64)pkt_len * cl->inv_w;
 	roundedS = qfq_round_down(cl->S, grp->slot_shift);
 
 	/*
@@ -917,8 +962,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 skip_update:
 	qfq_slot_insert(grp, cl, roundedS);
-
-	return err;
 }
 
 
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 31b40cc4a9c3..dcd64d5b07aa 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -952,6 +952,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
 		 */
 		synchronize_rcu();
 		INIT_LIST_HEAD(&wdev->list);
+		/*
+		 * Ensure that all events have been processed and
+		 * freed.
+		 */
+		cfg80211_process_wdev_events(wdev);
 		break;
 	case NETDEV_PRE_UP:
 		if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 5206c6844fd7..bc7430b54771 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -426,6 +426,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
 			  struct net_device *dev, enum nl80211_iftype ntype,
 			  u32 *flags, struct vif_params *params);
 void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
+void cfg80211_process_wdev_events(struct wireless_dev *wdev);
 
 int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
 				 struct wireless_dev *wdev,
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2303ee73b50a..2ded3c7fad06 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -680,6 +680,8 @@ static u32 map_regdom_flags(u32 rd_flags)
 		channel_flags |= IEEE80211_CHAN_NO_IBSS;
 	if (rd_flags & NL80211_RRF_DFS)
 		channel_flags |= IEEE80211_CHAN_RADAR;
+	if (rd_flags & NL80211_RRF_NO_OFDM)
+		channel_flags |= IEEE80211_CHAN_NO_OFDM;
 	return channel_flags;
 }
 
@@ -901,7 +903,21 @@ static void handle_channel(struct wiphy *wiphy,
 	chan->max_antenna_gain = min(chan->orig_mag,
 		(int) MBI_TO_DBI(power_rule->max_antenna_gain));
 	chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp);
-	chan->max_power = min(chan->max_power, chan->max_reg_power);
+	if (chan->orig_mpwr) {
+		/*
+		 * Devices that have their own custom regulatory domain
+		 * but also use WIPHY_FLAG_STRICT_REGULATORY will follow the
+		 * passed country IE power settings.
+		 */
+		if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+		    wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
+		    wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY)
+			chan->max_power = chan->max_reg_power;
+		else
+			chan->max_power = min(chan->orig_mpwr,
+					      chan->max_reg_power);
+	} else
+		chan->max_power = chan->max_reg_power;
 }
 
 static void handle_band(struct wiphy *wiphy,
@@ -1885,6 +1901,7 @@ static void restore_custom_reg_settings(struct wiphy *wiphy)
 			chan->flags = chan->orig_flags;
 			chan->max_antenna_gain = chan->orig_mag;
 			chan->max_power = chan->orig_mpwr;
+			chan->beacon_found = false;
 		}
 	}
 }
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 26f8cd30f712..994e2f0cc7a8 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -735,7 +735,7 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
 	wdev->connect_keys = NULL;
 }
 
-static void cfg80211_process_wdev_events(struct wireless_dev *wdev)
+void cfg80211_process_wdev_events(struct wireless_dev *wdev)
 {
 	struct cfg80211_event *ev;
 	unsigned long flags;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 5b228f97d4b3..87cd0e4d4282 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -415,8 +415,17 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me)
 	if (x->lft.hard_add_expires_seconds) {
 		long tmo = x->lft.hard_add_expires_seconds +
 			x->curlft.add_time - now;
-		if (tmo <= 0)
-			goto expired;
+		if (tmo <= 0) {
+			if (x->xflags & XFRM_SOFT_EXPIRE) {
+				/* enter hard expire without soft expire first?!
+				 * setting a new date could trigger this.
+				 * workaround: fix x->curlft.add_time by below:
+				 */
+				x->curlft.add_time = now - x->saved_tmo - 1;
+				tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
+			} else
+				goto expired;
+		}
 		if (tmo < next)
 			next = tmo;
 	}
@@ -433,10 +442,14 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me)
 	if (x->lft.soft_add_expires_seconds) {
 		long tmo = x->lft.soft_add_expires_seconds +
 			x->curlft.add_time - now;
-		if (tmo <= 0)
+		if (tmo <= 0) {
 			warn = 1;
-		else if (tmo < next)
+			x->xflags &= ~XFRM_SOFT_EXPIRE;
+		} else if (tmo < next) {
 			next = tmo;
+			x->xflags |= XFRM_SOFT_EXPIRE;
+			x->saved_tmo = tmo;
+		}
 	}
 	if (x->lft.soft_use_expires_seconds) {
 		long tmo = x->lft.soft_use_expires_seconds +