Diffstat (limited to 'net')
-rw-r--r--  net/bpf/test_run.c         |  45
-rw-r--r--  net/bridge/br_multicast.c  |   9
-rw-r--r--  net/compat.c               |   6
-rw-r--r--  net/dsa/port.c             |   7
-rw-r--r--  net/ipv4/esp4.c            |   2
-rw-r--r--  net/ipv4/ip_gre.c          |  33
-rw-r--r--  net/ipv4/tcp_output.c      |   1
-rw-r--r--  net/ipv4/udp.c             |   6
-rw-r--r--  net/ipv6/esp6.c            |   2
-rw-r--r--  net/ipv6/fou6.c            |   2
-rw-r--r--  net/ipv6/ip6_gre.c         |  39
-rw-r--r--  net/ipv6/route.c           |  32
-rw-r--r--  net/ipv6/udp.c             |  12
-rw-r--r--  net/ipv6/xfrm6_tunnel.c    |   2
-rw-r--r--  net/key/af_key.c           |  42
-rw-r--r--  net/mac80211/main.c        |   4
-rw-r--r--  net/mac80211/rx.c          |   7
-rw-r--r--  net/phonet/pep.c           |  32
-rw-r--r--  net/sctp/transport.c       |   3
-rw-r--r--  net/smc/smc.h              |   6
-rw-r--r--  net/tipc/socket.c          |  11
-rw-r--r--  net/unix/af_unix.c         |  57
-rw-r--r--  net/unix/diag.c            |   3
-rw-r--r--  net/x25/af_x25.c           |  13
-rw-r--r--  net/xdp/xsk.c              |  16
-rw-r--r--  net/xfrm/xfrm_interface.c  |   4
-rw-r--r--  net/xfrm/xfrm_policy.c     |   4
-rw-r--r--  net/xfrm/xfrm_state.c      |  30
-rw-r--r--  net/xfrm/xfrm_user.c       |   2
29 files changed, 246 insertions, 186 deletions
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index fa2644d276ef..e31e1b20f7f4 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -13,27 +13,13 @@
 #include <net/sock.h>
 #include <net/tcp.h>
 
-static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
-		struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
-{
-	u32 ret;
-
-	preempt_disable();
-	rcu_read_lock();
-	bpf_cgroup_storage_set(storage);
-	ret = BPF_PROG_RUN(prog, ctx);
-	rcu_read_unlock();
-	preempt_enable();
-
-	return ret;
-}
-
-static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
-			u32 *time)
+static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
+			u32 *retval, u32 *time)
 {
 	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
 	enum bpf_cgroup_storage_type stype;
 	u64 time_start, time_spent = 0;
+	int ret = 0;
 	u32 i;
 
 	for_each_cgroup_storage_type(stype) {
@@ -48,25 +34,42 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
 
 	if (!repeat)
 		repeat = 1;
+
+	rcu_read_lock();
+	preempt_disable();
 	time_start = ktime_get_ns();
 	for (i = 0; i < repeat; i++) {
-		*ret = bpf_test_run_one(prog, ctx, storage);
+		bpf_cgroup_storage_set(storage);
+		*retval = BPF_PROG_RUN(prog, ctx);
+
+		if (signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+
 		if (need_resched()) {
-			if (signal_pending(current))
-				break;
 			time_spent += ktime_get_ns() - time_start;
+			preempt_enable();
+			rcu_read_unlock();
+
 			cond_resched();
+
+			rcu_read_lock();
+			preempt_disable();
 			time_start = ktime_get_ns();
 		}
 	}
 	time_spent += ktime_get_ns() - time_start;
+	preempt_enable();
+	rcu_read_unlock();
+
 	do_div(time_spent, repeat);
 	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
 
 	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);
 
-	return 0;
+	return ret;
 }
 
 static int bpf_test_finish(const union bpf_attr *kattr,
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 3aeff0895669..ac92b2eb32b1 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1204,14 +1204,7 @@ static void br_multicast_query_received(struct net_bridge *br,
 		return;
 
 	br_multicast_update_query_timer(br, query, max_delay);
-
-	/* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
-	 * the arrival port for IGMP Queries where the source address
-	 * is 0.0.0.0 should not be added to router port list.
-	 */
-	if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
-	    saddr->proto == htons(ETH_P_IPV6))
-		br_multicast_mark_router(br, port);
+	br_multicast_mark_router(br, port);
 }
 
 static void br_ip4_multicast_query(struct net_bridge *br,
diff --git a/net/compat.c b/net/compat.c
index 959d1c51826d..3d348198004f 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -388,8 +388,12 @@ static int __compat_sys_setsockopt(int fd, int level, int optname,
 				   char __user *optval, unsigned int optlen)
 {
 	int err;
-	struct socket *sock = sockfd_lookup(fd, &err);
+	struct socket *sock;
+
+	if (optlen > INT_MAX)
+		return -EINVAL;
 
+	sock = sockfd_lookup(fd, &err);
 	if (sock) {
 		err = security_socket_setsockopt(sock, level, optname);
 		if (err) {
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 2d7e01b23572..2a2a878b5ce3 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -69,7 +69,6 @@ static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
 
 int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
 {
-	u8 stp_state = dp->bridge_dev ? BR_STATE_BLOCKING : BR_STATE_FORWARDING;
 	struct dsa_switch *ds = dp->ds;
 	int port = dp->index;
 	int err;
@@ -80,7 +79,8 @@ int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
 		return err;
 	}
 
-	dsa_port_set_state_now(dp, stp_state);
+	if (!dp->bridge_dev)
+		dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
 
 	return 0;
 }
@@ -90,7 +90,8 @@ void dsa_port_disable(struct dsa_port *dp, struct phy_device *phy)
 	struct dsa_switch *ds = dp->ds;
 	int port = dp->index;
 
-	dsa_port_set_state_now(dp, BR_STATE_DISABLED);
+	if (!dp->bridge_dev)
+		dsa_port_set_state_now(dp, BR_STATE_DISABLED);
 
 	if (ds->ops->port_disable)
 		ds->ops->port_disable(ds, port, phy);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 5459f41fc26f..10e809b296ec 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -328,7 +328,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
 			skb->len += tailen;
 			skb->data_len += tailen;
 			skb->truesize += tailen;
-			if (sk)
+			if (sk && sk_fullsock(sk))
 				refcount_add(tailen, &sk->sk_wmem_alloc);
 
 			goto out;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 3978f807fa8b..6ae89f2b541b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1457,9 +1457,23 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 	struct ip_tunnel_parm *p = &t->parms;
 	__be16 o_flags = p->o_flags;
 
-	if ((t->erspan_ver == 1 || t->erspan_ver == 2) &&
-	    !t->collect_md)
-		o_flags |= TUNNEL_KEY;
+	if (t->erspan_ver == 1 || t->erspan_ver == 2) {
+		if (!t->collect_md)
+			o_flags |= TUNNEL_KEY;
+
+		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
+			goto nla_put_failure;
+
+		if (t->erspan_ver == 1) {
+			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
+				goto nla_put_failure;
+		} else {
+			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
+				goto nla_put_failure;
+			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
+				goto nla_put_failure;
+		}
+	}
 
 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
 	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
@@ -1495,19 +1509,6 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 			goto nla_put_failure;
 	}
 
-	if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
-		goto nla_put_failure;
-
-	if (t->erspan_ver == 1) {
-		if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
-			goto nla_put_failure;
-	} else if (t->erspan_ver == 2) {
-		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
-			goto nla_put_failure;
-		if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
-			goto nla_put_failure;
-	}
-
 	return 0;
 
 nla_put_failure:
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 730bc44dbad9..ccc78f3a4b60 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2347,6 +2347,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 			/* "skb_mstamp_ns" is used as a start point for the retransmit timer */
 			skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache;
 			list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
+			tcp_init_tso_segs(skb, mss_now);
 			goto repair; /* Skip network transmission */
 		}
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5c3cd5d84a6f..372fdc5381a9 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -562,10 +562,12 @@ static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
 
 	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
 		int (*handler)(struct sk_buff *skb, u32 info);
+		const struct ip_tunnel_encap_ops *encap;
 
-		if (!iptun_encaps[i])
+		encap = rcu_dereference(iptun_encaps[i]);
+		if (!encap)
 			continue;
-		handler = rcu_dereference(iptun_encaps[i]->err_handler);
+		handler = encap->err_handler;
 		if (handler && !handler(skb, info))
 			return 0;
 	}
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 5afe9f83374d..239d4a65ad6e 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -296,7 +296,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
 			skb->len += tailen;
 			skb->data_len += tailen;
 			skb->truesize += tailen;
-			if (sk)
+			if (sk && sk_fullsock(sk))
 				refcount_add(tailen, &sk->sk_wmem_alloc);
 
 			goto out;
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c
index b858bd5280bf..867474abe269 100644
--- a/net/ipv6/fou6.c
+++ b/net/ipv6/fou6.c
@@ -72,7 +72,7 @@ static int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
 
 static int gue6_err_proto_handler(int proto, struct sk_buff *skb,
 				  struct inet6_skb_parm *opt,
-				  u8 type, u8 code, int offset, u32 info)
+				  u8 type, u8 code, int offset, __be32 info)
 {
 	const struct inet6_protocol *ipprot;
 
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 43890898b0b5..26f25b6e2833 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1722,6 +1722,9 @@ static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
 static void ip6erspan_set_version(struct nlattr *data[],
 				  struct __ip6_tnl_parm *parms)
 {
+	if (!data)
+		return;
+
 	parms->erspan_ver = 1;
 	if (data[IFLA_GRE_ERSPAN_VER])
 		parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
@@ -2104,9 +2107,23 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 	struct __ip6_tnl_parm *p = &t->parms;
 	__be16 o_flags = p->o_flags;
 
-	if ((p->erspan_ver == 1 || p->erspan_ver == 2) &&
-	    !p->collect_md)
-		o_flags |= TUNNEL_KEY;
+	if (p->erspan_ver == 1 || p->erspan_ver == 2) {
+		if (!p->collect_md)
+			o_flags |= TUNNEL_KEY;
+
+		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
+			goto nla_put_failure;
+
+		if (p->erspan_ver == 1) {
+			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
+				goto nla_put_failure;
+		} else {
+			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
+				goto nla_put_failure;
+			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
+				goto nla_put_failure;
+		}
+	}
 
 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
 	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
@@ -2121,8 +2138,7 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 	    nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
 	    nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
 	    nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
-	    nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark) ||
-	    nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
+	    nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
 		goto nla_put_failure;
 
 	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
@@ -2140,19 +2156,6 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 			goto nla_put_failure;
 	}
 
-	if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
-		goto nla_put_failure;
-
-	if (p->erspan_ver == 1) {
-		if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
-			goto nla_put_failure;
-	} else if (p->erspan_ver == 2) {
-		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
-			goto nla_put_failure;
-		if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
-			goto nla_put_failure;
-	}
-
 	return 0;
 
 nla_put_failure:
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 964491cf3672..ce15dc4ccbfa 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1274,18 +1274,29 @@ static DEFINE_SPINLOCK(rt6_exception_lock);
 static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
 				 struct rt6_exception *rt6_ex)
 {
+	struct fib6_info *from;
 	struct net *net;
 
 	if (!bucket || !rt6_ex)
 		return;
 
 	net = dev_net(rt6_ex->rt6i->dst.dev);
+	net->ipv6.rt6_stats->fib_rt_cache--;
+
+	/* purge completely the exception to allow releasing the held resources:
+	 * some [sk] cache may keep the dst around for unlimited time
+	 */
+	from = rcu_dereference_protected(rt6_ex->rt6i->from,
+					 lockdep_is_held(&rt6_exception_lock));
+	rcu_assign_pointer(rt6_ex->rt6i->from, NULL);
+	fib6_info_release(from);
+	dst_dev_put(&rt6_ex->rt6i->dst);
+
 	hlist_del_rcu(&rt6_ex->hlist);
 	dst_release(&rt6_ex->rt6i->dst);
 	kfree_rcu(rt6_ex, rcu);
 	WARN_ON_ONCE(!bucket->depth);
 	bucket->depth--;
-	net->ipv6.rt6_stats->fib_rt_cache--;
 }
 
 /* Remove oldest rt6_ex in bucket and free the memory
@@ -1599,15 +1610,15 @@ static int rt6_remove_exception_rt(struct rt6_info *rt)
 static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
 {
 	struct rt6_exception_bucket *bucket;
-	struct fib6_info *from = rt->from;
 	struct in6_addr *src_key = NULL;
 	struct rt6_exception *rt6_ex;
-
-	if (!from ||
-	    !(rt->rt6i_flags & RTF_CACHE))
-		return;
+	struct fib6_info *from;
 
 	rcu_read_lock();
+	from = rcu_dereference(rt->from);
+	if (!from || !(rt->rt6i_flags & RTF_CACHE))
+		goto unlock;
+
 	bucket = rcu_dereference(from->rt6i_exception_bucket);
 
 #ifdef CONFIG_IPV6_SUBTREES
@@ -1626,6 +1637,7 @@ static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
 	if (rt6_ex)
 		rt6_ex->stamp = jiffies;
 
+unlock:
 	rcu_read_unlock();
 }
 
@@ -2742,20 +2754,24 @@ static int ip6_route_check_nh_onlink(struct net *net,
 	u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
 	const struct in6_addr *gw_addr = &cfg->fc_gateway;
 	u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
+	struct fib6_info *from;
 	struct rt6_info *grt;
 	int err;
 
 	err = 0;
 	grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
 	if (grt) {
+		rcu_read_lock();
+		from = rcu_dereference(grt->from);
 		if (!grt->dst.error &&
 		    /* ignore match if it is the default route */
-		    grt->from && !ipv6_addr_any(&grt->from->fib6_dst.addr) &&
+		    from && !ipv6_addr_any(&from->fib6_dst.addr) &&
 		    (grt->rt6i_flags & flags || dev != grt->dst.dev)) {
 			NL_SET_ERR_MSG(extack,
 				       "Nexthop has invalid gateway or device mismatch");
 			err = -EINVAL;
 		}
+		rcu_read_unlock();
 
 		ip6_rt_put(grt);
 	}
@@ -4649,7 +4665,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
 		table = rt->fib6_table->tb6_id;
 	else
 		table = RT6_TABLE_UNSPEC;
-	rtm->rtm_table = table;
+	rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
 	if (nla_put_u32(skb, RTA_TABLE, table))
 		goto nla_put_failure;
 
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 2596ffdeebea..b444483cdb2b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -288,8 +288,8 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 	int peeked, peeking, off;
 	int err;
 	int is_udplite = IS_UDPLITE(sk);
+	struct udp_mib __percpu *mib;
 	bool checksum_valid = false;
-	struct udp_mib *mib;
 	int is_udp4;
 
 	if (flags & MSG_ERRQUEUE)
@@ -420,17 +420,19 @@ EXPORT_SYMBOL(udpv6_encap_enable);
  */
 static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
 				      struct inet6_skb_parm *opt,
-				      u8 type, u8 code, int offset, u32 info)
+				      u8 type, u8 code, int offset, __be32 info)
 {
 	int i;
 
 	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
 		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
-			       u8 type, u8 code, int offset, u32 info);
+			       u8 type, u8 code, int offset, __be32 info);
+		const struct ip6_tnl_encap_ops *encap;
 
-		if (!ip6tun_encaps[i])
+		encap = rcu_dereference(ip6tun_encaps[i]);
+		if (!encap)
 			continue;
-		handler = rcu_dereference(ip6tun_encaps[i]->err_handler);
+		handler = encap->err_handler;
 		if (handler && !handler(skb, opt, type, code, offset, info))
 			return 0;
 	}
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index f5b4febeaa25..bc65db782bfb 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -344,8 +344,8 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
 	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
 	unsigned int i;
 
-	xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
 	xfrm_flush_gc();
+	xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
 
 	for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
 		WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 655c787f9d54..5651c29cb5bd 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -196,30 +196,22 @@ static int pfkey_release(struct socket *sock)
 	return 0;
 }
 
-static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
-			       gfp_t allocation, struct sock *sk)
+static int pfkey_broadcast_one(struct sk_buff *skb, gfp_t allocation,
+			       struct sock *sk)
 {
 	int err = -ENOBUFS;
 
-	sock_hold(sk);
-	if (*skb2 == NULL) {
-		if (refcount_read(&skb->users) != 1) {
-			*skb2 = skb_clone(skb, allocation);
-		} else {
-			*skb2 = skb;
-			refcount_inc(&skb->users);
-		}
-	}
-	if (*skb2 != NULL) {
-		if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
-			skb_set_owner_r(*skb2, sk);
-			skb_queue_tail(&sk->sk_receive_queue, *skb2);
-			sk->sk_data_ready(sk);
-			*skb2 = NULL;
-			err = 0;
-		}
+	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
+		return err;
+
+	skb = skb_clone(skb, allocation);
+
+	if (skb) {
+		skb_set_owner_r(skb, sk);
+		skb_queue_tail(&sk->sk_receive_queue, skb);
+		sk->sk_data_ready(sk);
+		err = 0;
 	}
-	sock_put(sk);
 	return err;
 }
 
@@ -234,7 +226,6 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
 {
 	struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
 	struct sock *sk;
-	struct sk_buff *skb2 = NULL;
 	int err = -ESRCH;
 
 	/* XXX Do we need something like netlink_overrun? I think
@@ -253,7 +244,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
 		 * socket.
 		 */
 		if (pfk->promisc)
-			pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
+			pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
 
 		/* the exact target will be processed later */
 		if (sk == one_sk)
@@ -268,7 +259,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
 			continue;
 		}
 
-		err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
+		err2 = pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
 
 		/* Error is cleared after successful sending to at least one
 		 * registered KM */
@@ -278,9 +269,8 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
 	rcu_read_unlock();
 
 	if (one_sk != NULL)
-		err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
+		err = pfkey_broadcast_one(skb, allocation, one_sk);
 
-	kfree_skb(skb2);
 	kfree_skb(skb);
 	return err;
 }
@@ -1783,7 +1773,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_m
 	if (proto == 0)
 		return -EINVAL;
 
-	err = xfrm_state_flush(net, proto, true);
+	err = xfrm_state_flush(net, proto, true, false);
 	err2 = unicast_flush_resp(sk, hdr);
 	if (err || err2) {
 		if (err == -ESRCH) /* empty table - go quietly */
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 87a729926734..977dea436ee8 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -615,13 +615,13 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
 	 * We need a bit of data queued to build aggregates properly, so
 	 * instruct the TCP stack to allow more than a single ms of data
 	 * to be queued in the stack. The value is a bit-shift of 1
-	 * second, so 8 is ~4ms of queued data. Only affects local TCP
+	 * second, so 7 is ~8ms of queued data. Only affects local TCP
 	 * sockets.
 	 * This is the default, anyhow - drivers may need to override it
 	 * for local reasons (longer buffers, longer completion time, or
 	 * similar).
 	 */
-	local->hw.tx_sk_pacing_shift = 8;
+	local->hw.tx_sk_pacing_shift = 7;
 
 	/* set up some defaults */
 	local->hw.queues = 1;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index bb4d71efb6fb..c2a6da5d80da 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2644,6 +2644,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
 	struct ieee80211_sub_if_data *sdata = rx->sdata;
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	u16 ac, q, hdrlen;
+	int tailroom = 0;
 
 	hdr = (struct ieee80211_hdr *) skb->data;
 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -2732,8 +2733,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
 	if (!ifmsh->mshcfg.dot11MeshForwarding)
 		goto out;
 
+	if (sdata->crypto_tx_tailroom_needed_cnt)
+		tailroom = IEEE80211_ENCRYPT_TAILROOM;
+
 	fwd_skb = skb_copy_expand(skb, local->tx_headroom +
-				       sdata->encrypt_headroom, 0, GFP_ATOMIC);
+				       sdata->encrypt_headroom,
+				  tailroom, GFP_ATOMIC);
 	if (!fwd_skb)
 		goto out;
 
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 9fc76b19cd3c..db3473540303 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -132,7 +132,7 @@ static int pep_indicate(struct sock *sk, u8 id, u8 code,
 	ph->utid = 0;
 	ph->message_id = id;
 	ph->pipe_handle = pn->pipe_handle;
-	ph->data[0] = code;
+	ph->error_code = code;
 	return pn_skb_send(sk, skb, NULL);
 }
 
@@ -153,7 +153,7 @@ static int pipe_handler_request(struct sock *sk, u8 id, u8 code,
 	ph->utid = id; /* whatever */
 	ph->message_id = id;
 	ph->pipe_handle = pn->pipe_handle;
-	ph->data[0] = code;
+	ph->error_code = code;
 	return pn_skb_send(sk, skb, NULL);
 }
 
@@ -208,7 +208,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
 	struct pnpipehdr *ph;
 	struct sockaddr_pn dst;
 	u8 data[4] = {
-		oph->data[0], /* PEP type */
+		oph->pep_type, /* PEP type */
 		code, /* error code, at an unusual offset */
 		PAD, PAD,
 	};
@@ -221,7 +221,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
 	ph->utid = oph->utid;
 	ph->message_id = PNS_PEP_CTRL_RESP;
 	ph->pipe_handle = oph->pipe_handle;
-	ph->data[0] = oph->data[1]; /* CTRL id */
+	ph->data0 = oph->data[0]; /* CTRL id */
 
 	pn_skb_get_src_sockaddr(oskb, &dst);
 	return pn_skb_send(sk, skb, &dst);
@@ -272,17 +272,17 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
 		return -EINVAL;
 
 	hdr = pnp_hdr(skb);
-	if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
+	if (hdr->pep_type != PN_PEP_TYPE_COMMON) {
 		net_dbg_ratelimited("Phonet unknown PEP type: %u\n",
-				    (unsigned int)hdr->data[0]);
+				    (unsigned int)hdr->pep_type);
 		return -EOPNOTSUPP;
 	}
 
-	switch (hdr->data[1]) {
+	switch (hdr->data[0]) {
 	case PN_PEP_IND_FLOW_CONTROL:
 		switch (pn->tx_fc) {
 		case PN_LEGACY_FLOW_CONTROL:
-			switch (hdr->data[4]) {
+			switch (hdr->data[3]) {
 			case PEP_IND_BUSY:
 				atomic_set(&pn->tx_credits, 0);
 				break;
@@ -292,7 +292,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
 			}
 			break;
 		case PN_ONE_CREDIT_FLOW_CONTROL:
-			if (hdr->data[4] == PEP_IND_READY)
+			if (hdr->data[3] == PEP_IND_READY)
 				atomic_set(&pn->tx_credits, wake = 1);
 			break;
 		}
@@ -301,12 +301,12 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
 	case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
 		if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
 			break;
-		atomic_add(wake = hdr->data[4], &pn->tx_credits);
+		atomic_add(wake = hdr->data[3], &pn->tx_credits);
 		break;
 
 	default:
 		net_dbg_ratelimited("Phonet unknown PEP indication: %u\n",
-				    (unsigned int)hdr->data[1]);
+				    (unsigned int)hdr->data[0]);
 		return -EOPNOTSUPP;
 	}
 	if (wake)
@@ -318,7 +318,7 @@ static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
 {
 	struct pep_sock *pn = pep_sk(sk);
 	struct pnpipehdr *hdr = pnp_hdr(skb);
-	u8 n_sb = hdr->data[0];
+	u8 n_sb = hdr->data0;
 
 	pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
 	__skb_pull(skb, sizeof(*hdr));
@@ -506,7 +506,7 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
 		return -ECONNREFUSED;
 
 	/* Parse sub-blocks */
-	n_sb = hdr->data[4];
+	n_sb = hdr->data[3];
 	while (n_sb > 0) {
 		u8 type, buf[6], len = sizeof(buf);
 		const u8 *data = pep_get_sb(skb, &type, &len, buf);
@@ -739,7 +739,7 @@ static int pipe_do_remove(struct sock *sk)
 	ph->utid = 0;
 	ph->message_id = PNS_PIPE_REMOVE_REQ;
 	ph->pipe_handle = pn->pipe_handle;
-	ph->data[0] = PAD;
+	ph->data0 = PAD;
 	return pn_skb_send(sk, skb, NULL);
 }
 
@@ -817,7 +817,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
 	peer_type = hdr->other_pep_type << 8;
 
 	/* Parse sub-blocks (options) */
-	n_sb = hdr->data[4];
+	n_sb = hdr->data[3];
 	while (n_sb > 0) {
 		u8 type, buf[1], len = sizeof(buf);
 		const u8 *data = pep_get_sb(skb, &type, &len, buf);
@@ -1109,7 +1109,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
 	ph->utid = 0;
 	if (pn->aligned) {
 		ph->message_id = PNS_PIPE_ALIGNED_DATA;
-		ph->data[0] = 0; /* padding */
+		ph->data0 = 0; /* padding */
 	} else
 		ph->message_id = PNS_PIPE_DATA;
 	ph->pipe_handle = pn->pipe_handle;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 033696e6f74f..ad158d311ffa 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -207,7 +207,8 @@ void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
 
 	/* When a data chunk is sent, reset the heartbeat interval. */
 	expires = jiffies + sctp_transport_timeout(transport);
-	if (time_before(transport->hb_timer.expires, expires) &&
+	if ((time_before(transport->hb_timer.expires, expires) ||
+	     !timer_pending(&transport->hb_timer)) &&
 	    !mod_timer(&transport->hb_timer,
 		       expires + prandom_u32_max(transport->rto)))
 		sctp_transport_hold(transport);
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 5721416d0605..adbdf195eb08 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -113,9 +113,9 @@ struct smc_host_cdc_msg { /* Connection Data Control message */
 } __aligned(8);
 
 enum smc_urg_state {
-	SMC_URG_VALID,		/* data present */
-	SMC_URG_NOTYET,		/* data pending */
-	SMC_URG_READ		/* data was already read */
+	SMC_URG_VALID = 1,	/* data present */
+	SMC_URG_NOTYET = 2,	/* data pending */
+	SMC_URG_READ = 3,	/* data was already read */
 };
 
 struct smc_connection {
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 1217c90a363b..684f2125fc6b 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -388,7 +388,7 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout)
 		rc_ = tipc_sk_sock_err((sock_), timeo_); \
 		if (rc_) \
 			break; \
-		prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \
+		add_wait_queue(sk_sleep(sk_), &wait_); \
 		release_sock(sk_); \
 		*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
 		sched_annotate_sleep(); \
@@ -1677,7 +1677,7 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk)
 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
 {
 	struct sock *sk = sock->sk;
-	DEFINE_WAIT(wait);
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 	long timeo = *timeop;
 	int err = sock_error(sk);
 
@@ -1685,15 +1685,17 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
 		return err;
 
 	for (;;) {
-		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
 			if (sk->sk_shutdown & RCV_SHUTDOWN) {
 				err = -ENOTCONN;
 				break;
 			}
+			add_wait_queue(sk_sleep(sk), &wait);
 			release_sock(sk);
-			timeo = schedule_timeout(timeo);
+			timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
+			sched_annotate_sleep();
 			lock_sock(sk);
+			remove_wait_queue(sk_sleep(sk), &wait);
 		}
 		err = 0;
 		if (!skb_queue_empty(&sk->sk_receive_queue))
@@ -1709,7 +1711,6 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
 		if (err)
 			break;
 	}
-	finish_wait(sk_sleep(sk), &wait);
 	*timeop = timeo;
 	return err;
 }
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 74d1eed7cbd4..a95d479caeea 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -890,7 +890,7 @@ retry:
 	addr->hash ^= sk->sk_type;
 
 	__unix_remove_socket(sk);
-	u->addr = addr;
+	smp_store_release(&u->addr, addr);
 	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
 	spin_unlock(&unix_table_lock);
 	err = 0;
@@ -1060,7 +1060,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 
 	err = 0;
 	__unix_remove_socket(sk);
-	u->addr = addr;
+	smp_store_release(&u->addr, addr);
 	__unix_insert_socket(list, sk);
 
 out_unlock:
@@ -1331,15 +1331,29 @@ restart:
 	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
 	otheru = unix_sk(other);
 
-	/* copy address information from listening to new sock*/
-	if (otheru->addr) {
-		refcount_inc(&otheru->addr->refcnt);
-		newu->addr = otheru->addr;
-	}
+	/* copy address information from listening to new sock
+	 *
+	 * The contents of *(otheru->addr) and otheru->path
+	 * are seen fully set up here, since we have found
+	 * otheru in hash under unix_table_lock.  Insertion
+	 * into the hash chain we'd found it in had been done
+	 * in an earlier critical area protected by unix_table_lock,
+	 * the same one where we'd set *(otheru->addr) contents,
+	 * as well as otheru->path and otheru->addr itself.
+	 *
+	 * Using smp_store_release() here to set newu->addr
+	 * is enough to make those stores, as well as stores
+	 * to newu->path visible to anyone who gets newu->addr
+	 * by smp_load_acquire().  IOW, the same warranties
+	 * as for unix_sock instances bound in unix_bind() or
+	 * in unix_autobind().
+	 */
 	if (otheru->path.dentry) {
 		path_get(&otheru->path);
 		newu->path = otheru->path;
 	}
+	refcount_inc(&otheru->addr->refcnt);
+	smp_store_release(&newu->addr, otheru->addr);
 
 	/* Set credentials */
 	copy_peercred(sk, other);
@@ -1453,7 +1467,7 @@ out:
 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
 {
 	struct sock *sk = sock->sk;
-	struct unix_sock *u;
+	struct unix_address *addr;
 	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
 	int err = 0;
 
@@ -1468,19 +1482,15 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
 		sock_hold(sk);
 	}
 
-	u = unix_sk(sk);
-	unix_state_lock(sk);
-	if (!u->addr) {
+	addr = smp_load_acquire(&unix_sk(sk)->addr);
+	if (!addr) {
 		sunaddr->sun_family = AF_UNIX;
 		sunaddr->sun_path[0] = 0;
 		err = sizeof(short);
 	} else {
-		struct unix_address *addr = u->addr;
-
 		err = addr->len;
 		memcpy(sunaddr, addr->name, addr->len);
 	}
-	unix_state_unlock(sk);
 	sock_put(sk);
 out:
 	return err;
@@ -2073,11 +2083,11 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
 
 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
 {
-	struct unix_sock *u = unix_sk(sk);
+	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
 
-	if (u->addr) {
-		msg->msg_namelen = u->addr->len;
-		memcpy(msg->msg_name, u->addr->name, u->addr->len);
+	if (addr) {
+		msg->msg_namelen = addr->len;
+		memcpy(msg->msg_name, addr->name, addr->len);
 	}
 }
 
@@ -2581,15 +2591,14 @@ static int unix_open_file(struct sock *sk)
 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
-	unix_state_lock(sk);
+	if (!smp_load_acquire(&unix_sk(sk)->addr))
+		return -ENOENT;
+
 	path = unix_sk(sk)->path;
-	if (!path.dentry) {
-		unix_state_unlock(sk);
+	if (!path.dentry)
 		return -ENOENT;
-	}
 
 	path_get(&path);
-	unix_state_unlock(sk);
 
 	fd = get_unused_fd_flags(O_CLOEXEC);
 	if (fd < 0)
@@ -2830,7 +2839,7 @@ static int unix_seq_show(struct seq_file *seq, void *v)
 			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
 			sock_i_ino(s));
 
-		if (u->addr) {
+		if (u->addr) { // under unix_table_lock here
 			int i, len;
 			seq_putc(seq, ' ');
 
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 384c84e83462..3183d9b8ab33 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -10,7 +10,8 @@
 
 static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
 {
-	struct unix_address *addr = unix_sk(sk)->addr;
+	/* might or might not have unix_table_lock */
+	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
 
 	if (!addr)
 		return 0;
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index ec3a828672ef..eff31348e20b 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -679,8 +679,7 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
 	int len, i, rc = 0;
 
-	if (!sock_flag(sk, SOCK_ZAPPED) ||
-	    addr_len != sizeof(struct sockaddr_x25) ||
+	if (addr_len != sizeof(struct sockaddr_x25) ||
 	    addr->sx25_family != AF_X25) {
 		rc = -EINVAL;
 		goto out;
@@ -699,9 +698,13 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	}
 
 	lock_sock(sk);
-	x25_sk(sk)->source_addr = addr->sx25_addr;
-	x25_insert_socket(sk);
-	sock_reset_flag(sk, SOCK_ZAPPED);
+	if (sock_flag(sk, SOCK_ZAPPED)) {
+		x25_sk(sk)->source_addr = addr->sx25_addr;
+		x25_insert_socket(sk);
+		sock_reset_flag(sk, SOCK_ZAPPED);
+	} else {
+		rc = -EINVAL;
+	}
 	release_sock(sk);
 	SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
out:
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 45f3b528dc09..85e4fe4f18cc 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -366,7 +366,6 @@ static int xsk_release(struct socket *sock)
 
 	xskq_destroy(xs->rx);
 	xskq_destroy(xs->tx);
-	xdp_put_umem(xs->umem);
 
 	sock_orphan(sk);
 	sock->sk = NULL;
@@ -718,6 +717,18 @@ static const struct proto_ops xsk_proto_ops = {
 	.sendpage	= sock_no_sendpage,
 };
 
+static void xsk_destruct(struct sock *sk)
+{
+	struct xdp_sock *xs = xdp_sk(sk);
+
+	if (!sock_flag(sk, SOCK_DEAD))
+		return;
+
+	xdp_put_umem(xs->umem);
+
+	sk_refcnt_debug_dec(sk);
+}
+
 static int xsk_create(struct net *net, struct socket *sock, int protocol,
 		      int kern)
 {
@@ -744,6 +755,9 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
 
 	sk->sk_family = PF_XDP;
 
+	sk->sk_destruct = xsk_destruct;
+	sk_refcnt_debug_inc(sk);
+
 	sock_set_flag(sk, SOCK_RCU_FREE);
 
 	xs = xdp_sk(sk);
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index 6be8c7df15bb..dbb3c1945b5c 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -76,10 +76,10 @@ static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb)
 	int ifindex;
 	struct xfrm_if *xi;
 
-	if (!skb->dev)
+	if (!secpath_exists(skb) || !skb->dev)
 		return NULL;
 
-	xfrmn = net_generic(dev_net(skb->dev), xfrmi_net_id);
+	xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);
 	ifindex = skb->dev->ifindex;
 
 	for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index ba0a4048c846..8d1a898d0ba5 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3314,8 +3314,10 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
 
 	if (ifcb) {
 		xi = ifcb->decode_session(skb);
-		if (xi)
+		if (xi) {
 			if_id = xi->p.if_id;
+			net = xi->net;
+		}
 	}
 	rcu_read_unlock();
 
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 23c92891758a..1bb971f46fc6 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -432,7 +432,7 @@ void xfrm_state_free(struct xfrm_state *x)
 }
 EXPORT_SYMBOL(xfrm_state_free);
 
-static void xfrm_state_gc_destroy(struct xfrm_state *x)
+static void ___xfrm_state_destroy(struct xfrm_state *x)
 {
 	tasklet_hrtimer_cancel(&x->mtimer);
 	del_timer_sync(&x->rtimer);
@@ -474,7 +474,7 @@ static void xfrm_state_gc_task(struct work_struct *work)
 	synchronize_rcu();
 
 	hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
-		xfrm_state_gc_destroy(x);
+		___xfrm_state_destroy(x);
 }
 
 static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
@@ -598,14 +598,19 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
 }
 EXPORT_SYMBOL(xfrm_state_alloc);
 
-void __xfrm_state_destroy(struct xfrm_state *x)
+void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
 {
 	WARN_ON(x->km.state != XFRM_STATE_DEAD);
 
-	spin_lock_bh(&xfrm_state_gc_lock);
-	hlist_add_head(&x->gclist, &xfrm_state_gc_list);
-	spin_unlock_bh(&xfrm_state_gc_lock);
-	schedule_work(&xfrm_state_gc_work);
+	if (sync) {
+		synchronize_rcu();
+		___xfrm_state_destroy(x);
+	} else {
+		spin_lock_bh(&xfrm_state_gc_lock);
+		hlist_add_head(&x->gclist, &xfrm_state_gc_list);
+		spin_unlock_bh(&xfrm_state_gc_lock);
+		schedule_work(&xfrm_state_gc_work);
+	}
 }
 EXPORT_SYMBOL(__xfrm_state_destroy);
 
@@ -708,7 +713,7 @@ xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool
 }
 #endif
 
-int xfrm_state_flush(struct net *net, u8 proto, bool task_valid)
+int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
 {
 	int i, err = 0, cnt = 0;
 
@@ -730,7 +735,10 @@ restart:
 				err = xfrm_state_delete(x);
 				xfrm_audit_state_delete(x, err ? 0 : 1,
 							task_valid);
-				xfrm_state_put(x);
+				if (sync)
+					xfrm_state_put_sync(x);
+				else
+					xfrm_state_put(x);
 				if (!err)
 					cnt++;
 
@@ -2215,7 +2223,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
 		if (atomic_read(&t->tunnel_users) == 2)
 			xfrm_state_delete(t);
 		atomic_dec(&t->tunnel_users);
-		xfrm_state_put(t);
+		xfrm_state_put_sync(t);
 		x->tunnel = NULL;
 	}
 }
@@ -2375,8 +2383,8 @@ void xfrm_state_fini(struct net *net)
 	unsigned int sz;
 
 	flush_work(&net->xfrm.state_hash_work);
-	xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
 	flush_work(&xfrm_state_gc_work);
+	xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
 
 	WARN_ON(!list_empty(&net->xfrm.state_all));
 
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index c6d26afcf89d..a131f9ff979e 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1932,7 +1932,7 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
 	struct xfrm_usersa_flush *p = nlmsg_data(nlh);
 	int err;
 
-	err = xfrm_state_flush(net, p->proto, true);
+	err = xfrm_state_flush(net, p->proto, true, false);
 	if (err) {
 		if (err == -ESRCH) /* empty table */
 			return 0;