author    Linus Torvalds <torvalds@linux-foundation.org>  2018-05-21 11:37:48 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-05-21 11:37:48 -0400
commit    5aef268ace7436d8a25d41ee3837ecadb0115917 (patch)
tree      efa69470ccffa4f7655102b57118ab81de788be5 /net
parent    771c577c23bac90597c685971d7297ea00f99d11 (diff)
parent    b80d0b93b991e551a32157e0d9d38fc5bc9348a7 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix refcounting bug for connections in on-packet scheduling mode of
    IPVS, from Julian Anastasov.

 2) Set network header properly in AF_PACKET's packet_snd, from Willem
    de Bruijn.

 3) Fix regressions in 3c59x by converting to generic DMA API. It was
    relying upon the hack that the PCI DMA interfaces would accept NULL
    for EISA devices. From Christoph Hellwig.

 4) Remove RDMA devices before unregistering netdev in QEDE driver,
    from Michal Kalderon.

 5) Use after free in TUN driver ptr_ring usage, from Jason Wang.

 6) Properly check for missing netlink attributes in SMC_PNETID
    requests, from Eric Biggers.

 7) Set DMA mask before performing any DMA operations in vmxnet3
    driver, from Regis Duchesne.

 8) Fix mlx5 build with SMP=n, from Saeed Mahameed.

 9) Classifier fixes in bcm_sf2 driver from Florian Fainelli.

10) Tuntap use after free during release, from Jason Wang.

11) Don't use stack memory in scatterlists in tls code, from Matt
    Mullins.

12) Not fully initialized flow key object in ipv4 routing code, from
    David Ahern.

13) Various packet headroom bug fixes in ip6_gre driver, from Petr
    Machata.

14) Remove queues from XPS maps using correct index, from Amritha
    Nambiar.

15) Fix use after free in sock_diag, from Eric Dumazet.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (64 commits)
  net: ip6_gre: fix tunnel metadata device sharing.
  cxgb4: fix offset in collecting TX rate limit info
  net: sched: red: avoid hashing NULL child
  sock_diag: fix use-after-free read in __sk_free
  sh_eth: Change platform check to CONFIG_ARCH_RENESAS
  net: dsa: Do not register devlink for unused ports
  net: Fix a bug in removing queues from XPS map
  bpf: fix truncated jump targets on heavy expansions
  bpf: parse and verdict prog attach may race with bpf map update
  bpf: sockmap update rollback on error can incorrectly dec prog refcnt
  net: test tailroom before appending to linear skb
  net: ip6_gre: Fix ip6erspan hlen calculation
  net: ip6_gre: Split up ip6gre_changelink()
  net: ip6_gre: Split up ip6gre_newlink()
  net: ip6_gre: Split up ip6gre_tnl_change()
  net: ip6_gre: Split up ip6gre_tnl_link_config()
  net: ip6_gre: Fix headroom request in ip6erspan_tunnel_xmit()
  net: ip6_gre: Request headroom in __gre6_xmit()
  selftests/bpf: check return value of fopen in test_verifier.c
  erspan: fix invalid erspan version.
  ...
Diffstat (limited to 'net')
 net/bridge/netfilter/ebt_stp.c          |   4
 net/core/dev.c                          |   2
 net/core/filter.c                       |  11
 net/core/sock.c                         |   2
 net/dsa/dsa2.c                          |   9
 net/ipv4/fib_frontend.c                 |   8
 net/ipv4/ip_gre.c                       |   4
 net/ipv4/ip_output.c                    |   3
 net/ipv4/netfilter/ip_tables.c          |   1
 net/ipv4/netfilter/ipt_rpfilter.c       |   2
 net/ipv4/route.c                        |   7
 net/ipv4/tcp_output.c                   |   7
 net/ipv6/ip6_gre.c                      | 286
 net/ipv6/ip6_output.c                   |   3
 net/ipv6/netfilter/ip6_tables.c         |   1
 net/netfilter/core.c                    |   3
 net/netfilter/ipvs/ip_vs_conn.c         |  17
 net/netfilter/ipvs/ip_vs_core.c         |  12
 net/netfilter/nf_conntrack_proto_tcp.c  |  11
 net/netfilter/nf_tables_api.c           |  77
 net/netfilter/nf_tables_core.c          |  21
 net/netfilter/nfnetlink_acct.c          |   2
 net/netfilter/nfnetlink_cthelper.c      |   7
 net/netfilter/nft_compat.c              | 201
 net/netfilter/nft_immediate.c           |  15
 net/netfilter/x_tables.c                |   6
 net/packet/af_packet.c                  |   4
 net/sched/act_vlan.c                    |   2
 net/sched/sch_red.c                     |   5
 net/sched/sch_tbf.c                     |   5
 net/smc/smc_pnet.c                      |  71
 net/tls/tls_sw.c                        |   9
32 files changed, 615 insertions, 203 deletions
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c
index 47ba98db145d..46c1fe7637ea 100644
--- a/net/bridge/netfilter/ebt_stp.c
+++ b/net/bridge/netfilter/ebt_stp.c
@@ -161,8 +161,8 @@ static int ebt_stp_mt_check(const struct xt_mtchk_param *par)
 	/* Make sure the match only receives stp frames */
 	if (!par->nft_compat &&
 	    (!ether_addr_equal(e->destmac, eth_stp_addr) ||
-	     !is_broadcast_ether_addr(e->destmsk) ||
-	     !(e->bitmask & EBT_DESTMAC)))
+	     !(e->bitmask & EBT_DESTMAC) ||
+	     !is_broadcast_ether_addr(e->destmsk)))
 		return -EINVAL;
 
 	return 0;
diff --git a/net/core/dev.c b/net/core/dev.c
index af0558b00c6c..2af787e8b130 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2124,7 +2124,7 @@ static bool remove_xps_queue_cpu(struct net_device *dev,
 	int i, j;
 
 	for (i = count, j = offset; i--; j++) {
-		if (!remove_xps_queue(dev_maps, cpu, j))
+		if (!remove_xps_queue(dev_maps, tci, j))
 			break;
 	}
 
diff --git a/net/core/filter.c b/net/core/filter.c
index e77c30ca491d..201ff36b17a8 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -481,11 +481,18 @@ do_pass:
 
 #define BPF_EMIT_JMP							\
 	do {								\
+		const s32 off_min = S16_MIN, off_max = S16_MAX;		\
+		s32 off;						\
+									\
 		if (target >= len || target < 0)			\
 			goto err;					\
-		insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0;	\
+		off = addrs ? addrs[target] - addrs[i] - 1 : 0;		\
 		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
-		insn->off -= insn - tmp_insns;				\
+		off -= insn - tmp_insns;				\
+		/* Reject anything not fitting into insn->off. */	\
+		if (off < off_min || off > off_max)			\
+			goto err;					\
+		insn->off = off;					\
 	} while (0)
 
 		case BPF_JMP | BPF_JA:
diff --git a/net/core/sock.c b/net/core/sock.c
index 6444525f610c..3b6d02854e57 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1606,7 +1606,7 @@ static void __sk_free(struct sock *sk)
 	if (likely(sk->sk_net_refcnt))
 		sock_inuse_add(sock_net(sk), -1);
 
-	if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
+	if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
 		sock_diag_broadcast_destroy(sk);
 	else
 		sk_destruct(sk);
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index adf50fbc4c13..47725250b4ca 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -258,11 +258,13 @@ static void dsa_tree_teardown_default_cpu(struct dsa_switch_tree *dst)
 static int dsa_port_setup(struct dsa_port *dp)
 {
 	struct dsa_switch *ds = dp->ds;
-	int err;
+	int err = 0;
 
 	memset(&dp->devlink_port, 0, sizeof(dp->devlink_port));
 
-	err = devlink_port_register(ds->devlink, &dp->devlink_port, dp->index);
+	if (dp->type != DSA_PORT_TYPE_UNUSED)
+		err = devlink_port_register(ds->devlink, &dp->devlink_port,
+					    dp->index);
 	if (err)
 		return err;
 
@@ -293,7 +295,8 @@ static int dsa_port_setup(struct dsa_port *dp)
 
 static void dsa_port_teardown(struct dsa_port *dp)
 {
-	devlink_port_unregister(&dp->devlink_port);
+	if (dp->type != DSA_PORT_TYPE_UNUSED)
+		devlink_port_unregister(&dp->devlink_port);
 
 	switch (dp->type) {
 	case DSA_PORT_TYPE_UNUSED:
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index f05afaf3235c..4d622112bf95 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -326,10 +326,11 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
 			  u8 tos, int oif, struct net_device *dev,
 			  int rpf, struct in_device *idev, u32 *itag)
 {
+	struct net *net = dev_net(dev);
+	struct flow_keys flkeys;
 	int ret, no_addr;
 	struct fib_result res;
 	struct flowi4 fl4;
-	struct net *net = dev_net(dev);
 	bool dev_match;
 
 	fl4.flowi4_oif = 0;
@@ -347,6 +348,11 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
 	no_addr = idev->ifa_list == NULL;
 
 	fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0;
+	if (!fib4_rules_early_flow_dissect(net, skb, &fl4, &flkeys)) {
+		fl4.flowi4_proto = 0;
+		fl4.fl4_sport = 0;
+		fl4.fl4_dport = 0;
+	}
 
 	trace_fib_validate_source(dev, &fl4);
 
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 9c169bb2444d..f200b304f76c 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -722,10 +722,12 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
 		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
 				    tunnel->index,
 				    truncate, true);
-	else
+	else if (tunnel->erspan_ver == 2)
 		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
 				       tunnel->dir, tunnel->hwid,
 				       truncate, true);
+	else
+		goto free_skb;
 
 	tunnel->parms.o_flags &= ~TUNNEL_KEY;
 	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 83c73bab2c3d..d54abc097800 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1045,7 +1045,8 @@ alloc_new_skb:
 			if (copy > length)
 				copy = length;
 
-			if (!(rt->dst.dev->features&NETIF_F_SG)) {
+			if (!(rt->dst.dev->features&NETIF_F_SG) &&
+			    skb_tailroom(skb) >= copy) {
 				unsigned int off;
 
 				off = skb->len;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 44b308d93ec2..e85f35b89c49 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -34,6 +34,7 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
 MODULE_DESCRIPTION("IPv4 packet filter");
+MODULE_ALIAS("ipt_icmp");
 
 void *ipt_alloc_initial_table(const struct xt_table *info)
 {
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index fd01f13c896a..12843c9ef142 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -89,10 +89,10 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
 		return true ^ invert;
 	}
 
+	memset(&flow, 0, sizeof(flow));
 	flow.flowi4_iif = LOOPBACK_IFINDEX;
 	flow.daddr = iph->saddr;
 	flow.saddr = rpfilter_get_saddr(iph->daddr);
-	flow.flowi4_oif = 0;
 	flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
 	flow.flowi4_tos = RT_TOS(iph->tos);
 	flow.flowi4_scope = RT_SCOPE_UNIVERSE;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 29268efad247..2cfa1b518f8d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1961,8 +1961,13 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	fl4.saddr = saddr;
 	fl4.flowi4_uid = sock_net_uid(net, NULL);
 
-	if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys))
+	if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
 		flkeys = &_flkeys;
+	} else {
+		fl4.flowi4_proto = 0;
+		fl4.fl4_sport = 0;
+		fl4.fl4_dport = 0;
+	}
 
 	err = fib_lookup(net, &fl4, res, 0);
 	if (err != 0) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 383cac0ff0ec..d07e34f8e309 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2833,8 +2833,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 		return -EBUSY;
 
 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
-		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
-			BUG();
+		if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
+			WARN_ON_ONCE(1);
+			return -EINVAL;
+		}
 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
 			return -ENOMEM;
 	}
@@ -3342,6 +3344,7 @@ static void tcp_connect_init(struct sock *sk)
 	sock_reset_flag(sk, SOCK_DONE);
 	tp->snd_wnd = 0;
 	tcp_init_wl(tp, 0);
+	tcp_write_queue_purge(sk);
 	tp->snd_una = tp->write_seq;
 	tp->snd_sml = tp->write_seq;
 	tp->snd_up = tp->write_seq;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 69727bc168cb..458de353f5d9 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -71,6 +71,7 @@ struct ip6gre_net {
 	struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];
 
 	struct ip6_tnl __rcu *collect_md_tun;
+	struct ip6_tnl __rcu *collect_md_tun_erspan;
 	struct net_device *fb_tunnel_dev;
 };
 
@@ -81,6 +82,7 @@ static int ip6gre_tunnel_init(struct net_device *dev);
 static void ip6gre_tunnel_setup(struct net_device *dev);
 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
 static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
+static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu);
 
 /* Tunnel hash table */
 
@@ -232,7 +234,12 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
 	if (cand)
 		return cand;
 
-	t = rcu_dereference(ign->collect_md_tun);
+	if (gre_proto == htons(ETH_P_ERSPAN) ||
+	    gre_proto == htons(ETH_P_ERSPAN2))
+		t = rcu_dereference(ign->collect_md_tun_erspan);
+	else
+		t = rcu_dereference(ign->collect_md_tun);
+
 	if (t && t->dev->flags & IFF_UP)
 		return t;
 
@@ -261,6 +268,31 @@ static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
 	return &ign->tunnels[prio][h];
 }
 
+static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
+{
+	if (t->parms.collect_md)
+		rcu_assign_pointer(ign->collect_md_tun, t);
+}
+
+static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
+{
+	if (t->parms.collect_md)
+		rcu_assign_pointer(ign->collect_md_tun_erspan, t);
+}
+
+static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t)
+{
+	if (t->parms.collect_md)
+		rcu_assign_pointer(ign->collect_md_tun, NULL);
+}
+
+static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign,
+				       struct ip6_tnl *t)
+{
+	if (t->parms.collect_md)
+		rcu_assign_pointer(ign->collect_md_tun_erspan, NULL);
+}
+
 static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
 						   const struct ip6_tnl *t)
 {
@@ -271,9 +303,6 @@ static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
 {
 	struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
 
-	if (t->parms.collect_md)
-		rcu_assign_pointer(ign->collect_md_tun, t);
-
 	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
 	rcu_assign_pointer(*tp, t);
 }
@@ -283,9 +312,6 @@ static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
 	struct ip6_tnl __rcu **tp;
 	struct ip6_tnl *iter;
 
-	if (t->parms.collect_md)
-		rcu_assign_pointer(ign->collect_md_tun, NULL);
-
 	for (tp = ip6gre_bucket(ign, t);
 	     (iter = rtnl_dereference(*tp)) != NULL;
 	     tp = &iter->next) {
@@ -374,11 +400,23 @@ failed_free:
 	return NULL;
 }
 
+static void ip6erspan_tunnel_uninit(struct net_device *dev)
+{
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
+
+	ip6erspan_tunnel_unlink_md(ign, t);
+	ip6gre_tunnel_unlink(ign, t);
+	dst_cache_reset(&t->dst_cache);
+	dev_put(dev);
+}
+
 static void ip6gre_tunnel_uninit(struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
 
+	ip6gre_tunnel_unlink_md(ign, t);
 	ip6gre_tunnel_unlink(ign, t);
 	dst_cache_reset(&t->dst_cache);
 	dev_put(dev);
@@ -698,6 +736,9 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
 	else
 		fl6->daddr = tunnel->parms.raddr;
 
+	if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
+		return -ENOMEM;
+
 	/* Push GRE header. */
 	protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
 
@@ -908,7 +949,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 		truncate = true;
 	}
 
-	if (skb_cow_head(skb, dev->needed_headroom))
+	if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
 		goto tx_err;
 
 	t->parms.o_flags &= ~TUNNEL_KEY;
@@ -979,11 +1020,14 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 			erspan_build_header(skb, ntohl(t->parms.o_key),
 					    t->parms.index,
 					    truncate, false);
-		else
+		else if (t->parms.erspan_ver == 2)
 			erspan_build_header_v2(skb, ntohl(t->parms.o_key),
 					       t->parms.dir,
 					       t->parms.hwid,
 					       truncate, false);
+		else
+			goto tx_err;
+
 		fl6.daddr = t->parms.raddr;
 	}
 
@@ -1019,12 +1063,11 @@ tx_err:
 	return NETDEV_TX_OK;
 }
 
-static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
+static void ip6gre_tnl_link_config_common(struct ip6_tnl *t)
 {
 	struct net_device *dev = t->dev;
 	struct __ip6_tnl_parm *p = &t->parms;
 	struct flowi6 *fl6 = &t->fl.u.ip6;
-	int t_hlen;
 
 	if (dev->type != ARPHRD_ETHER) {
 		memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
@@ -1051,12 +1094,13 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
 		dev->flags |= IFF_POINTOPOINT;
 	else
 		dev->flags &= ~IFF_POINTOPOINT;
+}
 
-	t->tun_hlen = gre_calc_hlen(t->parms.o_flags);
-
-	t->hlen = t->encap_hlen + t->tun_hlen;
-
-	t_hlen = t->hlen + sizeof(struct ipv6hdr);
+static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
+					 int t_hlen)
+{
+	const struct __ip6_tnl_parm *p = &t->parms;
+	struct net_device *dev = t->dev;
 
 	if (p->flags & IP6_TNL_F_CAP_XMIT) {
 		int strict = (ipv6_addr_type(&p->raddr) &
@@ -1088,8 +1132,26 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
 	}
 }
 
-static int ip6gre_tnl_change(struct ip6_tnl *t,
-	const struct __ip6_tnl_parm *p, int set_mtu)
+static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
+{
+	int t_hlen;
+
+	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
+	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
+
+	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
+	tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+	return t_hlen;
+}
+
+static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
+{
+	ip6gre_tnl_link_config_common(t);
+	ip6gre_tnl_link_config_route(t, set_mtu, ip6gre_calc_hlen(t));
+}
+
+static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
+				     const struct __ip6_tnl_parm *p)
 {
 	t->parms.laddr = p->laddr;
 	t->parms.raddr = p->raddr;
@@ -1105,6 +1167,12 @@ static int ip6gre_tnl_change(struct ip6_tnl *t,
 	t->parms.o_flags = p->o_flags;
 	t->parms.fwmark = p->fwmark;
 	dst_cache_reset(&t->dst_cache);
+}
+
+static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
+			     int set_mtu)
+{
+	ip6gre_tnl_copy_tnl_parm(t, p);
 	ip6gre_tnl_link_config(t, set_mtu);
 	return 0;
 }
@@ -1381,11 +1449,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
 		return ret;
 	}
 
-	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
-	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
-	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
-
-	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+	t_hlen = ip6gre_calc_hlen(tunnel);
 	dev->mtu = ETH_DATA_LEN - t_hlen;
 	if (dev->type == ARPHRD_ETHER)
 		dev->mtu -= ETH_HLEN;
@@ -1728,6 +1792,19 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
 	.ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
+static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel)
+{
+	int t_hlen;
+
+	tunnel->tun_hlen = 8;
+	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
+		       erspan_hdr_len(tunnel->parms.erspan_ver);
+
+	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
+	tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+	return t_hlen;
+}
+
 static int ip6erspan_tap_init(struct net_device *dev)
 {
 	struct ip6_tnl *tunnel;
@@ -1751,12 +1828,7 @@ static int ip6erspan_tap_init(struct net_device *dev)
 		return ret;
 	}
 
-	tunnel->tun_hlen = 8;
-	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
-		       erspan_hdr_len(tunnel->parms.erspan_ver);
-	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
-
-	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+	t_hlen = ip6erspan_calc_hlen(tunnel);
 	dev->mtu = ETH_DATA_LEN - t_hlen;
 	if (dev->type == ARPHRD_ETHER)
 		dev->mtu -= ETH_HLEN;
@@ -1764,14 +1836,14 @@ static int ip6erspan_tap_init(struct net_device *dev)
 		dev->mtu -= 8;
 
 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
-	ip6gre_tnl_link_config(tunnel, 1);
+	ip6erspan_tnl_link_config(tunnel, 1);
 
 	return 0;
 }
 
 static const struct net_device_ops ip6erspan_netdev_ops = {
 	.ndo_init = ip6erspan_tap_init,
-	.ndo_uninit = ip6gre_tunnel_uninit,
+	.ndo_uninit = ip6erspan_tunnel_uninit,
 	.ndo_start_xmit = ip6erspan_tunnel_xmit,
 	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_validate_addr = eth_validate_addr,
@@ -1835,13 +1907,11 @@ static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
 	return ret;
 }
 
-static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
-			  struct nlattr *tb[], struct nlattr *data[],
-			  struct netlink_ext_ack *extack)
+static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
+				 struct nlattr *tb[], struct nlattr *data[],
+				 struct netlink_ext_ack *extack)
 {
 	struct ip6_tnl *nt;
-	struct net *net = dev_net(dev);
-	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
 	struct ip_tunnel_encap ipencap;
 	int err;
 
@@ -1854,16 +1924,6 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
 			return err;
 	}
 
-	ip6gre_netlink_parms(data, &nt->parms);
-
-	if (nt->parms.collect_md) {
-		if (rtnl_dereference(ign->collect_md_tun))
-			return -EEXIST;
-	} else {
-		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
-			return -EEXIST;
-	}
-
 	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
 		eth_hw_addr_random(dev);
 
@@ -1874,51 +1934,94 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
 	if (err)
 		goto out;
 
-	ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
-
 	if (tb[IFLA_MTU])
 		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
 
 	dev_hold(dev);
-	ip6gre_tunnel_link(ign, nt);
 
 out:
 	return err;
 }
 
-static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
-			     struct nlattr *data[],
-			     struct netlink_ext_ack *extack)
+static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
+			  struct nlattr *tb[], struct nlattr *data[],
+			  struct netlink_ext_ack *extack)
+{
+	struct ip6_tnl *nt = netdev_priv(dev);
+	struct net *net = dev_net(dev);
+	struct ip6gre_net *ign;
+	int err;
+
+	ip6gre_netlink_parms(data, &nt->parms);
+	ign = net_generic(net, ip6gre_net_id);
+
+	if (nt->parms.collect_md) {
+		if (rtnl_dereference(ign->collect_md_tun))
+			return -EEXIST;
+	} else {
+		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
+			return -EEXIST;
+	}
+
+	err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
+	if (!err) {
+		ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
+		ip6gre_tunnel_link_md(ign, nt);
+		ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
+	}
+	return err;
+}
+
+static struct ip6_tnl *
+ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[],
+			 struct nlattr *data[], struct __ip6_tnl_parm *p_p,
+			 struct netlink_ext_ack *extack)
 {
 	struct ip6_tnl *t, *nt = netdev_priv(dev);
 	struct net *net = nt->net;
 	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
-	struct __ip6_tnl_parm p;
 	struct ip_tunnel_encap ipencap;
 
 	if (dev == ign->fb_tunnel_dev)
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 
 	if (ip6gre_netlink_encap_parms(data, &ipencap)) {
 		int err = ip6_tnl_encap_setup(nt, &ipencap);
 
 		if (err < 0)
-			return err;
+			return ERR_PTR(err);
 	}
 
-	ip6gre_netlink_parms(data, &p);
+	ip6gre_netlink_parms(data, p_p);
 
-	t = ip6gre_tunnel_locate(net, &p, 0);
+	t = ip6gre_tunnel_locate(net, p_p, 0);
 
 	if (t) {
 		if (t->dev != dev)
-			return -EEXIST;
+			return ERR_PTR(-EEXIST);
 	} else {
 		t = nt;
 	}
 
+	return t;
+}
+
+static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
+			     struct nlattr *data[],
+			     struct netlink_ext_ack *extack)
+{
+	struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
+	struct __ip6_tnl_parm p;
+	struct ip6_tnl *t;
+
+	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
+	if (IS_ERR(t))
+		return PTR_ERR(t);
+
+	ip6gre_tunnel_unlink_md(ign, t);
 	ip6gre_tunnel_unlink(ign, t);
 	ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
+	ip6gre_tunnel_link_md(ign, t);
 	ip6gre_tunnel_link(ign, t);
 	return 0;
 }
@@ -2068,6 +2171,69 @@ static void ip6erspan_tap_setup(struct net_device *dev)
 	netif_keep_dst(dev);
 }
 
+static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
+			     struct nlattr *tb[], struct nlattr *data[],
+			     struct netlink_ext_ack *extack)
+{
+	struct ip6_tnl *nt = netdev_priv(dev);
+	struct net *net = dev_net(dev);
+	struct ip6gre_net *ign;
+	int err;
+
+	ip6gre_netlink_parms(data, &nt->parms);
+	ign = net_generic(net, ip6gre_net_id);
+
+	if (nt->parms.collect_md) {
+		if (rtnl_dereference(ign->collect_md_tun_erspan))
+			return -EEXIST;
+	} else {
+		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
+			return -EEXIST;
+	}
+
+	err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
+	if (!err) {
+		ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]);
+		ip6erspan_tunnel_link_md(ign, nt);
+		ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
+	}
+	return err;
+}
+
+static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu)
+{
+	ip6gre_tnl_link_config_common(t);
+	ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t));
+}
+
+static int ip6erspan_tnl_change(struct ip6_tnl *t,
+				const struct __ip6_tnl_parm *p, int set_mtu)
+{
+	ip6gre_tnl_copy_tnl_parm(t, p);
+	ip6erspan_tnl_link_config(t, set_mtu);
+	return 0;
+}
+
+static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
+				struct nlattr *data[],
+				struct netlink_ext_ack *extack)
+{
+	struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
+	struct __ip6_tnl_parm p;
+	struct ip6_tnl *t;
+
+	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
+	if (IS_ERR(t))
+		return PTR_ERR(t);
+
+	ip6gre_tunnel_unlink_md(ign, t);
+	ip6gre_tunnel_unlink(ign, t);
+	ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
+	ip6erspan_tunnel_link_md(ign, t);
+	ip6gre_tunnel_link(ign, t);
+	return 0;
+}
+
 static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
 	.kind = "ip6gre",
 	.maxtype = IFLA_GRE_MAX,
@@ -2104,8 +2270,8 @@ static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
 	.priv_size = sizeof(struct ip6_tnl),
 	.setup = ip6erspan_tap_setup,
 	.validate = ip6erspan_tap_validate,
-	.newlink = ip6gre_newlink,
-	.changelink = ip6gre_changelink,
+	.newlink = ip6erspan_newlink,
+	.changelink = ip6erspan_changelink,
 	.get_size = ip6gre_get_size,
 	.fill_info = ip6gre_fill_info,
 	.get_link_net = ip6_tnl_get_link_net,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 2e891d2c30ef..7b6d1689087b 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1503,7 +1503,8 @@ alloc_new_skb:
 			if (copy > length)
 				copy = length;
 
-			if (!(rt->dst.dev->features&NETIF_F_SG)) {
+			if (!(rt->dst.dev->features&NETIF_F_SG) &&
+			    skb_tailroom(skb) >= copy) {
 				unsigned int off;
 
 				off = skb->len;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 65c9e1a58305..97f79dc943d7 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -38,6 +38,7 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
 MODULE_DESCRIPTION("IPv6 packet filter");
+MODULE_ALIAS("ip6t_icmp6");
 
 void *ip6t_alloc_initial_table(const struct xt_table *info)
 {
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 0f6b8172fb9a..206fb2c4c319 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -585,7 +585,8 @@ void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
 EXPORT_SYMBOL(nf_nat_decode_session_hook);
 #endif
 
-static void __net_init __netfilter_net_init(struct nf_hook_entries **e, int max)
+static void __net_init
+__netfilter_net_init(struct nf_hook_entries __rcu **e, int max)
 {
 	int h;
 
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 370abbf6f421..75de46576f51 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -232,7 +232,10 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
 static inline bool ip_vs_conn_unlink(struct ip_vs_conn *cp)
 {
 	unsigned int hash;
-	bool ret;
+	bool ret = false;
+
+	if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+		return refcount_dec_if_one(&cp->refcnt);
 
 	hash = ip_vs_conn_hashkey_conn(cp);
 
@@ -240,15 +243,13 @@ static inline bool ip_vs_conn_unlink(struct ip_vs_conn *cp)
 	spin_lock(&cp->lock);
 
 	if (cp->flags & IP_VS_CONN_F_HASHED) {
-		ret = false;
 		/* Decrease refcnt and unlink conn only if we are last user */
 		if (refcount_dec_if_one(&cp->refcnt)) {
 			hlist_del_rcu(&cp->c_list);
 			cp->flags &= ~IP_VS_CONN_F_HASHED;
 			ret = true;
 		}
-	} else
-		ret = refcount_read(&cp->refcnt) ? false : true;
+	}
 
 	spin_unlock(&cp->lock);
 	ct_write_unlock_bh(hash);
@@ -454,12 +455,6 @@ ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
 }
 EXPORT_SYMBOL_GPL(ip_vs_conn_out_get_proto);
 
-static void __ip_vs_conn_put_notimer(struct ip_vs_conn *cp)
-{
-	__ip_vs_conn_put(cp);
-	ip_vs_conn_expire(&cp->timer);
-}
-
 /*
  * Put back the conn and restart its timer with its timeout
  */
@@ -478,7 +473,7 @@ void ip_vs_conn_put(struct ip_vs_conn *cp)
 	    (refcount_read(&cp->refcnt) == 1) &&
 	    !timer_pending(&cp->timer))
 		/* expire connection immediately */
-		__ip_vs_conn_put_notimer(cp);
+		ip_vs_conn_expire(&cp->timer);
 	else
 		__ip_vs_conn_put_timer(cp);
 }
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 5f6f73cf2174..0679dd101e72 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -119,6 +119,8 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 		struct ip_vs_cpu_stats *s;
 		struct ip_vs_service *svc;
 
+		local_bh_disable();
+
 		s = this_cpu_ptr(dest->stats.cpustats);
 		u64_stats_update_begin(&s->syncp);
 		s->cnt.inpkts++;
@@ -137,6 +139,8 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 		s->cnt.inpkts++;
 		s->cnt.inbytes += skb->len;
 		u64_stats_update_end(&s->syncp);
+
+		local_bh_enable();
 	}
 }
 
@@ -151,6 +155,8 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 		struct ip_vs_cpu_stats *s;
 		struct ip_vs_service *svc;
 
+		local_bh_disable();
+
 		s = this_cpu_ptr(dest->stats.cpustats);
 		u64_stats_update_begin(&s->syncp);
 		s->cnt.outpkts++;
@@ -169,6 +175,8 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 		s->cnt.outpkts++;
 		s->cnt.outbytes += skb->len;
 		u64_stats_update_end(&s->syncp);
+
+		local_bh_enable();
 	}
 }
 
@@ -179,6 +187,8 @@ ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
 	struct netns_ipvs *ipvs = svc->ipvs;
 	struct ip_vs_cpu_stats *s;
 
+	local_bh_disable();
+
 	s = this_cpu_ptr(cp->dest->stats.cpustats);
 	u64_stats_update_begin(&s->syncp);
 	s->cnt.conns++;
@@ -193,6 +203,8 @@ ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
 	u64_stats_update_begin(&s->syncp);
 	s->cnt.conns++;
 	u64_stats_update_end(&s->syncp);
+
+	local_bh_enable();
 	}
 }
 
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index e97cdc1cf98c..8e67910185a0 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -981,6 +981,17 @@ static int tcp_packet(struct nf_conn *ct,
 			return NF_ACCEPT; /* Don't change state */
 		}
 		break;
+	case TCP_CONNTRACK_SYN_SENT2:
+		/* tcp_conntracks table is not smart enough to handle
+		 * simultaneous open.
+		 */
+		ct->proto.tcp.last_flags |= IP_CT_TCP_SIMULTANEOUS_OPEN;
+		break;
+	case TCP_CONNTRACK_SYN_RECV:
+		if (dir == IP_CT_DIR_REPLY && index == TCP_ACK_SET &&
+		    ct->proto.tcp.last_flags & IP_CT_TCP_SIMULTANEOUS_OPEN)
+			new_state = TCP_CONNTRACK_ESTABLISHED;
+		break;
 	case TCP_CONNTRACK_CLOSE:
 		if (index == TCP_RST_SET
 		    && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET)
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 04d4e3772584..91e80aa852d6 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -214,6 +214,34 @@ static int nft_delchain(struct nft_ctx *ctx)
 	return err;
 }
 
+static void nft_rule_expr_activate(const struct nft_ctx *ctx,
+				   struct nft_rule *rule)
+{
+	struct nft_expr *expr;
+
+	expr = nft_expr_first(rule);
+	while (expr != nft_expr_last(rule) && expr->ops) {
+		if (expr->ops->activate)
+			expr->ops->activate(ctx, expr);
+
+		expr = nft_expr_next(expr);
+	}
+}
+
+static void nft_rule_expr_deactivate(const struct nft_ctx *ctx,
+				     struct nft_rule *rule)
+{
+	struct nft_expr *expr;
+
+	expr = nft_expr_first(rule);
+	while (expr != nft_expr_last(rule) && expr->ops) {
+		if (expr->ops->deactivate)
+			expr->ops->deactivate(ctx, expr);
+
+		expr = nft_expr_next(expr);
+	}
+}
+
 static int
 nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule)
 {
@@ -259,6 +287,7 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule)
 		nft_trans_destroy(trans);
 		return err;
 	}
+	nft_rule_expr_deactivate(ctx, rule);
 
 	return 0;
 }
@@ -2238,6 +2267,13 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
 	kfree(rule);
 }
 
+static void nf_tables_rule_release(const struct nft_ctx *ctx,
+				   struct nft_rule *rule)
+{
+	nft_rule_expr_deactivate(ctx, rule);
+	nf_tables_rule_destroy(ctx, rule);
+}
+
 #define NFT_RULE_MAXEXPRS 128
 
 static struct nft_expr_info *info;
@@ -2402,7 +2438,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
 	return 0;
 
 err2:
-	nf_tables_rule_destroy(&ctx, rule);
+	nf_tables_rule_release(&ctx, rule);
 err1:
 	for (i = 0; i < n; i++) {
 		if (info[i].ops != NULL)
@@ -4044,8 +4080,10 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
 		if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) ^
 		    nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) ||
 		    nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) ^
-		    nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF))
-			return -EBUSY;
+		    nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF)) {
+			err = -EBUSY;
+			goto err5;
+		}
 		if ((nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
 		     nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) &&
 		     memcmp(nft_set_ext_data(ext),
@@ -4130,7 +4168,7 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
  * NFT_GOTO verdicts. This function must be called on active data objects
  * from the second phase of the commit protocol.
  */
-static void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
+void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
 {
 	if (type == NFT_DATA_VERDICT) {
 		switch (data->verdict.code) {
@@ -5761,7 +5799,7 @@ static void nft_chain_commit_update(struct nft_trans *trans)
 	}
 }
 
-static void nf_tables_commit_release(struct nft_trans *trans)
+static void nft_commit_release(struct nft_trans *trans)
 {
 	switch (trans->msg_type) {
 	case NFT_MSG_DELTABLE:
@@ -5790,6 +5828,21 @@ static void nf_tables_commit_release(struct nft_trans *trans)
 	kfree(trans);
 }
 
+static void nf_tables_commit_release(struct net *net)
+{
+	struct nft_trans *trans, *next;
+
+	if (list_empty(&net->nft.commit_list))
+		return;
+
+	synchronize_rcu();
+
+	list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
+		list_del(&trans->list);
+		nft_commit_release(trans);
+	}
+}
+
 static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 {
 	struct nft_trans *trans, *next;
@@ -5920,13 +5973,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 		}
 	}
 
-	synchronize_rcu();
-
-	list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
-		list_del(&trans->list);
-		nf_tables_commit_release(trans);
-	}
-
+	nf_tables_commit_release(net);
 	nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
 
 	return 0;
@@ -6006,10 +6053,12 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
 		case NFT_MSG_NEWRULE:
 			trans->ctx.chain->use--;
 			list_del_rcu(&nft_trans_rule(trans)->list);
+			nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans));
 			break;
 		case NFT_MSG_DELRULE:
 			trans->ctx.chain->use++;
 			nft_clear(trans->ctx.net, nft_trans_rule(trans));
+			nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans));
 			nft_trans_destroy(trans);
 			break;
 		case NFT_MSG_NEWSET:
@@ -6585,7 +6634,7 @@ int __nft_release_basechain(struct nft_ctx *ctx)
 	list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) {
 		list_del(&rule->list);
 		ctx->chain->use--;
-		nf_tables_rule_destroy(ctx, rule);
+		nf_tables_rule_release(ctx, rule);
 	}
 	list_del(&ctx->chain->list);
 	ctx->table->use--;
@@ -6623,7 +6672,7 @@ static void __nft_release_tables(struct net *net)
 		list_for_each_entry_safe(rule, nr, &chain->rules, list) {
 			list_del(&rule->list);
 			chain->use--;
-			nf_tables_rule_destroy(&ctx, rule);
+			nf_tables_rule_release(&ctx, rule);
 		}
 	}
 	list_for_each_entry_safe(flowtable, nf, &table->flowtables, list) {
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index dfd0bf3810d2..942702a2776f 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -119,15 +119,22 @@ DEFINE_STATIC_KEY_FALSE(nft_counters_enabled);
 static noinline void nft_update_chain_stats(const struct nft_chain *chain,
 					    const struct nft_pktinfo *pkt)
 {
+	struct nft_base_chain *base_chain;
 	struct nft_stats *stats;
 
-	local_bh_disable();
-	stats = this_cpu_ptr(rcu_dereference(nft_base_chain(chain)->stats));
-	u64_stats_update_begin(&stats->syncp);
-	stats->pkts++;
-	stats->bytes += pkt->skb->len;
-	u64_stats_update_end(&stats->syncp);
-	local_bh_enable();
+	base_chain = nft_base_chain(chain);
+	if (!base_chain->stats)
+		return;
+
+	stats = this_cpu_ptr(rcu_dereference(base_chain->stats));
+	if (stats) {
+		local_bh_disable();
+		u64_stats_update_begin(&stats->syncp);
+		stats->pkts++;
+		stats->bytes += pkt->skb->len;
+		u64_stats_update_end(&stats->syncp);
+		local_bh_enable();
+	}
 }
 
 struct nft_jumpstack {
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index b9505bcd3827..6ddf89183e7b 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -115,7 +115,7 @@ static int nfnl_acct_new(struct net *net, struct sock *nfnl,
 		nfacct->flags = flags;
 	}
 
-	strncpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX);
+	nla_strlcpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX);
 
 	if (tb[NFACCT_BYTES]) {
 		atomic64_set(&nfacct->bytes,
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 4a4b293fb2e5..fa026b269b36 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -149,8 +149,8 @@ nfnl_cthelper_expect_policy(struct nf_conntrack_expect_policy *expect_policy,
 	    !tb[NFCTH_POLICY_EXPECT_TIMEOUT])
 		return -EINVAL;
 
-	strncpy(expect_policy->name,
+	nla_strlcpy(expect_policy->name,
 		nla_data(tb[NFCTH_POLICY_NAME]), NF_CT_HELPER_NAME_LEN);
 	expect_policy->max_expected =
 		ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
 	if (expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT)
@@ -234,7 +234,8 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
 	if (ret < 0)
 		goto err1;
 
-	strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
+	nla_strlcpy(helper->name,
+		    nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
 	size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
 	if (size > FIELD_SIZEOF(struct nf_conn_help, data)) {
 		ret = -ENOMEM;
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 8e23726b9081..1d99a1efdafc 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -27,14 +27,31 @@ struct nft_xt {
 	struct list_head head;
 	struct nft_expr_ops ops;
 	unsigned int refcnt;
+
+	/* Unlike other expressions, ops doesn't have static storage duration.
+	 * nft core assumes they do. We use kfree_rcu so that nft core can
+	 * can check expr->ops->size even after nft_compat->destroy() frees
+	 * the nft_xt struct that holds the ops structure.
+	 */
+	struct rcu_head rcu_head;
+};
+
+/* Used for matches where *info is larger than X byte */
+#define NFT_MATCH_LARGE_THRESH 192
+
+struct nft_xt_match_priv {
+	void *info;
 };
 
-static void nft_xt_put(struct nft_xt *xt)
+static bool nft_xt_put(struct nft_xt *xt)
 {
 	if (--xt->refcnt == 0) {
 		list_del(&xt->head);
-		kfree(xt);
+		kfree_rcu(xt, rcu_head);
+		return true;
 	}
+
+	return false;
 }
 
 static int nft_compat_chain_validate_dependency(const char *tablename,
@@ -226,6 +243,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 	struct xt_target *target = expr->ops->data;
 	struct xt_tgchk_param par;
 	size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO]));
+	struct nft_xt *nft_xt;
 	u16 proto = 0;
 	bool inv = false;
 	union nft_entry e = {};
@@ -236,25 +254,22 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 	if (ctx->nla[NFTA_RULE_COMPAT]) {
 		ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv);
 		if (ret < 0)
-			goto err;
+			return ret;
 	}
 
 	nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv);
 
 	ret = xt_check_target(&par, size, proto, inv);
 	if (ret < 0)
-		goto err;
+		return ret;
 
 	/* The standard target cannot be used */
-	if (target->target == NULL) {
-		ret = -EINVAL;
-		goto err;
-	}
+	if (!target->target)
+		return -EINVAL;
 
+	nft_xt = container_of(expr->ops, struct nft_xt, ops);
+	nft_xt->refcnt++;
 	return 0;
-err:
-	module_put(target->me);
-	return ret;
 }
 
 static void
@@ -271,8 +286,8 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
 	if (par.target->destroy != NULL)
 		par.target->destroy(&par);
 
-	nft_xt_put(container_of(expr->ops, struct nft_xt, ops));
-	module_put(target->me);
+	if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
+		module_put(target->me);
 }
 
 static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
@@ -316,11 +331,11 @@ static int nft_target_validate(const struct nft_ctx *ctx,
316 return 0; 331 return 0;
317} 332}
318 333
319static void nft_match_eval(const struct nft_expr *expr, 334static void __nft_match_eval(const struct nft_expr *expr,
320 struct nft_regs *regs, 335 struct nft_regs *regs,
321 const struct nft_pktinfo *pkt) 336 const struct nft_pktinfo *pkt,
337 void *info)
322{ 338{
323 void *info = nft_expr_priv(expr);
324 struct xt_match *match = expr->ops->data; 339 struct xt_match *match = expr->ops->data;
325 struct sk_buff *skb = pkt->skb; 340 struct sk_buff *skb = pkt->skb;
326 bool ret; 341 bool ret;
@@ -344,6 +359,22 @@ static void nft_match_eval(const struct nft_expr *expr,
344 } 359 }
345} 360}
346 361
362static void nft_match_large_eval(const struct nft_expr *expr,
363 struct nft_regs *regs,
364 const struct nft_pktinfo *pkt)
365{
366 struct nft_xt_match_priv *priv = nft_expr_priv(expr);
367
368 __nft_match_eval(expr, regs, pkt, priv->info);
369}
370
371static void nft_match_eval(const struct nft_expr *expr,
372 struct nft_regs *regs,
373 const struct nft_pktinfo *pkt)
374{
375 __nft_match_eval(expr, regs, pkt, nft_expr_priv(expr));
376}
377
347static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = { 378static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
348 [NFTA_MATCH_NAME] = { .type = NLA_NUL_STRING }, 379 [NFTA_MATCH_NAME] = { .type = NLA_NUL_STRING },
349 [NFTA_MATCH_REV] = { .type = NLA_U32 }, 380 [NFTA_MATCH_REV] = { .type = NLA_U32 },
@@ -404,13 +435,14 @@ static void match_compat_from_user(struct xt_match *m, void *in, void *out)
404} 435}
405 436
406static int 437static int
407nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, 438__nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
408 const struct nlattr * const tb[]) 439 const struct nlattr * const tb[],
440 void *info)
409{ 441{
410 void *info = nft_expr_priv(expr);
411 struct xt_match *match = expr->ops->data; 442 struct xt_match *match = expr->ops->data;
412 struct xt_mtchk_param par; 443 struct xt_mtchk_param par;
413 size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO])); 444 size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO]));
445 struct nft_xt *nft_xt;
414 u16 proto = 0; 446 u16 proto = 0;
415 bool inv = false; 447 bool inv = false;
416 union nft_entry e = {}; 448 union nft_entry e = {};
@@ -421,26 +453,50 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
421 if (ctx->nla[NFTA_RULE_COMPAT]) { 453 if (ctx->nla[NFTA_RULE_COMPAT]) {
422 ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv); 454 ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv);
423 if (ret < 0) 455 if (ret < 0)
424 goto err; 456 return ret;
425 } 457 }
426 458
427 nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv); 459 nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
428 460
429 ret = xt_check_match(&par, size, proto, inv); 461 ret = xt_check_match(&par, size, proto, inv);
430 if (ret < 0) 462 if (ret < 0)
431 goto err; 463 return ret;
432 464
465 nft_xt = container_of(expr->ops, struct nft_xt, ops);
466 nft_xt->refcnt++;
433 return 0; 467 return 0;
434err: 468}
435 module_put(match->me); 469
470static int
471nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
472 const struct nlattr * const tb[])
473{
474 return __nft_match_init(ctx, expr, tb, nft_expr_priv(expr));
475}
476
477static int
478nft_match_large_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
479 const struct nlattr * const tb[])
480{
481 struct nft_xt_match_priv *priv = nft_expr_priv(expr);
482 struct xt_match *m = expr->ops->data;
483 int ret;
484
485 priv->info = kmalloc(XT_ALIGN(m->matchsize), GFP_KERNEL);
486 if (!priv->info)
487 return -ENOMEM;
488
489 ret = __nft_match_init(ctx, expr, tb, priv->info);
490 if (ret)
491 kfree(priv->info);
436 return ret; 492 return ret;
437} 493}
438 494
439static void 495static void
440nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) 496__nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
497 void *info)
441{ 498{
442 struct xt_match *match = expr->ops->data; 499 struct xt_match *match = expr->ops->data;
443 void *info = nft_expr_priv(expr);
444 struct xt_mtdtor_param par; 500 struct xt_mtdtor_param par;
445 501
446 par.net = ctx->net; 502 par.net = ctx->net;
@@ -450,13 +506,28 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
450 if (par.match->destroy != NULL) 506 if (par.match->destroy != NULL)
451 par.match->destroy(&par); 507 par.match->destroy(&par);
452 508
453 nft_xt_put(container_of(expr->ops, struct nft_xt, ops)); 509 if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
454 module_put(match->me); 510 module_put(match->me);
455} 511}
456 512
457static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr) 513static void
514nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
515{
516 __nft_match_destroy(ctx, expr, nft_expr_priv(expr));
517}
518
519static void
520nft_match_large_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
521{
522 struct nft_xt_match_priv *priv = nft_expr_priv(expr);
523
524 __nft_match_destroy(ctx, expr, priv->info);
525 kfree(priv->info);
526}
527
528static int __nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr,
529 void *info)
458{ 530{
459 void *info = nft_expr_priv(expr);
460 struct xt_match *match = expr->ops->data; 531 struct xt_match *match = expr->ops->data;
461 532
462 if (nla_put_string(skb, NFTA_MATCH_NAME, match->name) || 533 if (nla_put_string(skb, NFTA_MATCH_NAME, match->name) ||
@@ -470,6 +541,18 @@ nla_put_failure:
470 return -1; 541 return -1;
471} 542}
472 543
544static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr)
545{
546 return __nft_match_dump(skb, expr, nft_expr_priv(expr));
547}
548
549static int nft_match_large_dump(struct sk_buff *skb, const struct nft_expr *e)
550{
551 struct nft_xt_match_priv *priv = nft_expr_priv(e);
552
553 return __nft_match_dump(skb, e, priv->info);
554}
555
473static int nft_match_validate(const struct nft_ctx *ctx, 556static int nft_match_validate(const struct nft_ctx *ctx,
474 const struct nft_expr *expr, 557 const struct nft_expr *expr,
475 const struct nft_data **data) 558 const struct nft_data **data)
@@ -637,6 +720,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
637{ 720{
638 struct nft_xt *nft_match; 721 struct nft_xt *nft_match;
639 struct xt_match *match; 722 struct xt_match *match;
723 unsigned int matchsize;
640 char *mt_name; 724 char *mt_name;
641 u32 rev, family; 725 u32 rev, family;
642 int err; 726 int err;
@@ -654,13 +738,8 @@ nft_match_select_ops(const struct nft_ctx *ctx,
654 list_for_each_entry(nft_match, &nft_match_list, head) { 738 list_for_each_entry(nft_match, &nft_match_list, head) {
655 struct xt_match *match = nft_match->ops.data; 739 struct xt_match *match = nft_match->ops.data;
656 740
657 if (nft_match_cmp(match, mt_name, rev, family)) { 741 if (nft_match_cmp(match, mt_name, rev, family))
658 if (!try_module_get(match->me))
659 return ERR_PTR(-ENOENT);
660
661 nft_match->refcnt++;
662 return &nft_match->ops; 742 return &nft_match->ops;
663 }
664 } 743 }
665 744
666 match = xt_request_find_match(family, mt_name, rev); 745 match = xt_request_find_match(family, mt_name, rev);
@@ -679,9 +758,8 @@ nft_match_select_ops(const struct nft_ctx *ctx,
679 goto err; 758 goto err;
680 } 759 }
681 760
682 nft_match->refcnt = 1; 761 nft_match->refcnt = 0;
683 nft_match->ops.type = &nft_match_type; 762 nft_match->ops.type = &nft_match_type;
684 nft_match->ops.size = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize));
685 nft_match->ops.eval = nft_match_eval; 763 nft_match->ops.eval = nft_match_eval;
686 nft_match->ops.init = nft_match_init; 764 nft_match->ops.init = nft_match_init;
687 nft_match->ops.destroy = nft_match_destroy; 765 nft_match->ops.destroy = nft_match_destroy;
@@ -689,6 +767,18 @@ nft_match_select_ops(const struct nft_ctx *ctx,
689 nft_match->ops.validate = nft_match_validate; 767 nft_match->ops.validate = nft_match_validate;
690 nft_match->ops.data = match; 768 nft_match->ops.data = match;
691 769
770 matchsize = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize));
771 if (matchsize > NFT_MATCH_LARGE_THRESH) {
772 matchsize = NFT_EXPR_SIZE(sizeof(struct nft_xt_match_priv));
773
774 nft_match->ops.eval = nft_match_large_eval;
775 nft_match->ops.init = nft_match_large_init;
776 nft_match->ops.destroy = nft_match_large_destroy;
777 nft_match->ops.dump = nft_match_large_dump;
778 }
779
780 nft_match->ops.size = matchsize;
781
692 list_add(&nft_match->head, &nft_match_list); 782 list_add(&nft_match->head, &nft_match_list);
693 783
694 return &nft_match->ops; 784 return &nft_match->ops;
@@ -739,13 +829,8 @@ nft_target_select_ops(const struct nft_ctx *ctx,
739 list_for_each_entry(nft_target, &nft_target_list, head) { 829 list_for_each_entry(nft_target, &nft_target_list, head) {
740 struct xt_target *target = nft_target->ops.data; 830 struct xt_target *target = nft_target->ops.data;
741 831
742 if (nft_target_cmp(target, tg_name, rev, family)) { 832 if (nft_target_cmp(target, tg_name, rev, family))
743 if (!try_module_get(target->me))
744 return ERR_PTR(-ENOENT);
745
746 nft_target->refcnt++;
747 return &nft_target->ops; 833 return &nft_target->ops;
748 }
749 } 834 }
750 835
751 target = xt_request_find_target(family, tg_name, rev); 836 target = xt_request_find_target(family, tg_name, rev);
@@ -764,7 +849,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
764 goto err; 849 goto err;
765 } 850 }
766 851
767 nft_target->refcnt = 1; 852 nft_target->refcnt = 0;
768 nft_target->ops.type = &nft_target_type; 853 nft_target->ops.type = &nft_target_type;
769 nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); 854 nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
770 nft_target->ops.init = nft_target_init; 855 nft_target->ops.init = nft_target_init;
@@ -823,6 +908,32 @@ err_match:
823 908
824static void __exit nft_compat_module_exit(void) 909static void __exit nft_compat_module_exit(void)
825{ 910{
911 struct nft_xt *xt, *next;
912
913 /* list should be empty here, it can be non-empty only in case there
914 * was an error that caused nft_xt expr to not be initialized fully
915 * and noone else requested the same expression later.
916 *
917 * In this case, the lists contain 0-refcount entries that still
918 * hold module reference.
919 */
920 list_for_each_entry_safe(xt, next, &nft_target_list, head) {
921 struct xt_target *target = xt->ops.data;
922
923 if (WARN_ON_ONCE(xt->refcnt))
924 continue;
925 module_put(target->me);
926 kfree(xt);
927 }
928
929 list_for_each_entry_safe(xt, next, &nft_match_list, head) {
930 struct xt_match *match = xt->ops.data;
931
932 if (WARN_ON_ONCE(xt->refcnt))
933 continue;
934 module_put(match->me);
935 kfree(xt);
936 }
826 nfnetlink_subsys_unregister(&nfnl_compat_subsys); 937 nfnetlink_subsys_unregister(&nfnl_compat_subsys);
827 nft_unregister_expr(&nft_target_type); 938 nft_unregister_expr(&nft_target_type);
828 nft_unregister_expr(&nft_match_type); 939 nft_unregister_expr(&nft_match_type);
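The comments in the nft_compat.c hunks above describe the ownership rule the patch moves to: each expression instance takes a reference on its shared nft_xt in init, drops it in destroy, and only the final put frees the structure (via kfree_rcu) and tells the caller it may now release the module. Below is a minimal userspace sketch of that rule, with hypothetical names and plain free() standing in for kfree_rcu(); it is an illustration, not kernel code.

/*
 * Illustrative sketch (not from the patch): the last put reports "true"
 * so the caller knows it must also drop the module reference that was
 * pinned when the ops were first selected.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_xt {
	unsigned int refcnt;
	const char *name;
};

static void fake_xt_get(struct fake_xt *xt)
{
	xt->refcnt++;			/* analogous to nft_*_init() */
}

static bool fake_xt_put(struct fake_xt *xt)
{
	if (--xt->refcnt == 0) {	/* last user gone */
		printf("freeing %s\n", xt->name);
		free(xt);		/* the kernel defers this with kfree_rcu() */
		return true;		/* caller now drops its module reference */
	}
	return false;
}

int main(void)
{
	struct fake_xt *xt = calloc(1, sizeof(*xt));

	xt->name = "example";
	fake_xt_get(xt);
	fake_xt_get(xt);
	if (fake_xt_put(xt))
		printf("unexpected early free\n");
	if (fake_xt_put(xt))
		printf("drop the module reference here\n");
	return 0;
}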
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index 4717d7796927..aa87ff8beae8 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -69,8 +69,16 @@ err1:
 	return err;
 }
 
-static void nft_immediate_destroy(const struct nft_ctx *ctx,
-				  const struct nft_expr *expr)
+static void nft_immediate_activate(const struct nft_ctx *ctx,
+				   const struct nft_expr *expr)
+{
+	const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+
+	return nft_data_hold(&priv->data, nft_dreg_to_type(priv->dreg));
+}
+
+static void nft_immediate_deactivate(const struct nft_ctx *ctx,
+				     const struct nft_expr *expr)
 {
 	const struct nft_immediate_expr *priv = nft_expr_priv(expr);
 
@@ -108,7 +116,8 @@ static const struct nft_expr_ops nft_imm_ops = {
 	.size = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)),
 	.eval = nft_immediate_eval,
 	.init = nft_immediate_init,
-	.destroy = nft_immediate_destroy,
+	.activate = nft_immediate_activate,
+	.deactivate = nft_immediate_deactivate,
 	.dump = nft_immediate_dump,
 	.validate = nft_immediate_validate,
 };
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 71325fef647d..cb7cb300c3bc 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -183,6 +183,9 @@ struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
 	struct xt_match *m;
 	int err = -ENOENT;
 
+	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
+		return ERR_PTR(-EINVAL);
+
 	mutex_lock(&xt[af].mutex);
 	list_for_each_entry(m, &xt[af].match, list) {
 		if (strcmp(m->name, name) == 0) {
@@ -229,6 +232,9 @@ struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
 	struct xt_target *t;
 	int err = -ENOENT;
 
+	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
+		return ERR_PTR(-EINVAL);
+
 	mutex_lock(&xt[af].mutex);
 	list_for_each_entry(t, &xt[af].target, list) {
 		if (strcmp(t->name, name) == 0) {
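The x_tables.c change above rejects extension names that are not NUL-terminated within XT_EXTENSION_MAXNAMELEN before any list lookup is attempted. A small userspace sketch of the same guard follows; the limit value is illustrative rather than the kernel's constant.

/*
 * Illustrative sketch: strnlen() never reads past the limit, so a name
 * that fills the whole buffer without a terminating NUL is rejected up
 * front instead of being compared as if it were a valid string.
 */
#include <stdio.h>
#include <string.h>

#define EXT_MAXNAMELEN 29	/* stand-in for XT_EXTENSION_MAXNAMELEN */

static int name_ok(const char *name)
{
	return strnlen(name, EXT_MAXNAMELEN) < EXT_MAXNAMELEN;
}

int main(void)
{
	char bad[EXT_MAXNAMELEN];

	memset(bad, 'a', sizeof(bad));	/* no NUL within the limit */
	printf("\"tcp\" ok: %d\n", name_ok("tcp"));
	printf("unterminated ok: %d\n", name_ok(bad));
	return 0;
}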
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 01f3515cada0..e9422fe45179 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2903,13 +2903,15 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 	if (skb == NULL)
 		goto out_unlock;
 
-	skb_set_network_header(skb, reserve);
+	skb_reset_network_header(skb);
 
 	err = -EINVAL;
 	if (sock->type == SOCK_DGRAM) {
 		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
 		if (unlikely(offset < 0))
 			goto out_free;
+	} else if (reserve) {
+		skb_push(skb, reserve);
 	}
 
 	/* Returns -EFAULT on error */
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 853604685965..1fb39e1f9d07 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -161,6 +161,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
 		case htons(ETH_P_8021AD):
 			break;
 		default:
+			if (exists)
+				tcf_idr_release(*a, bind);
 			return -EPROTONOSUPPORT;
 		}
 	} else {
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 16644b3d2362..56c181c3feeb 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -222,10 +222,11 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
 					 extack);
 		if (IS_ERR(child))
 			return PTR_ERR(child);
-	}
 
-	if (child != &noop_qdisc)
+		/* child is fifo, no need to check for noop_qdisc */
 		qdisc_hash_add(child, true);
+	}
+
 	sch_tree_lock(sch);
 	q->flags = ctl->flags;
 	q->limit = ctl->limit;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 03225a8df973..6f74a426f159 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -383,6 +383,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
 			err = PTR_ERR(child);
 			goto done;
 		}
+
+		/* child is fifo, no need to check for noop_qdisc */
+		qdisc_hash_add(child, true);
 	}
 
 	sch_tree_lock(sch);
@@ -391,8 +394,6 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
 					  q->qdisc->qstats.backlog);
 		qdisc_destroy(q->qdisc);
 		q->qdisc = child;
-		if (child != &noop_qdisc)
-			qdisc_hash_add(child, true);
 	}
 	q->limit = qopt->limit;
 	if (tb[TCA_TBF_PBURST])
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 74568cdbca70..d7b88b2d1b22 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -245,40 +245,45 @@ out:
 static int smc_pnet_fill_entry(struct net *net, struct smc_pnetentry *pnetelem,
 			       struct nlattr *tb[])
 {
-	char *string, *ibname = NULL;
-	int rc = 0;
+	char *string, *ibname;
+	int rc;
 
 	memset(pnetelem, 0, sizeof(*pnetelem));
 	INIT_LIST_HEAD(&pnetelem->list);
-	if (tb[SMC_PNETID_NAME]) {
-		string = (char *)nla_data(tb[SMC_PNETID_NAME]);
-		if (!smc_pnetid_valid(string, pnetelem->pnet_name)) {
-			rc = -EINVAL;
-			goto error;
-		}
-	}
-	if (tb[SMC_PNETID_ETHNAME]) {
-		string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]);
-		pnetelem->ndev = dev_get_by_name(net, string);
-		if (!pnetelem->ndev)
-			return -ENOENT;
-	}
-	if (tb[SMC_PNETID_IBNAME]) {
-		ibname = (char *)nla_data(tb[SMC_PNETID_IBNAME]);
-		ibname = strim(ibname);
-		pnetelem->smcibdev = smc_pnet_find_ib(ibname);
-		if (!pnetelem->smcibdev) {
-			rc = -ENOENT;
-			goto error;
-		}
-	}
-	if (tb[SMC_PNETID_IBPORT]) {
-		pnetelem->ib_port = nla_get_u8(tb[SMC_PNETID_IBPORT]);
-		if (pnetelem->ib_port > SMC_MAX_PORTS) {
-			rc = -EINVAL;
-			goto error;
-		}
-	}
+
+	rc = -EINVAL;
+	if (!tb[SMC_PNETID_NAME])
+		goto error;
+	string = (char *)nla_data(tb[SMC_PNETID_NAME]);
+	if (!smc_pnetid_valid(string, pnetelem->pnet_name))
+		goto error;
+
+	rc = -EINVAL;
+	if (!tb[SMC_PNETID_ETHNAME])
+		goto error;
+	rc = -ENOENT;
+	string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]);
+	pnetelem->ndev = dev_get_by_name(net, string);
+	if (!pnetelem->ndev)
+		goto error;
+
+	rc = -EINVAL;
+	if (!tb[SMC_PNETID_IBNAME])
+		goto error;
+	rc = -ENOENT;
+	ibname = (char *)nla_data(tb[SMC_PNETID_IBNAME]);
+	ibname = strim(ibname);
+	pnetelem->smcibdev = smc_pnet_find_ib(ibname);
+	if (!pnetelem->smcibdev)
+		goto error;
+
+	rc = -EINVAL;
+	if (!tb[SMC_PNETID_IBPORT])
+		goto error;
+	pnetelem->ib_port = nla_get_u8(tb[SMC_PNETID_IBPORT]);
+	if (pnetelem->ib_port < 1 || pnetelem->ib_port > SMC_MAX_PORTS)
+		goto error;
+
 	return 0;
 
 error:
@@ -307,6 +312,8 @@ static int smc_pnet_get(struct sk_buff *skb, struct genl_info *info)
 	void *hdr;
 	int rc;
 
+	if (!info->attrs[SMC_PNETID_NAME])
+		return -EINVAL;
 	pnetelem = smc_pnet_find_pnetid(
 		(char *)nla_data(info->attrs[SMC_PNETID_NAME]));
 	if (!pnetelem)
@@ -359,6 +366,8 @@ static int smc_pnet_add(struct sk_buff *skb, struct genl_info *info)
 
 static int smc_pnet_del(struct sk_buff *skb, struct genl_info *info)
 {
+	if (!info->attrs[SMC_PNETID_NAME])
+		return -EINVAL;
 	return smc_pnet_remove_by_pnetid(
 		(char *)nla_data(info->attrs[SMC_PNETID_NAME]));
 }
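The smc_pnet.c hunks above all enforce one rule: a netlink attribute may simply be absent from a request, so each handler checks the attribute table slot before calling nla_data() on it. A rough userspace stand-in for that shape (hypothetical names, -22 standing in for -EINVAL) is sketched below.

/*
 * Illustrative sketch: validate optional attributes before use instead
 * of assuming the sender supplied them.
 */
#include <stdio.h>

#define PNETID_NAME 1
#define PNETID_MAX  4

static int handle_del(const char *attrs[PNETID_MAX + 1])
{
	if (!attrs[PNETID_NAME])	/* attribute not supplied */
		return -22;		/* stand-in for -EINVAL */
	printf("deleting pnetid %s\n", attrs[PNETID_NAME]);
	return 0;
}

int main(void)
{
	const char *with_name[PNETID_MAX + 1] = { [PNETID_NAME] = "net1" };
	const char *without_name[PNETID_MAX + 1] = { 0 };

	printf("rc = %d\n", handle_del(with_name));
	printf("rc = %d\n", handle_del(without_name));
	return 0;
}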
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 71e79597f940..e1c93ce74e0f 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -680,7 +680,6 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
 	struct scatterlist *sgin = &sgin_arr[0];
 	struct strp_msg *rxm = strp_msg(skb);
 	int ret, nsg = ARRAY_SIZE(sgin_arr);
-	char aad_recv[TLS_AAD_SPACE_SIZE];
 	struct sk_buff *unused;
 
 	ret = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
@@ -698,13 +697,13 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
 	}
 
 	sg_init_table(sgin, nsg);
-	sg_set_buf(&sgin[0], aad_recv, sizeof(aad_recv));
+	sg_set_buf(&sgin[0], ctx->rx_aad_ciphertext, TLS_AAD_SPACE_SIZE);
 
 	nsg = skb_to_sgvec(skb, &sgin[1],
 			   rxm->offset + tls_ctx->rx.prepend_size,
 			   rxm->full_len - tls_ctx->rx.prepend_size);
 
-	tls_make_aad(aad_recv,
+	tls_make_aad(ctx->rx_aad_ciphertext,
 		     rxm->full_len - tls_ctx->rx.overhead_size,
 		     tls_ctx->rx.rec_seq,
 		     tls_ctx->rx.rec_seq_size,
@@ -803,12 +802,12 @@ int tls_sw_recvmsg(struct sock *sk,
 		if (to_copy <= len && page_count < MAX_SKB_FRAGS &&
 		    likely(!(flags & MSG_PEEK))) {
 			struct scatterlist sgin[MAX_SKB_FRAGS + 1];
-			char unused[21];
 			int pages = 0;
 
 			zc = true;
 			sg_init_table(sgin, MAX_SKB_FRAGS + 1);
-			sg_set_buf(&sgin[0], unused, 13);
+			sg_set_buf(&sgin[0], ctx->rx_aad_plaintext,
+				   TLS_AAD_SPACE_SIZE);
 
 			err = zerocopy_from_iter(sk, &msg->msg_iter,
 						 to_copy, &pages,