Diffstat (limited to 'net')
-rw-r--r--  net/bridge/br_stp.c                       |  2
-rw-r--r--  net/bridge/netfilter/ebtables.c           |  4
-rw-r--r--  net/bridge/netfilter/nft_reject_bridge.c  | 20
-rw-r--r--  net/core/filter.c                         | 38
-rw-r--r--  net/core/netpoll.c                        |  3
-rw-r--r--  net/core/rtnetlink.c                      |  1
-rw-r--r--  net/ipv4/fou.c                            | 16
-rw-r--r--  net/ipv4/ip_tunnel_core.c                 |  4
-rw-r--r--  net/ipv4/netfilter/arp_tables.c           | 43
-rw-r--r--  net/ipv4/netfilter/ip_tables.c            | 48
-rw-r--r--  net/ipv4/netfilter/ipt_SYNPROXY.c         | 54
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c           | 48
-rw-r--r--  net/ipv6/udp.c                            |  4
-rw-r--r--  net/netfilter/ipset/ip_set_bitmap_gen.h   |  2
-rw-r--r--  net/netfilter/ipset/ip_set_core.c         | 33
-rw-r--r--  net/netfilter/ipset/ip_set_hash_gen.h     |  2
-rw-r--r--  net/netfilter/ipset/ip_set_list_set.c     |  2
-rw-r--r--  net/netfilter/nfnetlink_queue.c           |  7
-rw-r--r--  net/openvswitch/Kconfig                   |  4
-rw-r--r--  net/openvswitch/conntrack.c               | 24
-rw-r--r--  net/sctp/output.c                         |  6
-rw-r--r--  net/switchdev/switchdev.c                 |  2
-rw-r--r--  net/xfrm/xfrm_input.c                     |  3
23 files changed, 224 insertions(+), 146 deletions(-)
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index e23449094188..9cb7044d0801 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -582,7 +582,7 @@ int br_set_ageing_time(struct net_bridge *br, u32 ageing_time) | |||
582 | int err; | 582 | int err; |
583 | 583 | ||
584 | err = switchdev_port_attr_set(br->dev, &attr); | 584 | err = switchdev_port_attr_set(br->dev, &attr); |
585 | if (err) | 585 | if (err && err != -EOPNOTSUPP) |
586 | return err; | 586 | return err; |
587 | 587 | ||
588 | br->ageing_time = t; | 588 | br->ageing_time = t; |
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 67b2e27999aa..8570bc7744c2 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1521,6 +1521,8 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | |||
1521 | if (copy_from_user(&tmp, user, sizeof(tmp))) | 1521 | if (copy_from_user(&tmp, user, sizeof(tmp))) |
1522 | return -EFAULT; | 1522 | return -EFAULT; |
1523 | 1523 | ||
1524 | tmp.name[sizeof(tmp.name) - 1] = '\0'; | ||
1525 | |||
1524 | t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); | 1526 | t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); |
1525 | if (!t) | 1527 | if (!t) |
1526 | return ret; | 1528 | return ret; |
@@ -2332,6 +2334,8 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd, | |||
2332 | if (copy_from_user(&tmp, user, sizeof(tmp))) | 2334 | if (copy_from_user(&tmp, user, sizeof(tmp))) |
2333 | return -EFAULT; | 2335 | return -EFAULT; |
2334 | 2336 | ||
2337 | tmp.name[sizeof(tmp.name) - 1] = '\0'; | ||
2338 | |||
2335 | t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); | 2339 | t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); |
2336 | if (!t) | 2340 | if (!t) |
2337 | return ret; | 2341 | return ret; |
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index adc8d7221dbb..77f7e7a9ebe1 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -40,7 +40,8 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb, | |||
40 | /* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT) | 40 | /* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT) |
41 | * or the bridge port (NF_BRIDGE PREROUTING). | 41 | * or the bridge port (NF_BRIDGE PREROUTING). |
42 | */ | 42 | */ |
43 | static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, | 43 | static void nft_reject_br_send_v4_tcp_reset(struct net *net, |
44 | struct sk_buff *oldskb, | ||
44 | const struct net_device *dev, | 45 | const struct net_device *dev, |
45 | int hook) | 46 | int hook) |
46 | { | 47 | { |
@@ -48,7 +49,6 @@ static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, | |||
48 | struct iphdr *niph; | 49 | struct iphdr *niph; |
49 | const struct tcphdr *oth; | 50 | const struct tcphdr *oth; |
50 | struct tcphdr _oth; | 51 | struct tcphdr _oth; |
51 | struct net *net = sock_net(oldskb->sk); | ||
52 | 52 | ||
53 | if (!nft_bridge_iphdr_validate(oldskb)) | 53 | if (!nft_bridge_iphdr_validate(oldskb)) |
54 | return; | 54 | return; |
@@ -75,7 +75,8 @@ static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, | |||
75 | br_deliver(br_port_get_rcu(dev), nskb); | 75 | br_deliver(br_port_get_rcu(dev), nskb); |
76 | } | 76 | } |
77 | 77 | ||
78 | static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, | 78 | static void nft_reject_br_send_v4_unreach(struct net *net, |
79 | struct sk_buff *oldskb, | ||
79 | const struct net_device *dev, | 80 | const struct net_device *dev, |
80 | int hook, u8 code) | 81 | int hook, u8 code) |
81 | { | 82 | { |
@@ -86,7 +87,6 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, | |||
86 | void *payload; | 87 | void *payload; |
87 | __wsum csum; | 88 | __wsum csum; |
88 | u8 proto; | 89 | u8 proto; |
89 | struct net *net = sock_net(oldskb->sk); | ||
90 | 90 | ||
91 | if (oldskb->csum_bad || !nft_bridge_iphdr_validate(oldskb)) | 91 | if (oldskb->csum_bad || !nft_bridge_iphdr_validate(oldskb)) |
92 | return; | 92 | return; |
@@ -273,17 +273,17 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr, | |||
273 | case htons(ETH_P_IP): | 273 | case htons(ETH_P_IP): |
274 | switch (priv->type) { | 274 | switch (priv->type) { |
275 | case NFT_REJECT_ICMP_UNREACH: | 275 | case NFT_REJECT_ICMP_UNREACH: |
276 | nft_reject_br_send_v4_unreach(pkt->skb, pkt->in, | 276 | nft_reject_br_send_v4_unreach(pkt->net, pkt->skb, |
277 | pkt->hook, | 277 | pkt->in, pkt->hook, |
278 | priv->icmp_code); | 278 | priv->icmp_code); |
279 | break; | 279 | break; |
280 | case NFT_REJECT_TCP_RST: | 280 | case NFT_REJECT_TCP_RST: |
281 | nft_reject_br_send_v4_tcp_reset(pkt->skb, pkt->in, | 281 | nft_reject_br_send_v4_tcp_reset(pkt->net, pkt->skb, |
282 | pkt->hook); | 282 | pkt->in, pkt->hook); |
283 | break; | 283 | break; |
284 | case NFT_REJECT_ICMPX_UNREACH: | 284 | case NFT_REJECT_ICMPX_UNREACH: |
285 | nft_reject_br_send_v4_unreach(pkt->skb, pkt->in, | 285 | nft_reject_br_send_v4_unreach(pkt->net, pkt->skb, |
286 | pkt->hook, | 286 | pkt->in, pkt->hook, |
287 | nft_reject_icmp_code(priv->icmp_code)); | 287 | nft_reject_icmp_code(priv->icmp_code)); |
288 | break; | 288 | break; |
289 | } | 289 | } |
diff --git a/net/core/filter.c b/net/core/filter.c
index b7177d01ecb0..ca7f832b2980 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1149,7 +1149,8 @@ void bpf_prog_destroy(struct bpf_prog *fp) | |||
1149 | } | 1149 | } |
1150 | EXPORT_SYMBOL_GPL(bpf_prog_destroy); | 1150 | EXPORT_SYMBOL_GPL(bpf_prog_destroy); |
1151 | 1151 | ||
1152 | static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk) | 1152 | static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk, |
1153 | bool locked) | ||
1153 | { | 1154 | { |
1154 | struct sk_filter *fp, *old_fp; | 1155 | struct sk_filter *fp, *old_fp; |
1155 | 1156 | ||
@@ -1165,10 +1166,8 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk) | |||
1165 | return -ENOMEM; | 1166 | return -ENOMEM; |
1166 | } | 1167 | } |
1167 | 1168 | ||
1168 | old_fp = rcu_dereference_protected(sk->sk_filter, | 1169 | old_fp = rcu_dereference_protected(sk->sk_filter, locked); |
1169 | sock_owned_by_user(sk)); | ||
1170 | rcu_assign_pointer(sk->sk_filter, fp); | 1170 | rcu_assign_pointer(sk->sk_filter, fp); |
1171 | |||
1172 | if (old_fp) | 1171 | if (old_fp) |
1173 | sk_filter_uncharge(sk, old_fp); | 1172 | sk_filter_uncharge(sk, old_fp); |
1174 | 1173 | ||
@@ -1247,7 +1246,8 @@ struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk) | |||
1247 | * occurs or there is insufficient memory for the filter a negative | 1246 | * occurs or there is insufficient memory for the filter a negative |
1248 | * errno code is returned. On success the return is zero. | 1247 | * errno code is returned. On success the return is zero. |
1249 | */ | 1248 | */ |
1250 | int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) | 1249 | int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk, |
1250 | bool locked) | ||
1251 | { | 1251 | { |
1252 | struct bpf_prog *prog = __get_filter(fprog, sk); | 1252 | struct bpf_prog *prog = __get_filter(fprog, sk); |
1253 | int err; | 1253 | int err; |
@@ -1255,7 +1255,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) | |||
1255 | if (IS_ERR(prog)) | 1255 | if (IS_ERR(prog)) |
1256 | return PTR_ERR(prog); | 1256 | return PTR_ERR(prog); |
1257 | 1257 | ||
1258 | err = __sk_attach_prog(prog, sk); | 1258 | err = __sk_attach_prog(prog, sk, locked); |
1259 | if (err < 0) { | 1259 | if (err < 0) { |
1260 | __bpf_prog_release(prog); | 1260 | __bpf_prog_release(prog); |
1261 | return err; | 1261 | return err; |
@@ -1263,7 +1263,12 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) | |||
1263 | 1263 | ||
1264 | return 0; | 1264 | return 0; |
1265 | } | 1265 | } |
1266 | EXPORT_SYMBOL_GPL(sk_attach_filter); | 1266 | EXPORT_SYMBOL_GPL(__sk_attach_filter); |
1267 | |||
1268 | int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) | ||
1269 | { | ||
1270 | return __sk_attach_filter(fprog, sk, sock_owned_by_user(sk)); | ||
1271 | } | ||
1267 | 1272 | ||
1268 | int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk) | 1273 | int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk) |
1269 | { | 1274 | { |
@@ -1309,7 +1314,7 @@ int sk_attach_bpf(u32 ufd, struct sock *sk) | |||
1309 | if (IS_ERR(prog)) | 1314 | if (IS_ERR(prog)) |
1310 | return PTR_ERR(prog); | 1315 | return PTR_ERR(prog); |
1311 | 1316 | ||
1312 | err = __sk_attach_prog(prog, sk); | 1317 | err = __sk_attach_prog(prog, sk, sock_owned_by_user(sk)); |
1313 | if (err < 0) { | 1318 | if (err < 0) { |
1314 | bpf_prog_put(prog); | 1319 | bpf_prog_put(prog); |
1315 | return err; | 1320 | return err; |
@@ -1764,6 +1769,7 @@ static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5) | |||
1764 | if (unlikely(size != sizeof(struct bpf_tunnel_key))) { | 1769 | if (unlikely(size != sizeof(struct bpf_tunnel_key))) { |
1765 | switch (size) { | 1770 | switch (size) { |
1766 | case offsetof(struct bpf_tunnel_key, tunnel_label): | 1771 | case offsetof(struct bpf_tunnel_key, tunnel_label): |
1772 | case offsetof(struct bpf_tunnel_key, tunnel_ext): | ||
1767 | goto set_compat; | 1773 | goto set_compat; |
1768 | case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): | 1774 | case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): |
1769 | /* Fixup deprecated structure layouts here, so we have | 1775 | /* Fixup deprecated structure layouts here, so we have |
@@ -1849,6 +1855,7 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5) | |||
1849 | if (unlikely(size != sizeof(struct bpf_tunnel_key))) { | 1855 | if (unlikely(size != sizeof(struct bpf_tunnel_key))) { |
1850 | switch (size) { | 1856 | switch (size) { |
1851 | case offsetof(struct bpf_tunnel_key, tunnel_label): | 1857 | case offsetof(struct bpf_tunnel_key, tunnel_label): |
1858 | case offsetof(struct bpf_tunnel_key, tunnel_ext): | ||
1852 | case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): | 1859 | case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): |
1853 | /* Fixup deprecated structure layouts here, so we have | 1860 | /* Fixup deprecated structure layouts here, so we have |
1854 | * a common path later on. | 1861 | * a common path later on. |
@@ -1861,7 +1868,8 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5) | |||
1861 | return -EINVAL; | 1868 | return -EINVAL; |
1862 | } | 1869 | } |
1863 | } | 1870 | } |
1864 | if (unlikely(!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label)) | 1871 | if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) || |
1872 | from->tunnel_ext)) | ||
1865 | return -EINVAL; | 1873 | return -EINVAL; |
1866 | 1874 | ||
1867 | skb_dst_drop(skb); | 1875 | skb_dst_drop(skb); |
@@ -2247,7 +2255,7 @@ static int __init register_sk_filter_ops(void) | |||
2247 | } | 2255 | } |
2248 | late_initcall(register_sk_filter_ops); | 2256 | late_initcall(register_sk_filter_ops); |
2249 | 2257 | ||
2250 | int sk_detach_filter(struct sock *sk) | 2258 | int __sk_detach_filter(struct sock *sk, bool locked) |
2251 | { | 2259 | { |
2252 | int ret = -ENOENT; | 2260 | int ret = -ENOENT; |
2253 | struct sk_filter *filter; | 2261 | struct sk_filter *filter; |
@@ -2255,8 +2263,7 @@ int sk_detach_filter(struct sock *sk) | |||
2255 | if (sock_flag(sk, SOCK_FILTER_LOCKED)) | 2263 | if (sock_flag(sk, SOCK_FILTER_LOCKED)) |
2256 | return -EPERM; | 2264 | return -EPERM; |
2257 | 2265 | ||
2258 | filter = rcu_dereference_protected(sk->sk_filter, | 2266 | filter = rcu_dereference_protected(sk->sk_filter, locked); |
2259 | sock_owned_by_user(sk)); | ||
2260 | if (filter) { | 2267 | if (filter) { |
2261 | RCU_INIT_POINTER(sk->sk_filter, NULL); | 2268 | RCU_INIT_POINTER(sk->sk_filter, NULL); |
2262 | sk_filter_uncharge(sk, filter); | 2269 | sk_filter_uncharge(sk, filter); |
@@ -2265,7 +2272,12 @@ int sk_detach_filter(struct sock *sk) | |||
2265 | 2272 | ||
2266 | return ret; | 2273 | return ret; |
2267 | } | 2274 | } |
2268 | EXPORT_SYMBOL_GPL(sk_detach_filter); | 2275 | EXPORT_SYMBOL_GPL(__sk_detach_filter); |
2276 | |||
2277 | int sk_detach_filter(struct sock *sk) | ||
2278 | { | ||
2279 | return __sk_detach_filter(sk, sock_owned_by_user(sk)); | ||
2280 | } | ||
2269 | 2281 | ||
2270 | int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, | 2282 | int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, |
2271 | unsigned int len) | 2283 | unsigned int len) |
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 94acfc89ad97..a57bd17805b4 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -603,7 +603,6 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev) | |||
603 | const struct net_device_ops *ops; | 603 | const struct net_device_ops *ops; |
604 | int err; | 604 | int err; |
605 | 605 | ||
606 | np->dev = ndev; | ||
607 | strlcpy(np->dev_name, ndev->name, IFNAMSIZ); | 606 | strlcpy(np->dev_name, ndev->name, IFNAMSIZ); |
608 | INIT_WORK(&np->cleanup_work, netpoll_async_cleanup); | 607 | INIT_WORK(&np->cleanup_work, netpoll_async_cleanup); |
609 | 608 | ||
@@ -670,6 +669,7 @@ int netpoll_setup(struct netpoll *np) | |||
670 | goto unlock; | 669 | goto unlock; |
671 | } | 670 | } |
672 | dev_hold(ndev); | 671 | dev_hold(ndev); |
672 | np->dev = ndev; | ||
673 | 673 | ||
674 | if (netdev_master_upper_dev_get(ndev)) { | 674 | if (netdev_master_upper_dev_get(ndev)) { |
675 | np_err(np, "%s is a slave device, aborting\n", np->dev_name); | 675 | np_err(np, "%s is a slave device, aborting\n", np->dev_name); |
@@ -770,6 +770,7 @@ int netpoll_setup(struct netpoll *np) | |||
770 | return 0; | 770 | return 0; |
771 | 771 | ||
772 | put: | 772 | put: |
773 | np->dev = NULL; | ||
773 | dev_put(ndev); | 774 | dev_put(ndev); |
774 | unlock: | 775 | unlock: |
775 | rtnl_unlock(); | 776 | rtnl_unlock(); |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f2066772d0f3..a75f7e94b445 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -909,6 +909,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, | |||
909 | + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */ | 909 | + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */ |
910 | + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */ | 910 | + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */ |
911 | + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */ | 911 | + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */ |
912 | + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */ | ||
912 | + nla_total_size(1); /* IFLA_PROTO_DOWN */ | 913 | + nla_total_size(1); /* IFLA_PROTO_DOWN */ |
913 | 914 | ||
914 | } | 915 | } |
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index a0586b4a197d..5a94aea280d3 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -195,6 +195,14 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head, | |||
195 | u8 proto = NAPI_GRO_CB(skb)->proto; | 195 | u8 proto = NAPI_GRO_CB(skb)->proto; |
196 | const struct net_offload **offloads; | 196 | const struct net_offload **offloads; |
197 | 197 | ||
198 | /* We can clear the encap_mark for FOU as we are essentially doing | ||
199 | * one of two possible things. We are either adding an L4 tunnel | ||
200 | * header to the outer L3 tunnel header, or we are are simply | ||
201 | * treating the GRE tunnel header as though it is a UDP protocol | ||
202 | * specific header such as VXLAN or GENEVE. | ||
203 | */ | ||
204 | NAPI_GRO_CB(skb)->encap_mark = 0; | ||
205 | |||
198 | rcu_read_lock(); | 206 | rcu_read_lock(); |
199 | offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; | 207 | offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; |
200 | ops = rcu_dereference(offloads[proto]); | 208 | ops = rcu_dereference(offloads[proto]); |
@@ -352,6 +360,14 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head, | |||
352 | } | 360 | } |
353 | } | 361 | } |
354 | 362 | ||
363 | /* We can clear the encap_mark for GUE as we are essentially doing | ||
364 | * one of two possible things. We are either adding an L4 tunnel | ||
365 | * header to the outer L3 tunnel header, or we are are simply | ||
366 | * treating the GRE tunnel header as though it is a UDP protocol | ||
367 | * specific header such as VXLAN or GENEVE. | ||
368 | */ | ||
369 | NAPI_GRO_CB(skb)->encap_mark = 0; | ||
370 | |||
355 | rcu_read_lock(); | 371 | rcu_read_lock(); |
356 | offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; | 372 | offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; |
357 | ops = rcu_dereference(offloads[guehdr->proto_ctype]); | 373 | ops = rcu_dereference(offloads[guehdr->proto_ctype]); |
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 02dd990af542..6165f30c4d72 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -372,8 +372,8 @@ static int ip6_tun_fill_encap_info(struct sk_buff *skb, | |||
372 | if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id) || | 372 | if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id) || |
373 | nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) || | 373 | nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) || |
374 | nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) || | 374 | nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) || |
375 | nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.tos) || | 375 | nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) || |
376 | nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.ttl) || | 376 | nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.ttl) || |
377 | nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags)) | 377 | nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags)) |
378 | return -ENOMEM; | 378 | return -ENOMEM; |
379 | 379 | ||
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index bf081927e06b..4133b0f513af 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -359,11 +359,12 @@ unsigned int arpt_do_table(struct sk_buff *skb, | |||
359 | } | 359 | } |
360 | 360 | ||
361 | /* All zeroes == unconditional rule. */ | 361 | /* All zeroes == unconditional rule. */ |
362 | static inline bool unconditional(const struct arpt_arp *arp) | 362 | static inline bool unconditional(const struct arpt_entry *e) |
363 | { | 363 | { |
364 | static const struct arpt_arp uncond; | 364 | static const struct arpt_arp uncond; |
365 | 365 | ||
366 | return memcmp(arp, &uncond, sizeof(uncond)) == 0; | 366 | return e->target_offset == sizeof(struct arpt_entry) && |
367 | memcmp(&e->arp, &uncond, sizeof(uncond)) == 0; | ||
367 | } | 368 | } |
368 | 369 | ||
369 | /* Figures out from what hook each rule can be called: returns 0 if | 370 | /* Figures out from what hook each rule can be called: returns 0 if |
@@ -402,11 +403,10 @@ static int mark_source_chains(const struct xt_table_info *newinfo, | |||
402 | |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS)); | 403 | |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS)); |
403 | 404 | ||
404 | /* Unconditional return/END. */ | 405 | /* Unconditional return/END. */ |
405 | if ((e->target_offset == sizeof(struct arpt_entry) && | 406 | if ((unconditional(e) && |
406 | (strcmp(t->target.u.user.name, | 407 | (strcmp(t->target.u.user.name, |
407 | XT_STANDARD_TARGET) == 0) && | 408 | XT_STANDARD_TARGET) == 0) && |
408 | t->verdict < 0 && unconditional(&e->arp)) || | 409 | t->verdict < 0) || visited) { |
409 | visited) { | ||
410 | unsigned int oldpos, size; | 410 | unsigned int oldpos, size; |
411 | 411 | ||
412 | if ((strcmp(t->target.u.user.name, | 412 | if ((strcmp(t->target.u.user.name, |
@@ -474,14 +474,12 @@ next: | |||
474 | return 1; | 474 | return 1; |
475 | } | 475 | } |
476 | 476 | ||
477 | static inline int check_entry(const struct arpt_entry *e, const char *name) | 477 | static inline int check_entry(const struct arpt_entry *e) |
478 | { | 478 | { |
479 | const struct xt_entry_target *t; | 479 | const struct xt_entry_target *t; |
480 | 480 | ||
481 | if (!arp_checkentry(&e->arp)) { | 481 | if (!arp_checkentry(&e->arp)) |
482 | duprintf("arp_tables: arp check failed %p %s.\n", e, name); | ||
483 | return -EINVAL; | 482 | return -EINVAL; |
484 | } | ||
485 | 483 | ||
486 | if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset) | 484 | if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset) |
487 | return -EINVAL; | 485 | return -EINVAL; |
@@ -522,10 +520,6 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size) | |||
522 | struct xt_target *target; | 520 | struct xt_target *target; |
523 | int ret; | 521 | int ret; |
524 | 522 | ||
525 | ret = check_entry(e, name); | ||
526 | if (ret) | ||
527 | return ret; | ||
528 | |||
529 | e->counters.pcnt = xt_percpu_counter_alloc(); | 523 | e->counters.pcnt = xt_percpu_counter_alloc(); |
530 | if (IS_ERR_VALUE(e->counters.pcnt)) | 524 | if (IS_ERR_VALUE(e->counters.pcnt)) |
531 | return -ENOMEM; | 525 | return -ENOMEM; |
@@ -557,7 +551,7 @@ static bool check_underflow(const struct arpt_entry *e) | |||
557 | const struct xt_entry_target *t; | 551 | const struct xt_entry_target *t; |
558 | unsigned int verdict; | 552 | unsigned int verdict; |
559 | 553 | ||
560 | if (!unconditional(&e->arp)) | 554 | if (!unconditional(e)) |
561 | return false; | 555 | return false; |
562 | t = arpt_get_target_c(e); | 556 | t = arpt_get_target_c(e); |
563 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) | 557 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) |
@@ -576,9 +570,11 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e, | |||
576 | unsigned int valid_hooks) | 570 | unsigned int valid_hooks) |
577 | { | 571 | { |
578 | unsigned int h; | 572 | unsigned int h; |
573 | int err; | ||
579 | 574 | ||
580 | if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 || | 575 | if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 || |
581 | (unsigned char *)e + sizeof(struct arpt_entry) >= limit) { | 576 | (unsigned char *)e + sizeof(struct arpt_entry) >= limit || |
577 | (unsigned char *)e + e->next_offset > limit) { | ||
582 | duprintf("Bad offset %p\n", e); | 578 | duprintf("Bad offset %p\n", e); |
583 | return -EINVAL; | 579 | return -EINVAL; |
584 | } | 580 | } |
@@ -590,6 +586,10 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e, | |||
590 | return -EINVAL; | 586 | return -EINVAL; |
591 | } | 587 | } |
592 | 588 | ||
589 | err = check_entry(e); | ||
590 | if (err) | ||
591 | return err; | ||
592 | |||
593 | /* Check hooks & underflows */ | 593 | /* Check hooks & underflows */ |
594 | for (h = 0; h < NF_ARP_NUMHOOKS; h++) { | 594 | for (h = 0; h < NF_ARP_NUMHOOKS; h++) { |
595 | if (!(valid_hooks & (1 << h))) | 595 | if (!(valid_hooks & (1 << h))) |
@@ -598,9 +598,9 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e, | |||
598 | newinfo->hook_entry[h] = hook_entries[h]; | 598 | newinfo->hook_entry[h] = hook_entries[h]; |
599 | if ((unsigned char *)e - base == underflows[h]) { | 599 | if ((unsigned char *)e - base == underflows[h]) { |
600 | if (!check_underflow(e)) { | 600 | if (!check_underflow(e)) { |
601 | pr_err("Underflows must be unconditional and " | 601 | pr_debug("Underflows must be unconditional and " |
602 | "use the STANDARD target with " | 602 | "use the STANDARD target with " |
603 | "ACCEPT/DROP\n"); | 603 | "ACCEPT/DROP\n"); |
604 | return -EINVAL; | 604 | return -EINVAL; |
605 | } | 605 | } |
606 | newinfo->underflow[h] = underflows[h]; | 606 | newinfo->underflow[h] = underflows[h]; |
@@ -969,6 +969,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr, | |||
969 | sizeof(struct arpt_get_entries) + get.size); | 969 | sizeof(struct arpt_get_entries) + get.size); |
970 | return -EINVAL; | 970 | return -EINVAL; |
971 | } | 971 | } |
972 | get.name[sizeof(get.name) - 1] = '\0'; | ||
972 | 973 | ||
973 | t = xt_find_table_lock(net, NFPROTO_ARP, get.name); | 974 | t = xt_find_table_lock(net, NFPROTO_ARP, get.name); |
974 | if (!IS_ERR_OR_NULL(t)) { | 975 | if (!IS_ERR_OR_NULL(t)) { |
@@ -1233,7 +1234,8 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, | |||
1233 | 1234 | ||
1234 | duprintf("check_compat_entry_size_and_hooks %p\n", e); | 1235 | duprintf("check_compat_entry_size_and_hooks %p\n", e); |
1235 | if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 || | 1236 | if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 || |
1236 | (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) { | 1237 | (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit || |
1238 | (unsigned char *)e + e->next_offset > limit) { | ||
1237 | duprintf("Bad offset %p, limit = %p\n", e, limit); | 1239 | duprintf("Bad offset %p, limit = %p\n", e, limit); |
1238 | return -EINVAL; | 1240 | return -EINVAL; |
1239 | } | 1241 | } |
@@ -1246,7 +1248,7 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, | |||
1246 | } | 1248 | } |
1247 | 1249 | ||
1248 | /* For purposes of check_entry casting the compat entry is fine */ | 1250 | /* For purposes of check_entry casting the compat entry is fine */ |
1249 | ret = check_entry((struct arpt_entry *)e, name); | 1251 | ret = check_entry((struct arpt_entry *)e); |
1250 | if (ret) | 1252 | if (ret) |
1251 | return ret; | 1253 | return ret; |
1252 | 1254 | ||
@@ -1662,6 +1664,7 @@ static int compat_get_entries(struct net *net, | |||
1662 | *len, sizeof(get) + get.size); | 1664 | *len, sizeof(get) + get.size); |
1663 | return -EINVAL; | 1665 | return -EINVAL; |
1664 | } | 1666 | } |
1667 | get.name[sizeof(get.name) - 1] = '\0'; | ||
1665 | 1668 | ||
1666 | xt_compat_lock(NFPROTO_ARP); | 1669 | xt_compat_lock(NFPROTO_ARP); |
1667 | t = xt_find_table_lock(net, NFPROTO_ARP, get.name); | 1670 | t = xt_find_table_lock(net, NFPROTO_ARP, get.name); |
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index e53f8d6f326d..631c100a1338 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -168,11 +168,12 @@ get_entry(const void *base, unsigned int offset) | |||
168 | 168 | ||
169 | /* All zeroes == unconditional rule. */ | 169 | /* All zeroes == unconditional rule. */ |
170 | /* Mildly perf critical (only if packet tracing is on) */ | 170 | /* Mildly perf critical (only if packet tracing is on) */ |
171 | static inline bool unconditional(const struct ipt_ip *ip) | 171 | static inline bool unconditional(const struct ipt_entry *e) |
172 | { | 172 | { |
173 | static const struct ipt_ip uncond; | 173 | static const struct ipt_ip uncond; |
174 | 174 | ||
175 | return memcmp(ip, &uncond, sizeof(uncond)) == 0; | 175 | return e->target_offset == sizeof(struct ipt_entry) && |
176 | memcmp(&e->ip, &uncond, sizeof(uncond)) == 0; | ||
176 | #undef FWINV | 177 | #undef FWINV |
177 | } | 178 | } |
178 | 179 | ||
@@ -229,11 +230,10 @@ get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e, | |||
229 | } else if (s == e) { | 230 | } else if (s == e) { |
230 | (*rulenum)++; | 231 | (*rulenum)++; |
231 | 232 | ||
232 | if (s->target_offset == sizeof(struct ipt_entry) && | 233 | if (unconditional(s) && |
233 | strcmp(t->target.u.kernel.target->name, | 234 | strcmp(t->target.u.kernel.target->name, |
234 | XT_STANDARD_TARGET) == 0 && | 235 | XT_STANDARD_TARGET) == 0 && |
235 | t->verdict < 0 && | 236 | t->verdict < 0) { |
236 | unconditional(&s->ip)) { | ||
237 | /* Tail of chains: STANDARD target (return/policy) */ | 237 | /* Tail of chains: STANDARD target (return/policy) */ |
238 | *comment = *chainname == hookname | 238 | *comment = *chainname == hookname |
239 | ? comments[NF_IP_TRACE_COMMENT_POLICY] | 239 | ? comments[NF_IP_TRACE_COMMENT_POLICY] |
@@ -476,11 +476,10 @@ mark_source_chains(const struct xt_table_info *newinfo, | |||
476 | e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); | 476 | e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); |
477 | 477 | ||
478 | /* Unconditional return/END. */ | 478 | /* Unconditional return/END. */ |
479 | if ((e->target_offset == sizeof(struct ipt_entry) && | 479 | if ((unconditional(e) && |
480 | (strcmp(t->target.u.user.name, | 480 | (strcmp(t->target.u.user.name, |
481 | XT_STANDARD_TARGET) == 0) && | 481 | XT_STANDARD_TARGET) == 0) && |
482 | t->verdict < 0 && unconditional(&e->ip)) || | 482 | t->verdict < 0) || visited) { |
483 | visited) { | ||
484 | unsigned int oldpos, size; | 483 | unsigned int oldpos, size; |
485 | 484 | ||
486 | if ((strcmp(t->target.u.user.name, | 485 | if ((strcmp(t->target.u.user.name, |
@@ -569,14 +568,12 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net) | |||
569 | } | 568 | } |
570 | 569 | ||
571 | static int | 570 | static int |
572 | check_entry(const struct ipt_entry *e, const char *name) | 571 | check_entry(const struct ipt_entry *e) |
573 | { | 572 | { |
574 | const struct xt_entry_target *t; | 573 | const struct xt_entry_target *t; |
575 | 574 | ||
576 | if (!ip_checkentry(&e->ip)) { | 575 | if (!ip_checkentry(&e->ip)) |
577 | duprintf("ip check failed %p %s.\n", e, name); | ||
578 | return -EINVAL; | 576 | return -EINVAL; |
579 | } | ||
580 | 577 | ||
581 | if (e->target_offset + sizeof(struct xt_entry_target) > | 578 | if (e->target_offset + sizeof(struct xt_entry_target) > |
582 | e->next_offset) | 579 | e->next_offset) |
@@ -666,10 +663,6 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name, | |||
666 | struct xt_mtchk_param mtpar; | 663 | struct xt_mtchk_param mtpar; |
667 | struct xt_entry_match *ematch; | 664 | struct xt_entry_match *ematch; |
668 | 665 | ||
669 | ret = check_entry(e, name); | ||
670 | if (ret) | ||
671 | return ret; | ||
672 | |||
673 | e->counters.pcnt = xt_percpu_counter_alloc(); | 666 | e->counters.pcnt = xt_percpu_counter_alloc(); |
674 | if (IS_ERR_VALUE(e->counters.pcnt)) | 667 | if (IS_ERR_VALUE(e->counters.pcnt)) |
675 | return -ENOMEM; | 668 | return -ENOMEM; |
@@ -721,7 +714,7 @@ static bool check_underflow(const struct ipt_entry *e) | |||
721 | const struct xt_entry_target *t; | 714 | const struct xt_entry_target *t; |
722 | unsigned int verdict; | 715 | unsigned int verdict; |
723 | 716 | ||
724 | if (!unconditional(&e->ip)) | 717 | if (!unconditional(e)) |
725 | return false; | 718 | return false; |
726 | t = ipt_get_target_c(e); | 719 | t = ipt_get_target_c(e); |
727 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) | 720 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) |
@@ -741,9 +734,11 @@ check_entry_size_and_hooks(struct ipt_entry *e, | |||
741 | unsigned int valid_hooks) | 734 | unsigned int valid_hooks) |
742 | { | 735 | { |
743 | unsigned int h; | 736 | unsigned int h; |
737 | int err; | ||
744 | 738 | ||
745 | if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 || | 739 | if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 || |
746 | (unsigned char *)e + sizeof(struct ipt_entry) >= limit) { | 740 | (unsigned char *)e + sizeof(struct ipt_entry) >= limit || |
741 | (unsigned char *)e + e->next_offset > limit) { | ||
747 | duprintf("Bad offset %p\n", e); | 742 | duprintf("Bad offset %p\n", e); |
748 | return -EINVAL; | 743 | return -EINVAL; |
749 | } | 744 | } |
@@ -755,6 +750,10 @@ check_entry_size_and_hooks(struct ipt_entry *e, | |||
755 | return -EINVAL; | 750 | return -EINVAL; |
756 | } | 751 | } |
757 | 752 | ||
753 | err = check_entry(e); | ||
754 | if (err) | ||
755 | return err; | ||
756 | |||
758 | /* Check hooks & underflows */ | 757 | /* Check hooks & underflows */ |
759 | for (h = 0; h < NF_INET_NUMHOOKS; h++) { | 758 | for (h = 0; h < NF_INET_NUMHOOKS; h++) { |
760 | if (!(valid_hooks & (1 << h))) | 759 | if (!(valid_hooks & (1 << h))) |
@@ -763,9 +762,9 @@ check_entry_size_and_hooks(struct ipt_entry *e, | |||
763 | newinfo->hook_entry[h] = hook_entries[h]; | 762 | newinfo->hook_entry[h] = hook_entries[h]; |
764 | if ((unsigned char *)e - base == underflows[h]) { | 763 | if ((unsigned char *)e - base == underflows[h]) { |
765 | if (!check_underflow(e)) { | 764 | if (!check_underflow(e)) { |
766 | pr_err("Underflows must be unconditional and " | 765 | pr_debug("Underflows must be unconditional and " |
767 | "use the STANDARD target with " | 766 | "use the STANDARD target with " |
768 | "ACCEPT/DROP\n"); | 767 | "ACCEPT/DROP\n"); |
769 | return -EINVAL; | 768 | return -EINVAL; |
770 | } | 769 | } |
771 | newinfo->underflow[h] = underflows[h]; | 770 | newinfo->underflow[h] = underflows[h]; |
@@ -1157,6 +1156,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr, | |||
1157 | *len, sizeof(get) + get.size); | 1156 | *len, sizeof(get) + get.size); |
1158 | return -EINVAL; | 1157 | return -EINVAL; |
1159 | } | 1158 | } |
1159 | get.name[sizeof(get.name) - 1] = '\0'; | ||
1160 | 1160 | ||
1161 | t = xt_find_table_lock(net, AF_INET, get.name); | 1161 | t = xt_find_table_lock(net, AF_INET, get.name); |
1162 | if (!IS_ERR_OR_NULL(t)) { | 1162 | if (!IS_ERR_OR_NULL(t)) { |
@@ -1493,7 +1493,8 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, | |||
1493 | 1493 | ||
1494 | duprintf("check_compat_entry_size_and_hooks %p\n", e); | 1494 | duprintf("check_compat_entry_size_and_hooks %p\n", e); |
1495 | if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 || | 1495 | if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 || |
1496 | (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) { | 1496 | (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit || |
1497 | (unsigned char *)e + e->next_offset > limit) { | ||
1497 | duprintf("Bad offset %p, limit = %p\n", e, limit); | 1498 | duprintf("Bad offset %p, limit = %p\n", e, limit); |
1498 | return -EINVAL; | 1499 | return -EINVAL; |
1499 | } | 1500 | } |
@@ -1506,7 +1507,7 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, | |||
1506 | } | 1507 | } |
1507 | 1508 | ||
1508 | /* For purposes of check_entry casting the compat entry is fine */ | 1509 | /* For purposes of check_entry casting the compat entry is fine */ |
1509 | ret = check_entry((struct ipt_entry *)e, name); | 1510 | ret = check_entry((struct ipt_entry *)e); |
1510 | if (ret) | 1511 | if (ret) |
1511 | return ret; | 1512 | return ret; |
1512 | 1513 | ||
@@ -1935,6 +1936,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr, | |||
1935 | *len, sizeof(get) + get.size); | 1936 | *len, sizeof(get) + get.size); |
1936 | return -EINVAL; | 1937 | return -EINVAL; |
1937 | } | 1938 | } |
1939 | get.name[sizeof(get.name) - 1] = '\0'; | ||
1938 | 1940 | ||
1939 | xt_compat_lock(AF_INET); | 1941 | xt_compat_lock(AF_INET); |
1940 | t = xt_find_table_lock(net, AF_INET, get.name); | 1942 | t = xt_find_table_lock(net, AF_INET, get.name); |
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index 7b8fbb352877..db5b87509446 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -18,10 +18,10 @@ | |||
18 | #include <net/netfilter/nf_conntrack_synproxy.h> | 18 | #include <net/netfilter/nf_conntrack_synproxy.h> |
19 | 19 | ||
20 | static struct iphdr * | 20 | static struct iphdr * |
21 | synproxy_build_ip(struct sk_buff *skb, __be32 saddr, __be32 daddr) | 21 | synproxy_build_ip(struct net *net, struct sk_buff *skb, __be32 saddr, |
22 | __be32 daddr) | ||
22 | { | 23 | { |
23 | struct iphdr *iph; | 24 | struct iphdr *iph; |
24 | struct net *net = sock_net(skb->sk); | ||
25 | 25 | ||
26 | skb_reset_network_header(skb); | 26 | skb_reset_network_header(skb); |
27 | iph = (struct iphdr *)skb_put(skb, sizeof(*iph)); | 27 | iph = (struct iphdr *)skb_put(skb, sizeof(*iph)); |
@@ -40,14 +40,12 @@ synproxy_build_ip(struct sk_buff *skb, __be32 saddr, __be32 daddr) | |||
40 | } | 40 | } |
41 | 41 | ||
42 | static void | 42 | static void |
43 | synproxy_send_tcp(const struct synproxy_net *snet, | 43 | synproxy_send_tcp(struct net *net, |
44 | const struct sk_buff *skb, struct sk_buff *nskb, | 44 | const struct sk_buff *skb, struct sk_buff *nskb, |
45 | struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo, | 45 | struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo, |
46 | struct iphdr *niph, struct tcphdr *nth, | 46 | struct iphdr *niph, struct tcphdr *nth, |
47 | unsigned int tcp_hdr_size) | 47 | unsigned int tcp_hdr_size) |
48 | { | 48 | { |
49 | struct net *net = nf_ct_net(snet->tmpl); | ||
50 | |||
51 | nth->check = ~tcp_v4_check(tcp_hdr_size, niph->saddr, niph->daddr, 0); | 49 | nth->check = ~tcp_v4_check(tcp_hdr_size, niph->saddr, niph->daddr, 0); |
52 | nskb->ip_summed = CHECKSUM_PARTIAL; | 50 | nskb->ip_summed = CHECKSUM_PARTIAL; |
53 | nskb->csum_start = (unsigned char *)nth - nskb->head; | 51 | nskb->csum_start = (unsigned char *)nth - nskb->head; |
@@ -72,7 +70,7 @@ free_nskb: | |||
72 | } | 70 | } |
73 | 71 | ||
74 | static void | 72 | static void |
75 | synproxy_send_client_synack(const struct synproxy_net *snet, | 73 | synproxy_send_client_synack(struct net *net, |
76 | const struct sk_buff *skb, const struct tcphdr *th, | 74 | const struct sk_buff *skb, const struct tcphdr *th, |
77 | const struct synproxy_options *opts) | 75 | const struct synproxy_options *opts) |
78 | { | 76 | { |
@@ -91,7 +89,7 @@ synproxy_send_client_synack(const struct synproxy_net *snet, | |||
91 | return; | 89 | return; |
92 | skb_reserve(nskb, MAX_TCP_HEADER); | 90 | skb_reserve(nskb, MAX_TCP_HEADER); |
93 | 91 | ||
94 | niph = synproxy_build_ip(nskb, iph->daddr, iph->saddr); | 92 | niph = synproxy_build_ip(net, nskb, iph->daddr, iph->saddr); |
95 | 93 | ||
96 | skb_reset_transport_header(nskb); | 94 | skb_reset_transport_header(nskb); |
97 | nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); | 95 | nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); |
@@ -109,15 +107,16 @@ synproxy_send_client_synack(const struct synproxy_net *snet, | |||
109 | 107 | ||
110 | synproxy_build_options(nth, opts); | 108 | synproxy_build_options(nth, opts); |
111 | 109 | ||
112 | synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, | 110 | synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, |
113 | niph, nth, tcp_hdr_size); | 111 | niph, nth, tcp_hdr_size); |
114 | } | 112 | } |
115 | 113 | ||
116 | static void | 114 | static void |
117 | synproxy_send_server_syn(const struct synproxy_net *snet, | 115 | synproxy_send_server_syn(struct net *net, |
118 | const struct sk_buff *skb, const struct tcphdr *th, | 116 | const struct sk_buff *skb, const struct tcphdr *th, |
119 | const struct synproxy_options *opts, u32 recv_seq) | 117 | const struct synproxy_options *opts, u32 recv_seq) |
120 | { | 118 | { |
119 | struct synproxy_net *snet = synproxy_pernet(net); | ||
121 | struct sk_buff *nskb; | 120 | struct sk_buff *nskb; |
122 | struct iphdr *iph, *niph; | 121 | struct iphdr *iph, *niph; |
123 | struct tcphdr *nth; | 122 | struct tcphdr *nth; |
@@ -132,7 +131,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet, | |||
132 | return; | 131 | return; |
133 | skb_reserve(nskb, MAX_TCP_HEADER); | 132 | skb_reserve(nskb, MAX_TCP_HEADER); |
134 | 133 | ||
135 | niph = synproxy_build_ip(nskb, iph->saddr, iph->daddr); | 134 | niph = synproxy_build_ip(net, nskb, iph->saddr, iph->daddr); |
136 | 135 | ||
137 | skb_reset_transport_header(nskb); | 136 | skb_reset_transport_header(nskb); |
138 | nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); | 137 | nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); |
@@ -153,12 +152,12 @@ synproxy_send_server_syn(const struct synproxy_net *snet, | |||
153 | 152 | ||
154 | synproxy_build_options(nth, opts); | 153 | synproxy_build_options(nth, opts); |
155 | 154 | ||
156 | synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, | 155 | synproxy_send_tcp(net, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, |
157 | niph, nth, tcp_hdr_size); | 156 | niph, nth, tcp_hdr_size); |
158 | } | 157 | } |
159 | 158 | ||
160 | static void | 159 | static void |
161 | synproxy_send_server_ack(const struct synproxy_net *snet, | 160 | synproxy_send_server_ack(struct net *net, |
162 | const struct ip_ct_tcp *state, | 161 | const struct ip_ct_tcp *state, |
163 | const struct sk_buff *skb, const struct tcphdr *th, | 162 | const struct sk_buff *skb, const struct tcphdr *th, |
164 | const struct synproxy_options *opts) | 163 | const struct synproxy_options *opts) |
@@ -177,7 +176,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet, | |||
177 | return; | 176 | return; |
178 | skb_reserve(nskb, MAX_TCP_HEADER); | 177 | skb_reserve(nskb, MAX_TCP_HEADER); |
179 | 178 | ||
180 | niph = synproxy_build_ip(nskb, iph->daddr, iph->saddr); | 179 | niph = synproxy_build_ip(net, nskb, iph->daddr, iph->saddr); |
181 | 180 | ||
182 | skb_reset_transport_header(nskb); | 181 | skb_reset_transport_header(nskb); |
183 | nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); | 182 | nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); |
@@ -193,11 +192,11 @@ synproxy_send_server_ack(const struct synproxy_net *snet, | |||
193 | 192 | ||
194 | synproxy_build_options(nth, opts); | 193 | synproxy_build_options(nth, opts); |
195 | 194 | ||
196 | synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); | 195 | synproxy_send_tcp(net, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); |
197 | } | 196 | } |
198 | 197 | ||
199 | static void | 198 | static void |
200 | synproxy_send_client_ack(const struct synproxy_net *snet, | 199 | synproxy_send_client_ack(struct net *net, |
201 | const struct sk_buff *skb, const struct tcphdr *th, | 200 | const struct sk_buff *skb, const struct tcphdr *th, |
202 | const struct synproxy_options *opts) | 201 | const struct synproxy_options *opts) |
203 | { | 202 | { |
@@ -215,7 +214,7 @@ synproxy_send_client_ack(const struct synproxy_net *snet, | |||
215 | return; | 214 | return; |
216 | skb_reserve(nskb, MAX_TCP_HEADER); | 215 | skb_reserve(nskb, MAX_TCP_HEADER); |
217 | 216 | ||
218 | niph = synproxy_build_ip(nskb, iph->saddr, iph->daddr); | 217 | niph = synproxy_build_ip(net, nskb, iph->saddr, iph->daddr); |
219 | 218 | ||
220 | skb_reset_transport_header(nskb); | 219 | skb_reset_transport_header(nskb); |
221 | nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); | 220 | nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size); |
@@ -231,15 +230,16 @@ synproxy_send_client_ack(const struct synproxy_net *snet, | |||
231 | 230 | ||
232 | synproxy_build_options(nth, opts); | 231 | synproxy_build_options(nth, opts); |
233 | 232 | ||
234 | synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, | 233 | synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, |
235 | niph, nth, tcp_hdr_size); | 234 | niph, nth, tcp_hdr_size); |
236 | } | 235 | } |
237 | 236 | ||
238 | static bool | 237 | static bool |
239 | synproxy_recv_client_ack(const struct synproxy_net *snet, | 238 | synproxy_recv_client_ack(struct net *net, |
240 | const struct sk_buff *skb, const struct tcphdr *th, | 239 | const struct sk_buff *skb, const struct tcphdr *th, |
241 | struct synproxy_options *opts, u32 recv_seq) | 240 | struct synproxy_options *opts, u32 recv_seq) |
242 | { | 241 | { |
242 | struct synproxy_net *snet = synproxy_pernet(net); | ||
243 | int mss; | 243 | int mss; |
244 | 244 | ||
245 | mss = __cookie_v4_check(ip_hdr(skb), th, ntohl(th->ack_seq) - 1); | 245 | mss = __cookie_v4_check(ip_hdr(skb), th, ntohl(th->ack_seq) - 1); |
@@ -255,7 +255,7 @@ synproxy_recv_client_ack(const struct synproxy_net *snet, | |||
255 | if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP) | 255 | if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP) |
256 | synproxy_check_timestamp_cookie(opts); | 256 | synproxy_check_timestamp_cookie(opts); |
257 | 257 | ||
258 | synproxy_send_server_syn(snet, skb, th, opts, recv_seq); | 258 | synproxy_send_server_syn(net, skb, th, opts, recv_seq); |
259 | return true; | 259 | return true; |
260 | } | 260 | } |
261 | 261 | ||
@@ -263,7 +263,8 @@ static unsigned int | |||
263 | synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par) | 263 | synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par) |
264 | { | 264 | { |
265 | const struct xt_synproxy_info *info = par->targinfo; | 265 | const struct xt_synproxy_info *info = par->targinfo; |
266 | struct synproxy_net *snet = synproxy_pernet(par->net); | 266 | struct net *net = par->net; |
267 | struct synproxy_net *snet = synproxy_pernet(net); | ||
267 | struct synproxy_options opts = {}; | 268 | struct synproxy_options opts = {}; |
268 | struct tcphdr *th, _th; | 269 | struct tcphdr *th, _th; |
269 | 270 | ||
@@ -292,12 +293,12 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par) | |||
292 | XT_SYNPROXY_OPT_SACK_PERM | | 293 | XT_SYNPROXY_OPT_SACK_PERM | |
293 | XT_SYNPROXY_OPT_ECN); | 294 | XT_SYNPROXY_OPT_ECN); |
294 | 295 | ||
295 | synproxy_send_client_synack(snet, skb, th, &opts); | 296 | synproxy_send_client_synack(net, skb, th, &opts); |
296 | return NF_DROP; | 297 | return NF_DROP; |
297 | 298 | ||
298 | } else if (th->ack && !(th->fin || th->rst || th->syn)) { | 299 | } else if (th->ack && !(th->fin || th->rst || th->syn)) { |
299 | /* ACK from client */ | 300 | /* ACK from client */ |
300 | synproxy_recv_client_ack(snet, skb, th, &opts, ntohl(th->seq)); | 301 | synproxy_recv_client_ack(net, skb, th, &opts, ntohl(th->seq)); |
301 | return NF_DROP; | 302 | return NF_DROP; |
302 | } | 303 | } |
303 | 304 | ||
@@ -308,7 +309,8 @@ static unsigned int ipv4_synproxy_hook(void *priv, | |||
308 | struct sk_buff *skb, | 309 | struct sk_buff *skb, |
309 | const struct nf_hook_state *nhs) | 310 | const struct nf_hook_state *nhs) |
310 | { | 311 | { |
311 | struct synproxy_net *snet = synproxy_pernet(nhs->net); | 312 | struct net *net = nhs->net; |
313 | struct synproxy_net *snet = synproxy_pernet(net); | ||
312 | enum ip_conntrack_info ctinfo; | 314 | enum ip_conntrack_info ctinfo; |
313 | struct nf_conn *ct; | 315 | struct nf_conn *ct; |
314 | struct nf_conn_synproxy *synproxy; | 316 | struct nf_conn_synproxy *synproxy; |
@@ -365,7 +367,7 @@ static unsigned int ipv4_synproxy_hook(void *priv, | |||
365 | * therefore we need to add 1 to make the SYN sequence | 367 | * therefore we need to add 1 to make the SYN sequence |
366 | * number match the one of first SYN. | 368 | * number match the one of first SYN. |
367 | */ | 369 | */ |
368 | if (synproxy_recv_client_ack(snet, skb, th, &opts, | 370 | if (synproxy_recv_client_ack(net, skb, th, &opts, |
369 | ntohl(th->seq) + 1)) | 371 | ntohl(th->seq) + 1)) |
370 | this_cpu_inc(snet->stats->cookie_retrans); | 372 | this_cpu_inc(snet->stats->cookie_retrans); |
371 | 373 | ||
@@ -391,12 +393,12 @@ static unsigned int ipv4_synproxy_hook(void *priv, | |||
391 | XT_SYNPROXY_OPT_SACK_PERM); | 393 | XT_SYNPROXY_OPT_SACK_PERM); |
392 | 394 | ||
393 | swap(opts.tsval, opts.tsecr); | 395 | swap(opts.tsval, opts.tsecr); |
394 | synproxy_send_server_ack(snet, state, skb, th, &opts); | 396 | synproxy_send_server_ack(net, state, skb, th, &opts); |
395 | 397 | ||
396 | nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq)); | 398 | nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq)); |
397 | 399 | ||
398 | swap(opts.tsval, opts.tsecr); | 400 | swap(opts.tsval, opts.tsecr); |
399 | synproxy_send_client_ack(snet, skb, th, &opts); | 401 | synproxy_send_client_ack(net, skb, th, &opts); |
400 | 402 | ||
401 | consume_skb(skb); | 403 | consume_skb(skb); |
402 | return NF_STOLEN; | 404 | return NF_STOLEN; |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 84f9baf7aee8..86b67b70b626 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -198,11 +198,12 @@ get_entry(const void *base, unsigned int offset) | |||
198 | 198 | ||
199 | /* All zeroes == unconditional rule. */ | 199 | /* All zeroes == unconditional rule. */ |
200 | /* Mildly perf critical (only if packet tracing is on) */ | 200 | /* Mildly perf critical (only if packet tracing is on) */ |
201 | static inline bool unconditional(const struct ip6t_ip6 *ipv6) | 201 | static inline bool unconditional(const struct ip6t_entry *e) |
202 | { | 202 | { |
203 | static const struct ip6t_ip6 uncond; | 203 | static const struct ip6t_ip6 uncond; |
204 | 204 | ||
205 | return memcmp(ipv6, &uncond, sizeof(uncond)) == 0; | 205 | return e->target_offset == sizeof(struct ip6t_entry) && |
206 | memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0; | ||
206 | } | 207 | } |
207 | 208 | ||
208 | static inline const struct xt_entry_target * | 209 | static inline const struct xt_entry_target * |
@@ -258,11 +259,10 @@ get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e, | |||
258 | } else if (s == e) { | 259 | } else if (s == e) { |
259 | (*rulenum)++; | 260 | (*rulenum)++; |
260 | 261 | ||
261 | if (s->target_offset == sizeof(struct ip6t_entry) && | 262 | if (unconditional(s) && |
262 | strcmp(t->target.u.kernel.target->name, | 263 | strcmp(t->target.u.kernel.target->name, |
263 | XT_STANDARD_TARGET) == 0 && | 264 | XT_STANDARD_TARGET) == 0 && |
264 | t->verdict < 0 && | 265 | t->verdict < 0) { |
265 | unconditional(&s->ipv6)) { | ||
266 | /* Tail of chains: STANDARD target (return/policy) */ | 266 | /* Tail of chains: STANDARD target (return/policy) */ |
267 | *comment = *chainname == hookname | 267 | *comment = *chainname == hookname |
268 | ? comments[NF_IP6_TRACE_COMMENT_POLICY] | 268 | ? comments[NF_IP6_TRACE_COMMENT_POLICY] |
@@ -488,11 +488,10 @@ mark_source_chains(const struct xt_table_info *newinfo, | |||
488 | e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); | 488 | e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); |
489 | 489 | ||
490 | /* Unconditional return/END. */ | 490 | /* Unconditional return/END. */ |
491 | if ((e->target_offset == sizeof(struct ip6t_entry) && | 491 | if ((unconditional(e) && |
492 | (strcmp(t->target.u.user.name, | 492 | (strcmp(t->target.u.user.name, |
493 | XT_STANDARD_TARGET) == 0) && | 493 | XT_STANDARD_TARGET) == 0) && |
494 | t->verdict < 0 && | 494 | t->verdict < 0) || visited) { |
495 | unconditional(&e->ipv6)) || visited) { | ||
496 | unsigned int oldpos, size; | 495 | unsigned int oldpos, size; |
497 | 496 | ||
498 | if ((strcmp(t->target.u.user.name, | 497 | if ((strcmp(t->target.u.user.name, |
@@ -581,14 +580,12 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net) | |||
581 | } | 580 | } |
582 | 581 | ||
583 | static int | 582 | static int |
584 | check_entry(const struct ip6t_entry *e, const char *name) | 583 | check_entry(const struct ip6t_entry *e) |
585 | { | 584 | { |
586 | const struct xt_entry_target *t; | 585 | const struct xt_entry_target *t; |
587 | 586 | ||
588 | if (!ip6_checkentry(&e->ipv6)) { | 587 | if (!ip6_checkentry(&e->ipv6)) |
589 | duprintf("ip_tables: ip check failed %p %s.\n", e, name); | ||
590 | return -EINVAL; | 588 | return -EINVAL; |
591 | } | ||
592 | 589 | ||
593 | if (e->target_offset + sizeof(struct xt_entry_target) > | 590 | if (e->target_offset + sizeof(struct xt_entry_target) > |
594 | e->next_offset) | 591 | e->next_offset) |
@@ -679,10 +676,6 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, | |||
679 | struct xt_mtchk_param mtpar; | 676 | struct xt_mtchk_param mtpar; |
680 | struct xt_entry_match *ematch; | 677 | struct xt_entry_match *ematch; |
681 | 678 | ||
682 | ret = check_entry(e, name); | ||
683 | if (ret) | ||
684 | return ret; | ||
685 | |||
686 | e->counters.pcnt = xt_percpu_counter_alloc(); | 679 | e->counters.pcnt = xt_percpu_counter_alloc(); |
687 | if (IS_ERR_VALUE(e->counters.pcnt)) | 680 | if (IS_ERR_VALUE(e->counters.pcnt)) |
688 | return -ENOMEM; | 681 | return -ENOMEM; |
@@ -733,7 +726,7 @@ static bool check_underflow(const struct ip6t_entry *e) | |||
733 | const struct xt_entry_target *t; | 726 | const struct xt_entry_target *t; |
734 | unsigned int verdict; | 727 | unsigned int verdict; |
735 | 728 | ||
736 | if (!unconditional(&e->ipv6)) | 729 | if (!unconditional(e)) |
737 | return false; | 730 | return false; |
738 | t = ip6t_get_target_c(e); | 731 | t = ip6t_get_target_c(e); |
739 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) | 732 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) |
@@ -753,9 +746,11 @@ check_entry_size_and_hooks(struct ip6t_entry *e, | |||
753 | unsigned int valid_hooks) | 746 | unsigned int valid_hooks) |
754 | { | 747 | { |
755 | unsigned int h; | 748 | unsigned int h; |
749 | int err; | ||
756 | 750 | ||
757 | if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 || | 751 | if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 || |
758 | (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) { | 752 | (unsigned char *)e + sizeof(struct ip6t_entry) >= limit || |
753 | (unsigned char *)e + e->next_offset > limit) { | ||
759 | duprintf("Bad offset %p\n", e); | 754 | duprintf("Bad offset %p\n", e); |
760 | return -EINVAL; | 755 | return -EINVAL; |
761 | } | 756 | } |
@@ -767,6 +762,10 @@ check_entry_size_and_hooks(struct ip6t_entry *e, | |||
767 | return -EINVAL; | 762 | return -EINVAL; |
768 | } | 763 | } |
769 | 764 | ||
765 | err = check_entry(e); | ||
766 | if (err) | ||
767 | return err; | ||
768 | |||
770 | /* Check hooks & underflows */ | 769 | /* Check hooks & underflows */ |
771 | for (h = 0; h < NF_INET_NUMHOOKS; h++) { | 770 | for (h = 0; h < NF_INET_NUMHOOKS; h++) { |
772 | if (!(valid_hooks & (1 << h))) | 771 | if (!(valid_hooks & (1 << h))) |
@@ -775,9 +774,9 @@ check_entry_size_and_hooks(struct ip6t_entry *e, | |||
775 | newinfo->hook_entry[h] = hook_entries[h]; | 774 | newinfo->hook_entry[h] = hook_entries[h]; |
776 | if ((unsigned char *)e - base == underflows[h]) { | 775 | if ((unsigned char *)e - base == underflows[h]) { |
777 | if (!check_underflow(e)) { | 776 | if (!check_underflow(e)) { |
778 | pr_err("Underflows must be unconditional and " | 777 | pr_debug("Underflows must be unconditional and " |
779 | "use the STANDARD target with " | 778 | "use the STANDARD target with " |
780 | "ACCEPT/DROP\n"); | 779 | "ACCEPT/DROP\n"); |
781 | return -EINVAL; | 780 | return -EINVAL; |
782 | } | 781 | } |
783 | newinfo->underflow[h] = underflows[h]; | 782 | newinfo->underflow[h] = underflows[h]; |
@@ -1169,6 +1168,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr, | |||
1169 | *len, sizeof(get) + get.size); | 1168 | *len, sizeof(get) + get.size); |
1170 | return -EINVAL; | 1169 | return -EINVAL; |
1171 | } | 1170 | } |
1171 | get.name[sizeof(get.name) - 1] = '\0'; | ||
1172 | 1172 | ||
1173 | t = xt_find_table_lock(net, AF_INET6, get.name); | 1173 | t = xt_find_table_lock(net, AF_INET6, get.name); |
1174 | if (!IS_ERR_OR_NULL(t)) { | 1174 | if (!IS_ERR_OR_NULL(t)) { |
@@ -1505,7 +1505,8 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, | |||
1505 | 1505 | ||
1506 | duprintf("check_compat_entry_size_and_hooks %p\n", e); | 1506 | duprintf("check_compat_entry_size_and_hooks %p\n", e); |
1507 | if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 || | 1507 | if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 || |
1508 | (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) { | 1508 | (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit || |
1509 | (unsigned char *)e + e->next_offset > limit) { | ||
1509 | duprintf("Bad offset %p, limit = %p\n", e, limit); | 1510 | duprintf("Bad offset %p, limit = %p\n", e, limit); |
1510 | return -EINVAL; | 1511 | return -EINVAL; |
1511 | } | 1512 | } |
@@ -1518,7 +1519,7 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, | |||
1518 | } | 1519 | } |
1519 | 1520 | ||
1520 | /* For purposes of check_entry casting the compat entry is fine */ | 1521 | /* For purposes of check_entry casting the compat entry is fine */ |
1521 | ret = check_entry((struct ip6t_entry *)e, name); | 1522 | ret = check_entry((struct ip6t_entry *)e); |
1522 | if (ret) | 1523 | if (ret) |
1523 | return ret; | 1524 | return ret; |
1524 | 1525 | ||
@@ -1944,6 +1945,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr, | |||
1944 | *len, sizeof(get) + get.size); | 1945 | *len, sizeof(get) + get.size); |
1945 | return -EINVAL; | 1946 | return -EINVAL; |
1946 | } | 1947 | } |
1948 | get.name[sizeof(get.name) - 1] = '\0'; | ||
1947 | 1949 | ||
1948 | xt_compat_lock(AF_INET6); | 1950 | xt_compat_lock(AF_INET6); |
1949 | t = xt_find_table_lock(net, AF_INET6, get.name); | 1951 | t = xt_find_table_lock(net, AF_INET6, get.name); |
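
The ip6_tables hunks above tighten validation of the user-supplied rule blob: the bad-offset check now also rejects an entry whose next_offset would run past the end of the blob, check_entry() is called as part of check_entry_size_and_hooks() so it runs before any hook/underflow bookkeeping, and table names copied from userspace are forcibly NUL-terminated before lookup. The following is a minimal user-space sketch of why the next_offset check matters when walking variable-length records; the struct and function names are illustrative, not the kernel's.

/* Sketch only: walk a blob of variable-length entries, validating that
 * each entry's next_offset keeps the walker inside the blob. Without the
 * "p + e->next_offset > limit" test a crafted offset would step past the
 * end of the buffer on the next iteration.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct blob_entry {
	uint16_t next_offset;	/* distance to the next entry, in bytes */
	uint16_t target_offset;
	char payload[];
};

static int walk_entries(const unsigned char *base, size_t size)
{
	const unsigned char *limit = base + size;
	const unsigned char *p = base;

	while (p < limit) {
		const struct blob_entry *e = (const void *)p;

		/* Alignment, room for the fixed header, and the whole
		 * entry (header + payload) inside the blob.
		 */
		if ((uintptr_t)p % __alignof__(struct blob_entry) ||
		    p + sizeof(*e) > limit ||
		    e->next_offset < sizeof(*e) ||
		    p + e->next_offset > limit) {
			fprintf(stderr, "bad offset %p\n", (const void *)p);
			return -1;
		}
		p += e->next_offset;
	}
	return 0;
}

int main(void)
{
	size_t size = 64;
	unsigned char *blob = calloc(1, size);	/* aligned, zeroed buffer */
	struct blob_entry *e = (void *)blob;
	int ret;

	if (!blob)
		return 1;
	e->next_offset = size;		/* single entry spanning the whole blob */
	ret = walk_entries(blob, size);
	free(blob);
	return ret ? 1 : 0;
}
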
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index fd25e447a5fa..8125931106be 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -843,8 +843,8 @@ start_lookup: | |||
843 | flush_stack(stack, count, skb, count - 1); | 843 | flush_stack(stack, count, skb, count - 1); |
844 | } else { | 844 | } else { |
845 | if (!inner_flushed) | 845 | if (!inner_flushed) |
846 | UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI, | 846 | UDP6_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI, |
847 | proto == IPPROTO_UDPLITE); | 847 | proto == IPPROTO_UDPLITE); |
848 | consume_skb(skb); | 848 | consume_skb(skb); |
849 | } | 849 | } |
850 | return 0; | 850 | return 0; |
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h index b0bc475f641e..2e8e7e5fb4a6 100644 --- a/net/netfilter/ipset/ip_set_bitmap_gen.h +++ b/net/netfilter/ipset/ip_set_bitmap_gen.h | |||
@@ -95,7 +95,7 @@ mtype_head(struct ip_set *set, struct sk_buff *skb) | |||
95 | if (!nested) | 95 | if (!nested) |
96 | goto nla_put_failure; | 96 | goto nla_put_failure; |
97 | if (mtype_do_head(skb, map) || | 97 | if (mtype_do_head(skb, map) || |
98 | nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || | 98 | nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) || |
99 | nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize))) | 99 | nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize))) |
100 | goto nla_put_failure; | 100 | goto nla_put_failure; |
101 | if (unlikely(ip_set_put_flags(skb, set))) | 101 | if (unlikely(ip_set_put_flags(skb, set))) |
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 7e6568cad494..a748b0c2c981 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c | |||
@@ -497,6 +497,26 @@ __ip_set_put(struct ip_set *set) | |||
497 | write_unlock_bh(&ip_set_ref_lock); | 497 | write_unlock_bh(&ip_set_ref_lock); |
498 | } | 498 | } |
499 | 499 | ||
500 | /* set->ref can be swapped out by ip_set_swap, netlink events (like dump) need | ||
501 | * a separate reference counter | ||
502 | */ | ||
503 | static inline void | ||
504 | __ip_set_get_netlink(struct ip_set *set) | ||
505 | { | ||
506 | write_lock_bh(&ip_set_ref_lock); | ||
507 | set->ref_netlink++; | ||
508 | write_unlock_bh(&ip_set_ref_lock); | ||
509 | } | ||
510 | |||
511 | static inline void | ||
512 | __ip_set_put_netlink(struct ip_set *set) | ||
513 | { | ||
514 | write_lock_bh(&ip_set_ref_lock); | ||
515 | BUG_ON(set->ref_netlink == 0); | ||
516 | set->ref_netlink--; | ||
517 | write_unlock_bh(&ip_set_ref_lock); | ||
518 | } | ||
519 | |||
500 | /* Add, del and test set entries from kernel. | 520 | /* Add, del and test set entries from kernel. |
501 | * | 521 | * |
502 | * The set behind the index must exist and must be referenced | 522 | * The set behind the index must exist and must be referenced |
@@ -1002,7 +1022,7 @@ static int ip_set_destroy(struct net *net, struct sock *ctnl, | |||
1002 | if (!attr[IPSET_ATTR_SETNAME]) { | 1022 | if (!attr[IPSET_ATTR_SETNAME]) { |
1003 | for (i = 0; i < inst->ip_set_max; i++) { | 1023 | for (i = 0; i < inst->ip_set_max; i++) { |
1004 | s = ip_set(inst, i); | 1024 | s = ip_set(inst, i); |
1005 | if (s && s->ref) { | 1025 | if (s && (s->ref || s->ref_netlink)) { |
1006 | ret = -IPSET_ERR_BUSY; | 1026 | ret = -IPSET_ERR_BUSY; |
1007 | goto out; | 1027 | goto out; |
1008 | } | 1028 | } |
@@ -1024,7 +1044,7 @@ static int ip_set_destroy(struct net *net, struct sock *ctnl, | |||
1024 | if (!s) { | 1044 | if (!s) { |
1025 | ret = -ENOENT; | 1045 | ret = -ENOENT; |
1026 | goto out; | 1046 | goto out; |
1027 | } else if (s->ref) { | 1047 | } else if (s->ref || s->ref_netlink) { |
1028 | ret = -IPSET_ERR_BUSY; | 1048 | ret = -IPSET_ERR_BUSY; |
1029 | goto out; | 1049 | goto out; |
1030 | } | 1050 | } |
@@ -1171,6 +1191,9 @@ static int ip_set_swap(struct net *net, struct sock *ctnl, struct sk_buff *skb, | |||
1171 | from->family == to->family)) | 1191 | from->family == to->family)) |
1172 | return -IPSET_ERR_TYPE_MISMATCH; | 1192 | return -IPSET_ERR_TYPE_MISMATCH; |
1173 | 1193 | ||
1194 | if (from->ref_netlink || to->ref_netlink) | ||
1195 | return -EBUSY; | ||
1196 | |||
1174 | strncpy(from_name, from->name, IPSET_MAXNAMELEN); | 1197 | strncpy(from_name, from->name, IPSET_MAXNAMELEN); |
1175 | strncpy(from->name, to->name, IPSET_MAXNAMELEN); | 1198 | strncpy(from->name, to->name, IPSET_MAXNAMELEN); |
1176 | strncpy(to->name, from_name, IPSET_MAXNAMELEN); | 1199 | strncpy(to->name, from_name, IPSET_MAXNAMELEN); |
@@ -1206,7 +1229,7 @@ ip_set_dump_done(struct netlink_callback *cb) | |||
1206 | if (set->variant->uref) | 1229 | if (set->variant->uref) |
1207 | set->variant->uref(set, cb, false); | 1230 | set->variant->uref(set, cb, false); |
1208 | pr_debug("release set %s\n", set->name); | 1231 | pr_debug("release set %s\n", set->name); |
1209 | __ip_set_put_byindex(inst, index); | 1232 | __ip_set_put_netlink(set); |
1210 | } | 1233 | } |
1211 | return 0; | 1234 | return 0; |
1212 | } | 1235 | } |
@@ -1328,7 +1351,7 @@ dump_last: | |||
1328 | if (!cb->args[IPSET_CB_ARG0]) { | 1351 | if (!cb->args[IPSET_CB_ARG0]) { |
1329 | /* Start listing: make sure set won't be destroyed */ | 1352 | /* Start listing: make sure set won't be destroyed */ |
1330 | pr_debug("reference set\n"); | 1353 | pr_debug("reference set\n"); |
1331 | set->ref++; | 1354 | set->ref_netlink++; |
1332 | } | 1355 | } |
1333 | write_unlock_bh(&ip_set_ref_lock); | 1356 | write_unlock_bh(&ip_set_ref_lock); |
1334 | nlh = start_msg(skb, NETLINK_CB(cb->skb).portid, | 1357 | nlh = start_msg(skb, NETLINK_CB(cb->skb).portid, |
@@ -1396,7 +1419,7 @@ release_refcount: | |||
1396 | if (set->variant->uref) | 1419 | if (set->variant->uref) |
1397 | set->variant->uref(set, cb, false); | 1420 | set->variant->uref(set, cb, false); |
1398 | pr_debug("release set %s\n", set->name); | 1421 | pr_debug("release set %s\n", set->name); |
1399 | __ip_set_put_byindex(inst, index); | 1422 | __ip_set_put_netlink(set); |
1400 | cb->args[IPSET_CB_ARG0] = 0; | 1423 | cb->args[IPSET_CB_ARG0] = 0; |
1401 | } | 1424 | } |
1402 | out: | 1425 | out: |
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h index e5336ab36d67..d32fd6b036bf 100644 --- a/net/netfilter/ipset/ip_set_hash_gen.h +++ b/net/netfilter/ipset/ip_set_hash_gen.h | |||
@@ -1082,7 +1082,7 @@ mtype_head(struct ip_set *set, struct sk_buff *skb) | |||
1082 | if (nla_put_u32(skb, IPSET_ATTR_MARKMASK, h->markmask)) | 1082 | if (nla_put_u32(skb, IPSET_ATTR_MARKMASK, h->markmask)) |
1083 | goto nla_put_failure; | 1083 | goto nla_put_failure; |
1084 | #endif | 1084 | #endif |
1085 | if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || | 1085 | if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) || |
1086 | nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize))) | 1086 | nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize))) |
1087 | goto nla_put_failure; | 1087 | goto nla_put_failure; |
1088 | if (unlikely(ip_set_put_flags(skb, set))) | 1088 | if (unlikely(ip_set_put_flags(skb, set))) |
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c index 24c6c1962aea..a2a89e4e0a14 100644 --- a/net/netfilter/ipset/ip_set_list_set.c +++ b/net/netfilter/ipset/ip_set_list_set.c | |||
@@ -458,7 +458,7 @@ list_set_head(struct ip_set *set, struct sk_buff *skb) | |||
458 | if (!nested) | 458 | if (!nested) |
459 | goto nla_put_failure; | 459 | goto nla_put_failure; |
460 | if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) || | 460 | if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) || |
461 | nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || | 461 | nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) || |
462 | nla_put_net32(skb, IPSET_ATTR_MEMSIZE, | 462 | nla_put_net32(skb, IPSET_ATTR_MEMSIZE, |
463 | htonl(sizeof(*map) + n * set->dsize))) | 463 | htonl(sizeof(*map) + n * set->dsize))) |
464 | goto nla_put_failure; | 464 | goto nla_put_failure; |
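
The ipset hunks split the reference counting: netlink dumps now take a separate ref_netlink instead of the ordinary ref, destroy and swap refuse with a busy error while either counter is non-zero, and the REFERENCES attribute in the set headers can report set->ref directly instead of set->ref - 1 because dumps no longer inflate it. Below is a rough user-space sketch of the two-counter idea; the names, locking, and error reporting are simplified stand-ins, not the ipset API.

/* Sketch only: in-kernel users bump "ref", in-flight dumps bump a
 * separate "ref_netlink", and destroy/swap back off while either is held.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct set {
	pthread_mutex_t lock;
	unsigned int ref;		/* kernel-side users (rules, etc.) */
	unsigned int ref_netlink;	/* in-flight netlink dumps */
};

/* A dump pins the set for its lifetime; an analogous get/put pair
 * (not shown) maintains "ref" for kernel-side users. */
static void set_get_netlink(struct set *s)
{
	pthread_mutex_lock(&s->lock);
	s->ref_netlink++;
	pthread_mutex_unlock(&s->lock);
}

static void set_put_netlink(struct set *s)
{
	pthread_mutex_lock(&s->lock);
	s->ref_netlink--;
	pthread_mutex_unlock(&s->lock);
}

/* Destroy (or swap) only succeeds when nobody references the set at all. */
static bool set_try_destroy(struct set *s)
{
	bool busy;

	pthread_mutex_lock(&s->lock);
	busy = s->ref || s->ref_netlink;
	pthread_mutex_unlock(&s->lock);
	return !busy;	/* caller frees the set only on true */
}

int main(void)
{
	struct set s = { .ref = 0, .ref_netlink = 0 };

	pthread_mutex_init(&s.lock, NULL);

	set_get_netlink(&s);			/* a dump starts */
	printf("destroy while dumping: %s\n",
	       set_try_destroy(&s) ? "ok" : "busy");
	set_put_netlink(&s);			/* the dump finishes */
	printf("destroy afterwards:   %s\n",
	       set_try_destroy(&s) ? "ok" : "busy");
	return 0;
}
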
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 75429997ed41..cb5b630a645b 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
@@ -582,7 +582,12 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue, | |||
582 | /* nfnetlink_unicast will either free the nskb or add it to a socket */ | 582 | /* nfnetlink_unicast will either free the nskb or add it to a socket */ |
583 | err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT); | 583 | err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT); |
584 | if (err < 0) { | 584 | if (err < 0) { |
585 | queue->queue_user_dropped++; | 585 | if (queue->flags & NFQA_CFG_F_FAIL_OPEN) { |
586 | failopen = 1; | ||
587 | err = 0; | ||
588 | } else { | ||
589 | queue->queue_user_dropped++; | ||
590 | } | ||
586 | goto err_out_unlock; | 591 | goto err_out_unlock; |
587 | } | 592 | } |
588 | 593 | ||
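
The nfnetlink_queue hunk extends fail-open handling to the unicast path: if delivering the queued packet to the userspace listener fails and the queue was configured with NFQA_CFG_F_FAIL_OPEN, the packet is accepted rather than counted as a userspace drop. A simplified sketch of that decision follows; the constants, struct, and outcome values are illustrative only, not the kernel interface.

/* Sketch only: decide what happens to a packet when handing it to the
 * userspace listener fails, depending on the queue's fail-open flag.
 */
#include <stdio.h>

#define QUEUE_F_FAIL_OPEN 0x01	/* stand-in for NFQA_CFG_F_FAIL_OPEN */

enum outcome { PKT_QUEUED, PKT_ACCEPT, PKT_DROP };

struct queue {
	unsigned int flags;
	unsigned long user_dropped;	/* packets lost because userspace was unreachable */
};

static enum outcome enqueue_packet(struct queue *q, int unicast_err)
{
	if (unicast_err < 0) {
		if (q->flags & QUEUE_F_FAIL_OPEN)
			return PKT_ACCEPT;	/* let traffic through rather than drop it */
		q->user_dropped++;
		return PKT_DROP;
	}
	return PKT_QUEUED;	/* userspace will issue the real verdict later */
}

static const char *outcome_str(enum outcome o)
{
	return o == PKT_QUEUED ? "queued" : o == PKT_ACCEPT ? "accept" : "drop";
}

int main(void)
{
	struct queue q = { .flags = QUEUE_F_FAIL_OPEN };

	printf("send failed, fail-open: %s\n", outcome_str(enqueue_packet(&q, -1)));
	q.flags = 0;
	printf("send failed, default:   %s\n", outcome_str(enqueue_packet(&q, -1)));
	return 0;
}
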
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig index 234a73344c6e..ce947292ae77 100644 --- a/net/openvswitch/Kconfig +++ b/net/openvswitch/Kconfig | |||
@@ -7,7 +7,9 @@ config OPENVSWITCH | |||
7 | depends on INET | 7 | depends on INET |
8 | depends on !NF_CONNTRACK || \ | 8 | depends on !NF_CONNTRACK || \ |
9 | (NF_CONNTRACK && ((!NF_DEFRAG_IPV6 || NF_DEFRAG_IPV6) && \ | 9 | (NF_CONNTRACK && ((!NF_DEFRAG_IPV6 || NF_DEFRAG_IPV6) && \ |
10 | (!NF_NAT || NF_NAT))) | 10 | (!NF_NAT || NF_NAT) && \ |
11 | (!NF_NAT_IPV4 || NF_NAT_IPV4) && \ | ||
12 | (!NF_NAT_IPV6 || NF_NAT_IPV6))) | ||
11 | select LIBCRC32C | 13 | select LIBCRC32C |
12 | select MPLS | 14 | select MPLS |
13 | select NET_MPLS_GSO | 15 | select NET_MPLS_GSO |
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index dc5eb29fe7d6..1b9d286756be 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c | |||
@@ -535,14 +535,15 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, | |||
535 | switch (ctinfo) { | 535 | switch (ctinfo) { |
536 | case IP_CT_RELATED: | 536 | case IP_CT_RELATED: |
537 | case IP_CT_RELATED_REPLY: | 537 | case IP_CT_RELATED_REPLY: |
538 | if (skb->protocol == htons(ETH_P_IP) && | 538 | if (IS_ENABLED(CONFIG_NF_NAT_IPV4) && |
539 | skb->protocol == htons(ETH_P_IP) && | ||
539 | ip_hdr(skb)->protocol == IPPROTO_ICMP) { | 540 | ip_hdr(skb)->protocol == IPPROTO_ICMP) { |
540 | if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo, | 541 | if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo, |
541 | hooknum)) | 542 | hooknum)) |
542 | err = NF_DROP; | 543 | err = NF_DROP; |
543 | goto push; | 544 | goto push; |
544 | #if IS_ENABLED(CONFIG_NF_NAT_IPV6) | 545 | } else if (IS_ENABLED(CONFIG_NF_NAT_IPV6) && |
545 | } else if (skb->protocol == htons(ETH_P_IPV6)) { | 546 | skb->protocol == htons(ETH_P_IPV6)) { |
546 | __be16 frag_off; | 547 | __be16 frag_off; |
547 | u8 nexthdr = ipv6_hdr(skb)->nexthdr; | 548 | u8 nexthdr = ipv6_hdr(skb)->nexthdr; |
548 | int hdrlen = ipv6_skip_exthdr(skb, | 549 | int hdrlen = ipv6_skip_exthdr(skb, |
@@ -557,7 +558,6 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, | |||
557 | err = NF_DROP; | 558 | err = NF_DROP; |
558 | goto push; | 559 | goto push; |
559 | } | 560 | } |
560 | #endif | ||
561 | } | 561 | } |
562 | /* Non-ICMP, fall thru to initialize if needed. */ | 562 | /* Non-ICMP, fall thru to initialize if needed. */ |
563 | case IP_CT_NEW: | 563 | case IP_CT_NEW: |
@@ -664,11 +664,12 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key, | |||
664 | 664 | ||
665 | /* Determine NAT type. | 665 | /* Determine NAT type. |
666 | * Check if the NAT type can be deduced from the tracked connection. | 666 | * Check if the NAT type can be deduced from the tracked connection. |
667 | * Make sure expected traffic is NATted only when committing. | 667 | * Make sure new expected connections (IP_CT_RELATED) are NATted only |
668 | * when committing. | ||
668 | */ | 669 | */ |
669 | if (info->nat & OVS_CT_NAT && ctinfo != IP_CT_NEW && | 670 | if (info->nat & OVS_CT_NAT && ctinfo != IP_CT_NEW && |
670 | ct->status & IPS_NAT_MASK && | 671 | ct->status & IPS_NAT_MASK && |
671 | (!(ct->status & IPS_EXPECTED_BIT) || info->commit)) { | 672 | (ctinfo != IP_CT_RELATED || info->commit)) { |
672 | /* NAT an established or related connection like before. */ | 673 | /* NAT an established or related connection like before. */ |
673 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) | 674 | if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) |
674 | /* This is the REPLY direction for a connection | 675 | /* This is the REPLY direction for a connection |
@@ -968,7 +969,8 @@ static int parse_nat(const struct nlattr *attr, | |||
968 | break; | 969 | break; |
969 | 970 | ||
970 | case OVS_NAT_ATTR_IP_MIN: | 971 | case OVS_NAT_ATTR_IP_MIN: |
971 | nla_memcpy(&info->range.min_addr, a, nla_len(a)); | 972 | nla_memcpy(&info->range.min_addr, a, |
973 | sizeof(info->range.min_addr)); | ||
972 | info->range.flags |= NF_NAT_RANGE_MAP_IPS; | 974 | info->range.flags |= NF_NAT_RANGE_MAP_IPS; |
973 | break; | 975 | break; |
974 | 976 | ||
@@ -1238,7 +1240,8 @@ static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info, | |||
1238 | } | 1240 | } |
1239 | 1241 | ||
1240 | if (info->range.flags & NF_NAT_RANGE_MAP_IPS) { | 1242 | if (info->range.flags & NF_NAT_RANGE_MAP_IPS) { |
1241 | if (info->family == NFPROTO_IPV4) { | 1243 | if (IS_ENABLED(CONFIG_NF_NAT_IPV4) && |
1244 | info->family == NFPROTO_IPV4) { | ||
1242 | if (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MIN, | 1245 | if (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MIN, |
1243 | info->range.min_addr.ip) || | 1246 | info->range.min_addr.ip) || |
1244 | (info->range.max_addr.ip | 1247 | (info->range.max_addr.ip |
@@ -1246,8 +1249,8 @@ static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info, | |||
1246 | (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MAX, | 1249 | (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MAX, |
1247 | info->range.max_addr.ip)))) | 1250 | info->range.max_addr.ip)))) |
1248 | return false; | 1251 | return false; |
1249 | #if IS_ENABLED(CONFIG_NF_NAT_IPV6) | 1252 | } else if (IS_ENABLED(CONFIG_NF_NAT_IPV6) && |
1250 | } else if (info->family == NFPROTO_IPV6) { | 1253 | info->family == NFPROTO_IPV6) { |
1251 | if (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MIN, | 1254 | if (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MIN, |
1252 | &info->range.min_addr.in6) || | 1255 | &info->range.min_addr.in6) || |
1253 | (memcmp(&info->range.max_addr.in6, | 1256 | (memcmp(&info->range.max_addr.in6, |
@@ -1256,7 +1259,6 @@ static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info, | |||
1256 | (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MAX, | 1259 | (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MAX, |
1257 | &info->range.max_addr.in6)))) | 1260 | &info->range.max_addr.in6)))) |
1258 | return false; | 1261 | return false; |
1259 | #endif | ||
1260 | } else { | 1262 | } else { |
1261 | return false; | 1263 | return false; |
1262 | } | 1264 | } |
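
The openvswitch conntrack hunks replace the #if IS_ENABLED(CONFIG_NF_NAT_IPV6) preprocessor blocks with IS_ENABLED() tests inside ordinary if conditions (and add the matching CONFIG_NF_NAT_IPV4 checks), so both branches are always compiled and type-checked while the disabled one is removed as dead code; they also bound nla_memcpy() by the destination size rather than the attribute length, and only NAT a RELATED connection when committing. Below is a small sketch of the compile-time-constant-in-an-if style; the CONFIG_* macros are stand-ins for the kernel's kconfig machinery, not real options.

/* Sketch only: the config switch is a constant 0/1 tested in plain C, so
 * the compiler still parses and checks the disabled branch but optimizes
 * it away, unlike an #ifdef block that would hide it entirely.
 */
#include <stdio.h>

#define CONFIG_NAT_IPV4 1	/* pretend IPv4 NAT support is built in */
#define CONFIG_NAT_IPV6 0	/* pretend IPv6 NAT support is not */

static void nat_v4(void) { puts("IPv4 NAT path"); }
static void nat_v6(void) { puts("IPv6 NAT path"); }

static void handle(int is_ipv6)
{
	if (CONFIG_NAT_IPV4 && !is_ipv6) {
		nat_v4();
	} else if (CONFIG_NAT_IPV6 && is_ipv6) {
		nat_v6();	/* still compiled and checked, eliminated as dead code here */
	} else {
		puts("address family not supported for NAT");
	}
}

int main(void)
{
	handle(0);
	handle(1);
	return 0;
}
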
diff --git a/net/sctp/output.c b/net/sctp/output.c index 736c004abfbc..97745351d58c 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
@@ -401,7 +401,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp) | |||
401 | sk = chunk->skb->sk; | 401 | sk = chunk->skb->sk; |
402 | 402 | ||
403 | /* Allocate the new skb. */ | 403 | /* Allocate the new skb. */ |
404 | nskb = alloc_skb(packet->size + MAX_HEADER, GFP_ATOMIC); | 404 | nskb = alloc_skb(packet->size + MAX_HEADER, gfp); |
405 | if (!nskb) | 405 | if (!nskb) |
406 | goto nomem; | 406 | goto nomem; |
407 | 407 | ||
@@ -523,8 +523,8 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp) | |||
523 | */ | 523 | */ |
524 | if (auth) | 524 | if (auth) |
525 | sctp_auth_calculate_hmac(asoc, nskb, | 525 | sctp_auth_calculate_hmac(asoc, nskb, |
526 | (struct sctp_auth_chunk *)auth, | 526 | (struct sctp_auth_chunk *)auth, |
527 | GFP_ATOMIC); | 527 | gfp); |
528 | 528 | ||
529 | /* 2) Calculate the Adler-32 checksum of the whole packet, | 529 | /* 2) Calculate the Adler-32 checksum of the whole packet, |
530 | * including the SCTP common header and all the | 530 | * including the SCTP common header and all the |
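
The sctp_packet_transmit() hunks honor the caller's gfp argument instead of hard-coding GFP_ATOMIC for the skb allocation and the HMAC computation, so callers running in a context that may sleep get the less restrictive allocation mode. A tiny sketch of threading the caller's allocation flags through a function; gfp_t and the flag values here are stand-ins, not the kernel types.

/* Sketch only: every allocation the function makes uses the flags the
 * caller chose, instead of pinning them to the most restrictive mode.
 */
#include <stdio.h>
#include <stdlib.h>

typedef unsigned int gfp_t;
#define GFP_ATOMIC 0x1u		/* may not sleep */
#define GFP_KERNEL 0x2u		/* may sleep and reclaim */

static void *buf_alloc(size_t size, gfp_t gfp)
{
	printf("alloc %zu bytes with flags %#x\n", size, gfp);
	return malloc(size);
}

static int transmit(size_t size, gfp_t gfp)
{
	void *nskb = buf_alloc(size + 64, gfp);	/* propagated, not hard-coded */

	if (!nskb)
		return -1;
	free(nskb);
	return 0;
}

int main(void)
{
	transmit(512, GFP_KERNEL);	/* process context */
	transmit(512, GFP_ATOMIC);	/* softirq-like context */
	return 0;
}
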
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 8b5833c1ff2e..2b9b98f1c2ff 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c | |||
@@ -1079,7 +1079,7 @@ nla_put_failure: | |||
1079 | * @filter_dev: filter device | 1079 | * @filter_dev: filter device |
1080 | * @idx: | 1080 | * @idx: |
1081 | * | 1081 | * |
1082 | * Delete FDB entry from switch device. | 1082 | * Dump FDB entries from switch device. |
1083 | */ | 1083 | */ |
1084 | int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, | 1084 | int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, |
1085 | struct net_device *dev, | 1085 | struct net_device *dev, |
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index ad7f5b3f9b61..1c4ad477ce93 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c | |||
@@ -292,12 +292,15 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) | |||
292 | XFRM_SKB_CB(skb)->seq.input.hi = seq_hi; | 292 | XFRM_SKB_CB(skb)->seq.input.hi = seq_hi; |
293 | 293 | ||
294 | skb_dst_force(skb); | 294 | skb_dst_force(skb); |
295 | dev_hold(skb->dev); | ||
295 | 296 | ||
296 | nexthdr = x->type->input(x, skb); | 297 | nexthdr = x->type->input(x, skb); |
297 | 298 | ||
298 | if (nexthdr == -EINPROGRESS) | 299 | if (nexthdr == -EINPROGRESS) |
299 | return 0; | 300 | return 0; |
300 | resume: | 301 | resume: |
302 | dev_put(skb->dev); | ||
303 | |||
301 | spin_lock(&x->lock); | 304 | spin_lock(&x->lock); |
302 | if (nexthdr <= 0) { | 305 | if (nexthdr <= 0) { |
303 | if (nexthdr == -EBADMSG) { | 306 | if (nexthdr == -EBADMSG) { |
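
The xfrm_input() hunk takes a reference on skb->dev before handing the packet to the transform's input handler, which may complete asynchronously, and drops it on the resume path, so the device reference stays valid while the async operation is still in flight. The following is a hedged user-space sketch of holding a reference across an asynchronous hand-off; the refcounting and the simulated completion are simplified stand-ins, not the kernel's dev_hold()/dev_put().

/* Sketch only: take a reference before handing work off asynchronously,
 * release it when the work resumes, so the object cannot go away in
 * between even if its original owner drops its own reference.
 */
#include <stdio.h>
#include <stdlib.h>

struct device {
	const char *name;
	int refcnt;
};

static void dev_hold(struct device *d) { d->refcnt++; }

static void dev_put(struct device *d)
{
	if (--d->refcnt == 0) {
		printf("%s: last reference dropped, freeing\n", d->name);
		free(d);
	}
}

/* The "async" stage: by the time this runs, the caller has returned, so it
 * relies entirely on the reference taken before the hand-off. */
static void async_resume(struct device *d)
{
	printf("%s: async transform finished\n", d->name);
	dev_put(d);		/* pairs with the dev_hold() before the hand-off */
}

int main(void)
{
	struct device *d = malloc(sizeof(*d));

	if (!d)
		return 1;
	d->name = "eth0";
	d->refcnt = 1;		/* owner's reference */

	dev_hold(d);		/* pin the device across the async operation */
	/* ... hand the packet to the async layer; it completes later ... */
	dev_put(d);		/* owner drops its reference in the meantime */
	async_resume(d);	/* completion still finds a live device */
	return 0;
}
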