Diffstat (limited to 'net')
51 files changed, 408 insertions, 179 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 98a30a5b8664..59555f0f8fc8 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -443,7 +443,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 	case NETDEV_UP:
 		/* Put all VLANs for this dev in the up state too. */
 		vlan_group_for_each_dev(grp, i, vlandev) {
-			flgs = vlandev->flags;
+			flgs = dev_get_flags(vlandev);
 			if (flgs & IFF_UP)
 				continue;
 
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 4663c3dad3f5..c4802f3bd4c5 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2854,9 +2854,11 @@ static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
 		 * state. If we were running both LE and BR/EDR inquiry
 		 * simultaneously, and BR/EDR inquiry is already
 		 * finished, stop discovery, otherwise BR/EDR inquiry
-		 * will stop discovery when finished.
+		 * will stop discovery when finished. If we will resolve
+		 * remote device name, do not change discovery state.
 		 */
-		if (!test_bit(HCI_INQUIRY, &hdev->flags))
+		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
+		    hdev->discovery.state != DISCOVERY_RESOLVING)
 			hci_discovery_set_state(hdev,
 						DISCOVERY_STOPPED);
 	} else {
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 4b6722f8f179..22fd0419b314 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1072,7 +1072,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 
 		err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
 						 vid);
-		if (!err)
+		if (err)
 			break;
 	}
 
@@ -1822,7 +1822,7 @@ static void br_multicast_query_expired(struct net_bridge *br,
 	if (query->startup_sent < br->multicast_startup_query_count)
 		query->startup_sent++;
 
-	RCU_INIT_POINTER(querier, NULL);
+	RCU_INIT_POINTER(querier->port, NULL);
 	br_multicast_send_query(br, NULL, query);
 	spin_unlock(&br->multicast_lock);
 }
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index ab55e2472beb..60ddfbeb47f5 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -37,10 +37,6 @@
 #include <net/route.h>
 #include <net/netfilter/br_netfilter.h>
 
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
-#include <net/netfilter/nf_conntrack.h>
-#endif
-
 #include <asm/uaccess.h>
 #include "br_private.h"
 #ifdef CONFIG_SYSCTL
@@ -350,24 +346,15 @@ free_skb:
 	return 0;
 }
 
-static bool dnat_took_place(const struct sk_buff *skb)
+static bool daddr_was_changed(const struct sk_buff *skb,
+			      const struct nf_bridge_info *nf_bridge)
 {
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
-	enum ip_conntrack_info ctinfo;
-	struct nf_conn *ct;
-
-	ct = nf_ct_get(skb, &ctinfo);
-	if (!ct || nf_ct_is_untracked(ct))
-		return false;
-
-	return test_bit(IPS_DST_NAT_BIT, &ct->status);
-#else
-	return false;
-#endif
+	return ip_hdr(skb)->daddr != nf_bridge->ipv4_daddr;
 }
 
 /* This requires some explaining. If DNAT has taken place,
  * we will need to fix up the destination Ethernet address.
+ * This is also true when SNAT takes place (for the reply direction).
  *
  * There are two cases to consider:
  * 1. The packet was DNAT'ed to a device in the same bridge
@@ -421,7 +408,7 @@ static int br_nf_pre_routing_finish(struct sock *sk, struct sk_buff *skb)
 		nf_bridge->pkt_otherhost = false;
 	}
 	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
-	if (dnat_took_place(skb)) {
+	if (daddr_was_changed(skb, nf_bridge)) {
 		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
 			struct in_device *in_dev = __in_dev_get_rcu(dev);
 
@@ -632,6 +619,7 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
 				      struct sk_buff *skb,
 				      const struct nf_hook_state *state)
 {
+	struct nf_bridge_info *nf_bridge;
 	struct net_bridge_port *p;
 	struct net_bridge *br;
 	__u32 len = nf_bridge_encap_header_len(skb);
@@ -669,6 +657,9 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
 	if (!setup_pre_routing(skb))
 		return NF_DROP;
 
+	nf_bridge = nf_bridge_info_get(skb);
+	nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;
+
 	skb->protocol = htons(ETH_P_IP);
 
 	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->sk, skb,
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 4fcaa67750fd..7caf7fae2d5b 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -97,7 +97,9 @@ static void br_forward_delay_timer_expired(unsigned long arg)
 		netif_carrier_on(br->dev);
 	}
 	br_log_state(p);
+	rcu_read_lock();
 	br_ifinfo_notify(RTM_NEWLINK, p);
+	rcu_read_unlock();
 	spin_unlock(&br->lock);
 }
 
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 4ec0c803aef1..112ad784838a 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -330,6 +330,10 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
 		release_sock(sk);
 		timeo = schedule_timeout(timeo);
 		lock_sock(sk);
+
+		if (sock_flag(sk, SOCK_DEAD))
+			break;
+
 		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
 	}
 
@@ -373,6 +377,10 @@ static int caif_stream_recvmsg(struct socket *sock, struct msghdr *msg,
 		struct sk_buff *skb;
 
 		lock_sock(sk);
+		if (sock_flag(sk, SOCK_DEAD)) {
+			err = -ECONNRESET;
+			goto unlock;
+		}
 		skb = skb_dequeue(&sk->sk_receive_queue);
 		caif_check_flow_release(sk);
 
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 41a4abc7e98e..c4ec9239249a 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1306,8 +1306,6 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
 		if (list_empty(&req->r_osd_item))
 			req->r_osd = NULL;
 	}
-
-	list_del_init(&req->r_req_lru_item); /* can be on notarget */
 	ceph_osdc_put_request(req);
 }
 
@@ -2017,20 +2015,29 @@ static void kick_requests(struct ceph_osd_client *osdc, bool force_resend,
 		err = __map_request(osdc, req,
 				    force_resend || force_resend_writes);
 		dout("__map_request returned %d\n", err);
-		if (err == 0)
-			continue;  /* no change and no osd was specified */
 		if (err < 0)
 			continue;  /* hrm! */
-		if (req->r_osd == NULL) {
-			dout("tid %llu maps to no valid osd\n", req->r_tid);
-			needmap++;  /* request a newer map */
-			continue;
-		}
+		if (req->r_osd == NULL || err > 0) {
+			if (req->r_osd == NULL) {
+				dout("lingering %p tid %llu maps to no osd\n",
+				     req, req->r_tid);
+				/*
+				 * A homeless lingering request makes
+				 * no sense, as it's job is to keep
+				 * a particular OSD connection open.
+				 * Request a newer map and kick the
+				 * request, knowing that it won't be
+				 * resent until we actually get a map
+				 * that can tell us where to send it.
+				 */
+				needmap++;
+			}
 
-		dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
-		     req->r_osd ? req->r_osd->o_osd : -1);
-		__register_request(osdc, req);
-		__unregister_linger_request(osdc, req);
+			dout("kicking lingering %p tid %llu osd%d\n", req,
+			     req->r_tid, req->r_osd ? req->r_osd->o_osd : -1);
+			__register_request(osdc, req);
+			__unregister_linger_request(osdc, req);
+		}
 	}
 	reset_changed_osds(osdc);
 	mutex_unlock(&osdc->request_mutex);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 666e0928ba40..8de36824018d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2416,6 +2416,9 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
 {
 	struct sk_buff *skb;
 
+	if (dev->reg_state != NETREG_REGISTERED)
+		return;
+
 	skb = rtmsg_ifinfo_build_skb(type, dev, change, flags);
 	if (skb)
 		rtmsg_ifinfo_send(skb, dev, flags);
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index e6f6cc3a1bcf..392e29a0227d 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -359,7 +359,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
 	 */
 	ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL);
 	if (ds == NULL)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	ds->dst = dst;
 	ds->index = index;
@@ -370,7 +370,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
 
 	ret = dsa_switch_setup_one(ds, parent);
 	if (ret)
-		return NULL;
+		return ERR_PTR(ret);
 
 	return ds;
 }
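The dsa_switch_setup() hunks above switch from returning NULL on failure to encoding the errno in the returned pointer. As a minimal userspace sketch of that ERR_PTR/IS_ERR-style convention (err_ptr()/is_err()/ptr_err() here are illustrative stand-ins, not the kernel macros):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Encode a small negative errno in an otherwise invalid pointer value,
     * mimicking the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention. */
    static inline void *err_ptr(long err) { return (void *)err; }
    static inline int is_err(const void *p) { return (uintptr_t)p >= (uintptr_t)-4095; }
    static inline long ptr_err(const void *p) { return (long)p; }

    static void *setup_switch(int fail)
    {
        void *sw;

        if (fail)
            return err_ptr(-ENOMEM);   /* was "return NULL" before the fix */
        sw = malloc(16);
        if (!sw)
            return err_ptr(-ENOMEM);
        return sw;
    }

    int main(void)
    {
        void *sw = setup_switch(1);

        if (is_err(sw))
            printf("setup failed: %ld\n", ptr_err(sw));   /* prints -12 */
        else
            free(sw);
        return 0;
    }

The caller can then distinguish "out of memory" from other setup failures instead of collapsing everything into NULL.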
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 421a80b09b62..30b544f025ac 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -256,7 +256,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
 	aead_givcrypt_set_assoc(req, asg, assoclen);
 	aead_givcrypt_set_giv(req, esph->enc_data,
-			      XFRM_SKB_CB(skb)->seq.output.low);
+			      XFRM_SKB_CB(skb)->seq.output.low +
+			      ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
 
 	ESP_SKB_CB(skb)->tmp = tmp;
 	err = crypto_aead_givencrypt(req);
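The esp_output() fix above (and the matching esp6_output() one below) seeds the IV with the full 64-bit extended sequence number by folding in the high 32 bits. A standalone sketch of that bit arithmetic, assuming nothing beyond plain C integers:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Combine the low and high 32-bit halves of an extended sequence number
     * into one 64-bit value, as the ESP output fix does. */
    static uint64_t esn_combine(uint32_t low, uint32_t hi)
    {
        return (uint64_t)low + ((uint64_t)hi << 32);
    }

    int main(void)
    {
        /* before the fix only "low" was used, so these two would collide */
        printf("%" PRIu64 "\n", esn_combine(7, 0));
        printf("%" PRIu64 "\n", esn_combine(7, 1));
        return 0;
    }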
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index e13fcc602da2..09b62e17dd8c 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1164,6 +1164,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
 			state = fa->fa_state;
 			new_fa->fa_state = state & ~FA_S_ACCESSED;
 			new_fa->fa_slen = fa->fa_slen;
+			new_fa->tb_id = tb->tb_id;
 
 			err = netdev_switch_fib_ipv4_add(key, plen, fi,
 							 new_fa->fa_tos,
@@ -1764,7 +1765,7 @@ void fib_table_flush_external(struct fib_table *tb)
 			/* record local slen */
 			slen = fa->fa_slen;
 
-			if (!fi || !(fi->fib_flags & RTNH_F_EXTERNAL))
+			if (!fi || !(fi->fib_flags & RTNH_F_OFFLOAD))
 				continue;
 
 			netdev_switch_fib_ipv4_del(n->key,
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 9f7269f3c54a..0c152087ca15 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -65,7 +65,6 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
 			goto drop;
 
 		XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
-		skb->mark = be32_to_cpu(tunnel->parms.i_key);
 
 		return xfrm_input(skb, nexthdr, spi, encap_type);
 	}
@@ -91,6 +90,8 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
 	struct pcpu_sw_netstats *tstats;
 	struct xfrm_state *x;
 	struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4;
+	u32 orig_mark = skb->mark;
+	int ret;
 
 	if (!tunnel)
 		return 1;
@@ -107,7 +108,11 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
 	x = xfrm_input_state(skb);
 	family = x->inner_mode->afinfo->family;
 
-	if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
+	skb->mark = be32_to_cpu(tunnel->parms.i_key);
+	ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
+	skb->mark = orig_mark;
+
+	if (!ret)
 		return -EPERM;
 
 	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(skb->dev)));
@@ -216,8 +221,6 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	memset(&fl, 0, sizeof(fl));
 
-	skb->mark = be32_to_cpu(tunnel->parms.o_key);
-
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
 		xfrm_decode_session(skb, &fl, AF_INET);
@@ -233,6 +236,9 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_OK;
 	}
 
+	/* override mark with tunnel output key */
+	fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);
+
 	return vti_xmit(skb, dev, &fl);
 }
 
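The vti_rcv_cb() change narrows the i_key override of skb->mark to just the policy lookup and restores the caller's mark afterwards. A hypothetical userspace sketch of that save/override/restore pattern (policy_check() and struct pkt are stand-ins, not kernel APIs):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct pkt { uint32_t mark; };

    /* stand-in for xfrm_policy_check(); matches against the temporary mark */
    static bool policy_check(const struct pkt *p, uint32_t want) { return p->mark == want; }

    static int receive(struct pkt *p, uint32_t tunnel_key)
    {
        uint32_t orig_mark = p->mark;   /* remember the caller's mark */
        bool ok;

        p->mark = tunnel_key;           /* override only for the policy lookup */
        ok = policy_check(p, tunnel_key);
        p->mark = orig_mark;            /* restore so later users see the old value */

        return ok ? 0 : -1;
    }

    int main(void)
    {
        struct pkt p = { .mark = 42 };

        receive(&p, 7);
        printf("mark after receive: %u\n", p.mark);   /* still 42 */
        return 0;
    }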
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 13bfe84bf3ca..a61200754f4b 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -1075,6 +1075,9 @@ static int do_replace(struct net *net, const void __user *user,
 	/* overflow check */
 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
 		return -ENOMEM;
+	if (tmp.num_counters == 0)
+		return -EINVAL;
+
 	tmp.name[sizeof(tmp.name)-1] = 0;
 
 	newinfo = xt_alloc_table_info(tmp.size);
@@ -1499,6 +1502,9 @@ static int compat_do_replace(struct net *net, void __user *user,
 		return -ENOMEM;
 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
 		return -ENOMEM;
+	if (tmp.num_counters == 0)
+		return -EINVAL;
+
 	tmp.name[sizeof(tmp.name)-1] = 0;
 
 	newinfo = xt_alloc_table_info(tmp.size);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index c69db7fa25ee..2d0e265fef6e 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1262,6 +1262,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
 	/* overflow check */
 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
 		return -ENOMEM;
+	if (tmp.num_counters == 0)
+		return -EINVAL;
+
 	tmp.name[sizeof(tmp.name)-1] = 0;
 
 	newinfo = xt_alloc_table_info(tmp.size);
@@ -1809,6 +1812,9 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
 	return -ENOMEM;
 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
 		return -ENOMEM;
+	if (tmp.num_counters == 0)
+		return -EINVAL;
+
 	tmp.name[sizeof(tmp.name)-1] = 0;
 
 	newinfo = xt_alloc_table_info(tmp.size);
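The do_replace()/compat_do_replace() hunks in arp_tables, ip_tables and ip6_tables all pair the existing multiplication-overflow guard with a new rejection of a zero counter count. A small sketch of that validation pattern (validate_counters() and struct counter are illustrative, not the xtables code):

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>

    struct counter { unsigned long long packets, bytes; };

    /* Reject counts that would overflow count * sizeof(struct counter)
     * as well as a zero count, mirroring the xt do_replace() checks. */
    static int validate_counters(unsigned int num_counters)
    {
        if (num_counters >= INT_MAX / sizeof(struct counter))
            return -ENOMEM;
        if (num_counters == 0)
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", validate_counters(0));         /* -EINVAL */
        printf("%d\n", validate_counters(16));        /* 0 */
        printf("%d\n", validate_counters(UINT_MAX));  /* -ENOMEM */
        return 0;
    }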
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index bff62fc87b8e..f45f2a12f37b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -902,6 +902,10 @@ static int ip_error(struct sk_buff *skb)
 	bool send;
 	int code;
 
+	/* IP on this device is disabled. */
+	if (!in_dev)
+		goto out;
+
 	net = dev_net(rt->dst.dev);
 	if (!IN_DEV_FORWARD(in_dev)) {
 		switch (rt->dst.error) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 46efa03d2b11..f1377f2a0472 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -402,6 +402,7 @@ void tcp_init_sock(struct sock *sk)
 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 	tp->snd_cwnd_clamp = ~0;
 	tp->mss_cache = TCP_MSS_DEFAULT;
+	u64_stats_init(&tp->syncp);
 
 	tp->reordering = sysctl_tcp_reordering;
 	tcp_enable_early_retrans(tp);
@@ -2598,6 +2599,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 now = tcp_time_stamp;
+	unsigned int start;
 	u32 rate;
 
 	memset(info, 0, sizeof(*info));
@@ -2665,10 +2667,11 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 	rate = READ_ONCE(sk->sk_max_pacing_rate);
 	info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;
 
-	spin_lock_bh(&sk->sk_lock.slock);
-	info->tcpi_bytes_acked = tp->bytes_acked;
-	info->tcpi_bytes_received = tp->bytes_received;
-	spin_unlock_bh(&sk->sk_lock.slock);
+	do {
+		start = u64_stats_fetch_begin_irq(&tp->syncp);
+		info->tcpi_bytes_acked = tp->bytes_acked;
+		info->tcpi_bytes_received = tp->bytes_received;
+	} while (u64_stats_fetch_retry_irq(&tp->syncp, start));
 }
 EXPORT_SYMBOL_GPL(tcp_get_info);
 
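tcp_get_info() now reads the 64-bit byte counters under a sequence counter instead of taking the socket spinlock, with the writer side in tcp_input.c wrapped in u64_stats_update_begin()/end(). A single-threaded sketch of the retry-loop idea (seq_begin()/seq_retry() are simplified stand-ins without the memory barriers the real u64_stats primitives provide):

    #include <stdint.h>
    #include <stdio.h>

    struct stats {
        unsigned int seq;    /* even = stable, odd = writer in progress */
        uint64_t bytes_acked;
        uint64_t bytes_received;
    };

    static unsigned int seq_begin(const struct stats *s) { return s->seq; }
    static int seq_retry(const struct stats *s, unsigned int start)
    {
        /* retry if a write was in flight or completed since we started */
        return (start & 1) || s->seq != start;
    }

    static void writer_update(struct stats *s, uint64_t acked, uint64_t rcvd)
    {
        s->seq++;                   /* begin: make sequence odd */
        s->bytes_acked += acked;
        s->bytes_received += rcvd;
        s->seq++;                   /* end: make sequence even again */
    }

    int main(void)
    {
        struct stats s = { 0, 0, 0 };
        uint64_t acked, rcvd;
        unsigned int start;

        writer_update(&s, 1000, 2000);
        do {
            start = seq_begin(&s);
            acked = s.bytes_acked;
            rcvd = s.bytes_received;
        } while (seq_retry(&s, start));

        printf("acked=%llu received=%llu\n",
               (unsigned long long)acked, (unsigned long long)rcvd);
        return 0;
    }

The reader never blocks the fast path; it simply re-reads if a writer updated the counters underneath it.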
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 7a5ae50c80c8..84be008c945c 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -187,6 +187,7 @@ static void tcp_reinit_congestion_control(struct sock *sk,
 
 	tcp_cleanup_congestion_control(sk);
 	icsk->icsk_ca_ops = ca;
+	icsk->icsk_ca_setsockopt = 1;
 
 	if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
 		icsk->icsk_ca_ops->init(sk);
@@ -335,8 +336,10 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
 	rcu_read_lock();
 	ca = __tcp_ca_find_autoload(name);
 	/* No change asking for existing value */
-	if (ca == icsk->icsk_ca_ops)
+	if (ca == icsk->icsk_ca_ops) {
+		icsk->icsk_ca_setsockopt = 1;
 		goto out;
+	}
 	if (!ca)
 		err = -ENOENT;
 	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 3c673d5e6cff..46b087a27503 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -206,6 +206,10 @@ static bool tcp_fastopen_create_child(struct sock *sk,
 		skb_set_owner_r(skb2, child);
 		__skb_queue_tail(&child->sk_receive_queue, skb2);
 		tp->syn_data_acked = 1;
+
+		/* u64_stats_update_begin(&tp->syncp) not needed here,
+		 * as we certainly are not changing upper 32bit value (0)
+		 */
 		tp->bytes_received = end_seq - TCP_SKB_CB(skb)->seq - 1;
 	} else {
 		end_seq = TCP_SKB_CB(skb)->seq + 1;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index bc790ea9960f..c9ab964189a0 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2698,16 +2698,21 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
 	struct tcp_sock *tp = tcp_sk(sk);
 	bool recovered = !before(tp->snd_una, tp->high_seq);
 
+	if ((flag & FLAG_SND_UNA_ADVANCED) &&
+	    tcp_try_undo_loss(sk, false))
+		return;
+
 	if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
 		/* Step 3.b. A timeout is spurious if not all data are
 		 * lost, i.e., never-retransmitted data are (s)acked.
 		 */
-		if (tcp_try_undo_loss(sk, flag & FLAG_ORIG_SACK_ACKED))
+		if ((flag & FLAG_ORIG_SACK_ACKED) &&
+		    tcp_try_undo_loss(sk, true))
 			return;
 
-		if (after(tp->snd_nxt, tp->high_seq) &&
-		    (flag & FLAG_DATA_SACKED || is_dupack)) {
-			tp->frto = 0; /* Loss was real: 2nd part of step 3.a */
+		if (after(tp->snd_nxt, tp->high_seq)) {
+			if (flag & FLAG_DATA_SACKED || is_dupack)
+				tp->frto = 0; /* Step 3.a. loss was real */
 		} else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
 			tp->high_seq = tp->snd_nxt;
 			__tcp_push_pending_frames(sk, tcp_current_mss(sk),
@@ -2732,8 +2737,6 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
 		else if (flag & FLAG_SND_UNA_ADVANCED)
 			tcp_reset_reno_sack(tp);
 	}
-	if (tcp_try_undo_loss(sk, false))
-		return;
 	tcp_xmit_retransmit_queue(sk);
 }
 
@@ -3283,7 +3286,9 @@ static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
 {
 	u32 delta = ack - tp->snd_una;
 
+	u64_stats_update_begin(&tp->syncp);
 	tp->bytes_acked += delta;
+	u64_stats_update_end(&tp->syncp);
 	tp->snd_una = ack;
 }
 
@@ -3292,7 +3297,9 @@ static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
 {
 	u32 delta = seq - tp->rcv_nxt;
 
+	u64_stats_update_begin(&tp->syncp);
 	tp->bytes_received += delta;
+	u64_stats_update_end(&tp->syncp);
 	tp->rcv_nxt = seq;
 }
 
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index e5d7649136fc..17e7339ee5ca 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -300,7 +300,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		tw->tw_v6_daddr = sk->sk_v6_daddr;
 		tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
 		tw->tw_tclass = np->tclass;
-		tw->tw_flowlabel = np->flow_label >> 12;
+		tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
 		tw->tw_ipv6only = sk->sk_ipv6only;
 	}
 #endif
@@ -420,7 +420,10 @@ void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
 		rcu_read_unlock();
 	}
 
-	if (!ca_got_dst && !try_module_get(icsk->icsk_ca_ops->owner))
+	/* If no valid choice made yet, assign current system default ca. */
+	if (!ca_got_dst &&
+	    (!icsk->icsk_ca_setsockopt ||
+	     !try_module_get(icsk->icsk_ca_ops->owner)))
 		tcp_assign_congestion_control(sk);
 
 	tcp_set_ca_state(sk, TCP_CA_Open);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d10b7e0112eb..1c92ea67baef 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1345,10 +1345,8 @@ csum_copy_err:
 	}
 	unlock_sock_fast(sk, slow);
 
-	if (noblock)
-		return -EAGAIN;
-
-	/* starting over for a new packet */
+	/* starting over for a new packet, but check if we need to yield */
+	cond_resched();
 	msg->msg_flags &= ~MSG_TRUNC;
 	goto try_again;
 }
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 31f1b5d5e2ef..7c07ce36aae2 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -248,7 +248,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
 	aead_givcrypt_set_assoc(req, asg, assoclen);
 	aead_givcrypt_set_giv(req, esph->enc_data,
-			      XFRM_SKB_CB(skb)->seq.output.low);
+			      XFRM_SKB_CB(skb)->seq.output.low +
+			      ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
 
 	ESP_SKB_CB(skb)->tmp = tmp;
 	err = crypto_aead_givencrypt(req);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 96dbffff5a24..bde57b113009 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -693,6 +693,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 {
 	struct rt6_info *iter = NULL;
 	struct rt6_info **ins;
+	struct rt6_info **fallback_ins = NULL;
 	int replace = (info->nlh &&
 		       (info->nlh->nlmsg_flags & NLM_F_REPLACE));
 	int add = (!info->nlh ||
@@ -716,8 +717,13 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 			(info->nlh->nlmsg_flags & NLM_F_EXCL))
 			return -EEXIST;
 		if (replace) {
-			found++;
-			break;
+			if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) {
+				found++;
+				break;
+			}
+			if (rt_can_ecmp)
+				fallback_ins = fallback_ins ?: ins;
+			goto next_iter;
 		}
 
 		if (iter->dst.dev == rt->dst.dev &&
@@ -753,9 +759,17 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 		if (iter->rt6i_metric > rt->rt6i_metric)
 			break;
 
+next_iter:
 		ins = &iter->dst.rt6_next;
 	}
 
+	if (fallback_ins && !found) {
+		/* No ECMP-able route found, replace first non-ECMP one */
+		ins = fallback_ins;
+		iter = *ins;
+		found++;
+	}
+
 	/* Reset round-robin state, if necessary */
 	if (ins == &fn->leaf)
 		fn->rr_ptr = NULL;
@@ -815,6 +829,8 @@ add:
 		}
 
 	} else {
+		int nsiblings;
+
 		if (!found) {
 			if (add)
 				goto add;
@@ -835,8 +851,27 @@ add:
 			info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
 			fn->fn_flags |= RTN_RTINFO;
 		}
+		nsiblings = iter->rt6i_nsiblings;
 		fib6_purge_rt(iter, fn, info->nl_net);
 		rt6_release(iter);
+
+		if (nsiblings) {
+			/* Replacing an ECMP route, remove all siblings */
+			ins = &rt->dst.rt6_next;
+			iter = *ins;
+			while (iter) {
+				if (rt6_qualify_for_ecmp(iter)) {
+					*ins = iter->dst.rt6_next;
+					fib6_purge_rt(iter, fn, info->nl_net);
+					rt6_release(iter);
+					nsiblings--;
+				} else {
+					ins = &iter->dst.rt6_next;
+				}
+				iter = *ins;
+			}
+			WARN_ON(nsiblings != 0);
+		}
 	}
 
 	return 0;
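The ECMP-replace hunk above walks the route list with a pointer-to-pointer cursor so that matching siblings can be unlinked in place without tracking a previous node. A generic userspace sketch of that unlink-while-walking idiom on a singly linked list (the node type and its ecmp flag are hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        int ecmp;          /* stand-in for rt6_qualify_for_ecmp() */
        struct node *next;
    };

    /* Remove every node for which 'ecmp' is set, advancing either the
     * link being rewritten or the cursor, like the fib6 sibling loop. */
    static void remove_ecmp_siblings(struct node **ins)
    {
        struct node *iter = *ins;

        while (iter) {
            if (iter->ecmp) {
                *ins = iter->next;     /* unlink */
                free(iter);
            } else {
                ins = &iter->next;     /* keep, move the cursor */
            }
            iter = *ins;
        }
    }

    int main(void)
    {
        struct node *head = NULL, **tail = &head;
        int flags[] = { 1, 0, 1, 1, 0 };

        for (unsigned i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) {
            struct node *n = calloc(1, sizeof(*n));
            n->ecmp = flags[i];
            *tail = n;
            tail = &n->next;
        }
        remove_ecmp_siblings(&head);
        for (struct node *n = head; n; n = n->next)
            printf("%d ", n->ecmp);    /* prints: 0 0 */
        printf("\n");
        return 0;
    }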
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index c21777565c58..bc09cb97b840 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1300,8 +1300,10 @@ emsgsize:
 
 	/* If this is the first and only packet and device
 	 * supports checksum offloading, let's use it.
+	 * Use transhdrlen, same as IPv4, because partial
+	 * sums only work when transhdrlen is set.
 	 */
-	if (!skb && sk->sk_protocol == IPPROTO_UDP &&
+	if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
 	    length + fragheaderlen < mtu &&
 	    rt->dst.dev->features & NETIF_F_V6_CSUM &&
 	    !exthdrlen)
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index ed9d681207fa..0224c032dca5 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -322,7 +322,6 @@ static int vti6_rcv(struct sk_buff *skb)
 		}
 
 		XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t;
-		skb->mark = be32_to_cpu(t->parms.i_key);
 
 		rcu_read_unlock();
 
@@ -342,6 +341,8 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
 	struct pcpu_sw_netstats *tstats;
 	struct xfrm_state *x;
 	struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6;
+	u32 orig_mark = skb->mark;
+	int ret;
 
 	if (!t)
 		return 1;
@@ -358,7 +359,11 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
 	x = xfrm_input_state(skb);
 	family = x->inner_mode->afinfo->family;
 
-	if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
+	skb->mark = be32_to_cpu(t->parms.i_key);
+	ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
+	skb->mark = orig_mark;
+
+	if (!ret)
 		return -EPERM;
 
 	skb_scrub_packet(skb, !net_eq(t->net, dev_net(skb->dev)));
@@ -430,6 +435,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 	struct net_device *tdev;
 	struct xfrm_state *x;
 	int err = -1;
+	int mtu;
 
 	if (!dst)
 		goto tx_err_link_failure;
@@ -463,6 +469,19 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 	skb_dst_set(skb, dst);
 	skb->dev = skb_dst(skb)->dev;
 
+	mtu = dst_mtu(dst);
+	if (!skb->ignore_df && skb->len > mtu) {
+		skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
+
+		if (skb->protocol == htons(ETH_P_IPV6))
+			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+		else
+			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+				  htonl(mtu));
+
+		return -EMSGSIZE;
+	}
+
 	err = dst_output(skb);
 	if (net_xmit_eval(err) == 0) {
 		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
@@ -495,7 +514,6 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 	int ret;
 
 	memset(&fl, 0, sizeof(fl));
-	skb->mark = be32_to_cpu(t->parms.o_key);
 
 	switch (skb->protocol) {
 	case htons(ETH_P_IPV6):
@@ -516,6 +534,9 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto tx_err;
 	}
 
+	/* override mark with tunnel output key */
+	fl.flowi_mark = be32_to_cpu(t->parms.o_key);
+
 	ret = vti6_xmit(skb, dev, &fl);
 	if (ret < 0)
 		goto tx_err;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 1a732a1d3c8e..62f5b0d0bc9b 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1275,6 +1275,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
 	/* overflow check */
 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
 		return -ENOMEM;
+	if (tmp.num_counters == 0)
+		return -EINVAL;
+
 	tmp.name[sizeof(tmp.name)-1] = 0;
 
 	newinfo = xt_alloc_table_info(tmp.size);
@@ -1822,6 +1825,9 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
 	return -ENOMEM;
 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
 		return -ENOMEM;
+	if (tmp.num_counters == 0)
+		return -EINVAL;
+
 	tmp.name[sizeof(tmp.name)-1] = 0;
 
 	newinfo = xt_alloc_table_info(tmp.size);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index d3588885f097..c73ae5039e46 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2504,9 +2504,9 @@ static int ip6_route_multipath(struct fib6_config *cfg, int add)
 	int attrlen;
 	int err = 0, last_err = 0;
 
+	remaining = cfg->fc_mp_len;
 beginning:
 	rtnh = (struct rtnexthop *)cfg->fc_mp;
-	remaining = cfg->fc_mp_len;
 
 	/* Parse a Multipath Entry */
 	while (rtnh_ok(rtnh, remaining)) {
@@ -2536,15 +2536,19 @@ beginning:
 			 * next hops that have been already added.
 			 */
 			add = 0;
+			remaining = cfg->fc_mp_len - remaining;
 			goto beginning;
 		}
 		}
 		/* Because each route is added like a single route we remove
-		 * this flag after the first nexthop (if there is a collision,
-		 * we have already fail to add the first nexthop:
-		 * fib6_add_rt2node() has reject it).
+		 * these flags after the first nexthop: if there is a collision,
+		 * we have already failed to add the first nexthop:
+		 * fib6_add_rt2node() has rejected it; when replacing, old
+		 * nexthops have been replaced by first new, the rest should
+		 * be added to it.
 		 */
-		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~NLM_F_EXCL;
+		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
+						     NLM_F_REPLACE);
 		rtnh = rtnh_next(rtnh, &remaining);
 	}
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index b6575d665568..3adffb300238 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -914,7 +914,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 			tcp_time_stamp + tcptw->tw_ts_offset,
 			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
-			tw->tw_tclass, (tw->tw_flowlabel << 12));
+			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
 
 	inet_twsk_put(tw);
 }
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 3477c919fcc8..e51fc3eee6db 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -525,10 +525,8 @@ csum_copy_err:
 	}
 	unlock_sock_fast(sk, slow);
 
-	if (noblock)
-		return -EAGAIN;
-
-	/* starting over for a new packet */
+	/* starting over for a new packet, but check if we need to yield */
+	cond_resched();
 	msg->msg_flags &= ~MSG_TRUNC;
 	goto try_again;
 }
@@ -731,7 +729,9 @@ static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
 	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
 	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
 	     !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
-	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) ||
+	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
+	     !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
 		return false;
 	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
 		return false;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 265e42721a66..ff347a0eebd4 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2495,51 +2495,22 @@ static bool ieee80211_coalesce_started_roc(struct ieee80211_local *local,
 					   struct ieee80211_roc_work *new_roc,
 					   struct ieee80211_roc_work *cur_roc)
 {
-	unsigned long j = jiffies;
-	unsigned long cur_roc_end = cur_roc->hw_start_time +
-				    msecs_to_jiffies(cur_roc->duration);
-	struct ieee80211_roc_work *next_roc;
-	int new_dur;
+	unsigned long now = jiffies;
+	unsigned long remaining = cur_roc->hw_start_time +
+				  msecs_to_jiffies(cur_roc->duration) -
+				  now;
 
 	if (WARN_ON(!cur_roc->started || !cur_roc->hw_begun))
 		return false;
 
-	if (time_after(j + IEEE80211_ROC_MIN_LEFT, cur_roc_end))
+	/* if it doesn't fit entirely, schedule a new one */
+	if (new_roc->duration > jiffies_to_msecs(remaining))
 		return false;
 
 	ieee80211_handle_roc_started(new_roc);
 
-	new_dur = new_roc->duration - jiffies_to_msecs(cur_roc_end - j);
-
-	/* cur_roc is long enough - add new_roc to the dependents list. */
-	if (new_dur <= 0) {
-		list_add_tail(&new_roc->list, &cur_roc->dependents);
-		return true;
-	}
-
-	new_roc->duration = new_dur;
-
-	/*
-	 * if cur_roc was already coalesced before, we might
-	 * want to extend the next roc instead of adding
-	 * a new one.
-	 */
-	next_roc = list_entry(cur_roc->list.next,
-			      struct ieee80211_roc_work, list);
-	if (&next_roc->list != &local->roc_list &&
-	    next_roc->chan == new_roc->chan &&
-	    next_roc->sdata == new_roc->sdata &&
-	    !WARN_ON(next_roc->started)) {
-		list_add_tail(&new_roc->list, &next_roc->dependents);
-		next_roc->duration = max(next_roc->duration,
-					 new_roc->duration);
-		next_roc->type = max(next_roc->type, new_roc->type);
-		return true;
-	}
-
-	/* add right after cur_roc */
-	list_add(&new_roc->list, &cur_roc->list);
-
+	/* add to dependents so we send the expired event properly */
+	list_add_tail(&new_roc->list, &cur_roc->dependents);
 	return true;
 }
 
@@ -2652,17 +2623,9 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
 		 * In the offloaded ROC case, if it hasn't begun, add
 		 * this new one to the dependent list to be handled
 		 * when the master one begins. If it has begun,
-		 * check that there's still a minimum time left and
-		 * if so, start this one, transmitting the frame, but
-		 * add it to the list directly after this one with
-		 * a reduced time so we'll ask the driver to execute
-		 * it right after finishing the previous one, in the
-		 * hope that it'll also be executed right afterwards,
-		 * effectively extending the old one.
-		 * If there's no minimum time left, just add it to the
-		 * normal list.
-		 * TODO: the ROC type is ignored here, assuming that it
-		 * is better to immediately use the current ROC.
+		 * check if it fits entirely within the existing one,
+		 * in which case it will just be dependent as well.
+		 * Otherwise, schedule it by itself.
 		 */
 		if (!tmp->hw_begun) {
 			list_add_tail(&roc->list, &tmp->dependents);
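The coalescing rewrite above only piggybacks a new remain-on-channel request when its whole duration fits in what is left of the one already running. A simplified sketch of that fits-in-remaining computation, using plain milliseconds instead of jiffies:

    #include <stdbool.h>
    #include <stdio.h>

    /* Does a new request of new_dur_ms fit entirely within what remains of a
     * request that started at start_ms and runs for cur_dur_ms, given now_ms? */
    static bool roc_fits(unsigned long start_ms, unsigned long cur_dur_ms,
                         unsigned long now_ms, unsigned long new_dur_ms)
    {
        unsigned long remaining = start_ms + cur_dur_ms - now_ms;

        return new_dur_ms <= remaining;
    }

    int main(void)
    {
        /* 100 ms ROC started at t=0; at t=40 a 50 ms request still fits,
         * an 80 ms request does not and must be scheduled by itself. */
        printf("%d\n", roc_fits(0, 100, 40, 50));   /* 1 */
        printf("%d\n", roc_fits(0, 100, 40, 80));   /* 0 */
        return 0;
    }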
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index ab46ab4a7249..c0a9187bc3a9 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -205,6 +205,8 @@ enum ieee80211_packet_rx_flags {
  * @IEEE80211_RX_CMNTR: received on cooked monitor already
  * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
  *	to cfg80211_report_obss_beacon().
+ * @IEEE80211_RX_REORDER_TIMER: this frame is released by the
+ *	reorder buffer timeout timer, not the normal RX path
  *
  * These flags are used across handling multiple interfaces
  * for a single frame.
@@ -212,6 +214,7 @@ enum ieee80211_packet_rx_flags {
 enum ieee80211_rx_flags {
 	IEEE80211_RX_CMNTR		= BIT(0),
 	IEEE80211_RX_BEACON_REPORTED	= BIT(1),
+	IEEE80211_RX_REORDER_TIMER	= BIT(2),
 };
 
 struct ieee80211_rx_data {
@@ -325,12 +328,6 @@ struct mesh_preq_queue {
 	u8 flags;
 };
 
-#if HZ/100 == 0
-#define IEEE80211_ROC_MIN_LEFT	1
-#else
-#define IEEE80211_ROC_MIN_LEFT	(HZ/100)
-#endif
-
 struct ieee80211_roc_work {
 	struct list_head list;
 	struct list_head dependents;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index bab5c63c0bad..84cef600c573 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -522,6 +522,12 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 		memcpy(sdata->vif.hw_queue, master->vif.hw_queue,
 		       sizeof(sdata->vif.hw_queue));
 		sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef;
+
+		mutex_lock(&local->key_mtx);
+		sdata->crypto_tx_tailroom_needed_cnt +=
+			master->crypto_tx_tailroom_needed_cnt;
+		mutex_unlock(&local->key_mtx);
+
 		break;
 		}
 	case NL80211_IFTYPE_AP:
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 2291cd730091..a907f2d5c12d 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -58,6 +58,22 @@ static void assert_key_lock(struct ieee80211_local *local)
 	lockdep_assert_held(&local->key_mtx);
 }
 
+static void
+update_vlan_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta)
+{
+	struct ieee80211_sub_if_data *vlan;
+
+	if (sdata->vif.type != NL80211_IFTYPE_AP)
+		return;
+
+	mutex_lock(&sdata->local->mtx);
+
+	list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+		vlan->crypto_tx_tailroom_needed_cnt += delta;
+
+	mutex_unlock(&sdata->local->mtx);
+}
+
 static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
 {
 	/*
@@ -79,6 +95,8 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
 	 * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net
 	 */
 
+	update_vlan_tailroom_need_count(sdata, 1);
+
 	if (!sdata->crypto_tx_tailroom_needed_cnt++) {
 		/*
 		 * Flush all XMIT packets currently using HW encryption or no
@@ -88,6 +106,15 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
 	}
 }
 
+static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata,
+					 int delta)
+{
+	WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta);
+
+	update_vlan_tailroom_need_count(sdata, -delta);
+	sdata->crypto_tx_tailroom_needed_cnt -= delta;
+}
+
 static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 {
 	struct ieee80211_sub_if_data *sdata;
@@ -144,7 +171,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 
 	if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
 	      (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
-		sdata->crypto_tx_tailroom_needed_cnt--;
+		decrease_tailroom_need_count(sdata, 1);
 
 	WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
 		(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV));
@@ -541,7 +568,7 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key,
 			schedule_delayed_work(&sdata->dec_tailroom_needed_wk,
 					      HZ/2);
 		} else {
-			sdata->crypto_tx_tailroom_needed_cnt--;
+			decrease_tailroom_need_count(sdata, 1);
 		}
 	}
 
@@ -631,6 +658,7 @@ void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom)
 void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_key *key;
+	struct ieee80211_sub_if_data *vlan;
 
 	ASSERT_RTNL();
 
@@ -639,7 +667,14 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
 
 	mutex_lock(&sdata->local->key_mtx);
 
-	sdata->crypto_tx_tailroom_needed_cnt = 0;
+	WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
+		     sdata->crypto_tx_tailroom_pending_dec);
+
+	if (sdata->vif.type == NL80211_IFTYPE_AP) {
+		list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+			WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt ||
+				     vlan->crypto_tx_tailroom_pending_dec);
+	}
 
 	list_for_each_entry(key, &sdata->key_list, list) {
 		increment_tailroom_need_count(sdata);
@@ -649,6 +684,22 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
 	mutex_unlock(&sdata->local->key_mtx);
 }
 
+void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_sub_if_data *vlan;
+
+	mutex_lock(&sdata->local->key_mtx);
+
+	sdata->crypto_tx_tailroom_needed_cnt = 0;
+
+	if (sdata->vif.type == NL80211_IFTYPE_AP) {
+		list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+			vlan->crypto_tx_tailroom_needed_cnt = 0;
+	}
+
+	mutex_unlock(&sdata->local->key_mtx);
+}
+
 void ieee80211_iter_keys(struct ieee80211_hw *hw,
 			 struct ieee80211_vif *vif,
 			 void (*iter)(struct ieee80211_hw *hw,
@@ -688,8 +739,8 @@ static void ieee80211_free_keys_iface(struct ieee80211_sub_if_data *sdata,
 {
 	struct ieee80211_key *key, *tmp;
 
-	sdata->crypto_tx_tailroom_needed_cnt -=
-		sdata->crypto_tx_tailroom_pending_dec;
+	decrease_tailroom_need_count(sdata,
+				     sdata->crypto_tx_tailroom_pending_dec);
 	sdata->crypto_tx_tailroom_pending_dec = 0;
 
 	ieee80211_debugfs_key_remove_mgmt_default(sdata);
@@ -709,6 +760,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_sub_if_data *vlan;
+	struct ieee80211_sub_if_data *master;
 	struct ieee80211_key *key, *tmp;
 	LIST_HEAD(keys);
 
@@ -728,8 +780,20 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
 	list_for_each_entry_safe(key, tmp, &keys, list)
 		__ieee80211_key_destroy(key, false);
 
731 | WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt || | 783 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { |
732 | sdata->crypto_tx_tailroom_pending_dec); | 784 | if (sdata->bss) { |
785 | master = container_of(sdata->bss, | ||
786 | struct ieee80211_sub_if_data, | ||
787 | u.ap); | ||
788 | |||
789 | WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt != | ||
790 | master->crypto_tx_tailroom_needed_cnt); | ||
791 | } | ||
792 | } else { | ||
793 | WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt || | ||
794 | sdata->crypto_tx_tailroom_pending_dec); | ||
795 | } | ||
796 | |||
733 | if (sdata->vif.type == NL80211_IFTYPE_AP) { | 797 | if (sdata->vif.type == NL80211_IFTYPE_AP) { |
734 | list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) | 798 | list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) |
735 | WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt || | 799 | WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt || |
@@ -793,8 +857,8 @@ void ieee80211_delayed_tailroom_dec(struct work_struct *wk) | |||
793 | */ | 857 | */ |
794 | 858 | ||
795 | mutex_lock(&sdata->local->key_mtx); | 859 | mutex_lock(&sdata->local->key_mtx); |
796 | sdata->crypto_tx_tailroom_needed_cnt -= | 860 | decrease_tailroom_need_count(sdata, |
797 | sdata->crypto_tx_tailroom_pending_dec; | 861 | sdata->crypto_tx_tailroom_pending_dec); |
798 | sdata->crypto_tx_tailroom_pending_dec = 0; | 862 | sdata->crypto_tx_tailroom_pending_dec = 0; |
799 | mutex_unlock(&sdata->local->key_mtx); | 863 | mutex_unlock(&sdata->local->key_mtx); |
800 | } | 864 | } |
diff --git a/net/mac80211/key.h b/net/mac80211/key.h index c5a31835be0e..96557dd1e77d 100644 --- a/net/mac80211/key.h +++ b/net/mac80211/key.h | |||
@@ -161,6 +161,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata, | |||
161 | void ieee80211_free_sta_keys(struct ieee80211_local *local, | 161 | void ieee80211_free_sta_keys(struct ieee80211_local *local, |
162 | struct sta_info *sta); | 162 | struct sta_info *sta); |
163 | void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata); | 163 | void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata); |
164 | void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata); | ||
164 | 165 | ||
165 | #define key_mtx_dereference(local, ref) \ | 166 | #define key_mtx_dereference(local, ref) \ |
166 | rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx))) | 167 | rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx))) |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 260eed45b6d2..5793f75c5ffd 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -2121,7 +2121,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) | |||
2121 | /* deliver to local stack */ | 2121 | /* deliver to local stack */ |
2122 | skb->protocol = eth_type_trans(skb, dev); | 2122 | skb->protocol = eth_type_trans(skb, dev); |
2123 | memset(skb->cb, 0, sizeof(skb->cb)); | 2123 | memset(skb->cb, 0, sizeof(skb->cb)); |
2124 | if (rx->local->napi) | 2124 | if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) && |
2125 | rx->local->napi) | ||
2125 | napi_gro_receive(rx->local->napi, skb); | 2126 | napi_gro_receive(rx->local->napi, skb); |
2126 | else | 2127 | else |
2127 | netif_receive_skb(skb); | 2128 | netif_receive_skb(skb); |
@@ -3231,7 +3232,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) | |||
3231 | /* This is OK -- must be QoS data frame */ | 3232 | /* This is OK -- must be QoS data frame */ |
3232 | .security_idx = tid, | 3233 | .security_idx = tid, |
3233 | .seqno_idx = tid, | 3234 | .seqno_idx = tid, |
3234 | .flags = 0, | 3235 | .flags = IEEE80211_RX_REORDER_TIMER, |
3235 | }; | 3236 | }; |
3236 | struct tid_ampdu_rx *tid_agg_rx; | 3237 | struct tid_ampdu_rx *tid_agg_rx; |
3237 | 3238 | ||
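The rx.c hunks stop frames released by the RX reorder timer from going through napi_gro_receive(), since that release path does not run from the driver's NAPI poll; a flag in the on-stack rx data (IEEE80211_RX_REORDER_TIMER) routes them to netif_receive_skb() instead. A small sketch of that dispatch decision, using stand-in names (RX_FROM_TIMER, deliver_gro, deliver_plain) rather than the mac80211 symbols:

#include <stdbool.h>
#include <stdio.h>

#define RX_FROM_TIMER 0x1   /* stand-in for IEEE80211_RX_REORDER_TIMER */

static void deliver_gro(const char *what)   { printf("GRO:   %s\n", what); }
static void deliver_plain(const char *what) { printf("plain: %s\n", what); }

/* Use the GRO path only when NAPI is available and we are not being
 * called back from the reorder-release timer. */
static void deliver(unsigned int flags, bool have_napi, const char *what)
{
    if (!(flags & RX_FROM_TIMER) && have_napi)
        deliver_gro(what);
    else
        deliver_plain(what);
}

int main(void)
{
    deliver(0, true, "frame from the NAPI poll path");
    deliver(RX_FROM_TIMER, true, "frame released by the reorder timer");
    return 0;
}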
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 79412f16b61d..b864ebc6ab8f 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -2023,6 +2023,9 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
2023 | 2023 | ||
2024 | /* add back keys */ | 2024 | /* add back keys */ |
2025 | list_for_each_entry(sdata, &local->interfaces, list) | 2025 | list_for_each_entry(sdata, &local->interfaces, list) |
2026 | ieee80211_reset_crypto_tx_tailroom(sdata); | ||
2027 | |||
2028 | list_for_each_entry(sdata, &local->interfaces, list) | ||
2026 | if (ieee80211_sdata_running(sdata)) | 2029 | if (ieee80211_sdata_running(sdata)) |
2027 | ieee80211_enable_keys(sdata); | 2030 | ieee80211_enable_keys(sdata); |
2028 | 2031 | ||
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c index a4220e92f0cc..efa3f48f1ec5 100644 --- a/net/mac80211/wep.c +++ b/net/mac80211/wep.c | |||
@@ -98,8 +98,7 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local, | |||
98 | 98 | ||
99 | hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); | 99 | hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); |
100 | 100 | ||
101 | if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN || | 101 | if (WARN_ON(skb_headroom(skb) < IEEE80211_WEP_IV_LEN)) |
102 | skb_headroom(skb) < IEEE80211_WEP_IV_LEN)) | ||
103 | return NULL; | 102 | return NULL; |
104 | 103 | ||
105 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | 104 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
@@ -167,6 +166,9 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local, | |||
167 | size_t len; | 166 | size_t len; |
168 | u8 rc4key[3 + WLAN_KEY_LEN_WEP104]; | 167 | u8 rc4key[3 + WLAN_KEY_LEN_WEP104]; |
169 | 168 | ||
169 | if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN)) | ||
170 | return -1; | ||
171 | |||
170 | iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx); | 172 | iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx); |
171 | if (!iv) | 173 | if (!iv) |
172 | return -1; | 174 | return -1; |
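The wep.c change splits the buffer-space checks so each sits next to the operation that needs it: ieee80211_wep_add_iv() now only warns about missing headroom for the IV it prepends, while the ICV tailroom check moves into ieee80211_wep_encrypt(), whose path appends the ICV. A generic sketch of checking head and tail space at the point of use, with a toy buffer struct instead of sk_buff:

#include <stdio.h>
#include <string.h>

#define IV_LEN  4
#define ICV_LEN 4

struct buf {
    unsigned char data[128];
    size_t head;    /* free space before the payload */
    size_t len;     /* payload length                */
    size_t size;    /* total buffer size             */
};

static size_t headroom(const struct buf *b) { return b->head; }
static size_t tailroom(const struct buf *b) { return b->size - b->head - b->len; }

/* Prepend the IV: only headroom matters here. */
static int add_iv(struct buf *b, const unsigned char iv[IV_LEN])
{
    if (headroom(b) < IV_LEN)
        return -1;
    b->head -= IV_LEN;
    b->len += IV_LEN;
    memcpy(b->data + b->head, iv, IV_LEN);
    return 0;
}

/* Append the ICV: tailroom is checked here, where it is consumed. */
static int add_icv(struct buf *b, const unsigned char icv[ICV_LEN])
{
    if (tailroom(b) < ICV_LEN)
        return -1;
    memcpy(b->data + b->head + b->len, icv, ICV_LEN);
    b->len += ICV_LEN;
    return 0;
}

int main(void)
{
    struct buf b = { .head = 16, .len = 32, .size = sizeof(b.data) };
    unsigned char iv[IV_LEN] = {0}, icv[ICV_LEN] = {0};

    printf("iv: %d, icv: %d\n", add_iv(&b, iv), add_icv(&b, icv));
    return 0;
}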
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index f70e34a68f70..a0f3e6a3c7d1 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
@@ -863,6 +863,7 @@ config NETFILTER_XT_TARGET_TPROXY | |||
863 | depends on NETFILTER_XTABLES | 863 | depends on NETFILTER_XTABLES |
864 | depends on NETFILTER_ADVANCED | 864 | depends on NETFILTER_ADVANCED |
865 | depends on (IPV6 || IPV6=n) | 865 | depends on (IPV6 || IPV6=n) |
866 | depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n) | ||
866 | depends on IP_NF_MANGLE | 867 | depends on IP_NF_MANGLE |
867 | select NF_DEFRAG_IPV4 | 868 | select NF_DEFRAG_IPV4 |
868 | select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES | 869 | select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES |
@@ -1356,6 +1357,7 @@ config NETFILTER_XT_MATCH_SOCKET | |||
1356 | depends on NETFILTER_ADVANCED | 1357 | depends on NETFILTER_ADVANCED |
1357 | depends on !NF_CONNTRACK || NF_CONNTRACK | 1358 | depends on !NF_CONNTRACK || NF_CONNTRACK |
1358 | depends on (IPV6 || IPV6=n) | 1359 | depends on (IPV6 || IPV6=n) |
1360 | depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n) | ||
1359 | select NF_DEFRAG_IPV4 | 1361 | select NF_DEFRAG_IPV4 |
1360 | select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES | 1362 | select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES |
1361 | help | 1363 | help |
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 49532672f66d..285eae3a1454 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
@@ -3823,6 +3823,9 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) | |||
3823 | cancel_work_sync(&ipvs->defense_work.work); | 3823 | cancel_work_sync(&ipvs->defense_work.work); |
3824 | unregister_net_sysctl_table(ipvs->sysctl_hdr); | 3824 | unregister_net_sysctl_table(ipvs->sysctl_hdr); |
3825 | ip_vs_stop_estimator(net, &ipvs->tot_stats); | 3825 | ip_vs_stop_estimator(net, &ipvs->tot_stats); |
3826 | |||
3827 | if (!net_eq(net, &init_net)) | ||
3828 | kfree(ipvs->sysctl_tbl); | ||
3826 | } | 3829 | } |
3827 | 3830 | ||
3828 | #else | 3831 | #else |
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 5caa0c41bf26..70383de72054 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c | |||
@@ -202,7 +202,7 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { | |||
202 | * sES -> sES :-) | 202 | * sES -> sES :-) |
203 | * sFW -> sCW Normal close request answered by ACK. | 203 | * sFW -> sCW Normal close request answered by ACK. |
204 | * sCW -> sCW | 204 | * sCW -> sCW |
205 | * sLA -> sTW Last ACK detected. | 205 | * sLA -> sTW Last ACK detected (RFC5961 challenged) |
206 | * sTW -> sTW Retransmitted last ACK. Remain in the same state. | 206 | * sTW -> sTW Retransmitted last ACK. Remain in the same state. |
207 | * sCL -> sCL | 207 | * sCL -> sCL |
208 | */ | 208 | */ |
@@ -261,7 +261,7 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { | |||
261 | * sES -> sES :-) | 261 | * sES -> sES :-) |
262 | * sFW -> sCW Normal close request answered by ACK. | 262 | * sFW -> sCW Normal close request answered by ACK. |
263 | * sCW -> sCW | 263 | * sCW -> sCW |
264 | * sLA -> sTW Last ACK detected. | 264 | * sLA -> sTW Last ACK detected (RFC5961 challenged) |
265 | * sTW -> sTW Retransmitted last ACK. | 265 | * sTW -> sTW Retransmitted last ACK. |
266 | * sCL -> sCL | 266 | * sCL -> sCL |
267 | */ | 267 | */ |
@@ -906,6 +906,7 @@ static int tcp_packet(struct nf_conn *ct, | |||
906 | 1 : ct->proto.tcp.last_win; | 906 | 1 : ct->proto.tcp.last_win; |
907 | ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale = | 907 | ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale = |
908 | ct->proto.tcp.last_wscale; | 908 | ct->proto.tcp.last_wscale; |
909 | ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK; | ||
909 | ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags = | 910 | ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags = |
910 | ct->proto.tcp.last_flags; | 911 | ct->proto.tcp.last_flags; |
911 | memset(&ct->proto.tcp.seen[dir], 0, | 912 | memset(&ct->proto.tcp.seen[dir], 0, |
@@ -923,7 +924,9 @@ static int tcp_packet(struct nf_conn *ct, | |||
923 | * may be in sync but we are not. In that case, we annotate | 924 | * may be in sync but we are not. In that case, we annotate |
924 | * the TCP options and let the packet go through. If it is a | 925 | * the TCP options and let the packet go through. If it is a |
925 | * valid SYN packet, the server will reply with a SYN/ACK, and | 926 | * valid SYN packet, the server will reply with a SYN/ACK, and |
926 | * then we'll get in sync. Otherwise, the server ignores it. */ | 927 | * then we'll get in sync. Otherwise, the server potentially |
928 | * responds with a challenge ACK if implementing RFC5961. | ||
929 | */ | ||
927 | if (index == TCP_SYN_SET && dir == IP_CT_DIR_ORIGINAL) { | 930 | if (index == TCP_SYN_SET && dir == IP_CT_DIR_ORIGINAL) { |
928 | struct ip_ct_tcp_state seen = {}; | 931 | struct ip_ct_tcp_state seen = {}; |
929 | 932 | ||
@@ -939,6 +942,13 @@ static int tcp_packet(struct nf_conn *ct, | |||
939 | ct->proto.tcp.last_flags |= | 942 | ct->proto.tcp.last_flags |= |
940 | IP_CT_TCP_FLAG_SACK_PERM; | 943 | IP_CT_TCP_FLAG_SACK_PERM; |
941 | } | 944 | } |
945 | /* Mark the potential for RFC5961 challenge ACK, | ||
946 | * this poses a special problem for the LAST_ACK state | ||
947 | * as the ACK is interpreted as ACKing the last FIN. | ||
948 | */ | ||
949 | if (old_state == TCP_CONNTRACK_LAST_ACK) | ||
950 | ct->proto.tcp.last_flags |= | ||
951 | IP_CT_EXP_CHALLENGE_ACK; | ||
942 | } | 952 | } |
943 | spin_unlock_bh(&ct->lock); | 953 | spin_unlock_bh(&ct->lock); |
944 | if (LOG_INVALID(net, IPPROTO_TCP)) | 954 | if (LOG_INVALID(net, IPPROTO_TCP)) |
@@ -970,6 +980,25 @@ static int tcp_packet(struct nf_conn *ct, | |||
970 | nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, | 980 | nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, |
971 | "nf_ct_tcp: invalid state "); | 981 | "nf_ct_tcp: invalid state "); |
972 | return -NF_ACCEPT; | 982 | return -NF_ACCEPT; |
983 | case TCP_CONNTRACK_TIME_WAIT: | ||
984 | /* RFC5961 compliance causes the stack to send a "challenge-ACK" | ||
985 | * e.g. in response to spurious SYNs. Conntrack MUST | ||
986 | * not believe this ACK is acking the last FIN. | ||
987 | */ | ||
988 | if (old_state == TCP_CONNTRACK_LAST_ACK && | ||
989 | index == TCP_ACK_SET && | ||
990 | ct->proto.tcp.last_dir != dir && | ||
991 | ct->proto.tcp.last_index == TCP_SYN_SET && | ||
992 | (ct->proto.tcp.last_flags & IP_CT_EXP_CHALLENGE_ACK)) { | ||
993 | /* Detected RFC5961 challenge ACK */ | ||
994 | ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK; | ||
995 | spin_unlock_bh(&ct->lock); | ||
996 | if (LOG_INVALID(net, IPPROTO_TCP)) | ||
997 | nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, | ||
998 | "nf_ct_tcp: challenge-ACK ignored "); | ||
999 | return NF_ACCEPT; /* Don't change state */ | ||
1000 | } | ||
1001 | break; | ||
973 | case TCP_CONNTRACK_CLOSE: | 1002 | case TCP_CONNTRACK_CLOSE: |
974 | if (index == TCP_RST_SET | 1003 | if (index == TCP_RST_SET |
975 | && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) | 1004 | && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) |
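The conntrack TCP hunks handle RFC 5961 challenge ACKs in two steps: a SYN seen while the connection is in LAST_ACK records IP_CT_EXP_CHALLENGE_ACK, and a subsequent ACK from the opposite direction is then treated as the challenge ACK and accepted without advancing the state to TIME_WAIT. A compact user-space model of that flag handshake follows; CT_LAST_ACK, CT_TIME_WAIT and EXP_CHALLENGE_ACK are stand-in names, and the model ignores the other checks (last_index, window tracking) the real code performs.

#include <stdio.h>

enum ct_state { CT_LAST_ACK, CT_TIME_WAIT };
enum pkt { PKT_SYN, PKT_ACK };

#define EXP_CHALLENGE_ACK 0x1

struct ct {
    enum ct_state state;
    unsigned int last_flags;
    int last_dir;
};

/* Returns the state the tracker records after seeing this packet. */
static enum ct_state track(struct ct *ct, enum pkt pkt, int dir)
{
    if (ct->state == CT_LAST_ACK && pkt == PKT_SYN) {
        /* A spurious SYN may provoke a challenge ACK from the peer;
         * remember that so the ACK is not mistaken for the final ACK. */
        ct->last_flags |= EXP_CHALLENGE_ACK;
        ct->last_dir = dir;
        return ct->state;
    }

    if (ct->state == CT_LAST_ACK && pkt == PKT_ACK &&
        dir != ct->last_dir && (ct->last_flags & EXP_CHALLENGE_ACK)) {
        /* Challenge ACK detected: ignore it, stay in LAST_ACK. */
        ct->last_flags &= ~EXP_CHALLENGE_ACK;
        return ct->state;
    }

    if (ct->state == CT_LAST_ACK && pkt == PKT_ACK)
        ct->state = CT_TIME_WAIT;   /* the real final ACK */
    return ct->state;
}

int main(void)
{
    struct ct ct = { CT_LAST_ACK, 0, 0 };

    track(&ct, PKT_SYN, 0);                                       /* spurious SYN  */
    printf("after challenge ACK: %d\n", track(&ct, PKT_ACK, 1));  /* stays 0       */
    printf("after real ACK:      %d\n", track(&ct, PKT_ACK, 1));  /* moves to 1    */
    return 0;
}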
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index ad9d11fb29fd..34ded09317e7 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -4472,9 +4472,9 @@ EXPORT_SYMBOL_GPL(nft_data_init); | |||
4472 | */ | 4472 | */ |
4473 | void nft_data_uninit(const struct nft_data *data, enum nft_data_types type) | 4473 | void nft_data_uninit(const struct nft_data *data, enum nft_data_types type) |
4474 | { | 4474 | { |
4475 | switch (type) { | 4475 | if (type < NFT_DATA_VERDICT) |
4476 | case NFT_DATA_VALUE: | ||
4477 | return; | 4476 | return; |
4477 | switch (type) { | ||
4478 | case NFT_DATA_VERDICT: | 4478 | case NFT_DATA_VERDICT: |
4479 | return nft_verdict_uninit(data); | 4479 | return nft_verdict_uninit(data); |
4480 | default: | 4480 | default: |
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 3ad91266c821..4ef1fae8445e 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
@@ -1073,7 +1073,13 @@ static struct pernet_operations nfnl_log_net_ops = { | |||
1073 | 1073 | ||
1074 | static int __init nfnetlink_log_init(void) | 1074 | static int __init nfnetlink_log_init(void) |
1075 | { | 1075 | { |
1076 | int status = -ENOMEM; | 1076 | int status; |
1077 | |||
1078 | status = register_pernet_subsys(&nfnl_log_net_ops); | ||
1079 | if (status < 0) { | ||
1080 | pr_err("failed to register pernet ops\n"); | ||
1081 | goto out; | ||
1082 | } | ||
1077 | 1083 | ||
1078 | netlink_register_notifier(&nfulnl_rtnl_notifier); | 1084 | netlink_register_notifier(&nfulnl_rtnl_notifier); |
1079 | status = nfnetlink_subsys_register(&nfulnl_subsys); | 1085 | status = nfnetlink_subsys_register(&nfulnl_subsys); |
@@ -1088,28 +1094,23 @@ static int __init nfnetlink_log_init(void) | |||
1088 | goto cleanup_subsys; | 1094 | goto cleanup_subsys; |
1089 | } | 1095 | } |
1090 | 1096 | ||
1091 | status = register_pernet_subsys(&nfnl_log_net_ops); | ||
1092 | if (status < 0) { | ||
1093 | pr_err("failed to register pernet ops\n"); | ||
1094 | goto cleanup_logger; | ||
1095 | } | ||
1096 | return status; | 1097 | return status; |
1097 | 1098 | ||
1098 | cleanup_logger: | ||
1099 | nf_log_unregister(&nfulnl_logger); | ||
1100 | cleanup_subsys: | 1099 | cleanup_subsys: |
1101 | nfnetlink_subsys_unregister(&nfulnl_subsys); | 1100 | nfnetlink_subsys_unregister(&nfulnl_subsys); |
1102 | cleanup_netlink_notifier: | 1101 | cleanup_netlink_notifier: |
1103 | netlink_unregister_notifier(&nfulnl_rtnl_notifier); | 1102 | netlink_unregister_notifier(&nfulnl_rtnl_notifier); |
1103 | unregister_pernet_subsys(&nfnl_log_net_ops); | ||
1104 | out: | ||
1104 | return status; | 1105 | return status; |
1105 | } | 1106 | } |
1106 | 1107 | ||
1107 | static void __exit nfnetlink_log_fini(void) | 1108 | static void __exit nfnetlink_log_fini(void) |
1108 | { | 1109 | { |
1109 | unregister_pernet_subsys(&nfnl_log_net_ops); | ||
1110 | nf_log_unregister(&nfulnl_logger); | 1110 | nf_log_unregister(&nfulnl_logger); |
1111 | nfnetlink_subsys_unregister(&nfulnl_subsys); | 1111 | nfnetlink_subsys_unregister(&nfulnl_subsys); |
1112 | netlink_unregister_notifier(&nfulnl_rtnl_notifier); | 1112 | netlink_unregister_notifier(&nfulnl_rtnl_notifier); |
1113 | unregister_pernet_subsys(&nfnl_log_net_ops); | ||
1113 | } | 1114 | } |
1114 | 1115 | ||
1115 | MODULE_DESCRIPTION("netfilter userspace logging"); | 1116 | MODULE_DESCRIPTION("netfilter userspace logging"); |
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 0b98c7420239..11c7682fa0ea 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c | |||
@@ -1317,7 +1317,13 @@ static struct pernet_operations nfnl_queue_net_ops = { | |||
1317 | 1317 | ||
1318 | static int __init nfnetlink_queue_init(void) | 1318 | static int __init nfnetlink_queue_init(void) |
1319 | { | 1319 | { |
1320 | int status = -ENOMEM; | 1320 | int status; |
1321 | |||
1322 | status = register_pernet_subsys(&nfnl_queue_net_ops); | ||
1323 | if (status < 0) { | ||
1324 | pr_err("nf_queue: failed to register pernet ops\n"); | ||
1325 | goto out; | ||
1326 | } | ||
1321 | 1327 | ||
1322 | netlink_register_notifier(&nfqnl_rtnl_notifier); | 1328 | netlink_register_notifier(&nfqnl_rtnl_notifier); |
1323 | status = nfnetlink_subsys_register(&nfqnl_subsys); | 1329 | status = nfnetlink_subsys_register(&nfqnl_subsys); |
@@ -1326,19 +1332,13 @@ static int __init nfnetlink_queue_init(void) | |||
1326 | goto cleanup_netlink_notifier; | 1332 | goto cleanup_netlink_notifier; |
1327 | } | 1333 | } |
1328 | 1334 | ||
1329 | status = register_pernet_subsys(&nfnl_queue_net_ops); | ||
1330 | if (status < 0) { | ||
1331 | pr_err("nf_queue: failed to register pernet ops\n"); | ||
1332 | goto cleanup_subsys; | ||
1333 | } | ||
1334 | register_netdevice_notifier(&nfqnl_dev_notifier); | 1335 | register_netdevice_notifier(&nfqnl_dev_notifier); |
1335 | nf_register_queue_handler(&nfqh); | 1336 | nf_register_queue_handler(&nfqh); |
1336 | return status; | 1337 | return status; |
1337 | 1338 | ||
1338 | cleanup_subsys: | ||
1339 | nfnetlink_subsys_unregister(&nfqnl_subsys); | ||
1340 | cleanup_netlink_notifier: | 1339 | cleanup_netlink_notifier: |
1341 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); | 1340 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); |
1341 | out: | ||
1342 | return status; | 1342 | return status; |
1343 | } | 1343 | } |
1344 | 1344 | ||
@@ -1346,9 +1346,9 @@ static void __exit nfnetlink_queue_fini(void) | |||
1346 | { | 1346 | { |
1347 | nf_unregister_queue_handler(); | 1347 | nf_unregister_queue_handler(); |
1348 | unregister_netdevice_notifier(&nfqnl_dev_notifier); | 1348 | unregister_netdevice_notifier(&nfqnl_dev_notifier); |
1349 | unregister_pernet_subsys(&nfnl_queue_net_ops); | ||
1350 | nfnetlink_subsys_unregister(&nfqnl_subsys); | 1349 | nfnetlink_subsys_unregister(&nfqnl_subsys); |
1351 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); | 1350 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); |
1351 | unregister_pernet_subsys(&nfnl_queue_net_ops); | ||
1352 | 1352 | ||
1353 | rcu_barrier(); /* Wait for completion of call_rcu()'s */ | 1353 | rcu_barrier(); /* Wait for completion of call_rcu()'s */ |
1354 | } | 1354 | } |
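Both nfnetlink init paths above move register_pernet_subsys() to the front of module init and the matching unregister to the end of module exit, so the per-net state is in place before the notifier and subsystem become reachable and teardown runs strictly in reverse. A generic sketch of that goto-based, last-in-first-out pattern, with placeholder registration functions rather than the real nfnetlink calls:

#include <stdio.h>

static int  register_pernet(void)     { puts("pernet registered");     return 0; }
static void unregister_pernet(void)   { puts("pernet unregistered");   }
static int  register_notifier(void)   { puts("notifier registered");   return 0; }
static void unregister_notifier(void) { puts("notifier unregistered"); }
static int  register_subsys(void)     { puts("subsys registered");     return 0; }
static void unregister_subsys(void)   { puts("subsys unregistered");   }

static int demo_init(void)
{
    int err;

    err = register_pernet();            /* first thing brought up       */
    if (err)
        goto out;
    err = register_notifier();
    if (err)
        goto cleanup_pernet;
    err = register_subsys();
    if (err)
        goto cleanup_notifier;
    return 0;

cleanup_notifier:
    unregister_notifier();
cleanup_pernet:
    unregister_pernet();                /* unwound last on failure      */
out:
    return err;
}

static void demo_exit(void)
{
    /* Tear down in the exact reverse order of demo_init(). */
    unregister_subsys();
    unregister_notifier();
    unregister_pernet();                /* last thing taken down        */
}

int main(void)
{
    if (demo_init() == 0)
        demo_exit();
    return 0;
}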
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index daa0b818174b..bf6e76643f78 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -89,7 +89,7 @@ static inline int netlink_is_kernel(struct sock *sk) | |||
89 | return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET; | 89 | return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET; |
90 | } | 90 | } |
91 | 91 | ||
92 | struct netlink_table *nl_table; | 92 | struct netlink_table *nl_table __read_mostly; |
93 | EXPORT_SYMBOL_GPL(nl_table); | 93 | EXPORT_SYMBOL_GPL(nl_table); |
94 | 94 | ||
95 | static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); | 95 | static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); |
@@ -1081,6 +1081,7 @@ static int netlink_insert(struct sock *sk, u32 portid) | |||
1081 | if (err) { | 1081 | if (err) { |
1082 | if (err == -EEXIST) | 1082 | if (err == -EEXIST) |
1083 | err = -EADDRINUSE; | 1083 | err = -EADDRINUSE; |
1084 | nlk_sk(sk)->portid = 0; | ||
1084 | sock_put(sk); | 1085 | sock_put(sk); |
1085 | } | 1086 | } |
1086 | 1087 | ||
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index b6ef9a04de06..a75864d93142 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -81,6 +81,11 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops) | |||
81 | struct tcf_proto_ops *t; | 81 | struct tcf_proto_ops *t; |
82 | int rc = -ENOENT; | 82 | int rc = -ENOENT; |
83 | 83 | ||
84 | /* Wait for outstanding call_rcu()s, if any, from a | ||
85 | * tcf_proto_ops's destroy() handler. | ||
86 | */ | ||
87 | rcu_barrier(); | ||
88 | |||
84 | write_lock(&cls_mod_lock); | 89 | write_lock(&cls_mod_lock); |
85 | list_for_each_entry(t, &tcf_proto_base, head) { | 90 | list_for_each_entry(t, &tcf_proto_base, head) { |
86 | if (t == ops) { | 91 | if (t == ops) { |
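The cls_api.c hunk inserts an rcu_barrier() so that call_rcu() callbacks queued by a classifier's destroy() path have finished before the ops structure is unlinked. A rough single-threaded analogy follows, where deferred callbacks are queued and must be drained before the ops they reference goes away; defer() and barrier() are stand-ins for call_rcu() and rcu_barrier(), not their real semantics.

#include <stdio.h>

typedef void (*cb_t)(void *);

#define MAX_PENDING 8
static struct { cb_t fn; void *arg; } pending[MAX_PENDING];
static int npending;

/* Stand-in for call_rcu(): queue a callback instead of running it now. */
static void defer(cb_t fn, void *arg)
{
    if (npending < MAX_PENDING) {
        pending[npending].fn = fn;
        pending[npending].arg = arg;
        npending++;
    }
}

/* Stand-in for rcu_barrier(): run every queued callback before returning. */
static void barrier(void)
{
    for (int i = 0; i < npending; i++)
        pending[i].fn(pending[i].arg);
    npending = 0;
}

struct ops { const char *name; };

/* A deferred callback that still dereferences the ops being removed. */
static void free_filter(void *arg)
{
    struct ops *owner = arg;

    printf("deferred free of a filter owned by %s\n", owner->name);
}

static void unregister_ops(struct ops *ops)
{
    barrier();                  /* drain callbacks that may still use *ops */
    printf("%s unregistered\n", ops->name);
}

int main(void)
{
    struct ops ops = { "cls_demo" };

    defer(free_filter, &ops);   /* queued by a destroy() path earlier */
    unregister_ops(&ops);
    return 0;
}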
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index ad9eed70bc8f..1e1c89e51a11 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -815,10 +815,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, | |||
815 | if (dev->flags & IFF_UP) | 815 | if (dev->flags & IFF_UP) |
816 | dev_deactivate(dev); | 816 | dev_deactivate(dev); |
817 | 817 | ||
818 | if (new && new->ops->attach) { | 818 | if (new && new->ops->attach) |
819 | new->ops->attach(new); | 819 | goto skip; |
820 | num_q = 0; | ||
821 | } | ||
822 | 820 | ||
823 | for (i = 0; i < num_q; i++) { | 821 | for (i = 0; i < num_q; i++) { |
824 | struct netdev_queue *dev_queue = dev_ingress_queue(dev); | 822 | struct netdev_queue *dev_queue = dev_ingress_queue(dev); |
@@ -834,12 +832,16 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, | |||
834 | qdisc_destroy(old); | 832 | qdisc_destroy(old); |
835 | } | 833 | } |
836 | 834 | ||
835 | skip: | ||
837 | if (!ingress) { | 836 | if (!ingress) { |
838 | notify_and_destroy(net, skb, n, classid, | 837 | notify_and_destroy(net, skb, n, classid, |
839 | dev->qdisc, new); | 838 | dev->qdisc, new); |
840 | if (new && !new->ops->attach) | 839 | if (new && !new->ops->attach) |
841 | atomic_inc(&new->refcnt); | 840 | atomic_inc(&new->refcnt); |
842 | dev->qdisc = new ? : &noop_qdisc; | 841 | dev->qdisc = new ? : &noop_qdisc; |
842 | |||
843 | if (new && new->ops->attach) | ||
844 | new->ops->attach(new); | ||
843 | } else { | 845 | } else { |
844 | notify_and_destroy(net, skb, n, classid, old, new); | 846 | notify_and_destroy(net, skb, n, classid, old, new); |
845 | } | 847 | } |
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 46568b85c333..055453d48668 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c | |||
@@ -338,7 +338,7 @@ int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi, | |||
338 | fi, tos, type, nlflags, | 338 | fi, tos, type, nlflags, |
339 | tb_id); | 339 | tb_id); |
340 | if (!err) | 340 | if (!err) |
341 | fi->fib_flags |= RTNH_F_EXTERNAL; | 341 | fi->fib_flags |= RTNH_F_OFFLOAD; |
342 | } | 342 | } |
343 | 343 | ||
344 | return err; | 344 | return err; |
@@ -364,7 +364,7 @@ int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi, | |||
364 | const struct swdev_ops *ops; | 364 | const struct swdev_ops *ops; |
365 | int err = 0; | 365 | int err = 0; |
366 | 366 | ||
367 | if (!(fi->fib_flags & RTNH_F_EXTERNAL)) | 367 | if (!(fi->fib_flags & RTNH_F_OFFLOAD)) |
368 | return 0; | 368 | return 0; |
369 | 369 | ||
370 | dev = netdev_switch_get_dev_by_nhs(fi); | 370 | dev = netdev_switch_get_dev_by_nhs(fi); |
@@ -376,7 +376,7 @@ int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi, | |||
376 | err = ops->swdev_fib_ipv4_del(dev, htonl(dst), dst_len, | 376 | err = ops->swdev_fib_ipv4_del(dev, htonl(dst), dst_len, |
377 | fi, tos, type, tb_id); | 377 | fi, tos, type, tb_id); |
378 | if (!err) | 378 | if (!err) |
379 | fi->fib_flags &= ~RTNH_F_EXTERNAL; | 379 | fi->fib_flags &= ~RTNH_F_OFFLOAD; |
380 | } | 380 | } |
381 | 381 | ||
382 | return err; | 382 | return err; |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 5266ea7b922b..06430598cf51 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -1880,6 +1880,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo, | |||
1880 | unix_state_unlock(sk); | 1880 | unix_state_unlock(sk); |
1881 | timeo = freezable_schedule_timeout(timeo); | 1881 | timeo = freezable_schedule_timeout(timeo); |
1882 | unix_state_lock(sk); | 1882 | unix_state_lock(sk); |
1883 | |||
1884 | if (sock_flag(sk, SOCK_DEAD)) | ||
1885 | break; | ||
1886 | |||
1883 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 1887 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); |
1884 | } | 1888 | } |
1885 | 1889 | ||
@@ -1939,6 +1943,10 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg, | |||
1939 | struct sk_buff *skb, *last; | 1943 | struct sk_buff *skb, *last; |
1940 | 1944 | ||
1941 | unix_state_lock(sk); | 1945 | unix_state_lock(sk); |
1946 | if (sock_flag(sk, SOCK_DEAD)) { | ||
1947 | err = -ECONNRESET; | ||
1948 | goto unlock; | ||
1949 | } | ||
1942 | last = skb = skb_peek(&sk->sk_receive_queue); | 1950 | last = skb = skb_peek(&sk->sk_receive_queue); |
1943 | again: | 1951 | again: |
1944 | if (skb == NULL) { | 1952 | if (skb == NULL) { |
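The af_unix hunks recheck SOCK_DEAD each time the receive path takes the socket lock again, after waking up in unix_stream_data_wait() and at the top of the unix_stream_recvmsg() loop, returning -ECONNRESET instead of touching a socket the peer has already torn down. The general pattern of re-evaluating the condition after every lock (re)acquisition, sketched with pthreads and a toy socket struct:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct sock_model {
    pthread_mutex_t lock;
    pthread_cond_t wait;
    bool dead;      /* stand-in for SOCK_DEAD       */
    int queued;     /* stand-in for the recv queue  */
};

/* Returns the number of items "received", or -1 if the socket died. */
static int recv_loop(struct sock_model *sk)
{
    int got = 0;

    pthread_mutex_lock(&sk->lock);
    for (;;) {
        /* Re-evaluate SOCK_DEAD every time the lock is (re)taken. */
        if (sk->dead) {
            got = -1;       /* -ECONNRESET in the real code */
            break;
        }
        if (sk->queued > 0) {
            sk->queued--;
            got++;
            break;
        }
        /* pthread_cond_wait() drops and reacquires the lock, like
         * unix_stream_data_wait() around its schedule_timeout(). */
        pthread_cond_wait(&sk->wait, &sk->lock);
    }
    pthread_mutex_unlock(&sk->lock);
    return got;
}

int main(void)
{
    struct sock_model sk = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false, 1
    };

    printf("recv: %d\n", recv_loop(&sk));   /* consumes the queued item      */
    sk.dead = true;
    printf("recv: %d\n", recv_loop(&sk));   /* sees SOCK_DEAD and bails out  */
    return 0;
}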
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 526c4feb3b50..b58286ecd156 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c | |||
@@ -13,6 +13,8 @@ | |||
13 | #include <net/dst.h> | 13 | #include <net/dst.h> |
14 | #include <net/ip.h> | 14 | #include <net/ip.h> |
15 | #include <net/xfrm.h> | 15 | #include <net/xfrm.h> |
16 | #include <net/ip_tunnels.h> | ||
17 | #include <net/ip6_tunnel.h> | ||
16 | 18 | ||
17 | static struct kmem_cache *secpath_cachep __read_mostly; | 19 | static struct kmem_cache *secpath_cachep __read_mostly; |
18 | 20 | ||
@@ -186,6 +188,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) | |||
186 | struct xfrm_state *x = NULL; | 188 | struct xfrm_state *x = NULL; |
187 | xfrm_address_t *daddr; | 189 | xfrm_address_t *daddr; |
188 | struct xfrm_mode *inner_mode; | 190 | struct xfrm_mode *inner_mode; |
191 | u32 mark = skb->mark; | ||
189 | unsigned int family; | 192 | unsigned int family; |
190 | int decaps = 0; | 193 | int decaps = 0; |
191 | int async = 0; | 194 | int async = 0; |
@@ -203,6 +206,18 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) | |||
203 | XFRM_SPI_SKB_CB(skb)->daddroff); | 206 | XFRM_SPI_SKB_CB(skb)->daddroff); |
204 | family = XFRM_SPI_SKB_CB(skb)->family; | 207 | family = XFRM_SPI_SKB_CB(skb)->family; |
205 | 208 | ||
209 | /* if a tunnel is present, override the skb->mark value with the tunnel i_key */ | ||
210 | if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) { | ||
211 | switch (family) { | ||
212 | case AF_INET: | ||
213 | mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key); | ||
214 | break; | ||
215 | case AF_INET6: | ||
216 | mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key); | ||
217 | break; | ||
218 | } | ||
219 | } | ||
220 | |||
206 | /* Allocate new secpath or COW existing one. */ | 221 | /* Allocate new secpath or COW existing one. */ |
207 | if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) { | 222 | if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) { |
208 | struct sec_path *sp; | 223 | struct sec_path *sp; |
@@ -229,7 +244,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) | |||
229 | goto drop; | 244 | goto drop; |
230 | } | 245 | } |
231 | 246 | ||
232 | x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family); | 247 | x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family); |
233 | if (x == NULL) { | 248 | if (x == NULL) { |
234 | XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES); | 249 | XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES); |
235 | xfrm_audit_state_notfound(skb, family, spi, seq); | 250 | xfrm_audit_state_notfound(skb, family, spi, seq); |
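The xfrm_input() hunk picks the mark used for the state lookup from the IP tunnel's i_key when the packet arrived through a keyed tunnel and falls back to skb->mark otherwise. A minimal sketch of that selection, with invented types (tunnel_parms, pkt) standing in for the real tunnel and skb structures; the byte-order conversion done by the kernel code is omitted:

#include <stdint.h>
#include <stdio.h>

struct tunnel_parms { uint32_t i_key; };    /* stand-in for the ip_tunnel parms */

struct pkt {
    uint32_t mark;                          /* skb->mark                */
    const struct tunnel_parms *tunnel;      /* NULL if not tunnelled    */
};

/* Use the tunnel i_key as the lookup mark when a tunnel is present. */
static uint32_t lookup_mark(const struct pkt *p)
{
    return p->tunnel ? p->tunnel->i_key : p->mark;
}

int main(void)
{
    struct tunnel_parms t = { .i_key = 42 };
    struct pkt plain = { .mark = 7, .tunnel = NULL };
    struct pkt tunnelled = { .mark = 7, .tunnel = &t };

    printf("plain mark=%u, tunnelled mark=%u\n",
           lookup_mark(&plain), lookup_mark(&tunnelled));
    return 0;
}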
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index dab57daae408..4fd725a0c500 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c | |||
@@ -99,6 +99,7 @@ static int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb) | |||
99 | 99 | ||
100 | if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { | 100 | if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { |
101 | XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq; | 101 | XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq; |
102 | XFRM_SKB_CB(skb)->seq.output.hi = 0; | ||
102 | if (unlikely(x->replay.oseq == 0)) { | 103 | if (unlikely(x->replay.oseq == 0)) { |
103 | x->replay.oseq--; | 104 | x->replay.oseq--; |
104 | xfrm_audit_state_replay_overflow(x, skb); | 105 | xfrm_audit_state_replay_overflow(x, skb); |
@@ -177,6 +178,7 @@ static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb) | |||
177 | 178 | ||
178 | if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { | 179 | if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { |
179 | XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq; | 180 | XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq; |
181 | XFRM_SKB_CB(skb)->seq.output.hi = 0; | ||
180 | if (unlikely(replay_esn->oseq == 0)) { | 182 | if (unlikely(replay_esn->oseq == 0)) { |
181 | replay_esn->oseq--; | 183 | replay_esn->oseq--; |
182 | xfrm_audit_state_replay_overflow(x, skb); | 184 | xfrm_audit_state_replay_overflow(x, skb); |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index f5e39e35d73a..96688cd0f6f1 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -927,8 +927,8 @@ struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi, | |||
927 | x->id.spi != spi) | 927 | x->id.spi != spi) |
928 | continue; | 928 | continue; |
929 | 929 | ||
930 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); | ||
931 | xfrm_state_hold(x); | 930 | xfrm_state_hold(x); |
931 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); | ||
932 | return x; | 932 | return x; |
933 | } | 933 | } |
934 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); | 934 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); |
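The final xfrm_state.c hunk takes the reference on the matching state while the hash lock is still held and only then drops the lock, closing the window in which the state could be freed between unlock and xfrm_state_hold(). The same take-the-reference-then-unlock ordering in a small pthread model:

#include <pthread.h>
#include <stdio.h>

struct state {
    int refcnt;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct state entry = { .refcnt = 1 };    /* reference held by the table */

/* Look the entry up and return it with an extra reference. */
static struct state *lookup_and_hold(void)
{
    struct state *x;

    pthread_mutex_lock(&table_lock);
    x = &entry;
    x->refcnt++;                        /* take the reference under the lock */
    pthread_mutex_unlock(&table_lock);  /* and only then drop the lock       */
    return x;
}

int main(void)
{
    struct state *x = lookup_and_hold();

    printf("refcnt after lookup: %d\n", x->refcnt);
    return 0;
}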