Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/af_inet.c                 3
-rw-r--r--  net/ipv4/devinet.c                 6
-rw-r--r--  net/ipv4/inet_connection_sock.c    1
-rw-r--r--  net/ipv4/inet_fragment.c          20
-rw-r--r--  net/ipv4/ip_fragment.c            11
-rw-r--r--  net/ipv4/ip_gre.c                  5
-rw-r--r--  net/ipv4/ip_input.c                6
-rw-r--r--  net/ipv4/ip_options.c              7
-rw-r--r--  net/ipv4/ipconfig.c                3
-rw-r--r--  net/ipv4/netfilter/Kconfig        13
-rw-r--r--  net/ipv4/tcp.c                     2
-rw-r--r--  net/ipv4/tcp_input.c              13
-rw-r--r--  net/ipv4/tcp_ipv4.c               14
-rw-r--r--  net/ipv4/tcp_output.c              8
-rw-r--r--  net/ipv4/udp.c                     7
15 files changed, 63 insertions, 56 deletions
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 68f6a94f7661..c929d9c1c4b6 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1333,8 +1333,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 				iph->frag_off |= htons(IP_MF);
 			offset += (skb->len - skb->mac_len - iph->ihl * 4);
 		} else {
-			if (!(iph->frag_off & htons(IP_DF)))
-				iph->id = htons(id++);
+			iph->id = htons(id++);
 		}
 		iph->tot_len = htons(skb->len - skb->mac_len);
 		iph->check = 0;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index f678507bc829..96083b7a436b 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -802,8 +802,10 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
 		if (nlh->nlmsg_flags & NLM_F_EXCL ||
 		    !(nlh->nlmsg_flags & NLM_F_REPLACE))
 			return -EEXIST;
-
-		set_ifa_lifetime(ifa_existing, valid_lft, prefered_lft);
+		ifa = ifa_existing;
+		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
+		rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
+		blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
 	}
 	return 0;
 }
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 7d1874be1df3..786d97aee751 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -735,6 +735,7 @@ EXPORT_SYMBOL(inet_csk_destroy_sock);
  * tcp/dccp_create_openreq_child().
  */
 void inet_csk_prepare_forced_close(struct sock *sk)
+	__releases(&sk->sk_lock.slock)
 {
 	/* sk_clone_lock locked the socket and set refcnt to 2 */
 	bh_unlock_sock(sk);
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 245ae078a07f..f4fd23de9b13 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -21,6 +21,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/slab.h>
 
+#include <net/sock.h>
 #include <net/inet_frag.h>
 
 static void inet_frag_secret_rebuild(unsigned long dummy)
@@ -277,6 +278,7 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
 	__releases(&f->lock)
 {
 	struct inet_frag_queue *q;
+	int depth = 0;
 
 	hlist_for_each_entry(q, &f->hash[hash], list) {
 		if (q->net == nf && f->match(q, key)) {
@@ -284,9 +286,25 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
 			read_unlock(&f->lock);
 			return q;
 		}
+		depth++;
 	}
 	read_unlock(&f->lock);
 
-	return inet_frag_create(nf, f, key);
+	if (depth <= INETFRAGS_MAXDEPTH)
+		return inet_frag_create(nf, f, key);
+	else
+		return ERR_PTR(-ENOBUFS);
 }
 EXPORT_SYMBOL(inet_frag_find);
+
+void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
+				   const char *prefix)
+{
+	static const char msg[] = "inet_frag_find: Fragment hash bucket"
+		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
+		". Dropping fragment.\n";
+
+	if (PTR_ERR(q) == -ENOBUFS)
+		LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
+}
+EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
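For reference, a standalone userspace sketch of the bounded-bucket-walk pattern this hunk introduces: reuse a matching entry if one exists, count the entries walked, and refuse to create a new one once the bucket has grown past a fixed depth, signalling the overflow through an error pointer. The names, the depth limit, and the local ERR_PTR()/PTR_ERR() helpers below are illustrative stand-ins, not part of the patch.

#include <errno.h>
#include <stdlib.h>

#define MAX_DEPTH 128	/* stand-in for INETFRAGS_MAXDEPTH */

/* Minimal userspace equivalents of the kernel's err.h helpers. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }

struct frag_queue {
	unsigned int key;
	struct frag_queue *next;	/* singly linked hash-bucket list */
};

/* Look up key in one bucket; create a new entry only if the bucket is
 * still shorter than MAX_DEPTH, otherwise report -ENOBUFS to the caller. */
static struct frag_queue *frag_find(struct frag_queue **bucket, unsigned int key)
{
	struct frag_queue *q;
	int depth = 0;

	for (q = *bucket; q; q = q->next) {
		if (q->key == key)
			return q;		/* existing queue: reuse it */
		depth++;
	}

	if (depth > MAX_DEPTH)
		return ERR_PTR(-ENOBUFS);	/* bucket too long: refuse to grow it */

	q = calloc(1, sizeof(*q));
	if (!q)
		return ERR_PTR(-ENOMEM);
	q->key = key;
	q->next = *bucket;			/* link the new entry at the bucket head */
	*bucket = q;
	return q;
}

A caller would then treat PTR_ERR(q) == -ENOBUFS as "drop this fragment", which is what the ip_fragment.c hunk below does via IS_ERR_OR_NULL() and inet_frag_maybe_warn_overflow().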
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index b6d30acb600c..a6445b843ef4 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -292,14 +292,11 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
 	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 
 	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
-	if (q == NULL)
-		goto out_nomem;
-
+	if (IS_ERR_OR_NULL(q)) {
+		inet_frag_maybe_warn_overflow(q, pr_fmt());
+		return NULL;
+	}
 	return container_of(q, struct ipq, q);
-
-out_nomem:
-	LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n"));
-	return NULL;
 }
 
 /* Is the fragment too far ahead to be part of ipq? */
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d0ef0e674ec5..91d66dbde9c0 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -798,10 +798,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 
 	if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
 		gre_hlen = 0;
-		if (skb->protocol == htons(ETH_P_IP))
-			tiph = (const struct iphdr *)skb->data;
-		else
-			tiph = &tunnel->parms.iph;
+		tiph = (const struct iphdr *)skb->data;
 	} else {
 		gre_hlen = tunnel->hlen;
 		tiph = &tunnel->parms.iph;
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 87abd3e2bd32..2bdf802e28e2 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -228,9 +228,11 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
 					icmp_send(skb, ICMP_DEST_UNREACH,
 						  ICMP_PROT_UNREACH, 0);
 				}
-			} else
+				kfree_skb(skb);
+			} else {
 				IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
-			kfree_skb(skb);
+				consume_skb(skb);
+			}
 		}
 	}
  out:
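The distinction this hunk relies on: in the kernel, kfree_skb() and consume_skb() both free the buffer, but kfree_skb() counts as a packet drop (it is what drop-monitoring hooks watch), while consume_skb() means the packet was handled successfully. A toy userspace model of that convention, with made-up names and counters purely for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt { void *data; };

static unsigned long dropped;	/* what a drop monitor would observe */

/* Model of kfree_skb(): free the packet and record it as a drop. */
static void pkt_drop(struct pkt *p)
{
	dropped++;
	free(p);
}

/* Model of consume_skb(): free the packet without counting a drop. */
static void pkt_consume(struct pkt *p)
{
	free(p);
}

/* Mirrors the hunk's logic: a packet taken by a handler is consumed,
 * a packet nobody wanted is dropped. */
static void deliver(struct pkt *p, bool handler_took_it)
{
	if (handler_took_it)
		pkt_consume(p);
	else
		pkt_drop(p);
}

int main(void)
{
	deliver(calloc(1, sizeof(struct pkt)), true);
	deliver(calloc(1, sizeof(struct pkt)), false);
	printf("dropped=%lu\n", dropped);	/* prints: dropped=1 */
	return 0;
}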
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index f6289bf6f332..ec7264514a82 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -370,7 +370,6 @@ int ip_options_compile(struct net *net,
 				}
 				switch (optptr[3]&0xF) {
 				case IPOPT_TS_TSONLY:
-					opt->ts = optptr - iph;
 					if (skb)
 						timeptr = &optptr[optptr[2]-1];
 					opt->ts_needtime = 1;
@@ -381,7 +380,6 @@ int ip_options_compile(struct net *net,
 						pp_ptr = optptr + 2;
 						goto error;
 					}
-					opt->ts = optptr - iph;
 					if (rt) {
 						spec_dst_fill(&spec_dst, skb);
 						memcpy(&optptr[optptr[2]-1], &spec_dst, 4);
@@ -396,7 +394,6 @@ int ip_options_compile(struct net *net,
 						pp_ptr = optptr + 2;
 						goto error;
 					}
-					opt->ts = optptr - iph;
 					{
 						__be32 addr;
 						memcpy(&addr, &optptr[optptr[2]-1], 4);
@@ -423,18 +420,18 @@ int ip_options_compile(struct net *net,
 				put_unaligned_be32(midtime, timeptr);
 				opt->is_changed = 1;
 			}
-		} else {
+		} else if ((optptr[3]&0xF) != IPOPT_TS_PRESPEC) {
 			unsigned int overflow = optptr[3]>>4;
 			if (overflow == 15) {
 				pp_ptr = optptr + 3;
 				goto error;
 			}
-			opt->ts = optptr - iph;
 			if (skb) {
 				optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4);
 				opt->is_changed = 1;
 			}
 		}
+		opt->ts = optptr - iph;
 		break;
 	case IPOPT_RA:
 		if (optlen < 4) {
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 98cbc6877019..bf6c5cf31aed 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1522,7 +1522,8 @@ static int __init ip_auto_config(void)
 	}
 	for (i++; i < CONF_NAMESERVERS_MAX; i++)
 		if (ic_nameservers[i] != NONE)
-			pr_cont(", nameserver%u=%pI4\n", i, &ic_nameservers[i]);
+			pr_cont(", nameserver%u=%pI4", i, &ic_nameservers[i]);
+	pr_cont("\n");
 #endif /* !SILENT */
 
 	return 0;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index ce2d43e1f09f..0d755c50994b 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -36,19 +36,6 @@ config NF_CONNTRACK_PROC_COMPAT
 
 	  If unsure, say Y.
 
-config IP_NF_QUEUE
-	tristate "IP Userspace queueing via NETLINK (OBSOLETE)"
-	depends on NETFILTER_ADVANCED
-	help
-	  Netfilter has the ability to queue packets to user space: the
-	  netlink device can be used to access them using this driver.
-
-	  This option enables the old IPv4-only "ip_queue" implementation
-	  which has been obsoleted by the new "nfnetlink_queue" code (see
-	  CONFIG_NETFILTER_NETLINK_QUEUE).
-
-	  To compile it as a module, choose M here. If unsure, say N.
-
 config IP_NF_IPTABLES
 	tristate "IP tables support (required for filtering/masq/NAT)"
 	default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 47e854fcae24..e22020790709 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -775,7 +775,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
 			 * Make sure that we have exactly size bytes
 			 * available to the caller, no more, no less.
 			 */
-			skb->avail_size = size;
+			skb->reserved_tailroom = skb->end - skb->tail - size;
 			return skb;
 		}
 		__kfree_skb(skb);
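The replacement keeps the caller-visible guarantee (exactly size bytes of usable tailroom) while truesize continues to reflect the full allocation; the reserved amount is subtracted again wherever available room is computed (in the kernel this is the job of a helper along the lines of skb_availroom()). A hedged userspace sketch of that accounting, with illustrative field and function names:

#include <assert.h>
#include <stddef.h>

/* Toy buffer modelled loosely on the sk_buff fields used above. */
struct buf {
	size_t end;			/* end of the allocated data area */
	size_t tail;			/* current end of the payload */
	size_t reserved_tailroom;	/* room the caller must not use */
};

/* Room a writer may still use: raw tailroom minus the reserved part. */
static size_t buf_availroom(const struct buf *b)
{
	return b->end - b->tail - b->reserved_tailroom;
}

int main(void)
{
	/* Allocate 4096 bytes but promise the caller exactly 1460. */
	struct buf b = { .end = 4096, .tail = 0 };
	size_t size = 1460;

	b.reserved_tailroom = b.end - b.tail - size;	/* mirrors the hunk */

	assert(buf_availroom(&b) == size);	/* caller sees exactly "size" */

	b.tail += 1000;				/* caller appends 1000 bytes */
	assert(buf_availroom(&b) == size - 1000);
	return 0;
}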
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a759e19496d2..3bd55bad230a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2059,11 +2059,8 @@ void tcp_enter_loss(struct sock *sk, int how)
 	if (tcp_is_reno(tp))
 		tcp_reset_reno_sack(tp);
 
-	if (!how) {
-		/* Push undo marker, if it was plain RTO and nothing
-		 * was retransmitted. */
-		tp->undo_marker = tp->snd_una;
-	} else {
+	tp->undo_marker = tp->snd_una;
+	if (how) {
 		tp->sacked_out = 0;
 		tp->fackets_out = 0;
 	}
@@ -5485,6 +5482,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			if (tcp_checksum_complete_user(sk, skb))
 				goto csum_error;
 
+			if ((int)skb->truesize > sk->sk_forward_alloc)
+				goto step5;
+
 			/* Predicted packet is in window by definition.
 			 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
 			 * Hence, check seq<=rcv_wup reduces to:
@@ -5496,9 +5496,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
 			tcp_rcv_rtt_measure_ts(sk, skb);
 
-			if ((int)skb->truesize > sk->sk_forward_alloc)
-				goto step5;
-
 			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
 			/* Bulk data transfer: receiver */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 4a8ec457310f..d09203c63264 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -274,13 +274,6 @@ static void tcp_v4_mtu_reduced(struct sock *sk)
 	struct inet_sock *inet = inet_sk(sk);
 	u32 mtu = tcp_sk(sk)->mtu_info;
 
-	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
-	 * send out by Linux are always <576bytes so they should go through
-	 * unfragmented).
-	 */
-	if (sk->sk_state == TCP_LISTEN)
-		return;
-
 	dst = inet_csk_update_pmtu(sk, mtu);
 	if (!dst)
 		return;
@@ -408,6 +401,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		goto out;
 
 	if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
+		/* We are not interested in TCP_LISTEN and open_requests
+		 * (SYN-ACKs send out by Linux are always <576bytes so
+		 * they should go through unfragmented).
+		 */
+		if (sk->sk_state == TCP_LISTEN)
+			goto out;
+
 		tp->mtu_info = info;
 		if (!sock_owned_by_user(sk)) {
 			tcp_v4_mtu_reduced(sk);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e2b4461074da..5d0b4387cba6 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1298,7 +1298,6 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
 	eat = min_t(int, len, skb_headlen(skb));
 	if (eat) {
 		__skb_pull(skb, eat);
-		skb->avail_size -= eat;
 		len -= eat;
 		if (!len)
 			return;
@@ -1810,8 +1809,11 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 		goto send_now;
 	}
 
-	/* Ok, it looks like it is advisable to defer. */
-	tp->tso_deferred = 1 | (jiffies << 1);
+	/* Ok, it looks like it is advisable to defer.
+	 * Do not rearm the timer if already set to not break TCP ACK clocking.
+	 */
+	if (!tp->tso_deferred)
+		tp->tso_deferred = 1 | (jiffies << 1);
 
 	return true;
 
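The tso_deferred encoding used here packs a "deferral active" flag into bit 0 and the time of the first deferral into the remaining bits (1 | (jiffies << 1) is never zero, even when jiffies is); the hunk only changes when that stamp is written, so repeated calls keep the original deferral time instead of pushing it forward. A small userspace sketch of that set-once behaviour, using an illustrative fake jiffies counter:

#include <assert.h>

static unsigned long jiffies;		/* stand-in for the kernel tick counter */
static unsigned long tso_deferred;	/* 0 = no deferral in progress */

/* Start (or keep) a deferral: bit 0 marks "deferred", the upper bits
 * remember when the first deferral happened. Never rearm if already set. */
static void tso_defer(void)
{
	if (!tso_deferred)
		tso_deferred = 1UL | (jiffies << 1);
}

/* When the segment is finally sent, the deferral state is cleared. */
static void tso_send(void)
{
	tso_deferred = 0;
}

int main(void)
{
	jiffies = 100;
	tso_defer();
	assert(tso_deferred == (1UL | (100UL << 1)));

	jiffies = 105;
	tso_defer();			/* second defer: stamp must NOT move to 105 */
	assert((tso_deferred >> 1) == 100);

	tso_send();
	assert(tso_deferred == 0);
	return 0;
}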
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 265c42cf963c..0a073a263720 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1762,9 +1762,16 @@ int udp_rcv(struct sk_buff *skb)
 
 void udp_destroy_sock(struct sock *sk)
 {
+	struct udp_sock *up = udp_sk(sk);
 	bool slow = lock_sock_fast(sk);
 	udp_flush_pending_frames(sk);
 	unlock_sock_fast(sk, slow);
+	if (static_key_false(&udp_encap_needed) && up->encap_type) {
+		void (*encap_destroy)(struct sock *sk);
+		encap_destroy = ACCESS_ONCE(up->encap_destroy);
+		if (encap_destroy)
+			encap_destroy(sk);
+	}
 }
 
 /*
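This gives UDP encapsulation users (tunnel protocols that take over a UDP socket via encap_rcv) a hook that runs when the socket is torn down, so they can release per-tunnel state. A userspace model of the callback plumbing; the struct layout and names below are illustrative, not the kernel's:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for the encapsulation-related fields of struct udp_sock. */
struct usock {
	int encap_type;			/* non-zero: a tunnel owns this socket */
	void (*encap_destroy)(struct usock *sk);
};

/* What a tunnel module (e.g. an L2TP-like user) would install. */
static void tunnel_destroy(struct usock *sk)
{
	printf("tearing down tunnel state for socket %p\n", (void *)sk);
}

/* Mirrors the hunk: after the generic cleanup, invoke the tunnel's
 * destroy callback if one was registered. */
static void usock_destroy(struct usock *sk)
{
	/* ... generic UDP cleanup would happen here ... */
	if (sk->encap_type && sk->encap_destroy)
		sk->encap_destroy(sk);
	free(sk);
}

int main(void)
{
	struct usock *sk = calloc(1, sizeof(*sk));

	sk->encap_type = 1;			/* claimed by a tunnel */
	sk->encap_destroy = tunnel_destroy;	/* register the teardown hook */
	usock_destroy(sk);			/* callback fires exactly once */
	return 0;
}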
