Diffstat (limited to 'net/ipv4')
-rw-r--r-- | net/ipv4/af_inet.c         |  3
-rw-r--r-- | net/ipv4/inet_fragment.c   | 20
-rw-r--r-- | net/ipv4/ip_fragment.c     | 11
-rw-r--r-- | net/ipv4/ip_gre.c          |  5
-rw-r--r-- | net/ipv4/ip_options.c      |  5
-rw-r--r-- | net/ipv4/ipconfig.c        |  3
-rw-r--r-- | net/ipv4/netfilter/Kconfig | 13
-rw-r--r-- | net/ipv4/tcp.c             |  2
-rw-r--r-- | net/ipv4/tcp_input.c       |  7
-rw-r--r-- | net/ipv4/tcp_ipv4.c        | 14
-rw-r--r-- | net/ipv4/tcp_output.c      |  8
-rw-r--r-- | net/ipv4/udp.c             |  7
12 files changed, 50 insertions, 48 deletions
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 68f6a94f7661..c929d9c1c4b6 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1333,8 +1333,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 				iph->frag_off |= htons(IP_MF);
 			offset += (skb->len - skb->mac_len - iph->ihl * 4);
 		} else {
-			if (!(iph->frag_off & htons(IP_DF)))
-				iph->id = htons(id++);
+			iph->id = htons(id++);
 		}
 		iph->tot_len = htons(skb->len - skb->mac_len);
 		iph->check = 0;
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 245ae078a07f..f4fd23de9b13 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -21,6 +21,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/slab.h>
 
+#include <net/sock.h>
 #include <net/inet_frag.h>
 
 static void inet_frag_secret_rebuild(unsigned long dummy)
@@ -277,6 +278,7 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
 	__releases(&f->lock)
 {
 	struct inet_frag_queue *q;
+	int depth = 0;
 
 	hlist_for_each_entry(q, &f->hash[hash], list) {
 		if (q->net == nf && f->match(q, key)) {
@@ -284,9 +286,25 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
 			read_unlock(&f->lock);
 			return q;
 		}
+		depth++;
 	}
 	read_unlock(&f->lock);
 
-	return inet_frag_create(nf, f, key);
+	if (depth <= INETFRAGS_MAXDEPTH)
+		return inet_frag_create(nf, f, key);
+	else
+		return ERR_PTR(-ENOBUFS);
 }
 EXPORT_SYMBOL(inet_frag_find);
+
+void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
+				   const char *prefix)
+{
+	static const char msg[] = "inet_frag_find: Fragment hash bucket"
+		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
+		". Dropping fragment.\n";
+
+	if (PTR_ERR(q) == -ENOBUFS)
+		LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
+}
+EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index b6d30acb600c..a6445b843ef4 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -292,14 +292,11 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
 	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 
 	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
-	if (q == NULL)
-		goto out_nomem;
-
+	if (IS_ERR_OR_NULL(q)) {
+		inet_frag_maybe_warn_overflow(q, pr_fmt());
+		return NULL;
+	}
 	return container_of(q, struct ipq, q);
-
-out_nomem:
-	LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n"));
-	return NULL;
 }
 
 /* Is the fragment too far ahead to be part of ipq? */
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d0ef0e674ec5..91d66dbde9c0 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -798,10 +798,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 
 	if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
 		gre_hlen = 0;
-		if (skb->protocol == htons(ETH_P_IP))
-			tiph = (const struct iphdr *)skb->data;
-		else
-			tiph = &tunnel->parms.iph;
+		tiph = (const struct iphdr *)skb->data;
 	} else {
 		gre_hlen = tunnel->hlen;
 		tiph = &tunnel->parms.iph;
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 310a3647c83d..ec7264514a82 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -370,7 +370,6 @@ int ip_options_compile(struct net *net,
 				}
 				switch (optptr[3]&0xF) {
 				case IPOPT_TS_TSONLY:
-					opt->ts = optptr - iph;
 					if (skb)
 						timeptr = &optptr[optptr[2]-1];
 					opt->ts_needtime = 1;
@@ -381,7 +380,6 @@ int ip_options_compile(struct net *net,
 						pp_ptr = optptr + 2;
 						goto error;
 					}
-					opt->ts = optptr - iph;
 					if (rt) {
 						spec_dst_fill(&spec_dst, skb);
 						memcpy(&optptr[optptr[2]-1], &spec_dst, 4);
@@ -396,7 +394,6 @@ int ip_options_compile(struct net *net,
 						pp_ptr = optptr + 2;
 						goto error;
 					}
-					opt->ts = optptr - iph;
 					{
 						__be32 addr;
 						memcpy(&addr, &optptr[optptr[2]-1], 4);
@@ -429,12 +426,12 @@ int ip_options_compile(struct net *net,
 					pp_ptr = optptr + 3;
 					goto error;
 				}
-				opt->ts = optptr - iph;
 				if (skb) {
 					optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4);
 					opt->is_changed = 1;
 				}
 			}
+			opt->ts = optptr - iph;
 			break;
 		case IPOPT_RA:
 			if (optlen < 4) {
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 98cbc6877019..bf6c5cf31aed 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1522,7 +1522,8 @@ static int __init ip_auto_config(void)
 	}
 	for (i++; i < CONF_NAMESERVERS_MAX; i++)
 		if (ic_nameservers[i] != NONE)
-			pr_cont(", nameserver%u=%pI4\n", i, &ic_nameservers[i]);
+			pr_cont(", nameserver%u=%pI4", i, &ic_nameservers[i]);
+	pr_cont("\n");
 #endif /* !SILENT */
 
 	return 0;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index ce2d43e1f09f..0d755c50994b 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -36,19 +36,6 @@ config NF_CONNTRACK_PROC_COMPAT
 
 	  If unsure, say Y.
 
-config IP_NF_QUEUE
-	tristate "IP Userspace queueing via NETLINK (OBSOLETE)"
-	depends on NETFILTER_ADVANCED
-	help
-	  Netfilter has the ability to queue packets to user space: the
-	  netlink device can be used to access them using this driver.
-
-	  This option enables the old IPv4-only "ip_queue" implementation
-	  which has been obsoleted by the new "nfnetlink_queue" code (see
-	  CONFIG_NETFILTER_NETLINK_QUEUE).
-
-	  To compile it as a module, choose M here. If unsure, say N.
-
 config IP_NF_IPTABLES
 	tristate "IP tables support (required for filtering/masq/NAT)"
 	default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 47e854fcae24..e22020790709 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -775,7 +775,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
 		 * Make sure that we have exactly size bytes
 		 * available to the caller, no more, no less.
 		 */
-		skb->avail_size = size;
+		skb->reserved_tailroom = skb->end - skb->tail - size;
 		return skb;
 	}
 	__kfree_skb(skb);
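
The tcp.c hunk above drops the TCP-private avail_size accounting in favour of the generic reserved_tailroom field. For context only, the space still usable by TCP is then expected to come from the skbuff helper that reads reserved_tailroom; a minimal sketch of such a helper, assuming the include/linux/skbuff.h side of this series, would be:

/* Sketch, not part of this patch: linear tailroom minus the space
 * reserved for the caller (the "exactly size bytes" noted above).
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return 0;

	return skb->end - skb->tail - skb->reserved_tailroom;
}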
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0d9bdacce99f..3bd55bad230a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2059,11 +2059,8 @@ void tcp_enter_loss(struct sock *sk, int how)
 	if (tcp_is_reno(tp))
 		tcp_reset_reno_sack(tp);
 
-	if (!how) {
-		/* Push undo marker, if it was plain RTO and nothing
-		 * was retransmitted. */
-		tp->undo_marker = tp->snd_una;
-	} else {
+	tp->undo_marker = tp->snd_una;
+	if (how) {
 		tp->sacked_out = 0;
 		tp->fackets_out = 0;
 	}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 4a8ec457310f..d09203c63264 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -274,13 +274,6 @@ static void tcp_v4_mtu_reduced(struct sock *sk)
 	struct inet_sock *inet = inet_sk(sk);
 	u32 mtu = tcp_sk(sk)->mtu_info;
 
-	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
-	 * send out by Linux are always <576bytes so they should go through
-	 * unfragmented).
-	 */
-	if (sk->sk_state == TCP_LISTEN)
-		return;
-
 	dst = inet_csk_update_pmtu(sk, mtu);
 	if (!dst)
 		return;
@@ -408,6 +401,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		goto out;
 
 	if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
+		/* We are not interested in TCP_LISTEN and open_requests
+		 * (SYN-ACKs send out by Linux are always <576bytes so
+		 * they should go through unfragmented).
+		 */
+		if (sk->sk_state == TCP_LISTEN)
+			goto out;
+
 		tp->mtu_info = info;
 		if (!sock_owned_by_user(sk)) {
 			tcp_v4_mtu_reduced(sk);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e2b4461074da..5d0b4387cba6 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1298,7 +1298,6 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
 	eat = min_t(int, len, skb_headlen(skb));
 	if (eat) {
 		__skb_pull(skb, eat);
-		skb->avail_size -= eat;
 		len -= eat;
 		if (!len)
 			return;
@@ -1810,8 +1809,11 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 		goto send_now;
 	}
 
-	/* Ok, it looks like it is advisable to defer. */
-	tp->tso_deferred = 1 | (jiffies << 1);
+	/* Ok, it looks like it is advisable to defer.
+	 * Do not rearm the timer if already set to not break TCP ACK clocking.
+	 */
+	if (!tp->tso_deferred)
+		tp->tso_deferred = 1 | (jiffies << 1);
 
 	return true;
 
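
In the tcp_output.c hunk above, tso_deferred packs a "deferral active" flag into bit 0 and a shifted jiffies timestamp into the remaining bits; by not rearming it when already set, the elapsed-time check near the top of tcp_tso_should_defer() (not shown in this hunk) keeps measuring from the first deferral instead of being reset on every pass. Purely illustrative helpers for that encoding (these do not exist in the tree):

/* Illustrative only: decode the packed tp->tso_deferred field. */
static inline bool tso_defer_active(const struct tcp_sock *tp)
{
	return tp->tso_deferred & 1;	/* bit 0: a deferral is in progress */
}

static inline u32 tso_defer_start(const struct tcp_sock *tp)
{
	return tp->tso_deferred >> 1;	/* (jiffies << 1) >> 1 at first deferral */
}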
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 265c42cf963c..0a073a263720 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1762,9 +1762,16 @@ int udp_rcv(struct sk_buff *skb)
 
 void udp_destroy_sock(struct sock *sk)
 {
+	struct udp_sock *up = udp_sk(sk);
 	bool slow = lock_sock_fast(sk);
 	udp_flush_pending_frames(sk);
 	unlock_sock_fast(sk, slow);
+	if (static_key_false(&udp_encap_needed) && up->encap_type) {
+		void (*encap_destroy)(struct sock *sk);
+		encap_destroy = ACCESS_ONCE(up->encap_destroy);
+		if (encap_destroy)
+			encap_destroy(sk);
+	}
 }
 
 /*
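
The udp.c hunk above only invokes a hook; an encapsulation user is expected to have installed it beforehand. A rough, hypothetical sketch of how a tunnel driver might arm the hook when it takes over a UDP socket (the my_tunnel_* names are made up; the udp_sk() fields and udp_encap_enable() are existing kernel interfaces):

/* Hypothetical setup on the tunnel side -- not part of this patch. */
static void my_tunnel_encap_destroy(struct sock *sk)
{
	/* tear down per-tunnel state tied to this UDP socket */
}

static void my_tunnel_attach(struct sock *sk)
{
	udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;	/* any non-zero type */
	udp_sk(sk)->encap_destroy = my_tunnel_encap_destroy;
	udp_encap_enable();	/* flips the udp_encap_needed static key */
}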