Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/inet_connection_sock.c |  1
-rw-r--r--  net/ipv4/inet_fragment.c        | 20
-rw-r--r--  net/ipv4/ip_fragment.c          | 11
-rw-r--r--  net/ipv4/ip_gre.c               |  5
-rw-r--r--  net/ipv4/ip_input.c             |  6
-rw-r--r--  net/ipv4/ip_options.c           |  7
-rw-r--r--  net/ipv4/tcp.c                  |  2
-rw-r--r--  net/ipv4/tcp_input.c            |  6
-rw-r--r--  net/ipv4/tcp_ipv4.c             | 14
-rw-r--r--  net/ipv4/tcp_output.c           |  1
10 files changed, 42 insertions, 31 deletions
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 7d1874be1df3..786d97aee751 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -735,6 +735,7 @@ EXPORT_SYMBOL(inet_csk_destroy_sock);
  * tcp/dccp_create_openreq_child().
  */
 void inet_csk_prepare_forced_close(struct sock *sk)
+	__releases(&sk->sk_lock.slock)
 {
 	/* sk_clone_lock locked the socket and set refcnt to 2 */
 	bh_unlock_sock(sk);
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 245ae078a07f..f4fd23de9b13 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -21,6 +21,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/slab.h>
 
+#include <net/sock.h>
 #include <net/inet_frag.h>
 
 static void inet_frag_secret_rebuild(unsigned long dummy)
@@ -277,6 +278,7 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
 	__releases(&f->lock)
 {
 	struct inet_frag_queue *q;
+	int depth = 0;
 
 	hlist_for_each_entry(q, &f->hash[hash], list) {
 		if (q->net == nf && f->match(q, key)) {
@@ -284,9 +286,25 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
 			read_unlock(&f->lock);
 			return q;
 		}
+		depth++;
 	}
 	read_unlock(&f->lock);
 
-	return inet_frag_create(nf, f, key);
+	if (depth <= INETFRAGS_MAXDEPTH)
+		return inet_frag_create(nf, f, key);
+	else
+		return ERR_PTR(-ENOBUFS);
 }
 EXPORT_SYMBOL(inet_frag_find);
+
+void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
+				   const char *prefix)
+{
+	static const char msg[] = "inet_frag_find: Fragment hash bucket"
+		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
+		". Dropping fragment.\n";
+
+	if (PTR_ERR(q) == -ENOBUFS)
+		LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
+}
+EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
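
The new inet_frag_find() return path hands -ENOBUFS back to callers encoded in the pointer itself, and inet_frag_maybe_warn_overflow() decodes it with PTR_ERR(). For readers unfamiliar with that convention, here is a minimal, self-contained userspace sketch of the same pattern; the ERR_PTR()/PTR_ERR()/IS_ERR_OR_NULL() stand-ins are simplified illustrations rather than the kernel's include/linux/err.h helpers, and toy_find()/TOY_MAXDEPTH are invented for the example.

/* Simplified stand-ins for the kernel's error-pointer helpers
 * (illustration only, not the include/linux/err.h definitions). */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define TOY_MAXDEPTH	128	/* plays the role of INETFRAGS_MAXDEPTH here */

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Toy lookup shaped like the patched inet_frag_find(): a miss is NULL,
 * an overlong hash chain is refused with -ENOBUFS in the pointer. */
static void *toy_find(int chain_depth)
{
	if (chain_depth <= TOY_MAXDEPTH)
		return NULL;
	return ERR_PTR(-ENOBUFS);
}

int main(void)
{
	void *q = toy_find(TOY_MAXDEPTH + 1);

	if (IS_ERR_OR_NULL(q)) {
		if (PTR_ERR(q) == -ENOBUFS)	/* only warn for the overflow case */
			fprintf(stderr, "dropping fragment: bucket list over limit\n");
		return 1;
	}
	return 0;
}
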
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index b6d30acb600c..a6445b843ef4 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -292,14 +292,11 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
 	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 
 	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
-	if (q == NULL)
-		goto out_nomem;
-
+	if (IS_ERR_OR_NULL(q)) {
+		inet_frag_maybe_warn_overflow(q, pr_fmt());
+		return NULL;
+	}
 	return container_of(q, struct ipq, q);
-
-out_nomem:
-	LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n"));
-	return NULL;
 }
 
 /* Is the fragment too far ahead to be part of ipq? */
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d0ef0e674ec5..91d66dbde9c0 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -798,10 +798,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 
 	if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
 		gre_hlen = 0;
-		if (skb->protocol == htons(ETH_P_IP))
-			tiph = (const struct iphdr *)skb->data;
-		else
-			tiph = &tunnel->parms.iph;
+		tiph = (const struct iphdr *)skb->data;
 	} else {
 		gre_hlen = tunnel->hlen;
 		tiph = &tunnel->parms.iph;
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 87abd3e2bd32..2bdf802e28e2 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -228,9 +228,11 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
 					icmp_send(skb, ICMP_DEST_UNREACH,
 						  ICMP_PROT_UNREACH, 0);
 				}
-			} else
+				kfree_skb(skb);
+			} else {
 				IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
-			kfree_skb(skb);
+				consume_skb(skb);
+			}
 		}
 	}
  out:
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index f6289bf6f332..ec7264514a82 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -370,7 +370,6 @@ int ip_options_compile(struct net *net,
 				}
 				switch (optptr[3]&0xF) {
 				case IPOPT_TS_TSONLY:
-					opt->ts = optptr - iph;
 					if (skb)
 						timeptr = &optptr[optptr[2]-1];
 					opt->ts_needtime = 1;
@@ -381,7 +380,6 @@ int ip_options_compile(struct net *net,
 						pp_ptr = optptr + 2;
 						goto error;
 					}
-					opt->ts = optptr - iph;
 					if (rt) {
 						spec_dst_fill(&spec_dst, skb);
 						memcpy(&optptr[optptr[2]-1], &spec_dst, 4);
@@ -396,7 +394,6 @@ int ip_options_compile(struct net *net,
 						pp_ptr = optptr + 2;
 						goto error;
 					}
-					opt->ts = optptr - iph;
 					{
 						__be32 addr;
 						memcpy(&addr, &optptr[optptr[2]-1], 4);
@@ -423,18 +420,18 @@ int ip_options_compile(struct net *net,
 					put_unaligned_be32(midtime, timeptr);
 					opt->is_changed = 1;
 				}
-			} else {
+			} else if ((optptr[3]&0xF) != IPOPT_TS_PRESPEC) {
 				unsigned int overflow = optptr[3]>>4;
 				if (overflow == 15) {
 					pp_ptr = optptr + 3;
 					goto error;
 				}
-				opt->ts = optptr - iph;
 				if (skb) {
 					optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4);
 					opt->is_changed = 1;
 				}
 			}
+			opt->ts = optptr - iph;
 			break;
 		case IPOPT_RA:
 			if (optlen < 4) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 47e854fcae24..e22020790709 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -775,7 +775,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
 			 * Make sure that we have exactly size bytes
 			 * available to the caller, no more, no less.
 			 */
-			skb->avail_size = size;
+			skb->reserved_tailroom = skb->end - skb->tail - size;
 			return skb;
 		}
 		__kfree_skb(skb);
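
A quick standalone check of the arithmetic in the hunk above, assuming the usual relationship that the space reported to callers is the tailroom (skb->end - skb->tail) minus skb->reserved_tailroom, as a skb_availroom()-style helper would compute it (that helper is not part of this diff). The offsets below are made-up numbers:

/* Hypothetical skb offsets; only the arithmetic matters here. */
#include <assert.h>

int main(void)
{
	unsigned int end = 4096, tail = 1024;	/* pretend skb->end / skb->tail */
	unsigned int size = 1460;		/* bytes the caller asked for */

	/* What the patched sk_stream_alloc_skb() stores ... */
	unsigned int reserved_tailroom = end - tail - size;
	/* ... and what a skb_availroom()-style calculation then reports. */
	unsigned int avail = (end - tail) - reserved_tailroom;

	assert(avail == size);	/* exactly size bytes, no more, no less */
	return 0;
}
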
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a759e19496d2..0d9bdacce99f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5485,6 +5485,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			if (tcp_checksum_complete_user(sk, skb))
 				goto csum_error;
 
+			if ((int)skb->truesize > sk->sk_forward_alloc)
+				goto step5;
+
 			/* Predicted packet is in window by definition.
 			 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
 			 * Hence, check seq<=rcv_wup reduces to:
@@ -5496,9 +5499,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
 			tcp_rcv_rtt_measure_ts(sk, skb);
 
-			if ((int)skb->truesize > sk->sk_forward_alloc)
-				goto step5;
-
 			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
 			/* Bulk data transfer: receiver */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 4a8ec457310f..d09203c63264 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -274,13 +274,6 @@ static void tcp_v4_mtu_reduced(struct sock *sk)
 	struct inet_sock *inet = inet_sk(sk);
 	u32 mtu = tcp_sk(sk)->mtu_info;
 
-	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
-	 * send out by Linux are always <576bytes so they should go through
-	 * unfragmented).
-	 */
-	if (sk->sk_state == TCP_LISTEN)
-		return;
-
 	dst = inet_csk_update_pmtu(sk, mtu);
 	if (!dst)
 		return;
@@ -408,6 +401,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		goto out;
 
 	if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
+		/* We are not interested in TCP_LISTEN and open_requests
+		 * (SYN-ACKs send out by Linux are always <576bytes so
+		 * they should go through unfragmented).
+		 */
+		if (sk->sk_state == TCP_LISTEN)
+			goto out;
+
 		tp->mtu_info = info;
 		if (!sock_owned_by_user(sk)) {
 			tcp_v4_mtu_reduced(sk);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e2b4461074da..817fbb396bc8 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1298,7 +1298,6 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
 	eat = min_t(int, len, skb_headlen(skb));
 	if (eat) {
 		__skb_pull(skb, eat);
-		skb->avail_size -= eat;
 		len -= eat;
 		if (!len)
 			return;