author		Linus Torvalds <torvalds@linux-foundation.org>	2016-02-01 18:56:08 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-02-01 18:56:08 -0500
commit		34229b277480f46c1e9a19f027f30b074512e68b (patch)
tree		90d8b43ebceb850b0e7852d75283aebbd2abbc00 /net/ipv4
parent		2c923414d3963b959f65a8a6031972402e6a34a5 (diff)
parent		53729eb174c1589f9185340ffe8c10b3f39f3ef3 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "This looks like a lot but it's a mixture of regression fixes as well
  as fixes for longer standing issues.

   1) Fix on-channel cancellation in mac80211, from Johannes Berg.

   2) Handle CHECKSUM_COMPLETE properly in xt_TCPMSS netfilter xtables
      module, from Eric Dumazet.

   3) Avoid infinite loop in UDP SO_REUSEPORT logic, also from Eric
      Dumazet.

   4) Avoid a NULL deref if we try to set SO_REUSEPORT after a socket
      is bound, from Craig Gallek.

   5) GRO key comparisons don't take lightweight tunnels into account,
      from Jesse Gross.

   6) Fix struct pid leak via SCM credentials in AF_UNIX, from Eric
      Dumazet.

   7) We need to set the rtnl_link_ops of ipv6 SIT tunnels before we
      register them, otherwise the NEWLINK netlink message is missing
      the proper attributes.  From Thadeu Lima de Souza Cascardo.

   8) Several Spectrum chip bug fixes for the mlxsw switch driver, from
      Ido Schimmel.

   9) Handle fragments properly in ipv4 early socket demux, from Eric
      Dumazet.

  10) Don't ignore the ifindex key specifier on ipv6 output route
      lookups, from Paolo Abeni"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (128 commits)
  tcp: avoid cwnd undo after receiving ECN
  irda: fix a potential use-after-free in ircomm_param_request
  net: tg3: avoid uninitialized variable warning
  net: nb8800: avoid uninitialized variable warning
  net: vxge: avoid unused function warnings
  net: bgmac: clarify CONFIG_BCMA dependency
  net: hp100: remove unnecessary #ifdefs
  net: davinci_cpdma: use dma_addr_t for DMA address
  ipv6/udp: use sticky pktinfo egress ifindex on connect()
  ipv6: enforce flowi6_oif usage in ip6_dst_lookup_tail()
  netlink: not trim skb for mmaped socket when dump
  vxlan: fix a out of bounds access in __vxlan_find_mac
  net: dsa: mv88e6xxx: fix port VLAN maps
  fib_trie: Fix shift by 32 in fib_table_lookup
  net: moxart: use correct accessors for DMA memory
  ipv4: ipconfig: avoid unused ic_proto_used symbol
  bnxt_en: Fix crash in bnxt_free_tx_skbs() during tx timeout.
  bnxt_en: Exclude rx_drop_pkts hw counter from the stack's rx_dropped counter.
  bnxt_en: Ring free response from close path should use completion ring
  net_sched: drr: check for NULL pointer in drr_dequeue
  ...
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/Kconfig                       1
-rw-r--r--  net/ipv4/fib_trie.c                    7
-rw-r--r--  net/ipv4/inet_diag.c                  21
-rw-r--r--  net/ipv4/ip_fragment.c                 1
-rw-r--r--  net/ipv4/ip_input.c                    5
-rw-r--r--  net/ipv4/ipconfig.c                    4
-rw-r--r--  net/ipv4/netfilter/nf_defrag_ipv4.c    2
-rw-r--r--  net/ipv4/tcp.c                        12
-rw-r--r--  net/ipv4/tcp_input.c                  12
-rw-r--r--  net/ipv4/tcp_ipv4.c                   13
-rw-r--r--  net/ipv4/udp.c                        32
11 files changed, 70 insertions, 40 deletions
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index c22920525e5d..775824720b6b 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -353,6 +353,7 @@ config INET_ESP
 	select CRYPTO_CBC
 	select CRYPTO_SHA1
 	select CRYPTO_DES
+	select CRYPTO_ECHAINIV
 	---help---
 	  Support for IPsec ESP.
 
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 7aea0ccb6be6..d07fc076bea0 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1394,9 +1394,10 @@ found:
 		struct fib_info *fi = fa->fa_info;
 		int nhsel, err;
 
-		if ((index >= (1ul << fa->fa_slen)) &&
-		    ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen != KEYLENGTH)))
-			continue;
+		if ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen < KEYLENGTH)) {
+			if (index >= (1ul << fa->fa_slen))
+				continue;
+		}
 		if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
 			continue;
 		if (fi->fib_dead)
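
Note on this hunk: the old check computed 1ul << fa->fa_slen before testing whether the shift width was legal; on a 32-bit build (BITS_PER_LONG == KEYLENGTH == 32) a full-length /32 prefix made that a shift by 32 bits, which is undefined behaviour in C. The new code only evaluates the shift when fa->fa_slen < KEYLENGTH (or when long is wider than the key). A minimal user-space sketch of the same guard, with hypothetical names and not the kernel code itself:

#include <limits.h>
#include <stdio.h>

#define KEYLENGTH     32	/* width of an IPv4 key, as in fib_trie.c */
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

/* Hypothetical helper: is 'index' outside a prefix of length 'slen'?
 * The shift is guarded so 1ul << 32 is never evaluated on a 32-bit long.
 */
static int index_out_of_range(unsigned long index, unsigned char slen)
{
	if (BITS_PER_LONG > KEYLENGTH || slen < KEYLENGTH)
		return index >= (1ul << slen);
	return 0;	/* slen == KEYLENGTH: every index fits */
}

int main(void)
{
	printf("%d\n", index_out_of_range(5, 32));	/* 0: full-length prefix */
	printf("%d\n", index_out_of_range(5, 2));	/* 1: 5 >= (1ul << 2) */
	return 0;
}
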
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 8bb8e7ad8548..6029157a19ed 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -361,13 +361,20 @@ struct sock *inet_diag_find_one_icsk(struct net *net,
 				 req->id.idiag_dport, req->id.idiag_src[0],
 				 req->id.idiag_sport, req->id.idiag_if);
 #if IS_ENABLED(CONFIG_IPV6)
-	else if (req->sdiag_family == AF_INET6)
-		sk = inet6_lookup(net, hashinfo,
-				  (struct in6_addr *)req->id.idiag_dst,
-				  req->id.idiag_dport,
-				  (struct in6_addr *)req->id.idiag_src,
-				  req->id.idiag_sport,
-				  req->id.idiag_if);
+	else if (req->sdiag_family == AF_INET6) {
+		if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
+		    ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
+			sk = inet_lookup(net, hashinfo, req->id.idiag_dst[3],
+					 req->id.idiag_dport, req->id.idiag_src[3],
+					 req->id.idiag_sport, req->id.idiag_if);
+		else
+			sk = inet6_lookup(net, hashinfo,
+					  (struct in6_addr *)req->id.idiag_dst,
+					  req->id.idiag_dport,
+					  (struct in6_addr *)req->id.idiag_src,
+					  req->id.idiag_sport,
+					  req->id.idiag_if);
+	}
 #endif
 	else
 		return ERR_PTR(-EINVAL);
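
Note on this hunk: an AF_INET6 diag request may actually describe a v4-mapped socket. When both endpoints have the form ::ffff:a.b.c.d, the embedded IPv4 address lives in the last 32-bit word of the in6_addr (hence idiag_dst[3] / idiag_src[3]), and the lookup is routed to the IPv4 hash table instead of inet6_lookup(). A small stand-alone sketch of the v4-mapped test and extraction (illustrative only, not the kernel helpers):

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

/* ::ffff:a.b.c.d - ten zero bytes, then 0xff 0xff, then the IPv4 address
 * in the final four bytes (the fourth 32-bit word of the in6_addr).
 */
static int is_v4mapped(const struct in6_addr *a)
{
	static const unsigned char prefix[12] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff
	};
	return memcmp(a, prefix, sizeof(prefix)) == 0;
}

int main(void)
{
	struct in6_addr a;
	char buf[INET_ADDRSTRLEN];

	inet_pton(AF_INET6, "::ffff:192.0.2.1", &a);
	if (is_v4mapped(&a)) {
		inet_ntop(AF_INET, &a.s6_addr[12], buf, sizeof(buf));
		printf("v4-mapped, inner IPv4 address %s\n", buf);
	}
	return 0;
}
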
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 3f00810b7288..187c6fcc3027 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -661,6 +661,7 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
 	struct ipq *qp;
 
 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
+	skb_orphan(skb);
 
 	/* Lookup (or create) queue header */
 	qp = ip_find(net, ip_hdr(skb), user, vif);
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index b1209b63381f..d77eb0c3b684 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -316,7 +316,10 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 	const struct iphdr *iph = ip_hdr(skb);
 	struct rtable *rt;
 
-	if (sysctl_ip_early_demux && !skb_dst(skb) && !skb->sk) {
+	if (sysctl_ip_early_demux &&
+	    !skb_dst(skb) &&
+	    !skb->sk &&
+	    !ip_is_fragment(iph)) {
 		const struct net_protocol *ipprot;
 		int protocol = iph->protocol;
 
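
Note on this hunk: early demux binds a packet to a socket (and its cached route) using the transport-layer ports, but only the first fragment of a datagram carries those ports, so fragments must skip early demux and go through reassembly first; that is what the added !ip_is_fragment(iph) test does. For reference, a packet counts as a fragment when the More Fragments bit is set or the fragment offset is non-zero; a small sketch of that test using the user-space header definitions (illustrative, mirroring what the kernel's ip_is_fragment() checks):

#include <netinet/ip.h>
#include <stdbool.h>

/* IP_MF is the More Fragments flag, IP_OFFMASK covers the 13-bit fragment
 * offset; both live in the ip_off/frag_off field, carried in network order.
 */
bool ip_header_is_fragment(const struct ip *iph)
{
	return (ntohs(iph->ip_off) & (IP_MF | IP_OFFMASK)) != 0;
}
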
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 67f7c9de0b16..2ed9dd2b5f2f 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -143,7 +143,11 @@ static char dhcp_client_identifier[253] __initdata;
 
 /* Persistent data: */
 
+#ifdef IPCONFIG_DYNAMIC
 static int ic_proto_used;			/* Protocol used, if any */
+#else
+#define ic_proto_used 0
+#endif
 static __be32 ic_nameservers[CONF_NAMESERVERS_MAX]; /* DNS Server IP addresses */
 static u8 ic_domain[64];		/* DNS (not NIS) domain name */
 
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 6fb869f646bf..a04dee536b8e 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -27,8 +27,6 @@ static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb,
 {
 	int err;
 
-	skb_orphan(skb);
-
 	local_bh_disable();
 	err = ip_defrag(net, skb, user);
 	local_bh_enable();
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index fd17eec93525..19746b3fcbbe 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -279,6 +279,7 @@
 
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
+#include <asm/unaligned.h>
 #include <net/busy_poll.h>
 
 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
@@ -2638,6 +2639,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 now = tcp_time_stamp;
 	unsigned int start;
+	u64 rate64;
 	u32 rate;
 
 	memset(info, 0, sizeof(*info));
@@ -2703,15 +2705,17 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 	info->tcpi_total_retrans = tp->total_retrans;
 
 	rate = READ_ONCE(sk->sk_pacing_rate);
-	info->tcpi_pacing_rate = rate != ~0U ? rate : ~0ULL;
+	rate64 = rate != ~0U ? rate : ~0ULL;
+	put_unaligned(rate64, &info->tcpi_pacing_rate);
 
 	rate = READ_ONCE(sk->sk_max_pacing_rate);
-	info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;
+	rate64 = rate != ~0U ? rate : ~0ULL;
+	put_unaligned(rate64, &info->tcpi_max_pacing_rate);
 
 	do {
 		start = u64_stats_fetch_begin_irq(&tp->syncp);
-		info->tcpi_bytes_acked = tp->bytes_acked;
-		info->tcpi_bytes_received = tp->bytes_received;
+		put_unaligned(tp->bytes_acked, &info->tcpi_bytes_acked);
+		put_unaligned(tp->bytes_received, &info->tcpi_bytes_received);
 	} while (u64_stats_fetch_retry_irq(&tp->syncp, start));
 	info->tcpi_segs_out = tp->segs_out;
 	info->tcpi_segs_in = tp->segs_in;
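
Note on this hunk: the u64 fields written here may not be 8-byte aligned, for instance when tcp_info is emitted as a netlink attribute for the diag interface and the structure starts on a 4-byte boundary; a plain 64-bit store to such an address traps on strict-alignment architectures. The stores therefore go through put_unaligned() (hence the new <asm/unaligned.h> include). In portable C the same effect is a memcpy() into the possibly misaligned destination; a minimal sketch with a hypothetical helper name:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the kernel's put_unaligned(): memcpy() is the
 * portable way to store a 64-bit value at an address that may only be
 * 4-byte aligned; compilers lower it to an unaligned-safe store.
 */
static void put_unaligned_u64(uint64_t val, void *p)
{
	memcpy(p, &val, sizeof(val));
}

int main(void)
{
	unsigned char buf[12] = { 0 };

	/* buf + 4 is in general only 4-byte aligned; dereferencing it as a
	 * uint64_t * directly would be undefined behaviour.
	 */
	put_unaligned_u64(0x1122334455667788ULL, buf + 4);
	printf("first stored byte: 0x%02x\n", buf[4]);
	return 0;
}
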
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0003d409fec5..1c2a73406261 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2164,8 +2164,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
-	int cnt, oldcnt;
-	int err;
+	int cnt, oldcnt, lost;
 	unsigned int mss;
 	/* Use SACK to deduce losses of new sequences sent during recovery */
 	const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq;
@@ -2205,9 +2204,10 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
 			break;
 
 		mss = tcp_skb_mss(skb);
-		err = tcp_fragment(sk, skb, (packets - oldcnt) * mss,
-				   mss, GFP_ATOMIC);
-		if (err < 0)
+		/* If needed, chop off the prefix to mark as lost. */
+		lost = (packets - oldcnt) * mss;
+		if (lost < skb->len &&
+		    tcp_fragment(sk, skb, lost, mss, GFP_ATOMIC) < 0)
 			break;
 		cnt = packets;
 	}
@@ -2366,8 +2366,6 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
 			tp->snd_ssthresh = tp->prior_ssthresh;
 			tcp_ecn_withdraw_cwr(tp);
 		}
-	} else {
-		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
 	}
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 	tp->undo_marker = 0;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5ced3e4013e3..a4d523709ab3 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -707,7 +707,8 @@ release_sk1:
    outside socket context is ugly, certainly. What can I do?
  */
 
-static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+static void tcp_v4_send_ack(struct net *net,
+			    struct sk_buff *skb, u32 seq, u32 ack,
 			    u32 win, u32 tsval, u32 tsecr, int oif,
 			    struct tcp_md5sig_key *key,
 			    int reply_flags, u8 tos)
@@ -722,7 +723,6 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 			];
 	} rep;
 	struct ip_reply_arg arg;
-	struct net *net = dev_net(skb_dst(skb)->dev);
 
 	memset(&rep.th, 0, sizeof(struct tcphdr));
 	memset(&arg, 0, sizeof(arg));
@@ -784,7 +784,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 	struct inet_timewait_sock *tw = inet_twsk(sk);
 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 
-	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+	tcp_v4_send_ack(sock_net(sk), skb,
+			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 			tcp_time_stamp + tcptw->tw_ts_offset,
 			tcptw->tw_ts_recent,
@@ -803,8 +804,10 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
 	 */
-	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
-			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
+	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
+					     tcp_sk(sk)->snd_nxt;
+
+	tcp_v4_send_ack(sock_net(sk), skb, seq,
 			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
 			tcp_time_stamp,
 			req->ts_recent,
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index dc45b538e237..be0b21852b13 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -499,6 +499,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
 	struct sock *sk, *result;
 	struct hlist_nulls_node *node;
 	int score, badness, matches = 0, reuseport = 0;
+	bool select_ok = true;
 	u32 hash = 0;
 
 begin:
@@ -512,14 +513,18 @@ begin:
 			badness = score;
 			reuseport = sk->sk_reuseport;
 			if (reuseport) {
-				struct sock *sk2;
 				hash = udp_ehashfn(net, daddr, hnum,
 						   saddr, sport);
-				sk2 = reuseport_select_sock(sk, hash, skb,
-							sizeof(struct udphdr));
-				if (sk2) {
-					result = sk2;
-					goto found;
+				if (select_ok) {
+					struct sock *sk2;
+
+					sk2 = reuseport_select_sock(sk, hash, skb,
+							sizeof(struct udphdr));
+					if (sk2) {
+						result = sk2;
+						select_ok = false;
+						goto found;
+					}
 				}
 				matches = 1;
 			}
@@ -563,6 +568,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
 	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
 	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
 	int score, badness, matches = 0, reuseport = 0;
+	bool select_ok = true;
 	u32 hash = 0;
 
 	rcu_read_lock();
@@ -601,14 +607,18 @@ begin:
 			badness = score;
 			reuseport = sk->sk_reuseport;
 			if (reuseport) {
-				struct sock *sk2;
 				hash = udp_ehashfn(net, daddr, hnum,
 						   saddr, sport);
-				sk2 = reuseport_select_sock(sk, hash, skb,
+				if (select_ok) {
+					struct sock *sk2;
+
+					sk2 = reuseport_select_sock(sk, hash, skb,
 						sizeof(struct udphdr));
 				if (sk2) {
 					result = sk2;
-					goto found;
+					select_ok = false;
+					goto found;
+					}
 				}
 				matches = 1;
 			}
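
Note on these two hunks: both lookup loops walk an RCU "nulls" hash chain and jump back to begin: when the chain turns out to have been spliced to another slot, or when the later validation of the chosen socket fails. Before this change every restart could call reuseport_select_sock() again, so with SO_REUSEPORT sockets the lookup could keep re-selecting and restarting indefinitely; the new select_ok flag makes the group selection a one-shot step, and a restart falls back to the plain best-score scan. A tiny stand-alone illustration of the pattern (hypothetical, not the kernel code): a retry loop whose special-case step is allowed to run only once.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative sketch only: a lookup that may restart (like the RCU "nulls"
 * restart via 'goto begin' in udp.c) must not keep re-running a step that can
 * itself trigger further restarts, or it may never terminate.  The one-shot
 * flag bounds that step to a single execution per lookup.
 */
static int validation_failures = 2;	/* pretend the re-check fails twice */

static bool recheck_ok(void)
{
	return validation_failures-- <= 0;
}

int main(void)
{
	bool select_ok = true;
	int passes = 0;
	int result;

begin:
	passes++;
	result = 1;			/* best-scoring socket from the scan */
	if (select_ok) {
		result = 2;		/* socket picked from the reuseport group */
		select_ok = false;	/* never pick again after a restart */
	}
	if (!recheck_ok())		/* corresponds to the restart paths */
		goto begin;

	printf("result=%d after %d passes\n", result, passes);
	return 0;
}
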