about summary refs log tree commit diff stats
path: root/net/ipv4/tcp_ipv4.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--  net/ipv4/tcp_ipv4.c  35
1 files changed, 23 insertions, 12 deletions
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ba09016d1bfd..65947c1f4733 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -587,7 +587,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
587 } rep; 587 } rep;
588 struct ip_reply_arg arg; 588 struct ip_reply_arg arg;
589#ifdef CONFIG_TCP_MD5SIG 589#ifdef CONFIG_TCP_MD5SIG
590 struct tcp_md5sig_key *key; 590 struct tcp_md5sig_key *key = NULL;
591 const __u8 *hash_location = NULL; 591 const __u8 *hash_location = NULL;
592 unsigned char newhash[16]; 592 unsigned char newhash[16];
593 int genhash; 593 int genhash;
@@ -627,7 +627,10 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
627 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev); 627 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
628#ifdef CONFIG_TCP_MD5SIG 628#ifdef CONFIG_TCP_MD5SIG
629 hash_location = tcp_parse_md5sig_option(th); 629 hash_location = tcp_parse_md5sig_option(th);
630 if (!sk && hash_location) { 630 if (sk && sk_fullsock(sk)) {
631 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
632 &ip_hdr(skb)->saddr, AF_INET);
633 } else if (hash_location) {
631 /* 634 /*
632 * active side is lost. Try to find listening socket through 635 * active side is lost. Try to find listening socket through
633 * source port, and then find md5 key through listening socket. 636 * source port, and then find md5 key through listening socket.
@@ -651,10 +654,6 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
651 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb); 654 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
652 if (genhash || memcmp(hash_location, newhash, 16) != 0) 655 if (genhash || memcmp(hash_location, newhash, 16) != 0)
653 goto release_sk1; 656 goto release_sk1;
654 } else {
655 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
656 &ip_hdr(skb)->saddr,
657 AF_INET) : NULL;
658 } 657 }
659 658
660 if (key) { 659 if (key) {
@@ -675,7 +674,8 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
675 ip_hdr(skb)->saddr, /* XXX */ 674 ip_hdr(skb)->saddr, /* XXX */
676 arg.iov[0].iov_len, IPPROTO_TCP, 0); 675 arg.iov[0].iov_len, IPPROTO_TCP, 0);
677 arg.csumoffset = offsetof(struct tcphdr, check) / 2; 676 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
678 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; 677 arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
678
679 /* When socket is gone, all binding information is lost. 679 /* When socket is gone, all binding information is lost.
680 * routing might fail in this case. No choice here, if we choose to force 680 * routing might fail in this case. No choice here, if we choose to force
681 * input interface, we will misroute in case of asymmetric route. 681 * input interface, we will misroute in case of asymmetric route.
@@ -683,6 +683,9 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
683 if (sk) 683 if (sk)
684 arg.bound_dev_if = sk->sk_bound_dev_if; 684 arg.bound_dev_if = sk->sk_bound_dev_if;
685 685
686 BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
687 offsetof(struct inet_timewait_sock, tw_bound_dev_if));
688
686 arg.tos = ip_hdr(skb)->tos; 689 arg.tos = ip_hdr(skb)->tos;
687 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk), 690 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
688 skb, &TCP_SKB_CB(skb)->header.h4.opt, 691 skb, &TCP_SKB_CB(skb)->header.h4.opt,
@@ -921,7 +924,8 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
921 } 924 }
922 925
923 md5sig = rcu_dereference_protected(tp->md5sig_info, 926 md5sig = rcu_dereference_protected(tp->md5sig_info,
924 sock_owned_by_user(sk)); 927 sock_owned_by_user(sk) ||
928 lockdep_is_held(&sk->sk_lock.slock));
925 if (!md5sig) { 929 if (!md5sig) {
926 md5sig = kmalloc(sizeof(*md5sig), gfp); 930 md5sig = kmalloc(sizeof(*md5sig), gfp);
927 if (!md5sig) 931 if (!md5sig)
@@ -1275,6 +1279,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1275 ireq = inet_rsk(req); 1279 ireq = inet_rsk(req);
1276 sk_daddr_set(newsk, ireq->ir_rmt_addr); 1280 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1277 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr); 1281 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1282 newsk->sk_bound_dev_if = ireq->ir_iif;
1278 newinet->inet_saddr = ireq->ir_loc_addr; 1283 newinet->inet_saddr = ireq->ir_loc_addr;
1279 inet_opt = ireq->opt; 1284 inet_opt = ireq->opt;
1280 rcu_assign_pointer(newinet->inet_opt, inet_opt); 1285 rcu_assign_pointer(newinet->inet_opt, inet_opt);
@@ -1492,7 +1497,7 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1492 if (likely(sk->sk_rx_dst)) 1497 if (likely(sk->sk_rx_dst))
1493 skb_dst_drop(skb); 1498 skb_dst_drop(skb);
1494 else 1499 else
1495 skb_dst_force(skb); 1500 skb_dst_force_safe(skb);
1496 1501
1497 __skb_queue_tail(&tp->ucopy.prequeue, skb); 1502 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1498 tp->ucopy.memory += skb->truesize; 1503 tp->ucopy.memory += skb->truesize;
@@ -1704,7 +1709,9 @@ do_time_wait:
1704 tcp_v4_timewait_ack(sk, skb); 1709 tcp_v4_timewait_ack(sk, skb);
1705 break; 1710 break;
1706 case TCP_TW_RST: 1711 case TCP_TW_RST:
1707 goto no_tcp_socket; 1712 tcp_v4_send_reset(sk, skb);
1713 inet_twsk_deschedule_put(inet_twsk(sk));
1714 goto discard_it;
1708 case TCP_TW_SUCCESS:; 1715 case TCP_TW_SUCCESS:;
1709 } 1716 }
1710 goto discard_it; 1717 goto discard_it;
@@ -1720,8 +1727,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1720{ 1727{
1721 struct dst_entry *dst = skb_dst(skb); 1728 struct dst_entry *dst = skb_dst(skb);
1722 1729
1723 if (dst) { 1730 if (dst && dst_hold_safe(dst)) {
1724 dst_hold(dst);
1725 sk->sk_rx_dst = dst; 1731 sk->sk_rx_dst = dst;
1726 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; 1732 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1727 } 1733 }
@@ -2341,6 +2347,7 @@ struct proto tcp_prot = {
2341 .destroy_cgroup = tcp_destroy_cgroup, 2347 .destroy_cgroup = tcp_destroy_cgroup,
2342 .proto_cgroup = tcp_proto_cgroup, 2348 .proto_cgroup = tcp_proto_cgroup,
2343#endif 2349#endif
2350 .diag_destroy = tcp_abort,
2344}; 2351};
2345EXPORT_SYMBOL(tcp_prot); 2352EXPORT_SYMBOL(tcp_prot);
2346 2353
@@ -2378,6 +2385,10 @@ static int __net_init tcp_sk_init(struct net *net)
2378 net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD; 2385 net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2379 net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL; 2386 net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2380 2387
2388 net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
2389 net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
2390 net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
2391
2381 return 0; 2392 return 0;
2382fail: 2393fail:
2383 tcp_sk_exit(net); 2394 tcp_sk_exit(net);