Diffstat (limited to 'net/ipv6/tcp_ipv6.c')
 -rw-r--r--  net/ipv6/tcp_ipv6.c | 170
 1 files changed, 54 insertions, 116 deletions
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 1f5e62229aaa..b6575d665568 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -104,19 +104,6 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
         }
 }
 
-static void tcp_v6_hash(struct sock *sk)
-{
-        if (sk->sk_state != TCP_CLOSE) {
-                if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
-                        tcp_prot.hash(sk);
-                        return;
-                }
-                local_bh_disable();
-                __inet6_hash(sk, NULL);
-                local_bh_enable();
-        }
-}
-
 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
 {
         return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
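For context on the hunk above: the IPv6-only hash wrapper can go because socket hashing is reached through the protocol ops table, and this diff later points tcpv6_prot.hash at the generic inet_hash() (see the final hunk) and switches __inet6_hash() to __inet_hash(). Below is a minimal, compilable userspace model of that ops-table dispatch; it is purely illustrative, every name in it is made up, and none of it is kernel API.

#include <stdio.h>

/* Illustrative model only: two per-family ops tables sharing one hash
 * callback, mirroring how tcpv6_prot.hash can point at the same generic
 * helper as tcp_prot.hash once the tcp_v6_hash() wrapper is removed.
 */
struct sock_model {
        const char *family;
};

struct proto_model {
        const char *name;
        void (*hash)(struct sock_model *sk);
};

static void shared_hash(struct sock_model *sk)
{
        printf("%s socket hashed by the shared helper\n", sk->family);
}

static struct proto_model tcp_model   = { "tcp",   shared_hash };
static struct proto_model tcpv6_model = { "tcpv6", shared_hash };

int main(void)
{
        struct sock_model v4 = { "IPv4" };
        struct sock_model v6 = { "IPv6" };

        tcp_model.hash(&v4);
        tcpv6_model.hash(&v6);
        return 0;
}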
@@ -154,7 +141,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
         if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
                 struct ip6_flowlabel *flowlabel;
                 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
-                if (flowlabel == NULL)
+                if (!flowlabel)
                         return -EINVAL;
                 fl6_sock_release(flowlabel);
         }
@@ -233,11 +220,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                         tp->af_specific = &tcp_sock_ipv6_specific;
 #endif
                         goto failure;
-                } else {
-                        ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
-                        ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
-                                               &sk->sk_v6_rcv_saddr);
                 }
+                np->saddr = sk->sk_v6_rcv_saddr;
 
                 return err;
         }
@@ -263,7 +247,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                 goto failure;
         }
 
-        if (saddr == NULL) {
+        if (!saddr) {
                 saddr = &fl6.saddr;
                 sk->sk_v6_rcv_saddr = *saddr;
         }
@@ -340,18 +324,20 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 {
         const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
         const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
+        struct net *net = dev_net(skb->dev);
+        struct request_sock *fastopen;
         struct ipv6_pinfo *np;
-        struct sock *sk;
-        int err;
         struct tcp_sock *tp;
-        struct request_sock *fastopen;
         __u32 seq, snd_una;
-        struct net *net = dev_net(skb->dev);
+        struct sock *sk;
+        int err;
 
-        sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
-                        th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
+        sk = __inet6_lookup_established(net, &tcp_hashinfo,
+                                        &hdr->daddr, th->dest,
+                                        &hdr->saddr, ntohs(th->source),
+                                        skb->dev->ifindex);
 
-        if (sk == NULL) {
+        if (!sk) {
                 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
                                    ICMP6_MIB_INERRORS);
                 return;
@@ -361,6 +347,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                 inet_twsk_put(inet_twsk(sk));
                 return;
         }
+        seq = ntohl(th->seq);
+        if (sk->sk_state == TCP_NEW_SYN_RECV)
+                return tcp_req_err(sk, seq);
 
         bh_lock_sock(sk);
         if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
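For context on the TCP_NEW_SYN_RECV branch added above: errors aimed at an embryonic request socket are now handed to tcp_req_err(), which performs the same sequence-number-vs-ISN check that the removed TCP_LISTEN branch further down in this diff used to do inline. The snippet below is a rough, compilable userspace model of just that comparison, under the assumption that this check is what is being centralised; the struct and function names are invented for illustration and are not kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a SYN_RECV request socket: only the field the
 * check needs, the initial sequence number we sent in our SYN-ACK.
 */
struct req_model {
        uint32_t snt_isn;
};

/* Model of the idea behind the check: an ICMP error only applies to a
 * pending request if the quoted sequence number matches that request's ISN;
 * anything else is treated as an out-of-window ICMP and ignored.
 */
static bool icmp_applies_to_req(const struct req_model *req, uint32_t quoted_seq)
{
        return quoted_seq == req->snt_isn;
}

int main(void)
{
        struct req_model req = { .snt_isn = 0x12345678u };

        printf("matching seq: %d\n", icmp_applies_to_req(&req, 0x12345678u));
        printf("stray seq:    %d\n", icmp_applies_to_req(&req, 0xdeadbeefu));
        return 0;
}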
@@ -375,7 +364,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
         }
 
         tp = tcp_sk(sk);
-        seq = ntohl(th->seq);
         /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
         fastopen = tp->fastopen_rsk;
         snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
@@ -419,37 +407,12 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
         /* Might be for an request_sock */
         switch (sk->sk_state) {
-                struct request_sock *req, **prev;
-        case TCP_LISTEN:
-                if (sock_owned_by_user(sk))
-                        goto out;
-
-                /* Note : We use inet6_iif() here, not tcp_v6_iif() */
-                req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
-                                           &hdr->saddr, inet6_iif(skb));
-                if (!req)
-                        goto out;
-
-                /* ICMPs are not backlogged, hence we cannot get
-                 * an established socket here.
-                 */
-                WARN_ON(req->sk != NULL);
-
-                if (seq != tcp_rsk(req)->snt_isn) {
-                        NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-                        goto out;
-                }
-
-                inet_csk_reqsk_queue_drop(sk, req, prev);
-                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
-                goto out;
-
         case TCP_SYN_SENT:
         case TCP_SYN_RECV:
                 /* Only in fast or simultaneous open. If a fast open socket is
                  * is already accepted it is treated as a connected one below.
                  */
-                if (fastopen && fastopen->sk == NULL)
+                if (fastopen && !fastopen->sk)
                         break;
 
                 if (!sock_owned_by_user(sk)) {
@@ -497,7 +460,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
                                     &ireq->ir_v6_rmt_addr);
 
                 fl6->daddr = ireq->ir_v6_rmt_addr;
-                if (np->repflow && (ireq->pktopts != NULL))
+                if (np->repflow && ireq->pktopts)
                         fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 
                 skb_set_queue_mapping(skb, queue_mapping);
@@ -523,17 +486,11 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
 }
 
 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
-                                                struct sock *addr_sk)
+                                                const struct sock *addr_sk)
 {
         return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
 }
 
-static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
-                                                      struct request_sock *req)
-{
-        return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
-}
-
 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
                                  int optlen)
 {
@@ -619,9 +576,9 @@ clear_hash_noput:
         return 1;
 }
 
-static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
+static int tcp_v6_md5_hash_skb(char *md5_hash,
+                               const struct tcp_md5sig_key *key,
                                const struct sock *sk,
-                               const struct request_sock *req,
                                const struct sk_buff *skb)
 {
         const struct in6_addr *saddr, *daddr;
@@ -629,12 +586,9 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
         struct hash_desc *desc;
         const struct tcphdr *th = tcp_hdr(skb);
 
-        if (sk) {
-                saddr = &inet6_sk(sk)->saddr;
+        if (sk) { /* valid for establish/request sockets */
+                saddr = &sk->sk_v6_rcv_saddr;
                 daddr = &sk->sk_v6_daddr;
-        } else if (req) {
-                saddr = &inet_rsk(req)->ir_v6_loc_addr;
-                daddr = &inet_rsk(req)->ir_v6_rmt_addr;
         } else {
                 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
                 saddr = &ip6h->saddr;
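For context on the address selection above: with the request_sock argument dropped from tcp_v6_md5_hash_skb(), a single socket pointer (valid for established and request sockets, per the new comment) supplies the cached addresses, and the packet's IPv6 header is the fallback when no socket is given. A small, compilable sketch of that selection logic follows; all types and helpers below are stand-ins, not kernel definitions.

#include <stdio.h>

/* Illustrative model only: pick the source/destination addresses to hash
 * from the socket when one is available, otherwise from the packet header.
 */
struct addr6 { const char *txt; };

struct sock_model {
        struct addr6 v6_rcv_saddr;      /* stands in for sk->sk_v6_rcv_saddr */
        struct addr6 v6_daddr;          /* stands in for sk->sk_v6_daddr */
};

struct ip6hdr_model {
        struct addr6 saddr;
        struct addr6 daddr;
};

static void pick_md5_addrs(const struct sock_model *sk,
                           const struct ip6hdr_model *hdr,
                           const struct addr6 **saddr, const struct addr6 **daddr)
{
        if (sk) {               /* established or request socket */
                *saddr = &sk->v6_rcv_saddr;
                *daddr = &sk->v6_daddr;
        } else {                /* no socket, e.g. replying to an unknown flow */
                *saddr = &hdr->saddr;
                *daddr = &hdr->daddr;
        }
}

int main(void)
{
        struct ip6hdr_model hdr = { { "2001:db8::1" }, { "2001:db8::2" } };
        const struct addr6 *s, *d;

        pick_md5_addrs(NULL, &hdr, &s, &d);
        printf("no socket: hash %s -> %s\n", s->txt, d->txt);
        return 0;
}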
@@ -670,8 +624,7 @@ clear_hash_noput:
         return 1;
 }
 
-static int __tcp_v6_inbound_md5_hash(struct sock *sk,
-                                     const struct sk_buff *skb)
+static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 {
         const __u8 *hash_location = NULL;
         struct tcp_md5sig_key *hash_expected;
@@ -685,44 +638,32 @@ static int __tcp_v6_inbound_md5_hash(struct sock *sk,
 
         /* We've parsed the options - do we have a hash? */
         if (!hash_expected && !hash_location)
-                return 0;
+                return false;
 
         if (hash_expected && !hash_location) {
                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
-                return 1;
+                return true;
         }
 
         if (!hash_expected && hash_location) {
                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
-                return 1;
+                return true;
         }
 
         /* check the signature */
         genhash = tcp_v6_md5_hash_skb(newhash,
                                       hash_expected,
-                                      NULL, NULL, skb);
+                                      NULL, skb);
 
         if (genhash || memcmp(hash_location, newhash, 16) != 0) {
                 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
                                      genhash ? "failed" : "mismatch",
                                      &ip6h->saddr, ntohs(th->source),
                                      &ip6h->daddr, ntohs(th->dest));
-                return 1;
+                return true;
         }
-        return 0;
+        return false;
 }
-
-static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
-{
-        int ret;
-
-        rcu_read_lock();
-        ret = __tcp_v6_inbound_md5_hash(sk, skb);
-        rcu_read_unlock();
-
-        return ret;
-}
-
 #endif
 
 static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
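For context on the conversion above: the inbound MD5 check now answers a single bool question, "should this segment be dropped?". Below is a self-contained model of the three outcomes visible in the hunk (neither side uses MD5, only one side does, both do and the digests must match); the names are invented and the string compare merely stands in for the real digest comparison, so this mirrors the control flow only.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Illustrative model of the drop decision: NULL means "no MD5 on this side". */
static bool md5_should_drop(const char *expected_digest, const char *received_digest)
{
        if (!expected_digest && !received_digest)
                return false;                   /* nothing expected, nothing sent */
        if (!expected_digest || !received_digest)
                return true;                    /* one side has MD5, the other not */
        return strcmp(expected_digest, received_digest) != 0;
}

int main(void)
{
        printf("%d\n", md5_should_drop(NULL, NULL));    /* 0: accept */
        printf("%d\n", md5_should_drop("abc", NULL));   /* 1: drop   */
        printf("%d\n", md5_should_drop("abc", "abc"));  /* 0: accept */
        printf("%d\n", md5_should_drop("abc", "xyz"));  /* 1: drop   */
        return 0;
}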
@@ -734,8 +675,6 @@ static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
         ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
         ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 
-        ireq->ir_iif = sk->sk_bound_dev_if;
-
         /* So that link locals have meaning */
         if (!sk->sk_bound_dev_if &&
             ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
@@ -774,7 +713,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
         .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
                      sizeof(struct ipv6hdr),
 #ifdef CONFIG_TCP_MD5SIG
-        .md5_lookup = tcp_v6_reqsk_md5_lookup,
+        .req_md5_lookup = tcp_v6_md5_lookup,
         .calc_md5_hash = tcp_v6_md5_hash_skb,
 #endif
         .init_req = tcp_v6_init_req,
@@ -811,7 +750,7 @@ static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
 
         buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
                          GFP_ATOMIC);
-        if (buff == NULL)
+        if (!buff)
                 return;
 
         skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
@@ -931,7 +870,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
                 if (!key)
                         goto release_sk1;
 
-                genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
+                genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
                 if (genhash || memcmp(hash_location, newhash, 16) != 0)
                         goto release_sk1;
         } else {
@@ -997,17 +936,20 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 
 static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
 {
-        struct request_sock *req, **prev;
         const struct tcphdr *th = tcp_hdr(skb);
+        struct request_sock *req;
         struct sock *nsk;
 
         /* Find possible connection requests. */
-        req = inet6_csk_search_req(sk, &prev, th->source,
+        req = inet6_csk_search_req(sk, th->source,
                                    &ipv6_hdr(skb)->saddr,
                                    &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
-        if (req)
-                return tcp_check_req(sk, skb, req, prev, false);
-
+        if (req) {
+                nsk = tcp_check_req(sk, skb, req, false);
+                if (!nsk)
+                        reqsk_put(req);
+                return nsk;
+        }
         nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
                                          &ipv6_hdr(skb)->saddr, th->source,
                                          &ipv6_hdr(skb)->daddr, ntohs(th->dest),
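For context on the reqsk_put() added above: the new flow implies that inet6_csk_search_req() hands back a counted reference to the request socket, so when tcp_check_req() produces no child the caller has to release that reference itself. The toy model below illustrates that ownership rule; every type and function in it is made up and it is not kernel code.

#include <stdio.h>

/* Illustrative model of the reference rule: the lookup gives us one
 * reference; if no child socket is produced, we must put it back.
 */
struct req_model {
        int refcnt;
};

static void req_put(struct req_model *req)
{
        if (--req->refcnt == 0)
                printf("request released\n");
}

/* Stand-in for tcp_check_req(): returns 1 if a child socket was created. */
static int check_req(struct req_model *req, int make_child)
{
        (void)req;
        return make_child;
}

int main(void)
{
        struct req_model req = { .refcnt = 1 };   /* the reference the lookup gave us */

        if (!check_req(&req, 0))
                req_put(&req);                    /* mirrors: if (!nsk) reqsk_put(req); */
        return 0;
}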
@@ -1067,7 +1009,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
                 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
 
-                if (newsk == NULL)
+                if (!newsk)
                         return NULL;
 
                 newtcp6sk = (struct tcp6_sock *)newsk;
@@ -1079,11 +1021,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
                 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-                ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);
-
-                ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
-
-                newsk->sk_v6_rcv_saddr = newnp->saddr;
+                newnp->saddr = newsk->sk_v6_rcv_saddr;
 
                 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
                 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
@@ -1128,7 +1066,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
         }
 
         newsk = tcp_create_openreq_child(sk, req, skb);
-        if (newsk == NULL)
+        if (!newsk)
                 goto out_nonewsk;
 
         /*
@@ -1170,7 +1108,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
         /* Clone pktoptions received with SYN */
         newnp->pktoptions = NULL;
-        if (ireq->pktopts != NULL) {
+        if (ireq->pktopts) {
                 newnp->pktoptions = skb_clone(ireq->pktopts,
                                               sk_gfp_atomic(sk, GFP_ATOMIC));
                 consume_skb(ireq->pktopts);
@@ -1215,7 +1153,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 #ifdef CONFIG_TCP_MD5SIG
         /* Copy over the MD5 key from the original socket */
         key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
-        if (key != NULL) {
+        if (key) {
                 /* We're using one, so create a matching key
                  * on the newsk structure. If we fail to get
                  * memory, then we end up not copying the key
@@ -1232,7 +1170,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                 tcp_done(newsk);
                 goto out;
         }
-        __inet6_hash(newsk, NULL);
+        __inet_hash(newsk, NULL);
 
         return newsk;
 
@@ -1547,9 +1485,9 @@ do_time_wait:
                                             &ipv6_hdr(skb)->saddr, th->source,
                                             &ipv6_hdr(skb)->daddr,
                                             ntohs(th->dest), tcp_v6_iif(skb));
-                if (sk2 != NULL) {
+                if (sk2) {
                         struct inet_timewait_sock *tw = inet_twsk(sk);
-                        inet_twsk_deschedule(tw, &tcp_death_row);
+                        inet_twsk_deschedule(tw);
                         inet_twsk_put(tw);
                         sk = sk2;
                         tcp_v6_restore_cb(skb);
@@ -1595,7 +1533,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
         if (sk) {
                 skb->sk = sk;
                 skb->destructor = sock_edemux;
-                if (sk->sk_state != TCP_TIME_WAIT) {
+                if (sk_fullsock(sk)) {
                         struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
 
                         if (dst)
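For context on the sk_fullsock() test above: a socket counts as a "full" socket in every state except the two mini-socket states, TCP_TIME_WAIT and TCP_NEW_SYN_RECV, so the early-demux path now skips both kinds of mini-sockets rather than only time-wait entries. The sketch below models the state-mask idea in plain C; the enum values are illustrative and do not match the kernel's numbering, and the function is not the kernel's sk_fullsock().

#include <stdbool.h>
#include <stdio.h>

/* Illustrative model: full socket unless in one of the two mini-socket states. */
enum state_model { EST = 1, TIME_WAIT, NEW_SYN_RECV, LISTEN };

static bool fullsock_model(enum state_model st)
{
        return (1u << st) & ~((1u << TIME_WAIT) | (1u << NEW_SYN_RECV));
}

int main(void)
{
        printf("EST:          %d\n", fullsock_model(EST));
        printf("TIME_WAIT:    %d\n", fullsock_model(TIME_WAIT));
        printf("NEW_SYN_RECV: %d\n", fullsock_model(NEW_SYN_RECV));
        printf("LISTEN:       %d\n", fullsock_model(LISTEN));
        return 0;
}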
@@ -1700,9 +1638,9 @@ static void tcp_v6_destroy_sock(struct sock *sk)
 #ifdef CONFIG_PROC_FS
 /* Proc filesystem TCPv6 sock list dumping. */
 static void get_openreq6(struct seq_file *seq,
-                         const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
+                         struct request_sock *req, int i, kuid_t uid)
 {
-        int ttd = req->expires - jiffies;
+        long ttd = req->rsk_timer.expires - jiffies;
         const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
         const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
 
@@ -1791,9 +1729,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 static void get_timewait6_sock(struct seq_file *seq,
                                struct inet_timewait_sock *tw, int i)
 {
+        long delta = tw->tw_timer.expires - jiffies;
         const struct in6_addr *dest, *src;
         __u16 destp, srcp;
-        s32 delta = tw->tw_ttd - inet_tw_time_stamp();
 
         dest = &tw->tw_v6_daddr;
         src = &tw->tw_v6_rcv_saddr;
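For context on the two proc-dump hunks above: the remaining lifetime is now read straight off the socket's timer as expires minus the current tick count, stored in a signed long so an already-expired timer still yields a sensible (negative) value. A tiny compilable model of that computation follows, with stand-in tick values; it is only a sketch of the arithmetic, not kernel code.

#include <stdio.h>

/* Illustrative model: remaining ticks = absolute expiry - current counter,
 * kept in a signed type so "already expired" shows up as a negative value.
 */
static long ticks_remaining(unsigned long expires, unsigned long now)
{
        return (long)(expires - now);
}

int main(void)
{
        printf("%ld\n", ticks_remaining(1000UL, 900UL));   /* 100 ticks left */
        printf("%ld\n", ticks_remaining(900UL, 1000UL));   /* -100: expired  */
        return 0;
}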
@@ -1838,7 +1776,7 @@ static int tcp6_seq_show(struct seq_file *seq, void *v)
                 get_tcp6_sock(seq, v, st->num);
                 break;
         case TCP_SEQ_STATE_OPENREQ:
-                get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
+                get_openreq6(seq, v, st->num, st->uid);
                 break;
         }
 out:
@@ -1902,7 +1840,7 @@ struct proto tcpv6_prot = {
         .sendpage = tcp_sendpage,
         .backlog_rcv = tcp_v6_do_rcv,
         .release_cb = tcp_release_cb,
-        .hash = tcp_v6_hash,
+        .hash = inet_hash,
         .unhash = inet_unhash,
         .get_port = inet_csk_get_port,
         .enter_memory_pressure = tcp_enter_memory_pressure,