Diffstat (limited to 'net/ipv6/tcp_ipv6.c')
-rw-r--r--   net/ipv6/tcp_ipv6.c   182
1 file changed, 65 insertions, 117 deletions
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5d46832c6f72..ad51df85aa00 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -104,19 +104,6 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
 	}
 }
 
-static void tcp_v6_hash(struct sock *sk)
-{
-	if (sk->sk_state != TCP_CLOSE) {
-		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
-			tcp_prot.hash(sk);
-			return;
-		}
-		local_bh_disable();
-		__inet6_hash(sk, NULL);
-		local_bh_enable();
-	}
-}
-
 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
 {
 	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
@@ -154,7 +141,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
 		struct ip6_flowlabel *flowlabel;
 		flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
-		if (flowlabel == NULL)
+		if (!flowlabel)
 			return -EINVAL;
 		fl6_sock_release(flowlabel);
 	}
@@ -233,11 +220,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 			tp->af_specific = &tcp_sock_ipv6_specific;
 #endif
 			goto failure;
-		} else {
-			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
-			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
-					       &sk->sk_v6_rcv_saddr);
 		}
+		np->saddr = sk->sk_v6_rcv_saddr;
 
 		return err;
 	}
@@ -263,7 +247,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 		goto failure;
 	}
 
-	if (saddr == NULL) {
+	if (!saddr) {
 		saddr = &fl6.saddr;
 		sk->sk_v6_rcv_saddr = *saddr;
 	}
@@ -340,18 +324,20 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 {
 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
+	struct net *net = dev_net(skb->dev);
+	struct request_sock *fastopen;
 	struct ipv6_pinfo *np;
-	struct sock *sk;
-	int err;
 	struct tcp_sock *tp;
-	struct request_sock *fastopen;
 	__u32 seq, snd_una;
-	struct net *net = dev_net(skb->dev);
+	struct sock *sk;
+	int err;
 
-	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
-			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
+	sk = __inet6_lookup_established(net, &tcp_hashinfo,
+					&hdr->daddr, th->dest,
+					&hdr->saddr, ntohs(th->source),
+					skb->dev->ifindex);
 
-	if (sk == NULL) {
+	if (!sk) {
 		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
 				   ICMP6_MIB_INERRORS);
 		return;
@@ -361,6 +347,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		inet_twsk_put(inet_twsk(sk));
 		return;
 	}
+	seq = ntohl(th->seq);
+	if (sk->sk_state == TCP_NEW_SYN_RECV)
+		return tcp_req_err(sk, seq);
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
@@ -375,7 +364,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	}
 
 	tp = tcp_sk(sk);
-	seq = ntohl(th->seq);
 	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
 	fastopen = tp->fastopen_rsk;
 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
@@ -419,37 +407,12 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
 	/* Might be for an request_sock */
 	switch (sk->sk_state) {
-		struct request_sock *req, **prev;
-	case TCP_LISTEN:
-		if (sock_owned_by_user(sk))
-			goto out;
-
-		/* Note : We use inet6_iif() here, not tcp_v6_iif() */
-		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
-					   &hdr->saddr, inet6_iif(skb));
-		if (!req)
-			goto out;
-
-		/* ICMPs are not backlogged, hence we cannot get
-		 * an established socket here.
-		 */
-		WARN_ON(req->sk != NULL);
-
-		if (seq != tcp_rsk(req)->snt_isn) {
-			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-			goto out;
-		}
-
-		inet_csk_reqsk_queue_drop(sk, req, prev);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
-		goto out;
-
 	case TCP_SYN_SENT:
 	case TCP_SYN_RECV:
 		/* Only in fast or simultaneous open. If a fast open socket is
 		 * is already accepted it is treated as a connected one below.
 		 */
-		if (fastopen && fastopen->sk == NULL)
+		if (fastopen && !fastopen->sk)
 			break;
 
 		if (!sock_owned_by_user(sk)) {
@@ -497,7 +460,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
 				    &ireq->ir_v6_rmt_addr);
 
 		fl6->daddr = ireq->ir_v6_rmt_addr;
-		if (np->repflow && (ireq->pktopts != NULL))
+		if (np->repflow && ireq->pktopts)
 			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 
 		skb_set_queue_mapping(skb, queue_mapping);
@@ -523,17 +486,11 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
 }
 
 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
-						struct sock *addr_sk)
+						const struct sock *addr_sk)
 {
 	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
 }
 
-static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
-						      struct request_sock *req)
-{
-	return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
-}
-
 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
 				 int optlen)
 {
@@ -619,9 +576,9 @@ clear_hash_noput:
 	return 1;
 }
 
-static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
+static int tcp_v6_md5_hash_skb(char *md5_hash,
+			       const struct tcp_md5sig_key *key,
 			       const struct sock *sk,
-			       const struct request_sock *req,
 			       const struct sk_buff *skb)
 {
 	const struct in6_addr *saddr, *daddr;
@@ -629,12 +586,9 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
 	struct hash_desc *desc;
 	const struct tcphdr *th = tcp_hdr(skb);
 
-	if (sk) {
-		saddr = &inet6_sk(sk)->saddr;
+	if (sk) { /* valid for establish/request sockets */
+		saddr = &sk->sk_v6_rcv_saddr;
 		daddr = &sk->sk_v6_daddr;
-	} else if (req) {
-		saddr = &inet_rsk(req)->ir_v6_loc_addr;
-		daddr = &inet_rsk(req)->ir_v6_rmt_addr;
 	} else {
 		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 		saddr = &ip6h->saddr;
@@ -670,8 +624,7 @@ clear_hash_noput:
 	return 1;
 }
 
-static int __tcp_v6_inbound_md5_hash(struct sock *sk,
-				     const struct sk_buff *skb)
+static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 {
 	const __u8 *hash_location = NULL;
 	struct tcp_md5sig_key *hash_expected;
@@ -685,44 +638,32 @@ static int __tcp_v6_inbound_md5_hash(struct sock *sk,
 
 	/* We've parsed the options - do we have a hash? */
 	if (!hash_expected && !hash_location)
-		return 0;
+		return false;
 
 	if (hash_expected && !hash_location) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
-		return 1;
+		return true;
 	}
 
 	if (!hash_expected && hash_location) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
-		return 1;
+		return true;
 	}
 
 	/* check the signature */
 	genhash = tcp_v6_md5_hash_skb(newhash,
 				      hash_expected,
-				      NULL, NULL, skb);
+				      NULL, skb);
 
 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
 		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
 				     genhash ? "failed" : "mismatch",
 				     &ip6h->saddr, ntohs(th->source),
 				     &ip6h->daddr, ntohs(th->dest));
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
-
-static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
-{
-	int ret;
-
-	rcu_read_lock();
-	ret = __tcp_v6_inbound_md5_hash(sk, skb);
-	rcu_read_unlock();
-
-	return ret;
-}
-
 #endif
 
 static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
@@ -734,8 +675,6 @@ static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 
-	ireq->ir_iif = sk->sk_bound_dev_if;
-
 	/* So that link locals have meaning */
 	if (!sk->sk_bound_dev_if &&
 	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
@@ -774,7 +713,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
 				sizeof(struct ipv6hdr),
 #ifdef CONFIG_TCP_MD5SIG
-	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
+	.req_md5_lookup	=	tcp_v6_md5_lookup,
 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
 #endif
 	.init_req	=	tcp_v6_init_req,
@@ -811,7 +750,7 @@ static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
 
 	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
 			 GFP_ATOMIC);
-	if (buff == NULL)
+	if (!buff)
 		return;
 
 	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
@@ -931,7 +870,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 		if (!key)
 			goto release_sk1;
 
-		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
+		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
 			goto release_sk1;
 	} else {
@@ -997,17 +936,19 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 
 static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
 {
-	struct request_sock *req, **prev;
 	const struct tcphdr *th = tcp_hdr(skb);
+	struct request_sock *req;
 	struct sock *nsk;
 
 	/* Find possible connection requests. */
-	req = inet6_csk_search_req(sk, &prev, th->source,
+	req = inet6_csk_search_req(sk, th->source,
 				   &ipv6_hdr(skb)->saddr,
 				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
-	if (req)
-		return tcp_check_req(sk, skb, req, prev, false);
-
+	if (req) {
+		nsk = tcp_check_req(sk, skb, req, false);
+		reqsk_put(req);
+		return nsk;
+	}
 	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
 					 &ipv6_hdr(skb)->saddr, th->source,
 					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
@@ -1067,7 +1008,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
 		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
 
-		if (newsk == NULL)
+		if (!newsk)
 			return NULL;
 
 		newtcp6sk = (struct tcp6_sock *)newsk;
@@ -1079,11 +1020,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
 		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);
-
-		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
-
-		newsk->sk_v6_rcv_saddr = newnp->saddr;
+		newnp->saddr = newsk->sk_v6_rcv_saddr;
 
 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
@@ -1128,7 +1065,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	}
 
 	newsk = tcp_create_openreq_child(sk, req, skb);
-	if (newsk == NULL)
+	if (!newsk)
 		goto out_nonewsk;
 
 	/*
@@ -1170,7 +1107,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
 	/* Clone pktoptions received with SYN */
 	newnp->pktoptions = NULL;
-	if (ireq->pktopts != NULL) {
+	if (ireq->pktopts) {
 		newnp->pktoptions = skb_clone(ireq->pktopts,
 					      sk_gfp_atomic(sk, GFP_ATOMIC));
 		consume_skb(ireq->pktopts);
@@ -1215,7 +1152,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 #ifdef CONFIG_TCP_MD5SIG
 	/* Copy over the MD5 key from the original socket */
 	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
-	if (key != NULL) {
+	if (key) {
 		/* We're using one, so create a matching key
 		 * on the newsk structure. If we fail to get
 		 * memory, then we end up not copying the key
@@ -1232,7 +1169,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 		tcp_done(newsk);
 		goto out;
 	}
-	__inet6_hash(newsk, NULL);
+	__inet_hash(newsk, NULL);
 
 	return newsk;
 
@@ -1411,6 +1348,15 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
 	TCP_SKB_CB(skb)->sacked = 0;
 }
 
+static void tcp_v6_restore_cb(struct sk_buff *skb)
+{
+	/* We need to move header back to the beginning if xfrm6_policy_check()
+	 * and tcp_v6_fill_cb() are going to be called again.
+	 */
+	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+		sizeof(struct inet6_skb_parm));
+}
+
 static int tcp_v6_rcv(struct sk_buff *skb)
 {
 	const struct tcphdr *th;
@@ -1538,11 +1484,12 @@ do_time_wait:
 					    &ipv6_hdr(skb)->saddr, th->source,
 					    &ipv6_hdr(skb)->daddr,
 					    ntohs(th->dest), tcp_v6_iif(skb));
-		if (sk2 != NULL) {
+		if (sk2) {
 			struct inet_timewait_sock *tw = inet_twsk(sk);
-			inet_twsk_deschedule(tw, &tcp_death_row);
+			inet_twsk_deschedule(tw);
 			inet_twsk_put(tw);
 			sk = sk2;
+			tcp_v6_restore_cb(skb);
 			goto process;
 		}
 		/* Fall through to ACK */
@@ -1551,6 +1498,7 @@ do_time_wait:
 		tcp_v6_timewait_ack(sk, skb);
 		break;
 	case TCP_TW_RST:
+		tcp_v6_restore_cb(skb);
 		goto no_tcp_socket;
 	case TCP_TW_SUCCESS:
 		;
@@ -1584,8 +1532,8 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
 	if (sk) {
 		skb->sk = sk;
 		skb->destructor = sock_edemux;
-		if (sk->sk_state != TCP_TIME_WAIT) {
-			struct dst_entry *dst = sk->sk_rx_dst;
+		if (sk_fullsock(sk)) {
+			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
 
 			if (dst)
 				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
@@ -1689,9 +1637,9 @@ static void tcp_v6_destroy_sock(struct sock *sk)
 #ifdef CONFIG_PROC_FS
 /* Proc filesystem TCPv6 sock list dumping. */
 static void get_openreq6(struct seq_file *seq,
-			 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
+			 struct request_sock *req, int i, kuid_t uid)
 {
-	int ttd = req->expires - jiffies;
+	long ttd = req->rsk_timer.expires - jiffies;
 	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
 	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
 
@@ -1780,9 +1728,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 static void get_timewait6_sock(struct seq_file *seq,
 			       struct inet_timewait_sock *tw, int i)
 {
+	long delta = tw->tw_timer.expires - jiffies;
 	const struct in6_addr *dest, *src;
 	__u16 destp, srcp;
-	s32 delta = tw->tw_ttd - inet_tw_time_stamp();
 
 	dest = &tw->tw_v6_daddr;
 	src = &tw->tw_v6_rcv_saddr;
@@ -1827,7 +1775,7 @@ static int tcp6_seq_show(struct seq_file *seq, void *v)
 		get_tcp6_sock(seq, v, st->num);
 		break;
 	case TCP_SEQ_STATE_OPENREQ:
-		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
+		get_openreq6(seq, v, st->num, st->uid);
 		break;
 	}
 out:
@@ -1891,7 +1839,7 @@ struct proto tcpv6_prot = {
 	.sendpage		= tcp_sendpage,
 	.backlog_rcv		= tcp_v6_do_rcv,
 	.release_cb		= tcp_release_cb,
-	.hash			= tcp_v6_hash,
+	.hash			= inet_hash,
 	.unhash			= inet_unhash,
 	.get_port		= inet_csk_get_port,
 	.enter_memory_pressure	= tcp_enter_memory_pressure,
