diff options
Diffstat (limited to 'net/ipv6/tcp_ipv6.c')
-rw-r--r-- | net/ipv6/tcp_ipv6.c | 126 |
1 file changed, 71 insertions(+), 55 deletions(-)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 5c8c84273028..2255d2bf5f6b 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -66,7 +66,7 @@ | |||
66 | #include <linux/proc_fs.h> | 66 | #include <linux/proc_fs.h> |
67 | #include <linux/seq_file.h> | 67 | #include <linux/seq_file.h> |
68 | 68 | ||
69 | #include <linux/crypto.h> | 69 | #include <crypto/hash.h> |
70 | #include <linux/scatterlist.h> | 70 | #include <linux/scatterlist.h> |
71 | 71 | ||
72 | static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb); | 72 | static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb); |
@@ -234,7 +234,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
234 | fl6.fl6_dport = usin->sin6_port; | 234 | fl6.fl6_dport = usin->sin6_port; |
235 | fl6.fl6_sport = inet->inet_sport; | 235 | fl6.fl6_sport = inet->inet_sport; |
236 | 236 | ||
237 | opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); | 237 | opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); |
238 | final_p = fl6_update_dst(&fl6, opt, &final); | 238 | final_p = fl6_update_dst(&fl6, opt, &final); |
239 | 239 | ||
240 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); | 240 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); |
@@ -336,8 +336,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
336 | skb->dev->ifindex); | 336 | skb->dev->ifindex); |
337 | 337 | ||
338 | if (!sk) { | 338 | if (!sk) { |
339 | ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), | 339 | __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), |
340 | ICMP6_MIB_INERRORS); | 340 | ICMP6_MIB_INERRORS); |
341 | return; | 341 | return; |
342 | } | 342 | } |
343 | 343 | ||
@@ -352,13 +352,13 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
352 | 352 | ||
353 | bh_lock_sock(sk); | 353 | bh_lock_sock(sk); |
354 | if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG) | 354 | if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG) |
355 | NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); | 355 | __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS); |
356 | 356 | ||
357 | if (sk->sk_state == TCP_CLOSE) | 357 | if (sk->sk_state == TCP_CLOSE) |
358 | goto out; | 358 | goto out; |
359 | 359 | ||
360 | if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) { | 360 | if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) { |
361 | NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); | 361 | __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); |
362 | goto out; | 362 | goto out; |
363 | } | 363 | } |
364 | 364 | ||
@@ -368,7 +368,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
368 | snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una; | 368 | snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una; |
369 | if (sk->sk_state != TCP_LISTEN && | 369 | if (sk->sk_state != TCP_LISTEN && |
370 | !between(seq, snd_una, tp->snd_nxt)) { | 370 | !between(seq, snd_una, tp->snd_nxt)) { |
371 | NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); | 371 | __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS); |
372 | goto out; | 372 | goto out; |
373 | } | 373 | } |
374 | 374 | ||
@@ -439,7 +439,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, | |||
439 | struct flowi *fl, | 439 | struct flowi *fl, |
440 | struct request_sock *req, | 440 | struct request_sock *req, |
441 | struct tcp_fastopen_cookie *foc, | 441 | struct tcp_fastopen_cookie *foc, |
442 | bool attach_req) | 442 | enum tcp_synack_type synack_type) |
443 | { | 443 | { |
444 | struct inet_request_sock *ireq = inet_rsk(req); | 444 | struct inet_request_sock *ireq = inet_rsk(req); |
445 | struct ipv6_pinfo *np = inet6_sk(sk); | 445 | struct ipv6_pinfo *np = inet6_sk(sk); |
@@ -452,7 +452,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, | |||
452 | IPPROTO_TCP)) == NULL) | 452 | IPPROTO_TCP)) == NULL) |
453 | goto done; | 453 | goto done; |
454 | 454 | ||
455 | skb = tcp_make_synack(sk, dst, req, foc, attach_req); | 455 | skb = tcp_make_synack(sk, dst, req, foc, synack_type); |
456 | 456 | ||
457 | if (skb) { | 457 | if (skb) { |
458 | __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr, | 458 | __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr, |
@@ -541,7 +541,8 @@ static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp, | |||
541 | bp->len = cpu_to_be32(nbytes); | 541 | bp->len = cpu_to_be32(nbytes); |
542 | 542 | ||
543 | sg_init_one(&sg, bp, sizeof(*bp)); | 543 | sg_init_one(&sg, bp, sizeof(*bp)); |
544 | return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp)); | 544 | ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp)); |
545 | return crypto_ahash_update(hp->md5_req); | ||
545 | } | 546 | } |
546 | 547 | ||
547 | static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key, | 548 | static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key, |
@@ -549,14 +550,14 @@ static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key, | |||
549 | const struct tcphdr *th) | 550 | const struct tcphdr *th) |
550 | { | 551 | { |
551 | struct tcp_md5sig_pool *hp; | 552 | struct tcp_md5sig_pool *hp; |
552 | struct hash_desc *desc; | 553 | struct ahash_request *req; |
553 | 554 | ||
554 | hp = tcp_get_md5sig_pool(); | 555 | hp = tcp_get_md5sig_pool(); |
555 | if (!hp) | 556 | if (!hp) |
556 | goto clear_hash_noput; | 557 | goto clear_hash_noput; |
557 | desc = &hp->md5_desc; | 558 | req = hp->md5_req; |
558 | 559 | ||
559 | if (crypto_hash_init(desc)) | 560 | if (crypto_ahash_init(req)) |
560 | goto clear_hash; | 561 | goto clear_hash; |
561 | if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2)) | 562 | if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2)) |
562 | goto clear_hash; | 563 | goto clear_hash; |
@@ -564,7 +565,8 @@ static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key, | |||
564 | goto clear_hash; | 565 | goto clear_hash; |
565 | if (tcp_md5_hash_key(hp, key)) | 566 | if (tcp_md5_hash_key(hp, key)) |
566 | goto clear_hash; | 567 | goto clear_hash; |
567 | if (crypto_hash_final(desc, md5_hash)) | 568 | ahash_request_set_crypt(req, NULL, md5_hash, 0); |
569 | if (crypto_ahash_final(req)) | ||
568 | goto clear_hash; | 570 | goto clear_hash; |
569 | 571 | ||
570 | tcp_put_md5sig_pool(); | 572 | tcp_put_md5sig_pool(); |
@@ -584,7 +586,7 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, | |||
584 | { | 586 | { |
585 | const struct in6_addr *saddr, *daddr; | 587 | const struct in6_addr *saddr, *daddr; |
586 | struct tcp_md5sig_pool *hp; | 588 | struct tcp_md5sig_pool *hp; |
587 | struct hash_desc *desc; | 589 | struct ahash_request *req; |
588 | const struct tcphdr *th = tcp_hdr(skb); | 590 | const struct tcphdr *th = tcp_hdr(skb); |
589 | 591 | ||
590 | if (sk) { /* valid for establish/request sockets */ | 592 | if (sk) { /* valid for establish/request sockets */ |
@@ -599,9 +601,9 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, | |||
599 | hp = tcp_get_md5sig_pool(); | 601 | hp = tcp_get_md5sig_pool(); |
600 | if (!hp) | 602 | if (!hp) |
601 | goto clear_hash_noput; | 603 | goto clear_hash_noput; |
602 | desc = &hp->md5_desc; | 604 | req = hp->md5_req; |
603 | 605 | ||
604 | if (crypto_hash_init(desc)) | 606 | if (crypto_ahash_init(req)) |
605 | goto clear_hash; | 607 | goto clear_hash; |
606 | 608 | ||
607 | if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len)) | 609 | if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len)) |
@@ -612,7 +614,8 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, | |||
612 | goto clear_hash; | 614 | goto clear_hash; |
613 | if (tcp_md5_hash_key(hp, key)) | 615 | if (tcp_md5_hash_key(hp, key)) |
614 | goto clear_hash; | 616 | goto clear_hash; |
615 | if (crypto_hash_final(desc, md5_hash)) | 617 | ahash_request_set_crypt(req, NULL, md5_hash, 0); |
618 | if (crypto_ahash_final(req)) | ||
616 | goto clear_hash; | 619 | goto clear_hash; |
617 | 620 | ||
618 | tcp_put_md5sig_pool(); | 621 | tcp_put_md5sig_pool(); |
@@ -646,12 +649,12 @@ static bool tcp_v6_inbound_md5_hash(const struct sock *sk, | |||
646 | return false; | 649 | return false; |
647 | 650 | ||
648 | if (hash_expected && !hash_location) { | 651 | if (hash_expected && !hash_location) { |
649 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); | 652 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); |
650 | return true; | 653 | return true; |
651 | } | 654 | } |
652 | 655 | ||
653 | if (!hash_expected && hash_location) { | 656 | if (!hash_expected && hash_location) { |
654 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); | 657 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); |
655 | return true; | 658 | return true; |
656 | } | 659 | } |
657 | 660 | ||
@@ -735,7 +738,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { | |||
735 | static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq, | 738 | static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq, |
736 | u32 ack, u32 win, u32 tsval, u32 tsecr, | 739 | u32 ack, u32 win, u32 tsval, u32 tsecr, |
737 | int oif, struct tcp_md5sig_key *key, int rst, | 740 | int oif, struct tcp_md5sig_key *key, int rst, |
738 | u8 tclass, u32 label) | 741 | u8 tclass, __be32 label) |
739 | { | 742 | { |
740 | const struct tcphdr *th = tcp_hdr(skb); | 743 | const struct tcphdr *th = tcp_hdr(skb); |
741 | struct tcphdr *t1; | 744 | struct tcphdr *t1; |
@@ -807,8 +810,13 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 | |||
807 | fl6.flowi6_proto = IPPROTO_TCP; | 810 | fl6.flowi6_proto = IPPROTO_TCP; |
808 | if (rt6_need_strict(&fl6.daddr) && !oif) | 811 | if (rt6_need_strict(&fl6.daddr) && !oif) |
809 | fl6.flowi6_oif = tcp_v6_iif(skb); | 812 | fl6.flowi6_oif = tcp_v6_iif(skb); |
810 | else | 813 | else { |
814 | if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) | ||
815 | oif = skb->skb_iif; | ||
816 | |||
811 | fl6.flowi6_oif = oif; | 817 | fl6.flowi6_oif = oif; |
818 | } | ||
819 | |||
812 | fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark); | 820 | fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark); |
813 | fl6.fl6_dport = t1->dest; | 821 | fl6.fl6_dport = t1->dest; |
814 | fl6.fl6_sport = t1->source; | 822 | fl6.fl6_sport = t1->source; |
@@ -822,9 +830,9 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 | |||
822 | if (!IS_ERR(dst)) { | 830 | if (!IS_ERR(dst)) { |
823 | skb_dst_set(buff, dst); | 831 | skb_dst_set(buff, dst); |
824 | ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass); | 832 | ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass); |
825 | TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); | 833 | TCP_INC_STATS(net, TCP_MIB_OUTSEGS); |
826 | if (rst) | 834 | if (rst) |
827 | TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); | 835 | TCP_INC_STATS(net, TCP_MIB_OUTRSTS); |
828 | return; | 836 | return; |
829 | } | 837 | } |
830 | 838 | ||
@@ -855,6 +863,7 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb) | |||
855 | return; | 863 | return; |
856 | 864 | ||
857 | #ifdef CONFIG_TCP_MD5SIG | 865 | #ifdef CONFIG_TCP_MD5SIG |
866 | rcu_read_lock(); | ||
858 | hash_location = tcp_parse_md5sig_option(th); | 867 | hash_location = tcp_parse_md5sig_option(th); |
859 | if (sk && sk_fullsock(sk)) { | 868 | if (sk && sk_fullsock(sk)) { |
860 | key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr); | 869 | key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr); |
@@ -867,20 +876,20 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb) | |||
867 | * no RST generated if md5 hash doesn't match. | 876 | * no RST generated if md5 hash doesn't match. |
868 | */ | 877 | */ |
869 | sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev), | 878 | sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev), |
870 | &tcp_hashinfo, &ipv6h->saddr, | 879 | &tcp_hashinfo, NULL, 0, |
880 | &ipv6h->saddr, | ||
871 | th->source, &ipv6h->daddr, | 881 | th->source, &ipv6h->daddr, |
872 | ntohs(th->source), tcp_v6_iif(skb)); | 882 | ntohs(th->source), tcp_v6_iif(skb)); |
873 | if (!sk1) | 883 | if (!sk1) |
874 | return; | 884 | goto out; |
875 | 885 | ||
876 | rcu_read_lock(); | ||
877 | key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr); | 886 | key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr); |
878 | if (!key) | 887 | if (!key) |
879 | goto release_sk1; | 888 | goto out; |
880 | 889 | ||
881 | genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb); | 890 | genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb); |
882 | if (genhash || memcmp(hash_location, newhash, 16) != 0) | 891 | if (genhash || memcmp(hash_location, newhash, 16) != 0) |
883 | goto release_sk1; | 892 | goto out; |
884 | } | 893 | } |
885 | #endif | 894 | #endif |
886 | 895 | ||
@@ -894,18 +903,15 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb) | |||
894 | tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0); | 903 | tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0); |
895 | 904 | ||
896 | #ifdef CONFIG_TCP_MD5SIG | 905 | #ifdef CONFIG_TCP_MD5SIG |
897 | release_sk1: | 906 | out: |
898 | if (sk1) { | 907 | rcu_read_unlock(); |
899 | rcu_read_unlock(); | ||
900 | sock_put(sk1); | ||
901 | } | ||
902 | #endif | 908 | #endif |
903 | } | 909 | } |
904 | 910 | ||
905 | static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq, | 911 | static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq, |
906 | u32 ack, u32 win, u32 tsval, u32 tsecr, int oif, | 912 | u32 ack, u32 win, u32 tsval, u32 tsecr, int oif, |
907 | struct tcp_md5sig_key *key, u8 tclass, | 913 | struct tcp_md5sig_key *key, u8 tclass, |
908 | u32 label) | 914 | __be32 label) |
909 | { | 915 | { |
910 | tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0, | 916 | tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0, |
911 | tclass, label); | 917 | tclass, label); |
@@ -963,7 +969,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) | |||
963 | &tcp_request_sock_ipv6_ops, sk, skb); | 969 | &tcp_request_sock_ipv6_ops, sk, skb); |
964 | 970 | ||
965 | drop: | 971 | drop: |
966 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); | 972 | tcp_listendrop(sk); |
967 | return 0; /* don't send reset */ | 973 | return 0; /* don't send reset */ |
968 | } | 974 | } |
969 | 975 | ||
@@ -1164,11 +1170,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * | |||
1164 | return newsk; | 1170 | return newsk; |
1165 | 1171 | ||
1166 | out_overflow: | 1172 | out_overflow: |
1167 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); | 1173 | __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); |
1168 | out_nonewsk: | 1174 | out_nonewsk: |
1169 | dst_release(dst); | 1175 | dst_release(dst); |
1170 | out: | 1176 | out: |
1171 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); | 1177 | tcp_listendrop(sk); |
1172 | return NULL; | 1178 | return NULL; |
1173 | } | 1179 | } |
1174 | 1180 | ||
@@ -1275,8 +1281,8 @@ discard: | |||
1275 | kfree_skb(skb); | 1281 | kfree_skb(skb); |
1276 | return 0; | 1282 | return 0; |
1277 | csum_err: | 1283 | csum_err: |
1278 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS); | 1284 | TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); |
1279 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); | 1285 | TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); |
1280 | goto discard; | 1286 | goto discard; |
1281 | 1287 | ||
1282 | 1288 | ||
@@ -1347,6 +1353,7 @@ static int tcp_v6_rcv(struct sk_buff *skb) | |||
1347 | { | 1353 | { |
1348 | const struct tcphdr *th; | 1354 | const struct tcphdr *th; |
1349 | const struct ipv6hdr *hdr; | 1355 | const struct ipv6hdr *hdr; |
1356 | bool refcounted; | ||
1350 | struct sock *sk; | 1357 | struct sock *sk; |
1351 | int ret; | 1358 | int ret; |
1352 | struct net *net = dev_net(skb->dev); | 1359 | struct net *net = dev_net(skb->dev); |
@@ -1357,14 +1364,14 @@ static int tcp_v6_rcv(struct sk_buff *skb) | |||
1357 | /* | 1364 | /* |
1358 | * Count it even if it's bad. | 1365 | * Count it even if it's bad. |
1359 | */ | 1366 | */ |
1360 | TCP_INC_STATS_BH(net, TCP_MIB_INSEGS); | 1367 | __TCP_INC_STATS(net, TCP_MIB_INSEGS); |
1361 | 1368 | ||
1362 | if (!pskb_may_pull(skb, sizeof(struct tcphdr))) | 1369 | if (!pskb_may_pull(skb, sizeof(struct tcphdr))) |
1363 | goto discard_it; | 1370 | goto discard_it; |
1364 | 1371 | ||
1365 | th = tcp_hdr(skb); | 1372 | th = (const struct tcphdr *)skb->data; |
1366 | 1373 | ||
1367 | if (th->doff < sizeof(struct tcphdr)/4) | 1374 | if (unlikely(th->doff < sizeof(struct tcphdr)/4)) |
1368 | goto bad_packet; | 1375 | goto bad_packet; |
1369 | if (!pskb_may_pull(skb, th->doff*4)) | 1376 | if (!pskb_may_pull(skb, th->doff*4)) |
1370 | goto discard_it; | 1377 | goto discard_it; |
@@ -1372,12 +1379,13 @@ static int tcp_v6_rcv(struct sk_buff *skb) | |||
1372 | if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo)) | 1379 | if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo)) |
1373 | goto csum_error; | 1380 | goto csum_error; |
1374 | 1381 | ||
1375 | th = tcp_hdr(skb); | 1382 | th = (const struct tcphdr *)skb->data; |
1376 | hdr = ipv6_hdr(skb); | 1383 | hdr = ipv6_hdr(skb); |
1377 | 1384 | ||
1378 | lookup: | 1385 | lookup: |
1379 | sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest, | 1386 | sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), |
1380 | inet6_iif(skb)); | 1387 | th->source, th->dest, inet6_iif(skb), |
1388 | &refcounted); | ||
1381 | if (!sk) | 1389 | if (!sk) |
1382 | goto no_tcp_socket; | 1390 | goto no_tcp_socket; |
1383 | 1391 | ||
@@ -1400,6 +1408,7 @@ process: | |||
1400 | goto lookup; | 1408 | goto lookup; |
1401 | } | 1409 | } |
1402 | sock_hold(sk); | 1410 | sock_hold(sk); |
1411 | refcounted = true; | ||
1403 | nsk = tcp_check_req(sk, skb, req, false); | 1412 | nsk = tcp_check_req(sk, skb, req, false); |
1404 | if (!nsk) { | 1413 | if (!nsk) { |
1405 | reqsk_put(req); | 1414 | reqsk_put(req); |
@@ -1417,7 +1426,7 @@ process: | |||
1417 | } | 1426 | } |
1418 | } | 1427 | } |
1419 | if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) { | 1428 | if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) { |
1420 | NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); | 1429 | __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); |
1421 | goto discard_and_relse; | 1430 | goto discard_and_relse; |
1422 | } | 1431 | } |
1423 | 1432 | ||
@@ -1442,7 +1451,7 @@ process: | |||
1442 | sk_incoming_cpu_update(sk); | 1451 | sk_incoming_cpu_update(sk); |
1443 | 1452 | ||
1444 | bh_lock_sock_nested(sk); | 1453 | bh_lock_sock_nested(sk); |
1445 | tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs); | 1454 | tcp_segs_in(tcp_sk(sk), skb); |
1446 | ret = 0; | 1455 | ret = 0; |
1447 | if (!sock_owned_by_user(sk)) { | 1456 | if (!sock_owned_by_user(sk)) { |
1448 | if (!tcp_prequeue(sk, skb)) | 1457 | if (!tcp_prequeue(sk, skb)) |
@@ -1450,13 +1459,14 @@ process: | |||
1450 | } else if (unlikely(sk_add_backlog(sk, skb, | 1459 | } else if (unlikely(sk_add_backlog(sk, skb, |
1451 | sk->sk_rcvbuf + sk->sk_sndbuf))) { | 1460 | sk->sk_rcvbuf + sk->sk_sndbuf))) { |
1452 | bh_unlock_sock(sk); | 1461 | bh_unlock_sock(sk); |
1453 | NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP); | 1462 | __NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP); |
1454 | goto discard_and_relse; | 1463 | goto discard_and_relse; |
1455 | } | 1464 | } |
1456 | bh_unlock_sock(sk); | 1465 | bh_unlock_sock(sk); |
1457 | 1466 | ||
1458 | put_and_return: | 1467 | put_and_return: |
1459 | sock_put(sk); | 1468 | if (refcounted) |
1469 | sock_put(sk); | ||
1460 | return ret ? -1 : 0; | 1470 | return ret ? -1 : 0; |
1461 | 1471 | ||
1462 | no_tcp_socket: | 1472 | no_tcp_socket: |
@@ -1467,9 +1477,9 @@ no_tcp_socket: | |||
1467 | 1477 | ||
1468 | if (tcp_checksum_complete(skb)) { | 1478 | if (tcp_checksum_complete(skb)) { |
1469 | csum_error: | 1479 | csum_error: |
1470 | TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS); | 1480 | __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS); |
1471 | bad_packet: | 1481 | bad_packet: |
1472 | TCP_INC_STATS_BH(net, TCP_MIB_INERRS); | 1482 | __TCP_INC_STATS(net, TCP_MIB_INERRS); |
1473 | } else { | 1483 | } else { |
1474 | tcp_v6_send_reset(NULL, skb); | 1484 | tcp_v6_send_reset(NULL, skb); |
1475 | } | 1485 | } |
@@ -1479,7 +1489,9 @@ discard_it: | |||
1479 | return 0; | 1489 | return 0; |
1480 | 1490 | ||
1481 | discard_and_relse: | 1491 | discard_and_relse: |
1482 | sock_put(sk); | 1492 | sk_drops_add(sk, skb); |
1493 | if (refcounted) | ||
1494 | sock_put(sk); | ||
1483 | goto discard_it; | 1495 | goto discard_it; |
1484 | 1496 | ||
1485 | do_time_wait: | 1497 | do_time_wait: |
@@ -1501,6 +1513,7 @@ do_time_wait: | |||
1501 | struct sock *sk2; | 1513 | struct sock *sk2; |
1502 | 1514 | ||
1503 | sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo, | 1515 | sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo, |
1516 | skb, __tcp_hdrlen(th), | ||
1504 | &ipv6_hdr(skb)->saddr, th->source, | 1517 | &ipv6_hdr(skb)->saddr, th->source, |
1505 | &ipv6_hdr(skb)->daddr, | 1518 | &ipv6_hdr(skb)->daddr, |
1506 | ntohs(th->dest), tcp_v6_iif(skb)); | 1519 | ntohs(th->dest), tcp_v6_iif(skb)); |
@@ -1509,6 +1522,7 @@ do_time_wait: | |||
1509 | inet_twsk_deschedule_put(tw); | 1522 | inet_twsk_deschedule_put(tw); |
1510 | sk = sk2; | 1523 | sk = sk2; |
1511 | tcp_v6_restore_cb(skb); | 1524 | tcp_v6_restore_cb(skb); |
1525 | refcounted = false; | ||
1512 | goto process; | 1526 | goto process; |
1513 | } | 1527 | } |
1514 | /* Fall through to ACK */ | 1528 | /* Fall through to ACK */ |
@@ -1707,7 +1721,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) | |||
1707 | destp = ntohs(inet->inet_dport); | 1721 | destp = ntohs(inet->inet_dport); |
1708 | srcp = ntohs(inet->inet_sport); | 1722 | srcp = ntohs(inet->inet_sport); |
1709 | 1723 | ||
1710 | if (icsk->icsk_pending == ICSK_TIME_RETRANS) { | 1724 | if (icsk->icsk_pending == ICSK_TIME_RETRANS || |
1725 | icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || | ||
1726 | icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { | ||
1711 | timer_active = 1; | 1727 | timer_active = 1; |
1712 | timer_expires = icsk->icsk_timeout; | 1728 | timer_expires = icsk->icsk_timeout; |
1713 | } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { | 1729 | } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { |
@@ -1866,7 +1882,7 @@ struct proto tcpv6_prot = { | |||
1866 | .sendpage = tcp_sendpage, | 1882 | .sendpage = tcp_sendpage, |
1867 | .backlog_rcv = tcp_v6_do_rcv, | 1883 | .backlog_rcv = tcp_v6_do_rcv, |
1868 | .release_cb = tcp_release_cb, | 1884 | .release_cb = tcp_release_cb, |
1869 | .hash = inet_hash, | 1885 | .hash = inet6_hash, |
1870 | .unhash = inet_unhash, | 1886 | .unhash = inet_unhash, |
1871 | .get_port = inet_csk_get_port, | 1887 | .get_port = inet_csk_get_port, |
1872 | .enter_memory_pressure = tcp_enter_memory_pressure, | 1888 | .enter_memory_pressure = tcp_enter_memory_pressure, |