Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--	net/ipv4/tcp_ipv4.c	336
1 file changed, 132 insertions(+), 204 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 97a230026e13..a2b06d0cc26b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -5,8 +5,6 @@
  *
  *		Implementation of the Transmission Control Protocol(TCP).
  *
- *		Version: $Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
- *
  *		IPv4 specific functions
  *
  *
@@ -85,18 +83,18 @@
 int sysctl_tcp_tw_reuse __read_mostly;
 int sysctl_tcp_low_latency __read_mostly;
 
-/* Check TCP sequence numbers in ICMP packets. */
-#define ICMP_MIN_LENGTH 8
-
-void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
 
 #ifdef CONFIG_TCP_MD5SIG
 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr);
-static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
-				   __be32 saddr, __be32 daddr,
-				   struct tcphdr *th, int protocol,
-				   unsigned int tcplen);
+static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
+			       __be32 daddr, __be32 saddr, struct tcphdr *th);
+#else
+static inline
+struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
+{
+	return NULL;
+}
 #endif
 
 struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
@@ -176,7 +174,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
			       inet->sport, usin->sin_port, sk, 1);
 	if (tmp < 0) {
 		if (tmp == -ENETUNREACH)
-			IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
+			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
 		return tmp;
 	}
 
@@ -344,16 +342,17 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 	struct sock *sk;
 	__u32 seq;
 	int err;
+	struct net *net = dev_net(skb->dev);
 
 	if (skb->len < (iph->ihl << 2) + 8) {
-		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
 		return;
 	}
 
-	sk = inet_lookup(dev_net(skb->dev), &tcp_hashinfo, iph->daddr, th->dest,
+	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(skb));
 	if (!sk) {
-		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
 		return;
 	}
 	if (sk->sk_state == TCP_TIME_WAIT) {
@@ -366,7 +365,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
	 * servers this needs to be solved differently.
	 */
 	if (sock_owned_by_user(sk))
-		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
+		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
 	if (sk->sk_state == TCP_CLOSE)
 		goto out;
@@ -375,7 +374,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 	seq = ntohl(th->seq);
 	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
-		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}
 
@@ -419,10 +418,10 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
-		BUG_TRAP(!req->sk);
+		WARN_ON(req->sk);
 
 		if (seq != tcp_rsk(req)->snt_isn) {
-			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 			goto out;
 		}
 
@@ -544,6 +543,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 #ifdef CONFIG_TCP_MD5SIG
 	struct tcp_md5sig_key *key;
 #endif
+	struct net *net;
 
 	/* Never send a reset in response to a reset. */
 	if (th->rst)
@@ -582,12 +582,9 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
 		rep.th.doff = arg.iov[0].iov_len / 4;
 
-		tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[1],
-					key,
-					ip_hdr(skb)->daddr,
-					ip_hdr(skb)->saddr,
-					&rep.th, IPPROTO_TCP,
-					arg.iov[0].iov_len);
+		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
+				    key, ip_hdr(skb)->daddr,
+				    ip_hdr(skb)->saddr, &rep.th);
 	}
 #endif
 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
@@ -595,20 +592,21 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
				      sizeof(struct tcphdr), IPPROTO_TCP, 0);
 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 
-	ip_send_reply(dev_net(skb->dst->dev)->ipv4.tcp_sock, skb,
+	net = dev_net(skb->dst->dev);
+	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);
 
-	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
-	TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
+	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
 }
 
 /* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
    outside socket context is ugly, certainly. What can I do?
  */
 
-static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
-			    struct sk_buff *skb, u32 seq, u32 ack,
-			    u32 win, u32 ts)
+static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+			    u32 win, u32 ts, int oif,
+			    struct tcp_md5sig_key *key)
 {
 	struct tcphdr *th = tcp_hdr(skb);
 	struct {
@@ -620,10 +618,7 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
			];
 	} rep;
 	struct ip_reply_arg arg;
-#ifdef CONFIG_TCP_MD5SIG
-	struct tcp_md5sig_key *key;
-	struct tcp_md5sig_key tw_key;
-#endif
+	struct net *net = dev_net(skb->dev);
 
 	memset(&rep.th, 0, sizeof(struct tcphdr));
 	memset(&arg, 0, sizeof(arg));
@@ -649,23 +644,6 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
 	rep.th.window = htons(win);
 
 #ifdef CONFIG_TCP_MD5SIG
-	/*
-	 * The SKB holds an imcoming packet, but may not have a valid ->sk
-	 * pointer. This is especially the case when we're dealing with a
-	 * TIME_WAIT ack, because the sk structure is long gone, and only
-	 * the tcp_timewait_sock remains. So the md5 key is stashed in that
-	 * structure, and we use it in preference. I believe that (twsk ||
-	 * skb->sk) holds true, but we program defensively.
-	 */
-	if (!twsk && skb->sk) {
-		key = tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr);
-	} else if (twsk && twsk->tw_md5_keylen) {
-		tw_key.key = twsk->tw_md5_key;
-		tw_key.keylen = twsk->tw_md5_keylen;
-		key = &tw_key;
-	} else
-		key = NULL;
-
 	if (key) {
 		int offset = (ts) ? 3 : 0;
 
@@ -676,25 +654,22 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
 		rep.th.doff = arg.iov[0].iov_len/4;
 
-		tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[offset],
-					key,
-					ip_hdr(skb)->daddr,
-					ip_hdr(skb)->saddr,
-					&rep.th, IPPROTO_TCP,
-					arg.iov[0].iov_len);
+		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
+				    key, ip_hdr(skb)->daddr,
+				    ip_hdr(skb)->saddr, &rep.th);
 	}
 #endif
 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
-	if (twsk)
-		arg.bound_dev_if = twsk->tw_sk.tw_bound_dev_if;
+	if (oif)
+		arg.bound_dev_if = oif;
 
-	ip_send_reply(dev_net(skb->dev)->ipv4.tcp_sock, skb,
+	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);
 
-	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
+	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
 }
 
 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
@@ -702,9 +677,12 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 	struct inet_timewait_sock *tw = inet_twsk(sk);
 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 
-	tcp_v4_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
-			tcptw->tw_ts_recent);
+			tcptw->tw_ts_recent,
+			tw->tw_bound_dev_if,
+			tcp_twsk_md5_key(tcptw)
+			);
 
 	inet_twsk_put(tw);
 }
@@ -712,9 +690,11 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 static void tcp_v4_reqsk_send_ack(struct sk_buff *skb,
				  struct request_sock *req)
 {
-	tcp_v4_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1,
+	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
-			req->ts_recent);
+			req->ts_recent,
+			0,
+			tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr));
 }
 
 /*
@@ -1004,32 +984,13 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 newkey, cmd.tcpm_keylen);
 }
 
-static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
-				   __be32 saddr, __be32 daddr,
-				   struct tcphdr *th, int protocol,
-				   unsigned int tcplen)
+static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
+					__be32 daddr, __be32 saddr, int nbytes)
 {
-	struct scatterlist sg[4];
-	__u16 data_len;
-	int block = 0;
-	__sum16 old_checksum;
-	struct tcp_md5sig_pool *hp;
 	struct tcp4_pseudohdr *bp;
-	struct hash_desc *desc;
-	int err;
-	unsigned int nbytes = 0;
-
-	/*
-	 * Okay, so RFC2385 is turned on for this connection,
-	 * so we need to generate the MD5 hash for the packet now.
-	 */
-
-	hp = tcp_get_md5sig_pool();
-	if (!hp)
-		goto clear_hash_noput;
+	struct scatterlist sg;
 
 	bp = &hp->md5_blk.ip4;
-	desc = &hp->md5_desc;
 
 	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
@@ -1039,86 +1000,96 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
 	bp->saddr = saddr;
 	bp->daddr = daddr;
 	bp->pad = 0;
-	bp->protocol = protocol;
-	bp->len = htons(tcplen);
+	bp->protocol = IPPROTO_TCP;
+	bp->len = cpu_to_be16(nbytes);
 
-	sg_init_table(sg, 4);
-
-	sg_set_buf(&sg[block++], bp, sizeof(*bp));
-	nbytes += sizeof(*bp);
-
-	/* 2. the TCP header, excluding options, and assuming a
-	 * checksum of zero/
-	 */
-	old_checksum = th->check;
-	th->check = 0;
-	sg_set_buf(&sg[block++], th, sizeof(struct tcphdr));
-	nbytes += sizeof(struct tcphdr);
-
-	/* 3. the TCP segment data (if any) */
-	data_len = tcplen - (th->doff << 2);
-	if (data_len > 0) {
-		unsigned char *data = (unsigned char *)th + (th->doff << 2);
-		sg_set_buf(&sg[block++], data, data_len);
-		nbytes += data_len;
-	}
+	sg_init_one(&sg, bp, sizeof(*bp));
+	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
+}
 
-	/* 4. an independently-specified key or password, known to both
-	 * TCPs and presumably connection-specific
-	 */
-	sg_set_buf(&sg[block++], key->key, key->keylen);
-	nbytes += key->keylen;
+static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
+			       __be32 daddr, __be32 saddr, struct tcphdr *th)
+{
+	struct tcp_md5sig_pool *hp;
+	struct hash_desc *desc;
 
-	sg_mark_end(&sg[block - 1]);
+	hp = tcp_get_md5sig_pool();
+	if (!hp)
+		goto clear_hash_noput;
+	desc = &hp->md5_desc;
 
-	/* Now store the Hash into the packet */
-	err = crypto_hash_init(desc);
-	if (err)
+	if (crypto_hash_init(desc))
 		goto clear_hash;
-	err = crypto_hash_update(desc, sg, nbytes);
-	if (err)
+	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
 		goto clear_hash;
-	err = crypto_hash_final(desc, md5_hash);
-	if (err)
+	if (tcp_md5_hash_header(hp, th))
+		goto clear_hash;
+	if (tcp_md5_hash_key(hp, key))
+		goto clear_hash;
+	if (crypto_hash_final(desc, md5_hash))
 		goto clear_hash;
 
-	/* Reset header, and free up the crypto */
 	tcp_put_md5sig_pool();
-	th->check = old_checksum;
-
-out:
 	return 0;
+
 clear_hash:
 	tcp_put_md5sig_pool();
 clear_hash_noput:
 	memset(md5_hash, 0, 16);
-	goto out;
+	return 1;
 }
 
-int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
-			 struct sock *sk,
-			 struct dst_entry *dst,
-			 struct request_sock *req,
-			 struct tcphdr *th, int protocol,
-			 unsigned int tcplen)
+int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
+			struct sock *sk, struct request_sock *req,
+			struct sk_buff *skb)
 {
+	struct tcp_md5sig_pool *hp;
+	struct hash_desc *desc;
+	struct tcphdr *th = tcp_hdr(skb);
 	__be32 saddr, daddr;
 
 	if (sk) {
 		saddr = inet_sk(sk)->saddr;
 		daddr = inet_sk(sk)->daddr;
+	} else if (req) {
+		saddr = inet_rsk(req)->loc_addr;
+		daddr = inet_rsk(req)->rmt_addr;
 	} else {
-		struct rtable *rt = (struct rtable *)dst;
-		BUG_ON(!rt);
-		saddr = rt->rt_src;
-		daddr = rt->rt_dst;
+		const struct iphdr *iph = ip_hdr(skb);
+		saddr = iph->saddr;
+		daddr = iph->daddr;
 	}
-	return tcp_v4_do_calc_md5_hash(md5_hash, key,
-				       saddr, daddr,
-				       th, protocol, tcplen);
+
+	hp = tcp_get_md5sig_pool();
+	if (!hp)
+		goto clear_hash_noput;
+	desc = &hp->md5_desc;
+
+	if (crypto_hash_init(desc))
+		goto clear_hash;
+
+	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
+		goto clear_hash;
+	if (tcp_md5_hash_header(hp, th))
+		goto clear_hash;
+	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
+		goto clear_hash;
+	if (tcp_md5_hash_key(hp, key))
+		goto clear_hash;
+	if (crypto_hash_final(desc, md5_hash))
+		goto clear_hash;
+
+	tcp_put_md5sig_pool();
+	return 0;
+
+clear_hash:
+	tcp_put_md5sig_pool();
+clear_hash_noput:
+	memset(md5_hash, 0, 16);
+	return 1;
 }
 
-EXPORT_SYMBOL(tcp_v4_calc_md5_hash);
+EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
 
 static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
 {
@@ -1134,52 +1105,12 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
 	struct tcp_md5sig_key *hash_expected;
 	const struct iphdr *iph = ip_hdr(skb);
 	struct tcphdr *th = tcp_hdr(skb);
-	int length = (th->doff << 2) - sizeof(struct tcphdr);
 	int genhash;
-	unsigned char *ptr;
 	unsigned char newhash[16];
 
 	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
+	hash_location = tcp_parse_md5sig_option(th);
 
-	/*
-	 * If the TCP option length is less than the TCP_MD5SIG
-	 * option length, then we can shortcut
-	 */
-	if (length < TCPOLEN_MD5SIG) {
-		if (hash_expected)
-			return 1;
-		else
-			return 0;
-	}
-
-	/* Okay, we can't shortcut - we have to grub through the options */
-	ptr = (unsigned char *)(th + 1);
-	while (length > 0) {
-		int opcode = *ptr++;
-		int opsize;
-
-		switch (opcode) {
-		case TCPOPT_EOL:
-			goto done_opts;
-		case TCPOPT_NOP:
-			length--;
-			continue;
-		default:
-			opsize = *ptr++;
-			if (opsize < 2)
-				goto done_opts;
-			if (opsize > length)
-				goto done_opts;
-
-			if (opcode == TCPOPT_MD5SIG) {
-				hash_location = ptr;
-				goto done_opts;
-			}
-		}
-		ptr += opsize-2;
-		length -= opsize;
-	}
-done_opts:
 	/* We've parsed the options - do we have a hash? */
 	if (!hash_expected && !hash_location)
 		return 0;
@@ -1203,11 +1134,9 @@ done_opts:
 	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
-	genhash = tcp_v4_do_calc_md5_hash(newhash,
-					  hash_expected,
-					  iph->saddr, iph->daddr,
-					  th, sk->sk_protocol,
-					  skb->len);
+	genhash = tcp_v4_md5_hash_skb(newhash,
+				      hash_expected,
+				      NULL, NULL, skb);
 
 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
 		if (net_ratelimit()) {
@@ -1351,7 +1280,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
		    if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
			(s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
-			NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
 			goto drop_and_release;
 		}
 	}
@@ -1456,6 +1385,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 		if (newkey != NULL)
			tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr,
					  newkey, key->keylen);
+		newsk->sk_route_caps &= ~NETIF_F_GSO_MASK;
 	}
 #endif
 
@@ -1465,9 +1395,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	return newsk;
 
 exit_overflow:
-	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 exit:
-	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	dst_release(dst);
 	return NULL;
 }
@@ -1594,7 +1524,7 @@ discard:
 	return 0;
 
 csum_err:
-	TCP_INC_STATS_BH(TCP_MIB_INERRS);
+	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
 	goto discard;
 }
 
@@ -1608,12 +1538,13 @@ int tcp_v4_rcv(struct sk_buff *skb)
 	struct tcphdr *th;
 	struct sock *sk;
 	int ret;
+	struct net *net = dev_net(skb->dev);
 
 	if (skb->pkt_type != PACKET_HOST)
 		goto discard_it;
 
 	/* Count it even if it's bad */
-	TCP_INC_STATS_BH(TCP_MIB_INSEGS);
+	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
 
 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
 		goto discard_it;
@@ -1642,7 +1573,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 	TCP_SKB_CB(skb)->flags = iph->tos;
 	TCP_SKB_CB(skb)->sacked = 0;
 
-	sk = __inet_lookup(dev_net(skb->dev), &tcp_hashinfo, iph->saddr,
+	sk = __inet_lookup(net, &tcp_hashinfo, iph->saddr,
			   th->source, iph->daddr, th->dest, inet_iif(skb));
 	if (!sk)
 		goto no_tcp_socket;
@@ -1689,7 +1620,7 @@ no_tcp_socket:
 
 	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
 bad_packet:
-		TCP_INC_STATS_BH(TCP_MIB_INERRS);
+		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
 	} else {
 		tcp_v4_send_reset(NULL, skb);
 	}
@@ -1710,7 +1641,7 @@ do_time_wait:
 	}
 
 	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
-		TCP_INC_STATS_BH(TCP_MIB_INERRS);
+		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
 		inet_twsk_put(inet_twsk(sk));
 		goto discard_it;
 	}
@@ -1818,7 +1749,7 @@ struct inet_connection_sock_af_ops ipv4_specific = {
 #ifdef CONFIG_TCP_MD5SIG
 static struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
 	.md5_lookup = tcp_v4_md5_lookup,
-	.calc_md5_hash = tcp_v4_calc_md5_hash,
+	.calc_md5_hash = tcp_v4_md5_hash_skb,
 	.md5_add = tcp_v4_md5_add_func,
 	.md5_parse = tcp_v4_parse_md5_keys,
 };
@@ -1875,7 +1806,7 @@ static int tcp_v4_init_sock(struct sock *sk)
 	return 0;
 }
 
-int tcp_v4_destroy_sock(struct sock *sk)
+void tcp_v4_destroy_sock(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -1919,8 +1850,6 @@ int tcp_v4_destroy_sock(struct sock *sk)
 	}
 
 	atomic_dec(&tcp_sockets_allocated);
-
-	return 0;
 }
 
 EXPORT_SYMBOL(tcp_v4_destroy_sock);
@@ -1963,8 +1892,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
 		req = req->dl_next;
 		while (1) {
 			while (req) {
-				if (req->rsk_ops->family == st->family &&
-				    net_eq(sock_net(req->sk), net)) {
+				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
@@ -2295,7 +2223,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
 	}
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
-			"%08X %5d %8d %lu %d %p %u %u %u %u %d%n",
+			"%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog :
@@ -2307,8 +2235,8 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
-		icsk->icsk_rto,
-		icsk->icsk_ack.ato,
+		jiffies_to_clock_t(icsk->icsk_rto),
+		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh,