Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--	net/ipv4/tcp_ipv4.c	324
1 file changed, 128 insertions(+), 196 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ffe869ac1bcf..a82df6307567 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -5,8 +5,6 @@
  *
  *		Implementation of the Transmission Control Protocol(TCP).
  *
- *		Version: $Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
- *
  *		IPv4 specific functions
  *
  *
@@ -89,10 +87,14 @@ int sysctl_tcp_low_latency __read_mostly;
89#ifdef CONFIG_TCP_MD5SIG 87#ifdef CONFIG_TCP_MD5SIG
90static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, 88static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
91 __be32 addr); 89 __be32 addr);
92static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, 90static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
93 __be32 saddr, __be32 daddr, 91 __be32 daddr, __be32 saddr, struct tcphdr *th);
94 struct tcphdr *th, int protocol, 92#else
95 unsigned int tcplen); 93static inline
94struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
95{
96 return NULL;
97}
96#endif 98#endif
97 99
98struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { 100struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
@@ -172,7 +174,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 			       inet->sport, usin->sin_port, sk, 1);
 	if (tmp < 0) {
 		if (tmp == -ENETUNREACH)
-			IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
+			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
 		return tmp;
 	}
 
@@ -340,16 +342,17 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 	struct sock *sk;
 	__u32 seq;
 	int err;
+	struct net *net = dev_net(skb->dev);
 
 	if (skb->len < (iph->ihl << 2) + 8) {
-		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
 		return;
 	}
 
-	sk = inet_lookup(dev_net(skb->dev), &tcp_hashinfo, iph->daddr, th->dest,
+	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
 			iph->saddr, th->source, inet_iif(skb));
 	if (!sk) {
-		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
 		return;
 	}
 	if (sk->sk_state == TCP_TIME_WAIT) {
@@ -362,7 +365,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 	 * servers this needs to be solved differently.
 	 */
 	if (sock_owned_by_user(sk))
-		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
+		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 
 	if (sk->sk_state == TCP_CLOSE)
 		goto out;
@@ -371,7 +374,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 	seq = ntohl(th->seq);
 	if (sk->sk_state != TCP_LISTEN &&
 	    !between(seq, tp->snd_una, tp->snd_nxt)) {
-		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}
 
@@ -418,7 +421,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 	BUG_TRAP(!req->sk);
 
 	if (seq != tcp_rsk(req)->snt_isn) {
-		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
+		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}
 
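The statistics changes in these hunks all follow one pattern: the SNMP macros (ICMP_INC_STATS_BH, NET_INC_STATS_BH, TCP_INC_STATS_BH, IP_INC_STATS_BH) now take a struct net * as their first argument, resolved once from the packet (dev_net(skb->dev)) or from the socket (sock_net(sk)), so each event is counted in the owning network namespace rather than in one global table. The userspace sketch below only models that idea; toy_net, TOY_MIB_* and the two macros are invented for illustration and are not kernel APIs.

#include <stdio.h>

/* Toy stand-ins; the real kernel keeps per-CPU SNMP mibs inside struct net. */
enum { TOY_MIB_INERRORS, TOY_MIB_OUTSEGS, TOY_MIB_MAX };

struct toy_net {
	unsigned long mib[TOY_MIB_MAX];
};

/* Old style: one global table, no way to tell namespaces apart. */
static unsigned long global_mib[TOY_MIB_MAX];
#define OLD_INC_STATS(field)		(global_mib[field]++)

/* New style: the caller says which namespace owns the event. */
#define NEW_INC_STATS(net, field)	((net)->mib[field]++)

int main(void)
{
	struct toy_net init_net = { {0} }, other_net = { {0} };

	OLD_INC_STATS(TOY_MIB_INERRORS);             /* lands in the shared table */
	NEW_INC_STATS(&init_net, TOY_MIB_INERRORS);  /* lands in init_net only    */
	NEW_INC_STATS(&other_net, TOY_MIB_OUTSEGS);

	printf("init_net INERRORS=%lu other_net OUTSEGS=%lu\n",
	       init_net.mib[TOY_MIB_INERRORS], other_net.mib[TOY_MIB_OUTSEGS]);
	return 0;
}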
@@ -540,6 +543,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
540#ifdef CONFIG_TCP_MD5SIG 543#ifdef CONFIG_TCP_MD5SIG
541 struct tcp_md5sig_key *key; 544 struct tcp_md5sig_key *key;
542#endif 545#endif
546 struct net *net;
543 547
544 /* Never send a reset in response to a reset. */ 548 /* Never send a reset in response to a reset. */
545 if (th->rst) 549 if (th->rst)
@@ -578,12 +582,9 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
 		rep.th.doff = arg.iov[0].iov_len / 4;
 
-		tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[1],
-					key,
-					ip_hdr(skb)->daddr,
-					ip_hdr(skb)->saddr,
-					&rep.th, IPPROTO_TCP,
-					arg.iov[0].iov_len);
+		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
+				    key, ip_hdr(skb)->daddr,
+				    ip_hdr(skb)->saddr, &rep.th);
 	}
 #endif
 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
@@ -591,20 +592,21 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 				      sizeof(struct tcphdr), IPPROTO_TCP, 0);
 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 
-	ip_send_reply(dev_net(skb->dst->dev)->ipv4.tcp_sock, skb,
+	net = dev_net(skb->dst->dev);
+	ip_send_reply(net->ipv4.tcp_sock, skb,
 		      &arg, arg.iov[0].iov_len);
 
-	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
-	TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
+	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
 }
 
 /* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
    outside socket context is ugly, certainly. What can I do?
  */
 
-static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
-			    struct sk_buff *skb, u32 seq, u32 ack,
-			    u32 win, u32 ts)
+static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+			    u32 win, u32 ts, int oif,
+			    struct tcp_md5sig_key *key)
 {
 	struct tcphdr *th = tcp_hdr(skb);
 	struct {
@@ -616,10 +618,7 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
 		];
 	} rep;
 	struct ip_reply_arg arg;
-#ifdef CONFIG_TCP_MD5SIG
-	struct tcp_md5sig_key *key;
-	struct tcp_md5sig_key tw_key;
-#endif
+	struct net *net = dev_net(skb->dev);
 
 	memset(&rep.th, 0, sizeof(struct tcphdr));
 	memset(&arg, 0, sizeof(arg));
@@ -645,23 +644,6 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
 	rep.th.window = htons(win);
 
 #ifdef CONFIG_TCP_MD5SIG
-	/*
-	 * The SKB holds an imcoming packet, but may not have a valid ->sk
-	 * pointer. This is especially the case when we're dealing with a
-	 * TIME_WAIT ack, because the sk structure is long gone, and only
-	 * the tcp_timewait_sock remains. So the md5 key is stashed in that
-	 * structure, and we use it in preference. I believe that (twsk ||
-	 * skb->sk) holds true, but we program defensively.
-	 */
-	if (!twsk && skb->sk) {
-		key = tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr);
-	} else if (twsk && twsk->tw_md5_keylen) {
-		tw_key.key = twsk->tw_md5_key;
-		tw_key.keylen = twsk->tw_md5_keylen;
-		key = &tw_key;
-	} else
-		key = NULL;
-
 	if (key) {
 		int offset = (ts) ? 3 : 0;
 
@@ -672,25 +654,22 @@ static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
 		rep.th.doff = arg.iov[0].iov_len/4;
 
-		tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[offset],
-					key,
-					ip_hdr(skb)->daddr,
-					ip_hdr(skb)->saddr,
-					&rep.th, IPPROTO_TCP,
-					arg.iov[0].iov_len);
+		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
+				    key, ip_hdr(skb)->daddr,
+				    ip_hdr(skb)->saddr, &rep.th);
 	}
 #endif
 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
 				      ip_hdr(skb)->saddr, /* XXX */
 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
-	if (twsk)
-		arg.bound_dev_if = twsk->tw_sk.tw_bound_dev_if;
+	if (oif)
+		arg.bound_dev_if = oif;
 
-	ip_send_reply(dev_net(skb->dev)->ipv4.tcp_sock, skb,
+	ip_send_reply(net->ipv4.tcp_sock, skb,
 		      &arg, arg.iov[0].iov_len);
 
-	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
+	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
 }
 
 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
@@ -698,9 +677,12 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 	struct inet_timewait_sock *tw = inet_twsk(sk);
 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 
-	tcp_v4_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
-			tcptw->tw_ts_recent);
+			tcptw->tw_ts_recent,
+			tw->tw_bound_dev_if,
+			tcp_twsk_md5_key(tcptw)
+			);
 
 	inet_twsk_put(tw);
 }
@@ -708,9 +690,11 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 static void tcp_v4_reqsk_send_ack(struct sk_buff *skb,
 				  struct request_sock *req)
 {
-	tcp_v4_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1,
+	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
 			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
-			req->ts_recent);
+			req->ts_recent,
+			0,
+			tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr));
 }
 
 /*
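With tcp_v4_send_ack() no longer taking a timewait socket, each caller above now supplies the outbound interface and the MD5 key it already knows about: tcp_v4_timewait_ack() passes tw->tw_bound_dev_if and tcp_twsk_md5_key(tcptw), while tcp_v4_reqsk_send_ack() passes 0 and a lookup against the listener via tcp_v4_md5_do_lookup(skb->sk, ...). The sketch below models only the shape of that refactoring; send_ack, timewait_ack, reqsk_ack and struct md5_key are invented names, not kernel code.

#include <stdio.h>

/* Invented stand-ins for this sketch only. */
struct md5_key { const char *name; };

/* After the change: the reply path just uses whatever key it is handed. */
static void send_ack(int oif, const struct md5_key *key)
{
	printf("ack via ifindex %d, md5 key: %s\n",
	       oif, key ? key->name : "(none)");
}

/* Each caller resolves the key from the state it actually owns. */
static void timewait_ack(int bound_dev_if, const struct md5_key *tw_key)
{
	send_ack(bound_dev_if, tw_key);		/* key cached in the timewait sock */
}

static void reqsk_ack(const struct md5_key *(*lookup)(void))
{
	send_ack(0, lookup ? lookup() : NULL);	/* key looked up from the listener */
}

static const struct md5_key listener_key = { "peer 192.0.2.1" };
static const struct md5_key *lookup_listener_key(void) { return &listener_key; }

int main(void)
{
	struct md5_key tw_key = { "cached timewait key" };

	timewait_ack(3, &tw_key);
	reqsk_ack(lookup_listener_key);
	return 0;
}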
@@ -1000,32 +984,13 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
 				 newkey, cmd.tcpm_keylen);
 }
 
-static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
-				   __be32 saddr, __be32 daddr,
-				   struct tcphdr *th, int protocol,
-				   unsigned int tcplen)
+static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
+					__be32 daddr, __be32 saddr, int nbytes)
 {
-	struct scatterlist sg[4];
-	__u16 data_len;
-	int block = 0;
-	__sum16 old_checksum;
-	struct tcp_md5sig_pool *hp;
 	struct tcp4_pseudohdr *bp;
-	struct hash_desc *desc;
-	int err;
-	unsigned int nbytes = 0;
-
-	/*
-	 * Okay, so RFC2385 is turned on for this connection,
-	 * so we need to generate the MD5 hash for the packet now.
-	 */
-
-	hp = tcp_get_md5sig_pool();
-	if (!hp)
-		goto clear_hash_noput;
+	struct scatterlist sg;
 
 	bp = &hp->md5_blk.ip4;
-	desc = &hp->md5_desc;
 
 	/*
 	 * 1. the TCP pseudo-header (in the order: source IP address,
@@ -1035,86 +1000,96 @@ static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
 	bp->saddr = saddr;
 	bp->daddr = daddr;
 	bp->pad = 0;
-	bp->protocol = protocol;
-	bp->len = htons(tcplen);
-
-	sg_init_table(sg, 4);
-
-	sg_set_buf(&sg[block++], bp, sizeof(*bp));
-	nbytes += sizeof(*bp);
-
-	/* 2. the TCP header, excluding options, and assuming a
-	 * checksum of zero/
-	 */
-	old_checksum = th->check;
-	th->check = 0;
-	sg_set_buf(&sg[block++], th, sizeof(struct tcphdr));
-	nbytes += sizeof(struct tcphdr);
+	bp->protocol = IPPROTO_TCP;
+	bp->len = cpu_to_be16(nbytes);
 
-	/* 3. the TCP segment data (if any) */
-	data_len = tcplen - (th->doff << 2);
-	if (data_len > 0) {
-		unsigned char *data = (unsigned char *)th + (th->doff << 2);
-		sg_set_buf(&sg[block++], data, data_len);
-		nbytes += data_len;
-	}
+	sg_init_one(&sg, bp, sizeof(*bp));
+	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
+}
 
-	/* 4. an independently-specified key or password, known to both
-	 * TCPs and presumably connection-specific
-	 */
-	sg_set_buf(&sg[block++], key->key, key->keylen);
-	nbytes += key->keylen;
+static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
+			       __be32 daddr, __be32 saddr, struct tcphdr *th)
+{
+	struct tcp_md5sig_pool *hp;
+	struct hash_desc *desc;
 
-	sg_mark_end(&sg[block - 1]);
+	hp = tcp_get_md5sig_pool();
+	if (!hp)
+		goto clear_hash_noput;
+	desc = &hp->md5_desc;
 
-	/* Now store the Hash into the packet */
-	err = crypto_hash_init(desc);
-	if (err)
+	if (crypto_hash_init(desc))
 		goto clear_hash;
-	err = crypto_hash_update(desc, sg, nbytes);
-	if (err)
+	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
 		goto clear_hash;
-	err = crypto_hash_final(desc, md5_hash);
-	if (err)
+	if (tcp_md5_hash_header(hp, th))
+		goto clear_hash;
+	if (tcp_md5_hash_key(hp, key))
+		goto clear_hash;
+	if (crypto_hash_final(desc, md5_hash))
 		goto clear_hash;
 
-	/* Reset header, and free up the crypto */
 	tcp_put_md5sig_pool();
-	th->check = old_checksum;
-
-out:
 	return 0;
+
 clear_hash:
 	tcp_put_md5sig_pool();
 clear_hash_noput:
 	memset(md5_hash, 0, 16);
-	goto out;
+	return 1;
 }
 
-int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
-			 struct sock *sk,
-			 struct dst_entry *dst,
-			 struct request_sock *req,
-			 struct tcphdr *th, int protocol,
-			 unsigned int tcplen)
+int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
+			struct sock *sk, struct request_sock *req,
+			struct sk_buff *skb)
 {
+	struct tcp_md5sig_pool *hp;
+	struct hash_desc *desc;
+	struct tcphdr *th = tcp_hdr(skb);
 	__be32 saddr, daddr;
 
 	if (sk) {
 		saddr = inet_sk(sk)->saddr;
 		daddr = inet_sk(sk)->daddr;
+	} else if (req) {
+		saddr = inet_rsk(req)->loc_addr;
+		daddr = inet_rsk(req)->rmt_addr;
 	} else {
-		struct rtable *rt = (struct rtable *)dst;
-		BUG_ON(!rt);
-		saddr = rt->rt_src;
-		daddr = rt->rt_dst;
+		const struct iphdr *iph = ip_hdr(skb);
+		saddr = iph->saddr;
+		daddr = iph->daddr;
 	}
-	return tcp_v4_do_calc_md5_hash(md5_hash, key,
-				       saddr, daddr,
-				       th, protocol, tcplen);
+
+	hp = tcp_get_md5sig_pool();
+	if (!hp)
+		goto clear_hash_noput;
+	desc = &hp->md5_desc;
+
+	if (crypto_hash_init(desc))
+		goto clear_hash;
+
+	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
+		goto clear_hash;
+	if (tcp_md5_hash_header(hp, th))
+		goto clear_hash;
+	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
+		goto clear_hash;
+	if (tcp_md5_hash_key(hp, key))
+		goto clear_hash;
+	if (crypto_hash_final(desc, md5_hash))
+		goto clear_hash;
+
+	tcp_put_md5sig_pool();
+	return 0;
+
+clear_hash:
+	tcp_put_md5sig_pool();
+clear_hash_noput:
+	memset(md5_hash, 0, 16);
+	return 1;
 }
 
-EXPORT_SYMBOL(tcp_v4_calc_md5_hash);
+EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
 
 static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
 {
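The rework above replaces the monolithic tcp_v4_do_calc_md5_hash() with small steps fed into one incremental hash: the IPv4 pseudo-header (tcp_v4_md5_hash_pseudoheader), the TCP header with its checksum treated as zero (tcp_md5_hash_header), the segment payload where present (tcp_md5_hash_skb_data), and finally the key (tcp_md5_hash_key). The sketch below only illustrates that RFC 2385 input ordering; it uses a trivial FNV-1a stand-in rather than MD5, none of the kernel crypto API, and every name in it is invented for the example.

#include <stdint.h>
#include <stdio.h>

/* Trivial incremental "digest" standing in for MD5 (FNV-1a); illustration only. */
struct toy_digest { uint64_t h; };

static void toy_init(struct toy_digest *d) { d->h = 0xcbf29ce484222325ULL; }
static void toy_update(struct toy_digest *d, const void *p, size_t n)
{
	const uint8_t *b = p;
	while (n--) { d->h ^= *b++; d->h *= 0x100000001b3ULL; }
}
static uint64_t toy_final(struct toy_digest *d) { return d->h; }

struct pseudohdr  { uint32_t saddr, daddr; uint8_t pad, protocol; uint16_t len; };
struct toy_tcphdr { uint16_t source, dest; uint32_t seq, ack; uint16_t flags, window, check, urg; };

/* RFC 2385 ordering: pseudo-header, TCP header (checksum as zero), payload, key. */
static uint64_t tcp_md5_like_hash(uint32_t saddr, uint32_t daddr,
				  struct toy_tcphdr th,	/* by value: we zero the checksum */
				  const void *payload, uint16_t payload_len,
				  const void *key, size_t keylen)
{
	struct pseudohdr bp = { saddr, daddr, 0, 6 /* IPPROTO_TCP */,
				(uint16_t)(sizeof(th) + payload_len) };
	struct toy_digest d;

	toy_init(&d);
	toy_update(&d, &bp, sizeof(bp));	/* 1. pseudo-header            */
	th.check = 0;
	toy_update(&d, &th, sizeof(th));	/* 2. TCP header, checksum = 0 */
	toy_update(&d, payload, payload_len);	/* 3. segment data, if any     */
	toy_update(&d, key, keylen);		/* 4. the shared key           */
	return toy_final(&d);
}

int main(void)
{
	struct toy_tcphdr th = { 80, 12345, 1, 1, 0x18, 512, 0xbeef, 0 };

	printf("digest=%016llx\n",
	       (unsigned long long)tcp_md5_like_hash(0x0a000001, 0x0a000002, th,
						     "hello", 5, "secret", 6));
	return 0;
}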
@@ -1130,52 +1105,12 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
 	struct tcp_md5sig_key *hash_expected;
 	const struct iphdr *iph = ip_hdr(skb);
 	struct tcphdr *th = tcp_hdr(skb);
-	int length = (th->doff << 2) - sizeof(struct tcphdr);
 	int genhash;
-	unsigned char *ptr;
 	unsigned char newhash[16];
 
 	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
+	hash_location = tcp_parse_md5sig_option(th);
 
-	/*
-	 * If the TCP option length is less than the TCP_MD5SIG
-	 * option length, then we can shortcut
-	 */
-	if (length < TCPOLEN_MD5SIG) {
-		if (hash_expected)
-			return 1;
-		else
-			return 0;
-	}
-
-	/* Okay, we can't shortcut - we have to grub through the options */
-	ptr = (unsigned char *)(th + 1);
-	while (length > 0) {
-		int opcode = *ptr++;
-		int opsize;
-
-		switch (opcode) {
-		case TCPOPT_EOL:
-			goto done_opts;
-		case TCPOPT_NOP:
-			length--;
-			continue;
-		default:
-			opsize = *ptr++;
-			if (opsize < 2)
-				goto done_opts;
-			if (opsize > length)
-				goto done_opts;
-
-			if (opcode == TCPOPT_MD5SIG) {
-				hash_location = ptr;
-				goto done_opts;
-			}
-		}
-		ptr += opsize-2;
-		length -= opsize;
-	}
-done_opts:
 	/* We've parsed the options - do we have a hash? */
 	if (!hash_expected && !hash_location)
 		return 0;
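The open-coded option walk removed above is now handled by tcp_parse_md5sig_option(), which returns a pointer to the digest bytes of the MD5 signature option (kind 19, length 18) or NULL when the segment carries none. Below is a self-contained sketch of such a walk, modeled on the removed loop (with an extra check on the option length); the names are local to the example and are not the kernel helper.

#include <stddef.h>

#define TOY_TCPOPT_EOL		0
#define TOY_TCPOPT_NOP		1
#define TOY_TCPOPT_MD5SIG	19
#define TOY_TCPOLEN_MD5SIG	18

/*
 * Walk the TCP option bytes (everything between the fixed header and
 * doff * 4) and return a pointer to the MD5 digest, or NULL if the
 * segment carries no MD5SIG option.  Mirrors the removed open-coded loop.
 */
static const unsigned char *find_md5sig(const unsigned char *opt, int length)
{
	while (length > 0) {
		int opcode = *opt++;
		int opsize;

		switch (opcode) {
		case TOY_TCPOPT_EOL:
			return NULL;
		case TOY_TCPOPT_NOP:
			length--;
			continue;
		default:
			opsize = *opt++;
			if (opsize < 2 || opsize > length)
				return NULL;	/* malformed options */
			if (opcode == TOY_TCPOPT_MD5SIG &&
			    opsize == TOY_TCPOLEN_MD5SIG)
				return opt;	/* points at the 16 digest bytes */
		}
		opt += opsize - 2;
		length -= opsize;
	}
	return NULL;
}

int main(void)
{
	/* NOP, NOP, then an MD5SIG option with a dummy all-zero digest. */
	unsigned char opts[20] = { 1, 1, 19, 18 };

	return find_md5sig(opts, sizeof(opts)) ? 0 : 1;
}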
@@ -1199,11 +1134,9 @@ done_opts:
 	/* Okay, so this is hash_expected and hash_location -
 	 * so we need to calculate the checksum.
 	 */
-	genhash = tcp_v4_do_calc_md5_hash(newhash,
-					  hash_expected,
-					  iph->saddr, iph->daddr,
-					  th, sk->sk_protocol,
-					  skb->len);
+	genhash = tcp_v4_md5_hash_skb(newhash,
+				      hash_expected,
+				      NULL, NULL, skb);
 
 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
 		if (net_ratelimit()) {
@@ -1347,7 +1280,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 		if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
 		    (s32)(peer->tcp_ts - req->ts_recent) >
 							TCP_PAWS_WINDOW) {
-			NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
 			goto drop_and_release;
 		}
 	}
@@ -1452,6 +1385,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 		if (newkey != NULL)
 			tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr,
 					  newkey, key->keylen);
+		newsk->sk_route_caps &= ~NETIF_F_GSO_MASK;
 	}
 #endif
 
@@ -1461,9 +1395,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	return newsk;
 
 exit_overflow:
-	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 exit:
-	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	dst_release(dst);
 	return NULL;
 }
@@ -1590,7 +1524,7 @@ discard:
 	return 0;
 
 csum_err:
-	TCP_INC_STATS_BH(TCP_MIB_INERRS);
+	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
 	goto discard;
 }
 
@@ -1604,12 +1538,13 @@ int tcp_v4_rcv(struct sk_buff *skb)
 	struct tcphdr *th;
 	struct sock *sk;
 	int ret;
+	struct net *net = dev_net(skb->dev);
 
 	if (skb->pkt_type != PACKET_HOST)
 		goto discard_it;
 
 	/* Count it even if it's bad */
-	TCP_INC_STATS_BH(TCP_MIB_INSEGS);
+	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
 
 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
 		goto discard_it;
@@ -1638,7 +1573,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 	TCP_SKB_CB(skb)->flags	 = iph->tos;
 	TCP_SKB_CB(skb)->sacked	 = 0;
 
-	sk = __inet_lookup(dev_net(skb->dev), &tcp_hashinfo, iph->saddr,
+	sk = __inet_lookup(net, &tcp_hashinfo, iph->saddr,
 			   th->source, iph->daddr, th->dest, inet_iif(skb));
 	if (!sk)
 		goto no_tcp_socket;
@@ -1685,7 +1620,7 @@ no_tcp_socket:
 
 	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
 bad_packet:
-		TCP_INC_STATS_BH(TCP_MIB_INERRS);
+		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
 	} else {
 		tcp_v4_send_reset(NULL, skb);
 	}
@@ -1706,7 +1641,7 @@ do_time_wait:
 	}
 
 	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
-		TCP_INC_STATS_BH(TCP_MIB_INERRS);
+		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
 		inet_twsk_put(inet_twsk(sk));
 		goto discard_it;
 	}
@@ -1814,7 +1749,7 @@ struct inet_connection_sock_af_ops ipv4_specific = {
 #ifdef CONFIG_TCP_MD5SIG
 static struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
 	.md5_lookup		= tcp_v4_md5_lookup,
-	.calc_md5_hash		= tcp_v4_calc_md5_hash,
+	.calc_md5_hash		= tcp_v4_md5_hash_skb,
 	.md5_add		= tcp_v4_md5_add_func,
 	.md5_parse		= tcp_v4_parse_md5_keys,
 };
@@ -1871,7 +1806,7 @@ static int tcp_v4_init_sock(struct sock *sk)
 	return 0;
 }
 
-int tcp_v4_destroy_sock(struct sock *sk)
+void tcp_v4_destroy_sock(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -1915,8 +1850,6 @@ int tcp_v4_destroy_sock(struct sock *sk)
 	}
 
 	atomic_dec(&tcp_sockets_allocated);
-
-	return 0;
 }
 
 EXPORT_SYMBOL(tcp_v4_destroy_sock);
@@ -1959,8 +1892,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
 				req = req->dl_next;
 			while (1) {
 				while (req) {
-					if (req->rsk_ops->family == st->family &&
-					    net_eq(sock_net(req->sk), net)) {
+					if (req->rsk_ops->family == st->family) {
 						cur = req;
 						goto out;
 					}