aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/tcp_ipv4.c
diff options
context:
space:
mode:
authorYOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>2006-11-14 22:07:45 -0500
committerDavid S. Miller <davem@sunset.davemloft.net>2006-12-03 00:22:39 -0500
commitcfb6eeb4c860592edd123fdea908d23c6ad1c7dc (patch)
tree361c073622faa540ef6602ef1b0a6e8c0a17fc60 /net/ipv4/tcp_ipv4.c
parentbf6bce71eae386dbc37f93af7e5ad173450d9945 (diff)
[TCP]: MD5 Signature Option (RFC2385) support.
Based on implementation by Rick Payne. Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--net/ipv4/tcp_ipv4.c673
1 files changed, 643 insertions, 30 deletions
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0ad0904bf56c..8c8e8112f98d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -78,6 +78,9 @@
78#include <linux/proc_fs.h> 78#include <linux/proc_fs.h>
79#include <linux/seq_file.h> 79#include <linux/seq_file.h>
80 80
81#include <linux/crypto.h>
82#include <linux/scatterlist.h>
83
81int sysctl_tcp_tw_reuse __read_mostly; 84int sysctl_tcp_tw_reuse __read_mostly;
82int sysctl_tcp_low_latency __read_mostly; 85int sysctl_tcp_low_latency __read_mostly;
83 86
@@ -89,6 +92,13 @@ static struct socket *tcp_socket;
89 92
90void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); 93void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
91 94
95#ifdef CONFIG_TCP_MD5SIG
96static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr);
97static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
98 __be32 saddr, __be32 daddr, struct tcphdr *th,
99 int protocol, int tcplen);
100#endif
101
92struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { 102struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
93 .lhash_lock = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock), 103 .lhash_lock = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock),
94 .lhash_users = ATOMIC_INIT(0), 104 .lhash_users = ATOMIC_INIT(0),
@@ -526,11 +536,19 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
526 * Exception: precedence violation. We do not implement it in any case. 536 * Exception: precedence violation. We do not implement it in any case.
527 */ 537 */
528 538
529static void tcp_v4_send_reset(struct sk_buff *skb) 539static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
530{ 540{
531 struct tcphdr *th = skb->h.th; 541 struct tcphdr *th = skb->h.th;
532 struct tcphdr rth; 542 struct {
543 struct tcphdr th;
544#ifdef CONFIG_TCP_MD5SIG
545 u32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
546#endif
547 } rep;
533 struct ip_reply_arg arg; 548 struct ip_reply_arg arg;
549#ifdef CONFIG_TCP_MD5SIG
550 struct tcp_md5sig_key *key;
551#endif
534 552
535 /* Never send a reset in response to a reset. */ 553 /* Never send a reset in response to a reset. */
536 if (th->rst) 554 if (th->rst)
@@ -540,29 +558,50 @@ static void tcp_v4_send_reset(struct sk_buff *skb)
540 return; 558 return;
541 559
542 /* Swap the send and the receive. */ 560 /* Swap the send and the receive. */
543 memset(&rth, 0, sizeof(struct tcphdr)); 561 memset(&rep, 0, sizeof(rep));
544 rth.dest = th->source; 562 rep.th.dest = th->source;
545 rth.source = th->dest; 563 rep.th.source = th->dest;
546 rth.doff = sizeof(struct tcphdr) / 4; 564 rep.th.doff = sizeof(struct tcphdr) / 4;
547 rth.rst = 1; 565 rep.th.rst = 1;
548 566
549 if (th->ack) { 567 if (th->ack) {
550 rth.seq = th->ack_seq; 568 rep.th.seq = th->ack_seq;
551 } else { 569 } else {
552 rth.ack = 1; 570 rep.th.ack = 1;
553 rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin + 571 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
554 skb->len - (th->doff << 2)); 572 skb->len - (th->doff << 2));
555 } 573 }
556 574
557 memset(&arg, 0, sizeof arg); 575 memset(&arg, 0, sizeof arg);
558 arg.iov[0].iov_base = (unsigned char *)&rth; 576 arg.iov[0].iov_base = (unsigned char *)&rep;
559 arg.iov[0].iov_len = sizeof rth; 577 arg.iov[0].iov_len = sizeof(rep.th);
578
579#ifdef CONFIG_TCP_MD5SIG
580 key = sk ? tcp_v4_md5_do_lookup(sk, skb->nh.iph->daddr) : NULL;
581 if (key) {
582 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
583 (TCPOPT_NOP << 16) |
584 (TCPOPT_MD5SIG << 8) |
585 TCPOLEN_MD5SIG);
586 /* Update length and the length the header thinks exists */
587 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
588 rep.th.doff = arg.iov[0].iov_len / 4;
589
590 tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[1],
591 key,
592 skb->nh.iph->daddr,
593 skb->nh.iph->saddr,
594 &rep.th, IPPROTO_TCP,
595 arg.iov[0].iov_len);
596 }
597#endif
598
560 arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr, 599 arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
561 skb->nh.iph->saddr, /*XXX*/ 600 skb->nh.iph->saddr, /*XXX*/
562 sizeof(struct tcphdr), IPPROTO_TCP, 0); 601 sizeof(struct tcphdr), IPPROTO_TCP, 0);
563 arg.csumoffset = offsetof(struct tcphdr, check) / 2; 602 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
564 603
565 ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth); 604 ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
566 605
567 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); 606 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
568 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS); 607 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
@@ -572,15 +611,24 @@ static void tcp_v4_send_reset(struct sk_buff *skb)
572 outside socket context is ugly, certainly. What can I do? 611 outside socket context is ugly, certainly. What can I do?
573 */ 612 */
574 613
575static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, 614static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
615 struct sk_buff *skb, u32 seq, u32 ack,
576 u32 win, u32 ts) 616 u32 win, u32 ts)
577{ 617{
578 struct tcphdr *th = skb->h.th; 618 struct tcphdr *th = skb->h.th;
579 struct { 619 struct {
580 struct tcphdr th; 620 struct tcphdr th;
581 u32 tsopt[TCPOLEN_TSTAMP_ALIGNED >> 2]; 621 u32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
622#ifdef CONFIG_TCP_MD5SIG
623 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
624#endif
625 ];
582 } rep; 626 } rep;
583 struct ip_reply_arg arg; 627 struct ip_reply_arg arg;
628#ifdef CONFIG_TCP_MD5SIG
629 struct tcp_md5sig_key *key;
630 struct tcp_md5sig_key tw_key;
631#endif
584 632
585 memset(&rep.th, 0, sizeof(struct tcphdr)); 633 memset(&rep.th, 0, sizeof(struct tcphdr));
586 memset(&arg, 0, sizeof arg); 634 memset(&arg, 0, sizeof arg);
@@ -588,12 +636,12 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
588 arg.iov[0].iov_base = (unsigned char *)&rep; 636 arg.iov[0].iov_base = (unsigned char *)&rep;
589 arg.iov[0].iov_len = sizeof(rep.th); 637 arg.iov[0].iov_len = sizeof(rep.th);
590 if (ts) { 638 if (ts) {
591 rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | 639 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
592 (TCPOPT_TIMESTAMP << 8) | 640 (TCPOPT_TIMESTAMP << 8) |
593 TCPOLEN_TIMESTAMP); 641 TCPOLEN_TIMESTAMP);
594 rep.tsopt[1] = htonl(tcp_time_stamp); 642 rep.opt[1] = htonl(tcp_time_stamp);
595 rep.tsopt[2] = htonl(ts); 643 rep.opt[2] = htonl(ts);
596 arg.iov[0].iov_len = sizeof(rep); 644 arg.iov[0].iov_len = TCPOLEN_TSTAMP_ALIGNED;
597 } 645 }
598 646
599 /* Swap the send and the receive. */ 647 /* Swap the send and the receive. */
@@ -605,6 +653,44 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
605 rep.th.ack = 1; 653 rep.th.ack = 1;
606 rep.th.window = htons(win); 654 rep.th.window = htons(win);
607 655
656#ifdef CONFIG_TCP_MD5SIG
657 /*
 658 * The SKB holds an incoming packet, but may not have a valid ->sk
659 * pointer. This is especially the case when we're dealing with a
660 * TIME_WAIT ack, because the sk structure is long gone, and only
661 * the tcp_timewait_sock remains. So the md5 key is stashed in that
662 * structure, and we use it in preference. I believe that (twsk ||
663 * skb->sk) holds true, but we program defensively.
664 */
665 if (!twsk && skb->sk) {
666 key = tcp_v4_md5_do_lookup(skb->sk, skb->nh.iph->daddr);
667 } else if (twsk && twsk->tw_md5_keylen) {
668 tw_key.key = twsk->tw_md5_key;
669 tw_key.keylen = twsk->tw_md5_keylen;
670 key = &tw_key;
671 } else {
672 key = NULL;
673 }
674
675 if (key) {
676 int offset = (ts) ? 3 : 0;
677
678 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
679 (TCPOPT_NOP << 16) |
680 (TCPOPT_MD5SIG << 8) |
681 TCPOLEN_MD5SIG);
682 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
683 rep.th.doff = arg.iov[0].iov_len/4;
684
685 tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[offset],
686 key,
687 skb->nh.iph->daddr,
688 skb->nh.iph->saddr,
689 &rep.th, IPPROTO_TCP,
690 arg.iov[0].iov_len);
691 }
692#endif
693
608 arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr, 694 arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
609 skb->nh.iph->saddr, /*XXX*/ 695 skb->nh.iph->saddr, /*XXX*/
610 arg.iov[0].iov_len, IPPROTO_TCP, 0); 696 arg.iov[0].iov_len, IPPROTO_TCP, 0);
@@ -618,9 +704,9 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
618static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) 704static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
619{ 705{
620 struct inet_timewait_sock *tw = inet_twsk(sk); 706 struct inet_timewait_sock *tw = inet_twsk(sk);
621 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk); 707 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
622 708
623 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, 709 tcp_v4_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
624 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent); 710 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent);
625 711
626 inet_twsk_put(tw); 712 inet_twsk_put(tw);
@@ -628,7 +714,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
628 714
629static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) 715static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
630{ 716{
631 tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, 717 tcp_v4_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1,
718 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
632 req->ts_recent); 719 req->ts_recent);
633} 720}
634 721
@@ -714,6 +801,461 @@ static struct ip_options *tcp_v4_save_options(struct sock *sk,
714 return dopt; 801 return dopt;
715} 802}
716 803
804#ifdef CONFIG_TCP_MD5SIG
805/*
806 * RFC2385 MD5 checksumming requires a mapping of
807 * IP address->MD5 Key.
808 * We need to maintain these in the sk structure.
809 */
810
811/* Find the Key structure for an address. */
812static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
813{
814 struct tcp_sock *tp = tcp_sk(sk);
815 int i;
816
817 if (!tp->md5sig_info || !tp->md5sig_info->entries4)
818 return NULL;
819 for (i = 0; i < tp->md5sig_info->entries4; i++) {
820 if (tp->md5sig_info->keys4[i].addr == addr)
821 return (struct tcp_md5sig_key *)&tp->md5sig_info->keys4[i];
822 }
823 return NULL;
824}
825
826struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
827 struct sock *addr_sk)
828{
829 return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->daddr);
830}
831
832EXPORT_SYMBOL(tcp_v4_md5_lookup);
833
834struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
835 struct request_sock *req)
836{
837 return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
838}
839
840/* This can be called on a newly created socket, from other files */
841int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
842 u8 *newkey, u8 newkeylen)
843{
844 /* Add Key to the list */
845 struct tcp4_md5sig_key *key;
846 struct tcp_sock *tp = tcp_sk(sk);
847 struct tcp4_md5sig_key *keys;
848
849 key = (struct tcp4_md5sig_key *) tcp_v4_md5_do_lookup(sk, addr);
850 if (key) {
851 /* Pre-existing entry - just update that one. */
852 kfree (key->key);
853 key->key = newkey;
854 key->keylen = newkeylen;
855 } else {
856 if (!tp->md5sig_info) {
857 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
858 if (!tp->md5sig_info) {
859 kfree(newkey);
860 return -ENOMEM;
861 }
862 }
863 if (tcp_alloc_md5sig_pool() == NULL) {
864 kfree(newkey);
865 return -ENOMEM;
866 }
867 if (tp->md5sig_info->alloced4 == tp->md5sig_info->entries4) {
868 keys = kmalloc((sizeof(struct tcp4_md5sig_key) *
869 (tp->md5sig_info->entries4 + 1)), GFP_ATOMIC);
870 if (!keys) {
871 kfree(newkey);
872 tcp_free_md5sig_pool();
873 return -ENOMEM;
874 }
875
876 if (tp->md5sig_info->entries4)
877 memcpy(keys, tp->md5sig_info->keys4,
878 (sizeof (struct tcp4_md5sig_key) *
879 tp->md5sig_info->entries4));
880
881 /* Free old key list, and reference new one */
882 if (tp->md5sig_info->keys4)
883 kfree(tp->md5sig_info->keys4);
884 tp->md5sig_info->keys4 = keys;
885 tp->md5sig_info->alloced4++;
886 }
887 tp->md5sig_info->entries4++;
888 tp->md5sig_info->keys4[tp->md5sig_info->entries4 - 1].addr = addr;
889 tp->md5sig_info->keys4[tp->md5sig_info->entries4 - 1].key = newkey;
890 tp->md5sig_info->keys4[tp->md5sig_info->entries4 - 1].keylen = newkeylen;
891 }
892 return 0;
893}
894
895EXPORT_SYMBOL(tcp_v4_md5_do_add);
896
897static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
898 u8 *newkey, u8 newkeylen)
899{
900 return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->daddr,
901 newkey, newkeylen);
902}
903
904int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
905{
906 struct tcp_sock *tp = tcp_sk(sk);
907 int i;
908
909 for (i = 0; i < tp->md5sig_info->entries4; i++) {
910 if (tp->md5sig_info->keys4[i].addr == addr) {
911 /* Free the key */
912 kfree(tp->md5sig_info->keys4[i].key);
913 tp->md5sig_info->entries4--;
914
915 if (tp->md5sig_info->entries4 == 0) {
916 kfree(tp->md5sig_info->keys4);
917 tp->md5sig_info->keys4 = NULL;
918 } else {
919 /* Need to do some manipulation */
920 if (tp->md5sig_info->entries4 != i)
921 memcpy(&tp->md5sig_info->keys4[i],
922 &tp->md5sig_info->keys4[i+1],
923 (tp->md5sig_info->entries4 - i)
924 * sizeof (struct tcp4_md5sig_key));
925 }
926 tcp_free_md5sig_pool();
927 return 0;
928 }
929 }
930 return -ENOENT;
931}
932
933EXPORT_SYMBOL(tcp_v4_md5_do_del);
934
935static void tcp_v4_clear_md5_list (struct sock *sk)
936{
937 struct tcp_sock *tp = tcp_sk(sk);
938
 939 /* Free each key, then the set of keys,
940 * the crypto element, and then decrement our
941 * hold on the last resort crypto.
942 */
943 if (tp->md5sig_info->entries4) {
944 int i;
945 for (i = 0; i < tp->md5sig_info->entries4; i++)
946 kfree(tp->md5sig_info->keys4[i].key);
947 tp->md5sig_info->entries4 = 0;
948 tcp_free_md5sig_pool();
949 }
950 if (tp->md5sig_info->keys4) {
951 kfree(tp->md5sig_info->keys4);
952 tp->md5sig_info->keys4 = NULL;
953 tp->md5sig_info->alloced4 = 0;
954 }
955}
956
957static int tcp_v4_parse_md5_keys (struct sock *sk, char __user *optval,
958 int optlen)
959{
960 struct tcp_md5sig cmd;
961 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
962 u8 *newkey;
963
964 if (optlen < sizeof(cmd))
965 return -EINVAL;
966
967 if (copy_from_user (&cmd, optval, sizeof(cmd)))
968 return -EFAULT;
969
970 if (sin->sin_family != AF_INET)
971 return -EINVAL;
972
973 if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
974 if (!tcp_sk(sk)->md5sig_info)
975 return -ENOENT;
976 return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
977 }
978
979 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
980 return -EINVAL;
981
982 if (!tcp_sk(sk)->md5sig_info) {
983 struct tcp_sock *tp = tcp_sk(sk);
984 struct tcp_md5sig_info *p;
985
986 p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
987 if (!p)
988 return -EINVAL;
989
990 tp->md5sig_info = p;
991
992 }
993
994 newkey = kmalloc(cmd.tcpm_keylen, GFP_KERNEL);
995 if (!newkey)
996 return -ENOMEM;
997 memcpy(newkey, cmd.tcpm_key, cmd.tcpm_keylen);
998 return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
999 newkey, cmd.tcpm_keylen);
1000}
1001
1002static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
1003 __be32 saddr, __be32 daddr,
1004 struct tcphdr *th, int protocol,
1005 int tcplen)
1006{
1007 struct scatterlist sg[4];
1008 __u16 data_len;
1009 int block = 0;
1010#ifdef CONFIG_TCP_MD5SIG_DEBUG
1011 int i;
1012#endif
1013 __u16 old_checksum;
1014 struct tcp_md5sig_pool *hp;
1015 struct tcp4_pseudohdr *bp;
1016 struct hash_desc *desc;
1017 int err;
1018 unsigned int nbytes = 0;
1019
1020 /*
1021 * Okay, so RFC2385 is turned on for this connection,
1022 * so we need to generate the MD5 hash for the packet now.
1023 */
1024
1025 hp = tcp_get_md5sig_pool();
1026 if (!hp)
1027 goto clear_hash_noput;
1028
1029 bp = &hp->md5_blk.ip4;
1030 desc = &hp->md5_desc;
1031
1032 /*
1033 * 1. the TCP pseudo-header (in the order: source IP address,
1034 * destination IP address, zero-padded protocol number, and
1035 * segment length)
1036 */
1037 bp->saddr = saddr;
1038 bp->daddr = daddr;
1039 bp->pad = 0;
1040 bp->protocol = protocol;
1041 bp->len = htons(tcplen);
1042 sg_set_buf(&sg[block++], bp, sizeof(*bp));
1043 nbytes += sizeof(*bp);
1044
1045#ifdef CONFIG_TCP_MD5SIG_DEBUG
 1046 printk("Calculating hash for: ");
1047 for (i = 0; i < sizeof (*bp); i++)
1048 printk ("%02x ", (unsigned int)((unsigned char *)bp)[i]);
1049 printk(" ");
1050#endif
1051
1052 /* 2. the TCP header, excluding options, and assuming a
1053 * checksum of zero/
1054 */
1055 old_checksum = th->check;
1056 th->check = 0;
1057 sg_set_buf(&sg[block++], th, sizeof(struct tcphdr));
1058 nbytes += sizeof(struct tcphdr);
1059#ifdef CONFIG_TCP_MD5SIG_DEBUG
1060 for (i = 0; i < sizeof (struct tcphdr); i++)
1061 printk (" %02x", (unsigned int)((unsigned char *)th)[i]);
1062#endif
1063 /* 3. the TCP segment data (if any) */
1064 data_len = tcplen - (th->doff << 2);
1065 if (data_len > 0) {
1066 unsigned char *data = (unsigned char *)th + (th->doff << 2);
1067 sg_set_buf(&sg[block++], data, data_len);
1068 nbytes += data_len;
1069 }
1070
1071 /* 4. an independently-specified key or password, known to both
1072 * TCPs and presumably connection-specific
1073 */
1074 sg_set_buf(&sg[block++], key->key, key->keylen);
1075 nbytes += key->keylen;
1076
1077#ifdef CONFIG_TCP_MD5SIG_DEBUG
1078 printk (" and password: ");
1079 for (i = 0; i < key->keylen; i++)
1080 printk ("%02x ", (unsigned int)key->key[i]);
1081#endif
1082
1083 /* Now store the Hash into the packet */
1084 err = crypto_hash_init(desc);
1085 if (err)
1086 goto clear_hash;
1087 err = crypto_hash_update(desc, sg, nbytes);
1088 if (err)
1089 goto clear_hash;
1090 err = crypto_hash_final(desc, md5_hash);
1091 if (err)
1092 goto clear_hash;
1093
1094 /* Reset header, and free up the crypto */
1095 tcp_put_md5sig_pool();
1096 th->check = old_checksum;
1097
1098out:
1099#ifdef CONFIG_TCP_MD5SIG_DEBUG
1100 printk(" result:");
1101 for (i = 0; i < 16; i++)
1102 printk (" %02x", (unsigned int)(((u8*)md5_hash)[i]));
1103 printk("\n");
1104#endif
1105 return 0;
1106clear_hash:
1107 tcp_put_md5sig_pool();
1108clear_hash_noput:
1109 memset(md5_hash, 0, 16);
1110 goto out;
1111}
1112
1113int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
1114 struct sock *sk,
1115 struct dst_entry *dst,
1116 struct request_sock *req,
1117 struct tcphdr *th, int protocol,
1118 int tcplen)
1119{
1120 __be32 saddr, daddr;
1121
1122 if (sk) {
1123 saddr = inet_sk(sk)->saddr;
1124 daddr = inet_sk(sk)->daddr;
1125 } else {
1126 struct rtable *rt = (struct rtable *)dst;
1127 BUG_ON(!rt);
1128 saddr = rt->rt_src;
1129 daddr = rt->rt_dst;
1130 }
1131 return tcp_v4_do_calc_md5_hash(md5_hash, key,
1132 saddr, daddr,
1133 th, protocol, tcplen);
1134}
1135
1136EXPORT_SYMBOL(tcp_v4_calc_md5_hash);
1137
1138static int tcp_v4_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
1139{
1140 /*
1141 * This gets called for each TCP segment that arrives
1142 * so we want to be efficient.
1143 * We have 3 drop cases:
1144 * o No MD5 hash and one expected.
1145 * o MD5 hash and we're not expecting one.
1146 * o MD5 hash and its wrong.
1147 */
1148 __u8 *hash_location = NULL;
1149 struct tcp_md5sig_key *hash_expected;
1150 struct iphdr *iph = skb->nh.iph;
1151 struct tcphdr *th = skb->h.th;
1152 int length = (th->doff << 2) - sizeof (struct tcphdr);
1153 int genhash;
1154 unsigned char *ptr;
1155 unsigned char newhash[16];
1156
1157 hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
1158
1159 /*
1160 * If the TCP option length is less than the TCP_MD5SIG
1161 * option length, then we can shortcut
1162 */
1163 if (length < TCPOLEN_MD5SIG) {
1164 if (hash_expected)
1165 return 1;
1166 else
1167 return 0;
1168 }
1169
1170 /* Okay, we can't shortcut - we have to grub through the options */
1171 ptr = (unsigned char *)(th + 1);
1172 while (length > 0) {
1173 int opcode = *ptr++;
1174 int opsize;
1175
1176 switch (opcode) {
1177 case TCPOPT_EOL:
1178 goto done_opts;
1179 case TCPOPT_NOP:
1180 length--;
1181 continue;
1182 default:
1183 opsize = *ptr++;
1184 if (opsize < 2)
1185 goto done_opts;
1186 if (opsize > length)
1187 goto done_opts;
1188
1189 if (opcode == TCPOPT_MD5SIG) {
1190 hash_location = ptr;
1191 goto done_opts;
1192 }
1193 }
1194 ptr += opsize-2;
1195 length -= opsize;
1196 }
1197done_opts:
1198 /* We've parsed the options - do we have a hash? */
1199 if (!hash_expected && !hash_location)
1200 return 0;
1201
1202 if (hash_expected && !hash_location) {
1203 if (net_ratelimit()) {
 1204 printk(KERN_INFO "MD5 Hash expected but NOT found "
1205 "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
1206 NIPQUAD (iph->saddr), ntohs(th->source),
1207 NIPQUAD (iph->daddr), ntohs(th->dest));
1208 }
1209 return 1;
1210 }
1211
1212 if (!hash_expected && hash_location) {
1213 if (net_ratelimit()) {
1214 printk(KERN_INFO "MD5 Hash NOT expected but found "
1215 "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
1216 NIPQUAD (iph->saddr), ntohs(th->source),
1217 NIPQUAD (iph->daddr), ntohs(th->dest));
1218 }
1219 return 1;
1220 }
1221
1222 /* Okay, so this is hash_expected and hash_location -
1223 * so we need to calculate the checksum.
1224 */
1225 genhash = tcp_v4_do_calc_md5_hash(newhash,
1226 hash_expected,
1227 iph->saddr, iph->daddr,
1228 th, sk->sk_protocol,
1229 skb->len);
1230
1231 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1232 if (net_ratelimit()) {
1233 printk(KERN_INFO "MD5 Hash failed for "
1234 "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)%s\n",
1235 NIPQUAD (iph->saddr), ntohs(th->source),
1236 NIPQUAD (iph->daddr), ntohs(th->dest),
1237 genhash ? " tcp_v4_calc_md5_hash failed" : "");
1238#ifdef CONFIG_TCP_MD5SIG_DEBUG
1239 do {
1240 int i;
1241 printk("Received: ");
1242 for (i = 0; i < 16; i++)
1243 printk("%02x ", 0xff & (int)hash_location[i]);
1244 printk("\n");
1245 printk("Calculated: ");
1246 for (i = 0; i < 16; i++)
1247 printk("%02x ", 0xff & (int)newhash[i]);
1248 printk("\n");
1249 } while(0);
1250#endif
1251 }
1252 return 1;
1253 }
1254 return 0;
1255}
1256
1257#endif
1258
717struct request_sock_ops tcp_request_sock_ops __read_mostly = { 1259struct request_sock_ops tcp_request_sock_ops __read_mostly = {
718 .family = PF_INET, 1260 .family = PF_INET,
719 .obj_size = sizeof(struct tcp_request_sock), 1261 .obj_size = sizeof(struct tcp_request_sock),
@@ -723,9 +1265,16 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = {
723 .send_reset = tcp_v4_send_reset, 1265 .send_reset = tcp_v4_send_reset,
724}; 1266};
725 1267
1268struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1269#ifdef CONFIG_TCP_MD5SIG
1270 .md5_lookup = tcp_v4_reqsk_md5_lookup,
1271#endif
1272};
1273
726static struct timewait_sock_ops tcp_timewait_sock_ops = { 1274static struct timewait_sock_ops tcp_timewait_sock_ops = {
727 .twsk_obj_size = sizeof(struct tcp_timewait_sock), 1275 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
728 .twsk_unique = tcp_twsk_unique, 1276 .twsk_unique = tcp_twsk_unique,
1277 .twsk_destructor= tcp_twsk_destructor,
729}; 1278};
730 1279
731int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) 1280int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
@@ -773,6 +1322,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
773 if (!req) 1322 if (!req)
774 goto drop; 1323 goto drop;
775 1324
1325#ifdef CONFIG_TCP_MD5SIG
1326 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1327#endif
1328
776 tcp_clear_options(&tmp_opt); 1329 tcp_clear_options(&tmp_opt);
777 tmp_opt.mss_clamp = 536; 1330 tmp_opt.mss_clamp = 536;
778 tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss; 1331 tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss;
@@ -891,6 +1444,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
891 struct inet_sock *newinet; 1444 struct inet_sock *newinet;
892 struct tcp_sock *newtp; 1445 struct tcp_sock *newtp;
893 struct sock *newsk; 1446 struct sock *newsk;
1447#ifdef CONFIG_TCP_MD5SIG
1448 struct tcp_md5sig_key *key;
1449#endif
894 1450
895 if (sk_acceptq_is_full(sk)) 1451 if (sk_acceptq_is_full(sk))
896 goto exit_overflow; 1452 goto exit_overflow;
@@ -925,6 +1481,24 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
925 newtp->advmss = dst_metric(dst, RTAX_ADVMSS); 1481 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
926 tcp_initialize_rcv_mss(newsk); 1482 tcp_initialize_rcv_mss(newsk);
927 1483
1484#ifdef CONFIG_TCP_MD5SIG
1485 /* Copy over the MD5 key from the original socket */
1486 if ((key = tcp_v4_md5_do_lookup(sk, newinet->daddr)) != NULL) {
1487 /*
1488 * We're using one, so create a matching key
1489 * on the newsk structure. If we fail to get
1490 * memory, then we end up not copying the key
1491 * across. Shucks.
1492 */
1493 char *newkey = kmalloc(key->keylen, GFP_ATOMIC);
1494 if (newkey) {
1495 memcpy(newkey, key->key, key->keylen);
1496 tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr,
1497 newkey, key->keylen);
1498 }
1499 }
1500#endif
1501
928 __inet_hash(&tcp_hashinfo, newsk, 0); 1502 __inet_hash(&tcp_hashinfo, newsk, 0);
929 __inet_inherit_port(&tcp_hashinfo, sk, newsk); 1503 __inet_inherit_port(&tcp_hashinfo, sk, newsk);
930 1504
@@ -1000,10 +1574,24 @@ static int tcp_v4_checksum_init(struct sk_buff *skb)
1000 */ 1574 */
1001int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) 1575int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1002{ 1576{
1577 struct sock *rsk;
1578#ifdef CONFIG_TCP_MD5SIG
1579 /*
1580 * We really want to reject the packet as early as possible
1581 * if:
1582 * o We're expecting an MD5'd packet and this is no MD5 tcp option
1583 * o There is an MD5 option and we're not expecting one
1584 */
1585 if (tcp_v4_inbound_md5_hash (sk, skb))
1586 goto discard;
1587#endif
1588
1003 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ 1589 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1004 TCP_CHECK_TIMER(sk); 1590 TCP_CHECK_TIMER(sk);
1005 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) 1591 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) {
1592 rsk = sk;
1006 goto reset; 1593 goto reset;
1594 }
1007 TCP_CHECK_TIMER(sk); 1595 TCP_CHECK_TIMER(sk);
1008 return 0; 1596 return 0;
1009 } 1597 }
@@ -1017,20 +1605,24 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1017 goto discard; 1605 goto discard;
1018 1606
1019 if (nsk != sk) { 1607 if (nsk != sk) {
1020 if (tcp_child_process(sk, nsk, skb)) 1608 if (tcp_child_process(sk, nsk, skb)) {
1609 rsk = nsk;
1021 goto reset; 1610 goto reset;
1611 }
1022 return 0; 1612 return 0;
1023 } 1613 }
1024 } 1614 }
1025 1615
1026 TCP_CHECK_TIMER(sk); 1616 TCP_CHECK_TIMER(sk);
1027 if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) 1617 if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) {
1618 rsk = sk;
1028 goto reset; 1619 goto reset;
1620 }
1029 TCP_CHECK_TIMER(sk); 1621 TCP_CHECK_TIMER(sk);
1030 return 0; 1622 return 0;
1031 1623
1032reset: 1624reset:
1033 tcp_v4_send_reset(skb); 1625 tcp_v4_send_reset(rsk, skb);
1034discard: 1626discard:
1035 kfree_skb(skb); 1627 kfree_skb(skb);
1036 /* Be careful here. If this function gets more complicated and 1628 /* Be careful here. If this function gets more complicated and
@@ -1139,7 +1731,7 @@ no_tcp_socket:
1139bad_packet: 1731bad_packet:
1140 TCP_INC_STATS_BH(TCP_MIB_INERRS); 1732 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1141 } else { 1733 } else {
1142 tcp_v4_send_reset(skb); 1734 tcp_v4_send_reset(NULL, skb);
1143 } 1735 }
1144 1736
1145discard_it: 1737discard_it:
@@ -1262,6 +1854,15 @@ struct inet_connection_sock_af_ops ipv4_specific = {
1262#endif 1854#endif
1263}; 1855};
1264 1856
1857struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1858#ifdef CONFIG_TCP_MD5SIG
1859 .md5_lookup = tcp_v4_md5_lookup,
1860 .calc_md5_hash = tcp_v4_calc_md5_hash,
1861 .md5_add = tcp_v4_md5_add_func,
1862 .md5_parse = tcp_v4_parse_md5_keys,
1863#endif
1864};
1865
1265/* NOTE: A lot of things set to zero explicitly by call to 1866/* NOTE: A lot of things set to zero explicitly by call to
1266 * sk_alloc() so need not be done here. 1867 * sk_alloc() so need not be done here.
1267 */ 1868 */
@@ -1301,6 +1902,9 @@ static int tcp_v4_init_sock(struct sock *sk)
1301 1902
1302 icsk->icsk_af_ops = &ipv4_specific; 1903 icsk->icsk_af_ops = &ipv4_specific;
1303 icsk->icsk_sync_mss = tcp_sync_mss; 1904 icsk->icsk_sync_mss = tcp_sync_mss;
1905#ifdef CONFIG_TCP_MD5SIG
1906 tp->af_specific = &tcp_sock_ipv4_specific;
1907#endif
1304 1908
1305 sk->sk_sndbuf = sysctl_tcp_wmem[1]; 1909 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1306 sk->sk_rcvbuf = sysctl_tcp_rmem[1]; 1910 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
@@ -1324,6 +1928,15 @@ int tcp_v4_destroy_sock(struct sock *sk)
1324 /* Cleans up our, hopefully empty, out_of_order_queue. */ 1928 /* Cleans up our, hopefully empty, out_of_order_queue. */
1325 __skb_queue_purge(&tp->out_of_order_queue); 1929 __skb_queue_purge(&tp->out_of_order_queue);
1326 1930
1931#ifdef CONFIG_TCP_MD5SIG
1932 /* Clean up the MD5 key list, if any */
1933 if (tp->md5sig_info) {
1934 tcp_v4_clear_md5_list(sk);
1935 kfree(tp->md5sig_info);
1936 tp->md5sig_info = NULL;
1937 }
1938#endif
1939
1327#ifdef CONFIG_NET_DMA 1940#ifdef CONFIG_NET_DMA
1328 /* Cleans up our sk_async_wait_queue */ 1941 /* Cleans up our sk_async_wait_queue */
1329 __skb_queue_purge(&sk->sk_async_wait_queue); 1942 __skb_queue_purge(&sk->sk_async_wait_queue);