Diffstat (limited to 'net/ipv6/tcp_ipv6.c')
-rw-r--r--	net/ipv6/tcp_ipv6.c	568
1 file changed, 551 insertions(+), 17 deletions(-)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 9a88395a7629..663d1d238014 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -66,10 +66,13 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+
 /* Socket used for sending RSTs and ACKs */
 static struct socket *tcp6_socket;
 
-static void tcp_v6_send_reset(struct sk_buff *skb);
+static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
 static void tcp_v6_send_check(struct sock *sk, int len,
 			      struct sk_buff *skb);
@@ -78,6 +81,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
 
 static struct inet_connection_sock_af_ops ipv6_mapped;
 static struct inet_connection_sock_af_ops ipv6_specific;
+static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
+static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
 
 static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
 {
@@ -208,6 +213,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
 		icsk->icsk_af_ops = &ipv6_mapped;
 		sk->sk_backlog_rcv = tcp_v4_do_rcv;
+#ifdef CONFIG_TCP_MD5SIG
+		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
+#endif
 
 		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
 
@@ -215,6 +223,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 			icsk->icsk_ext_hdr_len = exthdrlen;
 			icsk->icsk_af_ops = &ipv6_specific;
 			sk->sk_backlog_rcv = tcp_v6_do_rcv;
+#ifdef CONFIG_TCP_MD5SIG
+			tp->af_specific = &tcp_sock_ipv6_specific;
+#endif
 			goto failure;
 		} else {
 			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
@@ -518,6 +529,396 @@ static void tcp_v6_reqsk_destructor(struct request_sock *req)
 	kfree_skb(inet6_rsk(req)->pktopts);
 }
 
+#ifdef CONFIG_TCP_MD5SIG
+static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
+						   struct in6_addr *addr)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int i;
+
+	BUG_ON(tp == NULL);
+
+	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
+		return NULL;
+
+	for (i = 0; i < tp->md5sig_info->entries6; i++) {
+		if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
+			return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i];
+	}
+	return NULL;
+}
+
+static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
+						struct sock *addr_sk)
+{
+	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
+}
+
+static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
+						      struct request_sock *req)
+{
+	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
+}
+
+static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
+			     char *newkey, u8 newkeylen)
+{
+	/* Add key to the list */
+	struct tcp6_md5sig_key *key;
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcp6_md5sig_key *keys;
+
+	key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer);
+	if (key) {
+		/* modify existing entry - just update that one */
+		kfree(key->key);
+		key->key = newkey;
+		key->keylen = newkeylen;
+	} else {
+		/* reallocate new list if current one is full. */
+		if (!tp->md5sig_info) {
+			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
+			if (!tp->md5sig_info) {
+				kfree(newkey);
+				return -ENOMEM;
+			}
+		}
+		tcp_alloc_md5sig_pool();
+		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
+			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
+				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
+
+			if (!keys) {
+				tcp_free_md5sig_pool();
+				kfree(newkey);
+				return -ENOMEM;
+			}
+
+			if (tp->md5sig_info->entries6)
+				memmove(keys, tp->md5sig_info->keys6,
+					(sizeof (tp->md5sig_info->keys6[0]) *
+					 tp->md5sig_info->entries6));
+
+			kfree(tp->md5sig_info->keys6);
+			tp->md5sig_info->keys6 = keys;
+			tp->md5sig_info->alloced6++;
+		}
+
+		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
+			       peer);
+		tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey;
+		tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen;
+
+		tp->md5sig_info->entries6++;
+	}
+	return 0;
+}
+
+static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
+			       u8 *newkey, __u8 newkeylen)
+{
+	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
+				 newkey, newkeylen);
+}
+
+static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int i;
+
+	for (i = 0; i < tp->md5sig_info->entries6; i++) {
+		if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
+			/* Free the key */
+			kfree(tp->md5sig_info->keys6[i].key);
+			tp->md5sig_info->entries6--;
+
+			if (tp->md5sig_info->entries6 == 0) {
+				kfree(tp->md5sig_info->keys6);
+				tp->md5sig_info->keys6 = NULL;
+
+				tcp_free_md5sig_pool();
+
+				return 0;
+			} else {
+				/* shrink the database */
+				if (tp->md5sig_info->entries6 != i)
+					memmove(&tp->md5sig_info->keys6[i],
+						&tp->md5sig_info->keys6[i+1],
+						(tp->md5sig_info->entries6 - i)
+						* sizeof (tp->md5sig_info->keys6[0]));
+			}
+		}
+	}
+	return -ENOENT;
+}
+
+static void tcp_v6_clear_md5_list (struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int i;
+
+	if (tp->md5sig_info->entries6) {
+		for (i = 0; i < tp->md5sig_info->entries6; i++)
+			kfree(tp->md5sig_info->keys6[i].key);
+		tp->md5sig_info->entries6 = 0;
+		tcp_free_md5sig_pool();
+	}
+
+	kfree(tp->md5sig_info->keys6);
+	tp->md5sig_info->keys6 = NULL;
+	tp->md5sig_info->alloced6 = 0;
+
+	if (tp->md5sig_info->entries4) {
+		for (i = 0; i < tp->md5sig_info->entries4; i++)
+			kfree(tp->md5sig_info->keys4[i].key);
+		tp->md5sig_info->entries4 = 0;
+		tcp_free_md5sig_pool();
+	}
+
+	kfree(tp->md5sig_info->keys4);
+	tp->md5sig_info->keys4 = NULL;
+	tp->md5sig_info->alloced4 = 0;
+}
+
+static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
+				  int optlen)
+{
+	struct tcp_md5sig cmd;
+	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
+	u8 *newkey;
+
+	if (optlen < sizeof(cmd))
+		return -EINVAL;
+
+	if (copy_from_user(&cmd, optval, sizeof(cmd)))
+		return -EFAULT;
+
+	if (sin6->sin6_family != AF_INET6)
+		return -EINVAL;
+
+	if (!cmd.tcpm_keylen) {
+		if (!tcp_sk(sk)->md5sig_info)
+			return -ENOENT;
+		if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED)
+			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
+		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
+	}
+
+	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
+		return -EINVAL;
+
+	if (!tcp_sk(sk)->md5sig_info) {
+		struct tcp_sock *tp = tcp_sk(sk);
+		struct tcp_md5sig_info *p;
+
+		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
+		if (!p)
+			return -ENOMEM;
+
+		tp->md5sig_info = p;
+	}
+
+	newkey = kmalloc(cmd.tcpm_keylen, GFP_KERNEL);
+	if (!newkey)
+		return -ENOMEM;
+	memcpy(newkey, cmd.tcpm_key, cmd.tcpm_keylen);
+	if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED) {
+		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
+					 newkey, cmd.tcpm_keylen);
+	}
+	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
+}
+
+static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+				   struct in6_addr *saddr,
+				   struct in6_addr *daddr,
+				   struct tcphdr *th, int protocol,
+				   int tcplen)
+{
+	struct scatterlist sg[4];
+	__u16 data_len;
+	int block = 0;
+	__u16 cksum;
+	struct tcp_md5sig_pool *hp;
+	struct tcp6_pseudohdr *bp;
+	struct hash_desc *desc;
+	int err;
+	unsigned int nbytes = 0;
+
+	hp = tcp_get_md5sig_pool();
+	if (!hp) {
+		printk(KERN_WARNING "%s(): hash pool not found...\n", __FUNCTION__);
+		goto clear_hash_noput;
+	}
+	bp = &hp->md5_blk.ip6;
+	desc = &hp->md5_desc;
+
+	/* 1. TCP pseudo-header (RFC2460) */
+	ipv6_addr_copy(&bp->saddr, saddr);
+	ipv6_addr_copy(&bp->daddr, daddr);
+	bp->len = htonl(tcplen);
+	bp->protocol = htonl(protocol);
+
+	sg_set_buf(&sg[block++], bp, sizeof(*bp));
+	nbytes += sizeof(*bp);
+
+	/* 2. TCP header, excluding options */
+	cksum = th->check;
+	th->check = 0;
+	sg_set_buf(&sg[block++], th, sizeof(*th));
+	nbytes += sizeof(*th);
+
+	/* 3. TCP segment data (if any) */
+	data_len = tcplen - (th->doff << 2);
+	if (data_len > 0) {
+		u8 *data = (u8 *)th + (th->doff << 2);
+		sg_set_buf(&sg[block++], data, data_len);
+		nbytes += data_len;
+	}
+
+	/* 4. shared key */
+	sg_set_buf(&sg[block++], key->key, key->keylen);
+	nbytes += key->keylen;
+
+	/* Now store the hash into the packet */
+	err = crypto_hash_init(desc);
+	if (err) {
+		printk(KERN_WARNING "%s(): hash_init failed\n", __FUNCTION__);
+		goto clear_hash;
+	}
+	err = crypto_hash_update(desc, sg, nbytes);
+	if (err) {
+		printk(KERN_WARNING "%s(): hash_update failed\n", __FUNCTION__);
+		goto clear_hash;
+	}
+	err = crypto_hash_final(desc, md5_hash);
+	if (err) {
+		printk(KERN_WARNING "%s(): hash_final failed\n", __FUNCTION__);
+		goto clear_hash;
+	}
+
+	/* Reset header, and free up the crypto */
+	tcp_put_md5sig_pool();
+	th->check = cksum;
+out:
+	return 0;
+clear_hash:
+	tcp_put_md5sig_pool();
+clear_hash_noput:
+	memset(md5_hash, 0, 16);
+	goto out;
+}
+
+static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+				struct sock *sk,
+				struct dst_entry *dst,
+				struct request_sock *req,
+				struct tcphdr *th, int protocol,
+				int tcplen)
+{
+	struct in6_addr *saddr, *daddr;
+
+	if (sk) {
+		saddr = &inet6_sk(sk)->saddr;
+		daddr = &inet6_sk(sk)->daddr;
+	} else {
+		saddr = &inet6_rsk(req)->loc_addr;
+		daddr = &inet6_rsk(req)->rmt_addr;
+	}
+	return tcp_v6_do_calc_md5_hash(md5_hash, key,
+				       saddr, daddr,
+				       th, protocol, tcplen);
+}
+
+static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
+{
+	__u8 *hash_location = NULL;
+	struct tcp_md5sig_key *hash_expected;
+	struct ipv6hdr *ip6h = skb->nh.ipv6h;
+	struct tcphdr *th = skb->h.th;
+	int length = (th->doff << 2) - sizeof (*th);
+	int genhash;
+	u8 *ptr;
+	u8 newhash[16];
+
+	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
+
+	/* If the TCP option is too short, we can short cut */
+	if (length < TCPOLEN_MD5SIG)
+		return hash_expected ? 1 : 0;
+
+	/* parse options */
+	ptr = (u8*)(th + 1);
+	while (length > 0) {
+		int opcode = *ptr++;
+		int opsize;
+
+		switch(opcode) {
+		case TCPOPT_EOL:
+			goto done_opts;
+		case TCPOPT_NOP:
+			length--;
+			continue;
+		default:
+			opsize = *ptr++;
+			if (opsize < 2 || opsize > length)
+				goto done_opts;
+			if (opcode == TCPOPT_MD5SIG) {
+				hash_location = ptr;
+				goto done_opts;
+			}
+		}
+		ptr += opsize - 2;
+		length -= opsize;
+	}
+
+done_opts:
+	/* do we have a hash as expected? */
+	if (!hash_expected) {
+		if (!hash_location)
+			return 0;
+		if (net_ratelimit()) {
+			printk(KERN_INFO "MD5 Hash NOT expected but found "
+			       "(" NIP6_FMT ", %u)->"
+			       "(" NIP6_FMT ", %u)\n",
+			       NIP6(ip6h->saddr), ntohs(th->source),
+			       NIP6(ip6h->daddr), ntohs(th->dest));
+		}
+		return 1;
+	}
+
+	if (!hash_location) {
+		if (net_ratelimit()) {
+			printk(KERN_INFO "MD5 Hash expected but NOT found "
+			       "(" NIP6_FMT ", %u)->"
+			       "(" NIP6_FMT ", %u)\n",
+			       NIP6(ip6h->saddr), ntohs(th->source),
+			       NIP6(ip6h->daddr), ntohs(th->dest));
+		}
+		return 1;
+	}
+
+	/* check the signature */
+	genhash = tcp_v6_do_calc_md5_hash(newhash,
+					  hash_expected,
+					  &ip6h->saddr, &ip6h->daddr,
+					  th, sk->sk_protocol,
+					  skb->len);
+	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
+		if (net_ratelimit()) {
+			printk(KERN_INFO "MD5 Hash %s for "
+			       "(" NIP6_FMT ", %u)->"
+			       "(" NIP6_FMT ", %u)\n",
+			       genhash ? "failed" : "mismatch",
+			       NIP6(ip6h->saddr), ntohs(th->source),
+			       NIP6(ip6h->daddr), ntohs(th->dest));
+		}
+		return 1;
+	}
+	return 0;
+}
+#endif
+
 static struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 	.family		=	AF_INET6,
 	.obj_size	=	sizeof(struct tcp6_request_sock),
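
The setsockopt() handler added above, tcp_v6_parse_md5_keys(), is what user space reaches when it configures a per-peer key with the TCP_MD5SIG socket option. Below is a minimal user-space sketch, not part of the patch: the peer address 2001:db8::1 and the key string are placeholders, and it assumes struct tcp_md5sig and TCP_MD5SIG are taken from <linux/tcp.h>.

/* User-space sketch: install an MD5 key for an IPv6 peer on a TCP socket. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/tcp.h>		/* TCP_MD5SIG, struct tcp_md5sig */

int main(void)
{
	int fd = socket(AF_INET6, SOCK_STREAM, 0);
	struct tcp_md5sig md5;
	struct sockaddr_in6 *peer = (struct sockaddr_in6 *)&md5.tcpm_addr;
	static const char key[] = "example-md5-secret";	/* illustrative key */

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&md5, 0, sizeof(md5));
	peer->sin6_family = AF_INET6;
	inet_pton(AF_INET6, "2001:db8::1", &peer->sin6_addr);	/* peer address */
	md5.tcpm_keylen = sizeof(key) - 1;
	memcpy(md5.tcpm_key, key, md5.tcpm_keylen);

	/* Kernel side of this call is the new tcp_v6_parse_md5_keys() */
	if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5)) < 0)
		perror("setsockopt(TCP_MD5SIG)");

	/* ... then connect() or bind()+listen() as usual ... */
	close(fd);
	return 0;
}

Passing tcpm_keylen == 0 for an address removes its key, which lands in the tcp_v6_md5_do_del() branch above.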
@@ -527,9 +928,16 @@ static struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 	.send_reset	=	tcp_v6_send_reset
 };
 
+struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
+#ifdef CONFIG_TCP_MD5SIG
+	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
+#endif
+};
+
 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
 	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
 	.twsk_unique	= tcp_twsk_unique,
+	.twsk_destructor= tcp_twsk_destructor,
 };
 
 static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
@@ -566,11 +974,15 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
 	return 0;
 }
 
-static void tcp_v6_send_reset(struct sk_buff *skb)
+static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcphdr *th = skb->h.th, *t1;
 	struct sk_buff *buff;
 	struct flowi fl;
+	int tot_len = sizeof(*th);
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_md5sig_key *key;
+#endif
 
 	if (th->rst)
 		return;
@@ -578,25 +990,35 @@ static void tcp_v6_send_reset(struct sk_buff *skb)
 	if (!ipv6_unicast_destination(skb))
 		return;
 
+#ifdef CONFIG_TCP_MD5SIG
+	if (sk)
+		key = tcp_v6_md5_do_lookup(sk, &skb->nh.ipv6h->daddr);
+	else
+		key = NULL;
+
+	if (key)
+		tot_len += TCPOLEN_MD5SIG_ALIGNED;
+#endif
+
 	/*
 	 * We need to grab some memory, and put together an RST,
 	 * and then put it into the queue to be sent.
 	 */
 
-	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr),
+	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
 			 GFP_ATOMIC);
 	if (buff == NULL)
 		return;
 
-	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr));
+	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
 
-	t1 = (struct tcphdr *) skb_push(buff,sizeof(struct tcphdr));
+	t1 = (struct tcphdr *) skb_push(buff, tot_len);
 
 	/* Swap the send and the receive. */
 	memset(t1, 0, sizeof(*t1));
 	t1->dest = th->source;
 	t1->source = th->dest;
-	t1->doff = sizeof(*t1)/4;
+	t1->doff = tot_len / 4;
 	t1->rst = 1;
 
 	if(th->ack) {
@@ -607,6 +1029,22 @@ static void tcp_v6_send_reset(struct sk_buff *skb)
 		      + skb->len - (th->doff<<2));
 	}
 
+#ifdef CONFIG_TCP_MD5SIG
+	if (key) {
+		u32 *opt = (u32*)(t1 + 1);
+		opt[0] = htonl((TCPOPT_NOP << 24) |
+			       (TCPOPT_NOP << 16) |
+			       (TCPOPT_MD5SIG << 8) |
+			       TCPOLEN_MD5SIG);
+		tcp_v6_do_calc_md5_hash((__u8*)&opt[1],
+					key,
+					&skb->nh.ipv6h->daddr,
+					&skb->nh.ipv6h->saddr,
+					t1, IPPROTO_TCP,
+					tot_len);
+	}
+#endif
+
 	buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
 
 	memset(&fl, 0, sizeof(fl));
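
For reference, the option word written into opt[0] above, followed by the digest filled in by tcp_v6_do_calc_md5_hash(), forms the RFC 2385 signature option NOP-padded to 20 bytes (TCPOLEN_MD5SIG_ALIGNED). The following byte-layout sketch is illustrative only and is not code from the patch; it assumes the usual values TCPOPT_NOP = 1, TCPOPT_MD5SIG = 19, TCPOLEN_MD5SIG = 18.

#include <stdint.h>
#include <string.h>

/* Sketch: lay out the padded RFC 2385 option as the RST/ACK paths above do. */
static size_t build_md5_option(uint8_t opt[20], const uint8_t digest[16])
{
	opt[0] = 1;			/* NOP padding              */
	opt[1] = 1;			/* NOP padding              */
	opt[2] = 19;			/* kind: TCP MD5 signature  */
	opt[3] = 18;			/* length: kind+len+digest  */
	memcpy(&opt[4], digest, 16);	/* 16-byte MD5 digest       */
	return 20;			/* TCPOLEN_MD5SIG_ALIGNED   */
}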
@@ -637,15 +1075,37 @@ static void tcp_v6_send_reset(struct sk_buff *skb)
 	kfree_skb(buff);
 }
 
-static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
+static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
+			    struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
 {
 	struct tcphdr *th = skb->h.th, *t1;
 	struct sk_buff *buff;
 	struct flowi fl;
 	int tot_len = sizeof(struct tcphdr);
+	u32 *topt;
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_md5sig_key *key;
+	struct tcp_md5sig_key tw_key;
+#endif
+
+#ifdef CONFIG_TCP_MD5SIG
+	if (!tw && skb->sk) {
+		key = tcp_v6_md5_do_lookup(skb->sk, &skb->nh.ipv6h->daddr);
+	} else if (tw && tw->tw_md5_keylen) {
+		tw_key.key = tw->tw_md5_key;
+		tw_key.keylen = tw->tw_md5_keylen;
+		key = &tw_key;
+	} else {
+		key = NULL;
+	}
+#endif
 
 	if (ts)
 		tot_len += TCPOLEN_TSTAMP_ALIGNED;
+#ifdef CONFIG_TCP_MD5SIG
+	if (key)
+		tot_len += TCPOLEN_MD5SIG_ALIGNED;
+#endif
 
 	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
 			 GFP_ATOMIC);
@@ -665,15 +1125,29 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
 	t1->ack_seq = htonl(ack);
 	t1->ack = 1;
 	t1->window = htons(win);
+
+	topt = (u32*)(t1 + 1);
 
 	if (ts) {
-		u32 *ptr = (u32*)(t1 + 1);
-		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
-			       (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
-		*ptr++ = htonl(tcp_time_stamp);
-		*ptr = htonl(ts);
+		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
+		*topt++ = htonl(tcp_time_stamp);
+		*topt = htonl(ts);
 	}
 
+#ifdef CONFIG_TCP_MD5SIG
+	if (key) {
+		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
+		tcp_v6_do_calc_md5_hash((__u8 *)topt,
+					key,
+					&skb->nh.ipv6h->daddr,
+					&skb->nh.ipv6h->saddr,
+					t1, IPPROTO_TCP,
+					tot_len);
+	}
+#endif
+
 	buff->csum = csum_partial((char *)t1, tot_len, 0);
 
 	memset(&fl, 0, sizeof(fl));
@@ -704,9 +1178,9 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 {
 	struct inet_timewait_sock *tw = inet_twsk(sk);
-	const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
+	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 
-	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+	tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 			tcptw->tw_ts_recent);
 
@@ -715,7 +1189,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 
 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
 {
-	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
+	tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
 }
 
 
@@ -786,6 +1260,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	if (req == NULL)
 		goto drop;
 
+#ifdef CONFIG_TCP_MD5SIG
+	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
+#endif
+
 	tcp_clear_options(&tmp_opt);
 	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 	tmp_opt.user_mss = tp->rx_opt.user_mss;
@@ -844,6 +1322,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	struct tcp_sock *newtp;
 	struct sock *newsk;
 	struct ipv6_txoptions *opt;
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_md5sig_key *key;
+#endif
 
 	if (skb->protocol == htons(ETH_P_IP)) {
 		/*
@@ -874,6 +1355,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
+#ifdef CONFIG_TCP_MD5SIG
+		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
+#endif
+
 		newnp->pktoptions = NULL;
 		newnp->opt = NULL;
 		newnp->mcast_oif = inet6_iif(skb);
@@ -1008,6 +1493,23 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
 	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
 
+#ifdef CONFIG_TCP_MD5SIG
+	/* Copy over the MD5 key from the original socket */
+	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
+		/* We're using one, so create a matching key
+		 * on the newsk structure. If we fail to get
+		 * memory, then we end up not copying the key
+		 * across. Shucks.
+		 */
+		char *newkey = kmalloc(key->keylen, GFP_ATOMIC);
+		if (newkey) {
+			memcpy(newkey, key->key, key->keylen);
+			tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
+					  newkey, key->keylen);
+		}
+	}
+#endif
+
 	__inet6_hash(&tcp_hashinfo, newsk);
 	inet_inherit_port(&tcp_hashinfo, sk, newsk);
 
@@ -1067,6 +1569,11 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	if (skb->protocol == htons(ETH_P_IP))
 		return tcp_v4_do_rcv(sk, skb);
 
+#ifdef CONFIG_TCP_MD5SIG
+	if (tcp_v6_inbound_md5_hash (sk, skb))
+		goto discard;
+#endif
+
 	if (sk_filter(sk, skb))
 		goto discard;
 
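
The call added above discards a segment whenever tcp_v6_inbound_md5_hash() returns non-zero. Its decision reduces to the following compact restatement; this is an illustrative sketch, not code from the patch, and the helper name md5_segment_policy is made up.

/* Sketch of the accept/drop policy enforced before the TCP state machine. */
static int md5_segment_policy(int key_configured, int option_present,
			      int digest_matches)
{
	if (!key_configured && !option_present)
		return 0;	/* MD5 not in use on this connection: accept  */
	if (key_configured != option_present)
		return 1;	/* only one side expects signatures: drop     */
	return digest_matches ? 0 : 1;	/* both present: accept only on match */
}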
@@ -1132,7 +1639,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	return 0;
 
 reset:
-	tcp_v6_send_reset(skb);
+	tcp_v6_send_reset(sk, skb);
 discard:
 	if (opt_skb)
 		__kfree_skb(opt_skb);
@@ -1257,7 +1764,7 @@ no_tcp_socket:
 bad_packet:
 		TCP_INC_STATS_BH(TCP_MIB_INERRS);
 	} else {
-		tcp_v6_send_reset(skb);
+		tcp_v6_send_reset(NULL, skb);
 	}
 
 discard_it:
@@ -1336,6 +1843,15 @@ static struct inet_connection_sock_af_ops ipv6_specific = {
 #endif
 };
 
+static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
+#ifdef CONFIG_TCP_MD5SIG
+	.md5_lookup	=	tcp_v6_md5_lookup,
+	.calc_md5_hash	=	tcp_v6_calc_md5_hash,
+	.md5_add	=	tcp_v6_md5_add_func,
+	.md5_parse	=	tcp_v6_parse_md5_keys,
+#endif
+};
+
 /*
  *	TCP over IPv4 via INET6 API
  */
@@ -1358,6 +1874,15 @@ static struct inet_connection_sock_af_ops ipv6_mapped = {
 #endif
 };
 
+static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
+#ifdef CONFIG_TCP_MD5SIG
+	.md5_lookup	=	tcp_v4_md5_lookup,
+	.calc_md5_hash	=	tcp_v4_calc_md5_hash,
+	.md5_add	=	tcp_v6_md5_add_func,
+	.md5_parse	=	tcp_v6_parse_md5_keys,
+#endif
+};
+
 /* NOTE: A lot of things set to zero explicitly by call to
  *       sk_alloc() so need not be done here.
  */
@@ -1397,6 +1922,10 @@ static int tcp_v6_init_sock(struct sock *sk)
 	sk->sk_write_space = sk_stream_write_space;
 	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
 
+#ifdef CONFIG_TCP_MD5SIG
+	tp->af_specific = &tcp_sock_ipv6_specific;
+#endif
+
 	sk->sk_sndbuf = sysctl_tcp_wmem[1];
 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
@@ -1407,6 +1936,11 @@ static int tcp_v6_init_sock(struct sock *sk)
 
 static int tcp_v6_destroy_sock(struct sock *sk)
 {
+#ifdef CONFIG_TCP_MD5SIG
+	/* Clean up the MD5 key list */
+	if (tcp_sk(sk)->md5sig_info)
+		tcp_v6_clear_md5_list(sk);
+#endif
 	tcp_v4_destroy_sock(sk);
 	return inet6_destroy_sock(sk);
 }