Diffstat (limited to 'net/ipv6/tcp_ipv6.c')
 -rw-r--r--   net/ipv6/tcp_ipv6.c | 607
 1 file changed, 566 insertions, 41 deletions
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 4c2a7c0cafef..c25e930c2c69 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -66,10 +66,13 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+
 /* Socket used for sending RSTs and ACKs */
 static struct socket *tcp6_socket;
 
-static void tcp_v6_send_reset(struct sk_buff *skb);
+static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
 static void tcp_v6_send_check(struct sock *sk, int len,
                               struct sk_buff *skb);
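The two new includes are what the rest of the patch builds on: the MD5 digests are computed through the kernel crypto layer, with each piece of input (pseudo-header, TCP header, payload, shared key) described by a scatterlist entry and fed to a hash_desc taken from the shared tcp_md5sig_pool. As a standalone illustration only (the helper name and buffers are invented, not part of this patch), the 2.6.19-era crypto_hash API is used roughly like this:

/* Illustrative sketch: MD5 over two discontiguous buffers with the old
 * crypto_hash interface. tcp_v6_do_calc_md5_hash() below does the same
 * thing through a preallocated per-CPU pool instead of allocating a tfm. */
static int example_md5(void *a, unsigned int alen,
                       void *b, unsigned int blen, u8 *out /* 16 bytes */)
{
    struct crypto_hash *tfm;
    struct hash_desc desc;
    struct scatterlist sg[2];
    int err;

    tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
    if (IS_ERR(tfm))
        return PTR_ERR(tfm);
    desc.tfm = tfm;
    desc.flags = 0;

    sg_set_buf(&sg[0], a, alen);
    sg_set_buf(&sg[1], b, blen);

    err = crypto_hash_digest(&desc, sg, alen + blen, out);
    crypto_free_hash(tfm);
    return err;
}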
@@ -78,6 +81,10 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
 
 static struct inet_connection_sock_af_ops ipv6_mapped;
 static struct inet_connection_sock_af_ops ipv6_specific;
+#ifdef CONFIG_TCP_MD5SIG
+static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
+static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
+#endif
 
 static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
 {
@@ -98,27 +105,20 @@ static void tcp_v6_hash(struct sock *sk)
 	}
 }
 
-static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
-                                   struct in6_addr *saddr,
-                                   struct in6_addr *daddr,
-                                   unsigned long base)
+static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
+                                       struct in6_addr *saddr,
+                                       struct in6_addr *daddr,
+                                       __wsum base)
 {
 	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
 }
 
-static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
+static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
 {
-	if (skb->protocol == htons(ETH_P_IPV6)) {
-		return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
-		                                    skb->nh.ipv6h->saddr.s6_addr32,
-		                                    skb->h.th->dest,
-		                                    skb->h.th->source);
-	} else {
-		return secure_tcp_sequence_number(skb->nh.iph->daddr,
-		                                  skb->nh.iph->saddr,
-		                                  skb->h.th->dest,
-		                                  skb->h.th->source);
-	}
+	return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
+	                                    skb->nh.ipv6h->saddr.s6_addr32,
+	                                    skb->h.th->dest,
+	                                    skb->h.th->source);
 }
 
 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
@@ -215,6 +215,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
 		icsk->icsk_af_ops = &ipv6_mapped;
 		sk->sk_backlog_rcv = tcp_v4_do_rcv;
+#ifdef CONFIG_TCP_MD5SIG
+		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
+#endif
 
 		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
 
@@ -222,6 +225,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 			icsk->icsk_ext_hdr_len = exthdrlen;
 			icsk->icsk_af_ops = &ipv6_specific;
 			sk->sk_backlog_rcv = tcp_v6_do_rcv;
+#ifdef CONFIG_TCP_MD5SIG
+			tp->af_specific = &tcp_sock_ipv6_specific;
+#endif
 			goto failure;
 		} else {
 			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
@@ -310,7 +316,7 @@ failure:
 }
 
 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-		int type, int code, int offset, __u32 info)
+		int type, int code, int offset, __be32 info)
 {
 	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
@@ -509,8 +515,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
 
 		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
 		err = ip6_xmit(sk, skb, &fl, opt, 0);
-		if (err == NET_XMIT_CN)
-			err = 0;
+		err = net_xmit_eval(err);
 	}
 
 done:
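net_xmit_eval() is a then-new helper that captures the same rule the removed lines expressed: NET_XMIT_CN only reports local congestion, not a transmit failure, so it is folded to success. Approximately:

/* Rough equivalent of net_xmit_eval(); NET_XMIT_CN is not an error. */
#define net_xmit_eval(e)    ((e) == NET_XMIT_CN ? 0 : (e))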
@@ -526,7 +531,396 @@ static void tcp_v6_reqsk_destructor(struct request_sock *req)
 	kfree_skb(inet6_rsk(req)->pktopts);
 }
 
-static struct request_sock_ops tcp6_request_sock_ops = {
+#ifdef CONFIG_TCP_MD5SIG
+static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
+                                                   struct in6_addr *addr)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int i;
+
+	BUG_ON(tp == NULL);
+
+	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
+		return NULL;
+
+	for (i = 0; i < tp->md5sig_info->entries6; i++) {
+		if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
+			return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i];
+	}
+	return NULL;
+}
+
+static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
+                                                struct sock *addr_sk)
+{
+	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
+}
+
+static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
+                                                      struct request_sock *req)
+{
+	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
+}
+
+static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
+                             char *newkey, u8 newkeylen)
+{
+	/* Add key to the list */
+	struct tcp6_md5sig_key *key;
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcp6_md5sig_key *keys;
+
+	key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer);
+	if (key) {
+		/* modify existing entry - just update that one */
+		kfree(key->key);
+		key->key = newkey;
+		key->keylen = newkeylen;
+	} else {
+		/* reallocate new list if current one is full. */
+		if (!tp->md5sig_info) {
+			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
+			if (!tp->md5sig_info) {
+				kfree(newkey);
+				return -ENOMEM;
+			}
+		}
+		tcp_alloc_md5sig_pool();
+		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
+			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
+			               (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
+
+			if (!keys) {
+				tcp_free_md5sig_pool();
+				kfree(newkey);
+				return -ENOMEM;
+			}
+
+			if (tp->md5sig_info->entries6)
+				memmove(keys, tp->md5sig_info->keys6,
+				        (sizeof (tp->md5sig_info->keys6[0]) *
+				         tp->md5sig_info->entries6));
+
+			kfree(tp->md5sig_info->keys6);
+			tp->md5sig_info->keys6 = keys;
+			tp->md5sig_info->alloced6++;
+		}
+
+		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
+		               peer);
+		tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey;
+		tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen;
+
+		tp->md5sig_info->entries6++;
+	}
+	return 0;
+}
+
+static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
+                               u8 *newkey, __u8 newkeylen)
+{
+	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
+	                         newkey, newkeylen);
+}
+
+static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int i;
+
+	for (i = 0; i < tp->md5sig_info->entries6; i++) {
+		if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
+			/* Free the key */
+			kfree(tp->md5sig_info->keys6[i].key);
+			tp->md5sig_info->entries6--;
+
+			if (tp->md5sig_info->entries6 == 0) {
+				kfree(tp->md5sig_info->keys6);
+				tp->md5sig_info->keys6 = NULL;
+
+				tcp_free_md5sig_pool();
+
+				return 0;
+			} else {
+				/* shrink the database */
+				if (tp->md5sig_info->entries6 != i)
+					memmove(&tp->md5sig_info->keys6[i],
+					        &tp->md5sig_info->keys6[i+1],
+					        (tp->md5sig_info->entries6 - i)
+					        * sizeof (tp->md5sig_info->keys6[0]));
+			}
+		}
+	}
+	return -ENOENT;
+}
+
+static void tcp_v6_clear_md5_list (struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int i;
+
+	if (tp->md5sig_info->entries6) {
+		for (i = 0; i < tp->md5sig_info->entries6; i++)
+			kfree(tp->md5sig_info->keys6[i].key);
+		tp->md5sig_info->entries6 = 0;
+		tcp_free_md5sig_pool();
+	}
+
+	kfree(tp->md5sig_info->keys6);
+	tp->md5sig_info->keys6 = NULL;
+	tp->md5sig_info->alloced6 = 0;
+
+	if (tp->md5sig_info->entries4) {
+		for (i = 0; i < tp->md5sig_info->entries4; i++)
+			kfree(tp->md5sig_info->keys4[i].key);
+		tp->md5sig_info->entries4 = 0;
+		tcp_free_md5sig_pool();
+	}
+
+	kfree(tp->md5sig_info->keys4);
+	tp->md5sig_info->keys4 = NULL;
+	tp->md5sig_info->alloced4 = 0;
+}
+
+static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
+                                  int optlen)
+{
+	struct tcp_md5sig cmd;
+	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
+	u8 *newkey;
+
+	if (optlen < sizeof(cmd))
+		return -EINVAL;
+
+	if (copy_from_user(&cmd, optval, sizeof(cmd)))
+		return -EFAULT;
+
+	if (sin6->sin6_family != AF_INET6)
+		return -EINVAL;
+
+	if (!cmd.tcpm_keylen) {
+		if (!tcp_sk(sk)->md5sig_info)
+			return -ENOENT;
+		if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED)
+			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
+		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
+	}
+
+	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
+		return -EINVAL;
+
+	if (!tcp_sk(sk)->md5sig_info) {
+		struct tcp_sock *tp = tcp_sk(sk);
+		struct tcp_md5sig_info *p;
+
+		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
+		if (!p)
+			return -ENOMEM;
+
+		tp->md5sig_info = p;
+	}
+
+	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
+	if (!newkey)
+		return -ENOMEM;
+	if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED) {
+		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
+		                         newkey, cmd.tcpm_keylen);
+	}
+	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
+}
+
+static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+                                   struct in6_addr *saddr,
+                                   struct in6_addr *daddr,
+                                   struct tcphdr *th, int protocol,
+                                   int tcplen)
+{
+	struct scatterlist sg[4];
+	__u16 data_len;
+	int block = 0;
+	__sum16 cksum;
+	struct tcp_md5sig_pool *hp;
+	struct tcp6_pseudohdr *bp;
+	struct hash_desc *desc;
+	int err;
+	unsigned int nbytes = 0;
+
+	hp = tcp_get_md5sig_pool();
+	if (!hp) {
+		printk(KERN_WARNING "%s(): hash pool not found...\n", __FUNCTION__);
+		goto clear_hash_noput;
+	}
+	bp = &hp->md5_blk.ip6;
+	desc = &hp->md5_desc;
+
+	/* 1. TCP pseudo-header (RFC2460) */
+	ipv6_addr_copy(&bp->saddr, saddr);
+	ipv6_addr_copy(&bp->daddr, daddr);
+	bp->len = htonl(tcplen);
+	bp->protocol = htonl(protocol);
+
+	sg_set_buf(&sg[block++], bp, sizeof(*bp));
+	nbytes += sizeof(*bp);
+
+	/* 2. TCP header, excluding options */
+	cksum = th->check;
+	th->check = 0;
+	sg_set_buf(&sg[block++], th, sizeof(*th));
+	nbytes += sizeof(*th);
+
+	/* 3. TCP segment data (if any) */
+	data_len = tcplen - (th->doff << 2);
+	if (data_len > 0) {
+		u8 *data = (u8 *)th + (th->doff << 2);
+		sg_set_buf(&sg[block++], data, data_len);
+		nbytes += data_len;
+	}
+
+	/* 4. shared key */
+	sg_set_buf(&sg[block++], key->key, key->keylen);
+	nbytes += key->keylen;
+
+	/* Now store the hash into the packet */
+	err = crypto_hash_init(desc);
+	if (err) {
+		printk(KERN_WARNING "%s(): hash_init failed\n", __FUNCTION__);
+		goto clear_hash;
+	}
+	err = crypto_hash_update(desc, sg, nbytes);
+	if (err) {
+		printk(KERN_WARNING "%s(): hash_update failed\n", __FUNCTION__);
+		goto clear_hash;
+	}
+	err = crypto_hash_final(desc, md5_hash);
+	if (err) {
+		printk(KERN_WARNING "%s(): hash_final failed\n", __FUNCTION__);
+		goto clear_hash;
+	}
+
+	/* Reset header, and free up the crypto */
+	tcp_put_md5sig_pool();
+	th->check = cksum;
+out:
+	return 0;
+clear_hash:
+	tcp_put_md5sig_pool();
+clear_hash_noput:
+	memset(md5_hash, 0, 16);
+	goto out;
+}
+
+static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+                                struct sock *sk,
+                                struct dst_entry *dst,
+                                struct request_sock *req,
+                                struct tcphdr *th, int protocol,
+                                int tcplen)
+{
+	struct in6_addr *saddr, *daddr;
+
+	if (sk) {
+		saddr = &inet6_sk(sk)->saddr;
+		daddr = &inet6_sk(sk)->daddr;
+	} else {
+		saddr = &inet6_rsk(req)->loc_addr;
+		daddr = &inet6_rsk(req)->rmt_addr;
+	}
+	return tcp_v6_do_calc_md5_hash(md5_hash, key,
+	                               saddr, daddr,
+	                               th, protocol, tcplen);
+}
+
+static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
+{
+	__u8 *hash_location = NULL;
+	struct tcp_md5sig_key *hash_expected;
+	struct ipv6hdr *ip6h = skb->nh.ipv6h;
+	struct tcphdr *th = skb->h.th;
+	int length = (th->doff << 2) - sizeof (*th);
+	int genhash;
+	u8 *ptr;
+	u8 newhash[16];
+
+	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
+
+	/* If the TCP option is too short, we can short cut */
+	if (length < TCPOLEN_MD5SIG)
+		return hash_expected ? 1 : 0;
+
+	/* parse options */
+	ptr = (u8*)(th + 1);
+	while (length > 0) {
+		int opcode = *ptr++;
+		int opsize;
+
+		switch(opcode) {
+		case TCPOPT_EOL:
+			goto done_opts;
+		case TCPOPT_NOP:
+			length--;
+			continue;
+		default:
+			opsize = *ptr++;
+			if (opsize < 2 || opsize > length)
+				goto done_opts;
+			if (opcode == TCPOPT_MD5SIG) {
+				hash_location = ptr;
+				goto done_opts;
+			}
+		}
+		ptr += opsize - 2;
+		length -= opsize;
+	}
+
+done_opts:
+	/* do we have a hash as expected? */
+	if (!hash_expected) {
+		if (!hash_location)
+			return 0;
+		if (net_ratelimit()) {
+			printk(KERN_INFO "MD5 Hash NOT expected but found "
+			       "(" NIP6_FMT ", %u)->"
+			       "(" NIP6_FMT ", %u)\n",
+			       NIP6(ip6h->saddr), ntohs(th->source),
+			       NIP6(ip6h->daddr), ntohs(th->dest));
+		}
+		return 1;
+	}
+
+	if (!hash_location) {
+		if (net_ratelimit()) {
+			printk(KERN_INFO "MD5 Hash expected but NOT found "
+			       "(" NIP6_FMT ", %u)->"
+			       "(" NIP6_FMT ", %u)\n",
+			       NIP6(ip6h->saddr), ntohs(th->source),
+			       NIP6(ip6h->daddr), ntohs(th->dest));
+		}
+		return 1;
+	}
+
+	/* check the signature */
+	genhash = tcp_v6_do_calc_md5_hash(newhash,
+	                                  hash_expected,
+	                                  &ip6h->saddr, &ip6h->daddr,
+	                                  th, sk->sk_protocol,
+	                                  skb->len);
+	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
+		if (net_ratelimit()) {
+			printk(KERN_INFO "MD5 Hash %s for "
+			       "(" NIP6_FMT ", %u)->"
+			       "(" NIP6_FMT ", %u)\n",
+			       genhash ? "failed" : "mismatch",
+			       NIP6(ip6h->saddr), ntohs(th->source),
+			       NIP6(ip6h->daddr), ntohs(th->dest));
+		}
+		return 1;
+	}
+	return 0;
+}
+#endif
+
+static struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 	.family		=	AF_INET6,
 	.obj_size	=	sizeof(struct tcp6_request_sock),
 	.rtx_syn_ack	=	tcp_v6_send_synack,
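tcp_v6_parse_md5_keys() above is the receive end of the new TCP_MD5SIG socket option for IPv6 (and v4-mapped) sockets: a key is installed per peer address before connect() or accept(), and a zero key length deletes it. A hedged user-space sketch follows; the struct tcp_md5sig fields (tcpm_addr, tcpm_keylen, tcpm_key) and TCP_MD5SIG_MAXKEYLEN come straight from this patch series, while the helper name and addresses are illustrative and the exact header providing the definitions depends on the libc of the era:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/tcp.h>      /* struct tcp_md5sig, TCP_MD5SIG, TCP_MD5SIG_MAXKEYLEN */

/* Install an RFC 2385 key for one IPv6 peer on socket fd.
 * Calling this with keylen == 0 would remove the key instead. */
static int set_tcp_md5_key(int fd, const char *peer6, const void *key, int keylen)
{
    struct tcp_md5sig md5;
    struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&md5.tcpm_addr;

    if (keylen > TCP_MD5SIG_MAXKEYLEN)
        return -1;

    memset(&md5, 0, sizeof(md5));
    sin6->sin6_family = AF_INET6;
    if (inet_pton(AF_INET6, peer6, &sin6->sin6_addr) != 1)
        return -1;

    md5.tcpm_keylen = keylen;
    memcpy(md5.tcpm_key, key, keylen);

    return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}

Both ends must configure the same key for segments to validate; a v4-mapped peer address takes the tcp_v4_md5_do_add()/do_del() path seen above.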
@@ -535,9 +929,16 @@ static struct request_sock_ops tcp6_request_sock_ops = {
 	.send_reset	=	tcp_v6_send_reset
 };
 
+#ifdef CONFIG_TCP_MD5SIG
+static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
+	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
+};
+#endif
+
 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
 	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
 	.twsk_unique	= tcp_twsk_unique,
+	.twsk_destructor= tcp_twsk_destructor,
 };
 
 static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
@@ -547,7 +948,7 @@ static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
-		skb->csum = offsetof(struct tcphdr, check);
+		skb->csum_offset = offsetof(struct tcphdr, check);
 	} else {
 		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
 		                            csum_partial((char *)th, th->doff<<2,
@@ -569,16 +970,20 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
 	th->check = 0;
 	th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
 	                             IPPROTO_TCP, 0);
-	skb->csum = offsetof(struct tcphdr, check);
+	skb->csum_offset = offsetof(struct tcphdr, check);
 	skb->ip_summed = CHECKSUM_PARTIAL;
 	return 0;
 }
 
-static void tcp_v6_send_reset(struct sk_buff *skb)
+static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcphdr *th = skb->h.th, *t1;
 	struct sk_buff *buff;
 	struct flowi fl;
+	int tot_len = sizeof(*th);
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_md5sig_key *key;
+#endif
 
 	if (th->rst)
 		return;
@@ -586,25 +991,35 @@ static void tcp_v6_send_reset(struct sk_buff *skb)
 	if (!ipv6_unicast_destination(skb))
 		return;
 
+#ifdef CONFIG_TCP_MD5SIG
+	if (sk)
+		key = tcp_v6_md5_do_lookup(sk, &skb->nh.ipv6h->daddr);
+	else
+		key = NULL;
+
+	if (key)
+		tot_len += TCPOLEN_MD5SIG_ALIGNED;
+#endif
+
 	/*
 	 * We need to grab some memory, and put together an RST,
 	 * and then put it into the queue to be sent.
 	 */
 
-	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr),
+	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
 	                 GFP_ATOMIC);
 	if (buff == NULL)
 		return;
 
-	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr));
+	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
 
-	t1 = (struct tcphdr *) skb_push(buff,sizeof(struct tcphdr));
+	t1 = (struct tcphdr *) skb_push(buff, tot_len);
 
 	/* Swap the send and the receive. */
 	memset(t1, 0, sizeof(*t1));
 	t1->dest = th->source;
 	t1->source = th->dest;
-	t1->doff = sizeof(*t1)/4;
+	t1->doff = tot_len / 4;
 	t1->rst = 1;
 
 	if(th->ack) {
@@ -615,6 +1030,22 @@ static void tcp_v6_send_reset(struct sk_buff *skb)
 			    + skb->len - (th->doff<<2));
 	}
 
+#ifdef CONFIG_TCP_MD5SIG
+	if (key) {
+		__be32 *opt = (__be32*)(t1 + 1);
+		opt[0] = htonl((TCPOPT_NOP << 24) |
+		               (TCPOPT_NOP << 16) |
+		               (TCPOPT_MD5SIG << 8) |
+		               TCPOLEN_MD5SIG);
+		tcp_v6_do_calc_md5_hash((__u8*)&opt[1],
+		                        key,
+		                        &skb->nh.ipv6h->daddr,
+		                        &skb->nh.ipv6h->saddr,
+		                        t1, IPPROTO_TCP,
+		                        tot_len);
+	}
+#endif
+
 	buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
 
 	memset(&fl, 0, sizeof(fl));
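When a key exists for the destination of the offending segment, the RST grows by TCPOLEN_MD5SIG_ALIGNED (20) bytes and t1->doff is derived from tot_len, so the option block has to stay 32-bit aligned. The word built into opt[0] above lays out as follows (a sketch for reference; kind 19 and length 18 are the RFC 2385 values):

/*
 *  +--------+--------+--------+--------+
 *  | NOP(1) | NOP(1) | kind=19| len=18 |   opt[0] == htonl(0x01011312)
 *  +--------+--------+--------+--------+
 *  |        MD5 digest, 16 bytes       |   opt[1..4], filled in by
 *  |                ...                |   tcp_v6_do_calc_md5_hash()
 *  +-----------------------------------+
 *
 * 4 + 16 = 20 = TCPOLEN_MD5SIG_ALIGNED, so tot_len stays a multiple of 4
 * and t1->doff = tot_len / 4 still describes the header exactly.
 */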
@@ -645,15 +1076,37 @@ static void tcp_v6_send_reset(struct sk_buff *skb)
 	kfree_skb(buff);
 }
 
-static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
+static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
+                            struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
 {
 	struct tcphdr *th = skb->h.th, *t1;
 	struct sk_buff *buff;
 	struct flowi fl;
 	int tot_len = sizeof(struct tcphdr);
+	__be32 *topt;
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_md5sig_key *key;
+	struct tcp_md5sig_key tw_key;
+#endif
+
+#ifdef CONFIG_TCP_MD5SIG
+	if (!tw && skb->sk) {
+		key = tcp_v6_md5_do_lookup(skb->sk, &skb->nh.ipv6h->daddr);
+	} else if (tw && tw->tw_md5_keylen) {
+		tw_key.key = tw->tw_md5_key;
+		tw_key.keylen = tw->tw_md5_keylen;
+		key = &tw_key;
+	} else {
+		key = NULL;
+	}
+#endif
 
 	if (ts)
 		tot_len += TCPOLEN_TSTAMP_ALIGNED;
+#ifdef CONFIG_TCP_MD5SIG
+	if (key)
+		tot_len += TCPOLEN_MD5SIG_ALIGNED;
+#endif
 
 	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
 	                 GFP_ATOMIC);
@@ -673,15 +1126,29 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
 	t1->ack_seq = htonl(ack);
 	t1->ack = 1;
 	t1->window = htons(win);
+
+	topt = (__be32 *)(t1 + 1);
 
 	if (ts) {
-		u32 *ptr = (u32*)(t1 + 1);
-		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
-		               (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
-		*ptr++ = htonl(tcp_time_stamp);
-		*ptr = htonl(ts);
+		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+		                (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
+		*topt++ = htonl(tcp_time_stamp);
+		*topt = htonl(ts);
 	}
 
+#ifdef CONFIG_TCP_MD5SIG
+	if (key) {
+		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+		                (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
+		tcp_v6_do_calc_md5_hash((__u8 *)topt,
+		                        key,
+		                        &skb->nh.ipv6h->daddr,
+		                        &skb->nh.ipv6h->saddr,
+		                        t1, IPPROTO_TCP,
+		                        tot_len);
+	}
+#endif
+
 	buff->csum = csum_partial((char *)t1, tot_len, 0);
 
 	memset(&fl, 0, sizeof(fl));
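The ACK path sizes its header the same way; with both a timestamp and a signature present the arithmetic works out as below (constants from the kernel's TCP option definitions), comfortably inside the 60-byte TCP header limit:

/*
 *   sizeof(struct tcphdr)        20 bytes
 * + TCPOLEN_TSTAMP_ALIGNED       12 bytes  (NOP NOP kind=8 len=10 + two 4-byte stamps)
 * + TCPOLEN_MD5SIG_ALIGNED       20 bytes  (NOP NOP kind=19 len=18 + 16-byte digest)
 *   -------------------------------------
 *   tot_len                    = 52 bytes, t1->doff = 52 / 4 = 13   (maximum is 15)
 */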
@@ -712,9 +1179,9 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 {
 	struct inet_timewait_sock *tw = inet_twsk(sk);
-	const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
+	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 
-	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+	tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 	                tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 	                tcptw->tw_ts_recent);
 
@@ -723,7 +1190,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 
 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
 {
-	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
+	tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
 }
 
 
@@ -794,6 +1261,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	if (req == NULL)
 		goto drop;
 
+#ifdef CONFIG_TCP_MD5SIG
+	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
+#endif
+
 	tcp_clear_options(&tmp_opt);
 	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 	tmp_opt.user_mss = tp->rx_opt.user_mss;
@@ -822,7 +1293,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	treq->iif = inet6_iif(skb);
 
 	if (isn == 0)
-		isn = tcp_v6_init_sequence(sk,skb);
+		isn = tcp_v6_init_sequence(skb);
 
 	tcp_rsk(req)->snt_isn = isn;
 
@@ -852,6 +1323,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	struct tcp_sock *newtp;
 	struct sock *newsk;
 	struct ipv6_txoptions *opt;
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_md5sig_key *key;
+#endif
 
 	if (skb->protocol == htons(ETH_P_IP)) {
 		/*
@@ -882,6 +1356,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
+#ifdef CONFIG_TCP_MD5SIG
+		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
+#endif
+
 		newnp->pktoptions = NULL;
 		newnp->opt = NULL;
 		newnp->mcast_oif = inet6_iif(skb);
@@ -1016,6 +1494,21 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
 	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
 
+#ifdef CONFIG_TCP_MD5SIG
+	/* Copy over the MD5 key from the original socket */
+	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
+		/* We're using one, so create a matching key
+		 * on the newsk structure. If we fail to get
+		 * memory, then we end up not copying the key
+		 * across. Shucks.
+		 */
+		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
+		if (newkey != NULL)
+			tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
+			                  newkey, key->keylen);
+	}
+#endif
+
 	__inet6_hash(&tcp_hashinfo, newsk);
 	inet_inherit_port(&tcp_hashinfo, sk, newsk);
 
@@ -1031,7 +1524,7 @@ out:
 	return NULL;
 }
 
-static int tcp_v6_checksum_init(struct sk_buff *skb)
+static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
 {
 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
 		if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
@@ -1041,8 +1534,8 @@ static int tcp_v6_checksum_init(struct sk_buff *skb)
 		}
 	}
 
-	skb->csum = ~tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
-	                          &skb->nh.ipv6h->daddr, 0);
+	skb->csum = ~csum_unfold(tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
+	                                      &skb->nh.ipv6h->daddr, 0));
 
 	if (skb->len <= 76) {
 		return __skb_checksum_complete(skb);
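The csum_unfold() wrapper comes from the checksum-annotation work going on in the same release: tcp_v6_check() now returns a __sum16, and unfolding re-labels it as a __wsum before the bitwise negation so sparse stays quiet. In essence it is just a type cast:

/* Essentially a sparse type re-label; no bits change. */
static inline __wsum csum_unfold(__sum16 n)
{
    return (__force __wsum)n;
}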
@@ -1075,6 +1568,11 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	if (skb->protocol == htons(ETH_P_IP))
 		return tcp_v4_do_rcv(sk, skb);
 
+#ifdef CONFIG_TCP_MD5SIG
+	if (tcp_v6_inbound_md5_hash (sk, skb))
+		goto discard;
+#endif
+
 	if (sk_filter(sk, skb))
 		goto discard;
 
@@ -1140,7 +1638,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	return 0;
 
 reset:
-	tcp_v6_send_reset(skb);
+	tcp_v6_send_reset(sk, skb);
 discard:
 	if (opt_skb)
 		__kfree_skb(opt_skb);
@@ -1265,7 +1763,7 @@ no_tcp_socket:
 bad_packet:
 		TCP_INC_STATS_BH(TCP_MIB_INERRS);
 	} else {
-		tcp_v6_send_reset(skb);
+		tcp_v6_send_reset(NULL, skb);
 	}
 
 discard_it:
@@ -1344,6 +1842,15 @@ static struct inet_connection_sock_af_ops ipv6_specific = {
 #endif
 };
 
+#ifdef CONFIG_TCP_MD5SIG
+static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
+	.md5_lookup	=	tcp_v6_md5_lookup,
+	.calc_md5_hash	=	tcp_v6_calc_md5_hash,
+	.md5_add	=	tcp_v6_md5_add_func,
+	.md5_parse	=	tcp_v6_parse_md5_keys,
+};
+#endif
+
 /*
  *	TCP over IPv4 via INET6 API
  */
@@ -1366,6 +1873,15 @@ static struct inet_connection_sock_af_ops ipv6_mapped = {
 #endif
 };
 
+#ifdef CONFIG_TCP_MD5SIG
+static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
+	.md5_lookup	=	tcp_v4_md5_lookup,
+	.calc_md5_hash	=	tcp_v4_calc_md5_hash,
+	.md5_add	=	tcp_v6_md5_add_func,
+	.md5_parse	=	tcp_v6_parse_md5_keys,
+};
+#endif
+
 /* NOTE: A lot of things set to zero explicitly by call to
  * sk_alloc() so need not be done here.
  */
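These two tcp_sock_af_ops tables are what tp->af_specific points at, chosen in tcp_v6_init_sock(), in tcp_v6_connect() when the peer turns out to be v4-mapped, and in tcp_v6_syn_recv_sock() above, so family-independent TCP code never tests the address family itself. A hedged sketch of a generic caller follows; the function name is illustrative, but the signatures match the ops defined in this patch:

/* Illustrative only: generic TCP code signing an outgoing segment by
 * indirecting through the per-family ops installed above. */
static int tcp_sign_segment(struct sock *sk, struct tcphdr *th, int tcplen,
                            char *md5_hash_out)
{
    struct tcp_sock *tp = tcp_sk(sk);
    struct tcp_md5sig_key *key;

    /* Resolves to tcp_v4_md5_lookup() or tcp_v6_md5_lookup(). */
    key = tp->af_specific->md5_lookup(sk, sk);
    if (!key)
        return 0;   /* connection is not signed */

    /* Resolves to tcp_v4_calc_md5_hash() or tcp_v6_calc_md5_hash(). */
    return tp->af_specific->calc_md5_hash(md5_hash_out, key, sk,
                                          NULL, NULL, th,
                                          sk->sk_protocol, tcplen);
}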
@@ -1405,6 +1921,10 @@ static int tcp_v6_init_sock(struct sock *sk)
 	sk->sk_write_space = sk_stream_write_space;
 	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
 
+#ifdef CONFIG_TCP_MD5SIG
+	tp->af_specific = &tcp_sock_ipv6_specific;
+#endif
+
 	sk->sk_sndbuf = sysctl_tcp_wmem[1];
 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
@@ -1415,6 +1935,11 @@
 
 static int tcp_v6_destroy_sock(struct sock *sk)
 {
+#ifdef CONFIG_TCP_MD5SIG
+	/* Clean up the MD5 key list */
+	if (tcp_sk(sk)->md5sig_info)
+		tcp_v6_clear_md5_list(sk);
+#endif
 	tcp_v4_destroy_sock(sk);
 	return inet6_destroy_sock(sk);
 }