author		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>	2006-11-14 22:07:45 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-12-03 00:22:39 -0500
commit		cfb6eeb4c860592edd123fdea908d23c6ad1c7dc (patch)
tree		361c073622faa540ef6602ef1b0a6e8c0a17fc60 /net
parent		bf6bce71eae386dbc37f93af7e5ad173450d9945 (diff)
[TCP]: MD5 Signature Option (RFC2385) support.
Based on implementation by Rick Payne.

Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
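The feature is driven from userspace through the new TCP_MD5SIG socket option that this patch parses in tcp_v4_parse_md5_keys() / tcp_v6_parse_md5_keys(). As a minimal sketch (not part of the patch; it only assumes the struct tcp_md5sig fields tcpm_addr, tcpm_keylen and tcpm_key that those parsers read), a BGP-style daemon would install a peer key like this before connect() or listen():

#include <string.h>
#include <arpa/inet.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Install an RFC2385 key for one IPv4 peer on socket fd. */
static int set_tcp_md5_key(int fd, const char *peer_ip, const char *secret)
{
	struct tcp_md5sig md5;
	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;

	memset(&md5, 0, sizeof(md5));
	sin->sin_family = AF_INET;
	if (inet_pton(AF_INET, peer_ip, &sin->sin_addr) != 1)
		return -1;
	md5.tcpm_keylen = strlen(secret);	/* must be <= TCP_MD5SIG_MAXKEYLEN */
	memcpy(md5.tcpm_key, secret, md5.tcpm_keylen);

	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}

Passing tcpm_keylen == 0 deletes the key for that address, mirroring the delete path in the parsers below.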
Diffstat (limited to 'net')
-rw-r--r--	net/dccp/ipv4.c			|    6
-rw-r--r--	net/dccp/ipv6.c			|    6
-rw-r--r--	net/dccp/minisocks.c		|    2
-rw-r--r--	net/ipv4/Kconfig		|   16
-rw-r--r--	net/ipv4/tcp.c			|  137
-rw-r--r--	net/ipv4/tcp_input.c		|    8
-rw-r--r--	net/ipv4/tcp_ipv4.c		|  673
-rw-r--r--	net/ipv4/tcp_minisocks.c	|   64
-rw-r--r--	net/ipv4/tcp_output.c		|  111
-rw-r--r--	net/ipv6/tcp_ipv6.c		|  568
10 files changed, 1531 insertions(+), 60 deletions(-)
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 34d6d197c3b2..35985334daee 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -509,7 +509,7 @@ out:
 	return err;
 }
 
-static void dccp_v4_ctl_send_reset(struct sk_buff *rxskb)
+static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
 {
 	int err;
 	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
@@ -724,7 +724,7 @@ int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 	return 0;
 
 reset:
-	dccp_v4_ctl_send_reset(skb);
+	dccp_v4_ctl_send_reset(sk, skb);
 discard:
 	kfree_skb(skb);
 	return 0;
@@ -913,7 +913,7 @@ no_dccp_socket:
 	if (dh->dccph_type != DCCP_PKT_RESET) {
 		DCCP_SKB_CB(skb)->dccpd_reset_code =
 					DCCP_RESET_CODE_NO_CONNECTION;
-		dccp_v4_ctl_send_reset(skb);
+		dccp_v4_ctl_send_reset(sk, skb);
 	}
 
 discard_it:
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index fc326173c215..e0a0607862ef 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -310,7 +310,7 @@ static void dccp_v6_reqsk_destructor(struct request_sock *req)
 	kfree_skb(inet6_rsk(req)->pktopts);
 }
 
-static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb)
+static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
 {
 	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
 	const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
@@ -805,7 +805,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	return 0;
 
 reset:
-	dccp_v6_ctl_send_reset(skb);
+	dccp_v6_ctl_send_reset(sk, skb);
 discard:
 	if (opt_skb != NULL)
 		__kfree_skb(opt_skb);
@@ -902,7 +902,7 @@ no_dccp_socket:
 	if (dh->dccph_type != DCCP_PKT_RESET) {
 		DCCP_SKB_CB(skb)->dccpd_reset_code =
 					DCCP_RESET_CODE_NO_CONNECTION;
-		dccp_v6_ctl_send_reset(skb);
+		dccp_v6_ctl_send_reset(sk, skb);
 	}
 
 discard_it:
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 0c49733f5be1..3975048d8094 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -246,7 +246,7 @@ listen_overflow:
 	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
 drop:
 	if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
-		req->rsk_ops->send_reset(skb);
+		req->rsk_ops->send_reset(sk, skb);
 
 	inet_csk_reqsk_queue_drop(sk, req, prev);
 	goto out;
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index bc298bcc344e..39e0cb763588 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -618,5 +618,21 @@ config DEFAULT_TCP_CONG
 	default "reno" if DEFAULT_RENO
 	default "cubic"
 
+config TCP_MD5SIG
+	bool "TCP: MD5 Signature Option support (RFC2385) (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	select CRYPTO
+	select CRYPTO_MD5
+	---help---
+	  RFC2385 specifies a method of giving MD5 protection to TCP sessions.
+	  Its main (only?) use is to protect BGP sessions between core routers
+	  on the Internet.
+
+	  If unsure, say N.
+
+config TCP_MD5SIG_DEBUG
+	bool "TCP: MD5 Signature Option debugging"
+	depends on TCP_MD5SIG
+
 source "net/ipv4/ipvs/Kconfig"
 
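Once merged, enabling the option is ordinary Kconfig selection; a .config fragment for a build of this era would read as follows (CONFIG_EXPERIMENTAL must already be set for the prompt to appear, and the debug knob is optional):

CONFIG_EXPERIMENTAL=y
CONFIG_TCP_MD5SIG=y
# CONFIG_TCP_MD5SIG_DEBUG is not set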
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c05e8edaf544..dadef867a3bb 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -258,6 +258,7 @@
 #include <linux/bootmem.h>
 #include <linux/cache.h>
 #include <linux/err.h>
+#include <linux/crypto.h>
 
 #include <net/icmp.h>
 #include <net/tcp.h>
@@ -1942,6 +1943,13 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 	}
 	break;
 
+#ifdef CONFIG_TCP_MD5SIG
+	case TCP_MD5SIG:
+		/* Read the IP->Key mappings from userspace */
+		err = tp->af_specific->md5_parse(sk, optval, optlen);
+		break;
+#endif
+
 	default:
 		err = -ENOPROTOOPT;
 		break;
@@ -2231,6 +2239,135 @@ out:
 }
 EXPORT_SYMBOL(tcp_tso_segment);
 
+#ifdef CONFIG_TCP_MD5SIG
+static unsigned long tcp_md5sig_users;
+static struct tcp_md5sig_pool **tcp_md5sig_pool;
+static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
+
+static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
+{
+	int cpu;
+	for_each_possible_cpu(cpu) {
+		struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
+		if (p) {
+			if (p->md5_desc.tfm)
+				crypto_free_hash(p->md5_desc.tfm);
+			kfree(p);
+			p = NULL;
+		}
+	}
+	free_percpu(pool);
+}
+
+void tcp_free_md5sig_pool(void)
+{
+	struct tcp_md5sig_pool **pool = NULL;
+
+	spin_lock(&tcp_md5sig_pool_lock);
+	if (--tcp_md5sig_users == 0) {
+		pool = tcp_md5sig_pool;
+		tcp_md5sig_pool = NULL;
+	}
+	spin_unlock(&tcp_md5sig_pool_lock);
+	if (pool)
+		__tcp_free_md5sig_pool(pool);
+}
+
+EXPORT_SYMBOL(tcp_free_md5sig_pool);
+
+struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
+{
+	int cpu;
+	struct tcp_md5sig_pool **pool;
+
+	pool = alloc_percpu(struct tcp_md5sig_pool *);
+	if (!pool)
+		return NULL;
+
+	for_each_possible_cpu(cpu) {
+		struct tcp_md5sig_pool *p;
+		struct crypto_hash *hash;
+
+		p = kzalloc(sizeof(*p), GFP_KERNEL);
+		if (!p)
+			goto out_free;
+		*per_cpu_ptr(pool, cpu) = p;
+
+		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+		if (!hash || IS_ERR(hash))
+			goto out_free;
+
+		p->md5_desc.tfm = hash;
+	}
+	return pool;
+out_free:
+	__tcp_free_md5sig_pool(pool);
+	return NULL;
+}
+
+struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
+{
+	struct tcp_md5sig_pool **pool;
+	int alloc = 0;
+
+retry:
+	spin_lock(&tcp_md5sig_pool_lock);
+	pool = tcp_md5sig_pool;
+	if (tcp_md5sig_users++ == 0) {
+		alloc = 1;
+		spin_unlock(&tcp_md5sig_pool_lock);
+	} else if (!pool) {
+		tcp_md5sig_users--;
+		spin_unlock(&tcp_md5sig_pool_lock);
+		cpu_relax();
+		goto retry;
+	} else
+		spin_unlock(&tcp_md5sig_pool_lock);
+
+	if (alloc) {
+		/* we cannot hold spinlock here because this may sleep. */
+		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
+		spin_lock(&tcp_md5sig_pool_lock);
+		if (!p) {
+			tcp_md5sig_users--;
+			spin_unlock(&tcp_md5sig_pool_lock);
+			return NULL;
+		}
+		pool = tcp_md5sig_pool;
+		if (pool) {
+			/* oops, it has already been assigned. */
+			spin_unlock(&tcp_md5sig_pool_lock);
+			__tcp_free_md5sig_pool(p);
+		} else {
+			tcp_md5sig_pool = pool = p;
+			spin_unlock(&tcp_md5sig_pool_lock);
+		}
+	}
+	return pool;
+}
+
+EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
+
+struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
+{
+	struct tcp_md5sig_pool **p;
+	spin_lock(&tcp_md5sig_pool_lock);
+	p = tcp_md5sig_pool;
+	if (p)
+		tcp_md5sig_users++;
+	spin_unlock(&tcp_md5sig_pool_lock);
+	return (p ? *per_cpu_ptr(p, cpu) : NULL);
+}
+
+EXPORT_SYMBOL(__tcp_get_md5sig_pool);
+
+void __tcp_put_md5sig_pool(void) {
+	__tcp_free_md5sig_pool(tcp_md5sig_pool);
+}
+
+EXPORT_SYMBOL(__tcp_put_md5sig_pool);
+#endif
+
 extern void __skb_cb_too_small_for_tcp(int, int);
 extern struct tcp_congestion_ops tcp_reno;
 
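The hunk above adds a refcounted, per-CPU pool of MD5 transforms: tcp_alloc_md5sig_pool() takes a reference (allocating the pool on first use), tcp_free_md5sig_pool() drops it, and __tcp_get_md5sig_pool(cpu) returns a pre-allocated transform so nothing is allocated in the packet path. A caller-side sketch, assuming the tcp_get_md5sig_pool()/tcp_put_md5sig_pool() wrappers used elsewhere in this patch (they pin the current CPU around __tcp_get_md5sig_pool(); example_md5_digest() itself is hypothetical):

static int example_md5_digest(struct scatterlist *sg, unsigned int nbytes,
			      char *out)
{
	struct tcp_md5sig_pool *hp;
	int err;

	hp = tcp_get_md5sig_pool();	/* this CPU's transform, or NULL */
	if (!hp)
		return -ENOMEM;

	err = crypto_hash_init(&hp->md5_desc);
	if (!err)
		err = crypto_hash_update(&hp->md5_desc, sg, nbytes);
	if (!err)
		err = crypto_hash_final(&hp->md5_desc, out);

	tcp_put_md5sig_pool();		/* unpin the CPU */
	return err;
}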
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4a8c96cdec7d..6ab3423674bb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2677,6 +2677,14 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
 				   opt_rx->sack_ok) {
 					TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
 				}
+#ifdef CONFIG_TCP_MD5SIG
+			case TCPOPT_MD5SIG:
+				/*
+				 * The MD5 Hash has already been
+				 * checked (see tcp_v{4,6}_do_rcv()).
+				 */
+				break;
+#endif
 			};
 			ptr+=opsize-2;
 			length-=opsize;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0ad0904bf56c..8c8e8112f98d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -78,6 +78,9 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+
 int sysctl_tcp_tw_reuse __read_mostly;
 int sysctl_tcp_low_latency __read_mostly;
 
@@ -89,6 +92,13 @@ static struct socket *tcp_socket;
 
 void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
 
+#ifdef CONFIG_TCP_MD5SIG
+static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr);
+static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+				   __be32 saddr, __be32 daddr, struct tcphdr *th,
+				   int protocol, int tcplen);
+#endif
+
 struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
 	.lhash_lock = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock),
 	.lhash_users = ATOMIC_INIT(0),
@@ -526,11 +536,19 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
  * Exception: precedence violation. We do not implement it in any case.
  */
 
-static void tcp_v4_send_reset(struct sk_buff *skb)
+static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcphdr *th = skb->h.th;
-	struct tcphdr rth;
+	struct {
+		struct tcphdr th;
+#ifdef CONFIG_TCP_MD5SIG
+		u32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
+#endif
+	} rep;
 	struct ip_reply_arg arg;
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_md5sig_key *key;
+#endif
 
 	/* Never send a reset in response to a reset. */
 	if (th->rst)
@@ -540,29 +558,50 @@ static void tcp_v4_send_reset(struct sk_buff *skb)
 		return;
 
 	/* Swap the send and the receive. */
-	memset(&rth, 0, sizeof(struct tcphdr));
-	rth.dest = th->source;
-	rth.source = th->dest;
-	rth.doff = sizeof(struct tcphdr) / 4;
-	rth.rst = 1;
+	memset(&rep, 0, sizeof(rep));
+	rep.th.dest = th->source;
+	rep.th.source = th->dest;
+	rep.th.doff = sizeof(struct tcphdr) / 4;
+	rep.th.rst = 1;
 
 	if (th->ack) {
-		rth.seq = th->ack_seq;
+		rep.th.seq = th->ack_seq;
 	} else {
-		rth.ack = 1;
-		rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
-				    skb->len - (th->doff << 2));
+		rep.th.ack = 1;
+		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
+				       skb->len - (th->doff << 2));
 	}
 
 	memset(&arg, 0, sizeof arg);
-	arg.iov[0].iov_base = (unsigned char *)&rth;
-	arg.iov[0].iov_len = sizeof rth;
+	arg.iov[0].iov_base = (unsigned char *)&rep;
+	arg.iov[0].iov_len = sizeof(rep.th);
+
+#ifdef CONFIG_TCP_MD5SIG
+	key = sk ? tcp_v4_md5_do_lookup(sk, skb->nh.iph->daddr) : NULL;
+	if (key) {
+		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
+				   (TCPOPT_NOP << 16) |
+				   (TCPOPT_MD5SIG << 8) |
+				   TCPOLEN_MD5SIG);
+		/* Update length and the length the header thinks exists */
+		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
+		rep.th.doff = arg.iov[0].iov_len / 4;
+
+		tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[1],
+					key,
+					skb->nh.iph->daddr,
+					skb->nh.iph->saddr,
+					&rep.th, IPPROTO_TCP,
+					arg.iov[0].iov_len);
+	}
+#endif
+
 	arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
 				      skb->nh.iph->saddr, /*XXX*/
 				      sizeof(struct tcphdr), IPPROTO_TCP, 0);
 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 
-	ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
+	ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
 
 	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
 	TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
@@ -572,15 +611,24 @@ static void tcp_v4_send_reset(struct sk_buff *skb)
    outside socket context is ugly, certainly. What can I do?
  */
 
-static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
+			    struct sk_buff *skb, u32 seq, u32 ack,
 			    u32 win, u32 ts)
 {
 	struct tcphdr *th = skb->h.th;
 	struct {
 		struct tcphdr th;
-		u32 tsopt[TCPOLEN_TSTAMP_ALIGNED >> 2];
+		u32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
+#ifdef CONFIG_TCP_MD5SIG
+			+ (TCPOLEN_MD5SIG_ALIGNED >> 2)
+#endif
+			];
 	} rep;
 	struct ip_reply_arg arg;
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_md5sig_key *key;
+	struct tcp_md5sig_key tw_key;
+#endif
 
 	memset(&rep.th, 0, sizeof(struct tcphdr));
 	memset(&arg, 0, sizeof arg);
@@ -588,12 +636,12 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 	arg.iov[0].iov_base = (unsigned char *)&rep;
 	arg.iov[0].iov_len = sizeof(rep.th);
 	if (ts) {
-		rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
-				     (TCPOPT_TIMESTAMP << 8) |
-				     TCPOLEN_TIMESTAMP);
-		rep.tsopt[1] = htonl(tcp_time_stamp);
-		rep.tsopt[2] = htonl(ts);
-		arg.iov[0].iov_len = sizeof(rep);
+		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+				   (TCPOPT_TIMESTAMP << 8) |
+				   TCPOLEN_TIMESTAMP);
+		rep.opt[1] = htonl(tcp_time_stamp);
+		rep.opt[2] = htonl(ts);
+		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
 	}
 
 	/* Swap the send and the receive. */
@@ -605,6 +653,44 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 	rep.th.ack = 1;
 	rep.th.window = htons(win);
 
+#ifdef CONFIG_TCP_MD5SIG
+	/*
+	 * The SKB holds an incoming packet, but may not have a valid ->sk
+	 * pointer. This is especially the case when we're dealing with a
+	 * TIME_WAIT ack, because the sk structure is long gone, and only
+	 * the tcp_timewait_sock remains. So the md5 key is stashed in that
+	 * structure, and we use it in preference. I believe that (twsk ||
+	 * skb->sk) holds true, but we program defensively.
+	 */
+	if (!twsk && skb->sk) {
+		key = tcp_v4_md5_do_lookup(skb->sk, skb->nh.iph->daddr);
+	} else if (twsk && twsk->tw_md5_keylen) {
+		tw_key.key = twsk->tw_md5_key;
+		tw_key.keylen = twsk->tw_md5_keylen;
+		key = &tw_key;
+	} else {
+		key = NULL;
+	}
+
+	if (key) {
+		int offset = (ts) ? 3 : 0;
+
+		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
+					  (TCPOPT_NOP << 16) |
+					  (TCPOPT_MD5SIG << 8) |
+					  TCPOLEN_MD5SIG);
+		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
+		rep.th.doff = arg.iov[0].iov_len/4;
+
+		tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[offset],
+					key,
+					skb->nh.iph->daddr,
+					skb->nh.iph->saddr,
+					&rep.th, IPPROTO_TCP,
+					arg.iov[0].iov_len);
+	}
+#endif
+
 	arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
 				      skb->nh.iph->saddr, /*XXX*/
 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
@@ -618,9 +704,9 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 {
 	struct inet_timewait_sock *tw = inet_twsk(sk);
-	const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
+	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 
-	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+	tcp_v4_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent);
 
 	inet_twsk_put(tw);
@@ -628,7 +714,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 
 static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
 {
-	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
+	tcp_v4_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1,
+			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
 			req->ts_recent);
 }
 
@@ -714,6 +801,461 @@ static struct ip_options *tcp_v4_save_options(struct sock *sk,
 	return dopt;
 }
 
+#ifdef CONFIG_TCP_MD5SIG
+/*
+ * RFC2385 MD5 checksumming requires a mapping of
+ * IP address->MD5 Key.
+ * We need to maintain these in the sk structure.
+ */
+
+/* Find the Key structure for an address. */
+static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int i;
+
+	if (!tp->md5sig_info || !tp->md5sig_info->entries4)
+		return NULL;
+	for (i = 0; i < tp->md5sig_info->entries4; i++) {
+		if (tp->md5sig_info->keys4[i].addr == addr)
+			return (struct tcp_md5sig_key *)&tp->md5sig_info->keys4[i];
+	}
+	return NULL;
+}
+
+struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
+					 struct sock *addr_sk)
+{
+	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->daddr);
+}
+
+EXPORT_SYMBOL(tcp_v4_md5_lookup);
+
+struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
+					       struct request_sock *req)
+{
+	return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
+}
+
+/* This can be called on a newly created socket, from other files */
+int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
+		      u8 *newkey, u8 newkeylen)
+{
+	/* Add Key to the list */
+	struct tcp4_md5sig_key *key;
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcp4_md5sig_key *keys;
+
+	key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr);
+	if (key) {
+		/* Pre-existing entry - just update that one. */
+		kfree(key->key);
+		key->key = newkey;
+		key->keylen = newkeylen;
+	} else {
+		if (!tp->md5sig_info) {
+			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
+			if (!tp->md5sig_info) {
+				kfree(newkey);
+				return -ENOMEM;
+			}
+		}
+		if (tcp_alloc_md5sig_pool() == NULL) {
+			kfree(newkey);
+			return -ENOMEM;
+		}
+		if (tp->md5sig_info->alloced4 == tp->md5sig_info->entries4) {
+			keys = kmalloc((sizeof(struct tcp4_md5sig_key) *
+					(tp->md5sig_info->entries4 + 1)), GFP_ATOMIC);
+			if (!keys) {
+				kfree(newkey);
+				tcp_free_md5sig_pool();
+				return -ENOMEM;
+			}
+
+			if (tp->md5sig_info->entries4)
+				memcpy(keys, tp->md5sig_info->keys4,
+				       (sizeof(struct tcp4_md5sig_key) *
+					tp->md5sig_info->entries4));
+
+			/* Free old key list, and reference new one */
+			if (tp->md5sig_info->keys4)
+				kfree(tp->md5sig_info->keys4);
+			tp->md5sig_info->keys4 = keys;
+			tp->md5sig_info->alloced4++;
+		}
+		tp->md5sig_info->entries4++;
+		tp->md5sig_info->keys4[tp->md5sig_info->entries4 - 1].addr = addr;
+		tp->md5sig_info->keys4[tp->md5sig_info->entries4 - 1].key = newkey;
+		tp->md5sig_info->keys4[tp->md5sig_info->entries4 - 1].keylen = newkeylen;
+	}
+	return 0;
+}
+
+EXPORT_SYMBOL(tcp_v4_md5_do_add);
+
+static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
+			       u8 *newkey, u8 newkeylen)
+{
+	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->daddr,
+				 newkey, newkeylen);
+}
+
+int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int i;
+
+	for (i = 0; i < tp->md5sig_info->entries4; i++) {
+		if (tp->md5sig_info->keys4[i].addr == addr) {
+			/* Free the key */
+			kfree(tp->md5sig_info->keys4[i].key);
+			tp->md5sig_info->entries4--;
+
+			if (tp->md5sig_info->entries4 == 0) {
+				kfree(tp->md5sig_info->keys4);
+				tp->md5sig_info->keys4 = NULL;
+			} else {
+				/* Need to do some manipulation */
+				if (tp->md5sig_info->entries4 != i)
+					memcpy(&tp->md5sig_info->keys4[i],
+					       &tp->md5sig_info->keys4[i+1],
+					       (tp->md5sig_info->entries4 - i)
+					       * sizeof(struct tcp4_md5sig_key));
+			}
+			tcp_free_md5sig_pool();
+			return 0;
+		}
+	}
+	return -ENOENT;
+}
+
+EXPORT_SYMBOL(tcp_v4_md5_do_del);
+
+static void tcp_v4_clear_md5_list(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	/* Free each key, then the set of key keys,
+	 * the crypto element, and then decrement our
+	 * hold on the last resort crypto.
+	 */
+	if (tp->md5sig_info->entries4) {
+		int i;
+		for (i = 0; i < tp->md5sig_info->entries4; i++)
+			kfree(tp->md5sig_info->keys4[i].key);
+		tp->md5sig_info->entries4 = 0;
+		tcp_free_md5sig_pool();
+	}
+	if (tp->md5sig_info->keys4) {
+		kfree(tp->md5sig_info->keys4);
+		tp->md5sig_info->keys4 = NULL;
+		tp->md5sig_info->alloced4 = 0;
+	}
+}
+
+static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
+				 int optlen)
+{
+	struct tcp_md5sig cmd;
+	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
+	u8 *newkey;
+
+	if (optlen < sizeof(cmd))
+		return -EINVAL;
+
+	if (copy_from_user(&cmd, optval, sizeof(cmd)))
+		return -EFAULT;
+
+	if (sin->sin_family != AF_INET)
+		return -EINVAL;
+
+	if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
+		if (!tcp_sk(sk)->md5sig_info)
+			return -ENOENT;
+		return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
+	}
+
+	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
+		return -EINVAL;
+
+	if (!tcp_sk(sk)->md5sig_info) {
+		struct tcp_sock *tp = tcp_sk(sk);
+		struct tcp_md5sig_info *p;
+
+		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
+		if (!p)
+			return -EINVAL;
+
+		tp->md5sig_info = p;
+
+	}
+
+	newkey = kmalloc(cmd.tcpm_keylen, GFP_KERNEL);
+	if (!newkey)
+		return -ENOMEM;
+	memcpy(newkey, cmd.tcpm_key, cmd.tcpm_keylen);
+	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
+				 newkey, cmd.tcpm_keylen);
+}
+
+static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+				   __be32 saddr, __be32 daddr,
+				   struct tcphdr *th, int protocol,
+				   int tcplen)
+{
+	struct scatterlist sg[4];
+	__u16 data_len;
+	int block = 0;
+#ifdef CONFIG_TCP_MD5SIG_DEBUG
+	int i;
+#endif
+	__u16 old_checksum;
+	struct tcp_md5sig_pool *hp;
+	struct tcp4_pseudohdr *bp;
+	struct hash_desc *desc;
+	int err;
+	unsigned int nbytes = 0;
+
+	/*
+	 * Okay, so RFC2385 is turned on for this connection,
+	 * so we need to generate the MD5 hash for the packet now.
+	 */
+
+	hp = tcp_get_md5sig_pool();
+	if (!hp)
+		goto clear_hash_noput;
+
+	bp = &hp->md5_blk.ip4;
+	desc = &hp->md5_desc;
+
+	/*
+	 * 1. the TCP pseudo-header (in the order: source IP address,
+	 * destination IP address, zero-padded protocol number, and
+	 * segment length)
+	 */
+	bp->saddr = saddr;
+	bp->daddr = daddr;
+	bp->pad = 0;
+	bp->protocol = protocol;
+	bp->len = htons(tcplen);
+	sg_set_buf(&sg[block++], bp, sizeof(*bp));
+	nbytes += sizeof(*bp);
+
+#ifdef CONFIG_TCP_MD5SIG_DEBUG
+	printk("Calculating hash for: ");
+	for (i = 0; i < sizeof(*bp); i++)
+		printk("%02x ", (unsigned int)((unsigned char *)bp)[i]);
+	printk(" ");
+#endif
+
+	/* 2. the TCP header, excluding options, and assuming a
+	 * checksum of zero.
+	 */
+	old_checksum = th->check;
+	th->check = 0;
+	sg_set_buf(&sg[block++], th, sizeof(struct tcphdr));
+	nbytes += sizeof(struct tcphdr);
+#ifdef CONFIG_TCP_MD5SIG_DEBUG
+	for (i = 0; i < sizeof(struct tcphdr); i++)
+		printk(" %02x", (unsigned int)((unsigned char *)th)[i]);
+#endif
+	/* 3. the TCP segment data (if any) */
+	data_len = tcplen - (th->doff << 2);
+	if (data_len > 0) {
+		unsigned char *data = (unsigned char *)th + (th->doff << 2);
+		sg_set_buf(&sg[block++], data, data_len);
+		nbytes += data_len;
+	}
+
+	/* 4. an independently-specified key or password, known to both
+	 * TCPs and presumably connection-specific
+	 */
+	sg_set_buf(&sg[block++], key->key, key->keylen);
+	nbytes += key->keylen;
+
+#ifdef CONFIG_TCP_MD5SIG_DEBUG
+	printk(" and password: ");
+	for (i = 0; i < key->keylen; i++)
+		printk("%02x ", (unsigned int)key->key[i]);
+#endif
+
+	/* Now store the Hash into the packet */
+	err = crypto_hash_init(desc);
+	if (err)
+		goto clear_hash;
+	err = crypto_hash_update(desc, sg, nbytes);
+	if (err)
+		goto clear_hash;
+	err = crypto_hash_final(desc, md5_hash);
+	if (err)
+		goto clear_hash;
+
+	/* Reset header, and free up the crypto */
+	tcp_put_md5sig_pool();
+	th->check = old_checksum;
+
+out:
+#ifdef CONFIG_TCP_MD5SIG_DEBUG
+	printk(" result:");
+	for (i = 0; i < 16; i++)
+		printk(" %02x", (unsigned int)(((u8 *)md5_hash)[i]));
+	printk("\n");
+#endif
+	return 0;
+clear_hash:
+	tcp_put_md5sig_pool();
+clear_hash_noput:
+	memset(md5_hash, 0, 16);
+	goto out;
+}
+
+int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+			 struct sock *sk,
+			 struct dst_entry *dst,
+			 struct request_sock *req,
+			 struct tcphdr *th, int protocol,
+			 int tcplen)
+{
+	__be32 saddr, daddr;
+
+	if (sk) {
+		saddr = inet_sk(sk)->saddr;
+		daddr = inet_sk(sk)->daddr;
+	} else {
+		struct rtable *rt = (struct rtable *)dst;
+		BUG_ON(!rt);
+		saddr = rt->rt_src;
+		daddr = rt->rt_dst;
+	}
+	return tcp_v4_do_calc_md5_hash(md5_hash, key,
+				       saddr, daddr,
+				       th, protocol, tcplen);
+}
+
+EXPORT_SYMBOL(tcp_v4_calc_md5_hash);
+
+static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
+{
+	/*
+	 * This gets called for each TCP segment that arrives
+	 * so we want to be efficient.
+	 * We have 3 drop cases:
+	 * o No MD5 hash and one expected.
+	 * o MD5 hash and we're not expecting one.
+	 * o MD5 hash and it's wrong.
+	 */
+	__u8 *hash_location = NULL;
+	struct tcp_md5sig_key *hash_expected;
+	struct iphdr *iph = skb->nh.iph;
+	struct tcphdr *th = skb->h.th;
+	int length = (th->doff << 2) - sizeof(struct tcphdr);
+	int genhash;
+	unsigned char *ptr;
+	unsigned char newhash[16];
+
+	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
+
+	/*
+	 * If the TCP option length is less than the TCP_MD5SIG
+	 * option length, then we can shortcut
+	 */
+	if (length < TCPOLEN_MD5SIG) {
+		if (hash_expected)
+			return 1;
+		else
+			return 0;
+	}
+
+	/* Okay, we can't shortcut - we have to grub through the options */
+	ptr = (unsigned char *)(th + 1);
+	while (length > 0) {
+		int opcode = *ptr++;
+		int opsize;
+
+		switch (opcode) {
+		case TCPOPT_EOL:
+			goto done_opts;
+		case TCPOPT_NOP:
+			length--;
+			continue;
+		default:
+			opsize = *ptr++;
+			if (opsize < 2)
+				goto done_opts;
+			if (opsize > length)
+				goto done_opts;
+
+			if (opcode == TCPOPT_MD5SIG) {
+				hash_location = ptr;
+				goto done_opts;
+			}
+		}
+		ptr += opsize-2;
+		length -= opsize;
+	}
+done_opts:
+	/* We've parsed the options - do we have a hash? */
+	if (!hash_expected && !hash_location)
+		return 0;
+
+	if (hash_expected && !hash_location) {
+		if (net_ratelimit()) {
+			printk(KERN_INFO "MD5 Hash expected but NOT found "
+			       "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
+			       NIPQUAD(iph->saddr), ntohs(th->source),
+			       NIPQUAD(iph->daddr), ntohs(th->dest));
+		}
+		return 1;
+	}
+
+	if (!hash_expected && hash_location) {
+		if (net_ratelimit()) {
+			printk(KERN_INFO "MD5 Hash NOT expected but found "
+			       "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
+			       NIPQUAD(iph->saddr), ntohs(th->source),
+			       NIPQUAD(iph->daddr), ntohs(th->dest));
+		}
+		return 1;
+	}
+
+	/* Okay, so this is hash_expected and hash_location -
+	 * so we need to calculate the checksum.
+	 */
+	genhash = tcp_v4_do_calc_md5_hash(newhash,
+					  hash_expected,
+					  iph->saddr, iph->daddr,
+					  th, sk->sk_protocol,
+					  skb->len);
+
+	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
+		if (net_ratelimit()) {
+			printk(KERN_INFO "MD5 Hash failed for "
+			       "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)%s\n",
+			       NIPQUAD(iph->saddr), ntohs(th->source),
+			       NIPQUAD(iph->daddr), ntohs(th->dest),
+			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
+#ifdef CONFIG_TCP_MD5SIG_DEBUG
+			do {
+				int i;
+				printk("Received: ");
+				for (i = 0; i < 16; i++)
+					printk("%02x ", 0xff & (int)hash_location[i]);
+				printk("\n");
+				printk("Calculated: ");
+				for (i = 0; i < 16; i++)
+					printk("%02x ", 0xff & (int)newhash[i]);
+				printk("\n");
+			} while (0);
+#endif
+		}
+		return 1;
+	}
+	return 0;
+}
+
+#endif
+
 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
 	.family = PF_INET,
 	.obj_size = sizeof(struct tcp_request_sock),
@@ -723,9 +1265,16 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = {
 	.send_reset = tcp_v4_send_reset,
 };
 
+struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
+#ifdef CONFIG_TCP_MD5SIG
+	.md5_lookup = tcp_v4_reqsk_md5_lookup,
+#endif
+};
+
 static struct timewait_sock_ops tcp_timewait_sock_ops = {
 	.twsk_obj_size = sizeof(struct tcp_timewait_sock),
 	.twsk_unique = tcp_twsk_unique,
+	.twsk_destructor = tcp_twsk_destructor,
 };
 
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
@@ -773,6 +1322,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	if (!req)
 		goto drop;
 
+#ifdef CONFIG_TCP_MD5SIG
+	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
+#endif
+
 	tcp_clear_options(&tmp_opt);
 	tmp_opt.mss_clamp = 536;
 	tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss;
@@ -891,6 +1444,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	struct inet_sock *newinet;
 	struct tcp_sock *newtp;
 	struct sock *newsk;
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_md5sig_key *key;
+#endif
 
 	if (sk_acceptq_is_full(sk))
 		goto exit_overflow;
@@ -925,6 +1481,24 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
 	tcp_initialize_rcv_mss(newsk);
 
+#ifdef CONFIG_TCP_MD5SIG
+	/* Copy over the MD5 key from the original socket */
+	if ((key = tcp_v4_md5_do_lookup(sk, newinet->daddr)) != NULL) {
+		/*
+		 * We're using one, so create a matching key
+		 * on the newsk structure. If we fail to get
+		 * memory, then we end up not copying the key
+		 * across. Shucks.
+		 */
+		char *newkey = kmalloc(key->keylen, GFP_ATOMIC);
+		if (newkey) {
+			memcpy(newkey, key->key, key->keylen);
+			tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr,
+					  newkey, key->keylen);
+		}
+	}
+#endif
+
 	__inet_hash(&tcp_hashinfo, newsk, 0);
 	__inet_inherit_port(&tcp_hashinfo, sk, newsk);
 
@@ -1000,10 +1574,24 @@ static int tcp_v4_checksum_init(struct sk_buff *skb)
  */
 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 {
+	struct sock *rsk;
+#ifdef CONFIG_TCP_MD5SIG
+	/*
+	 * We really want to reject the packet as early as possible
+	 * if:
+	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
+	 *  o There is an MD5 option and we're not expecting one
+	 */
+	if (tcp_v4_inbound_md5_hash(sk, skb))
+		goto discard;
+#endif
+
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
 		TCP_CHECK_TIMER(sk);
-		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
+		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) {
+			rsk = sk;
 			goto reset;
+		}
 		TCP_CHECK_TIMER(sk);
 		return 0;
 	}
@@ -1017,20 +1605,24 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 			goto discard;
 
 		if (nsk != sk) {
-			if (tcp_child_process(sk, nsk, skb))
+			if (tcp_child_process(sk, nsk, skb)) {
+				rsk = nsk;
 				goto reset;
+			}
 			return 0;
 		}
 	}
 
 	TCP_CHECK_TIMER(sk);
-	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
+	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) {
+		rsk = sk;
 		goto reset;
+	}
 	TCP_CHECK_TIMER(sk);
 	return 0;
 
 reset:
-	tcp_v4_send_reset(skb);
+	tcp_v4_send_reset(rsk, skb);
 discard:
 	kfree_skb(skb);
 	/* Be careful here. If this function gets more complicated and
@@ -1139,7 +1731,7 @@ no_tcp_socket:
 bad_packet:
 		TCP_INC_STATS_BH(TCP_MIB_INERRS);
 	} else {
-		tcp_v4_send_reset(skb);
+		tcp_v4_send_reset(NULL, skb);
 	}
 
 discard_it:
@@ -1262,6 +1854,15 @@ struct inet_connection_sock_af_ops ipv4_specific = {
 #endif
 };
 
+struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
+#ifdef CONFIG_TCP_MD5SIG
+	.md5_lookup	= tcp_v4_md5_lookup,
+	.calc_md5_hash	= tcp_v4_calc_md5_hash,
+	.md5_add	= tcp_v4_md5_add_func,
+	.md5_parse	= tcp_v4_parse_md5_keys,
+#endif
+};
+
 /* NOTE: A lot of things set to zero explicitly by call to
  *       sk_alloc() so need not be done here.
  */
@@ -1301,6 +1902,9 @@ static int tcp_v4_init_sock(struct sock *sk)
 
 	icsk->icsk_af_ops = &ipv4_specific;
 	icsk->icsk_sync_mss = tcp_sync_mss;
+#ifdef CONFIG_TCP_MD5SIG
+	tp->af_specific = &tcp_sock_ipv4_specific;
+#endif
 
 	sk->sk_sndbuf = sysctl_tcp_wmem[1];
 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
@@ -1324,6 +1928,15 @@ int tcp_v4_destroy_sock(struct sock *sk)
 	/* Cleans up our, hopefully empty, out_of_order_queue. */
 	__skb_queue_purge(&tp->out_of_order_queue);
 
+#ifdef CONFIG_TCP_MD5SIG
+	/* Clean up the MD5 key list, if any */
+	if (tp->md5sig_info) {
+		tcp_v4_clear_md5_list(sk);
+		kfree(tp->md5sig_info);
+		tp->md5sig_info = NULL;
+	}
+#endif
+
 #ifdef CONFIG_NET_DMA
 	/* Cleans up our sk_async_wait_queue */
 	__skb_queue_purge(&sk->sk_async_wait_queue);
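tcp_v4_do_calc_md5_hash() above feeds MD5 with exactly the four parts RFC2385 prescribes: the TCP pseudo-header, the TCP header with its checksum zeroed and options excluded, the segment payload, and the shared key. The same digest can be reproduced in userspace; the sketch below is illustrative only (it borrows OpenSSL's MD5_* API and mirrors the patch's tcp4_pseudohdr layout, none of which is kernel code):

#include <stdint.h>
#include <stddef.h>
#include <netinet/tcp.h>
#include <openssl/md5.h>

struct tcp4_pseudohdr_ex {		/* mirrors the kernel's tcp4_pseudohdr */
	uint32_t saddr;			/* source address (network order) */
	uint32_t daddr;			/* destination address */
	uint8_t  pad;			/* always zero */
	uint8_t  protocol;		/* IPPROTO_TCP */
	uint16_t len;			/* htons(header + payload length) */
} __attribute__((packed));

static void rfc2385_digest(struct tcp4_pseudohdr_ex *bp, struct tcphdr *th,
			   const uint8_t *payload, size_t payload_len,
			   const uint8_t *key, size_t keylen,
			   uint8_t out[MD5_DIGEST_LENGTH])
{
	uint16_t old_check = th->check;
	MD5_CTX ctx;

	MD5_Init(&ctx);
	MD5_Update(&ctx, bp, sizeof(*bp));	/* 1. pseudo-header */
	th->check = 0;				/* 2. TCP header, checksum
						 *    zeroed, options excluded */
	MD5_Update(&ctx, th, sizeof(*th));
	if (payload_len)
		MD5_Update(&ctx, payload, payload_len);	/* 3. segment data */
	MD5_Update(&ctx, key, keylen);			/* 4. the shared key */
	MD5_Final(out, &ctx);
	th->check = old_check;
}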
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 0163d9826907..ac55d8892cf1 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -306,6 +306,28 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		tw->tw_ipv6only = np->ipv6only;
 	}
 #endif
+
+#ifdef CONFIG_TCP_MD5SIG
+	/*
+	 * The timewait bucket does not have the key DB from the
+	 * sock structure. We just make a quick copy of the
+	 * md5 key being used (if indeed we are using one)
+	 * so the timewait ack generating code has the key.
+	 */
+	do {
+		struct tcp_md5sig_key *key;
+		memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key));
+		tcptw->tw_md5_keylen = 0;
+		key = tp->af_specific->md5_lookup(sk, sk);
+		if (key != NULL) {
+			memcpy(&tcptw->tw_md5_key, key->key, key->keylen);
+			tcptw->tw_md5_keylen = key->keylen;
+			if (tcp_alloc_md5sig_pool() == NULL)
+				BUG();
+		}
+	} while (0);
+#endif
+
 	/* Linkage updates. */
 	__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 
@@ -337,6 +359,17 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 	tcp_done(sk);
 }
 
+void tcp_twsk_destructor(struct sock *sk)
+{
+	struct tcp_timewait_sock *twsk = tcp_twsk(sk);
+#ifdef CONFIG_TCP_MD5SIG
+	if (twsk->tw_md5_keylen)
+		tcp_put_md5sig_pool();
+#endif
+}
+
+EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
+
 /* This is not only more efficient than what we used to do, it eliminates
  * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
  *
@@ -435,6 +468,11 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 		newtp->rx_opt.ts_recent_stamp = 0;
 		newtp->tcp_header_len = sizeof(struct tcphdr);
 	}
+#ifdef CONFIG_TCP_MD5SIG
+	newtp->md5sig_info = NULL;	/*XXX*/
+	if (newtp->af_specific->md5_lookup(sk, newsk))
+		newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
+#endif
 	if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
 		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
 	newtp->rx_opt.mss_clamp = req->mss;
@@ -617,6 +655,30 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 				  req, NULL);
 	if (child == NULL)
 		goto listen_overflow;
+#ifdef CONFIG_TCP_MD5SIG
+	else {
+		/* Copy over the MD5 key from the original socket */
+		struct tcp_md5sig_key *key;
+		struct tcp_sock *tp = tcp_sk(sk);
+		key = tp->af_specific->md5_lookup(sk, child);
+		if (key != NULL) {
+			/*
+			 * We're using one, so create a matching key on the
+			 * newsk structure. If we fail to get memory then we
+			 * end up not copying the key across. Shucks.
+			 */
+			char *newkey = kmalloc(key->keylen, GFP_ATOMIC);
+			if (newkey) {
+				if (!tcp_alloc_md5sig_pool())
+					BUG();
+				memcpy(newkey, key->key, key->keylen);
+				tp->af_specific->md5_add(child, child,
+							 newkey,
+							 key->keylen);
+			}
+		}
+	}
+#endif
 
 	inet_csk_reqsk_queue_unlink(sk, req, prev);
 	inet_csk_reqsk_queue_removed(sk, req);
@@ -633,7 +695,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 embryonic_reset:
 	NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
 	if (!(flg & TCP_FLAG_RST))
-		req->rsk_ops->send_reset(skb);
+		req->rsk_ops->send_reset(sk, skb);
 
 	inet_csk_reqsk_queue_drop(sk, req, prev);
 	return NULL;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 6a8581ab9a23..32c1a972fa31 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -270,7 +270,7 @@ static u16 tcp_select_window(struct sock *sk)
 }
 
 static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
-					 __u32 tstamp)
+					 __u32 tstamp, __u8 **md5_hash)
 {
 	if (tp->rx_opt.tstamp_ok) {
 		*ptr++ = htonl((TCPOPT_NOP << 24) |
@@ -298,16 +298,29 @@ static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
 			tp->rx_opt.eff_sacks--;
 		}
 	}
+#ifdef CONFIG_TCP_MD5SIG
+	if (md5_hash) {
+		*ptr++ = htonl((TCPOPT_NOP << 24) |
+			       (TCPOPT_NOP << 16) |
+			       (TCPOPT_MD5SIG << 8) |
+			       TCPOLEN_MD5SIG);
+		*md5_hash = (__u8 *)ptr;
+	}
+#endif
 }
 
 /* Construct a tcp options header for a SYN or SYN_ACK packet.
  * If this is ever changed make sure to change the definition of
  * MAX_SYN_SIZE to match the new maximum number of options that you
  * can generate.
+ *
+ * Note - that with the RFC2385 TCP option, we make room for the
+ * 16 byte MD5 hash. This will be filled in later, so the pointer for the
+ * location to be filled is passed back up.
  */
 static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
 				  int offer_wscale, int wscale, __u32 tstamp,
-				  __u32 ts_recent)
+				  __u32 ts_recent, __u8 **md5_hash)
 {
 	/* We always get an MSS option.
 	 * The option bytes which will be seen in normal data
@@ -346,6 +359,20 @@ static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
 			       (TCPOPT_WINDOW << 16) |
 			       (TCPOLEN_WINDOW << 8) |
 			       (wscale));
+#ifdef CONFIG_TCP_MD5SIG
+	/*
+	 * If MD5 is enabled, then we set the option, and include the size
+	 * (always 18). The actual MD5 hash is added just before the
+	 * packet is sent.
+	 */
+	if (md5_hash) {
+		*ptr++ = htonl((TCPOPT_NOP << 24) |
+			       (TCPOPT_NOP << 16) |
+			       (TCPOPT_MD5SIG << 8) |
+			       TCPOLEN_MD5SIG);
+		*md5_hash = (__u8 *)ptr;
+	}
+#endif
 }
 
 /* This routine actually transmits TCP packets queued in by
@@ -366,6 +393,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	struct tcp_sock *tp;
 	struct tcp_skb_cb *tcb;
 	int tcp_header_size;
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_md5sig_key *md5;
+	__u8 *md5_hash_location;
+#endif
 	struct tcphdr *th;
 	int sysctl_flags;
 	int err;
@@ -424,6 +455,16 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	if (tcp_packets_in_flight(tp) == 0)
 		tcp_ca_event(sk, CA_EVENT_TX_START);
 
+#ifdef CONFIG_TCP_MD5SIG
+	/*
+	 * Are we doing MD5 on this segment? If so - make
+	 * room for it.
+	 */
+	md5 = tp->af_specific->md5_lookup(sk, sk);
+	if (md5)
+		tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
+#endif
+
 	th = (struct tcphdr *) skb_push(skb, tcp_header_size);
 	skb->h.th = th;
 
@@ -460,13 +501,34 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 				      (sysctl_flags & SYSCTL_FLAG_WSCALE),
 				      tp->rx_opt.rcv_wscale,
 				      tcb->when,
-				      tp->rx_opt.ts_recent);
+				      tp->rx_opt.ts_recent,
+
+#ifdef CONFIG_TCP_MD5SIG
+				      md5 ? &md5_hash_location :
+#endif
+				      NULL);
 	} else {
 		tcp_build_and_update_options((__be32 *)(th + 1),
-					     tp, tcb->when);
+					     tp, tcb->when,
+#ifdef CONFIG_TCP_MD5SIG
+					     md5 ? &md5_hash_location :
+#endif
+					     NULL);
 		TCP_ECN_send(sk, tp, skb, tcp_header_size);
 	}
 
+#ifdef CONFIG_TCP_MD5SIG
+	/* Calculate the MD5 hash, as we have all we need now */
+	if (md5) {
+		tp->af_specific->calc_md5_hash(md5_hash_location,
+					       md5,
+					       sk, NULL, NULL,
+					       skb->h.th,
+					       sk->sk_protocol,
+					       skb->len);
+	}
+#endif
+
 	icsk->icsk_af_ops->send_check(sk, skb->len, skb);
 
 	if (likely(tcb->flags & TCPCB_FLAG_ACK))
@@ -840,6 +902,11 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
 		mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
 			    (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));
 
+#ifdef CONFIG_TCP_MD5SIG
+	if (tp->af_specific->md5_lookup(sk, sk))
+		mss_now -= TCPOLEN_MD5SIG_ALIGNED;
+#endif
+
 	xmit_size_goal = mss_now;
 
 	if (doing_tso) {
@@ -2033,6 +2100,10 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	struct tcphdr *th;
 	int tcp_header_size;
 	struct sk_buff *skb;
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_md5sig_key *md5;
+	__u8 *md5_hash_location;
+#endif
 
 	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
 	if (skb == NULL)
@@ -2048,6 +2119,13 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 			   (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
 			   /* SACK_PERM is in the place of NOP NOP of TS */
 			   ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));
+
+#ifdef CONFIG_TCP_MD5SIG
+	/* Are we doing MD5 on this segment? If so - make room for it */
+	md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
+	if (md5)
+		tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
+#endif
 	skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);
 
 	memset(th, 0, sizeof(struct tcphdr));
@@ -2085,11 +2163,29 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
 			      ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
 			      TCP_SKB_CB(skb)->when,
-			      req->ts_recent);
+			      req->ts_recent,
+			      (
+#ifdef CONFIG_TCP_MD5SIG
+			       md5 ? &md5_hash_location :
+#endif
+			       NULL)
+			      );
 
 	skb->csum = 0;
 	th->doff = (tcp_header_size >> 2);
 	TCP_INC_STATS(TCP_MIB_OUTSEGS);
+
+#ifdef CONFIG_TCP_MD5SIG
+	/* Okay, we have all we need - do the md5 hash if needed */
+	if (md5) {
+		tp->af_specific->calc_md5_hash(md5_hash_location,
+					       md5,
+					       NULL, dst, req,
+					       skb->h.th, sk->sk_protocol,
+					       skb->len);
+	}
+#endif
+
 	return skb;
 }
 
@@ -2108,6 +2204,11 @@ static void tcp_connect_init(struct sock *sk)
 	tp->tcp_header_len = sizeof(struct tcphdr) +
 		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
 
+#ifdef CONFIG_TCP_MD5SIG
+	if (tp->af_specific->md5_lookup(sk, sk) != NULL)
+		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
+#endif
+
 	/* If user gave his TCP_MAXSEG, record it to clamp */
 	if (tp->rx_opt.user_mss)
 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
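One sizing note for the transmit path above: TCPOLEN_MD5SIG is 18 (kind, length, 16-byte digest) and TCPOLEN_MD5SIG_ALIGNED rounds that to 20 by prepending two NOPs, which is the NOP/NOP/MD5SIG word both option builders emit and the reason every header-length adjustment in this file uses the aligned constant. The struct below exists only to illustrate the resulting wire layout (it appears nowhere in the patch):

struct tcp_md5sig_opt_layout {		/* TCPOLEN_MD5SIG_ALIGNED == 20 bytes */
	unsigned char nop1;		/* TCPOPT_NOP (1), alignment padding */
	unsigned char nop2;		/* TCPOPT_NOP (1), alignment padding */
	unsigned char kind;		/* TCPOPT_MD5SIG (19) */
	unsigned char len;		/* TCPOLEN_MD5SIG (18) */
	unsigned char digest[16];	/* *md5_hash points here; filled by
					 * calc_md5_hash() just before send */
};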
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 9a88395a7629..663d1d238014 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -66,10 +66,13 @@
66#include <linux/proc_fs.h> 66#include <linux/proc_fs.h>
67#include <linux/seq_file.h> 67#include <linux/seq_file.h>
68 68
69#include <linux/crypto.h>
70#include <linux/scatterlist.h>
71
69/* Socket used for sending RSTs and ACKs */ 72/* Socket used for sending RSTs and ACKs */
70static struct socket *tcp6_socket; 73static struct socket *tcp6_socket;
71 74
72static void tcp_v6_send_reset(struct sk_buff *skb); 75static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
73static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req); 76static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
74static void tcp_v6_send_check(struct sock *sk, int len, 77static void tcp_v6_send_check(struct sock *sk, int len,
75 struct sk_buff *skb); 78 struct sk_buff *skb);
@@ -78,6 +81,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
78 81
79static struct inet_connection_sock_af_ops ipv6_mapped; 82static struct inet_connection_sock_af_ops ipv6_mapped;
80static struct inet_connection_sock_af_ops ipv6_specific; 83static struct inet_connection_sock_af_ops ipv6_specific;
84static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
85static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
81 86
82static int tcp_v6_get_port(struct sock *sk, unsigned short snum) 87static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
83{ 88{
@@ -208,6 +213,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
208 213
209 icsk->icsk_af_ops = &ipv6_mapped; 214 icsk->icsk_af_ops = &ipv6_mapped;
210 sk->sk_backlog_rcv = tcp_v4_do_rcv; 215 sk->sk_backlog_rcv = tcp_v4_do_rcv;
216#ifdef CONFIG_TCP_MD5SIG
217 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
218#endif
211 219
212 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin)); 220 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
213 221
@@ -215,6 +223,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
215 icsk->icsk_ext_hdr_len = exthdrlen; 223 icsk->icsk_ext_hdr_len = exthdrlen;
216 icsk->icsk_af_ops = &ipv6_specific; 224 icsk->icsk_af_ops = &ipv6_specific;
217 sk->sk_backlog_rcv = tcp_v6_do_rcv; 225 sk->sk_backlog_rcv = tcp_v6_do_rcv;
226#ifdef CONFIG_TCP_MD5SIG
227 tp->af_specific = &tcp_sock_ipv6_specific;
228#endif
218 goto failure; 229 goto failure;
219 } else { 230 } else {
220 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF), 231 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
@@ -518,6 +529,396 @@ static void tcp_v6_reqsk_destructor(struct request_sock *req)
518 kfree_skb(inet6_rsk(req)->pktopts); 529 kfree_skb(inet6_rsk(req)->pktopts);
519} 530}
520 531
532#ifdef CONFIG_TCP_MD5SIG
533static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
534 struct in6_addr *addr)
535{
536 struct tcp_sock *tp = tcp_sk(sk);
537 int i;
538
539 BUG_ON(tp == NULL);
540
541 if (!tp->md5sig_info || !tp->md5sig_info->entries6)
542 return NULL;
543
544 for (i = 0; i < tp->md5sig_info->entries6; i++) {
545 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
546 return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i];
547 }
548 return NULL;
549}
550
551static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
552 struct sock *addr_sk)
553{
554 return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
555}
556
557static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
558 struct request_sock *req)
559{
560 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
561}
562
563static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
564 char *newkey, u8 newkeylen)
565{
566 /* Add key to the list */
567 struct tcp6_md5sig_key *key;
568 struct tcp_sock *tp = tcp_sk(sk);
569 struct tcp6_md5sig_key *keys;
570
571 key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer);
572 if (key) {
573 /* modify existing entry - just update that one */
574 kfree(key->key);
575 key->key = newkey;
576 key->keylen = newkeylen;
577 } else {
578 /* reallocate new list if current one is full. */
579 if (!tp->md5sig_info) {
580 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
581 if (!tp->md5sig_info) {
582 kfree(newkey);
583 return -ENOMEM;
584 }
585 }
586 tcp_alloc_md5sig_pool();
587 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
588 keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
589 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
590
591 if (!keys) {
592 tcp_free_md5sig_pool();
593 kfree(newkey);
594 return -ENOMEM;
595 }
596
597 if (tp->md5sig_info->entries6)
598 memmove(keys, tp->md5sig_info->keys6,
599 (sizeof (tp->md5sig_info->keys6[0]) *
600 tp->md5sig_info->entries6));
601
602 kfree(tp->md5sig_info->keys6);
603 tp->md5sig_info->keys6 = keys;
604 tp->md5sig_info->alloced6++;
605 }
606
607 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
608 peer);
609 tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey;
610 tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen;
611
612 tp->md5sig_info->entries6++;
613 }
614 return 0;
615}
616
617static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
618 u8 *newkey, __u8 newkeylen)
619{
620 return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
621 newkey, newkeylen);
622}
623
624static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
625{
626 struct tcp_sock *tp = tcp_sk(sk);
627 int i;
628
629 for (i = 0; i < tp->md5sig_info->entries6; i++) {
630 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
631 /* Free the key */
632 kfree(tp->md5sig_info->keys6[i].key);
633 tp->md5sig_info->entries6--;
634
635 if (tp->md5sig_info->entries6 == 0) {
636 kfree(tp->md5sig_info->keys6);
637 tp->md5sig_info->keys6 = NULL;
638
639 tcp_free_md5sig_pool();
640
641 return 0;
642 } else {
643 /* shrink the database */
644 if (tp->md5sig_info->entries6 != i)
645 memmove(&tp->md5sig_info->keys6[i],
646 &tp->md5sig_info->keys6[i+1],
647 (tp->md5sig_info->entries6 - i)
648 * sizeof (tp->md5sig_info->keys6[0]));
649 }
650 }
651 }
652 return -ENOENT;
653}
654
655static void tcp_v6_clear_md5_list(struct sock *sk)
656{
657 struct tcp_sock *tp = tcp_sk(sk);
658 int i;
659
660 if (tp->md5sig_info->entries6) {
661 for (i = 0; i < tp->md5sig_info->entries6; i++)
662 kfree(tp->md5sig_info->keys6[i].key);
663 tp->md5sig_info->entries6 = 0;
664 tcp_free_md5sig_pool();
665 }
666
667 kfree(tp->md5sig_info->keys6);
668 tp->md5sig_info->keys6 = NULL;
669 tp->md5sig_info->alloced6 = 0;
670
671 if (tp->md5sig_info->entries4) {
672 for (i = 0; i < tp->md5sig_info->entries4; i++)
673 kfree(tp->md5sig_info->keys4[i].key);
674 tp->md5sig_info->entries4 = 0;
675 tcp_free_md5sig_pool();
676 }
677
678 kfree(tp->md5sig_info->keys4);
679 tp->md5sig_info->keys4 = NULL;
680 tp->md5sig_info->alloced4 = 0;
681}
682
683static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
684 int optlen)
685{
686 struct tcp_md5sig cmd;
687 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
688 u8 *newkey;
689
690 if (optlen < sizeof(cmd))
691 return -EINVAL;
692
693 if (copy_from_user(&cmd, optval, sizeof(cmd)))
694 return -EFAULT;
695
696 if (sin6->sin6_family != AF_INET6)
697 return -EINVAL;
698
699 if (!cmd.tcpm_keylen) {
700 if (!tcp_sk(sk)->md5sig_info)
701 return -ENOENT;
702 if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED)
703 return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
704 return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
705 }
706
707 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
708 return -EINVAL;
709
710 if (!tcp_sk(sk)->md5sig_info) {
711 struct tcp_sock *tp = tcp_sk(sk);
712 struct tcp_md5sig_info *p;
713
714 p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
715 if (!p)
716 return -ENOMEM;
717
718 tp->md5sig_info = p;
719 }
720
721 newkey = kmalloc(cmd.tcpm_keylen, GFP_KERNEL);
722 if (!newkey)
723 return -ENOMEM;
724 memcpy(newkey, cmd.tcpm_key, cmd.tcpm_keylen);
725 if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED) {
726 return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
727 newkey, cmd.tcpm_keylen);
728 }
729 return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
730}
731
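tcp_v6_parse_md5_keys() is the kernel half of the TCP_MD5SIG socket option introduced by this patch. From userspace, installing a key for a single IPv6 peer would look roughly like the sketch below; error handling is trimmed, and it assumes headers that export TCP_MD5SIG and struct tcp_md5sig (both added to include/linux/tcp.h by this patch), whose tcpm_* fields match the cmd.tcpm_* accesses above.

	#include <string.h>
	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <sys/socket.h>

	/* Attach an RFC 2385 key for one peer.  Passing tcpm_keylen == 0
	 * instead deletes the key, matching the !cmd.tcpm_keylen branch
	 * in tcp_v6_parse_md5_keys() above.
	 */
	static int set_tcp_md5_key(int fd, const struct sockaddr_in6 *peer,
				   const void *secret, unsigned int len)
	{
		struct tcp_md5sig md5;

		memset(&md5, 0, sizeof(md5));
		memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
		md5.tcpm_keylen = len;	/* at most TCP_MD5SIG_MAXKEYLEN */
		memcpy(md5.tcpm_key, secret, len);

		return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG,
				  &md5, sizeof(md5));
	}

Setting the option on the listening socket is the expected pattern: an accepted child then inherits the key via the copy in tcp_v6_syn_recv_sock() further down.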
732static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
733 struct in6_addr *saddr,
734 struct in6_addr *daddr,
735 struct tcphdr *th, int protocol,
736 int tcplen)
737{
738 struct scatterlist sg[4];
739 __u16 data_len;
740 int block = 0;
741 __u16 cksum;
742 struct tcp_md5sig_pool *hp;
743 struct tcp6_pseudohdr *bp;
744 struct hash_desc *desc;
745 int err;
746 unsigned int nbytes = 0;
747
748 hp = tcp_get_md5sig_pool();
749 if (!hp) {
750 printk(KERN_WARNING "%s(): hash pool not found...\n", __FUNCTION__);
751 goto clear_hash_noput;
752 }
753 bp = &hp->md5_blk.ip6;
754 desc = &hp->md5_desc;
755
756 /* 1. TCP pseudo-header (RFC2460) */
757 ipv6_addr_copy(&bp->saddr, saddr);
758 ipv6_addr_copy(&bp->daddr, daddr);
759 bp->len = htonl(tcplen);
760 bp->protocol = htonl(protocol);
761
762 sg_set_buf(&sg[block++], bp, sizeof(*bp));
763 nbytes += sizeof(*bp);
764
765 /* 2. TCP header, excluding options */
766 cksum = th->check;
767 th->check = 0;
768 sg_set_buf(&sg[block++], th, sizeof(*th));
769 nbytes += sizeof(*th);
770
771 /* 3. TCP segment data (if any) */
772 data_len = tcplen - (th->doff << 2);
773 if (data_len > 0) {
774 u8 *data = (u8 *)th + (th->doff << 2);
775 sg_set_buf(&sg[block++], data, data_len);
776 nbytes += data_len;
777 }
778
779 /* 4. shared key */
780 sg_set_buf(&sg[block++], key->key, key->keylen);
781 nbytes += key->keylen;
782
783 /* Now store the hash into the packet */
784 err = crypto_hash_init(desc);
785 if (err) {
786 printk(KERN_WARNING "%s(): hash_init failed\n", __FUNCTION__);
787 goto clear_hash;
788 }
789 err = crypto_hash_update(desc, sg, nbytes);
790 if (err) {
791 printk(KERN_WARNING "%s(): hash_update failed\n", __FUNCTION__);
792 goto clear_hash;
793 }
794 err = crypto_hash_final(desc, md5_hash);
795 if (err) {
796 printk(KERN_WARNING "%s(): hash_final failed\n", __FUNCTION__);
797 goto clear_hash;
798 }
799
800 /* Reset header, and free up the crypto */
801 tcp_put_md5sig_pool();
802 th->check = cksum;
803out:
804 return 0;
805clear_hash:
806 tcp_put_md5sig_pool();
807clear_hash_noput:
808 memset(md5_hash, 0, 16);
809 goto out;
810}
811
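The four scatterlist blocks assembled above give exactly the digest coverage RFC 2385 mandates: pseudo-header, fixed TCP header with the checksum zeroed (options are deliberately excluded), segment payload, then the shared key. The pseudo-header block hashed first is, as assumed here from the htonl() calls above (the real definition lives in include/net/tcp.h), roughly:

	/* RFC 2460-style pseudo-header; 'len' is the TCP length (header
	 * plus payload) and the protocol number is widened to 32 bits.
	 */
	struct tcp6_pseudohdr {
		struct in6_addr	saddr;
		struct in6_addr	daddr;
		__be32		len;
		__be32		protocol;	/* IPPROTO_TCP */
	};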
812static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
813 struct sock *sk,
814 struct dst_entry *dst,
815 struct request_sock *req,
816 struct tcphdr *th, int protocol,
817 int tcplen)
818{
819 struct in6_addr *saddr, *daddr;
820
821 if (sk) {
822 saddr = &inet6_sk(sk)->saddr;
823 daddr = &inet6_sk(sk)->daddr;
824 } else {
825 saddr = &inet6_rsk(req)->loc_addr;
826 daddr = &inet6_rsk(req)->rmt_addr;
827 }
828 return tcp_v6_do_calc_md5_hash(md5_hash, key,
829 saddr, daddr,
830 th, protocol, tcplen);
831}
832
833static int tcp_v6_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
834{
835 __u8 *hash_location = NULL;
836 struct tcp_md5sig_key *hash_expected;
837 struct ipv6hdr *ip6h = skb->nh.ipv6h;
838 struct tcphdr *th = skb->h.th;
839 int length = (th->doff << 2) - sizeof (*th);
840 int genhash;
841 u8 *ptr;
842 u8 newhash[16];
843
844 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
845
846	/* If the options area is too short to hold an MD5 option, short-circuit */
847 if (length < TCPOLEN_MD5SIG)
848 return hash_expected ? 1 : 0;
849
850 /* parse options */
851 ptr = (u8*)(th + 1);
852 while (length > 0) {
853 int opcode = *ptr++;
854 int opsize;
855
856 switch (opcode) {
857 case TCPOPT_EOL:
858 goto done_opts;
859 case TCPOPT_NOP:
860 length--;
861 continue;
862 default:
863 opsize = *ptr++;
864 if (opsize < 2 || opsize > length)
865 goto done_opts;
866 if (opcode == TCPOPT_MD5SIG) {
867 hash_location = ptr;
868 goto done_opts;
869 }
870 }
871 ptr += opsize - 2;
872 length -= opsize;
873 }
874
875done_opts:
876 /* do we have a hash as expected? */
877 if (!hash_expected) {
878 if (!hash_location)
879 return 0;
880 if (net_ratelimit()) {
881 printk(KERN_INFO "MD5 Hash NOT expected but found "
882 "(" NIP6_FMT ", %u)->"
883 "(" NIP6_FMT ", %u)\n",
884 NIP6(ip6h->saddr), ntohs(th->source),
885 NIP6(ip6h->daddr), ntohs(th->dest));
886 }
887 return 1;
888 }
889
890 if (!hash_location) {
891 if (net_ratelimit()) {
892 printk(KERN_INFO "MD5 Hash expected but NOT found "
893 "(" NIP6_FMT ", %u)->"
894 "(" NIP6_FMT ", %u)\n",
895 NIP6(ip6h->saddr), ntohs(th->source),
896 NIP6(ip6h->daddr), ntohs(th->dest));
897 }
898 return 1;
899 }
900
901 /* check the signature */
902 genhash = tcp_v6_do_calc_md5_hash(newhash,
903 hash_expected,
904 &ip6h->saddr, &ip6h->daddr,
905 th, sk->sk_protocol,
906 skb->len);
907 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
908 if (net_ratelimit()) {
909 printk(KERN_INFO "MD5 Hash %s for "
910 "(" NIP6_FMT ", %u)->"
911 "(" NIP6_FMT ", %u)\n",
912 genhash ? "failed" : "mismatch",
913 NIP6(ip6h->saddr), ntohs(th->source),
914 NIP6(ip6h->daddr), ntohs(th->dest));
915 }
916 return 1;
917 }
918 return 0;
919}
920#endif
921
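For reference, the option the parser above is hunting for is laid out as specified by RFC 2385: kind 19 (TCPOPT_MD5SIG), length 18 (TCPOLEN_MD5SIG), then the 16-byte MD5 digest. The senders in this file pad it to a 32-bit boundary with two leading NOPs, which is where the 20-byte TCPOLEN_MD5SIG_ALIGNED used in the RST and ACK paths below comes from:

	/*
	 *  +--------+--------+--------+--------+
	 *  | NOP(1) | NOP(1) | Kind=19| Len=18 |
	 *  +--------+--------+--------+--------+
	 *  |                                   |
	 *  ~       MD5 digest (16 bytes)       ~
	 *  |                                   |
	 *  +--------+--------+--------+--------+
	 */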
521static struct request_sock_ops tcp6_request_sock_ops __read_mostly = { 922static struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
522 .family = AF_INET6, 923 .family = AF_INET6,
523 .obj_size = sizeof(struct tcp6_request_sock), 924 .obj_size = sizeof(struct tcp6_request_sock),
@@ -527,9 +928,16 @@ static struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
527 .send_reset = tcp_v6_send_reset 928 .send_reset = tcp_v6_send_reset
528}; 929};
529 930
931struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
932#ifdef CONFIG_TCP_MD5SIG
933 .md5_lookup = tcp_v6_reqsk_md5_lookup,
934#endif
935};
936
530static struct timewait_sock_ops tcp6_timewait_sock_ops = { 937static struct timewait_sock_ops tcp6_timewait_sock_ops = {
531 .twsk_obj_size = sizeof(struct tcp6_timewait_sock), 938 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
532 .twsk_unique = tcp_twsk_unique, 939 .twsk_unique = tcp_twsk_unique,
940 .twsk_destructor= tcp_twsk_destructor,
533}; 941};
534 942
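The twsk_destructor hook added above pairs with the tw_md5_key/tw_md5_keylen fields that this patch puts into struct tcp_timewait_sock: the peer key has to outlive the full socket so that tcp_v6_send_ack() below can keep signing segments sent on behalf of a TIME-WAIT connection, and tcp_twsk_destructor() (added elsewhere in this patch) releases the MD5 machinery once the timewait sock itself is reaped.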
535static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb) 943static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
@@ -566,11 +974,15 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
566 return 0; 974 return 0;
567} 975}
568 976
569static void tcp_v6_send_reset(struct sk_buff *skb) 977static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
570{ 978{
571 struct tcphdr *th = skb->h.th, *t1; 979 struct tcphdr *th = skb->h.th, *t1;
572 struct sk_buff *buff; 980 struct sk_buff *buff;
573 struct flowi fl; 981 struct flowi fl;
982 int tot_len = sizeof(*th);
983#ifdef CONFIG_TCP_MD5SIG
984 struct tcp_md5sig_key *key;
985#endif
574 986
575 if (th->rst) 987 if (th->rst)
576 return; 988 return;
@@ -578,25 +990,35 @@ static void tcp_v6_send_reset(struct sk_buff *skb)
578 if (!ipv6_unicast_destination(skb)) 990 if (!ipv6_unicast_destination(skb))
579 return; 991 return;
580 992
993#ifdef CONFIG_TCP_MD5SIG
994 if (sk)
995 key = tcp_v6_md5_do_lookup(sk, &skb->nh.ipv6h->daddr);
996 else
997 key = NULL;
998
999 if (key)
1000 tot_len += TCPOLEN_MD5SIG_ALIGNED;
1001#endif
1002
581 /* 1003 /*
582 * We need to grab some memory, and put together an RST, 1004 * We need to grab some memory, and put together an RST,
583 * and then put it into the queue to be sent. 1005 * and then put it into the queue to be sent.
584 */ 1006 */
585 1007
586 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr), 1008 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
587 GFP_ATOMIC); 1009 GFP_ATOMIC);
588 if (buff == NULL) 1010 if (buff == NULL)
589 return; 1011 return;
590 1012
591 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr)); 1013 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
592 1014
593 t1 = (struct tcphdr *) skb_push(buff,sizeof(struct tcphdr)); 1015 t1 = (struct tcphdr *) skb_push(buff, tot_len);
594 1016
595 /* Swap the send and the receive. */ 1017 /* Swap the send and the receive. */
596 memset(t1, 0, sizeof(*t1)); 1018 memset(t1, 0, sizeof(*t1));
597 t1->dest = th->source; 1019 t1->dest = th->source;
598 t1->source = th->dest; 1020 t1->source = th->dest;
599 t1->doff = sizeof(*t1)/4; 1021 t1->doff = tot_len / 4;
600 t1->rst = 1; 1022 t1->rst = 1;
601 1023
602 if(th->ack) { 1024 if(th->ack) {
@@ -607,6 +1029,22 @@ static void tcp_v6_send_reset(struct sk_buff *skb)
607 + skb->len - (th->doff<<2)); 1029 + skb->len - (th->doff<<2));
608 } 1030 }
609 1031
1032#ifdef CONFIG_TCP_MD5SIG
1033 if (key) {
1034 u32 *opt = (u32*)(t1 + 1);
1035 opt[0] = htonl((TCPOPT_NOP << 24) |
1036 (TCPOPT_NOP << 16) |
1037 (TCPOPT_MD5SIG << 8) |
1038 TCPOLEN_MD5SIG);
1039 tcp_v6_do_calc_md5_hash((__u8*)&opt[1],
1040 key,
1041 &skb->nh.ipv6h->daddr,
1042 &skb->nh.ipv6h->saddr,
1043 t1, IPPROTO_TCP,
1044 tot_len);
1045 }
1046#endif
1047
610 buff->csum = csum_partial((char *)t1, sizeof(*t1), 0); 1048 buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
611 1049
612 memset(&fl, 0, sizeof(fl)); 1050 memset(&fl, 0, sizeof(fl));
@@ -637,15 +1075,37 @@ static void tcp_v6_send_reset(struct sk_buff *skb)
637 kfree_skb(buff); 1075 kfree_skb(buff);
638} 1076}
639 1077
640static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts) 1078static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
1079 struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
641{ 1080{
642 struct tcphdr *th = skb->h.th, *t1; 1081 struct tcphdr *th = skb->h.th, *t1;
643 struct sk_buff *buff; 1082 struct sk_buff *buff;
644 struct flowi fl; 1083 struct flowi fl;
645 int tot_len = sizeof(struct tcphdr); 1084 int tot_len = sizeof(struct tcphdr);
1085 u32 *topt;
1086#ifdef CONFIG_TCP_MD5SIG
1087 struct tcp_md5sig_key *key;
1088 struct tcp_md5sig_key tw_key;
1089#endif
1090
1091#ifdef CONFIG_TCP_MD5SIG
1092 if (!tw && skb->sk) {
1093 key = tcp_v6_md5_do_lookup(skb->sk, &skb->nh.ipv6h->daddr);
1094 } else if (tw && tw->tw_md5_keylen) {
1095 tw_key.key = tw->tw_md5_key;
1096 tw_key.keylen = tw->tw_md5_keylen;
1097 key = &tw_key;
1098 } else {
1099 key = NULL;
1100 }
1101#endif
646 1102
647 if (ts) 1103 if (ts)
648 tot_len += TCPOLEN_TSTAMP_ALIGNED; 1104 tot_len += TCPOLEN_TSTAMP_ALIGNED;
1105#ifdef CONFIG_TCP_MD5SIG
1106 if (key)
1107 tot_len += TCPOLEN_MD5SIG_ALIGNED;
1108#endif
649 1109
650 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len, 1110 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
651 GFP_ATOMIC); 1111 GFP_ATOMIC);
@@ -665,15 +1125,29 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
665 t1->ack_seq = htonl(ack); 1125 t1->ack_seq = htonl(ack);
666 t1->ack = 1; 1126 t1->ack = 1;
667 t1->window = htons(win); 1127 t1->window = htons(win);
1128
1129 topt = (u32*)(t1 + 1);
668 1130
669 if (ts) { 1131 if (ts) {
670 u32 *ptr = (u32*)(t1 + 1); 1132 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
671 *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | 1133 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
672 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); 1134 *topt++ = htonl(tcp_time_stamp);
673 *ptr++ = htonl(tcp_time_stamp); 1135 *topt = htonl(ts);
674 *ptr = htonl(ts);
675 } 1136 }
676 1137
1138#ifdef CONFIG_TCP_MD5SIG
1139 if (key) {
1140 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1141 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
1142 tcp_v6_do_calc_md5_hash((__u8 *)topt,
1143 key,
1144 &skb->nh.ipv6h->daddr,
1145 &skb->nh.ipv6h->saddr,
1146 t1, IPPROTO_TCP,
1147 tot_len);
1148 }
1149#endif
1150
677 buff->csum = csum_partial((char *)t1, tot_len, 0); 1151 buff->csum = csum_partial((char *)t1, tot_len, 0);
678 1152
679 memset(&fl, 0, sizeof(fl)); 1153 memset(&fl, 0, sizeof(fl));
@@ -704,9 +1178,9 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
704static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) 1178static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
705{ 1179{
706 struct inet_timewait_sock *tw = inet_twsk(sk); 1180 struct inet_timewait_sock *tw = inet_twsk(sk);
707 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk); 1181 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
708 1182
709 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, 1183 tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
710 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, 1184 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
711 tcptw->tw_ts_recent); 1185 tcptw->tw_ts_recent);
712 1186
@@ -715,7 +1189,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
715 1189
716static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) 1190static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
717{ 1191{
718 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent); 1192 tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
719} 1193}
720 1194
721 1195
@@ -786,6 +1260,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
786 if (req == NULL) 1260 if (req == NULL)
787 goto drop; 1261 goto drop;
788 1262
1263#ifdef CONFIG_TCP_MD5SIG
1264 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1265#endif
1266
789 tcp_clear_options(&tmp_opt); 1267 tcp_clear_options(&tmp_opt);
790 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); 1268 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
791 tmp_opt.user_mss = tp->rx_opt.user_mss; 1269 tmp_opt.user_mss = tp->rx_opt.user_mss;
@@ -844,6 +1322,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
844 struct tcp_sock *newtp; 1322 struct tcp_sock *newtp;
845 struct sock *newsk; 1323 struct sock *newsk;
846 struct ipv6_txoptions *opt; 1324 struct ipv6_txoptions *opt;
1325#ifdef CONFIG_TCP_MD5SIG
1326 struct tcp_md5sig_key *key;
1327#endif
847 1328
848 if (skb->protocol == htons(ETH_P_IP)) { 1329 if (skb->protocol == htons(ETH_P_IP)) {
849 /* 1330 /*
@@ -874,6 +1355,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
874 1355
875 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped; 1356 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
876 newsk->sk_backlog_rcv = tcp_v4_do_rcv; 1357 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1358#ifdef CONFIG_TCP_MD5SIG
1359 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1360#endif
1361
877 newnp->pktoptions = NULL; 1362 newnp->pktoptions = NULL;
878 newnp->opt = NULL; 1363 newnp->opt = NULL;
879 newnp->mcast_oif = inet6_iif(skb); 1364 newnp->mcast_oif = inet6_iif(skb);
@@ -1008,6 +1493,23 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1008 1493
1009 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6; 1494 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
1010 1495
1496#ifdef CONFIG_TCP_MD5SIG
1497 /* Copy over the MD5 key from the original socket */
1498 if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1499 /* We're using one, so create a matching key
1500 * on the newsk structure. If we fail to get
1501 * memory, then we end up not copying the key
1502 * across. Shucks.
1503 */
1504 char *newkey = kmalloc(key->keylen, GFP_ATOMIC);
1505 if (newkey) {
1506 memcpy(newkey, key->key, key->keylen);
1507 tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
1508 newkey, key->keylen);
1509 }
1510 }
1511#endif
1512
1011 __inet6_hash(&tcp_hashinfo, newsk); 1513 __inet6_hash(&tcp_hashinfo, newsk);
1012 inet_inherit_port(&tcp_hashinfo, sk, newsk); 1514 inet_inherit_port(&tcp_hashinfo, sk, newsk);
1013 1515
@@ -1067,6 +1569,11 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1067 if (skb->protocol == htons(ETH_P_IP)) 1569 if (skb->protocol == htons(ETH_P_IP))
1068 return tcp_v4_do_rcv(sk, skb); 1570 return tcp_v4_do_rcv(sk, skb);
1069 1571
1572#ifdef CONFIG_TCP_MD5SIG
1573 if (tcp_v6_inbound_md5_hash(sk, skb))
1574 goto discard;
1575#endif
1576
1070 if (sk_filter(sk, skb)) 1577 if (sk_filter(sk, skb))
1071 goto discard; 1578 goto discard;
1072 1579
@@ -1132,7 +1639,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1132 return 0; 1639 return 0;
1133 1640
1134reset: 1641reset:
1135 tcp_v6_send_reset(skb); 1642 tcp_v6_send_reset(sk, skb);
1136discard: 1643discard:
1137 if (opt_skb) 1644 if (opt_skb)
1138 __kfree_skb(opt_skb); 1645 __kfree_skb(opt_skb);
@@ -1257,7 +1764,7 @@ no_tcp_socket:
1257bad_packet: 1764bad_packet:
1258 TCP_INC_STATS_BH(TCP_MIB_INERRS); 1765 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1259 } else { 1766 } else {
1260 tcp_v6_send_reset(skb); 1767 tcp_v6_send_reset(NULL, skb);
1261 } 1768 }
1262 1769
1263discard_it: 1770discard_it:
@@ -1336,6 +1843,15 @@ static struct inet_connection_sock_af_ops ipv6_specific = {
1336#endif 1843#endif
1337}; 1844};
1338 1845
1846static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1847#ifdef CONFIG_TCP_MD5SIG
1848 .md5_lookup = tcp_v6_md5_lookup,
1849 .calc_md5_hash = tcp_v6_calc_md5_hash,
1850 .md5_add = tcp_v6_md5_add_func,
1851 .md5_parse = tcp_v6_parse_md5_keys,
1852#endif
1853};
1854
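tp->af_specific, assigned in the connect/accept paths above and in tcp_v6_init_sock() below, is the dispatch point that lets the family-independent TCP code (option generation, key management, the setsockopt handler) reach the right MD5 helpers without knowing whether a socket speaks IPv6 or v4-mapped IPv4. The ops table is roughly of the following shape; the member names mirror the initializers in this file, but the authoritative struct tcp_sock_af_ops is the one this patch adds to include/net/tcp.h:

	/* Sketch of the dispatch table type assumed above. */
	struct tcp_sock_af_ops {
		struct tcp_md5sig_key	*(*md5_lookup)(struct sock *sk,
						       struct sock *addr_sk);
		int	(*calc_md5_hash)(char *location,
					 struct tcp_md5sig_key *md5,
					 struct sock *sk,
					 struct dst_entry *dst,
					 struct request_sock *req,
					 struct tcphdr *th,
					 int protocol, int tcplen);
		int	(*md5_add)(struct sock *sk, struct sock *addr_sk,
				   u8 *newkey, u8 newkeylen);
		int	(*md5_parse)(struct sock *sk, char __user *optval,
				     int optlen);
	};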
1339/* 1855/*
1340 * TCP over IPv4 via INET6 API 1856 * TCP over IPv4 via INET6 API
1341 */ 1857 */
@@ -1358,6 +1874,15 @@ static struct inet_connection_sock_af_ops ipv6_mapped = {
1358#endif 1874#endif
1359}; 1875};
1360 1876
1877static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1878#ifdef CONFIG_TCP_MD5SIG
1879 .md5_lookup = tcp_v4_md5_lookup,
1880 .calc_md5_hash = tcp_v4_calc_md5_hash,
1881 .md5_add = tcp_v6_md5_add_func,
1882 .md5_parse = tcp_v6_parse_md5_keys,
1883#endif
1884};
1885
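Note the asymmetry in the v4-mapped table above: md5_lookup and calc_md5_hash point at the IPv4 helpers, since on a v4-mapped socket the segments on the wire are IPv4 and must be signed over the IPv4 pseudo-header, while md5_parse keeps the IPv6 wrapper, which already recognizes IPV6_ADDR_MAPPED addresses and hands them off to tcp_v4_md5_do_add()/tcp_v4_md5_do_del() as appropriate.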
1361/* NOTE: A lot of things set to zero explicitly by call to 1886/* NOTE: A lot of things set to zero explicitly by call to
1362 * sk_alloc() so need not be done here. 1887 * sk_alloc() so need not be done here.
1363 */ 1888 */
@@ -1397,6 +1922,10 @@ static int tcp_v6_init_sock(struct sock *sk)
1397 sk->sk_write_space = sk_stream_write_space; 1922 sk->sk_write_space = sk_stream_write_space;
1398 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); 1923 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1399 1924
1925#ifdef CONFIG_TCP_MD5SIG
1926 tp->af_specific = &tcp_sock_ipv6_specific;
1927#endif
1928
1400 sk->sk_sndbuf = sysctl_tcp_wmem[1]; 1929 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1401 sk->sk_rcvbuf = sysctl_tcp_rmem[1]; 1930 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1402 1931
@@ -1407,6 +1936,11 @@ static int tcp_v6_init_sock(struct sock *sk)
1407 1936
1408static int tcp_v6_destroy_sock(struct sock *sk) 1937static int tcp_v6_destroy_sock(struct sock *sk)
1409{ 1938{
1939#ifdef CONFIG_TCP_MD5SIG
1940 /* Clean up the MD5 key list */
1941 if (tcp_sk(sk)->md5sig_info)
1942 tcp_v6_clear_md5_list(sk);
1943#endif
1410 tcp_v4_destroy_sock(sk); 1944 tcp_v4_destroy_sock(sk);
1411 return inet6_destroy_sock(sk); 1945 return inet6_destroy_sock(sk);
1412} 1946}