diff options
author | YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> | 2006-11-14 22:07:45 -0500 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2006-12-03 00:22:39 -0500 |
commit | cfb6eeb4c860592edd123fdea908d23c6ad1c7dc (patch) | |
tree | 361c073622faa540ef6602ef1b0a6e8c0a17fc60 /net/ipv4 | |
parent | bf6bce71eae386dbc37f93af7e5ad173450d9945 (diff) |
[TCP]: MD5 Signature Option (RFC2385) support.
Based on implementation by Rick Payne.
Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r-- | net/ipv4/Kconfig | 16 | ||||
-rw-r--r-- | net/ipv4/tcp.c | 137 | ||||
-rw-r--r-- | net/ipv4/tcp_input.c | 8 | ||||
-rw-r--r-- | net/ipv4/tcp_ipv4.c | 673 | ||||
-rw-r--r-- | net/ipv4/tcp_minisocks.c | 64 | ||||
-rw-r--r-- | net/ipv4/tcp_output.c | 111 |
6 files changed, 973 insertions, 36 deletions
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index bc298bcc344e..39e0cb763588 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig | |||
@@ -618,5 +618,21 @@ config DEFAULT_TCP_CONG | |||
618 | default "reno" if DEFAULT_RENO | 618 | default "reno" if DEFAULT_RENO |
619 | default "cubic" | 619 | default "cubic" |
620 | 620 | ||
621 | config TCP_MD5SIG | ||
622 | bool "TCP: MD5 Signature Option support (RFC2385) (EXPERIMENTAL)" | ||
623 | depends on EXPERIMENTAL | ||
624 | select CRYPTO | ||
625 | select CRYPTO_MD5 | ||
626 | ---help--- | ||
627 | RFC2385 specifies a method of giving MD5 protection to TCP sessions. | ||
628 | Its main (only?) use is to protect BGP sessions between core routers | ||
629 | on the Internet. | ||
630 | |||
631 | If unsure, say N. | ||
632 | |||
633 | config TCP_MD5SIG_DEBUG | ||
634 | bool "TCP: MD5 Signature Option debugging" | ||
635 | depends on TCP_MD5SIG | ||
636 | |||
621 | source "net/ipv4/ipvs/Kconfig" | 637 | source "net/ipv4/ipvs/Kconfig" |
622 | 638 | ||
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index c05e8edaf544..dadef867a3bb 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -258,6 +258,7 @@ | |||
258 | #include <linux/bootmem.h> | 258 | #include <linux/bootmem.h> |
259 | #include <linux/cache.h> | 259 | #include <linux/cache.h> |
260 | #include <linux/err.h> | 260 | #include <linux/err.h> |
261 | #include <linux/crypto.h> | ||
261 | 262 | ||
262 | #include <net/icmp.h> | 263 | #include <net/icmp.h> |
263 | #include <net/tcp.h> | 264 | #include <net/tcp.h> |
@@ -1942,6 +1943,13 @@ static int do_tcp_setsockopt(struct sock *sk, int level, | |||
1942 | } | 1943 | } |
1943 | break; | 1944 | break; |
1944 | 1945 | ||
1946 | #ifdef CONFIG_TCP_MD5SIG | ||
1947 | case TCP_MD5SIG: | ||
1948 | /* Read the IP->Key mappings from userspace */ | ||
1949 | err = tp->af_specific->md5_parse(sk, optval, optlen); | ||
1950 | break; | ||
1951 | #endif | ||
1952 | |||
1945 | default: | 1953 | default: |
1946 | err = -ENOPROTOOPT; | 1954 | err = -ENOPROTOOPT; |
1947 | break; | 1955 | break; |
@@ -2231,6 +2239,135 @@ out: | |||
2231 | } | 2239 | } |
2232 | EXPORT_SYMBOL(tcp_tso_segment); | 2240 | EXPORT_SYMBOL(tcp_tso_segment); |
2233 | 2241 | ||
2242 | #ifdef CONFIG_TCP_MD5SIG | ||
2243 | static unsigned long tcp_md5sig_users; | ||
2244 | static struct tcp_md5sig_pool **tcp_md5sig_pool; | ||
2245 | static DEFINE_SPINLOCK(tcp_md5sig_pool_lock); | ||
2246 | |||
2247 | static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool) | ||
2248 | { | ||
2249 | int cpu; | ||
2250 | for_each_possible_cpu(cpu) { | ||
2251 | struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu); | ||
2252 | if (p) { | ||
2253 | if (p->md5_desc.tfm) | ||
2254 | crypto_free_hash(p->md5_desc.tfm); | ||
2255 | kfree(p); | ||
2256 | p = NULL; | ||
2257 | } | ||
2258 | } | ||
2259 | free_percpu(pool); | ||
2260 | } | ||
2261 | |||
2262 | void tcp_free_md5sig_pool(void) | ||
2263 | { | ||
2264 | struct tcp_md5sig_pool **pool = NULL; | ||
2265 | |||
2266 | spin_lock(&tcp_md5sig_pool_lock); | ||
2267 | if (--tcp_md5sig_users == 0) { | ||
2268 | pool = tcp_md5sig_pool; | ||
2269 | tcp_md5sig_pool = NULL; | ||
2270 | } | ||
2271 | spin_unlock(&tcp_md5sig_pool_lock); | ||
2272 | if (pool) | ||
2273 | __tcp_free_md5sig_pool(pool); | ||
2274 | } | ||
2275 | |||
2276 | EXPORT_SYMBOL(tcp_free_md5sig_pool); | ||
2277 | |||
2278 | struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void) | ||
2279 | { | ||
2280 | int cpu; | ||
2281 | struct tcp_md5sig_pool **pool; | ||
2282 | |||
2283 | pool = alloc_percpu(struct tcp_md5sig_pool *); | ||
2284 | if (!pool) | ||
2285 | return NULL; | ||
2286 | |||
2287 | for_each_possible_cpu(cpu) { | ||
2288 | struct tcp_md5sig_pool *p; | ||
2289 | struct crypto_hash *hash; | ||
2290 | |||
2291 | p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
2292 | if (!p) | ||
2293 | goto out_free; | ||
2294 | *per_cpu_ptr(pool, cpu) = p; | ||
2295 | |||
2296 | hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); | ||
2297 | if (!hash || IS_ERR(hash)) | ||
2298 | goto out_free; | ||
2299 | |||
2300 | p->md5_desc.tfm = hash; | ||
2301 | } | ||
2302 | return pool; | ||
2303 | out_free: | ||
2304 | __tcp_free_md5sig_pool(pool); | ||
2305 | return NULL; | ||
2306 | } | ||
2307 | |||
2308 | struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void) | ||
2309 | { | ||
2310 | struct tcp_md5sig_pool **pool; | ||
2311 | int alloc = 0; | ||
2312 | |||
2313 | retry: | ||
2314 | spin_lock(&tcp_md5sig_pool_lock); | ||
2315 | pool = tcp_md5sig_pool; | ||
2316 | if (tcp_md5sig_users++ == 0) { | ||
2317 | alloc = 1; | ||
2318 | spin_unlock(&tcp_md5sig_pool_lock); | ||
2319 | } else if (!pool) { | ||
2320 | tcp_md5sig_users--; | ||
2321 | spin_unlock(&tcp_md5sig_pool_lock); | ||
2322 | cpu_relax(); | ||
2323 | goto retry; | ||
2324 | } else | ||
2325 | spin_unlock(&tcp_md5sig_pool_lock); | ||
2326 | |||
2327 | if (alloc) { | ||
2328 | /* we cannot hold spinlock here because this may sleep. */ | ||
2329 | struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(); | ||
2330 | spin_lock(&tcp_md5sig_pool_lock); | ||
2331 | if (!p) { | ||
2332 | tcp_md5sig_users--; | ||
2333 | spin_unlock(&tcp_md5sig_pool_lock); | ||
2334 | return NULL; | ||
2335 | } | ||
2336 | pool = tcp_md5sig_pool; | ||
2337 | if (pool) { | ||
2338 | /* oops, it has already been assigned. */ | ||
2339 | spin_unlock(&tcp_md5sig_pool_lock); | ||
2340 | __tcp_free_md5sig_pool(p); | ||
2341 | } else { | ||
2342 | tcp_md5sig_pool = pool = p; | ||
2343 | spin_unlock(&tcp_md5sig_pool_lock); | ||
2344 | } | ||
2345 | } | ||
2346 | return pool; | ||
2347 | } | ||
2348 | |||
2349 | EXPORT_SYMBOL(tcp_alloc_md5sig_pool); | ||
2350 | |||
2351 | struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu) | ||
2352 | { | ||
2353 | struct tcp_md5sig_pool **p; | ||
2354 | spin_lock(&tcp_md5sig_pool_lock); | ||
2355 | p = tcp_md5sig_pool; | ||
2356 | if (p) | ||
2357 | tcp_md5sig_users++; | ||
2358 | spin_unlock(&tcp_md5sig_pool_lock); | ||
2359 | return (p ? *per_cpu_ptr(p, cpu) : NULL); | ||
2360 | } | ||
2361 | |||
2362 | EXPORT_SYMBOL(__tcp_get_md5sig_pool); | ||
2363 | |||
2364 | void __tcp_put_md5sig_pool(void) { | ||
2365 | __tcp_free_md5sig_pool(tcp_md5sig_pool); | ||
2366 | } | ||
2367 | |||
2368 | EXPORT_SYMBOL(__tcp_put_md5sig_pool); | ||
2369 | #endif | ||
2370 | |||
2234 | extern void __skb_cb_too_small_for_tcp(int, int); | 2371 | extern void __skb_cb_too_small_for_tcp(int, int); |
2235 | extern struct tcp_congestion_ops tcp_reno; | 2372 | extern struct tcp_congestion_ops tcp_reno; |
2236 | 2373 | ||
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 4a8c96cdec7d..6ab3423674bb 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -2677,6 +2677,14 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, | |||
2677 | opt_rx->sack_ok) { | 2677 | opt_rx->sack_ok) { |
2678 | TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; | 2678 | TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; |
2679 | } | 2679 | } |
2680 | #ifdef CONFIG_TCP_MD5SIG | ||
2681 | case TCPOPT_MD5SIG: | ||
2682 | /* | ||
2683 | * The MD5 Hash has already been | ||
2684 | * checked (see tcp_v{4,6}_do_rcv()). | ||
2685 | */ | ||
2686 | break; | ||
2687 | #endif | ||
2680 | }; | 2688 | }; |
2681 | ptr+=opsize-2; | 2689 | ptr+=opsize-2; |
2682 | length-=opsize; | 2690 | length-=opsize; |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 0ad0904bf56c..8c8e8112f98d 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -78,6 +78,9 @@ | |||
78 | #include <linux/proc_fs.h> | 78 | #include <linux/proc_fs.h> |
79 | #include <linux/seq_file.h> | 79 | #include <linux/seq_file.h> |
80 | 80 | ||
81 | #include <linux/crypto.h> | ||
82 | #include <linux/scatterlist.h> | ||
83 | |||
81 | int sysctl_tcp_tw_reuse __read_mostly; | 84 | int sysctl_tcp_tw_reuse __read_mostly; |
82 | int sysctl_tcp_low_latency __read_mostly; | 85 | int sysctl_tcp_low_latency __read_mostly; |
83 | 86 | ||
@@ -89,6 +92,13 @@ static struct socket *tcp_socket; | |||
89 | 92 | ||
90 | void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); | 93 | void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); |
91 | 94 | ||
95 | #ifdef CONFIG_TCP_MD5SIG | ||
96 | static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr); | ||
97 | static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | ||
98 | __be32 saddr, __be32 daddr, struct tcphdr *th, | ||
99 | int protocol, int tcplen); | ||
100 | #endif | ||
101 | |||
92 | struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { | 102 | struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { |
93 | .lhash_lock = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock), | 103 | .lhash_lock = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock), |
94 | .lhash_users = ATOMIC_INIT(0), | 104 | .lhash_users = ATOMIC_INIT(0), |
@@ -526,11 +536,19 @@ int tcp_v4_gso_send_check(struct sk_buff *skb) | |||
526 | * Exception: precedence violation. We do not implement it in any case. | 536 | * Exception: precedence violation. We do not implement it in any case. |
527 | */ | 537 | */ |
528 | 538 | ||
529 | static void tcp_v4_send_reset(struct sk_buff *skb) | 539 | static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) |
530 | { | 540 | { |
531 | struct tcphdr *th = skb->h.th; | 541 | struct tcphdr *th = skb->h.th; |
532 | struct tcphdr rth; | 542 | struct { |
543 | struct tcphdr th; | ||
544 | #ifdef CONFIG_TCP_MD5SIG | ||
545 | u32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)]; | ||
546 | #endif | ||
547 | } rep; | ||
533 | struct ip_reply_arg arg; | 548 | struct ip_reply_arg arg; |
549 | #ifdef CONFIG_TCP_MD5SIG | ||
550 | struct tcp_md5sig_key *key; | ||
551 | #endif | ||
534 | 552 | ||
535 | /* Never send a reset in response to a reset. */ | 553 | /* Never send a reset in response to a reset. */ |
536 | if (th->rst) | 554 | if (th->rst) |
@@ -540,29 +558,50 @@ static void tcp_v4_send_reset(struct sk_buff *skb) | |||
540 | return; | 558 | return; |
541 | 559 | ||
542 | /* Swap the send and the receive. */ | 560 | /* Swap the send and the receive. */ |
543 | memset(&rth, 0, sizeof(struct tcphdr)); | 561 | memset(&rep, 0, sizeof(rep)); |
544 | rth.dest = th->source; | 562 | rep.th.dest = th->source; |
545 | rth.source = th->dest; | 563 | rep.th.source = th->dest; |
546 | rth.doff = sizeof(struct tcphdr) / 4; | 564 | rep.th.doff = sizeof(struct tcphdr) / 4; |
547 | rth.rst = 1; | 565 | rep.th.rst = 1; |
548 | 566 | ||
549 | if (th->ack) { | 567 | if (th->ack) { |
550 | rth.seq = th->ack_seq; | 568 | rep.th.seq = th->ack_seq; |
551 | } else { | 569 | } else { |
552 | rth.ack = 1; | 570 | rep.th.ack = 1; |
553 | rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin + | 571 | rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin + |
554 | skb->len - (th->doff << 2)); | 572 | skb->len - (th->doff << 2)); |
555 | } | 573 | } |
556 | 574 | ||
557 | memset(&arg, 0, sizeof arg); | 575 | memset(&arg, 0, sizeof arg); |
558 | arg.iov[0].iov_base = (unsigned char *)&rth; | 576 | arg.iov[0].iov_base = (unsigned char *)&rep; |
559 | arg.iov[0].iov_len = sizeof rth; | 577 | arg.iov[0].iov_len = sizeof(rep.th); |
578 | |||
579 | #ifdef CONFIG_TCP_MD5SIG | ||
580 | key = sk ? tcp_v4_md5_do_lookup(sk, skb->nh.iph->daddr) : NULL; | ||
581 | if (key) { | ||
582 | rep.opt[0] = htonl((TCPOPT_NOP << 24) | | ||
583 | (TCPOPT_NOP << 16) | | ||
584 | (TCPOPT_MD5SIG << 8) | | ||
585 | TCPOLEN_MD5SIG); | ||
586 | /* Update length and the length the header thinks exists */ | ||
587 | arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED; | ||
588 | rep.th.doff = arg.iov[0].iov_len / 4; | ||
589 | |||
590 | tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[1], | ||
591 | key, | ||
592 | skb->nh.iph->daddr, | ||
593 | skb->nh.iph->saddr, | ||
594 | &rep.th, IPPROTO_TCP, | ||
595 | arg.iov[0].iov_len); | ||
596 | } | ||
597 | #endif | ||
598 | |||
560 | arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr, | 599 | arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr, |
561 | skb->nh.iph->saddr, /*XXX*/ | 600 | skb->nh.iph->saddr, /*XXX*/ |
562 | sizeof(struct tcphdr), IPPROTO_TCP, 0); | 601 | sizeof(struct tcphdr), IPPROTO_TCP, 0); |
563 | arg.csumoffset = offsetof(struct tcphdr, check) / 2; | 602 | arg.csumoffset = offsetof(struct tcphdr, check) / 2; |
564 | 603 | ||
565 | ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth); | 604 | ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len); |
566 | 605 | ||
567 | TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); | 606 | TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); |
568 | TCP_INC_STATS_BH(TCP_MIB_OUTRSTS); | 607 | TCP_INC_STATS_BH(TCP_MIB_OUTRSTS); |
@@ -572,15 +611,24 @@ static void tcp_v4_send_reset(struct sk_buff *skb) | |||
572 | outside socket context is ugly, certainly. What can I do? | 611 | outside socket context is ugly, certainly. What can I do? |
573 | */ | 612 | */ |
574 | 613 | ||
575 | static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, | 614 | static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk, |
615 | struct sk_buff *skb, u32 seq, u32 ack, | ||
576 | u32 win, u32 ts) | 616 | u32 win, u32 ts) |
577 | { | 617 | { |
578 | struct tcphdr *th = skb->h.th; | 618 | struct tcphdr *th = skb->h.th; |
579 | struct { | 619 | struct { |
580 | struct tcphdr th; | 620 | struct tcphdr th; |
581 | u32 tsopt[TCPOLEN_TSTAMP_ALIGNED >> 2]; | 621 | u32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2) |
622 | #ifdef CONFIG_TCP_MD5SIG | ||
623 | + (TCPOLEN_MD5SIG_ALIGNED >> 2) | ||
624 | #endif | ||
625 | ]; | ||
582 | } rep; | 626 | } rep; |
583 | struct ip_reply_arg arg; | 627 | struct ip_reply_arg arg; |
628 | #ifdef CONFIG_TCP_MD5SIG | ||
629 | struct tcp_md5sig_key *key; | ||
630 | struct tcp_md5sig_key tw_key; | ||
631 | #endif | ||
584 | 632 | ||
585 | memset(&rep.th, 0, sizeof(struct tcphdr)); | 633 | memset(&rep.th, 0, sizeof(struct tcphdr)); |
586 | memset(&arg, 0, sizeof arg); | 634 | memset(&arg, 0, sizeof arg); |
@@ -588,12 +636,12 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, | |||
588 | arg.iov[0].iov_base = (unsigned char *)&rep; | 636 | arg.iov[0].iov_base = (unsigned char *)&rep; |
589 | arg.iov[0].iov_len = sizeof(rep.th); | 637 | arg.iov[0].iov_len = sizeof(rep.th); |
590 | if (ts) { | 638 | if (ts) { |
591 | rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | | 639 | rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | |
592 | (TCPOPT_TIMESTAMP << 8) | | 640 | (TCPOPT_TIMESTAMP << 8) | |
593 | TCPOLEN_TIMESTAMP); | 641 | TCPOLEN_TIMESTAMP); |
594 | rep.tsopt[1] = htonl(tcp_time_stamp); | 642 | rep.opt[1] = htonl(tcp_time_stamp); |
595 | rep.tsopt[2] = htonl(ts); | 643 | rep.opt[2] = htonl(ts); |
596 | arg.iov[0].iov_len = sizeof(rep); | 644 | arg.iov[0].iov_len = TCPOLEN_TSTAMP_ALIGNED; |
597 | } | 645 | } |
598 | 646 | ||
599 | /* Swap the send and the receive. */ | 647 | /* Swap the send and the receive. */ |
@@ -605,6 +653,44 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, | |||
605 | rep.th.ack = 1; | 653 | rep.th.ack = 1; |
606 | rep.th.window = htons(win); | 654 | rep.th.window = htons(win); |
607 | 655 | ||
656 | #ifdef CONFIG_TCP_MD5SIG | ||
657 | /* | ||
658 | * The SKB holds an incoming packet, but may not have a valid ->sk | ||
659 | * pointer. This is especially the case when we're dealing with a | ||
660 | * TIME_WAIT ack, because the sk structure is long gone, and only | ||
661 | * the tcp_timewait_sock remains. So the md5 key is stashed in that | ||
662 | * structure, and we use it in preference. I believe that (twsk || | ||
663 | * skb->sk) holds true, but we program defensively. | ||
664 | */ | ||
665 | if (!twsk && skb->sk) { | ||
666 | key = tcp_v4_md5_do_lookup(skb->sk, skb->nh.iph->daddr); | ||
667 | } else if (twsk && twsk->tw_md5_keylen) { | ||
668 | tw_key.key = twsk->tw_md5_key; | ||
669 | tw_key.keylen = twsk->tw_md5_keylen; | ||
670 | key = &tw_key; | ||
671 | } else { | ||
672 | key = NULL; | ||
673 | } | ||
674 | |||
675 | if (key) { | ||
676 | int offset = (ts) ? 3 : 0; | ||
677 | |||
678 | rep.opt[offset++] = htonl((TCPOPT_NOP << 24) | | ||
679 | (TCPOPT_NOP << 16) | | ||
680 | (TCPOPT_MD5SIG << 8) | | ||
681 | TCPOLEN_MD5SIG); | ||
682 | arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED; | ||
683 | rep.th.doff = arg.iov[0].iov_len/4; | ||
684 | |||
685 | tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[offset], | ||
686 | key, | ||
687 | skb->nh.iph->daddr, | ||
688 | skb->nh.iph->saddr, | ||
689 | &rep.th, IPPROTO_TCP, | ||
690 | arg.iov[0].iov_len); | ||
691 | } | ||
692 | #endif | ||
693 | |||
608 | arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr, | 694 | arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr, |
609 | skb->nh.iph->saddr, /*XXX*/ | 695 | skb->nh.iph->saddr, /*XXX*/ |
610 | arg.iov[0].iov_len, IPPROTO_TCP, 0); | 696 | arg.iov[0].iov_len, IPPROTO_TCP, 0); |
@@ -618,9 +704,9 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, | |||
618 | static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) | 704 | static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) |
619 | { | 705 | { |
620 | struct inet_timewait_sock *tw = inet_twsk(sk); | 706 | struct inet_timewait_sock *tw = inet_twsk(sk); |
621 | const struct tcp_timewait_sock *tcptw = tcp_twsk(sk); | 707 | struct tcp_timewait_sock *tcptw = tcp_twsk(sk); |
622 | 708 | ||
623 | tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, | 709 | tcp_v4_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, |
624 | tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent); | 710 | tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent); |
625 | 711 | ||
626 | inet_twsk_put(tw); | 712 | inet_twsk_put(tw); |
@@ -628,7 +714,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) | |||
628 | 714 | ||
629 | static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) | 715 | static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) |
630 | { | 716 | { |
631 | tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, | 717 | tcp_v4_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, |
718 | tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, | ||
632 | req->ts_recent); | 719 | req->ts_recent); |
633 | } | 720 | } |
634 | 721 | ||
@@ -714,6 +801,461 @@ static struct ip_options *tcp_v4_save_options(struct sock *sk, | |||
714 | return dopt; | 801 | return dopt; |
715 | } | 802 | } |
716 | 803 | ||
804 | #ifdef CONFIG_TCP_MD5SIG | ||
805 | /* | ||
806 | * RFC2385 MD5 checksumming requires a mapping of | ||
807 | * IP address->MD5 Key. | ||
808 | * We need to maintain these in the sk structure. | ||
809 | */ | ||
810 | |||
811 | /* Find the Key structure for an address. */ | ||
812 | static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr) | ||
813 | { | ||
814 | struct tcp_sock *tp = tcp_sk(sk); | ||
815 | int i; | ||
816 | |||
817 | if (!tp->md5sig_info || !tp->md5sig_info->entries4) | ||
818 | return NULL; | ||
819 | for (i = 0; i < tp->md5sig_info->entries4; i++) { | ||
820 | if (tp->md5sig_info->keys4[i].addr == addr) | ||
821 | return (struct tcp_md5sig_key *)&tp->md5sig_info->keys4[i]; | ||
822 | } | ||
823 | return NULL; | ||
824 | } | ||
825 | |||
826 | struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, | ||
827 | struct sock *addr_sk) | ||
828 | { | ||
829 | return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->daddr); | ||
830 | } | ||
831 | |||
832 | EXPORT_SYMBOL(tcp_v4_md5_lookup); | ||
833 | |||
834 | struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk, | ||
835 | struct request_sock *req) | ||
836 | { | ||
837 | return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr); | ||
838 | } | ||
839 | |||
840 | /* This can be called on a newly created socket, from other files */ | ||
841 | int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, | ||
842 | u8 *newkey, u8 newkeylen) | ||
843 | { | ||
844 | /* Add Key to the list */ | ||
845 | struct tcp4_md5sig_key *key; | ||
846 | struct tcp_sock *tp = tcp_sk(sk); | ||
847 | struct tcp4_md5sig_key *keys; | ||
848 | |||
849 | key = (struct tcp4_md5sig_key *) tcp_v4_md5_do_lookup(sk, addr); | ||
850 | if (key) { | ||
851 | /* Pre-existing entry - just update that one. */ | ||
852 | kfree (key->key); | ||
853 | key->key = newkey; | ||
854 | key->keylen = newkeylen; | ||
855 | } else { | ||
856 | if (!tp->md5sig_info) { | ||
857 | tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC); | ||
858 | if (!tp->md5sig_info) { | ||
859 | kfree(newkey); | ||
860 | return -ENOMEM; | ||
861 | } | ||
862 | } | ||
863 | if (tcp_alloc_md5sig_pool() == NULL) { | ||
864 | kfree(newkey); | ||
865 | return -ENOMEM; | ||
866 | } | ||
867 | if (tp->md5sig_info->alloced4 == tp->md5sig_info->entries4) { | ||
868 | keys = kmalloc((sizeof(struct tcp4_md5sig_key) * | ||
869 | (tp->md5sig_info->entries4 + 1)), GFP_ATOMIC); | ||
870 | if (!keys) { | ||
871 | kfree(newkey); | ||
872 | tcp_free_md5sig_pool(); | ||
873 | return -ENOMEM; | ||
874 | } | ||
875 | |||
876 | if (tp->md5sig_info->entries4) | ||
877 | memcpy(keys, tp->md5sig_info->keys4, | ||
878 | (sizeof (struct tcp4_md5sig_key) * | ||
879 | tp->md5sig_info->entries4)); | ||
880 | |||
881 | /* Free old key list, and reference new one */ | ||
882 | if (tp->md5sig_info->keys4) | ||
883 | kfree(tp->md5sig_info->keys4); | ||
884 | tp->md5sig_info->keys4 = keys; | ||
885 | tp->md5sig_info->alloced4++; | ||
886 | } | ||
887 | tp->md5sig_info->entries4++; | ||
888 | tp->md5sig_info->keys4[tp->md5sig_info->entries4 - 1].addr = addr; | ||
889 | tp->md5sig_info->keys4[tp->md5sig_info->entries4 - 1].key = newkey; | ||
890 | tp->md5sig_info->keys4[tp->md5sig_info->entries4 - 1].keylen = newkeylen; | ||
891 | } | ||
892 | return 0; | ||
893 | } | ||
894 | |||
895 | EXPORT_SYMBOL(tcp_v4_md5_do_add); | ||
896 | |||
897 | static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk, | ||
898 | u8 *newkey, u8 newkeylen) | ||
899 | { | ||
900 | return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->daddr, | ||
901 | newkey, newkeylen); | ||
902 | } | ||
903 | |||
904 | int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) | ||
905 | { | ||
906 | struct tcp_sock *tp = tcp_sk(sk); | ||
907 | int i; | ||
908 | |||
909 | for (i = 0; i < tp->md5sig_info->entries4; i++) { | ||
910 | if (tp->md5sig_info->keys4[i].addr == addr) { | ||
911 | /* Free the key */ | ||
912 | kfree(tp->md5sig_info->keys4[i].key); | ||
913 | tp->md5sig_info->entries4--; | ||
914 | |||
915 | if (tp->md5sig_info->entries4 == 0) { | ||
916 | kfree(tp->md5sig_info->keys4); | ||
917 | tp->md5sig_info->keys4 = NULL; | ||
918 | } else { | ||
919 | /* Need to do some manipulation */ | ||
920 | if (tp->md5sig_info->entries4 != i) | ||
921 | memcpy(&tp->md5sig_info->keys4[i], | ||
922 | &tp->md5sig_info->keys4[i+1], | ||
923 | (tp->md5sig_info->entries4 - i) | ||
924 | * sizeof (struct tcp4_md5sig_key)); | ||
925 | } | ||
926 | tcp_free_md5sig_pool(); | ||
927 | return 0; | ||
928 | } | ||
929 | } | ||
930 | return -ENOENT; | ||
931 | } | ||
932 | |||
933 | EXPORT_SYMBOL(tcp_v4_md5_do_del); | ||
934 | |||
935 | static void tcp_v4_clear_md5_list (struct sock *sk) | ||
936 | { | ||
937 | struct tcp_sock *tp = tcp_sk(sk); | ||
938 | |||
939 | /* Free each key, then the set of key keys, | ||
940 | * the crypto element, and then decrement our | ||
941 | * hold on the last resort crypto. | ||
942 | */ | ||
943 | if (tp->md5sig_info->entries4) { | ||
944 | int i; | ||
945 | for (i = 0; i < tp->md5sig_info->entries4; i++) | ||
946 | kfree(tp->md5sig_info->keys4[i].key); | ||
947 | tp->md5sig_info->entries4 = 0; | ||
948 | tcp_free_md5sig_pool(); | ||
949 | } | ||
950 | if (tp->md5sig_info->keys4) { | ||
951 | kfree(tp->md5sig_info->keys4); | ||
952 | tp->md5sig_info->keys4 = NULL; | ||
953 | tp->md5sig_info->alloced4 = 0; | ||
954 | } | ||
955 | } | ||
956 | |||
957 | static int tcp_v4_parse_md5_keys (struct sock *sk, char __user *optval, | ||
958 | int optlen) | ||
959 | { | ||
960 | struct tcp_md5sig cmd; | ||
961 | struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr; | ||
962 | u8 *newkey; | ||
963 | |||
964 | if (optlen < sizeof(cmd)) | ||
965 | return -EINVAL; | ||
966 | |||
967 | if (copy_from_user (&cmd, optval, sizeof(cmd))) | ||
968 | return -EFAULT; | ||
969 | |||
970 | if (sin->sin_family != AF_INET) | ||
971 | return -EINVAL; | ||
972 | |||
973 | if (!cmd.tcpm_key || !cmd.tcpm_keylen) { | ||
974 | if (!tcp_sk(sk)->md5sig_info) | ||
975 | return -ENOENT; | ||
976 | return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr); | ||
977 | } | ||
978 | |||
979 | if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) | ||
980 | return -EINVAL; | ||
981 | |||
982 | if (!tcp_sk(sk)->md5sig_info) { | ||
983 | struct tcp_sock *tp = tcp_sk(sk); | ||
984 | struct tcp_md5sig_info *p; | ||
985 | |||
986 | p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL); | ||
987 | if (!p) | ||
988 | return -EINVAL; | ||
989 | |||
990 | tp->md5sig_info = p; | ||
991 | |||
992 | } | ||
993 | |||
994 | newkey = kmalloc(cmd.tcpm_keylen, GFP_KERNEL); | ||
995 | if (!newkey) | ||
996 | return -ENOMEM; | ||
997 | memcpy(newkey, cmd.tcpm_key, cmd.tcpm_keylen); | ||
998 | return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr, | ||
999 | newkey, cmd.tcpm_keylen); | ||
1000 | } | ||
1001 | |||
1002 | static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | ||
1003 | __be32 saddr, __be32 daddr, | ||
1004 | struct tcphdr *th, int protocol, | ||
1005 | int tcplen) | ||
1006 | { | ||
1007 | struct scatterlist sg[4]; | ||
1008 | __u16 data_len; | ||
1009 | int block = 0; | ||
1010 | #ifdef CONFIG_TCP_MD5SIG_DEBUG | ||
1011 | int i; | ||
1012 | #endif | ||
1013 | __u16 old_checksum; | ||
1014 | struct tcp_md5sig_pool *hp; | ||
1015 | struct tcp4_pseudohdr *bp; | ||
1016 | struct hash_desc *desc; | ||
1017 | int err; | ||
1018 | unsigned int nbytes = 0; | ||
1019 | |||
1020 | /* | ||
1021 | * Okay, so RFC2385 is turned on for this connection, | ||
1022 | * so we need to generate the MD5 hash for the packet now. | ||
1023 | */ | ||
1024 | |||
1025 | hp = tcp_get_md5sig_pool(); | ||
1026 | if (!hp) | ||
1027 | goto clear_hash_noput; | ||
1028 | |||
1029 | bp = &hp->md5_blk.ip4; | ||
1030 | desc = &hp->md5_desc; | ||
1031 | |||
1032 | /* | ||
1033 | * 1. the TCP pseudo-header (in the order: source IP address, | ||
1034 | * destination IP address, zero-padded protocol number, and | ||
1035 | * segment length) | ||
1036 | */ | ||
1037 | bp->saddr = saddr; | ||
1038 | bp->daddr = daddr; | ||
1039 | bp->pad = 0; | ||
1040 | bp->protocol = protocol; | ||
1041 | bp->len = htons(tcplen); | ||
1042 | sg_set_buf(&sg[block++], bp, sizeof(*bp)); | ||
1043 | nbytes += sizeof(*bp); | ||
1044 | |||
1045 | #ifdef CONFIG_TCP_MD5SIG_DEBUG | ||
1046 | printk("Calculating hash for: "); | ||
1047 | for (i = 0; i < sizeof (*bp); i++) | ||
1048 | printk ("%02x ", (unsigned int)((unsigned char *)bp)[i]); | ||
1049 | printk(" "); | ||
1050 | #endif | ||
1051 | |||
1052 | /* 2. the TCP header, excluding options, and assuming a | ||
1053 | * checksum of zero. | ||
1054 | */ | ||
1055 | old_checksum = th->check; | ||
1056 | th->check = 0; | ||
1057 | sg_set_buf(&sg[block++], th, sizeof(struct tcphdr)); | ||
1058 | nbytes += sizeof(struct tcphdr); | ||
1059 | #ifdef CONFIG_TCP_MD5SIG_DEBUG | ||
1060 | for (i = 0; i < sizeof (struct tcphdr); i++) | ||
1061 | printk (" %02x", (unsigned int)((unsigned char *)th)[i]); | ||
1062 | #endif | ||
1063 | /* 3. the TCP segment data (if any) */ | ||
1064 | data_len = tcplen - (th->doff << 2); | ||
1065 | if (data_len > 0) { | ||
1066 | unsigned char *data = (unsigned char *)th + (th->doff << 2); | ||
1067 | sg_set_buf(&sg[block++], data, data_len); | ||
1068 | nbytes += data_len; | ||
1069 | } | ||
1070 | |||
1071 | /* 4. an independently-specified key or password, known to both | ||
1072 | * TCPs and presumably connection-specific | ||
1073 | */ | ||
1074 | sg_set_buf(&sg[block++], key->key, key->keylen); | ||
1075 | nbytes += key->keylen; | ||
1076 | |||
1077 | #ifdef CONFIG_TCP_MD5SIG_DEBUG | ||
1078 | printk (" and password: "); | ||
1079 | for (i = 0; i < key->keylen; i++) | ||
1080 | printk ("%02x ", (unsigned int)key->key[i]); | ||
1081 | #endif | ||
1082 | |||
1083 | /* Now store the Hash into the packet */ | ||
1084 | err = crypto_hash_init(desc); | ||
1085 | if (err) | ||
1086 | goto clear_hash; | ||
1087 | err = crypto_hash_update(desc, sg, nbytes); | ||
1088 | if (err) | ||
1089 | goto clear_hash; | ||
1090 | err = crypto_hash_final(desc, md5_hash); | ||
1091 | if (err) | ||
1092 | goto clear_hash; | ||
1093 | |||
1094 | /* Reset header, and free up the crypto */ | ||
1095 | tcp_put_md5sig_pool(); | ||
1096 | th->check = old_checksum; | ||
1097 | |||
1098 | out: | ||
1099 | #ifdef CONFIG_TCP_MD5SIG_DEBUG | ||
1100 | printk(" result:"); | ||
1101 | for (i = 0; i < 16; i++) | ||
1102 | printk (" %02x", (unsigned int)(((u8*)md5_hash)[i])); | ||
1103 | printk("\n"); | ||
1104 | #endif | ||
1105 | return 0; | ||
1106 | clear_hash: | ||
1107 | tcp_put_md5sig_pool(); | ||
1108 | clear_hash_noput: | ||
1109 | memset(md5_hash, 0, 16); | ||
1110 | goto out; | ||
1111 | } | ||
1112 | |||
1113 | int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key, | ||
1114 | struct sock *sk, | ||
1115 | struct dst_entry *dst, | ||
1116 | struct request_sock *req, | ||
1117 | struct tcphdr *th, int protocol, | ||
1118 | int tcplen) | ||
1119 | { | ||
1120 | __be32 saddr, daddr; | ||
1121 | |||
1122 | if (sk) { | ||
1123 | saddr = inet_sk(sk)->saddr; | ||
1124 | daddr = inet_sk(sk)->daddr; | ||
1125 | } else { | ||
1126 | struct rtable *rt = (struct rtable *)dst; | ||
1127 | BUG_ON(!rt); | ||
1128 | saddr = rt->rt_src; | ||
1129 | daddr = rt->rt_dst; | ||
1130 | } | ||
1131 | return tcp_v4_do_calc_md5_hash(md5_hash, key, | ||
1132 | saddr, daddr, | ||
1133 | th, protocol, tcplen); | ||
1134 | } | ||
1135 | |||
1136 | EXPORT_SYMBOL(tcp_v4_calc_md5_hash); | ||
1137 | |||
1138 | static int tcp_v4_inbound_md5_hash (struct sock *sk, struct sk_buff *skb) | ||
1139 | { | ||
1140 | /* | ||
1141 | * This gets called for each TCP segment that arrives | ||
1142 | * so we want to be efficient. | ||
1143 | * We have 3 drop cases: | ||
1144 | * o No MD5 hash and one expected. | ||
1145 | * o MD5 hash and we're not expecting one. | ||
1146 | * o MD5 hash and its wrong. | ||
1147 | */ | ||
1148 | __u8 *hash_location = NULL; | ||
1149 | struct tcp_md5sig_key *hash_expected; | ||
1150 | struct iphdr *iph = skb->nh.iph; | ||
1151 | struct tcphdr *th = skb->h.th; | ||
1152 | int length = (th->doff << 2) - sizeof (struct tcphdr); | ||
1153 | int genhash; | ||
1154 | unsigned char *ptr; | ||
1155 | unsigned char newhash[16]; | ||
1156 | |||
1157 | hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr); | ||
1158 | |||
1159 | /* | ||
1160 | * If the TCP option length is less than the TCP_MD5SIG | ||
1161 | * option length, then we can shortcut | ||
1162 | */ | ||
1163 | if (length < TCPOLEN_MD5SIG) { | ||
1164 | if (hash_expected) | ||
1165 | return 1; | ||
1166 | else | ||
1167 | return 0; | ||
1168 | } | ||
1169 | |||
1170 | /* Okay, we can't shortcut - we have to grub through the options */ | ||
1171 | ptr = (unsigned char *)(th + 1); | ||
1172 | while (length > 0) { | ||
1173 | int opcode = *ptr++; | ||
1174 | int opsize; | ||
1175 | |||
1176 | switch (opcode) { | ||
1177 | case TCPOPT_EOL: | ||
1178 | goto done_opts; | ||
1179 | case TCPOPT_NOP: | ||
1180 | length--; | ||
1181 | continue; | ||
1182 | default: | ||
1183 | opsize = *ptr++; | ||
1184 | if (opsize < 2) | ||
1185 | goto done_opts; | ||
1186 | if (opsize > length) | ||
1187 | goto done_opts; | ||
1188 | |||
1189 | if (opcode == TCPOPT_MD5SIG) { | ||
1190 | hash_location = ptr; | ||
1191 | goto done_opts; | ||
1192 | } | ||
1193 | } | ||
1194 | ptr += opsize-2; | ||
1195 | length -= opsize; | ||
1196 | } | ||
1197 | done_opts: | ||
1198 | /* We've parsed the options - do we have a hash? */ | ||
1199 | if (!hash_expected && !hash_location) | ||
1200 | return 0; | ||
1201 | |||
1202 | if (hash_expected && !hash_location) { | ||
1203 | if (net_ratelimit()) { | ||
1204 | printk(KERN_INFO "MD5 Hash expected but NOT found " | ||
1205 | "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n", | ||
1206 | NIPQUAD (iph->saddr), ntohs(th->source), | ||
1207 | NIPQUAD (iph->daddr), ntohs(th->dest)); | ||
1208 | } | ||
1209 | return 1; | ||
1210 | } | ||
1211 | |||
1212 | if (!hash_expected && hash_location) { | ||
1213 | if (net_ratelimit()) { | ||
1214 | printk(KERN_INFO "MD5 Hash NOT expected but found " | ||
1215 | "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n", | ||
1216 | NIPQUAD (iph->saddr), ntohs(th->source), | ||
1217 | NIPQUAD (iph->daddr), ntohs(th->dest)); | ||
1218 | } | ||
1219 | return 1; | ||
1220 | } | ||
1221 | |||
1222 | /* Okay, so this is hash_expected and hash_location - | ||
1223 | * so we need to calculate the checksum. | ||
1224 | */ | ||
1225 | genhash = tcp_v4_do_calc_md5_hash(newhash, | ||
1226 | hash_expected, | ||
1227 | iph->saddr, iph->daddr, | ||
1228 | th, sk->sk_protocol, | ||
1229 | skb->len); | ||
1230 | |||
1231 | if (genhash || memcmp(hash_location, newhash, 16) != 0) { | ||
1232 | if (net_ratelimit()) { | ||
1233 | printk(KERN_INFO "MD5 Hash failed for " | ||
1234 | "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)%s\n", | ||
1235 | NIPQUAD (iph->saddr), ntohs(th->source), | ||
1236 | NIPQUAD (iph->daddr), ntohs(th->dest), | ||
1237 | genhash ? " tcp_v4_calc_md5_hash failed" : ""); | ||
1238 | #ifdef CONFIG_TCP_MD5SIG_DEBUG | ||
1239 | do { | ||
1240 | int i; | ||
1241 | printk("Received: "); | ||
1242 | for (i = 0; i < 16; i++) | ||
1243 | printk("%02x ", 0xff & (int)hash_location[i]); | ||
1244 | printk("\n"); | ||
1245 | printk("Calculated: "); | ||
1246 | for (i = 0; i < 16; i++) | ||
1247 | printk("%02x ", 0xff & (int)newhash[i]); | ||
1248 | printk("\n"); | ||
1249 | } while(0); | ||
1250 | #endif | ||
1251 | } | ||
1252 | return 1; | ||
1253 | } | ||
1254 | return 0; | ||
1255 | } | ||
1256 | |||
1257 | #endif | ||
1258 | |||
717 | struct request_sock_ops tcp_request_sock_ops __read_mostly = { | 1259 | struct request_sock_ops tcp_request_sock_ops __read_mostly = { |
718 | .family = PF_INET, | 1260 | .family = PF_INET, |
719 | .obj_size = sizeof(struct tcp_request_sock), | 1261 | .obj_size = sizeof(struct tcp_request_sock), |
@@ -723,9 +1265,16 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = { | |||
723 | .send_reset = tcp_v4_send_reset, | 1265 | .send_reset = tcp_v4_send_reset, |
724 | }; | 1266 | }; |
725 | 1267 | ||
1268 | struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { | ||
1269 | #ifdef CONFIG_TCP_MD5SIG | ||
1270 | .md5_lookup = tcp_v4_reqsk_md5_lookup, | ||
1271 | #endif | ||
1272 | }; | ||
1273 | |||
726 | static struct timewait_sock_ops tcp_timewait_sock_ops = { | 1274 | static struct timewait_sock_ops tcp_timewait_sock_ops = { |
727 | .twsk_obj_size = sizeof(struct tcp_timewait_sock), | 1275 | .twsk_obj_size = sizeof(struct tcp_timewait_sock), |
728 | .twsk_unique = tcp_twsk_unique, | 1276 | .twsk_unique = tcp_twsk_unique, |
1277 | .twsk_destructor= tcp_twsk_destructor, | ||
729 | }; | 1278 | }; |
730 | 1279 | ||
731 | int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | 1280 | int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) |
@@ -773,6 +1322,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
773 | if (!req) | 1322 | if (!req) |
774 | goto drop; | 1323 | goto drop; |
775 | 1324 | ||
1325 | #ifdef CONFIG_TCP_MD5SIG | ||
1326 | tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops; | ||
1327 | #endif | ||
1328 | |||
776 | tcp_clear_options(&tmp_opt); | 1329 | tcp_clear_options(&tmp_opt); |
777 | tmp_opt.mss_clamp = 536; | 1330 | tmp_opt.mss_clamp = 536; |
778 | tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss; | 1331 | tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss; |
@@ -891,6 +1444,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
891 | struct inet_sock *newinet; | 1444 | struct inet_sock *newinet; |
892 | struct tcp_sock *newtp; | 1445 | struct tcp_sock *newtp; |
893 | struct sock *newsk; | 1446 | struct sock *newsk; |
1447 | #ifdef CONFIG_TCP_MD5SIG | ||
1448 | struct tcp_md5sig_key *key; | ||
1449 | #endif | ||
894 | 1450 | ||
895 | if (sk_acceptq_is_full(sk)) | 1451 | if (sk_acceptq_is_full(sk)) |
896 | goto exit_overflow; | 1452 | goto exit_overflow; |
@@ -925,6 +1481,24 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
925 | newtp->advmss = dst_metric(dst, RTAX_ADVMSS); | 1481 | newtp->advmss = dst_metric(dst, RTAX_ADVMSS); |
926 | tcp_initialize_rcv_mss(newsk); | 1482 | tcp_initialize_rcv_mss(newsk); |
927 | 1483 | ||
1484 | #ifdef CONFIG_TCP_MD5SIG | ||
1485 | /* Copy over the MD5 key from the original socket */ | ||
1486 | if ((key = tcp_v4_md5_do_lookup(sk, newinet->daddr)) != NULL) { | ||
1487 | /* | ||
1488 | * We're using one, so create a matching key | ||
1489 | * on the newsk structure. If we fail to get | ||
1490 | * memory, then we end up not copying the key | ||
1491 | * across. Shucks. | ||
1492 | */ | ||
1493 | char *newkey = kmalloc(key->keylen, GFP_ATOMIC); | ||
1494 | if (newkey) { | ||
1495 | memcpy(newkey, key->key, key->keylen); | ||
1496 | tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr, | ||
1497 | newkey, key->keylen); | ||
1498 | } | ||
1499 | } | ||
1500 | #endif | ||
1501 | |||
928 | __inet_hash(&tcp_hashinfo, newsk, 0); | 1502 | __inet_hash(&tcp_hashinfo, newsk, 0); |
929 | __inet_inherit_port(&tcp_hashinfo, sk, newsk); | 1503 | __inet_inherit_port(&tcp_hashinfo, sk, newsk); |
930 | 1504 | ||
@@ -1000,10 +1574,24 @@ static int tcp_v4_checksum_init(struct sk_buff *skb) | |||
1000 | */ | 1574 | */ |
1001 | int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) | 1575 | int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) |
1002 | { | 1576 | { |
1577 | struct sock *rsk; | ||
1578 | #ifdef CONFIG_TCP_MD5SIG | ||
1579 | /* | ||
1580 | * We really want to reject the packet as early as possible | ||
1581 | * if: | ||
1582 | * o We're expecting an MD5'd packet and this is no MD5 tcp option | ||
1583 | * o There is an MD5 option and we're not expecting one | ||
1584 | */ | ||
1585 | if (tcp_v4_inbound_md5_hash (sk, skb)) | ||
1586 | goto discard; | ||
1587 | #endif | ||
1588 | |||
1003 | if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ | 1589 | if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ |
1004 | TCP_CHECK_TIMER(sk); | 1590 | TCP_CHECK_TIMER(sk); |
1005 | if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) | 1591 | if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) { |
1592 | rsk = sk; | ||
1006 | goto reset; | 1593 | goto reset; |
1594 | } | ||
1007 | TCP_CHECK_TIMER(sk); | 1595 | TCP_CHECK_TIMER(sk); |
1008 | return 0; | 1596 | return 0; |
1009 | } | 1597 | } |
@@ -1017,20 +1605,24 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
1017 | goto discard; | 1605 | goto discard; |
1018 | 1606 | ||
1019 | if (nsk != sk) { | 1607 | if (nsk != sk) { |
1020 | if (tcp_child_process(sk, nsk, skb)) | 1608 | if (tcp_child_process(sk, nsk, skb)) { |
1609 | rsk = nsk; | ||
1021 | goto reset; | 1610 | goto reset; |
1611 | } | ||
1022 | return 0; | 1612 | return 0; |
1023 | } | 1613 | } |
1024 | } | 1614 | } |
1025 | 1615 | ||
1026 | TCP_CHECK_TIMER(sk); | 1616 | TCP_CHECK_TIMER(sk); |
1027 | if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) | 1617 | if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) { |
1618 | rsk = sk; | ||
1028 | goto reset; | 1619 | goto reset; |
1620 | } | ||
1029 | TCP_CHECK_TIMER(sk); | 1621 | TCP_CHECK_TIMER(sk); |
1030 | return 0; | 1622 | return 0; |
1031 | 1623 | ||
1032 | reset: | 1624 | reset: |
1033 | tcp_v4_send_reset(skb); | 1625 | tcp_v4_send_reset(rsk, skb); |
1034 | discard: | 1626 | discard: |
1035 | kfree_skb(skb); | 1627 | kfree_skb(skb); |
1036 | /* Be careful here. If this function gets more complicated and | 1628 | /* Be careful here. If this function gets more complicated and |
@@ -1139,7 +1731,7 @@ no_tcp_socket: | |||
1139 | bad_packet: | 1731 | bad_packet: |
1140 | TCP_INC_STATS_BH(TCP_MIB_INERRS); | 1732 | TCP_INC_STATS_BH(TCP_MIB_INERRS); |
1141 | } else { | 1733 | } else { |
1142 | tcp_v4_send_reset(skb); | 1734 | tcp_v4_send_reset(NULL, skb); |
1143 | } | 1735 | } |
1144 | 1736 | ||
1145 | discard_it: | 1737 | discard_it: |
@@ -1262,6 +1854,15 @@ struct inet_connection_sock_af_ops ipv4_specific = { | |||
1262 | #endif | 1854 | #endif |
1263 | }; | 1855 | }; |
1264 | 1856 | ||
1857 | struct tcp_sock_af_ops tcp_sock_ipv4_specific = { | ||
1858 | #ifdef CONFIG_TCP_MD5SIG | ||
1859 | .md5_lookup = tcp_v4_md5_lookup, | ||
1860 | .calc_md5_hash = tcp_v4_calc_md5_hash, | ||
1861 | .md5_add = tcp_v4_md5_add_func, | ||
1862 | .md5_parse = tcp_v4_parse_md5_keys, | ||
1863 | #endif | ||
1864 | }; | ||
1865 | |||
1265 | /* NOTE: A lot of things set to zero explicitly by call to | 1866 | /* NOTE: A lot of things set to zero explicitly by call to |
1266 | * sk_alloc() so need not be done here. | 1867 | * sk_alloc() so need not be done here. |
1267 | */ | 1868 | */ |
@@ -1301,6 +1902,9 @@ static int tcp_v4_init_sock(struct sock *sk) | |||
1301 | 1902 | ||
1302 | icsk->icsk_af_ops = &ipv4_specific; | 1903 | icsk->icsk_af_ops = &ipv4_specific; |
1303 | icsk->icsk_sync_mss = tcp_sync_mss; | 1904 | icsk->icsk_sync_mss = tcp_sync_mss; |
1905 | #ifdef CONFIG_TCP_MD5SIG | ||
1906 | tp->af_specific = &tcp_sock_ipv4_specific; | ||
1907 | #endif | ||
1304 | 1908 | ||
1305 | sk->sk_sndbuf = sysctl_tcp_wmem[1]; | 1909 | sk->sk_sndbuf = sysctl_tcp_wmem[1]; |
1306 | sk->sk_rcvbuf = sysctl_tcp_rmem[1]; | 1910 | sk->sk_rcvbuf = sysctl_tcp_rmem[1]; |
@@ -1324,6 +1928,15 @@ int tcp_v4_destroy_sock(struct sock *sk) | |||
1324 | /* Cleans up our, hopefully empty, out_of_order_queue. */ | 1928 | /* Cleans up our, hopefully empty, out_of_order_queue. */ |
1325 | __skb_queue_purge(&tp->out_of_order_queue); | 1929 | __skb_queue_purge(&tp->out_of_order_queue); |
1326 | 1930 | ||
1931 | #ifdef CONFIG_TCP_MD5SIG | ||
1932 | /* Clean up the MD5 key list, if any */ | ||
1933 | if (tp->md5sig_info) { | ||
1934 | tcp_v4_clear_md5_list(sk); | ||
1935 | kfree(tp->md5sig_info); | ||
1936 | tp->md5sig_info = NULL; | ||
1937 | } | ||
1938 | #endif | ||
1939 | |||
1327 | #ifdef CONFIG_NET_DMA | 1940 | #ifdef CONFIG_NET_DMA |
1328 | /* Cleans up our sk_async_wait_queue */ | 1941 | /* Cleans up our sk_async_wait_queue */ |
1329 | __skb_queue_purge(&sk->sk_async_wait_queue); | 1942 | __skb_queue_purge(&sk->sk_async_wait_queue); |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 0163d9826907..ac55d8892cf1 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -306,6 +306,28 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) | |||
306 | tw->tw_ipv6only = np->ipv6only; | 306 | tw->tw_ipv6only = np->ipv6only; |
307 | } | 307 | } |
308 | #endif | 308 | #endif |
309 | |||
310 | #ifdef CONFIG_TCP_MD5SIG | ||
311 | /* | ||
312 | * The timewait bucket does not have the key DB from the | ||
313 | * sock structure. We just make a quick copy of the | ||
314 | * md5 key being used (if indeed we are using one) | ||
315 | * so the timewait ack generating code has the key. | ||
316 | */ | ||
317 | do { | ||
318 | struct tcp_md5sig_key *key; | ||
319 | memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key)); | ||
320 | tcptw->tw_md5_keylen = 0; | ||
321 | key = tp->af_specific->md5_lookup(sk, sk); | ||
322 | if (key != NULL) { | ||
323 | memcpy(&tcptw->tw_md5_key, key->key, key->keylen); | ||
324 | tcptw->tw_md5_keylen = key->keylen; | ||
325 | if (tcp_alloc_md5sig_pool() == NULL) | ||
326 | BUG(); | ||
327 | } | ||
328 | } while(0); | ||
329 | #endif | ||
330 | |||
309 | /* Linkage updates. */ | 331 | /* Linkage updates. */ |
310 | __inet_twsk_hashdance(tw, sk, &tcp_hashinfo); | 332 | __inet_twsk_hashdance(tw, sk, &tcp_hashinfo); |
311 | 333 | ||
@@ -337,6 +359,17 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) | |||
337 | tcp_done(sk); | 359 | tcp_done(sk); |
338 | } | 360 | } |
339 | 361 | ||
362 | void tcp_twsk_destructor(struct sock *sk) | ||
363 | { | ||
364 | struct tcp_timewait_sock *twsk = tcp_twsk(sk); | ||
365 | #ifdef CONFIG_TCP_MD5SIG | ||
366 | if (twsk->tw_md5_keylen) | ||
367 | tcp_put_md5sig_pool(); | ||
368 | #endif | ||
369 | } | ||
370 | |||
371 | EXPORT_SYMBOL_GPL(tcp_twsk_destructor); | ||
372 | |||
340 | /* This is not only more efficient than what we used to do, it eliminates | 373 | /* This is not only more efficient than what we used to do, it eliminates |
341 | * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM | 374 | * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM |
342 | * | 375 | * |
@@ -435,6 +468,11 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, | |||
435 | newtp->rx_opt.ts_recent_stamp = 0; | 468 | newtp->rx_opt.ts_recent_stamp = 0; |
436 | newtp->tcp_header_len = sizeof(struct tcphdr); | 469 | newtp->tcp_header_len = sizeof(struct tcphdr); |
437 | } | 470 | } |
471 | #ifdef CONFIG_TCP_MD5SIG | ||
472 | newtp->md5sig_info = NULL; /*XXX*/ | ||
473 | if (newtp->af_specific->md5_lookup(sk, newsk)) | ||
474 | newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; | ||
475 | #endif | ||
438 | if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len) | 476 | if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len) |
439 | newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; | 477 | newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; |
440 | newtp->rx_opt.mss_clamp = req->mss; | 478 | newtp->rx_opt.mss_clamp = req->mss; |
@@ -617,6 +655,30 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, | |||
617 | req, NULL); | 655 | req, NULL); |
618 | if (child == NULL) | 656 | if (child == NULL) |
619 | goto listen_overflow; | 657 | goto listen_overflow; |
658 | #ifdef CONFIG_TCP_MD5SIG | ||
659 | else { | ||
660 | /* Copy over the MD5 key from the original socket */ | ||
661 | struct tcp_md5sig_key *key; | ||
662 | struct tcp_sock *tp = tcp_sk(sk); | ||
663 | key = tp->af_specific->md5_lookup(sk, child); | ||
664 | if (key != NULL) { | ||
665 | /* | ||
666 | * We're using one, so create a matching key on the | ||
667 | * newsk structure. If we fail to get memory then we | ||
668 | * end up not copying the key across. Shucks. | ||
669 | */ | ||
670 | char *newkey = kmalloc(key->keylen, GFP_ATOMIC); | ||
671 | if (newkey) { | ||
672 | if (!tcp_alloc_md5sig_pool()) | ||
673 | BUG(); | ||
674 | memcpy(newkey, key->key, key->keylen); | ||
675 | tp->af_specific->md5_add(child, child, | ||
676 | newkey, | ||
677 | key->keylen); | ||
678 | } | ||
679 | } | ||
680 | } | ||
681 | #endif | ||
620 | 682 | ||
621 | inet_csk_reqsk_queue_unlink(sk, req, prev); | 683 | inet_csk_reqsk_queue_unlink(sk, req, prev); |
622 | inet_csk_reqsk_queue_removed(sk, req); | 684 | inet_csk_reqsk_queue_removed(sk, req); |
@@ -633,7 +695,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, | |||
633 | embryonic_reset: | 695 | embryonic_reset: |
634 | NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS); | 696 | NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS); |
635 | if (!(flg & TCP_FLAG_RST)) | 697 | if (!(flg & TCP_FLAG_RST)) |
636 | req->rsk_ops->send_reset(skb); | 698 | req->rsk_ops->send_reset(sk, skb); |
637 | 699 | ||
638 | inet_csk_reqsk_queue_drop(sk, req, prev); | 700 | inet_csk_reqsk_queue_drop(sk, req, prev); |
639 | return NULL; | 701 | return NULL; |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 6a8581ab9a23..32c1a972fa31 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -270,7 +270,7 @@ static u16 tcp_select_window(struct sock *sk) | |||
270 | } | 270 | } |
271 | 271 | ||
272 | static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp, | 272 | static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp, |
273 | __u32 tstamp) | 273 | __u32 tstamp, __u8 **md5_hash) |
274 | { | 274 | { |
275 | if (tp->rx_opt.tstamp_ok) { | 275 | if (tp->rx_opt.tstamp_ok) { |
276 | *ptr++ = htonl((TCPOPT_NOP << 24) | | 276 | *ptr++ = htonl((TCPOPT_NOP << 24) | |
@@ -298,16 +298,29 @@ static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp, | |||
298 | tp->rx_opt.eff_sacks--; | 298 | tp->rx_opt.eff_sacks--; |
299 | } | 299 | } |
300 | } | 300 | } |
301 | #ifdef CONFIG_TCP_MD5SIG | ||
302 | if (md5_hash) { | ||
303 | *ptr++ = htonl((TCPOPT_NOP << 24) | | ||
304 | (TCPOPT_NOP << 16) | | ||
305 | (TCPOPT_MD5SIG << 8) | | ||
306 | TCPOLEN_MD5SIG); | ||
307 | *md5_hash = (__u8 *)ptr; | ||
308 | } | ||
309 | #endif | ||
301 | } | 310 | } |
302 | 311 | ||
303 | /* Construct a tcp options header for a SYN or SYN_ACK packet. | 312 | /* Construct a tcp options header for a SYN or SYN_ACK packet. |
304 | * If this is every changed make sure to change the definition of | 313 | * If this is every changed make sure to change the definition of |
305 | * MAX_SYN_SIZE to match the new maximum number of options that you | 314 | * MAX_SYN_SIZE to match the new maximum number of options that you |
306 | * can generate. | 315 | * can generate. |
316 | * | ||
317 | * Note - that with the RFC2385 TCP option, we make room for the | ||
318 | * 16 byte MD5 hash. This will be filled in later, so the pointer for the | ||
319 | * location to be filled is passed back up. | ||
307 | */ | 320 | */ |
308 | static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack, | 321 | static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack, |
309 | int offer_wscale, int wscale, __u32 tstamp, | 322 | int offer_wscale, int wscale, __u32 tstamp, |
310 | __u32 ts_recent) | 323 | __u32 ts_recent, __u8 **md5_hash) |
311 | { | 324 | { |
312 | /* We always get an MSS option. | 325 | /* We always get an MSS option. |
313 | * The option bytes which will be seen in normal data | 326 | * The option bytes which will be seen in normal data |
@@ -346,6 +359,20 @@ static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack, | |||
346 | (TCPOPT_WINDOW << 16) | | 359 | (TCPOPT_WINDOW << 16) | |
347 | (TCPOLEN_WINDOW << 8) | | 360 | (TCPOLEN_WINDOW << 8) | |
348 | (wscale)); | 361 | (wscale)); |
362 | #ifdef CONFIG_TCP_MD5SIG | ||
363 | /* | ||
364 | * If MD5 is enabled, then we set the option, and include the size | ||
365 | * (always 18). The actual MD5 hash is added just before the | ||
366 | * packet is sent. | ||
367 | */ | ||
368 | if (md5_hash) { | ||
369 | *ptr++ = htonl((TCPOPT_NOP << 24) | | ||
370 | (TCPOPT_NOP << 16) | | ||
371 | (TCPOPT_MD5SIG << 8) | | ||
372 | TCPOLEN_MD5SIG); | ||
373 | *md5_hash = (__u8 *) ptr; | ||
374 | } | ||
375 | #endif | ||
349 | } | 376 | } |
350 | 377 | ||
351 | /* This routine actually transmits TCP packets queued in by | 378 | /* This routine actually transmits TCP packets queued in by |
@@ -366,6 +393,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
366 | struct tcp_sock *tp; | 393 | struct tcp_sock *tp; |
367 | struct tcp_skb_cb *tcb; | 394 | struct tcp_skb_cb *tcb; |
368 | int tcp_header_size; | 395 | int tcp_header_size; |
396 | #ifdef CONFIG_TCP_MD5SIG | ||
397 | struct tcp_md5sig_key *md5; | ||
398 | __u8 *md5_hash_location; | ||
399 | #endif | ||
369 | struct tcphdr *th; | 400 | struct tcphdr *th; |
370 | int sysctl_flags; | 401 | int sysctl_flags; |
371 | int err; | 402 | int err; |
@@ -424,6 +455,16 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
424 | if (tcp_packets_in_flight(tp) == 0) | 455 | if (tcp_packets_in_flight(tp) == 0) |
425 | tcp_ca_event(sk, CA_EVENT_TX_START); | 456 | tcp_ca_event(sk, CA_EVENT_TX_START); |
426 | 457 | ||
458 | #ifdef CONFIG_TCP_MD5SIG | ||
459 | /* | ||
460 | * Are we doing MD5 on this segment? If so - make | ||
461 | * room for it. | ||
462 | */ | ||
463 | md5 = tp->af_specific->md5_lookup(sk, sk); | ||
464 | if (md5) | ||
465 | tcp_header_size += TCPOLEN_MD5SIG_ALIGNED; | ||
466 | #endif | ||
467 | |||
427 | th = (struct tcphdr *) skb_push(skb, tcp_header_size); | 468 | th = (struct tcphdr *) skb_push(skb, tcp_header_size); |
428 | skb->h.th = th; | 469 | skb->h.th = th; |
429 | 470 | ||
@@ -460,13 +501,34 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
460 | (sysctl_flags & SYSCTL_FLAG_WSCALE), | 501 | (sysctl_flags & SYSCTL_FLAG_WSCALE), |
461 | tp->rx_opt.rcv_wscale, | 502 | tp->rx_opt.rcv_wscale, |
462 | tcb->when, | 503 | tcb->when, |
463 | tp->rx_opt.ts_recent); | 504 | tp->rx_opt.ts_recent, |
505 | |||
506 | #ifdef CONFIG_TCP_MD5SIG | ||
507 | md5 ? &md5_hash_location : | ||
508 | #endif | ||
509 | NULL); | ||
464 | } else { | 510 | } else { |
465 | tcp_build_and_update_options((__be32 *)(th + 1), | 511 | tcp_build_and_update_options((__be32 *)(th + 1), |
466 | tp, tcb->when); | 512 | tp, tcb->when, |
513 | #ifdef CONFIG_TCP_MD5SIG | ||
514 | md5 ? &md5_hash_location : | ||
515 | #endif | ||
516 | NULL); | ||
467 | TCP_ECN_send(sk, tp, skb, tcp_header_size); | 517 | TCP_ECN_send(sk, tp, skb, tcp_header_size); |
468 | } | 518 | } |
469 | 519 | ||
520 | #ifdef CONFIG_TCP_MD5SIG | ||
521 | /* Calculate the MD5 hash, as we have all we need now */ | ||
522 | if (md5) { | ||
523 | tp->af_specific->calc_md5_hash(md5_hash_location, | ||
524 | md5, | ||
525 | sk, NULL, NULL, | ||
526 | skb->h.th, | ||
527 | sk->sk_protocol, | ||
528 | skb->len); | ||
529 | } | ||
530 | #endif | ||
531 | |||
470 | icsk->icsk_af_ops->send_check(sk, skb->len, skb); | 532 | icsk->icsk_af_ops->send_check(sk, skb->len, skb); |
471 | 533 | ||
472 | if (likely(tcb->flags & TCPCB_FLAG_ACK)) | 534 | if (likely(tcb->flags & TCPCB_FLAG_ACK)) |
@@ -840,6 +902,11 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed) | |||
840 | mss_now -= (TCPOLEN_SACK_BASE_ALIGNED + | 902 | mss_now -= (TCPOLEN_SACK_BASE_ALIGNED + |
841 | (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK)); | 903 | (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK)); |
842 | 904 | ||
905 | #ifdef CONFIG_TCP_MD5SIG | ||
906 | if (tp->af_specific->md5_lookup(sk, sk)) | ||
907 | mss_now -= TCPOLEN_MD5SIG_ALIGNED; | ||
908 | #endif | ||
909 | |||
843 | xmit_size_goal = mss_now; | 910 | xmit_size_goal = mss_now; |
844 | 911 | ||
845 | if (doing_tso) { | 912 | if (doing_tso) { |
@@ -2033,6 +2100,10 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2033 | struct tcphdr *th; | 2100 | struct tcphdr *th; |
2034 | int tcp_header_size; | 2101 | int tcp_header_size; |
2035 | struct sk_buff *skb; | 2102 | struct sk_buff *skb; |
2103 | #ifdef CONFIG_TCP_MD5SIG | ||
2104 | struct tcp_md5sig_key *md5; | ||
2105 | __u8 *md5_hash_location; | ||
2106 | #endif | ||
2036 | 2107 | ||
2037 | skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); | 2108 | skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); |
2038 | if (skb == NULL) | 2109 | if (skb == NULL) |
@@ -2048,6 +2119,13 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2048 | (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) + | 2119 | (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) + |
2049 | /* SACK_PERM is in the place of NOP NOP of TS */ | 2120 | /* SACK_PERM is in the place of NOP NOP of TS */ |
2050 | ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0)); | 2121 | ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0)); |
2122 | |||
2123 | #ifdef CONFIG_TCP_MD5SIG | ||
2124 | /* Are we doing MD5 on this segment? If so - make room for it */ | ||
2125 | md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req); | ||
2126 | if (md5) | ||
2127 | tcp_header_size += TCPOLEN_MD5SIG_ALIGNED; | ||
2128 | #endif | ||
2051 | skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size); | 2129 | skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size); |
2052 | 2130 | ||
2053 | memset(th, 0, sizeof(struct tcphdr)); | 2131 | memset(th, 0, sizeof(struct tcphdr)); |
@@ -2085,11 +2163,29 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2085 | tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok, | 2163 | tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok, |
2086 | ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale, | 2164 | ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale, |
2087 | TCP_SKB_CB(skb)->when, | 2165 | TCP_SKB_CB(skb)->when, |
2088 | req->ts_recent); | 2166 | req->ts_recent, |
2167 | ( | ||
2168 | #ifdef CONFIG_TCP_MD5SIG | ||
2169 | md5 ? &md5_hash_location : | ||
2170 | #endif | ||
2171 | NULL) | ||
2172 | ); | ||
2089 | 2173 | ||
2090 | skb->csum = 0; | 2174 | skb->csum = 0; |
2091 | th->doff = (tcp_header_size >> 2); | 2175 | th->doff = (tcp_header_size >> 2); |
2092 | TCP_INC_STATS(TCP_MIB_OUTSEGS); | 2176 | TCP_INC_STATS(TCP_MIB_OUTSEGS); |
2177 | |||
2178 | #ifdef CONFIG_TCP_MD5SIG | ||
2179 | /* Okay, we have all we need - do the md5 hash if needed */ | ||
2180 | if (md5) { | ||
2181 | tp->af_specific->calc_md5_hash(md5_hash_location, | ||
2182 | md5, | ||
2183 | NULL, dst, req, | ||
2184 | skb->h.th, sk->sk_protocol, | ||
2185 | skb->len); | ||
2186 | } | ||
2187 | #endif | ||
2188 | |||
2093 | return skb; | 2189 | return skb; |
2094 | } | 2190 | } |
2095 | 2191 | ||
@@ -2108,6 +2204,11 @@ static void tcp_connect_init(struct sock *sk) | |||
2108 | tp->tcp_header_len = sizeof(struct tcphdr) + | 2204 | tp->tcp_header_len = sizeof(struct tcphdr) + |
2109 | (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); | 2205 | (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); |
2110 | 2206 | ||
2207 | #ifdef CONFIG_TCP_MD5SIG | ||
2208 | if (tp->af_specific->md5_lookup(sk, sk) != NULL) | ||
2209 | tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; | ||
2210 | #endif | ||
2211 | |||
2111 | /* If user gave his TCP_MAXSEG, record it to clamp */ | 2212 | /* If user gave his TCP_MAXSEG, record it to clamp */ |
2112 | if (tp->rx_opt.user_mss) | 2213 | if (tp->rx_opt.user_mss) |
2113 | tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; | 2214 | tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; |