Diffstat (limited to 'net/ipv4/tcp_minisocks.c')
-rw-r--r--	net/ipv4/tcp_minisocks.c	64
1 file changed, 63 insertions(+), 1 deletion(-)
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 0163d9826907..ac55d8892cf1 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -306,6 +306,28 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		tw->tw_ipv6only = np->ipv6only;
 	}
 #endif
+
+#ifdef CONFIG_TCP_MD5SIG
+	/*
+	 * The timewait bucket does not have the key DB from the
+	 * sock structure. We just make a quick copy of the
+	 * md5 key being used (if indeed we are using one)
+	 * so the timewait ack generating code has the key.
+	 */
+	do {
+		struct tcp_md5sig_key *key;
+		memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key));
+		tcptw->tw_md5_keylen = 0;
+		key = tp->af_specific->md5_lookup(sk, sk);
+		if (key != NULL) {
+			memcpy(&tcptw->tw_md5_key, key->key, key->keylen);
+			tcptw->tw_md5_keylen = key->keylen;
+			if (tcp_alloc_md5sig_pool() == NULL)
+				BUG();
+		}
+	} while(0);
+#endif
+
 	/* Linkage updates. */
 	__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 
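The block added above copies the MD5 key by value into the timewait sock instead of keeping a pointer into the listener's key database, because the timewait bucket outlives the full socket that owns that database. A minimal sketch of the state this hunk assumes on struct tcp_timewait_sock (field names are taken from the diff; the array bound and surrounding layout belong to a companion header change that is not part of this file, so treat them as assumptions):

/* Sketch only: MD5 state assumed on the timewait sock by the hunk above.
 * TCP_MD5SIG_MAXKEYLEN and the exact placement of these fields are
 * assumptions; the real definition lives in the companion header change. */
struct tcp_timewait_sock {
	struct inet_timewait_sock tw_sk;
	/* ... existing timewait fields ... */
#ifdef CONFIG_TCP_MD5SIG
	u16	tw_md5_keylen;
	u8	tw_md5_key[TCP_MD5SIG_MAXKEYLEN];
#endif
};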
@@ -337,6 +359,17 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 	tcp_done(sk);
 }
 
+void tcp_twsk_destructor(struct sock *sk)
+{
+	struct tcp_timewait_sock *twsk = tcp_twsk(sk);
+#ifdef CONFIG_TCP_MD5SIG
+	if (twsk->tw_md5_keylen)
+		tcp_put_md5sig_pool();
+#endif
+}
+
+EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
+
 /* This is not only more efficient than what we used to do, it eliminates
  * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
  *
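tcp_twsk_destructor() releases the MD5 pool reference taken in tcp_time_wait() above, but nothing in this file calls it; it presumably gets wired into TCP's timewait sock ops by a companion change to tcp_ipv4.c (and its IPv6 counterpart). A sketch of that assumed registration, so the generic inet timewait teardown invokes the destructor when the bucket is freed:

/* Assumed companion change (not in this diff): hook the destructor into
 * the timewait ops so inet_twsk teardown calls it for TCP buckets. */
struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	 = sizeof(struct tcp_timewait_sock),
	.twsk_unique	 = tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};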
@@ -435,6 +468,11 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 		newtp->rx_opt.ts_recent_stamp = 0;
 		newtp->tcp_header_len = sizeof(struct tcphdr);
 	}
+#ifdef CONFIG_TCP_MD5SIG
+	newtp->md5sig_info = NULL;	/*XXX*/
+	if (newtp->af_specific->md5_lookup(sk, newsk))
+		newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
+#endif
 	if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
 		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
 	newtp->rx_opt.mss_clamp = req->mss;
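The header-length bump above is what reserves room for the signature option on every segment of an MD5-protected connection, and folding it into tcp_header_len early keeps the MSS and receive-size heuristics just below it consistent. The sizing it relies on, with the constants as conventionally defined (the actual definitions live in include/net/tcp.h and are not shown in this diff, so treat the values below as an assumption):

/* Assumed option sizing: kind(1) + length(1) + 16-byte MD5 digest = 18
 * bytes on the wire, padded to a 4-byte boundary with NOPs, so 20 bytes
 * of TCP option space are reserved per signed segment. */
#define TCPOLEN_MD5SIG		18
#define TCPOLEN_MD5SIG_ALIGNED	20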
@@ -617,6 +655,30 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 							 req, NULL);
 	if (child == NULL)
 		goto listen_overflow;
+#ifdef CONFIG_TCP_MD5SIG
+	else {
+		/* Copy over the MD5 key from the original socket */
+		struct tcp_md5sig_key *key;
+		struct tcp_sock *tp = tcp_sk(sk);
+		key = tp->af_specific->md5_lookup(sk, child);
+		if (key != NULL) {
+			/*
+			 * We're using one, so create a matching key on the
+			 * newsk structure. If we fail to get memory then we
+			 * end up not copying the key across. Shucks.
+			 */
+			char *newkey = kmalloc(key->keylen, GFP_ATOMIC);
+			if (newkey) {
+				if (!tcp_alloc_md5sig_pool())
+					BUG();
+				memcpy(newkey, key->key, key->keylen);
+				tp->af_specific->md5_add(child, child,
+							 newkey,
+							 key->keylen);
+			}
+		}
+	}
+#endif
 
 	inet_csk_reqsk_queue_unlink(sk, req, prev);
 	inet_csk_reqsk_queue_removed(sk, req);
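The child-key copy above goes through the address-family-specific callbacks rather than manipulating the key list directly, so the same path serves IPv4 and IPv6 listeners. Note that the kmalloc()ed newkey buffer is handed off to md5_add(), which is expected to take ownership of it; an allocation failure simply leaves the child unsigned, as the comment says. A sketch of the two hooks this code assumes on tp->af_specific (prototypes inferred from the call sites here, not quoted from the real header):

/* Assumed shape of the af-specific MD5 hooks used above; the authoritative
 * definitions belong to the wider MD5-signature series, not this file. */
struct tcp_sock_af_ops {
	struct tcp_md5sig_key	*(*md5_lookup)(struct sock *sk,
					       struct sock *addr_sk);
	int			(*md5_add)(struct sock *sk,
					   struct sock *addr_sk,
					   u8 *newkey, u8 keylen);
	/* ... other af-specific hooks ... */
};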
@@ -633,7 +695,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 	embryonic_reset:
 		NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
 		if (!(flg & TCP_FLAG_RST))
-			req->rsk_ops->send_reset(skb);
+			req->rsk_ops->send_reset(sk, skb);
 
 		inet_csk_reqsk_queue_drop(sk, req, prev);
 		return NULL;
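Finally, send_reset() gaining the socket argument is what lets even the RST sent for a failed embryonic connection carry an MD5 signature: the reset path needs a socket against which to look up the peer's key. The implied change to the request_sock_ops callback (defined outside this file; the prototype below is an assumption based on the new call site):

/* Assumed companion change to struct request_sock_ops: the reset sender
 * now receives the socket so it can find an MD5 key and sign the RST. */
struct request_sock_ops {
	/* ... */
	void	(*send_reset)(struct sock *sk, struct sk_buff *skb);
	/* ... */
};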