path: root/net/ipv4/tcp_minisocks.c
author    David Howells <dhowells@redhat.com>                      2006-12-05 09:37:56 -0500
committer David Howells <dhowells@warthog.cambridge.redhat.com>    2006-12-05 09:37:56 -0500
commit    4c1ac1b49122b805adfa4efc620592f68dccf5db (patch)
tree      87557f4bc2fd4fe65b7570489c2f610c45c0adcd /net/ipv4/tcp_minisocks.c
parent    c4028958b6ecad064b1a6303a6a5906d4fe48d73 (diff)
parent    d916faace3efc0bf19fe9a615a1ab8fa1a24cd93 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:

	drivers/infiniband/core/iwcm.c
	drivers/net/chelsio/cxgb2.c
	drivers/net/wireless/bcm43xx/bcm43xx_main.c
	drivers/net/wireless/prism54/islpci_eth.c
	drivers/usb/core/hub.h
	drivers/usb/input/hid-core.c
	net/core/netpoll.c

Fix up merge failures with Linus's head and fix new compilation failures.

Signed-Off-By: David Howells <dhowells@redhat.com>
Diffstat (limited to 'net/ipv4/tcp_minisocks.c')
-rw-r--r--  net/ipv4/tcp_minisocks.c | 69
1 file changed, 65 insertions(+), 4 deletions(-)
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index af7b2c986b1f..4a3889dd1943 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -305,6 +305,28 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		tw->tw_ipv6only = np->ipv6only;
 	}
 #endif
+
+#ifdef CONFIG_TCP_MD5SIG
+	/*
+	 * The timewait bucket does not have the key DB from the
+	 * sock structure. We just make a quick copy of the
+	 * md5 key being used (if indeed we are using one)
+	 * so the timewait ack generating code has the key.
+	 */
+	do {
+		struct tcp_md5sig_key *key;
+		memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key));
+		tcptw->tw_md5_keylen = 0;
+		key = tp->af_specific->md5_lookup(sk, sk);
+		if (key != NULL) {
+			memcpy(&tcptw->tw_md5_key, key->key, key->keylen);
+			tcptw->tw_md5_keylen = key->keylen;
+			if (tcp_alloc_md5sig_pool() == NULL)
+				BUG();
+		}
+	} while(0);
+#endif
+
 	/* Linkage updates. */
 	__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 
@@ -328,14 +350,24 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		 * socket up. We've got bigger problems than
 		 * non-graceful socket closings.
 		 */
-		if (net_ratelimit())
-			printk(KERN_INFO "TCP: time wait bucket table overflow\n");
+		LIMIT_NETDEBUG(KERN_INFO "TCP: time wait bucket table overflow\n");
 	}
 
 	tcp_update_metrics(sk);
 	tcp_done(sk);
 }
 
+void tcp_twsk_destructor(struct sock *sk)
+{
+#ifdef CONFIG_TCP_MD5SIG
+	struct tcp_timewait_sock *twsk = tcp_twsk(sk);
+	if (twsk->tw_md5_keylen)
+		tcp_put_md5sig_pool();
+#endif
+}
+
+EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
+
 /* This is not only more efficient than what we used to do, it eliminates
  * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
  *
@@ -434,6 +466,11 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 		newtp->rx_opt.ts_recent_stamp = 0;
 		newtp->tcp_header_len = sizeof(struct tcphdr);
 	}
+#ifdef CONFIG_TCP_MD5SIG
+	newtp->md5sig_info = NULL;	/*XXX*/
+	if (newtp->af_specific->md5_lookup(sk, newsk))
+		newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
+#endif
 	if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
 		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
 	newtp->rx_opt.mss_clamp = req->mss;
@@ -454,7 +491,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 			   struct request_sock **prev)
 {
 	struct tcphdr *th = skb->h.th;
-	u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
+	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
 	int paws_reject = 0;
 	struct tcp_options_received tmp_opt;
 	struct sock *child;
@@ -616,6 +653,30 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 							 req, NULL);
 	if (child == NULL)
 		goto listen_overflow;
+#ifdef CONFIG_TCP_MD5SIG
+	else {
+		/* Copy over the MD5 key from the original socket */
+		struct tcp_md5sig_key *key;
+		struct tcp_sock *tp = tcp_sk(sk);
+		key = tp->af_specific->md5_lookup(sk, child);
+		if (key != NULL) {
+			/*
+			 * We're using one, so create a matching key on the
+			 * newsk structure. If we fail to get memory then we
+			 * end up not copying the key across. Shucks.
+			 */
+			char *newkey = kmemdup(key->key, key->keylen,
+					       GFP_ATOMIC);
+			if (newkey) {
+				if (!tcp_alloc_md5sig_pool())
+					BUG();
+				tp->af_specific->md5_add(child, child,
+							 newkey,
+							 key->keylen);
+			}
+		}
+	}
+#endif
 
 	inet_csk_reqsk_queue_unlink(sk, req, prev);
 	inet_csk_reqsk_queue_removed(sk, req);
@@ -632,7 +693,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 	embryonic_reset:
 		NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
 		if (!(flg & TCP_FLAG_RST))
-			req->rsk_ops->send_reset(sk, skb);
 
 		inet_csk_reqsk_queue_drop(sk, req, prev);
 		return NULL;
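
The MD5SIG hunks above follow one lifecycle: tcp_time_wait() copies the connection's MD5 key into the timewait sock and takes a reference on the shared md5sig pool, and tcp_twsk_destructor() drops that reference when the timewait sock is torn down. The following is a minimal, hypothetical userspace sketch of that copy/refcount pattern only; the names (md5_pool_get, tw_copy_key, MAX_MD5_KEYLEN) are illustrative stand-ins and not the kernel API.

/*
 * Simplified model of the pattern added by this commit: copy the key into
 * the timewait object and take a pool reference; release the reference on
 * destruction only if a key was actually copied.
 */
#include <stdio.h>
#include <string.h>

#define MAX_MD5_KEYLEN 80		/* assumed limit, stands in for the kernel's key-length cap */

struct tw_sock {
	char tw_md5_key[MAX_MD5_KEYLEN];
	int  tw_md5_keylen;
};

static int md5_pool_users;		/* stand-in for the shared md5sig pool refcount */

static void md5_pool_get(void) { md5_pool_users++; }
static void md5_pool_put(void) { md5_pool_users--; }

/* Mirrors the tcp_time_wait() hunk: zero the slot, copy the key if present, take a ref. */
static void tw_copy_key(struct tw_sock *tw, const char *key, int keylen)
{
	memset(tw->tw_md5_key, 0, sizeof(tw->tw_md5_key));
	tw->tw_md5_keylen = 0;
	if (key && keylen <= (int)sizeof(tw->tw_md5_key)) {
		memcpy(tw->tw_md5_key, key, keylen);
		tw->tw_md5_keylen = keylen;
		md5_pool_get();
	}
}

/* Mirrors tcp_twsk_destructor(): release the pool only if a key was copied. */
static void tw_destroy(struct tw_sock *tw)
{
	if (tw->tw_md5_keylen)
		md5_pool_put();
}

int main(void)
{
	struct tw_sock tw;

	tw_copy_key(&tw, "secret", 6);
	printf("pool users after copy:    %d\n", md5_pool_users);	/* 1 */
	tw_destroy(&tw);
	printf("pool users after destroy: %d\n", md5_pool_users);	/* 0 */
	return 0;
}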