Diffstat (limited to 'net/ipv4/tcp_minisocks.c')
-rw-r--r--	net/ipv4/tcp_minisocks.c | 73
1 file changed, 50 insertions(+), 23 deletions(-)
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 4c03598ed924..5fabff9ac6d6 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -20,19 +20,14 @@
 
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/sysctl.h>
 #include <linux/workqueue.h>
 #include <net/tcp.h>
 #include <net/inet_common.h>
 #include <net/xfrm.h>
 
-#ifdef CONFIG_SYSCTL
-#define SYNC_INIT 0 /* let the user enable it */
-#else
-#define SYNC_INIT 1
-#endif
-
-int sysctl_tcp_syncookies __read_mostly = SYNC_INIT;
+int sysctl_tcp_syncookies __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_tcp_syncookies);
 
 int sysctl_tcp_abort_on_overflow __read_mostly;
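Note: the SYNC_INIT indirection (default 0 when CONFIG_SYSCTL was set, so an admin had to opt in; 1 otherwise) is gone, and syncookies now default to enabled in every configuration. The value is still exported through the net.ipv4.tcp_syncookies sysctl; a rough sketch of the ctl_table entry that exposes it follows (the real entry lives in net/ipv4/sysctl_net_ipv4.c, and the field values here are from memory, illustrative only, not part of this diff):

	/* Illustrative only: approximate shape of the sysctl entry for
	 * sysctl_tcp_syncookies. */
	static struct ctl_table tcp_syncookies_ctl[] = {
		{
			.procname	= "tcp_syncookies",
			.data		= &sysctl_tcp_syncookies,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec
		},
		{ }
	};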
@@ -96,13 +91,14 @@ enum tcp_tw_status
 tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 			   const struct tcphdr *th)
 {
-	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
 	struct tcp_options_received tmp_opt;
+	u8 *hash_location;
+	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
 	int paws_reject = 0;
 
 	tmp_opt.saw_tstamp = 0;
 	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
-		tcp_parse_options(skb, &tmp_opt, 0);
+		tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
 
 		if (tmp_opt.saw_tstamp) {
 			tmp_opt.ts_recent = tcptw->tw_ts_recent;
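Note: tcp_parse_options() has gained a third parameter used by TCP Cookie Transactions: the caller passes the address of a local u8 * (hash_location here) which the parser points at the cookie/hash bytes inside the TCP options when such an option is present. Both call sites in this file, this timewait path and tcp_check_req() further down, follow the same pattern of declaring the pointer next to tmp_opt. A minimal caller sketch, assuming the post-change prototype; the wrapper function itself is hypothetical:

	static void example_parse(struct sk_buff *skb)
	{
		struct tcp_options_received tmp_opt;
		u8 *hash_location = NULL;	/* set by the parser if a cookie option is seen */

		tmp_opt.saw_tstamp = 0;
		tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

		if (tmp_opt.saw_tstamp) {
			/* timestamps present; PAWS checks may proceed */
		}
	}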
@@ -389,14 +385,43 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct tcp_request_sock *treq = tcp_rsk(req);
 	struct inet_connection_sock *newicsk = inet_csk(newsk);
-	struct tcp_sock *newtp;
+	struct tcp_sock *newtp = tcp_sk(newsk);
+	struct tcp_sock *oldtp = tcp_sk(sk);
+	struct tcp_cookie_values *oldcvp = oldtp->cookie_values;
+
+	/* TCP Cookie Transactions require space for the cookie pair,
+	 * as it differs for each connection. There is no need to
+	 * copy any s_data_payload stored at the original socket.
+	 * Failure will prevent resuming the connection.
+	 *
+	 * Presumed copied, in order of appearance:
+	 *	cookie_in_always, cookie_out_never
+	 */
+	if (oldcvp != NULL) {
+		struct tcp_cookie_values *newcvp =
+			kzalloc(sizeof(*newtp->cookie_values),
+				GFP_ATOMIC);
+
+		if (newcvp != NULL) {
+			kref_init(&newcvp->kref);
+			newcvp->cookie_desired =
+					oldcvp->cookie_desired;
+			newtp->cookie_values = newcvp;
+		} else {
+			/* Not Yet Implemented */
+			newtp->cookie_values = NULL;
+		}
+	}
 
 	/* Now setup tcp_sock */
-	newtp = tcp_sk(newsk);
 	newtp->pred_flags = 0;
-	newtp->rcv_wup = newtp->copied_seq = newtp->rcv_nxt = treq->rcv_isn + 1;
-	newtp->snd_sml = newtp->snd_una = newtp->snd_nxt = treq->snt_isn + 1;
-	newtp->snd_up = treq->snt_isn + 1;
+
+	newtp->rcv_wup = newtp->copied_seq =
+		newtp->rcv_nxt = treq->rcv_isn + 1;
+
+	newtp->snd_sml = newtp->snd_una =
+		newtp->snd_nxt = newtp->snd_up =
+		treq->snt_isn + 1 + tcp_s_data_size(oldtp);
 
 	tcp_prequeue_init(newtp);
 
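Note: two things happen in this hunk. When the listening socket carries a tcp_cookie_values structure, a fresh one is kzalloc()'d for the child and reference-counted via kref_init(); only cookie_desired is copied, since the cookie pair itself is per connection. Independently, every send-side sequence number derived from snt_isn is now offset by tcp_s_data_size(oldtp), the length of any s_data payload the listener sent along with its SYN-ACK under TCP Cookie Transactions (zero when the feature is unused). A worked example with an assumed payload size:

	/* Assumption for illustration: snt_isn == 1000 and the listener sent
	 * 16 bytes of s_data with its SYN-ACK.  The child then starts with
	 * snd_una == snd_nxt == snd_up == 1000 + 1 + 16 == 1017:
	 * the ISN, plus one for the SYN itself, plus the s_data bytes.
	 */
	static inline u32 child_snd_nxt(u32 snt_isn, u32 s_data_size)
	{
		return snt_isn + 1 + s_data_size;
	}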
@@ -429,8 +454,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 	tcp_set_ca_state(newsk, TCP_CA_Open);
 	tcp_init_xmit_timers(newsk);
 	skb_queue_head_init(&newtp->out_of_order_queue);
-	newtp->write_seq = treq->snt_isn + 1;
-	newtp->pushed_seq = newtp->write_seq;
+	newtp->write_seq = newtp->pushed_seq =
+		treq->snt_isn + 1 + tcp_s_data_size(oldtp);
 
 	newtp->rx_opt.saw_tstamp = 0;
 
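Note: write_seq and pushed_seq receive the same tcp_s_data_size() offset as snd_una/snd_nxt/snd_up above, so all of the child's send-side markers agree on where new data begins. An illustrative (not in-tree) invariant check for a freshly created child socket:

	static inline bool child_send_markers_consistent(const struct tcp_sock *tp)
	{
		return tp->write_seq == tp->pushed_seq &&
		       tp->write_seq == tp->snd_nxt &&
		       tp->snd_nxt   == tp->snd_una;
	}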
@@ -476,7 +501,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 		if (newtp->af_specific->md5_lookup(sk, newsk))
 			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
 #endif
-		if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
+		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
 			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
 		newtp->rx_opt.mss_clamp = req->mss;
 		TCP_ECN_openreq_child(newtp, req);
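Note: TCP_MIN_RCVMSS is replaced by TCP_MSS_DEFAULT, the conventional 536-byte default MSS (RFC 1122's 576-byte minimum reassembly buffer minus 20 bytes of IP header and 20 bytes of TCP header). The definition below is quoted from memory and should be checked against include/net/tcp.h:

	#define TCP_MSS_DEFAULT		536U	/* RFC 1122: 576 - 40 bytes of headers */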
@@ -495,15 +520,16 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 			   struct request_sock *req,
 			   struct request_sock **prev)
 {
+	struct tcp_options_received tmp_opt;
+	u8 *hash_location;
+	struct sock *child;
 	const struct tcphdr *th = tcp_hdr(skb);
 	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
 	int paws_reject = 0;
-	struct tcp_options_received tmp_opt;
-	struct sock *child;
 
 	tmp_opt.saw_tstamp = 0;
 	if (th->doff > (sizeof(struct tcphdr)>>2)) {
-		tcp_parse_options(skb, &tmp_opt, 0);
+		tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
 
 		if (tmp_opt.saw_tstamp) {
 			tmp_opt.ts_recent = req->ts_recent;
@@ -537,7 +563,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 		 * Enforce "SYN-ACK" according to figure 8, figure 6
 		 * of RFC793, fixed by RFC1122.
 		 */
-		req->rsk_ops->rtx_syn_ack(sk, req);
+		req->rsk_ops->rtx_syn_ack(sk, req, NULL);
 		return NULL;
 	}
 
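Note: rsk_ops->rtx_syn_ack() now takes a third argument, the request_values pointer that TCP Cookie Transactions uses to hand cookie/s_data material to the SYN-ACK builder. A pure retransmission of the SYN-ACK has nothing new to pass, hence NULL here. Assumed shape of the updated operation; see include/net/request_sock.h for the authoritative declaration:

	/* Excerpt reproduced from memory, for context only. */
	struct request_sock_ops_excerpt {
		int (*rtx_syn_ack)(struct sock *sk,
				   struct request_sock *req,
				   struct request_values *rvp);
	};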
@@ -596,7 +622,8 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 	 * Invalid ACK: reset will be sent by listening socket
 	 */
 	if ((flg & TCP_FLAG_ACK) &&
-	    (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
+	    (TCP_SKB_CB(skb)->ack_seq !=
+	     tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
 		return sk;
 
 	/* Also, it would be not so bad idea to check rcv_tsecr, which
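Note: the ACK validity check must account for the same s_data offset: a peer that accepted the listener's SYN-ACK (SYN plus any s_data payload) acknowledges snt_isn + 1 + s_data_size, not just snt_isn + 1. Since tcp_s_data_size() returns 0 when s_data is not in use, the check degenerates to the old comparison. As a standalone predicate:

	static inline bool ack_matches_synack(u32 ack_seq, u32 snt_isn, u32 s_data_size)
	{
		return ack_seq == snt_isn + 1 + s_data_size;
	}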
@@ -702,7 +729,7 @@ int tcp_child_process(struct sock *parent, struct sock *child,
 		 * in main socket hash table and lock on listening
 		 * socket does not protect us more.
 		 */
-		sk_add_backlog(child, skb);
+		__sk_add_backlog(child, skb);
 	}
 
 	bh_unlock_sock(child);
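Note: this appears to track the backlog-helper rename, under which plain sk_add_backlog() became the bounded variant that can refuse the skb when the socket backlog is over its limit, while __sk_add_backlog() keeps the old unconditional behaviour; this call site keeps the unconditional semantics under the new name. A hedged sketch of how the bounded variant is typically used elsewhere (the error handling is illustrative, not taken from this file):

	static void queue_to_backlog(struct sock *sk, struct sk_buff *skb)
	{
		if (sk_add_backlog(sk, skb))	/* bounded: fails when the backlog is full */
			kfree_skb(skb);		/* caller chooses the overflow policy */
	}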