Diffstat (limited to 'net/ipv4')
-rw-r--r--   net/ipv4/tcp_ipv4.c      | 20
-rw-r--r--   net/ipv4/tcp_minisocks.c | 46
2 files changed, 58 insertions(+), 8 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 649a36d99c73..a2bcac9b388e 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1833,6 +1833,19 @@ static int tcp_v4_init_sock(struct sock *sk)
 	tp->af_specific = &tcp_sock_ipv4_specific;
 #endif
 
+	/* TCP Cookie Transactions */
+	if (sysctl_tcp_cookie_size > 0) {
+		/* Default, cookies without s_data_payload. */
+		tp->cookie_values =
+			kzalloc(sizeof(*tp->cookie_values),
+				sk->sk_allocation);
+		if (tp->cookie_values != NULL)
+			kref_init(&tp->cookie_values->kref);
+	}
+	/* Presumed zeroed, in order of appearance:
+	 *	cookie_in_always, cookie_out_never,
+	 *	s_data_constant, s_data_in, s_data_out
+	 */
 	sk->sk_sndbuf = sysctl_tcp_wmem[1];
 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
@@ -1886,6 +1899,13 @@ void tcp_v4_destroy_sock(struct sock *sk)
 		sk->sk_sndmsg_page = NULL;
 	}
 
+	/* TCP Cookie Transactions */
+	if (tp->cookie_values != NULL) {
+		kref_put(&tp->cookie_values->kref,
+			 tcp_cookie_values_release);
+		tp->cookie_values = NULL;
+	}
+
 	percpu_counter_dec(&tcp_sockets_allocated);
 }
 
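The two hunks above pair a kzalloc()/kref_init() in tcp_v4_init_sock() with a kref_put() in tcp_v4_destroy_sock(), so the cookie state lives exactly as long as its last reference. A minimal sketch of that kref lifecycle, using a hypothetical demo_values structure in place of struct tcp_cookie_values (whose full layout and release callback are not shown in this diff):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Hypothetical stand-in for the refcounted cookie state. */
struct demo_values {
	struct kref kref;
	int cookie_desired;
};

/* Release callback, invoked only when the last reference is dropped. */
static void demo_values_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_values, kref));
}

static struct demo_values *demo_alloc(gfp_t gfp)
{
	struct demo_values *v = kzalloc(sizeof(*v), gfp);

	if (v != NULL)
		kref_init(&v->kref);	/* reference count starts at 1 */
	return v;
}

static void demo_put(struct demo_values *v)
{
	if (v != NULL)
		kref_put(&v->kref, demo_values_release);
}

Note that tcp_v4_destroy_sock() also clears tp->cookie_values after the put, so a stale pointer is never reused.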
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index d3f6bbfc76f0..96852af43ca7 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -383,14 +383,43 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct tcp_request_sock *treq = tcp_rsk(req);
 	struct inet_connection_sock *newicsk = inet_csk(newsk);
-	struct tcp_sock *newtp;
+	struct tcp_sock *newtp = tcp_sk(newsk);
+	struct tcp_sock *oldtp = tcp_sk(sk);
+	struct tcp_cookie_values *oldcvp = oldtp->cookie_values;
+
+	/* TCP Cookie Transactions require space for the cookie pair,
+	 * as it differs for each connection.  There is no need to
+	 * copy any s_data_payload stored at the original socket.
+	 * Failure will prevent resuming the connection.
+	 *
+	 * Presumed copied, in order of appearance:
+	 *	cookie_in_always, cookie_out_never
+	 */
+	if (oldcvp != NULL) {
+		struct tcp_cookie_values *newcvp =
+			kzalloc(sizeof(*newtp->cookie_values),
+				GFP_ATOMIC);
+
+		if (newcvp != NULL) {
+			kref_init(&newcvp->kref);
+			newcvp->cookie_desired =
+					oldcvp->cookie_desired;
+			newtp->cookie_values = newcvp;
+		} else {
+			/* Not Yet Implemented */
+			newtp->cookie_values = NULL;
+		}
+	}
 
 	/* Now setup tcp_sock */
-	newtp = tcp_sk(newsk);
 	newtp->pred_flags = 0;
-	newtp->rcv_wup = newtp->copied_seq = newtp->rcv_nxt = treq->rcv_isn + 1;
-	newtp->snd_sml = newtp->snd_una = newtp->snd_nxt = treq->snt_isn + 1;
-	newtp->snd_up = treq->snt_isn + 1;
+
+	newtp->rcv_wup = newtp->copied_seq =
+	newtp->rcv_nxt = treq->rcv_isn + 1;
+
+	newtp->snd_sml = newtp->snd_una =
+	newtp->snd_nxt = newtp->snd_up =
+		treq->snt_isn + 1 + tcp_s_data_size(oldtp);
 
 	tcp_prequeue_init(newtp);
 
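In the hunk above, the child socket's initial send sequence variables are advanced by tcp_s_data_size(oldtp) beyond the usual snt_isn + 1. The helper itself is not part of this diff; presumably it reports the number of constant s_data bytes carried with the cookie, and 0 when cookie transactions (or constant s_data) are not in use, so ordinary connections keep the old accounting. A sketch of that assumed shape:

/* Assumed definition, not shown in this diff: size of the constant
 * s_data accompanying the SYN, or 0 when TCP Cookie Transactions are
 * not in use, so plain TCP still sees snt_isn + 1.
 */
static inline int tcp_s_data_size(const struct tcp_sock *tp)
{
	return (tp->cookie_values != NULL && tp->cookie_values->s_data_constant)
		? tp->cookie_values->s_data_desired
		: 0;
}

With that, snd_una/snd_nxt/snd_up (and write_seq/pushed_seq in the next hunk) all start one past the SYN plus the constant payload, keeping the sender's view consistent with what the SYN actually consumed.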
@@ -423,8 +452,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 	tcp_set_ca_state(newsk, TCP_CA_Open);
 	tcp_init_xmit_timers(newsk);
 	skb_queue_head_init(&newtp->out_of_order_queue);
-	newtp->write_seq = treq->snt_isn + 1;
-	newtp->pushed_seq = newtp->write_seq;
+	newtp->write_seq = newtp->pushed_seq =
+		treq->snt_isn + 1 + tcp_s_data_size(oldtp);
 
 	newtp->rx_opt.saw_tstamp = 0;
 
@@ -590,7 +619,8 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 	 * Invalid ACK: reset will be sent by listening socket
 	 */
 	if ((flg & TCP_FLAG_ACK) &&
-	    (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
+	    (TCP_SKB_CB(skb)->ack_seq !=
+	     tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
 		return sk;
 
 	/* Also, it would be not so bad idea to check rcv_tsecr, which
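The final hunk applies the same offset on the listener side: the ACK that completes the handshake must acknowledge the SYN plus any constant s_data, not just snt_isn + 1. A small illustrative check of that arithmetic (the names here are hypothetical; only the "+ 1 + s_data" accounting comes from the hunk above):

#include <linux/types.h>

/* Expected ACK = ISN we sent, +1 for the SYN, + constant s_data bytes. */
static bool demo_ack_valid(u32 ack_seq, u32 snt_isn, u32 s_data_size)
{
	return ack_seq == snt_isn + 1 + s_data_size;
}

For example, with snt_isn = 1000 and 8 bytes of constant s_data, only ack_seq == 1009 passes; with no cookie state, tcp_s_data_size() contributes 0 and the check reduces to the original snt_isn + 1.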