aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/tcp_minisocks.c
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2015-09-25 16:00:40 -0400
committerDavid S. Miller <davem@davemloft.net>2015-09-25 16:00:40 -0400
commit4d54d86546f62c7c4a0fe3b36a64c5e3b98ce1a9 (patch)
treebd48e072bacebb47bc6cd4ebb3483e9bd9da21b0 /net/ipv4/tcp_minisocks.c
parent6ea29da1d04f56e167ec8cc5ed15e927997d9d67 (diff)
parent1b70e977cef6ce7e7411c9bbec21f9adc8e29097 (diff)
Merge branch 'listener-sock-const'
Eric Dumazet says: ==================== dccp/tcp: constify listener sock Another patch bomb to prepare lockless TCP/DCCP LISTEN handling. SYNACK retransmits are built and sent without the listener socket being locked. Soon, initial SYNACK packets will have the same property. This series makes sure we did not do something wrong with this model, by adding a const qualifier in all the paths taken from synack building and transmit, for IPv4/IPv6 and TCP/dccp. The only potential problem was the rewrite of ecn bits for connections with DCTCP as congestion module, but this was a very minor one. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_minisocks.c')
-rw-r--r--net/ipv4/tcp_minisocks.c28
1 file changed, 18 insertions, 10 deletions
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 85830bb92d04..e0a87c238882 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -362,27 +362,35 @@ void tcp_twsk_destructor(struct sock *sk)
362} 362}
363EXPORT_SYMBOL_GPL(tcp_twsk_destructor); 363EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
364 364
365/* Warning : This function is called without sk_listener being locked.
366 * Be sure to read socket fields once, as their value could change under us.
367 */
365void tcp_openreq_init_rwin(struct request_sock *req, 368void tcp_openreq_init_rwin(struct request_sock *req,
366 struct sock *sk, struct dst_entry *dst) 369 const struct sock *sk_listener,
370 const struct dst_entry *dst)
367{ 371{
368 struct inet_request_sock *ireq = inet_rsk(req); 372 struct inet_request_sock *ireq = inet_rsk(req);
369 struct tcp_sock *tp = tcp_sk(sk); 373 const struct tcp_sock *tp = tcp_sk(sk_listener);
370 __u8 rcv_wscale; 374 u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);
375 int full_space = tcp_full_space(sk_listener);
371 int mss = dst_metric_advmss(dst); 376 int mss = dst_metric_advmss(dst);
377 u32 window_clamp;
378 __u8 rcv_wscale;
372 379
373 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) 380 if (user_mss && user_mss < mss)
374 mss = tp->rx_opt.user_mss; 381 mss = user_mss;
375 382
383 window_clamp = READ_ONCE(tp->window_clamp);
376 /* Set this up on the first call only */ 384 /* Set this up on the first call only */
377 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); 385 req->window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);
378 386
379 /* limit the window selection if the user enforce a smaller rx buffer */ 387 /* limit the window selection if the user enforce a smaller rx buffer */
380 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && 388 if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
381 (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0)) 389 (req->window_clamp > full_space || req->window_clamp == 0))
382 req->window_clamp = tcp_full_space(sk); 390 req->window_clamp = full_space;
383 391
384 /* tcp_full_space because it is guaranteed to be the first packet */ 392 /* tcp_full_space because it is guaranteed to be the first packet */
385 tcp_select_initial_window(tcp_full_space(sk), 393 tcp_select_initial_window(full_space,
386 mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), 394 mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
387 &req->rcv_wnd, 395 &req->rcv_wnd,
388 &req->window_clamp, 396 &req->window_clamp,