aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/tcp_minisocks.c
diff options
context:
space:
mode:
authorEric Dumazet <dada1@cosmosbay.com>2007-02-22 06:20:44 -0500
committerDavid S. Miller <davem@sunset.davemloft.net>2007-04-26 01:23:21 -0400
commit54287cc178cf85dbae0decec8b4dc190bff757ad (patch)
tree44dedf7be5ac37fae65718d359ff598902247753 /net/ipv4/tcp_minisocks.c
parentcf4c6bf83d0fa070f60b1ba8124dfe0e65fbfbcc (diff)
[TCP]: Keep copied_seq, rcv_wup and rcv_next together.
I noticed in an oprofile study a cache miss in tcp_rcv_established() to read copied_seq.

ffffffff80400a80 <tcp_rcv_established>: /* tcp_rcv_established total: 4034293   2.0400 */
 55493  0.0281 :ffffffff80400bc9:   mov    0x4c8(%r12),%eax    copied_seq
543103  0.2746 :ffffffff80400bd1:   cmp    0x3e0(%r12),%eax    rcv_nxt

    if (tp->copied_seq == tp->rcv_nxt &&
        len - tcp_header_len <= tp->ucopy.len) {

In this function, the cache line 0x4c0 -> 0x500 is used only for this reading of the 'copied_seq' field.

rcv_wup and copied_seq should be next to the rcv_nxt field, to lower the number of active cache lines in hot paths (tcp_rcv_established(), tcp_poll(), ...).

As you suggested, I changed tcp_create_openreq_child() so that these fields are changed together, to avoid adding a new store buffer stall.

Patch is 64bit friendly (no new hole because of alignment constraints).

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_minisocks.c')
-rw-r--r--net/ipv4/tcp_minisocks.c6
1 file changed, 2 insertions, 4 deletions
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6b5c64f3c92..706932726a1 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -387,8 +387,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
387 /* Now setup tcp_sock */ 387 /* Now setup tcp_sock */
388 newtp = tcp_sk(newsk); 388 newtp = tcp_sk(newsk);
389 newtp->pred_flags = 0; 389 newtp->pred_flags = 0;
390 newtp->rcv_nxt = treq->rcv_isn + 1; 390 newtp->rcv_wup = newtp->copied_seq = newtp->rcv_nxt = treq->rcv_isn + 1;
391 newtp->snd_nxt = newtp->snd_una = newtp->snd_sml = treq->snt_isn + 1; 391 newtp->snd_sml = newtp->snd_una = newtp->snd_nxt = treq->snt_isn + 1;
392 392
393 tcp_prequeue_init(newtp); 393 tcp_prequeue_init(newtp);
394 394
@@ -422,10 +422,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
422 tcp_set_ca_state(newsk, TCP_CA_Open); 422 tcp_set_ca_state(newsk, TCP_CA_Open);
423 tcp_init_xmit_timers(newsk); 423 tcp_init_xmit_timers(newsk);
424 skb_queue_head_init(&newtp->out_of_order_queue); 424 skb_queue_head_init(&newtp->out_of_order_queue);
425 newtp->rcv_wup = treq->rcv_isn + 1;
426 newtp->write_seq = treq->snt_isn + 1; 425 newtp->write_seq = treq->snt_isn + 1;
427 newtp->pushed_seq = newtp->write_seq; 426 newtp->pushed_seq = newtp->write_seq;
428 newtp->copied_seq = treq->rcv_isn + 1;
429 427
430 newtp->rx_opt.saw_tstamp = 0; 428 newtp->rx_opt.saw_tstamp = 0;
431 429