about | summary | refs | log | tree | commit | diff | stats
path: root/net/ipv4/tcp.c
diff options
context:
space:
mode:
author: Eric Dumazet <edumazet@google.com> 2014-09-22 19:29:32 -0400
committer: David S. Miller <davem@davemloft.net> 2014-09-26 15:40:06 -0400
commit: f4a775d14489a801a5b8b0540e23ab82e2703091 (patch)
tree: fea57ec3004b0586f0fc25f4cf3f0dc8631dcb5f /net/ipv4/tcp.c
parent: aebac744932439b5f869869f07f406a9cced465d (diff)
net: introduce __skb_header_release()
While profiling the TCP stack, I noticed one useless atomic operation in tcp_sendmsg(), caused by skb_header_release(). It turns out all current skb_header_release() users have a fresh skb that no other user can see, so we can avoid one atomic operation. Introduce __skb_header_release() to clearly document this. This gave me a 1.5 % improvement on a TCP_RR workload. Signed-off-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--net/ipv4/tcp.c4
1 file changed, 2 insertions, 2 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 070aeff1b131..553b01f52f71 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -609,7 +609,7 @@ static inline bool forced_push(const struct tcp_sock *tp)
 609		return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
 610	}
 611
-612	static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
+612	static void skb_entail(struct sock *sk, struct sk_buff *skb)
 613	{
 614		struct tcp_sock *tp = tcp_sk(sk);
 615		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
@@ -618,7 +618,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
 618		tcb->seq = tcb->end_seq = tp->write_seq;
 619		tcb->tcp_flags = TCPHDR_ACK;
 620		tcb->sacked = 0;
-621		skb_header_release(skb);
+621		__skb_header_release(skb);
 622		tcp_add_write_queue_tail(sk, skb);
 623		sk->sk_wmem_queued += skb->truesize;
 624		sk_mem_charge(sk, skb->truesize);