aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/tcp_input.c
diff options
context:
space:
mode:
authorEric Dumazet <edumazet@google.com>2018-11-26 17:49:12 -0500
committerDavid S. Miller <davem@davemloft.net>2018-11-27 19:38:08 -0500
commite7395f1f4ba24dc4116b0b87b4ed0664109b450a (patch)
tree796a8c62478ec3c83958dc272cbe23a3d1ea3ca6 /net/ipv4/tcp_input.c
parent16e8c4ca21a238cdf0355475bf15bd72e92feb8f (diff)
tcp: remove hdrlen argument from tcp_queue_rcv()
Only one caller needs to pull TCP headers, so let's move __skb_pull() to the caller side. Signed-off-by: Eric Dumazet <edumazet@google.com> Acked-by: Yuchung Cheng <ycheng@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--net/ipv4/tcp_input.c13
1 file changed, 6 insertions, 7 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 568dbf3b711a..f32397890b6d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4603,13 +4603,12 @@ end:
4603 } 4603 }
4604} 4604}
4605 4605
4606static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen, 4606static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
4607 bool *fragstolen) 4607 bool *fragstolen)
4608{ 4608{
4609 int eaten; 4609 int eaten;
4610 struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue); 4610 struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue);
4611 4611
4612 __skb_pull(skb, hdrlen);
4613 eaten = (tail && 4612 eaten = (tail &&
4614 tcp_try_coalesce(sk, tail, 4613 tcp_try_coalesce(sk, tail,
4615 skb, fragstolen)) ? 1 : 0; 4614 skb, fragstolen)) ? 1 : 0;
@@ -4660,7 +4659,7 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
4660 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size; 4659 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size;
4661 TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; 4660 TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1;
4662 4661
4663 if (tcp_queue_rcv(sk, skb, 0, &fragstolen)) { 4662 if (tcp_queue_rcv(sk, skb, &fragstolen)) {
4664 WARN_ON_ONCE(fragstolen); /* should not happen */ 4663 WARN_ON_ONCE(fragstolen); /* should not happen */
4665 __kfree_skb(skb); 4664 __kfree_skb(skb);
4666 } 4665 }
@@ -4720,7 +4719,7 @@ queue_and_out:
4720 goto drop; 4719 goto drop;
4721 } 4720 }
4722 4721
4723 eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); 4722 eaten = tcp_queue_rcv(sk, skb, &fragstolen);
4724 if (skb->len) 4723 if (skb->len)
4725 tcp_event_data_recv(sk, skb); 4724 tcp_event_data_recv(sk, skb);
4726 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 4725 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
@@ -5596,8 +5595,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
5596 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS); 5595 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
5597 5596
5598 /* Bulk data transfer: receiver */ 5597 /* Bulk data transfer: receiver */
5599 eaten = tcp_queue_rcv(sk, skb, tcp_header_len, 5598 __skb_pull(skb, tcp_header_len);
5600 &fragstolen); 5599 eaten = tcp_queue_rcv(sk, skb, &fragstolen);
5601 5600
5602 tcp_event_data_recv(sk, skb); 5601 tcp_event_data_recv(sk, skb);
5603 5602