aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/tcp_input.c
diff options
context:
space:
mode:
authorEric Dumazet <edumazet@google.com>2018-07-23 12:28:21 -0400
committerDavid S. Miller <davem@davemloft.net>2018-07-23 15:01:36 -0400
commit58152ecbbcc6a0ce7fddd5bf5f6ee535834ece0c (patch)
tree67c0e66dbb4895d1c03116df3a63896c67431680 /net/ipv4/tcp_input.c
parent8541b21e781a22dce52a74fef0b9bed00404a1cd (diff)
tcp: add tcp_ooo_try_coalesce() helper
In case skb in out_of_order_queue is the result of multiple skbs coalescing, we would like to get a proper gso_segs counter tracking, so that future tcp_drop() can report an accurate number. I chose to not implement this tracking for skbs in receive queue, since they are not dropped, unless socket is disconnected. Signed-off-by: Eric Dumazet <edumazet@google.com> Acked-by: Soheil Hassas Yeganeh <soheil@google.com> Acked-by: Yuchung Cheng <ycheng@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--net/ipv4/tcp_input.c25
1 file changed, 21 insertions, 4 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b062a7692238..3bcd30a2ba06 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4358,6 +4358,23 @@ static bool tcp_try_coalesce(struct sock *sk,
4358 return true; 4358 return true;
4359} 4359}
4360 4360
/* Coalesce @from into @to for skbs sitting on the out-of-order queue,
 * while keeping to->gso_segs an accurate running total so that a later
 * tcp_drop() of the coalesced skb can report how many segments were lost.
 * Returns the result of tcp_try_coalesce() (true if @from was merged).
 */
4361static bool tcp_ooo_try_coalesce(struct sock *sk,
4362 struct sk_buff *to,
4363 struct sk_buff *from,
4364 bool *fragstolen)
4365{
4366 bool res = tcp_try_coalesce(sk, to, from, fragstolen);
4367
4368 /* In case tcp_drop() is called later, update to->gso_segs */
4369 if (res) {
/* Count each skb as at least one segment: a non-GSO skb may have
 * gso_segs == 0, but it still carries one segment's worth of data.
 */
4370 u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
4371 max_t(u16, 1, skb_shinfo(from)->gso_segs);
4372
/* Saturate at 0xFFFF — the max_t(u16, ...) above and this clamp
 * imply gso_segs is stored as a u16, so avoid wrap on overflow.
 */
4373 skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
4374 }
4375 return res;
4376}
4377
4361static void tcp_drop(struct sock *sk, struct sk_buff *skb) 4378static void tcp_drop(struct sock *sk, struct sk_buff *skb)
4362{ 4379{
4363 sk_drops_add(sk, skb); 4380 sk_drops_add(sk, skb);
@@ -4481,8 +4498,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
4481 /* In the typical case, we are adding an skb to the end of the list. 4498 /* In the typical case, we are adding an skb to the end of the list.
4482 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup. 4499 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
4483 */ 4500 */
4484 if (tcp_try_coalesce(sk, tp->ooo_last_skb, 4501 if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
4485 skb, &fragstolen)) { 4502 skb, &fragstolen)) {
4486coalesce_done: 4503coalesce_done:
4487 tcp_grow_window(sk, skb); 4504 tcp_grow_window(sk, skb);
4488 kfree_skb_partial(skb, fragstolen); 4505 kfree_skb_partial(skb, fragstolen);
@@ -4532,8 +4549,8 @@ coalesce_done:
4532 tcp_drop(sk, skb1); 4549 tcp_drop(sk, skb1);
4533 goto merge_right; 4550 goto merge_right;
4534 } 4551 }
4535 } else if (tcp_try_coalesce(sk, skb1, 4552 } else if (tcp_ooo_try_coalesce(sk, skb1,
4536 skb, &fragstolen)) { 4553 skb, &fragstolen)) {
4537 goto coalesce_done; 4554 goto coalesce_done;
4538 } 4555 }
4539 p = &parent->rb_right; 4556 p = &parent->rb_right;