path: root/net/ipv4/tcp_input.c
author	Eric Dumazet <edumazet@google.com>	2018-07-23 12:28:19 -0400
committer	David S. Miller <davem@davemloft.net>	2018-07-23 15:01:36 -0400
commit	3d4bf93ac12003f9b8e1e2de37fe27983deebdcf (patch)
tree	f133ddfef1cad281067830449ce844e03cf15ba0 /net/ipv4/tcp_input.c
parent	f4a3313d8e2ca9fd8d8f45e40a2903ba782607e7 (diff)
tcp: detect malicious patterns in tcp_collapse_ofo_queue()
In case an attacker feeds tiny packets completely out of order,
tcp_collapse_ofo_queue() might scan the whole rb-tree, performing
expensive copies, but not changing socket memory usage at all.

1) Do not attempt to collapse tiny skbs.
2) Add logic to exit early when too many tiny skbs are detected.

We prefer not doing aggressive collapsing (which copies packets)
for pathological flows, and revert to tcp_prune_ofo_queue() which
will be less expensive.

In the future, we might add the possibility of terminating flows
that are proven to be malicious.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
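The heuristic this patch introduces can be illustrated outside the kernel. The sketch below is a userspace approximation, not kernel code: MEM_QUANTUM, SHARED_INFO_COST, RCVBUF and struct mock_range are assumed stand-ins for SK_MEM_QUANTUM, the skb_shared_info overhead in SKB_WITH_OVERHEAD(), sk->sk_rcvbuf and the skbs making up one out-of-order range. A range is collapsed only if it holds more than its head skb or spans at least one memory quantum of payload; otherwise its truesize is accumulated into sum_tiny, and once tiny ranges exceed one eighth of the receive buffer the walk bails out early.

/*
 * Userspace sketch (not kernel code) of the heuristic added to
 * tcp_collapse_ofo_queue().  The constants and struct mock_range are
 * illustrative assumptions, not the kernel's actual definitions.
 */
#include <stdio.h>

#define MEM_QUANTUM       4096u  /* stand-in for SK_MEM_QUANTUM             */
#define SHARED_INFO_COST   320u  /* rough stand-in for skb_shared_info cost */
#define WITH_OVERHEAD(x)  ((x) - SHARED_INFO_COST)  /* ~SKB_WITH_OVERHEAD() */
#define RCVBUF          131072u  /* pretend sk->sk_rcvbuf                   */

struct mock_range {
	unsigned int start, end;      /* sequence span covered by the range */
	unsigned int head_truesize;   /* truesize of the first skb          */
	unsigned int range_truesize;  /* summed truesize of every skb in it */
};

int main(void)
{
	/* A pathological ofo queue: several 1-byte, single-skb ranges. */
	struct mock_range q[] = {
		{ 1000, 1001,  768,  768 },
		{ 1002, 1003,  768,  768 },
		{ 1004, 1005,  768,  768 },
		{ 2000, 9000,  768, 6144 },  /* multi-skb range: worth collapsing */
	};
	unsigned int sum_tiny = 0;
	size_t i;

	for (i = 0; i < sizeof(q) / sizeof(q[0]); i++) {
		struct mock_range *r = &q[i];

		/* Collapse only if the range holds more than its head skb,
		 * or spans at least one memory quantum of payload.
		 */
		if (r->range_truesize != r->head_truesize ||
		    r->end - r->start >= WITH_OVERHEAD(MEM_QUANTUM)) {
			printf("range %zu: collapse (copy skbs)\n", i);
		} else {
			sum_tiny += r->range_truesize;
			/* Too many tiny ranges: bail out, pruning is cheaper. */
			if (sum_tiny > RCVBUF >> 3) {
				printf("range %zu: too many tiny ranges, give up\n", i);
				return 0;
			}
			printf("range %zu: tiny, skipped (sum_tiny=%u)\n", i, sum_tiny);
		}
	}
	return 0;
}

With the pathological queue above, the three 1-byte single-skb ranges are skipped and only the multi-skb range is collapsed; feed enough tiny ranges and the loop returns early, mirroring how the kernel falls back to tcp_prune_ofo_queue() rather than performing copies that free no memory.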
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	15
1 file changed, 13 insertions, 2 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 53289911362a..78068b902e7b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4902,6 +4902,7 @@ end:
 static void tcp_collapse_ofo_queue(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	u32 range_truesize, sum_tiny = 0;
 	struct sk_buff *skb, *head;
 	u32 start, end;
 
@@ -4913,6 +4914,7 @@ new_range:
 	}
 	start = TCP_SKB_CB(skb)->seq;
 	end = TCP_SKB_CB(skb)->end_seq;
+	range_truesize = skb->truesize;
 
 	for (head = skb;;) {
 		skb = skb_rb_next(skb);
@@ -4923,11 +4925,20 @@ new_range:
 		if (!skb ||
 		    after(TCP_SKB_CB(skb)->seq, end) ||
 		    before(TCP_SKB_CB(skb)->end_seq, start)) {
-			tcp_collapse(sk, NULL, &tp->out_of_order_queue,
-				     head, skb, start, end);
+			/* Do not attempt collapsing tiny skbs */
+			if (range_truesize != head->truesize ||
+			    end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
+				tcp_collapse(sk, NULL, &tp->out_of_order_queue,
+					     head, skb, start, end);
+			} else {
+				sum_tiny += range_truesize;
+				if (sum_tiny > sk->sk_rcvbuf >> 3)
+					return;
+			}
 			goto new_range;
 		}
 
+		range_truesize += skb->truesize;
 		if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
 			start = TCP_SKB_CB(skb)->seq;
 		if (after(TCP_SKB_CB(skb)->end_seq, end))