diff options
author | Eric Dumazet <edumazet@google.com> | 2015-11-19 00:03:33 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-11-20 10:57:33 -0500 |
commit | 5d4c9bfbabdb1d497f21afd81501e5c54b0c85d9 (patch) | |
tree | 2d59e7176c7c351ca7113839fa6f8db42762d43e /net/ipv4/tcp_input.c | |
parent | dd52bc2b4ed16db66f9347aa263d8f1dc889b4b6 (diff) |
tcp: fix potential huge kmalloc() calls in TCP_REPAIR
tcp_send_rcvq() is used for re-injecting data into tcp receive queue.
Problems:
- No check against size is performed, allowing a user to fool the kernel
into attempting very large memory allocations, eventually triggering
OOM when memory is fragmented.
- In case of a fault during the copy we do not return the correct errno.
Let's use alloc_skb_with_frags() to cook optimal skbs.
Fixes: 292e8d8c8538 ("tcp: Move rcvq sending to tcp_input.c")
Fixes: c0e88ff0f256 ("tcp: Repair socket queues")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Acked-by: Pavel Emelyanov <xemul@parallels.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r-- | net/ipv4/tcp_input.c | 22 |
1 files changed, 19 insertions, 3 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index fdd88c3803a6..a4a0b6b3bcf2 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -4481,19 +4481,34 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int | |||
4481 | int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) | 4481 | int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) |
4482 | { | 4482 | { |
4483 | struct sk_buff *skb; | 4483 | struct sk_buff *skb; |
4484 | int err = -ENOMEM; | ||
4485 | int data_len = 0; | ||
4484 | bool fragstolen; | 4486 | bool fragstolen; |
4485 | 4487 | ||
4486 | if (size == 0) | 4488 | if (size == 0) |
4487 | return 0; | 4489 | return 0; |
4488 | 4490 | ||
4489 | skb = alloc_skb(size, sk->sk_allocation); | 4491 | if (size > PAGE_SIZE) { |
4492 | int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS); | ||
4493 | |||
4494 | data_len = npages << PAGE_SHIFT; | ||
4495 | size = data_len + (size & ~PAGE_MASK); | ||
4496 | } | ||
4497 | skb = alloc_skb_with_frags(size - data_len, data_len, | ||
4498 | PAGE_ALLOC_COSTLY_ORDER, | ||
4499 | &err, sk->sk_allocation); | ||
4490 | if (!skb) | 4500 | if (!skb) |
4491 | goto err; | 4501 | goto err; |
4492 | 4502 | ||
4503 | skb_put(skb, size - data_len); | ||
4504 | skb->data_len = data_len; | ||
4505 | skb->len = size; | ||
4506 | |||
4493 | if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) | 4507 | if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) |
4494 | goto err_free; | 4508 | goto err_free; |
4495 | 4509 | ||
4496 | if (memcpy_from_msg(skb_put(skb, size), msg, size)) | 4510 | err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); |
4511 | if (err) | ||
4497 | goto err_free; | 4512 | goto err_free; |
4498 | 4513 | ||
4499 | TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; | 4514 | TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; |
@@ -4509,7 +4524,8 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) | |||
4509 | err_free: | 4524 | err_free: |
4510 | kfree_skb(skb); | 4525 | kfree_skb(skb); |
4511 | err: | 4526 | err: |
4512 | return -ENOMEM; | 4527 | return err; |
4528 | |||
4513 | } | 4529 | } |
4514 | 4530 | ||
4515 | static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) | 4531 | static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) |