author		Herbert Xu <herbert@gondor.apana.org.au>	2005-09-01 20:48:59 -0400
committer	David S. Miller <davem@davemloft.net>	2005-09-01 20:48:59 -0400
commit		ef015786152adaff5a6a8bf0c8ea2f70cee8059d (patch)
tree		3042db7e451c61aefc60c1463bb6e307ca510638
parent		d80d99d643090c3cf2b1f9fb3fadd1256f7e384f (diff)
[TCP]: Fix sk_forward_alloc underflow in tcp_sendmsg
I've finally found a potential cause of the sk_forward_alloc underflows
that people have been reporting sporadically.
When tcp_sendmsg tacks extra bits onto an existing TCP_PAGE, we don't
check sk_forward_alloc, even though a large amount of time may have
elapsed since we allocated the page. In the meantime someone could have
come along, liberated packets, and reclaimed sk_forward_alloc memory.

This patch makes tcp_sendmsg check sk_forward_alloc every time, just as
do_tcp_sendpages does.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
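
To make the race concrete, here is a self-contained userspace toy model of the accounting (a sketch, not kernel code: the struct and helpers are stand-ins for their kernel namesakes, and the reclaim that normally happens when freed packets return their allowance to the pool is compressed into a single assignment):

	/* Toy model of the sk_forward_alloc underflow (userspace, simplified). */
	#include <stdio.h>

	#define PAGE_SIZE 4096

	struct sock { int sk_forward_alloc; };

	/* Mirrors the era's skb_copy_to_page(): charges unconditionally,
	 * trusting the caller to have scheduled the memory first. */
	static void charge_copy(struct sock *sk, int copy)
	{
		sk->sk_forward_alloc -= copy;
	}

	int main(void)
	{
		struct sock sk = { .sk_forward_alloc = 0 };

		/* First sendmsg pass: schedule a page's worth, use part of it. */
		sk.sk_forward_alloc += PAGE_SIZE;	/* sk_stream_wmem_schedule() */
		charge_copy(&sk, 1000);			/* copy 1000 bytes onto TCP_PAGE */

		/* Much later: packets are freed elsewhere and the spare
		 * allowance is reclaimed (compressed here to an assignment). */
		sk.sk_forward_alloc = 0;

		/* Second pass tacks more bytes onto the cached TCP_PAGE without
		 * re-running the schedule check -- the pre-fix behaviour. */
		charge_copy(&sk, 2000);

		printf("sk_forward_alloc = %d\n", sk.sk_forward_alloc);	/* -2000 */
		return 0;
	}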
 include/net/sock.h |  5 ++---
 net/ipv4/tcp.c     | 14 +++++++++-----
 2 files changed, 11 insertions(+), 8 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index e51e626e9af1..cf628261da52 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1232,9 +1232,8 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
 {
 	struct page *page = NULL;
 
-	if (sk_stream_wmem_schedule(sk, PAGE_SIZE))
-		page = alloc_pages(sk->sk_allocation, 0);
-	else {
+	page = alloc_pages(sk->sk_allocation, 0);
+	if (!page) {
 		sk->sk_prot->enter_memory_pressure();
 		sk_stream_moderate_sndbuf(sk);
 	}
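
After this hunk, sk_stream_alloc_page() no longer pre-charges PAGE_SIZE itself; the caller is now responsible for scheduling exactly the bytes it will copy. Pieced together, the helper reads as follows (the trailing return falls outside the hunk context but is implied by the signature; the comment is added here):

	static inline struct page *sk_stream_alloc_page(struct sock *sk)
	{
		struct page *page = NULL;

		page = alloc_pages(sk->sk_allocation, 0);
		if (!page) {
			/* Allocation failed: signal pressure, shrink sndbuf. */
			sk->sk_prot->enter_memory_pressure();
			sk_stream_moderate_sndbuf(sk);
		}
		return page;
	}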
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 854f6d0c4bb3..cbcc9fc47783 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -769,19 +769,23 @@ new_segment:
 			if (off == PAGE_SIZE) {
 				put_page(page);
 				TCP_PAGE(sk) = page = NULL;
+				TCP_OFF(sk) = off = 0;
 			}
-		}
+		} else
+			BUG_ON(off);
+
+		if (copy > PAGE_SIZE - off)
+			copy = PAGE_SIZE - off;
+
+		if (!sk_stream_wmem_schedule(sk, copy))
+			goto wait_for_memory;
 
 		if (!page) {
 			/* Allocate new cache page. */
 			if (!(page = sk_stream_alloc_page(sk)))
 				goto wait_for_memory;
-			off = 0;
 		}
 
-		if (copy > PAGE_SIZE - off)
-			copy = PAGE_SIZE - off;
-
 		/* Time to copy data. We are close to
 		 * the end! */
 		err = skb_copy_to_page(sk, from, skb, page,
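
The net effect of this hunk is a reordering: the copy size is clamped and sk_stream_wmem_schedule() runs before the cached-page check, so the quota is re-verified on every pass, including the one that tacks more bytes onto an existing TCP_PAGE. Read off the + side, the patched sequence is (comments added here for orientation):

	if (copy > PAGE_SIZE - off)
		copy = PAGE_SIZE - off;		/* clamp to the space left on the page */

	if (!sk_stream_wmem_schedule(sk, copy))	/* re-check sk_forward_alloc every time */
		goto wait_for_memory;

	if (!page) {				/* only now allocate a fresh cache page */
		if (!(page = sk_stream_alloc_page(sk)))
			goto wait_for_memory;
	}

The new BUG_ON(off) asserts the invariant that off is zero whenever no page is cached, which the added TCP_OFF(sk) = off = 0 reset maintains; that in turn is what lets the "off = 0" line be dropped from the allocation branch.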