author    Herbert Xu <herbert@gondor.apana.org.au>    2005-09-01 20:48:23 -0400
committer David S. Miller <davem@davemloft.net>       2005-09-01 20:48:23 -0400
commit    d80d99d643090c3cf2b1f9fb3fadd1256f7e384f (patch)
tree      5e8bd46fa6c73cace5efb77c43e863cd36edb0c9
parent    2dac4b96b9362954a0638317b90e3e7bcb112e83 (diff)
[NET]: Add sk_stream_wmem_schedule
This patch introduces sk_stream_wmem_schedule as a short-hand for the
sk_forward_alloc checking on egress.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/net/sock.h  12
-rw-r--r--  net/ipv4/tcp.c       3
2 files changed, 9 insertions, 6 deletions
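For reference, a minimal user-space sketch of the check this helper wraps; struct sock_model, mem_schedule and wmem_schedule below are illustrative stand-ins rather than kernel code. The point it shows is the shape of the new helper: the egress path first tests the quota already charged to the socket (sk_forward_alloc) and only falls back to the slower sk_stream_mem_schedule-style accounting when that cheap check fails.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's struct sock. */
struct sock_model {
	int sk_forward_alloc;	/* bytes of quota already charged to this socket */
};

/* Stand-in for sk_stream_mem_schedule(sk, size, 0): the slow path that
 * charges more quota (the real routine also enforces global limits). */
static bool mem_schedule(struct sock_model *sk, int size)
{
	sk->sk_forward_alloc += size;
	return true;
}

/* Same shape as the new helper: cheap sk_forward_alloc check first,
 * fall back to the accounting routine only when it fails. */
static bool wmem_schedule(struct sock_model *sk, int size)
{
	return size <= sk->sk_forward_alloc || mem_schedule(sk, size);
}

int main(void)
{
	struct sock_model sk = { .sk_forward_alloc = 4096 };

	printf("2048 bytes: %d\n", wmem_schedule(&sk, 2048));	/* fast path */
	printf("8192 bytes: %d\n", wmem_schedule(&sk, 8192));	/* slow path */
	return 0;
}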
diff --git a/include/net/sock.h b/include/net/sock.h
index 312cb25cbd18..e51e626e9af1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -709,6 +709,12 @@ static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
 	       sk_stream_mem_schedule(sk, skb->truesize, 1);
 }
 
+static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
+{
+	return size <= sk->sk_forward_alloc ||
+	       sk_stream_mem_schedule(sk, size, 0);
+}
+
 /* Used by processes to "lock" a socket state, so that
  * interrupts and bottom half handlers won't change it
  * from under us. It essentially blocks any incoming
@@ -1203,8 +1209,7 @@ static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
 	skb = alloc_skb_fclone(size + hdr_len, gfp);
 	if (skb) {
 		skb->truesize += mem;
-		if (sk->sk_forward_alloc >= (int)skb->truesize ||
-		    sk_stream_mem_schedule(sk, skb->truesize, 0)) {
+		if (sk_stream_wmem_schedule(sk, skb->truesize)) {
 			skb_reserve(skb, hdr_len);
 			return skb;
 		}
@@ -1227,8 +1232,7 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
 {
 	struct page *page = NULL;
 
-	if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
-	    sk_stream_mem_schedule(sk, PAGE_SIZE, 0))
+	if (sk_stream_wmem_schedule(sk, PAGE_SIZE))
 		page = alloc_pages(sk->sk_allocation, 0);
 	else {
 		sk->sk_prot->enter_memory_pressure();
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 02fdda68718d..854f6d0c4bb3 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -552,8 +552,7 @@ new_segment:
 			tcp_mark_push(tp, skb);
 			goto new_segment;
 		}
-		if (sk->sk_forward_alloc < copy &&
-		    !sk_stream_mem_schedule(sk, copy, 0))
+		if (!sk_stream_wmem_schedule(sk, copy))
 			goto wait_for_memory;
 
 		if (can_coalesce) {