author     Eric Dumazet <edumazet@google.com>      2015-05-15 15:39:27 -0400
committer  David S. Miller <davem@davemloft.net>   2015-05-17 22:45:48 -0400
commit     b8da51ebb1aa93908350f95efae73aecbc2e266c
tree       5d6b1edc272ea24b07ec2a38cbcba7c53fb44bae
parent     a6c5ea4ccf0033591e6e476d7a273c0074c07aa7
tcp: introduce tcp_under_memory_pressure()
Introduce an optimized version of sk_under_memory_pressure()
for TCP. Our intent is to use it in fast paths.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 include/net/tcp.h     | 8 ++++++++
 net/ipv4/tcp_input.c  | 8 ++++----
 net/ipv4/tcp_output.c | 4 ++--
 net/ipv4/tcp_timer.c  | 2 +-
 4 files changed, 15 insertions(+), 7 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 841691a296dc..0d85223efa4c 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -286,6 +286,14 @@ extern atomic_long_t tcp_memory_allocated;
 extern struct percpu_counter tcp_sockets_allocated;
 extern int tcp_memory_pressure;
 
+/* optimized version of sk_under_memory_pressure() for TCP sockets */
+static inline bool tcp_under_memory_pressure(const struct sock *sk)
+{
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+		return !!sk->sk_cgrp->memory_pressure;
+
+	return tcp_memory_pressure;
+}
 /*
  * The next routines deal with comparing 32 bit unsigned ints
  * and worry about wraparound (automatic with unsigned arithmetic).
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index cf8b20ff6658..093779f7e893 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -359,7 +359,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 	/* Check #1 */
 	if (tp->rcv_ssthresh < tp->window_clamp &&
 	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
-	    !sk_under_memory_pressure(sk)) {
+	    !tcp_under_memory_pressure(sk)) {
 		int incr;
 
 		/* Check #2. Increase window, if skb with such overhead
@@ -446,7 +446,7 @@ static void tcp_clamp_window(struct sock *sk)
 
 	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
-	    !sk_under_memory_pressure(sk) &&
+	    !tcp_under_memory_pressure(sk) &&
 	    sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
 		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
 				    sysctl_tcp_rmem[2]);
@@ -4781,7 +4781,7 @@ static int tcp_prune_queue(struct sock *sk)
 
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		tcp_clamp_window(sk);
-	else if (sk_under_memory_pressure(sk))
+	else if (tcp_under_memory_pressure(sk))
 		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
 	tcp_collapse_ofo_queue(sk);
@@ -4825,7 +4825,7 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk)
 		return false;
 
 	/* If we are under global TCP memory pressure, do not expand. */
-	if (sk_under_memory_pressure(sk))
+	if (tcp_under_memory_pressure(sk))
 		return false;
 
 	/* If we are under soft global TCP memory pressure, do not expand. */
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index bac1a950d087..08c2cc40b26d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2392,7 +2392,7 @@ u32 __tcp_select_window(struct sock *sk)
 	if (free_space < (full_space >> 1)) {
 		icsk->icsk_ack.quick = 0;
 
-		if (sk_under_memory_pressure(sk))
+		if (tcp_under_memory_pressure(sk))
 			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
 					       4U * tp->advmss);
 
@@ -2843,7 +2843,7 @@ void tcp_send_fin(struct sock *sk)
 	 * Note: in the latter case, FIN packet will be sent after a timeout,
 	 * as TCP stack thinks it has already been transmitted.
 	 */
-	if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
+	if (tskb && (tcp_send_head(sk) || tcp_under_memory_pressure(sk))) {
 coalesce:
 		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
 		TCP_SKB_CB(tskb)->end_seq++;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 65bf670e8714..5b752f58a900 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -247,7 +247,7 @@ void tcp_delack_timer_handler(struct sock *sk)
 	}
 
 out:
-	if (sk_under_memory_pressure(sk))
+	if (tcp_under_memory_pressure(sk))
 		sk_mem_reclaim(sk);
 }
 
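
Background on the optimization (not part of the commit): around this kernel version the generic sk_under_memory_pressure() helper in include/net/sock.h roughly has to NULL-check and then follow the per-protocol sk->sk_prot->memory_pressure pointer on every call, while the new TCP helper reads the tcp_memory_pressure global (or the socket's cgroup flag) directly. The user-space sketch below models that difference only; the struct layouts, the generic_under_memory_pressure()/tcp_under_memory_pressure_sketch() names, and the main() driver are simplified stand-ins, not kernel code.

/*
 * User-space model of the two lookup paths: a pointer chase through the
 * protocol descriptor versus a direct load of a global flag.
 */
#include <stdbool.h>
#include <stdio.h>

static int tcp_memory_pressure;               /* stands in for the TCP global flag */

struct proto { int *memory_pressure; };       /* models sk->sk_prot */
struct cgroup_stub { int memory_pressure; };  /* models sk->sk_cgrp */
struct sock { struct proto *sk_prot; struct cgroup_stub *sk_cgrp; };

/* Generic path: NULL check plus indirect load on every call. */
static bool generic_under_memory_pressure(const struct sock *sk)
{
	if (!sk->sk_prot->memory_pressure)
		return false;
	if (sk->sk_cgrp)
		return sk->sk_cgrp->memory_pressure != 0;
	return *sk->sk_prot->memory_pressure != 0;
}

/* TCP-specific path after this patch: direct load of the global flag. */
static bool tcp_under_memory_pressure_sketch(const struct sock *sk)
{
	if (sk->sk_cgrp)
		return sk->sk_cgrp->memory_pressure != 0;
	return tcp_memory_pressure != 0;
}

int main(void)
{
	struct proto tcp_proto = { .memory_pressure = &tcp_memory_pressure };
	struct sock sk = { .sk_prot = &tcp_proto, .sk_cgrp = NULL };

	tcp_memory_pressure = 1;
	printf("generic: %d, tcp: %d\n",
	       generic_under_memory_pressure(&sk),
	       tcp_under_memory_pressure_sketch(&sk));
	return 0;
}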