author	Glauber Costa <glommer@parallels.com>	2011-12-11 16:47:02 -0500
committer	David S. Miller <davem@davemloft.net>	2011-12-12 19:04:10 -0500
commit	180d8cd942ce336b2c869d324855c40c5db478ad (patch)
tree	2424d854345d81464d6030ef8090a8e22bd414b0 /net/ipv4
parent	e5671dfae59b165e2adfd4dfbdeab11ac8db5bda (diff)
foundations of per-cgroup memory pressure controlling

This patch replaces all uses of the struct sock fields memory_pressure,
memory_allocated, sockets_allocated, and sysctl_mem with accessor
macros. Those macros can receive either a socket argument or a
mem_cgroup argument, depending on the context they live in.

Since we are only wrapping the fields in macros here, no performance
impact at all is expected, even when cgroups are enabled.

Signed-off-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Hiroyuki Kamezawa <kamezawa.hiroyu@jp.fujitsu.com>
CC: David S. Miller <davem@davemloft.net>
CC: Eric W. Biederman <ebiederm@xmission.com>
CC: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
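The accessor definitions themselves land in include/net/sock.h and are outside this net/ipv4 diffstat. As a minimal sketch of the pattern the message describes, assuming struct proto keeps pointers to the per-protocol counters (as tcp_prot already does), the socket-side wrappers could look roughly like this; the real definitions may differ in detail:

	/* Sketch only: resolve the per-protocol memory state through
	 * sk->sk_prot instead of naming the tcp_/udp_ globals directly,
	 * so a later patch can substitute per-mem_cgroup counters. */
	static inline bool sk_under_memory_pressure(const struct sock *sk)
	{
		if (!sk->sk_prot->memory_pressure)
			return false;	/* protocol tracks no pressure state */
		return !!*sk->sk_prot->memory_pressure;
	}

	static inline long sk_memory_allocated(const struct sock *sk)
	{
		return atomic_long_read(sk->sk_prot->memory_allocated);
	}

	static inline long sk_prot_mem_limits(const struct sock *sk, int index)
	{
		/* index 0/1/2 maps to the min/pressure/max sysctl knobs */
		return sk->sk_prot->sysctl_mem[index];
	}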
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/proc.c		6
-rw-r--r--	net/ipv4/tcp_input.c	12
-rw-r--r--	net/ipv4/tcp_ipv4.c	4
-rw-r--r--	net/ipv4/tcp_output.c	2
-rw-r--r--	net/ipv4/tcp_timer.c	2
5 files changed, 13 insertions, 13 deletions
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 961eed4f510a..3569d8ecaeac 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -56,17 +56,17 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
 
 	local_bh_disable();
 	orphans = percpu_counter_sum_positive(&tcp_orphan_count);
-	sockets = percpu_counter_sum_positive(&tcp_sockets_allocated);
+	sockets = proto_sockets_allocated_sum_positive(&tcp_prot);
 	local_bh_enable();
 
 	socket_seq_show(seq);
 	seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n",
 		   sock_prot_inuse_get(net, &tcp_prot), orphans,
 		   tcp_death_row.tw_count, sockets,
-		   atomic_long_read(&tcp_memory_allocated));
+		   proto_memory_allocated(&tcp_prot));
 	seq_printf(seq, "UDP: inuse %d mem %ld\n",
 		   sock_prot_inuse_get(net, &udp_prot),
-		   atomic_long_read(&udp_memory_allocated));
+		   proto_memory_allocated(&udp_prot));
 	seq_printf(seq, "UDPLITE: inuse %d\n",
 		   sock_prot_inuse_get(net, &udplite_prot));
 	seq_printf(seq, "RAW: inuse %d\n",
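proc.c now reads both counters through the protocol descriptor rather than through the TCP/UDP globals, which keeps the seq_printf output identical. A plausible shape for the two helpers, again assuming they simply dereference the proto's counter pointers (sketch, not the verbatim include/net/sock.h code):

	static inline long proto_memory_allocated(struct proto *prot)
	{
		return atomic_long_read(prot->memory_allocated);
	}

	static inline long
	proto_sockets_allocated_sum_positive(struct proto *prot)
	{
		return percpu_counter_sum_positive(prot->sockets_allocated);
	}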
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b9cbc351c511..f131d92d25ee 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -322,7 +322,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 	/* Check #1 */
 	if (tp->rcv_ssthresh < tp->window_clamp &&
 	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
-	    !tcp_memory_pressure) {
+	    !sk_under_memory_pressure(sk)) {
 		int incr;
 
 		/* Check #2. Increase window, if skb with such overhead
@@ -411,8 +411,8 @@ static void tcp_clamp_window(struct sock *sk)
 
 	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
-	    !tcp_memory_pressure &&
-	    atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
+	    !sk_under_memory_pressure(sk) &&
+	    sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
 		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
 				    sysctl_tcp_rmem[2]);
 	}
@@ -4866,7 +4866,7 @@ static int tcp_prune_queue(struct sock *sk)
 
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		tcp_clamp_window(sk);
-	else if (tcp_memory_pressure)
+	else if (sk_under_memory_pressure(sk))
 		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
 	tcp_collapse_ofo_queue(sk);
@@ -4932,11 +4932,11 @@ static int tcp_should_expand_sndbuf(const struct sock *sk)
 		return 0;
 
 	/* If we are under global TCP memory pressure, do not expand. */
-	if (tcp_memory_pressure)
+	if (sk_under_memory_pressure(sk))
 		return 0;
 
 	/* If we are under soft global TCP memory pressure, do not expand. */
-	if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
+	if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
 		return 0;
 
 	/* If we filled the congestion window, do not expand. */
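Every hunk in tcp_input.c is the same mechanical substitution, and for a plain TCP socket the new tests expand to exactly the old ones, because tcp_prot already wires its memory fields to the old globals. Abbreviated excerpt of that pre-existing wiring in net/ipv4/tcp_ipv4.c (unrelated fields elided):

	struct proto tcp_prot = {
		.name			= "TCP",
		/* ... */
		.memory_allocated	= &tcp_memory_allocated,
		.memory_pressure	= &tcp_memory_pressure,
		.sysctl_mem		= sysctl_tcp_mem,
		/* ... */
	};

So sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0) reads the same atomic and the same sysctl_tcp_mem[0] threshold as the code it replaces.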
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index c4b8b09db9f5..f48bf312cfe8 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1917,7 +1917,7 @@ static int tcp_v4_init_sock(struct sock *sk)
 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
 	local_bh_disable();
-	percpu_counter_inc(&tcp_sockets_allocated);
+	sk_sockets_allocated_inc(sk);
 	local_bh_enable();
 
 	return 0;
@@ -1973,7 +1973,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
 		tp->cookie_values = NULL;
 	}
 
-	percpu_counter_dec(&tcp_sockets_allocated);
+	sk_sockets_allocated_dec(sk);
 }
 EXPORT_SYMBOL(tcp_v4_destroy_sock);
 
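The socket-count accounting follows the same pattern with the percpu counter. A hypothetical sketch of the inc/dec wrappers, assuming struct proto carries a pointer to the percpu_counter that the tcp_sockets_allocated global used to name:

	static inline void sk_sockets_allocated_inc(struct sock *sk)
	{
		percpu_counter_inc(sk->sk_prot->sockets_allocated);
	}

	static inline void sk_sockets_allocated_dec(struct sock *sk)
	{
		percpu_counter_dec(sk->sk_prot->sockets_allocated);
	}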
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index cf3068038942..8c8de2780c7a 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1922,7 +1922,7 @@ u32 __tcp_select_window(struct sock *sk)
 	if (free_space < (full_space >> 1)) {
 		icsk->icsk_ack.quick = 0;
 
-		if (tcp_memory_pressure)
+		if (sk_under_memory_pressure(sk))
 			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
 					       4U * tp->advmss);
 
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index aa39a692f4c8..40a41f077981 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -261,7 +261,7 @@ static void tcp_delack_timer(unsigned long data)
 	}
 
 out:
-	if (tcp_memory_pressure)
+	if (sk_under_memory_pressure(sk))
 		sk_mem_reclaim(sk);
 out_unlock:
 	bh_unlock_sock(sk);