author		Johannes Weiner <hannes@cmpxchg.org>		2016-01-14 18:21:17 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-14 19:00:49 -0500
commit		baac50bbc3cdfd184ebf586b1704edbfcee866df
tree		b1b168157c38ef0533d2c8765bb3016d6a495cac /net/ipv4
parent		e805605c721021879a1469bdae45c6f80bc985f4
net: tcp_memcontrol: simplify linkage between socket and page counter
There won't be any separate counters for socket memory consumed by
protocols other than TCP in the future. Remove the indirection and
link sockets directly to their owning memory cgroup.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
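The linkage change in a rough sketch (not part of the patch; the
cg_proto fields shown are the ones referenced in the hunks below, the
exact struct layouts are assumptions):

	/*
	 * Before: sockets pointed at a per-protocol cg_proto, which held
	 * the counters plus a back-pointer to its owning memcg:
	 *
	 *	sk->sk_cgrp ---> struct cg_proto ---> cg_proto->memcg
	 */
	struct cg_proto {
		struct page_counter	memory_allocated;	/* socket memory, in pages */
		int			memory_pressure;
		bool			active;
		struct mem_cgroup	*memcg;		/* back-pointer, now unnecessary */
	};

	/*
	 * After: sockets point straight at the memory cgroup, with the
	 * TCP state embedded in it:
	 *
	 *	sk->sk_memcg ---> struct mem_cgroup  (counters in ->tcp_mem)
	 */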
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/tcp_ipv4.c		 7
-rw-r--r--	net/ipv4/tcp_memcontrol.c	67
-rw-r--r--	net/ipv4/tcp_output.c		 4
3 files changed, 29 insertions(+), 49 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index eb39e02899e5..c7d1fb50f381 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1819,7 +1819,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
 
 	sk_sockets_allocated_dec(sk);
 
-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
 		sock_release_memcg(sk);
 }
 EXPORT_SYMBOL(tcp_v4_destroy_sock);
@@ -2344,11 +2344,6 @@ struct proto tcp_prot = {
 	.compat_setsockopt	= compat_tcp_setsockopt,
 	.compat_getsockopt	= compat_tcp_getsockopt,
 #endif
-#ifdef CONFIG_MEMCG_KMEM
-	.init_cgroup		= tcp_init_cgroup,
-	.destroy_cgroup		= tcp_destroy_cgroup,
-	.proto_cgroup		= tcp_proto_cgroup,
-#endif
 	.diag_destroy		= tcp_abort,
 };
 EXPORT_SYMBOL(tcp_prot);
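With the CONFIG_MEMCG_KMEM operations gone from tcp_prot, nothing on
the memcg side has to discover TCP state through a function pointer
anymore. A hedged sketch of what this means at a call site
(illustrative only, not a hunk from this patch):

	/* Before: probe through struct proto; may legitimately return NULL */
	struct cg_proto *cg_proto = tcp_prot.proto_cgroup(memcg);
	if (cg_proto)
		val = page_counter_read(&cg_proto->memory_allocated);

	/* After: direct member access; only the root memcg lacks a counter */
	if (memcg != root_mem_cgroup)
		val = page_counter_read(&memcg->tcp_mem.memory_allocated);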
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index ef4268d12e43..e5078259cbe3 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -8,60 +8,47 @@
 
 int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
+	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
+	struct page_counter *counter_parent = NULL;
 	/*
 	 * The root cgroup does not use page_counters, but rather,
 	 * rely on the data already collected by the network
 	 * subsystem
 	 */
-	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
-	struct page_counter *counter_parent = NULL;
-	struct cg_proto *cg_proto, *parent_cg;
-
-	cg_proto = tcp_prot.proto_cgroup(memcg);
-	if (!cg_proto)
+	if (memcg == root_mem_cgroup)
 		return 0;
 
-	cg_proto->memory_pressure = 0;
-	cg_proto->memcg = memcg;
+	memcg->tcp_mem.memory_pressure = 0;
 
-	parent_cg = tcp_prot.proto_cgroup(parent);
-	if (parent_cg)
-		counter_parent = &parent_cg->memory_allocated;
+	if (parent)
+		counter_parent = &parent->tcp_mem.memory_allocated;
 
-	page_counter_init(&cg_proto->memory_allocated, counter_parent);
+	page_counter_init(&memcg->tcp_mem.memory_allocated, counter_parent);
 
 	return 0;
 }
-EXPORT_SYMBOL(tcp_init_cgroup);
 
 void tcp_destroy_cgroup(struct mem_cgroup *memcg)
 {
-	struct cg_proto *cg_proto;
-
-	cg_proto = tcp_prot.proto_cgroup(memcg);
-	if (!cg_proto)
+	if (memcg == root_mem_cgroup)
 		return;
 
-	if (cg_proto->active)
+	if (memcg->tcp_mem.active)
 		static_key_slow_dec(&memcg_socket_limit_enabled);
-
 }
-EXPORT_SYMBOL(tcp_destroy_cgroup);
 
 static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages)
 {
-	struct cg_proto *cg_proto;
 	int ret;
 
-	cg_proto = tcp_prot.proto_cgroup(memcg);
-	if (!cg_proto)
+	if (memcg == root_mem_cgroup)
 		return -EINVAL;
 
-	ret = page_counter_limit(&cg_proto->memory_allocated, nr_pages);
+	ret = page_counter_limit(&memcg->tcp_mem.memory_allocated, nr_pages);
 	if (ret)
 		return ret;
 
-	if (!cg_proto->active) {
+	if (!memcg->tcp_mem.active) {
 		/*
 		 * The active flag needs to be written after the static_key
 		 * update. This is what guarantees that the socket activation
@@ -79,7 +66,7 @@ static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages)
 		 * patched in yet.
 		 */
 		static_key_slow_inc(&memcg_socket_limit_enabled);
-		cg_proto->active = true;
+		memcg->tcp_mem.active = true;
 	}
 
 	return 0;
@@ -123,32 +110,32 @@ static ssize_t tcp_cgroup_write(struct kernfs_open_file *of,
 static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
-	struct cg_proto *cg_proto = tcp_prot.proto_cgroup(memcg);
 	u64 val;
 
 	switch (cft->private) {
 	case RES_LIMIT:
-		if (!cg_proto)
-			return PAGE_COUNTER_MAX;
-		val = cg_proto->memory_allocated.limit;
+		if (memcg == root_mem_cgroup)
+			val = PAGE_COUNTER_MAX;
+		else
+			val = memcg->tcp_mem.memory_allocated.limit;
 		val *= PAGE_SIZE;
 		break;
 	case RES_USAGE:
-		if (!cg_proto)
+		if (memcg == root_mem_cgroup)
 			val = atomic_long_read(&tcp_memory_allocated);
 		else
-			val = page_counter_read(&cg_proto->memory_allocated);
+			val = page_counter_read(&memcg->tcp_mem.memory_allocated);
 		val *= PAGE_SIZE;
 		break;
 	case RES_FAILCNT:
-		if (!cg_proto)
+		if (memcg == root_mem_cgroup)
 			return 0;
-		val = cg_proto->memory_allocated.failcnt;
+		val = memcg->tcp_mem.memory_allocated.failcnt;
 		break;
 	case RES_MAX_USAGE:
-		if (!cg_proto)
+		if (memcg == root_mem_cgroup)
 			return 0;
-		val = cg_proto->memory_allocated.watermark;
+		val = memcg->tcp_mem.memory_allocated.watermark;
 		val *= PAGE_SIZE;
 		break;
 	default:
@@ -161,19 +148,17 @@ static ssize_t tcp_cgroup_reset(struct kernfs_open_file *of,
 				char *buf, size_t nbytes, loff_t off)
 {
 	struct mem_cgroup *memcg;
-	struct cg_proto *cg_proto;
 
 	memcg = mem_cgroup_from_css(of_css(of));
-	cg_proto = tcp_prot.proto_cgroup(memcg);
-	if (!cg_proto)
+	if (memcg == root_mem_cgroup)
 		return nbytes;
 
 	switch (of_cft(of)->private) {
 	case RES_MAX_USAGE:
-		page_counter_reset_watermark(&cg_proto->memory_allocated);
+		page_counter_reset_watermark(&memcg->tcp_mem.memory_allocated);
 		break;
 	case RES_FAILCNT:
-		cg_proto->memory_allocated.failcnt = 0;
+		memcg->tcp_mem.memory_allocated.failcnt = 0;
 		break;
 	}
 
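A note on units in the handlers above: the page counters count pages,
while the cgroup control files (memory.kmem.tcp.* in cgroup v1) speak
bytes. That is why every read path scales with val *= PAGE_SIZE and why
tcp_update_limit() takes nr_pages. A small worked example (values
hypothetical, assuming PAGE_SIZE == 4096):

	/* a 64 MiB byte limit is stored as 16384 pages ... */
	unsigned long nr_pages = (64UL << 20) / PAGE_SIZE;	/* 16384 */
	/* ... and a usage of 16384 pages reads back as 67108864 bytes */
	u64 val = page_counter_read(&memcg->tcp_mem.memory_allocated);
	val *= PAGE_SIZE;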
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 493b48945f0c..fda379cd600d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2821,8 +2821,8 @@ void sk_forced_mem_schedule(struct sock *sk, int size)
 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
 	sk_memory_allocated_add(sk, amt);
 
-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		mem_cgroup_charge_skmem(sk->sk_cgrp, amt);
+	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
+		mem_cgroup_charge_skmem(sk->sk_memcg, amt);
 }
 
 /* Send a FIN. The caller locks the socket for us.