author	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-15 14:41:44 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-15 14:41:44 -0500
commit	875fc4f5ddf35605581f9a5900c14afef48611f2 (patch)
tree	e237a28a71a5d1e72eaf0ecda737eb5c8614c72c /net
parent	7d1fc01afc5af35e5197e0e75abe900f6bd279b8 (diff)
parent	7dfa4612204b511c934ca2a0e4f306f9981bd9aa (diff)
Merge branch 'akpm' (patches from Andrew)
Merge first patch-bomb from Andrew Morton:
- A few hotfixes which missed 4.4 because I was asleep. cc'ed to -stable
- A few misc fixes
- OCFS2 updates
- Part of MM, including pretty large changes to page-flags handling
  and to THP management which have been buffered up for 2-3 cycles now.
  I have a lot of MM material this time.
[ It turns out the THP part wasn't quite ready, so that got dropped from
this series - Linus ]
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (117 commits)
zsmalloc: reorganize struct size_class to pack 4 bytes hole
mm/zbud.c: use list_last_entry() instead of list_tail_entry()
zram/zcomp: do not zero out zcomp private pages
zram: pass gfp from zcomp frontend to backend
zram: try vmalloc() after kmalloc()
zram/zcomp: use GFP_NOIO to allocate streams
mm: add tracepoint for scanning pages
drivers/base/memory.c: fix kernel warning during memory hotplug on ppc64
mm/page_isolation: use macro to judge the alignment
mm: fix noisy sparse warning in LIBCFS_ALLOC_PRE()
mm: rework virtual memory accounting
include/linux/memblock.h: fix ordering of 'flags' argument in comments
mm: move lru_to_page to mm_inline.h
Documentation/filesystems: describe the shared memory usage/accounting
memory-hotplug: don't BUG() in register_memory_resource()
hugetlb: make mm and fs code explicitly non-modular
mm/swapfile.c: use list_for_each_entry_safe in free_swap_count_continuations
mm: /proc/pid/clear_refs: no need to clear VM_SOFTDIRTY in clear_soft_dirty_pmd()
mm: make sure isolate_lru_page() is never called for tail page
vmstat: make vmstat_updater deferrable again and shut down on idle
...
Diffstat (limited to 'net')
-rw-r--r--	net/core/sock.c	78
-rw-r--r--	net/ipv4/tcp.c	3
-rw-r--r--	net/ipv4/tcp_ipv4.c	9
-rw-r--r--	net/ipv4/tcp_memcontrol.c	91
-rw-r--r--	net/ipv4/tcp_output.c	7
-rw-r--r--	net/ipv6/tcp_ipv6.c	3
-rw-r--r--	net/socket.c	2
-rw-r--r--	net/sunrpc/rpc_pipe.c	2
8 files changed, 59 insertions, 136 deletions
diff --git a/net/core/sock.c b/net/core/sock.c
index 51270238e269..6c1c8bc93412 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -195,44 +195,6 @@ bool sk_net_capable(const struct sock *sk, int cap)
 }
 EXPORT_SYMBOL(sk_net_capable);
 
-
-#ifdef CONFIG_MEMCG_KMEM
-int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
-{
-	struct proto *proto;
-	int ret = 0;
-
-	mutex_lock(&proto_list_mutex);
-	list_for_each_entry(proto, &proto_list, node) {
-		if (proto->init_cgroup) {
-			ret = proto->init_cgroup(memcg, ss);
-			if (ret)
-				goto out;
-		}
-	}
-
-	mutex_unlock(&proto_list_mutex);
-	return ret;
-out:
-	list_for_each_entry_continue_reverse(proto, &proto_list, node)
-		if (proto->destroy_cgroup)
-			proto->destroy_cgroup(memcg);
-	mutex_unlock(&proto_list_mutex);
-	return ret;
-}
-
-void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
-{
-	struct proto *proto;
-
-	mutex_lock(&proto_list_mutex);
-	list_for_each_entry_reverse(proto, &proto_list, node)
-		if (proto->destroy_cgroup)
-			proto->destroy_cgroup(memcg);
-	mutex_unlock(&proto_list_mutex);
-}
-#endif
-
 /*
  * Each address family might have different locking rules, so we have
  * one slock key per address family:
@@ -240,11 +202,6 @@ void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
 static struct lock_class_key af_family_keys[AF_MAX];
 static struct lock_class_key af_family_slock_keys[AF_MAX];
 
-#if defined(CONFIG_MEMCG_KMEM)
-struct static_key memcg_socket_limit_enabled;
-EXPORT_SYMBOL(memcg_socket_limit_enabled);
-#endif
-
 /*
  * Make lock validator output more readable. (we pre-construct these
  * strings build-time, so that runtime initialization of socket
@@ -1507,12 +1464,6 @@ void sk_free(struct sock *sk)
 }
 EXPORT_SYMBOL(sk_free);
 
-static void sk_update_clone(const struct sock *sk, struct sock *newsk)
-{
-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		sock_update_memcg(newsk);
-}
-
 /**
  * sk_clone_lock - clone a socket, and lock its clone
  * @sk: the socket to clone
@@ -1607,7 +1558,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		sk_set_socket(newsk, NULL);
 		newsk->sk_wq = NULL;
 
-		sk_update_clone(sk, newsk);
+		if (mem_cgroup_sockets_enabled && sk->sk_memcg)
+			sock_update_memcg(newsk);
 
 		if (newsk->sk_prot->sockets_allocated)
 			sk_sockets_allocated_inc(newsk);
@@ -2089,27 +2041,27 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
 	struct proto *prot = sk->sk_prot;
 	int amt = sk_mem_pages(size);
 	long allocated;
-	int parent_status = UNDER_LIMIT;
 
 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
 
-	allocated = sk_memory_allocated_add(sk, amt, &parent_status);
+	allocated = sk_memory_allocated_add(sk, amt);
+
+	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
+	    !mem_cgroup_charge_skmem(sk->sk_memcg, amt))
+		goto suppress_allocation;
 
 	/* Under limit. */
-	if (parent_status == UNDER_LIMIT &&
-	    allocated <= sk_prot_mem_limits(sk, 0)) {
+	if (allocated <= sk_prot_mem_limits(sk, 0)) {
 		sk_leave_memory_pressure(sk);
 		return 1;
 	}
 
-	/* Under pressure. (we or our parents) */
-	if ((parent_status > SOFT_LIMIT) ||
-	    allocated > sk_prot_mem_limits(sk, 1))
+	/* Under pressure. */
+	if (allocated > sk_prot_mem_limits(sk, 1))
 		sk_enter_memory_pressure(sk);
 
-	/* Over hard limit (we or our parents) */
-	if ((parent_status == OVER_LIMIT) ||
-	    (allocated > sk_prot_mem_limits(sk, 2)))
+	/* Over hard limit. */
+	if (allocated > sk_prot_mem_limits(sk, 2))
 		goto suppress_allocation;
 
 	/* guarantee minimum buffer size under pressure */
@@ -2158,6 +2110,9 @@ suppress_allocation:
 
 	sk_memory_allocated_sub(sk, amt);
 
+	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
+		mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
+
 	return 0;
 }
 EXPORT_SYMBOL(__sk_mem_schedule);
@@ -2173,6 +2128,9 @@ void __sk_mem_reclaim(struct sock *sk, int amount)
 	sk_memory_allocated_sub(sk, amount);
 	sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
 
+	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
+		mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
+
 	if (sk_under_memory_pressure(sk) &&
 	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
 		sk_leave_memory_pressure(sk);
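The restructured __sk_mem_schedule() above replaces the old parent_status tri-state with two separate charges: the global protocol counter is charged first, then the socket's own memcg via mem_cgroup_charge_skmem(), and the suppress_allocation path unwinds both. A minimal userspace sketch of that charge/unwind pairing (all names and limits here are illustrative stand-ins, not kernel APIs, and the "minimum buffer under pressure" escape hatch is omitted):

    #include <stdbool.h>
    #include <stdio.h>

    static long global_allocated;          /* sk_memory_allocated analogue */
    static long memcg_allocated;           /* memcg page counter analogue */
    #define GLOBAL_HARD_LIMIT 1024         /* sk_prot_mem_limits(sk, 2) analogue */
    #define MEMCG_LIMIT 256                /* per-cgroup limit analogue */

    /* Try-charge against the cgroup limit; fail instead of overcharging. */
    static bool charge_memcg(long pages)
    {
            if (memcg_allocated + pages > MEMCG_LIMIT)
                    return false;
            memcg_allocated += pages;
            return true;
    }

    /* Mirrors the new flow: global charge, memcg charge, unwind on failure. */
    static bool mem_schedule(long pages)
    {
            bool memcg_charged = false;

            global_allocated += pages;          /* sk_memory_allocated_add() */

            if (!charge_memcg(pages))
                    goto suppress;
            memcg_charged = true;

            if (global_allocated > GLOBAL_HARD_LIMIT)
                    goto suppress;              /* over hard limit */

            return true;

    suppress:
            global_allocated -= pages;          /* sk_memory_allocated_sub() */
            if (memcg_charged)
                    memcg_allocated -= pages;   /* mem_cgroup_uncharge_skmem() */
            return false;
    }

    int main(void)
    {
            printf("charge 200: %s\n", mem_schedule(200) ? "ok" : "suppressed");
            printf("charge 200: %s\n", mem_schedule(200) ? "ok" : "suppressed");
            return 0;
    }

The second call trips the cgroup limit and is suppressed with both counters restored, which is exactly the invariant the sock.c hunks establish.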
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 7bb1b091efd1..fd17eec93525 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -422,7 +422,8 @@ void tcp_init_sock(struct sock *sk)
 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
 	local_bh_disable();
-	sock_update_memcg(sk);
+	if (mem_cgroup_sockets_enabled)
+		sock_update_memcg(sk);
 	sk_sockets_allocated_inc(sk);
 	local_bh_enable();
 }
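The mem_cgroup_sockets_enabled test that now guards sock_update_memcg() is backed by a static key, so in the common case where no cgroup has enabled socket accounting it compiles down to a patched-out jump rather than a load and branch. Roughly, as a sketch of the mechanism (not the verbatim header text):

    #include <linux/jump_label.h>

    /* Defined once (in mm/memcontrol.c); starts disabled. */
    DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);

    /* Callers test it via a static branch: a NOP until static_branch_inc()
     * patches the jump in at runtime. */
    #define mem_cgroup_sockets_enabled \
            static_branch_unlikely(&memcg_sockets_enabled_key)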
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 65947c1f4733..c7d1fb50f381 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1818,7 +1818,9 @@ void tcp_v4_destroy_sock(struct sock *sk)
 	tcp_saved_syn_free(tp);
 
 	sk_sockets_allocated_dec(sk);
-	sock_release_memcg(sk);
+
+	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
+		sock_release_memcg(sk);
 }
 EXPORT_SYMBOL(tcp_v4_destroy_sock);
 
@@ -2342,11 +2344,6 @@ struct proto tcp_prot = {
 	.compat_setsockopt = compat_tcp_setsockopt,
 	.compat_getsockopt = compat_tcp_getsockopt,
 #endif
-#ifdef CONFIG_MEMCG_KMEM
-	.init_cgroup = tcp_init_cgroup,
-	.destroy_cgroup = tcp_destroy_cgroup,
-	.proto_cgroup = tcp_proto_cgroup,
-#endif
 	.diag_destroy = tcp_abort,
 };
 EXPORT_SYMBOL(tcp_prot);
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index 2379c1b4efb2..18bc7f745e9c 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -8,75 +8,49 @@
 
 int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
+	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
+	struct page_counter *counter_parent = NULL;
 	/*
 	 * The root cgroup does not use page_counters, but rather,
 	 * rely on the data already collected by the network
 	 * subsystem
 	 */
-	struct mem_cgroup *parent = parent_mem_cgroup(memcg);
-	struct page_counter *counter_parent = NULL;
-	struct cg_proto *cg_proto, *parent_cg;
-
-	cg_proto = tcp_prot.proto_cgroup(memcg);
-	if (!cg_proto)
+	if (memcg == root_mem_cgroup)
 		return 0;
 
-	cg_proto->sysctl_mem[0] = sysctl_tcp_mem[0];
-	cg_proto->sysctl_mem[1] = sysctl_tcp_mem[1];
-	cg_proto->sysctl_mem[2] = sysctl_tcp_mem[2];
-	cg_proto->memory_pressure = 0;
-	cg_proto->memcg = memcg;
+	memcg->tcp_mem.memory_pressure = 0;
 
-	parent_cg = tcp_prot.proto_cgroup(parent);
-	if (parent_cg)
-		counter_parent = &parent_cg->memory_allocated;
+	if (parent)
+		counter_parent = &parent->tcp_mem.memory_allocated;
 
-	page_counter_init(&cg_proto->memory_allocated, counter_parent);
-	percpu_counter_init(&cg_proto->sockets_allocated, 0, GFP_KERNEL);
+	page_counter_init(&memcg->tcp_mem.memory_allocated, counter_parent);
 
 	return 0;
 }
-EXPORT_SYMBOL(tcp_init_cgroup);
 
 void tcp_destroy_cgroup(struct mem_cgroup *memcg)
 {
-	struct cg_proto *cg_proto;
-
-	cg_proto = tcp_prot.proto_cgroup(memcg);
-	if (!cg_proto)
+	if (memcg == root_mem_cgroup)
 		return;
 
-	percpu_counter_destroy(&cg_proto->sockets_allocated);
-
-	if (test_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags))
-		static_key_slow_dec(&memcg_socket_limit_enabled);
-
+	if (memcg->tcp_mem.active)
+		static_branch_dec(&memcg_sockets_enabled_key);
 }
-EXPORT_SYMBOL(tcp_destroy_cgroup);
 
 static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages)
 {
-	struct cg_proto *cg_proto;
-	int i;
 	int ret;
 
-	cg_proto = tcp_prot.proto_cgroup(memcg);
-	if (!cg_proto)
+	if (memcg == root_mem_cgroup)
 		return -EINVAL;
 
-	ret = page_counter_limit(&cg_proto->memory_allocated, nr_pages);
+	ret = page_counter_limit(&memcg->tcp_mem.memory_allocated, nr_pages);
 	if (ret)
 		return ret;
 
-	for (i = 0; i < 3; i++)
-		cg_proto->sysctl_mem[i] = min_t(long, nr_pages,
-						sysctl_tcp_mem[i]);
-
-	if (nr_pages == PAGE_COUNTER_MAX)
-		clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
-	else {
+	if (!memcg->tcp_mem.active) {
 		/*
-		 * The active bit needs to be written after the static_key
+		 * The active flag needs to be written after the static_key
 		 * update. This is what guarantees that the socket activation
 		 * function is the last one to run. See sock_update_memcg() for
 		 * details, and note that we don't mark any socket as belonging
@@ -90,14 +64,9 @@ static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages)
 		 * We never race with the readers in sock_update_memcg(),
 		 * because when this value change, the code to process it is not
 		 * patched in yet.
-		 *
-		 * The activated bit is used to guarantee that no two writers
-		 * will do the update in the same memcg. Without that, we can't
-		 * properly shutdown the static key.
 		 */
-		if (!test_and_set_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags))
-			static_key_slow_inc(&memcg_socket_limit_enabled);
-		set_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
+		static_branch_inc(&memcg_sockets_enabled_key);
+		memcg->tcp_mem.active = true;
 	}
 
 	return 0;
@@ -141,32 +110,32 @@ static ssize_t tcp_cgroup_write(struct kernfs_open_file *of,
 static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
-	struct cg_proto *cg_proto = tcp_prot.proto_cgroup(memcg);
 	u64 val;
 
 	switch (cft->private) {
 	case RES_LIMIT:
-		if (!cg_proto)
-			return PAGE_COUNTER_MAX;
-		val = cg_proto->memory_allocated.limit;
+		if (memcg == root_mem_cgroup)
+			val = PAGE_COUNTER_MAX;
+		else
+			val = memcg->tcp_mem.memory_allocated.limit;
 		val *= PAGE_SIZE;
 		break;
 	case RES_USAGE:
-		if (!cg_proto)
+		if (memcg == root_mem_cgroup)
 			val = atomic_long_read(&tcp_memory_allocated);
 		else
-			val = page_counter_read(&cg_proto->memory_allocated);
+			val = page_counter_read(&memcg->tcp_mem.memory_allocated);
 		val *= PAGE_SIZE;
 		break;
 	case RES_FAILCNT:
-		if (!cg_proto)
+		if (memcg == root_mem_cgroup)
 			return 0;
-		val = cg_proto->memory_allocated.failcnt;
+		val = memcg->tcp_mem.memory_allocated.failcnt;
 		break;
 	case RES_MAX_USAGE:
-		if (!cg_proto)
+		if (memcg == root_mem_cgroup)
 			return 0;
-		val = cg_proto->memory_allocated.watermark;
+		val = memcg->tcp_mem.memory_allocated.watermark;
 		val *= PAGE_SIZE;
 		break;
 	default:
@@ -179,19 +148,17 @@ static ssize_t tcp_cgroup_reset(struct kernfs_open_file *of,
 				char *buf, size_t nbytes, loff_t off)
 {
 	struct mem_cgroup *memcg;
-	struct cg_proto *cg_proto;
 
 	memcg = mem_cgroup_from_css(of_css(of));
-	cg_proto = tcp_prot.proto_cgroup(memcg);
-	if (!cg_proto)
+	if (memcg == root_mem_cgroup)
 		return nbytes;
 
 	switch (of_cft(of)->private) {
 	case RES_MAX_USAGE:
-		page_counter_reset_watermark(&cg_proto->memory_allocated);
+		page_counter_reset_watermark(&memcg->tcp_mem.memory_allocated);
 		break;
 	case RES_FAILCNT:
-		cg_proto->memory_allocated.failcnt = 0;
+		memcg->tcp_mem.memory_allocated.failcnt = 0;
 		break;
 	}
 
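The comment retained in tcp_update_limit() is the heart of this file: the static key must be enabled before tcp_mem.active is set, so any reader that observes active == true is guaranteed the jump label is already patched in. A rough userspace analogue of that publish order, using C11 atomics in place of static keys (all names are stand-ins for memcg_sockets_enabled_key and memcg->tcp_mem.active, not kernel APIs):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int sockets_enabled_key;    /* static_branch_inc/_dec analogue */
    static atomic_bool tcp_mem_active;        /* memcg->tcp_mem.active analogue */

    /* Writer (tcp_update_limit analogue): enable the key first, then
     * publish the per-memcg flag. */
    static void activate(void)
    {
            if (!atomic_load(&tcp_mem_active)) {
                    atomic_fetch_add(&sockets_enabled_key, 1);
                    atomic_store(&tcp_mem_active, true);
            }
    }

    /* Reader (sock_update_memcg analogue): anyone who sees the flag set
     * can rely on the key already being enabled, so accounting is never
     * observed half-armed. */
    static bool should_account(void)
    {
            return atomic_load(&sockets_enabled_key) > 0 &&
                   atomic_load(&tcp_mem_active);
    }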
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 412a920fe0ec..fda379cd600d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2813,13 +2813,16 @@ begin_fwd:
  */
 void sk_forced_mem_schedule(struct sock *sk, int size)
 {
-	int amt, status;
+	int amt;
 
 	if (size <= sk->sk_forward_alloc)
 		return;
 	amt = sk_mem_pages(size);
 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
-	sk_memory_allocated_add(sk, amt, &status);
+	sk_memory_allocated_add(sk, amt);
+
+	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
+		mem_cgroup_charge_skmem(sk->sk_memcg, amt);
 }
 
 /* Send a FIN. The caller locks the socket for us.
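Note that sk_forced_mem_schedule() discards the return value of mem_cgroup_charge_skmem(): forced scheduling is used where the skb must be accounted but never refused (retransmits). In terms of the userspace sketch given after the sock.c diff, a forced variant would simply charge both counters unconditionally (again illustrative stand-ins, not a kernel API):

    /* Forced variant of mem_schedule() from the earlier sketch: charge
     * both counters and ignore the cgroup limit check entirely. */
    static void mem_schedule_forced(long pages)
    {
            global_allocated += pages;      /* sk_memory_allocated_add() */
            memcg_allocated += pages;       /* charged even past MEMCG_LIMIT */
    }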
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index db9f1c318afc..4ad8edb46f7c 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1889,9 +1889,6 @@ struct proto tcpv6_prot = {
 	.compat_setsockopt = compat_tcp_setsockopt,
 	.compat_getsockopt = compat_tcp_getsockopt,
 #endif
-#ifdef CONFIG_MEMCG_KMEM
-	.proto_cgroup = tcp_proto_cgroup,
-#endif
 	.clear_sk = tcp_v6_clear_sk,
 	.diag_destroy = tcp_abort,
 };
diff --git a/net/socket.c b/net/socket.c
index 91c2de6f5020..c044d1e8508c 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -294,7 +294,7 @@ static int init_inodecache(void)
 					      0,
 					      (SLAB_HWCACHE_ALIGN |
 					       SLAB_RECLAIM_ACCOUNT |
-					       SLAB_MEM_SPREAD),
+					       SLAB_MEM_SPREAD | SLAB_ACCOUNT),
 					      init_once);
 	if (sock_inode_cachep == NULL)
 		return -ENOMEM;
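The SLAB_ACCOUNT flag added here makes every allocation from the cache chargeable to the allocating task's memory cgroup, so socket inodes are accounted without adding __GFP_ACCOUNT at each allocation site. A sketch of how any cache opts in (struct foo and the cache name are hypothetical):

    #include <linux/slab.h>

    struct foo {
            int id;
            char name[32];
    };

    static struct kmem_cache *foo_cachep;

    static int __init foo_cache_init(void)
    {
            /* SLAB_ACCOUNT: objects from this cache are charged to the
             * memcg of the task that allocates them. */
            foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
                                           0, SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
                                           NULL);
            return foo_cachep ? 0 : -ENOMEM;
    }

The rpc_pipe.c hunk below applies the same one-flag change to the SUNRPC inode cache.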
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index d81186d34558..14f45bf0410c 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1500,7 +1500,7 @@ int register_rpc_pipefs(void)
 	rpc_inode_cachep = kmem_cache_create("rpc_inode_cache",
 				sizeof(struct rpc_inode),
 				0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
-				SLAB_MEM_SPREAD),
+				SLAB_MEM_SPREAD|SLAB_ACCOUNT),
 				init_once);
 	if (!rpc_inode_cachep)
 		return -ENOMEM;