about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorJohannes Weiner <hannes@cmpxchg.org>2016-10-07 20:00:58 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-10-07 21:46:29 -0400
commit2d75807383459c04d457bf2d295fa6ad858507d2 (patch)
tree66463e88f69356ec92d59d1c8416b076181970e6
parent08ea8c07fb56d6eb8194d8ad408b469544bf2c29 (diff)
mm: memcontrol: consolidate cgroup socket tracking
The cgroup core and the memory controller need to track socket ownership for different purposes, but the tracking sites being entirely different is kind of ugly. Be a better citizen and rename the memory controller callbacks to match the cgroup core callbacks, then move them to the same place. [akpm@linux-foundation.org: coding-style fixes] Link: http://lkml.kernel.org/r/20160914194846.11153-3-hannes@cmpxchg.org Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Tejun Heo <tj@kernel.org> Cc: "David S. Miller" <davem@davemloft.net> Cc: Michal Hocko <mhocko@suse.cz> Cc: Vladimir Davydov <vdavydov@virtuozzo.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/memcontrol.h6
-rw-r--r--mm/memcontrol.c23
-rw-r--r--net/core/sock.c6
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/tcp_ipv4.c3
5 files changed, 20 insertions, 20 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 0710143723bc..61d20c17f3b7 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -773,13 +773,13 @@ static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
773#endif /* CONFIG_CGROUP_WRITEBACK */ 773#endif /* CONFIG_CGROUP_WRITEBACK */
774 774
775struct sock; 775struct sock;
776void sock_update_memcg(struct sock *sk);
777void sock_release_memcg(struct sock *sk);
778bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); 776bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
779void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); 777void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
780#ifdef CONFIG_MEMCG 778#ifdef CONFIG_MEMCG
781extern struct static_key_false memcg_sockets_enabled_key; 779extern struct static_key_false memcg_sockets_enabled_key;
782#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key) 780#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
781void mem_cgroup_sk_alloc(struct sock *sk);
782void mem_cgroup_sk_free(struct sock *sk);
783static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) 783static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
784{ 784{
785 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure) 785 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
@@ -792,6 +792,8 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
792} 792}
793#else 793#else
794#define mem_cgroup_sockets_enabled 0 794#define mem_cgroup_sockets_enabled 0
795static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
796static inline void mem_cgroup_sk_free(struct sock *sk) { };
795static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) 797static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
796{ 798{
797 return false; 799 return false;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 60bb830abc34..ae052b5e3315 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2939,16 +2939,16 @@ static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
2939 /* 2939 /*
2940 * The active flag needs to be written after the static_key 2940 * The active flag needs to be written after the static_key
2941 * update. This is what guarantees that the socket activation 2941 * update. This is what guarantees that the socket activation
2942 * function is the last one to run. See sock_update_memcg() for 2942 * function is the last one to run. See mem_cgroup_sk_alloc()
2943 * details, and note that we don't mark any socket as belonging 2943 * for details, and note that we don't mark any socket as
2944 * to this memcg until that flag is up. 2944 * belonging to this memcg until that flag is up.
2945 * 2945 *
2946 * We need to do this, because static_keys will span multiple 2946 * We need to do this, because static_keys will span multiple
2947 * sites, but we can't control their order. If we mark a socket 2947 * sites, but we can't control their order. If we mark a socket
2948 * as accounted, but the accounting functions are not patched in 2948 * as accounted, but the accounting functions are not patched in
2949 * yet, we'll lose accounting. 2949 * yet, we'll lose accounting.
2950 * 2950 *
2951 * We never race with the readers in sock_update_memcg(), 2951 * We never race with the readers in mem_cgroup_sk_alloc(),
2952 * because when this value change, the code to process it is not 2952 * because when this value change, the code to process it is not
2953 * patched in yet. 2953 * patched in yet.
2954 */ 2954 */
@@ -5651,11 +5651,15 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
5651DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); 5651DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
5652EXPORT_SYMBOL(memcg_sockets_enabled_key); 5652EXPORT_SYMBOL(memcg_sockets_enabled_key);
5653 5653
5654void sock_update_memcg(struct sock *sk) 5654void mem_cgroup_sk_alloc(struct sock *sk)
5655{ 5655{
5656 struct mem_cgroup *memcg; 5656 struct mem_cgroup *memcg;
5657 5657
5658 /* Socket cloning can throw us here with sk_cgrp already 5658 if (!mem_cgroup_sockets_enabled)
5659 return;
5660
5661 /*
5662 * Socket cloning can throw us here with sk_memcg already
5659 * filled. It won't however, necessarily happen from 5663 * filled. It won't however, necessarily happen from
5660 * process context. So the test for root memcg given 5664 * process context. So the test for root memcg given
5661 * the current task's memcg won't help us in this case. 5665 * the current task's memcg won't help us in this case.
@@ -5680,12 +5684,11 @@ void sock_update_memcg(struct sock *sk)
5680out: 5684out:
5681 rcu_read_unlock(); 5685 rcu_read_unlock();
5682} 5686}
5683EXPORT_SYMBOL(sock_update_memcg);
5684 5687
5685void sock_release_memcg(struct sock *sk) 5688void mem_cgroup_sk_free(struct sock *sk)
5686{ 5689{
5687 WARN_ON(!sk->sk_memcg); 5690 if (sk->sk_memcg)
5688 css_put(&sk->sk_memcg->css); 5691 css_put(&sk->sk_memcg->css);
5689} 5692}
5690 5693
5691/** 5694/**
diff --git a/net/core/sock.c b/net/core/sock.c
index 038e660ef844..c73e28fc9c2a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1363,6 +1363,7 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
1363 slab = prot->slab; 1363 slab = prot->slab;
1364 1364
1365 cgroup_sk_free(&sk->sk_cgrp_data); 1365 cgroup_sk_free(&sk->sk_cgrp_data);
1366 mem_cgroup_sk_free(sk);
1366 security_sk_free(sk); 1367 security_sk_free(sk);
1367 if (slab != NULL) 1368 if (slab != NULL)
1368 kmem_cache_free(slab, sk); 1369 kmem_cache_free(slab, sk);
@@ -1399,6 +1400,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1399 sock_net_set(sk, net); 1400 sock_net_set(sk, net);
1400 atomic_set(&sk->sk_wmem_alloc, 1); 1401 atomic_set(&sk->sk_wmem_alloc, 1);
1401 1402
1403 mem_cgroup_sk_alloc(sk);
1402 cgroup_sk_alloc(&sk->sk_cgrp_data); 1404 cgroup_sk_alloc(&sk->sk_cgrp_data);
1403 sock_update_classid(&sk->sk_cgrp_data); 1405 sock_update_classid(&sk->sk_cgrp_data);
1404 sock_update_netprioidx(&sk->sk_cgrp_data); 1406 sock_update_netprioidx(&sk->sk_cgrp_data);
@@ -1545,6 +1547,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1545 newsk->sk_incoming_cpu = raw_smp_processor_id(); 1547 newsk->sk_incoming_cpu = raw_smp_processor_id();
1546 atomic64_set(&newsk->sk_cookie, 0); 1548 atomic64_set(&newsk->sk_cookie, 0);
1547 1549
1550 mem_cgroup_sk_alloc(newsk);
1548 cgroup_sk_alloc(&newsk->sk_cgrp_data); 1551 cgroup_sk_alloc(&newsk->sk_cgrp_data);
1549 1552
1550 /* 1553 /*
@@ -1569,9 +1572,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1569 sk_set_socket(newsk, NULL); 1572 sk_set_socket(newsk, NULL);
1570 newsk->sk_wq = NULL; 1573 newsk->sk_wq = NULL;
1571 1574
1572 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
1573 sock_update_memcg(newsk);
1574
1575 if (newsk->sk_prot->sockets_allocated) 1575 if (newsk->sk_prot->sockets_allocated)
1576 sk_sockets_allocated_inc(newsk); 1576 sk_sockets_allocated_inc(newsk);
1577 1577
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f253e5019d22..ab984d2ff88a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -424,8 +424,6 @@ void tcp_init_sock(struct sock *sk)
424 sk->sk_rcvbuf = sysctl_tcp_rmem[1]; 424 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
425 425
426 local_bh_disable(); 426 local_bh_disable();
427 if (mem_cgroup_sockets_enabled)
428 sock_update_memcg(sk);
429 sk_sockets_allocated_inc(sk); 427 sk_sockets_allocated_inc(sk);
430 local_bh_enable(); 428 local_bh_enable();
431} 429}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 7ac37c314312..bd5e8d10893f 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1871,9 +1871,6 @@ void tcp_v4_destroy_sock(struct sock *sk)
1871 local_bh_disable(); 1871 local_bh_disable();
1872 sk_sockets_allocated_dec(sk); 1872 sk_sockets_allocated_dec(sk);
1873 local_bh_enable(); 1873 local_bh_enable();
1874
1875 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
1876 sock_release_memcg(sk);
1877} 1874}
1878EXPORT_SYMBOL(tcp_v4_destroy_sock); 1875EXPORT_SYMBOL(tcp_v4_destroy_sock);
1879 1876