aboutsummaryrefslogtreecommitdiffstats
path: root/net/socket.c
diff options
context:
space:
mode:
authorTonghao Zhang <xiangxia.m.yue@gmail.com>2017-12-14 08:51:58 -0500
committerDavid S. Miller <davem@davemloft.net>2017-12-19 09:58:14 -0500
commit648845ab7e200993dccd3948c719c858368c91e7 (patch)
treeca35bba9338cb8bca9cadfa1badd2e755277beda /net/socket.c
parent08fc7f8140730d2f8499c91b5abad44581b74635 (diff)
sock: Move the socket inuse to namespace.
In some cases, we want to know how many sockets are in use in different _net_ namespaces. It's a key resource metric. This patch adds a member to struct netns_core: a counter for sockets in use in the _net_ namespace. The patch adds/subtracts the counter in sk_alloc, sk_clone_lock and __sk_free. This patch does not count sockets created in the kernel, as it's not very useful for userspace to know how many kernel sockets we created. The main reasons for doing this are: 1. When Linux calls 'do_exit' for a process to exit, the functions 'exit_task_namespaces' and 'exit_task_work' are called sequentially. 'exit_task_namespaces' may have destroyed the _net_ namespace, but 'sock_release', called in 'exit_task_work', may use the _net_ namespace if we count the sockets in use in sock_release. 2. socket and sock come in pairs. More importantly, sock holds the _net_ namespace. We count the sockets in use in sock, to avoid holding the _net_ namespace again in socket. It's an easy way to maintain the code. Signed-off-by: Martin Zhang <zhangjunweimartin@didichuxing.com> Signed-off-by: Tonghao Zhang <zhangtonghao@didichuxing.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/socket.c')
-rw-r--r--net/socket.c21
1 file changed, 2 insertions, 19 deletions
diff --git a/net/socket.c b/net/socket.c
index 05f361faec45..bbd2e9ceb692 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -163,12 +163,6 @@ static DEFINE_SPINLOCK(net_family_lock);
163static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly; 163static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
164 164
165/* 165/*
166 * Statistics counters of the socket lists
167 */
168
169static DEFINE_PER_CPU(int, sockets_in_use);
170
171/*
172 * Support routines. 166 * Support routines.
173 * Move socket addresses back and forth across the kernel/user 167 * Move socket addresses back and forth across the kernel/user
174 * divide and look after the messy bits. 168 * divide and look after the messy bits.
@@ -578,7 +572,6 @@ struct socket *sock_alloc(void)
578 inode->i_gid = current_fsgid(); 572 inode->i_gid = current_fsgid();
579 inode->i_op = &sockfs_inode_ops; 573 inode->i_op = &sockfs_inode_ops;
580 574
581 this_cpu_add(sockets_in_use, 1);
582 return sock; 575 return sock;
583} 576}
584EXPORT_SYMBOL(sock_alloc); 577EXPORT_SYMBOL(sock_alloc);
@@ -605,7 +598,6 @@ void sock_release(struct socket *sock)
605 if (rcu_dereference_protected(sock->wq, 1)->fasync_list) 598 if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
606 pr_err("%s: fasync list not empty!\n", __func__); 599 pr_err("%s: fasync list not empty!\n", __func__);
607 600
608 this_cpu_sub(sockets_in_use, 1);
609 if (!sock->file) { 601 if (!sock->file) {
610 iput(SOCK_INODE(sock)); 602 iput(SOCK_INODE(sock));
611 return; 603 return;
@@ -2622,17 +2614,8 @@ core_initcall(sock_init); /* early initcall */
2622#ifdef CONFIG_PROC_FS 2614#ifdef CONFIG_PROC_FS
2623void socket_seq_show(struct seq_file *seq) 2615void socket_seq_show(struct seq_file *seq)
2624{ 2616{
2625 int cpu; 2617 seq_printf(seq, "sockets: used %d\n",
2626 int counter = 0; 2618 sock_inuse_get(seq->private));
2627
2628 for_each_possible_cpu(cpu)
2629 counter += per_cpu(sockets_in_use, cpu);
2630
2631 /* It can be negative, by the way. 8) */
2632 if (counter < 0)
2633 counter = 0;
2634
2635 seq_printf(seq, "sockets: used %d\n", counter);
2636} 2619}
2637#endif /* CONFIG_PROC_FS */ 2620#endif /* CONFIG_PROC_FS */
2638 2621