diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-06 20:22:09 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-01-06 20:22:09 -0500 |
commit | 9753dfe19a85e7e45a34a56f4cb2048bb4f50e27 (patch) | |
tree | c017a1b4a70b8447c71b01d8b320e071546b5c9d /mm | |
parent | edf7c8148ec40c0fd27c0ef3f688defcc65e3913 (diff) | |
parent | 9f42f126154786e6e76df513004800c8c633f020 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1958 commits)
net: pack skb_shared_info more efficiently
net_sched: red: split red_parms into parms and vars
net_sched: sfq: extend limits
cnic: Improve error recovery on bnx2x devices
cnic: Re-init dev->stats_addr after chip reset
net_sched: Bug in netem reordering
bna: fix sparse warnings/errors
bna: make ethtool_ops and strings const
xgmac: cleanups
net: make ethtool_ops const
vmxnet3: make ethtool ops const
xen-netback: make ops structs const
virtio_net: Pass gfp flags when allocating rx buffers.
ixgbe: FCoE: Add support for ndo_get_fcoe_hbainfo() call
netdev: FCoE: Add new ndo_get_fcoe_hbainfo() call
igb: reset PHY after recovering from PHY power down
igb: add basic runtime PM support
igb: Add support for byte queue limits.
e1000: cleanup CE4100 MDIO registers access
e1000: unmap ce4100_gbe_mdio_base_virt in e1000_remove
...
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memcontrol.c | 100 |
1 file changed, 97 insertions, 3 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b63f5f7dfa0..94da8ee9e2c 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -50,6 +50,8 @@ | |||
50 | #include <linux/cpu.h> | 50 | #include <linux/cpu.h> |
51 | #include <linux/oom.h> | 51 | #include <linux/oom.h> |
52 | #include "internal.h" | 52 | #include "internal.h" |
53 | #include <net/sock.h> | ||
54 | #include <net/tcp_memcontrol.h> | ||
53 | 55 | ||
54 | #include <asm/uaccess.h> | 56 | #include <asm/uaccess.h> |
55 | 57 | ||
@@ -286,6 +288,10 @@ struct mem_cgroup { | |||
286 | */ | 288 | */ |
287 | struct mem_cgroup_stat_cpu nocpu_base; | 289 | struct mem_cgroup_stat_cpu nocpu_base; |
288 | spinlock_t pcp_counter_lock; | 290 | spinlock_t pcp_counter_lock; |
291 | |||
292 | #ifdef CONFIG_INET | ||
293 | struct tcp_memcontrol tcp_mem; | ||
294 | #endif | ||
289 | }; | 295 | }; |
290 | 296 | ||
291 | /* Stuffs for move charges at task migration. */ | 297 | /* Stuffs for move charges at task migration. */ |
@@ -365,7 +371,58 @@ enum charge_type { | |||
365 | 371 | ||
366 | static void mem_cgroup_get(struct mem_cgroup *memcg); | 372 | static void mem_cgroup_get(struct mem_cgroup *memcg); |
367 | static void mem_cgroup_put(struct mem_cgroup *memcg); | 373 | static void mem_cgroup_put(struct mem_cgroup *memcg); |
368 | static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); | 374 | |
375 | /* Writing them here to avoid exposing memcg's inner layout */ | ||
376 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM | ||
377 | #ifdef CONFIG_INET | ||
378 | #include <net/sock.h> | ||
379 | #include <net/ip.h> | ||
380 | |||
381 | static bool mem_cgroup_is_root(struct mem_cgroup *memcg); | ||
382 | void sock_update_memcg(struct sock *sk) | ||
383 | { | ||
384 | /* A socket spends its whole life in the same cgroup */ | ||
385 | if (sk->sk_cgrp) { | ||
386 | WARN_ON(1); | ||
387 | return; | ||
388 | } | ||
389 | if (static_branch(&memcg_socket_limit_enabled)) { | ||
390 | struct mem_cgroup *memcg; | ||
391 | |||
392 | BUG_ON(!sk->sk_prot->proto_cgroup); | ||
393 | |||
394 | rcu_read_lock(); | ||
395 | memcg = mem_cgroup_from_task(current); | ||
396 | if (!mem_cgroup_is_root(memcg)) { | ||
397 | mem_cgroup_get(memcg); | ||
398 | sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg); | ||
399 | } | ||
400 | rcu_read_unlock(); | ||
401 | } | ||
402 | } | ||
403 | EXPORT_SYMBOL(sock_update_memcg); | ||
404 | |||
405 | void sock_release_memcg(struct sock *sk) | ||
406 | { | ||
407 | if (static_branch(&memcg_socket_limit_enabled) && sk->sk_cgrp) { | ||
408 | struct mem_cgroup *memcg; | ||
409 | WARN_ON(!sk->sk_cgrp->memcg); | ||
410 | memcg = sk->sk_cgrp->memcg; | ||
411 | mem_cgroup_put(memcg); | ||
412 | } | ||
413 | } | ||
414 | |||
415 | struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg) | ||
416 | { | ||
417 | if (!memcg || mem_cgroup_is_root(memcg)) | ||
418 | return NULL; | ||
419 | |||
420 | return &memcg->tcp_mem.cg_proto; | ||
421 | } | ||
422 | EXPORT_SYMBOL(tcp_proto_cgroup); | ||
423 | #endif /* CONFIG_INET */ | ||
424 | #endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */ | ||
425 | |||
369 | static void drain_all_stock_async(struct mem_cgroup *memcg); | 426 | static void drain_all_stock_async(struct mem_cgroup *memcg); |
370 | 427 | ||
371 | static struct mem_cgroup_per_zone * | 428 | static struct mem_cgroup_per_zone * |
@@ -745,7 +802,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page) | |||
745 | preempt_enable(); | 802 | preempt_enable(); |
746 | } | 803 | } |
747 | 804 | ||
748 | static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont) | 805 | struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont) |
749 | { | 806 | { |
750 | return container_of(cgroup_subsys_state(cont, | 807 | return container_of(cgroup_subsys_state(cont, |
751 | mem_cgroup_subsys_id), struct mem_cgroup, | 808 | mem_cgroup_subsys_id), struct mem_cgroup, |
@@ -4612,6 +4669,36 @@ static int mem_control_numa_stat_open(struct inode *unused, struct file *file) | |||
4612 | } | 4669 | } |
4613 | #endif /* CONFIG_NUMA */ | 4670 | #endif /* CONFIG_NUMA */ |
4614 | 4671 | ||
4672 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM | ||
4673 | static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss) | ||
4674 | { | ||
4675 | /* | ||
4676 | * Part of this would be better living in a separate allocation | ||
4677 | * function, leaving us with just the cgroup tree population work. | ||
4678 | * We, however, depend on state such as network's proto_list that | ||
4679 | * is only initialized after cgroup creation. I found the less | ||
4680 | * cumbersome way to deal with it to defer it all to populate time | ||
4681 | */ | ||
4682 | return mem_cgroup_sockets_init(cont, ss); | ||
4683 | }; | ||
4684 | |||
4685 | static void kmem_cgroup_destroy(struct cgroup_subsys *ss, | ||
4686 | struct cgroup *cont) | ||
4687 | { | ||
4688 | mem_cgroup_sockets_destroy(cont, ss); | ||
4689 | } | ||
4690 | #else | ||
4691 | static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss) | ||
4692 | { | ||
4693 | return 0; | ||
4694 | } | ||
4695 | |||
4696 | static void kmem_cgroup_destroy(struct cgroup_subsys *ss, | ||
4697 | struct cgroup *cont) | ||
4698 | { | ||
4699 | } | ||
4700 | #endif | ||
4701 | |||
4615 | static struct cftype mem_cgroup_files[] = { | 4702 | static struct cftype mem_cgroup_files[] = { |
4616 | { | 4703 | { |
4617 | .name = "usage_in_bytes", | 4704 | .name = "usage_in_bytes", |
@@ -4843,12 +4930,13 @@ static void mem_cgroup_put(struct mem_cgroup *memcg) | |||
4843 | /* | 4930 | /* |
4844 | * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled. | 4931 | * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled. |
4845 | */ | 4932 | */ |
4846 | static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) | 4933 | struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) |
4847 | { | 4934 | { |
4848 | if (!memcg->res.parent) | 4935 | if (!memcg->res.parent) |
4849 | return NULL; | 4936 | return NULL; |
4850 | return mem_cgroup_from_res_counter(memcg->res.parent, res); | 4937 | return mem_cgroup_from_res_counter(memcg->res.parent, res); |
4851 | } | 4938 | } |
4939 | EXPORT_SYMBOL(parent_mem_cgroup); | ||
4852 | 4940 | ||
4853 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | 4941 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP |
4854 | static void __init enable_swap_cgroup(void) | 4942 | static void __init enable_swap_cgroup(void) |
@@ -4964,6 +5052,8 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss, | |||
4964 | { | 5052 | { |
4965 | struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); | 5053 | struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); |
4966 | 5054 | ||
5055 | kmem_cgroup_destroy(ss, cont); | ||
5056 | |||
4967 | mem_cgroup_put(memcg); | 5057 | mem_cgroup_put(memcg); |
4968 | } | 5058 | } |
4969 | 5059 | ||
@@ -4977,6 +5067,10 @@ static int mem_cgroup_populate(struct cgroup_subsys *ss, | |||
4977 | 5067 | ||
4978 | if (!ret) | 5068 | if (!ret) |
4979 | ret = register_memsw_files(cont, ss); | 5069 | ret = register_memsw_files(cont, ss); |
5070 | |||
5071 | if (!ret) | ||
5072 | ret = register_kmem_files(cont, ss); | ||
5073 | |||
4980 | return ret; | 5074 | return ret; |
4981 | } | 5075 | } |
4982 | 5076 | ||