diff options
author | Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> | 2011-03-23 19:42:25 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-03-23 22:46:25 -0400 |
commit | f212ad7cf9c73f8a7fa160e223dcb3f074441a72 (patch) | |
tree | 95789ef7ce662e86a3e4aded5dfb97c51dc7b0a0 /mm | |
parent | af4a662144884a7dbb19acbef70878b3b955f928 (diff) |
memcg: add memcg sanity checks at allocating and freeing pages
Add checks at page allocation and freeing to determine whether the page is used (i.e.,
charged) from the viewpoint of memcg.
This check may be useful in debugging a problem; we did similar checks
before commit 52d4b9ac ("memcg: allocate all page_cgroup at boot").
This patch adds some overhead when allocating or freeing memory, so the
check is enabled only when CONFIG_DEBUG_VM is enabled.
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memcontrol.c | 46 | ||||
-rw-r--r-- | mm/page_alloc.c | 8 |
2 files changed, 52 insertions, 2 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3a2d54bdf076..0356cb6c9504 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -3046,6 +3046,52 @@ int mem_cgroup_shmem_charge_fallback(struct page *page, | |||
3046 | return ret; | 3046 | return ret; |
3047 | } | 3047 | } |
3048 | 3048 | ||
3049 | #ifdef CONFIG_DEBUG_VM | ||
3050 | static struct page_cgroup *lookup_page_cgroup_used(struct page *page) | ||
3051 | { | ||
3052 | struct page_cgroup *pc; | ||
3053 | |||
3054 | pc = lookup_page_cgroup(page); | ||
3055 | if (likely(pc) && PageCgroupUsed(pc)) | ||
3056 | return pc; | ||
3057 | return NULL; | ||
3058 | } | ||
3059 | |||
3060 | bool mem_cgroup_bad_page_check(struct page *page) | ||
3061 | { | ||
3062 | if (mem_cgroup_disabled()) | ||
3063 | return false; | ||
3064 | |||
3065 | return lookup_page_cgroup_used(page) != NULL; | ||
3066 | } | ||
3067 | |||
3068 | void mem_cgroup_print_bad_page(struct page *page) | ||
3069 | { | ||
3070 | struct page_cgroup *pc; | ||
3071 | |||
3072 | pc = lookup_page_cgroup_used(page); | ||
3073 | if (pc) { | ||
3074 | int ret = -1; | ||
3075 | char *path; | ||
3076 | |||
3077 | printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p", | ||
3078 | pc, pc->flags, pc->mem_cgroup); | ||
3079 | |||
3080 | path = kmalloc(PATH_MAX, GFP_KERNEL); | ||
3081 | if (path) { | ||
3082 | rcu_read_lock(); | ||
3083 | ret = cgroup_path(pc->mem_cgroup->css.cgroup, | ||
3084 | path, PATH_MAX); | ||
3085 | rcu_read_unlock(); | ||
3086 | } | ||
3087 | |||
3088 | printk(KERN_CONT "(%s)\n", | ||
3089 | (ret < 0) ? "cannot get the path" : path); | ||
3090 | kfree(path); | ||
3091 | } | ||
3092 | } | ||
3093 | #endif | ||
3094 | |||
3049 | static DEFINE_MUTEX(set_limit_mutex); | 3095 | static DEFINE_MUTEX(set_limit_mutex); |
3050 | 3096 | ||
3051 | static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, | 3097 | static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 3a58221f4c22..8e5726ab0d85 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -53,6 +53,7 @@ | |||
53 | #include <linux/compaction.h> | 53 | #include <linux/compaction.h> |
54 | #include <trace/events/kmem.h> | 54 | #include <trace/events/kmem.h> |
55 | #include <linux/ftrace_event.h> | 55 | #include <linux/ftrace_event.h> |
56 | #include <linux/memcontrol.h> | ||
56 | 57 | ||
57 | #include <asm/tlbflush.h> | 58 | #include <asm/tlbflush.h> |
58 | #include <asm/div64.h> | 59 | #include <asm/div64.h> |
@@ -565,7 +566,8 @@ static inline int free_pages_check(struct page *page) | |||
565 | if (unlikely(page_mapcount(page) | | 566 | if (unlikely(page_mapcount(page) | |
566 | (page->mapping != NULL) | | 567 | (page->mapping != NULL) | |
567 | (atomic_read(&page->_count) != 0) | | 568 | (atomic_read(&page->_count) != 0) | |
568 | (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) { | 569 | (page->flags & PAGE_FLAGS_CHECK_AT_FREE) | |
570 | (mem_cgroup_bad_page_check(page)))) { | ||
569 | bad_page(page); | 571 | bad_page(page); |
570 | return 1; | 572 | return 1; |
571 | } | 573 | } |
@@ -754,7 +756,8 @@ static inline int check_new_page(struct page *page) | |||
754 | if (unlikely(page_mapcount(page) | | 756 | if (unlikely(page_mapcount(page) | |
755 | (page->mapping != NULL) | | 757 | (page->mapping != NULL) | |
756 | (atomic_read(&page->_count) != 0) | | 758 | (atomic_read(&page->_count) != 0) | |
757 | (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) { | 759 | (page->flags & PAGE_FLAGS_CHECK_AT_PREP) | |
760 | (mem_cgroup_bad_page_check(page)))) { | ||
758 | bad_page(page); | 761 | bad_page(page); |
759 | return 1; | 762 | return 1; |
760 | } | 763 | } |
@@ -5684,4 +5687,5 @@ void dump_page(struct page *page) | |||
5684 | page, atomic_read(&page->_count), page_mapcount(page), | 5687 | page, atomic_read(&page->_count), page_mapcount(page), |
5685 | page->mapping, page->index); | 5688 | page->mapping, page->index); |
5686 | dump_page_flags(page->flags); | 5689 | dump_page_flags(page->flags); |
5690 | mem_cgroup_print_bad_page(page); | ||
5687 | } | 5691 | } |