Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c 	 1
-rw-r--r--	mm/page_cgroup.c	33
2 files changed, 26 insertions(+), 8 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d4a92b63e98e..866dcc7eeb0c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1088,7 +1088,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 	int node;
 
 	if (unlikely((cont->parent) == NULL)) {
-		page_cgroup_init();
 		mem = &init_mem_cgroup;
 	} else {
 		mem = mem_cgroup_alloc();
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 5d86550701f2..f59d797dc5a9 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -4,7 +4,10 @@
 #include <linux/bit_spinlock.h>
 #include <linux/page_cgroup.h>
 #include <linux/hash.h>
+#include <linux/slab.h>
 #include <linux/memory.h>
+#include <linux/vmalloc.h>
+#include <linux/cgroup.h>
 
 static void __meminit
 __init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
@@ -66,6 +69,9 @@ void __init page_cgroup_init(void)
 
 	int nid, fail;
 
+	if (mem_cgroup_subsys.disabled)
+		return;
+
 	for_each_online_node(nid) {
 		fail = alloc_node_page_cgroup(nid);
 		if (fail)
@@ -106,9 +112,14 @@ int __meminit init_section_page_cgroup(unsigned long pfn)
 	nid = page_to_nid(pfn_to_page(pfn));
 
 	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
-	base = kmalloc_node(table_size, GFP_KERNEL, nid);
-	if (!base)
-		base = vmalloc_node(table_size, nid);
+	if (slab_is_available()) {
+		base = kmalloc_node(table_size, GFP_KERNEL, nid);
+		if (!base)
+			base = vmalloc_node(table_size, nid);
+	} else {
+		base = __alloc_bootmem_node_nopanic(NODE_DATA(nid), table_size,
+				PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	}
 
 	if (!base) {
 		printk(KERN_ERR "page cgroup allocation failure\n");
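
The hunk above makes init_section_page_cgroup() choose its allocator based on how far boot has progressed: once the slab allocator is up it uses kmalloc_node() with a vmalloc_node() fallback, otherwise it takes the memory straight from the boot-time allocator. A minimal sketch of that pattern, as a hypothetical helper in the context of mm/page_cgroup.c (not part of this patch):

/*
 * Hypothetical helper sketching the allocation fallback above:
 * prefer slab/vmalloc once they are available, otherwise fall back
 * to bootmem (which can never be handed back later).
 */
static void *alloc_page_cgroup_table(unsigned long table_size, int nid)
{
	void *base;

	if (slab_is_available()) {
		base = kmalloc_node(table_size, GFP_KERNEL, nid);
		if (!base)
			base = vmalloc_node(table_size, nid);
	} else {
		base = __alloc_bootmem_node_nopanic(NODE_DATA(nid), table_size,
				PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	}
	return base;
}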
@@ -135,11 +146,16 @@ void __free_page_cgroup(unsigned long pfn)
 	if (!ms || !ms->page_cgroup)
 		return;
 	base = ms->page_cgroup + pfn;
-	ms->page_cgroup = NULL;
-	if (is_vmalloc_addr(base))
-		vfree(base);
-	else
-		kfree(base);
+	if (is_vmalloc_addr(base)) {
+		vfree(base);
+		ms->page_cgroup = NULL;
+	} else {
+		struct page *page = virt_to_page(base);
+		if (!PageReserved(page)) { /* Is bootmem ? */
+			kfree(base);
+			ms->page_cgroup = NULL;
+		}
+	}
 }
 
 int online_page_cgroup(unsigned long start_pfn,
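
The free path mirrors that split: vmalloc and slab allocations are released and ms->page_cgroup is cleared, while bootmem pages (which carry PG_reserved) are left in place because they cannot be returned to the allocator. A hedged sketch of just that check, as a hypothetical helper (not part of this patch):

/*
 * Hypothetical helper sketching the free-side check above: bootmem
 * pages are PG_reserved and are never given back, so only
 * vmalloc/kmalloc tables are actually freed.
 */
static int free_page_cgroup_table(void *base)
{
	if (is_vmalloc_addr(base)) {
		vfree(base);
		return 1;
	}
	if (!PageReserved(virt_to_page(base))) {	/* not bootmem */
		kfree(base);
		return 1;
	}
	return 0;	/* bootmem: keep the table in place */
}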
@@ -213,6 +229,9 @@ void __init page_cgroup_init(void)
 	unsigned long pfn;
 	int fail = 0;
 
+	if (mem_cgroup_subsys.disabled)
+		return;
+
 	for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
 		if (!pfn_present(pfn))
 			continue;
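
Both page_cgroup_init() definitions in this file now bail out before allocating anything when the memory controller is disabled. A minimal sketch of that guard, assuming the controller was turned off at boot (e.g. via cgroup_disable=memory):

/* Sketch: with the memory controller disabled there is no point in
 * allocating the per-page struct page_cgroup tables at boot. */
void __init page_cgroup_init(void)
{
	if (mem_cgroup_subsys.disabled)
		return;
	/* ... allocate per-node or per-section page_cgroup tables ... */
}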