Diffstat (limited to 'mm/page_cgroup.c')
-rw-r--r--  mm/page_cgroup.c  140
1 file changed, 84 insertions(+), 56 deletions(-)
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 5bffada7cde1..99055010cece 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -11,12 +11,11 @@
 #include <linux/swapops.h>
 #include <linux/kmemleak.h>
 
-static void __meminit
-__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
+static void __meminit init_page_cgroup(struct page_cgroup *pc, unsigned long id)
 {
 	pc->flags = 0;
+	set_page_cgroup_array_id(pc, id);
 	pc->mem_cgroup = NULL;
-	pc->page = pfn_to_page(pfn);
 	INIT_LIST_HEAD(&pc->lru);
 }
 static unsigned long total_usage;
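
The removed pc->page back-pointer cost one pointer per struct page_cgroup. Instead, the id of the containing array (the node id with flatmem, the section number with sparsemem) is now encoded into spare bits of pc->flags, and the page is recomputed on demand by the new lookup_cgroup_page(). The set/get helpers live outside this file; a minimal sketch of what they might look like, assuming the id is packed into otherwise unused high bits of flags (PCG_ARRAYID_SHIFT and PCG_ARRAYID_MASK are illustrative names, not taken from this patch):

	/* Illustrative only: pack/unpack the array id in pc->flags. */
	static inline void set_page_cgroup_array_id(struct page_cgroup *pc,
						    unsigned long id)
	{
		pc->flags &= ~(PCG_ARRAYID_MASK << PCG_ARRAYID_SHIFT);
		pc->flags |= (id & PCG_ARRAYID_MASK) << PCG_ARRAYID_SHIFT;
	}

	static inline unsigned long page_cgroup_array_id(struct page_cgroup *pc)
	{
		return (pc->flags >> PCG_ARRAYID_SHIFT) & PCG_ARRAYID_MASK;
	}
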
@@ -43,6 +42,19 @@ struct page_cgroup *lookup_page_cgroup(struct page *page)
 	return base + offset;
 }
 
+struct page *lookup_cgroup_page(struct page_cgroup *pc)
+{
+	unsigned long pfn;
+	struct page *page;
+	pg_data_t *pgdat;
+
+	pgdat = NODE_DATA(page_cgroup_array_id(pc));
+	pfn = pc - pgdat->node_page_cgroup + pgdat->node_start_pfn;
+	page = pfn_to_page(pfn);
+	VM_BUG_ON(pc != lookup_page_cgroup(page));
+	return page;
+}
+
 static int __init alloc_node_page_cgroup(int nid)
 {
 	struct page_cgroup *base, *pc;
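
With flatmem there is exactly one page_cgroup array per node, covering the node's pfn range starting at node_start_pfn. The array id stored in pc->flags names the node, so the pfn falls out of pointer arithmetic: the index of pc within pgdat->node_page_cgroup plus pgdat->node_start_pfn. The VM_BUG_ON asserts that the mapping round-trips, i.e. that lookup_cgroup_page() and lookup_page_cgroup() are inverses. A hypothetical check of that invariant, not part of the patch:

	/* Hypothetical: verify the two lookups invert each other. */
	static void check_page_cgroup_roundtrip(unsigned long pfn)
	{
		struct page *page = pfn_to_page(pfn);
		struct page_cgroup *pc = lookup_page_cgroup(page);

		BUG_ON(lookup_cgroup_page(pc) != page);
	}
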
@@ -63,7 +75,7 @@ static int __init alloc_node_page_cgroup(int nid)
 		return -ENOMEM;
 	for (index = 0; index < nr_pages; index++) {
 		pc = base + index;
-		__init_page_cgroup(pc, start_pfn + index);
+		init_page_cgroup(pc, nid);
 	}
 	NODE_DATA(nid)->node_page_cgroup = base;
 	total_usage += table_size;
@@ -105,46 +117,75 @@ struct page_cgroup *lookup_page_cgroup(struct page *page)
 	return section->page_cgroup + pfn;
 }
 
-/* __alloc_bootmem...() is protected by !slab_available() */
+struct page *lookup_cgroup_page(struct page_cgroup *pc)
+{
+	struct mem_section *section;
+	struct page *page;
+	unsigned long nr;
+
+	nr = page_cgroup_array_id(pc);
+	section = __nr_to_section(nr);
+	page = pfn_to_page(pc - section->page_cgroup);
+	VM_BUG_ON(pc != lookup_page_cgroup(page));
+	return page;
+}
+
+static void *__init_refok alloc_page_cgroup(size_t size, int nid)
+{
+	void *addr = NULL;
+
+	addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_NOWARN);
+	if (addr)
+		return addr;
+
+	if (node_state(nid, N_HIGH_MEMORY))
+		addr = vmalloc_node(size, nid);
+	else
+		addr = vmalloc(size);
+
+	return addr;
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+static void free_page_cgroup(void *addr)
+{
+	if (is_vmalloc_addr(addr)) {
+		vfree(addr);
+	} else {
+		struct page *page = virt_to_page(addr);
+		size_t table_size =
+			sizeof(struct page_cgroup) * PAGES_PER_SECTION;
+
+		BUG_ON(PageReserved(page));
+		free_pages_exact(addr, table_size);
+	}
+}
+#endif
+
 static int __init_refok init_section_page_cgroup(unsigned long pfn)
 {
-	struct mem_section *section = __pfn_to_section(pfn);
 	struct page_cgroup *base, *pc;
+	struct mem_section *section;
 	unsigned long table_size;
+	unsigned long nr;
 	int nid, index;
 
-	if (!section->page_cgroup) {
-		nid = page_to_nid(pfn_to_page(pfn));
-		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
-		VM_BUG_ON(!slab_is_available());
-		if (node_state(nid, N_HIGH_MEMORY)) {
-			base = kmalloc_node(table_size,
-					GFP_KERNEL | __GFP_NOWARN, nid);
-			if (!base)
-				base = vmalloc_node(table_size, nid);
-		} else {
-			base = kmalloc(table_size, GFP_KERNEL | __GFP_NOWARN);
-			if (!base)
-				base = vmalloc(table_size);
-		}
-		/*
-		 * The value stored in section->page_cgroup is (base - pfn)
-		 * and it does not point to the memory block allocated above,
-		 * causing kmemleak false positives.
-		 */
-		kmemleak_not_leak(base);
-	} else {
-		/*
-		 * We don't have to allocate page_cgroup again, but
-		 * address of memmap may be changed. So, we have to initialize
-		 * again.
-		 */
-		base = section->page_cgroup + pfn;
-		table_size = 0;
-		/* check address of memmap is changed or not. */
-		if (base->page == pfn_to_page(pfn))
-			return 0;
-	}
+	nr = pfn_to_section_nr(pfn);
+	section = __nr_to_section(nr);
+
+	if (section->page_cgroup)
+		return 0;
+
+	nid = page_to_nid(pfn_to_page(pfn));
+	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
+	base = alloc_page_cgroup(table_size, nid);
+
+	/*
+	 * The value stored in section->page_cgroup is (base - pfn)
+	 * and it does not point to the memory block allocated above,
+	 * causing kmemleak false positives.
+	 */
+	kmemleak_not_leak(base);
 
 	if (!base) {
 		printk(KERN_ERR "page cgroup allocation failure\n");
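
Two simplifications here. First, the ad-hoc kmalloc-or-vmalloc allocation is factored into alloc_page_cgroup(), which prefers alloc_pages_exact() and falls back to vmalloc; freeing can then distinguish the two cases with is_vmalloc_addr() alone, with no PageReserved() probing for bootmem. Second, with pc->page gone nothing in the table depends on the memmap address anymore, so the old "memmap may have changed, reinitialize" branch is dead and an already-populated section can simply return early. The alloc/free pairing in isolation, as a sketch (table_alloc/table_free are illustrative names, not part of the patch):

	static void *table_alloc(size_t size)
	{
		void *addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_NOWARN);

		return addr ? addr : vmalloc(size);
	}

	static void table_free(void *addr, size_t size)
	{
		if (is_vmalloc_addr(addr))	/* came from the vmalloc fallback? */
			vfree(addr);
		else
			free_pages_exact(addr, size);
	}
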
@@ -153,7 +194,7 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
 
 	for (index = 0; index < PAGES_PER_SECTION; index++) {
 		pc = base + index;
-		__init_page_cgroup(pc, pfn + index);
+		init_page_cgroup(pc, nr);
 	}
 
 	section->page_cgroup = base - pfn;
@@ -170,16 +211,8 @@ void __free_page_cgroup(unsigned long pfn)
 	if (!ms || !ms->page_cgroup)
 		return;
 	base = ms->page_cgroup + pfn;
-	if (is_vmalloc_addr(base)) {
-		vfree(base);
-		ms->page_cgroup = NULL;
-	} else {
-		struct page *page = virt_to_page(base);
-		if (!PageReserved(page)) { /* Is bootmem ? */
-			kfree(base);
-			ms->page_cgroup = NULL;
-		}
-	}
+	free_page_cgroup(base);
+	ms->page_cgroup = NULL;
 }
 
 int __meminit online_page_cgroup(unsigned long start_pfn,
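
Note the behavioral change here: the old code silently skipped bootmem-allocated tables (PageReserved) and left ms->page_cgroup set for them, whereas free_page_cgroup() now BUG()s on a reserved page, presumably because sections freed through this hotplug path are never populated from bootmem once alloc_page_cgroup() is the only allocator.
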
@@ -243,12 +276,7 @@ static int __meminit page_cgroup_callback(struct notifier_block *self,
 		break;
 	}
 
-	if (ret)
-		ret = notifier_from_errno(ret);
-	else
-		ret = NOTIFY_OK;
-
-	return ret;
+	return notifier_from_errno(ret);
 }
 
 #endif
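
This works because notifier_from_errno(0) already evaluates to NOTIFY_OK, so the if/else was redundant. For reference, the helper (as defined in include/linux/notifier.h at the time) encodes a negative errno into a stopping notifier return value:

	static inline int notifier_from_errno(int err)
	{
		if (err)
			return NOTIFY_STOP_MASK | (NOTIFY_OK - err);

		return NOTIFY_OK;
	}
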
@@ -349,7 +377,7 @@ not_enough_page:
  * @new: new id
  *
  * Returns old id at success, 0 at failure.
- * (There is no mem_cgroup useing 0 as its id)
+ * (There is no mem_cgroup using 0 as its id)
  */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
			unsigned short old, unsigned short new)