author    KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>  2009-06-12 03:33:53 -0400
committer Pekka Enberg <penberg@cs.helsinki.fi>  2009-06-12 04:00:54 -0400
commit    ca371c0d7e23d0d0afae65fc83a0e91cf7399573 (patch)
tree      20ca26fbdceefe25d1c362dc23a2152c1aac8b4c /mm/page_cgroup.c
parent    8ebf975608aaebd7feb33d77f07ba21a6380e086 (diff)
memcg: fix page_cgroup fatal error in FLATMEM
SLAB is now set up very early during boot and can be used from init routines. But replacing alloc_bootmem() with page/slab allocation in the FLAT/DISCONTIGMEM page_cgroup initialization broke the allocation: there the page_cgroup table covers a whole node in one piece, which can exceed the page allocator's limit. (The SPARSEMEM case works fine; it supports MEMORY_HOTPLUG, and its per-section page_cgroup tables stay at a reasonable size, below 1 << MAX_ORDER.)

This patch revives FLATMEM+memory cgroup by going back to alloc_bootmem. In the future we may either stop supporting FLATMEM (if it has no users) or rewrite the flatmem code completely, but that would add more messy code and overhead.

Reported-by: Li Zefan <lizf@cn.fujitsu.com>
Tested-by: Li Zefan <lizf@cn.fujitsu.com>
Tested-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
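To put rough numbers on the size problem the message describes (all values below are illustrative assumptions, not taken from the patch), a minimal standalone C sketch of the arithmetic:

	#include <stdio.h>

	int main(void)
	{
		/* Assumed values for illustration only. */
		unsigned long page_size  = 4096;        /* 4 KB pages */
		unsigned long nr_pages   = 4UL << 20;   /* a 16 GB FLATMEM node */
		unsigned long pc_size    = 32;          /* assumed sizeof(struct page_cgroup) */
		unsigned long max_order  = 11;          /* typical MAX_ORDER on x86 */

		/* On FLATMEM one table must cover the whole node. */
		unsigned long table_size = nr_pages * pc_size;                   /* 128 MB */
		/* Largest buddy allocation is order MAX_ORDER-1. */
		unsigned long max_alloc  = (1UL << (max_order - 1)) * page_size; /* 4 MB */

		printf("table_size = %lu MB, page allocator max = %lu MB\n",
		       table_size >> 20, max_alloc >> 20);
		/* 128 MB far exceeds 4 MB: alloc_pages_node() cannot satisfy
		 * this, so the FLATMEM path must use bootmem, which has no
		 * such order limit. */
		return 0;
	}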
Diffstat (limited to 'mm/page_cgroup.c')
-rw-r--r--  mm/page_cgroup.c | 29
1 file changed, 10 insertions(+), 19 deletions(-)
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 3dd4a909a1de..11a8a10a3909 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -47,8 +47,6 @@ static int __init alloc_node_page_cgroup(int nid)
 	struct page_cgroup *base, *pc;
 	unsigned long table_size;
 	unsigned long start_pfn, nr_pages, index;
-	struct page *page;
-	unsigned int order;
 
 	start_pfn = NODE_DATA(nid)->node_start_pfn;
 	nr_pages = NODE_DATA(nid)->node_spanned_pages;
@@ -57,13 +55,11 @@ static int __init alloc_node_page_cgroup(int nid)
 		return 0;
 
 	table_size = sizeof(struct page_cgroup) * nr_pages;
-	order = get_order(table_size);
-	page = alloc_pages_node(nid, GFP_NOWAIT | __GFP_ZERO, order);
-	if (!page)
-		page = alloc_pages_node(-1, GFP_NOWAIT | __GFP_ZERO, order);
-	if (!page)
+
+	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
+			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	if (!base)
 		return -ENOMEM;
-	base = page_address(page);
 	for (index = 0; index < nr_pages; index++) {
 		pc = base + index;
 		__init_page_cgroup(pc, start_pfn + index);
@@ -73,7 +69,7 @@ static int __init alloc_node_page_cgroup(int nid)
 	return 0;
 }
 
-void __init page_cgroup_init(void)
+void __init page_cgroup_init_flatmem(void)
 {
 
 	int nid, fail;
@@ -117,16 +113,11 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
 	if (!section->page_cgroup) {
 		nid = page_to_nid(pfn_to_page(pfn));
 		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
-		if (slab_is_available()) {
-			base = kmalloc_node(table_size,
-					GFP_KERNEL | __GFP_NOWARN, nid);
-			if (!base)
-				base = vmalloc_node(table_size, nid);
-		} else {
-			base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
-				table_size,
-				PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-		}
+		VM_BUG_ON(!slab_is_available());
+		base = kmalloc_node(table_size,
+				GFP_KERNEL | __GFP_NOWARN, nid);
+		if (!base)
+			base = vmalloc_node(table_size, nid);
 	} else {
 		/*
 		 * We don't have to allocate page_cgroup again, but
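The kmalloc-then-vmalloc fallback kept in the SPARSEMEM path above is a common kernel pattern for tables that are usually small but may be too large or too fragmented for slab. A minimal sketch of the pattern (the helpers table_alloc/table_free are hypothetical, not part of this patch):

	#include <linux/slab.h>
	#include <linux/vmalloc.h>
	#include <linux/mm.h>

	/* Hypothetical helper: try the cheap, physically contiguous slab
	 * allocation first and fall back to vmalloc for large sizes.
	 * __GFP_NOWARN suppresses the allocation-failure warning, since
	 * the fallback makes a kmalloc failure non-fatal. */
	static void *table_alloc(size_t size, int nid)
	{
		void *p = kmalloc_node(size, GFP_KERNEL | __GFP_NOWARN, nid);

		return p ? p : vmalloc_node(size, nid);
	}

	static void table_free(void *p)
	{
		/* is_vmalloc_addr() picks the matching free routine. */
		if (is_vmalloc_addr(p))
			vfree(p);
		else
			kfree(p);
	}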