author    KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>  2009-06-12 03:33:53 -0400
committer Pekka Enberg <penberg@cs.helsinki.fi>               2009-06-12 04:00:54 -0400
commit    ca371c0d7e23d0d0afae65fc83a0e91cf7399573 (patch)
tree      20ca26fbdceefe25d1c362dc23a2152c1aac8b4c
parent    8ebf975608aaebd7feb33d77f07ba21a6380e086 (diff)
memcg: fix page_cgroup fatal error in FLATMEM
SLAB is now set up early enough in boot that it can be used from init routines. But replacing alloc_bootmem() with slab allocations in the FLATMEM/DISCONTIGMEM page_cgroup initialization breaks that allocation: the flatmem page_cgroup table covers a whole node in one contiguous chunk and is too large for the page allocator. The SPARSEMEM case keeps working, since it supports MEMORY_HOTPLUG and each section's page_cgroup table stays at a reasonable size (< 1 << MAX_ORDER). This patch revives FLATMEM + memory cgroup by going back to alloc_bootmem for the flatmem case. In the future we may drop FLATMEM support entirely (if it has no users) or rewrite the flatmem code from scratch, but that would add messier code and more overhead.

Reported-by: Li Zefan <lizf@cn.fujitsu.com>
Tested-by: Li Zefan <lizf@cn.fujitsu.com>
Tested-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
-rw-r--r--  include/linux/page_cgroup.h | 18
-rw-r--r--  init/main.c                 |  5
-rw-r--r--  mm/page_cgroup.c            | 29
3 files changed, 32 insertions, 20 deletions
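To see why the flatmem table cannot come from the page allocator, a back-of-the-envelope check helps. In the sketch below, the node size, sizeof(struct page_cgroup), and MAX_ORDER are assumed, illustrative values, not taken from the patch:

#include <stdio.h>

int main(void)
{
	unsigned long node_bytes = 4UL << 30;	/* assume a 4 GiB node */
	unsigned long page_size  = 4096;	/* assume 4 KiB pages */
	unsigned long pc_size    = 32;		/* assumed sizeof(struct page_cgroup) */
	unsigned long max_order  = 11;		/* typical MAX_ORDER */

	unsigned long nr_pages   = node_bytes / page_size;	/* ~1M pages */
	unsigned long table_size = nr_pages * pc_size;		/* 32 MiB */
	/* largest block the buddy allocator can hand out: 2^(MAX_ORDER-1) pages */
	unsigned long buddy_max  = (1UL << (max_order - 1)) * page_size; /* 4 MiB */

	printf("table_size = %lu MiB, largest buddy block = %lu MiB\n",
	       table_size >> 20, buddy_max >> 20);
	return 0;
}

The flatmem table spans the whole node in one contiguous allocation, so it dwarfs the largest buddy block; the boot-time allocator has no such order limit, which is why the patch falls back to alloc_bootmem there. A SPARSEMEM section, by contrast, covers only PAGES_PER_SECTION pages, so its table stays comfortably under the limit.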
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 7339c7bf7331..13f126c89ae8 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -18,7 +18,19 @@ struct page_cgroup {
 };
 
 void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);
-void __init page_cgroup_init(void);
+
+#ifdef CONFIG_SPARSEMEM
+static inline void __init page_cgroup_init_flatmem(void)
+{
+}
+extern void __init page_cgroup_init(void);
+#else
+void __init page_cgroup_init_flatmem(void);
+static inline void __init page_cgroup_init(void)
+{
+}
+#endif
+
 struct page_cgroup *lookup_page_cgroup(struct page *page);
 
 enum {
@@ -87,6 +99,10 @@ static inline void page_cgroup_init(void)
 {
 }
 
+static inline void __init page_cgroup_init_flatmem(void)
+{
+}
+
 #endif
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
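The header change above follows the usual kernel stub idiom: whichever initializer is not needed in the current configuration collapses to an empty static inline, so call sites never carry #ifdefs. A minimal sketch of the pattern (the feature and config names are hypothetical, not from the patch):

#ifdef CONFIG_MY_FEATURE_SPARSE
static inline void my_feature_init_flat(void) { }	/* compiles away */
extern void my_feature_init(void);			/* real initializer */
#else
void my_feature_init_flat(void);			/* real initializer */
static inline void my_feature_init(void) { }		/* compiles away */
#endif

Either way, callers invoke both functions unconditionally and the compiler drops the no-op one.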
diff --git a/init/main.c b/init/main.c
index 5616661eac01..b3e8f14c568a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -539,6 +539,11 @@ void __init __weak thread_info_cache_init(void)
  */
 static void __init mm_init(void)
 {
+	/*
+	 * page_cgroup requires contiguous pages as memmap
+	 * and it's bigger than MAX_ORDER unless SPARSEMEM.
+	 */
+	page_cgroup_init_flatmem();
 	mem_init();
 	kmem_cache_init();
 	vmalloc_init();
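The placement inside mm_init() is the whole point: the flatmem table must be carved out of bootmem before mem_init() releases the remaining boot memory to the buddy allocator, while the SPARSEMEM path can wait until slab is up. The resulting ordering, annotated (the comments below are a reading of the boot sequence, not part of the patched source):

static void __init mm_init(void)
{
	page_cgroup_init_flatmem();	/* bootmem is still live here */
	mem_init();			/* bootmem handed over to the buddy allocator */
	kmem_cache_init();		/* slab becomes usable from here on */
	vmalloc_init();
}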
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 3dd4a909a1de..11a8a10a3909 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -47,8 +47,6 @@ static int __init alloc_node_page_cgroup(int nid)
 	struct page_cgroup *base, *pc;
 	unsigned long table_size;
 	unsigned long start_pfn, nr_pages, index;
-	struct page *page;
-	unsigned int order;
 
 	start_pfn = NODE_DATA(nid)->node_start_pfn;
 	nr_pages = NODE_DATA(nid)->node_spanned_pages;
@@ -57,13 +55,11 @@ static int __init alloc_node_page_cgroup(int nid)
 		return 0;
 
 	table_size = sizeof(struct page_cgroup) * nr_pages;
-	order = get_order(table_size);
-	page = alloc_pages_node(nid, GFP_NOWAIT | __GFP_ZERO, order);
-	if (!page)
-		page = alloc_pages_node(-1, GFP_NOWAIT | __GFP_ZERO, order);
-	if (!page)
+
+	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
+			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	if (!base)
 		return -ENOMEM;
-	base = page_address(page);
 	for (index = 0; index < nr_pages; index++) {
 		pc = base + index;
 		__init_page_cgroup(pc, start_pfn + index);
@@ -73,7 +69,7 @@ static int __init alloc_node_page_cgroup(int nid)
 	return 0;
 }
 
-void __init page_cgroup_init(void)
+void __init page_cgroup_init_flatmem(void)
 {
 
 	int nid, fail;
@@ -117,16 +113,11 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
 	if (!section->page_cgroup) {
 		nid = page_to_nid(pfn_to_page(pfn));
 		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
-		if (slab_is_available()) {
-			base = kmalloc_node(table_size,
-					GFP_KERNEL | __GFP_NOWARN, nid);
-			if (!base)
-				base = vmalloc_node(table_size, nid);
-		} else {
-			base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
-					table_size,
-					PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-		}
+		VM_BUG_ON(!slab_is_available());
+		base = kmalloc_node(table_size,
+				GFP_KERNEL | __GFP_NOWARN, nid);
+		if (!base)
+			base = vmalloc_node(table_size, nid);
 	} else {
 		/*
 		 * We don't have to allocate page_cgroup again, but
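With bootmem gone from this path, the section initializer relies on the common kmalloc-then-vmalloc fallback: try physically contiguous memory first, and settle for virtually contiguous memory if that fails. A pattern like this implies a matching free path that checks which allocator produced the pointer; a self-contained sketch of the pairing (the helper names are hypothetical, not from the patch):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *table_alloc(size_t size, int nid)
{
	void *base;

	/* try physically contiguous memory on the right node first,
	 * suppressing the warning a large failed request would trigger */
	base = kmalloc_node(size, GFP_KERNEL | __GFP_NOWARN, nid);
	if (!base)
		/* fall back to virtually contiguous memory */
		base = vmalloc_node(size, nid);
	return base;
}

static void table_free(void *base)
{
	if (is_vmalloc_addr(base))
		vfree(base);
	else
		kfree(base);
}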