author     Arnd Bergmann <arnd@arndb.de>   2009-06-27 09:58:13 -0400
committer  Arnd Bergmann <arnd@arndb.de>   2009-06-27 09:58:13 -0400
commit     fbd85b0e26bab0a13dcf860f2c20e86cb0507b61
tree       979873812505a8f0d784dbe332df232a04d1daf2
parent     9b05706a744da939655525eeeae23f1989b434ce
score: clean up mm/init.c
score does not need multiple zero pages, because it does not suffer
from cache aliasing problems, so simplify that code. Also make some
functions static and include the appropriate header files.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
 arch/score/mm/init.c | 34 ++++++++++------------------------
 1 file changed, 10 insertions(+), 24 deletions(-)
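For background on the first hunk: on an architecture with virtually indexed,
aliasing caches (MIPS, which this code was inherited from), the kernel keeps a
block of zeroed pages and lets ZERO_PAGE() pick the one whose cache colour
matches the faulting virtual address; zero_page_mask does that selection.
score has no cache aliasing, so a single zero page is enough and the mask can
go entirely. An illustrative sketch of the two variants, patterned on the
MIPS macro and on what score's pgtable.h plausibly reduces to (not code from
this patch):

	/*
	 * MIPS-style, coloured zero pages: the low bits of the faulting
	 * address select a zero page with a matching cache colour.
	 */
	#define ZERO_PAGE(vaddr) \
		(virt_to_page((void *)(empty_zero_page + \
			(((unsigned long)(vaddr)) & zero_page_mask))))

	/* score, no aliasing: every user gets the same single page. */
	#define ZERO_PAGE(vaddr) \
		(virt_to_page((void *)(empty_zero_page)))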
diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c
index 7780eecc5a43..d496e9f1a601 100644
--- a/arch/score/mm/init.c
+++ b/arch/score/mm/init.c
@@ -32,44 +32,30 @@
 #include <linux/pagemap.h>
 #include <linux/proc_fs.h>
 #include <linux/sched.h>
-#include <asm-generic/sections.h>
+#include <linux/initrd.h>
 
+#include <asm/sections.h>
 #include <asm/tlb.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-/*
- * We have up to 8 empty zeroed pages so we can map one of the right colour
- * when needed.
- */
-unsigned long zero_page_mask;
 unsigned long empty_zero_page;
 EXPORT_SYMBOL_GPL(empty_zero_page);
 
 static struct kcore_list kcore_mem, kcore_vmalloc;
 
-unsigned long setup_zero_pages(void)
+static unsigned long setup_zero_page(void)
 {
-	unsigned int order = 0;
-	unsigned long size;
 	struct page *page;
 
-	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
 	if (!empty_zero_page)
 		panic("Oh boy, that early out of memory?");
 
 	page = virt_to_page((void *) empty_zero_page);
-	split_page(page, order);
-	while (page < virt_to_page((void *) (empty_zero_page +
-					(PAGE_SIZE << order)))) {
-		SetPageReserved(page);
-		page++;
-	}
-
-	size = PAGE_SIZE << order;
-	zero_page_mask = (size - 1) & PAGE_MASK;
+	SetPageReserved(page);
 
-	return 1UL << order;
+	return 1UL;
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -100,7 +86,7 @@ void __init mem_init(void)
 	max_mapnr = max_low_pfn;
 	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
 	totalram_pages += free_all_bootmem();
-	totalram_pages -= setup_zero_pages();	/* Setup zeroed pages. */
+	totalram_pages -= setup_zero_page();	/* Setup zeroed pages. */
 	reservedpages = 0;
 
 	for (tmp = 0; tmp < max_low_pfn; tmp++)
@@ -129,7 +115,7 @@ void __init mem_init(void)
 }
 #endif /* !CONFIG_NEED_MULTIPLE_NODES */
 
-void free_init_pages(const char *what, unsigned long begin, unsigned long end)
+static void free_init_pages(const char *what, unsigned long begin, unsigned long end)
 {
 	unsigned long pfn;
 
@@ -150,8 +136,8 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
 	free_init_pages("initrd memory",
-		virt_to_phys((void *) start),
-		virt_to_phys((void *) end));
+			virt_to_phys((void *) start),
+			virt_to_phys((void *) end));
 }
 #endif
 
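A note on what the deleted loop in setup_zero_pages() was doing: it is the
stock pattern for pinning a multi-page allocation, where __get_free_pages()
returns a contiguous order-N block, split_page() splits it into independently
refcounted order-0 pages, and each page is marked reserved so the VM never
reclaims it. Since order was hardcoded to 0, the loop only ever ran once,
which is why the patch can collapse it to a single SetPageReserved(). The
generic shape of the removed pattern, as a hedged sketch rather than score
code:

	/*
	 * Illustrative only: reserve every page of an order-N allocation.
	 * With order == 0 (the only case score ever used) this degenerates
	 * to one SetPageReserved() call, exactly what the patch keeps.
	 */
	static void reserve_zeroed_block(unsigned long addr, unsigned int order)
	{
		struct page *page = virt_to_page((void *)addr);
		struct page *end = page + (1UL << order);

		split_page(page, order);	/* give each page its own refcount */
		for (; page < end; page++)
			SetPageReserved(page);
	}

The return value of the new setup_zero_page() ties into the accounting in
mem_init(): the zero page is permanently pinned, so exactly one page is
subtracted from totalram_pages.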