aboutsummaryrefslogtreecommitdiffstats
path: root/arch/microblaze
diff options
context:
space:
mode:
authorMichal Simek <monstr@monstr.eu>2011-12-19 07:46:35 -0500
committerMichal Simek <monstr@monstr.eu>2012-03-23 04:28:10 -0400
commit83a92529c1789f86481190743a6bb09f31ec39a8 (patch)
tree04e451b84770b08eceb5e483fb2c79a4d7f3c0c2 /arch/microblaze
parent4e2e4124b7fe68b28e9f759b7ecc0ec16307fce6 (diff)
microblaze: mm: Fix lowmem max memory size limits
Use CONFIG_LOWMEM_SIZE if the system has a larger RAM size. For systems with a larger RAM size, enable HIGHMEM support. Also setup limitation for memblock and use memblock allocation in lowmem region. Signed-off-by: Michal Simek <monstr@monstr.eu>
Diffstat (limited to 'arch/microblaze')
-rw-r--r--arch/microblaze/include/asm/page.h1
-rw-r--r--arch/microblaze/include/asm/pgtable.h3
-rw-r--r--arch/microblaze/include/asm/uaccess.h2
-rw-r--r--arch/microblaze/mm/init.c67
-rw-r--r--arch/microblaze/mm/pgtable.c7
5 files changed, 46 insertions, 34 deletions
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index a25e6b5e2ad4..665f29330ce1 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -135,7 +135,6 @@ extern unsigned long min_low_pfn;
135extern unsigned long max_pfn; 135extern unsigned long max_pfn;
136 136
137extern unsigned long memory_start; 137extern unsigned long memory_start;
138extern unsigned long memory_end;
139extern unsigned long memory_size; 138extern unsigned long memory_size;
140 139
141extern int page_is_ram(unsigned long pfn); 140extern int page_is_ram(unsigned long pfn);
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index b2af42311a12..d8f2c3c68d38 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -94,8 +94,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
94/* Start and end of the vmalloc area. */ 94/* Start and end of the vmalloc area. */
95/* Make sure to map the vmalloc area above the pinned kernel memory area 95/* Make sure to map the vmalloc area above the pinned kernel memory area
96 of 32Mb. */ 96 of 32Mb. */
97#define VMALLOC_START (CONFIG_KERNEL_START + \ 97#define VMALLOC_START (CONFIG_KERNEL_START + CONFIG_LOWMEM_SIZE)
98 max(32 * 1024 * 1024UL, memory_size))
99#define VMALLOC_END ioremap_bot 98#define VMALLOC_END ioremap_bot
100 99
101#endif /* __ASSEMBLY__ */ 100#endif /* __ASSEMBLY__ */
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 072b0077abf9..ef25f7538d4a 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -80,7 +80,7 @@ extern unsigned long search_exception_table(unsigned long);
80static inline int ___range_ok(unsigned long addr, unsigned long size) 80static inline int ___range_ok(unsigned long addr, unsigned long size)
81{ 81{
82 return ((addr < memory_start) || 82 return ((addr < memory_start) ||
83 ((addr + size) > memory_end)); 83 ((addr + size - 1) > (memory_start + memory_size - 1)));
84} 84}
85 85
86#define __range_ok(addr, size) \ 86#define __range_ok(addr, size) \
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index a72f42498c25..2253e122aa85 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -44,9 +44,9 @@ char *klimit = _end;
44 */ 44 */
45unsigned long memory_start; 45unsigned long memory_start;
46EXPORT_SYMBOL(memory_start); 46EXPORT_SYMBOL(memory_start);
47unsigned long memory_end; /* due to mm/nommu.c */
48unsigned long memory_size; 47unsigned long memory_size;
49EXPORT_SYMBOL(memory_size); 48EXPORT_SYMBOL(memory_size);
49unsigned long lowmem_size;
50 50
51/* 51/*
52 * paging_init() sets up the page tables - in fact we've already done this. 52 * paging_init() sets up the page tables - in fact we've already done this.
@@ -58,7 +58,7 @@ static void __init paging_init(void)
58 /* Clean every zones */ 58 /* Clean every zones */
59 memset(zones_size, 0, sizeof(zones_size)); 59 memset(zones_size, 0, sizeof(zones_size));
60 60
61 zones_size[ZONE_DMA] = max_mapnr; 61 zones_size[ZONE_DMA] = max_pfn;
62 62
63 free_area_init(zones_size); 63 free_area_init(zones_size);
64} 64}
@@ -74,32 +74,31 @@ void __init setup_memory(void)
74 /* Find main memory where is the kernel */ 74 /* Find main memory where is the kernel */
75 for_each_memblock(memory, reg) { 75 for_each_memblock(memory, reg) {
76 memory_start = (u32)reg->base; 76 memory_start = (u32)reg->base;
77 memory_end = (u32) reg->base + reg->size; 77 lowmem_size = reg->size;
78 if ((memory_start <= (u32)_text) && 78 if ((memory_start <= (u32)_text) &&
79 ((u32)_text <= memory_end)) { 79 ((u32)_text <= (memory_start + lowmem_size - 1))) {
80 memory_size = memory_end - memory_start; 80 memory_size = lowmem_size;
81 PAGE_OFFSET = memory_start; 81 PAGE_OFFSET = memory_start;
82 printk(KERN_INFO "%s: Main mem: 0x%x-0x%x, " 82 printk(KERN_INFO "%s: Main mem: 0x%x, "
83 "size 0x%08x\n", __func__, (u32) memory_start, 83 "size 0x%08x\n", __func__, (u32) memory_start,
84 (u32) memory_end, (u32) memory_size); 84 (u32) memory_size);
85 break; 85 break;
86 } 86 }
87 } 87 }
88 88
89 if (!memory_start || !memory_end) { 89 if (!memory_start || !memory_size) {
90 panic("%s: Missing memory setting 0x%08x-0x%08x\n", 90 panic("%s: Missing memory setting 0x%08x, size=0x%08x\n",
91 __func__, (u32) memory_start, (u32) memory_end); 91 __func__, (u32) memory_start, (u32) memory_size);
92 } 92 }
93 93
94 /* reservation of region where is the kernel */ 94 /* reservation of region where is the kernel */
95 kernel_align_start = PAGE_DOWN((u32)_text); 95 kernel_align_start = PAGE_DOWN((u32)_text);
96 /* ALIGN can be remove because _end in vmlinux.lds.S is align */ 96 /* ALIGN can be remove because _end in vmlinux.lds.S is align */
97 kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start; 97 kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
98 memblock_reserve(kernel_align_start, kernel_align_size); 98 printk(KERN_INFO "%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
99 printk(KERN_INFO "%s: kernel addr=0x%08x-0x%08x size=0x%08x\n",
100 __func__, kernel_align_start, kernel_align_start 99 __func__, kernel_align_start, kernel_align_start
101 + kernel_align_size, kernel_align_size); 100 + kernel_align_size, kernel_align_size);
102 101 memblock_reserve(kernel_align_start, kernel_align_size);
103#endif 102#endif
104 /* 103 /*
105 * Kernel: 104 * Kernel:
@@ -116,11 +115,13 @@ void __init setup_memory(void)
116 min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */ 115 min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */
117 /* RAM is assumed contiguous */ 116 /* RAM is assumed contiguous */
118 num_physpages = max_mapnr = memory_size >> PAGE_SHIFT; 117 num_physpages = max_mapnr = memory_size >> PAGE_SHIFT;
119 max_pfn = max_low_pfn = memory_end >> PAGE_SHIFT; 118 max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
119 max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;
120 120
121 printk(KERN_INFO "%s: max_mapnr: %#lx\n", __func__, max_mapnr); 121 printk(KERN_INFO "%s: max_mapnr: %#lx\n", __func__, max_mapnr);
122 printk(KERN_INFO "%s: min_low_pfn: %#lx\n", __func__, min_low_pfn); 122 printk(KERN_INFO "%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
123 printk(KERN_INFO "%s: max_low_pfn: %#lx\n", __func__, max_low_pfn); 123 printk(KERN_INFO "%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
124 printk(KERN_INFO "%s: max_pfn: %#lx\n", __func__, max_pfn);
124 125
125 /* 126 /*
126 * Find an area to use for the bootmem bitmap. 127 * Find an area to use for the bootmem bitmap.
@@ -134,14 +135,25 @@ void __init setup_memory(void)
134 memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size); 135 memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
135 136
136 /* free bootmem is whole main memory */ 137 /* free bootmem is whole main memory */
137 free_bootmem(memory_start, memory_size); 138 free_bootmem(memory_start, lowmem_size);
138 139
139 /* reserve allocate blocks */ 140 /* reserve allocate blocks */
140 for_each_memblock(reserved, reg) { 141 for_each_memblock(reserved, reg) {
141 pr_debug("reserved - 0x%08x-0x%08x\n", 142 unsigned long top = reg->base + reg->size - 1;
142 (u32) reg->base, (u32) reg->size); 143
143 reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); 144 pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n",
145 (u32) reg->base, (u32) reg->size, top,
146 memory_start + lowmem_size - 1);
147
148 if (top <= (memory_start + lowmem_size - 1)) {
149 reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
150 } else if (reg->base < (memory_start + lowmem_size - 1)) {
151 unsigned long trunc_size = memory_start + lowmem_size -
152 reg->base;
153 reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
154 }
144 } 155 }
156
145#ifdef CONFIG_MMU 157#ifdef CONFIG_MMU
146 init_bootmem_done = 1; 158 init_bootmem_done = 1;
147#endif 159#endif
@@ -186,7 +198,8 @@ void free_initmem(void)
186 198
187void __init mem_init(void) 199void __init mem_init(void)
188{ 200{
189 high_memory = (void *)__va(memory_end); 201 high_memory = (void *)__va(memory_start + lowmem_size - 1);
202
190 /* this will put all memory onto the freelists */ 203 /* this will put all memory onto the freelists */
191 totalram_pages += free_all_bootmem(); 204 totalram_pages += free_all_bootmem();
192 205
@@ -222,7 +235,6 @@ static void mm_cmdline_setup(void)
222 maxmem = memparse(p, &p); 235 maxmem = memparse(p, &p);
223 if (maxmem && memory_size > maxmem) { 236 if (maxmem && memory_size > maxmem) {
224 memory_size = maxmem; 237 memory_size = maxmem;
225 memory_end = memory_start + memory_size;
226 memblock.memory.regions[0].size = memory_size; 238 memblock.memory.regions[0].size = memory_size;
227 } 239 }
228 } 240 }
@@ -272,9 +284,12 @@ asmlinkage void __init mmu_init(void)
272 } 284 }
273 /* Find main memory where the kernel is */ 285 /* Find main memory where the kernel is */
274 memory_start = (u32) memblock.memory.regions[0].base; 286 memory_start = (u32) memblock.memory.regions[0].base;
275 memory_end = (u32) memblock.memory.regions[0].base + 287 lowmem_size = memory_size = (u32) memblock.memory.regions[0].size;
276 (u32) memblock.memory.regions[0].size; 288
277 memory_size = memory_end - memory_start; 289 if (lowmem_size > CONFIG_LOWMEM_SIZE) {
290 lowmem_size = CONFIG_LOWMEM_SIZE;
291 memory_size = lowmem_size;
292 }
278 293
279 mm_cmdline_setup(); /* FIXME parse args from command line - not used */ 294 mm_cmdline_setup(); /* FIXME parse args from command line - not used */
280 295
@@ -307,9 +322,13 @@ asmlinkage void __init mmu_init(void)
307 ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */ 322 ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */
308#endif /* CONFIG_HIGHMEM_START_BOOL */ 323#endif /* CONFIG_HIGHMEM_START_BOOL */
309 ioremap_bot = ioremap_base; 324 ioremap_bot = ioremap_base;
310
311 /* Initialize the context management stuff */ 325 /* Initialize the context management stuff */
312 mmu_context_init(); 326 mmu_context_init();
327
328 /* Shortly after that, the entire linear mapping will be available */
329 /* This will also cause that unflatten device tree will be allocated
330 * inside 768MB limit */
331 memblock_set_current_limit(memory_start + lowmem_size - 1);
313} 332}
314 333
315/* This is only called until mem_init is done. */ 334/* This is only called until mem_init is done. */
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index e3a68bb2da0b..68f5c01e4ad1 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -44,11 +44,6 @@ unsigned long ioremap_base;
44unsigned long ioremap_bot; 44unsigned long ioremap_bot;
45EXPORT_SYMBOL(ioremap_bot); 45EXPORT_SYMBOL(ioremap_bot);
46 46
47/* The maximum lowmem defaults to 768Mb, but this can be configured to
48 * another value.
49 */
50#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE
51
52#ifndef CONFIG_SMP 47#ifndef CONFIG_SMP
53struct pgtable_cache_struct quicklists; 48struct pgtable_cache_struct quicklists;
54#endif 49#endif
@@ -171,7 +166,7 @@ void __init mapin_ram(void)
171 166
172 v = CONFIG_KERNEL_START; 167 v = CONFIG_KERNEL_START;
173 p = memory_start; 168 p = memory_start;
174 for (s = 0; s < memory_size; s += PAGE_SIZE) { 169 for (s = 0; s < CONFIG_LOWMEM_SIZE; s += PAGE_SIZE) {
175 f = _PAGE_PRESENT | _PAGE_ACCESSED | 170 f = _PAGE_PRESENT | _PAGE_ACCESSED |
176 _PAGE_SHARED | _PAGE_HWEXEC; 171 _PAGE_SHARED | _PAGE_HWEXEC;
177 if ((char *) v < _stext || (char *) v >= _etext) 172 if ((char *) v < _stext || (char *) v >= _etext)