Diffstat (limited to 'arch/microblaze')
 -rw-r--r--  arch/microblaze/mm/init.c | 158
 1 file changed, 154 insertions(+), 4 deletions(-)
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index af789e26a7ce..b5a701cd71e0 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -23,18 +23,26 @@
 #include <asm/sections.h>
 #include <asm/tlb.h>
 
+#ifndef CONFIG_MMU
 unsigned int __page_offset;
 EXPORT_SYMBOL(__page_offset);
 
+#else
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+int mem_init_done;
+static int init_bootmem_done;
+#endif /* CONFIG_MMU */
+
 char *klimit = _end;
 
 /*
  * Initialize the bootmem system and give it all the memory we
  * have available.
  */
-unsigned int memory_start;
-unsigned int memory_end; /* due to mm/nommu.c */
-unsigned int memory_size;
+unsigned long memory_start;
+unsigned long memory_end; /* due to mm/nommu.c */
+unsigned long memory_size;
 
 /*
  * paging_init() sets up the page tables - in fact we've already done this.
@@ -59,6 +67,7 @@ void __init setup_memory(void)
 {
         int i;
         unsigned long map_size;
+#ifndef CONFIG_MMU
         u32 kernel_align_start, kernel_align_size;
 
         /* Find main memory where is the kernel */
@@ -91,6 +100,7 @@ void __init setup_memory(void)
                 __func__, kernel_align_start, kernel_align_start
                         + kernel_align_size, kernel_align_size);
 
+#endif
         /*
          * Kernel:
          * start: base phys address of kernel - page align
@@ -119,9 +129,13 @@ void __init setup_memory(void)
          * for 4GB of memory, using 4kB pages), plus 1 page
          * (in case the address isn't page-aligned).
          */
+#ifndef CONFIG_MMU
         map_size = init_bootmem_node(NODE_DATA(0), PFN_UP(TOPHYS((u32)_end)),
                                         min_low_pfn, max_low_pfn);
-
+#else
+        map_size = init_bootmem_node(&contig_page_data,
+                PFN_UP(TOPHYS((u32)_end)), min_low_pfn, max_low_pfn);
+#endif
         lmb_reserve(PFN_UP(TOPHYS((u32)_end)) << PAGE_SHIFT, map_size);
 
         /* free bootmem is whole main memory */
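A quick check of the bitmap sizing that the context lines above refer to: with 4 kB pages, 4 GB of memory is 2^20 pages, and at one bit per page the bootmem bitmap works out to 2^20 / 8 bytes = 128 kB, plus one extra page in case the start address is not page-aligned. A stand-alone sketch of that arithmetic (just the numbers, not kernel code):

#include <stdio.h>

int main(void)
{
        unsigned long long mem   = 4ULL << 30;    /* 4 GB of RAM */
        unsigned long long page  = 4ULL << 10;    /* 4 kB pages */
        unsigned long long pages = mem / page;    /* 2^20 pages */
        unsigned long long bmap  = pages / 8;     /* one bit per page */

        printf("bitmap = %llu kB\n", bmap >> 10); /* prints: bitmap = 128 kB */
        return 0;
}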
@@ -135,6 +149,9 @@ void __init setup_memory(void)
                 reserve_bootmem(lmb.reserved.region[i].base,
                         lmb_size_bytes(&lmb.reserved, i) - 1, BOOTMEM_DEFAULT);
         }
+#ifdef CONFIG_MMU
+        init_bootmem_done = 1;
+#endif
         paging_init();
 }
 
@@ -189,8 +206,12 @@ void __init mem_init(void)
         printk(KERN_INFO "Memory: %luk/%luk available\n",
                 (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                 num_physpages << (PAGE_SHIFT-10));
+#ifdef CONFIG_MMU
+        mem_init_done = 1;
+#endif
 }
 
+#ifndef CONFIG_MMU
 /* Check against bounds of physical memory */
 int ___range_ok(unsigned long addr, unsigned long size)
 {
@@ -198,3 +219,132 @@ int ___range_ok(unsigned long addr, unsigned long size)
                 ((addr + size) > memory_end));
 }
 EXPORT_SYMBOL(___range_ok);
+
+#else
+int page_is_ram(unsigned long pfn)
+{
+        return pfn < max_low_pfn;
+}
+
+/*
+ * Check for command-line options that affect what MMU_init will do.
+ */
+static void mm_cmdline_setup(void)
+{
+        unsigned long maxmem = 0;
+        char *p = cmd_line;
+
+        /* Look for mem= option on command line */
+        p = strstr(cmd_line, "mem=");
+        if (p) {
+                p += 4;
+                maxmem = memparse(p, &p);
+                if (maxmem && memory_size > maxmem) {
+                        memory_size = maxmem;
+                        memory_end = memory_start + memory_size;
+                        lmb.memory.region[0].size = memory_size;
+                }
+        }
+}
+
+/*
+ * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ */
+static void __init mmu_init_hw(void)
+{
+        /*
+         * The Zone Protection Register (ZPR) defines how protection will
+         * be applied to every page which is a member of a given zone. At
+         * present, we utilize only two of the zones.
+         * The zone index bits (of ZSEL) in the PTE are used for software
+         * indicators, except the LSB. For user access, zone 1 is used,
+         * for kernel access, zone 0 is used. We set all but zone 1
+         * to zero, allowing only kernel access as indicated in the PTE.
+         * For zone 1, we set a 01 binary (a value of 10 will not work)
+         * to allow user access as indicated in the PTE. This also allows
+         * kernel access as indicated in the PTE.
+         */
+        __asm__ __volatile__ ("ori r11, r0, 0x10000000;" \
+                        "mts rzpr, r11;"
+                        : : : "r11");
+}
+
+/*
+ * MMU_init sets up the basic memory mappings for the kernel,
+ * including both RAM and possibly some I/O regions,
+ * and sets up the page tables and the MMU hardware ready to go.
+ */
+
+/* called from head.S */
+asmlinkage void __init mmu_init(void)
+{
+        unsigned int kstart, ksize;
+
+        if (!lmb.reserved.cnt) {
+                printk(KERN_EMERG "Error memory count\n");
+                machine_restart(NULL);
+        }
+
+        if ((u32) lmb.memory.region[0].size < 0x1000000) {
+                printk(KERN_EMERG "Memory must be greater than 16MB\n");
+                machine_restart(NULL);
+        }
+        /* Find main memory where the kernel is */
+        memory_start = (u32) lmb.memory.region[0].base;
+        memory_end = (u32) lmb.memory.region[0].base +
+                        (u32) lmb.memory.region[0].size;
+        memory_size = memory_end - memory_start;
+
+        mm_cmdline_setup(); /* FIXME parse args from command line - not used */
+
+        /*
+         * Map out the kernel text/data/bss from the available physical
+         * memory.
+         */
+        kstart = __pa(CONFIG_KERNEL_START); /* kernel start */
+        /* kernel size */
+        ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
+        lmb_reserve(kstart, ksize);
+
+#if defined(CONFIG_BLK_DEV_INITRD)
+        /* Remove the init RAM disk from the available memory. */
+/*      if (initrd_start) {
+                mem_pieces_remove(&phys_avail, __pa(initrd_start),
+                                  initrd_end - initrd_start, 1);
+        }*/
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+        /* Initialize the MMU hardware */
+        mmu_init_hw();
+
+        /* Map in all of RAM starting at CONFIG_KERNEL_START */
+        mapin_ram();
+
+#ifdef HIGHMEM_START_BOOL
+        ioremap_base = HIGHMEM_START;
+#else
+        ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */
+#endif /* CONFIG_HIGHMEM */
+        ioremap_bot = ioremap_base;
+
+        /* Initialize the context management stuff */
+        mmu_context_init();
+}
+
+/* This is only called until mem_init is done. */
+void __init *early_get_page(void)
+{
+        void *p;
+        if (init_bootmem_done) {
+                p = alloc_bootmem_pages(PAGE_SIZE);
+        } else {
+                /*
+                 * Mem start + 32MB -> here is limit
+                 * because of mem mapping from head.S
+                 */
+                p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
+                                        memory_start + 0x2000000));
+        }
+        return p;
+}
+#endif /* CONFIG_MMU */
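The mm_cmdline_setup() addition above clips memory_size when a mem= argument is present on the kernel command line, relying on memparse() to accept the usual K/M/G size suffixes. Below is a rough, user-space analogue of that parsing, for illustration only; parse_mem_arg() is a made-up helper, not the kernel routine, and it only handles the suffixes shown.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the mem= handling added above: find "mem=",
 * parse a decimal or hex size, and scale by an optional K/M/G suffix,
 * the way mm_cmdline_setup() uses strstr() + memparse(). */
static unsigned long long parse_mem_arg(const char *cmdline)
{
        const char *p = strstr(cmdline, "mem=");
        char *end;
        unsigned long long val;

        if (!p)
                return 0;                       /* no mem= option given */

        val = strtoull(p + 4, &end, 0);         /* base 0 also accepts 0x... */
        switch (*end) {
        case 'G': case 'g': val <<= 10;         /* fall through */
        case 'M': case 'm': val <<= 10;         /* fall through */
        case 'K': case 'k': val <<= 10;
        }
        return val;
}

int main(void)
{
        /* e.g. "mem=64M" limits the usable RAM to the first 64 MB */
        printf("%llu\n", parse_mem_arg("console=ttyUL0 mem=64M")); /* 67108864 */
        return 0;
}

In the patch itself, the parsed value then shrinks memory_size, memory_end and the first lmb region, so the rest of the MMU setup never maps the excluded memory.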