commit     d111e8f9644aa585c1a7e198d74a4d2682ef1374 (patch)
author     Russell King <rmk@dyn-67.arm.linux.org.uk>    2006-09-27 10:27:33 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2006-09-27 10:27:33 -0400
tree       4f29f58d0bbcd224790a3eae00668ce4d8a7483a /arch/arm/mm
parent     456335e2072fb35bf290b45e61d51916c322c145 (diff)
[ARM] Split ARM MM initialisation for !mmu
Move the MMU-specific code from init.c into mmu.c, and add nommu
fixups to nommu.c.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
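
At a glance, the patch leaves paging_init() with a full implementation on MMU builds and a minimal stub otherwise. The comparison below is condensed from the hunks that follow (a summary, not extra patch content):

/* arch/arm/mm/mmu.c -- built when CONFIG_MMU=y (condensed) */
void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
{
	build_mem_type_table();
	prepare_page_table(mi);
	bootmem_init(mi);
	devicemaps_init(mdesc);
	/* ... plus top_pmd and zero-page setup ... */
}

/* arch/arm/mm/nommu.c -- built when CONFIG_MMU is unset */
void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
{
	bootmem_init(mi);
}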
Diffstat (limited to 'arch/arm/mm')
 arch/arm/mm/Makefile |   2
 arch/arm/mm/init.c   | 209
 arch/arm/mm/mm.h     |   4
 arch/arm/mm/mmu.c    | 229
 arch/arm/mm/nommu.c  |  36
 5 files changed, 274 insertions(+), 206 deletions(-)
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 1a1563f859af..cabaa3b30548 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -6,7 +6,7 @@ obj-y := consistent.o extable.o fault.o init.o \
 			   iomap.o
 
 obj-$(CONFIG_MMU)	+= fault-armv.o flush.o ioremap.o mmap.o \
-			   mm-armv.o
+			   mm-armv.o mmu.o
 
 ifneq ($(CONFIG_MMU),y)
 obj-y			+= nommu.o
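
A note on the kbuild idiom above: obj-$(CONFIG_MMU) expands to obj-y when CONFIG_MMU=y (so mmu.o is built and linked) and to the unused obj- list otherwise, while the ifneq block pulls in nommu.o for !MMU builds. The standalone C sketch below shows the single-file #ifdef alternative this object-level split avoids; it is an illustration, not kernel code:

#include <stdio.h>

/* Build with -DCONFIG_MMU for the MMU variant; omit it for !MMU. */
#ifdef CONFIG_MMU
static void paging_setup(void) { printf("full MMU page-table setup\n"); }
#else
static void paging_setup(void) { printf("!MMU stub: bootmem only\n"); }
#endif

int main(void)
{
	paging_setup();
	return 0;
}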
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 83145d1d3389..22217fe2650b 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -27,10 +27,7 @@
 
 #include "mm.h"
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-extern void _stext, _text, _etext, __data_start, _end, __init_begin, __init_end;
+extern void _text, _etext, __data_start, _end, __init_begin, __init_end;
 extern unsigned long phys_initrd_start;
 extern unsigned long phys_initrd_size;
 
@@ -40,17 +37,6 @@ extern unsigned long phys_initrd_size;
  */
 static struct meminfo meminfo __initdata = { 0, };
 
-/*
- * empty_zero_page is a special page that is used for
- * zero-initialized data and COW.
- */
-struct page *empty_zero_page;
-
-/*
- * The pmd table for the upper-most set of pages.
- */
-pmd_t *top_pmd;
-
 void show_mem(void)
 {
 	int free = 0, total = 0, reserved = 0;
@@ -173,87 +159,9 @@ static int __init check_initrd(struct meminfo *mi)
 	return initrd_node;
 }
 
-/*
- * Reserve the various regions of node 0
- */
-static __init void reserve_node_zero(pg_data_t *pgdat)
-{
-	unsigned long res_size = 0;
-
-	/*
-	 * Register the kernel text and data with bootmem.
-	 * Note that this can only be in node 0.
-	 */
-#ifdef CONFIG_XIP_KERNEL
-	reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
-#else
-	reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
-#endif
-
-	/*
-	 * Reserve the page tables.  These are already in use,
-	 * and can only be in node 0.
-	 */
-	reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
-			     PTRS_PER_PGD * sizeof(pgd_t));
-
-	/*
-	 * Hmm... This should go elsewhere, but we really really need to
-	 * stop things allocating the low memory; ideally we need a better
-	 * implementation of GFP_DMA which does not assume that DMA-able
-	 * memory starts at zero.
-	 */
-	if (machine_is_integrator() || machine_is_cintegrator())
-		res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
-
-	/*
-	 * These should likewise go elsewhere.  They pre-reserve the
-	 * screen memory region at the start of main system memory.
-	 */
-	if (machine_is_edb7211())
-		res_size = 0x00020000;
-	if (machine_is_p720t())
-		res_size = 0x00014000;
-
-#ifdef CONFIG_SA1111
-	/*
-	 * Because of the SA1111 DMA bug, we want to preserve our
-	 * precious DMA-able memory...
-	 */
-	res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
-#endif
-	if (res_size)
-		reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
-}
-
-static inline void prepare_page_table(struct meminfo *mi)
-{
-	unsigned long addr;
-
-	/*
-	 * Clear out all the mappings below the kernel image.
-	 */
-	for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
-		pmd_clear(pmd_off_k(addr));
-
-#ifdef CONFIG_XIP_KERNEL
-	/* The XIP kernel is mapped in the module area -- skip over it */
-	addr = ((unsigned long)&_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
-#endif
-	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
-		pmd_clear(pmd_off_k(addr));
-
-	/*
-	 * Clear out all the kernel space mappings, except for the first
-	 * memory bank, up to the end of the vmalloc region.
-	 */
-	for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
-	     addr < VMALLOC_END; addr += PGDIR_SIZE)
-		pmd_clear(pmd_off_k(addr));
-}
-
 static inline void map_memory_bank(struct membank *bank)
 {
+#ifdef CONFIG_MMU
 	struct map_desc map;
 
 	map.pfn = __phys_to_pfn(bank->start);
@@ -262,6 +170,7 @@ static inline void map_memory_bank(struct membank *bank)
 	map.type = MT_MEMORY;
 
 	create_mapping(&map);
+#endif
 }
 
 static unsigned long __init
@@ -373,7 +282,7 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
 	return end_pfn;
 }
 
-static void __init bootmem_init(struct meminfo *mi)
+void __init bootmem_init(struct meminfo *mi)
 {
 	unsigned long memend_pfn = 0;
 	int node, initrd_node, i;
@@ -387,8 +296,6 @@ static void __init bootmem_init(struct meminfo *mi)
 
 	memcpy(&meminfo, mi, sizeof(meminfo));
 
-	prepare_page_table(mi);
-
 	/*
 	 * Locate which node contains the ramdisk image, if any.
 	 */
@@ -422,114 +329,6 @@ static void __init bootmem_init(struct meminfo *mi)
 	max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET;
 }
 
-/*
- * Set up the device mappings.  Since we clear out the page tables for all
- * mappings above VMALLOC_END, we will remove any debug device mappings.
- * This means you have to be careful how you debug this function, or any
- * called function.  This means you can't use any function or debugging
- * method which may touch any device, otherwise the kernel _will_ crash.
- */
-static void __init devicemaps_init(struct machine_desc *mdesc)
-{
-	struct map_desc map;
-	unsigned long addr;
-	void *vectors;
-
-	/*
-	 * Allocate the vector page early.
-	 */
-	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
-	BUG_ON(!vectors);
-
-	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
-		pmd_clear(pmd_off_k(addr));
-
-	/*
-	 * Map the kernel if it is XIP.
-	 * It is always first in the module area.
-	 */
-#ifdef CONFIG_XIP_KERNEL
-	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
-	map.virtual = MODULE_START;
-	map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
-	map.type = MT_ROM;
-	create_mapping(&map);
-#endif
-
-	/*
-	 * Map the cache flushing regions.
-	 */
-#ifdef FLUSH_BASE
-	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
-	map.virtual = FLUSH_BASE;
-	map.length = SZ_1M;
-	map.type = MT_CACHECLEAN;
-	create_mapping(&map);
-#endif
-#ifdef FLUSH_BASE_MINICACHE
-	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
-	map.virtual = FLUSH_BASE_MINICACHE;
-	map.length = SZ_1M;
-	map.type = MT_MINICLEAN;
-	create_mapping(&map);
-#endif
-
-	/*
-	 * Create a mapping for the machine vectors at the high-vectors
-	 * location (0xffff0000).  If we aren't using high-vectors, also
-	 * create a mapping at the low-vectors virtual address.
-	 */
-	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
-	map.virtual = 0xffff0000;
-	map.length = PAGE_SIZE;
-	map.type = MT_HIGH_VECTORS;
-	create_mapping(&map);
-
-	if (!vectors_high()) {
-		map.virtual = 0;
-		map.type = MT_LOW_VECTORS;
-		create_mapping(&map);
-	}
-
-	/*
-	 * Ask the machine support to map in the statically mapped devices.
-	 */
-	if (mdesc->map_io)
-		mdesc->map_io();
-
-	/*
-	 * Finally flush the caches and tlb to ensure that we're in a
-	 * consistent state wrt the writebuffer.  This also ensures that
-	 * any write-allocated cache lines in the vector page are written
-	 * back.  After this point, we can start to touch devices again.
-	 */
-	local_flush_tlb_all();
-	flush_cache_all();
-}
-
-/*
- * paging_init() sets up the page tables, initialises the zone memory
- * maps, and sets up the zero page, bad page and bad page tables.
- */
-void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
-{
-	void *zero_page;
-
-	build_mem_type_table();
-	bootmem_init(mi);
-	devicemaps_init(mdesc);
-
-	top_pmd = pmd_off_k(0xffff0000);
-
-	/*
-	 * allocate the zero page.  Note that we count on this going ok.
-	 */
-	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
-	memzero(zero_page, PAGE_SIZE);
-	empty_zero_page = virt_to_page(zero_page);
-	flush_dcache_page(empty_zero_page);
-}
-
 static inline void free_area(unsigned long addr, unsigned long end, char *s)
 {
 	unsigned int size = (end - addr) >> 10;
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 8d73ffbce8df..083c51d3903f 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -14,6 +14,10 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
 }
 
 struct map_desc;
+struct meminfo;
+struct pglist_data;
 
 void __init build_mem_type_table(void);
 void __init create_mapping(struct map_desc *md);
+void __init bootmem_init(struct meminfo *mi);
+void reserve_node_zero(struct pglist_data *pgdat);
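
The two forward declarations added above are enough because mm.h only names struct meminfo and struct pglist_data through pointers in prototypes, so the complete struct layouts never have to be visible in the header. A minimal standalone demonstration of the technique:

#include <stdio.h>

struct widget;				/* incomplete type: layout unknown here */
void widget_report(struct widget *w);	/* legal: only a pointer is involved */

struct widget { int id; };		/* full definition, normally in a .c file */

void widget_report(struct widget *w)
{
	printf("widget %d\n", w->id);	/* dereferencing needs the full type */
}

int main(void)
{
	struct widget w = { 42 };
	widget_report(&w);
	return 0;
}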
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
new file mode 100644
index 000000000000..9648e6800ffe
--- /dev/null
+++ b/arch/arm/mm/mmu.c
@@ -0,0 +1,229 @@
+/*
+ *  linux/arch/arm/mm/mmu.c
+ *
+ *  Copyright (C) 1995-2005 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mman.h>
+#include <linux/nodemask.h>
+
+#include <asm/mach-types.h>
+#include <asm/setup.h>
+#include <asm/sizes.h>
+#include <asm/tlb.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include "mm.h"
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+extern void _stext, __data_start, _end;
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+/*
+ * empty_zero_page is a special page that is used for
+ * zero-initialized data and COW.
+ */
+struct page *empty_zero_page;
+
+/*
+ * The pmd table for the upper-most set of pages.
+ */
+pmd_t *top_pmd;
+
+static inline void prepare_page_table(struct meminfo *mi)
+{
+	unsigned long addr;
+
+	/*
+	 * Clear out all the mappings below the kernel image.
+	 */
+	for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
+		pmd_clear(pmd_off_k(addr));
+
+#ifdef CONFIG_XIP_KERNEL
+	/* The XIP kernel is mapped in the module area -- skip over it */
+	addr = ((unsigned long)&_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
+#endif
+	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
+		pmd_clear(pmd_off_k(addr));
+
+	/*
+	 * Clear out all the kernel space mappings, except for the first
+	 * memory bank, up to the end of the vmalloc region.
+	 */
+	for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
+	     addr < VMALLOC_END; addr += PGDIR_SIZE)
+		pmd_clear(pmd_off_k(addr));
+}
+
+/*
+ * Reserve the various regions of node 0
+ */
+void __init reserve_node_zero(pg_data_t *pgdat)
+{
+	unsigned long res_size = 0;
+
+	/*
+	 * Register the kernel text and data with bootmem.
+	 * Note that this can only be in node 0.
+	 */
+#ifdef CONFIG_XIP_KERNEL
+	reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
+#else
+	reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
+#endif
+
+	/*
+	 * Reserve the page tables.  These are already in use,
+	 * and can only be in node 0.
+	 */
+	reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
+			     PTRS_PER_PGD * sizeof(pgd_t));
+
+	/*
+	 * Hmm... This should go elsewhere, but we really really need to
+	 * stop things allocating the low memory; ideally we need a better
+	 * implementation of GFP_DMA which does not assume that DMA-able
+	 * memory starts at zero.
+	 */
+	if (machine_is_integrator() || machine_is_cintegrator())
+		res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
+
+	/*
+	 * These should likewise go elsewhere.  They pre-reserve the
+	 * screen memory region at the start of main system memory.
+	 */
+	if (machine_is_edb7211())
+		res_size = 0x00020000;
+	if (machine_is_p720t())
+		res_size = 0x00014000;
+
+#ifdef CONFIG_SA1111
+	/*
+	 * Because of the SA1111 DMA bug, we want to preserve our
+	 * precious DMA-able memory...
+	 */
+	res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
+#endif
+	if (res_size)
+		reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
+}
+
+/*
+ * Set up the device mappings.  Since we clear out the page tables for all
+ * mappings above VMALLOC_END, we will remove any debug device mappings.
+ * This means you have to be careful how you debug this function, or any
+ * called function.  This means you can't use any function or debugging
+ * method which may touch any device, otherwise the kernel _will_ crash.
+ */
+static void __init devicemaps_init(struct machine_desc *mdesc)
+{
+	struct map_desc map;
+	unsigned long addr;
+	void *vectors;
+
+	/*
+	 * Allocate the vector page early.
+	 */
+	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
+	BUG_ON(!vectors);
+
+	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
+		pmd_clear(pmd_off_k(addr));
+
+	/*
+	 * Map the kernel if it is XIP.
+	 * It is always first in the module area.
+	 */
+#ifdef CONFIG_XIP_KERNEL
+	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
+	map.virtual = MODULE_START;
+	map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
+	map.type = MT_ROM;
+	create_mapping(&map);
+#endif
+
+	/*
+	 * Map the cache flushing regions.
+	 */
+#ifdef FLUSH_BASE
+	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
+	map.virtual = FLUSH_BASE;
+	map.length = SZ_1M;
+	map.type = MT_CACHECLEAN;
+	create_mapping(&map);
+#endif
+#ifdef FLUSH_BASE_MINICACHE
+	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
+	map.virtual = FLUSH_BASE_MINICACHE;
+	map.length = SZ_1M;
+	map.type = MT_MINICLEAN;
+	create_mapping(&map);
+#endif
+
+	/*
+	 * Create a mapping for the machine vectors at the high-vectors
+	 * location (0xffff0000).  If we aren't using high-vectors, also
+	 * create a mapping at the low-vectors virtual address.
+	 */
+	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+	map.virtual = 0xffff0000;
+	map.length = PAGE_SIZE;
+	map.type = MT_HIGH_VECTORS;
+	create_mapping(&map);
+
+	if (!vectors_high()) {
+		map.virtual = 0;
+		map.type = MT_LOW_VECTORS;
+		create_mapping(&map);
+	}
+
+	/*
+	 * Ask the machine support to map in the statically mapped devices.
+	 */
+	if (mdesc->map_io)
+		mdesc->map_io();
+
+	/*
+	 * Finally flush the caches and tlb to ensure that we're in a
+	 * consistent state wrt the writebuffer.  This also ensures that
+	 * any write-allocated cache lines in the vector page are written
+	 * back.  After this point, we can start to touch devices again.
+	 */
+	local_flush_tlb_all();
+	flush_cache_all();
+}
+
+/*
+ * paging_init() sets up the page tables, initialises the zone memory
+ * maps, and sets up the zero page, bad page and bad page tables.
+ */
+void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
+{
+	void *zero_page;
+
+	build_mem_type_table();
+	prepare_page_table(mi);
+	bootmem_init(mi);
+	devicemaps_init(mdesc);
+
+	top_pmd = pmd_off_k(0xffff0000);
+
+	/*
+	 * allocate the zero page.  Note that we count on this going ok.
+	 */
+	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
+	memzero(zero_page, PAGE_SIZE);
+	empty_zero_page = virt_to_page(zero_page);
+	flush_dcache_page(empty_zero_page);
+}
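
To get a feel for how much prepare_page_table() clears, the standalone program below reproduces its loop arithmetic. Every constant is an assumption chosen for illustration; the real values are platform-specific (VMALLOC_END and bank sizes vary by machine):

#include <stdio.h>

int main(void)
{
	/* All values assumed for illustration only. */
	const unsigned long PGDIR_SIZE   = 2UL << 20;	/* 2 MiB per kernel pgd entry */
	const unsigned long MODULE_START = 0xbf000000UL;
	const unsigned long PAGE_OFFSET  = 0xc0000000UL;
	const unsigned long VMALLOC_END  = 0xe0000000UL;
	const unsigned long bank0_end_va = PAGE_OFFSET + (128UL << 20);	/* 128 MiB bank 0 */
	unsigned long addr, cleared = 0;

	for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)	/* below the kernel */
		cleared++;
	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)		/* module area */
		cleared++;
	for (addr = bank0_end_va; addr < VMALLOC_END; addr += PGDIR_SIZE)
		cleared++;					/* above bank 0 */

	printf("pgd entries cleared: %lu\n", cleared);
	return 0;
}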
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 1464ed817b5d..e369aeb0c25c 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -11,6 +11,42 @@
 #include <asm/io.h>
 #include <asm/page.h>
 
+#include "mm.h"
+
+extern void _stext, __data_start, _end;
+
+/*
+ * Reserve the various regions of node 0
+ */
+void __init reserve_node_zero(pg_data_t *pgdat)
+{
+	/*
+	 * Register the kernel text and data with bootmem.
+	 * Note that this can only be in node 0.
+	 */
+#ifdef CONFIG_XIP_KERNEL
+	reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
+#else
+	reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
+#endif
+
+	/*
+	 * Register the exception vector page.  On some machines the vectors
+	 * live at the start of DRAM; if that page is not reserved here,
+	 * alloc_page() can return it as address 0, which looks like failure.
+	 */
+	reserve_bootmem_node(pgdat, CONFIG_VECTORS_BASE, PAGE_SIZE);
+}
+
+/*
+ * paging_init() sets up the page tables, initialises the zone memory
+ * maps, and sets up the zero page, bad page and bad page tables.
+ */
+void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
+{
+	bootmem_init(mi);
+}
+
 void flush_dcache_page(struct page *page)
 {
 	__cpuc_flush_dcache_page(page_address(page));
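
The vector-page reservation in the nommu hunk guards against a subtle hazard: when the vectors sit in the DRAM page at physical address 0, handing that page to the allocator means a successful alloc_page() can return address 0, which callers cannot tell apart from failure. An illustrative sketch of the hazard (not kernel code):

#include <stdio.h>

/* Pretend allocator: returns the address of the next free page. */
static unsigned long fake_alloc_page(unsigned long next_free)
{
	return next_free;
}

int main(void)
{
	unsigned long page = fake_alloc_page(0x0);	/* page 0 was not reserved */
	if (!page)
		printf("allocation looks failed, yet the page was really there\n");
	return 0;
}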