Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--	arch/arm/mm/mmu.c	771
1 files changed, 771 insertions, 0 deletions
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
new file mode 100644
index 000000000000..e566cbe4b222
--- /dev/null
+++ b/arch/arm/mm/mmu.c
@@ -0,0 +1,771 @@
/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>

#include <asm/mach-types.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern void _stext, __data_start, _end;
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy	= "uncached",
		.cr_mask = CR_W|CR_C,
		.pmd	= PMD_SECT_UNCACHED,
		.pte	= 0,
	}, {
		.policy	= "buffered",
		.cr_mask = CR_C,
		.pmd	= PMD_SECT_BUFFERED,
		.pte	= PTE_BUFFERABLE,
	}, {
		.policy	= "writethrough",
		.cr_mask = 0,
		.pmd	= PMD_SECT_WT,
		.pte	= PTE_CACHEABLE,
	}, {
		.policy	= "writeback",
		.cr_mask = 0,
		.pmd	= PMD_SECT_WB,
		.pte	= PTE_BUFFERABLE|PTE_CACHEABLE,
	}, {
		.policy	= "writealloc",
		.cr_mask = 0,
		.pmd	= PMD_SECT_WBWA,
		.pte	= PTE_BUFFERABLE|PTE_CACHEABLE,
	}
};
/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on while the cache is off.)
 */
static void __init early_cachepolicy(char **p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			*p += len;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	flush_cache_all();
	set_cr(cr_alignment);
}
__early_param("cachepolicy=", early_cachepolicy);

static void __init early_nocache(char **__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}
__early_param("nocache", early_nocache);

static void __init early_nowrite(char **__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}
__early_param("nowb", early_nowrite);

static void __init early_ecc(char **p)
{
	if (memcmp(*p, "on", 2) == 0) {
		ecc_mask = PMD_PROTECTION;
		*p += 2;
	} else if (memcmp(*p, "off", 3) == 0) {
		ecc_mask = 0;
		*p += 3;
	}
}
__early_param("ecc=", early_ecc);

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

struct mem_types {
	unsigned int	prot_pte;
	unsigned int	prot_l1;
	unsigned int	prot_sect;
	unsigned int	domain;
};

static struct mem_types mem_types[] __initdata = {
	[MT_DEVICE] = {
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1 = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE,
		.domain = DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
		.domain = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
		.domain = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_EXEC,
		.prot_l1 = PMD_TYPE_TABLE,
		.domain = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_EXEC,
		.prot_l1 = PMD_TYPE_TABLE,
		.domain = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
		.domain = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
		.domain = DOMAIN_KERNEL,
	},
	[MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1 = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
				PMD_SECT_TEX(1),
		.domain = DOMAIN_IO,
	},
	[MT_NONSHARED_DEVICE] = {
		.prot_l1 = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_NONSHARED_DEV |
				PMD_SECT_AP_WRITE,
		.domain = DOMAIN_IO,
	}
};

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	unsigned int user_pgprot, kern_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

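	/* Clamp the requested policy to what the configured D-cache mode allows. */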
#if defined(CONFIG_CPU_DCACHE_DISABLE)
	if (cachepolicy > CPOLICY_BUFFERED)
		cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
	if (cachepolicy > CPOLICY_WRITETHROUGH)
		cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}

	/*
	 * Xscale must not have PMD bit 4 set for section mappings.
	 */
	if (cpu_is_xscale())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_BIT4;

	/*
	 * On ARMv5 and lower, excluding Xscale, bit 4 must be set
	 * for page tables.
	 */
	if (cpu_arch < CPU_ARCH_ARMv6 && !cpu_is_xscale())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;

	cp = &cache_policies[cachepolicy];
	kern_pgprot = user_pgprot = cp->pte;

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent()) {
		if (cpu_is_xsc3()) {
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_COHERENT;
		}
	}

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * bit 4 becomes XN which we must clear for the
		 * kernel memory mapping.
		 */
		mem_types[MT_MEMORY].prot_sect &= ~PMD_SECT_XN;
		mem_types[MT_ROM].prot_sect &= ~PMD_SECT_XN;

		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

		/*
		 * Mark the device area as "shared device"
		 */
		mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;

		/*
		 * User pages need to be mapped with the ASID
		 * (iow, non-global)
		 */
		user_pgprot |= L_PTE_ASID;

#ifdef CONFIG_SMP
		/*
		 * Mark memory with the "shared" attribute for SMP systems
		 */
		user_pgprot |= L_PTE_SHARED;
		kern_pgprot |= L_PTE_SHARED;
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
#endif
	}

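	/* Fold the cache policy and user attributes into the vma protection map. */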
	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
		protection_map[i] = __pgprot(v);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;

	if (cpu_arch >= CPU_ARCH_ARMv5) {
#ifndef CONFIG_SMP
		/*
		 * Only use write-through for non-SMP systems
		 */
		mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
		mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
#endif
	} else {
		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
	}

	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE |
				 L_PTE_EXEC | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);
}

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

/*
 * Create a SECTION PGD between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  This operates on half-
 * pgdir entry increments.
 */
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
	pmd_t *pmdp = pmd_off_k(virt);

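	/*
	 * Each pgdir entry covers 2MB, i.e. two 1MB hardware sections;
	 * step to the second section when virt is in the upper half.
	 */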
	if (virt & (1 << 20))
		pmdp++;

	*pmdp = __pmd(phys | prot);
	flush_pmd_entry(pmdp);
}

/*
 * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
 */
static inline void
alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
{
	int i;

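	/*
	 * A 16MB supersection is encoded as 16 identical 1MB section
	 * entries, so replicate the descriptor across all of them.
	 */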
	for (i = 0; i < 16; i += 1) {
		alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);

		virt += (PGDIR_SIZE / 2);
	}
}

/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
{
	pmd_t *pmdp = pmd_off_k(virt);
	pte_t *ptep;

	if (pmd_none(*pmdp)) {
		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
					       sizeof(pte_t));

		__pmd_populate(pmdp, __pa(ptep) | prot_l1);
	}
	ptep = pte_offset_kernel(pmdp, virt);

	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
}

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
void __init create_mapping(struct map_desc *md)
{
	unsigned long virt, length;
	int prot_sect, prot_l1, domain;
	pgprot_t prot_pte;
	unsigned long off = (u32)__pfn_to_phys(md->pfn);

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08llx at 0x%08lx in user region\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
	}

	domain    = mem_types[md->type].domain;
	prot_pte  = __pgprot(mem_types[md->type].prot_pte);
	prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
	prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);

	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		if (domain) {
			printk(KERN_ERR "MM: invalid domain in supersection "
			       "mapping for 0x%08llx at 0x%08lx\n",
			       __pfn_to_phys((u64)md->pfn), md->virtual);
			return;
		}
		if ((md->virtual | md->length | __pfn_to_phys(md->pfn))
		    & ~SUPERSECTION_MASK) {
			printk(KERN_ERR "MM: cannot create mapping for "
			       "0x%08llx at 0x%08lx invalid alignment\n",
			       __pfn_to_phys((u64)md->pfn), md->virtual);
			return;
		}

		/*
		 * Shift bits [35:32] of address into bits [23:20] of PMD
		 * (See ARMv6 spec).
		 */
		off |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
	}

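	/*
	 * Convert off into a virt->phys delta: from here on,
	 * phys == virt + off for every address in the mapping.
	 */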
	virt   = md->virtual;
	off   -= virt;
	length = md->length;

	if (mem_types[md->type].prot_l1 == 0 &&
	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       __pfn_to_phys(md->pfn), md->virtual);
		return;
	}

	while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if ((cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())
		&& domain == 0) {
		/*
		 * Align to supersection boundary if !high pages.
		 * High pages have already been checked for proper
		 * alignment above and they will fail the SUPERSECTION_MASK
		 * check because of the way the address is encoded into
		 * offset.
		 */
		if (md->pfn <= 0x100000) {
			while ((virt & ~SUPERSECTION_MASK ||
				(virt + off) & ~SUPERSECTION_MASK) &&
				length >= (PGDIR_SIZE / 2)) {
				alloc_init_section(virt, virt + off, prot_sect);

				virt   += (PGDIR_SIZE / 2);
				length -= (PGDIR_SIZE / 2);
			}
		}

		while (length >= SUPERSECTION_SIZE) {
			alloc_init_supersection(virt, virt + off, prot_sect);

			virt   += SUPERSECTION_SIZE;
			length -= SUPERSECTION_SIZE;
		}
	}

	/*
	 * A section mapping covers half a "pgdir" entry.
	 */
	while (length >= (PGDIR_SIZE / 2)) {
		alloc_init_section(virt, virt + off, prot_sect);

		virt   += (PGDIR_SIZE / 2);
		length -= (PGDIR_SIZE / 2);
	}

	while (length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt   += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}

static inline void prepare_page_table(struct meminfo *mi)
{
	unsigned long addr;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)&_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the end of the vmalloc region.
	 */
	for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
	     addr < VMALLOC_END; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));
}

/*
 * Reserve the various regions of node 0
 */
void __init reserve_node_zero(pg_data_t *pgdat)
{
	unsigned long res_size = 0;

	/*
	 * Register the kernel text and data with bootmem.
	 * Note that this can only be in node 0.
	 */
#ifdef CONFIG_XIP_KERNEL
	reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
#else
	reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
#endif

	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
			     PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Hmm... This should go elsewhere, but we really really need to
	 * stop things allocating the low memory; ideally we need a better
	 * implementation of GFP_DMA which does not assume that DMA-able
	 * memory starts at zero.
	 */
	if (machine_is_integrator() || machine_is_cintegrator())
		res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;

	/*
	 * These should likewise go elsewhere.  They pre-reserve the
	 * screen memory region at the start of main system memory.
	 */
	if (machine_is_edb7211())
		res_size = 0x00020000;
	if (machine_is_p720t())
		res_size = 0x00014000;

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
#endif
	if (res_size)
		reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_END, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function: you can't use any function or debugging method which
 * may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
	BUG_ON(!vectors);

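	/*
	 * Clear every mapping from VMALLOC_END to the top of the 4GB
	 * address space; the loop terminates when addr wraps to zero.
	 */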
	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULE_START;
	map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
{
	void *zero_page;

	build_mem_type_table();
	prepare_page_table(mi);
	bootmem_init(mi);
	devicemaps_init(mdesc);

	top_pmd = pmd_off_k(0xffff0000);

	/*
	 * Allocate the zero page.  Note that we count on this going ok.
	 */
	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
	memzero(zero_page, PAGE_SIZE);
	empty_zero_page = virt_to_page(zero_page);
	flush_dcache_page(empty_zero_page);
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off.
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		base_pmdval |= PMD_BIT4;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
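		/* Fill both 1MB section halves of this 2MB pgdir entry. */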
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}
}