Diffstat (limited to 'arch/arm/mm/mm-armv.c')
-rw-r--r--  arch/arm/mm/mm-armv.c  760
1 files changed, 760 insertions, 0 deletions
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
new file mode 100644
index 000000000000..f5a87db8b498
--- /dev/null
+++ b/arch/arm/mm/mm-armv.c
@@ -0,0 +1,760 @@
/*
 * linux/arch/arm/mm/mm-armv.c
 *
 * Copyright (C) 1998-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Page table sludge for ARM v3 and v4 processor architectures.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

#include <asm/mach/map.h>

#define CPOLICY_UNCACHED 0
#define CPOLICY_BUFFERED 1
#define CPOLICY_WRITETHROUGH 2
#define CPOLICY_WRITEBACK 3
#define CPOLICY_WRITEALLOC 4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_kernel);
struct cachepolicy {
	const char policy[16];
	unsigned int cr_mask;
	unsigned int pmd;
	unsigned int pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy = "uncached",
		.cr_mask = CR_W|CR_C,
		.pmd = PMD_SECT_UNCACHED,
		.pte = 0,
	}, {
		.policy = "buffered",
		.cr_mask = CR_C,
		.pmd = PMD_SECT_BUFFERED,
		.pte = PTE_BUFFERABLE,
	}, {
		.policy = "writethrough",
		.cr_mask = 0,
		.pmd = PMD_SECT_WT,
		.pte = PTE_CACHEABLE,
	}, {
		.policy = "writeback",
		.cr_mask = 0,
		.pmd = PMD_SECT_WB,
		.pte = PTE_BUFFERABLE|PTE_CACHEABLE,
	}, {
		.policy = "writealloc",
		.cr_mask = 0,
		.pmd = PMD_SECT_WBWA,
		.pte = PTE_BUFFERABLE|PTE_CACHEABLE,
	}
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static void __init early_cachepolicy(char **p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			*p += len;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	flush_cache_all();
	set_cr(cr_alignment);
}

static void __init early_nocache(char **__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}

static void __init early_nowrite(char **__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}

static void __init early_ecc(char **p)
{
	if (memcmp(*p, "on", 2) == 0) {
		ecc_mask = PMD_PROTECTION;
		*p += 2;
	} else if (memcmp(*p, "off", 3) == 0) {
		ecc_mask = 0;
		*p += 3;
	}
}

__early_param("nocache", early_nocache);
__early_param("nowb", early_nowrite);
__early_param("cachepolicy=", early_cachepolicy);
__early_param("ecc=", early_ecc);

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}

__setup("noalign", noalign_setup);

#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

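	/* order-2 allocation: four pages give the 16K ARM first-level table */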
	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	init_pgd = pgd_offset_k(0);

	if (!vectors_high()) {
		/*
		 * This lock is here just to satisfy pmd_alloc and pte_lock
		 */
		spin_lock(&mm->page_table_lock);

		/*
		 * On ARM, the first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		set_pte(new_pte, *init_pte);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);

		spin_unlock(&mm->page_table_lock);
	}

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	return new_pgd;

no_pte:
	spin_unlock(&mm->page_table_lock);
	pmd_free(new_pmd);
	free_pages((unsigned long)new_pgd, 2);
	return NULL;

no_pmd:
	spin_unlock(&mm->page_table_lock);
	free_pages((unsigned long)new_pgd, 2);
	return NULL;

no_pgd:
	return NULL;
}

void free_pgd_slow(pgd_t *pgd)
{
	pmd_t *pmd;
	struct page *pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = (pmd_t *)pgd;
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

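	/*
	 * Free the pte table backing the vector page that get_pgd_slow()
	 * set up for this mm before releasing the 16K pgd itself.
	 */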
	pte = pmd_page(*pmd);
	pmd_clear(pmd);
	dec_page_state(nr_page_table_pages);
	pte_free(pte);
	pmd_free(pmd);
free:
	free_pages((unsigned long) pgd, 2);
}

/*
 * Create a SECTION PGD between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  This operates on half-
 * pgdir entry increments.
 */
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
	pmd_t *pmdp;

	pmdp = pmd_offset(pgd_offset_k(virt), virt);
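	/*
	 * A Linux pgdir entry covers 2MB, i.e. two 1MB hardware section
	 * descriptors; the odd megabyte goes into the second word.
	 */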
	if (virt & (1 << 20))
		pmdp++;

	*pmdp = __pmd(phys | prot);
	flush_pmd_entry(pmdp);
}

/*
 * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
 */
static inline void
alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
{
	int i;

	for (i = 0; i < 16; i += 1) {
		alloc_init_section(virt, phys & SUPERSECTION_MASK,
				   prot | PMD_SECT_SUPER);

		virt += (PGDIR_SIZE / 2);
		phys += (PGDIR_SIZE / 2);
	}
}

/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
{
	pmd_t *pmdp;
	pte_t *ptep;

	pmdp = pmd_offset(pgd_offset_k(virt), virt);

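	/*
	 * No page table for this 2MB region yet: allocate a combined table
	 * from bootmem and fill in both hardware L1 descriptors.
	 */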
	if (pmd_none(*pmdp)) {
		unsigned long pmdval;
		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
					       sizeof(pte_t));

		pmdval = __pa(ptep) | prot_l1;
		pmdp[0] = __pmd(pmdval);
		pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
		flush_pmd_entry(pmdp);
	}
	ptep = pte_offset_kernel(pmdp, virt);

	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
}

/*
 * Clear any PGD mapping.  On a two-level page table system,
 * the clearance is done by the middle-level functions (pmd)
 * rather than the top-level (pgd) functions.
 */
static inline void clear_mapping(unsigned long virt)
{
	pmd_clear(pmd_offset(pgd_offset_k(virt), virt));
}

struct mem_types {
	unsigned int prot_pte;
	unsigned int prot_l1;
	unsigned int prot_sect;
	unsigned int domain;
};

static struct mem_types mem_types[] __initdata = {
	[MT_DEVICE] = {
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
			    L_PTE_WRITE,
		.prot_l1 = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
			     PMD_SECT_AP_WRITE,
		.domain = DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
		.domain = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
			    L_PTE_EXEC,
		.prot_l1 = PMD_TYPE_TABLE,
		.domain = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
			    L_PTE_USER | L_PTE_EXEC,
		.prot_l1 = PMD_TYPE_TABLE,
		.domain = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain = DOMAIN_KERNEL,
	},
	[MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
			    L_PTE_WRITE,
		.prot_l1 = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
			     PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
			     PMD_SECT_TEX(1),
		.domain = DOMAIN_IO,
	}
};

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	int cpu_arch = cpu_architecture();
	int i;

#if defined(CONFIG_CPU_DCACHE_DISABLE)
	if (cachepolicy > CPOLICY_BUFFERED)
		cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
	if (cachepolicy > CPOLICY_WRITETHROUGH)
		cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}

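	/*
	 * On ARMv5 and earlier, descriptor bit 4 must be set for both
	 * section and page-table entries; ARMv6 redefines it as XN,
	 * which is handled below.
	 */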
	if (cpu_arch <= CPU_ARCH_ARMv5) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * bit 4 becomes XN which we must clear for the
		 * kernel memory mapping.
		 */
		mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
		mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;
		/*
		 * Mark cache clean areas read only from SVC mode
		 * and no access from userspace.
		 */
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
	}

	cp = &cache_policies[cachepolicy];

	if (cpu_arch >= CPU_ARCH_ARMv5) {
		mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
		mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
	} else {
		mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte;
		mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte;
		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
	}

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

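	/*
	 * Restrict the user protection_map to the cacheable/bufferable
	 * bits allowed by the selected cache policy.
	 */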
	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		v &= (~(PTE_BUFFERABLE|PTE_CACHEABLE)) | cp->pte;
		protection_map[i] = __pgprot(v);
	}

	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE |
				 L_PTE_EXEC | cp->pte);

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);
}

#define vectors_base() (vectors_high() ? 0xffff0000 : 0)

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long virt, length;
	int prot_sect, prot_l1, domain;
	pgprot_t prot_pte;
	long off;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08lx at 0x%08lx in user region\n",
		       md->physical, md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08lx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       md->physical, md->virtual);
	}

	domain = mem_types[md->type].domain;
	prot_pte = __pgprot(mem_types[md->type].prot_pte);
	prot_l1 = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
	prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);

	virt = md->virtual;
	off = md->physical - virt;
	length = md->length;

	if (mem_types[md->type].prot_l1 == 0 &&
	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       md->physical, md->virtual);
		return;
	}

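	/*
	 * Map the unaligned head of the region with individual pages until
	 * both the virtual and physical addresses reach a 1MB boundary.
	 */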
	while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt += PAGE_SIZE;
		length -= PAGE_SIZE;
	}

	/* N.B. ARMv6 supersections are only defined to work with domain 0.
	 *      Since domain assignments can in fact be arbitrary, the
	 *      'domain == 0' check below is required to ensure that ARMv6
	 *      supersections are only allocated for domain 0 regardless
	 *      of the actual domain assignments in use.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6 && domain == 0) {
		/* Align to supersection boundary */
		while ((virt & ~SUPERSECTION_MASK || (virt + off) &
			~SUPERSECTION_MASK) && length >= (PGDIR_SIZE / 2)) {
			alloc_init_section(virt, virt + off, prot_sect);

			virt += (PGDIR_SIZE / 2);
			length -= (PGDIR_SIZE / 2);
		}

		while (length >= SUPERSECTION_SIZE) {
			alloc_init_supersection(virt, virt + off, prot_sect);

			virt += SUPERSECTION_SIZE;
			length -= SUPERSECTION_SIZE;
		}
	}

	/*
	 * A section mapping covers half a "pgdir" entry.
	 */
	while (length >= (PGDIR_SIZE / 2)) {
		alloc_init_section(virt, virt + off, prot_sect);

		virt += (PGDIR_SIZE / 2);
		length -= (PGDIR_SIZE / 2);
	}

	while (length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long pmdval;
	pgd_t *pgd;
	pmd_t *pmd;
	int i;
	int cpu_arch = cpu_architecture();

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++) {
		pmdval = (i << PGDIR_SHIFT) |
			 PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
			 PMD_TYPE_SECT;
		if (cpu_arch <= CPU_ARCH_ARMv5)
			pmdval |= PMD_BIT4;
		pmd = pmd_offset(pgd + i, i << PGDIR_SHIFT);
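		/* fill both 1MB hardware section words of this 2MB pgdir entry */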
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}
}

extern void _stext, _etext;

/*
 * Setup initial mappings.  We use the page we allocated for zero page to hold
 * the mappings, which will get overwritten by the vectors in traps_init().
 * The mappings must be in virtual address order.
 */
void __init memtable_init(struct meminfo *mi)
{
	struct map_desc *init_maps, *p, *q;
	unsigned long address = 0;
	int i;

	build_mem_type_table();

	init_maps = p = alloc_bootmem_low_pages(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
	p->physical = CONFIG_XIP_PHYS_ADDR & PMD_MASK;
	p->virtual = (unsigned long)&_stext & PMD_MASK;
	p->length = ((unsigned long)&_etext - p->virtual + ~PMD_MASK) & PMD_MASK;
	p->type = MT_ROM;
	p ++;
#endif

	for (i = 0; i < mi->nr_banks; i++) {
		if (mi->bank[i].size == 0)
			continue;

		p->physical = mi->bank[i].start;
		p->virtual = __phys_to_virt(p->physical);
		p->length = mi->bank[i].size;
		p->type = MT_MEMORY;
		p ++;
	}

#ifdef FLUSH_BASE
	p->physical = FLUSH_BASE_PHYS;
	p->virtual = FLUSH_BASE;
	p->length = PGDIR_SIZE;
	p->type = MT_CACHECLEAN;
	p ++;
#endif

#ifdef FLUSH_BASE_MINICACHE
	p->physical = FLUSH_BASE_PHYS + PGDIR_SIZE;
	p->virtual = FLUSH_BASE_MINICACHE;
	p->length = PGDIR_SIZE;
	p->type = MT_MINICLEAN;
	p ++;
#endif

	/*
	 * Go through the initial mappings, but clear out any
	 * pgdir entries that are not in the description.
	 */
	q = init_maps;
	do {
		if (address < q->virtual || q == p) {
			clear_mapping(address);
			address += PGDIR_SIZE;
		} else {
			create_mapping(q);

			address = q->virtual + q->length;
			address = (address + PGDIR_SIZE - 1) & PGDIR_MASK;

			q ++;
		}
	} while (address != 0);

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	init_maps->physical = virt_to_phys(init_maps);
	init_maps->virtual = 0xffff0000;
	init_maps->length = PAGE_SIZE;
	init_maps->type = MT_HIGH_VECTORS;
	create_mapping(init_maps);

	if (!vectors_high()) {
		init_maps->virtual = 0;
		init_maps->type = MT_LOW_VECTORS;
		create_mapping(init_maps);
	}

	flush_cache_all();
	flush_tlb_all();
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}

static inline void
free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn);
	end_pg = pfn_to_page(end_pfn);

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
}

static inline void free_unused_memmap_node(int node, struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * [FIXME] This relies on each bank being in address order.  This
	 * may not be the case, especially if the user has provided the
	 * information on the command line.
	 */
	for (i = 0; i < mi->nr_banks; i++) {
		if (mi->bank[i].size == 0 || mi->bank[i].node != node)
			continue;

		bank_start = mi->bank[i].start >> PAGE_SHIFT;
		if (bank_start < prev_bank_end) {
			printk(KERN_ERR "MEM: unordered memory banks. "
				"Not freeing memmap.\n");
			break;
		}

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end != bank_start)
			free_memmap(node, prev_bank_end, bank_start);

		prev_bank_end = PAGE_ALIGN(mi->bank[i].start +
					   mi->bank[i].size) >> PAGE_SHIFT;
	}
}

/*
 * The mem_map array can get very big.  Free
 * the unused area of the memory map.
 */
void __init create_memmap_holes(struct meminfo *mi)
{
	int node;

	for_each_online_node(node)
		free_unused_memmap_node(node, mi);
}