Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--   arch/arm/mm/mmu.c   108
1 file changed, 69 insertions(+), 39 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 25d9a11eb617..8ba754064559 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -15,6 +15,7 @@
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 
+#include <asm/cputype.h>
 #include <asm/mach-types.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
@@ -27,9 +28,6 @@
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-extern void _stext, _etext, __data_start, _end;
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
 /*
  * empty_zero_page is a special page that is used for
  * zero-initialized data and COW.
@@ -68,27 +66,27 @@ static struct cachepolicy cache_policies[] __initdata = {
                .policy         = "uncached",
                .cr_mask        = CR_W|CR_C,
                .pmd            = PMD_SECT_UNCACHED,
-               .pte            = 0,
+               .pte            = L_PTE_MT_UNCACHED,
        }, {
                .policy         = "buffered",
                .cr_mask        = CR_C,
                .pmd            = PMD_SECT_BUFFERED,
-               .pte            = PTE_BUFFERABLE,
+               .pte            = L_PTE_MT_BUFFERABLE,
        }, {
                .policy         = "writethrough",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WT,
-               .pte            = PTE_CACHEABLE,
+               .pte            = L_PTE_MT_WRITETHROUGH,
        }, {
                .policy         = "writeback",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WB,
-               .pte            = PTE_BUFFERABLE|PTE_CACHEABLE,
+               .pte            = L_PTE_MT_WRITEBACK,
        }, {
                .policy         = "writealloc",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WBWA,
-               .pte            = PTE_BUFFERABLE|PTE_CACHEABLE,
+               .pte            = L_PTE_MT_WRITEALLOC,
        }
 };
 
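The cache_policies[] conversion above replaces the old independent PTE_BUFFERABLE/PTE_CACHEABLE bits with a single L_PTE_MT_* memory-type field in the Linux PTE. A rough sketch of the idea follows; the field position and width here are assumptions for illustration only, the real definitions live in the ARM pgtable headers:

    /* Illustrative only: the Linux PTE now carries one small memory-type
     * index instead of separate C and B bits, so cacheable, device and
     * write-combine mappings can all be told apart from the same field.
     */
    #define L_PTE_MT_SHIFT  2                               /* assumed */
    #define L_PTE_MT_MASK   (0x0f << L_PTE_MT_SHIFT)        /* assumed */

    static inline unsigned int pte_memtype(unsigned long pteval)
    {
        /* Extract the memory-type index from a Linux PTE value. */
        return (pteval & L_PTE_MT_MASK) >> L_PTE_MT_SHIFT;
    }

The per-CPU set_pte_ext() implementations translate this index into the hardware C/B/TEX bits, which is why the prot_pte_ext field and the explicit TEX manipulation disappear in the hunks below.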
@@ -186,29 +184,28 @@ void adjust_cr(unsigned long mask, unsigned long set)
 
 static struct mem_type mem_types[] = {
        [MT_DEVICE] = {           /* Strongly ordered / ARMv6 shared device */
-               .prot_pte       = PROT_PTE_DEVICE,
+               .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
+                                 L_PTE_SHARED,
                .prot_l1        = PMD_TYPE_TABLE,
                .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
                .domain         = DOMAIN_IO,
        },
        [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
-               .prot_pte       = PROT_PTE_DEVICE,
-               .prot_pte_ext   = PTE_EXT_TEX(2),
+               .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
                .prot_l1        = PMD_TYPE_TABLE,
                .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_TEX(2),
                .domain         = DOMAIN_IO,
        },
        [MT_DEVICE_CACHED] = {    /* ioremap_cached */
-               .prot_pte       = PROT_PTE_DEVICE | L_PTE_CACHEABLE | L_PTE_BUFFERABLE,
+               .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
                .prot_l1        = PMD_TYPE_TABLE,
                .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_WB,
                .domain         = DOMAIN_IO,
        },
-       [MT_DEVICE_IXP2000] = {   /* IXP2400 requires XCB=101 for on-chip I/O */
-               .prot_pte       = PROT_PTE_DEVICE,
+       [MT_DEVICE_WC] = {        /* ioremap_wc */
+               .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
                .prot_l1        = PMD_TYPE_TABLE,
-               .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE |
-                                 PMD_SECT_TEX(1),
+               .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE,
                .domain         = DOMAIN_IO,
        },
        [MT_CACHECLEAN] = {
@@ -253,7 +250,7 @@ static void __init build_mem_type_table(void)
 {
        struct cachepolicy *cp;
        unsigned int cr = get_cr();
-       unsigned int user_pgprot, kern_pgprot;
+       unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
        int cpu_arch = cpu_architecture();
        int i;
 
@@ -271,6 +268,20 @@
                cachepolicy = CPOLICY_WRITEBACK;
                ecc_mask = 0;
        }
+#ifdef CONFIG_SMP
+       cachepolicy = CPOLICY_WRITEALLOC;
+#endif
+
+       /*
+        * On non-Xscale3 ARMv5-and-older systems, use CB=01
+        * (Uncached/Buffered) for ioremap_wc() mappings.  On XScale3
+        * and ARMv6+, use TEXCB=00100 mappings (Inner/Outer Uncacheable
+        * in xsc3 parlance, Uncached Normal in ARMv6 parlance).
+        */
+       if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
+               mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+               mem_types[MT_DEVICE_WC].prot_sect &= ~PMD_SECT_BUFFERABLE;
+       }
 
        /*
         * ARMv5 and lower, bit 4 must be set for page tables.
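With MT_DEVICE_IXP2000 gone, MT_DEVICE_WC becomes the generic write-combine type behind ioremap_wc(), and the block above chooses between the CB=01 and TEXCB=00100 encodings for it per CPU. A hedged usage sketch; the device address, size, and function name are placeholders, not from this patch:

    #include <linux/errno.h>
    #include <linux/io.h>

    static void __iomem *fb_base;

    /* Map a (hypothetical) framebuffer write-combined, which now goes
     * through the MT_DEVICE_WC memory type on ARM. */
    static int example_map_fb(unsigned long phys, unsigned long size)
    {
        fb_base = ioremap_wc(phys, size);
        if (!fb_base)
            return -ENOMEM;
        return 0;
    }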
@@ -292,7 +303,15 @@
        }
 
        cp = &cache_policies[cachepolicy];
-       kern_pgprot = user_pgprot = cp->pte;
+       vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+
+#ifndef CONFIG_SMP
+       /*
+        * Only use write-through for non-SMP systems
+        */
+       if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
+               vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
+#endif
 
        /*
         * Enable CPU-specific coherency if supported.
@@ -320,7 +339,6 @@
                /*
                 * Mark the device area as "shared device"
                 */
-               mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
                mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
 
 #ifdef CONFIG_SMP
@@ -329,30 +347,21 @@
                 */
                user_pgprot |= L_PTE_SHARED;
                kern_pgprot |= L_PTE_SHARED;
+               vecs_pgprot |= L_PTE_SHARED;
                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 #endif
        }
 
        for (i = 0; i < 16; i++) {
                unsigned long v = pgprot_val(protection_map[i]);
-               v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
-               protection_map[i] = __pgprot(v);
+               protection_map[i] = __pgprot(v | user_pgprot);
        }
 
-       mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
-       mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;
+       mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
+       mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
 
-       if (cpu_arch >= CPU_ARCH_ARMv5) {
-#ifndef CONFIG_SMP
-               /*
-                * Only use write-through for non-SMP systems
-                */
-               mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
-               mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
-#endif
-       } else {
+       if (cpu_arch < CPU_ARCH_ARMv5)
                mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
-       }
 
        pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
        pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
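protection_map[] has sixteen entries, indexed by the low four VM_* flag bits of a vma; because user_pgprot now carries the complete memory type, the loop above can OR it straight in instead of first masking out the old C/B bits. Roughly how the table is consumed, as a sketch of the generic vm_get_page_prot() logic rather than code from this patch:

    #include <linux/mm.h>

    /* The base protection for a vma is selected by its read/write/exec/
     * shared bits; build_mem_type_table() has already folded the chosen
     * memory type into every entry. */
    static pgprot_t example_page_prot(unsigned long vm_flags)
    {
        return protection_map[vm_flags &
                (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
    }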
@@ -400,8 +409,7 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 
        pte = pte_offset_kernel(pmd, addr);
        do {
-               set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
-                           type->prot_pte_ext);
+               set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
 }
@@ -568,12 +576,35 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
                create_mapping(io_desc + i);
 }
 
+static unsigned long __initdata vmalloc_reserve = SZ_128M;
+
+/*
+ * vmalloc=size forces the vmalloc area to be exactly 'size'
+ * bytes. This can be used to increase (or decrease) the vmalloc
+ * area - the default is 128m.
+ */
+static void __init early_vmalloc(char **arg)
+{
+       vmalloc_reserve = memparse(*arg, arg);
+
+       if (vmalloc_reserve < SZ_16M) {
+               vmalloc_reserve = SZ_16M;
+               printk(KERN_WARNING
+                       "vmalloc area too small, limiting to %luMB\n",
+                       vmalloc_reserve >> 20);
+       }
+}
+__early_param("vmalloc=", early_vmalloc);
+
+#define VMALLOC_MIN    (void *)(VMALLOC_END - vmalloc_reserve)
+
 static int __init check_membank_valid(struct membank *mb)
 {
        /*
-        * Check whether this memory region has non-zero size.
+        * Check whether this memory region has non-zero size or
+        * invalid node number.
         */
-       if (mb->size == 0)
+       if (mb->size == 0 || mb->node >= MAX_NUMNODES)
                return 0;
 
        /*
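The new vmalloc= early parameter is parsed with memparse(), which accepts the usual K/M/G suffixes, and the result is clamped to a 16MB minimum; booting with vmalloc=256M, for example, moves VMALLOC_MIN down to VMALLOC_END minus 256MB. A host-side sketch of the suffix scaling memparse() performs (simplified; lib/cmdline.c has the real implementation):

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long long parse_size(const char *s)
    {
        char *end;
        unsigned long long v = strtoull(s, &end, 0);

        switch (*end) {                 /* scale by the optional suffix */
        case 'G': case 'g': v <<= 10;   /* fall through */
        case 'M': case 'm': v <<= 10;   /* fall through */
        case 'K': case 'k': v <<= 10;
        }
        return v;
    }

    int main(void)
    {
        printf("%llu\n", parse_size("256M"));   /* prints 268435456 */
        return 0;
    }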
@@ -607,8 +638,7 @@ static int __init check_membank_valid(struct membank *mb)
 
 static void __init sanity_check_meminfo(struct meminfo *mi)
 {
-       int i;
-       int j;
+       int i, j;
 
        for (i = 0, j = 0; i < mi->nr_banks; i++) {
                if (check_membank_valid(&mi->bank[i]))
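The hunk above shows only the head of this loop; sanity_check_meminfo() uses the standard in-place filter pattern, copying each valid bank down over any rejected ones and trimming the count afterwards. Sketched below; the loop tail falls outside this diff, so treat it as an assumption:

    /* Keep only the banks that pass check_membank_valid(), compacting
     * the array in place, then shrink nr_banks to the survivors. */
    for (i = 0, j = 0; i < mi->nr_banks; i++) {
        if (check_membank_valid(&mi->bank[i]))
            mi->bank[j++] = mi->bank[i];
    }
    mi->nr_banks = j;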