Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--	arch/arm/mm/mmu.c	106
1 file changed, 58 insertions(+), 48 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index a713e40e1f1a..8ba754064559 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -15,6 +15,7 @@
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 
+#include <asm/cputype.h>
 #include <asm/mach-types.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
@@ -27,9 +28,6 @@
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-extern void _stext, _etext, __data_start, _end;
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
 /*
  * empty_zero_page is a special page that is used for
  * zero-initialized data and COW.
@@ -68,27 +66,27 @@ static struct cachepolicy cache_policies[] __initdata = {
 		.policy		= "uncached",
 		.cr_mask	= CR_W|CR_C,
 		.pmd		= PMD_SECT_UNCACHED,
-		.pte		= 0,
+		.pte		= L_PTE_MT_UNCACHED,
 	}, {
 		.policy		= "buffered",
 		.cr_mask	= CR_C,
 		.pmd		= PMD_SECT_BUFFERED,
-		.pte		= PTE_BUFFERABLE,
+		.pte		= L_PTE_MT_BUFFERABLE,
 	}, {
 		.policy		= "writethrough",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WT,
-		.pte		= PTE_CACHEABLE,
+		.pte		= L_PTE_MT_WRITETHROUGH,
 	}, {
 		.policy		= "writeback",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WB,
-		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
+		.pte		= L_PTE_MT_WRITEBACK,
 	}, {
 		.policy		= "writealloc",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WBWA,
-		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
+		.pte		= L_PTE_MT_WRITEALLOC,
 	}
 };
 
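Note: this hunk is the core of the change -- the boot-time cache policies stop encoding the two independent PTE cache bits (PTE_CACHEABLE, PTE_BUFFERABLE) and instead carry a single L_PTE_MT_* memory-type index. A minimal sketch of the idea, assuming the index occupies a small field of the Linux PTE; the exact shift, mask, and values below are illustrative stand-ins for this era's include/asm-arm/pgtable.h, not authoritative:

	/* Illustrative values only -- see include/asm-arm/pgtable.h. */
	#define L_PTE_MT_SHIFT		2
	#define L_PTE_MT_MASK		(0x0f << L_PTE_MT_SHIFT)
	#define L_PTE_MT_UNCACHED	(0x00 << L_PTE_MT_SHIFT)
	#define L_PTE_MT_WRITETHROUGH	(0x02 << L_PTE_MT_SHIFT)

	/* Swap the memory type of a Linux PTE value without
	 * disturbing the permission bits. */
	static unsigned long pte_with_mt(unsigned long pteval, unsigned long mt)
	{
		return (pteval & ~L_PTE_MT_MASK) | mt;
	}

An indexed field can name more types (for instance the L_PTE_MT_DEV_* device variants used below) than two independent cacheable/bufferable bits can express.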
@@ -186,35 +184,28 @@ void adjust_cr(unsigned long mask, unsigned long set)
 
 static struct mem_type mem_types[] = {
 	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
-		.prot_pte	= PROT_PTE_DEVICE,
+		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
+				  L_PTE_SHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
-		.prot_pte	= PROT_PTE_DEVICE,
-		.prot_pte_ext	= PTE_EXT_TEX(2),
+		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_TEX(2),
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
-		.prot_pte	= PROT_PTE_DEVICE | L_PTE_CACHEABLE | L_PTE_BUFFERABLE,
+		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
 		.domain		= DOMAIN_IO,
 	},
-	[MT_DEVICE_IXP2000] = {	  /* IXP2400 requires XCB=101 for on-chip I/O */
-		.prot_pte	= PROT_PTE_DEVICE,
-		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE |
-				  PMD_SECT_TEX(1),
-		.domain		= DOMAIN_IO,
-	},
 	[MT_DEVICE_WC] = {	/* ioremap_wc */
-		.prot_pte	= PROT_PTE_DEVICE,
+		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE,
+		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_CACHECLEAN] = {
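Note: with prot_pte_ext gone and the IXP2000-specific entry dropped, each mapping class is fully described by one prot_pte word plus the L1/section fields and a domain. A sketch of how a caller might consume the table; get_mem_type() is the real lookup helper in this file's mm layer, but the wrapper and its fallback policy are hypothetical:

	/* Hypothetical wrapper, shown only to illustrate the lookup. */
	static pgprot_t mapping_pgprot(unsigned int mtype)
	{
		const struct mem_type *type = get_mem_type(mtype);

		/* Fall back to strongly-ordered device mappings. */
		if (type == NULL)
			type = get_mem_type(MT_DEVICE);
		return __pgprot(type->prot_pte);
	}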
@@ -259,7 +250,7 @@ static void __init build_mem_type_table(void)
 {
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
-	unsigned int user_pgprot, kern_pgprot;
+	unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
 	int cpu_arch = cpu_architecture();
 	int i;
 
@@ -277,6 +268,9 @@
 		cachepolicy = CPOLICY_WRITEBACK;
 		ecc_mask = 0;
 	}
+#ifdef CONFIG_SMP
+	cachepolicy = CPOLICY_WRITEALLOC;
+#endif
 
 	/*
 	 * On non-Xscale3 ARMv5-and-older systems, use CB=01
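Note: the added #ifdef forces write-allocate on SMP kernels regardless of any cachepolicy= boot argument parsed just above. Later kernels make the override explicit with a warning; a sketch of that more talkative form (the check and the printk are not part of this patch):

	#ifdef CONFIG_SMP
		if (cachepolicy != CPOLICY_WRITEALLOC) {
			printk(KERN_WARNING "Forcing write-allocate cache "
			       "policy for SMP\n");
			cachepolicy = CPOLICY_WRITEALLOC;
		}
	#endif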
@@ -285,11 +279,8 @@
 	 * in xsc3 parlance, Uncached Normal in ARMv6 parlance).
 	 */
 	if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
-		mem_types[MT_DEVICE_WC].prot_pte_ext |= PTE_EXT_TEX(1);
 		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
-	} else {
-		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_BUFFERABLE;
-		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+		mem_types[MT_DEVICE_WC].prot_sect &= ~PMD_SECT_BUFFERABLE;
 	}
 
 	/*
@@ -312,7 +303,15 @@
 	}
 
 	cp = &cache_policies[cachepolicy];
-	kern_pgprot = user_pgprot = cp->pte;
+	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+
+#ifndef CONFIG_SMP
+	/*
+	 * Only use write-through for non-SMP systems
+	 */
+	if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
+		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
+#endif
 
 	/*
 	 * Enable CPU-specific coherency if supported.
@@ -340,7 +339,6 @@
 		/*
 		 * Mark the device area as "shared device"
 		 */
-		mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
 		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
 
 #ifdef CONFIG_SMP
@@ -349,30 +347,21 @@
 		 */
 		user_pgprot |= L_PTE_SHARED;
 		kern_pgprot |= L_PTE_SHARED;
+		vecs_pgprot |= L_PTE_SHARED;
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 #endif
 	}
 
 	for (i = 0; i < 16; i++) {
 		unsigned long v = pgprot_val(protection_map[i]);
-		v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
-		protection_map[i] = __pgprot(v);
+		protection_map[i] = __pgprot(v | user_pgprot);
 	}
 
-	mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
-	mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;
+	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
+	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
 
-	if (cpu_arch >= CPU_ARCH_ARMv5) {
-#ifndef CONFIG_SMP
-		/*
-		 * Only use write-through for non-SMP systems
-		 */
-		mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
-		mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
-#endif
-	} else {
+	if (cpu_arch < CPU_ARCH_ARMv5)
 		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
-	}
 
 	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
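Note: the protection_map loop no longer masks out L_PTE_BUFFERABLE and L_PTE_CACHEABLE before OR-ing in user_pgprot, presumably because the base entries now carry an all-zero (uncached) memory-type field for the OR to fill in. The new vecs_pgprot lets the vector pages keep a write-through mapping on non-SMP ARMv5+ while everything else uses the chosen policy. The sixteen protection_map slots are indexed by the read/write/exec/shared flag combinations; a sketch of the lookup generic mm code performs (the real helper is vm_get_page_prot() in mm/mmap.c):

	#include <linux/mm.h>

	/* Resolve a pgprot from VMA flags; the low four flag bits
	 * (VM_READ, VM_WRITE, VM_EXEC, VM_SHARED) form the index. */
	static pgprot_t prot_for(unsigned long vm_flags)
	{
		return protection_map[vm_flags &
				(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
	}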
@@ -420,8 +409,7 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
-			    type->prot_pte_ext);
+		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
 		pfn++;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
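Note: the extra set_pte_ext() argument and the prot_pte_ext field disappear because the hardware TEX/C/B bits can now be derived from the L_PTE_MT_* field by each CPU's set_pte_ext implementation (assembly in arch/arm/mm/proc-*.S). A C-flavoured sketch of that translation with made-up table contents -- the names, values, and the very existence of such a C table are assumptions for illustration:

	#define MT_SHIFT	2		/* illustrative */
	#define MT_MASK		(0x0f << MT_SHIFT)

	/* Hypothetical map from memory-type index to hardware bits. */
	static const unsigned long hw_bits[16] = {
		[0] = 0,		/* uncached: TEXCB = 00000 */
		[2] = 1 << 3,		/* writethrough: C bit, say */
		/* ... one entry per supported L_PTE_MT_* value ... */
	};

	static unsigned long linux_pte_to_hw(unsigned long pte)
	{
		return (pte & ~MT_MASK) | hw_bits[(pte & MT_MASK) >> MT_SHIFT];
	}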
@@ -588,12 +576,35 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		create_mapping(io_desc + i);
 }
 
+static unsigned long __initdata vmalloc_reserve = SZ_128M;
+
+/*
+ * vmalloc=size forces the vmalloc area to be exactly 'size'
+ * bytes. This can be used to increase (or decrease) the vmalloc
+ * area - the default is 128m.
+ */
+static void __init early_vmalloc(char **arg)
+{
+	vmalloc_reserve = memparse(*arg, arg);
+
+	if (vmalloc_reserve < SZ_16M) {
+		vmalloc_reserve = SZ_16M;
+		printk(KERN_WARNING
+			"vmalloc area too small, limiting to %luMB\n",
+			vmalloc_reserve >> 20);
+	}
+}
+__early_param("vmalloc=", early_vmalloc);
+
+#define VMALLOC_MIN	(void *)(VMALLOC_END - vmalloc_reserve)
+
 static int __init check_membank_valid(struct membank *mb)
 {
 	/*
-	 * Check whether this memory region has non-zero size.
+	 * Check whether this memory region has non-zero size or
+	 * invalid node number.
 	 */
-	if (mb->size == 0)
+	if (mb->size == 0 || mb->node >= MAX_NUMNODES)
 		return 0;
 
 	/*
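Note: early_vmalloc() wires the vmalloc= command-line option through memparse(), which accepts the usual K/M/G suffixes, then clamps the result to at least 16MB; booting with vmalloc=256M, for example, puts VMALLOC_MIN 256MB below VMALLOC_END. A simplified stand-in for the suffix handling (the real memparse() lives in lib/cmdline.c; this sketch is not its code):

	#include <stdlib.h>

	static unsigned long long parse_size(const char *s, char **end)
	{
		unsigned long long val = strtoull(s, end, 0);

		switch (**end) {
		case 'G': case 'g': val <<= 10;	/* fall through */
		case 'M': case 'm': val <<= 10;	/* fall through */
		case 'K': case 'k': val <<= 10; (*end)++;
		}
		return val;
	}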
@@ -627,8 +638,7 @@ static int __init check_membank_valid(struct membank *mb)
 
 static void __init sanity_check_meminfo(struct meminfo *mi)
 {
-	int i;
-	int j;
+	int i, j;
 
 	for (i = 0, j = 0; i < mi->nr_banks; i++) {
 		if (check_membank_valid(&mi->bank[i]))
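Note: sanity_check_meminfo() uses the classic in-place compaction idiom: i visits every bank, j counts the survivors, and (in the body elided from this hunk) each bank that passes check_membank_valid() is presumably copied down to slot j. The pattern in isolation, with hypothetical names:

	#include <stddef.h>

	/* Keep the elements that pass keep(), preserving order;
	 * returns the new count.  Self-copy when i == j is harmless. */
	static size_t compact(int *a, size_t n, int (*keep)(int))
	{
		size_t i, j;

		for (i = 0, j = 0; i < n; i++)
			if (keep(a[i]))
				a[j++] = a[i];
		return j;
	}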