author    Santosh Shilimkar <santosh.shilimkar@ti.com>   2010-09-24 02:18:22 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>     2010-09-25 10:58:39 -0400
commit    f1a2481c0ad3aebd94d11b317c488deaadc25002 (patch)
tree      34835ebb681604635db2f8b44c2fa7d2d0f75933 /arch/arm/mm
parent    f933b87e6f06fcce5988fd5e8ebf3a97858a7155 (diff)
ARM: 6407/1: mmu: Setup MT_MEMORY and MT_MEMORY_NONCACHED L1 entries
This patch populates the L1 entries for the MT_MEMORY and MT_MEMORY_NONCACHED types so that, at boot-up, memory outside system memory can be mapped at page-level granularity. Previously the mapping was limited to section level, which creates unnecessary additional mappings for which physical memory may not be present. On newer ARM CPUs with speculation, this is dangerous and can result in untraceable aborts.

Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
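For context (this sketch is not part of the patch), the page-level mapping path in arch/arm/mm/mmu.c consumes these fields roughly as follows; the helper names and signatures below are an approximation of the 2.6.36-era code and should be treated as illustrative only. The point is that the second-level setup builds the L1 entry from type->prot_l1 and each PTE from type->prot_pte, both of which were previously zero for MT_MEMORY and MT_MEMORY_NONCACHED:

/*
 * Simplified, illustrative sketch of the page-granularity mapping path.
 * Not the exact kernel source; names approximate the 2.6.36-era mmu.c.
 */
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		/* prot_l1 supplies the L1 "coarse page table" descriptor bits */
		pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte) | type->prot_l1);
	}

	pte = pte_offset_kernel(pmd, addr);
	do {
		/* prot_pte supplies the per-page protection/memory-type bits */
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}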
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/mmu.c | 17
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index a486bd0d97dc..6a3a2d0cd6db 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -247,6 +247,9 @@ static struct mem_type mem_types[] = {
 		.domain    = DOMAIN_USER,
 	},
 	[MT_MEMORY] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				L_PTE_USER | L_PTE_EXEC,
+		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
@@ -255,6 +258,9 @@ static struct mem_type mem_types[] = {
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_NONCACHED] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
+		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
@@ -412,9 +418,12 @@ static void __init build_mem_type_table(void)
 	 * Enable CPU-specific coherency if supported.
 	 * (Only available on XSC3 at the moment.)
 	 */
-	if (arch_is_coherent() && cpu_is_xsc3())
+	if (arch_is_coherent() && cpu_is_xsc3()) {
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-
+		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+	}
 	/*
 	 * ARMv6 and above have extended page tables.
 	 */
@@ -439,7 +448,9 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
 		mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
 #endif
 	}
 
@@ -476,6 +487,8 @@ static void __init build_mem_type_table(void)
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
 
 	switch (cp->pmd) {