path: root/arch/arm/mm/mmu.c
author    Russell King <rmk+kernel@arm.linux.org.uk>  2014-06-02 04:29:37 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>  2014-06-02 07:52:43 -0400
commit    20e7e364331d7b5590695a839a18a00547403f18 (patch)
tree      df44deaa7b2a774dcde2495c3b78eb9c948f95bd /arch/arm/mm/mmu.c
parent    ca8f0b0a545f55b3dc6877cda24d609a8979c951 (diff)
ARM: ensure C page table setup code follows assembly code (part II)
This does the same as the previous commit, but for the S bit, which also
needs to match the initial value the assembly code used, for the same
reasons. Again, we add a check for SMP to ensure that the page tables
are correctly set up for SMP.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
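For readers tracing the logic rather than the diff hunks, here is a minimal, self-contained C sketch (an editorial illustration, not kernel code) of what the patch does: record the PMD flags the boot assembly used, then force the shareable (S) bit on SMP when the assembly left it clear. PMD_SECT_S is modelled as bit 16, its position in a 2-level ARM section descriptor; is_smp() is a stub standing in for the kernel's runtime SMP check, and record_boot_pmd() is a hypothetical stand-in for init_default_cache_policy().

#include <stdbool.h>
#include <stdio.h>

#define PMD_SECT_S	(1UL << 16)	/* "shareable" bit in an ARM section descriptor */

/* Mirrors the new __initdata variable: the PMD flags the assembly used. */
static unsigned long initial_pmd_value;

static bool is_smp(void) { return true; }	/* stub for the kernel's SMP probe */

/* Hypothetical stand-in for init_default_cache_policy(): capture the boot value. */
static void record_boot_pmd(unsigned long pmd)
{
	initial_pmd_value = pmd;
}

int main(void)
{
	record_boot_pmd(0);	/* pretend the boot assembly left the S bit clear */

	/* The check build_mem_type_table() gains in this patch. */
	if (is_smp() && !(initial_pmd_value & PMD_SECT_S)) {
		printf("Forcing shared mappings for SMP\n");
		initial_pmd_value |= PMD_SECT_S;
	}

	printf("S bit is now %s\n",
	       (initial_pmd_value & PMD_SECT_S) ? "set" : "clear");
	return 0;
}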
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--  arch/arm/mm/mmu.c | 27
1 file changed, 19 insertions(+), 8 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 92df149c88a8..df875c457068 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -117,6 +117,8 @@ static struct cachepolicy cache_policies[] __initdata = {
 };
 
 #ifdef CONFIG_CPU_CP15
+static unsigned long initial_pmd_value __initdata = 0;
+
 /*
  * Initialise the cache_policy variable with the initial state specified
  * via the "pmd" value. This is used to ensure that on ARMv6 and later,
@@ -128,6 +130,8 @@ void __init init_default_cache_policy(unsigned long pmd)
 {
 	int i;
 
+	initial_pmd_value = pmd;
+
 	pmd &= PMD_SECT_TEX(1) | PMD_SECT_BUFFERABLE | PMD_SECT_CACHEABLE;
 
 	for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
@@ -414,9 +418,15 @@ static void __init build_mem_type_table(void)
 		ecc_mask = 0;
 	}
 
-	if (is_smp() && cachepolicy != CPOLICY_WRITEALLOC) {
-		pr_warn("Forcing write-allocate cache policy for SMP\n");
-		cachepolicy = CPOLICY_WRITEALLOC;
+	if (is_smp()) {
+		if (cachepolicy != CPOLICY_WRITEALLOC) {
+			pr_warn("Forcing write-allocate cache policy for SMP\n");
+			cachepolicy = CPOLICY_WRITEALLOC;
+		}
+		if (!(initial_pmd_value & PMD_SECT_S)) {
+			pr_warn("Forcing shared mappings for SMP\n");
+			initial_pmd_value |= PMD_SECT_S;
+		}
 	}
 
 	/*
@@ -541,11 +551,12 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 #endif
 
-	if (is_smp()) {
-		/*
-		 * Mark memory with the "shared" attribute
-		 * for SMP systems
-		 */
+	/*
+	 * If the initial page tables were created with the S bit
+	 * set, then we need to do the same here for the same
+	 * reasons given in early_cachepolicy().
+	 */
+	if (initial_pmd_value & PMD_SECT_S) {
 		user_pgprot |= L_PTE_SHARED;
 		kern_pgprot |= L_PTE_SHARED;
 		vecs_pgprot |= L_PTE_SHARED;
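To close the loop on the final hunk, a hedged sketch of its net effect: once initial_pmd_value carries the S bit, the Linux-level shared bit is OR'd into the page protection templates so every PTE built later inherits it. L_PTE_SHARED is modelled as bit 10 (its 2-level ARM position); the pgprot variables are plain integers standing in for the locals in build_mem_type_table().

#include <stdio.h>

#define PMD_SECT_S	(1UL << 16)	/* hardware section "shareable" bit */
#define L_PTE_SHARED	(1UL << 10)	/* Linux PTE "shared" bit (ARMv6+) */

int main(void)
{
	/* Pretend the boot assembly built the tables with S set. */
	unsigned long initial_pmd_value = PMD_SECT_S;
	unsigned long user_pgprot = 0, kern_pgprot = 0, vecs_pgprot = 0;

	/*
	 * The rewritten condition keys off the recorded PMD value rather
	 * than is_smp(), so the C-built tables always match the
	 * assembly-built ones.
	 */
	if (initial_pmd_value & PMD_SECT_S) {
		user_pgprot |= L_PTE_SHARED;
		kern_pgprot |= L_PTE_SHARED;
		vecs_pgprot |= L_PTE_SHARED;
	}

	printf("user=%#lx kern=%#lx vecs=%#lx\n",
	       user_pgprot, kern_pgprot, vecs_pgprot);
	return 0;
}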