about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	Russell King <rmk+kernel@arm.linux.org.uk>	2014-05-27 15:34:28 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2014-06-02 04:23:54 -0400
commitca8f0b0a545f55b3dc6877cda24d609a8979c951 (patch)
tree6f6800bbf78f1b81a1d6bb73d84f380e08113c17
parent8229c54fa1747765dae1a77875b04e4d69f6ab62 (diff)
ARM: ensure C page table setup code follows assembly code
Fix a long standing bug where, for ARMv6+, we don't fully ensure that the C code sets the same cache policy as the assembly code. This was introduced partially by commit 11179d8ca28d ([ARM] 4497/1: Only allow safe cache configurations on ARMv6 and later) and also by adding SMP support. This patch sets the default cache policy based on the flags used by the assembly code, and then ensures that when a cache policy command line argument is used, we verify that on ARMv6, it matches the initial setup. Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r--	arch/arm/kernel/setup.c	5
-rw-r--r--	arch/arm/mm/mmu.c	63
2 files changed, 51 insertions(+), 17 deletions(-)
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index df21f9f98945..f5120ca08671 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -72,6 +72,7 @@ static int __init fpe_setup(char *line)
 __setup("fpe=", fpe_setup);
 #endif
 
+extern void init_default_cache_policy(unsigned long);
 extern void paging_init(const struct machine_desc *desc);
 extern void early_paging_init(const struct machine_desc *,
 			      struct proc_info_list *);
@@ -603,7 +604,9 @@ static void __init setup_processor(void)
 #ifndef CONFIG_ARM_THUMB
 	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
 #endif
-
+#ifdef CONFIG_MMU
+	init_default_cache_policy(list->__cpu_mm_mmu_flags);
+#endif
 	erratum_a15_798181_init();
 
 	feat_v6_fixup();
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9c8fec02c274..92df149c88a8 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -118,27 +118,49 @@ static struct cachepolicy cache_policies[] __initdata = {
 
 #ifdef CONFIG_CPU_CP15
 /*
- * These are useful for identifying cache coherency
- * problems by allowing the cache or the cache and
- * writebuffer to be turned off.  (Note: the write
- * buffer should not be on and the cache off).
+ * Initialise the cache_policy variable with the initial state specified
+ * via the "pmd" value.  This is used to ensure that on ARMv6 and later,
+ * the C code sets the page tables up with the same policy as the head
+ * assembly code, which avoids an illegal state where the TLBs can get
+ * confused.  See comments in early_cachepolicy() for more information.
  */
-static int __init early_cachepolicy(char *p)
+void __init init_default_cache_policy(unsigned long pmd)
 {
-	unsigned long cr = get_cr();
 	int i;
 
+	pmd &= PMD_SECT_TEX(1) | PMD_SECT_BUFFERABLE | PMD_SECT_CACHEABLE;
+
+	for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
+		if (cache_policies[i].pmd == pmd) {
+			cachepolicy = i;
+			break;
+		}
+
+	if (i == ARRAY_SIZE(cache_policies))
+		pr_err("ERROR: could not find cache policy\n");
+}
+
+/*
+ * These are useful for identifying cache coherency problems by allowing
+ * the cache or the cache and writebuffer to be turned off.  (Note: the
+ * write buffer should not be on and the cache off).
+ */
+static int __init early_cachepolicy(char *p)
+{
+	int i, selected = -1;
+
 	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
 		int len = strlen(cache_policies[i].policy);
 
 		if (memcmp(p, cache_policies[i].policy, len) == 0) {
-			cachepolicy = i;
-			cr = __clear_cr(cache_policies[i].cr_mask);
+			selected = i;
 			break;
 		}
 	}
-	if (i == ARRAY_SIZE(cache_policies))
-		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
+
+	if (selected == -1)
+		pr_err("ERROR: unknown or unsupported cache policy\n");
+
 	/*
 	 * This restriction is partly to do with the way we boot; it is
 	 * unpredictable to have memory mapped using two different sets of
@@ -146,12 +168,18 @@ static int __init early_cachepolicy(char *p)
 	 * change these attributes once the initial assembly has setup the
 	 * page tables.
 	 */
-	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
-		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
-		cachepolicy = CPOLICY_WRITEBACK;
+	if (cpu_architecture() >= CPU_ARCH_ARMv6 && selected != cachepolicy) {
+		pr_warn("Only cachepolicy=%s supported on ARMv6 and later\n",
+			cache_policies[cachepolicy].policy);
+		return 0;
+	}
+
+	if (selected != cachepolicy) {
+		unsigned long cr = __clear_cr(cache_policies[selected].cr_mask);
+		cachepolicy = selected;
+		flush_cache_all();
+		set_cr(cr);
 	}
-	flush_cache_all();
-	set_cr(cr);
 	return 0;
 }
 early_param("cachepolicy", early_cachepolicy);
@@ -385,8 +413,11 @@ static void __init build_mem_type_table(void)
 		cachepolicy = CPOLICY_WRITEBACK;
 		ecc_mask = 0;
 	}
-	if (is_smp())
+
+	if (is_smp() && cachepolicy != CPOLICY_WRITEALLOC) {
+		pr_warn("Forcing write-allocate cache policy for SMP\n");
 		cachepolicy = CPOLICY_WRITEALLOC;
+	}
 
 	/*
 	 * Strip out features not present on earlier architectures.