author		Mark Rutland <mark.rutland@arm.com>		2015-01-08 12:07:47 -0500
committer	Catalin Marinas <catalin.marinas@arm.com>	2015-01-13 17:50:47 -0500
commit		26a945caf381225c9a1e68f14826a884c08ea9cb (patch)
tree		e8202e932270cd5149d3a18c9b4d41d874333866 /arch
parent		c26a535b747a56298000c42cdd669514456dfc2d (diff)
arm64: remove broken cachepolicy code
The cachepolicy kernel parameter was intended to aid in the debugging of
coherency issues, but it is fundamentally broken for several reasons:
* On SMP platforms, only the boot CPU's tcr_el1 is altered. Secondary
CPUs may therefore differ w.r.t. the attributes they apply to
MT_NORMAL memory, resulting in a loss of coherency.
* The cache maintenance using flush_dcache_all (based on Set/Way
operations) is not guaranteed to empty a given CPU's cache hierarchy
while said CPU has its caches enabled; it cannot empty the caches of
other coherent PEs, nor is it guaranteed to flush data to the PoC
even when caches are disabled.
* The TLBs are not invalidated around the modification of MAIR_EL1 and
TCR_EL1, as required by the architecture (as both are permitted to be
cached in a TLB; a sketch of the maintenance this would require follows
below). This may result in CPUs using attributes other than those
expected for some memory accesses, resulting in a loss of coherency.
* Exclusive accesses are not architecturally guaranteed to function as
expected on memory marked as Write-Through or Non-Cacheable. Thus
changing the attributes of MT_NORMAL away from the (architecturally
safe) defaults may cause uses of these instructions (e.g. atomics) to
behave erratically.
Given this, the cachepolicy code cannot be used for debugging purposes
as it alone is likely to cause coherency issues. This patch removes the
broken cachepolicy code.
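To make the TLB point concrete, here is a minimal, purely illustrative sketch
(not part of this patch) of the kind of maintenance the architecture would
require if the MT_NORMAL attributes were rewritten at runtime. The helper name
is hypothetical, it assumes the usual arm64 kernel context (MT_NORMAL from the
arm64 memory headers), it only covers the CPU it runs on, and it does nothing
about the Set/Way maintenance or exclusive-access problems above, so it would
not make the parameter safe:

	/*
	 * Hypothetical sketch only: rewrite the MT_NORMAL attribute byte in
	 * MAIR_EL1, then invalidate this CPU's TLB entries, which are
	 * architecturally permitted to cache the old attributes.
	 */
	static void __maybe_unused rewrite_mt_normal_mair(u64 attr)
	{
		u64 tmp;

		asm volatile(
		"	mrs	%0, mair_el1\n"
		"	bfi	%0, %1, %2, #8\n"	/* insert new MT_NORMAL byte */
		"	msr	mair_el1, %0\n"
		"	isb\n"				/* synchronise the register write */
		"	tlbi	vmalle1\n"		/* discard local TLB entries caching old attributes */
		"	dsb	nsh\n"
		"	isb\n"
		: "=&r" (tmp)
		: "r" (attr), "i" (MT_NORMAL * 8)
		: "memory");
	}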
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/mm/mmu.c | 74
1 file changed, 0 insertions(+), 74 deletions(-)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 328638548871..e57c170a91f3 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -45,80 +45,6 @@
 struct page *empty_zero_page;
 EXPORT_SYMBOL(empty_zero_page);
 
-struct cachepolicy {
-	const char	policy[16];
-	u64		mair;
-	u64		tcr;
-};
-
-static struct cachepolicy cache_policies[] __initdata = {
-	{
-		.policy	= "uncached",
-		.mair	= 0x44, /* inner, outer non-cacheable */
-		.tcr	= TCR_IRGN_NC | TCR_ORGN_NC,
-	}, {
-		.policy	= "writethrough",
-		.mair	= 0xaa, /* inner, outer write-through, read-allocate */
-		.tcr	= TCR_IRGN_WT | TCR_ORGN_WT,
-	}, {
-		.policy	= "writeback",
-		.mair	= 0xee, /* inner, outer write-back, read-allocate */
-		.tcr	= TCR_IRGN_WBnWA | TCR_ORGN_WBnWA,
-	}
-};
-
-/*
- * These are useful for identifying cache coherency problems by allowing the
- * cache or the cache and writebuffer to be turned off. It changes the Normal
- * memory caching attributes in the MAIR_EL1 register.
- */
-static int __init early_cachepolicy(char *p)
-{
-	int i;
-	u64 tmp;
-
-	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
-		int len = strlen(cache_policies[i].policy);
-
-		if (memcmp(p, cache_policies[i].policy, len) == 0)
-			break;
-	}
-	if (i == ARRAY_SIZE(cache_policies)) {
-		pr_err("ERROR: unknown or unsupported cache policy: %s\n", p);
-		return 0;
-	}
-
-	flush_cache_all();
-
-	/*
-	 * Modify MT_NORMAL attributes in MAIR_EL1.
-	 */
-	asm volatile(
-	"	mrs	%0, mair_el1\n"
-	"	bfi	%0, %1, %2, #8\n"
-	"	msr	mair_el1, %0\n"
-	"	isb\n"
-	: "=&r" (tmp)
-	: "r" (cache_policies[i].mair), "i" (MT_NORMAL * 8));
-
-	/*
-	 * Modify TCR PTW cacheability attributes.
-	 */
-	asm volatile(
-	"	mrs	%0, tcr_el1\n"
-	"	bic	%0, %0, %2\n"
-	"	orr	%0, %0, %1\n"
-	"	msr	tcr_el1, %0\n"
-	"	isb\n"
-	: "=&r" (tmp)
-	: "r" (cache_policies[i].tcr), "r" (TCR_IRGN_MASK | TCR_ORGN_MASK));
-
-	flush_cache_all();
-
-	return 0;
-}
-early_param("cachepolicy", early_cachepolicy);
-
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 			      unsigned long size, pgprot_t vma_prot)
 {
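For context only: the early_param() registration removed above was what tied
this code to the kernel command line, so with this patch applied a boot
argument such as the following hypothetical example no longer has any effect
on MAIR_EL1 or TCR_EL1:

	cachepolicy=writethrough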