author		Chris Dearman <chris@mips.com>	2007-09-18 19:58:24 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2008-04-28 12:14:25 -0400
commit		351336929ccf222ae38ff0cb7a8dd5fd5c6236a0 (patch)
tree		d9e9253bd30cf011d248ad3b63761a9a725e5cb0 /arch/mips/mm/cache.c
parent		bec5052743ec8ae4c5669918cf9b130bf15709a2 (diff)
[MIPS] Allow setting of the cache attribute at run time.
Slightly tacky, but there is a precedent in the sparc architecture code.

Signed-off-by: Chris Dearman <chris@mips.com>
Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
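The motivation is easier to see from the consuming side: once the cache coherency attribute (CCA) is chosen at run time, the MIPS PAGE_* protections are no longer compile-time constants, so protection_map[] cannot be statically initialised and has to be filled in from cpu_cache_init(). A minimal sketch of what the pgtable.h side is assumed to look like (the exact _PAGE_* flag mix is not part of this diff):

/*
 * Sketch only, not taken from this commit: the page protections are
 * assumed to fold in the run-time cacheability default instead of a
 * hard-coded cache attribute.
 */
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _page_cachable_default)

With that, setup_protection_map() below simply re-evaluates these macros once _page_cachable_default is known and writes the results into the generic protection_map[] table, which is indexed by the VM_SHARED/VM_EXEC/VM_WRITE/VM_READ combination.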
Diffstat (limited to 'arch/mips/mm/cache.c')
-rw-r--r--	arch/mips/mm/cache.c	31
1 file changed, 23 insertions(+), 8 deletions(-)
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index f5903679ee6a..034e8506f6ea 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -130,8 +130,28 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
 	}
 }
 
-static char cache_panic[] __cpuinitdata =
-	"Yeee, unsupported cache architecture.";
+unsigned long _page_cachable_default;
+EXPORT_SYMBOL_GPL(_page_cachable_default);
+
+static inline void setup_protection_map(void)
+{
+	protection_map[0] = PAGE_NONE;
+	protection_map[1] = PAGE_READONLY;
+	protection_map[2] = PAGE_COPY;
+	protection_map[3] = PAGE_COPY;
+	protection_map[4] = PAGE_READONLY;
+	protection_map[5] = PAGE_READONLY;
+	protection_map[6] = PAGE_COPY;
+	protection_map[7] = PAGE_COPY;
+	protection_map[8] = PAGE_NONE;
+	protection_map[9] = PAGE_READONLY;
+	protection_map[10] = PAGE_SHARED;
+	protection_map[11] = PAGE_SHARED;
+	protection_map[12] = PAGE_READONLY;
+	protection_map[13] = PAGE_READONLY;
+	protection_map[14] = PAGE_SHARED;
+	protection_map[15] = PAGE_SHARED;
+}
 
 void __devinit cpu_cache_init(void)
 {
@@ -139,34 +159,29 @@ void __devinit cpu_cache_init(void)
 		extern void __weak r3k_cache_init(void);
 
 		r3k_cache_init();
-		return;
 	}
 	if (cpu_has_6k_cache) {
 		extern void __weak r6k_cache_init(void);
 
 		r6k_cache_init();
-		return;
 	}
 	if (cpu_has_4k_cache) {
 		extern void __weak r4k_cache_init(void);
 
 		r4k_cache_init();
-		return;
 	}
 	if (cpu_has_8k_cache) {
 		extern void __weak r8k_cache_init(void);
 
 		r8k_cache_init();
-		return;
 	}
 	if (cpu_has_tx39_cache) {
 		extern void __weak tx39_cache_init(void);
 
 		tx39_cache_init();
-		return;
 	}
 
-	panic(cache_panic);
+	setup_protection_map();
 }
 
 int __weak __uncached_access(struct file *file, unsigned long addr)
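The "at run time" half of the title is not visible here, since this view is limited to arch/mips/mm/cache.c. One plausible shape for the missing piece, sketched under the assumption of a cca= boot parameter; the helper name mips_set_cache_default() is made up for illustration, and the real patch may wire this into the per-CPU cache_init paths instead:

/*
 * Sketch only -- not taken from this diff.  A boot parameter overrides
 * the cache coherency attribute; otherwise whatever is already set in
 * the CP0 Config register is kept.
 */
static int cca = -1;

static int __init cca_setup(char *str)
{
	get_option(&str, &cca);
	return 1;
}
__setup("cca=", cca_setup);

static void mips_set_cache_default(void)	/* hypothetical helper */
{
	if (cca < 0 || cca > 7)
		cca = read_c0_config() & CONF_CM_CMASK;

	_page_cachable_default = cca << _CACHE_SHIFT;
	change_c0_config(CONF_CM_CMASK, cca);
}

Note also that cpu_cache_init() now falls through the cpu_has_*_cache checks instead of returning early, so setup_protection_map() runs exactly once at the end, after the selected cache_init has had a chance to establish _page_cachable_default.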