author     Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-17 16:15:55 -0500
committer  Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-17 16:15:55 -0500
commit     8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree       a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /arch/mips/mm/c-octeon.c
parent     406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'arch/mips/mm/c-octeon.c')
-rw-r--r--  arch/mips/mm/c-octeon.c  96
1 file changed, 33 insertions, 63 deletions
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 6ec04daf423..16c4d256b76 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -5,7 +5,6 @@
  *
  * Copyright (C) 2005-2007 Cavium Networks
  */
-#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -22,14 +21,13 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/r4kcache.h>
-#include <asm/traps.h>
+#include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/war.h>
 
 #include <asm/octeon/octeon.h>
 
 unsigned long long cache_err_dcache[NR_CPUS];
-EXPORT_SYMBOL_GPL(cache_err_dcache);
 
 /**
  * Octeon automatically flushes the dcache on tlb changes, so
@@ -83,9 +81,9 @@ static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
 	if (vma)
 		mask = *mm_cpumask(vma->vm_mm);
 	else
-		mask = *cpu_online_mask;
-	cpumask_clear_cpu(cpu, &mask);
-	for_each_cpu(cpu, &mask)
+		mask = cpu_online_map;
+	cpu_clear(cpu, mask);
+	for_each_cpu_mask(cpu, mask)
 		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);
 
 	preempt_enable();
@@ -171,10 +169,6 @@ static void octeon_flush_cache_page(struct vm_area_struct *vma,
 		octeon_flush_icache_all_cores(vma);
 }
 
-static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
-{
-	BUG();
-}
 
 /**
  * Probe Octeon's caches
@@ -225,7 +219,7 @@ static void __cpuinit probe_octeon(void)
 		break;
 
 	default:
-		panic("Unsupported Cavium Networks CPU type");
+		panic("Unsupported Cavium Networks CPU type\n");
 		break;
 	}
 
@@ -251,11 +245,6 @@ static void __cpuinit probe_octeon(void)
 	}
 }
 
-static void __cpuinit octeon_cache_error_setup(void)
-{
-	extern char except_vec2_octeon;
-	set_handler(0x100, &except_vec2_octeon, 0x80);
-}
 
 /**
  * Setup the Octeon cache flush routines
@@ -263,6 +252,12 @@ static void __cpuinit octeon_cache_error_setup(void)
  */
 void __cpuinit octeon_cache_init(void)
 {
+	extern unsigned long ebase;
+	extern char except_vec2_octeon;
+
+	memcpy((void *)(ebase + 0x100), &except_vec2_octeon, 0x80);
+	octeon_flush_cache_sigtramp(ebase + 0x100);
+
 	probe_octeon();
 
 	shm_align_mask = PAGE_SIZE - 1;
@@ -278,67 +273,43 @@ void __cpuinit octeon_cache_init(void)
 	flush_icache_range = octeon_flush_icache_range;
 	local_flush_icache_range = local_octeon_flush_icache_range;
 
-	__flush_kernel_vmap_range = octeon_flush_kernel_vmap_range;
-
 	build_clear_page();
 	build_copy_page();
-
-	board_cache_error_setup = octeon_cache_error_setup;
 }
 
-/*
+/**
  * Handle a cache error exception
  */
-static RAW_NOTIFIER_HEAD(co_cache_error_chain);
-
-int register_co_cache_error_notifier(struct notifier_block *nb)
-{
-	return raw_notifier_chain_register(&co_cache_error_chain, nb);
-}
-EXPORT_SYMBOL_GPL(register_co_cache_error_notifier);
 
-int unregister_co_cache_error_notifier(struct notifier_block *nb)
+static void cache_parity_error_octeon(int non_recoverable)
 {
-	return raw_notifier_chain_unregister(&co_cache_error_chain, nb);
-}
-EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier);
-
-static void co_cache_error_call_notifiers(unsigned long val)
-{
-	int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL);
-	if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) {
-		u64 dcache_err;
-		unsigned long coreid = cvmx_get_core_num();
-		u64 icache_err = read_octeon_c0_icacheerr();
-
-		if (val) {
-			dcache_err = cache_err_dcache[coreid];
-			cache_err_dcache[coreid] = 0;
-		} else {
-			dcache_err = read_octeon_c0_dcacheerr();
-		}
-
-		pr_err("Core%lu: Cache error exception:\n", coreid);
-		pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
-		if (icache_err & 1) {
-			pr_err("CacheErr (Icache) == %llx\n",
-			       (unsigned long long)icache_err);
-			write_octeon_c0_icacheerr(0);
-		}
-		if (dcache_err & 1) {
-			pr_err("CacheErr (Dcache) == %llx\n",
-			       (unsigned long long)dcache_err);
-		}
+	unsigned long coreid = cvmx_get_core_num();
+	uint64_t icache_err = read_octeon_c0_icacheerr();
+
+	pr_err("Cache error exception:\n");
+	pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
+	if (icache_err & 1) {
+		pr_err("CacheErr (Icache) == %llx\n",
+		       (unsigned long long)icache_err);
+		write_octeon_c0_icacheerr(0);
+	}
+	if (cache_err_dcache[coreid] & 1) {
+		pr_err("CacheErr (Dcache) == %llx\n",
+		       (unsigned long long)cache_err_dcache[coreid]);
+		cache_err_dcache[coreid] = 0;
 	}
+
+	if (non_recoverable)
+		panic("Can't handle cache error: nested exception");
 }
 
-/*
+/**
  * Called when the the exception is recoverable
  */
 
 asmlinkage void cache_parity_error_octeon_recoverable(void)
 {
-	co_cache_error_call_notifiers(0);
+	cache_parity_error_octeon(0);
 }
 
@@ -347,6 +318,5 @@ asmlinkage void cache_parity_error_octeon_recoverable(void)
 
 asmlinkage void cache_parity_error_octeon_non_recoverable(void)
 {
-	co_cache_error_call_notifiers(1);
-	panic("Can't handle cache error: nested exception");
+	cache_parity_error_octeon(1);
 }