author		Ralf Baechle <ralf@linux-mips.org>	2007-09-27 13:26:43 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2007-10-11 18:46:12 -0400
commit		db813fe5a77d03b29e872da47463d2efbddc3fc2
tree		c97a767a484dddda9842bfad3a3f6a5b24ab66bd /arch/mips
parent		e58d95abb7b3232333ab35a09f7f5b0cd6a19cdb
[MIPS] Avoid indexed cacheops.

On MP configurations it's highly dubious what this code will actually
affect since blasting away cachelines may or may not do the right thing
wrt. cache coherency.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
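The effect of the patch on local_r4k_flush_cache_page() is that every flush now goes through a virtual address that is actually mapped in the current context: the user address itself when the page belongs to the current mm and its PTE is valid, otherwise a temporary kernel mapping obtained with kmap_coherent() (for aliasing D-caches) or kmap_atomic(). The indexed r4k_blast_*_indexed() variants, which operate on cache lines by set index whether or not those lines hold the page's data, are removed. What follows is a minimal stand-alone C sketch of that decision flow, not kernel code: flush_page_at(), map_page_temporarily() and unmap_page() are hypothetical stand-ins for the r4k_blast_*_page() and kmap_*/kunmap_* calls, and 4 KiB pages are assumed.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the r4k_blast_*_page() hit-type flushes. */
static void flush_page_at(unsigned long vaddr)
{
	printf("hit-type cacheop over page at 0x%lx\n", vaddr);
}

/*
 * Hypothetical stand-in for kmap_coherent()/kmap_atomic(): returns a
 * kernel virtual address for the page frame, assuming 4 KiB pages.
 */
static unsigned long map_page_temporarily(unsigned long pfn)
{
	return 0xc0000000UL + (pfn << 12);
}

/* Hypothetical stand-in for kunmap_coherent()/kunmap_atomic(). */
static void unmap_page(unsigned long vaddr)
{
	printf("tear down temporary mapping 0x%lx\n", vaddr);
}

/*
 * Sketch of the flow local_r4k_flush_cache_page() has after this
 * patch: always flush through a virtual address mapped in the
 * current context, never by cache index.
 */
static void flush_cache_page_sketch(unsigned long useraddr, unsigned long pfn,
				    bool page_mapped_in_current_mm)
{
	unsigned long addr = useraddr;
	unsigned long mapped = 0;

	if (!page_mapped_in_current_mm) {
		/* Foreign mm or invalid PTE: flush via a temporary mapping. */
		mapped = map_page_temporarily(pfn);
		addr = mapped;
	}

	flush_page_at(addr);

	if (mapped)
		unmap_page(mapped);
}

int main(void)
{
	flush_cache_page_sketch(0x00400000UL, 0x1234, true);	/* own mm */
	flush_cache_page_sketch(0x00400000UL, 0x1234, false);	/* foreign mm */
	return 0;
}

Hit-type cacheops only act on lines that actually hit for the given address, so on an MP system they cannot blast away unrelated lines the way indexed ops can, which is the coherency concern the commit message raises.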
Diffstat (limited to 'arch/mips')
-rw-r--r--	arch/mips/mm/c-r4k.c	74
1 file changed, 28 insertions(+), 46 deletions(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 738b89803a44..cf48371e5690 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -8,6 +8,7 @@
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  */
 #include <linux/init.h>
+#include <linux/highmem.h>
 #include <linux/kernel.h>
 #include <linux/linkage.h>
 #include <linux/sched.h>
@@ -318,23 +319,6 @@ static void __init r4k_blast_scache_setup(void)
 		r4k_blast_scache = blast_scache128;
 }
 
-/*
- * This is former mm's flush_cache_all() which really should be
- * flush_cache_vunmap these days ...
- */
-static inline void local_r4k_flush_cache_all(void * args)
-{
-	r4k_blast_dcache();
-}
-
-static void r4k_flush_cache_all(void)
-{
-	if (!cpu_has_dc_aliases)
-		return;
-
-	r4k_on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
-}
-
 static inline void local_r4k___flush_cache_all(void * args)
 {
 #if defined(CONFIG_CPU_LOONGSON2)
@@ -423,13 +407,14 @@ static inline void local_r4k_flush_cache_page(void *args)
 	struct flush_cache_page_args *fcp_args = args;
 	struct vm_area_struct *vma = fcp_args->vma;
 	unsigned long addr = fcp_args->addr;
-	unsigned long paddr = fcp_args->pfn << PAGE_SHIFT;
+	struct page *page = pfn_to_page(fcp_args->pfn);
 	int exec = vma->vm_flags & VM_EXEC;
 	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgdp;
 	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep;
+	void *vaddr;
 
 	/*
 	 * If ownes no valid ASID yet, cannot possibly have gotten
@@ -451,43 +436,40 @@ static inline void local_r4k_flush_cache_page(void *args)
 	if (!(pte_val(*ptep) & _PAGE_PRESENT))
 		return;
 
-	/*
-	 * Doing flushes for another ASID than the current one is
-	 * too difficult since stupid R4k caches do a TLB translation
-	 * for every cache flush operation.  So we do indexed flushes
-	 * in that case, which doesn't overly flush the cache too much.
-	 */
-	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
-		if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
-			r4k_blast_dcache_page(addr);
-			if (exec && !cpu_icache_snoops_remote_store)
-				r4k_blast_scache_page(addr);
-		}
-		if (exec)
-			r4k_blast_icache_page(addr);
-
-		return;
+	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
+		vaddr = NULL;
+	else {
+		/*
+		 * Use kmap_coherent or kmap_atomic to do flushes for
+		 * another ASID than the current one.
+		 */
+		if (cpu_has_dc_aliases)
+			vaddr = kmap_coherent(page, addr);
+		else
+			vaddr = kmap_atomic(page, KM_USER0);
+		addr = (unsigned long)vaddr;
 	}
 
-	/*
-	 * Do indexed flush, too much work to get the (possible) TLB refills
-	 * to work correctly.
-	 */
 	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
-		r4k_blast_dcache_page_indexed(cpu_has_pindexed_dcache ?
-					      paddr : addr);
-		if (exec && !cpu_icache_snoops_remote_store) {
-			r4k_blast_scache_page_indexed(paddr);
-		}
+		r4k_blast_dcache_page(addr);
+		if (exec && !cpu_icache_snoops_remote_store)
+			r4k_blast_scache_page(addr);
 	}
 	if (exec) {
-		if (cpu_has_vtag_icache && mm == current->active_mm) {
+		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
 			int cpu = smp_processor_id();
 
 			if (cpu_context(cpu, mm) != 0)
 				drop_mmu_context(mm, cpu);
 		} else
-			r4k_blast_icache_page_indexed(addr);
+			r4k_blast_icache_page(addr);
+	}
+
+	if (vaddr) {
+		if (cpu_has_dc_aliases)
+			kunmap_coherent();
+		else
+			kunmap_atomic(vaddr, KM_USER0);
 	}
 }
 
@@ -1279,7 +1261,7 @@ void __init r4k_cache_init(void)
 					  PAGE_SIZE - 1);
 	else
 		shm_align_mask = PAGE_SIZE-1;
-	flush_cache_all		= r4k_flush_cache_all;
+	flush_cache_all		= cache_noop;
 	__flush_cache_all	= r4k___flush_cache_all;
 	flush_cache_mm		= r4k_flush_cache_mm;
 	flush_cache_page	= r4k_flush_cache_page;
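The final hunk points flush_cache_all at cache_noop: with r4k_flush_cache_all() and its d-cache blast deleted, a full-cache flush has nothing left to do on these CPUs, since aliasing pages are now handled by the hit-type per-page paths above. The MIPS cache layer installs its cache operations as function pointers at boot; below is a compilable sketch of that hook pattern. Only cache_noop and the assignment come from the patch; the pointer name here is illustrative, not the kernel's declaration.

#include <stdio.h>

/* Stand-in for the kernel's cache_noop(): a hook with nothing to do. */
static void cache_noop(void)
{
}

/*
 * Illustrative local pointer; the kernel's flush_cache_all pointer
 * is declared in the MIPS cache layer, not here.
 */
static void (*flush_cache_all_hook)(void);

int main(void)
{
	/* Models what r4k_cache_init() does after this patch. */
	flush_cache_all_hook = cache_noop;

	flush_cache_all_hook();	/* a call is now effectively free */
	puts("flush_cache_all: no-op");
	return 0;
}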