Diffstat (limited to 'arch/mips')
-rw-r--r-- arch/mips/mm/c-r4k.c | 22 +++++++++++++++++++++-
1 file changed, 21 insertions(+), 1 deletion(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 6e99665ae860..c43f4b26a690 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -618,15 +618,35 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 	if (cpu_has_inclusive_pcaches) {
 		if (size >= scache_size)
 			r4k_blast_scache();
-		else
+		else {
+			unsigned long lsize = cpu_scache_line_size();
+			unsigned long almask = ~(lsize - 1);
+
+			/*
+			 * There is no clearly documented alignment requirement
+			 * for the cache instruction on MIPS processors and
+			 * some processors, among them the RM5200 and RM7000
+			 * QED processors will throw an address error for cache
+			 * hit ops with insufficient alignment. Solved by
+			 * aligning the address to cache line size.
+			 */
+			cache_op(Hit_Writeback_Inv_SD, addr & almask);
+			cache_op(Hit_Writeback_Inv_SD,
+				 (addr + size - 1) & almask);
 			blast_inv_scache_range(addr, addr + size);
+		}
 		return;
 	}
 
 	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
 		r4k_blast_dcache();
 	} else {
+		unsigned long lsize = cpu_dcache_line_size();
+		unsigned long almask = ~(lsize - 1);
+
 		R4600_HIT_CACHEOP_WAR_IMPL;
+		cache_op(Hit_Writeback_Inv_D, addr & almask);
+		cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask);
 		blast_inv_dcache_range(addr, addr + size);
 	}
 
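The whole patch rests on one piece of mask arithmetic: with a power-of-two line size, almask = ~(lsize - 1) rounds any address down to its cache line boundary, so touching addr & almask and (addr + size - 1) & almask writes back and invalidates the possibly partial first and last lines with correctly aligned addresses before the bulk range op runs. What follows is a minimal user-space sketch of that arithmetic only; touch_line() and dma_cache_inv_sketch() are hypothetical stand-ins for cache_op(Hit_Writeback_Inv_*, ...) and r4k_dma_cache_inv(), not kernel APIs.

/*
 * Minimal sketch of the line-alignment arithmetic used above,
 * assuming a power-of-two cache line size as on MIPS caches.
 * touch_line() and dma_cache_inv_sketch() are hypothetical
 * stand-ins, not kernel functions.
 */
#include <stdio.h>

static void touch_line(unsigned long addr)
{
	/* Stands in for a hit-type writeback-invalidate cache op. */
	printf("cache op on line at 0x%lx\n", addr);
}

static void dma_cache_inv_sketch(unsigned long addr, unsigned long size,
				 unsigned long lsize)
{
	/* lsize must be a power of two for this mask to align down. */
	unsigned long almask = ~(lsize - 1);

	/*
	 * Touch the first and last lines of the range at line-aligned
	 * addresses, mirroring the patch: this sidesteps the address
	 * error some CPUs (RM5200/RM7000) raise on misaligned hit ops.
	 */
	touch_line(addr & almask);
	touch_line((addr + size - 1) & almask);
}

int main(void)
{
	/*
	 * With a 32-byte line, 0x1234 aligns down to 0x1220 and the
	 * last byte of the 100-byte range (0x1297) aligns to 0x1280.
	 */
	dma_cache_inv_sketch(0x1234, 100, 32);
	return 0;
}

Note that the patch applies the same idea twice, once with cpu_scache_line_size()/Hit_Writeback_Inv_SD for the secondary cache and once with cpu_dcache_line_size()/Hit_Writeback_Inv_D for the D-cache; only the line size and cache op differ.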