author	Ralf Baechle <ralf@linux-mips.org>	2012-06-09 15:48:47 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2012-12-13 12:15:27 -0500
commit	a16dad7763420a3b46cff1e703a9070827796cfc (patch)
tree	9eba907d769432ef0704f3eaa64416395eb5c967 /arch/mips
parent	51d943f07d3015998d448f9d8353f618e3fe5873 (diff)
MIPS: Fix potential corruption
Normally r4k_dma_cache_inv should only ever be called with cacheline-aligned addresses.  If, however, it isn't, there is the theoretical possibility of data corruption.  There is no correct way of handling this and, in any case, it should only happen if the DMA API is used incorrectly, so drop the partial first/last-line CACHE operations.

There is a different corruption scenario with these CACHE instructions removed, but again there is no way of handling it correctly and it can be triggered only through incorrect use of the DMA API.  So just get rid of the complexity.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Reported-by: James Rodriguez <jamesr@juniper.net>
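For reference, a minimal user-space sketch of the alignment arithmetic the removed lines relied on (the values below are illustrative; the real code takes the line size from cpu_scache_line_size()/cpu_dcache_line_size()):

#include <stdio.h>

/* Sketch: how the removed code rounded a DMA range down to cache line
 * boundaries.  With a 32-byte line, almask = ~(32 - 1), so addr & almask
 * is the start of the first line the range touches and
 * (addr + size - 1) & almask is the start of the last one; those two
 * (possibly partial) lines were written back and invalidated before the
 * bulk invalidate of the range. */
int main(void)
{
	unsigned long lsize  = 32;		/* example cache line size */
	unsigned long almask = ~(lsize - 1);
	unsigned long addr   = 0x80001234;	/* deliberately misaligned */
	unsigned long size   = 100;

	printf("first line: %#lx\n", addr & almask);		   /* 0x80001220 */
	printf("last line:  %#lx\n", (addr + size - 1) & almask); /* 0x80001280 */
	return 0;
}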
Diffstat (limited to 'arch/mips')
-rw-r--r--	arch/mips/mm/c-r4k.c	11
1 file changed, 0 insertions(+), 11 deletions(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 4c32ede464b5..2b6146241bde 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -632,9 +632,6 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 	if (size >= scache_size)
 		r4k_blast_scache();
 	else {
-		unsigned long lsize = cpu_scache_line_size();
-		unsigned long almask = ~(lsize - 1);
-
 		/*
 		 * There is no clearly documented alignment requirement
 		 * for the cache instruction on MIPS processors and
@@ -643,9 +640,6 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 		 * hit ops with insufficient alignment.  Solved by
 		 * aligning the address to cache line size.
 		 */
-		cache_op(Hit_Writeback_Inv_SD, addr & almask);
-		cache_op(Hit_Writeback_Inv_SD,
-			 (addr + size - 1) & almask);
 		blast_inv_scache_range(addr, addr + size);
 	}
 	__sync();
@@ -655,12 +649,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
 		r4k_blast_dcache();
 	} else {
-		unsigned long lsize = cpu_dcache_line_size();
-		unsigned long almask = ~(lsize - 1);
-
 		R4600_HIT_CACHEOP_WAR_IMPL;
-		cache_op(Hit_Writeback_Inv_D, addr & almask);
-		cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask);
 		blast_inv_dcache_range(addr, addr + size);
 	}
 
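As the commit message notes, the remaining hazard only matters when a driver hands the streaming DMA API a buffer that shares cache lines with unrelated live data.  A hedged sketch of the safe allocation pattern follows (the device setup function, buffer name and size are hypothetical, not from this patch; it assumes kmalloc() honors ARCH_DMA_MINALIGN on DMA-noncoherent MIPS configurations, as kernels of this era do):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

#define RX_BUF_SIZE 2048	/* hypothetical size, a multiple of the cache line size */

static void *rx_buf;
static dma_addr_t rx_dma;

static int rx_buf_setup(struct device *dev)
{
	/* kmalloc() returns memory aligned to at least ARCH_DMA_MINALIGN on
	 * DMA-noncoherent configurations, so the buffer does not share a
	 * cache line with other objects and r4k_dma_cache_inv() only ever
	 * operates on whole lines belonging to this buffer. */
	rx_buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
	if (!rx_buf)
		return -ENOMEM;

	rx_dma = dma_map_single(dev, rx_buf, RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rx_dma)) {
		kfree(rx_buf);
		return -ENOMEM;
	}
	return 0;
}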