| author | Kevin Cernekee <cernekee@gmail.com> | 2010-09-07 00:03:46 -0400 |
|---|---|---|
| committer | Ralf Baechle <ralf@linux-mips.org> | 2011-07-25 12:26:53 -0400 |
| commit | d0023c4a0af1ff16fe183257682025bfcc068e85 (patch) | |
| tree | 3773ba49525c135e13423ec5fc5df0bf80bbdd9f /arch/mips | |
| parent | b6da0ffb09ad4468e6749488909f04f1efac5de3 (diff) | |
MIPS: Add SYNC after cacheflush
On processors with deep write buffers, it is likely that many cycles
will pass between a CACHE instruction and the time the data actually
gets written out to DRAM. Add a SYNC instruction to ensure that the
buffers get emptied before the flush functions return.
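For readers unfamiliar with the MIPS cache maintenance pattern, the sketch below illustrates what "flush, then SYNC" means in practice: write the affected lines back with CACHE ops, then issue SYNC so buffered writes are pushed out toward memory before the routine returns. This is a minimal, self-contained illustration, not the kernel's r4k code; the 32-byte line size, the 0x15 (Hit_Writeback_Inv_D) encoding written inline, and the function names are assumptions made for this sketch only.

```c
/* Assumed D-cache line size for this sketch; real code queries the CPU. */
#define DCACHE_LINE_SIZE 32UL

/* Drain ordering point: SYNC before returning so previously issued
 * cached writes are not still sitting in a deep write buffer. */
static inline void mips_sync(void)
{
	__asm__ __volatile__("sync" : : : "memory");
}

/* Write back and invalidate [addr, addr + size) from the D-cache,
 * then SYNC so the flushed data has actually left the write buffer
 * by the time this function returns. */
static void dma_cache_wback_inv_sketch(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	for (addr &= ~(DCACHE_LINE_SIZE - 1); addr < end; addr += DCACHE_LINE_SIZE)
		/* 0x15 == Hit_Writeback_Inv_D on MIPS32/MIPS64 */
		__asm__ __volatile__("cache 0x15, 0(%0)" : : "r"(addr) : "memory");

	/* Without this barrier the CACHE ops can retire while the evicted
	 * data is still buffered on its way to DRAM. */
	mips_sync();
}
```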
Actual problem seen in the wild:
1) dma_alloc_coherent() allocates cached memory
2) memset() is called to clear the new pages
3) dma_cache_wback_inv() is called to flush the zero data out to memory
4) dma_alloc_coherent() returns an uncached (kseg1) pointer to the
freshly allocated pages
5) Caller writes data through the kseg1 pointer
6) Buffered writeback data finally gets flushed out to DRAM
7) Part of caller's data is inexplicably zeroed out
This patch adds SYNC between steps 3 and 4, which fixed the problem.
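To make the failure sequence concrete, here is a simplified, hypothetical sketch of a dma_alloc_coherent()-style path on a non-coherent MIPS platform. The helpers alloc_zeroed_cached_pages() and uncached_alias() are invented names for illustration only; dma_cache_wback_inv() corresponds to the real MIPS interface, and its prototype is repeated here just so the sketch stands alone.

```c
#include <stddef.h>

/*
 * Hypothetical declarations so this sketch compiles on its own; on a
 * real non-coherent MIPS kernel, dma_cache_wback_inv() comes from the
 * arch headers and the other two helpers do not exist under these names.
 */
void *alloc_zeroed_cached_pages(size_t size);                    /* steps 1-2 */
void  dma_cache_wback_inv(unsigned long addr, unsigned long size); /* step 3 */
void *uncached_alias(void *cached);                              /* step 4 */

void *coherent_alloc_sketch(size_t size)
{
	/* Steps 1-2: allocate the pages and zero them through a cached mapping. */
	void *cached = alloc_zeroed_cached_pages(size);

	/* Step 3: write the zeroes back out of the D-cache. */
	dma_cache_wback_inv((unsigned long)cached, size);

	/*
	 * Bug window: without a SYNC here, the zero lines can still be
	 * parked in the write buffer while the caller writes real data
	 * through the uncached alias (step 5); when the buffer finally
	 * drains (step 6), the stale zeroes land on top of that data
	 * (step 7).  The patch puts __sync() inside the flush routines
	 * themselves, closing this window for every caller.
	 */

	/* Step 4: return an uncached (kseg1) alias to the same pages. */
	return uncached_alias(cached);
}
```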
Signed-off-by: Kevin Cernekee <cernekee@gmail.com>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork:
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips')

 arch/mips/mm/c-r4k.c (-rw-r--r--) | 4 ++++
 1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index eeb642e4066e..b9aabb998a32 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -604,6 +604,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 			r4k_blast_scache();
 		else
 			blast_scache_range(addr, addr + size);
+		__sync();
 		return;
 	}
 
@@ -620,6 +621,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 	}
 
 	bc_wback_inv(addr, size);
+	__sync();
 }
 
 static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
@@ -647,6 +649,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 				 (addr + size - 1) & almask);
 		blast_inv_scache_range(addr, addr + size);
 	}
+	__sync();
 	return;
 }
 
@@ -663,6 +666,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 	}
 
 	bc_inv(addr, size);
+	__sync();
 }
 #endif /* CONFIG_DMA_NONCOHERENT */
 