author	Greentime Hu <greentime@andestech.com>	2018-06-28 06:03:25 -0400
committer	Greentime Hu <greentime@andestech.com>	2018-07-02 23:11:56 -0400
commit	f706abf188a82c9d961ed267a18ff5cb5e9aace9 (patch)
tree	28db7e7a06825befed2c0c3b903f6fefa1818ede /arch/nds32/include
parent	a78945c357f58665d6a5da8a69e085898e831c70 (diff)
nds32: Implement the icache invalidation APIs, since nds32 cores do not snoop the data cache
This issue was found by Guo Ren. According to Documentation/core-api/cachetlb.rst: "Any necessary cache flushing or other coherency operations that need to occur should happen here. If the processor's instruction cache does not snoop cpu stores, it is very likely that you will need to flush the instruction cache for copy_to_user_page()." and "If the icache does not snoop stores then this routine (flush_icache_range) will need to flush it."
Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Signed-off-by: Greentime Hu <greentime@andestech.com>
Diffstat (limited to 'arch/nds32/include')
-rw-r--r--  arch/nds32/include/asm/cacheflush.h | 9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h
index 10b48f0d8e85..8b26198d51bb 100644
--- a/arch/nds32/include/asm/cacheflush.h
+++ b/arch/nds32/include/asm/cacheflush.h
@@ -8,6 +8,8 @@
 
 #define PG_dcache_dirty PG_arch_1
 
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #ifdef CONFIG_CPU_CACHE_ALIASING
 void flush_cache_mm(struct mm_struct *mm);
 void flush_cache_dup_mm(struct mm_struct *mm);
@@ -34,13 +36,16 @@ void flush_anon_page(struct vm_area_struct *vma,
 void flush_kernel_dcache_page(struct page *page);
 void flush_kernel_vmap_range(void *addr, int size);
 void invalidate_kernel_vmap_range(void *addr, int size);
-void flush_icache_range(unsigned long start, unsigned long end);
-void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&(mapping)->i_pages)
 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages)
 
 #else
 #include <asm-generic/cacheflush.h>
+#undef flush_icache_range
+#undef flush_icache_page
+#undef flush_icache_user_range
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+			     unsigned long addr, int len);
 #endif
 
 #endif /* __NDS32_CACHEFLUSH_H__ */
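
For reference, the header above only declares flush_icache_range() and flush_icache_page(). On a core whose instruction cache does not snoop CPU stores, such an implementation has to write the affected data cache lines back to memory and then invalidate the matching instruction cache lines. The sketch below is only illustrative and is not taken from the nds32 tree; CACHE_LINE_SIZE, cpu_dcache_wb_range() and cpu_icache_inval_range() are assumed placeholder names for the platform's low-level cache primitives.

/*
 * Illustrative sketch only -- not the actual nds32 implementation.
 * cpu_dcache_wb_range() and cpu_icache_inval_range() are hypothetical
 * low-level helpers; CACHE_LINE_SIZE is an assumed L1 line size.
 */
#include <linux/types.h>
#include <linux/irqflags.h>

#define CACHE_LINE_SIZE	32UL

extern void cpu_dcache_wb_range(unsigned long start, unsigned long end);
extern void cpu_icache_inval_range(unsigned long start, unsigned long end);

void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	/* Round the range out to whole cache lines. */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = (end + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);

	local_irq_save(flags);
	/* Push pending stores out of the dcache so icache refills see them. */
	cpu_dcache_wb_range(start, end);
	/* Drop any stale instructions already held in the icache. */
	cpu_icache_inval_range(start, end);
	local_irq_restore(flags);
}

A caller such as the module loader invokes flush_icache_range(addr, addr + len) after writing instructions into memory, which is the contract quoted from cachetlb.rst in the commit message above.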