about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--arch/parisc/include/asm/cacheflush.h23
-rw-r--r--arch/parisc/kernel/cache.c22
2 files changed, 24 insertions, 21 deletions
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 19c9c3c5f267..c7e15cc5c668 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -43,28 +43,9 @@ static inline void flush_kernel_dcache_page(struct page *page)
 
 #define flush_kernel_dcache_range(start,size) \
 	flush_kernel_dcache_range_asm((start), (start)+(size));
-/* vmap range flushes and invalidates.  Architecturally, we don't need
- * the invalidate, because the CPU should refuse to speculate once an
- * area has been flushed, so invalidate is left empty */
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
-	unsigned long start = (unsigned long)vaddr;
-
-	flush_kernel_dcache_range_asm(start, start + size);
-}
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
-	unsigned long start = (unsigned long)vaddr;
-	void *cursor = vaddr;
 
-	for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
-		struct page *page = vmalloc_to_page(cursor);
+void flush_kernel_vmap_range(void *vaddr, int size);
+void invalidate_kernel_vmap_range(void *vaddr, int size);
-
-		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-			flush_kernel_dcache_page(page);
-	}
-	flush_kernel_dcache_range_asm(start, start + size);
-}
 
 #define flush_cache_vmap(start, end) flush_cache_all()
 #define flush_cache_vunmap(start, end) flush_cache_all()
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 0dc72d5de861..c32a09095216 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -616,3 +616,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 			__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 	}
 }
+
+void flush_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	if ((unsigned long)size > parisc_cache_flush_threshold)
+		flush_data_cache();
+	else
+		flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(flush_kernel_vmap_range);
+
+void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	if ((unsigned long)size > parisc_cache_flush_threshold)
+		flush_data_cache();
+	else
+		flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(invalidate_kernel_vmap_range);