author	John David Anglin <dave.anglin@bell.net>	2017-03-11 18:03:34 -0500
committer	Helge Deller <deller@gmx.de>	2017-03-15 15:57:33 -0400
commit	316ec0624f951166daedbe446988ef92ae72b59f (patch)
tree	a8244e961645a0be283a7a451e32756a507ac01d
parent	5f655322b1ba4bd46e26e307d04098f9c84df764 (diff)
parisc: Optimize flush_kernel_vmap_range and invalidate_kernel_vmap_range
The previously submitted patch did not resolve the random segmentation
faults observed on the phantom buildd system. There are still unresolved
problems with the Debian 4.8 and 4.9 kernels on C8000.

The attached patch removes the flush of the offset map pages and does a
whole data cache flush for large ranges. No other arch flushes the offset
map in these routines as far as I can tell.

I have not observed any random segmentation faults on rp3440 in two weeks
of testing with 4.10.0 and 4.10.1.

Signed-off-by: John David Anglin <dave.anglin@bell.net>
Cc: stable@vger.kernel.org # v4.8+
Signed-off-by: Helge Deller <deller@gmx.de>
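For context, a minimal, hypothetical sketch of how a driver is expected to pair
these two routines around device I/O to a vmalloc'd buffer; the example function,
buffer size, and the device-I/O step are illustrative and not part of this patch:

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>	/* flush_kernel_vmap_range(), invalidate_kernel_vmap_range() */

/* Hypothetical usage sketch, not from this patch. */
static void vmap_io_example(void)
{
	int size = 4 * PAGE_SIZE;	/* hypothetical buffer size */
	void *buf = vmalloc(size);

	if (!buf)
		return;

	/* CPU wrote through the vmap alias: flush dirty lines so the
	 * device sees the data in memory. */
	memset(buf, 0x5a, size);
	flush_kernel_vmap_range(buf, size);

	/* ... device reads and then writes the underlying pages ... */

	/* Device wrote to memory: invalidate the vmap alias so the CPU
	 * does not read stale cache lines. */
	invalidate_kernel_vmap_range(buf, size);

	vfree(buf);
}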
-rw-r--r--	arch/parisc/include/asm/cacheflush.h	23
-rw-r--r--	arch/parisc/kernel/cache.c	22
2 files changed, 24 insertions(+), 21 deletions(-)
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 19c9c3c5f267..c7e15cc5c668 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -43,28 +43,9 @@ static inline void flush_kernel_dcache_page(struct page *page)
 
 #define flush_kernel_dcache_range(start,size) \
 	flush_kernel_dcache_range_asm((start), (start)+(size));
-/* vmap range flushes and invalidates. Architecturally, we don't need
- * the invalidate, because the CPU should refuse to speculate once an
- * area has been flushed, so invalidate is left empty */
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
-	unsigned long start = (unsigned long)vaddr;
-
-	flush_kernel_dcache_range_asm(start, start + size);
-}
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
-	unsigned long start = (unsigned long)vaddr;
-	void *cursor = vaddr;
 
-	for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
-		struct page *page = vmalloc_to_page(cursor);
-
-		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-			flush_kernel_dcache_page(page);
-	}
-	flush_kernel_dcache_range_asm(start, start + size);
-}
+void flush_kernel_vmap_range(void *vaddr, int size);
+void invalidate_kernel_vmap_range(void *vaddr, int size);
 
 #define flush_cache_vmap(start, end)		flush_cache_all()
 #define flush_cache_vunmap(start, end)		flush_cache_all()
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 0dc72d5de861..c32a09095216 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -616,3 +616,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 	}
 }
+
+void flush_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	if ((unsigned long)size > parisc_cache_flush_threshold)
+		flush_data_cache();
+	else
+		flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(flush_kernel_vmap_range);
+
+void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	if ((unsigned long)size > parisc_cache_flush_threshold)
+		flush_data_cache();
+	else
+		flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(invalidate_kernel_vmap_range);