Diffstat (limited to 'arch/xtensa/mm/cache.c')
-rw-r--r--  arch/xtensa/mm/cache.c  77
1 file changed, 68 insertions(+), 9 deletions(-)
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 63cbb867dadd..d75aa1476da7 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -59,9 +59,68 @@
  *
  */
 
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && defined(CONFIG_HIGHMEM)
-#error "HIGHMEM is not supported on cores with aliasing cache."
-#endif
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+static inline void kmap_invalidate_coherent(struct page *page,
+					    unsigned long vaddr)
+{
+	if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
+		unsigned long kvaddr;
+
+		if (!PageHighMem(page)) {
+			kvaddr = (unsigned long)page_to_virt(page);
+
+			__invalidate_dcache_page(kvaddr);
+		} else {
+			kvaddr = TLBTEMP_BASE_1 +
+				(page_to_phys(page) & DCACHE_ALIAS_MASK);
+
+			__invalidate_dcache_page_alias(kvaddr,
+						       page_to_phys(page));
+		}
+	}
+}
+
+static inline void *coherent_kvaddr(struct page *page, unsigned long base,
+				    unsigned long vaddr, unsigned long *paddr)
+{
+	if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
+		*paddr = page_to_phys(page);
+		return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
+	} else {
+		*paddr = 0;
+		return page_to_virt(page);
+	}
+}
+
+void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	unsigned long paddr;
+	void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
+
+	pagefault_disable();
+	kmap_invalidate_coherent(page, vaddr);
+	set_bit(PG_arch_1, &page->flags);
+	clear_page_alias(kvaddr, paddr);
+	pagefault_enable();
+}
+
+void copy_user_highpage(struct page *dst, struct page *src,
+			unsigned long vaddr, struct vm_area_struct *vma)
+{
+	unsigned long dst_paddr, src_paddr;
+	void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
+					  &dst_paddr);
+	void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
+					  &src_paddr);
+
+	pagefault_disable();
+	kmap_invalidate_coherent(dst, vaddr);
+	set_bit(PG_arch_1, &dst->flags);
+	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
+	pagefault_enable();
+}
+
+#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
 
 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
 
@@ -103,7 +162,8 @@ void flush_dcache_page(struct page *page)
 	if (!alias && !mapping)
 		return;
 
-	__flush_invalidate_dcache_page((long)page_address(page));
+	virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
+	__flush_invalidate_dcache_page_alias(virt, phys);
 
 	virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);
 
@@ -168,13 +228,12 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
 
 	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
-
-		unsigned long paddr = (unsigned long) page_address(page);
 		unsigned long phys = page_to_phys(page);
-		unsigned long tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
-
-		__flush_invalidate_dcache_page(paddr);
+		unsigned long tmp;
 
+		tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
+		__flush_invalidate_dcache_page_alias(tmp, phys);
+		tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
 		__flush_invalidate_dcache_page_alias(tmp, phys);
 		__invalidate_icache_page_alias(tmp, phys);
 
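
Background note: on a core whose data-cache way size exceeds the page size, the cache is indexed by virtual-address bits above the page offset, so two mappings of the same physical page can land in different cache lines ("colors") unless those index bits agree. That is the check the hunks above rely on, and it is why each temporary mapping is placed at a window base plus the color bits of the target address. The program below is a minimal userspace sketch of that idea, not kernel code: the EX_* constants and the alias_eq/coherent_alias helpers are invented for the example, standing in for DCACHE_ALIAS_MASK, DCACHE_ALIAS_EQ and TLBTEMP_BASE_1.

/*
 * Illustrative sketch only -- models the cache-color check used by the
 * patch. All names and constants here are hypothetical; real values come
 * from the core's cache configuration.
 */
#include <stdio.h>

#define EX_PAGE_SIZE       0x1000UL   /* assumed 4 KiB pages */
#define EX_DCACHE_WAY_SIZE 0x4000UL   /* assumed 16 KiB per cache way */
/* Color bits: the way-index bits above the page offset. */
#define EX_ALIAS_MASK      (EX_DCACHE_WAY_SIZE - EX_PAGE_SIZE)
/* Hypothetical base of a temporary-mapping window (stand-in for TLBTEMP_BASE_1). */
#define EX_TLBTEMP_BASE    0xc8000000UL

/* Same idea as DCACHE_ALIAS_EQ: do two addresses share a cache color? */
static int alias_eq(unsigned long a, unsigned long b)
{
	return ((a ^ b) & EX_ALIAS_MASK) == 0;
}

/* Pick a kernel alias whose color matches the user virtual address. */
static unsigned long coherent_alias(unsigned long vaddr)
{
	return EX_TLBTEMP_BASE + (vaddr & EX_ALIAS_MASK);
}

int main(void)
{
	unsigned long user_vaddr = 0x2000UL;        /* user page, color 0x2000 */
	unsigned long kernel_direct = 0xd0001000UL; /* direct map, color 0x1000 */
	unsigned long tmp = coherent_alias(user_vaddr);

	printf("direct map matches user color: %d\n",
	       alias_eq(kernel_direct, user_vaddr)); /* 0: would alias badly */
	printf("temp alias 0x%lx matches user color: %d\n",
	       tmp, alias_eq(tmp, user_vaddr));      /* 1: safe to use */
	return 0;
}

The kernel code in the patch goes one step further: coherent_kvaddr only falls back to such a temporary window when the page's own kernel address has the wrong color or the page is in highmem; otherwise it uses the direct mapping and skips the extra TLB work.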