path: root/arch/xtensa/mm/cache.c
author    Max Filippov <jcmvbkbc@gmail.com>    2014-07-21 10:54:11 -0400
committer Max Filippov <jcmvbkbc@gmail.com>    2014-08-14 03:59:20 -0400
commit    a91902db2990909ea5e6b110811b448f2e8f1571 (patch)
tree      133c118a40292c1c4480acaaa1df1add1b70eaf0 /arch/xtensa/mm/cache.c
parent    7128039fe2dd3d59da9e4ffa036f3aaa3ba87b9f (diff)
xtensa: implement clear_user_highpage and copy_user_highpage
The existing clear_user_page and copy_user_page cannot be used with highmem because they calculate the physical page address from its virtual address, and do so incorrectly when a high memory page is mapped with kmap_atomic. kmap is also unnecessary, as the userspace mapping color is most likely different from the kmapped color anyway.

Provide clear_user_highpage and copy_user_highpage functions that determine whether a temporary mapping is needed for the pages. Move most of the logic of the former clear_user_page and copy_user_page to xtensa/mm/cache.c, leaving only temporary mapping setup, invalidation, and the actual clearing/copying in xtensa/mm/misc.S. Rename those functions to clear_page_alias and copy_page_alias.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
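For background, the DCACHE_ALIAS_EQ() check this patch relies on compares the cache "color" of two addresses: when the data cache way size exceeds the page size, two mappings share cache lines only if their page-number bits below the way size are equal. A minimal stand-alone sketch of that test, with assumed values for the configuration-dependent constants (the real definitions are derived from the core configuration in arch/xtensa/include/asm/page.h):

	/* Sketch only: the way and page sizes below are assumed values;
	 * on real hardware both come from the Xtensa core configuration. */
	#define WAY_SIZE_ASSUMED	0x2000UL	/* assumed 8 KiB dcache way */
	#define PAGE_SIZE_ASSUMED	0x1000UL	/* assumed 4 KiB page */
	#define ALIAS_MASK_ASSUMED	(WAY_SIZE_ASSUMED - PAGE_SIZE_ASSUMED)

	/* Two addresses alias in a virtually indexed cache iff their
	 * "color" bits (page-number bits below the way size) match. */
	static inline int dcache_alias_eq(unsigned long a, unsigned long b)
	{
		return ((a ^ b) & ALIAS_MASK_ASSUMED) == 0;
	}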
Diffstat (limited to 'arch/xtensa/mm/cache.c')
-rw-r--r--  arch/xtensa/mm/cache.c | 63
1 file changed, 63 insertions(+), 0 deletions(-)
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 63cbb867dadd..96aea6624318 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -63,6 +63,69 @@
 #error "HIGHMEM is not supported on cores with aliasing cache."
 #endif
 
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+static inline void kmap_invalidate_coherent(struct page *page,
+					    unsigned long vaddr)
+{
+	if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
+		unsigned long kvaddr;
+
+		if (!PageHighMem(page)) {
+			kvaddr = (unsigned long)page_to_virt(page);
+
+			__invalidate_dcache_page(kvaddr);
+		} else {
+			kvaddr = TLBTEMP_BASE_1 +
+				(page_to_phys(page) & DCACHE_ALIAS_MASK);
+
+			__invalidate_dcache_page_alias(kvaddr,
+						       page_to_phys(page));
+		}
+	}
+}
+
+static inline void *coherent_kvaddr(struct page *page, unsigned long base,
+				    unsigned long vaddr, unsigned long *paddr)
+{
+	if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
+		*paddr = page_to_phys(page);
+		return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
+	} else {
+		*paddr = 0;
+		return page_to_virt(page);
+	}
+}
+
+void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	unsigned long paddr;
+	void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
+
+	pagefault_disable();
+	kmap_invalidate_coherent(page, vaddr);
+	set_bit(PG_arch_1, &page->flags);
+	clear_page_alias(kvaddr, paddr);
+	pagefault_enable();
+}
+
+void copy_user_highpage(struct page *dst, struct page *src,
+			unsigned long vaddr, struct vm_area_struct *vma)
+{
+	unsigned long dst_paddr, src_paddr;
+	void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
+					  &dst_paddr);
+	void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
+					  &src_paddr);
+
+	pagefault_disable();
+	kmap_invalidate_coherent(dst, vaddr);
+	set_bit(PG_arch_1, &dst->flags);
+	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
+	pagefault_enable();
+}
+
+#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
+
 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
 
 /*
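To make the address arithmetic in coherent_kvaddr() concrete, here is a small stand-alone sketch of how the temporary mapping address is formed from the user virtual address; the base and mask values are assumed, since both TLBTEMP_BASE_1 and DCACHE_ALIAS_MASK depend on the core configuration:

	#include <stdio.h>

	int main(void)
	{
		unsigned long base  = 0xc0000000UL; /* assumed TLBTEMP_BASE_1 */
		unsigned long mask  = 0x1000UL;     /* assumed DCACHE_ALIAS_MASK */
		unsigned long vaddr = 0x40035000UL; /* example user fault address */

		/* The temporary kernel mapping inherits the user address's
		 * color bits, so the kernel-side clear/copy touches the same
		 * cache lines the user mapping will later hit. */
		printf("temp kvaddr = 0x%lx\n", base + (vaddr & mask));
		return 0;
	}

With these values the page is mapped at 0xc0001000, the same color as the user mapping at 0x40035000, which is why the clear/copy can go through the temporary mapping without a kmap.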