path: root/arch/sh/mm
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--	arch/sh/mm/cache-sh4.c	 4
-rw-r--r--	arch/sh/mm/cache.c	12
2 files changed, 8 insertions, 8 deletions
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 92eb98633ab0..112fea12522a 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -244,7 +244,7 @@ static void sh4_flush_cache_page(void *args)
 		if (map_coherent)
 			vaddr = kmap_coherent(page, address);
 		else
-			vaddr = kmap_atomic(page, KM_USER0);
+			vaddr = kmap_atomic(page);
 
 		address = (unsigned long)vaddr;
 	}
@@ -259,7 +259,7 @@ static void sh4_flush_cache_page(void *args)
 		if (map_coherent)
 			kunmap_coherent(vaddr);
 		else
-			kunmap_atomic(vaddr, KM_USER0);
+			kunmap_atomic(vaddr);
 	}
 }
 
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 5a580ea04429..616966a96cba 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -95,7 +95,7 @@ void copy_user_highpage(struct page *to, struct page *from,
 {
 	void *vfrom, *vto;
 
-	vto = kmap_atomic(to, KM_USER1);
+	vto = kmap_atomic(to);
 
 	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
 	    test_bit(PG_dcache_clean, &from->flags)) {
@@ -103,16 +103,16 @@ void copy_user_highpage(struct page *to, struct page *from,
 		copy_page(vto, vfrom);
 		kunmap_coherent(vfrom);
 	} else {
-		vfrom = kmap_atomic(from, KM_USER0);
+		vfrom = kmap_atomic(from);
 		copy_page(vto, vfrom);
-		kunmap_atomic(vfrom, KM_USER0);
+		kunmap_atomic(vfrom);
 	}
 
 	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
 	    (vma->vm_flags & VM_EXEC))
 		__flush_purge_region(vto, PAGE_SIZE);
 
-	kunmap_atomic(vto, KM_USER1);
+	kunmap_atomic(vto);
 	/* Make sure this page is cleared on other CPU's too before using it */
 	smp_wmb();
 }
@@ -120,14 +120,14 @@ EXPORT_SYMBOL(copy_user_highpage);
 
 void clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *kaddr = kmap_atomic(page, KM_USER0);
+	void *kaddr = kmap_atomic(page);
 
 	clear_page(kaddr);
 
 	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
 		__flush_purge_region(kaddr, PAGE_SIZE);
 
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 EXPORT_SYMBOL(clear_user_highpage);
 
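Note (not part of the commit): the change above drops the KM_* slot argument from kmap_atomic()/kunmap_atomic(), matching the stack-based atomic kmap API in which slots are managed implicitly and mappings are released in reverse order of creation. A minimal illustrative sketch of the before/after calling convention, assuming a kernel with the new API (sketch_copy_page is a hypothetical helper, not from this diff):

/*
 * Illustrative sketch only: the calling-convention change this diff
 * applies.  kmap_atomic() no longer takes a KM_* slot; callers just
 * pass the page and must unmap in reverse order of mapping.
 */
#include <linux/highmem.h>
#include <linux/mm.h>

static void sketch_copy_page(struct page *dst, struct page *src)
{
	void *vto   = kmap_atomic(dst);	/* was: kmap_atomic(dst, KM_USER1) */
	void *vfrom = kmap_atomic(src);	/* was: kmap_atomic(src, KM_USER0) */

	copy_page(vto, vfrom);

	kunmap_atomic(vfrom);		/* was: kunmap_atomic(vfrom, KM_USER0) */
	kunmap_atomic(vto);		/* was: kunmap_atomic(vto, KM_USER1) */
}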