author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2008-10-31 12:32:19 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2008-11-27 18:53:48 -0500
commit		303c6443659bc1dc911356f5de149f48ff1d97b8 (patch)
tree		75da0aef28ec8e843cdeb24c96349bdf812e2740 /arch/arm/mm/copypage-v6.c
parent		063b0a4207e43acbeff3d4b09f43e750e0212b48 (diff)
[ARM] clearpage: provide our own clear_user_highpage()
For similar reasons as copy_user_page(), we want to avoid the
additional kmap_atomic if it's unnecessary.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
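For context, the generic clear_user_highpage() fallback in include/linux/highmem.h (roughly, as it looked in kernels of this era) unconditionally wraps the clear in a kmap_atomic()/kunmap_atomic() pair. On the aliasing-cache path that mapping is never used, because the page is remapped at a cache-colour-matched address instead, so providing our own clear_user_highpage() lets that path skip the kmap entirely. A minimal sketch of the generic helper, for comparison with the per-CPU hooks in the diff below:

static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	/* always maps the page, even when the CPU-specific clear won't use it */
	void *addr = kmap_atomic(page, KM_USER0);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr, KM_USER0);
}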
Diffstat (limited to 'arch/arm/mm/copypage-v6.c')
-rw-r--r--	arch/arm/mm/copypage-v6.c	23
1 files changed, 9 insertions, 14 deletions
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 2ea75d0f5048..4127a7bddfe5 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -49,9 +49,11 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
  * Clear the user page. No aliasing to deal with so we can just
  * attack the kernel's existing mapping of this page.
  */
-static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
 {
+	void *kaddr = kmap_atomic(page, KM_USER0);
 	clear_page(kaddr);
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 /*
@@ -107,20 +109,13 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
  * so remap the kernel page into the same cache colour as the user
  * page.
  */
-static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
 {
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long to = to_address + (offset << PAGE_SHIFT);
 
-	/*
-	 * Discard data in the kernel mapping for the new page
-	 * FIXME: needs this MCRR to be supported.
-	 */
-	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
-	   :
-	   : "r" (kaddr),
-	     "r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES)
-	   : "cc");
+	/* FIXME: not highmem safe */
+	discard_old_kernel_data(page_address(page));
 
 	/*
 	 * Now clear the page using the same cache colour as
@@ -128,7 +123,7 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
 	flush_tlb_kernel_page(to);
 	clear_page((void *)to);
 
@@ -136,14 +131,14 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 }
 
 struct cpu_user_fns v6_user_fns __initdata = {
-	.cpu_clear_user_page = v6_clear_user_page_nonaliasing,
+	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
 	.cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing,
 };
 
 static int __init v6_userpage_init(void)
 {
 	if (cache_is_vipt_aliasing()) {
-		cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
+		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
 		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
 	}
 