-rw-r--r--  arch/arm/include/asm/page.h      | 23
-rw-r--r--  arch/arm/mm/copypage-feroceon.c  | 23
-rw-r--r--  arch/arm/mm/copypage-v3.c        | 23
-rw-r--r--  arch/arm/mm/copypage-v4mc.c      | 21
-rw-r--r--  arch/arm/mm/copypage-v4wb.c      | 25
-rw-r--r--  arch/arm/mm/copypage-v4wt.c      | 23
-rw-r--r--  arch/arm/mm/copypage-v6.c        | 61
-rw-r--r--  arch/arm/mm/copypage-xsc3.c      | 24
-rw-r--r--  arch/arm/mm/copypage-xscale.c    | 19
-rw-r--r--  arch/arm/mm/proc-syms.c          |  2
10 files changed, 161 insertions(+), 83 deletions(-)
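
In short: the per-CPU copy_user_page() hook, which took already-mapped kernel virtual addresses, is replaced by a copy_user_highpage() hook that takes struct page pointers, letting each implementation map the pages itself with kmap_atomic(). The new hook, as introduced in the page.h hunk below:

struct cpu_user_fns {
	void (*cpu_clear_user_page)(void *p, unsigned long user);
	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
			unsigned long vaddr);
};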
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index bed1c0a00368..1581b8cf8f33 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -108,30 +108,35 @@
 #error Unknown user operations model
 #endif
 
+struct page;
+
 struct cpu_user_fns {
 	void (*cpu_clear_user_page)(void *p, unsigned long user);
-	void (*cpu_copy_user_page)(void *to, const void *from,
-				   unsigned long user);
+	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
+			unsigned long vaddr);
 };
 
 #ifdef MULTI_USER
 extern struct cpu_user_fns cpu_user;
 
 #define __cpu_clear_user_page		cpu_user.cpu_clear_user_page
-#define __cpu_copy_user_page		cpu_user.cpu_copy_user_page
+#define __cpu_copy_user_highpage	cpu_user.cpu_copy_user_highpage
 
 #else
 
 #define __cpu_clear_user_page		__glue(_USER,_clear_user_page)
-#define __cpu_copy_user_page		__glue(_USER,_copy_user_page)
+#define __cpu_copy_user_highpage	__glue(_USER,_copy_user_highpage)
 
 extern void __cpu_clear_user_page(void *p, unsigned long user);
-extern void __cpu_copy_user_page(void *to, const void *from,
-				 unsigned long user);
+extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
+			unsigned long vaddr);
 #endif
 
 #define clear_user_page(addr,vaddr,pg)	 __cpu_clear_user_page(addr, vaddr)
-#define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr)
+
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+#define copy_user_highpage(to,from,vaddr,vma)	\
+	__cpu_copy_user_highpage(to, from, vaddr)
 
 #define clear_page(page)	memzero((void *)(page), PAGE_SIZE)
 extern void copy_page(void *to, const void *from);
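
Defining __HAVE_ARCH_COPY_USER_HIGHPAGE here suppresses the generic copy_user_highpage() that include/linux/highmem.h would otherwise provide. For reference, the generic fallback of this era is roughly the following kmap_atomic() wrapper around copy_user_page() (a sketch for orientation, not part of this patch; exact details may differ):

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);	/* map source page */
	vto = kmap_atomic(to, KM_USER1);	/* map destination page */
	copy_user_page(vto, vfrom, vaddr, to);	/* per-arch copy of one user page */
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}

The ARM implementations below open-code the same mapping pattern, which lets the aliasing variants work from struct page directly instead of assuming a lowmem kernel address.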
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index c8347670ab00..edd71686b8df 100644
--- a/arch/arm/mm/copypage-feroceon.c
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -7,15 +7,14 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
- * This handles copy_user_page and clear_user_page on Feroceon
+ * This handles copy_user_highpage and clear_user_page on Feroceon
  * more optimally than the generic implementations.
  */
 #include <linux/init.h>
+#include <linux/highmem.h>
 
-#include <asm/page.h>
-
-void __attribute__((naked))
-feroceon_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+static void __attribute__((naked))
+feroceon_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
 	stmfd	sp!, {r4-r9, lr}		\n\
@@ -68,6 +67,18 @@ feroceon_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 	: "I" (PAGE_SIZE));
 }
 
+void feroceon_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	feroceon_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
 void __attribute__((naked))
 feroceon_clear_user_page(void *kaddr, unsigned long vaddr)
 {
@@ -95,6 +106,6 @@ feroceon_clear_user_page(void *kaddr, unsigned long vaddr)
 
 struct cpu_user_fns feroceon_user_fns __initdata = {
 	.cpu_clear_user_page	= feroceon_clear_user_page,
-	.cpu_copy_user_page	= feroceon_copy_user_page,
+	.cpu_copy_user_highpage = feroceon_copy_user_highpage,
 };
 
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index 184911089e6c..52df8f04d3f7 100644
--- a/arch/arm/mm/copypage-v3.c
+++ b/arch/arm/mm/copypage-v3.c
@@ -8,16 +8,15 @@
  * published by the Free Software Foundation.
  */
 #include <linux/init.h>
-
-#include <asm/page.h>
+#include <linux/highmem.h>
 
 /*
- * ARMv3 optimised copy_user_page
+ * ARMv3 optimised copy_user_highpage
  *
  * FIXME: do we need to handle cache stuff...
  */
-void __attribute__((naked))
-v3_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+static void __attribute__((naked))
+v3_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\n\
 	stmfd	sp!, {r4, lr}			@	2\n\
@@ -38,6 +37,18 @@ v3_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 	: "r" (kfrom), "r" (kto), "I" (PAGE_SIZE / 64));
 }
 
+void v3_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	v3_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
 /*
  * ARMv3 optimised clear_user_page
  *
@@ -65,5 +76,5 @@ void __attribute__((naked)) v3_clear_user_page(void *kaddr, unsigned long vaddr)
 
 struct cpu_user_fns v3_user_fns __initdata = {
 	.cpu_clear_user_page	= v3_clear_user_page,
-	.cpu_copy_user_page	= v3_copy_user_page,
+	.cpu_copy_user_highpage	= v3_copy_user_highpage,
 };
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 8d33e2549344..a7dc838fee76 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -15,8 +15,8 @@
  */
 #include <linux/init.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 
-#include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
@@ -33,7 +33,7 @@
 static DEFINE_SPINLOCK(minicache_lock);
 
 /*
- * ARMv4 mini-dcache optimised copy_user_page
+ * ARMv4 mini-dcache optimised copy_user_highpage
  *
  * We flush the destination cache lines just before we write the data into the
  * corresponding address.  Since the Dcache is read-allocate, this removes the
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(minicache_lock);
  *
  * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
  * instruction.  If your processor does not supply this, you have to write your
- * own copy_user_page that does the right thing.
+ * own copy_user_highpage that does the right thing.
  */
 static void __attribute__((naked))
 mc_copy_user_page(void *from, void *to)
@@ -68,21 +68,24 @@ mc_copy_user_page(void *from, void *to)
 	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
 }
 
-void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+void v4_mc_copy_user_highpage(struct page *from, struct page *to,
+	unsigned long vaddr)
 {
-	struct page *page = virt_to_page(kfrom);
+	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-		__flush_dcache_page(page_mapping(page), page);
+	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
 
-	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
+	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
 	flush_tlb_kernel_page(0xffff8000);
 
 	mc_copy_user_page((void *)0xffff8000, kto);
 
 	spin_unlock(&minicache_lock);
+
+	kunmap_atomic(kto, KM_USER1);
 }
 
 /*
@@ -113,5 +116,5 @@ v4_mc_clear_user_page(void *kaddr, unsigned long vaddr)
 
 struct cpu_user_fns v4_mc_user_fns __initdata = {
 	.cpu_clear_user_page	= v4_mc_clear_user_page,
-	.cpu_copy_user_page	= v4_mc_copy_user_page,
+	.cpu_copy_user_highpage = v4_mc_copy_user_highpage,
 };
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index 230210822961..186a68a794a9 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -8,11 +8,10 @@
  * published by the Free Software Foundation.
  */
 #include <linux/init.h>
-
-#include <asm/page.h>
+#include <linux/highmem.h>
 
 /*
- * ARMv4 optimised copy_user_page
+ * ARMv4 optimised copy_user_highpage
  *
  * We flush the destination cache lines just before we write the data into the
  * corresponding address.  Since the Dcache is read-allocate, this removes the
@@ -21,10 +20,10 @@
  *
  * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
  * instruction.  If your processor does not supply this, you have to write your
- * own copy_user_page that does the right thing.
+ * own copy_user_highpage that does the right thing.
  */
-void __attribute__((naked))
-v4wb_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+static void __attribute__((naked))
+v4wb_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
 	stmfd	sp!, {r4, lr}			@ 2\n\
@@ -48,6 +47,18 @@ v4wb_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 	: "I" (PAGE_SIZE / 64));
 }
 
+void v4wb_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	v4wb_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
 /*
  * ARMv4 optimised clear_user_page
  *
@@ -79,5 +90,5 @@ v4wb_clear_user_page(void *kaddr, unsigned long vaddr)
 
 struct cpu_user_fns v4wb_user_fns __initdata = {
 	.cpu_clear_user_page	= v4wb_clear_user_page,
-	.cpu_copy_user_page	= v4wb_copy_user_page,
+	.cpu_copy_user_highpage = v4wb_copy_user_highpage,
 };
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index d8ef39503ff0..86c2cfdbde03 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -11,18 +11,17 @@
  * the only supported cache operation.
  */
 #include <linux/init.h>
-
-#include <asm/page.h>
+#include <linux/highmem.h>
 
 /*
- * ARMv4 optimised copy_user_page
+ * ARMv4 optimised copy_user_highpage
  *
  * Since we have writethrough caches, we don't have to worry about
  * dirty data in the cache.  However, we do have to ensure that
  * subsequent reads are up to date.
  */
-void __attribute__((naked))
-v4wt_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+static void __attribute__((naked))
+v4wt_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
 	stmfd	sp!, {r4, lr}			@ 2\n\
@@ -44,6 +43,18 @@ v4wt_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 	: "I" (PAGE_SIZE / 64));
 }
 
+void v4wt_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	v4wt_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
 /*
  * ARMv4 optimised clear_user_page
  *
@@ -73,5 +84,5 @@ v4wt_clear_user_page(void *kaddr, unsigned long vaddr)
 
 struct cpu_user_fns v4wt_user_fns __initdata = {
 	.cpu_clear_user_page	= v4wt_clear_user_page,
-	.cpu_copy_user_page	= v4wt_copy_user_page,
+	.cpu_copy_user_highpage = v4wt_copy_user_highpage,
 };
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 0e21c0767580..2ea75d0f5048 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -10,8 +10,8 @@
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 
-#include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/shmparam.h>
 #include <asm/tlbflush.h>
@@ -33,9 +33,16 @@ static DEFINE_SPINLOCK(v6_lock);
  * Copy the user page.  No aliasing to deal with so we can just
  * attack the kernel's existing mapping of these pages.
  */
-static void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void v6_copy_user_highpage_nonaliasing(struct page *to,
+	struct page *from, unsigned long vaddr)
 {
+	void *kto, *kfrom;
+
+	kfrom = kmap_atomic(from, KM_USER0);
+	kto = kmap_atomic(to, KM_USER1);
 	copy_page(kto, kfrom);
+	kunmap_atomic(kto, KM_USER1);
+	kunmap_atomic(kfrom, KM_USER0);
 }
 
 /*
@@ -48,26 +55,32 @@ static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
 }
 
 /*
- * Copy the page, taking account of the cache colour.
+ * Discard data in the kernel mapping for the new page.
+ * FIXME: needs this MCRR to be supported.
  */
-static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void discard_old_kernel_data(void *kto)
 {
-	unsigned int offset = CACHE_COLOUR(vaddr);
-	unsigned long from, to;
-	struct page *page = virt_to_page(kfrom);
-
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-		__flush_dcache_page(page_mapping(page), page);
-
-	/*
-	 * Discard data in the kernel mapping for the new page.
-	 * FIXME: needs this MCRR to be supported.
-	 */
 	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
 	   :
 	   : "r" (kto),
 	     "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
 	   : "cc");
+}
+
+/*
+ * Copy the page, taking account of the cache colour.
+ */
+static void v6_copy_user_highpage_aliasing(struct page *to,
+	struct page *from, unsigned long vaddr)
+{
+	unsigned int offset = CACHE_COLOUR(vaddr);
+	unsigned long kfrom, kto;
+
+	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+		__flush_dcache_page(page_mapping(from), from);
+
+	/* FIXME: not highmem safe */
+	discard_old_kernel_data(page_address(to));
 
 	/*
 	 * Now copy the page using the same cache colour as the
@@ -75,16 +88,16 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL), 0);
-	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
 
-	from = from_address + (offset << PAGE_SHIFT);
-	to   = to_address + (offset << PAGE_SHIFT);
+	kfrom = from_address + (offset << PAGE_SHIFT);
+	kto   = to_address + (offset << PAGE_SHIFT);
 
-	flush_tlb_kernel_page(from);
-	flush_tlb_kernel_page(to);
+	flush_tlb_kernel_page(kfrom);
+	flush_tlb_kernel_page(kto);
 
-	copy_page((void *)to, (void *)from);
+	copy_page((void *)kto, (void *)kfrom);
 
 	spin_unlock(&v6_lock);
 }
@@ -124,14 +137,14 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 
 struct cpu_user_fns v6_user_fns __initdata = {
 	.cpu_clear_user_page	= v6_clear_user_page_nonaliasing,
-	.cpu_copy_user_page	= v6_copy_user_page_nonaliasing,
+	.cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing,
 };
 
 static int __init v6_userpage_init(void)
 {
 	if (cache_is_vipt_aliasing()) {
 		cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
-		cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing;
+		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
 	}
 
 	return 0;
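
Note that the aliasing variants are only installed at runtime: v6_userpage_init() overrides the non-aliasing defaults when a VIPT-aliasing cache is detected. In the full copypage-v6.c (outside the hunks shown here) that function is registered as an early initcall, roughly:

core_initcall(v6_userpage_init);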
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index 51ed502e5777..caa697ccd8db 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -11,8 +11,7 @@
  * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
  */
 #include <linux/init.h>
-
-#include <asm/page.h>
+#include <linux/highmem.h>
 
 /*
  * General note:
@@ -21,18 +20,17 @@
  */
 
 /*
- * XSC3 optimised copy_user_page
+ * XSC3 optimised copy_user_highpage
  *  r0 = destination
  *  r1 = source
- *  r2 = virtual user address of ultimate destination page
  *
  * The source page may have some clean entries in the cache already, but we
  * can safely ignore them - break_cow() will flush them out of the cache
  * if we eventually end up using our copied page.
  *
  */
-void __attribute__((naked))
-xsc3_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+static void __attribute__((naked))
+xsc3_mc_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
 	stmfd	sp!, {r4, r5, lr}		\n\
@@ -72,6 +70,18 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 	: "I" (PAGE_SIZE / 64 - 1));
 }
 
+void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	xsc3_mc_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
 /*
  * XScale optimised clear_user_page
  *	r0 = destination
@@ -98,5 +108,5 @@ xsc3_mc_clear_user_page(void *kaddr, unsigned long vaddr)
 
 struct cpu_user_fns xsc3_mc_user_fns __initdata = {
 	.cpu_clear_user_page	= xsc3_mc_clear_user_page,
-	.cpu_copy_user_page	= xsc3_mc_copy_user_page,
+	.cpu_copy_user_highpage = xsc3_mc_copy_user_highpage,
 };
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index bad49331bbf9..01bafafce181 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -15,8 +15,8 @@
  */
 #include <linux/init.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 
-#include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
@@ -35,7 +35,7 @@
 static DEFINE_SPINLOCK(minicache_lock);
 
 /*
- * XScale mini-dcache optimised copy_user_page
+ * XScale mini-dcache optimised copy_user_highpage
  *
  * We flush the destination cache lines just before we write the data into the
  * corresponding address.  Since the Dcache is read-allocate, this removes the
@@ -90,21 +90,24 @@ mc_copy_user_page(void *from, void *to)
 	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
 }
 
-void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
 {
-	struct page *page = virt_to_page(kfrom);
+	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-		__flush_dcache_page(page_mapping(page), page);
+	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
 
-	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
+	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
 	flush_tlb_kernel_page(COPYPAGE_MINICACHE);
 
 	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
 
 	spin_unlock(&minicache_lock);
+
+	kunmap_atomic(kto, KM_USER1);
 }
 
 /*
@@ -133,5 +136,5 @@ xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr)
 
 struct cpu_user_fns xscale_mc_user_fns __initdata = {
 	.cpu_clear_user_page	= xscale_mc_clear_user_page,
-	.cpu_copy_user_page	= xscale_mc_copy_user_page,
+	.cpu_copy_user_highpage = xscale_mc_copy_user_highpage,
 };
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 2b5ba396e3a6..b9743e6416c4 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -34,7 +34,7 @@ EXPORT_SYMBOL(cpu_cache);
 #ifdef CONFIG_MMU
 #ifndef MULTI_USER
 EXPORT_SYMBOL(__cpu_clear_user_page);
-EXPORT_SYMBOL(__cpu_copy_user_page);
+EXPORT_SYMBOL(__cpu_copy_user_highpage);
 #else
 EXPORT_SYMBOL(cpu_user);
 #endif