author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2008-10-31 12:32:19 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2008-11-27 18:53:48 -0500
commit		303c6443659bc1dc911356f5de149f48ff1d97b8
tree		75da0aef28ec8e843cdeb24c96349bdf812e2740 /arch
parent		063b0a4207e43acbeff3d4b09f43e750e0212b48
[ARM] clearpage: provide our own clear_user_highpage()
For similar reasons as copy_user_page(), we want to avoid the
additional kmap_atomic if it's unnecessary.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
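
For context: the extra kmap_atomic() being avoided comes from the generic clear_user_highpage() in include/linux/highmem.h, which is used whenever an architecture does not supply its own and which unconditionally maps the page before calling the architecture's clear_user_page(). The sketch below is an approximation of that generic fallback for kernels of this era, shown only to illustrate what the ARM-specific implementations in this patch replace; it is not part of the patch itself.

/*
 * Rough sketch of the generic fallback in include/linux/highmem.h
 * (approximate, for illustration only -- not part of this commit).
 */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);	/* always maps, needed or not */
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr, KM_USER0);
}
#endif

With clear_user_highpage() now defined per-CPU below, the kmap_atomic()/kunmap_atomic() pair moves inside each implementation, where the aliasing VIPT variant can substitute its own colour-matched temporary mapping instead.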
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/include/asm/page.h	11
-rw-r--r--	arch/arm/mm/copypage-feroceon.c	20
-rw-r--r--	arch/arm/mm/copypage-v3.c	13
-rw-r--r--	arch/arm/mm/copypage-v4mc.c	28
-rw-r--r--	arch/arm/mm/copypage-v4wb.c	28
-rw-r--r--	arch/arm/mm/copypage-v4wt.c	24
-rw-r--r--	arch/arm/mm/copypage-v6.c	23
-rw-r--r--	arch/arm/mm/copypage-xsc3.c	25
-rw-r--r--	arch/arm/mm/copypage-xscale.c	26
-rw-r--r--	arch/arm/mm/proc-syms.c	2
10 files changed, 100 insertions, 100 deletions
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 1581b8cf8f33..77747df713b4 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -111,7 +111,7 @@
 struct page;
 
 struct cpu_user_fns {
-	void (*cpu_clear_user_page)(void *p, unsigned long user);
+	void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
 	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
 			unsigned long vaddr);
 };
@@ -119,20 +119,21 @@ struct cpu_user_fns {
 #ifdef MULTI_USER
 extern struct cpu_user_fns cpu_user;
 
-#define __cpu_clear_user_page		cpu_user.cpu_clear_user_page
+#define __cpu_clear_user_highpage	cpu_user.cpu_clear_user_highpage
 #define __cpu_copy_user_highpage	cpu_user.cpu_copy_user_highpage
 
 #else
 
-#define __cpu_clear_user_page		__glue(_USER,_clear_user_page)
+#define __cpu_clear_user_highpage	__glue(_USER,_clear_user_highpage)
 #define __cpu_copy_user_highpage	__glue(_USER,_copy_user_highpage)
 
-extern void __cpu_clear_user_page(void *p, unsigned long user);
+extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
 extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
 			unsigned long vaddr);
 #endif
 
-#define clear_user_page(addr,vaddr,pg)	__cpu_clear_user_page(addr, vaddr)
+#define clear_user_highpage(page,vaddr)		\
+	__cpu_clear_user_highpage(page, vaddr)
 
 #define __HAVE_ARCH_COPY_USER_HIGHPAGE
 #define copy_user_highpage(to,from,vaddr,vma)	\
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index edd71686b8df..c3651b2939c7 100644
--- a/arch/arm/mm/copypage-feroceon.c
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -79,12 +79,11 @@ void feroceon_copy_user_highpage(struct page *to, struct page *from,
 	kunmap_atomic(kto, KM_USER0);
 }
 
-void __attribute__((naked))
-feroceon_clear_user_page(void *kaddr, unsigned long vaddr)
+void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
+	void *kaddr = kmap_atomic(page, KM_USER0);
 	asm("\
-	stmfd sp!, {r4-r7, lr}			\n\
-	mov r1, %0				\n\
+	mov r1, %1				\n\
 	mov r2, #0				\n\
 	mov r3, #0				\n\
 	mov r4, #0				\n\
@@ -93,19 +92,20 @@ feroceon_clear_user_page(void *kaddr, unsigned long vaddr)
 	mov r7, #0				\n\
 	mov ip, #0				\n\
 	mov lr, #0				\n\
-1:	stmia r0, {r2-r7, ip, lr}		\n\
+1:	stmia %0, {r2-r7, ip, lr}		\n\
 	subs r1, r1, #1				\n\
-	mcr p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	mcr p15, 0, %0, c7, c14, 1		@ clean and invalidate D line\n\
 	add r0, r0, #32				\n\
 	bne 1b					\n\
-	mcr p15, 0, r1, c7, c10, 4		@ drain WB\n\
-	ldmfd sp!, {r4-r7, pc}"
+	mcr p15, 0, r1, c7, c10, 4		@ drain WB"
 	:
-	: "I" (PAGE_SIZE / 32));
+	: "r" (kaddr), "I" (PAGE_SIZE / 32)
+	: "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 struct cpu_user_fns feroceon_user_fns __initdata = {
-	.cpu_clear_user_page	= feroceon_clear_user_page,
+	.cpu_clear_user_highpage = feroceon_clear_user_highpage,
 	.cpu_copy_user_highpage	= feroceon_copy_user_highpage,
 };
 
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index 52df8f04d3f7..13ce0baa6ba5 100644
--- a/arch/arm/mm/copypage-v3.c
+++ b/arch/arm/mm/copypage-v3.c
@@ -54,10 +54,10 @@ void v3_copy_user_highpage(struct page *to, struct page *from,
  *
  * FIXME: do we need to handle cache stuff...
  */
-void __attribute__((naked)) v3_clear_user_page(void *kaddr, unsigned long vaddr)
+void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
+	void *kaddr = kmap_atomic(page, KM_USER0);
 	asm("\n\
-	str lr, [sp, #-4]!\n\
 	mov r1, %1				@ 1\n\
 	mov r2, #0				@ 1\n\
 	mov r3, #0				@ 1\n\
@@ -68,13 +68,14 @@ void __attribute__((naked)) v3_clear_user_page(void *kaddr, unsigned long vaddr)
 	stmia %0!, {r2, r3, ip, lr}		@ 4\n\
 	stmia %0!, {r2, r3, ip, lr}		@ 4\n\
 	subs r1, r1, #1				@ 1\n\
-	bne 1b					@ 1\n\
-	ldr pc, [sp], #4"
+	bne 1b					@ 1"
 	:
-	: "r" (kaddr), "I" (PAGE_SIZE / 64));
+	: "r" (kaddr), "I" (PAGE_SIZE / 64)
+	: "r1", "r2", "r3", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 struct cpu_user_fns v3_user_fns __initdata = {
-	.cpu_clear_user_page	= v3_clear_user_page,
+	.cpu_clear_user_highpage = v3_clear_user_highpage,
 	.cpu_copy_user_highpage	= v3_copy_user_highpage,
 };
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index a7dc838fee76..a5eae503a34f 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -91,30 +91,30 @@ void v4_mc_copy_user_highpage(struct page *from, struct page *to,
 /*
  * ARMv4 optimised clear_user_page
  */
-void __attribute__((naked))
-v4_mc_clear_user_page(void *kaddr, unsigned long vaddr)
+void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	asm volatile(
-	"str lr, [sp, #-4]!\n\
+	void *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile("\
 	mov r1, %0				@ 1\n\
 	mov r2, #0				@ 1\n\
 	mov r3, #0				@ 1\n\
 	mov ip, #0				@ 1\n\
 	mov lr, #0				@ 1\n\
-1:	mcr p15, 0, r0, c7, c6, 1		@ 1 invalidate D line\n\
-	stmia r0!, {r2, r3, ip, lr}		@ 4\n\
-	stmia r0!, {r2, r3, ip, lr}		@ 4\n\
-	mcr p15, 0, r0, c7, c6, 1		@ 1 invalidate D line\n\
-	stmia r0!, {r2, r3, ip, lr}		@ 4\n\
-	stmia r0!, {r2, r3, ip, lr}		@ 4\n\
+1:	mcr p15, 0, %0, c7, c6, 1		@ 1 invalidate D line\n\
+	stmia %0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia %0!, {r2, r3, ip, lr}		@ 4\n\
+	mcr p15, 0, %0, c7, c6, 1		@ 1 invalidate D line\n\
+	stmia %0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia %0!, {r2, r3, ip, lr}		@ 4\n\
 	subs r1, r1, #1				@ 1\n\
-	bne 1b					@ 1\n\
-	ldr pc, [sp], #4"
+	bne 1b					@ 1"
 	:
-	: "I" (PAGE_SIZE / 64));
+	: "r" (kaddr), "I" (PAGE_SIZE / 64)
+	: "r1", "r2", "r3", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 struct cpu_user_fns v4_mc_user_fns __initdata = {
-	.cpu_clear_user_page	= v4_mc_clear_user_page,
+	.cpu_clear_user_highpage = v4_mc_clear_user_highpage,
 	.cpu_copy_user_highpage	= v4_mc_copy_user_highpage,
 };
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index 186a68a794a9..9144a96037bf 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -64,31 +64,31 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from,
  *
  * Same story as above.
  */
-void __attribute__((naked))
-v4wb_clear_user_page(void *kaddr, unsigned long vaddr)
+void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
+	void *kaddr = kmap_atomic(page, KM_USER0);
 	asm("\
-	str lr, [sp, #-4]!\n\
-	mov r1, %0				@ 1\n\
+	mov r1, %1				@ 1\n\
 	mov r2, #0				@ 1\n\
 	mov r3, #0				@ 1\n\
 	mov ip, #0				@ 1\n\
 	mov lr, #0				@ 1\n\
-1:	mcr p15, 0, r0, c7, c6, 1		@ 1 invalidate D line\n\
-	stmia r0!, {r2, r3, ip, lr}		@ 4\n\
-	stmia r0!, {r2, r3, ip, lr}		@ 4\n\
-	mcr p15, 0, r0, c7, c6, 1		@ 1 invalidate D line\n\
-	stmia r0!, {r2, r3, ip, lr}		@ 4\n\
-	stmia r0!, {r2, r3, ip, lr}		@ 4\n\
+1:	mcr p15, 0, %0, c7, c6, 1		@ 1 invalidate D line\n\
+	stmia %0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia %0!, {r2, r3, ip, lr}		@ 4\n\
+	mcr p15, 0, %0, c7, c6, 1		@ 1 invalidate D line\n\
+	stmia %0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia %0!, {r2, r3, ip, lr}		@ 4\n\
 	subs r1, r1, #1				@ 1\n\
 	bne 1b					@ 1\n\
-	mcr p15, 0, r1, c7, c10, 4		@ 1 drain WB\n\
-	ldr pc, [sp], #4"
+	mcr p15, 0, r1, c7, c10, 4		@ 1 drain WB"
 	:
-	: "I" (PAGE_SIZE / 64));
+	: "r" (kaddr), "I" (PAGE_SIZE / 64)
+	: "r1", "r2", "r3", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 struct cpu_user_fns v4wb_user_fns __initdata = {
-	.cpu_clear_user_page	= v4wb_clear_user_page,
+	.cpu_clear_user_highpage = v4wb_clear_user_highpage,
 	.cpu_copy_user_highpage	= v4wb_copy_user_highpage,
 };
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index 86c2cfdbde03..b8a345d6e77e 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -60,29 +60,29 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from,
  *
  * Same story as above.
  */
-void __attribute__((naked))
-v4wt_clear_user_page(void *kaddr, unsigned long vaddr)
+void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
+	void *kaddr = kmap_atomic(page, KM_USER0);
 	asm("\
-	str lr, [sp, #-4]!\n\
-	mov r1, %0				@ 1\n\
+	mov r1, %1				@ 1\n\
 	mov r2, #0				@ 1\n\
 	mov r3, #0				@ 1\n\
 	mov ip, #0				@ 1\n\
 	mov lr, #0				@ 1\n\
-1:	stmia r0!, {r2, r3, ip, lr}		@ 4\n\
-	stmia r0!, {r2, r3, ip, lr}		@ 4\n\
-	stmia r0!, {r2, r3, ip, lr}		@ 4\n\
-	stmia r0!, {r2, r3, ip, lr}		@ 4\n\
+1:	stmia %0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia %0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia %0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia %0!, {r2, r3, ip, lr}		@ 4\n\
 	subs r1, r1, #1				@ 1\n\
 	bne 1b					@ 1\n\
-	mcr p15, 0, r2, c7, c7, 0		@ flush ID cache\n\
-	ldr pc, [sp], #4"
+	mcr p15, 0, r2, c7, c7, 0		@ flush ID cache"
 	:
-	: "I" (PAGE_SIZE / 64));
+	: "r" (kaddr), "I" (PAGE_SIZE / 64)
+	: "r1", "r2", "r3", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 struct cpu_user_fns v4wt_user_fns __initdata = {
-	.cpu_clear_user_page	= v4wt_clear_user_page,
+	.cpu_clear_user_highpage = v4wt_clear_user_highpage,
 	.cpu_copy_user_highpage	= v4wt_copy_user_highpage,
 };
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 2ea75d0f5048..4127a7bddfe5 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -49,9 +49,11 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
  * Clear the user page. No aliasing to deal with so we can just
  * attack the kernel's existing mapping of this page.
  */
-static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
 {
+	void *kaddr = kmap_atomic(page, KM_USER0);
 	clear_page(kaddr);
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 /*
@@ -107,20 +109,13 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
  * so remap the kernel page into the same cache colour as the user
  * page.
  */
-static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
 {
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long to = to_address + (offset << PAGE_SHIFT);
 
-	/*
-	 * Discard data in the kernel mapping for the new page
-	 * FIXME: needs this MCRR to be supported.
-	 */
-	__asm__("mcrr p15, 0, %1, %0, c6	@ 0xec401f06"
-	   :
-	   : "r" (kaddr),
-	     "r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES)
-	   : "cc");
+	/* FIXME: not highmem safe */
+	discard_old_kernel_data(page_address(page));
 
 	/*
 	 * Now clear the page using the same cache colour as
@@ -128,7 +123,7 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
 	flush_tlb_kernel_page(to);
 	clear_page((void *)to);
 
@@ -136,14 +131,14 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 }
 
 struct cpu_user_fns v6_user_fns __initdata = {
-	.cpu_clear_user_page	= v6_clear_user_page_nonaliasing,
+	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
 	.cpu_copy_user_highpage	= v6_copy_user_highpage_nonaliasing,
 };
 
 static int __init v6_userpage_init(void)
 {
 	if (cache_is_vipt_aliasing()) {
-		cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
+		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
 		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
 	}
 
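
A note on the v6 aliasing path above: with an aliasing VIPT cache the page has to be cleared through a kernel mapping whose cache colour matches the user-space virtual address, otherwise the user mapping could later hit stale cache lines. The following is a condensed restatement of v6_clear_user_highpage_aliasing() from the hunk above with explanatory comments added; it relies on helpers (TOP_PTE, to_address, v6_lock, discard_old_kernel_data) defined elsewhere in copypage-v6.c, the final unlock is assumed from the surrounding context, and it is shown for illustration only.

static void v6_clear_user_highpage_aliasing_sketch(struct page *page, unsigned long vaddr)
{
	unsigned int offset = CACHE_COLOUR(vaddr);		/* colour of the user mapping */
	unsigned long to = to_address + (offset << PAGE_SHIFT);	/* kernel window with the same colour */

	/* FIXME (as noted in the patch): page_address() is not highmem safe */
	discard_old_kernel_data(page_address(page));

	spin_lock(&v6_lock);					/* the TOP_PTE window is shared */
	set_pte_ext(TOP_PTE(to_address) + offset,
		    pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);
	clear_page((void *)to);					/* clear via the colour-matched alias */
	spin_unlock(&v6_lock);					/* assumed, as in the original function */
}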
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index caa697ccd8db..0e7cb325ca4c 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -87,26 +87,27 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
  * r0 = destination
  * r1 = virtual user address of ultimate destination page
  */
-void __attribute__((naked))
-xsc3_mc_clear_user_page(void *kaddr, unsigned long vaddr)
+void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
+	void *kaddr = kmap_atomic(page, KM_USER0);
 	asm("\
-	mov r1, %0				\n\
+	mov r1, %1				\n\
 	mov r2, #0				\n\
 	mov r3, #0				\n\
-1:	mcr p15, 0, r0, c7, c6, 1		@ invalidate line\n\
-	strd r2, [r0], #8			\n\
-	strd r2, [r0], #8			\n\
-	strd r2, [r0], #8			\n\
-	strd r2, [r0], #8			\n\
+1:	mcr p15, 0, %0, c7, c6, 1		@ invalidate line\n\
+	strd r2, [%0], #8			\n\
+	strd r2, [%0], #8			\n\
+	strd r2, [%0], #8			\n\
+	strd r2, [%0], #8			\n\
 	subs r1, r1, #1				\n\
-	bne 1b					\n\
-	mov pc, lr"
+	bne 1b"
 	:
-	: "I" (PAGE_SIZE / 32));
+	: "r" (kaddr), "I" (PAGE_SIZE / 32)
+	: "r1", "r2", "r3");
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 struct cpu_user_fns xsc3_mc_user_fns __initdata = {
-	.cpu_clear_user_page	= xsc3_mc_clear_user_page,
+	.cpu_clear_user_highpage = xsc3_mc_clear_user_highpage,
 	.cpu_copy_user_highpage	= xsc3_mc_copy_user_highpage,
 };
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 01bafafce181..aa9f2ff9dce0 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -113,28 +113,30 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 /*
  * XScale optimised clear_user_page
  */
-void __attribute__((naked))
-xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr)
+void
+xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
+	void *kaddr = kmap_atomic(page, KM_USER0);
 	asm volatile(
-	"mov r1, %0				\n\
+	"mov r1, %1				\n\
 	mov r2, #0				\n\
 	mov r3, #0				\n\
-1:	mov ip, r0				\n\
-	strd r2, [r0], #8			\n\
-	strd r2, [r0], #8			\n\
-	strd r2, [r0], #8			\n\
-	strd r2, [r0], #8			\n\
+1:	mov ip, %0				\n\
+	strd r2, [%0], #8			\n\
+	strd r2, [%0], #8			\n\
+	strd r2, [%0], #8			\n\
+	strd r2, [%0], #8			\n\
 	mcr p15, 0, ip, c7, c10, 1		@ clean D line\n\
 	subs r1, r1, #1				\n\
 	mcr p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
-	bne 1b					\n\
-	mov pc, lr"
+	bne 1b"
 	:
-	: "I" (PAGE_SIZE / 32));
+	: "r" (kaddr), "I" (PAGE_SIZE / 32)
+	: "r1", "r2", "r3", "ip");
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 struct cpu_user_fns xscale_mc_user_fns __initdata = {
-	.cpu_clear_user_page	= xscale_mc_clear_user_page,
+	.cpu_clear_user_highpage = xscale_mc_clear_user_highpage,
 	.cpu_copy_user_highpage	= xscale_mc_copy_user_highpage,
 };
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index b9743e6416c4..4ad3bf291ad3 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -33,7 +33,7 @@ EXPORT_SYMBOL(cpu_cache);
 
 #ifdef CONFIG_MMU
 #ifndef MULTI_USER
-EXPORT_SYMBOL(__cpu_clear_user_page);
+EXPORT_SYMBOL(__cpu_clear_user_highpage);
 EXPORT_SYMBOL(__cpu_copy_user_highpage);
 #else
 EXPORT_SYMBOL(cpu_user);