author	Russell King <rmk@dyn-67.arm.linux.org.uk>	2008-10-31 11:08:35 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2008-11-27 18:53:47 -0500
commit	063b0a4207e43acbeff3d4b09f43e750e0212b48 (patch)
tree	eb2a2c1faa732c763102040478830111fc13f2a5 /arch/arm/mm/copypage-v4wt.c
parent	d73e60b7144a86baf0fdfcc9537a70bb4f72e11c (diff)
[ARM] copypage: provide our own copy_user_highpage()
We used to override the copy_user_page() function. However, this is not only inefficient, it also causes additional complexity for highmem support, since we convert from a struct page to a kernel direct mapped address and back to a struct page again.

Moreover, with highmem support, we end up pointlessly setting up kmap entries for pages which we're going to remap.

So, push the kmapping down into the copypage implementation files where it's required.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
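For context, here is a minimal sketch of the interface this moves to. It is assumed from the commit message and the function-pointer table visible in the diff below, not the verbatim arch/arm/include/asm/page.h header; the exact declarations there may differ in detail:

    /*
     * Sketch (assumed, not verbatim): each CPU variant registers its
     * routines in a function-pointer table, and the generic
     * copy_user_highpage() entry point passes the struct pages straight
     * through, leaving any kmapping to the implementation.
     */
    struct page;

    struct cpu_user_fns {
            void (*cpu_clear_user_page)(void *p, unsigned long user);
            void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
                            unsigned long vaddr);
    };

    extern struct cpu_user_fns cpu_user;

    /*
     * Previously the core kmapped both pages itself and called
     * cpu_copy_user_page(kto, kfrom, vaddr); with highmem that set up
     * kmap entries only for the implementation to remap the pages.
     */
    #define copy_user_highpage(to, from, vaddr, vma) \
            cpu_user.cpu_copy_user_highpage(to, from, vaddr)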
Diffstat (limited to 'arch/arm/mm/copypage-v4wt.c')
-rw-r--r--	arch/arm/mm/copypage-v4wt.c	23
1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index d8ef39503ff0..86c2cfdbde03 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -11,18 +11,17 @@
  * the only supported cache operation.
  */
 #include <linux/init.h>
-
-#include <asm/page.h>
+#include <linux/highmem.h>
 
 /*
- * ARMv4 optimised copy_user_page
+ * ARMv4 optimised copy_user_highpage
  *
  * Since we have writethrough caches, we don't have to worry about
  * dirty data in the cache.  However, we do have to ensure that
  * subsequent reads are up to date.
  */
-void __attribute__((naked))
-v4wt_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+static void __attribute__((naked))
+v4wt_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
 	stmfd	sp!, {r4, lr}			@ 2\n\
@@ -44,6 +43,18 @@ v4wt_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 	: "I" (PAGE_SIZE / 64));
 }
 
+void v4wt_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	v4wt_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
 /*
  * ARMv4 optimised clear_user_page
  *
@@ -73,5 +84,5 @@ v4wt_clear_user_page(void *kaddr, unsigned long vaddr)
 
 struct cpu_user_fns v4wt_user_fns __initdata = {
 	.cpu_clear_user_page	= v4wt_clear_user_page,
-	.cpu_copy_user_page	= v4wt_copy_user_page,
+	.cpu_copy_user_highpage	= v4wt_copy_user_highpage,
 };