about summary refs log tree commit diff stats
path: root/arch/arm/mm/copypage-v3.c
diff options
context:
space:
mode:
authorRussell King <rmk@dyn-67.arm.linux.org.uk>2008-10-31 11:08:35 -0400
committerRussell King <rmk+kernel@arm.linux.org.uk>2008-11-27 18:53:47 -0500
commit063b0a4207e43acbeff3d4b09f43e750e0212b48 (patch)
treeeb2a2c1faa732c763102040478830111fc13f2a5 /arch/arm/mm/copypage-v3.c
parentd73e60b7144a86baf0fdfcc9537a70bb4f72e11c (diff)
[ARM] copypage: provide our own copy_user_highpage()
We used to override the copy_user_page() function. However, this is not only inefficient, it also causes additional complexity for highmem support, since we convert from a struct page to a kernel direct mapped address and back to a struct page again. Moreover, with highmem support, we end up pointlessly setting up kmap entries for pages which we're going to remap. So, push the kmapping down into the copypage implementation files where it's required. Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/mm/copypage-v3.c')
-rw-r--r-- arch/arm/mm/copypage-v3.c 23
1 files changed, 17 insertions, 6 deletions
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index 184911089e6c..52df8f04d3f7 100644
--- a/arch/arm/mm/copypage-v3.c
+++ b/arch/arm/mm/copypage-v3.c
@@ -8,16 +8,15 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10#include <linux/init.h> 10#include <linux/init.h>
11 11#include <linux/highmem.h>
12#include <asm/page.h>
13 12
14/* 13/*
15 * ARMv3 optimised copy_user_page 14 * ARMv3 optimised copy_user_highpage
16 * 15 *
17 * FIXME: do we need to handle cache stuff... 16 * FIXME: do we need to handle cache stuff...
18 */ 17 */
19void __attribute__((naked)) 18static void __attribute__((naked))
20v3_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) 19v3_copy_user_page(void *kto, const void *kfrom)
21{ 20{
22 asm("\n\ 21 asm("\n\
23 stmfd sp!, {r4, lr} @ 2\n\ 22 stmfd sp!, {r4, lr} @ 2\n\
@@ -38,6 +37,18 @@ v3_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
38 : "r" (kfrom), "r" (kto), "I" (PAGE_SIZE / 64)); 37 : "r" (kfrom), "r" (kto), "I" (PAGE_SIZE / 64));
39} 38}
40 39
40void v3_copy_user_highpage(struct page *to, struct page *from,
41 unsigned long vaddr)
42{
43 void *kto, *kfrom;
44
45 kto = kmap_atomic(to, KM_USER0);
46 kfrom = kmap_atomic(from, KM_USER1);
47 v3_copy_user_page(kto, kfrom);
48 kunmap_atomic(kfrom, KM_USER1);
49 kunmap_atomic(kto, KM_USER0);
50}
51
41/* 52/*
42 * ARMv3 optimised clear_user_page 53 * ARMv3 optimised clear_user_page
43 * 54 *
@@ -65,5 +76,5 @@ void __attribute__((naked)) v3_clear_user_page(void *kaddr, unsigned long vaddr)
65 76
66struct cpu_user_fns v3_user_fns __initdata = { 77struct cpu_user_fns v3_user_fns __initdata = {
67 .cpu_clear_user_page = v3_clear_user_page, 78 .cpu_clear_user_page = v3_clear_user_page,
68 .cpu_copy_user_page = v3_copy_user_page, 79 .cpu_copy_user_highpage = v3_copy_user_highpage,
69}; 80};