path: root/arch/arm/mm/copypage-v4wb.c
author    Russell King <rmk@dyn-67.arm.linux.org.uk>  2008-10-31 11:08:35 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>  2008-11-27 18:53:47 -0500
commit    063b0a4207e43acbeff3d4b09f43e750e0212b48 (patch)
tree      eb2a2c1faa732c763102040478830111fc13f2a5 /arch/arm/mm/copypage-v4wb.c
parent    d73e60b7144a86baf0fdfcc9537a70bb4f72e11c (diff)
[ARM] copypage: provide our own copy_user_highpage()
We used to override the copy_user_page() function. However, this is not only inefficient, it also causes additional complexity for highmem support, since we convert from a struct page to a kernel direct mapped address and back to a struct page again.

Moreover, with highmem support, we end up pointlessly setting up kmap entries for pages which we're going to remap. So, push the kmapping down into the copypage implementation files where it's required.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
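For context on the inefficiency described above: before this series, ARM overrode copy_user_page(), which the generic copy_user_highpage() fallback in include/linux/highmem.h invoked only after kmapping both pages itself. A rough sketch of that fallback (reconstructed from memory, using the two-argument kmap_atomic()/KM_* API of that era, so treat the details as approximate):

/* Sketch of the old generic fallback, not part of this patch: the core
 * converts each struct page to a kernel address up front, then hands the
 * destination struct page back down as copy_user_page()'s last argument,
 * the struct page -> address -> struct page round trip the commit
 * message complains about. */
static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}

Pushing the kmapping into each copypage implementation lets a highmem-aware implementation map the pages however it likes, or not at all, instead of inheriting the core's kmap choices.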
Diffstat (limited to 'arch/arm/mm/copypage-v4wb.c')
-rw-r--r--  arch/arm/mm/copypage-v4wb.c | 25 ++++++++++++++++++-------
1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index 230210822961..186a68a794a9 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -8,11 +8,10 @@
  * published by the Free Software Foundation.
  */
 #include <linux/init.h>
-
-#include <asm/page.h>
+#include <linux/highmem.h>
 
 /*
- * ARMv4 optimised copy_user_page
+ * ARMv4 optimised copy_user_highpage
  *
  * We flush the destination cache lines just before we write the data into the
  * corresponding address. Since the Dcache is read-allocate, this removes the
@@ -21,10 +20,10 @@
  *
  * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
  * instruction. If your processor does not supply this, you have to write your
- * own copy_user_page that does the right thing.
+ * own copy_user_highpage that does the right thing.
  */
-void __attribute__((naked))
-v4wb_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+static void __attribute__((naked))
+v4wb_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
 	stmfd	sp!, {r4, lr}			@ 2\n\
@@ -48,6 +47,18 @@ v4wb_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 	: "I" (PAGE_SIZE / 64));
 }
 
+void v4wb_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	v4wb_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
 /*
  * ARMv4 optimised clear_user_page
  *
@@ -79,5 +90,5 @@ v4wb_clear_user_page(void *kaddr, unsigned long vaddr)
 
 struct cpu_user_fns v4wb_user_fns __initdata = {
 	.cpu_clear_user_page	= v4wb_clear_user_page,
-	.cpu_copy_user_page	= v4wb_copy_user_page,
+	.cpu_copy_user_highpage	= v4wb_copy_user_highpage,
 };
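With v4wb_copy_user_page() now static, v4wb_copy_user_highpage() is the only entry point into this file, reached through the cpu_user_fns method table. A rough sketch of the ARM-side dispatch after this series (based on arch/arm/include/asm/page.h of that era; the macro names are from memory and may differ in detail):

struct page;

struct cpu_user_fns {
	void (*cpu_clear_user_page)(void *p, unsigned long user);
	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
			unsigned long vaddr);
};

/* On a MULTI_USER kernel, cpu_user is populated at boot from the
 * matching __initdata table (v4wb_user_fns here), so the core kernel's
 * copy_user_highpage() resolves to v4wb_copy_user_highpage() at run
 * time. */
extern struct cpu_user_fns cpu_user;

#define __cpu_copy_user_highpage	cpu_user.cpu_copy_user_highpage

#define __HAVE_ARCH_COPY_USER_HIGHPAGE
#define copy_user_highpage(to, from, vaddr, vma) \
	__cpu_copy_user_highpage(to, from, vaddr)

Note that vaddr is still passed through: a VIVT write-back implementation like this one does not need the user address for a plain copy, but cache-aliasing implementations (copypage-v6, for example) use it to pick an alias-correct mapping for the destination.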