author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2008-10-31 11:08:35 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2008-11-27 18:53:47 -0500
commit		063b0a4207e43acbeff3d4b09f43e750e0212b48 (patch)
tree		eb2a2c1faa732c763102040478830111fc13f2a5 /arch/arm/mm/copypage-xsc3.c
parent		d73e60b7144a86baf0fdfcc9537a70bb4f72e11c (diff)
[ARM] copypage: provide our own copy_user_highpage()
We used to override the copy_user_page() function. However, this is not only
inefficient, it also causes additional complexity for highmem support, since
we convert from a struct page to a kernel direct mapped address and back to a
struct page again.

Moreover, with highmem support, we end up pointlessly setting up kmap entries
for pages which we're going to remap.

So, push the kmapping down into the copypage implementation files where it's
required.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
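For context, the generic fallback used when an architecture does not supply its
own copy_user_highpage() looks roughly like the sketch below (paraphrased from
the 2008-era include/linux/highmem.h; the exact KM_* slots and argument order
are recalled from memory, not taken from this patch). It kmaps both pages and
then hands plain kernel addresses to the architecture's copy_user_page(), which
is the struct page / kernel address detour the message above describes:

	/* Approximate sketch of the generic copy_user_highpage() fallback */
	static inline void copy_user_highpage(struct page *to, struct page *from,
		unsigned long vaddr, struct vm_area_struct *vma)
	{
		char *vfrom, *vto;

		vfrom = kmap_atomic(from, KM_USER0);	/* map source page */
		vto = kmap_atomic(to, KM_USER1);	/* map destination page */
		copy_user_page(vto, vfrom, vaddr, to);	/* arch hook sees kernel addresses */
		kunmap_atomic(vfrom, KM_USER0);
		kunmap_atomic(vto, KM_USER1);
	}

With this patch, ARM bypasses that path: the per-CPU routine receives the
struct pages directly, so the atomic kmaps are done exactly once, in the code
that actually needs the kernel addresses.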
Diffstat (limited to 'arch/arm/mm/copypage-xsc3.c')
-rw-r--r--	arch/arm/mm/copypage-xsc3.c	24
1 file changed, 17 insertions(+), 7 deletions(-)
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index 51ed502e5777..caa697ccd8db 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -11,8 +11,7 @@
  * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
  */
 #include <linux/init.h>
-
-#include <asm/page.h>
+#include <linux/highmem.h>
 
 /*
  * General note:
@@ -21,18 +20,17 @@
  */
 
 /*
- * XSC3 optimised copy_user_page
+ * XSC3 optimised copy_user_highpage
  *  r0 = destination
  *  r1 = source
- *  r2 = virtual user address of ultimate destination page
  *
  * The source page may have some clean entries in the cache already, but we
  * can safely ignore them - break_cow() will flush them out of the cache
  * if we eventually end up using our copied page.
  *
  */
-void __attribute__((naked))
-xsc3_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+static void __attribute__((naked))
+xsc3_mc_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
 	stmfd	sp!, {r4, r5, lr}		\n\
@@ -72,6 +70,18 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 	: "I" (PAGE_SIZE / 64 - 1));
 }
 
+void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	xsc3_mc_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
 /*
  * XScale optimised clear_user_page
  *  r0 = destination
@@ -98,5 +108,5 @@ xsc3_mc_clear_user_page(void *kaddr, unsigned long vaddr)
 
 struct cpu_user_fns xsc3_mc_user_fns __initdata = {
 	.cpu_clear_user_page	= xsc3_mc_clear_user_page,
-	.cpu_copy_user_page	= xsc3_mc_copy_user_page,
+	.cpu_copy_user_highpage	= xsc3_mc_copy_user_highpage,
 };
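On the consumer side, the new .cpu_copy_user_highpage entry is expected to be
reached through the companion arch/arm include changes (outside this
diffstat-limited view), using the existing cpu_user indirection. A rough sketch
of that glue follows; the macro spellings are illustrative, not quoted from the
patch:

	/* Sketch of the ARM dispatch glue (paraphrased, not part of this file) */
	struct cpu_user_fns {
		void (*cpu_clear_user_page)(void *p, unsigned long user);
		void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
				unsigned long vaddr);
	};

	extern struct cpu_user_fns cpu_user;	/* points at e.g. xsc3_mc_user_fns */

	#define __HAVE_ARCH_COPY_USER_HIGHPAGE
	#define copy_user_highpage(to, from, vaddr, vma) \
		cpu_user.cpu_copy_user_highpage(to, from, vaddr)

So a copy-on-write fault that calls copy_user_highpage(new_page, old_page,
address, vma) lands in xsc3_mc_copy_user_highpage() above, which wraps the
assembler copy loop in the kmap_atomic/kunmap_atomic pairs.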