about summary refs log tree commit diff stats
path: root/arch/arm/mm/copypage-v4wb.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/arm/mm/copypage-v4wb.c')
-rw-r--r--  arch/arm/mm/copypage-v4wb.c  25
1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index 230210822961..186a68a794a9 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -8,11 +8,10 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10#include <linux/init.h> 10#include <linux/init.h>
11 11#include <linux/highmem.h>
12#include <asm/page.h>
13 12
14/* 13/*
15 * ARMv4 optimised copy_user_page 14 * ARMv4 optimised copy_user_highpage
16 * 15 *
17 * We flush the destination cache lines just before we write the data into the 16 * We flush the destination cache lines just before we write the data into the
18 * corresponding address. Since the Dcache is read-allocate, this removes the 17 * corresponding address. Since the Dcache is read-allocate, this removes the
@@ -21,10 +20,10 @@
21 * 20 *
22 * Note: We rely on all ARMv4 processors implementing the "invalidate D line" 21 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
23 * instruction. If your processor does not supply this, you have to write your 22 * instruction. If your processor does not supply this, you have to write your
24 * own copy_user_page that does the right thing. 23 * own copy_user_highpage that does the right thing.
25 */ 24 */
26void __attribute__((naked)) 25static void __attribute__((naked))
27v4wb_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) 26v4wb_copy_user_page(void *kto, const void *kfrom)
28{ 27{
29 asm("\ 28 asm("\
30 stmfd sp!, {r4, lr} @ 2\n\ 29 stmfd sp!, {r4, lr} @ 2\n\
@@ -48,6 +47,18 @@ v4wb_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
48 : "I" (PAGE_SIZE / 64)); 47 : "I" (PAGE_SIZE / 64));
49} 48}
50 49
50void v4wb_copy_user_highpage(struct page *to, struct page *from,
51 unsigned long vaddr)
52{
53 void *kto, *kfrom;
54
55 kto = kmap_atomic(to, KM_USER0);
56 kfrom = kmap_atomic(from, KM_USER1);
57 v4wb_copy_user_page(kto, kfrom);
58 kunmap_atomic(kfrom, KM_USER1);
59 kunmap_atomic(kto, KM_USER0);
60}
61
51/* 62/*
52 * ARMv4 optimised clear_user_page 63 * ARMv4 optimised clear_user_page
53 * 64 *
@@ -79,5 +90,5 @@ v4wb_clear_user_page(void *kaddr, unsigned long vaddr)
79 90
80struct cpu_user_fns v4wb_user_fns __initdata = { 91struct cpu_user_fns v4wb_user_fns __initdata = {
81 .cpu_clear_user_page = v4wb_clear_user_page, 92 .cpu_clear_user_page = v4wb_clear_user_page,
82 .cpu_copy_user_page = v4wb_copy_user_page, 93 .cpu_copy_user_highpage = v4wb_copy_user_highpage,
83}; 94};