Diffstat (limited to 'arch/arm/mm/copypage-v4wb.c')
-rw-r--r--  arch/arm/mm/copypage-v4wb.c  |  83
1 file changed, 83 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
new file mode 100644
index 000000000000..230210822961
--- /dev/null
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -0,0 +1,83 @@
/*
 * linux/arch/arm/mm/copypage-v4wb.c
 *
 * Copyright (C) 1995-1999 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>

#include <asm/page.h>

/*
 * ARMv4 optimised copy_user_page
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 *
 * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
 * instruction.  If your processor does not supply this, you have to write your
 * own copy_user_page that does the right thing.
 */
void __attribute__((naked))
v4wb_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
{
	asm("\
	stmfd	sp!, {r4, lr}			@ 2\n\
	mov	r2, %0				@ 1\n\
	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	r1!, {r3, r4, ip, lr}		@ 4+1\n\
	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
	subs	r2, r2, #1			@ 1\n\
	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
	ldmneia	r1!, {r3, r4, ip, lr}		@ 4\n\
	bne	1b				@ 1\n\
	mcr	p15, 0, r1, c7, c10, 4		@ 1   drain WB\n\
	ldmfd	sp!, {r4, pc}			@ 3"
	:
	: "I" (PAGE_SIZE / 64));
}
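
/*
 * Illustration only, not part of the patch above: a rough plain-C sketch of
 * what the assembly loop does.  It assumes a 32-byte cache line, so each of
 * the PAGE_SIZE / 64 iterations covers two destination lines, each one
 * invalidated just before it is overwritten.  invalidate_dcache_line() and
 * drain_write_buffer() are hypothetical stand-ins for the two mcr
 * instructions shown above, not real kernel helpers.
 */
#if 0
static void v4wb_copy_user_page_sketch(void *kto, const void *kfrom)
{
	unsigned long *to = kto;
	const unsigned long *from = kfrom;
	int i, j;

	for (i = 0; i < PAGE_SIZE / 64; i++) {		/* 64 bytes per pass */
		for (j = 0; j < 2; j++) {		/* two 32-byte lines */
			invalidate_dcache_line(to);	/* mcr p15, 0, rX, c7, c6, 1 */
			memcpy(to, from, 32);		/* two ldmia/stmia pairs */
			to += 8;
			from += 8;
		}
	}
	drain_write_buffer();				/* mcr p15, 0, rX, c7, c10, 4 */
}
#endif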

/*
 * ARMv4 optimised clear_user_page
 *
 * Same story as above.
 */
void __attribute__((naked))
v4wb_clear_user_page(void *kaddr, unsigned long vaddr)
{
	asm("\
	str	lr, [sp, #-4]!\n\
	mov	r1, %0				@ 1\n\
	mov	r2, #0				@ 1\n\
	mov	r3, #0				@ 1\n\
	mov	ip, #0				@ 1\n\
	mov	lr, #0				@ 1\n\
1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
	subs	r1, r1, #1			@ 1\n\
	bne	1b				@ 1\n\
	mcr	p15, 0, r1, c7, c10, 4		@ 1   drain WB\n\
	ldr	pc, [sp], #4"
	:
	: "I" (PAGE_SIZE / 64));
}
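
/*
 * As above, an illustrative plain-C sketch of the clear loop, again assuming
 * a 32-byte cache line; invalidate_dcache_line() and drain_write_buffer()
 * remain hypothetical stand-ins for the mcr instructions.
 */
#if 0
static void v4wb_clear_user_page_sketch(void *kaddr)
{
	unsigned long *p = kaddr;
	int i;

	for (i = 0; i < PAGE_SIZE / 64; i++) {		/* 64 bytes per pass */
		invalidate_dcache_line(p);		/* first 32-byte line */
		memset(p, 0, 32);
		invalidate_dcache_line(p + 8);		/* second 32-byte line */
		memset(p + 8, 0, 32);
		p += 16;
	}
	drain_write_buffer();
}
#endif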

struct cpu_user_fns v4wb_user_fns __initdata = {
	.cpu_clear_user_page	= v4wb_clear_user_page,
	.cpu_copy_user_page	= v4wb_copy_user_page,
};
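
/*
 * For illustration only: on a kernel built for multiple CPU types, the core
 * ARM mm code is expected to dispatch copy_user_page()/clear_user_page()
 * through a table like v4wb_user_fns, roughly as sketched below.  The exact
 * macro and variable names live in the arch headers (e.g. asm/page.h) and
 * may differ; this is an assumption about the surrounding mechanism, not
 * part of this file.
 */
#if 0
extern struct cpu_user_fns cpu_user;	/* set at boot to the table for the running CPU */

#define clear_user_page(addr, vaddr) \
	cpu_user.cpu_clear_user_page(addr, vaddr)
#define copy_user_page(to, from, vaddr) \
	cpu_user.cpu_copy_user_page(to, from, vaddr)
#endif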