Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/copypage-feroceon.S |  95
-rw-r--r--  arch/arm/mm/copypage-feroceon.c | 111
-rw-r--r--  arch/arm/mm/copypage-v3.S       |  67
-rw-r--r--  arch/arm/mm/copypage-v3.c       |  81
-rw-r--r--  arch/arm/mm/copypage-v4mc.c     |  53
-rw-r--r--  arch/arm/mm/copypage-v4wb.S     |  79
-rw-r--r--  arch/arm/mm/copypage-v4wb.c     |  94
-rw-r--r--  arch/arm/mm/copypage-v4wt.S     |  73
-rw-r--r--  arch/arm/mm/copypage-v4wt.c     |  88
-rw-r--r--  arch/arm/mm/copypage-v6.c       |  84
-rw-r--r--  arch/arm/mm/copypage-xsc3.S     |  97
-rw-r--r--  arch/arm/mm/copypage-xsc3.c     | 113
-rw-r--r--  arch/arm/mm/copypage-xscale.c   |  47
-rw-r--r--  arch/arm/mm/fault.c             |   6
-rw-r--r--  arch/arm/mm/init.c              |  57
-rw-r--r--  arch/arm/mm/mm.h                |   2
-rw-r--r--  arch/arm/mm/mmu.c               | 114
-rw-r--r--  arch/arm/mm/nommu.c             |  18
-rw-r--r--  arch/arm/mm/proc-syms.c         |   4
19 files changed, 692 insertions(+), 591 deletions(-)
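
The conversions below all follow one pattern: each assembly implementation moves into a C file as a naked-function inline-asm helper, and the cpu_user_fns hooks change from taking kernel virtual addresses to taking struct page pointers, so each implementation can kmap_atomic() highmem pages at the point of use. As a rough before/after sketch of the hook interface (the declarations live in <asm/page.h>, outside this diff, so the layout below is inferred from the initializers and wrapper functions that follow, not quoted from the tree):

	/* Before: callers passed kernel virtual addresses, which presumes a
	 * permanent kernel mapping of the page and so cannot work for
	 * highmem pages. */
	struct cpu_user_fns {
		void (*cpu_clear_user_page)(void *kaddr, unsigned long vaddr);
		void (*cpu_copy_user_page)(void *kto, const void *kfrom,
					   unsigned long vaddr);
	};

	/* After: callers pass struct page pointers; each implementation
	 * maps and unmaps the pages itself with kmap_atomic()/
	 * kunmap_atomic(). */
	struct cpu_user_fns {
		void (*cpu_clear_user_highpage)(struct page *page,
						unsigned long vaddr);
		void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
					       unsigned long vaddr);
	};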
diff --git a/arch/arm/mm/copypage-feroceon.S b/arch/arm/mm/copypage-feroceon.S
deleted file mode 100644
index 7eb0d320d240..000000000000
--- a/arch/arm/mm/copypage-feroceon.S
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * linux/arch/arm/lib/copypage-feroceon.S
- *
- * Copyright (C) 2008 Marvell Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This handles copy_user_page and clear_user_page on Feroceon
- * more optimally than the generic implementations.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/asm-offsets.h>
-
-	.text
-	.align	5
-
-ENTRY(feroceon_copy_user_page)
-	stmfd	sp!, {r4-r9, lr}
-	mov	ip, #PAGE_SZ
-1:	mov	lr, r1
-	ldmia	r1!, {r2 - r9}
-	pld	[lr, #32]
-	pld	[lr, #64]
-	pld	[lr, #96]
-	pld	[lr, #128]
-	pld	[lr, #160]
-	pld	[lr, #192]
-	pld	[lr, #224]
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	subs	ip, ip, #(32 * 8)
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	bne	1b
-	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
-	ldmfd	sp!, {r4-r9, pc}
-
-	.align	5
-
-ENTRY(feroceon_clear_user_page)
-	stmfd	sp!, {r4-r7, lr}
-	mov	r1, #PAGE_SZ/32
-	mov	r2, #0
-	mov	r3, #0
-	mov	r4, #0
-	mov	r5, #0
-	mov	r6, #0
-	mov	r7, #0
-	mov	ip, #0
-	mov	lr, #0
-1:	stmia	r0, {r2-r7, ip, lr}
-	subs	r1, r1, #1
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	bne	1b
-	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
-	ldmfd	sp!, {r4-r7, pc}
-
-	__INITDATA
-
-	.type	feroceon_user_fns, #object
-ENTRY(feroceon_user_fns)
-	.long	feroceon_clear_user_page
-	.long	feroceon_copy_user_page
-	.size	feroceon_user_fns, . - feroceon_user_fns
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
new file mode 100644
index 000000000000..c3ba6a94da0c
--- /dev/null
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -0,0 +1,111 @@
+/*
+ * linux/arch/arm/mm/copypage-feroceon.S
+ *
+ * Copyright (C) 2008 Marvell Semiconductors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This handles copy_user_highpage and clear_user_page on Feroceon
+ * more optimally than the generic implementations.
+ */
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+static void __attribute__((naked))
+feroceon_copy_user_page(void *kto, const void *kfrom)
+{
+	asm("\
+	stmfd	sp!, {r4-r9, lr}		\n\
+	mov	ip, %0				\n\
+1:	mov	lr, r1				\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	pld	[lr, #32]			\n\
+	pld	[lr, #64]			\n\
+	pld	[lr, #96]			\n\
+	pld	[lr, #128]			\n\
+	pld	[lr, #160]			\n\
+	pld	[lr, #192]			\n\
+	pld	[lr, #224]			\n\
+	stmia	r0, {r2 - r9}			\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	stmia	r0, {r2 - r9}			\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	stmia	r0, {r2 - r9}			\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	stmia	r0, {r2 - r9}			\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	stmia	r0, {r2 - r9}			\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	stmia	r0, {r2 - r9}			\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	stmia	r0, {r2 - r9}			\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	stmia	r0, {r2 - r9}			\n\
+	subs	ip, ip, #(32 * 8)		\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	bne	1b				\n\
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB\n\
+	ldmfd	sp!, {r4-r9, pc}"
+	:
+	: "I" (PAGE_SIZE));
+}
+
+void feroceon_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	feroceon_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
+void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile ("\
+	mov	r1, %2				\n\
+	mov	r2, #0				\n\
+	mov	r3, #0				\n\
+	mov	r4, #0				\n\
+	mov	r5, #0				\n\
+	mov	r6, #0				\n\
+	mov	r7, #0				\n\
+	mov	ip, #0				\n\
+	mov	lr, #0				\n\
+1:	stmia	%0, {r2-r7, ip, lr}		\n\
+	subs	r1, r1, #1			\n\
+	mcr	p15, 0, %0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	%0, %0, #32			\n\
+	bne	1b				\n\
+	mcr	p15, 0, r1, c7, c10, 4		@ drain WB"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 32)
+	: "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
+}
+
+struct cpu_user_fns feroceon_user_fns __initdata = {
+	.cpu_clear_user_highpage = feroceon_clear_user_highpage,
+	.cpu_copy_user_highpage	= feroceon_copy_user_highpage,
+};
+
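
One recurring detail in the converted clear routines above is worth spelling out: the asm advances the page pointer as it stores, so the pointer is declared as a dummy output ("=r" (ptr)) with the kaddr input tied to it through the "0" matching constraint. That tells the compiler the register is rewritten inside the asm, while kaddr itself stays valid for the closing kunmap_atomic(). A minimal stand-alone illustration of the same constraint pattern (zero_words is hypothetical, not part of this commit):

	/* Zero 'nwords' 32-bit words starting at kaddr.  The dummy output
	 * 'ptr' absorbs the post-incremented pointer so the compiler knows
	 * the input register is clobbered, yet kaddr remains usable. */
	static inline void zero_words(void *kaddr, int nwords)
	{
		void *ptr;		/* dummy output, value unused */

		asm volatile(
		"1:	str	%2, [%0], #4		\n"
		"	subs	%1, %1, #1		\n"
		"	bne	1b"
		: "=r" (ptr), "+r" (nwords)
		: "r" (0), "0" (kaddr)
		: "cc", "memory");
	}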
diff --git a/arch/arm/mm/copypage-v3.S b/arch/arm/mm/copypage-v3.S
deleted file mode 100644
index 2ee394b11bcb..000000000000
--- a/arch/arm/mm/copypage-v3.S
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * linux/arch/arm/lib/copypage.S
- *
- * Copyright (C) 1995-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * ASM optimised string functions
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/assembler.h>
-#include <asm/asm-offsets.h>
-
-	.text
-	.align	5
-/*
- * ARMv3 optimised copy_user_page
- *
- * FIXME: do we need to handle cache stuff...
- */
-ENTRY(v3_copy_user_page)
-	stmfd	sp!, {r4, lr}			@ 2
-	mov	r2, #PAGE_SZ/64			@ 1
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4+1
-1:	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4+1
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4+1
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4
-	subs	r2, r2, #1			@ 1
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmneia	r1!, {r3, r4, ip, lr}		@ 4
-	bne	1b				@ 1
-	ldmfd	sp!, {r4, pc}			@ 3
-
-	.align	5
-/*
- * ARMv3 optimised clear_user_page
- *
- * FIXME: do we need to handle cache stuff...
- */
-ENTRY(v3_clear_user_page)
-	str	lr, [sp, #-4]!
-	mov	r1, #PAGE_SZ/64			@ 1
-	mov	r2, #0				@ 1
-	mov	r3, #0				@ 1
-	mov	ip, #0				@ 1
-	mov	lr, #0				@ 1
-1:	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	subs	r1, r1, #1			@ 1
-	bne	1b				@ 1
-	ldr	pc, [sp], #4
-
-	__INITDATA
-
-	.type	v3_user_fns, #object
-ENTRY(v3_user_fns)
-	.long	v3_clear_user_page
-	.long	v3_copy_user_page
-	.size	v3_user_fns, . - v3_user_fns
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
new file mode 100644
index 000000000000..70ed96c8af8e
--- /dev/null
+++ b/arch/arm/mm/copypage-v3.c
@@ -0,0 +1,81 @@
+/*
+ * linux/arch/arm/mm/copypage-v3.c
+ *
+ * Copyright (C) 1995-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+/*
+ * ARMv3 optimised copy_user_highpage
+ *
+ * FIXME: do we need to handle cache stuff...
+ */
+static void __attribute__((naked))
+v3_copy_user_page(void *kto, const void *kfrom)
+{
+	asm("\n\
+	stmfd	sp!, {r4, lr}			@ 2\n\
+	mov	r2, %2				@ 1\n\
+	ldmia	%0!, {r3, r4, ip, lr}		@ 4+1\n\
+1:	stmia	%1!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	%0!, {r3, r4, ip, lr}		@ 4+1\n\
+	stmia	%1!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	%0!, {r3, r4, ip, lr}		@ 4+1\n\
+	stmia	%1!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	%0!, {r3, r4, ip, lr}		@ 4\n\
+	subs	r2, r2, #1			@ 1\n\
+	stmia	%1!, {r3, r4, ip, lr}		@ 4\n\
+	ldmneia	%0!, {r3, r4, ip, lr}		@ 4\n\
+	bne	1b				@ 1\n\
+	ldmfd	sp!, {r4, pc}			@ 3"
+	:
+	: "r" (kfrom), "r" (kto), "I" (PAGE_SIZE / 64));
+}
+
+void v3_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	v3_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
+/*
+ * ARMv3 optimised clear_user_page
+ *
+ * FIXME: do we need to handle cache stuff...
+ */
+void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile("\n\
+	mov	r1, %2				@ 1\n\
+	mov	r2, #0				@ 1\n\
+	mov	r3, #0				@ 1\n\
+	mov	ip, #0				@ 1\n\
+	mov	lr, #0				@ 1\n\
+1:	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	subs	r1, r1, #1			@ 1\n\
+	bne	1b				@ 1"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 64)
+	: "r1", "r2", "r3", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
+}
+
+struct cpu_user_fns v3_user_fns __initdata = {
+	.cpu_clear_user_highpage = v3_clear_user_highpage,
+	.cpu_copy_user_highpage	= v3_copy_user_highpage,
+};
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 8d33e2549344..bdb5fd983b15 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -15,8 +15,8 @@
  */
 #include <linux/init.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 
-#include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
@@ -33,7 +33,7 @@
 static DEFINE_SPINLOCK(minicache_lock);
 
 /*
- * ARMv4 mini-dcache optimised copy_user_page
+ * ARMv4 mini-dcache optimised copy_user_highpage
  *
  * We flush the destination cache lines just before we write the data into the
  * corresponding address. Since the Dcache is read-allocate, this removes the
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(minicache_lock);
  *
  * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
  * instruction. If your processor does not supply this, you have to write your
- * own copy_user_page that does the right thing.
+ * own copy_user_highpage that does the right thing.
  */
 static void __attribute__((naked))
 mc_copy_user_page(void *from, void *to)
@@ -68,50 +68,53 @@ mc_copy_user_page(void *from, void *to)
 	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
 }
 
-void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+void v4_mc_copy_user_highpage(struct page *from, struct page *to,
+	unsigned long vaddr)
 {
-	struct page *page = virt_to_page(kfrom);
+	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-		__flush_dcache_page(page_mapping(page), page);
+	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
 
-	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
+	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
 	flush_tlb_kernel_page(0xffff8000);
 
 	mc_copy_user_page((void *)0xffff8000, kto);
 
 	spin_unlock(&minicache_lock);
+
+	kunmap_atomic(kto, KM_USER1);
 }
 
 /*
  * ARMv4 optimised clear_user_page
  */
-void __attribute__((naked))
-v4_mc_clear_user_page(void *kaddr, unsigned long vaddr)
+void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	asm volatile(
-	"str	lr, [sp, #-4]!\n\
-	mov	r1, %0				@ 1\n\
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile("\
+	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
 	mov	r3, #0				@ 1\n\
 	mov	ip, #0				@ 1\n\
 	mov	lr, #0				@ 1\n\
-1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
-	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
-	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
-	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
-	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
-	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
 	subs	r1, r1, #1			@ 1\n\
-	bne	1b				@ 1\n\
-	ldr	pc, [sp], #4"
-	:
-	: "I" (PAGE_SIZE / 64));
+	bne	1b				@ 1"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 64)
+	: "r1", "r2", "r3", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 struct cpu_user_fns v4_mc_user_fns __initdata = {
-	.cpu_clear_user_page	= v4_mc_clear_user_page,
-	.cpu_copy_user_page	= v4_mc_copy_user_page,
+	.cpu_clear_user_highpage = v4_mc_clear_user_highpage,
+	.cpu_copy_user_highpage	= v4_mc_copy_user_highpage,
 };
diff --git a/arch/arm/mm/copypage-v4wb.S b/arch/arm/mm/copypage-v4wb.S
deleted file mode 100644
index 83117354b1cd..000000000000
--- a/arch/arm/mm/copypage-v4wb.S
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * linux/arch/arm/lib/copypage.S
- *
- * Copyright (C) 1995-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * ASM optimised string functions
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/asm-offsets.h>
-
-	.text
-	.align	5
-/*
- * ARMv4 optimised copy_user_page
- *
- * We flush the destination cache lines just before we write the data into the
- * corresponding address. Since the Dcache is read-allocate, this removes the
- * Dcache aliasing issue. The writes will be forwarded to the write buffer,
- * and merged as appropriate.
- *
- * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
- * instruction. If your processor does not supply this, you have to write your
- * own copy_user_page that does the right thing.
- */
-ENTRY(v4wb_copy_user_page)
-	stmfd	sp!, {r4, lr}			@ 2
-	mov	r2, #PAGE_SZ/64			@ 1
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4
-1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4+1
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4
-	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4
-	subs	r2, r2, #1			@ 1
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmneia	r1!, {r3, r4, ip, lr}		@ 4
-	bne	1b				@ 1
-	mcr	p15, 0, r1, c7, c10, 4		@ 1   drain WB
-	ldmfd	sp!, {r4, pc}			@ 3
-
-	.align	5
-/*
- * ARMv4 optimised clear_user_page
- *
- * Same story as above.
- */
-ENTRY(v4wb_clear_user_page)
-	str	lr, [sp, #-4]!
-	mov	r1, #PAGE_SZ/64			@ 1
-	mov	r2, #0				@ 1
-	mov	r3, #0				@ 1
-	mov	ip, #0				@ 1
-	mov	lr, #0				@ 1
-1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	subs	r1, r1, #1			@ 1
-	bne	1b				@ 1
-	mcr	p15, 0, r1, c7, c10, 4		@ 1   drain WB
-	ldr	pc, [sp], #4
-
-	__INITDATA
-
-	.type	v4wb_user_fns, #object
-ENTRY(v4wb_user_fns)
-	.long	v4wb_clear_user_page
-	.long	v4wb_copy_user_page
-	.size	v4wb_user_fns, . - v4wb_user_fns
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
new file mode 100644
index 000000000000..3ec93dab7656
--- /dev/null
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -0,0 +1,94 @@
+/*
+ * linux/arch/arm/mm/copypage-v4wb.c
+ *
+ * Copyright (C) 1995-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+/*
+ * ARMv4 optimised copy_user_highpage
+ *
+ * We flush the destination cache lines just before we write the data into the
+ * corresponding address. Since the Dcache is read-allocate, this removes the
+ * Dcache aliasing issue. The writes will be forwarded to the write buffer,
+ * and merged as appropriate.
+ *
+ * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
+ * instruction. If your processor does not supply this, you have to write your
+ * own copy_user_highpage that does the right thing.
+ */
+static void __attribute__((naked))
+v4wb_copy_user_page(void *kto, const void *kfrom)
+{
+	asm("\
+	stmfd	sp!, {r4, lr}			@ 2\n\
+	mov	r2, %0				@ 1\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
+1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4+1\n\
+	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
+	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
+	subs	r2, r2, #1			@ 1\n\
+	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmneia	r1!, {r3, r4, ip, lr}		@ 4\n\
+	bne	1b				@ 1\n\
+	mcr	p15, 0, r1, c7, c10, 4		@ 1   drain WB\n\
+	ldmfd	sp!, {r4, pc}			@ 3"
+	:
+	: "I" (PAGE_SIZE / 64));
+}
+
+void v4wb_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	v4wb_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
+/*
+ * ARMv4 optimised clear_user_page
+ *
+ * Same story as above.
+ */
+void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile("\
+	mov	r1, %2				@ 1\n\
+	mov	r2, #0				@ 1\n\
+	mov	r3, #0				@ 1\n\
+	mov	ip, #0				@ 1\n\
+	mov	lr, #0				@ 1\n\
+1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	subs	r1, r1, #1			@ 1\n\
+	bne	1b				@ 1\n\
+	mcr	p15, 0, r1, c7, c10, 4		@ 1   drain WB"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 64)
+	: "r1", "r2", "r3", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
+}
+
+struct cpu_user_fns v4wb_user_fns __initdata = {
+	.cpu_clear_user_highpage = v4wb_clear_user_highpage,
+	.cpu_copy_user_highpage	= v4wb_copy_user_highpage,
+};
diff --git a/arch/arm/mm/copypage-v4wt.S b/arch/arm/mm/copypage-v4wt.S
deleted file mode 100644
index e1f2af28d549..000000000000
--- a/arch/arm/mm/copypage-v4wt.S
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * linux/arch/arm/lib/copypage-v4.S
- *
- * Copyright (C) 1995-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * ASM optimised string functions
- *
- * This is for CPUs with a writethrough cache and 'flush ID cache' is
- * the only supported cache operation.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/asm-offsets.h>
-
-	.text
-	.align	5
-/*
- * ARMv4 optimised copy_user_page
- *
- * Since we have writethrough caches, we don't have to worry about
- * dirty data in the cache. However, we do have to ensure that
- * subsequent reads are up to date.
- */
-ENTRY(v4wt_copy_user_page)
-	stmfd	sp!, {r4, lr}			@ 2
-	mov	r2, #PAGE_SZ/64			@ 1
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4
-1:	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4+1
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4
-	subs	r2, r2, #1			@ 1
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmneia	r1!, {r3, r4, ip, lr}		@ 4
-	bne	1b				@ 1
-	mcr	p15, 0, r2, c7, c7, 0		@ flush ID cache
-	ldmfd	sp!, {r4, pc}			@ 3
-
-	.align	5
-/*
- * ARMv4 optimised clear_user_page
- *
- * Same story as above.
- */
-ENTRY(v4wt_clear_user_page)
-	str	lr, [sp, #-4]!
-	mov	r1, #PAGE_SZ/64			@ 1
-	mov	r2, #0				@ 1
-	mov	r3, #0				@ 1
-	mov	ip, #0				@ 1
-	mov	lr, #0				@ 1
-1:	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	subs	r1, r1, #1			@ 1
-	bne	1b				@ 1
-	mcr	p15, 0, r2, c7, c7, 0		@ flush ID cache
-	ldr	pc, [sp], #4
-
-	__INITDATA
-
-	.type	v4wt_user_fns, #object
-ENTRY(v4wt_user_fns)
-	.long	v4wt_clear_user_page
-	.long	v4wt_copy_user_page
-	.size	v4wt_user_fns, . - v4wt_user_fns
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
new file mode 100644
index 000000000000..0f1188efae45
--- /dev/null
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -0,0 +1,88 @@
+/*
+ * linux/arch/arm/mm/copypage-v4wt.S
+ *
+ * Copyright (C) 1995-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is for CPUs with a writethrough cache and 'flush ID cache' is
+ * the only supported cache operation.
+ */
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+/*
+ * ARMv4 optimised copy_user_highpage
+ *
+ * Since we have writethrough caches, we don't have to worry about
+ * dirty data in the cache. However, we do have to ensure that
+ * subsequent reads are up to date.
+ */
+static void __attribute__((naked))
+v4wt_copy_user_page(void *kto, const void *kfrom)
+{
+	asm("\
+	stmfd	sp!, {r4, lr}			@ 2\n\
+	mov	r2, %0				@ 1\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
+1:	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4+1\n\
+	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
+	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
+	subs	r2, r2, #1			@ 1\n\
+	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmneia	r1!, {r3, r4, ip, lr}		@ 4\n\
+	bne	1b				@ 1\n\
+	mcr	p15, 0, r2, c7, c7, 0		@ flush ID cache\n\
+	ldmfd	sp!, {r4, pc}			@ 3"
+	:
+	: "I" (PAGE_SIZE / 64));
+}
+
+void v4wt_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	v4wt_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
+/*
+ * ARMv4 optimised clear_user_page
+ *
+ * Same story as above.
+ */
+void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile("\
+	mov	r1, %2				@ 1\n\
+	mov	r2, #0				@ 1\n\
+	mov	r3, #0				@ 1\n\
+	mov	ip, #0				@ 1\n\
+	mov	lr, #0				@ 1\n\
+1:	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	subs	r1, r1, #1			@ 1\n\
+	bne	1b				@ 1\n\
+	mcr	p15, 0, r2, c7, c7, 0		@ flush ID cache"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 64)
+	: "r1", "r2", "r3", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
+}
+
+struct cpu_user_fns v4wt_user_fns __initdata = {
+	.cpu_clear_user_highpage = v4wt_clear_user_highpage,
+	.cpu_copy_user_highpage	= v4wt_copy_user_highpage,
+};
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 0e21c0767580..4127a7bddfe5 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -10,8 +10,8 @@
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 
-#include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/shmparam.h>
 #include <asm/tlbflush.h>
@@ -33,41 +33,56 @@ static DEFINE_SPINLOCK(v6_lock);
  * Copy the user page. No aliasing to deal with so we can just
  * attack the kernel's existing mapping of these pages.
  */
-static void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void v6_copy_user_highpage_nonaliasing(struct page *to,
+	struct page *from, unsigned long vaddr)
 {
+	void *kto, *kfrom;
+
+	kfrom = kmap_atomic(from, KM_USER0);
+	kto = kmap_atomic(to, KM_USER1);
 	copy_page(kto, kfrom);
+	kunmap_atomic(kto, KM_USER1);
+	kunmap_atomic(kfrom, KM_USER0);
 }
 
 /*
  * Clear the user page. No aliasing to deal with so we can just
  * attack the kernel's existing mapping of this page.
  */
-static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
 {
+	void *kaddr = kmap_atomic(page, KM_USER0);
 	clear_page(kaddr);
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 /*
- * Copy the page, taking account of the cache colour.
+ * Discard data in the kernel mapping for the new page.
+ * FIXME: needs this MCRR to be supported.
  */
-static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void discard_old_kernel_data(void *kto)
 {
-	unsigned int offset = CACHE_COLOUR(vaddr);
-	unsigned long from, to;
-	struct page *page = virt_to_page(kfrom);
-
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-		__flush_dcache_page(page_mapping(page), page);
-
-	/*
-	 * Discard data in the kernel mapping for the new page.
-	 * FIXME: needs this MCRR to be supported.
-	 */
 	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
 	   :
 	   : "r" (kto),
 	     "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
 	   : "cc");
+}
+
+/*
+ * Copy the page, taking account of the cache colour.
+ */
+static void v6_copy_user_highpage_aliasing(struct page *to,
+	struct page *from, unsigned long vaddr)
+{
+	unsigned int offset = CACHE_COLOUR(vaddr);
+	unsigned long kfrom, kto;
+
+	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+		__flush_dcache_page(page_mapping(from), from);
+
+	/* FIXME: not highmem safe */
+	discard_old_kernel_data(page_address(to));
 
 	/*
 	 * Now copy the page using the same cache colour as the
@@ -75,16 +90,16 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL), 0);
-	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
 
-	from = from_address + (offset << PAGE_SHIFT);
-	to = to_address + (offset << PAGE_SHIFT);
+	kfrom = from_address + (offset << PAGE_SHIFT);
+	kto = to_address + (offset << PAGE_SHIFT);
 
-	flush_tlb_kernel_page(from);
-	flush_tlb_kernel_page(to);
+	flush_tlb_kernel_page(kfrom);
+	flush_tlb_kernel_page(kto);
 
-	copy_page((void *)to, (void *)from);
+	copy_page((void *)kto, (void *)kfrom);
 
 	spin_unlock(&v6_lock);
 }
@@ -94,20 +109,13 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo
  * so remap the kernel page into the same cache colour as the user
  * page.
  */
-static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
 {
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long to = to_address + (offset << PAGE_SHIFT);
 
-	/*
-	 * Discard data in the kernel mapping for the new page
-	 * FIXME: needs this MCRR to be supported.
-	 */
-	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
-	   :
-	   : "r" (kaddr),
-	     "r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES)
-	   : "cc");
+	/* FIXME: not highmem safe */
+	discard_old_kernel_data(page_address(page));
 
 	/*
 	 * Now clear the page using the same cache colour as
@@ -115,7 +123,7 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
 	flush_tlb_kernel_page(to);
 	clear_page((void *)to);
 
@@ -123,15 +131,15 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 }
 
 struct cpu_user_fns v6_user_fns __initdata = {
-	.cpu_clear_user_page	= v6_clear_user_page_nonaliasing,
-	.cpu_copy_user_page	= v6_copy_user_page_nonaliasing,
+	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
+	.cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing,
 };
 
 static int __init v6_userpage_init(void)
 {
 	if (cache_is_vipt_aliasing()) {
-		cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
-		cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing;
+		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
+		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
 	}
 
 	return 0;
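
The aliasing paths above work because an aliasing VIPT D-cache indexes its sets by virtual address: remapping the page at a scratch address with the same cache colour as the user mapping guarantees the copy or clear touches the same cache sets the user will read through. A sketch of the colour arithmetic, assuming the conventional ARM definitions (SHMLBA is four pages on ARM; CACHE_COLOUR() lives in the ARM mm headers, so its exact form here is an assumption):

	#define SHMLBA			(4 * PAGE_SIZE)	/* 4 possible page colours */
	#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

	/* Worked example with 4K pages: a user page at 0x4000b000 has
	 * colour (0x4000b000 & 0x3fff) >> 12 = 3, so the kernel alias is
	 * installed at to_address + (3 << PAGE_SHIFT) and indexes the
	 * same cache sets as the user mapping. */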
diff --git a/arch/arm/mm/copypage-xsc3.S b/arch/arm/mm/copypage-xsc3.S
deleted file mode 100644
index 9a2cb4332b4c..000000000000
--- a/arch/arm/mm/copypage-xsc3.S
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * linux/arch/arm/lib/copypage-xsc3.S
- *
- * Copyright (C) 2004 Intel Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Adapted for 3rd gen XScale core, no more mini-dcache
- * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
- */
-
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/asm-offsets.h>
-
-/*
- * General note:
- * We don't really want write-allocate cache behaviour for these functions
- * since that will just eat through 8K of the cache.
- */
-
-	.text
-	.align	5
-/*
- * XSC3 optimised copy_user_page
- *  r0 = destination
- *  r1 = source
- *  r2 = virtual user address of ultimate destination page
- *
- * The source page may have some clean entries in the cache already, but we
- * can safely ignore them - break_cow() will flush them out of the cache
- * if we eventually end up using our copied page.
- *
- */
-ENTRY(xsc3_mc_copy_user_page)
-	stmfd	sp!, {r4, r5, lr}
-	mov	lr, #PAGE_SZ/64-1
-
-	pld	[r1, #0]
-	pld	[r1, #32]
-1:	pld	[r1, #64]
-	pld	[r1, #96]
-
-2:	ldrd	r2, [r1], #8
-	mov	ip, r0
-	ldrd	r4, [r1], #8
-	mcr	p15, 0, ip, c7, c6, 1		@ invalidate
-	strd	r2, [r0], #8
-	ldrd	r2, [r1], #8
-	strd	r4, [r0], #8
-	ldrd	r4, [r1], #8
-	strd	r2, [r0], #8
-	strd	r4, [r0], #8
-	ldrd	r2, [r1], #8
-	mov	ip, r0
-	ldrd	r4, [r1], #8
-	mcr	p15, 0, ip, c7, c6, 1		@ invalidate
-	strd	r2, [r0], #8
-	ldrd	r2, [r1], #8
-	subs	lr, lr, #1
-	strd	r4, [r0], #8
-	ldrd	r4, [r1], #8
-	strd	r2, [r0], #8
-	strd	r4, [r0], #8
-	bgt	1b
-	beq	2b
-
-	ldmfd	sp!, {r4, r5, pc}
-
-	.align	5
-/*
- * XScale optimised clear_user_page
- *  r0 = destination
- *  r1 = virtual user address of ultimate destination page
- */
-ENTRY(xsc3_mc_clear_user_page)
-	mov	r1, #PAGE_SZ/32
-	mov	r2, #0
-	mov	r3, #0
-1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate line
-	strd	r2, [r0], #8
-	strd	r2, [r0], #8
-	strd	r2, [r0], #8
-	strd	r2, [r0], #8
-	subs	r1, r1, #1
-	bne	1b
-	mov	pc, lr
-
-	__INITDATA
-
-	.type	xsc3_mc_user_fns, #object
-ENTRY(xsc3_mc_user_fns)
-	.long	xsc3_mc_clear_user_page
-	.long	xsc3_mc_copy_user_page
-	.size	xsc3_mc_user_fns, . - xsc3_mc_user_fns
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
new file mode 100644
index 000000000000..39a994542cad
--- /dev/null
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -0,0 +1,113 @@
+/*
+ * linux/arch/arm/mm/copypage-xsc3.S
+ *
+ * Copyright (C) 2004 Intel Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Adapted for 3rd gen XScale core, no more mini-dcache
+ * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
+ */
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+/*
+ * General note:
+ * We don't really want write-allocate cache behaviour for these functions
+ * since that will just eat through 8K of the cache.
+ */
+
+/*
+ * XSC3 optimised copy_user_highpage
+ *  r0 = destination
+ *  r1 = source
+ *
+ * The source page may have some clean entries in the cache already, but we
+ * can safely ignore them - break_cow() will flush them out of the cache
+ * if we eventually end up using our copied page.
+ *
+ */
+static void __attribute__((naked))
+xsc3_mc_copy_user_page(void *kto, const void *kfrom)
+{
+	asm("\
+	stmfd	sp!, {r4, r5, lr}		\n\
+	mov	lr, %0				\n\
+						\n\
+	pld	[r1, #0]			\n\
+	pld	[r1, #32]			\n\
+1:	pld	[r1, #64]			\n\
+	pld	[r1, #96]			\n\
+						\n\
+2:	ldrd	r2, [r1], #8			\n\
+	mov	ip, r0				\n\
+	ldrd	r4, [r1], #8			\n\
+	mcr	p15, 0, ip, c7, c6, 1		@ invalidate\n\
+	strd	r2, [r0], #8			\n\
+	ldrd	r2, [r1], #8			\n\
+	strd	r4, [r0], #8			\n\
+	ldrd	r4, [r1], #8			\n\
+	strd	r2, [r0], #8			\n\
+	strd	r4, [r0], #8			\n\
+	ldrd	r2, [r1], #8			\n\
+	mov	ip, r0				\n\
+	ldrd	r4, [r1], #8			\n\
+	mcr	p15, 0, ip, c7, c6, 1		@ invalidate\n\
+	strd	r2, [r0], #8			\n\
+	ldrd	r2, [r1], #8			\n\
+	subs	lr, lr, #1			\n\
+	strd	r4, [r0], #8			\n\
+	ldrd	r4, [r1], #8			\n\
+	strd	r2, [r0], #8			\n\
+	strd	r4, [r0], #8			\n\
+	bgt	1b				\n\
+	beq	2b				\n\
+						\n\
+	ldmfd	sp!, {r4, r5, pc}"
+	:
+	: "I" (PAGE_SIZE / 64 - 1));
+}
+
+void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	xsc3_mc_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
+/*
+ * XScale optimised clear_user_page
+ *  r0 = destination
+ *  r1 = virtual user address of ultimate destination page
+ */
+void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile ("\
+	mov	r1, %2				\n\
+	mov	r2, #0				\n\
+	mov	r3, #0				\n\
+1:	mcr	p15, 0, %0, c7, c6, 1		@ invalidate line\n\
+	strd	r2, [%0], #8			\n\
+	strd	r2, [%0], #8			\n\
+	strd	r2, [%0], #8			\n\
+	strd	r2, [%0], #8			\n\
+	subs	r1, r1, #1			\n\
+	bne	1b"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 32)
+	: "r1", "r2", "r3");
+	kunmap_atomic(kaddr, KM_USER0);
+}
+
+struct cpu_user_fns xsc3_mc_user_fns __initdata = {
+	.cpu_clear_user_highpage = xsc3_mc_clear_user_highpage,
+	.cpu_copy_user_highpage	= xsc3_mc_copy_user_highpage,
+};
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index bad49331bbf9..d18f2397ee2d 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -15,8 +15,8 @@
  */
 #include <linux/init.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 
-#include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
@@ -35,7 +35,7 @@
 static DEFINE_SPINLOCK(minicache_lock);
 
 /*
- * XScale mini-dcache optimised copy_user_page
+ * XScale mini-dcache optimised copy_user_highpage
  *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address. Since the Dcache is read-allocate, this removes the
@@ -90,48 +90,53 @@ mc_copy_user_page(void *from, void *to)
 	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
 }
 
-void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
 {
-	struct page *page = virt_to_page(kfrom);
+	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-		__flush_dcache_page(page_mapping(page), page);
+	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
 
-	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
+	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
 	flush_tlb_kernel_page(COPYPAGE_MINICACHE);
 
 	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
 
 	spin_unlock(&minicache_lock);
+
+	kunmap_atomic(kto, KM_USER1);
 }
 
 /*
  * XScale optimised clear_user_page
  */
-void __attribute__((naked))
-xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr)
+void
+xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
 	asm volatile(
-	"mov	r1, %0				\n\
+	"mov	r1, %2				\n\
 	mov	r2, #0				\n\
 	mov	r3, #0				\n\
-1:	mov	ip, r0				\n\
-	strd	r2, [r0], #8			\n\
-	strd	r2, [r0], #8			\n\
-	strd	r2, [r0], #8			\n\
-	strd	r2, [r0], #8			\n\
+1:	mov	ip, %0				\n\
+	strd	r2, [%0], #8			\n\
+	strd	r2, [%0], #8			\n\
+	strd	r2, [%0], #8			\n\
+	strd	r2, [%0], #8			\n\
 	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
 	subs	r1, r1, #1			\n\
 	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
-	bne	1b				\n\
-	mov	pc, lr"
-	:
-	: "I" (PAGE_SIZE / 32));
+	bne	1b"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 32)
+	: "r1", "r2", "r3", "ip");
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 struct cpu_user_fns xscale_mc_user_fns __initdata = {
-	.cpu_clear_user_page	= xscale_mc_clear_user_page,
-	.cpu_copy_user_page	= xscale_mc_copy_user_page,
+	.cpu_clear_user_highpage = xscale_mc_clear_user_highpage,
+	.cpu_copy_user_highpage = xscale_mc_copy_user_highpage,
 };
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 2df8d9facf57..ffd8b228a139 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
+#include <linux/page-flags.h>
 
 #include <asm/system.h>
 #include <asm/pgtable.h>
@@ -83,13 +84,14 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 			break;
 		}
 
-#ifndef CONFIG_HIGHMEM
 		/* We must not map this if we have highmem enabled */
+		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
+			break;
+
 		pte = pte_offset_map(pmd, addr);
 		printk(", *pte=%08lx", pte_val(*pte));
 		printk(", *ppte=%08lx", pte_val(pte[-PTRS_PER_PTE]));
 		pte_unmap(pte);
-#endif
 	} while(0);
 
 	printk("\n");
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 82c4b4217989..ab5c9abd5c34 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -64,10 +64,11 @@ static int __init parse_tag_initrd2(const struct tag *tag) | |||
64 | __tagtable(ATAG_INITRD2, parse_tag_initrd2); | 64 | __tagtable(ATAG_INITRD2, parse_tag_initrd2); |
65 | 65 | ||
66 | /* | 66 | /* |
67 | * This is used to pass memory configuration data from paging_init | 67 | * This keeps memory configuration data used by a couple memory |
68 | * to mem_init, and by show_mem() to skip holes in the memory map. | 68 | * initialization functions, as well as show_mem() for the skipping |
69 | * of holes in the memory map. It is populated by arm_add_memory(). | ||
69 | */ | 70 | */ |
70 | static struct meminfo meminfo = { 0, }; | 71 | struct meminfo meminfo; |
71 | 72 | ||
72 | void show_mem(void) | 73 | void show_mem(void) |
73 | { | 74 | { |
@@ -331,13 +332,12 @@ static void __init bootmem_free_node(int node, struct meminfo *mi) | |||
331 | free_area_init_node(node, zone_size, start_pfn, zhole_size); | 332 | free_area_init_node(node, zone_size, start_pfn, zhole_size); |
332 | } | 333 | } |
333 | 334 | ||
334 | void __init bootmem_init(struct meminfo *mi) | 335 | void __init bootmem_init(void) |
335 | { | 336 | { |
337 | struct meminfo *mi = &meminfo; | ||
336 | unsigned long memend_pfn = 0; | 338 | unsigned long memend_pfn = 0; |
337 | int node, initrd_node; | 339 | int node, initrd_node; |
338 | 340 | ||
339 | memcpy(&meminfo, mi, sizeof(meminfo)); | ||
340 | |||
341 | /* | 341 | /* |
342 | * Locate which node contains the ramdisk image, if any. | 342 | * Locate which node contains the ramdisk image, if any. |
343 | */ | 343 | */ |
@@ -394,20 +394,22 @@ void __init bootmem_init(struct meminfo *mi) | |||
394 | max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET; | 394 | max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET; |
395 | } | 395 | } |
396 | 396 | ||
397 | static inline void free_area(unsigned long addr, unsigned long end, char *s) | 397 | static inline int free_area(unsigned long pfn, unsigned long end, char *s) |
398 | { | 398 | { |
399 | unsigned int size = (end - addr) >> 10; | 399 | unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10); |
400 | 400 | ||
401 | for (; addr < end; addr += PAGE_SIZE) { | 401 | for (; pfn < end; pfn++) { |
402 | struct page *page = virt_to_page(addr); | 402 | struct page *page = pfn_to_page(pfn); |
403 | ClearPageReserved(page); | 403 | ClearPageReserved(page); |
404 | init_page_count(page); | 404 | init_page_count(page); |
405 | free_page(addr); | 405 | __free_page(page); |
406 | totalram_pages++; | 406 | pages++; |
407 | } | 407 | } |
408 | 408 | ||
409 | if (size && s) | 409 | if (size && s) |
410 | printk(KERN_INFO "Freeing %s memory: %dK\n", s, size); | 410 | printk(KERN_INFO "Freeing %s memory: %dK\n", s, size); |
411 | |||
412 | return pages; | ||
411 | } | 413 | } |
412 | 414 | ||
413 | static inline void | 415 | static inline void |
@@ -478,13 +480,9 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi) | |||
478 | */ | 480 | */ |
479 | void __init mem_init(void) | 481 | void __init mem_init(void) |
480 | { | 482 | { |
481 | unsigned int codepages, datapages, initpages; | 483 | unsigned int codesize, datasize, initsize; |
482 | int i, node; | 484 | int i, node; |
483 | 485 | ||
484 | codepages = &_etext - &_text; | ||
485 | datapages = &_end - &__data_start; | ||
486 | initpages = &__init_end - &__init_begin; | ||
487 | |||
488 | #ifndef CONFIG_DISCONTIGMEM | 486 | #ifndef CONFIG_DISCONTIGMEM |
489 | max_mapnr = virt_to_page(high_memory) - mem_map; | 487 | max_mapnr = virt_to_page(high_memory) - mem_map; |
490 | #endif | 488 | #endif |
@@ -501,7 +499,8 @@ void __init mem_init(void) | |||
501 | 499 | ||
502 | #ifdef CONFIG_SA1111 | 500 | #ifdef CONFIG_SA1111 |
503 | /* now that our DMA memory is actually so designated, we can free it */ | 501 | /* now that our DMA memory is actually so designated, we can free it */ |
504 | free_area(PAGE_OFFSET, (unsigned long)swapper_pg_dir, NULL); | 502 | totalram_pages += free_area(PHYS_PFN_OFFSET, |
503 | __phys_to_pfn(__pa(swapper_pg_dir)), NULL); | ||
505 | #endif | 504 | #endif |
506 | 505 | ||
507 | /* | 506 | /* |
@@ -509,18 +508,21 @@ void __init mem_init(void) | |||
509 | * real number of pages we have in this system | 508 | * real number of pages we have in this system |
510 | */ | 509 | */ |
511 | printk(KERN_INFO "Memory:"); | 510 | printk(KERN_INFO "Memory:"); |
512 | |||
513 | num_physpages = 0; | 511 | num_physpages = 0; |
514 | for (i = 0; i < meminfo.nr_banks; i++) { | 512 | for (i = 0; i < meminfo.nr_banks; i++) { |
515 | num_physpages += bank_pfn_size(&meminfo.bank[i]); | 513 | num_physpages += bank_pfn_size(&meminfo.bank[i]); |
516 | printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20); | 514 | printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20); |
517 | } | 515 | } |
518 | |||
519 | printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); | 516 | printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); |
517 | |||
518 | codesize = &_etext - &_text; | ||
519 | datasize = &_end - &__data_start; | ||
520 | initsize = &__init_end - &__init_begin; | ||
521 | |||
520 | printk(KERN_NOTICE "Memory: %luKB available (%dK code, " | 522 | printk(KERN_NOTICE "Memory: %luKB available (%dK code, " |
521 | "%dK data, %dK init)\n", | 523 | "%dK data, %dK init)\n", |
522 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), | 524 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), |
523 | codepages >> 10, datapages >> 10, initpages >> 10); | 525 | codesize >> 10, datasize >> 10, initsize >> 10); |
524 | 526 | ||
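The rename from codepages/datapages/initpages to codesize/datasize/initsize in this hunk is more than cosmetic: a difference of linker symbols such as &_etext - &_text is a byte count, not a page count, which is why the printk shifts by 10 to report KB. In sketch form:

    /* _text and _etext are linker-provided section boundary symbols */
    codesize = &_etext - &_text;            /* bytes of kernel text  */
    printk("%dK code\n", codesize >> 10);   /* bytes -> KB via >> 10 */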
525 | if (PAGE_SIZE >= 16384 && num_physpages <= 128) { | 527 | if (PAGE_SIZE >= 16384 && num_physpages <= 128) { |
526 | extern int sysctl_overcommit_memory; | 528 | extern int sysctl_overcommit_memory; |
@@ -535,11 +537,10 @@ void __init mem_init(void) | |||
535 | 537 | ||
536 | void free_initmem(void) | 538 | void free_initmem(void) |
537 | { | 539 | { |
538 | if (!machine_is_integrator() && !machine_is_cintegrator()) { | 540 | if (!machine_is_integrator() && !machine_is_cintegrator()) |
539 | free_area((unsigned long)(&__init_begin), | 541 | totalram_pages += free_area(__phys_to_pfn(__pa(&__init_begin)), |
540 | (unsigned long)(&__init_end), | 542 | __phys_to_pfn(__pa(&__init_end)), |
541 | "init"); | 543 | "init"); |
542 | } | ||
543 | } | 544 | } |
544 | 545 | ||
545 | #ifdef CONFIG_BLK_DEV_INITRD | 546 | #ifdef CONFIG_BLK_DEV_INITRD |
@@ -549,7 +550,9 @@ static int keep_initrd; | |||
549 | void free_initrd_mem(unsigned long start, unsigned long end) | 550 | void free_initrd_mem(unsigned long start, unsigned long end) |
550 | { | 551 | { |
551 | if (!keep_initrd) | 552 | if (!keep_initrd) |
552 | free_area(start, end, "initrd"); | 553 | totalram_pages += free_area(__phys_to_pfn(__pa(start)), |
554 | __phys_to_pfn(__pa(end)), | ||
555 | "initrd"); | ||
553 | } | 556 | } |
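All three converted call sites share the same two-step idiom: __pa() maps a lowmem virtual address to a physical address, and __phys_to_pfn() shifts that down to a page frame number. A hypothetical wrapper naming the idiom (not part of this patch):

    /* hypothetical helper; __pa() and __phys_to_pfn() are the real macros */
    static inline unsigned long virt_to_pfn(unsigned long vaddr)
    {
            return __phys_to_pfn(__pa(vaddr));
    }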
554 | 557 | ||
555 | static int __init keepinitrd_setup(char *__unused) | 558 | static int __init keepinitrd_setup(char *__unused) |
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 5d9f53907b4e..94367bdbb5a8 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h | |||
@@ -32,7 +32,7 @@ struct meminfo; | |||
32 | struct pglist_data; | 32 | struct pglist_data; |
33 | 33 | ||
34 | void __init create_mapping(struct map_desc *md); | 34 | void __init create_mapping(struct map_desc *md); |
35 | void __init bootmem_init(struct meminfo *mi); | 35 | void __init bootmem_init(void); |
36 | void reserve_node_zero(struct pglist_data *pgdat); | 36 | void reserve_node_zero(struct pglist_data *pgdat); |
37 | 37 | ||
38 | extern void _text, _stext, _etext, __data_start, _end, __init_begin, __init_end; | 38 | extern void _text, _stext, _etext, __data_start, _end, __init_begin, __init_end; |
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index f24803c1fb0b..c0b9a78d7b87 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -646,61 +646,79 @@ static void __init early_vmalloc(char **arg) | |||
646 | "vmalloc area too small, limiting to %luMB\n", | 646 | "vmalloc area too small, limiting to %luMB\n", |
647 | vmalloc_reserve >> 20); | 647 | vmalloc_reserve >> 20); |
648 | } | 648 | } |
649 | |||
650 | if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) { | ||
651 | vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M); | ||
652 | printk(KERN_WARNING | ||
653 | "vmalloc area is too big, limiting to %luMB\n", | ||
654 | vmalloc_reserve >> 20); | ||
655 | } | ||
649 | } | 656 | } |
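The newly added upper clamp is the counterpart of the existing lower one: it keeps at least 32MB of lowmem between PAGE_OFFSET and the start of the vmalloc area. Worked through with illustrative values (typical, not mandated by the patch):

    /* illustrative values only:
     *   VMALLOC_END = 0xff000000, PAGE_OFFSET = 0xc0000000
     *   max vmalloc_reserve = 0xff000000 - (0xc0000000 + SZ_32M)
     *                       = 0xff000000 - 0xc2000000
     *                       = 0x3d000000  (976MB)
     */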
650 | __early_param("vmalloc=", early_vmalloc); | 657 | __early_param("vmalloc=", early_vmalloc); |
651 | 658 | ||
652 | #define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve) | 659 | #define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve) |
653 | 660 | ||
654 | static int __init check_membank_valid(struct membank *mb) | 661 | static void __init sanity_check_meminfo(void) |
655 | { | 662 | { |
656 | /* | 663 | int i, j; |
657 | * Check whether this memory region has non-zero size or | ||
658 | * invalid node number. | ||
659 | */ | ||
660 | if (mb->size == 0 || mb->node >= MAX_NUMNODES) | ||
661 | return 0; | ||
662 | |||
663 | /* | ||
664 | * Check whether this memory region would entirely overlap | ||
665 | * the vmalloc area. | ||
666 | */ | ||
667 | if (phys_to_virt(mb->start) >= VMALLOC_MIN) { | ||
668 | printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx " | ||
669 | "(vmalloc region overlap).\n", | ||
670 | mb->start, mb->start + mb->size - 1); | ||
671 | return 0; | ||
672 | } | ||
673 | |||
674 | /* | ||
675 | * Check whether this memory region would partially overlap | ||
676 | * the vmalloc area. | ||
677 | */ | ||
678 | if (phys_to_virt(mb->start + mb->size) < phys_to_virt(mb->start) || | ||
679 | phys_to_virt(mb->start + mb->size) > VMALLOC_MIN) { | ||
680 | unsigned long newsize = VMALLOC_MIN - phys_to_virt(mb->start); | ||
681 | |||
682 | printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx " | ||
683 | "to -%.8lx (vmalloc region overlap).\n", | ||
684 | mb->start, mb->start + mb->size - 1, | ||
685 | mb->start + newsize - 1); | ||
686 | mb->size = newsize; | ||
687 | } | ||
688 | 664 | ||
689 | return 1; | 665 | for (i = 0, j = 0; i < meminfo.nr_banks; i++) { |
690 | } | 666 | struct membank *bank = &meminfo.bank[j]; |
667 | *bank = meminfo.bank[i]; | ||
691 | 668 | ||
692 | static void __init sanity_check_meminfo(struct meminfo *mi) | 669 | #ifdef CONFIG_HIGHMEM |
693 | { | 670 | /* |
694 | int i, j; | 671 | * Split those memory banks which partially overlap the |
672 | * vmalloc area, greatly simplifying things later. | ||
673 | */ | ||
674 | if (__va(bank->start) < VMALLOC_MIN && | ||
675 | bank->size > VMALLOC_MIN - __va(bank->start)) { | ||
676 | if (meminfo.nr_banks >= NR_BANKS) { | ||
677 | printk(KERN_CRIT "NR_BANKS too low, " | ||
678 | "ignoring high memory\n"); | ||
679 | } else { | ||
680 | memmove(bank + 1, bank, | ||
681 | (meminfo.nr_banks - i) * sizeof(*bank)); | ||
682 | meminfo.nr_banks++; | ||
683 | i++; | ||
684 | bank[1].size -= VMALLOC_MIN - __va(bank->start); | ||
685 | bank[1].start = __pa(VMALLOC_MIN - 1) + 1; | ||
686 | j++; | ||
687 | } | ||
688 | bank->size = VMALLOC_MIN - __va(bank->start); | ||
689 | } | ||
690 | #else | ||
691 | /* | ||
692 | * Check whether this memory bank would entirely overlap | ||
693 | * the vmalloc area. | ||
694 | */ | ||
695 | if (__va(bank->start) >= VMALLOC_MIN) { | ||
696 | printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx " | ||
697 | "(vmalloc region overlap).\n", | ||
698 | bank->start, bank->start + bank->size - 1); | ||
699 | continue; | ||
700 | } | ||
695 | 701 | ||
696 | for (i = 0, j = 0; i < mi->nr_banks; i++) { | 702 | /* |
697 | if (check_membank_valid(&mi->bank[i])) | 703 | * Check whether this memory bank would partially overlap |
698 | mi->bank[j++] = mi->bank[i]; | 704 | * the vmalloc area. |
705 | */ | ||
706 | if (__va(bank->start + bank->size) > VMALLOC_MIN || | ||
707 | __va(bank->start + bank->size) < __va(bank->start)) { | ||
708 | unsigned long newsize = VMALLOC_MIN - __va(bank->start); | ||
709 | printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx " | ||
710 | "to -%.8lx (vmalloc region overlap).\n", | ||
711 | bank->start, bank->start + bank->size - 1, | ||
712 | bank->start + newsize - 1); | ||
713 | bank->size = newsize; | ||
714 | } | ||
715 | #endif | ||
716 | j++; | ||
699 | } | 717 | } |
700 | mi->nr_banks = j; | 718 | meminfo.nr_banks = j; |
701 | } | 719 | } |
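Under CONFIG_HIGHMEM the loop above rewrites the bank array in place, splitting any bank that straddles the lowmem limit. A hedged walk-through, assuming PAGE_OFFSET = 0xc0000000, VMALLOC_MIN = (void *)0xf0000000 and a single 1GB bank at physical address 0:

    /* before: bank[0] = { start 0x00000000, size 0x40000000 }
     * after:  bank[0] = { start 0x00000000, size 0x30000000 }   lowmem
     *         bank[1] = { start 0x30000000, size 0x10000000 }   highmem
     */

bank[1].start is derived as __pa(VMALLOC_MIN - 1) + 1, so the two halves abut exactly at the lowmem boundary, and both survive into the compacted array via j.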
702 | 720 | ||
703 | static inline void prepare_page_table(struct meminfo *mi) | 721 | static inline void prepare_page_table(void) |
704 | { | 722 | { |
705 | unsigned long addr; | 723 | unsigned long addr; |
706 | 724 | ||
@@ -721,7 +739,7 @@ static inline void prepare_page_table(struct meminfo *mi) | |||
721 | * Clear out all the kernel space mappings, except for the first | 739 | * Clear out all the kernel space mappings, except for the first |
722 | * memory bank, up to the end of the vmalloc region. | 740 | * memory bank, up to the end of the vmalloc region. |
723 | */ | 741 | */ |
724 | for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size); | 742 | for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0])); |
725 | addr < VMALLOC_END; addr += PGDIR_SIZE) | 743 | addr < VMALLOC_END; addr += PGDIR_SIZE) |
726 | pmd_clear(pmd_off_k(addr)); | 744 | pmd_clear(pmd_off_k(addr)); |
727 | } | 745 | } |
@@ -880,14 +898,14 @@ static void __init devicemaps_init(struct machine_desc *mdesc) | |||
880 | * paging_init() sets up the page tables, initialises the zone memory | 898 | * paging_init() sets up the page tables, initialises the zone memory |
881 | * maps, and sets up the zero page, bad page and bad page tables. | 899 | * maps, and sets up the zero page, bad page and bad page tables. |
882 | */ | 900 | */ |
883 | void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc) | 901 | void __init paging_init(struct machine_desc *mdesc) |
884 | { | 902 | { |
885 | void *zero_page; | 903 | void *zero_page; |
886 | 904 | ||
887 | build_mem_type_table(); | 905 | build_mem_type_table(); |
888 | sanity_check_meminfo(mi); | 906 | sanity_check_meminfo(); |
889 | prepare_page_table(mi); | 907 | prepare_page_table(); |
890 | bootmem_init(mi); | 908 | bootmem_init(); |
891 | devicemaps_init(mdesc); | 909 | devicemaps_init(mdesc); |
892 | 910 | ||
893 | top_pmd = pmd_off_k(0xffff0000); | 911 | top_pmd = pmd_off_k(0xffff0000); |
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 07b62b238979..c085f4e8248b 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c | |||
@@ -41,27 +41,13 @@ void __init reserve_node_zero(pg_data_t *pgdat) | |||
41 | BOOTMEM_DEFAULT); | 41 | BOOTMEM_DEFAULT); |
42 | } | 42 | } |
43 | 43 | ||
44 | static void __init sanity_check_meminfo(struct meminfo *mi) | ||
45 | { | ||
46 | int i, j; | ||
47 | |||
48 | for (i = 0, j = 0; i < mi->nr_banks; i++) { | ||
49 | struct membank *mb = &mi->bank[i]; | ||
50 | |||
51 | if (mb->size != 0 && mb->node < MAX_NUMNODES) | ||
52 | mi->bank[j++] = mi->bank[i]; | ||
53 | } | ||
54 | mi->nr_banks = j; | ||
55 | } | ||
56 | |||
57 | /* | 44 | /* |
58 | * paging_init() sets up the page tables, initialises the zone memory | 45 | * paging_init() sets up the page tables, initialises the zone memory |
59 | * maps, and sets up the zero page, bad page and bad page tables. | 46 | * maps, and sets up the zero page, bad page and bad page tables. |
60 | */ | 47 | */ |
61 | void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc) | 48 | void __init paging_init(struct machine_desc *mdesc) |
62 | { | 49 | { |
63 | sanity_check_meminfo(mi); | 50 | bootmem_init(); |
64 | bootmem_init(mi); | ||
65 | } | 51 | } |
66 | 52 | ||
67 | /* | 53 | /* |
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c index 2b5ba396e3a6..4ad3bf291ad3 100644 --- a/arch/arm/mm/proc-syms.c +++ b/arch/arm/mm/proc-syms.c | |||
@@ -33,8 +33,8 @@ EXPORT_SYMBOL(cpu_cache); | |||
33 | 33 | ||
34 | #ifdef CONFIG_MMU | 34 | #ifdef CONFIG_MMU |
35 | #ifndef MULTI_USER | 35 | #ifndef MULTI_USER |
36 | EXPORT_SYMBOL(__cpu_clear_user_page); | 36 | EXPORT_SYMBOL(__cpu_clear_user_highpage); |
37 | EXPORT_SYMBOL(__cpu_copy_user_page); | 37 | EXPORT_SYMBOL(__cpu_copy_user_highpage); |
38 | #else | 38 | #else |
39 | EXPORT_SYMBOL(cpu_user); | 39 | EXPORT_SYMBOL(cpu_user); |
40 | #endif | 40 | #endif |
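The renamed exports reflect the copypage helpers being reworked elsewhere in this patch to operate on struct page, so that highmem pages can be kmapped rather than assumed permanently mapped. The prototypes implied by the new names (assumed from the renames; the header hunk is not shown here):

    /* assumed prototypes behind the renamed exports */
    void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
    void __cpu_copy_user_highpage(struct page *to, struct page *from,
                                  unsigned long vaddr);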