author		Paul Mundt <lethal@linux-sh.org>	2007-11-08 03:01:42 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2007-11-08 03:01:42 -0500
commit		541c54773173d95c8e99a7378a5bb63125c297f1
tree		59270b4498d0502f778e26978865e1e8c6eddc54 /arch/sh
parent		6d1c76d4e76ef72fce5a7169430ab8f9a68d7924
parent		af39c16bd843ee8bde495c8ccb95a5ca209f3051
Merge branch 'page_colouring_despair'
Diffstat (limited to 'arch/sh')
-rw-r--r--	arch/sh/mm/clear_page.S	45
-rw-r--r--	arch/sh/mm/copy_page.S	61
-rw-r--r--	arch/sh/mm/pg-sh4.c	75
3 files changed, 52 insertions, 129 deletions
diff --git a/arch/sh/mm/clear_page.S b/arch/sh/mm/clear_page.S
index 8a706131e521..7a7c81ee3f01 100644
--- a/arch/sh/mm/clear_page.S
+++ b/arch/sh/mm/clear_page.S
@@ -150,48 +150,3 @@ ENTRY(__clear_user)
 	.long	8b, .Lbad_clear_user
 	.long	9b, .Lbad_clear_user
 	.previous
-
-#if defined(CONFIG_CPU_SH4)
-/*
- * __clear_user_page
- * @to: P3 address (with same color)
- * @orig_to: P1 address
- *
- * void __clear_user_page(void *to, void *orig_to)
- */
-
-/*
- * r0 --- scratch
- * r4 --- to
- * r5 --- orig_to
- * r6 --- to + PAGE_SIZE
- */
-ENTRY(__clear_user_page)
-	mov.l	.Lpsz,r0
-	mov	r4,r6
-	add	r0,r6
-	mov	#0,r0
-	!
-1:	ocbi	@r5
-	add	#32,r5
-	movca.l	r0,@r4
-	mov	r4,r1
-	add	#32,r4
-	mov.l	r0,@-r4
-	mov.l	r0,@-r4
-	mov.l	r0,@-r4
-	mov.l	r0,@-r4
-	mov.l	r0,@-r4
-	mov.l	r0,@-r4
-	mov.l	r0,@-r4
-	add	#28,r4
-	cmp/eq	r6,r4
-	bf/s	1b
-	 ocbwb	@r1
-	!
-	rts
-	 nop
-.Lpsz:	.long	PAGE_SIZE
-
-#endif
-
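For readers who don't speak SH assembly, the __clear_user_page() routine removed above reduces to the rough C sketch below. It is an illustration only, not part of this commit: the PAGE_SIZE and line-size values and the dcache_*_line() helpers standing in for the ocbi/ocbwb instructions are assumptions. The __copy_user_page() loop removed from copy_page.S below follows the same pattern, with an eight-word load/store sequence in place of the memset.

#include <string.h>

#define PAGE_SIZE	4096UL		/* assumed 4 KiB pages */
#define L1_CACHE_BYTES	32		/* SH-4 cache line size */

/* Hypothetical stand-ins for the SH-4 ocbi/ocbwb cache-control
 * instructions; modelled as no-ops here. */
static void dcache_invalidate_line(void *line) { (void)line; }
static void dcache_writeback_line(void *line)  { (void)line; }

/* 'to' is the page's coloured P3 mapping, 'orig_to' its P1 (direct-mapped)
 * alias.  Each 32-byte line of the P1 alias is invalidated (ocbi) before
 * the corresponding line is cleared through the P3 mapping and written
 * back to memory (ocbwb). */
static void clear_user_page_sketch(void *to, void *orig_to)
{
	char *kto = to;
	char *alias = orig_to;
	char *end = kto + PAGE_SIZE;

	while (kto < end) {
		dcache_invalidate_line(alias);		/* ocbi  @r5       */
		memset(kto, 0, L1_CACHE_BYTES);		/* movca.l + mov.l */
		dcache_writeback_line(kto);		/* ocbwb @r1       */
		kto += L1_CACHE_BYTES;
		alias += L1_CACHE_BYTES;
	}
}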
diff --git a/arch/sh/mm/copy_page.S b/arch/sh/mm/copy_page.S
index 3d8409daa4be..40685018b952 100644
--- a/arch/sh/mm/copy_page.S
+++ b/arch/sh/mm/copy_page.S
@@ -68,67 +68,6 @@ ENTRY(copy_page_slow)
 	rts
 	 nop
 
-#if defined(CONFIG_CPU_SH4)
-/*
- * __copy_user_page
- * @to: P1 address (with same color)
- * @from: P1 address
- * @orig_to: P1 address
- *
- * void __copy_user_page(void *to, void *from, void *orig_to)
- */
-
-/*
- * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch
- * r8 --- from + PAGE_SIZE
- * r9 --- orig_to
- * r10 --- to
- * r11 --- from
- */
-ENTRY(__copy_user_page)
-	mov.l	r8,@-r15
-	mov.l	r9,@-r15
-	mov.l	r10,@-r15
-	mov.l	r11,@-r15
-	mov	r4,r10
-	mov	r5,r11
-	mov	r6,r9
-	mov	r5,r8
-	mov.l	.Lpsz,r0
-	add	r0,r8
-	!
-1:	ocbi	@r9
-	add	#32,r9
-	mov.l	@r11+,r0
-	mov.l	@r11+,r1
-	mov.l	@r11+,r2
-	mov.l	@r11+,r3
-	mov.l	@r11+,r4
-	mov.l	@r11+,r5
-	mov.l	@r11+,r6
-	mov.l	@r11+,r7
-	movca.l	r0,@r10
-	mov	r10,r0
-	add	#32,r10
-	mov.l	r7,@-r10
-	mov.l	r6,@-r10
-	mov.l	r5,@-r10
-	mov.l	r4,@-r10
-	mov.l	r3,@-r10
-	mov.l	r2,@-r10
-	mov.l	r1,@-r10
-	ocbwb	@r0
-	cmp/eq	r11,r8
-	bf/s	1b
-	 add	#28,r10
-	!
-	mov.l	@r15+,r11
-	mov.l	@r15+,r10
-	mov.l	@r15+,r9
-	mov.l	@r15+,r8
-	rts
-	 nop
-#endif
 	.align 2
 .Lpsz:	.long	PAGE_SIZE
 /*
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 25f5c6f6821d..8c7a9ca79879 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -9,6 +9,8 @@
 #include <linux/mm.h>
 #include <linux/mutex.h>
 #include <linux/fs.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
@@ -50,34 +52,61 @@ static inline void kunmap_coherent(struct page *page)
 void clear_user_page(void *to, unsigned long address, struct page *page)
 {
 	__set_bit(PG_mapped, &page->flags);
-	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
-		clear_page(to);
-	else {
-		void *vto = kmap_coherent(page, address);
-		__clear_user_page(vto, to);
-		kunmap_coherent(vto);
-	}
+
+	clear_page(to);
+	if ((((address & PAGE_MASK) ^ (unsigned long)to) & CACHE_ALIAS))
+		__flush_wback_region(to, PAGE_SIZE);
 }
 
-/*
- * copy_user_page
- * @to: P1 address
- * @from: P1 address
- * @address: U0 address to be mapped
- * @page: page (virt_to_page(to))
- */
-void copy_user_page(void *to, void *from, unsigned long address,
-		    struct page *page)
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+		       unsigned long vaddr, void *dst, const void *src,
+		       unsigned long len)
 {
+	void *vto;
+
 	__set_bit(PG_mapped, &page->flags);
-	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
-		copy_page(to, from);
-	else {
-		void *vfrom = kmap_coherent(page, address);
-		__copy_user_page(vfrom, from, to);
-		kunmap_coherent(vfrom);
-	}
+
+	vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+	memcpy(vto, src, len);
+	kunmap_coherent(vto);
+
+	if (vma->vm_flags & VM_EXEC)
+		flush_cache_page(vma, vaddr, page_to_pfn(page));
+}
+
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+			 unsigned long vaddr, void *dst, const void *src,
+			 unsigned long len)
+{
+	void *vfrom;
+
+	__set_bit(PG_mapped, &page->flags);
+
+	vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+	memcpy(dst, vfrom, len);
+	kunmap_coherent(vfrom);
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+			unsigned long vaddr, struct vm_area_struct *vma)
+{
+	void *vfrom, *vto;
+
+	__set_bit(PG_mapped, &to->flags);
+
+	vto = kmap_atomic(to, KM_USER1);
+	vfrom = kmap_coherent(from, vaddr);
+	copy_page(vto, vfrom);
+	kunmap_coherent(vfrom);
+
+	if (((vaddr ^ (unsigned long)vto) & CACHE_ALIAS))
+		__flush_wback_region(vto, PAGE_SIZE);
+
+	kunmap_atomic(vto, KM_USER1);
+	/* Make sure this page is cleared on other CPU's too before using it */
+	smp_wmb();
 }
+EXPORT_SYMBOL(copy_user_highpage);
 
 /*
  * For SH-4, we have our own implementation for ptep_get_and_clear
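The test that recurs in the new pg-sh4.c code, ((addr ^ kaddr) & CACHE_ALIAS), asks whether a user virtual address and a kernel mapping fall into different colours of the virtually indexed SH-4 data cache; only in that case does the kernel-side copy need an explicit writeback. A minimal standalone sketch of that check follows (the CACHE_ALIAS and PAGE_SIZE values here are placeholder assumptions for illustration, not the kernel's actual configuration):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define CACHE_ALIAS	0x3000UL	/* assumed alias bits for illustration */

/* Same shape of test as the new clear_user_page()/copy_user_highpage():
 * a page only needs an explicit writeback of the kernel-side lines when
 * the user virtual address and the kernel mapping differ in the alias
 * bits, i.e. land in different cache colours. */
static int needs_writeback(unsigned long user_addr, unsigned long kernel_addr)
{
	return (((user_addr & PAGE_MASK) ^ kernel_addr) & CACHE_ALIAS) != 0;
}

int main(void)
{
	/* Same colour: the alias bits (12-13 here) match, no flush needed. */
	printf("%d\n", needs_writeback(0x00401000UL, 0x8c001000UL));	/* 0 */
	/* Different colour: alias bits differ, writeback required. */
	printf("%d\n", needs_writeback(0x00401000UL, 0x8c002000UL));	/* 1 */
	return 0;
}

Compiled with any C compiler, this prints 0 for the same-colour pair and 1 for the differing pair.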