author | David Howells <dhowells@redhat.com> | 2008-04-10 11:10:55 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-10 16:41:29 -0400
commit | e31c243f984628d02f045dc4b622f1e2827860dc (patch)
tree | cd2ac2f33c7da86b515087260d93179b31fd1671 /arch
parent | 0c93d8e4d342b1b5cda1037f2527fcf443c80fbc (diff)
FRV: Add support for emulation of userspace atomic ops [try #2]
Use traps 120-126 to emulate atomic cmpxchg32, xchg32, and XOR-, OR-, AND-, SUB-
and ADD-to-memory operations for userspace.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
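
For illustration only (not part of this commit): a userspace wrapper for the cmpxchg32 emulation could look roughly like the sketch below. The calling convention is inferred from the atomic_operation() handler added in traps.c (arguments arrive in gr8-gr10, the trap number selects the operation, the original memory value comes back in gr5 and the replacement value in gr9); the function name frv_user_cmpxchg32 and the exact asm constraints are assumptions, not code from the kernel tree.

static inline unsigned int frv_user_cmpxchg32(unsigned int *ptr,
					      unsigned int test,
					      unsigned int new)
{
	/* hypothetical sketch: bind the operands to the registers the
	 * trap handler expects (gr8 = ptr, gr9 = test, gr10 = new) */
	register unsigned int *__ptr  asm("gr8")  = ptr;
	register unsigned int  __test asm("gr9")  = test;
	register unsigned int  __new  asm("gr10") = new;
	register unsigned int  __orig asm("gr5");

	/* trap 120 is emulated by the kernel as __atomic_user_cmpxchg32();
	 * gr5 receives the original value of *ptr, gr9 the replacement */
	asm volatile("tira gr0,#120"
		     : "=r"(__orig), "+r"(__test)
		     : "r"(__ptr), "r"(__new)
		     : "memory");

	return __orig;	/* caller compares this against 'test' */
}

A caller would treat this like any compare-and-exchange: the store happened if and only if the returned value equals 'test'.
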
Diffstat (limited to 'arch')
-rw-r--r-- | arch/frv/kernel/entry-table.S | 8
-rw-r--r-- | arch/frv/kernel/entry.S | 20
-rw-r--r-- | arch/frv/kernel/traps.c | 227
3 files changed, 254 insertions(+), 1 deletion(-)
diff --git a/arch/frv/kernel/entry-table.S b/arch/frv/kernel/entry-table.S
index d3b9253d862a..bf35f33e48c9 100644
--- a/arch/frv/kernel/entry-table.S
+++ b/arch/frv/kernel/entry-table.S
@@ -316,8 +316,14 @@ __trap_fixup_kernel_data_tlb_miss:
 	.section	.trap.vector
 	.org		TBR_TT_TRAP0 >> 2
 	.long		system_call
-	.rept		126
+	.rept		119
 	.long		__entry_unsupported_trap
 	.endr
+
+	# userspace atomic op emulation, traps 120-126
+	.rept		7
+	.long		__entry_atomic_op
+	.endr
+
 	.org		TBR_TT_BREAK >> 2
 	.long		__entry_debug_exception
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index f36d7f4a7c25..b8a4b94779b1 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -656,6 +656,26 @@ __entry_debug_exception:
 
 ###############################################################################
 #
+# handle atomic operation emulation for userspace
+#
+###############################################################################
+	.globl		__entry_atomic_op
+__entry_atomic_op:
+	LEDS		0x6012
+	sethi.p		%hi(atomic_operation),gr5
+	setlo		%lo(atomic_operation),gr5
+	movsg		esfr1,gr8
+	movsg		epcr0,gr9
+	movsg		esr0,gr10
+
+	# now that we've accessed the exception regs, we can enable exceptions
+	movsg		psr,gr4
+	ori		gr4,#PSR_ET,gr4
+	movgs		gr4,psr
+	jmpl		@(gr5,gr0)	; call atomic_operation(esfr1,epcr0,esr0)
+
+###############################################################################
+#
 # handle media exception
 #
 ###############################################################################
diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c
index 2e6098c85578..2f7e66877f3b 100644
--- a/arch/frv/kernel/traps.c
+++ b/arch/frv/kernel/traps.c
@@ -102,6 +102,233 @@ asmlinkage void illegal_instruction(unsigned long esfr1, unsigned long epcr0, un
 
 /*****************************************************************************/
 /*
+ * handle atomic operations with errors
+ * - arguments in gr8, gr9, gr10
+ * - original memory value placed in gr5
+ * - replacement memory value placed in gr9
+ */
+asmlinkage void atomic_operation(unsigned long esfr1, unsigned long epcr0,
+				 unsigned long esr0)
+{
+	static DEFINE_SPINLOCK(atomic_op_lock);
+	unsigned long x, y, z, *p;
+	mm_segment_t oldfs;
+	siginfo_t info;
+	int ret;
+
+	y = 0;
+	z = 0;
+
+	oldfs = get_fs();
+	if (!user_mode(__frame))
+		set_fs(KERNEL_DS);
+
+	switch (__frame->tbr & TBR_TT) {
+		/* TIRA gr0,#120
+		 * u32 __atomic_user_cmpxchg32(u32 *ptr, u32 test, u32 new)
+		 */
+	case TBR_TT_ATOMIC_CMPXCHG32:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+		y = __frame->gr10;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			if (z != x)
+				goto done;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				if (z != x)
+					goto done2;
+
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#121
+		 * u32 __atomic_kernel_xchg32(void *v, u32 new)
+		 */
+	case TBR_TT_ATOMIC_XCHG32:
+		p = (unsigned long *) __frame->gr8;
+		y = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#122
+		 * ulong __atomic_kernel_XOR_return(ulong i, ulong *v)
+		 */
+	case TBR_TT_ATOMIC_XOR:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = x ^ z;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#123
+		 * ulong __atomic_kernel_OR_return(ulong i, ulong *v)
+		 */
+	case TBR_TT_ATOMIC_OR:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = x | z;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#124
+		 * ulong __atomic_kernel_AND_return(ulong i, ulong *v)
+		 */
+	case TBR_TT_ATOMIC_AND:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = x & z;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#125
+		 * int __atomic_user_sub_return(atomic_t *v, int i)
+		 */
+	case TBR_TT_ATOMIC_SUB:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = z - x;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#126
+		 * int __atomic_user_add_return(atomic_t *v, int i)
+		 */
+	case TBR_TT_ATOMIC_ADD:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = z + x;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+	default:
+		BUG();
+	}
+
+done2:
+	spin_unlock_irq(&atomic_op_lock);
+done:
+	if (!user_mode(__frame))
+		set_fs(oldfs);
+	__frame->gr5 = z;
+	__frame->gr9 = y;
+	return;
+
+error2:
+	spin_unlock_irq(&atomic_op_lock);
+error:
+	if (!user_mode(__frame))
+		set_fs(oldfs);
+	__frame->pc -= 4;
+
+	die_if_kernel("-- Atomic Op Error --\n");
+
+	info.si_signo = SIGSEGV;
+	info.si_code = SEGV_ACCERR;
+	info.si_errno = 0;
+	info.si_addr = (void *) __frame->pc;
+
+	force_sig_info(info.si_signo, &info, current);
+}
+
+/*****************************************************************************/
+/*
  *
  */
 asmlinkage void media_exception(unsigned long msr0, unsigned long msr1)