diff options
author | Ley Foon Tan <lftan@altera.com> | 2014-11-06 02:19:38 -0500 |
---|---|---|
committer | Ley Foon Tan <lftan@altera.com> | 2014-12-07 23:55:50 -0500 |
commit | 82ed08dd1b0e0e0728f9188f66795c49dffe437d (patch) | |
tree | 97f2d779bcee0d1256910ca210f9dba9a72bb230 | |
parent | 27d22413e60009603bc4eb515392c2639ff31010 (diff) |
nios2: Exception handling
This patch contains the exception entry code (kernel/entry.S) and the
misaligned-access exception handler (kernel/misaligned.c).
Signed-off-by: Ley Foon Tan <lftan@altera.com>
-rw-r--r-- | arch/nios2/kernel/entry.S | 555 | ||||
-rw-r--r-- | arch/nios2/kernel/misaligned.c | 256 |
2 files changed, 811 insertions, 0 deletions
diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S new file mode 100644 index 000000000000..83bca17d1008 --- /dev/null +++ b/arch/nios2/kernel/entry.S | |||
@@ -0,0 +1,555 @@ | |||
1 | /* | ||
2 | * linux/arch/nios2/kernel/entry.S | ||
3 | * | ||
4 | * Copyright (C) 2013-2014 Altera Corporation | ||
5 | * Copyright (C) 2009, Wind River Systems Inc | ||
6 | * | ||
7 | * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com | ||
8 | * | ||
9 | * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com) | ||
10 | * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>, | ||
11 | * Kenneth Albanowski <kjahds@kjahds.com>, | ||
12 | * Copyright (C) 2000 Lineo Inc. (www.lineo.com) | ||
13 | * Copyright (C) 2004 Microtronix Datacom Ltd. | ||
14 | * | ||
15 | * This file is subject to the terms and conditions of the GNU General Public | ||
16 | * License. See the file "COPYING" in the main directory of this archive | ||
17 | * for more details. | ||
18 | * | ||
19 | * Linux/m68k support by Hamish Macdonald | ||
20 | * | ||
21 | * 68060 fixes by Jesper Skov | ||
22 | * ColdFire support by Greg Ungerer (gerg@snapgear.com) | ||
23 | * 5307 fixes by David W. Miller | ||
24 | * linux 2.4 support David McCullough <davidm@snapgear.com> | ||
25 | */ | ||
26 | |||
27 | #include <linux/sys.h> | ||
28 | #include <linux/linkage.h> | ||
29 | #include <asm/asm-offsets.h> | ||
30 | #include <asm/asm-macros.h> | ||
31 | #include <asm/thread_info.h> | ||
32 | #include <asm/errno.h> | ||
33 | #include <asm/setup.h> | ||
34 | #include <asm/entry.h> | ||
35 | #include <asm/unistd.h> | ||
36 | #include <asm/processor.h> | ||
37 | |||
/*
 * GET_THREAD_INFO reg
 *
 * Compute the current thread_info pointer into \reg by masking the low
 * THREAD_SIZE-1 bits off the stack pointer (thread_info lives at the
 * base of the kernel stack).  Two encodings are used because andhi can
 * only apply a mask in the upper 16 bits: when THREAD_SIZE-1 fits in
 * the low half-word, the mask must be materialised with addi/and.
 */
.macro	GET_THREAD_INFO reg
.if THREAD_SIZE & 0xffff0000
	andhi	\reg, sp, %hi(~(THREAD_SIZE-1))
.else
	addi	\reg, r0, %lo(~(THREAD_SIZE-1))
	and	\reg, \reg, sp
.endif
.endm
46 | |||
.macro kuser_cmpxchg_check
	/*
	 * Make sure our user space atomic helper is restarted if it was
	 * interrupted in a critical region.
	 * ea-4 = address of interrupted insn (ea must be preserved).
	 * sp = saved regs.
	 * cmpxchg_ldw = first critical insn, cmpxchg_stw = last critical insn.
	 * If ea <= cmpxchg_stw and ea > cmpxchg_ldw then saved EA is set to
	 * cmpxchg_ldw + 4.
	 *
	 * Clobbers et; may rewind ea and the saved PT_EA slot.  The helper
	 * addresses are computed from the fixed KUSER_BASE user mapping.
	 */
	/* et = cmpxchg_stw + 4 (first address past the critical region) */
	movui	et, (KUSER_BASE + 4 + (cmpxchg_stw - __kuser_helper_start))
	bgtu	ea, et, 1f		/* past the region - nothing to do */

	subi	et, et, (cmpxchg_stw - cmpxchg_ldw) /* et = cmpxchg_ldw + 4 */
	bltu	ea, et, 1f		/* before the region - nothing to do */
	stw	et, PT_EA(sp)		/* fix up EA so the load is redone */
	mov	ea, et
1:
.endm
67 | |||
	.section .rodata
	.align 4
/*
 * Dispatch table for hardware exceptions, indexed by the value of the
 * "exception" control register.  inthandler adds that register value to
 * the table base without scaling, so the register is used directly as a
 * byte offset (i.e. it already holds cause * 4).
 */
exception_table:
	.word unhandled_exception	/* 0 - Reset */
	.word unhandled_exception	/* 1 - Processor-only Reset */
	.word external_interrupt	/* 2 - Interrupt */
	.word handle_trap		/* 3 - Trap Instruction */

	.word instruction_trap		/* 4 - Unimplemented instruction */
	.word handle_illegal		/* 5 - Illegal instruction */
	.word handle_unaligned		/* 6 - Misaligned data access */
	.word handle_unaligned		/* 7 - Misaligned destination address */

	.word handle_diverror		/* 8 - Division error */
	.word protection_exception_ba	/* 9 - Supervisor-only instr. address */
	.word protection_exception_instr /* 10 - Supervisor only instruction */
	.word protection_exception_ba	/* 11 - Supervisor only data address */

	.word unhandled_exception	/* 12 - Double TLB miss (data) */
	.word protection_exception_pte	/* 13 - TLB permission violation (x) */
	.word protection_exception_pte	/* 14 - TLB permission violation (r) */
	.word protection_exception_pte	/* 15 - TLB permission violation (w) */

	.word unhandled_exception	/* 16 - MPU region violation */
92 | |||
/*
 * Dispatch table for the trap instruction, indexed by the 5-bit trap
 * immediate (pre-scaled by 4 in handle_trap).  Slot 0 is the system
 * call entry, slot 31 the debugger breakpoint; everything else lands
 * in instruction_trap.
 */
trap_table:
	.word handle_system_call	/* 0 */
	.word instruction_trap		/* 1 */
	.word instruction_trap		/* 2 */
	.word instruction_trap		/* 3 */
	.word instruction_trap		/* 4 */
	.word instruction_trap		/* 5 */
	.word instruction_trap		/* 6 */
	.word instruction_trap		/* 7 */
	.word instruction_trap		/* 8 */
	.word instruction_trap		/* 9 */
	.word instruction_trap		/* 10 */
	.word instruction_trap		/* 11 */
	.word instruction_trap		/* 12 */
	.word instruction_trap		/* 13 */
	.word instruction_trap		/* 14 */
	.word instruction_trap		/* 15 */
	.word instruction_trap		/* 16 */
	.word instruction_trap		/* 17 */
	.word instruction_trap		/* 18 */
	.word instruction_trap		/* 19 */
	.word instruction_trap		/* 20 */
	.word instruction_trap		/* 21 */
	.word instruction_trap		/* 22 */
	.word instruction_trap		/* 23 */
	.word instruction_trap		/* 24 */
	.word instruction_trap		/* 25 */
	.word instruction_trap		/* 26 */
	.word instruction_trap		/* 27 */
	.word instruction_trap		/* 28 */
	.word instruction_trap		/* 29 */
	.word instruction_trap		/* 30 */
	.word handle_breakpoint		/* 31 */
126 | |||
	.text
	.set noat
	.set nobreak

/*
 * Common entry point for all exceptions and interrupts: save the full
 * register set, handle a possibly-interrupted kuser cmpxchg, then
 * dispatch through exception_table on the hardware cause.
 * On dispatch: r4 = pt_regs (sp), r5 = raw exception register value.
 */
ENTRY(inthandler)
	SAVE_ALL

	kuser_cmpxchg_check

	/* Clear EH bit before we get a new exception in the kernel
	 * and after we have saved it to the exception frame. This is done
	 * whether it's trap, tlb-miss or interrupt. If we don't do this
	 * estatus is not updated for the next exception.
	 */
	rdctl	r24, status
	movi	r9, %lo(~STATUS_EH)
	and	r24, r24, r9
	wrctl	status, r24

	/* Read cause and vector and branch to the associated handler */
	mov	r4, sp
	rdctl	r5, exception
	movia	r9, exception_table
	add	r24, r9, r5		/* exception reg is already a byte offset */
	ldw	r24, 0(r24)
	jmp	r24
153 | |||
154 | |||
/***********************************************************************
 * Handle traps
 ***********************************************************************
 */
ENTRY(handle_trap)
	ldw	r24, -4(ea)	/* instruction that caused the exception */
	/* Extract the 5-bit trap immediate (insn bits 6..10), leaving it
	 * pre-scaled by 4 so it can be used as a byte offset into
	 * trap_table: srli 4 + andi 0x7c yields imm5 << 2. */
	srli	r24, r24, 4
	andi	r24, r24, 0x7c
	movia	r9,trap_table
	add	r24, r24, r9
	ldw	r24, 0(r24)
	jmp	r24
167 | |||
168 | |||
/***********************************************************************
 * Handle system calls
 *
 * On entry: r2 = syscall number, r4-r9 = arguments (r4/r5 are reloaded
 * from the saved frame because the common entry code clobbered them).
 * Result goes back via PT_R2 (value) and PT_R7 (error flag).
 ***********************************************************************
 */
ENTRY(handle_system_call)
	/* Enable interrupts */
	rdctl	r10, status
	ori	r10, r10, STATUS_PIE
	wrctl	status, r10

	/* Reload registers destroyed by common code. */
	ldw	r4, PT_R4(sp)
	ldw	r5, PT_R5(sp)

local_restart:
	/* Check that the requested system call is within limits */
	movui	r1, __NR_syscalls
	bgeu	r2, r1, ret_invsyscall
	slli	r1, r2, 2		/* index sys_call_table by number * 4 */
	movhi	r11, %hiadj(sys_call_table)
	add	r1, r1, r11
	ldw	r1, %lo(sys_call_table)(r1)
	beq	r1, r0, ret_invsyscall	/* NULL slot -> invalid syscall */

	/* Check if we are being traced */
	GET_THREAD_INFO r11
	ldw	r11,TI_FLAGS(r11)
	BTBNZ	r11,r11,TIF_SYSCALL_TRACE,traced_system_call

	/* Execute the system call */
	callr	r1

	/* If the syscall returns a negative result:
	 *   Set r7 to 1 to indicate error,
	 *   Negate r2 to get a positive error code
	 * If the syscall returns zero or a positive value:
	 *   Set r7 to 0.
	 * The sigreturn system calls will skip the code below by
	 * adding to register ra (see sys_rt_sigreturn), so that the
	 * r2/r7 values restored from the signal frame survive.
	 */
translate_rc_and_ret:
	movi	r1, 0
	bge	r2, zero, 3f
	sub	r2, zero, r2
	movi	r1, 1
3:
	stw	r2, PT_R2(sp)
	stw	r1, PT_R7(sp)
end_translate_rc_and_ret:

ret_from_exception:
	ldw	r1, PT_ESTATUS(sp)
	/* returning to user mode? if not, skip resched/signal work */
	TSTBNZ	r1, r1, ESTATUS_EU, Luser_return

restore_all:
	rdctl	r10, status			/* disable intrs */
	andi	r10, r10, %lo(~STATUS_PIE)
	wrctl	status, r10
	RESTORE_ALL
	eret
230 | |||
/* If the syscall number was invalid return ENOSYS */
ret_invsyscall:
	movi	r2, -ENOSYS
	br	translate_rc_and_ret	/* converted to r2=ENOSYS, r7=1 */
235 | |||
/* This implements the same as above, except it calls
 * do_syscall_trace_enter and do_syscall_trace_exit before and after the
 * syscall in order for utilities like strace and gdb to work.
 */
traced_system_call:
	SAVE_SWITCH_STACK
	call	do_syscall_trace_enter
	RESTORE_SWITCH_STACK

	/* Create system call register arguments.  The 5th and 6th
	   arguments on stack are already in place at the beginning
	   of pt_regs.  The tracer may have rewritten any of these
	   saved slots, so all of them are reloaded from the frame. */
	ldw	r2, PT_R2(sp)
	ldw	r4, PT_R4(sp)
	ldw	r5, PT_R5(sp)
	ldw	r6, PT_R6(sp)
	ldw	r7, PT_R7(sp)

	/* Fetch the syscall function, we don't need to check the boundaries
	 * since this is already done.
	 * NOTE(review): the bounds were checked on the original r2 before
	 * the tracer ran; presumably a tracer-modified number is trusted
	 * here - confirm against later upstream fixes.
	 */
	slli	r1, r2, 2
	movhi	r11,%hiadj(sys_call_table)
	add	r1, r1, r11
	ldw	r1, %lo(sys_call_table)(r1)

	callr	r1

	/* If the syscall returns a negative result:
	 *   Set r7 to 1 to indicate error,
	 *   Negate r2 to get a positive error code
	 * If the syscall returns zero or a positive value:
	 *   Set r7 to 0.
	 * The sigreturn system calls will skip the code below by
	 * adding to register ra. To avoid destroying registers
	 */
translate_rc_and_ret2:
	movi	r1, 0
	bge	r2, zero, 4f
	sub	r2, zero, r2
	movi	r1, 1
4:
	stw	r2, PT_R2(sp)
	stw	r1, PT_R7(sp)
end_translate_rc_and_ret2:
	SAVE_SWITCH_STACK
	call	do_syscall_trace_exit
	RESTORE_SWITCH_STACK
	br	ret_from_exception
285 | |||
/*
 * Work done on the way back to user space: reschedule if needed, then
 * deliver pending signals.  Falls back to restore_all when nothing is
 * pending.
 */
Luser_return:
	GET_THREAD_INFO	r11			/* get thread_info pointer */
	ldw	r10, TI_FLAGS(r11)		/* get thread_info->flags */
	ANDI32	r11, r10, _TIF_WORK_MASK
	beq	r11, r0, restore_all		/* Nothing to do */
	BTBZ	r1, r10, TIF_NEED_RESCHED, Lsignal_return

	/* Reschedule work */
	call	schedule
	br	ret_from_exception		/* re-check flags afterwards */

Lsignal_return:
	ANDI32	r1, r10, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
	beq	r1, r0, restore_all
	mov	r4, sp			/* pt_regs */
	SAVE_SWITCH_STACK
	call	do_notify_resume
	beq	r2, r0, no_work_pending	/* r2 != 0 means restart the syscall */
	RESTORE_SWITCH_STACK
	/* prepare restart syscall here without leaving kernel */
	ldw	r2, PT_R2(sp)	/* reload syscall number in r2 */
	ldw	r4, PT_R4(sp)	/* reload syscall arguments r4-r9 */
	ldw	r5, PT_R5(sp)
	ldw	r6, PT_R6(sp)
	ldw	r7, PT_R7(sp)
	ldw	r8, PT_R8(sp)
	ldw	r9, PT_R9(sp)
	br	local_restart	/* restart syscall */

no_work_pending:
	RESTORE_SWITCH_STACK
	br	ret_from_exception
318 | |||
/***********************************************************************
 * Handle external interrupts.
 ***********************************************************************
 */
/*
 * This is the generic interrupt handler (for all hardware interrupt
 * sources). It figures out the vector number and calls the appropriate
 * interrupt service routine directly.
 */
external_interrupt:
	rdctl	r12, ipending
	rdctl	r9, ienable
	and	r12, r12, r9		/* only consider enabled sources */
	/* skip if no interrupt is pending */
	beq	r12, r0, ret_from_interrupt

	/* Mark the frame as an interrupt (not a syscall) so restart
	 * logic does not treat PT_R2 as a syscall number. */
	movi	r24, -1
	stw	r24, PT_ORIG_R2(sp)

	/*
	 * Process an external hardware interrupt.
	 */

	addi	ea, ea, -4	/* re-issue the interrupted instruction */
	stw	ea, PT_EA(sp)
2:	movi	r4, %lo(-1)	/* Start from bit position 0,
					highest priority */
	/* This is the IRQ # for handler call */
1:	andi	r10, r12, 1	/* Isolate bit we are interested in */
	srli	r12, r12, 1	/* shift count is costly without hardware
					multiplier */
	addi	r4, r4, 1	/* r4 counts up to the first set bit's index */
	beq	r10, r0, 1b
	mov	r5, sp		/* Setup pt_regs pointer for handler call */
	call	do_IRQ
	rdctl	r12, ipending	/* check again if irq still pending */
	rdctl	r9, ienable	/* Isolate possible interrupts */
	and	r12, r12, r9
	bne	r12, r0, 2b	/* loop until nothing is pending */
	/* br	ret_from_interrupt */ /* fall through to ret_from_interrupt */
359 | |||
/*
 * Return path from an interrupt.  Going back to user space takes the
 * common Luser_return work path; going back to the kernel optionally
 * performs kernel preemption (CONFIG_PREEMPT).
 */
ENTRY(ret_from_interrupt)
	ldw	r1, PT_ESTATUS(sp)	/* check if returning to kernel */
	TSTBNZ	r1, r1, ESTATUS_EU, Luser_return

#ifdef CONFIG_PREEMPT
	GET_THREAD_INFO	r1
	ldw	r4, TI_PREEMPT_COUNT(r1)
	bne	r4, r0, restore_all	/* preemption disabled - just return */

need_resched:
	ldw	r4, TI_FLAGS(r1)	/* ? Need resched set */
	BTBZ	r10, r4, TIF_NEED_RESCHED, restore_all
	ldw	r4, PT_ESTATUS(sp)	/* ? Interrupts off */
	andi	r10, r4, ESTATUS_EPIE
	beq	r10, r0, restore_all	/* don't preempt an irqs-off section */
	movia	r4, PREEMPT_ACTIVE	/* block recursive preemption */
	stw	r4, TI_PREEMPT_COUNT(r1)
	rdctl	r10, status		/* enable intrs again */
	ori	r10, r10 ,STATUS_PIE
	wrctl	status, r10
	PUSH	r1			/* thread_info survives the switch */
	call	schedule
	POP	r1
	mov	r4, r0
	stw	r4, TI_PREEMPT_COUNT(r1)
	rdctl	r10, status		/* disable intrs */
	andi	r10, r10, %lo(~STATUS_PIE)
	wrctl	status, r10
	br	need_resched		/* re-check before returning */
#else
	br	restore_all
#endif
392 | |||
/***********************************************************************
 * A few syscall wrappers
 ***********************************************************************
 */
/*
 * int clone(unsigned long clone_flags, unsigned long newsp,
 *           int __user * parent_tidptr, int __user * child_tidptr,
 *           int tls_val)
 *
 * Marshals the user's clone() arguments into the argument order
 * do_fork() expects; the 5th argument is passed on the stack.
 */
ENTRY(sys_clone)
	SAVE_SWITCH_STACK
	addi	sp, sp, -4
	stw	r7, 0(sp)	/* Pass 5th arg thru stack */
	mov	r7, r6		/* 4th arg is 3rd of clone() */
	mov	r6, zero	/* 3rd arg always 0 */
	call	do_fork
	addi	sp, sp, 4
	RESTORE_SWITCH_STACK
	ret
412 | |||
/*
 * rt_sigreturn wrapper: restores the signal frame, then bumps the
 * return address past translate_rc_and_ret so the r2/r7 values
 * restored from the frame are not reinterpreted as a syscall result.
 */
ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	mov	r4, sp
	call	do_rt_sigreturn
	RESTORE_SWITCH_STACK
	addi	ra, ra, (end_translate_rc_and_ret - translate_rc_and_ret)
	ret
420 | |||
/***********************************************************************
 * A few other wrappers and stubs
 *
 * Each stub marshals exception-specific arguments (r4 = pt_regs and
 * r5 = cause were set up by inthandler) and calls the C handler.
 ***********************************************************************
 */
protection_exception_pte:
	rdctl	r6, pteaddr
	slli	r6, r6, 10	/* recover the faulting virtual address */
	call	do_page_fault
	br	ret_from_exception

protection_exception_ba:
	rdctl	r6, badaddr	/* faulting address straight from hardware */
	call	do_page_fault
	br	ret_from_exception

protection_exception_instr:
	call	handle_supervisor_instr
	br	ret_from_exception

handle_breakpoint:
	call	breakpoint_c
	br	ret_from_exception

#ifdef CONFIG_NIOS2_ALIGNMENT_TRAP
handle_unaligned:
	/* the C fixup reads saved callee registers, so push them too */
	SAVE_SWITCH_STACK
	call	handle_unaligned_c
	RESTORE_SWITCH_STACK
	br	ret_from_exception
#else
handle_unaligned:
	call	handle_unaligned_c
	br	ret_from_exception
#endif

handle_illegal:
	call	handle_illegal_c
	br	ret_from_exception

handle_diverror:
	call	handle_diverror_c
	br	ret_from_exception
463 | |||
/*
 * Context switch.
 *
 * Beware - when entering resume, prev (the current task) is
 * in r4, next (the new task) is in r5, don't change these
 * registers.
 *
 * Saves prev's status register and kernel stack pointer into its
 * thread struct, switches stacks, updates _current_thread, and
 * restores next's state.  Interrupts are disabled across the switch.
 */
ENTRY(resume)

	rdctl	r7, status			/* save thread status reg */
	stw	r7, TASK_THREAD + THREAD_KPSR(r4)

	andi	r7, r7, %lo(~STATUS_PIE)	/* disable interrupts */
	wrctl	status, r7

	SAVE_SWITCH_STACK
	stw	sp, TASK_THREAD + THREAD_KSP(r4)/* save kernel stack pointer */
	ldw	sp, TASK_THREAD + THREAD_KSP(r5)/* restore new thread stack */
	movia	r24, _current_thread		/* save thread */
	GET_THREAD_INFO r1
	stw	r1, 0(r24)
	RESTORE_SWITCH_STACK

	ldw	r7, TASK_THREAD + THREAD_KPSR(r5)/* restore thread status reg */
	wrctl	status, r7
	ret
488 | |||
/* First code run by a newly forked user task: finish the scheduler
 * bookkeeping, then take the normal exception-return path. */
ENTRY(ret_from_fork)
	call	schedule_tail
	br	ret_from_exception

/* First code run by a new kernel thread: r16 holds the thread
 * function, r17 its argument (set up by copy_thread). */
ENTRY(ret_from_kernel_thread)
	call	schedule_tail
	mov	r4,r17	/* arg */
	callr	r16	/* function */
	br	ret_from_exception
498 | |||
/*
 * Kernel user helpers.
 *
 * Each segment is 64-byte aligned and will be mapped to the <User space>.
 * New segments (if ever needed) must be added after the existing ones.
 * This mechanism should be used only for things that are really small and
 * justified, and not be abused freely.
 *
 */

/* Filling pads with undefined instructions. */
.macro	kuser_pad sym size
	.if	((. - \sym) & 3)
	.rept	(4 - (. - \sym) & 3)
	.byte	0
	.endr
	.endif
	.rept	((\size - (. - \sym)) / 4)
	.word	0xdeadbeef
	.endr
.endm

	.align	6
	.globl	__kuser_helper_start
__kuser_helper_start:

__kuser_helper_version:			/* @ 0x1000 */
	.word	((__kuser_helper_end - __kuser_helper_start) >> 6)

__kuser_cmpxchg:			/* @ 0x1004 */
	/*
	 * r4 pointer to exchange variable
	 * r5 old value
	 * r6 new value
	 *
	 * Not atomic by itself: atomicity is provided by
	 * kuser_cmpxchg_check, which restarts this sequence from
	 * cmpxchg_ldw if an exception interrupted it.
	 */
cmpxchg_ldw:
	ldw	r2, 0(r4)		/* load current value */
	sub	r2, r2, r5		/* compare with old value */
	bne	r2, zero, cmpxchg_ret

	/* We had a match, store the new value */
cmpxchg_stw:
	stw	r6, 0(r4)
cmpxchg_ret:
	ret

	kuser_pad __kuser_cmpxchg, 64

	.globl __kuser_sigtramp
__kuser_sigtramp:
	movi	r2, __NR_rt_sigreturn
	trap

	kuser_pad __kuser_sigtramp, 64

	.globl	__kuser_helper_end
__kuser_helper_end:
diff --git a/arch/nios2/kernel/misaligned.c b/arch/nios2/kernel/misaligned.c new file mode 100644 index 000000000000..4e5907a0cabe --- /dev/null +++ b/arch/nios2/kernel/misaligned.c | |||
@@ -0,0 +1,256 @@ | |||
1 | /* | ||
2 | * linux/arch/nios2/kernel/misaligned.c | ||
3 | * | ||
4 | * basic emulation for mis-aligned accesses on the NIOS II cpu | ||
5 | * modelled after the version for arm in arm/alignment.c | ||
6 | * | ||
7 | * Brad Parker <brad@heeltoe.com> | ||
8 | * Copyright (C) 2010 Ambient Corporation | ||
9 | * Copyright (c) 2010 Altera Corporation, San Jose, California, USA. | ||
10 | * Copyright (c) 2010 Arrow Electronics, Inc. | ||
11 | * | ||
12 | * This file is subject to the terms and conditions of the GNU General | ||
13 | * Public License. See the file COPYING in the main directory of | ||
14 | * this archive for more details. | ||
15 | */ | ||
16 | |||
17 | #include <linux/errno.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/proc_fs.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/uaccess.h> | ||
23 | #include <linux/seq_file.h> | ||
24 | |||
25 | #include <asm/traps.h> | ||
26 | #include <asm/unaligned.h> | ||
27 | |||
/* instructions we emulate (low 6 bits / OP field of a Nios II I-type insn) */
#define INST_LDHU	0x0b
#define INST_STH	0x0d
#define INST_LDH	0x0f
#define INST_STW	0x15
#define INST_LDW	0x17

/* statistics: user fixups, kernel fixups, skipped (had an exception
 * table entry), half-word ops, word ops */
static unsigned long ma_user, ma_kern, ma_skipped, ma_half, ma_word;

/* policy flags controlling what the handler does */
static unsigned int ma_usermode;
#define UM_WARN		0x01	/* log user-mode misaligned accesses */
#define UM_FIXUP	0x02	/* emulate user-mode misaligned accesses */
#define UM_SIGNAL	0x04	/* send SIGBUS instead of skipping the insn */
#define KM_WARN		0x08	/* log kernel-mode misaligned accesses */

/*
 * Register number -> position-in-frame map; index i gives the register
 * saved at slot i.  see arch/nios2/include/asm/ptrace.h
 */
static u8 sys_stack_frame_reg_offset[] = {
	/* struct pt_regs */
	8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 0,
	/* struct switch_stack */
	16, 17, 18, 19, 20, 21, 22, 23, 0, 0, 0, 0, 0, 0, 0, 0
};

/* byte offset of each register relative to the pt_regs pointer,
 * pre-computed at boot by misaligned_calc_reg_offsets() */
static int reg_offsets[32];
52 | |||
53 | static inline u32 get_reg_val(struct pt_regs *fp, int reg) | ||
54 | { | ||
55 | u8 *p = ((u8 *)fp) + reg_offsets[reg]; | ||
56 | |||
57 | return *(u32 *)p; | ||
58 | } | ||
59 | |||
60 | static inline void put_reg_val(struct pt_regs *fp, int reg, u32 val) | ||
61 | { | ||
62 | u8 *p = ((u8 *)fp) + reg_offsets[reg]; | ||
63 | *(u32 *)p = val; | ||
64 | } | ||
65 | |||
66 | /* | ||
67 | * (mis)alignment handler | ||
68 | */ | ||
69 | asmlinkage void handle_unaligned_c(struct pt_regs *fp, int cause) | ||
70 | { | ||
71 | u32 isn, addr, val; | ||
72 | int in_kernel; | ||
73 | u8 a, b, d0, d1, d2, d3; | ||
74 | u16 imm16; | ||
75 | unsigned int fault; | ||
76 | |||
77 | /* back up one instruction */ | ||
78 | fp->ea -= 4; | ||
79 | |||
80 | if (fixup_exception(fp)) { | ||
81 | ma_skipped++; | ||
82 | return; | ||
83 | } | ||
84 | |||
85 | in_kernel = !user_mode(fp); | ||
86 | |||
87 | isn = *(unsigned long *)(fp->ea); | ||
88 | |||
89 | fault = 0; | ||
90 | |||
91 | /* do fixup if in kernel or mode turned on */ | ||
92 | if (in_kernel || (ma_usermode & UM_FIXUP)) { | ||
93 | /* decompose instruction */ | ||
94 | a = (isn >> 27) & 0x1f; | ||
95 | b = (isn >> 22) & 0x1f; | ||
96 | imm16 = (isn >> 6) & 0xffff; | ||
97 | addr = get_reg_val(fp, a) + imm16; | ||
98 | |||
99 | /* do fixup to saved registers */ | ||
100 | switch (isn & 0x3f) { | ||
101 | case INST_LDHU: | ||
102 | fault |= __get_user(d0, (u8 *)(addr+0)); | ||
103 | fault |= __get_user(d1, (u8 *)(addr+1)); | ||
104 | val = (d1 << 8) | d0; | ||
105 | put_reg_val(fp, b, val); | ||
106 | ma_half++; | ||
107 | break; | ||
108 | case INST_STH: | ||
109 | val = get_reg_val(fp, b); | ||
110 | d1 = val >> 8; | ||
111 | d0 = val >> 0; | ||
112 | |||
113 | pr_debug("sth: ra=%d (%08x) rb=%d (%08x), imm16 %04x addr %08x val %08x\n", | ||
114 | a, get_reg_val(fp, a), | ||
115 | b, get_reg_val(fp, b), | ||
116 | imm16, addr, val); | ||
117 | |||
118 | if (in_kernel) { | ||
119 | *(u8 *)(addr+0) = d0; | ||
120 | *(u8 *)(addr+1) = d1; | ||
121 | } else { | ||
122 | fault |= __put_user(d0, (u8 *)(addr+0)); | ||
123 | fault |= __put_user(d1, (u8 *)(addr+1)); | ||
124 | } | ||
125 | ma_half++; | ||
126 | break; | ||
127 | case INST_LDH: | ||
128 | fault |= __get_user(d0, (u8 *)(addr+0)); | ||
129 | fault |= __get_user(d1, (u8 *)(addr+1)); | ||
130 | val = (short)((d1 << 8) | d0); | ||
131 | put_reg_val(fp, b, val); | ||
132 | ma_half++; | ||
133 | break; | ||
134 | case INST_STW: | ||
135 | val = get_reg_val(fp, b); | ||
136 | d3 = val >> 24; | ||
137 | d2 = val >> 16; | ||
138 | d1 = val >> 8; | ||
139 | d0 = val >> 0; | ||
140 | if (in_kernel) { | ||
141 | *(u8 *)(addr+0) = d0; | ||
142 | *(u8 *)(addr+1) = d1; | ||
143 | *(u8 *)(addr+2) = d2; | ||
144 | *(u8 *)(addr+3) = d3; | ||
145 | } else { | ||
146 | fault |= __put_user(d0, (u8 *)(addr+0)); | ||
147 | fault |= __put_user(d1, (u8 *)(addr+1)); | ||
148 | fault |= __put_user(d2, (u8 *)(addr+2)); | ||
149 | fault |= __put_user(d3, (u8 *)(addr+3)); | ||
150 | } | ||
151 | ma_word++; | ||
152 | break; | ||
153 | case INST_LDW: | ||
154 | fault |= __get_user(d0, (u8 *)(addr+0)); | ||
155 | fault |= __get_user(d1, (u8 *)(addr+1)); | ||
156 | fault |= __get_user(d2, (u8 *)(addr+2)); | ||
157 | fault |= __get_user(d3, (u8 *)(addr+3)); | ||
158 | val = (d3 << 24) | (d2 << 16) | (d1 << 8) | d0; | ||
159 | put_reg_val(fp, b, val); | ||
160 | ma_word++; | ||
161 | break; | ||
162 | } | ||
163 | } | ||
164 | |||
165 | addr = RDCTL(CTL_BADADDR); | ||
166 | cause >>= 2; | ||
167 | |||
168 | if (fault) { | ||
169 | if (in_kernel) { | ||
170 | pr_err("fault during kernel misaligned fixup @ %#lx; addr 0x%08x; isn=0x%08x\n", | ||
171 | fp->ea, (unsigned int)addr, | ||
172 | (unsigned int)isn); | ||
173 | } else { | ||
174 | pr_err("fault during user misaligned fixup @ %#lx; isn=%08x addr=0x%08x sp=0x%08lx pid=%d\n", | ||
175 | fp->ea, | ||
176 | (unsigned int)isn, addr, fp->sp, | ||
177 | current->pid); | ||
178 | |||
179 | _exception(SIGSEGV, fp, SEGV_MAPERR, fp->ea); | ||
180 | return; | ||
181 | } | ||
182 | } | ||
183 | |||
184 | /* | ||
185 | * kernel mode - | ||
186 | * note exception and skip bad instruction (return) | ||
187 | */ | ||
188 | if (in_kernel) { | ||
189 | ma_kern++; | ||
190 | fp->ea += 4; | ||
191 | |||
192 | if (ma_usermode & KM_WARN) { | ||
193 | pr_err("kernel unaligned access @ %#lx; BADADDR 0x%08x; cause=%d, isn=0x%08x\n", | ||
194 | fp->ea, | ||
195 | (unsigned int)addr, cause, | ||
196 | (unsigned int)isn); | ||
197 | /* show_regs(fp); */ | ||
198 | } | ||
199 | |||
200 | return; | ||
201 | } | ||
202 | |||
203 | ma_user++; | ||
204 | |||
205 | /* | ||
206 | * user mode - | ||
207 | * possibly warn, | ||
208 | * possibly send SIGBUS signal to process | ||
209 | */ | ||
210 | if (ma_usermode & UM_WARN) { | ||
211 | pr_err("user unaligned access @ %#lx; isn=0x%08lx ea=0x%08lx ra=0x%08lx sp=0x%08lx\n", | ||
212 | (unsigned long)addr, (unsigned long)isn, | ||
213 | fp->ea, fp->ra, fp->sp); | ||
214 | } | ||
215 | |||
216 | if (ma_usermode & UM_SIGNAL) | ||
217 | _exception(SIGBUS, fp, BUS_ADRALN, fp->ea); | ||
218 | else | ||
219 | fp->ea += 4; /* else advance */ | ||
220 | } | ||
221 | |||
222 | static void __init misaligned_calc_reg_offsets(void) | ||
223 | { | ||
224 | int i, r, offset; | ||
225 | |||
226 | /* pre-calc offsets of registers on sys call stack frame */ | ||
227 | offset = 0; | ||
228 | |||
229 | /* struct pt_regs */ | ||
230 | for (i = 0; i < 16; i++) { | ||
231 | r = sys_stack_frame_reg_offset[i]; | ||
232 | reg_offsets[r] = offset; | ||
233 | offset += 4; | ||
234 | } | ||
235 | |||
236 | /* struct switch_stack */ | ||
237 | offset = -sizeof(struct switch_stack); | ||
238 | for (i = 16; i < 32; i++) { | ||
239 | r = sys_stack_frame_reg_offset[i]; | ||
240 | reg_offsets[r] = offset; | ||
241 | offset += 4; | ||
242 | } | ||
243 | } | ||
244 | |||
245 | |||
246 | static int __init misaligned_init(void) | ||
247 | { | ||
248 | /* default mode - silent fix */ | ||
249 | ma_usermode = UM_FIXUP | KM_WARN; | ||
250 | |||
251 | misaligned_calc_reg_offsets(); | ||
252 | |||
253 | return 0; | ||
254 | } | ||
255 | |||
256 | fs_initcall(misaligned_init); | ||