diff options

author:    Catalin Marinas <catalin.marinas@arm.com>  2012-03-05 06:49:27 -0500
committer: Catalin Marinas <catalin.marinas@arm.com>  2012-09-17 05:24:46 -0400
commit:    60ffc30d5652810dd34ea2eec41504222f5d5791 (patch)
tree:      b1d8364b8a86df0327b2f6318c4f59e973695337 /arch/arm64/kernel
parent:    9703d9d7f77ce129621f7d80a844822e2daa7008 (diff)
arm64: Exception handling
The patch contains the exception entry code (kernel/entry.S), pt_regs
structure and related accessors, undefined instruction trapping and
stack tracing.
AArch64 Linux kernel (including kernel threads) runs in EL1 mode using
the SP1 stack. The vectors don't have a fixed address, only alignment
(2^11) requirements.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Acked-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/arm64/kernel')
 arch/arm64/kernel/entry.S      | 695 ++++++++++++++++++++++++++++++++++++
 arch/arm64/kernel/stacktrace.c | 127 +++++++
 arch/arm64/kernel/traps.c      | 348 ++++++++++++++++++
 3 files changed, 1170 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
new file mode 100644
index 000000000000..38cf853a3667
--- /dev/null
+++ b/arch/arm64/kernel/entry.S
@@ -0,0 +1,695 @@
1 | /* | ||
2 | * Low-level exception handling code | ||
3 | * | ||
4 | * Copyright (C) 2012 ARM Ltd. | ||
5 | * Authors: Catalin Marinas <catalin.marinas@arm.com> | ||
6 | * Will Deacon <will.deacon@arm.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | */ | ||
20 | |||
21 | #include <linux/init.h> | ||
22 | #include <linux/linkage.h> | ||
23 | |||
24 | #include <asm/assembler.h> | ||
25 | #include <asm/asm-offsets.h> | ||
26 | #include <asm/errno.h> | ||
27 | #include <asm/thread_info.h> | ||
28 | #include <asm/unistd.h> | ||
29 | |||
30 | /* | ||
31 | * Bad Abort numbers | ||
32 | *----------------- | ||
33 | */ | ||
34 | #define BAD_SYNC 0 | ||
35 | #define BAD_IRQ 1 | ||
36 | #define BAD_FIQ 2 | ||
37 | #define BAD_ERROR 3 | ||
38 | |||
39 | .macro kernel_entry, el, regsize = 64 | ||
40 | sub sp, sp, #S_FRAME_SIZE - S_LR // room for LR, SP, SPSR, ELR | ||
41 | .if \regsize == 32 | ||
42 | mov w0, w0 // zero upper 32 bits of x0 | ||
43 | .endif | ||
44 | push x28, x29 | ||
45 | push x26, x27 | ||
46 | push x24, x25 | ||
47 | push x22, x23 | ||
48 | push x20, x21 | ||
49 | push x18, x19 | ||
50 | push x16, x17 | ||
51 | push x14, x15 | ||
52 | push x12, x13 | ||
53 | push x10, x11 | ||
54 | push x8, x9 | ||
55 | push x6, x7 | ||
56 | push x4, x5 | ||
57 | push x2, x3 | ||
58 | push x0, x1 | ||
59 | .if \el == 0 | ||
60 | mrs x21, sp_el0 | ||
61 | .else | ||
62 | add x21, sp, #S_FRAME_SIZE | ||
63 | .endif | ||
64 | mrs x22, elr_el1 | ||
65 | mrs x23, spsr_el1 | ||
66 | stp lr, x21, [sp, #S_LR] | ||
67 | stp x22, x23, [sp, #S_PC] | ||
68 | |||
69 | /* | ||
70 | * Set syscallno to -1 by default (overridden later if real syscall). | ||
71 | */ | ||
72 | .if \el == 0 | ||
73 | mvn x21, xzr | ||
74 | str x21, [sp, #S_SYSCALLNO] | ||
75 | .endif | ||
76 | |||
77 | /* | ||
78 | * Registers that may be useful after this macro is invoked: | ||
79 | * | ||
80 | * x21 - aborted SP | ||
81 | * x22 - aborted PC | ||
82 | * x23 - aborted PSTATE | ||
83 | */ | ||
84 | .endm | ||
85 | |||
86 | .macro kernel_exit, el, ret = 0 | ||
87 | ldp x21, x22, [sp, #S_PC] // load ELR, SPSR | ||
88 | .if \el == 0 | ||
89 | ldr x23, [sp, #S_SP] // load return stack pointer | ||
90 | .endif | ||
91 | .if \ret | ||
92 | ldr x1, [sp, #S_X1] // preserve x0 (syscall return) | ||
93 | add sp, sp, S_X2 | ||
94 | .else | ||
95 | pop x0, x1 | ||
96 | .endif | ||
97 | pop x2, x3 // load the rest of the registers | ||
98 | pop x4, x5 | ||
99 | pop x6, x7 | ||
100 | pop x8, x9 | ||
101 | msr elr_el1, x21 // set up the return data | ||
102 | msr spsr_el1, x22 | ||
103 | .if \el == 0 | ||
104 | msr sp_el0, x23 | ||
105 | .endif | ||
106 | pop x10, x11 | ||
107 | pop x12, x13 | ||
108 | pop x14, x15 | ||
109 | pop x16, x17 | ||
110 | pop x18, x19 | ||
111 | pop x20, x21 | ||
112 | pop x22, x23 | ||
113 | pop x24, x25 | ||
114 | pop x26, x27 | ||
115 | pop x28, x29 | ||
116 | ldr lr, [sp], #S_FRAME_SIZE - S_LR // load LR and restore SP | ||
117 | eret // return to kernel | ||
118 | .endm | ||
119 | |||
120 | .macro get_thread_info, rd | ||
121 | mov \rd, sp | ||
122 | and \rd, \rd, #~((1 << 13) - 1) // top of 8K stack | ||
123 | .endm | ||
124 | |||
125 | /* | ||
126 | * These are the registers used in the syscall handler, and allow us to | ||
127 | * have in theory up to 7 arguments to a function - x0 to x6. | ||
128 | * | ||
129 | * x7 is reserved for the system call number in 32-bit mode. | ||
130 | */ | ||
131 | sc_nr .req x25 // number of system calls | ||
132 | scno .req x26 // syscall number | ||
133 | stbl .req x27 // syscall table pointer | ||
134 | tsk .req x28 // current thread_info | ||
135 | |||
136 | /* | ||
137 | * Interrupt handling. | ||
138 | */ | ||
139 | .macro irq_handler | ||
140 | ldr x1, handle_arch_irq | ||
141 | mov x0, sp | ||
142 | blr x1 | ||
143 | .endm | ||
144 | |||
145 | .text | ||
146 | |||
147 | /* | ||
148 | * Exception vectors. | ||
149 | */ | ||
150 | .macro ventry label | ||
151 | .align 7 | ||
152 | b \label | ||
153 | .endm | ||
154 | |||
155 | .align 11 | ||
156 | ENTRY(vectors) | ||
157 | ventry el1_sync_invalid // Synchronous EL1t | ||
158 | ventry el1_irq_invalid // IRQ EL1t | ||
159 | ventry el1_fiq_invalid // FIQ EL1t | ||
160 | ventry el1_error_invalid // Error EL1t | ||
161 | |||
162 | ventry el1_sync // Synchronous EL1h | ||
163 | ventry el1_irq // IRQ EL1h | ||
164 | ventry el1_fiq_invalid // FIQ EL1h | ||
165 | ventry el1_error_invalid // Error EL1h | ||
166 | |||
167 | ventry el0_sync // Synchronous 64-bit EL0 | ||
168 | ventry el0_irq // IRQ 64-bit EL0 | ||
169 | ventry el0_fiq_invalid // FIQ 64-bit EL0 | ||
170 | ventry el0_error_invalid // Error 64-bit EL0 | ||
171 | |||
172 | #ifdef CONFIG_COMPAT | ||
173 | ventry el0_sync_compat // Synchronous 32-bit EL0 | ||
174 | ventry el0_irq_compat // IRQ 32-bit EL0 | ||
175 | ventry el0_fiq_invalid_compat // FIQ 32-bit EL0 | ||
176 | ventry el0_error_invalid_compat // Error 32-bit EL0 | ||
177 | #else | ||
178 | ventry el0_sync_invalid // Synchronous 32-bit EL0 | ||
179 | ventry el0_irq_invalid // IRQ 32-bit EL0 | ||
180 | ventry el0_fiq_invalid // FIQ 32-bit EL0 | ||
181 | ventry el0_error_invalid // Error 32-bit EL0 | ||
182 | #endif | ||
183 | END(vectors) | ||
184 | |||
185 | /* | ||
186 | * Invalid mode handlers | ||
187 | */ | ||
188 | .macro inv_entry, el, reason, regsize = 64 | ||
189 | kernel_entry el, \regsize | ||
190 | mov x0, sp | ||
191 | mov x1, #\reason | ||
192 | mrs x2, esr_el1 | ||
193 | b bad_mode | ||
194 | .endm | ||
195 | |||
196 | el0_sync_invalid: | ||
197 | inv_entry 0, BAD_SYNC | ||
198 | ENDPROC(el0_sync_invalid) | ||
199 | |||
200 | el0_irq_invalid: | ||
201 | inv_entry 0, BAD_IRQ | ||
202 | ENDPROC(el0_irq_invalid) | ||
203 | |||
204 | el0_fiq_invalid: | ||
205 | inv_entry 0, BAD_FIQ | ||
206 | ENDPROC(el0_fiq_invalid) | ||
207 | |||
208 | el0_error_invalid: | ||
209 | inv_entry 0, BAD_ERROR | ||
210 | ENDPROC(el0_error_invalid) | ||
211 | |||
212 | #ifdef CONFIG_COMPAT | ||
213 | el0_fiq_invalid_compat: | ||
214 | inv_entry 0, BAD_FIQ, 32 | ||
215 | ENDPROC(el0_fiq_invalid_compat) | ||
216 | |||
217 | el0_error_invalid_compat: | ||
218 | inv_entry 0, BAD_ERROR, 32 | ||
219 | ENDPROC(el0_error_invalid_compat) | ||
220 | #endif | ||
221 | |||
222 | el1_sync_invalid: | ||
223 | inv_entry 1, BAD_SYNC | ||
224 | ENDPROC(el1_sync_invalid) | ||
225 | |||
226 | el1_irq_invalid: | ||
227 | inv_entry 1, BAD_IRQ | ||
228 | ENDPROC(el1_irq_invalid) | ||
229 | |||
230 | el1_fiq_invalid: | ||
231 | inv_entry 1, BAD_FIQ | ||
232 | ENDPROC(el1_fiq_invalid) | ||
233 | |||
234 | el1_error_invalid: | ||
235 | inv_entry 1, BAD_ERROR | ||
236 | ENDPROC(el1_error_invalid) | ||
237 | |||
238 | /* | ||
239 | * EL1 mode handlers. | ||
240 | */ | ||
241 | .align 6 | ||
242 | el1_sync: | ||
243 | kernel_entry 1 | ||
244 | mrs x1, esr_el1 // read the syndrome register | ||
245 | lsr x24, x1, #26 // exception class | ||
246 | cmp x24, #0x25 // data abort in EL1 | ||
247 | b.eq el1_da | ||
248 | cmp x24, #0x18 // configurable trap | ||
249 | b.eq el1_undef | ||
250 | cmp x24, #0x26 // stack alignment exception | ||
251 | b.eq el1_sp_pc | ||
252 | cmp x24, #0x22 // pc alignment exception | ||
253 | b.eq el1_sp_pc | ||
254 | cmp x24, #0x00 // unknown exception in EL1 | ||
255 | b.eq el1_undef | ||
256 | cmp x24, #0x30 // debug exception in EL1 | ||
257 | b.ge el1_dbg | ||
258 | b el1_inv | ||
259 | el1_da: | ||
260 | /* | ||
261 | * Data abort handling | ||
262 | */ | ||
263 | mrs x0, far_el1 | ||
264 | enable_dbg_if_not_stepping x2 | ||
265 | // re-enable interrupts if they were enabled in the aborted context | ||
266 | tbnz x23, #7, 1f // PSR_I_BIT | ||
267 | enable_irq | ||
268 | 1: | ||
269 | mov x2, sp // struct pt_regs | ||
270 | bl do_mem_abort | ||
271 | |||
272 | // disable interrupts before pulling preserved data off the stack | ||
273 | disable_irq | ||
274 | kernel_exit 1 | ||
275 | el1_sp_pc: | ||
276 | /* | ||
277 | * Stack or PC alignment exception handling | ||
278 | */ | ||
279 | mrs x0, far_el1 | ||
280 | mov x1, x25 | ||
281 | mov x2, sp | ||
282 | b do_sp_pc_abort | ||
283 | el1_undef: | ||
284 | /* | ||
285 | * Undefined instruction | ||
286 | */ | ||
287 | mov x0, sp | ||
288 | b do_undefinstr | ||
289 | el1_dbg: | ||
290 | /* | ||
291 | * Debug exception handling | ||
292 | */ | ||
293 | tbz x24, #0, el1_inv // EL1 only | ||
294 | mrs x0, far_el1 | ||
295 | mov x2, sp // struct pt_regs | ||
296 | bl do_debug_exception | ||
297 | |||
298 | kernel_exit 1 | ||
299 | el1_inv: | ||
300 | // TODO: add support for undefined instructions in kernel mode | ||
301 | mov x0, sp | ||
302 | mov x1, #BAD_SYNC | ||
303 | mrs x2, esr_el1 | ||
304 | b bad_mode | ||
305 | ENDPROC(el1_sync) | ||
306 | |||
307 | .align 6 | ||
308 | el1_irq: | ||
309 | kernel_entry 1 | ||
310 | enable_dbg_if_not_stepping x0 | ||
311 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
312 | bl trace_hardirqs_off | ||
313 | #endif | ||
314 | #ifdef CONFIG_PREEMPT | ||
315 | get_thread_info tsk | ||
316 | ldr x24, [tsk, #TI_PREEMPT] // get preempt count | ||
317 | add x0, x24, #1 // increment it | ||
318 | str x0, [tsk, #TI_PREEMPT] | ||
319 | #endif | ||
320 | irq_handler | ||
321 | #ifdef CONFIG_PREEMPT | ||
322 | str x24, [tsk, #TI_PREEMPT] // restore preempt count | ||
323 | cbnz x24, 1f // preempt count != 0 | ||
324 | ldr x0, [tsk, #TI_FLAGS] // get flags | ||
325 | tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? | ||
326 | bl el1_preempt | ||
327 | 1: | ||
328 | #endif | ||
329 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
330 | bl trace_hardirqs_on | ||
331 | #endif | ||
332 | kernel_exit 1 | ||
333 | ENDPROC(el1_irq) | ||
334 | |||
335 | #ifdef CONFIG_PREEMPT | ||
336 | el1_preempt: | ||
337 | mov x24, lr | ||
338 | 1: enable_dbg | ||
339 | bl preempt_schedule_irq // irq en/disable is done inside | ||
340 | ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS | ||
341 | tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling? | ||
342 | ret x24 | ||
343 | #endif | ||
344 | |||
345 | /* | ||
346 | * EL0 mode handlers. | ||
347 | */ | ||
348 | .align 6 | ||
349 | el0_sync: | ||
350 | kernel_entry 0 | ||
351 | mrs x25, esr_el1 // read the syndrome register | ||
352 | lsr x24, x25, #26 // exception class | ||
353 | cmp x24, #0x15 // SVC in 64-bit state | ||
354 | b.eq el0_svc | ||
355 | adr lr, ret_from_exception | ||
356 | cmp x24, #0x24 // data abort in EL0 | ||
357 | b.eq el0_da | ||
358 | cmp x24, #0x20 // instruction abort in EL0 | ||
359 | b.eq el0_ia | ||
360 | cmp x24, #0x07 // FP/ASIMD access | ||
361 | b.eq el0_fpsimd_acc | ||
362 | cmp x24, #0x2c // FP/ASIMD exception | ||
363 | b.eq el0_fpsimd_exc | ||
364 | cmp x24, #0x18 // configurable trap | ||
365 | b.eq el0_undef | ||
366 | cmp x24, #0x26 // stack alignment exception | ||
367 | b.eq el0_sp_pc | ||
368 | cmp x24, #0x22 // pc alignment exception | ||
369 | b.eq el0_sp_pc | ||
370 | cmp x24, #0x00 // unknown exception in EL0 | ||
371 | b.eq el0_undef | ||
372 | cmp x24, #0x30 // debug exception in EL0 | ||
373 | b.ge el0_dbg | ||
374 | b el0_inv | ||
375 | |||
376 | #ifdef CONFIG_COMPAT | ||
377 | .align 6 | ||
378 | el0_sync_compat: | ||
379 | kernel_entry 0, 32 | ||
380 | mrs x25, esr_el1 // read the syndrome register | ||
381 | lsr x24, x25, #26 // exception class | ||
382 | cmp x24, #0x11 // SVC in 32-bit state | ||
383 | b.eq el0_svc_compat | ||
384 | adr lr, ret_from_exception | ||
385 | cmp x24, #0x24 // data abort in EL0 | ||
386 | b.eq el0_da | ||
387 | cmp x24, #0x20 // instruction abort in EL0 | ||
388 | b.eq el0_ia | ||
389 | cmp x24, #0x07 // FP/ASIMD access | ||
390 | b.eq el0_fpsimd_acc | ||
391 | cmp x24, #0x28 // FP/ASIMD exception | ||
392 | b.eq el0_fpsimd_exc | ||
393 | cmp x24, #0x00 // unknown exception in EL0 | ||
394 | b.eq el0_undef | ||
395 | cmp x24, #0x30 // debug exception in EL0 | ||
396 | b.ge el0_dbg | ||
397 | b el0_inv | ||
398 | el0_svc_compat: | ||
399 | /* | ||
400 | * AArch32 syscall handling | ||
401 | */ | ||
402 | adr stbl, compat_sys_call_table // load compat syscall table pointer | ||
403 | uxtw scno, w7 // syscall number in w7 (r7) | ||
404 | mov sc_nr, #__NR_compat_syscalls | ||
405 | b el0_svc_naked | ||
406 | |||
407 | .align 6 | ||
408 | el0_irq_compat: | ||
409 | kernel_entry 0, 32 | ||
410 | b el0_irq_naked | ||
411 | #endif | ||
412 | |||
413 | el0_da: | ||
414 | /* | ||
415 | * Data abort handling | ||
416 | */ | ||
417 | mrs x0, far_el1 | ||
418 | disable_step x1 | ||
419 | isb | ||
420 | enable_dbg | ||
421 | // enable interrupts before calling the main handler | ||
422 | enable_irq | ||
423 | mov x1, x25 | ||
424 | mov x2, sp | ||
425 | b do_mem_abort | ||
426 | el0_ia: | ||
427 | /* | ||
428 | * Instruction abort handling | ||
429 | */ | ||
430 | mrs x0, far_el1 | ||
431 | disable_step x1 | ||
432 | isb | ||
433 | enable_dbg | ||
434 | // enable interrupts before calling the main handler | ||
435 | enable_irq | ||
436 | orr x1, x25, #1 << 24 // use reserved ISS bit for instruction aborts | ||
437 | mov x2, sp | ||
438 | b do_mem_abort | ||
439 | el0_fpsimd_acc: | ||
440 | /* | ||
441 | * Floating Point or Advanced SIMD access | ||
442 | */ | ||
443 | mov x0, x25 | ||
444 | mov x1, sp | ||
445 | b do_fpsimd_acc | ||
446 | el0_fpsimd_exc: | ||
447 | /* | ||
448 | * Floating Point or Advanced SIMD exception | ||
449 | */ | ||
450 | mov x0, x25 | ||
451 | mov x1, sp | ||
452 | b do_fpsimd_exc | ||
453 | el0_sp_pc: | ||
454 | /* | ||
455 | * Stack or PC alignment exception handling | ||
456 | */ | ||
457 | mrs x0, far_el1 | ||
458 | disable_step x1 | ||
459 | isb | ||
460 | enable_dbg | ||
461 | // enable interrupts before calling the main handler | ||
462 | enable_irq | ||
463 | mov x1, x25 | ||
464 | mov x2, sp | ||
465 | b do_sp_pc_abort | ||
466 | el0_undef: | ||
467 | /* | ||
468 | * Undefined instruction | ||
469 | */ | ||
470 | mov x0, sp | ||
471 | b do_undefinstr | ||
472 | el0_dbg: | ||
473 | /* | ||
474 | * Debug exception handling | ||
475 | */ | ||
476 | tbnz x24, #0, el0_inv // EL0 only | ||
477 | mrs x0, far_el1 | ||
478 | disable_step x1 | ||
479 | mov x1, x25 | ||
480 | mov x2, sp | ||
481 | b do_debug_exception | ||
482 | el0_inv: | ||
483 | mov x0, sp | ||
484 | mov x1, #BAD_SYNC | ||
485 | mrs x2, esr_el1 | ||
486 | b bad_mode | ||
487 | ENDPROC(el0_sync) | ||
488 | |||
489 | .align 6 | ||
490 | el0_irq: | ||
491 | kernel_entry 0 | ||
492 | el0_irq_naked: | ||
493 | disable_step x1 | ||
494 | isb | ||
495 | enable_dbg | ||
496 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
497 | bl trace_hardirqs_off | ||
498 | #endif | ||
499 | get_thread_info tsk | ||
500 | #ifdef CONFIG_PREEMPT | ||
501 | ldr x24, [tsk, #TI_PREEMPT] // get preempt count | ||
502 | add x23, x24, #1 // increment it | ||
503 | str x23, [tsk, #TI_PREEMPT] | ||
504 | #endif | ||
505 | irq_handler | ||
506 | #ifdef CONFIG_PREEMPT | ||
507 | ldr x0, [tsk, #TI_PREEMPT] | ||
508 | str x24, [tsk, #TI_PREEMPT] | ||
509 | cmp x0, x23 | ||
510 | b.eq 1f | ||
511 | mov x1, #0 | ||
512 | str x1, [x1] // BUG | ||
513 | 1: | ||
514 | #endif | ||
515 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
516 | bl trace_hardirqs_on | ||
517 | #endif | ||
518 | b ret_to_user | ||
519 | ENDPROC(el0_irq) | ||
520 | |||
521 | /* | ||
522 | * This is the return code to user mode for abort handlers | ||
523 | */ | ||
524 | ret_from_exception: | ||
525 | get_thread_info tsk | ||
526 | b ret_to_user | ||
527 | ENDPROC(ret_from_exception) | ||
528 | |||
529 | /* | ||
530 | * Register switch for AArch64. The callee-saved registers need to be saved | ||
531 | * and restored. On entry: | ||
532 | * x0 = previous task_struct (must be preserved across the switch) | ||
533 | * x1 = next task_struct | ||
534 | * Previous and next are guaranteed not to be the same. | ||
535 | * | ||
536 | */ | ||
537 | ENTRY(cpu_switch_to) | ||
538 | add x8, x0, #THREAD_CPU_CONTEXT | ||
539 | mov x9, sp | ||
540 | stp x19, x20, [x8], #16 // store callee-saved registers | ||
541 | stp x21, x22, [x8], #16 | ||
542 | stp x23, x24, [x8], #16 | ||
543 | stp x25, x26, [x8], #16 | ||
544 | stp x27, x28, [x8], #16 | ||
545 | stp x29, x9, [x8], #16 | ||
546 | str lr, [x8] | ||
547 | add x8, x1, #THREAD_CPU_CONTEXT | ||
548 | ldp x19, x20, [x8], #16 // restore callee-saved registers | ||
549 | ldp x21, x22, [x8], #16 | ||
550 | ldp x23, x24, [x8], #16 | ||
551 | ldp x25, x26, [x8], #16 | ||
552 | ldp x27, x28, [x8], #16 | ||
553 | ldp x29, x9, [x8], #16 | ||
554 | ldr lr, [x8] | ||
555 | mov sp, x9 | ||
556 | ret | ||
557 | ENDPROC(cpu_switch_to) | ||
558 | |||
559 | /* | ||
560 | * This is the fast syscall return path. We do as little as possible here, | ||
561 | * and this includes saving x0 back into the kernel stack. | ||
562 | */ | ||
563 | ret_fast_syscall: | ||
564 | disable_irq // disable interrupts | ||
565 | ldr x1, [tsk, #TI_FLAGS] | ||
566 | and x2, x1, #_TIF_WORK_MASK | ||
567 | cbnz x2, fast_work_pending | ||
568 | tbz x1, #TIF_SINGLESTEP, fast_exit | ||
569 | disable_dbg | ||
570 | enable_step x2 | ||
571 | fast_exit: | ||
572 | kernel_exit 0, ret = 1 | ||
573 | |||
574 | /* | ||
575 | * Ok, we need to do extra processing, enter the slow path. | ||
576 | */ | ||
577 | fast_work_pending: | ||
578 | str x0, [sp, #S_X0] // returned x0 | ||
579 | work_pending: | ||
580 | tbnz x1, #TIF_NEED_RESCHED, work_resched | ||
581 | /* TIF_SIGPENDING or TIF_NOTIFY_RESUME case */ | ||
582 | ldr x2, [sp, #S_PSTATE] | ||
583 | mov x0, sp // 'regs' | ||
584 | tst x2, #PSR_MODE_MASK // user mode regs? | ||
585 | b.ne no_work_pending // returning to kernel | ||
586 | bl do_notify_resume | ||
587 | b ret_to_user | ||
588 | work_resched: | ||
589 | enable_dbg | ||
590 | bl schedule | ||
591 | |||
592 | /* | ||
593 | * "slow" syscall return path. | ||
594 | */ | ||
595 | ENTRY(ret_to_user) | ||
596 | disable_irq // disable interrupts | ||
597 | ldr x1, [tsk, #TI_FLAGS] | ||
598 | and x2, x1, #_TIF_WORK_MASK | ||
599 | cbnz x2, work_pending | ||
600 | tbz x1, #TIF_SINGLESTEP, no_work_pending | ||
601 | disable_dbg | ||
602 | enable_step x2 | ||
603 | no_work_pending: | ||
604 | kernel_exit 0, ret = 0 | ||
605 | ENDPROC(ret_to_user) | ||
606 | |||
607 | /* | ||
608 | * This is how we return from a fork. | ||
609 | */ | ||
610 | ENTRY(ret_from_fork) | ||
611 | bl schedule_tail | ||
612 | get_thread_info tsk | ||
613 | b ret_to_user | ||
614 | ENDPROC(ret_from_fork) | ||
615 | |||
616 | /* | ||
617 | * SVC handler. | ||
618 | */ | ||
619 | .align 6 | ||
620 | el0_svc: | ||
621 | adrp stbl, sys_call_table // load syscall table pointer | ||
622 | uxtw scno, w8 // syscall number in w8 | ||
623 | mov sc_nr, #__NR_syscalls | ||
624 | el0_svc_naked: // compat entry point | ||
625 | stp x0, scno, [sp, #S_ORIG_X0] // save the original x0 and syscall number | ||
626 | disable_step x16 | ||
627 | isb | ||
628 | enable_dbg | ||
629 | enable_irq | ||
630 | |||
631 | get_thread_info tsk | ||
632 | ldr x16, [tsk, #TI_FLAGS] // check for syscall tracing | ||
633 | tbnz x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls? | ||
634 | adr lr, ret_fast_syscall // return address | ||
635 | cmp scno, sc_nr // check upper syscall limit | ||
636 | b.hs ni_sys | ||
637 | ldr x16, [stbl, scno, lsl #3] // address in the syscall table | ||
638 | br x16 // call sys_* routine | ||
639 | ni_sys: | ||
640 | mov x0, sp | ||
641 | b do_ni_syscall | ||
642 | ENDPROC(el0_svc) | ||
643 | |||
644 | /* | ||
645 | * This is the really slow path. We're going to be doing context | ||
646 | * switches, and waiting for our parent to respond. | ||
647 | */ | ||
648 | __sys_trace: | ||
649 | mov x1, sp | ||
650 | mov w0, #0 // trace entry | ||
651 | bl syscall_trace | ||
652 | adr lr, __sys_trace_return // return address | ||
653 | uxtw scno, w0 // syscall number (possibly new) | ||
654 | mov x1, sp // pointer to regs | ||
655 | cmp scno, sc_nr // check upper syscall limit | ||
656 | b.hs ni_sys | ||
657 | ldp x0, x1, [sp] // restore the syscall args | ||
658 | ldp x2, x3, [sp, #S_X2] | ||
659 | ldp x4, x5, [sp, #S_X4] | ||
660 | ldp x6, x7, [sp, #S_X6] | ||
661 | ldr x16, [stbl, scno, lsl #3] // address in the syscall table | ||
662 | br x16 // call sys_* routine | ||
663 | |||
664 | __sys_trace_return: | ||
665 | str x0, [sp] // save returned x0 | ||
666 | mov x1, sp | ||
667 | mov w0, #1 // trace exit | ||
668 | bl syscall_trace | ||
669 | b ret_to_user | ||
670 | |||
671 | /* | ||
672 | * Special system call wrappers. | ||
673 | */ | ||
674 | ENTRY(sys_execve_wrapper) | ||
675 | mov x3, sp | ||
676 | b sys_execve | ||
677 | ENDPROC(sys_execve_wrapper) | ||
678 | |||
679 | ENTRY(sys_clone_wrapper) | ||
680 | mov x5, sp | ||
681 | b sys_clone | ||
682 | ENDPROC(sys_clone_wrapper) | ||
683 | |||
684 | ENTRY(sys_rt_sigreturn_wrapper) | ||
685 | mov x0, sp | ||
686 | b sys_rt_sigreturn | ||
687 | ENDPROC(sys_rt_sigreturn_wrapper) | ||
688 | |||
689 | ENTRY(sys_sigaltstack_wrapper) | ||
690 | ldr x2, [sp, #S_SP] | ||
691 | b sys_sigaltstack | ||
692 | ENDPROC(sys_sigaltstack_wrapper) | ||
693 | |||
694 | ENTRY(handle_arch_irq) | ||
695 | .quad 0 | ||
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
new file mode 100644
index 000000000000..d25459ff57fc
--- /dev/null
+++ b/arch/arm64/kernel/stacktrace.c
@@ -0,0 +1,127 @@
1 | /* | ||
2 | * Stack tracing support | ||
3 | * | ||
4 | * Copyright (C) 2012 ARM Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/export.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/stacktrace.h> | ||
22 | |||
23 | #include <asm/stacktrace.h> | ||
24 | |||
25 | /* | ||
26 | * AArch64 PCS assigns the frame pointer to x29. | ||
27 | * | ||
28 | * A simple function prologue looks like this: | ||
29 | * sub sp, sp, #0x10 | ||
30 | * stp x29, x30, [sp] | ||
31 | * mov x29, sp | ||
32 | * | ||
33 | * A simple function epilogue looks like this: | ||
34 | * mov sp, x29 | ||
35 | * ldp x29, x30, [sp] | ||
36 | * add sp, sp, #0x10 | ||
37 | */ | ||
38 | int unwind_frame(struct stackframe *frame) | ||
39 | { | ||
40 | unsigned long high, low; | ||
41 | unsigned long fp = frame->fp; | ||
42 | |||
43 | low = frame->sp; | ||
44 | high = ALIGN(low, THREAD_SIZE); | ||
45 | |||
46 | if (fp < low || fp > high || fp & 0xf) | ||
47 | return -EINVAL; | ||
48 | |||
49 | frame->sp = fp + 0x10; | ||
50 | frame->fp = *(unsigned long *)(fp); | ||
51 | frame->pc = *(unsigned long *)(fp + 8); | ||
52 | |||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | void notrace walk_stackframe(struct stackframe *frame, | ||
57 | int (*fn)(struct stackframe *, void *), void *data) | ||
58 | { | ||
59 | while (1) { | ||
60 | int ret; | ||
61 | |||
62 | if (fn(frame, data)) | ||
63 | break; | ||
64 | ret = unwind_frame(frame); | ||
65 | if (ret < 0) | ||
66 | break; | ||
67 | } | ||
68 | } | ||
69 | EXPORT_SYMBOL(walk_stackframe); | ||
70 | |||
71 | #ifdef CONFIG_STACKTRACE | ||
72 | struct stack_trace_data { | ||
73 | struct stack_trace *trace; | ||
74 | unsigned int no_sched_functions; | ||
75 | unsigned int skip; | ||
76 | }; | ||
77 | |||
78 | static int save_trace(struct stackframe *frame, void *d) | ||
79 | { | ||
80 | struct stack_trace_data *data = d; | ||
81 | struct stack_trace *trace = data->trace; | ||
82 | unsigned long addr = frame->pc; | ||
83 | |||
84 | if (data->no_sched_functions && in_sched_functions(addr)) | ||
85 | return 0; | ||
86 | if (data->skip) { | ||
87 | data->skip--; | ||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | trace->entries[trace->nr_entries++] = addr; | ||
92 | |||
93 | return trace->nr_entries >= trace->max_entries; | ||
94 | } | ||
95 | |||
96 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | ||
97 | { | ||
98 | struct stack_trace_data data; | ||
99 | struct stackframe frame; | ||
100 | |||
101 | data.trace = trace; | ||
102 | data.skip = trace->skip; | ||
103 | |||
104 | if (tsk != current) { | ||
105 | data.no_sched_functions = 1; | ||
106 | frame.fp = thread_saved_fp(tsk); | ||
107 | frame.sp = thread_saved_sp(tsk); | ||
108 | frame.pc = thread_saved_pc(tsk); | ||
109 | } else { | ||
110 | register unsigned long current_sp asm("sp"); | ||
111 | data.no_sched_functions = 0; | ||
112 | frame.fp = (unsigned long)__builtin_frame_address(0); | ||
113 | frame.sp = current_sp; | ||
114 | frame.pc = (unsigned long)save_stack_trace_tsk; | ||
115 | } | ||
116 | |||
117 | walk_stackframe(&frame, save_trace, &data); | ||
118 | if (trace->nr_entries < trace->max_entries) | ||
119 | trace->entries[trace->nr_entries++] = ULONG_MAX; | ||
120 | } | ||
121 | |||
122 | void save_stack_trace(struct stack_trace *trace) | ||
123 | { | ||
124 | save_stack_trace_tsk(current, trace); | ||
125 | } | ||
126 | EXPORT_SYMBOL_GPL(save_stack_trace); | ||
127 | #endif | ||
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
new file mode 100644
index 000000000000..3883f842434f
--- /dev/null
+++ b/arch/arm64/kernel/traps.c
@@ -0,0 +1,348 @@
1 | /* | ||
2 | * Based on arch/arm/kernel/traps.c | ||
3 | * | ||
4 | * Copyright (C) 1995-2009 Russell King | ||
5 | * Copyright (C) 2012 ARM Ltd. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include <linux/signal.h> | ||
21 | #include <linux/personality.h> | ||
22 | #include <linux/kallsyms.h> | ||
23 | #include <linux/spinlock.h> | ||
24 | #include <linux/uaccess.h> | ||
25 | #include <linux/hardirq.h> | ||
26 | #include <linux/kdebug.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/kexec.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/sched.h> | ||
32 | #include <linux/syscalls.h> | ||
33 | |||
34 | #include <asm/atomic.h> | ||
35 | #include <asm/traps.h> | ||
36 | #include <asm/stacktrace.h> | ||
37 | #include <asm/exception.h> | ||
38 | #include <asm/system_misc.h> | ||
39 | |||
/*
 * Human-readable names for the four exception classes, indexed by the
 * 'reason' value that the entry code passes to bad_mode().
 */
static const char *handler[]= {
	"Synchronous Abort",
	"IRQ",
	"FIQ",
	"Error"
};

/* When non-zero, otherwise-unhandled faults are logged to the console. */
int show_unhandled_signals = 1;
48 | |||
/*
 * Dump out the contents of some memory nicely...
 *
 * Prints the 32-bit words in [bottom, top) in hex, eight per output
 * line, via __get_user so that unreadable locations come out as
 * "????????" instead of faulting.
 */
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
		     unsigned long top)
{
	unsigned long first;
	mm_segment_t fs;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space. Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	printk("%s%s(0x%016lx to 0x%016lx)\n", lvl, str, bottom, top);

	/* Walk the range one 32-byte (eight-word) line at a time. */
	for (first = bottom & ~31; first < top; first += 32) {
		unsigned long p;
		/* Eight " xxxxxxxx" fields of 9 chars each, plus NUL. */
		char str[sizeof(" 12345678") * 8 + 1];

		memset(str, ' ', sizeof(str));
		str[sizeof(str) - 1] = '\0';

		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
			/* Skip the aligned-down words before 'bottom'. */
			if (p >= bottom && p < top) {
				unsigned int val;
				if (__get_user(val, (unsigned int *)p) == 0)
					sprintf(str + i * 9, " %08x", val);
				else
					sprintf(str + i * 9, " ????????");
			}
		}
		/* Label each line with the low 16 address bits. */
		printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
	}

	set_fs(fs);
}
90 | |||
91 | static void dump_backtrace_entry(unsigned long where, unsigned long stack) | ||
92 | { | ||
93 | print_ip_sym(where); | ||
94 | if (in_exception_text(where)) | ||
95 | dump_mem("", "Exception stack", stack, | ||
96 | stack + sizeof(struct pt_regs)); | ||
97 | } | ||
98 | |||
/*
 * Print a "Code:" line with the five 32-bit words around the faulting
 * PC: the four preceding words, then the faulting word itself in
 * parentheses.
 */
static void dump_instr(const char *lvl, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	mm_segment_t fs;
	/* Five "xxxxxxxx " fields, plus "()" around one, plus NUL. */
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space. Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	/* Words addr[-4]..addr[0]; i == 0 is the faulting instruction. */
	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		bad = __get_user(val, &((u32 *)addr)[i]);

		if (!bad)
			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
		else {
			p += sprintf(p, "bad PC value");
			break;
		}
	}
	printk("%sCode: %s\n", lvl, str);

	set_fs(fs);
}
130 | |||
/*
 * Walk and print the call stack for @tsk (current if NULL).  The
 * unwind starts from @regs when given, otherwise from this function's
 * own live frame (for current) or from the context saved at context
 * switch (for a blocked task).
 */
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	struct stackframe frame;
	const register unsigned long current_sp asm ("sp");

	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (!tsk)
		tsk = current;

	if (regs) {
		frame.fp = regs->regs[29];	/* x29 is the AArch64 frame pointer */
		frame.sp = regs->sp;
		frame.pc = regs->pc;
	} else if (tsk == current) {
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_sp;
		frame.pc = (unsigned long)dump_backtrace;
	} else {
		/*
		 * task blocked in __switch_to
		 */
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.pc = thread_saved_pc(tsk);
	}

	printk("Call trace:\n");
	while (1) {
		/* Print the pc captured before the unwind step below. */
		unsigned long where = frame.pc;
		int ret;

		ret = unwind_frame(&frame);
		if (ret < 0)
			break;
		dump_backtrace_entry(where, frame.sp);
	}
}
169 | |||
170 | void dump_stack(void) | ||
171 | { | ||
172 | dump_backtrace(NULL, NULL); | ||
173 | } | ||
174 | |||
175 | EXPORT_SYMBOL(dump_stack); | ||
176 | |||
/*
 * Print a call trace for @tsk (current if NULL).  The @sp argument is
 * accepted for API compatibility but is not used here.
 */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	dump_backtrace(NULL, tsk);
	/*
	 * NOTE(review): compiler barrier — presumably keeps this frame
	 * intact until the dump completes (e.g. prevents a tail call);
	 * confirm against the arm precedent this file is based on.
	 */
	barrier();
}
182 | |||
/* Config-dependent suffixes appended to the oops banner in __die(). */
#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#else
#define S_PREEMPT ""
#endif
#ifdef CONFIG_SMP
#define S_SMP " SMP"
#else
#define S_SMP ""
#endif
193 | |||
/*
 * Print the oops banner, modules, registers, raw stack and backtrace
 * for a fatal kernel fault.  Returns the notify_die() verdict so that
 * die() can decide whether to kill the task.  Called by die() with
 * die_lock held.
 */
static int __die(const char *str, int err, struct thread_info *thread,
		 struct pt_regs *regs)
{
	struct task_struct *tsk = thread->task;
	static int die_counter;	/* numbers successive oopses: [#N] */
	int ret;

	pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
		 str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return ret;

	print_modules();
	__show_regs(regs);
	pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);

	/* Only dump the raw stack for faults taken in kernel context. */
	if (!user_mode(regs) || in_interrupt()) {
		dump_mem(KERN_EMERG, "Stack: ", regs->sp,
			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
		dump_backtrace(regs, tsk);
		dump_instr(KERN_EMERG, regs);
	}

	return ret;
}
223 | |||
/* Serialises oops output from multiple CPUs. */
static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 *
 * Prints the oops via __die(), optionally triggers a crash-kexec, and
 * then either panics (fault in interrupt context, or panic_on_oops)
 * or kills the offending task with SIGSEGV.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	struct thread_info *thread = current_thread_info();
	int ret;

	oops_enter();

	raw_spin_lock_irq(&die_lock);
	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, thread, regs);

	if (regs && kexec_should_crash(thread->task))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE);
	raw_spin_unlock_irq(&die_lock);
	oops_exit();

	/* A fatal fault in interrupt context cannot be recovered from. */
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	/* Kill the task unless a die notifier claimed the event. */
	if (ret != NOTIFY_STOP)
		do_exit(SIGSEGV);
}
256 | |||
257 | void arm64_notify_die(const char *str, struct pt_regs *regs, | ||
258 | struct siginfo *info, int err) | ||
259 | { | ||
260 | if (user_mode(regs)) | ||
261 | force_sig_info(info->si_signo, info, current); | ||
262 | else | ||
263 | die(str, regs, err); | ||
264 | } | ||
265 | |||
266 | asmlinkage void __exception do_undefinstr(struct pt_regs *regs) | ||
267 | { | ||
268 | siginfo_t info; | ||
269 | void __user *pc = (void __user *)instruction_pointer(regs); | ||
270 | |||
271 | #ifdef CONFIG_COMPAT | ||
272 | /* check for AArch32 breakpoint instructions */ | ||
273 | if (compat_user_mode(regs) && aarch32_break_trap(regs) == 0) | ||
274 | return; | ||
275 | #endif | ||
276 | |||
277 | if (show_unhandled_signals) { | ||
278 | pr_info("%s[%d]: undefined instruction: pc=%p\n", | ||
279 | current->comm, task_pid_nr(current), pc); | ||
280 | dump_instr(KERN_INFO, regs); | ||
281 | } | ||
282 | |||
283 | info.si_signo = SIGILL; | ||
284 | info.si_errno = 0; | ||
285 | info.si_code = ILL_ILLOPC; | ||
286 | info.si_addr = pc; | ||
287 | |||
288 | arm64_notify_die("Oops - undefined instruction", regs, &info, 0); | ||
289 | } | ||
290 | |||
291 | long compat_arm_syscall(struct pt_regs *regs); | ||
292 | |||
293 | asmlinkage long do_ni_syscall(struct pt_regs *regs) | ||
294 | { | ||
295 | #ifdef CONFIG_COMPAT | ||
296 | long ret; | ||
297 | if (is_compat_task()) { | ||
298 | ret = compat_arm_syscall(regs); | ||
299 | if (ret != -ENOSYS) | ||
300 | return ret; | ||
301 | } | ||
302 | #endif | ||
303 | |||
304 | if (show_unhandled_signals) { | ||
305 | pr_info("%s[%d]: syscall %d\n", current->comm, | ||
306 | task_pid_nr(current), (int)regs->syscallno); | ||
307 | dump_instr("", regs); | ||
308 | if (user_mode(regs)) | ||
309 | __show_regs(regs); | ||
310 | } | ||
311 | |||
312 | return sys_ni_syscall(); | ||
313 | } | ||
314 | |||
/*
 * bad_mode handles the impossible case in the exception vector.
 * 'reason' indexes the handler[] name table above; 'esr' is the
 * exception syndrome value reported by the CPU.
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
	console_verbose();

	pr_crit("Bad mode in %s handler detected, code 0x%08x\n",
		handler[reason], esr);

	die("Oops - bad mode", regs, 0);
	/* die() should not return; if it somehow does, stop dead here. */
	local_irq_disable();
	panic("bad mode");
}
329 | |||
/* Report a bad pte value together with the reporting source location. */
void __pte_error(const char *file, int line, unsigned long val)
{
	printk("%s:%d: bad pte %016lx.\n", file, line, val);
}
334 | |||
/* Report a bad pmd value together with the reporting source location. */
void __pmd_error(const char *file, int line, unsigned long val)
{
	printk("%s:%d: bad pmd %016lx.\n", file, line, val);
}
339 | |||
/* Report a bad pgd value together with the reporting source location. */
void __pgd_error(const char *file, int line, unsigned long val)
{
	printk("%s:%d: bad pgd %016lx.\n", file, line, val);
}
344 | |||
345 | void __init trap_init(void) | ||
346 | { | ||
347 | return; | ||
348 | } | ||