diff options
 arch/arm64/include/asm/assembler.h | 23
 arch/arm64/kernel/entry.S          | 73
 2 files changed, 42 insertions(+), 54 deletions(-)
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index fd3e3924041b..5901480bfdca 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -21,6 +21,7 @@
21 | #endif | 21 | #endif |
22 | 22 | ||
23 | #include <asm/ptrace.h> | 23 | #include <asm/ptrace.h> |
24 | #include <asm/thread_info.h> | ||
24 | 25 | ||
25 | /* | 26 | /* |
26 | * Stack pushing/popping (register pairs only). Equivalent to store decrement | 27 | * Stack pushing/popping (register pairs only). Equivalent to store decrement |
@@ -68,23 +69,31 @@
68 | msr daifclr, #8 | 69 | msr daifclr, #8 |
69 | .endm | 70 | .endm |
70 | 71 | ||
71 | .macro disable_step, tmp | 72 | .macro disable_step_tsk, flgs, tmp |
73 | tbz \flgs, #TIF_SINGLESTEP, 9990f | ||
72 | mrs \tmp, mdscr_el1 | 74 | mrs \tmp, mdscr_el1 |
73 | bic \tmp, \tmp, #1 | 75 | bic \tmp, \tmp, #1 |
74 | msr mdscr_el1, \tmp | 76 | msr mdscr_el1, \tmp |
77 | isb // Synchronise with enable_dbg | ||
78 | 9990: | ||
75 | .endm | 79 | .endm |
76 | 80 | ||
77 | .macro enable_step, tmp | 81 | .macro enable_step_tsk, flgs, tmp |
82 | tbz \flgs, #TIF_SINGLESTEP, 9990f | ||
83 | disable_dbg | ||
78 | mrs \tmp, mdscr_el1 | 84 | mrs \tmp, mdscr_el1 |
79 | orr \tmp, \tmp, #1 | 85 | orr \tmp, \tmp, #1 |
80 | msr mdscr_el1, \tmp | 86 | msr mdscr_el1, \tmp |
87 | 9990: | ||
81 | .endm | 88 | .endm |
82 | 89 | ||
83 | .macro enable_dbg_if_not_stepping, tmp | 90 | /* |
84 | mrs \tmp, mdscr_el1 | 91 | * Enable both debug exceptions and interrupts. This is likely to be |
85 | tbnz \tmp, #0, 9990f | 92 | * faster than two daifclr operations, since writes to this register |
86 | enable_dbg | 93 | * are self-synchronising. |
87 | 9990: | 94 | */ |
95 | .macro enable_dbg_and_irq | ||
96 | msr daifclr, #(8 | 2) | ||
88 | .endm | 97 | .endm |
89 | 98 | ||
90 | /* | 99 | /* |
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 39ac630d83de..e8b23a3b68d0 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -60,6 +60,9 @@
60 | push x0, x1 | 60 | push x0, x1 |
61 | .if \el == 0 | 61 | .if \el == 0 |
62 | mrs x21, sp_el0 | 62 | mrs x21, sp_el0 |
63 | get_thread_info tsk // Ensure MDSCR_EL1.SS is clear, | ||
64 | ldr x19, [tsk, #TI_FLAGS] // since we can unmask debug | ||
65 | disable_step_tsk x19, x20 // exceptions when scheduling. | ||
63 | .else | 66 | .else |
64 | add x21, sp, #S_FRAME_SIZE | 67 | add x21, sp, #S_FRAME_SIZE |
65 | .endif | 68 | .endif |
@@ -259,7 +262,7 @@ el1_da:
259 | * Data abort handling | 262 | * Data abort handling |
260 | */ | 263 | */ |
261 | mrs x0, far_el1 | 264 | mrs x0, far_el1 |
262 | enable_dbg_if_not_stepping x2 | 265 | enable_dbg |
263 | // re-enable interrupts if they were enabled in the aborted context | 266 | // re-enable interrupts if they were enabled in the aborted context |
264 | tbnz x23, #7, 1f // PSR_I_BIT | 267 | tbnz x23, #7, 1f // PSR_I_BIT |
265 | enable_irq | 268 | enable_irq |
@@ -275,6 +278,7 @@ el1_sp_pc:
275 | * Stack or PC alignment exception handling | 278 | * Stack or PC alignment exception handling |
276 | */ | 279 | */ |
277 | mrs x0, far_el1 | 280 | mrs x0, far_el1 |
281 | enable_dbg | ||
278 | mov x1, x25 | 282 | mov x1, x25 |
279 | mov x2, sp | 283 | mov x2, sp |
280 | b do_sp_pc_abort | 284 | b do_sp_pc_abort |
@@ -282,6 +286,7 @@ el1_undef:
282 | /* | 286 | /* |
283 | * Undefined instruction | 287 | * Undefined instruction |
284 | */ | 288 | */ |
289 | enable_dbg | ||
285 | mov x0, sp | 290 | mov x0, sp |
286 | b do_undefinstr | 291 | b do_undefinstr |
287 | el1_dbg: | 292 | el1_dbg: |
@@ -294,10 +299,11 @@ el1_dbg:
294 | mrs x0, far_el1 | 299 | mrs x0, far_el1 |
295 | mov x2, sp // struct pt_regs | 300 | mov x2, sp // struct pt_regs |
296 | bl do_debug_exception | 301 | bl do_debug_exception |
297 | 302 | enable_dbg | |
298 | kernel_exit 1 | 303 | kernel_exit 1 |
299 | el1_inv: | 304 | el1_inv: |
300 | // TODO: add support for undefined instructions in kernel mode | 305 | // TODO: add support for undefined instructions in kernel mode |
306 | enable_dbg | ||
301 | mov x0, sp | 307 | mov x0, sp |
302 | mov x1, #BAD_SYNC | 308 | mov x1, #BAD_SYNC |
303 | mrs x2, esr_el1 | 309 | mrs x2, esr_el1 |
@@ -307,7 +313,7 @@ ENDPROC(el1_sync)
307 | .align 6 | 313 | .align 6 |
308 | el1_irq: | 314 | el1_irq: |
309 | kernel_entry 1 | 315 | kernel_entry 1 |
310 | enable_dbg_if_not_stepping x0 | 316 | enable_dbg |
311 | #ifdef CONFIG_TRACE_IRQFLAGS | 317 | #ifdef CONFIG_TRACE_IRQFLAGS |
312 | bl trace_hardirqs_off | 318 | bl trace_hardirqs_off |
313 | #endif | 319 | #endif |
@@ -332,8 +338,7 @@ ENDPROC(el1_irq)
332 | #ifdef CONFIG_PREEMPT | 338 | #ifdef CONFIG_PREEMPT |
333 | el1_preempt: | 339 | el1_preempt: |
334 | mov x24, lr | 340 | mov x24, lr |
335 | 1: enable_dbg | 341 | 1: bl preempt_schedule_irq // irq en/disable is done inside |
336 | bl preempt_schedule_irq // irq en/disable is done inside | ||
337 | ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS | 342 | ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS |
338 | tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling? | 343 | tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling? |
339 | ret x24 | 344 | ret x24 |
@@ -349,7 +354,7 @@ el0_sync:
349 | lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class | 354 | lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class |
350 | cmp x24, #ESR_EL1_EC_SVC64 // SVC in 64-bit state | 355 | cmp x24, #ESR_EL1_EC_SVC64 // SVC in 64-bit state |
351 | b.eq el0_svc | 356 | b.eq el0_svc |
352 | adr lr, ret_from_exception | 357 | adr lr, ret_to_user |
353 | cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0 | 358 | cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0 |
354 | b.eq el0_da | 359 | b.eq el0_da |
355 | cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0 | 360 | cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0 |
@@ -378,7 +383,7 @@ el0_sync_compat:
378 | lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class | 383 | lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class |
379 | cmp x24, #ESR_EL1_EC_SVC32 // SVC in 32-bit state | 384 | cmp x24, #ESR_EL1_EC_SVC32 // SVC in 32-bit state |
380 | b.eq el0_svc_compat | 385 | b.eq el0_svc_compat |
381 | adr lr, ret_from_exception | 386 | adr lr, ret_to_user |
382 | cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0 | 387 | cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0 |
383 | b.eq el0_da | 388 | b.eq el0_da |
384 | cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0 | 389 | cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0 |
@@ -423,11 +428,8 @@ el0_da:
423 | */ | 428 | */ |
424 | mrs x0, far_el1 | 429 | mrs x0, far_el1 |
425 | bic x0, x0, #(0xff << 56) | 430 | bic x0, x0, #(0xff << 56) |
426 | disable_step x1 | ||
427 | isb | ||
428 | enable_dbg | ||
429 | // enable interrupts before calling the main handler | 431 | // enable interrupts before calling the main handler |
430 | enable_irq | 432 | enable_dbg_and_irq |
431 | mov x1, x25 | 433 | mov x1, x25 |
432 | mov x2, sp | 434 | mov x2, sp |
433 | b do_mem_abort | 435 | b do_mem_abort |
@@ -436,11 +438,8 @@ el0_ia:
436 | * Instruction abort handling | 438 | * Instruction abort handling |
437 | */ | 439 | */ |
438 | mrs x0, far_el1 | 440 | mrs x0, far_el1 |
439 | disable_step x1 | ||
440 | isb | ||
441 | enable_dbg | ||
442 | // enable interrupts before calling the main handler | 441 | // enable interrupts before calling the main handler |
443 | enable_irq | 442 | enable_dbg_and_irq |
444 | orr x1, x25, #1 << 24 // use reserved ISS bit for instruction aborts | 443 | orr x1, x25, #1 << 24 // use reserved ISS bit for instruction aborts |
445 | mov x2, sp | 444 | mov x2, sp |
446 | b do_mem_abort | 445 | b do_mem_abort |
@@ -448,6 +447,7 @@ el0_fpsimd_acc:
448 | /* | 447 | /* |
449 | * Floating Point or Advanced SIMD access | 448 | * Floating Point or Advanced SIMD access |
450 | */ | 449 | */ |
450 | enable_dbg | ||
451 | mov x0, x25 | 451 | mov x0, x25 |
452 | mov x1, sp | 452 | mov x1, sp |
453 | b do_fpsimd_acc | 453 | b do_fpsimd_acc |
@@ -455,6 +455,7 @@ el0_fpsimd_exc:
455 | /* | 455 | /* |
456 | * Floating Point or Advanced SIMD exception | 456 | * Floating Point or Advanced SIMD exception |
457 | */ | 457 | */ |
458 | enable_dbg | ||
458 | mov x0, x25 | 459 | mov x0, x25 |
459 | mov x1, sp | 460 | mov x1, sp |
460 | b do_fpsimd_exc | 461 | b do_fpsimd_exc |
@@ -463,11 +464,8 @@ el0_sp_pc:
463 | * Stack or PC alignment exception handling | 464 | * Stack or PC alignment exception handling |
464 | */ | 465 | */ |
465 | mrs x0, far_el1 | 466 | mrs x0, far_el1 |
466 | disable_step x1 | ||
467 | isb | ||
468 | enable_dbg | ||
469 | // enable interrupts before calling the main handler | 467 | // enable interrupts before calling the main handler |
470 | enable_irq | 468 | enable_dbg_and_irq |
471 | mov x1, x25 | 469 | mov x1, x25 |
472 | mov x2, sp | 470 | mov x2, sp |
473 | b do_sp_pc_abort | 471 | b do_sp_pc_abort |
@@ -475,9 +473,9 @@ el0_undef:
475 | /* | 473 | /* |
476 | * Undefined instruction | 474 | * Undefined instruction |
477 | */ | 475 | */ |
478 | mov x0, sp | ||
479 | // enable interrupts before calling the main handler | 476 | // enable interrupts before calling the main handler |
480 | enable_irq | 477 | enable_dbg_and_irq |
478 | mov x0, sp | ||
481 | b do_undefinstr | 479 | b do_undefinstr |
482 | el0_dbg: | 480 | el0_dbg: |
483 | /* | 481 | /* |
@@ -485,11 +483,13 @@ el0_dbg:
485 | */ | 483 | */ |
486 | tbnz x24, #0, el0_inv // EL0 only | 484 | tbnz x24, #0, el0_inv // EL0 only |
487 | mrs x0, far_el1 | 485 | mrs x0, far_el1 |
488 | disable_step x1 | ||
489 | mov x1, x25 | 486 | mov x1, x25 |
490 | mov x2, sp | 487 | mov x2, sp |
491 | b do_debug_exception | 488 | bl do_debug_exception |
489 | enable_dbg | ||
490 | b ret_to_user | ||
492 | el0_inv: | 491 | el0_inv: |
492 | enable_dbg | ||
493 | mov x0, sp | 493 | mov x0, sp |
494 | mov x1, #BAD_SYNC | 494 | mov x1, #BAD_SYNC |
495 | mrs x2, esr_el1 | 495 | mrs x2, esr_el1 |
@@ -500,15 +500,12 @@ ENDPROC(el0_sync)
500 | el0_irq: | 500 | el0_irq: |
501 | kernel_entry 0 | 501 | kernel_entry 0 |
502 | el0_irq_naked: | 502 | el0_irq_naked: |
503 | disable_step x1 | ||
504 | isb | ||
505 | enable_dbg | 503 | enable_dbg |
506 | #ifdef CONFIG_TRACE_IRQFLAGS | 504 | #ifdef CONFIG_TRACE_IRQFLAGS |
507 | bl trace_hardirqs_off | 505 | bl trace_hardirqs_off |
508 | #endif | 506 | #endif |
509 | 507 | ||
510 | irq_handler | 508 | irq_handler |
511 | get_thread_info tsk | ||
512 | 509 | ||
513 | #ifdef CONFIG_TRACE_IRQFLAGS | 510 | #ifdef CONFIG_TRACE_IRQFLAGS |
514 | bl trace_hardirqs_on | 511 | bl trace_hardirqs_on |
@@ -517,14 +514,6 @@ el0_irq_naked:
517 | ENDPROC(el0_irq) | 514 | ENDPROC(el0_irq) |
518 | 515 | ||
519 | /* | 516 | /* |
520 | * This is the return code to user mode for abort handlers | ||
521 | */ | ||
522 | ret_from_exception: | ||
523 | get_thread_info tsk | ||
524 | b ret_to_user | ||
525 | ENDPROC(ret_from_exception) | ||
526 | |||
527 | /* | ||
528 | * Register switch for AArch64. The callee-saved registers need to be saved | 517 | * Register switch for AArch64. The callee-saved registers need to be saved |
529 | * and restored. On entry: | 518 | * and restored. On entry: |
530 | * x0 = previous task_struct (must be preserved across the switch) | 519 | * x0 = previous task_struct (must be preserved across the switch) |
@@ -563,10 +552,7 @@ ret_fast_syscall:
563 | ldr x1, [tsk, #TI_FLAGS] | 552 | ldr x1, [tsk, #TI_FLAGS] |
564 | and x2, x1, #_TIF_WORK_MASK | 553 | and x2, x1, #_TIF_WORK_MASK |
565 | cbnz x2, fast_work_pending | 554 | cbnz x2, fast_work_pending |
566 | tbz x1, #TIF_SINGLESTEP, fast_exit | 555 | enable_step_tsk x1, x2 |
567 | disable_dbg | ||
568 | enable_step x2 | ||
569 | fast_exit: | ||
570 | kernel_exit 0, ret = 1 | 556 | kernel_exit 0, ret = 1 |
571 | 557 | ||
572 | /* | 558 | /* |
@@ -585,7 +571,6 @@ work_pending:
585 | bl do_notify_resume | 571 | bl do_notify_resume |
586 | b ret_to_user | 572 | b ret_to_user |
587 | work_resched: | 573 | work_resched: |
588 | enable_dbg | ||
589 | bl schedule | 574 | bl schedule |
590 | 575 | ||
591 | /* | 576 | /* |
@@ -596,9 +581,7 @@ ret_to_user:
596 | ldr x1, [tsk, #TI_FLAGS] | 581 | ldr x1, [tsk, #TI_FLAGS] |
597 | and x2, x1, #_TIF_WORK_MASK | 582 | and x2, x1, #_TIF_WORK_MASK |
598 | cbnz x2, work_pending | 583 | cbnz x2, work_pending |
599 | tbz x1, #TIF_SINGLESTEP, no_work_pending | 584 | enable_step_tsk x1, x2 |
600 | disable_dbg | ||
601 | enable_step x2 | ||
602 | no_work_pending: | 585 | no_work_pending: |
603 | kernel_exit 0, ret = 0 | 586 | kernel_exit 0, ret = 0 |
604 | ENDPROC(ret_to_user) | 587 | ENDPROC(ret_to_user) |
@@ -625,12 +608,8 @@ el0_svc:
625 | mov sc_nr, #__NR_syscalls | 608 | mov sc_nr, #__NR_syscalls |
626 | el0_svc_naked: // compat entry point | 609 | el0_svc_naked: // compat entry point |
627 | stp x0, scno, [sp, #S_ORIG_X0] // save the original x0 and syscall number | 610 | stp x0, scno, [sp, #S_ORIG_X0] // save the original x0 and syscall number |
628 | disable_step x16 | 611 | enable_dbg_and_irq |
629 | isb | ||
630 | enable_dbg | ||
631 | enable_irq | ||
632 | 612 | ||
633 | get_thread_info tsk | ||
634 | ldr x16, [tsk, #TI_FLAGS] // check for syscall tracing | 613 | ldr x16, [tsk, #TI_FLAGS] // check for syscall tracing |
635 | tbnz x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls? | 614 | tbnz x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls? |
636 | adr lr, ret_fast_syscall // return address | 615 | adr lr, ret_fast_syscall // return address |