diff options
Diffstat (limited to 'arch/arm64/include/asm/assembler.h')
-rw-r--r-- | arch/arm64/include/asm/assembler.h | 136 |
1 file changed, 136 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 053d83e8db6f..0bcc98dbba56 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h | |||
@@ -565,4 +565,140 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU | |||
565 | #endif | 565 | #endif |
566 | .endm | 566 | .endm |
567 | 567 | ||
/*
 * frame_push - Push @regcount callee saved registers to the stack,
 *              starting at x19, as well as x29/x30, and set x29 to
 *              the new value of sp. Add @extra bytes of stack space
 *              for locals.
 *
 * @regcount is required and is validated by __frame to be in
 * [0 .. 10] (covering x19 .. x28); @extra is optional and must be a
 * multiple of 16 so sp stays 16-byte aligned. Thin wrapper: all work
 * is done by __frame with the 'st' (store) opcode prefix.
 */
	.macro		frame_push, regcount:req, extra
	__frame		st, \regcount, \extra
	.endm
577 | |||
/*
 * frame_pop  - Pop the callee saved registers from the stack that were
 *              pushed in the most recent call to frame_push, as well
 *              as x29/x30 and any extra stack space that may have been
 *              allocated.
 *
 * Takes no arguments: the register count and frame size are recovered
 * from the .Lframe_* assembly-time symbols recorded by the matching
 * frame_push, so push and pop cannot get out of sync.
 */
	.macro		frame_pop
	__frame		ld
	.endm
587 | |||
/*
 * __frame_regs - Internal helper for __frame: store (\op == 'st') or
 *                load (\op == 'ld') callee saved registers \reg1/\reg2
 *                occupying slots \num and \num+1 of the frame record.
 *
 * Slot offsets are (\num + 1) * 8 from sp, i.e. just above the
 * x29/x30 pair saved at offset 0.  If exactly one register remains
 * (.Lframe_regcount == \num), a single str/ldr is emitted by pasting
 * 'r' onto \op; if two or more remain, an stp/ldp pair is emitted by
 * pasting 'p'.  If fewer remain, nothing is emitted.
 */
	.macro		__frame_regs, reg1, reg2, op, num
	.if		.Lframe_regcount == \num
	\op\()r		\reg1, [sp, #(\num + 1) * 8]
	.elseif		.Lframe_regcount > \num
	\op\()p		\reg1, \reg2, [sp, #(\num + 1) * 8]
	.endif
	.endm
595 | |||
/*
 * __frame - Shared implementation of frame_push ('st') and frame_pop
 *           ('ld').
 *
 * Frame layout: x29/x30 at sp+0, then up to \regcount callee saved
 * registers starting at x19 in slots above them, then \extra bytes of
 * local storage.  The register area size is rounded up to a multiple
 * of 16 bytes: ((\regcount + 3) / 2) * 16 == (regcount + 2 registers
 * for x29/x30, rounded up to an even count) * 8.
 *
 * Nesting is detected via the .Lframe_regcount symbol: it holds the
 * live frame's register count between push and pop, and the sentinel
 * value -1 once the frame has been popped.
 */
	.macro		__frame, op, regcount, extra=0
	.ifc		\op, st
	/* Compile-time argument validation. */
	.if		(\regcount) < 0 || (\regcount) > 10
	.error		"regcount should be in the range [0 ... 10]"
	.endif
	.if		((\extra) % 16) != 0
	.error		"extra should be a multiple of 16 bytes"
	.endif
	/* Reject a second push before the previous frame was popped. */
	.ifdef		.Lframe_regcount
	.if		.Lframe_regcount != -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	.endif
	.set		.Lframe_regcount, \regcount
	.set		.Lframe_extra, \extra
	.set		.Lframe_local_offset, ((\regcount + 3) / 2) * 16
	/* Allocate the whole frame and save x29/x30 in one pre-indexed stp. */
	stp		x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
	mov		x29, sp
	.endif

	/* Store or load x19..x28, two registers per slot pair. */
	__frame_regs	x19, x20, \op, 1
	__frame_regs	x21, x22, \op, 3
	__frame_regs	x23, x24, \op, 5
	__frame_regs	x25, x26, \op, 7
	__frame_regs	x27, x28, \op, 9

	.ifc		\op, ld
	/* Reject a pop without a matching live push. */
	.if		.Lframe_regcount == -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	/* Restore x29/x30 and free the whole frame via post-indexing. */
	ldp		x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
	.set		.Lframe_regcount, -1
	.endif
	.endm
630 | |||
/*
 * Check whether to yield to another runnable task from kernel mode NEON code
 * (which runs with preemption disabled).
 *
 * if_will_cond_yield_neon
 *        // pre-yield patchup code
 * do_cond_yield_neon
 *        // post-yield patchup code
 * endif_yield_neon    <label>
 *
 * where <label> is optional, and marks the point where execution will resume
 * after a yield has been performed. If omitted, execution resumes right after
 * the endif_yield_neon invocation. Note that the entire sequence, including
 * the provided patchup code, will be omitted from the image if CONFIG_PREEMPT
 * is not defined.
 *
 * As a convenience, in the case where no patchup code is required, the above
 * sequence may be abbreviated to
 *
 * cond_yield_neon <label>
 *
 * Note that the patchup code does not support assembler directives that change
 * the output section, any use of such directives is undefined.
 *
 * The yield itself consists of the following:
 * - Check whether the preempt count is exactly 1, in which case disabling
 *   preemption once will make the task preemptible. If this is not the case,
 *   yielding is pointless.
 * - Check whether TIF_NEED_RESCHED is set, and if so, disable and re-enable
 *   kernel mode NEON (which will trigger a reschedule), and branch to the
 *   yield fixup code.
 *
 * This macro sequence may clobber all CPU state that is not guaranteed by the
 * AAPCS to be preserved across an ordinary function call.
 */

/*
 * cond_yield_neon - Convenience wrapper emitting the full three-macro
 *                   yield sequence with empty patchup code.  \lbl is
 *                   forwarded to endif_yield_neon as the resume point.
 */
	.macro		cond_yield_neon, lbl
	if_will_cond_yield_neon
	do_cond_yield_neon
	endif_yield_neon	\lbl
	.endm
672 | |||
/*
 * if_will_cond_yield_neon - Open the conditional-yield sequence.
 *
 * With CONFIG_PREEMPT: load the current task's preempt count and thread
 * flags, and branch to the (out-of-line) pre-yield patchup code only if
 * the preempt count equals PREEMPT_DISABLE_OFFSET (csel zeroes the flags
 * otherwise, so the tbnz cannot fire) AND TIF_NEED_RESCHED is set.
 * The patchup code that follows this macro is emitted into .subsection 1
 * of the current section, i.e. out of the fast path; \@ makes the label
 * unique per expansion.  Clobbers x0 and w1.
 *
 * Without CONFIG_PREEMPT: divert the entire sequence into a
 * ".discard" section so it is dropped from the final image.
 */
	.macro		if_will_cond_yield_neon
#ifdef CONFIG_PREEMPT
	get_thread_info	x0
	ldr		w1, [x0, #TSK_TI_PREEMPT]
	ldr		x0, [x0, #TSK_TI_FLAGS]
	cmp		w1, #PREEMPT_DISABLE_OFFSET
	csel		x0, x0, xzr, eq
	tbnz		x0, #TIF_NEED_RESCHED, .Lyield_\@	// needs rescheduling?
	/* fall through to endif_yield_neon */
	.subsection	1
.Lyield_\@ :
#else
	.section	".discard.cond_yield_neon", "ax"
#endif
	.endm
688 | |||
/*
 * do_cond_yield_neon - Perform the actual yield: ending kernel mode
 *                      NEON re-enables preemption (triggering the
 *                      reschedule), and beginning it again restores
 *                      the NEON context for the resumed task.
 *                      Only reached from the patchup path opened by
 *                      if_will_cond_yield_neon.
 */
	.macro		do_cond_yield_neon
	bl		kernel_neon_end
	bl		kernel_neon_begin
	.endm
693 | |||
/*
 * endif_yield_neon - Close the conditional-yield sequence.
 *
 * Branch from the end of the out-of-line patchup code back to \lbl if
 * given, or to a local label placed right after this macro otherwise.
 * .previous returns emission to the section/subsection that was
 * current before if_will_cond_yield_neon switched away (the fast path,
 * or out of ".discard" in the !CONFIG_PREEMPT case).
 */
	.macro		endif_yield_neon, lbl
	.ifnb		\lbl
	b		\lbl
	.else
	b		.Lyield_out_\@
	.endif
	.previous
.Lyield_out_\@ :
	.endm
703 | |||
568 | #endif /* __ASM_ASSEMBLER_H */ | 704 | #endif /* __ASM_ASSEMBLER_H */ |