about summary refs log tree commit diff stats
path: root/arch/sparc/mm/ultra.S
diff options
context:
space:
mode:
author: David S. Miller <davem@davemloft.net> 2016-10-26 13:20:14 -0400
committer: David S. Miller <davem@davemloft.net> 2016-10-26 13:20:14 -0400
commit a236441bb69723032db94128761a469030c3fe6d (patch)
tree 3993d5823822a98f555b1deb9f3988571aafc394 /arch/sparc/mm/ultra.S
parent 830cda3f9855ff092b0e9610346d110846fc497c (diff)
sparc64: Fix illegal relative branches in hypervisor patched TLB cross-call code.
Just like the non-cross-call TLB flush handlers, the cross-call ones need to avoid doing PC-relative branches outside of their code blocks.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/mm/ultra.S')
-rw-r--r-- arch/sparc/mm/ultra.S 42
1 file changed, 30 insertions, 12 deletions
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index 5128d38b1d1a..0fa2e6202c1f 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -484,7 +484,7 @@ cheetah_patch_cachetlbops:
484 */ 484 */
485 .align 32 485 .align 32
486 .globl xcall_flush_tlb_mm 486 .globl xcall_flush_tlb_mm
487xcall_flush_tlb_mm: /* 21 insns */ 487xcall_flush_tlb_mm: /* 24 insns */
488 mov PRIMARY_CONTEXT, %g2 488 mov PRIMARY_CONTEXT, %g2
489 ldxa [%g2] ASI_DMMU, %g3 489 ldxa [%g2] ASI_DMMU, %g3
490 srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4 490 srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4
@@ -506,9 +506,12 @@ xcall_flush_tlb_mm: /* 21 insns */
506 nop 506 nop
507 nop 507 nop
508 nop 508 nop
509 nop
510 nop
511 nop
509 512
510 .globl xcall_flush_tlb_page 513 .globl xcall_flush_tlb_page
511xcall_flush_tlb_page: /* 17 insns */ 514xcall_flush_tlb_page: /* 20 insns */
512 /* %g5=context, %g1=vaddr */ 515 /* %g5=context, %g1=vaddr */
513 mov PRIMARY_CONTEXT, %g4 516 mov PRIMARY_CONTEXT, %g4
514 ldxa [%g4] ASI_DMMU, %g2 517 ldxa [%g4] ASI_DMMU, %g2
@@ -527,9 +530,12 @@ xcall_flush_tlb_page: /* 17 insns */
527 retry 530 retry
528 nop 531 nop
529 nop 532 nop
533 nop
534 nop
535 nop
530 536
531 .globl xcall_flush_tlb_kernel_range 537 .globl xcall_flush_tlb_kernel_range
532xcall_flush_tlb_kernel_range: /* 25 insns */ 538xcall_flush_tlb_kernel_range: /* 28 insns */
533 sethi %hi(PAGE_SIZE - 1), %g2 539 sethi %hi(PAGE_SIZE - 1), %g2
534 or %g2, %lo(PAGE_SIZE - 1), %g2 540 or %g2, %lo(PAGE_SIZE - 1), %g2
535 andn %g1, %g2, %g1 541 andn %g1, %g2, %g1
@@ -555,6 +561,9 @@ xcall_flush_tlb_kernel_range: /* 25 insns */
555 nop 561 nop
556 nop 562 nop
557 nop 563 nop
564 nop
565 nop
566 nop
558 567
559 /* This runs in a very controlled environment, so we do 568 /* This runs in a very controlled environment, so we do
560 * not need to worry about BH races etc. 569 * not need to worry about BH races etc.
@@ -737,7 +746,7 @@ __hypervisor_tlb_xcall_error:
737 ba,a,pt %xcc, rtrap 746 ba,a,pt %xcc, rtrap
738 747
739 .globl __hypervisor_xcall_flush_tlb_mm 748 .globl __hypervisor_xcall_flush_tlb_mm
740__hypervisor_xcall_flush_tlb_mm: /* 21 insns */ 749__hypervisor_xcall_flush_tlb_mm: /* 24 insns */
741 /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */ 750 /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
742 mov %o0, %g2 751 mov %o0, %g2
743 mov %o1, %g3 752 mov %o1, %g3
@@ -751,7 +760,7 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
751 mov HV_FAST_MMU_DEMAP_CTX, %o5 760 mov HV_FAST_MMU_DEMAP_CTX, %o5
752 ta HV_FAST_TRAP 761 ta HV_FAST_TRAP
753 mov HV_FAST_MMU_DEMAP_CTX, %g6 762 mov HV_FAST_MMU_DEMAP_CTX, %g6
754 brnz,pn %o0, __hypervisor_tlb_xcall_error 763 brnz,pn %o0, 1f
755 mov %o0, %g5 764 mov %o0, %g5
756 mov %g2, %o0 765 mov %g2, %o0
757 mov %g3, %o1 766 mov %g3, %o1
@@ -760,9 +769,12 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
760 mov %g7, %o5 769 mov %g7, %o5
761 membar #Sync 770 membar #Sync
762 retry 771 retry
7721: sethi %hi(__hypervisor_tlb_xcall_error), %g4
773 jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
774 nop
763 775
764 .globl __hypervisor_xcall_flush_tlb_page 776 .globl __hypervisor_xcall_flush_tlb_page
765__hypervisor_xcall_flush_tlb_page: /* 17 insns */ 777__hypervisor_xcall_flush_tlb_page: /* 20 insns */
766 /* %g5=ctx, %g1=vaddr */ 778 /* %g5=ctx, %g1=vaddr */
767 mov %o0, %g2 779 mov %o0, %g2
768 mov %o1, %g3 780 mov %o1, %g3
@@ -774,16 +786,19 @@ __hypervisor_xcall_flush_tlb_page: /* 17 insns */
774 sllx %o0, PAGE_SHIFT, %o0 786 sllx %o0, PAGE_SHIFT, %o0
775 ta HV_MMU_UNMAP_ADDR_TRAP 787 ta HV_MMU_UNMAP_ADDR_TRAP
776 mov HV_MMU_UNMAP_ADDR_TRAP, %g6 788 mov HV_MMU_UNMAP_ADDR_TRAP, %g6
777 brnz,a,pn %o0, __hypervisor_tlb_xcall_error 789 brnz,a,pn %o0, 1f
778 mov %o0, %g5 790 mov %o0, %g5
779 mov %g2, %o0 791 mov %g2, %o0
780 mov %g3, %o1 792 mov %g3, %o1
781 mov %g4, %o2 793 mov %g4, %o2
782 membar #Sync 794 membar #Sync
783 retry 795 retry
7961: sethi %hi(__hypervisor_tlb_xcall_error), %g4
797 jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
798 nop
784 799
785 .globl __hypervisor_xcall_flush_tlb_kernel_range 800 .globl __hypervisor_xcall_flush_tlb_kernel_range
786__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */ 801__hypervisor_xcall_flush_tlb_kernel_range: /* 28 insns */
787 /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */ 802 /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
788 sethi %hi(PAGE_SIZE - 1), %g2 803 sethi %hi(PAGE_SIZE - 1), %g2
789 or %g2, %lo(PAGE_SIZE - 1), %g2 804 or %g2, %lo(PAGE_SIZE - 1), %g2
@@ -800,7 +815,7 @@ __hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
800 mov HV_MMU_ALL, %o2 /* ARG2: flags */ 815 mov HV_MMU_ALL, %o2 /* ARG2: flags */
801 ta HV_MMU_UNMAP_ADDR_TRAP 816 ta HV_MMU_UNMAP_ADDR_TRAP
802 mov HV_MMU_UNMAP_ADDR_TRAP, %g6 817 mov HV_MMU_UNMAP_ADDR_TRAP, %g6
803 brnz,pn %o0, __hypervisor_tlb_xcall_error 818 brnz,pn %o0, 1f
804 mov %o0, %g5 819 mov %o0, %g5
805 sethi %hi(PAGE_SIZE), %o2 820 sethi %hi(PAGE_SIZE), %o2
806 brnz,pt %g3, 1b 821 brnz,pt %g3, 1b
@@ -810,6 +825,9 @@ __hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
810 mov %g7, %o2 825 mov %g7, %o2
811 membar #Sync 826 membar #Sync
812 retry 827 retry
8281: sethi %hi(__hypervisor_tlb_xcall_error), %g4
829 jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0
830 nop
813 831
814 /* These just get rescheduled to PIL vectors. */ 832 /* These just get rescheduled to PIL vectors. */
815 .globl xcall_call_function 833 .globl xcall_call_function
@@ -894,21 +912,21 @@ hypervisor_patch_cachetlbops:
894 sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1 912 sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1
895 or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1 913 or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
896 call tlb_patch_one 914 call tlb_patch_one
897 mov 21, %o2 915 mov 24, %o2
898 916
899 sethi %hi(xcall_flush_tlb_page), %o0 917 sethi %hi(xcall_flush_tlb_page), %o0
900 or %o0, %lo(xcall_flush_tlb_page), %o0 918 or %o0, %lo(xcall_flush_tlb_page), %o0
901 sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1 919 sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1
902 or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1 920 or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
903 call tlb_patch_one 921 call tlb_patch_one
904 mov 17, %o2 922 mov 20, %o2
905 923
906 sethi %hi(xcall_flush_tlb_kernel_range), %o0 924 sethi %hi(xcall_flush_tlb_kernel_range), %o0
907 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 925 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
908 sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1 926 sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
909 or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1 927 or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
910 call tlb_patch_one 928 call tlb_patch_one
911 mov 25, %o2 929 mov 28, %o2
912#endif /* CONFIG_SMP */ 930#endif /* CONFIG_SMP */
913 931
914 ret 932 ret