author    Isaku Yamahata <yamahata@valinux.co.jp>  2008-05-27 18:08:01 -0400
committer Tony Luck <tony.luck@intel.com>          2008-05-27 18:08:01 -0400
commit    4df8d22bbbb16ccfa4e10cc068135183c9e5e006
tree      6c989c6c493b9487311831218810e5e84886190d
parent    498c5170472ff0c03a29d22dbd33225a0be038f4
[IA64] pvops: paravirtualize entry.S
paravirtualize ia64_switch_to, ia64_leave_syscall and ia64_leave_kernel.
They include privilege-sensitive or performance-critical privileged
instructions, so they need paravirtualization. To paravirtualize them
from a single source with multiple compilations, callers now reach them
through indirect jumps, and each pv instance defines its own targets.

Cc: Keith Owens <kaos@ocs.com.au>
Cc: "Dong, Eddie" <eddie.dong@intel.com>
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Signed-off-by: Tony Luck <tony.luck@intel.com>
-rw-r--r--  arch/ia64/kernel/Makefile            |   2
-rw-r--r--  arch/ia64/kernel/entry.S             | 115
-rw-r--r--  arch/ia64/kernel/paravirt.c          |  19
-rw-r--r--  arch/ia64/kernel/paravirtentry.S     |  60
-rw-r--r--  include/asm-ia64/native/inst.h       |   8
-rw-r--r--  include/asm-ia64/paravirt_privop.h   |  23
6 files changed, 183 insertions(+), 44 deletions(-)
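The following sketch (not part of the patch) illustrates how the new hook is meant to be used: a pv instance supplies its own entry points and registers them through paravirt_cpu_asm_init(), after which the indirect-branch stubs added in paravirtentry.S land on that instance's code instead of the native one. struct pv_cpu_asm_switch and paravirt_cpu_asm_init() come from this patch; the xen_* names are hypothetical placeholders.

#include <asm/paravirt_privop.h>

/* Hypothetical pv-instance entry points, implemented in that instance's
 * own assembly; the names are placeholders, not part of this patch. */
extern char xen_switch_to[];
extern char xen_leave_syscall[];
extern char xen_work_processed_syscall[];
extern char xen_leave_kernel[];

static const struct pv_cpu_asm_switch xen_cpu_asm_switch = {
	.switch_to		= (unsigned long)xen_switch_to,
	.leave_syscall		= (unsigned long)xen_leave_syscall,
	.work_processed_syscall	= (unsigned long)xen_work_processed_syscall,
	.leave_kernel		= (unsigned long)xen_leave_kernel,
};

void xen_setup_pv_asm(void)
{
	/* Redirect the paravirt_*_targ pointers consulted by the
	 * indirect-branch stubs in paravirtentry.S. */
	paravirt_cpu_asm_init(&xen_cpu_asm_switch);
}

Without such a call the stubs still work: DATA8() in paravirtentry.S statically initializes each paravirt_<sym>_targ pointer to the corresponding ia64_native_<sym> entry.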
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 8b2524293eb4..cea91f17d44b 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -36,7 +36,7 @@ obj-$(CONFIG_PCI_MSI) += msi_ia64.o
 mca_recovery-y			+= mca_drv.o mca_drv_asm.o
 obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o
 
-obj-$(CONFIG_PARAVIRT)		+= paravirt.o
+obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirtentry.o
 
 obj-$(CONFIG_IA64_ESI)		+= esi.o
 ifneq ($(CONFIG_IA64_ESI),)
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index ca2bb95726de..56ab156c48ae 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -23,6 +23,11 @@
  * 11/07/2000
  */
 /*
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *                    pv_ops.
+ */
+/*
  * Global (preserved) predicate usage on syscall entry/exit path:
  *
  * pKStk:	See entry.h.
@@ -45,6 +50,7 @@
 
 #include "minstate.h"
 
+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
 	/*
 	 * execve() is special because in case of success, we need to
 	 * setup a null register window frame.
@@ -173,6 +179,7 @@ GLOBAL_ENTRY(sys_clone)
 	mov rp=loc0
 	br.ret.sptk.many rp
 END(sys_clone)
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
 
 /*
  * prev_task <- ia64_switch_to(struct task_struct *next)
@@ -180,7 +187,7 @@ END(sys_clone)
  * called.  The code starting at .map relies on this.  The rest of the code
  * doesn't care about the interrupt masking status.
  */
-GLOBAL_ENTRY(ia64_switch_to)
+GLOBAL_ENTRY(__paravirt_switch_to)
 	.prologue
 	alloc r16=ar.pfs,1,0,0,0
 	DO_SAVE_SWITCH_STACK
@@ -204,7 +211,7 @@ GLOBAL_ENTRY(ia64_switch_to)
 	;;
 .done:
 	ld8 sp=[r21]			// load kernel stack pointer of new task
-	mov IA64_KR(CURRENT)=in0	// update "current" application register
+	MOV_TO_KR(CURRENT, in0, r8, r9)	// update "current" application register
 	mov r8=r13			// return pointer to previously running task
 	mov r13=in0			// set "current" pointer
 	;;
@@ -216,26 +223,25 @@ GLOBAL_ENTRY(ia64_switch_to)
 	br.ret.sptk.many rp		// boogie on out in new context
 
 .map:
-	rsm psr.ic			// interrupts (psr.i) are already disabled here
+	RSM_PSR_IC(r25)			// interrupts (psr.i) are already disabled here
 	movl r25=PAGE_KERNEL
 	;;
 	srlz.d
 	or r23=r25,r20			// construct PA | page properties
 	mov r25=IA64_GRANULE_SHIFT<<2
 	;;
-	mov cr.itir=r25
-	mov cr.ifa=in0			// VA of next task...
+	MOV_TO_ITIR(p0, r25, r8)
+	MOV_TO_IFA(in0, r8)		// VA of next task...
 	;;
 	mov r25=IA64_TR_CURRENT_STACK
-	mov IA64_KR(CURRENT_STACK)=r26	// remember last page we mapped...
+	MOV_TO_KR(CURRENT_STACK, r26, r8, r9)	// remember last page we mapped...
 	;;
 	itr.d dtr[r25]=r23		// wire in new mapping...
-	ssm psr.ic			// reenable the psr.ic bit
-	;;
-	srlz.d
+	SSM_PSR_IC_AND_SRLZ_D(r8, r9)	// reenable the psr.ic bit
 	br.cond.sptk .done
-END(ia64_switch_to)
+END(__paravirt_switch_to)
 
+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
 /*
  * Note that interrupts are enabled during save_switch_stack and load_switch_stack.  This
  * means that we may get an interrupt with "sp" pointing to the new kernel stack while
@@ -375,7 +381,7 @@ END(save_switch_stack)
  * - b7 holds address to return to
  * - must not touch r8-r11
  */
-ENTRY(load_switch_stack)
+GLOBAL_ENTRY(load_switch_stack)
 	.prologue
 	.altrp b7
 
@@ -571,7 +577,7 @@ GLOBAL_ENTRY(ia64_trace_syscall)
 .ret3:
 (pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
 (pUStk)	rsm psr.i			// disable interrupts
-	br.cond.sptk .work_pending_syscall_end
+	br.cond.sptk ia64_work_pending_syscall_end
 
 strace_error:
 	ld8 r3=[r2]				// load pt_regs.r8
@@ -636,8 +642,17 @@ GLOBAL_ENTRY(ia64_ret_from_syscall)
 	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
 	mov r10=r0				// clear error indication in r10
 (p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
+#ifdef CONFIG_PARAVIRT
+	;;
+	br.cond.sptk.few ia64_leave_syscall
+	;;
+#endif /* CONFIG_PARAVIRT */
 END(ia64_ret_from_syscall)
+#ifndef CONFIG_PARAVIRT
 	// fall through
+#endif
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
+
 /*
  * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
  * need to switch to bank 0 and doesn't restore the scratch registers.
@@ -682,7 +697,7 @@ END(ia64_ret_from_syscall)
  *	      ar.csd: cleared
  *	      ar.ssd: cleared
  */
-ENTRY(ia64_leave_syscall)
+GLOBAL_ENTRY(__paravirt_leave_syscall)
 	PT_REGS_UNWIND_INFO(0)
 	/*
 	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
@@ -692,11 +707,11 @@ ENTRY(ia64_leave_syscall)
 	 * extra work.  We always check for extra work when returning to user-level.
 	 * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
 	 * is 0.  After extra work processing has been completed, execution
-	 * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
+	 * resumes at ia64_work_processed_syscall with p6 set to 1 if the extra-work-check
 	 * needs to be redone.
 	 */
 #ifdef CONFIG_PREEMPT
-	rsm psr.i				// disable interrupts
+	RSM_PSR_I(p0, r2, r18)			// disable interrupts
 	cmp.eq pLvSys,p0=r0,r0			// pLvSys=1: leave from syscall
 (pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
 	;;
@@ -706,11 +721,12 @@ ENTRY(ia64_leave_syscall)
 	;;
 	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
 #else /* !CONFIG_PREEMPT */
-(pUStk)	rsm psr.i
+	RSM_PSR_I(pUStk, r2, r18)
 	cmp.eq pLvSys,p0=r0,r0		// pLvSys=1: leave from syscall
 (pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
 #endif
-.work_processed_syscall:
+.global __paravirt_work_processed_syscall;
+__paravirt_work_processed_syscall:
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	adds r2=PT(LOADRS)+16,r12
 (pUStk)	mov.m r22=ar.itc			// fetch time at leave
@@ -744,7 +760,7 @@ ENTRY(ia64_leave_syscall)
 (pNonSys) break 0		// bug check: we shouldn't be here if pNonSys is TRUE!
 	;;
 	invala			// M0|1 invalidate ALAT
-	rsm psr.i | psr.ic	// M2   turn off interrupts and interruption collection
+	RSM_PSR_I_IC(r28, r29, r30)	// M2   turn off interrupts and interruption collection
 	cmp.eq p9,p0=r0,r0	// A    set p9 to indicate that we should restore cr.ifs
 
 	ld8 r29=[r2],16		// M0|1 load cr.ipsr
@@ -765,7 +781,7 @@ ENTRY(ia64_leave_syscall)
 	;;
 #endif
 	ld8 r26=[r2],PT(B0)-PT(AR_PFS)	// M0|1 load ar.pfs
-(pKStk)	mov r22=psr			// M2   read PSR now that interrupts are disabled
+	MOV_FROM_PSR(pKStk, r22, r21)	// M2   read PSR now that interrupts are disabled
 	nop 0
 	;;
 	ld8 r21=[r2],PT(AR_RNAT)-PT(B0)	// M0|1 load b0
@@ -798,7 +814,7 @@ ENTRY(ia64_leave_syscall)
 
 	srlz.d				// M0   ensure interruption collection is off (for cover)
 	shr.u r18=r19,16		// I0|1 get byte size of existing "dirty" partition
-	cover				// B    add current frame into dirty partition & set cr.ifs
+	COVER				// B    add current frame into dirty partition & set cr.ifs
 	;;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	mov r19=ar.bsp			// M2   get new backing store pointer
@@ -823,8 +839,9 @@ ENTRY(ia64_leave_syscall)
 	mov.m ar.ssd=r0			// M2   clear ar.ssd
 	mov f11=f0			// F    clear f11
 	br.cond.sptk.many rbs_switch	// B
-END(ia64_leave_syscall)
+END(__paravirt_leave_syscall)
 
+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
 #ifdef CONFIG_IA32_SUPPORT
 GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
 	PT_REGS_UNWIND_INFO(0)
@@ -835,10 +852,20 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
 	st8.spill [r2]=r8	// store return value in slot for r8 and set unat bit
 	.mem.offset 8,0
 	st8.spill [r3]=r0	// clear error indication in slot for r10 and set unat bit
+#ifdef CONFIG_PARAVIRT
+	;;
+	// don't fall through, ia64_leave_kernel may be #define'd
+	br.cond.sptk.few ia64_leave_kernel
+	;;
+#endif /* CONFIG_PARAVIRT */
 END(ia64_ret_from_ia32_execve)
+#ifndef CONFIG_PARAVIRT
 	// fall through
+#endif
 #endif /* CONFIG_IA32_SUPPORT */
-GLOBAL_ENTRY(ia64_leave_kernel)
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
+
+GLOBAL_ENTRY(__paravirt_leave_kernel)
 	PT_REGS_UNWIND_INFO(0)
 	/*
 	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
@@ -852,7 +879,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	 * needs to be redone.
 	 */
 #ifdef CONFIG_PREEMPT
-	rsm psr.i		// disable interrupts
+	RSM_PSR_I(p0, r17, r31)	// disable interrupts
 	cmp.eq p0,pLvSys=r0,r0		// pLvSys=0: leave from kernel
 (pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
 	;;
@@ -862,7 +889,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	;;
 	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
 #else
-(pUStk)	rsm psr.i
+	RSM_PSR_I(pUStk, r17, r31)
 	cmp.eq p0,pLvSys=r0,r0		// pLvSys=0: leave from kernel
 (pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
 #endif
@@ -910,7 +937,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	mov ar.csd=r30
 	mov ar.ssd=r31
 	;;
-	rsm psr.i | psr.ic	// initiate turning off of interrupt and interruption collection
+	RSM_PSR_I_IC(r23, r22, r25)	// initiate turning off of interrupt and interruption collection
 	invala			// invalidate ALAT
 	;;
 	ld8.fill r22=[r2],24
@@ -942,7 +969,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	mov ar.ccv=r15
 	;;
 	ldf.fill f11=[r2]
-	bsw.0			// switch back to bank 0 (no stop bit required beforehand...)
+	BSW_0(r2, r3, r15)	// switch back to bank 0 (no stop bit required beforehand...)
 	;;
 (pUStk)	mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency)
 	adds r16=PT(CR_IPSR)+16,r12
@@ -950,12 +977,12 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	.pred.rel.mutex pUStk,pKStk
-(pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
+	MOV_FROM_PSR(pKStk, r22, r29)	// M2 read PSR now that interrupts are disabled
 (pUStk)	mov.m r22=ar.itc	// M  fetch time at leave
 	nop.i 0
 	;;
 #else
-(pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
+	MOV_FROM_PSR(pKStk, r22, r29)	// M2 read PSR now that interrupts are disabled
 	nop.i 0
 	nop.i 0
 	;;
@@ -1027,7 +1054,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	 * NOTE: alloc, loadrs, and cover can't be predicated.
 	 */
 (pNonSys) br.cond.dpnt dont_preserve_current_frame
-	cover				// add current frame into dirty partition and set cr.ifs
+	COVER				// add current frame into dirty partition and set cr.ifs
 	;;
 	mov r19=ar.bsp			// get new backing store pointer
 rbs_switch:
@@ -1130,16 +1157,16 @@ skip_rbs_switch:
 (pKStk)	dep r29=r22,r29,21,1	// I0 update ipsr.pp with psr.pp
 (pLvSys)mov r16=r0		// A  clear r16 for leave_syscall, no-op otherwise
 	;;
-	mov cr.ipsr=r29		// M2
+	MOV_TO_IPSR(p0, r29, r25)	// M2
 	mov ar.pfs=r26		// I0
 (pLvSys)mov r17=r0		// A  clear r17 for leave_syscall, no-op otherwise
 
-(p9)	mov cr.ifs=r30		// M2
+	MOV_TO_IFS(p9, r30, r25)	// M2
 	mov b0=r21		// I0
 (pLvSys)mov r18=r0		// A  clear r18 for leave_syscall, no-op otherwise
 
 	mov ar.fpsr=r20		// M2
-	mov cr.iip=r28		// M2
+	MOV_TO_IIP(r28, r25)	// M2
 	nop 0
 	;;
 (pUStk)	mov ar.rnat=r24		// M2 must happen with RSE in lazy mode
@@ -1148,7 +1175,7 @@ skip_rbs_switch:
 
 	mov ar.rsc=r27		// M2
 	mov pr=r31,-1		// I0
-	rfi			// B
+	RFI			// B
 
 	/*
 	 * On entry:
@@ -1174,35 +1201,36 @@ skip_rbs_switch:
 	;;
 (pKStk) st4 [r20]=r21
 #endif
-	ssm psr.i		// enable interrupts
+	SSM_PSR_I(p0, p6, r2)	// enable interrupts
 	br.call.spnt.many rp=schedule
 .ret9:	cmp.eq p6,p0=r0,r0	// p6 <- 1 (re-check)
-	rsm psr.i		// disable interrupts
+	RSM_PSR_I(p0, r2, r20)	// disable interrupts
 	;;
 #ifdef CONFIG_PREEMPT
 (pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
 	;;
 (pKStk)	st4 [r20]=r0		// preempt_count() <- 0
 #endif
-(pLvSys)br.cond.sptk.few .work_pending_syscall_end
+(pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end
 	br.cond.sptk.many .work_processed_kernel
 
 .notify:
 (pUStk)	br.call.spnt.many rp=notify_resume_user
 .ret10:	cmp.ne p6,p0=r0,r0	// p6 <- 0 (don't re-check)
-(pLvSys)br.cond.sptk.few .work_pending_syscall_end
+(pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end
 	br.cond.sptk.many .work_processed_kernel
 
-.work_pending_syscall_end:
+.global __paravirt_pending_syscall_end;
+__paravirt_pending_syscall_end:
 	adds r2=PT(R8)+16,r12
 	adds r3=PT(R10)+16,r12
 	;;
 	ld8 r8=[r2]
 	ld8 r10=[r3]
-	br.cond.sptk.many .work_processed_syscall
-
-END(ia64_leave_kernel)
+	br.cond.sptk.many __paravirt_work_processed_syscall_target
+END(__paravirt_leave_kernel)
 
+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
 ENTRY(handle_syscall_error)
 	/*
 	 * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could
@@ -1244,7 +1272,7 @@ END(ia64_invoke_schedule_tail)
  * We declare 8 input registers so the system call args get preserved,
  * in case we need to restart a system call.
  */
-ENTRY(notify_resume_user)
+GLOBAL_ENTRY(notify_resume_user)
 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
 	alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
 	mov r9=ar.unat
@@ -1306,7 +1334,7 @@ ENTRY(sys_rt_sigreturn)
 	adds sp=16,sp
 	;;
 	ld8 r9=[sp]				// load new ar.unat
-	mov.sptk b7=r8,ia64_leave_kernel
+	mov.sptk b7=r8,ia64_native_leave_kernel
 	;;
 	mov ar.unat=r9
 	br.many b7
@@ -1665,3 +1693,4 @@ sys_call_table:
 	data8 sys_timerfd_gettime
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c
index e5482bb6841e..7126ea8f7ecc 100644
--- a/arch/ia64/kernel/paravirt.c
+++ b/arch/ia64/kernel/paravirt.c
@@ -286,3 +286,22 @@ struct pv_cpu_ops pv_cpu_ops = {
 		= ia64_native_intrin_local_irq_restore_func,
 };
 EXPORT_SYMBOL(pv_cpu_ops);
+
+/******************************************************************************
+ * replacement of hand written assembly codes.
+ */
+
+void
+paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch)
+{
+	extern unsigned long paravirt_switch_to_targ;
+	extern unsigned long paravirt_leave_syscall_targ;
+	extern unsigned long paravirt_work_processed_syscall_targ;
+	extern unsigned long paravirt_leave_kernel_targ;
+
+	paravirt_switch_to_targ = cpu_asm_switch->switch_to;
+	paravirt_leave_syscall_targ = cpu_asm_switch->leave_syscall;
+	paravirt_work_processed_syscall_targ =
+		cpu_asm_switch->work_processed_syscall;
+	paravirt_leave_kernel_targ = cpu_asm_switch->leave_kernel;
+}
diff --git a/arch/ia64/kernel/paravirtentry.S b/arch/ia64/kernel/paravirtentry.S
new file mode 100644
index 000000000000..2f42fcb9776a
--- /dev/null
+++ b/arch/ia64/kernel/paravirtentry.S
@@ -0,0 +1,60 @@
+/******************************************************************************
+ * linux/arch/ia64/xen/paravirtentry.S
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ *                    VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <asm/asmmacro.h>
+#include <asm/asm-offsets.h>
+#include "entry.h"
+
+#define DATA8(sym, init_value)			\
+	.pushsection .data.read_mostly ;	\
+	.align 8 ;				\
+	.global sym ;				\
+	sym: ;					\
+	data8 init_value ;			\
+	.popsection
+
+#define BRANCH(targ, reg, breg)		\
+	movl reg=targ ;			\
+	;;				\
+	ld8 reg=[reg] ;			\
+	;;				\
+	mov breg=reg ;			\
+	br.cond.sptk.many breg
+
+#define BRANCH_PROC(sym, reg, breg)					\
+	DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ;	\
+	GLOBAL_ENTRY(paravirt_ ## sym) ;				\
+	BRANCH(paravirt_ ## sym ## _targ, reg, breg) ;			\
+	END(paravirt_ ## sym)
+
+#define BRANCH_PROC_UNWINFO(sym, reg, breg)				\
+	DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ;	\
+	GLOBAL_ENTRY(paravirt_ ## sym) ;				\
+	PT_REGS_UNWIND_INFO(0) ;					\
+	BRANCH(paravirt_ ## sym ## _targ, reg, breg) ;			\
+	END(paravirt_ ## sym)
+
+
+BRANCH_PROC(switch_to, r22, b7)
+BRANCH_PROC_UNWINFO(leave_syscall, r22, b7)
+BRANCH_PROC(work_processed_syscall, r2, b7)
+BRANCH_PROC_UNWINFO(leave_kernel, r22, b7)
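As a reference for reading the macros above (a hand expansion for illustration, not part of the patch; spacing approximate), BRANCH_PROC(switch_to, r22, b7) should come out roughly as:

	.pushsection .data.read_mostly
	.align 8
	.global paravirt_switch_to_targ
	paravirt_switch_to_targ:
	data8 ia64_native_switch_to	// defaults to the native entry.S code
	.popsection

GLOBAL_ENTRY(paravirt_switch_to)
	movl r22=paravirt_switch_to_targ
	;;
	ld8 r22=[r22]			// fetch the currently registered target
	;;
	mov b7=r22
	br.cond.sptk.many b7		// tail-branch into the selected implementation
END(paravirt_switch_to)

Callers never use these stub names directly: paravirt_privop.h renames ia64_switch_to to paravirt_switch_to under CONFIG_PARAVIRT, so each call site pays one extra load plus an indirect branch, and paravirt_cpu_asm_init() can retarget the pointer at boot.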
diff --git a/include/asm-ia64/native/inst.h b/include/asm-ia64/native/inst.h
index f1072ace0cfa..c953a2ca4fce 100644
--- a/include/asm-ia64/native/inst.h
+++ b/include/asm-ia64/native/inst.h
@@ -22,6 +22,14 @@
 
 #define DO_SAVE_MIN		IA64_NATIVE_DO_SAVE_MIN
 
+#define __paravirt_switch_to			ia64_native_switch_to
+#define __paravirt_leave_syscall		ia64_native_leave_syscall
+#define __paravirt_work_processed_syscall	ia64_native_work_processed_syscall
+#define __paravirt_leave_kernel			ia64_native_leave_kernel
+#define __paravirt_pending_syscall_end		ia64_work_pending_syscall_end
+#define __paravirt_work_processed_syscall_target \
+						ia64_work_processed_syscall
+
 #ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK
 # define PARAVIRT_POISON	0xdeadbeefbaadf00d
 # define CLOBBER(clob)				\
diff --git a/include/asm-ia64/paravirt_privop.h b/include/asm-ia64/paravirt_privop.h
index 7b133ae86df0..52482e6940ac 100644
--- a/include/asm-ia64/paravirt_privop.h
+++ b/include/asm-ia64/paravirt_privop.h
@@ -80,12 +80,35 @@ extern unsigned long ia64_native_getreg_func(int regnum);
 		ia64_native_rsm(mask);	\
 	} while (0)
 
+/******************************************************************************
+ * replacement of hand written assembly codes.
+ */
+struct pv_cpu_asm_switch {
+	unsigned long switch_to;
+	unsigned long leave_syscall;
+	unsigned long work_processed_syscall;
+	unsigned long leave_kernel;
+};
+void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);
+
 #endif /* __ASSEMBLY__ */
 
+#define IA64_PARAVIRT_ASM_FUNC(name)	paravirt_ ## name
+
 #else
 
 /* fallback for native case */
+#define IA64_PARAVIRT_ASM_FUNC(name)	ia64_native_ ## name
 
 #endif /* CONFIG_PARAVIRT */
 
+/* these routines utilize privilege-sensitive or performance-sensitive
+ * privileged instructions so the code must be replaced with
+ * paravirtualized versions */
+#define ia64_switch_to			IA64_PARAVIRT_ASM_FUNC(switch_to)
+#define ia64_leave_syscall		IA64_PARAVIRT_ASM_FUNC(leave_syscall)
+#define ia64_work_processed_syscall	\
+	IA64_PARAVIRT_ASM_FUNC(work_processed_syscall)
+#define ia64_leave_kernel		IA64_PARAVIRT_ASM_FUNC(leave_kernel)
+
 #endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */