aboutsummaryrefslogtreecommitdiffstats
path: root/arch/ia64/kernel/fsys.S
diff options
context:
space:
mode:
authorIsaku Yamahata <yamahata@valinux.co.jp>2009-03-04 07:05:36 -0500
committerTony Luck <tony.luck@intel.com>2009-03-26 13:50:01 -0400
commit84b8857a038c060535dafdc8732a1ed60d0e98fc (patch)
tree5e8b565597bca01b34cb9568c8bb209e2f615458 /arch/ia64/kernel/fsys.S
parent533bd156231eec4b399c36579e7c30b6f52cfd29 (diff)
ia64/pv_ops: paravirtualize fsys.S.
paravirtualize fsys.S.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64/kernel/fsys.S')
-rw-r--r-- arch/ia64/kernel/fsys.S | 14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 788319f121ab..3544d75e7cbd 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -419,7 +419,7 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set
 	mov r17=(1 << (SIGKILL - 1)) | (1 << (SIGSTOP - 1))
 	;;

-	rsm psr.i				// mask interrupt delivery
+	RSM_PSR_I(p0, r18, r19)			// mask interrupt delivery
 	mov ar.ccv=0
 	andcm r14=r14,r17			// filter out SIGKILL & SIGSTOP

@@ -492,7 +492,7 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set
 #ifdef CONFIG_SMP
 	st4.rel [r31]=r0			// release the lock
 #endif
-	ssm psr.i
+	SSM_PSR_I(p0, p9, r31)
 	;;

 	srlz.d					// ensure psr.i is set again
@@ -514,7 +514,7 @@ EX(.fail_efault, (p15) st8 [r34]=r3)
 #ifdef CONFIG_SMP
 	st4.rel [r31]=r0			// release the lock
 #endif
-	ssm psr.i
+	SSM_PSR_I(p0, p9, r17)
 	;;
 	srlz.d
 	br.sptk.many fsys_fallback_syscall	// with signal pending, do the heavy-weight syscall
@@ -522,7 +522,7 @@ EX(.fail_efault, (p15) st8 [r34]=r3)
 #ifdef CONFIG_SMP
 .lock_contention:
 	/* Rather than spinning here, fall back on doing a heavy-weight syscall. */
-	ssm psr.i
+	SSM_PSR_I(p0, p9, r17)
 	;;
 	srlz.d
 	br.sptk.many fsys_fallback_syscall
@@ -593,11 +593,11 @@ ENTRY(fsys_fallback_syscall)
 	adds r17=-1024,r15
 	movl r14=sys_call_table
 	;;
-	rsm psr.i
+	RSM_PSR_I(p0, r26, r27)
 	shladd r18=r17,3,r14
 	;;
 	ld8 r18=[r18]				// load normal (heavy-weight) syscall entry-point
-	mov r29=psr				// read psr (12 cyc load latency)
+	MOV_FROM_PSR(p0, r29, r26)		// read psr (12 cyc load latency)
 	mov r27=ar.rsc
 	mov r21=ar.fpsr
 	mov r26=ar.pfs
@@ -735,7 +735,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
 	mov rp=r14				// I0   set the real return addr
 	and r3=_TIF_SYSCALL_TRACEAUDIT,r3	// A
 	;;
-	ssm psr.i				// M2   we're on kernel stacks now, reenable irqs
+	SSM_PSR_I(p0, p6, r22)			// M2   we're on kernel stacks now, reenable irqs
 	cmp.eq p8,p0=r3,r0			// A
 (p10)	br.cond.spnt.many ia64_ret_from_syscall	// B    return if bad call-frame or r15 is a NaT
