author     Jeff Garzik <jgarzik@pobox.com>    2005-08-29 16:12:36 -0400
committer  Jeff Garzik <jgarzik@pobox.com>    2005-08-29 16:12:36 -0400
commit     2fca877b68b2b4fc5b94277858a1bedd46017cde (patch)
tree       fd02725406299ba2f26354463b3c261721e9eb6b /arch/ia64/kernel/ivt.S
parent     ff40c6d3d1437ecdf295b8e39adcb06c3d6021ef (diff)
parent     02b3e4e2d71b6058ec11cc01c72ac651eb3ded2b (diff)

    /spare/repo/libata-dev branch 'v2.6.13'

Diffstat (limited to 'arch/ia64/kernel/ivt.S')
 -rw-r--r--  arch/ia64/kernel/ivt.S  211
 1 files changed, 137 insertions, 74 deletions
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index d9c05d53435b..3bb3a13c4047 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -1,7 +1,7 @@
 /*
  * arch/ia64/kernel/ivt.S
  *
- * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
+ * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
  *	Stephane Eranian <eranian@hpl.hp.com>
  *	David Mosberger <davidm@hpl.hp.com>
  * Copyright (C) 2000, 2002-2003 Intel Co
@@ -405,17 +405,22 @@ ENTRY(nested_dtlb_miss)
  * r30:	continuation address
  * r31:	saved pr
  *
- * Clobbered:	b0, r18, r19, r21, psr.dt (cleared)
+ * Clobbered:	b0, r18, r19, r21, r22, psr.dt (cleared)
  */
 	rsm psr.dt				// switch to using physical data addressing
 	mov r19=IA64_KR(PT_BASE)		// get the page table base address
 	shl r21=r16,3				// shift bit 60 into sign bit
+	mov r18=cr.itir
 	;;
 	shr.u r17=r16,61			// get the region number into r17
+	extr.u r18=r18,2,6			// get the faulting page size
 	;;
 	cmp.eq p6,p7=5,r17			// is faulting address in region 5?
-	shr.u r18=r16,PGDIR_SHIFT		// get bits 33-63 of faulting address
+	add r22=-PAGE_SHIFT,r18			// adjustment for hugetlb address
+	add r18=PGDIR_SHIFT-PAGE_SHIFT,r18
 	;;
+	shr.u r22=r16,r22
+	shr.u r18=r16,r18
 (p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place

 	srlz.d
@@ -428,7 +433,7 @@ ENTRY(nested_dtlb_miss)
 (p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
 (p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
 	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
-	shr.u r18=r16,PMD_SHIFT			// shift L2 index into position
+	shr.u r18=r22,PMD_SHIFT			// shift L2 index into position
 	;;
 	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
 	;;
@@ -436,7 +441,7 @@ ENTRY(nested_dtlb_miss)
 	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
 	;;
 (p7)	ld8 r17=[r17]				// fetch the L2 entry (may be 0)
-	shr.u r19=r16,PAGE_SHIFT		// shift L3 index into position
+	shr.u r19=r22,PAGE_SHIFT		// shift L3 index into position
 	;;
 (p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was L2 entry NULL?
 	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
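
The nested_dtlb_miss change above reads the faulting page size out of cr.itir and scales the faulting address before the three-level walk, so a fault on a large (hugetlb) page indexes the right entries; when the page size equals PAGE_SHIFT the scaling degenerates to the old code. A minimal C sketch of the index arithmetic, assuming illustrative names and placeholder shift values (this is not the kernel's C code):

	/* Hypothetical sketch of the patched index computation.  The shift
	 * macros below are placeholders standing in for the kernel's
	 * PAGE_SHIFT/PMD_SHIFT/PGDIR_SHIFT; 'ps' is cr.itir.ps, the log2 of
	 * the faulting page size. */
	#define PAGE_SHIFT  14				/* example: 16KB pages */
	#define PMD_SHIFT   (PAGE_SHIFT + (PAGE_SHIFT - 3))
	#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3))

	static void walk_indices(unsigned long ifa, unsigned int ps,
				 unsigned long *pgd, unsigned long *pmd,
				 unsigned long *pte)
	{
		unsigned long scaled = ifa >> (ps - PAGE_SHIFT);	/* r22 */

		*pgd = ifa >> (PGDIR_SHIFT - PAGE_SHIFT + ps);		/* r18 */
		*pmd = scaled >> PMD_SHIFT;	/* was ifa >> PMD_SHIFT  */
		*pte = scaled >> PAGE_SHIFT;	/* was ifa >> PAGE_SHIFT */
	}

For ps == PAGE_SHIFT, scaled == ifa and *pgd == ifa >> PGDIR_SHIFT, i.e. exactly what the pre-patch code computed.
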
@@ -687,82 +692,118 @@ ENTRY(break_fault)
 	 * to prevent leaking bits from kernel to user level.
 	 */
 	DBG_FAULT(11)
-	mov r16=IA64_KR(CURRENT)		// r16 = current task; 12 cycle read lat.
-	mov r17=cr.iim
-	mov r18=__IA64_BREAK_SYSCALL
-	mov r21=ar.fpsr
-	mov r29=cr.ipsr
-	mov r19=b6
-	mov r25=ar.unat
-	mov r27=ar.rsc
-	mov r26=ar.pfs
-	mov r28=cr.iip
-	mov r31=pr				// prepare to save predicates
-	mov r20=r1
-	;;
+	mov.m r16=IA64_KR(CURRENT)		// M2 r16 <- current task (12 cyc)
+	mov r29=cr.ipsr				// M2 (12 cyc)
+	mov r31=pr				// I0 (2 cyc)
+
+	mov r17=cr.iim				// M2 (2 cyc)
+	mov.m r27=ar.rsc			// M2 (12 cyc)
+	mov r18=__IA64_BREAK_SYSCALL		// A
+
+	mov.m ar.rsc=0				// M2
+	mov.m r21=ar.fpsr			// M2 (12 cyc)
+	mov r19=b6				// I0 (2 cyc)
+	;;
+	mov.m r23=ar.bspstore			// M2 (12 cyc)
+	mov.m r24=ar.rnat			// M2 (5 cyc)
+	mov.i r26=ar.pfs			// I0 (2 cyc)
+
+	invala					// M0|1
+	nop.m 0					// M
+	mov r20=r1				// A		save r1
+
+	nop.m 0
+	movl r30=sys_call_table			// X
+
+	mov r28=cr.iip				// M2 (2 cyc)
+	cmp.eq p0,p7=r18,r17			// I0 is this a system call?
+(p7)	br.cond.spnt non_syscall		// B  no ->
+	//
+	// From this point on, we are definitely on the syscall-path
+	// and we can use (non-banked) scratch registers.
+	//
+///////////////////////////////////////////////////////////////////////
+	mov r1=r16				// A    move task-pointer to "addl"-addressable reg
+	mov r2=r16				// A    setup r2 for ia64_syscall_setup
+	add r9=TI_FLAGS+IA64_TASK_SIZE,r16	// A	r9 = &current_thread_info()->flags
+
 	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
-	cmp.eq p0,p7=r18,r17			// is this a system call? (p7 <- false, if so)
-(p7)	br.cond.spnt non_syscall
+	adds r15=-1024,r15			// A    subtract 1024 from syscall number
+	mov r3=NR_syscalls - 1
 	;;
-	ld1 r17=[r16]				// load current->thread.on_ustack flag
-	st1 [r16]=r0				// clear current->thread.on_ustack flag
-	add r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16	// set r1 for MINSTATE_START_SAVE_MIN_VIRT
+	ld1.bias r17=[r16]			// M0|1 r17 = current->thread.on_ustack flag
+	ld4 r9=[r9]				// M0|1 r9 = current_thread_info()->flags
+	extr.u r8=r29,41,2			// I0   extract ei field from cr.ipsr
+
+	shladd r30=r15,3,r30			// A    r30 = sys_call_table + 8*(syscall-1024)
+	addl r22=IA64_RBS_OFFSET,r1		// A    compute base of RBS
+	cmp.leu p6,p7=r15,r3			// A    syscall number in range?
 	;;
-	invala

-	/* adjust return address so we skip over the break instruction: */
+	lfetch.fault.excl.nt1 [r22]		// M0|1 prefetch RBS
+(p6)	ld8 r30=[r30]				// M0|1 load address of syscall entry point
+	tnat.nz.or p7,p0=r15			// I0	is syscall nr a NaT?

-	extr.u r8=r29,41,2			// extract ei field from cr.ipsr
-	;;
-	cmp.eq p6,p7=2,r8			// isr.ei==2?
-	mov r2=r1				// setup r2 for ia64_syscall_setup
-	;;
-(p6)	mov r8=0				// clear ei to 0
-(p6)	adds r28=16,r28				// switch cr.iip to next bundle cr.ipsr.ei wrapped
-(p7)	adds r8=1,r8				// increment ei to next slot
-	;;
-	cmp.eq pKStk,pUStk=r0,r17		// are we in kernel mode already?
-	dep r29=r8,r29,41,2			// insert new ei into cr.ipsr
+	mov.m ar.bspstore=r22			// M2   switch to kernel RBS
+	cmp.eq p8,p9=2,r8			// A    isr.ei==2?
 	;;

-	// switch from user to kernel RBS:
-	MINSTATE_START_SAVE_MIN_VIRT
-	br.call.sptk.many b7=ia64_syscall_setup
-	;;
-	MINSTATE_END_SAVE_MIN_VIRT		// switch to bank 1
-	ssm psr.ic | PSR_DEFAULT_BITS
-	;;
-	srlz.i					// guarantee that interruption collection is on
-	mov r3=NR_syscalls - 1
-	;;
-(p15)	ssm psr.i				// restore psr.i
-	// p10==true means out registers are more than 8 or r15's Nat is true
-(p10)	br.cond.spnt.many ia64_ret_from_syscall
-	;;
-	movl r16=sys_call_table
+(p8)	mov r8=0				// A    clear ei to 0
+(p7)	movl r30=sys_ni_syscall			// X

-	adds r15=-1024,r15			// r15 contains the syscall number---subtract 1024
-	movl r2=ia64_ret_from_syscall
-	;;
-	shladd r20=r15,3,r16			// r20 = sys_call_table + 8*(syscall-1024)
-	cmp.leu p6,p7=r15,r3			// (syscall > 0 && syscall < 1024 + NR_syscalls) ?
-	mov rp=r2				// set the real return addr
+(p8)	adds r28=16,r28				// A    switch cr.iip to next bundle
+(p9)	adds r8=1,r8				// A    increment ei to next slot
+	nop.i 0
 	;;
-(p6)	ld8 r20=[r20]				// load address of syscall entry point
-(p7)	movl r20=sys_ni_syscall

-	add r2=TI_FLAGS+IA64_TASK_SIZE,r13
-	;;
-	ld4 r2=[r2]				// r2 = current_thread_info()->flags
-	;;
-	and r2=_TIF_SYSCALL_TRACEAUDIT,r2	// mask trace or audit
+	mov.m r25=ar.unat			// M2 (5 cyc)
+	dep r29=r8,r29,41,2			// I0   insert new ei into cr.ipsr
+	adds r15=1024,r15			// A    restore original syscall number
+	//
+	// If any of the above loads miss in L1D, we'll stall here until
+	// the data arrives.
+	//
+///////////////////////////////////////////////////////////////////////
+	st1 [r16]=r0				// M2|3 clear current->thread.on_ustack flag
+	mov b6=r30				// I0   setup syscall handler branch reg early
+	cmp.eq pKStk,pUStk=r0,r17		// A    were we on kernel stacks already?
+
+	and r9=_TIF_SYSCALL_TRACEAUDIT,r9	// A    mask trace or audit
+	mov r18=ar.bsp				// M2 (12 cyc)
+(pKStk)	br.cond.spnt .break_fixup		// B	we're already in kernel-mode -- fix up RBS
+	;;
+.back_from_break_fixup:
+(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A    compute base of memory stack
+	cmp.eq p14,p0=r9,r0			// A    are syscalls being traced/audited?
+	br.call.sptk.many b7=ia64_syscall_setup	// B
+1:
+	mov ar.rsc=0x3				// M2   set eager mode, pl 0, LE, loadrs=0
+	nop 0
+	bsw.1					// B (6 cyc) regs are saved, switch to bank 1
 	;;
-	cmp.eq p8,p0=r2,r0
-	mov b6=r20
+
+	ssm psr.ic | PSR_DEFAULT_BITS		// M2	now it's safe to re-enable intr.-collection
+	movl r3=ia64_ret_from_syscall		// X
 	;;
-(p8)	br.call.sptk.many b6=b6			// ignore this return addr
-	br.cond.sptk ia64_trace_syscall
+
+	srlz.i					// M0   ensure interruption collection is on
+	mov rp=r3				// I0   set the real return addr
+(p10)	br.cond.spnt.many ia64_ret_from_syscall	// B    return if bad call-frame or r15 is a NaT
+
+(p15)	ssm psr.i				// M2   restore psr.i
+(p14)	br.call.sptk.many b6=b6			// B    invoke syscall-handker (ignore return addr)
+	br.cond.spnt.many ia64_trace_syscall	// B	do syscall-tracing thingamagic
 	// NOT REACHED
+///////////////////////////////////////////////////////////////////////
+	// On entry, we optimistically assumed that we're coming from user-space.
+	// For the rare cases where a system-call is done from within the kernel,
+	// we fix things up at this point:
+.break_fixup:
+	add r1=-IA64_PT_REGS_SIZE,sp		// A    allocate space for pt_regs structure
+	mov ar.rnat=r24				// M2	restore kernel's AR.RNAT
+	;;
+	mov ar.bspstore=r23			// M2	restore kernel's AR.BSPSTORE
+	br.cond.sptk .back_from_break_fixup
 END(break_fault)

 	.org ia64_ivt+0x3000
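
Even in the reworked break_fault path above, the handler still has to step cr.iip past the break instruction before resuming: the ei field (bits 41-42 of cr.ipsr) selects the slot within the current bundle, and slot 2 wraps to slot 0 of the next 16-byte bundle. A minimal C sketch of that slot arithmetic, assuming a hypothetical helper name (the kernel does this in the assembly shown above, not in C):

	/* Hypothetical sketch of the "skip the break" logic applied to
	 * cr.iip/cr.ipsr (extr.u r8=r29,41,2 ... dep r29=r8,r29,41,2). */
	static void skip_break_insn(unsigned long *iip, unsigned long *ipsr)
	{
		unsigned long ei = (*ipsr >> 41) & 0x3;	/* extract ei field */

		if (ei == 2) {			/* last slot of the bundle */
			ei = 0;
			*iip += 16;		/* (p8) adds r28=16,r28: next bundle */
		} else {
			ei++;			/* (p9) adds r8=1,r8: next slot */
		}
		*ipsr = (*ipsr & ~(3UL << 41)) | (ei << 41);	/* re-insert ei */
	}
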
@@ -837,8 +878,6 @@ END(interrupt)
  *	- r31: saved pr
  *	- b0: original contents (to be saved)
  * On exit:
- *	- executing on bank 1 registers
- *	- psr.ic enabled, interrupts restored
  *	- p10: TRUE if syscall is invoked with more than 8 out
  *	  registers or r15's Nat is true
  *	- r1: kernel's gp
@@ -846,8 +885,11 @@ END(interrupt)
  *	- r8: -EINVAL if p10 is true
  *	- r12: points to kernel stack
  *	- r13: points to current task
+ *	- r14: preserved (same as on entry)
+ *	- p13: preserved
  *	- p15: TRUE if interrupts need to be re-enabled
  *	- ar.fpsr: set to kernel settings
+ *	- b6: preserved (same as on entry)
  */
 GLOBAL_ENTRY(ia64_syscall_setup)
 #if PT(B6) != 0
@@ -915,10 +957,10 @@ GLOBAL_ENTRY(ia64_syscall_setup)
 (p13)	mov in5=-1
 	;;
 	st8 [r16]=r21,PT(R8)-PT(AR_FPSR)	// save ar.fpsr
-	tnat.nz p14,p0=in6
+	tnat.nz p13,p0=in6
 	cmp.lt p10,p9=r11,r8	// frame size can't be more than local+8
 	;;
-	stf8 [r16]=f1		// ensure pt_regs.r8 != 0 (see handle_syscall_error)
+	mov r8=1
 (p9)	tnat.nz p10,p0=r15
 	adds r12=-16,r1		// switch to kernel memory stack (with 16 bytes of scratch)

@@ -929,9 +971,9 @@ GLOBAL_ENTRY(ia64_syscall_setup)
 	mov r13=r2				// establish `current'
 	movl r1=__gp				// establish kernel global pointer
 	;;
-(p14)	mov in6=-1
+	st8 [r16]=r8		// ensure pt_regs.r8 != 0 (see handle_syscall_error)
+(p13)	mov in6=-1
 (p8)	mov in7=-1
-	nop.i 0

 	cmp.eq pSys,pNonSys=r0,r0		// set pSys=1, pNonSys=0
 	movl r17=FPSR_DEFAULT
@@ -1002,6 +1044,8 @@ END(dispatch_illegal_op_fault)
 	FAULT(17)

 ENTRY(non_syscall)
+	mov ar.rsc=r27			// restore ar.rsc before SAVE_MIN_WITH_COVER
+	;;
 	SAVE_MIN_WITH_COVER

 	// There is no particular reason for this code to be here, other than that
@@ -1199,6 +1243,25 @@ END(disabled_fp_reg)
 // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
 ENTRY(nat_consumption)
 	DBG_FAULT(26)
+
+	mov r16=cr.ipsr
+	mov r17=cr.isr
+	mov r31=pr				// save PR
+	;;
+	and r18=0xf,r17				// r18 = cr.ipsr.code{3:0}
+	tbit.z p6,p0=r17,IA64_ISR_NA_BIT
+	;;
+	cmp.ne.or p6,p0=IA64_ISR_CODE_LFETCH,r18
+	dep r16=-1,r16,IA64_PSR_ED_BIT,1
+(p6)	br.cond.spnt 1f		// branch if (cr.ispr.na == 0 || cr.ipsr.code{3:0} != LFETCH)
+	;;
+	mov cr.ipsr=r16		// set cr.ipsr.na
+	mov pr=r31,-1
+	;;
+	rfi
+
+1:	mov pr=r31,-1
+	;;
 	FAULT(26)
 END(nat_consumption)

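
The new nat_consumption prologue above turns a NaT consumption raised by an lfetch into a no-op instead of a full fault: if cr.isr.na is set and cr.isr.code{3:0} is the lfetch code, it sets cr.ipsr.ed and returns with rfi so the retried lfetch is discarded; anything else falls through to the generic FAULT(26) path. A minimal C-style sketch of that decision, with illustrative names only (the constants mirror the ones used in the assembly above; the function itself is hypothetical):

	/* Hypothetical sketch of the lfetch fast path added to
	 * nat_consumption.  Returns 1 if the fault can be dismissed. */
	static int nat_consumption_fastpath(unsigned long isr, unsigned long *ipsr)
	{
		unsigned int code = isr & 0xf;			/* and r18=0xf,r17 */
		int na = (isr >> IA64_ISR_NA_BIT) & 1;		/* tbit.z p6,p0=r17,... */

		if (!na || code != IA64_ISR_CODE_LFETCH)
			return 0;	/* (p6) br.cond.spnt 1f -> FAULT(26) */

		*ipsr |= 1UL << IA64_PSR_ED_BIT;	/* dep r16=-1,r16,ED,1 */
		return 1;	/* rfi: the retried lfetch is skipped */
	}
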