Diffstat (limited to 'arch/ia64')

 -rw-r--r--  arch/ia64/configs/sn2_defconfig          |   4
 -rw-r--r--  arch/ia64/hp/common/sba_iommu.c          |   4
 -rw-r--r--  arch/ia64/hp/sim/simserial.c             |  16
 -rw-r--r--  arch/ia64/kernel/entry.S                 | 110
 -rw-r--r--  arch/ia64/kernel/fsys.S                  | 147
 -rw-r--r--  arch/ia64/kernel/gate.S                  |  62
 -rw-r--r--  arch/ia64/kernel/ia64_ksyms.c            |   3
 -rw-r--r--  arch/ia64/kernel/ivt.S                   | 198
 -rw-r--r--  arch/ia64/kernel/ptrace.c                |  22
 -rw-r--r--  arch/ia64/kernel/setup.c                 |  12
 -rw-r--r--  arch/ia64/kernel/smp.c                   |   3
 -rw-r--r--  arch/ia64/sn/kernel/io_init.c            |   2
 -rw-r--r--  arch/ia64/sn/kernel/iomv.c               |   6
 -rw-r--r--  arch/ia64/sn/kernel/setup.c              |  43
 -rw-r--r--  arch/ia64/sn/kernel/sn2/ptc_deadlock.S   |   1
 -rw-r--r--  arch/ia64/sn/kernel/tiocx.c              |  14
 -rw-r--r--  arch/ia64/sn/pci/tioca_provider.c        |   8
 17 files changed, 396 insertions, 259 deletions
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index 487d2e36b0a6..c05613980300 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -99,7 +99,7 @@ CONFIG_ACPI_DEALLOCATE_IRQ=y | |||
99 | # Firmware Drivers | 99 | # Firmware Drivers |
100 | # | 100 | # |
101 | CONFIG_EFI_VARS=y | 101 | CONFIG_EFI_VARS=y |
102 | # CONFIG_EFI_PCDP is not set | 102 | CONFIG_EFI_PCDP=y |
103 | CONFIG_BINFMT_ELF=y | 103 | CONFIG_BINFMT_ELF=y |
104 | # CONFIG_BINFMT_MISC is not set | 104 | # CONFIG_BINFMT_MISC is not set |
105 | 105 | ||
@@ -650,7 +650,7 @@ CONFIG_MMTIMER=y | |||
650 | # | 650 | # |
651 | # Console display driver support | 651 | # Console display driver support |
652 | # | 652 | # |
653 | # CONFIG_VGA_CONSOLE is not set | 653 | CONFIG_VGA_CONSOLE=y |
654 | CONFIG_DUMMY_CONSOLE=y | 654 | CONFIG_DUMMY_CONSOLE=y |
655 | 655 | ||
656 | # | 656 | # |
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index b8db6e3e5e81..11957598a8b9 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -156,10 +156,13 @@ | |||
156 | */ | 156 | */ |
157 | #define DELAYED_RESOURCE_CNT 64 | 157 | #define DELAYED_RESOURCE_CNT 64 |
158 | 158 | ||
159 | #define PCI_DEVICE_ID_HP_SX2000_IOC 0x12ec | ||
160 | |||
159 | #define ZX1_IOC_ID ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP) | 161 | #define ZX1_IOC_ID ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP) |
160 | #define ZX2_IOC_ID ((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP) | 162 | #define ZX2_IOC_ID ((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP) |
161 | #define REO_IOC_ID ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP) | 163 | #define REO_IOC_ID ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP) |
162 | #define SX1000_IOC_ID ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP) | 164 | #define SX1000_IOC_ID ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP) |
165 | #define SX2000_IOC_ID ((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP) | ||
163 | 166 | ||
164 | #define ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */ | 167 | #define ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */ |
165 | 168 | ||
@@ -1726,6 +1729,7 @@ static struct ioc_iommu ioc_iommu_info[] __initdata = { | |||
1726 | { ZX1_IOC_ID, "zx1", ioc_zx1_init }, | 1729 | { ZX1_IOC_ID, "zx1", ioc_zx1_init }, |
1727 | { ZX2_IOC_ID, "zx2", NULL }, | 1730 | { ZX2_IOC_ID, "zx2", NULL }, |
1728 | { SX1000_IOC_ID, "sx1000", NULL }, | 1731 | { SX1000_IOC_ID, "sx1000", NULL }, |
1732 | { SX2000_IOC_ID, "sx2000", NULL }, | ||
1729 | }; | 1733 | }; |
1730 | 1734 | ||
1731 | static struct ioc * __init | 1735 | static struct ioc * __init |
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 786e70718ce4..7a8ae0f4b387 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/serial.h> | 31 | #include <linux/serial.h> |
32 | #include <linux/serialP.h> | 32 | #include <linux/serialP.h> |
33 | #include <linux/sysrq.h> | ||
33 | 34 | ||
34 | #include <asm/irq.h> | 35 | #include <asm/irq.h> |
35 | #include <asm/hw_irq.h> | 36 | #include <asm/hw_irq.h> |
@@ -149,12 +150,17 @@ static void receive_chars(struct tty_struct *tty, struct pt_regs *regs) | |||
149 | seen_esc = 2; | 150 | seen_esc = 2; |
150 | continue; | 151 | continue; |
151 | } else if ( seen_esc == 2 ) { | 152 | } else if ( seen_esc == 2 ) { |
152 | if ( ch == 'P' ) show_state(); /* F1 key */ | 153 | if ( ch == 'P' ) /* F1 */ |
153 | #ifdef CONFIG_KDB | 154 | show_state(); |
154 | if ( ch == 'S' ) | 155 | #ifdef CONFIG_MAGIC_SYSRQ |
155 | kdb(KDB_REASON_KEYBOARD, 0, (kdb_eframe_t) regs); | 156 | if ( ch == 'S' ) { /* F4 */ |
157 | do | ||
158 | ch = ia64_ssc(0, 0, 0, 0, | ||
159 | SSC_GETCHAR); | ||
160 | while (!ch); | ||
161 | handle_sysrq(ch, regs, NULL); | ||
162 | } | ||
156 | #endif | 163 | #endif |
157 | |||
158 | seen_esc = 0; | 164 | seen_esc = 0; |
159 | continue; | 165 | continue; |
160 | } | 166 | } |
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 785a51b0ad8e..69f88d561d62 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -470,18 +470,6 @@ ENTRY(load_switch_stack) | |||
470 | br.cond.sptk.many b7 | 470 | br.cond.sptk.many b7 |
471 | END(load_switch_stack) | 471 | END(load_switch_stack) |
472 | 472 | ||
473 | GLOBAL_ENTRY(__ia64_syscall) | ||
474 | .regstk 6,0,0,0 | ||
475 | mov r15=in5 // put syscall number in place | ||
476 | break __BREAK_SYSCALL | ||
477 | movl r2=errno | ||
478 | cmp.eq p6,p7=-1,r10 | ||
479 | ;; | ||
480 | (p6) st4 [r2]=r8 | ||
481 | (p6) mov r8=-1 | ||
482 | br.ret.sptk.many rp | ||
483 | END(__ia64_syscall) | ||
484 | |||
485 | GLOBAL_ENTRY(execve) | 473 | GLOBAL_ENTRY(execve) |
486 | mov r15=__NR_execve // put syscall number in place | 474 | mov r15=__NR_execve // put syscall number in place |
487 | break __BREAK_SYSCALL | 475 | break __BREAK_SYSCALL |
@@ -637,7 +625,7 @@ END(ia64_ret_from_syscall) | |||
637 | * r8-r11: restored (syscall return value(s)) | 625 | * r8-r11: restored (syscall return value(s)) |
638 | * r12: restored (user-level stack pointer) | 626 | * r12: restored (user-level stack pointer) |
639 | * r13: restored (user-level thread pointer) | 627 | * r13: restored (user-level thread pointer) |
640 | * r14: cleared | 628 | * r14: set to __kernel_syscall_via_epc |
641 | * r15: restored (syscall #) | 629 | * r15: restored (syscall #) |
642 | * r16-r17: cleared | 630 | * r16-r17: cleared |
643 | * r18: user-level b6 | 631 | * r18: user-level b6 |
@@ -658,7 +646,7 @@ END(ia64_ret_from_syscall) | |||
658 | * pr: restored (user-level pr) | 646 | * pr: restored (user-level pr) |
659 | * b0: restored (user-level rp) | 647 | * b0: restored (user-level rp) |
660 | * b6: restored | 648 | * b6: restored |
661 | * b7: cleared | 649 | * b7: set to __kernel_syscall_via_epc |
662 | * ar.unat: restored (user-level ar.unat) | 650 | * ar.unat: restored (user-level ar.unat) |
663 | * ar.pfs: restored (user-level ar.pfs) | 651 | * ar.pfs: restored (user-level ar.pfs) |
664 | * ar.rsc: restored (user-level ar.rsc) | 652 | * ar.rsc: restored (user-level ar.rsc) |
@@ -704,72 +692,79 @@ ENTRY(ia64_leave_syscall) | |||
704 | ;; | 692 | ;; |
705 | (p6) ld4 r31=[r18] // load current_thread_info()->flags | 693 | (p6) ld4 r31=[r18] // load current_thread_info()->flags |
706 | ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs" | 694 | ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs" |
707 | mov b7=r0 // clear b7 | 695 | nop.i 0 |
708 | ;; | 696 | ;; |
709 | ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage) | 697 | mov r16=ar.bsp // M2 get existing backing store pointer |
710 | ld8 r18=[r2],PT(R9)-PT(B6) // load b6 | 698 | ld8 r18=[r2],PT(R9)-PT(B6) // load b6 |
711 | (p6) and r15=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE? | 699 | (p6) and r15=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE? |
712 | ;; | 700 | ;; |
713 | mov r16=ar.bsp // M2 get existing backing store pointer | 701 | ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage) |
714 | (p6) cmp4.ne.unc p6,p0=r15, r0 // any special work pending? | 702 | (p6) cmp4.ne.unc p6,p0=r15, r0 // any special work pending? |
715 | (p6) br.cond.spnt .work_pending_syscall | 703 | (p6) br.cond.spnt .work_pending_syscall |
716 | ;; | 704 | ;; |
717 | // start restoring the state saved on the kernel stack (struct pt_regs): | 705 | // start restoring the state saved on the kernel stack (struct pt_regs): |
718 | ld8 r9=[r2],PT(CR_IPSR)-PT(R9) | 706 | ld8 r9=[r2],PT(CR_IPSR)-PT(R9) |
719 | ld8 r11=[r3],PT(CR_IIP)-PT(R11) | 707 | ld8 r11=[r3],PT(CR_IIP)-PT(R11) |
720 | mov f6=f0 // clear f6 | 708 | (pNonSys) break 0 // bug check: we shouldn't be here if pNonSys is TRUE! |
721 | ;; | 709 | ;; |
722 | invala // M0|1 invalidate ALAT | 710 | invala // M0|1 invalidate ALAT |
723 | rsm psr.i | psr.ic // M2 initiate turning off of interrupt and interruption collection | 711 | rsm psr.i | psr.ic // M2 turn off interrupts and interruption collection |
724 | mov f9=f0 // clear f9 | 712 | cmp.eq p9,p0=r0,r0 // A set p9 to indicate that we should restore cr.ifs |
725 | 713 | ||
726 | ld8 r29=[r2],16 // load cr.ipsr | 714 | ld8 r29=[r2],16 // M0|1 load cr.ipsr |
727 | ld8 r28=[r3],16 // load cr.iip | 715 | ld8 r28=[r3],16 // M0|1 load cr.iip |
728 | mov f8=f0 // clear f8 | 716 | mov r22=r0 // A clear r22 |
729 | ;; | 717 | ;; |
730 | ld8 r30=[r2],16 // M0|1 load cr.ifs | 718 | ld8 r30=[r2],16 // M0|1 load cr.ifs |
731 | ld8 r25=[r3],16 // M0|1 load ar.unat | 719 | ld8 r25=[r3],16 // M0|1 load ar.unat |
732 | cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs | 720 | (pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13 |
733 | ;; | 721 | ;; |
734 | ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs | 722 | ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs |
735 | (pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled | 723 | (pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled |
736 | mov f10=f0 // clear f10 | 724 | nop 0 |
737 | ;; | 725 | ;; |
738 | ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // load b0 | 726 | ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0 |
739 | ld8 r27=[r3],PT(PR)-PT(AR_RSC) // load ar.rsc | 727 | ld8 r27=[r3],PT(PR)-PT(AR_RSC) // M0|1 load ar.rsc |
740 | mov f11=f0 // clear f11 | 728 | mov f6=f0 // F clear f6 |
741 | ;; | 729 | ;; |
742 | ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT) // load ar.rnat (may be garbage) | 730 | ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT) // M0|1 load ar.rnat (may be garbage) |
743 | ld8 r31=[r3],PT(R1)-PT(PR) // load predicates | 731 | ld8 r31=[r3],PT(R1)-PT(PR) // M0|1 load predicates |
744 | (pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13 | 732 | mov f7=f0 // F clear f7 |
745 | ;; | 733 | ;; |
746 | ld8 r20=[r2],PT(R12)-PT(AR_FPSR) // load ar.fpsr | 734 | ld8 r20=[r2],PT(R12)-PT(AR_FPSR) // M0|1 load ar.fpsr |
747 | ld8.fill r1=[r3],16 // load r1 | 735 | ld8.fill r1=[r3],16 // M0|1 load r1 |
748 | (pUStk) mov r17=1 | 736 | (pUStk) mov r17=1 // A |
749 | ;; | 737 | ;; |
750 | srlz.d // M0 ensure interruption collection is off | 738 | (pUStk) st1 [r14]=r17 // M2|3 |
751 | ld8.fill r13=[r3],16 | 739 | ld8.fill r13=[r3],16 // M0|1 |
752 | mov f7=f0 // clear f7 | 740 | mov f8=f0 // F clear f8 |
753 | ;; | 741 | ;; |
754 | ld8.fill r12=[r2] // restore r12 (sp) | 742 | ld8.fill r12=[r2] // M0|1 restore r12 (sp) |
755 | mov.m ar.ssd=r0 // M2 clear ar.ssd | 743 | ld8.fill r15=[r3] // M0|1 restore r15 |
756 | mov r22=r0 // clear r22 | 744 | mov b6=r18 // I0 restore b6 |
757 | 745 | ||
758 | ld8.fill r15=[r3] // restore r15 | 746 | addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0 // A |
759 | (pUStk) st1 [r14]=r17 | 747 | mov f9=f0 // F clear f9 |
760 | addl r3=THIS_CPU(ia64_phys_stacked_size_p8),r0 | 748 | (pKStk) br.cond.dpnt.many skip_rbs_switch // B |
749 | |||
750 | srlz.d // M0 ensure interruption collection is off (for cover) | ||
751 | shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition | ||
752 | cover // B add current frame into dirty partition & set cr.ifs | ||
761 | ;; | 753 | ;; |
762 | (pUStk) ld4 r17=[r3] // r17 = cpu_data->phys_stacked_size_p8 | 754 | (pUStk) ld4 r17=[r17] // M0|1 r17 = cpu_data->phys_stacked_size_p8 |
763 | mov.m ar.csd=r0 // M2 clear ar.csd | 755 | mov r19=ar.bsp // M2 get new backing store pointer |
764 | mov b6=r18 // I0 restore b6 | 756 | mov f10=f0 // F clear f10 |
757 | |||
758 | nop.m 0 | ||
759 | movl r14=__kernel_syscall_via_epc // X | ||
765 | ;; | 760 | ;; |
766 | mov r14=r0 // clear r14 | 761 | mov.m ar.csd=r0 // M2 clear ar.csd |
767 | shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition | 762 | mov.m ar.ccv=r0 // M2 clear ar.ccv |
768 | (pKStk) br.cond.dpnt.many skip_rbs_switch | 763 | mov b7=r14 // I0 clear b7 (hint with __kernel_syscall_via_epc) |
769 | 764 | ||
770 | mov.m ar.ccv=r0 // clear ar.ccv | 765 | mov.m ar.ssd=r0 // M2 clear ar.ssd |
771 | (pNonSys) br.cond.dpnt.many dont_preserve_current_frame | 766 | mov f11=f0 // F clear f11 |
772 | br.cond.sptk.many rbs_switch | 767 | br.cond.sptk.many rbs_switch // B |
773 | END(ia64_leave_syscall) | 768 | END(ia64_leave_syscall) |
774 | 769 | ||
775 | #ifdef CONFIG_IA32_SUPPORT | 770 | #ifdef CONFIG_IA32_SUPPORT |
@@ -885,7 +880,7 @@ GLOBAL_ENTRY(ia64_leave_kernel) | |||
885 | ldf.fill f7=[r2],PT(F11)-PT(F7) | 880 | ldf.fill f7=[r2],PT(F11)-PT(F7) |
886 | ldf.fill f8=[r3],32 | 881 | ldf.fill f8=[r3],32 |
887 | ;; | 882 | ;; |
888 | srlz.i // ensure interruption collection is off | 883 | srlz.d // ensure that inter. collection is off (VHPT is don't care, since text is pinned) |
889 | mov ar.ccv=r15 | 884 | mov ar.ccv=r15 |
890 | ;; | 885 | ;; |
891 | ldf.fill f11=[r2] | 886 | ldf.fill f11=[r2] |
@@ -945,11 +940,10 @@ GLOBAL_ENTRY(ia64_leave_kernel) | |||
945 | * NOTE: alloc, loadrs, and cover can't be predicated. | 940 | * NOTE: alloc, loadrs, and cover can't be predicated. |
946 | */ | 941 | */ |
947 | (pNonSys) br.cond.dpnt dont_preserve_current_frame | 942 | (pNonSys) br.cond.dpnt dont_preserve_current_frame |
948 | |||
949 | rbs_switch: | ||
950 | cover // add current frame into dirty partition and set cr.ifs | 943 | cover // add current frame into dirty partition and set cr.ifs |
951 | ;; | 944 | ;; |
952 | mov r19=ar.bsp // get new backing store pointer | 945 | mov r19=ar.bsp // get new backing store pointer |
946 | rbs_switch: | ||
953 | sub r16=r16,r18 // krbs = old bsp - size of dirty partition | 947 | sub r16=r16,r18 // krbs = old bsp - size of dirty partition |
954 | cmp.ne p9,p0=r0,r0 // clear p9 to skip restore of cr.ifs | 948 | cmp.ne p9,p0=r0,r0 // clear p9 to skip restore of cr.ifs |
955 | ;; | 949 | ;; |
@@ -1024,14 +1018,14 @@ rse_clear_invalid: | |||
1024 | mov loc5=0 | 1018 | mov loc5=0 |
1025 | mov loc6=0 | 1019 | mov loc6=0 |
1026 | mov loc7=0 | 1020 | mov loc7=0 |
1027 | (pRecurse) br.call.sptk.few b0=rse_clear_invalid | 1021 | (pRecurse) br.call.dptk.few b0=rse_clear_invalid |
1028 | ;; | 1022 | ;; |
1029 | mov loc8=0 | 1023 | mov loc8=0 |
1030 | mov loc9=0 | 1024 | mov loc9=0 |
1031 | cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret | 1025 | cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret |
1032 | mov loc10=0 | 1026 | mov loc10=0 |
1033 | mov loc11=0 | 1027 | mov loc11=0 |
1034 | (pReturn) br.ret.sptk.many b0 | 1028 | (pReturn) br.ret.dptk.many b0 |
1035 | #endif /* !CONFIG_ITANIUM */ | 1029 | #endif /* !CONFIG_ITANIUM */ |
1036 | # undef pRecurse | 1030 | # undef pRecurse |
1037 | # undef pReturn | 1031 | # undef pReturn |
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 962b6c4e32b5..7d7684a369d3 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -531,93 +531,114 @@ GLOBAL_ENTRY(fsys_bubble_down) | |||
531 | .altrp b6 | 531 | .altrp b6 |
532 | .body | 532 | .body |
533 | /* | 533 | /* |
534 | * We get here for syscalls that don't have a lightweight handler. For those, we | 534 | * We get here for syscalls that don't have a lightweight |
535 | * need to bubble down into the kernel and that requires setting up a minimal | 535 | * handler. For those, we need to bubble down into the kernel |
536 | * pt_regs structure, and initializing the CPU state more or less as if an | 536 | * and that requires setting up a minimal pt_regs structure, |
537 | * interruption had occurred. To make syscall-restarts work, we setup pt_regs | 537 | * and initializing the CPU state more or less as if an |
538 | * such that cr_iip points to the second instruction in syscall_via_break. | 538 | * interruption had occurred. To make syscall-restarts work, |
539 | * Decrementing the IP hence will restart the syscall via break and not | 539 | * we setup pt_regs such that cr_iip points to the second |
540 | * decrementing IP will return us to the caller, as usual. Note that we preserve | 540 | * instruction in syscall_via_break. Decrementing the IP |
541 | * the value of psr.pp rather than initializing it from dcr.pp. This makes it | 541 | * hence will restart the syscall via break and not |
542 | * possible to distinguish fsyscall execution from other privileged execution. | 542 | * decrementing IP will return us to the caller, as usual. |
543 | * Note that we preserve the value of psr.pp rather than | ||
544 | * initializing it from dcr.pp. This makes it possible to | ||
545 | * distinguish fsyscall execution from other privileged | ||
546 | * execution. | ||
543 | * | 547 | * |
544 | * On entry: | 548 | * On entry: |
545 | * - normal fsyscall handler register usage, except that we also have: | 549 | * - normal fsyscall handler register usage, except |
550 | * that we also have: | ||
546 | * - r18: address of syscall entry point | 551 | * - r18: address of syscall entry point |
547 | * - r21: ar.fpsr | 552 | * - r21: ar.fpsr |
548 | * - r26: ar.pfs | 553 | * - r26: ar.pfs |
549 | * - r27: ar.rsc | 554 | * - r27: ar.rsc |
550 | * - r29: psr | 555 | * - r29: psr |
556 | * | ||
557 | * We used to clear some PSR bits here but that requires slow | ||
558 | * serialization. Fortuntely, that isn't really necessary. | ||
559 | * The rationale is as follows: we used to clear bits | ||
560 | * ~PSR_PRESERVED_BITS in PSR.L. Since | ||
561 | * PSR_PRESERVED_BITS==PSR.{UP,MFL,MFH,PK,DT,PP,SP,RT,IC}, we | ||
562 | * ended up clearing PSR.{BE,AC,I,DFL,DFH,DI,DB,SI,TB}. | ||
563 | * However, | ||
564 | * | ||
565 | * PSR.BE : already is turned off in __kernel_syscall_via_epc() | ||
566 | * PSR.AC : don't care (kernel normally turns PSR.AC on) | ||
567 | * PSR.I : already turned off by the time fsys_bubble_down gets | ||
568 | * invoked | ||
569 | * PSR.DFL: always 0 (kernel never turns it on) | ||
570 | * PSR.DFH: don't care --- kernel never touches f32-f127 on its own | ||
571 | * initiative | ||
572 | * PSR.DI : always 0 (kernel never turns it on) | ||
573 | * PSR.SI : always 0 (kernel never turns it on) | ||
574 | * PSR.DB : don't care --- kernel never enables kernel-level | ||
575 | * breakpoints | ||
576 | * PSR.TB : must be 0 already; if it wasn't zero on entry to | ||
577 | * __kernel_syscall_via_epc, the branch to fsys_bubble_down | ||
578 | * will trigger a taken branch; the taken-trap-handler then | ||
579 | * converts the syscall into a break-based system-call. | ||
551 | */ | 580 | */ |
552 | # define PSR_PRESERVED_BITS (IA64_PSR_UP | IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_PK \ | ||
553 | | IA64_PSR_DT | IA64_PSR_PP | IA64_PSR_SP | IA64_PSR_RT \ | ||
554 | | IA64_PSR_IC) | ||
555 | /* | 581 | /* |
556 | * Reading psr.l gives us only bits 0-31, psr.it, and psr.mc. The rest we have | 582 | * Reading psr.l gives us only bits 0-31, psr.it, and psr.mc. |
557 | * to synthesize. | 583 | * The rest we have to synthesize. |
558 | */ | 584 | */ |
559 | # define PSR_ONE_BITS ((3 << IA64_PSR_CPL0_BIT) | (0x1 << IA64_PSR_RI_BIT) \ | 585 | # define PSR_ONE_BITS ((3 << IA64_PSR_CPL0_BIT) \ |
586 | | (0x1 << IA64_PSR_RI_BIT) \ | ||
560 | | IA64_PSR_BN | IA64_PSR_I) | 587 | | IA64_PSR_BN | IA64_PSR_I) |
561 | 588 | ||
562 | invala | 589 | invala // M0|1 |
563 | movl r8=PSR_ONE_BITS | 590 | movl r14=ia64_ret_from_syscall // X |
564 | 591 | ||
565 | mov r25=ar.unat // save ar.unat (5 cyc) | 592 | nop.m 0 |
566 | movl r9=PSR_PRESERVED_BITS | 593 | movl r28=__kernel_syscall_via_break // X create cr.iip |
594 | ;; | ||
567 | 595 | ||
568 | mov ar.rsc=0 // set enforced lazy mode, pl 0, little-endian, loadrs=0 | 596 | mov r2=r16 // A get task addr to addl-addressable register |
569 | movl r28=__kernel_syscall_via_break | 597 | adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 // A |
598 | mov r31=pr // I0 save pr (2 cyc) | ||
570 | ;; | 599 | ;; |
571 | mov r23=ar.bspstore // save ar.bspstore (12 cyc) | 600 | st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag |
572 | mov r31=pr // save pr (2 cyc) | 601 | addl r22=IA64_RBS_OFFSET,r2 // A compute base of RBS |
573 | mov r20=r1 // save caller's gp in r20 | 602 | add r3=TI_FLAGS+IA64_TASK_SIZE,r2 // A |
574 | ;; | 603 | ;; |
575 | mov r2=r16 // copy current task addr to addl-addressable register | 604 | ld4 r3=[r3] // M0|1 r3 = current_thread_info()->flags |
576 | and r9=r9,r29 | 605 | lfetch.fault.excl.nt1 [r22] // M0|1 prefetch register backing-store |
577 | mov r19=b6 // save b6 (2 cyc) | 606 | nop.i 0 |
578 | ;; | 607 | ;; |
579 | mov psr.l=r9 // slam the door (17 cyc to srlz.i) | 608 | mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0 |
580 | or r29=r8,r29 // construct cr.ipsr value to save | 609 | nop.m 0 |
581 | addl r22=IA64_RBS_OFFSET,r2 // compute base of RBS | 610 | nop.i 0 |
582 | ;; | 611 | ;; |
583 | // GAS reports a spurious RAW hazard on the read of ar.rnat because it thinks | 612 | mov r23=ar.bspstore // M2 (12 cyc) save ar.bspstore |
584 | // we may be reading ar.itc after writing to psr.l. Avoid that message with | 613 | mov.m r24=ar.rnat // M2 (5 cyc) read ar.rnat (dual-issues!) |
585 | // this directive: | 614 | nop.i 0 |
586 | dv_serialize_data | ||
587 | mov.m r24=ar.rnat // read ar.rnat (5 cyc lat) | ||
588 | lfetch.fault.excl.nt1 [r22] | ||
589 | adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r2 | ||
590 | |||
591 | // ensure previous insn group is issued before we stall for srlz.i: | ||
592 | ;; | 615 | ;; |
593 | srlz.i // ensure new psr.l has been established | 616 | mov ar.bspstore=r22 // M2 (6 cyc) switch to kernel RBS |
594 | ///////////////////////////////////////////////////////////////////////////// | 617 | movl r8=PSR_ONE_BITS // X |
595 | ////////// from this point on, execution is not interruptible anymore | ||
596 | ///////////////////////////////////////////////////////////////////////////// | ||
597 | addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2 // compute base of memory stack | ||
598 | cmp.ne pKStk,pUStk=r0,r0 // set pKStk <- 0, pUStk <- 1 | ||
599 | ;; | 618 | ;; |
600 | st1 [r16]=r0 // clear current->thread.on_ustack flag | 619 | mov r25=ar.unat // M2 (5 cyc) save ar.unat |
601 | mov ar.bspstore=r22 // switch to kernel RBS | 620 | mov r19=b6 // I0 save b6 (2 cyc) |
602 | mov b6=r18 // copy syscall entry-point to b6 (7 cyc) | 621 | mov r20=r1 // A save caller's gp in r20 |
603 | add r3=TI_FLAGS+IA64_TASK_SIZE,r2 | ||
604 | ;; | 622 | ;; |
605 | ld4 r3=[r3] // r2 = current_thread_info()->flags | 623 | or r29=r8,r29 // A construct cr.ipsr value to save |
606 | mov r18=ar.bsp // save (kernel) ar.bsp (12 cyc) | 624 | mov b6=r18 // I0 copy syscall entry-point to b6 (7 cyc) |
607 | mov ar.rsc=0x3 // set eager mode, pl 0, little-endian, loadrs=0 | 625 | addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2 // A compute base of memory stack |
608 | br.call.sptk.many b7=ia64_syscall_setup | 626 | |
609 | ;; | 627 | mov r18=ar.bsp // M2 save (kernel) ar.bsp (12 cyc) |
610 | ssm psr.i | 628 | cmp.ne pKStk,pUStk=r0,r0 // A set pKStk <- 0, pUStk <- 1 |
611 | movl r2=ia64_ret_from_syscall | 629 | br.call.sptk.many b7=ia64_syscall_setup // B |
612 | ;; | 630 | ;; |
613 | mov rp=r2 // set the real return addr | 631 | mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0 |
614 | and r3=_TIF_SYSCALL_TRACEAUDIT,r3 | 632 | mov rp=r14 // I0 set the real return addr |
633 | and r3=_TIF_SYSCALL_TRACEAUDIT,r3 // A | ||
615 | ;; | 634 | ;; |
616 | cmp.eq p8,p0=r3,r0 | 635 | ssm psr.i // M2 we're on kernel stacks now, reenable irqs |
636 | cmp.eq p8,p0=r3,r0 // A | ||
637 | (p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT | ||
617 | 638 | ||
618 | (p10) br.cond.spnt.many ia64_ret_from_syscall // p10==true means out registers are more than 8 | 639 | nop.m 0 |
619 | (p8) br.call.sptk.many b6=b6 // ignore this return addr | 640 | (p8) br.call.sptk.many b6=b6 // B (ignore return address) |
620 | br.cond.sptk ia64_trace_syscall | 641 | br.cond.spnt ia64_trace_syscall // B |
621 | END(fsys_bubble_down) | 642 | END(fsys_bubble_down) |
622 | 643 | ||
623 | .rodata | 644 | .rodata |
diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S
index facf75acdc85..86948ce63e43 100644
--- a/arch/ia64/kernel/gate.S
+++ b/arch/ia64/kernel/gate.S
@@ -72,38 +72,40 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc) | |||
72 | * bundle get executed. The remaining code must be safe even if | 72 | * bundle get executed. The remaining code must be safe even if |
73 | * they do not get executed. | 73 | * they do not get executed. |
74 | */ | 74 | */ |
75 | adds r17=-1024,r15 | 75 | adds r17=-1024,r15 // A |
76 | mov r10=0 // default to successful syscall execution | 76 | mov r10=0 // A default to successful syscall execution |
77 | epc | 77 | epc // B causes split-issue |
78 | } | 78 | } |
79 | ;; | 79 | ;; |
80 | rsm psr.be // note: on McKinley "rsm psr.be/srlz.d" is slightly faster than "rum psr.be" | 80 | rsm psr.be | psr.i // M2 (5 cyc to srlz.d) |
81 | LOAD_FSYSCALL_TABLE(r14) | 81 | LOAD_FSYSCALL_TABLE(r14) // X |
82 | |||
83 | mov r16=IA64_KR(CURRENT) // 12 cycle read latency | ||
84 | tnat.nz p10,p9=r15 | ||
85 | mov r19=NR_syscalls-1 | ||
86 | ;; | 82 | ;; |
87 | shladd r18=r17,3,r14 | 83 | mov r16=IA64_KR(CURRENT) // M2 (12 cyc) |
88 | 84 | shladd r18=r17,3,r14 // A | |
89 | srlz.d | 85 | mov r19=NR_syscalls-1 // A |
90 | cmp.ne p8,p0=r0,r0 // p8 <- FALSE | 86 | ;; |
91 | /* Note: if r17 is a NaT, p6 will be set to zero. */ | 87 | lfetch [r18] // M0|1 |
92 | cmp.geu p6,p7=r19,r17 // (syscall > 0 && syscall < 1024+NR_syscalls)? | 88 | mov r29=psr // M2 (12 cyc) |
93 | ;; | 89 | // If r17 is a NaT, p6 will be zero |
94 | (p6) ld8 r18=[r18] | 90 | cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)? |
95 | mov r21=ar.fpsr | 91 | ;; |
96 | add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry | 92 | mov r21=ar.fpsr // M2 (12 cyc) |
97 | ;; | 93 | tnat.nz p10,p9=r15 // I0 |
98 | (p6) mov b7=r18 | 94 | mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...) |
99 | (p6) tbit.z p8,p0=r18,0 | 95 | ;; |
100 | (p8) br.dptk.many b7 | 96 | srlz.d // M0 (forces split-issue) ensure PSR.BE==0 |
101 | 97 | (p6) ld8 r18=[r18] // M0|1 | |
102 | (p6) rsm psr.i | 98 | nop.i 0 |
103 | mov r27=ar.rsc | 99 | ;; |
104 | mov r26=ar.pfs | 100 | nop.m 0 |
101 | (p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!) | ||
102 | nop.i 0 | ||
105 | ;; | 103 | ;; |
106 | mov r29=psr // read psr (12 cyc load latency) | 104 | (p8) ssm psr.i |
105 | (p6) mov b7=r18 // I0 | ||
106 | (p8) br.dptk.many b7 // B | ||
107 | |||
108 | mov r27=ar.rsc // M2 (12 cyc) | ||
107 | /* | 109 | /* |
108 | * brl.cond doesn't work as intended because the linker would convert this branch | 110 | * brl.cond doesn't work as intended because the linker would convert this branch |
109 | * into a branch to a PLT. Perhaps there will be a way to avoid this with some | 111 | * into a branch to a PLT. Perhaps there will be a way to avoid this with some |
@@ -111,6 +113,8 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc) | |||
111 | * instead. | 113 | * instead. |
112 | */ | 114 | */ |
113 | #ifdef CONFIG_ITANIUM | 115 | #ifdef CONFIG_ITANIUM |
116 | (p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry | ||
117 | ;; | ||
114 | (p6) ld8 r14=[r14] // r14 <- fsys_bubble_down | 118 | (p6) ld8 r14=[r14] // r14 <- fsys_bubble_down |
115 | ;; | 119 | ;; |
116 | (p6) mov b7=r14 | 120 | (p6) mov b7=r14 |
@@ -118,7 +122,7 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc) | |||
118 | #else | 122 | #else |
119 | BRL_COND_FSYS_BUBBLE_DOWN(p6) | 123 | BRL_COND_FSYS_BUBBLE_DOWN(p6) |
120 | #endif | 124 | #endif |
121 | 125 | ssm psr.i | |
122 | mov r10=-1 | 126 | mov r10=-1 |
123 | (p10) mov r8=EINVAL | 127 | (p10) mov r8=EINVAL |
124 | (p9) mov r8=ENOSYS | 128 | (p9) mov r8=ENOSYS |
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 7bbf019c9867..01572814abe4 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -58,9 +58,6 @@ EXPORT_SYMBOL(__strlen_user); | |||
58 | EXPORT_SYMBOL(__strncpy_from_user); | 58 | EXPORT_SYMBOL(__strncpy_from_user); |
59 | EXPORT_SYMBOL(__strnlen_user); | 59 | EXPORT_SYMBOL(__strnlen_user); |
60 | 60 | ||
61 | #include <asm/unistd.h> | ||
62 | EXPORT_SYMBOL(__ia64_syscall); | ||
63 | |||
64 | /* from arch/ia64/lib */ | 61 | /* from arch/ia64/lib */ |
65 | extern void __divsi3(void); | 62 | extern void __divsi3(void); |
66 | extern void __udivsi3(void); | 63 | extern void __udivsi3(void); |
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 2bc085a73e30..3bb3a13c4047 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * arch/ia64/kernel/ivt.S | 2 | * arch/ia64/kernel/ivt.S |
3 | * | 3 | * |
4 | * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co | 4 | * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co |
5 | * Stephane Eranian <eranian@hpl.hp.com> | 5 | * Stephane Eranian <eranian@hpl.hp.com> |
6 | * David Mosberger <davidm@hpl.hp.com> | 6 | * David Mosberger <davidm@hpl.hp.com> |
7 | * Copyright (C) 2000, 2002-2003 Intel Co | 7 | * Copyright (C) 2000, 2002-2003 Intel Co |
@@ -692,82 +692,118 @@ ENTRY(break_fault) | |||
692 | * to prevent leaking bits from kernel to user level. | 692 | * to prevent leaking bits from kernel to user level. |
693 | */ | 693 | */ |
694 | DBG_FAULT(11) | 694 | DBG_FAULT(11) |
695 | mov r16=IA64_KR(CURRENT) // r16 = current task; 12 cycle read lat. | 695 | mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc) |
696 | mov r17=cr.iim | 696 | mov r29=cr.ipsr // M2 (12 cyc) |
697 | mov r18=__IA64_BREAK_SYSCALL | 697 | mov r31=pr // I0 (2 cyc) |
698 | mov r21=ar.fpsr | 698 | |
699 | mov r29=cr.ipsr | 699 | mov r17=cr.iim // M2 (2 cyc) |
700 | mov r19=b6 | 700 | mov.m r27=ar.rsc // M2 (12 cyc) |
701 | mov r25=ar.unat | 701 | mov r18=__IA64_BREAK_SYSCALL // A |
702 | mov r27=ar.rsc | 702 | |
703 | mov r26=ar.pfs | 703 | mov.m ar.rsc=0 // M2 |
704 | mov r28=cr.iip | 704 | mov.m r21=ar.fpsr // M2 (12 cyc) |
705 | mov r31=pr // prepare to save predicates | 705 | mov r19=b6 // I0 (2 cyc) |
706 | mov r20=r1 | 706 | ;; |
707 | ;; | 707 | mov.m r23=ar.bspstore // M2 (12 cyc) |
708 | mov.m r24=ar.rnat // M2 (5 cyc) | ||
709 | mov.i r26=ar.pfs // I0 (2 cyc) | ||
710 | |||
711 | invala // M0|1 | ||
712 | nop.m 0 // M | ||
713 | mov r20=r1 // A save r1 | ||
714 | |||
715 | nop.m 0 | ||
716 | movl r30=sys_call_table // X | ||
717 | |||
718 | mov r28=cr.iip // M2 (2 cyc) | ||
719 | cmp.eq p0,p7=r18,r17 // I0 is this a system call? | ||
720 | (p7) br.cond.spnt non_syscall // B no -> | ||
721 | // | ||
722 | // From this point on, we are definitely on the syscall-path | ||
723 | // and we can use (non-banked) scratch registers. | ||
724 | // | ||
725 | /////////////////////////////////////////////////////////////////////// | ||
726 | mov r1=r16 // A move task-pointer to "addl"-addressable reg | ||
727 | mov r2=r16 // A setup r2 for ia64_syscall_setup | ||
728 | add r9=TI_FLAGS+IA64_TASK_SIZE,r16 // A r9 = &current_thread_info()->flags |
729 | |||
708 | adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 | 730 | adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 |
709 | cmp.eq p0,p7=r18,r17 // is this a system call? (p7 <- false, if so) | 731 | adds r15=-1024,r15 // A subtract 1024 from syscall number |
710 | (p7) br.cond.spnt non_syscall | 732 | mov r3=NR_syscalls - 1 |
711 | ;; | 733 | ;; |
712 | ld1 r17=[r16] // load current->thread.on_ustack flag | 734 | ld1.bias r17=[r16] // M0|1 r17 = current->thread.on_ustack flag |
713 | st1 [r16]=r0 // clear current->thread.on_ustack flag | 735 | ld4 r9=[r9] // M0|1 r9 = current_thread_info()->flags |
714 | add r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 // set r1 for MINSTATE_START_SAVE_MIN_VIRT | 736 | extr.u r8=r29,41,2 // I0 extract ei field from cr.ipsr |
737 | |||
738 | shladd r30=r15,3,r30 // A r30 = sys_call_table + 8*(syscall-1024) | ||
739 | addl r22=IA64_RBS_OFFSET,r1 // A compute base of RBS | ||
740 | cmp.leu p6,p7=r15,r3 // A syscall number in range? | ||
715 | ;; | 741 | ;; |
716 | invala | ||
717 | 742 | ||
718 | /* adjust return address so we skip over the break instruction: */ | 743 | lfetch.fault.excl.nt1 [r22] // M0|1 prefetch RBS |
744 | (p6) ld8 r30=[r30] // M0|1 load address of syscall entry point | ||
745 | tnat.nz.or p7,p0=r15 // I0 is syscall nr a NaT? | ||
719 | 746 | ||
720 | extr.u r8=r29,41,2 // extract ei field from cr.ipsr | 747 | mov.m ar.bspstore=r22 // M2 switch to kernel RBS |
721 | ;; | 748 | cmp.eq p8,p9=2,r8 // A isr.ei==2? |
722 | cmp.eq p6,p7=2,r8 // isr.ei==2? | ||
723 | mov r2=r1 // setup r2 for ia64_syscall_setup | ||
724 | ;; | ||
725 | (p6) mov r8=0 // clear ei to 0 | ||
726 | (p6) adds r28=16,r28 // switch cr.iip to next bundle cr.ipsr.ei wrapped | ||
727 | (p7) adds r8=1,r8 // increment ei to next slot | ||
728 | ;; | ||
729 | cmp.eq pKStk,pUStk=r0,r17 // are we in kernel mode already? | ||
730 | dep r29=r8,r29,41,2 // insert new ei into cr.ipsr | ||
731 | ;; | 749 | ;; |
732 | 750 | ||
733 | // switch from user to kernel RBS: | 751 | (p8) mov r8=0 // A clear ei to 0 |
734 | MINSTATE_START_SAVE_MIN_VIRT | 752 | (p7) movl r30=sys_ni_syscall // X |
735 | br.call.sptk.many b7=ia64_syscall_setup | ||
736 | ;; | ||
737 | MINSTATE_END_SAVE_MIN_VIRT // switch to bank 1 | ||
738 | ssm psr.ic | PSR_DEFAULT_BITS | ||
739 | ;; | ||
740 | srlz.i // guarantee that interruption collection is on | ||
741 | mov r3=NR_syscalls - 1 | ||
742 | ;; | ||
743 | (p15) ssm psr.i // restore psr.i | ||
744 | // p10==true means out registers are more than 8 or r15's Nat is true | ||
745 | (p10) br.cond.spnt.many ia64_ret_from_syscall | ||
746 | ;; | ||
747 | movl r16=sys_call_table | ||
748 | 753 | ||
749 | adds r15=-1024,r15 // r15 contains the syscall number---subtract 1024 | 754 | (p8) adds r28=16,r28 // A switch cr.iip to next bundle |
750 | movl r2=ia64_ret_from_syscall | 755 | (p9) adds r8=1,r8 // A increment ei to next slot |
751 | ;; | 756 | nop.i 0 |
752 | shladd r20=r15,3,r16 // r20 = sys_call_table + 8*(syscall-1024) | ||
753 | cmp.leu p6,p7=r15,r3 // (syscall > 0 && syscall < 1024 + NR_syscalls) ? | ||
754 | mov rp=r2 // set the real return addr | ||
755 | ;; | 757 | ;; |
756 | (p6) ld8 r20=[r20] // load address of syscall entry point | ||
757 | (p7) movl r20=sys_ni_syscall | ||
758 | 758 | ||
759 | add r2=TI_FLAGS+IA64_TASK_SIZE,r13 | 759 | mov.m r25=ar.unat // M2 (5 cyc) |
760 | ;; | 760 | dep r29=r8,r29,41,2 // I0 insert new ei into cr.ipsr |
761 | ld4 r2=[r2] // r2 = current_thread_info()->flags | 761 | adds r15=1024,r15 // A restore original syscall number |
762 | ;; | 762 | // |
763 | and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit | 763 | // If any of the above loads miss in L1D, we'll stall here until |
764 | // the data arrives. | ||
765 | // | ||
766 | /////////////////////////////////////////////////////////////////////// | ||
767 | st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag | ||
768 | mov b6=r30 // I0 setup syscall handler branch reg early | ||
769 | cmp.eq pKStk,pUStk=r0,r17 // A were we on kernel stacks already? | ||
770 | |||
771 | and r9=_TIF_SYSCALL_TRACEAUDIT,r9 // A mask trace or audit | ||
772 | mov r18=ar.bsp // M2 (12 cyc) | ||
773 | (pKStk) br.cond.spnt .break_fixup // B we're already in kernel-mode -- fix up RBS | ||
774 | ;; | ||
775 | .back_from_break_fixup: | ||
776 | (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack | ||
777 | cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited? | ||
778 | br.call.sptk.many b7=ia64_syscall_setup // B | ||
779 | 1: | ||
780 | mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0 | ||
781 | nop 0 | ||
782 | bsw.1 // B (6 cyc) regs are saved, switch to bank 1 | ||
764 | ;; | 783 | ;; |
765 | cmp.eq p8,p0=r2,r0 | 784 | |
766 | mov b6=r20 | 785 | ssm psr.ic | PSR_DEFAULT_BITS // M2 now it's safe to re-enable intr.-collection |
786 | movl r3=ia64_ret_from_syscall // X | ||
767 | ;; | 787 | ;; |
768 | (p8) br.call.sptk.many b6=b6 // ignore this return addr | 788 | |
769 | br.cond.sptk ia64_trace_syscall | 789 | srlz.i // M0 ensure interruption collection is on |
790 | mov rp=r3 // I0 set the real return addr | ||
791 | (p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT | ||
792 | |||
793 | (p15) ssm psr.i // M2 restore psr.i | ||
794 | (p14) br.call.sptk.many b6=b6 // B invoke syscall-handker (ignore return addr) | ||
795 | br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic | ||
770 | // NOT REACHED | 796 | // NOT REACHED |
797 | /////////////////////////////////////////////////////////////////////// | ||
798 | // On entry, we optimistically assumed that we're coming from user-space. | ||
799 | // For the rare cases where a system-call is done from within the kernel, | ||
800 | // we fix things up at this point: | ||
801 | .break_fixup: | ||
802 | add r1=-IA64_PT_REGS_SIZE,sp // A allocate space for pt_regs structure | ||
803 | mov ar.rnat=r24 // M2 restore kernel's AR.RNAT | ||
804 | ;; | ||
805 | mov ar.bspstore=r23 // M2 restore kernel's AR.BSPSTORE | ||
806 | br.cond.sptk .back_from_break_fixup | ||
771 | END(break_fault) | 807 | END(break_fault) |
772 | 808 | ||
773 | .org ia64_ivt+0x3000 | 809 | .org ia64_ivt+0x3000 |
@@ -842,8 +878,6 @@ END(interrupt) | |||
842 | * - r31: saved pr | 878 | * - r31: saved pr |
843 | * - b0: original contents (to be saved) | 879 | * - b0: original contents (to be saved) |
844 | * On exit: | 880 | * On exit: |
845 | * - executing on bank 1 registers | ||
846 | * - psr.ic enabled, interrupts restored | ||
847 | * - p10: TRUE if syscall is invoked with more than 8 out | 881 | * - p10: TRUE if syscall is invoked with more than 8 out |
848 | * registers or r15's Nat is true | 882 | * registers or r15's Nat is true |
849 | * - r1: kernel's gp | 883 | * - r1: kernel's gp |
@@ -851,8 +885,11 @@ END(interrupt) | |||
851 | * - r8: -EINVAL if p10 is true | 885 | * - r8: -EINVAL if p10 is true |
852 | * - r12: points to kernel stack | 886 | * - r12: points to kernel stack |
853 | * - r13: points to current task | 887 | * - r13: points to current task |
888 | * - r14: preserved (same as on entry) | ||
889 | * - p13: preserved | ||
854 | * - p15: TRUE if interrupts need to be re-enabled | 890 | * - p15: TRUE if interrupts need to be re-enabled |
855 | * - ar.fpsr: set to kernel settings | 891 | * - ar.fpsr: set to kernel settings |
892 | * - b6: preserved (same as on entry) | ||
856 | */ | 893 | */ |
857 | GLOBAL_ENTRY(ia64_syscall_setup) | 894 | GLOBAL_ENTRY(ia64_syscall_setup) |
858 | #if PT(B6) != 0 | 895 | #if PT(B6) != 0 |
@@ -920,10 +957,10 @@ GLOBAL_ENTRY(ia64_syscall_setup) | |||
920 | (p13) mov in5=-1 | 957 | (p13) mov in5=-1 |
921 | ;; | 958 | ;; |
922 | st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr | 959 | st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr |
923 | tnat.nz p14,p0=in6 | 960 | tnat.nz p13,p0=in6 |
924 | cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8 | 961 | cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8 |
925 | ;; | 962 | ;; |
926 | stf8 [r16]=f1 // ensure pt_regs.r8 != 0 (see handle_syscall_error) | 963 | mov r8=1 |
927 | (p9) tnat.nz p10,p0=r15 | 964 | (p9) tnat.nz p10,p0=r15 |
928 | adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch) | 965 | adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch) |
929 | 966 | ||
@@ -934,9 +971,9 @@ GLOBAL_ENTRY(ia64_syscall_setup) | |||
934 | mov r13=r2 // establish `current' | 971 | mov r13=r2 // establish `current' |
935 | movl r1=__gp // establish kernel global pointer | 972 | movl r1=__gp // establish kernel global pointer |
936 | ;; | 973 | ;; |
937 | (p14) mov in6=-1 | 974 | st8 [r16]=r8 // ensure pt_regs.r8 != 0 (see handle_syscall_error) |
975 | (p13) mov in6=-1 | ||
938 | (p8) mov in7=-1 | 976 | (p8) mov in7=-1 |
939 | nop.i 0 | ||
940 | 977 | ||
941 | cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0 | 978 | cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0 |
942 | movl r17=FPSR_DEFAULT | 979 | movl r17=FPSR_DEFAULT |
@@ -1007,6 +1044,8 @@ END(dispatch_illegal_op_fault) | |||
1007 | FAULT(17) | 1044 | FAULT(17) |
1008 | 1045 | ||
1009 | ENTRY(non_syscall) | 1046 | ENTRY(non_syscall) |
1047 | mov ar.rsc=r27 // restore ar.rsc before SAVE_MIN_WITH_COVER | ||
1048 | ;; | ||
1010 | SAVE_MIN_WITH_COVER | 1049 | SAVE_MIN_WITH_COVER |
1011 | 1050 | ||
1012 | // There is no particular reason for this code to be here, other than that | 1051 | // There is no particular reason for this code to be here, other than that |
@@ -1204,6 +1243,25 @@ END(disabled_fp_reg) | |||
1204 | // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50) | 1243 | // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50) |
1205 | ENTRY(nat_consumption) | 1244 | ENTRY(nat_consumption) |
1206 | DBG_FAULT(26) | 1245 | DBG_FAULT(26) |
1246 | |||
1247 | mov r16=cr.ipsr | ||
1248 | mov r17=cr.isr | ||
1249 | mov r31=pr // save PR | ||
1250 | ;; | ||
1251 | and r18=0xf,r17 // r18 = cr.ipsr.code{3:0} | ||
1252 | tbit.z p6,p0=r17,IA64_ISR_NA_BIT | ||
1253 | ;; | ||
1254 | cmp.ne.or p6,p0=IA64_ISR_CODE_LFETCH,r18 | ||
1255 | dep r16=-1,r16,IA64_PSR_ED_BIT,1 | ||
1256 | (p6) br.cond.spnt 1f // branch if (cr.ispr.na == 0 || cr.ipsr.code{3:0} != LFETCH) | ||
1257 | ;; | ||
1258 | mov cr.ipsr=r16 // set cr.ipsr.na | ||
1259 | mov pr=r31,-1 | ||
1260 | ;; | ||
1261 | rfi | ||
1262 | |||
1263 | 1: mov pr=r31,-1 | ||
1264 | ;; | ||
1207 | FAULT(26) | 1265 | FAULT(26) |
1208 | END(nat_consumption) | 1266 | END(nat_consumption) |
1209 | 1267 | ||
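The new nat_consumption fast path above reads more easily as a C-level sketch. The following is an illustration only, not code from the patch; isr and ipsr stand for cr.isr and cr.ipsr, and the constants are the ones the assembly already references:

    /* illustration of the added nat_consumption fast path */
    unsigned long code = isr & 0xf;                  /* cr.isr.code{3:0} */
    if (((isr >> IA64_ISR_NA_BIT) & 1) && code == IA64_ISR_CODE_LFETCH) {
            ipsr |= 1UL << IA64_PSR_ED_BIT;          /* set cr.ipsr.ed */
            rfi();                                   /* resume: the faulting speculative lfetch is skipped */
    } else {
            FAULT(26);                               /* any other NaT consumption is still a real fault */
    }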
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 6d57aebad485..bbb8bc7c0552 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -725,12 +725,32 @@ convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt, | |||
725 | break; | 725 | break; |
726 | } | 726 | } |
727 | 727 | ||
728 | /* | ||
729 | * Note: at the time of this call, the target task is blocked | ||
730 | * in notify_resume_user() and by clearling PRED_LEAVE_SYSCALL | ||
731 | * (aka, "pLvSys") we redirect execution from | ||
732 | * .work_pending_syscall_end to .work_processed_kernel. | ||
733 | */ | ||
728 | unw_get_pr(&prev_info, &pr); | 734 | unw_get_pr(&prev_info, &pr); |
729 | pr &= ~(1UL << PRED_SYSCALL); | 735 | pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL)); |
730 | pr |= (1UL << PRED_NON_SYSCALL); | 736 | pr |= (1UL << PRED_NON_SYSCALL); |
731 | unw_set_pr(&prev_info, pr); | 737 | unw_set_pr(&prev_info, pr); |
732 | 738 | ||
733 | pt->cr_ifs = (1UL << 63) | cfm; | 739 | pt->cr_ifs = (1UL << 63) | cfm; |
740 | /* | ||
741 | * Clear the memory that is NOT written on syscall-entry to | ||
742 | * ensure we do not leak kernel-state to user when execution | ||
743 | * resumes. | ||
744 | */ | ||
745 | pt->r2 = 0; | ||
746 | pt->r3 = 0; | ||
747 | pt->r14 = 0; | ||
748 | memset(&pt->r16, 0, 16*8); /* clear r16-r31 */ | ||
749 | memset(&pt->f6, 0, 6*16); /* clear f6-f11 */ | ||
750 | pt->b7 = 0; | ||
751 | pt->ar_ccv = 0; | ||
752 | pt->ar_csd = 0; | ||
753 | pt->ar_ssd = 0; | ||
734 | } | 754 | } |
735 | 755 | ||
736 | static int | 756 | static int |
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index d14692e0920a..2693e1522d7c 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -72,6 +72,8 @@ DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8); | |||
72 | unsigned long ia64_cycles_per_usec; | 72 | unsigned long ia64_cycles_per_usec; |
73 | struct ia64_boot_param *ia64_boot_param; | 73 | struct ia64_boot_param *ia64_boot_param; |
74 | struct screen_info screen_info; | 74 | struct screen_info screen_info; |
75 | unsigned long vga_console_iobase; | ||
76 | unsigned long vga_console_membase; | ||
75 | 77 | ||
76 | unsigned long ia64_max_cacheline_size; | 78 | unsigned long ia64_max_cacheline_size; |
77 | unsigned long ia64_iobase; /* virtual address for I/O accesses */ | 79 | unsigned long ia64_iobase; /* virtual address for I/O accesses */ |
@@ -273,23 +275,25 @@ io_port_init (void) | |||
273 | static inline int __init | 275 | static inline int __init |
274 | early_console_setup (char *cmdline) | 276 | early_console_setup (char *cmdline) |
275 | { | 277 | { |
278 | int earlycons = 0; | ||
279 | |||
276 | #ifdef CONFIG_SERIAL_SGI_L1_CONSOLE | 280 | #ifdef CONFIG_SERIAL_SGI_L1_CONSOLE |
277 | { | 281 | { |
278 | extern int sn_serial_console_early_setup(void); | 282 | extern int sn_serial_console_early_setup(void); |
279 | if (!sn_serial_console_early_setup()) | 283 | if (!sn_serial_console_early_setup()) |
280 | return 0; | 284 | earlycons++; |
281 | } | 285 | } |
282 | #endif | 286 | #endif |
283 | #ifdef CONFIG_EFI_PCDP | 287 | #ifdef CONFIG_EFI_PCDP |
284 | if (!efi_setup_pcdp_console(cmdline)) | 288 | if (!efi_setup_pcdp_console(cmdline)) |
285 | return 0; | 289 | earlycons++; |
286 | #endif | 290 | #endif |
287 | #ifdef CONFIG_SERIAL_8250_CONSOLE | 291 | #ifdef CONFIG_SERIAL_8250_CONSOLE |
288 | if (!early_serial_console_init(cmdline)) | 292 | if (!early_serial_console_init(cmdline)) |
289 | return 0; | 293 | earlycons++; |
290 | #endif | 294 | #endif |
291 | 295 | ||
292 | return -1; | 296 | return (earlycons) ? 0 : -1; |
293 | } | 297 | } |
294 | 298 | ||
295 | static inline void | 299 | static inline void |
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index b49d4ddaab93..0166a9847095 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -231,13 +231,16 @@ smp_flush_tlb_all (void) | |||
231 | void | 231 | void |
232 | smp_flush_tlb_mm (struct mm_struct *mm) | 232 | smp_flush_tlb_mm (struct mm_struct *mm) |
233 | { | 233 | { |
234 | preempt_disable(); | ||
234 | /* this happens for the common case of a single-threaded fork(): */ | 235 | /* this happens for the common case of a single-threaded fork(): */ |
235 | if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1)) | 236 | if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1)) |
236 | { | 237 | { |
237 | local_finish_flush_tlb_mm(mm); | 238 | local_finish_flush_tlb_mm(mm); |
239 | preempt_enable(); | ||
238 | return; | 240 | return; |
239 | } | 241 | } |
240 | 242 | ||
243 | preempt_enable(); | ||
241 | /* | 244 | /* |
242 | * We could optimize this further by using mm->cpu_vm_mask to track which CPUs | 245 | * We could optimize this further by using mm->cpu_vm_mask to track which CPUs |
243 | * have been running in the address space. It's not clear that this is worth the | 246 | * have been running in the address space. It's not clear that this is worth the |
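The preempt_disable()/preempt_enable() pair added above guards the single-threaded fast path: the local flush is only correct if the task cannot be preempted and migrated to another CPU between the current->active_mm check and local_finish_flush_tlb_mm(), since the CPU it left would otherwise keep stale translations for mm. A minimal sketch of the resulting flow (illustration only; the cross-CPU slow path shown in the surrounding context is unchanged):

    void smp_flush_tlb_mm (struct mm_struct *mm)
    {
            preempt_disable();
            /* single-threaded fork(): only the CPU we are running on can cache mm */
            if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1)) {
                    local_finish_flush_tlb_mm(mm);
                    preempt_enable();
                    return;
            }
            preempt_enable();
            /* ... fall through to the existing cross-CPU flush ... */
    }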
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 9e07f5463f21..783eb4323847 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -384,7 +384,7 @@ static int __init sn_pci_init(void) | |||
384 | extern void register_sn_procfs(void); | 384 | extern void register_sn_procfs(void); |
385 | #endif | 385 | #endif |
386 | 386 | ||
387 | if (!ia64_platform_is("sn2") || IS_RUNNING_ON_SIMULATOR()) | 387 | if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM()) |
388 | return 0; | 388 | return 0; |
389 | 389 | ||
390 | /* | 390 | /* |
diff --git a/arch/ia64/sn/kernel/iomv.c b/arch/ia64/sn/kernel/iomv.c
index fec6d8b8237b..7ce3cdad627b 100644
--- a/arch/ia64/sn/kernel/iomv.c
+++ b/arch/ia64/sn/kernel/iomv.c
@@ -9,12 +9,16 @@ | |||
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <asm/io.h> | 10 | #include <asm/io.h> |
11 | #include <asm/delay.h> | 11 | #include <asm/delay.h> |
12 | #include <asm/vga.h> | ||
12 | #include <asm/sn/nodepda.h> | 13 | #include <asm/sn/nodepda.h> |
13 | #include <asm/sn/simulator.h> | 14 | #include <asm/sn/simulator.h> |
14 | #include <asm/sn/pda.h> | 15 | #include <asm/sn/pda.h> |
15 | #include <asm/sn/sn_cpuid.h> | 16 | #include <asm/sn/sn_cpuid.h> |
16 | #include <asm/sn/shub_mmr.h> | 17 | #include <asm/sn/shub_mmr.h> |
17 | 18 | ||
19 | #define IS_LEGACY_VGA_IOPORT(p) \ | ||
20 | (((p) >= 0x3b0 && (p) <= 0x3bb) || ((p) >= 0x3c0 && (p) <= 0x3df)) | ||
21 | |||
18 | /** | 22 | /** |
19 | * sn_io_addr - convert an in/out port to an i/o address | 23 | * sn_io_addr - convert an in/out port to an i/o address |
20 | * @port: port to convert | 24 | * @port: port to convert |
@@ -26,6 +30,8 @@ | |||
26 | void *sn_io_addr(unsigned long port) | 30 | void *sn_io_addr(unsigned long port) |
27 | { | 31 | { |
28 | if (!IS_RUNNING_ON_SIMULATOR()) { | 32 | if (!IS_RUNNING_ON_SIMULATOR()) { |
33 | if (IS_LEGACY_VGA_IOPORT(port)) | ||
34 | port += vga_console_iobase; | ||
29 | /* On sn2, legacy I/O ports don't point at anything */ | 35 | /* On sn2, legacy I/O ports don't point at anything */ |
30 | if (port < (64 * 1024)) | 36 | if (port < (64 * 1024)) |
31 | return NULL; | 37 | return NULL; |
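The effect of the new IS_LEGACY_VGA_IOPORT() check is to rebase legacy VGA/MDA register ports onto the bus that actually hosts the console adapter. Roughly, and as an illustration only (vga_console_iobase is assumed to be initialized by firmware/PCDP handling outside this diff):

    /* in sn_io_addr(), before the usual sn2 port translation */
    if (IS_LEGACY_VGA_IOPORT(port))        /* 0x3b0-0x3bb or 0x3c0-0x3df */
            port += vga_console_iobase;    /* e.g. 0x3c0 becomes vga_console_iobase + 0x3c0 */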
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 44bfc7f318cb..22e10d282c7f 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -36,6 +36,7 @@ | |||
36 | #include <asm/machvec.h> | 36 | #include <asm/machvec.h> |
37 | #include <asm/system.h> | 37 | #include <asm/system.h> |
38 | #include <asm/processor.h> | 38 | #include <asm/processor.h> |
39 | #include <asm/vga.h> | ||
39 | #include <asm/sn/arch.h> | 40 | #include <asm/sn/arch.h> |
40 | #include <asm/sn/addrs.h> | 41 | #include <asm/sn/addrs.h> |
41 | #include <asm/sn/pda.h> | 42 | #include <asm/sn/pda.h> |
@@ -95,6 +96,7 @@ u8 sn_coherency_id; | |||
95 | EXPORT_SYMBOL(sn_coherency_id); | 96 | EXPORT_SYMBOL(sn_coherency_id); |
96 | u8 sn_region_size; | 97 | u8 sn_region_size; |
97 | EXPORT_SYMBOL(sn_region_size); | 98 | EXPORT_SYMBOL(sn_region_size); |
99 | int sn_prom_type; /* 0=hardware, 1=medusa/realprom, 2=medusa/fakeprom */ | ||
98 | 100 | ||
99 | short physical_node_map[MAX_PHYSNODE_ID]; | 101 | short physical_node_map[MAX_PHYSNODE_ID]; |
100 | 102 | ||
@@ -273,14 +275,17 @@ void __init sn_setup(char **cmdline_p) | |||
273 | 275 | ||
274 | ia64_sn_plat_set_error_handling_features(); | 276 | ia64_sn_plat_set_error_handling_features(); |
275 | 277 | ||
278 | #if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) | ||
276 | /* | 279 | /* |
277 | * If the generic code has enabled vga console support - lets | 280 | * If there was a primary vga adapter identified through the |
278 | * get rid of it again. This is a kludge for the fact that ACPI | 281 | * EFI PCDP table, make it the preferred console. Otherwise |
279 | * currtently has no way of informing us if legacy VGA is available | 282 | * zero out conswitchp. |
280 | * or not. | ||
281 | */ | 283 | */ |
282 | #if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) | 284 | |
283 | if (conswitchp == &vga_con) { | 285 | if (vga_console_membase) { |
286 | /* usable vga ... make tty0 the preferred default console */ | ||
287 | add_preferred_console("tty", 0, NULL); | ||
288 | } else { | ||
284 | printk(KERN_DEBUG "SGI: Disabling VGA console\n"); | 289 | printk(KERN_DEBUG "SGI: Disabling VGA console\n"); |
285 | #ifdef CONFIG_DUMMY_CONSOLE | 290 | #ifdef CONFIG_DUMMY_CONSOLE |
286 | conswitchp = &dummy_con; | 291 | conswitchp = &dummy_con; |
@@ -350,7 +355,7 @@ void __init sn_setup(char **cmdline_p) | |||
350 | 355 | ||
351 | ia64_mark_idle = &snidle; | 356 | ia64_mark_idle = &snidle; |
352 | 357 | ||
353 | /* | 358 | /* |
354 | * For the bootcpu, we do this here. All other cpus will make the | 359 | * For the bootcpu, we do this here. All other cpus will make the |
355 | * call as part of cpu_init in slave cpu initialization. | 360 | * call as part of cpu_init in slave cpu initialization. |
356 | */ | 361 | */ |
@@ -397,7 +402,7 @@ static void __init sn_init_pdas(char **cmdline_p) | |||
397 | nodepdaindr[cnode] = | 402 | nodepdaindr[cnode] = |
398 | alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t)); | 403 | alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t)); |
399 | memset(nodepdaindr[cnode], 0, sizeof(nodepda_t)); | 404 | memset(nodepdaindr[cnode], 0, sizeof(nodepda_t)); |
400 | memset(nodepdaindr[cnode]->phys_cpuid, -1, | 405 | memset(nodepdaindr[cnode]->phys_cpuid, -1, |
401 | sizeof(nodepdaindr[cnode]->phys_cpuid)); | 406 | sizeof(nodepdaindr[cnode]->phys_cpuid)); |
402 | } | 407 | } |
403 | 408 | ||
@@ -427,7 +432,7 @@ static void __init sn_init_pdas(char **cmdline_p) | |||
427 | } | 432 | } |
428 | 433 | ||
429 | /* | 434 | /* |
430 | * Initialize the per node hubdev. This includes IO Nodes and | 435 | * Initialize the per node hubdev. This includes IO Nodes and |
431 | * headless/memless nodes. | 436 | * headless/memless nodes. |
432 | */ | 437 | */ |
433 | for (cnode = 0; cnode < numionodes; cnode++) { | 438 | for (cnode = 0; cnode < numionodes; cnode++) { |
@@ -455,6 +460,14 @@ void __init sn_cpu_init(void) | |||
455 | int i; | 460 | int i; |
456 | static int wars_have_been_checked; | 461 | static int wars_have_been_checked; |
457 | 462 | ||
463 | if (smp_processor_id() == 0 && IS_MEDUSA()) { | ||
464 | if (ia64_sn_is_fake_prom()) | ||
465 | sn_prom_type = 2; | ||
466 | else | ||
467 | sn_prom_type = 1; | ||
468 | printk("Running on medusa with %s PROM\n", (sn_prom_type == 1) ? "real" : "fake"); | ||
469 | } | ||
470 | |||
458 | memset(pda, 0, sizeof(pda)); | 471 | memset(pda, 0, sizeof(pda)); |
459 | if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2, &sn_hub_info->nasid_bitmask, &sn_hub_info->nasid_shift, | 472 | if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2, &sn_hub_info->nasid_bitmask, &sn_hub_info->nasid_shift, |
460 | &sn_system_size, &sn_sharing_domain_size, &sn_partition_id, | 473 | &sn_system_size, &sn_sharing_domain_size, &sn_partition_id, |
@@ -520,7 +533,7 @@ void __init sn_cpu_init(void) | |||
520 | */ | 533 | */ |
521 | { | 534 | { |
522 | u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0}; | 535 | u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0}; |
523 | u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_1, | 536 | u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_1, |
524 | SH2_PIO_WRITE_STATUS_2, SH2_PIO_WRITE_STATUS_3}; | 537 | SH2_PIO_WRITE_STATUS_2, SH2_PIO_WRITE_STATUS_3}; |
525 | u64 *pio; | 538 | u64 *pio; |
526 | pio = is_shub1() ? pio1 : pio2; | 539 | pio = is_shub1() ? pio1 : pio2; |
@@ -552,6 +565,10 @@ static void __init scan_for_ionodes(void) | |||
552 | int nasid = 0; | 565 | int nasid = 0; |
553 | lboard_t *brd; | 566 | lboard_t *brd; |
554 | 567 | ||
568 | /* fakeprom does not support klgraph */ | ||
569 | if (IS_RUNNING_ON_FAKE_PROM()) | ||
570 | return; | ||
571 | |||
555 | /* Setup ionodes with memory */ | 572 | /* Setup ionodes with memory */ |
556 | for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid += 2) { | 573 | for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid += 2) { |
557 | char *klgraph_header; | 574 | char *klgraph_header; |
@@ -563,8 +580,6 @@ static void __init scan_for_ionodes(void) | |||
563 | cnodeid = -1; | 580 | cnodeid = -1; |
564 | klgraph_header = __va(ia64_sn_get_klconfig_addr(nasid)); | 581 | klgraph_header = __va(ia64_sn_get_klconfig_addr(nasid)); |
565 | if (!klgraph_header) { | 582 | if (!klgraph_header) { |
566 | if (IS_RUNNING_ON_SIMULATOR()) | ||
567 | continue; | ||
568 | BUG(); /* All nodes must have klconfig tables! */ | 583 | BUG(); /* All nodes must have klconfig tables! */ |
569 | } | 584 | } |
570 | cnodeid = nasid_to_cnodeid(nasid); | 585 | cnodeid = nasid_to_cnodeid(nasid); |
@@ -630,8 +645,8 @@ int | |||
630 | nasid_slice_to_cpuid(int nasid, int slice) | 645 | nasid_slice_to_cpuid(int nasid, int slice) |
631 | { | 646 | { |
632 | long cpu; | 647 | long cpu; |
633 | 648 | ||
634 | for (cpu=0; cpu < NR_CPUS; cpu++) | 649 | for (cpu=0; cpu < NR_CPUS; cpu++) |
635 | if (cpuid_to_nasid(cpu) == nasid && | 650 | if (cpuid_to_nasid(cpu) == nasid && |
636 | cpuid_to_slice(cpu) == slice) | 651 | cpuid_to_slice(cpu) == slice) |
637 | return cpu; | 652 | return cpu; |
diff --git a/arch/ia64/sn/kernel/sn2/ptc_deadlock.S b/arch/ia64/sn/kernel/sn2/ptc_deadlock.S
index 7947312801ec..96cb71d15682 100644
--- a/arch/ia64/sn/kernel/sn2/ptc_deadlock.S
+++ b/arch/ia64/sn/kernel/sn2/ptc_deadlock.S
@@ -6,6 +6,7 @@ | |||
6 | * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved. | 6 | * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <asm/types.h> | ||
9 | #include <asm/sn/shub_mmr.h> | 10 | #include <asm/sn/shub_mmr.h> |
10 | 11 | ||
11 | #define DEADLOCKBIT SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT | 12 | #define DEADLOCKBIT SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT |
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index a087b274847e..8716f4d5314b 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -204,8 +204,8 @@ cx_device_register(nasid_t nasid, int part_num, int mfg_num, | |||
204 | cx_dev->dev.parent = NULL; | 204 | cx_dev->dev.parent = NULL; |
205 | cx_dev->dev.bus = &tiocx_bus_type; | 205 | cx_dev->dev.bus = &tiocx_bus_type; |
206 | cx_dev->dev.release = tiocx_bus_release; | 206 | cx_dev->dev.release = tiocx_bus_release; |
207 | snprintf(cx_dev->dev.bus_id, BUS_ID_SIZE, "%d.0x%x", | 207 | snprintf(cx_dev->dev.bus_id, BUS_ID_SIZE, "%d", |
208 | cx_dev->cx_id.nasid, cx_dev->cx_id.part_num); | 208 | cx_dev->cx_id.nasid); |
209 | device_register(&cx_dev->dev); | 209 | device_register(&cx_dev->dev); |
210 | get_device(&cx_dev->dev); | 210 | get_device(&cx_dev->dev); |
211 | 211 | ||
@@ -236,7 +236,6 @@ int cx_device_unregister(struct cx_dev *cx_dev) | |||
236 | */ | 236 | */ |
237 | static int cx_device_reload(struct cx_dev *cx_dev) | 237 | static int cx_device_reload(struct cx_dev *cx_dev) |
238 | { | 238 | { |
239 | device_remove_file(&cx_dev->dev, &dev_attr_cxdev_control); | ||
240 | cx_device_unregister(cx_dev); | 239 | cx_device_unregister(cx_dev); |
241 | return cx_device_register(cx_dev->cx_id.nasid, cx_dev->cx_id.part_num, | 240 | return cx_device_register(cx_dev->cx_id.nasid, cx_dev->cx_id.part_num, |
242 | cx_dev->cx_id.mfg_num, cx_dev->hubdev); | 241 | cx_dev->cx_id.mfg_num, cx_dev->hubdev); |
@@ -383,6 +382,7 @@ static int is_fpga_brick(int nasid) | |||
383 | switch (tiocx_btchar_get(nasid)) { | 382 | switch (tiocx_btchar_get(nasid)) { |
384 | case L1_BRICKTYPE_SA: | 383 | case L1_BRICKTYPE_SA: |
385 | case L1_BRICKTYPE_ATHENA: | 384 | case L1_BRICKTYPE_ATHENA: |
385 | case L1_BRICKTYPE_DAYTONA: | ||
386 | return 1; | 386 | return 1; |
387 | } | 387 | } |
388 | return 0; | 388 | return 0; |
@@ -409,7 +409,7 @@ static int tiocx_reload(struct cx_dev *cx_dev) | |||
409 | uint64_t cx_id; | 409 | uint64_t cx_id; |
410 | 410 | ||
411 | cx_id = | 411 | cx_id = |
412 | *(volatile int32_t *)(TIO_SWIN_BASE(nasid, TIOCX_CORELET) + | 412 | *(volatile uint64_t *)(TIO_SWIN_BASE(nasid, TIOCX_CORELET) + |
413 | WIDGET_ID); | 413 | WIDGET_ID); |
414 | part_num = XWIDGET_PART_NUM(cx_id); | 414 | part_num = XWIDGET_PART_NUM(cx_id); |
415 | mfg_num = XWIDGET_MFG_NUM(cx_id); | 415 | mfg_num = XWIDGET_MFG_NUM(cx_id); |
@@ -458,6 +458,10 @@ static ssize_t store_cxdev_control(struct device *dev, struct device_attribute * | |||
458 | 458 | ||
459 | switch (n) { | 459 | switch (n) { |
460 | case 1: | 460 | case 1: |
461 | tio_corelet_reset(cx_dev->cx_id.nasid, TIOCX_CORELET); | ||
462 | tiocx_reload(cx_dev); | ||
463 | break; | ||
464 | case 2: | ||
461 | tiocx_reload(cx_dev); | 465 | tiocx_reload(cx_dev); |
462 | break; | 466 | break; |
463 | case 3: | 467 | case 3: |
@@ -537,7 +541,7 @@ static void __exit tiocx_exit(void) | |||
537 | bus_unregister(&tiocx_bus_type); | 541 | bus_unregister(&tiocx_bus_type); |
538 | } | 542 | } |
539 | 543 | ||
540 | module_init(tiocx_init); | 544 | subsys_initcall(tiocx_init); |
541 | module_exit(tiocx_exit); | 545 | module_exit(tiocx_exit); |
542 | 546 | ||
543 | /************************************************************************ | 547 | /************************************************************************ |
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 8dae9eb45456..05aa8c2fe9bb 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -336,7 +336,7 @@ tioca_dma_d48(struct pci_dev *pdev, uint64_t paddr) | |||
336 | if (!ct_addr) | 336 | if (!ct_addr) |
337 | return 0; | 337 | return 0; |
338 | 338 | ||
339 | bus_addr = (dma_addr_t) (ct_addr & 0xffffffffffff); | 339 | bus_addr = (dma_addr_t) (ct_addr & 0xffffffffffffUL); |
340 | node_upper = ct_addr >> 48; | 340 | node_upper = ct_addr >> 48; |
341 | 341 | ||
342 | if (node_upper > 64) { | 342 | if (node_upper > 64) { |
@@ -464,7 +464,7 @@ map_return: | |||
464 | * For mappings created using the direct modes (64 or 48) there are no | 464 | * For mappings created using the direct modes (64 or 48) there are no |
465 | * resources to release. | 465 | * resources to release. |
466 | */ | 466 | */ |
467 | void | 467 | static void |
468 | tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) | 468 | tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) |
469 | { | 469 | { |
470 | int i, entry; | 470 | int i, entry; |
@@ -514,7 +514,7 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) | |||
514 | * The mapping mode used is based on the devices dma_mask. As a last resort | 514 | * The mapping mode used is based on the devices dma_mask. As a last resort |
515 | * use the GART mapped mode. | 515 | * use the GART mapped mode. |
516 | */ | 516 | */ |
517 | uint64_t | 517 | static uint64_t |
518 | tioca_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count) | 518 | tioca_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count) |
519 | { | 519 | { |
520 | uint64_t mapaddr; | 520 | uint64_t mapaddr; |
@@ -580,7 +580,7 @@ tioca_error_intr_handler(int irq, void *arg, struct pt_regs *pt) | |||
580 | * On successful setup, returns the kernel version of tioca_common back to | 580 | * On successful setup, returns the kernel version of tioca_common back to |
581 | * the caller. | 581 | * the caller. |
582 | */ | 582 | */ |
583 | void * | 583 | static void * |
584 | tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft) | 584 | tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft) |
585 | { | 585 | { |
586 | struct tioca_common *tioca_common; | 586 | struct tioca_common *tioca_common; |