diff options
author | David Mosberger-Tang <davidm@hpl.hp.com> | 2005-04-28 00:20:11 -0400 |
---|---|---|
committer | Tony Luck <tony.luck@intel.com> | 2005-04-28 00:20:11 -0400 |
commit | 21bc4f9b34cc1eab3610955207f72c52495ae8ed (patch) | |
tree | 2fc89a932dcc7e0b62717bbf0838136346ebd193 | |
parent | 70929a57cfea8c18de13fcea9ae6771018a98949 (diff) |
[IA64] Annotate __kernel_syscall_via_epc() with McKinley dispatch info.
Two other very minor changes: use "mov.i" instead of "mov" for reading
ar.pfs (for clarity; doesn't affect the code at all). Also, predicate
the load of r14 for consistency.
Signed-off-by: David Mosberger-Tang <davidm@hpl.hp.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
-rw-r--r-- | arch/ia64/kernel/gate.S | 44 |
1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S index 3cd3f2e971f6..272e64c0e21b 100644 --- a/arch/ia64/kernel/gate.S +++ b/arch/ia64/kernel/gate.S | |||
@@ -72,41 +72,41 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc) | |||
72 | * bundle get executed. The remaining code must be safe even if | 72 | * bundle get executed. The remaining code must be safe even if |
73 | * they do not get executed. | 73 | * they do not get executed. |
74 | */ | 74 | */ |
75 | adds r17=-1024,r15 | 75 | adds r17=-1024,r15 // A |
76 | mov r10=0 // default to successful syscall execution | 76 | mov r10=0 // A default to successful syscall execution |
77 | epc | 77 | epc // B causes split-issue |
78 | } | 78 | } |
79 | ;; | 79 | ;; |
80 | rsm psr.be // note: on McKinley "rsm psr.be/srlz.d" is slightly faster than "rum psr.be" | 80 | rsm psr.be // M2 (5 cyc to srlz.d) |
81 | LOAD_FSYSCALL_TABLE(r14) | 81 | LOAD_FSYSCALL_TABLE(r14) // X |
82 | ;; | 82 | ;; |
83 | mov r16=IA64_KR(CURRENT) // 12 cycle read latency | 83 | mov r16=IA64_KR(CURRENT) // M2 (12 cyc) |
84 | shladd r18=r17,3,r14 | 84 | shladd r18=r17,3,r14 // A |
85 | mov r19=NR_syscalls-1 | 85 | mov r19=NR_syscalls-1 // A |
86 | ;; | 86 | ;; |
87 | lfetch [r18] // M0|1 | 87 | lfetch [r18] // M0|1 |
88 | mov r29=psr // read psr (12 cyc load latency) | 88 | mov r29=psr // M2 (12 cyc) |
89 | /* Note: if r17 is a NaT, p6 will be set to zero. */ | 89 | // If r17 is a NaT, p6 will be zero |
90 | cmp.geu p6,p7=r19,r17 // (syscall > 0 && syscall < 1024+NR_syscalls)? | 90 | cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)? |
91 | ;; | 91 | ;; |
92 | mov r21=ar.fpsr | 92 | mov r21=ar.fpsr // M2 (12 cyc) |
93 | tnat.nz p10,p9=r15 | 93 | tnat.nz p10,p9=r15 // I0 |
94 | mov r26=ar.pfs | 94 | mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...) |
95 | ;; | 95 | ;; |
96 | srlz.d | 96 | srlz.d // M0 (forces split-issue) ensure PSR.BE==0 |
97 | (p6) ld8 r18=[r18] | 97 | (p6) ld8 r18=[r18] // M0|1 |
98 | nop.i 0 | 98 | nop.i 0 |
99 | ;; | 99 | ;; |
100 | nop.m 0 | 100 | nop.m 0 |
101 | (p6) mov b7=r18 | 101 | (p6) mov b7=r18 // I0 |
102 | (p6) tbit.z.unc p8,p0=r18,0 | 102 | (p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!) |
103 | 103 | ||
104 | nop.m 0 | 104 | nop.m 0 |
105 | nop.i 0 | 105 | nop.i 0 |
106 | (p8) br.dptk.many b7 | 106 | (p8) br.dptk.many b7 // B |
107 | 107 | ||
108 | mov r27=ar.rsc | 108 | mov r27=ar.rsc // M2 (12 cyc) |
109 | (p6) rsm psr.i | 109 | (p6) rsm psr.i // M2 |
110 | /* | 110 | /* |
111 | * brl.cond doesn't work as intended because the linker would convert this branch | 111 | * brl.cond doesn't work as intended because the linker would convert this branch |
112 | * into a branch to a PLT. Perhaps there will be a way to avoid this with some | 112 | * into a branch to a PLT. Perhaps there will be a way to avoid this with some |
@@ -114,7 +114,7 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc) | |||
114 | * instead. | 114 | * instead. |
115 | */ | 115 | */ |
116 | #ifdef CONFIG_ITANIUM | 116 | #ifdef CONFIG_ITANIUM |
117 | add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry | 117 | (p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry |
118 | ;; | 118 | ;; |
119 | (p6) ld8 r14=[r14] // r14 <- fsys_bubble_down | 119 | (p6) ld8 r14=[r14] // r14 <- fsys_bubble_down |
120 | ;; | 120 | ;; |