author     David S. Miller <davem@sunset.davemloft.net>   2006-02-05 23:47:26 -0500
committer  David S. Miller <davem@sunset.davemloft.net>   2006-03-20 04:11:47 -0500
commit     6e02493a7f33ac89e698b980a657d77ab2749eaf (patch)
tree       fcb6a17c2222a1aa601faf95a50a92b7a7a9224a /arch/sparc64/kernel/etrap.S
parent     d619d7f11670f5b1cfca30e6645e44c8a6014820 (diff)

[SPARC64]: Fill dead cycles on trap entry with real work.
As we save trap state onto the stack, the store buffer fills up
mid-way through and we stall for several cycles as the store buffer
trickles out to the L2 cache. Meanwhile we can do some privileged
register reads and other calculations, essentially for free.
Signed-off-by: David S. Miller <davem@davemloft.net>
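
To make the scheduling concrete, here is a short excerpt of the new store sequence from the hunk below; the comments are added here for explanation and are not in the source. While the earlier stx instructions are still draining from the store buffer, the interleaved privileged-register read and ALU work build up the %tstate image used by the new "done" exit path:

		stx	%g2, [%sp + PTREGS_OFF + PT_V9_G2]
		sllx	%l7, 24, %l7		! shift the saved ASI into its %tstate field
		stx	%g3, [%sp + PTREGS_OFF + PT_V9_G3]
		rdpr	%cwp, %l0		! privileged read overlaps the store-buffer stall
		stx	%g4, [%sp + PTREGS_OFF + PT_V9_G4]
		stx	%g5, [%sp + PTREGS_OFF + PT_V9_G5]
		stx	%g6, [%sp + PTREGS_OFF + PT_V9_G6]
		stx	%g7, [%sp + PTREGS_OFF + PT_V9_G7]
		or	%l7, %l0, %l7		! merge CWP into the %tstate image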
Diffstat (limited to 'arch/sparc64/kernel/etrap.S')
 arch/sparc64/kernel/etrap.S | 27 +++++++++++++++------------
 1 file changed, 15 insertions(+), 12 deletions(-)
diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
index 4a0e01b14044..f2556146a735 100644
--- a/arch/sparc64/kernel/etrap.S
+++ b/arch/sparc64/kernel/etrap.S
@@ -98,37 +98,40 @@ etrap_save:	save	%g2, -STACK_BIAS, %sp
 		stxa	%g3, [%l4] ASI_DMMU
 		sethi	%hi(KERNBASE), %l4
 		flush	%l4
-		wr	%g0, ASI_AIUS, %asi
-2:		wrpr	%g0, 0x0, %tl
-		mov	%g4, %l4
+		mov	ASI_AIUS, %l7
+2:		mov	%g4, %l4
 		mov	%g5, %l5
-
-		mov	%g7, %l2
+		add	%g7, 4, %l2
 		wrpr	%g0, ETRAP_PSTATE1, %pstate
 		stx	%g1, [%sp + PTREGS_OFF + PT_V9_G1]
 		stx	%g2, [%sp + PTREGS_OFF + PT_V9_G2]
+		sllx	%l7, 24, %l7
 		stx	%g3, [%sp + PTREGS_OFF + PT_V9_G3]
+		rdpr	%cwp, %l0
 		stx	%g4, [%sp + PTREGS_OFF + PT_V9_G4]
 		stx	%g5, [%sp + PTREGS_OFF + PT_V9_G5]
 		stx	%g6, [%sp + PTREGS_OFF + PT_V9_G6]
-
 		stx	%g7, [%sp + PTREGS_OFF + PT_V9_G7]
+		or	%l7, %l0, %l7
+		sethi	%hi(TSTATE_RMO | TSTATE_PEF), %l0
+		or	%l7, %l0, %l7
+		wrpr	%l2, %tnpc
+		wrpr	%l7, (TSTATE_PRIV | TSTATE_IE), %tstate
 		stx	%i0, [%sp + PTREGS_OFF + PT_V9_I0]
 		stx	%i1, [%sp + PTREGS_OFF + PT_V9_I1]
 		stx	%i2, [%sp + PTREGS_OFF + PT_V9_I2]
 		stx	%i3, [%sp + PTREGS_OFF + PT_V9_I3]
 		stx	%i4, [%sp + PTREGS_OFF + PT_V9_I4]
 		stx	%i5, [%sp + PTREGS_OFF + PT_V9_I5]
-
 		stx	%i6, [%sp + PTREGS_OFF + PT_V9_I6]
-		stx	%i7, [%sp + PTREGS_OFF + PT_V9_I7]
-		wrpr	%g0, ETRAP_PSTATE2, %pstate
 		mov	%l6, %g6
+		stx	%i7, [%sp + PTREGS_OFF + PT_V9_I7]
 		LOAD_PER_CPU_BASE(%g5, %g6, %g4, %g3, %l1)
-		jmpl	%l2 + 0x4, %g0
-		 ldx	[%g6 + TI_TASK], %g4
+		ldx	[%g6 + TI_TASK], %g4
+		done
 
-3:		ldub	[%l6 + TI_FPDEPTH], %l5
+3:		mov	ASI_P, %l7
+		ldub	[%l6 + TI_FPDEPTH], %l5
 		add	%l6, TI_FPSAVED + 1, %l4
 		srl	%l5, 1, %l3
 		add	%l5, 2, %l5