author		David S. Miller <davem@davemloft.net>	2010-08-09 01:03:59 -0400
committer	David S. Miller <davem@davemloft.net>	2010-08-09 01:07:36 -0400
commit		b11287e8c5b2797b86351f6db0fcd9ff99b20bab (patch)
tree		560985ca36806d4463f71db4eb4ac4dd538a3cca
parent		c8837434e8bfd08abf3b596dbaeffe4a8b59a284 (diff)
sparc64: Fix perf_arch_get_caller_regs().
After b0f82b81fe6bbcf78d478071f33e44554726bc81 ("perf: Drop the skip
argument from perf_arch_fetch_regs_caller") the build broke on sparc64
due to the lack of a module symbol export of __perf_arch_fetch_caller_regs.
But that assembler helper can actually be completely eliminated now that
the semantics of this interface have been greatly simplified.
Signed-off-by: David S. Miller <davem@davemloft.net>
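
For context (not part of the patch): with the skip argument gone, the generic perf code simply hands the architecture a pt_regs and an instruction pointer and expects the caller's frame to be recorded. A rough sketch of the consuming side, assuming the perf_fetch_caller_regs() wrapper and CALLER_ADDR0 as they looked in kernels of this era:

/* Sketch only -- mirrors the generic include/linux/perf_event.h helper
 * of the time; shown here to illustrate the simplified interface that
 * the new sparc64 macro implements. */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));

	/* No skip count any more: the architecture records its immediate
	 * caller, which sparc64 can now do with a few inline reads. */
	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}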
-rw-r--r--	arch/sparc/include/asm/perf_event.h	25
-rw-r--r--	arch/sparc/kernel/helpers.S		75
2 files changed, 20 insertions(+), 80 deletions(-)
diff --git a/arch/sparc/include/asm/perf_event.h b/arch/sparc/include/asm/perf_event.h
index 74c4e0cd889c..727af70646cb 100644
--- a/arch/sparc/include/asm/perf_event.h
+++ b/arch/sparc/include/asm/perf_event.h
@@ -10,11 +10,26 @@ extern void set_perf_event_pending(void);
 
 extern void init_hw_perf_events(void);
 
-extern void
-__perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
-
-#define perf_arch_fetch_caller_regs(pt_regs, ip)	\
-	__perf_arch_fetch_caller_regs(pt_regs, ip, 1);
+#define perf_arch_fetch_caller_regs(regs, ip)		\
+do {							\
+	unsigned long _pstate, _asi, _pil, _i7, _fp;	\
+	__asm__ __volatile__("rdpr %%pstate, %0\n\t"	\
+			     "rd %%asi, %1\n\t"		\
+			     "rdpr %%pil, %2\n\t"	\
+			     "mov %%i7, %3\n\t"		\
+			     "mov %%i6, %4\n\t"		\
+			     : "=r" (_pstate),		\
+			       "=r" (_asi),		\
+			       "=r" (_pil),		\
+			       "=r" (_i7),		\
+			       "=r" (_fp));		\
+	(regs)->tstate = (_pstate << 8) |		\
+		(_asi << 24) | (_pil << 20);		\
+	(regs)->tpc = (ip);				\
+	(regs)->tnpc = (regs)->tpc + 4;			\
+	(regs)->u_regs[UREG_I6] = _fp;			\
+	(regs)->u_regs[UREG_I7] = _i7;			\
+} while (0)
 #else
 static inline void init_hw_perf_events(void)	{ }
 #endif
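
The shift counts in the new macro are not arbitrary: they place %pstate, %pil and %asi where the sparc64 pt_regs convention keeps them inside a %tstate word. As an illustration only (assuming the TSTATE_PSTATE, TSTATE_PIL and TSTATE_ASI masks from arch/sparc/include/asm/pstate.h), the packing can be written out in plain C with a hypothetical helper:

/* Illustration, not part of the patch.  Assumed field positions:
 *   TSTATE_PSTATE  bits  8-19
 *   TSTATE_PIL     bits 20-23
 *   TSTATE_ASI     bits 24-31
 */
static inline unsigned long fake_tstate(unsigned long pstate,
					unsigned long asi,
					unsigned long pil)
{
	/* Same packing the macro above builds for the sampled regs. */
	return (pstate << 8) | (asi << 24) | (pil << 20);
}

The old assembler helper below stored only the pstate part ("sllx %o5, 8" before the PT_V9_TSTATE store); the %asi and %pil bits were not filled in there.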
diff --git a/arch/sparc/kernel/helpers.S b/arch/sparc/kernel/helpers.S
index 682fee06a16b..314dd0c9fc5b 100644
--- a/arch/sparc/kernel/helpers.S
+++ b/arch/sparc/kernel/helpers.S
@@ -46,81 +46,6 @@ stack_trace_flush:
 	 nop
 	.size		stack_trace_flush,.-stack_trace_flush
 
-#ifdef CONFIG_PERF_EVENTS
-	.globl		__perf_arch_fetch_caller_regs
-	.type		__perf_arch_fetch_caller_regs,#function
-__perf_arch_fetch_caller_regs:
-	/* We always read the %pstate into %o5 since we will use
-	 * that to construct a fake %tstate to store into the regs.
-	 */
-	rdpr		%pstate, %o5
-	brz,pn		%o2, 50f
-	 mov		%o2, %g7
-
-	/* Turn off interrupts while we walk around the register
-	 * window by hand.
-	 */
-	wrpr		%o5, PSTATE_IE, %pstate
-
-	/* The %canrestore tells us how many register windows are
-	 * still live in the chip above us, past that we have to
-	 * walk the frame as saved on the stack.  We stash away
-	 * the %cwp in %g1 so we can return back to the original
-	 * register window.
-	 */
-	rdpr		%cwp, %g1
-	rdpr		%canrestore, %g2
-	sub		%g1, 1, %g3
-
-	/* We have the skip count in %g7, if it hits zero then
-	 * %fp/%i7 are the registers we need.  Otherwise if our
-	 * %canrestore count maintained in %g2 hits zero we have
-	 * to start traversing the stack.
-	 */
-10:	brz,pn		%g2, 4f
-	 sub		%g2, 1, %g2
-	wrpr		%g3, %cwp
-	subcc		%g7, 1, %g7
-	bne,pt		%xcc, 10b
-	 sub		%g3, 1, %g3
-
-	/* We found the values we need in the cpu's register
-	 * windows.
-	 */
-	mov		%fp, %g3
-	ba,pt		%xcc, 3f
-	 mov		%i7, %g2
-
-50:	mov		%fp, %g3
-	ba,pt		%xcc, 2f
-	 mov		%i7, %g2
-
-	/* We hit the end of the valid register windows in the
-	 * cpu, start traversing the stack frame.
-	 */
-4:	mov		%fp, %g3
-
-20:	ldx		[%g3 + STACK_BIAS + RW_V9_I7], %g2
-	subcc		%g7, 1, %g7
-	bne,pn		%xcc, 20b
-	 ldx		[%g3 + STACK_BIAS + RW_V9_I6], %g3
-
-	/* Restore the current register window position and
-	 * re-enable interrupts.
-	 */
-3:	wrpr		%g1, %cwp
-	wrpr		%o5, %pstate
-
-2:	stx		%g3, [%o0 + PT_V9_FP]
-	sllx		%o5, 8, %o5
-	stx		%o5, [%o0 + PT_V9_TSTATE]
-	stx		%g2, [%o0 + PT_V9_TPC]
-	add		%g2, 4, %g2
-	retl
-	 stx		%g2, [%o0 + PT_V9_TNPC]
-	.size		perf_arch_fetch_caller_regs,.-perf_arch_fetch_caller_regs
-#endif /* CONFIG_PERF_EVENTS */
-
 #ifdef CONFIG_SMP
 	.globl		hard_smp_processor_id
 	.type		hard_smp_processor_id,#function