author     Anton Blanchard <anton@samba.org>     2006-03-26 20:00:45 -0500
committer  Paul Mackerras <paulus@samba.org>     2006-03-28 21:44:16 -0500
commit     15e812ad849e142e3dfc984d33c4d8042389f148
tree       e2d50c583457e8c4b2d06eea9846c04d0f5629d7
parent     fa465f8c7008c6cab32b05f3f1af57f7c86e8873
[PATCH] powerpc: Remove oprofile spinlock backtrace code
Remove the oprofile spinlock backtrace code now that we have proper calltrace support. Also make the MMCRA SIHV and SIPR bits variables, since they may change on future CPUs. Finally, MMCRA should be a 64-bit quantity.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
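The rationale for turning the SIHV/SIPR masks into variables can be sketched with a small standalone C program (a minimal illustration, not kernel code: the bit values, the cpu_setup_future() hook, and the main() harness are hypothetical; only the pattern of defaulting mmcra_sihv/mmcra_sipr to the POWER4-era masks mirrors the patch):

#include <stdio.h>

/* POWER4-style MMCRA sample-state masks (values illustrative only). */
#define MMCRA_SIHV 0x10000000UL	/* SIAR was sampled in the hypervisor */
#define MMCRA_SIPR 0x08000000UL	/* SIAR was sampled in user mode */

/*
 * As in the patch: keep the bit positions in variables so a future CPU
 * whose MMCRA layout differs can override them at setup time instead of
 * hard-coding MMCRA_SIHV/MMCRA_SIPR throughout the sample-decoding code.
 */
static unsigned long mmcra_sihv = MMCRA_SIHV;
static unsigned long mmcra_sipr = MMCRA_SIPR;

/* Hypothetical setup hook for a later CPU that moved the bits. */
static void cpu_setup_future(void)
{
	mmcra_sihv = 0x04000000UL;
	mmcra_sipr = 0x02000000UL;
}

/* Decide where the sampled instruction came from, as get_pc() does. */
static const char *sample_state(unsigned long mmcra)
{
	if (mmcra & mmcra_sihv)
		return "hypervisor";
	if (mmcra & mmcra_sipr)
		return "user";
	return "kernel";
}

int main(void)
{
	unsigned long mmcra = 0x08000000UL;	/* pretend SIPR is set */

	printf("POWER4 layout: %s\n", sample_state(mmcra));
	cpu_setup_future();			/* same value, newer CPU */
	printf("future layout: %s\n", sample_state(mmcra));
	return 0;
}

The same MMCRA value decodes differently once the masks are reassigned, which is why the comparisons in get_pc() and get_kernel() below go through the variables rather than the constants.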
-rw-r--r--  arch/powerpc/oprofile/common.c            |  7
-rw-r--r--  arch/powerpc/oprofile/op_model_power4.c   | 43
-rw-r--r--  include/asm-powerpc/oprofile_impl.h       |  3
3 files changed, 9 insertions(+), 44 deletions(-)
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index 2b9143b0f6ba..5b1de7e8041e 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -117,17 +117,10 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
 
 	oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel);
 	oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user);
-#ifdef CONFIG_PPC64
-	oprofilefs_create_ulong(sb, root, "backtrace_spinlocks",
-				&sys.backtrace_spinlocks);
-#endif
 
 	/* Default to tracing both kernel and user */
 	sys.enable_kernel = 1;
 	sys.enable_user = 1;
-#ifdef CONFIG_PPC64
-	sys.backtrace_spinlocks = 0;
-#endif
 
 	return 0;
 }
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index 38db2efef3bc..4c2beab1fdc1 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -25,18 +25,14 @@ static unsigned long reset_value[OP_MAX_COUNTER];
 
 static int oprofile_running;
 static int mmcra_has_sihv;
+/* Unfortunately these bits vary between CPUs */
+static unsigned long mmcra_sihv = MMCRA_SIHV;
+static unsigned long mmcra_sipr = MMCRA_SIPR;
 
 /* mmcr values are set in power4_reg_setup, used in power4_cpu_setup */
 static u32 mmcr0_val;
 static u64 mmcr1_val;
-static u32 mmcra_val;
-
-/*
- * Since we do not have an NMI, backtracing through spinlocks is
- * only a best guess. In light of this, allow it to be disabled at
- * runtime.
- */
-static int backtrace_spinlocks;
+static u64 mmcra_val;
 
 static void power4_reg_setup(struct op_counter_config *ctr,
 			     struct op_system_config *sys,
@@ -63,8 +59,6 @@ static void power4_reg_setup(struct op_counter_config *ctr,
 	mmcr1_val = sys->mmcr1;
 	mmcra_val = sys->mmcra;
 
-	backtrace_spinlocks = sys->backtrace_spinlocks;
-
 	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i)
 		reset_value[i] = 0x80000000UL - ctr[i].count;
 
@@ -197,25 +191,6 @@ static void __attribute_used__ kernel_unknown_bucket(void)
 {
 }
 
-static unsigned long check_spinlock_pc(struct pt_regs *regs,
-				       unsigned long profile_pc)
-{
-	unsigned long pc = instruction_pointer(regs);
-
-	/*
-	 * If both the SIAR (sampled instruction) and the perfmon exception
-	 * occurred in a spinlock region then we account the sample to the
-	 * calling function. This isnt 100% correct, we really need soft
-	 * IRQ disable so we always get the perfmon exception at the
-	 * point at which the SIAR is set.
-	 */
-	if (backtrace_spinlocks && in_lock_functions(pc) &&
-	    in_lock_functions(profile_pc))
-		return regs->link;
-	else
-		return profile_pc;
-}
-
 /*
  * On GQ and newer the MMCRA stores the HV and PR bits at the time
  * the SIAR was sampled. We use that to work out if the SIAR was sampled in
@@ -228,17 +203,17 @@ static unsigned long get_pc(struct pt_regs *regs)
 
 	/* Cant do much about it */
 	if (!mmcra_has_sihv)
-		return check_spinlock_pc(regs, pc);
+		return pc;
 
 	mmcra = mfspr(SPRN_MMCRA);
 
 	/* Were we in the hypervisor? */
-	if (firmware_has_feature(FW_FEATURE_LPAR) && (mmcra & MMCRA_SIHV))
+	if (firmware_has_feature(FW_FEATURE_LPAR) && (mmcra & mmcra_sihv))
 		/* function descriptor madness */
 		return *((unsigned long *)hypervisor_bucket);
 
 	/* We were in userspace, nothing to do */
-	if (mmcra & MMCRA_SIPR)
+	if (mmcra & mmcra_sipr)
 		return pc;
 
 #ifdef CONFIG_PPC_RTAS
@@ -257,7 +232,7 @@ static unsigned long get_pc(struct pt_regs *regs)
 		/* function descriptor madness */
 		return *((unsigned long *)kernel_unknown_bucket);
 
-	return check_spinlock_pc(regs, pc);
+	return pc;
 }
 
 static int get_kernel(unsigned long pc)
@@ -268,7 +243,7 @@ static int get_kernel(unsigned long pc)
 		is_kernel = is_kernel_addr(pc);
 	} else {
 		unsigned long mmcra = mfspr(SPRN_MMCRA);
-		is_kernel = ((mmcra & MMCRA_SIPR) == 0);
+		is_kernel = ((mmcra & mmcra_sipr) == 0);
 	}
 
 	return is_kernel;
diff --git a/include/asm-powerpc/oprofile_impl.h b/include/asm-powerpc/oprofile_impl.h
index aa180e907a67..5b33994cd488 100644
--- a/include/asm-powerpc/oprofile_impl.h
+++ b/include/asm-powerpc/oprofile_impl.h
@@ -35,9 +35,6 @@ struct op_system_config {
 #endif
 	unsigned long enable_kernel;
 	unsigned long enable_user;
-#ifdef CONFIG_PPC64
-	unsigned long backtrace_spinlocks;
-#endif
 };
 
 /* Per-arch configuration */