about summary refs log tree commit diff stats
path: root/arch/x86/oprofile
diff options
context:
space:
mode:
authorRobert Richter <robert.richter@amd.com>2009-07-09 15:42:51 -0400
committerRobert Richter <robert.richter@amd.com>2009-07-20 10:43:21 -0400
commit4d015f79e972cea1761cfee8872b1c0992ccd8b2 (patch)
tree8cecbab7a4c9146fd43b077e2d99a9cd4d5bbbb8 /arch/x86/oprofile
parent5280514471c2803776701c43c027038decac1103 (diff)
x86/oprofile: Implement mux_clone()
To setup a counter for all cpus, its structure is cloned from cpu 0. This patch implements mux_clone() to do this part for multiplexing data.

Signed-off-by: Robert Richter <robert.richter@amd.com>
Diffstat (limited to 'arch/x86/oprofile')
-rw-r--r--  arch/x86/oprofile/nmi_int.c | 37
1 files changed, 23 insertions, 14 deletions
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index f0fb44725d80..da6d2ab31c6c 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -264,6 +264,16 @@ static inline void mux_init(struct oprofile_operations *ops)
264 ops->switch_events = nmi_switch_event; 264 ops->switch_events = nmi_switch_event;
265} 265}
266 266
267static void mux_clone(int cpu)
268{
269 if (!has_mux())
270 return;
271
272 memcpy(per_cpu(cpu_msrs, cpu).multiplex,
273 per_cpu(cpu_msrs, 0).multiplex,
274 sizeof(struct op_msr) * model->num_virt_counters);
275}
276
267#else 277#else
268 278
269inline int op_x86_phys_to_virt(int phys) { return phys; } 279inline int op_x86_phys_to_virt(int phys) { return phys; }
@@ -272,6 +282,7 @@ static inline int nmi_setup_mux(void) { return 1; }
272static inline void 282static inline void
273nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { } 283nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
274static inline void mux_init(struct oprofile_operations *ops) { } 284static inline void mux_init(struct oprofile_operations *ops) { }
285static void mux_clone(int cpu) { }
275 286
276#endif 287#endif
277 288
@@ -350,20 +361,18 @@ static int nmi_setup(void)
350 /* Assume saved/restored counters are the same on all CPUs */ 361 /* Assume saved/restored counters are the same on all CPUs */
351 model->fill_in_addresses(&per_cpu(cpu_msrs, 0)); 362 model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
352 for_each_possible_cpu(cpu) { 363 for_each_possible_cpu(cpu) {
353 if (cpu != 0) { 364 if (!cpu)
354 memcpy(per_cpu(cpu_msrs, cpu).counters, 365 continue;
355 per_cpu(cpu_msrs, 0).counters, 366
356 sizeof(struct op_msr) * model->num_counters); 367 memcpy(per_cpu(cpu_msrs, cpu).counters,
357 368 per_cpu(cpu_msrs, 0).counters,
358 memcpy(per_cpu(cpu_msrs, cpu).controls, 369 sizeof(struct op_msr) * model->num_counters);
359 per_cpu(cpu_msrs, 0).controls, 370
360 sizeof(struct op_msr) * model->num_controls); 371 memcpy(per_cpu(cpu_msrs, cpu).controls,
361#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX 372 per_cpu(cpu_msrs, 0).controls,
362 memcpy(per_cpu(cpu_msrs, cpu).multiplex, 373 sizeof(struct op_msr) * model->num_controls);
363 per_cpu(cpu_msrs, 0).multiplex, 374
364 sizeof(struct op_msr) * model->num_virt_counters); 375 mux_clone(cpu);
365#endif
366 }
367 } 376 }
368 on_each_cpu(nmi_cpu_setup, NULL, 1); 377 on_each_cpu(nmi_cpu_setup, NULL, 1);
369 nmi_enabled = 1; 378 nmi_enabled = 1;