diff options
author | Robert Richter <robert.richter@amd.com> | 2008-09-24 05:08:52 -0400 |
---|---|---|
committer | Robert Richter <robert.richter@amd.com> | 2008-09-24 05:08:52 -0400 |
commit | 4c168eaf7ea39f25a45a3d8c7eebc3fedb633a1d (patch) | |
tree | 315bb66a3ce2e752749a780876e2b7b5f6a596de /arch/x86/oprofile/nmi_int.c | |
parent | 45f197ade73ba95681b9803680c75352fc0a1c0a (diff) |
Revert "Oprofile Multiplexing Patch"
Reverting commit 1a960b402a51d80abf54e3f8e4972374ffe5f22d for the main
branch. Multiplexing will be tracked on a separate feature branch.
Conflicts:
arch/x86/oprofile/nmi_int.c
Diffstat (limited to 'arch/x86/oprofile/nmi_int.c')
-rw-r--r-- | arch/x86/oprofile/nmi_int.c | 100 |
1 file changed, 8 insertions, 92 deletions
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 4108d02c5292..287513a09819 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
@@ -23,18 +23,12 @@ | |||
23 | #include "op_counter.h" | 23 | #include "op_counter.h" |
24 | #include "op_x86_model.h" | 24 | #include "op_x86_model.h" |
25 | 25 | ||
26 | DEFINE_PER_CPU(int, switch_index); | ||
27 | |||
28 | static struct op_x86_model_spec const *model; | 26 | static struct op_x86_model_spec const *model; |
29 | static DEFINE_PER_CPU(struct op_msrs, cpu_msrs); | 27 | static DEFINE_PER_CPU(struct op_msrs, cpu_msrs); |
30 | static DEFINE_PER_CPU(unsigned long, saved_lvtpc); | 28 | static DEFINE_PER_CPU(unsigned long, saved_lvtpc); |
31 | 29 | ||
32 | static int nmi_start(void); | 30 | static int nmi_start(void); |
33 | static void nmi_stop(void); | 31 | static void nmi_stop(void); |
34 | static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs); | ||
35 | static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs); | ||
36 | static void nmi_cpu_stop(void *dummy); | ||
37 | static void nmi_cpu_start(void *dummy); | ||
38 | 32 | ||
39 | /* 0 == registered but off, 1 == registered and on */ | 33 | /* 0 == registered but off, 1 == registered and on */ |
40 | static int nmi_enabled = 0; | 34 | static int nmi_enabled = 0; |
@@ -87,47 +81,6 @@ static void exit_sysfs(void) | |||
87 | #define exit_sysfs() do { } while (0) | 81 | #define exit_sysfs() do { } while (0) |
88 | #endif /* CONFIG_PM */ | 82 | #endif /* CONFIG_PM */ |
89 | 83 | ||
90 | static void nmi_cpu_switch(void *dummy) | ||
91 | { | ||
92 | int cpu = smp_processor_id(); | ||
93 | int si = per_cpu(switch_index, cpu); | ||
94 | struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); | ||
95 | |||
96 | nmi_cpu_stop(NULL); | ||
97 | nmi_cpu_save_mpx_registers(msrs); | ||
98 | |||
99 | /* move to next set */ | ||
100 | si += model->num_hardware_counters; | ||
101 | if ((si > model->num_counters) || (counter_config[si].count == 0)) | ||
102 | per_cpu(switch_index, smp_processor_id()) = 0; | ||
103 | else | ||
104 | per_cpu(switch_index, smp_processor_id()) = si; | ||
105 | |||
106 | nmi_cpu_restore_mpx_registers(msrs); | ||
107 | model->setup_ctrs(msrs); | ||
108 | nmi_cpu_start(NULL); | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * Quick check to see if multiplexing is necessary. | ||
113 | * The check should be sufficient since counters are used | ||
114 | * in order. | ||
115 | */ | ||
116 | static int nmi_multiplex_on(void) | ||
117 | { | ||
118 | return counter_config[model->num_hardware_counters].count ? 0 : -EINVAL; | ||
119 | } | ||
120 | |||
121 | static int nmi_switch_event(void) | ||
122 | { | ||
123 | if (nmi_multiplex_on() < 0) | ||
124 | return -EINVAL; | ||
125 | |||
126 | on_each_cpu(nmi_cpu_switch, NULL, 1); | ||
127 | |||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | static int profile_exceptions_notify(struct notifier_block *self, | 84 | static int profile_exceptions_notify(struct notifier_block *self, |
132 | unsigned long val, void *data) | 85 | unsigned long val, void *data) |
133 | { | 86 | { |
@@ -191,10 +144,11 @@ static void free_msrs(void) | |||
191 | 144 | ||
192 | static int allocate_msrs(void) | 145 | static int allocate_msrs(void) |
193 | { | 146 | { |
194 | int i, success = 1; | 147 | int success = 1; |
195 | size_t controls_size = sizeof(struct op_msr) * model->num_controls; | 148 | size_t controls_size = sizeof(struct op_msr) * model->num_controls; |
196 | size_t counters_size = sizeof(struct op_msr) * model->num_counters; | 149 | size_t counters_size = sizeof(struct op_msr) * model->num_counters; |
197 | 150 | ||
151 | int i; | ||
198 | for_each_possible_cpu(i) { | 152 | for_each_possible_cpu(i) { |
199 | per_cpu(cpu_msrs, i).counters = kmalloc(counters_size, | 153 | per_cpu(cpu_msrs, i).counters = kmalloc(counters_size, |
200 | GFP_KERNEL); | 154 | GFP_KERNEL); |
@@ -202,8 +156,8 @@ static int allocate_msrs(void) | |||
202 | success = 0; | 156 | success = 0; |
203 | break; | 157 | break; |
204 | } | 158 | } |
205 | per_cpu(cpu_msrs, i).controls = | 159 | per_cpu(cpu_msrs, i).controls = kmalloc(controls_size, |
206 | kmalloc(controls_size, GFP_KERNEL); | 160 | GFP_KERNEL); |
207 | if (!per_cpu(cpu_msrs, i).controls) { | 161 | if (!per_cpu(cpu_msrs, i).controls) { |
208 | success = 0; | 162 | success = 0; |
209 | break; | 163 | break; |
@@ -247,8 +201,7 @@ static int nmi_setup(void) | |||
247 | return err; | 201 | return err; |
248 | } | 202 | } |
249 | 203 | ||
250 | /* | 204 | /* We need to serialize save and setup for HT because the subset |
251 | * We need to serialize save and setup for HT because the subset | ||
252 | * of msrs are distinct for save and setup operations | 205 | * of msrs are distinct for save and setup operations |
253 | */ | 206 | */ |
254 | 207 | ||
@@ -264,6 +217,7 @@ static int nmi_setup(void) | |||
264 | per_cpu(cpu_msrs, 0).controls, | 217 | per_cpu(cpu_msrs, 0).controls, |
265 | sizeof(struct op_msr) * model->num_controls); | 218 | sizeof(struct op_msr) * model->num_controls); |
266 | } | 219 | } |
220 | |||
267 | } | 221 | } |
268 | on_each_cpu(nmi_save_registers, NULL, 1); | 222 | on_each_cpu(nmi_save_registers, NULL, 1); |
269 | on_each_cpu(nmi_cpu_setup, NULL, 1); | 223 | on_each_cpu(nmi_cpu_setup, NULL, 1); |
@@ -271,41 +225,7 @@ static int nmi_setup(void) | |||
271 | return 0; | 225 | return 0; |
272 | } | 226 | } |
273 | 227 | ||
274 | static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs) | 228 | static void nmi_restore_registers(struct op_msrs *msrs) |
275 | { | ||
276 | unsigned int si = __get_cpu_var(switch_index); | ||
277 | unsigned int const nr_ctrs = model->num_hardware_counters; | ||
278 | struct op_msr *counters = &msrs->counters[si]; | ||
279 | unsigned int i; | ||
280 | |||
281 | for (i = 0; i < nr_ctrs; ++i) { | ||
282 | int offset = i + si; | ||
283 | if (counters[offset].addr) { | ||
284 | rdmsr(counters[offset].addr, | ||
285 | counters[offset].multiplex.low, | ||
286 | counters[offset].multiplex.high); | ||
287 | } | ||
288 | } | ||
289 | } | ||
290 | |||
291 | static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs) | ||
292 | { | ||
293 | unsigned int si = __get_cpu_var(switch_index); | ||
294 | unsigned int const nr_ctrs = model->num_hardware_counters; | ||
295 | struct op_msr *counters = &msrs->counters[si]; | ||
296 | unsigned int i; | ||
297 | |||
298 | for (i = 0; i < nr_ctrs; ++i) { | ||
299 | int offset = i + si; | ||
300 | if (counters[offset].addr) { | ||
301 | wrmsr(counters[offset].addr, | ||
302 | counters[offset].multiplex.low, | ||
303 | counters[offset].multiplex.high); | ||
304 | } | ||
305 | } | ||
306 | } | ||
307 | |||
308 | static void nmi_cpu_restore_registers(struct op_msrs *msrs) | ||
309 | { | 229 | { |
310 | unsigned int const nr_ctrs = model->num_counters; | 230 | unsigned int const nr_ctrs = model->num_counters; |
311 | unsigned int const nr_ctrls = model->num_controls; | 231 | unsigned int const nr_ctrls = model->num_controls; |
@@ -345,8 +265,7 @@ static void nmi_cpu_shutdown(void *dummy) | |||
345 | apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); | 265 | apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); |
346 | apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu)); | 266 | apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu)); |
347 | apic_write(APIC_LVTERR, v); | 267 | apic_write(APIC_LVTERR, v); |
348 | nmi_cpu_restore_registers(msrs); | 268 | nmi_restore_registers(msrs); |
349 | __get_cpu_var(switch_index) = 0; | ||
350 | } | 269 | } |
351 | 270 | ||
352 | static void nmi_shutdown(void) | 271 | static void nmi_shutdown(void) |
@@ -409,7 +328,6 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root) | |||
409 | oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask); | 328 | oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask); |
410 | oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel); | 329 | oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel); |
411 | oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user); | 330 | oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user); |
412 | counter_config[i].save_count_low = 0; | ||
413 | } | 331 | } |
414 | 332 | ||
415 | return 0; | 333 | return 0; |
@@ -551,14 +469,12 @@ int __init op_nmi_init(struct oprofile_operations *ops) | |||
551 | } | 469 | } |
552 | 470 | ||
553 | /* default values, can be overwritten by model */ | 471 | /* default values, can be overwritten by model */ |
554 | __raw_get_cpu_var(switch_index) = 0; | ||
555 | ops->create_files = nmi_create_files; | 472 | ops->create_files = nmi_create_files; |
556 | ops->setup = nmi_setup; | 473 | ops->setup = nmi_setup; |
557 | ops->shutdown = nmi_shutdown; | 474 | ops->shutdown = nmi_shutdown; |
558 | ops->start = nmi_start; | 475 | ops->start = nmi_start; |
559 | ops->stop = nmi_stop; | 476 | ops->stop = nmi_stop; |
560 | ops->cpu_type = cpu_type; | 477 | ops->cpu_type = cpu_type; |
561 | ops->switch_events = nmi_switch_event; | ||
562 | 478 | ||
563 | if (model->init) | 479 | if (model->init) |
564 | ret = model->init(ops); | 480 | ret = model->init(ops); |