aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/oprofile/nmi_int.c
diff options
context:
space:
mode:
authorRobert Richter <robert.richter@amd.com>2009-07-09 08:38:49 -0400
committerRobert Richter <robert.richter@amd.com>2009-07-20 10:43:20 -0400
commitb28d1b923ab52d535c0719155dccf3b3d98bab9f (patch)
tree985d25f6ae3b7719f3cf3763943112a80bec320d /arch/x86/oprofile/nmi_int.c
parentd0f585dd20010f8479e56b5c6f391ef18e26877e (diff)
x86/oprofile: Moving nmi_cpu_switch() in nmi_int.c
This patch moves some code in nmi_int.c to get a single separate multiplexing code section. Signed-off-by: Robert Richter <robert.richter@amd.com>
Diffstat (limited to 'arch/x86/oprofile/nmi_int.c')
-rw-r--r--arch/x86/oprofile/nmi_int.c144
1 files changed, 70 insertions, 74 deletions
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index f38c5cf0fdbb..998c7dca31e7 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -97,6 +97,29 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs)
97 } 97 }
98} 98}
99 99
100static void nmi_cpu_start(void *dummy)
101{
102 struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
103 model->start(msrs);
104}
105
106static int nmi_start(void)
107{
108 on_each_cpu(nmi_cpu_start, NULL, 1);
109 return 0;
110}
111
112static void nmi_cpu_stop(void *dummy)
113{
114 struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
115 model->stop(msrs);
116}
117
118static void nmi_stop(void)
119{
120 on_each_cpu(nmi_cpu_stop, NULL, 1);
121}
122
100#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX 123#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
101 124
102static DEFINE_PER_CPU(int, switch_index); 125static DEFINE_PER_CPU(int, switch_index);
@@ -171,6 +194,53 @@ static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
171 } 194 }
172} 195}
173 196
197static void nmi_cpu_switch(void *dummy)
198{
199 int cpu = smp_processor_id();
200 int si = per_cpu(switch_index, cpu);
201 struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
202
203 nmi_cpu_stop(NULL);
204 nmi_cpu_save_mpx_registers(msrs);
205
206 /* move to next set */
207 si += model->num_counters;
208 if ((si > model->num_virt_counters) || (counter_config[si].count == 0))
209 per_cpu(switch_index, cpu) = 0;
210 else
211 per_cpu(switch_index, cpu) = si;
212
213 model->switch_ctrl(model, msrs);
214 nmi_cpu_restore_mpx_registers(msrs);
215
216 nmi_cpu_start(NULL);
217}
218
219
220/*
221 * Quick check to see if multiplexing is necessary.
222 * The check should be sufficient since counters are used
223 * in order.
224 */
225static int nmi_multiplex_on(void)
226{
227 return counter_config[model->num_counters].count ? 0 : -EINVAL;
228}
229
230static int nmi_switch_event(void)
231{
232 if (!model->switch_ctrl)
233 return -ENOSYS; /* not implemented */
234 if (nmi_multiplex_on() < 0)
235 return -EINVAL; /* not necessary */
236
237 on_each_cpu(nmi_cpu_switch, NULL, 1);
238
239 atomic_inc(&multiplex_counter);
240
241 return 0;
242}
243
174#else 244#else
175 245
176inline int op_x86_phys_to_virt(int phys) { return phys; } 246inline int op_x86_phys_to_virt(int phys) { return phys; }
@@ -325,29 +395,6 @@ static void nmi_shutdown(void)
325 put_cpu_var(cpu_msrs); 395 put_cpu_var(cpu_msrs);
326} 396}
327 397
328static void nmi_cpu_start(void *dummy)
329{
330 struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
331 model->start(msrs);
332}
333
334static int nmi_start(void)
335{
336 on_each_cpu(nmi_cpu_start, NULL, 1);
337 return 0;
338}
339
340static void nmi_cpu_stop(void *dummy)
341{
342 struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
343 model->stop(msrs);
344}
345
346static void nmi_stop(void)
347{
348 on_each_cpu(nmi_cpu_stop, NULL, 1);
349}
350
351static int nmi_create_files(struct super_block *sb, struct dentry *root) 398static int nmi_create_files(struct super_block *sb, struct dentry *root)
352{ 399{
353 unsigned int i; 400 unsigned int i;
@@ -379,57 +426,6 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root)
379 return 0; 426 return 0;
380} 427}
381 428
382#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
383
384static void nmi_cpu_switch(void *dummy)
385{
386 int cpu = smp_processor_id();
387 int si = per_cpu(switch_index, cpu);
388 struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
389
390 nmi_cpu_stop(NULL);
391 nmi_cpu_save_mpx_registers(msrs);
392
393 /* move to next set */
394 si += model->num_counters;
395 if ((si > model->num_virt_counters) || (counter_config[si].count == 0))
396 per_cpu(switch_index, cpu) = 0;
397 else
398 per_cpu(switch_index, cpu) = si;
399
400 model->switch_ctrl(model, msrs);
401 nmi_cpu_restore_mpx_registers(msrs);
402
403 nmi_cpu_start(NULL);
404}
405
406
407/*
408 * Quick check to see if multiplexing is necessary.
409 * The check should be sufficient since counters are used
410 * in order.
411 */
412static int nmi_multiplex_on(void)
413{
414 return counter_config[model->num_counters].count ? 0 : -EINVAL;
415}
416
417static int nmi_switch_event(void)
418{
419 if (!model->switch_ctrl)
420 return -ENOSYS; /* not implemented */
421 if (nmi_multiplex_on() < 0)
422 return -EINVAL; /* not necessary */
423
424 on_each_cpu(nmi_cpu_switch, NULL, 1);
425
426 atomic_inc(&multiplex_counter);
427
428 return 0;
429}
430
431#endif
432
433#ifdef CONFIG_SMP 429#ifdef CONFIG_SMP
434static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action, 430static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
435 void *data) 431 void *data)