author     Robert Richter <robert.richter@amd.com>    2010-05-03 09:52:26 -0400
committer  Robert Richter <robert.richter@amd.com>    2010-05-04 05:40:11 -0400
commit     d30d64c6da3ec7a0708bfffa7e05752d5b9a1093 (patch)
tree       c3a9ce94fc54c18ebf8c583d1c43147af6bafc4e /arch
parent     de654649737696ecf32873c341b305e30f3dc777 (diff)
oprofile/x86: reordering some functions
Reordering some functions. Necessary for the next patch. No functional changes.

Cc: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Robert Richter <robert.richter@amd.com>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/oprofile/nmi_int.c  134
1 file changed, 67 insertions(+), 67 deletions(-)
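The three hunks below are a pure code move: nmi_setup() and nmi_shutdown() are deleted from their old positions (above nmi_cpu_restore_registers() and nmi_cpu_up(), respectively) and re-added unchanged after the oprofile_cpu_nb notifier block. As an orientation aid only, here is a minimal, self-contained sketch of the resulting ordering; the stub struct, empty bodies, and main() are placeholders of mine, not kernel code, and the real definitions appear verbatim in the diff below.

/*
 * Sketch only (not kernel code): a compilable outline of the function
 * ordering in arch/x86/oprofile/nmi_int.c after this patch.  The struct
 * and the bodies are stubs; see the diff below for the real code.
 */
#include <stddef.h>

struct op_msrs { void *counters; void *controls; };	/* stand-in type */

static void nmi_cpu_restore_registers(struct op_msrs *msrs) { (void)msrs; }
static void nmi_cpu_shutdown(void *dummy) { (void)dummy; }
static void nmi_cpu_up(void *dummy) { (void)dummy; }

/* ... the CPU hotplug notifier (oprofile_cpu_nb) sits here ... */

static int nmi_setup(void) { return 0; }		/* moved below the notifier */
static void nmi_shutdown(void) { nmi_cpu_shutdown(NULL); }	/* moved below the notifier */

int main(void)
{
	struct op_msrs msrs = { NULL, NULL };

	/* Call each stub so the sketch builds cleanly with -Wall. */
	nmi_cpu_restore_registers(&msrs);
	nmi_cpu_up(NULL);
	if (nmi_setup() == 0)
		nmi_shutdown();
	return 0;
}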
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 94b5481bb6c6..7de0572b0a5e 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -364,56 +364,6 @@ static struct notifier_block profile_exceptions_nb = {
 	.priority = 2
 };
 
-static int nmi_setup(void)
-{
-	int err = 0;
-	int cpu;
-
-	if (!allocate_msrs())
-		return -ENOMEM;
-
-	/* We need to serialize save and setup for HT because the subset
-	 * of msrs are distinct for save and setup operations
-	 */
-
-	/* Assume saved/restored counters are the same on all CPUs */
-	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
-	if (err)
-		goto fail;
-
-	for_each_possible_cpu(cpu) {
-		if (!cpu)
-			continue;
-
-		memcpy(per_cpu(cpu_msrs, cpu).counters,
-		       per_cpu(cpu_msrs, 0).counters,
-		       sizeof(struct op_msr) * model->num_counters);
-
-		memcpy(per_cpu(cpu_msrs, cpu).controls,
-		       per_cpu(cpu_msrs, 0).controls,
-		       sizeof(struct op_msr) * model->num_controls);
-
-		mux_clone(cpu);
-	}
-
-	nmi_enabled = 0;
-	ctr_running = 0;
-	barrier();
-	err = register_die_notifier(&profile_exceptions_nb);
-	if (err)
-		goto fail;
-
-	get_online_cpus();
-	on_each_cpu(nmi_cpu_setup, NULL, 1);
-	nmi_enabled = 1;
-	put_online_cpus();
-
-	return 0;
-fail:
-	free_msrs();
-	return err;
-}
-
 static void nmi_cpu_restore_registers(struct op_msrs *msrs)
 {
 	struct op_msr *counters = msrs->counters;
@@ -449,23 +399,6 @@ static void nmi_cpu_shutdown(void *dummy)
 	nmi_cpu_restore_registers(msrs);
 }
 
-static void nmi_shutdown(void)
-{
-	struct op_msrs *msrs;
-
-	get_online_cpus();
-	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
-	nmi_enabled = 0;
-	ctr_running = 0;
-	put_online_cpus();
-	barrier();
-	unregister_die_notifier(&profile_exceptions_nb);
-	msrs = &get_cpu_var(cpu_msrs);
-	model->shutdown(msrs);
-	free_msrs();
-	put_cpu_var(cpu_msrs);
-}
-
 static void nmi_cpu_up(void *dummy)
 {
 	if (nmi_enabled)
@@ -531,6 +464,73 @@ static struct notifier_block oprofile_cpu_nb = {
 	.notifier_call = oprofile_cpu_notifier
 };
 
+static int nmi_setup(void)
+{
+	int err = 0;
+	int cpu;
+
+	if (!allocate_msrs())
+		return -ENOMEM;
+
+	/* We need to serialize save and setup for HT because the subset
+	 * of msrs are distinct for save and setup operations
+	 */
+
+	/* Assume saved/restored counters are the same on all CPUs */
+	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
+	if (err)
+		goto fail;
+
+	for_each_possible_cpu(cpu) {
+		if (!cpu)
+			continue;
+
+		memcpy(per_cpu(cpu_msrs, cpu).counters,
+		       per_cpu(cpu_msrs, 0).counters,
+		       sizeof(struct op_msr) * model->num_counters);
+
+		memcpy(per_cpu(cpu_msrs, cpu).controls,
+		       per_cpu(cpu_msrs, 0).controls,
+		       sizeof(struct op_msr) * model->num_controls);
+
+		mux_clone(cpu);
+	}
+
+	nmi_enabled = 0;
+	ctr_running = 0;
+	barrier();
+	err = register_die_notifier(&profile_exceptions_nb);
+	if (err)
+		goto fail;
+
+	get_online_cpus();
+	on_each_cpu(nmi_cpu_setup, NULL, 1);
+	nmi_enabled = 1;
+	put_online_cpus();
+
+	return 0;
+fail:
+	free_msrs();
+	return err;
+}
+
+static void nmi_shutdown(void)
+{
+	struct op_msrs *msrs;
+
+	get_online_cpus();
+	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
+	nmi_enabled = 0;
+	ctr_running = 0;
+	put_online_cpus();
+	barrier();
+	unregister_die_notifier(&profile_exceptions_nb);
+	msrs = &get_cpu_var(cpu_msrs);
+	model->shutdown(msrs);
+	free_msrs();
+	put_cpu_var(cpu_msrs);
+}
+
 #ifdef CONFIG_PM
 
 static int nmi_suspend(struct sys_device *dev, pm_message_t state)