-rw-r--r--	arch/x86/oprofile/nmi_int.c	134
1 files changed, 67 insertions, 67 deletions
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 94b5481bb6c6..7de0572b0a5e 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -364,56 +364,6 @@ static struct notifier_block profile_exceptions_nb = {
 	.priority = 2
 };
 
-static int nmi_setup(void)
-{
-	int err = 0;
-	int cpu;
-
-	if (!allocate_msrs())
-		return -ENOMEM;
-
-	/* We need to serialize save and setup for HT because the subset
-	 * of msrs are distinct for save and setup operations
-	 */
-
-	/* Assume saved/restored counters are the same on all CPUs */
-	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
-	if (err)
-		goto fail;
-
-	for_each_possible_cpu(cpu) {
-		if (!cpu)
-			continue;
-
-		memcpy(per_cpu(cpu_msrs, cpu).counters,
-		       per_cpu(cpu_msrs, 0).counters,
-		       sizeof(struct op_msr) * model->num_counters);
-
-		memcpy(per_cpu(cpu_msrs, cpu).controls,
-		       per_cpu(cpu_msrs, 0).controls,
-		       sizeof(struct op_msr) * model->num_controls);
-
-		mux_clone(cpu);
-	}
-
-	nmi_enabled = 0;
-	ctr_running = 0;
-	barrier();
-	err = register_die_notifier(&profile_exceptions_nb);
-	if (err)
-		goto fail;
-
-	get_online_cpus();
-	on_each_cpu(nmi_cpu_setup, NULL, 1);
-	nmi_enabled = 1;
-	put_online_cpus();
-
-	return 0;
-fail:
-	free_msrs();
-	return err;
-}
-
 static void nmi_cpu_restore_registers(struct op_msrs *msrs)
 {
 	struct op_msr *counters = msrs->counters;
@@ -449,23 +399,6 @@ static void nmi_cpu_shutdown(void *dummy)
 	nmi_cpu_restore_registers(msrs);
 }
 
-static void nmi_shutdown(void)
-{
-	struct op_msrs *msrs;
-
-	get_online_cpus();
-	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
-	nmi_enabled = 0;
-	ctr_running = 0;
-	put_online_cpus();
-	barrier();
-	unregister_die_notifier(&profile_exceptions_nb);
-	msrs = &get_cpu_var(cpu_msrs);
-	model->shutdown(msrs);
-	free_msrs();
-	put_cpu_var(cpu_msrs);
-}
-
 static void nmi_cpu_up(void *dummy)
 {
 	if (nmi_enabled)
@@ -531,6 +464,73 @@ static struct notifier_block oprofile_cpu_nb = {
 	.notifier_call = oprofile_cpu_notifier
 };
 
+static int nmi_setup(void)
+{
+	int err = 0;
+	int cpu;
+
+	if (!allocate_msrs())
+		return -ENOMEM;
+
+	/* We need to serialize save and setup for HT because the subset
+	 * of msrs are distinct for save and setup operations
+	 */
+
+	/* Assume saved/restored counters are the same on all CPUs */
+	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
+	if (err)
+		goto fail;
+
+	for_each_possible_cpu(cpu) {
+		if (!cpu)
+			continue;
+
+		memcpy(per_cpu(cpu_msrs, cpu).counters,
+		       per_cpu(cpu_msrs, 0).counters,
+		       sizeof(struct op_msr) * model->num_counters);
+
+		memcpy(per_cpu(cpu_msrs, cpu).controls,
+		       per_cpu(cpu_msrs, 0).controls,
+		       sizeof(struct op_msr) * model->num_controls);
+
+		mux_clone(cpu);
+	}
+
+	nmi_enabled = 0;
+	ctr_running = 0;
+	barrier();
+	err = register_die_notifier(&profile_exceptions_nb);
+	if (err)
+		goto fail;
+
+	get_online_cpus();
+	on_each_cpu(nmi_cpu_setup, NULL, 1);
+	nmi_enabled = 1;
+	put_online_cpus();
+
+	return 0;
+fail:
+	free_msrs();
+	return err;
+}
+
+static void nmi_shutdown(void)
+{
+	struct op_msrs *msrs;
+
+	get_online_cpus();
+	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
+	nmi_enabled = 0;
+	ctr_running = 0;
+	put_online_cpus();
+	barrier();
+	unregister_die_notifier(&profile_exceptions_nb);
+	msrs = &get_cpu_var(cpu_msrs);
+	model->shutdown(msrs);
+	free_msrs();
+	put_cpu_var(cpu_msrs);
+}
+
 #ifdef CONFIG_PM
 
 static int nmi_suspend(struct sys_device *dev, pm_message_t state)
