diff options
author | Richard Cochran <rcochran@linutronix.de> | 2016-07-13 13:16:13 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2016-07-14 03:34:33 -0400 |
commit | 96b2bd3866a0b045330e420a2f1829ff2a3399bc (patch) | |
tree | 1e1d9788206aab6a9972fd2fe7f497304611e81e /arch/x86/events/amd/uncore.c | |
parent | 1a246b9f58c6149b5a5bec081418b8ed890e0dfe (diff) |
perf/x86/amd/uncore: Convert to hotplug state machine
Install the callbacks via the state machine and let the core invoke
the callbacks on the already online CPUs.
Signed-off-by: Richard Cochran <rcochran@linutronix.de>
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Chen Yucong <slaoub@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160713153333.839150380@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/events/amd/uncore.c')
-rw-r--r-- | arch/x86/events/amd/uncore.c | 122 |
1 file changed, 32 insertions, 90 deletions
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index 98ac57381bf9..e6131d4454e6 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c | |||
@@ -358,7 +358,7 @@ amd_uncore_find_online_sibling(struct amd_uncore *this, | |||
358 | return this; | 358 | return this; |
359 | } | 359 | } |
360 | 360 | ||
361 | static void amd_uncore_cpu_starting(unsigned int cpu) | 361 | static int amd_uncore_cpu_starting(unsigned int cpu) |
362 | { | 362 | { |
363 | unsigned int eax, ebx, ecx, edx; | 363 | unsigned int eax, ebx, ecx, edx; |
364 | struct amd_uncore *uncore; | 364 | struct amd_uncore *uncore; |
@@ -384,6 +384,8 @@ static void amd_uncore_cpu_starting(unsigned int cpu) | |||
384 | uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2); | 384 | uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2); |
385 | *per_cpu_ptr(amd_uncore_l2, cpu) = uncore; | 385 | *per_cpu_ptr(amd_uncore_l2, cpu) = uncore; |
386 | } | 386 | } |
387 | |||
388 | return 0; | ||
387 | } | 389 | } |
388 | 390 | ||
389 | static void uncore_online(unsigned int cpu, | 391 | static void uncore_online(unsigned int cpu, |
@@ -398,13 +400,15 @@ static void uncore_online(unsigned int cpu, | |||
398 | cpumask_set_cpu(cpu, uncore->active_mask); | 400 | cpumask_set_cpu(cpu, uncore->active_mask); |
399 | } | 401 | } |
400 | 402 | ||
401 | static void amd_uncore_cpu_online(unsigned int cpu) | 403 | static int amd_uncore_cpu_online(unsigned int cpu) |
402 | { | 404 | { |
403 | if (amd_uncore_nb) | 405 | if (amd_uncore_nb) |
404 | uncore_online(cpu, amd_uncore_nb); | 406 | uncore_online(cpu, amd_uncore_nb); |
405 | 407 | ||
406 | if (amd_uncore_l2) | 408 | if (amd_uncore_l2) |
407 | uncore_online(cpu, amd_uncore_l2); | 409 | uncore_online(cpu, amd_uncore_l2); |
410 | |||
411 | return 0; | ||
408 | } | 412 | } |
409 | 413 | ||
410 | static void uncore_down_prepare(unsigned int cpu, | 414 | static void uncore_down_prepare(unsigned int cpu, |
@@ -433,13 +437,15 @@ static void uncore_down_prepare(unsigned int cpu, | |||
433 | } | 437 | } |
434 | } | 438 | } |
435 | 439 | ||
436 | static void amd_uncore_cpu_down_prepare(unsigned int cpu) | 440 | static int amd_uncore_cpu_down_prepare(unsigned int cpu) |
437 | { | 441 | { |
438 | if (amd_uncore_nb) | 442 | if (amd_uncore_nb) |
439 | uncore_down_prepare(cpu, amd_uncore_nb); | 443 | uncore_down_prepare(cpu, amd_uncore_nb); |
440 | 444 | ||
441 | if (amd_uncore_l2) | 445 | if (amd_uncore_l2) |
442 | uncore_down_prepare(cpu, amd_uncore_l2); | 446 | uncore_down_prepare(cpu, amd_uncore_l2); |
447 | |||
448 | return 0; | ||
443 | } | 449 | } |
444 | 450 | ||
445 | static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores) | 451 | static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores) |
@@ -454,74 +460,19 @@ static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores) | |||
454 | *per_cpu_ptr(uncores, cpu) = NULL; | 460 | *per_cpu_ptr(uncores, cpu) = NULL; |
455 | } | 461 | } |
456 | 462 | ||
457 | static void amd_uncore_cpu_dead(unsigned int cpu) | 463 | static int amd_uncore_cpu_dead(unsigned int cpu) |
458 | { | 464 | { |
459 | if (amd_uncore_nb) | 465 | if (amd_uncore_nb) |
460 | uncore_dead(cpu, amd_uncore_nb); | 466 | uncore_dead(cpu, amd_uncore_nb); |
461 | 467 | ||
462 | if (amd_uncore_l2) | 468 | if (amd_uncore_l2) |
463 | uncore_dead(cpu, amd_uncore_l2); | 469 | uncore_dead(cpu, amd_uncore_l2); |
464 | } | ||
465 | |||
466 | static int | ||
467 | amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action, | ||
468 | void *hcpu) | ||
469 | { | ||
470 | unsigned int cpu = (long)hcpu; | ||
471 | |||
472 | switch (action & ~CPU_TASKS_FROZEN) { | ||
473 | case CPU_UP_PREPARE: | ||
474 | if (amd_uncore_cpu_up_prepare(cpu)) | ||
475 | return notifier_from_errno(-ENOMEM); | ||
476 | break; | ||
477 | |||
478 | case CPU_STARTING: | ||
479 | amd_uncore_cpu_starting(cpu); | ||
480 | break; | ||
481 | |||
482 | case CPU_ONLINE: | ||
483 | amd_uncore_cpu_online(cpu); | ||
484 | break; | ||
485 | |||
486 | case CPU_DOWN_PREPARE: | ||
487 | amd_uncore_cpu_down_prepare(cpu); | ||
488 | break; | ||
489 | |||
490 | case CPU_UP_CANCELED: | ||
491 | case CPU_DEAD: | ||
492 | amd_uncore_cpu_dead(cpu); | ||
493 | break; | ||
494 | |||
495 | default: | ||
496 | break; | ||
497 | } | ||
498 | |||
499 | return NOTIFY_OK; | ||
500 | } | ||
501 | |||
502 | static struct notifier_block amd_uncore_cpu_notifier_block = { | ||
503 | .notifier_call = amd_uncore_cpu_notifier, | ||
504 | .priority = CPU_PRI_PERF + 1, | ||
505 | }; | ||
506 | |||
507 | static void __init init_cpu_already_online(void *dummy) | ||
508 | { | ||
509 | unsigned int cpu = smp_processor_id(); | ||
510 | |||
511 | amd_uncore_cpu_starting(cpu); | ||
512 | amd_uncore_cpu_online(cpu); | ||
513 | } | ||
514 | 470 | ||
515 | static void cleanup_cpu_online(void *dummy) | 471 | return 0; |
516 | { | ||
517 | unsigned int cpu = smp_processor_id(); | ||
518 | |||
519 | amd_uncore_cpu_dead(cpu); | ||
520 | } | 472 | } |
521 | 473 | ||
522 | static int __init amd_uncore_init(void) | 474 | static int __init amd_uncore_init(void) |
523 | { | 475 | { |
524 | unsigned int cpu, cpu2; | ||
525 | int ret = -ENODEV; | 476 | int ret = -ENODEV; |
526 | 477 | ||
527 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) | 478 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) |
@@ -558,38 +509,29 @@ static int __init amd_uncore_init(void) | |||
558 | ret = 0; | 509 | ret = 0; |
559 | } | 510 | } |
560 | 511 | ||
561 | if (ret) | 512 | /* |
562 | goto fail_nodev; | 513 | * Install callbacks. Core will call them for each online cpu. |
563 | 514 | */ | |
564 | cpu_notifier_register_begin(); | 515 | if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP, |
565 | 516 | "PERF_X86_AMD_UNCORE_PREP", | |
566 | /* init cpus already online before registering for hotplug notifier */ | 517 | amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead)) |
567 | for_each_online_cpu(cpu) { | 518 | goto fail_l2; |
568 | ret = amd_uncore_cpu_up_prepare(cpu); | 519 | |
569 | if (ret) | 520 | if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, |
570 | goto fail_online; | 521 | "AP_PERF_X86_AMD_UNCORE_STARTING", |
571 | smp_call_function_single(cpu, init_cpu_already_online, NULL, 1); | 522 | amd_uncore_cpu_starting, NULL)) |
572 | } | 523 | goto fail_prep; |
573 | 524 | if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE, | |
574 | __register_cpu_notifier(&amd_uncore_cpu_notifier_block); | 525 | "AP_PERF_X86_AMD_UNCORE_ONLINE", |
575 | cpu_notifier_register_done(); | 526 | amd_uncore_cpu_online, |
576 | 527 | amd_uncore_cpu_down_prepare)) | |
528 | goto fail_start; | ||
577 | return 0; | 529 | return 0; |
578 | 530 | ||
579 | 531 | fail_start: | |
580 | fail_online: | 532 | cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING); |
581 | for_each_online_cpu(cpu2) { | 533 | fail_prep: |
582 | if (cpu2 == cpu) | 534 | cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP); |
583 | break; | ||
584 | smp_call_function_single(cpu, cleanup_cpu_online, NULL, 1); | ||
585 | } | ||
586 | cpu_notifier_register_done(); | ||
587 | |||
588 | /* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */ | ||
589 | amd_uncore_nb = amd_uncore_l2 = NULL; | ||
590 | |||
591 | if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) | ||
592 | perf_pmu_unregister(&amd_l2_pmu); | ||
593 | fail_l2: | 535 | fail_l2: |
594 | if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) | 536 | if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) |
595 | perf_pmu_unregister(&amd_nb_pmu); | 537 | perf_pmu_unregister(&amd_nb_pmu); |