diff options
author:    Yan, Zheng <zheng.z.yan@intel.com>  2013-04-16 07:51:05 -0400
committer: Ingo Molnar <mingo@kernel.org>      2013-04-21 04:59:01 -0400
commit: 22cc4ccf63e10e361531bf61e6e6c96c53a2f665 (patch)
tree:   0deeb72d431b7916cb0a42ca1983ec2a250fa3d2
parent: 73e21ce28d8d2b75140b742b01373c3a085ecc52 (diff)
perf/x86: Avoid kfree() in CPU_{STARTING,DYING}
On -rt kfree() can schedule, but CPU_{STARTING,DYING} should be
atomic. So use a list to defer kfree until CPU_{ONLINE,DEAD}.
Signed-off-by: Yan, Zheng <zheng.z.yan@intel.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: peterz@infradead.org
Cc: eranian@google.com
Cc: ak@linux.intel.com
Link: http://lkml.kernel.org/r/1366113067-3262-2-git-send-email-zheng.z.yan@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | arch/x86/kernel/cpu/perf_event_intel_uncore.c | 28 |
1 file changed, 25 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 75da9e18b128..50d4a1c58106 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -2622,6 +2622,21 @@ static void __init uncore_pci_exit(void)
2622 | } | 2622 | } |
2623 | } | 2623 | } |
2624 | 2624 | ||
2625 | /* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */ | ||
2626 | static LIST_HEAD(boxes_to_free); | ||
2627 | |||
2628 | static void __cpuinit uncore_kfree_boxes(void) | ||
2629 | { | ||
2630 | struct intel_uncore_box *box; | ||
2631 | |||
2632 | while (!list_empty(&boxes_to_free)) { | ||
2633 | box = list_entry(boxes_to_free.next, | ||
2634 | struct intel_uncore_box, list); | ||
2635 | list_del(&box->list); | ||
2636 | kfree(box); | ||
2637 | } | ||
2638 | } | ||
2639 | |||
2625 | static void __cpuinit uncore_cpu_dying(int cpu) | 2640 | static void __cpuinit uncore_cpu_dying(int cpu) |
2626 | { | 2641 | { |
2627 | struct intel_uncore_type *type; | 2642 | struct intel_uncore_type *type; |
@@ -2636,7 +2651,7 @@ static void __cpuinit uncore_cpu_dying(int cpu)
2636 | box = *per_cpu_ptr(pmu->box, cpu); | 2651 | box = *per_cpu_ptr(pmu->box, cpu); |
2637 | *per_cpu_ptr(pmu->box, cpu) = NULL; | 2652 | *per_cpu_ptr(pmu->box, cpu) = NULL; |
2638 | if (box && atomic_dec_and_test(&box->refcnt)) | 2653 | if (box && atomic_dec_and_test(&box->refcnt)) |
2639 | kfree(box); | 2654 | list_add(&box->list, &boxes_to_free); |
2640 | } | 2655 | } |
2641 | } | 2656 | } |
2642 | } | 2657 | } |
@@ -2666,8 +2681,11 @@ static int __cpuinit uncore_cpu_starting(int cpu)
2666 | if (exist && exist->phys_id == phys_id) { | 2681 | if (exist && exist->phys_id == phys_id) { |
2667 | atomic_inc(&exist->refcnt); | 2682 | atomic_inc(&exist->refcnt); |
2668 | *per_cpu_ptr(pmu->box, cpu) = exist; | 2683 | *per_cpu_ptr(pmu->box, cpu) = exist; |
2669 | kfree(box); | 2684 | if (box) { |
2670 | box = NULL; | 2685 | list_add(&box->list, |
2686 | &boxes_to_free); | ||
2687 | box = NULL; | ||
2688 | } | ||
2671 | break; | 2689 | break; |
2672 | } | 2690 | } |
2673 | } | 2691 | } |
@@ -2806,6 +2824,10 @@ static int
2806 | case CPU_DYING: | 2824 | case CPU_DYING: |
2807 | uncore_cpu_dying(cpu); | 2825 | uncore_cpu_dying(cpu); |
2808 | break; | 2826 | break; |
2827 | case CPU_ONLINE: | ||
2828 | case CPU_DEAD: | ||
2829 | uncore_kfree_boxes(); | ||
2830 | break; | ||
2809 | default: | 2831 | default: |
2810 | break; | 2832 | break; |
2811 | } | 2833 | } |