author		Linus Torvalds <torvalds@linux-foundation.org>	2010-09-08 14:13:16 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-09-08 14:13:16 -0400
commit		899edae615c806f78880077bd46f04d7f23ae6e6 (patch)
tree		ae3918d9947a8c4811e86217fc77b3927784492d /arch
parent		c8c727db413e18414dc6ebc2cc4f18f390763e17 (diff)
parent		4177c42a6301a34c20038ec2771a33dcc30bb338 (diff)
Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
perf, x86: Try to handle unknown nmis with an enabled PMU
perf, x86: Fix handle_irq return values
perf, x86: Fix accidentally ack'ing a second event on intel perf counter
oprofile, x86: fix init_sysfs() function stub
lockup_detector: Sync touch_*_watchdog back to old semantics
tracing: Fix a race in function profile
oprofile, x86: fix init_sysfs error handling
perf_events: Fix time tracking for events with pid != -1 and cpu != -1
perf: Initialize callchain root's children hits
oprofile: fix crash when accessing freed task structs
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	59
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c	15
-rw-r--r--	arch/x86/kernel/cpu/perf_event_p4.c	2
-rw-r--r--	arch/x86/oprofile/nmi_int.c	22
4 files changed, 73 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index f2da20fda02d..3efdf2870a35 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1154,7 +1154,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
 		/*
 		 * event overflow
 		 */
-		handled = 1;
+		handled++;
 		data.period = event->hw.last_period;
 
 		if (!x86_perf_event_set_period(event))
@@ -1200,12 +1200,20 @@ void perf_events_lapic_init(void)
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
+struct pmu_nmi_state {
+	unsigned int	marked;
+	int		handled;
+};
+
+static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
+
 static int __kprobes
 perf_event_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
 {
 	struct die_args *args = __args;
-	struct pt_regs *regs;
+	unsigned int this_nmi;
+	int handled;
 
 	if (!atomic_read(&active_events))
 		return NOTIFY_DONE;
@@ -1214,22 +1222,47 @@ perf_event_nmi_handler(struct notifier_block *self,
 	case DIE_NMI:
 	case DIE_NMI_IPI:
 		break;
-
+	case DIE_NMIUNKNOWN:
+		this_nmi = percpu_read(irq_stat.__nmi_count);
+		if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+			/* let the kernel handle the unknown nmi */
+			return NOTIFY_DONE;
+		/*
+		 * This one is a PMU back-to-back nmi. Two events
+		 * trigger 'simultaneously' raising two back-to-back
+		 * NMIs. If the first NMI handles both, the latter
+		 * will be empty and daze the CPU. So, we drop it to
+		 * avoid false-positive 'unknown nmi' messages.
+		 */
+		return NOTIFY_STOP;
 	default:
 		return NOTIFY_DONE;
 	}
 
-	regs = args->regs;
-
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	/*
-	 * Can't rely on the handled return value to say it was our NMI, two
-	 * events could trigger 'simultaneously' raising two back-to-back NMIs.
-	 *
-	 * If the first NMI handles both, the latter will be empty and daze
-	 * the CPU.
-	 */
-	x86_pmu.handle_irq(regs);
+
+	handled = x86_pmu.handle_irq(args->regs);
+	if (!handled)
+		return NOTIFY_DONE;
+
+	this_nmi = percpu_read(irq_stat.__nmi_count);
+	if ((handled > 1) ||
+		/* the next nmi could be a back-to-back nmi */
+	    ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
+	     (__get_cpu_var(pmu_nmi).handled > 1))) {
+		/*
+		 * We could have two subsequent back-to-back nmis: The
+		 * first handles more than one counter, the 2nd
+		 * handles only one counter and the 3rd handles no
+		 * counter.
+		 *
+		 * This is the 2nd nmi because the previous was
+		 * handling more than one counter. We will mark the
+		 * next (3rd) and then drop it if unhandled.
+		 */
+		__get_cpu_var(pmu_nmi).marked = this_nmi + 1;
+		__get_cpu_var(pmu_nmi).handled = handled;
+	}
 
 	return NOTIFY_STOP;
 }
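
The marking logic above is the subtle part of this merge: an NMI that serviced more than one event flags the *next* per-CPU NMI as a possible empty back-to-back NMI, and the DIE_NMIUNKNOWN path then swallows exactly that one. A minimal user-space model of the bookkeeping — all names here are illustrative stand-ins, not the kernel's per-CPU API — behaves like this:

/*
 * Minimal user-space model of the back-to-back-NMI accounting above.
 * The kernel keeps this state per-CPU and reads irq_stat.__nmi_count;
 * plain globals stand in for both here.
 */
#include <stdio.h>

struct pmu_nmi_state {
	unsigned int marked;	/* NMI number we expect to arrive empty */
	int handled;		/* events handled by the NMI that set the mark */
};

static struct pmu_nmi_state pmu_nmi;
static unsigned int nmi_count;	/* stands in for irq_stat.__nmi_count */

/* Returns 1 for NOTIFY_STOP (NMI consumed), 0 for NOTIFY_DONE. */
static int model_nmi(int handled)
{
	unsigned int this_nmi = ++nmi_count;

	if (!handled)	/* folds the DIE_NMI and DIE_NMIUNKNOWN passes into one step */
		return this_nmi == pmu_nmi.marked;

	if (handled > 1 ||
	    (pmu_nmi.marked == this_nmi && pmu_nmi.handled > 1)) {
		/*
		 * More than one counter was serviced (or we are the
		 * marked follower of such an NMI), so the next NMI
		 * may be an empty back-to-back one: mark it.
		 */
		pmu_nmi.marked = this_nmi + 1;
		pmu_nmi.handled = handled;
	}
	return 1;
}

int main(void)
{
	printf("%d\n", model_nmi(2));	/* two events in one NMI: marks NMI 2 */
	printf("%d\n", model_nmi(0));	/* empty back-to-back NMI: swallowed */
	printf("%d\n", model_nmi(0));	/* genuinely unknown NMI: passed on */
	return 0;
}

Compiled and run, this prints 1, 1, 0: the empty follower of a multi-event NMI is dropped, while a truly unknown NMI is still handed back to the kernel.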
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index d8d86d014008..ee05c90012d2 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -712,7 +712,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	struct perf_sample_data data;
 	struct cpu_hw_events *cpuc;
 	int bit, loops;
-	u64 ack, status;
+	u64 status;
+	int handled = 0;
 
 	perf_sample_data_init(&data, 0);
 
@@ -728,6 +729,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 
 	loops = 0;
 again:
+	intel_pmu_ack_status(status);
 	if (++loops > 100) {
 		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
 		perf_event_print_debug();
@@ -736,19 +738,22 @@ again:
 	}
 
 	inc_irq_stat(apic_perf_irqs);
-	ack = status;
 
 	intel_pmu_lbr_read();
 
 	/*
 	 * PEBS overflow sets bit 62 in the global status register
 	 */
-	if (__test_and_clear_bit(62, (unsigned long *)&status))
+	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
+		handled++;
 		x86_pmu.drain_pebs(regs);
+	}
 
 	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
 		struct perf_event *event = cpuc->events[bit];
 
+		handled++;
+
 		if (!test_bit(bit, cpuc->active_mask))
 			continue;
 
@@ -761,8 +766,6 @@ again:
 		x86_pmu_stop(event);
 	}
 
-	intel_pmu_ack_status(ack);
-
 	/*
 	 * Repeat if there is more work to be done:
 	 */
@@ -772,7 +775,7 @@ again:
 
 done:
 	intel_pmu_enable_all(0);
-	return 1;
+	return handled;
 }
 
 static struct event_constraint *
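
Two things change in intel_pmu_handle_irq(): the status bits are now acked right after they are read, so an overflow that races with the handler sets a fresh bit that survives into the re-read at the bottom of the loop instead of being cleared along with the old batch, and the function returns how many events it serviced rather than a hardcoded 1, which the notifier in perf_event.c needs for its handled > 1 test. A rough user-space sketch of that control flow — the "status register" and helper names are fakes for illustration, and the real handler also does per-counter sampling work elided here:

/*
 * User-space sketch of the reworked handler loop above.
 */
#include <stdio.h>

static unsigned long long fake_status;	/* models MSR_CORE_PERF_GLOBAL_STATUS */

static unsigned long long get_status(void) { return fake_status; }
static void ack_status(unsigned long long bits) { fake_status &= ~bits; }

static int handle_irq_model(void)
{
	unsigned long long status = get_status();
	int handled = 0, loops = 0;

	if (!status)
		return handled;
again:
	/*
	 * Ack right after reading: a counter that overflows from here
	 * on sets a fresh bit that the re-read below will see, rather
	 * than being cleared together with the bits already in hand.
	 */
	ack_status(status);
	if (++loops > 100)
		return handled;

	for (int bit = 0; bit < 64; bit++) {
		if (status & (1ULL << bit))
			handled++;	/* service counter 'bit' here */
	}

	status = get_status();	/* pick up overflows raised meanwhile */
	if (status)
		goto again;

	return handled;		/* the count, not a bare 1 */
}

int main(void)
{
	fake_status = 0x3;	/* two counters pending */
	printf("handled = %d\n", handle_irq_model());	/* prints 2 */
	return 0;
}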
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 7e578e9cc58b..b560db3305be 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -692,7 +692,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 		inc_irq_stat(apic_perf_irqs);
 	}
 
-	return handled > 0;
+	return handled;
 }
 
 /*
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index f6b48f6c5951..cfe4faabb0f6 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -568,8 +568,13 @@ static int __init init_sysfs(void)
 	int error;
 
 	error = sysdev_class_register(&oprofile_sysclass);
-	if (!error)
-		error = sysdev_register(&device_oprofile);
+	if (error)
+		return error;
+
+	error = sysdev_register(&device_oprofile);
+	if (error)
+		sysdev_class_unregister(&oprofile_sysclass);
+
 	return error;
 }
 
@@ -580,8 +585,10 @@ static void exit_sysfs(void)
 }
 
 #else
-#define init_sysfs() do { } while (0)
-#define exit_sysfs() do { } while (0)
+
+static inline int init_sysfs(void) { return 0; }
+static inline void exit_sysfs(void) { }
+
 #endif /* CONFIG_PM */
 
 static int __init p4_init(char **cpu_type)
@@ -695,6 +702,8 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 	char *cpu_type = NULL;
 	int ret = 0;
 
+	using_nmi = 0;
+
 	if (!cpu_has_apic)
 		return -ENODEV;
 
@@ -774,7 +783,10 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 
 	mux_init(ops);
 
-	init_sysfs();
+	ret = init_sysfs();
+	if (ret)
+		return ret;
+
 	using_nmi = 1;
 	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
 	return 0;
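
The oprofile changes follow a standard two-stage init pattern: if the second registration fails, the first must be unwound rather than leaked, and a failing init_sysfs() now propagates its error out of op_nmi_init(). That is also why the !CONFIG_PM stub becomes a static inline returning int — the old do { } while (0) macro could not appear on the right-hand side of ret = init_sysfs(). The shape of the pattern, with placeholder names rather than the real sysdev API, looks like:

/*
 * Generic register/unwind shape of the init_sysfs() fix above.
 * register_a/register_b/unregister_a are placeholders, not kernel API.
 */
#include <stdio.h>

static int register_a(void) { return 0; }	/* 0 on success, -errno on failure */
static int register_b(void) { return -1; }	/* simulate a failure */
static void unregister_a(void) { puts("unwound a"); }

static int init_pair(void)
{
	int error = register_a();
	if (error)
		return error;		/* nothing registered yet, nothing to undo */

	error = register_b();
	if (error)
		unregister_a();		/* don't leak the first registration */

	return error;
}

int main(void)
{
	printf("init_pair() = %d\n", init_pair());	/* unwinds a, returns -1 */
	return 0;
}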