 39 files changed, 741 insertions(+), 398 deletions(-)
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 5120bd44f69a..08460a2e9f41 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1287,7 +1287,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
         irq_exit();
 }
 
-void hw_perf_event_setup(int cpu)
+static void power_pmu_setup(int cpu)
 {
         struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
 
@@ -1297,6 +1297,23 @@ void hw_perf_event_setup(int cpu)
         cpuhw->mmcr[0] = MMCR0_FC;
 }
 
+static int __cpuinit
+power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+{
+        unsigned int cpu = (long)hcpu;
+
+        switch (action & ~CPU_TASKS_FROZEN) {
+        case CPU_UP_PREPARE:
+                power_pmu_setup(cpu);
+                break;
+
+        default:
+                break;
+        }
+
+        return NOTIFY_OK;
+}
+
 int register_power_pmu(struct power_pmu *pmu)
 {
         if (ppmu)
@@ -1314,5 +1331,7 @@ int register_power_pmu(struct power_pmu *pmu)
                 freeze_events_kernel = MMCR0_FCHV;
 #endif /* CONFIG_PPC64 */
 
+        perf_cpu_notifier(power_pmu_notifier);
+
         return 0;
 }
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 7ff0943e7a08..9f253e9cce01 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -275,13 +275,30 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
         return &pmu;
 }
 
-void hw_perf_event_setup(int cpu)
+static void sh_pmu_setup(int cpu)
 {
         struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
 
         memset(cpuhw, 0, sizeof(struct cpu_hw_events));
 }
 
+static int __cpuinit
+sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+{
+        unsigned int cpu = (long)hcpu;
+
+        switch (action & ~CPU_TASKS_FROZEN) {
+        case CPU_UP_PREPARE:
+                sh_pmu_setup(cpu);
+                break;
+
+        default:
+                break;
+        }
+
+        return NOTIFY_OK;
+}
+
 void hw_perf_enable(void)
 {
         if (!sh_pmu_initialized())
@@ -308,5 +325,6 @@ int register_sh_pmu(struct sh_pmu *pmu)
 
         WARN_ON(pmu->num_events > MAX_HWEVENTS);
 
+        perf_cpu_notifier(sh_pmu_notifier);
         return 0;
 }
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 42aafd11e170..60398a0d947c 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -133,8 +133,8 @@ struct x86_pmu {
         int (*handle_irq)(struct pt_regs *);
         void (*disable_all)(void);
         void (*enable_all)(void);
-        void (*enable)(struct hw_perf_event *, int);
-        void (*disable)(struct hw_perf_event *, int);
+        void (*enable)(struct perf_event *);
+        void (*disable)(struct perf_event *);
         unsigned eventsel;
         unsigned perfctr;
         u64 (*event_map)(int);
@@ -157,6 +157,11 @@ struct x86_pmu {
         void (*put_event_constraints)(struct cpu_hw_events *cpuc,
                                       struct perf_event *event);
         struct event_constraint *event_constraints;
+
+        void (*cpu_prepare)(int cpu);
+        void (*cpu_starting)(int cpu);
+        void (*cpu_dying)(int cpu);
+        void (*cpu_dead)(int cpu);
 };
 
 static struct x86_pmu x86_pmu __read_mostly;
@@ -165,8 +170,7 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
         .enabled = 1,
 };
 
-static int x86_perf_event_set_period(struct perf_event *event,
-                                     struct hw_perf_event *hwc, int idx);
+static int x86_perf_event_set_period(struct perf_event *event);
 
 /*
  * Generalized hw caching related hw_event table, filled
@@ -189,11 +193,12 @@ static u64 __read_mostly hw_cache_event_ids
  * Returns the delta events processed.
  */
 static u64
-x86_perf_event_update(struct perf_event *event,
-                      struct hw_perf_event *hwc, int idx)
+x86_perf_event_update(struct perf_event *event)
 {
+        struct hw_perf_event *hwc = &event->hw;
         int shift = 64 - x86_pmu.event_bits;
         u64 prev_raw_count, new_raw_count;
+        int idx = hwc->idx;
         s64 delta;
 
         if (idx == X86_PMC_IDX_FIXED_BTS)
@@ -293,7 +298,7 @@ static inline bool bts_available(void)
         return x86_pmu.enable_bts != NULL;
 }
 
-static inline void init_debug_store_on_cpu(int cpu)
+static void init_debug_store_on_cpu(int cpu)
 {
         struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
 
@@ -305,7 +310,7 @@ static inline void init_debug_store_on_cpu(int cpu)
                     (u32)((u64)(unsigned long)ds >> 32));
 }
 
-static inline void fini_debug_store_on_cpu(int cpu)
+static void fini_debug_store_on_cpu(int cpu)
 {
         if (!per_cpu(cpu_hw_events, cpu).ds)
                 return;
@@ -638,7 +643,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
                 if (test_bit(hwc->idx, used_mask))
                         break;
 
-                set_bit(hwc->idx, used_mask);
+                __set_bit(hwc->idx, used_mask);
                 if (assign)
                         assign[i] = hwc->idx;
         }
@@ -687,7 +692,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
                 if (j == X86_PMC_IDX_MAX)
                         break;
 
-                set_bit(j, used_mask);
+                __set_bit(j, used_mask);
 
                 if (assign)
                         assign[i] = j;
@@ -780,6 +785,7 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc,
                 hwc->last_tag == cpuc->tags[i];
 }
 
+static int x86_pmu_start(struct perf_event *event);
 static void x86_pmu_stop(struct perf_event *event);
 
 void hw_perf_enable(void)
@@ -796,6 +802,7 @@ void hw_perf_enable(void)
                 return;
 
         if (cpuc->n_added) {
+                int n_running = cpuc->n_events - cpuc->n_added;
                 /*
                  * apply assignment obtained either from
                  * hw_perf_group_sched_in() or x86_pmu_enable()
@@ -803,8 +810,7 @@ void hw_perf_enable(void)
                  * step1: save events moving to new counters
                  * step2: reprogram moved events into new counters
                  */
-                for (i = 0; i < cpuc->n_events; i++) {
-
+                for (i = 0; i < n_running; i++) {
                         event = cpuc->event_list[i];
                         hwc = &event->hw;
 
@@ -819,29 +825,18 @@ void hw_perf_enable(void)
                                 continue;
 
                         x86_pmu_stop(event);
-
-                        hwc->idx = -1;
                 }
 
                 for (i = 0; i < cpuc->n_events; i++) {
-
                         event = cpuc->event_list[i];
                         hwc = &event->hw;
 
-                        if (hwc->idx == -1) {
+                        if (!match_prev_assignment(hwc, cpuc, i))
                                 x86_assign_hw_event(event, cpuc, i);
-                                x86_perf_event_set_period(event, hwc, hwc->idx);
-                        }
-                        /*
-                         * need to mark as active because x86_pmu_disable()
-                         * clear active_mask and events[] yet it preserves
-                         * idx
-                         */
-                        set_bit(hwc->idx, cpuc->active_mask);
-                        cpuc->events[hwc->idx] = event;
+                        else if (i < n_running)
+                                continue;
 
-                        x86_pmu.enable(hwc, hwc->idx);
-                        perf_event_update_userpage(event);
+                        x86_pmu_start(event);
                 }
                 cpuc->n_added = 0;
                 perf_events_lapic_init();
@@ -853,15 +848,16 @@ void hw_perf_enable(void)
         x86_pmu.enable_all();
 }
 
-static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
 {
-        (void)checking_wrmsrl(hwc->config_base + idx,
+        (void)checking_wrmsrl(hwc->config_base + hwc->idx,
                               hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
-static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+static inline void x86_pmu_disable_event(struct perf_event *event)
 {
-        (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
+        struct hw_perf_event *hwc = &event->hw;
+        (void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config);
 }
 
 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -871,12 +867,12 @@ static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
  * To be called with the event disabled in hw:
  */
 static int
-x86_perf_event_set_period(struct perf_event *event,
-                          struct hw_perf_event *hwc, int idx)
+x86_perf_event_set_period(struct perf_event *event)
 {
+        struct hw_perf_event *hwc = &event->hw;
         s64 left = atomic64_read(&hwc->period_left);
         s64 period = hwc->sample_period;
-        int err, ret = 0;
+        int err, ret = 0, idx = hwc->idx;
 
         if (idx == X86_PMC_IDX_FIXED_BTS)
                 return 0;
@@ -922,11 +918,11 @@ x86_perf_event_set_period(struct perf_event *event,
         return ret;
 }
 
-static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void x86_pmu_enable_event(struct perf_event *event)
 {
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         if (cpuc->enabled)
-                __x86_pmu_enable_event(hwc, idx);
+                __x86_pmu_enable_event(&event->hw);
 }
 
 /*
@@ -962,34 +958,32 @@ static int x86_pmu_enable(struct perf_event *event)
         memcpy(cpuc->assign, assign, n*sizeof(int));
 
         cpuc->n_events = n;
-        cpuc->n_added = n - n0;
+        cpuc->n_added += n - n0;
 
         return 0;
 }
 
 static int x86_pmu_start(struct perf_event *event)
 {
-        struct hw_perf_event *hwc = &event->hw;
+        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        int idx = event->hw.idx;
 
-        if (hwc->idx == -1)
+        if (idx == -1)
                 return -EAGAIN;
 
-        x86_perf_event_set_period(event, hwc, hwc->idx);
-        x86_pmu.enable(hwc, hwc->idx);
+        x86_perf_event_set_period(event);
+        cpuc->events[idx] = event;
+        __set_bit(idx, cpuc->active_mask);
+        x86_pmu.enable(event);
+        perf_event_update_userpage(event);
 
         return 0;
 }
 
 static void x86_pmu_unthrottle(struct perf_event *event)
 {
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-        struct hw_perf_event *hwc = &event->hw;
-
-        if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
-                         cpuc->events[hwc->idx] != event))
-                return;
-
-        x86_pmu.enable(hwc, hwc->idx);
+        int ret = x86_pmu_start(event);
+        WARN_ON_ONCE(ret);
 }
 
 void perf_event_print_debug(void)
@@ -1049,18 +1043,16 @@ static void x86_pmu_stop(struct perf_event *event)
         struct hw_perf_event *hwc = &event->hw;
         int idx = hwc->idx;
 
-        /*
-         * Must be done before we disable, otherwise the nmi handler
-         * could reenable again:
-         */
-        clear_bit(idx, cpuc->active_mask);
-        x86_pmu.disable(hwc, idx);
+        if (!__test_and_clear_bit(idx, cpuc->active_mask))
+                return;
+
+        x86_pmu.disable(event);
 
         /*
          * Drain the remaining delta count out of a event
          * that we are disabling:
          */
-        x86_perf_event_update(event, hwc, idx);
+        x86_perf_event_update(event);
 
         cpuc->events[idx] = NULL;
 }
@@ -1108,7 +1100,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
                 event = cpuc->events[idx];
                 hwc = &event->hw;
 
-                val = x86_perf_event_update(event, hwc, idx);
+                val = x86_perf_event_update(event);
                 if (val & (1ULL << (x86_pmu.event_bits - 1)))
                         continue;
 
@@ -1118,11 +1110,11 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
                 handled = 1;
                 data.period = event->hw.last_period;
 
-                if (!x86_perf_event_set_period(event, hwc, idx))
+                if (!x86_perf_event_set_period(event))
                         continue;
 
                 if (perf_event_overflow(event, 1, &data, regs))
-                        x86_pmu.disable(hwc, idx);
+                        x86_pmu_stop(event);
         }
 
         if (handled)
@@ -1309,7 +1301,7 @@ int hw_perf_group_sched_in(struct perf_event *leader,
         memcpy(cpuc->assign, assign, n0*sizeof(int));
 
         cpuc->n_events = n0;
-        cpuc->n_added = n1;
+        cpuc->n_added += n1;
         ctx->nr_active += n1;
 
         /*
@@ -1337,6 +1329,39 @@ undo:
 #include "perf_event_p6.c"
 #include "perf_event_intel.c"
 
+static int __cpuinit
+x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+{
+        unsigned int cpu = (long)hcpu;
+
+        switch (action & ~CPU_TASKS_FROZEN) {
+        case CPU_UP_PREPARE:
+                if (x86_pmu.cpu_prepare)
+                        x86_pmu.cpu_prepare(cpu);
+                break;
+
+        case CPU_STARTING:
+                if (x86_pmu.cpu_starting)
+                        x86_pmu.cpu_starting(cpu);
+                break;
+
+        case CPU_DYING:
+                if (x86_pmu.cpu_dying)
+                        x86_pmu.cpu_dying(cpu);
+                break;
+
+        case CPU_DEAD:
+                if (x86_pmu.cpu_dead)
+                        x86_pmu.cpu_dead(cpu);
+                break;
+
+        default:
+                break;
+        }
+
+        return NOTIFY_OK;
+}
+
 static void __init pmu_check_apic(void)
 {
         if (cpu_has_apic)
@@ -1415,11 +1440,13 @@ void __init init_hw_perf_events(void)
         pr_info("... max period: %016Lx\n", x86_pmu.max_period);
         pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
         pr_info("... event mask: %016Lx\n", perf_event_mask);
+
+        perf_cpu_notifier(x86_pmu_notifier);
 }
 
 static inline void x86_pmu_read(struct perf_event *event)
 {
-        x86_perf_event_update(event, &event->hw, event->hw.idx);
+        x86_perf_event_update(event);
 }
 
 static const struct pmu pmu = {
@@ -1675,28 +1702,16 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
         return entry;
 }
 
-void hw_perf_event_setup_online(int cpu)
+#ifdef CONFIG_EVENT_TRACING
+void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
 {
-        init_debug_store_on_cpu(cpu);
-
-        switch (boot_cpu_data.x86_vendor) {
-        case X86_VENDOR_AMD:
-                amd_pmu_cpu_online(cpu);
-                break;
-        default:
-                return;
-        }
-}
-
-void hw_perf_event_setup_offline(int cpu)
-{
-        init_debug_store_on_cpu(cpu);
-
-        switch (boot_cpu_data.x86_vendor) {
-        case X86_VENDOR_AMD:
-                amd_pmu_cpu_offline(cpu);
-                break;
-        default:
-                return;
-        }
+        regs->ip = ip;
+        /*
+         * perf_arch_fetch_caller_regs adds another call, we need to increment
+         * the skip level
+         */
+        regs->bp = rewind_frame_pointer(skip + 1);
+        regs->cs = __KERNEL_CS;
+        local_save_flags(regs->flags);
 }
+#endif
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 8f3dbfda3c4f..573458f1caf2 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -271,28 +271,6 @@ done:
         return &emptyconstraint;
 }
 
-static __initconst struct x86_pmu amd_pmu = {
-        .name = "AMD",
-        .handle_irq = x86_pmu_handle_irq,
-        .disable_all = x86_pmu_disable_all,
-        .enable_all = x86_pmu_enable_all,
-        .enable = x86_pmu_enable_event,
-        .disable = x86_pmu_disable_event,
-        .eventsel = MSR_K7_EVNTSEL0,
-        .perfctr = MSR_K7_PERFCTR0,
-        .event_map = amd_pmu_event_map,
-        .raw_event = amd_pmu_raw_event,
-        .max_events = ARRAY_SIZE(amd_perfmon_event_map),
-        .num_events = 4,
-        .event_bits = 48,
-        .event_mask = (1ULL << 48) - 1,
-        .apic = 1,
-        /* use highest bit to detect overflow */
-        .max_period = (1ULL << 47) - 1,
-        .get_event_constraints = amd_get_event_constraints,
-        .put_event_constraints = amd_put_event_constraints
-};
-
 static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
 {
         struct amd_nb *nb;
@@ -309,7 +287,7 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
          * initialize all possible NB constraints
          */
         for (i = 0; i < x86_pmu.num_events; i++) {
-                set_bit(i, nb->event_constraints[i].idxmsk);
+                __set_bit(i, nb->event_constraints[i].idxmsk);
                 nb->event_constraints[i].weight = 1;
         }
         return nb;
@@ -378,6 +356,31 @@ static void amd_pmu_cpu_offline(int cpu)
         raw_spin_unlock(&amd_nb_lock);
 }
 
+static __initconst struct x86_pmu amd_pmu = {
+        .name = "AMD",
+        .handle_irq = x86_pmu_handle_irq,
+        .disable_all = x86_pmu_disable_all,
+        .enable_all = x86_pmu_enable_all,
+        .enable = x86_pmu_enable_event,
+        .disable = x86_pmu_disable_event,
+        .eventsel = MSR_K7_EVNTSEL0,
+        .perfctr = MSR_K7_PERFCTR0,
+        .event_map = amd_pmu_event_map,
+        .raw_event = amd_pmu_raw_event,
+        .max_events = ARRAY_SIZE(amd_perfmon_event_map),
+        .num_events = 4,
+        .event_bits = 48,
+        .event_mask = (1ULL << 48) - 1,
+        .apic = 1,
+        /* use highest bit to detect overflow */
+        .max_period = (1ULL << 47) - 1,
+        .get_event_constraints = amd_get_event_constraints,
+        .put_event_constraints = amd_put_event_constraints,
+
+        .cpu_prepare = amd_pmu_cpu_online,
+        .cpu_dead = amd_pmu_cpu_offline,
+};
+
 static __init int amd_pmu_init(void)
 {
         /* Performance-monitoring supported from K7 and later: */
@@ -390,11 +393,6 @@ static __init int amd_pmu_init(void)
         memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
                sizeof(hw_cache_event_ids));
 
-        /*
-         * explicitly initialize the boot cpu, other cpus will get
-         * the cpu hotplug callbacks from smp_init()
-         */
-        amd_pmu_cpu_online(smp_processor_id());
         return 0;
 }
 
@@ -405,12 +403,4 @@ static int amd_pmu_init(void)
         return 0;
 }
 
-static void amd_pmu_cpu_online(int cpu)
-{
-}
-
-static void amd_pmu_cpu_offline(int cpu)
-{
-}
-
 #endif
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 44b60c852107..84bfde64a337 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -548,9 +548,9 @@ static inline void intel_pmu_ack_status(u64 ack)
 }
 
 static inline void
-intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
+intel_pmu_disable_fixed(struct hw_perf_event *hwc)
 {
-        int idx = __idx - X86_PMC_IDX_FIXED;
+        int idx = hwc->idx - X86_PMC_IDX_FIXED;
         u64 ctrl_val, mask;
 
         mask = 0xfULL << (idx * 4);
@@ -621,26 +621,28 @@ static void intel_pmu_drain_bts_buffer(void)
 }
 
 static inline void
-intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+intel_pmu_disable_event(struct perf_event *event)
 {
-        if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+        struct hw_perf_event *hwc = &event->hw;
+
+        if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
                 intel_pmu_disable_bts();
                 intel_pmu_drain_bts_buffer();
                 return;
         }
 
         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
-                intel_pmu_disable_fixed(hwc, idx);
+                intel_pmu_disable_fixed(hwc);
                 return;
         }
 
-        x86_pmu_disable_event(hwc, idx);
+        x86_pmu_disable_event(event);
 }
 
 static inline void
-intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
+intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 {
-        int idx = __idx - X86_PMC_IDX_FIXED;
+        int idx = hwc->idx - X86_PMC_IDX_FIXED;
         u64 ctrl_val, bits, mask;
         int err;
 
@@ -670,9 +672,11 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
         err = checking_wrmsrl(hwc->config_base, ctrl_val);
 }
 
-static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void intel_pmu_enable_event(struct perf_event *event)
 {
-        if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+        struct hw_perf_event *hwc = &event->hw;
+
+        if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
                 if (!__get_cpu_var(cpu_hw_events).enabled)
                         return;
 
@@ -681,11 +685,11 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
         }
 
         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
-                intel_pmu_enable_fixed(hwc, idx);
+                intel_pmu_enable_fixed(hwc);
                 return;
         }
 
-        __x86_pmu_enable_event(hwc, idx);
+        __x86_pmu_enable_event(hwc);
 }
 
 /*
@@ -694,14 +698,8 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
  */
 static int intel_pmu_save_and_restart(struct perf_event *event)
 {
-        struct hw_perf_event *hwc = &event->hw;
-        int idx = hwc->idx;
-        int ret;
-
-        x86_perf_event_update(event, hwc, idx);
-        ret = x86_perf_event_set_period(event, hwc, idx);
-
-        return ret;
+        x86_perf_event_update(event);
+        return x86_perf_event_set_period(event);
 }
 
 static void intel_pmu_reset(void)
@@ -745,11 +743,11 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 
         cpuc = &__get_cpu_var(cpu_hw_events);
 
-        perf_disable();
+        intel_pmu_disable_all();
         intel_pmu_drain_bts_buffer();
         status = intel_pmu_get_status();
         if (!status) {
-                perf_enable();
+                intel_pmu_enable_all();
                 return 0;
         }
 
@@ -759,8 +757,7 @@ again:
                 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
                 perf_event_print_debug();
                 intel_pmu_reset();
-                perf_enable();
-                return 1;
+                goto done;
         }
 
         inc_irq_stat(apic_perf_irqs);
@@ -768,7 +765,6 @@ again:
         for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                 struct perf_event *event = cpuc->events[bit];
 
-                clear_bit(bit, (unsigned long *) &status);
                 if (!test_bit(bit, cpuc->active_mask))
                         continue;
 
@@ -778,7 +774,7 @@ again:
                 data.period = event->hw.last_period;
 
                 if (perf_event_overflow(event, 1, &data, regs))
-                        intel_pmu_disable_event(&event->hw, bit);
+                        x86_pmu_stop(event);
         }
 
         intel_pmu_ack_status(ack);
@@ -790,8 +786,8 @@ again:
         if (status)
                 goto again;
 
-        perf_enable();
-
+done:
+        intel_pmu_enable_all();
         return 1;
 }
 
@@ -870,7 +866,10 @@ static __initconst struct x86_pmu intel_pmu = {
         .max_period = (1ULL << 31) - 1,
         .enable_bts = intel_pmu_enable_bts,
         .disable_bts = intel_pmu_disable_bts,
-        .get_event_constraints = intel_get_event_constraints
+        .get_event_constraints = intel_get_event_constraints,
+
+        .cpu_starting = init_debug_store_on_cpu,
+        .cpu_dying = fini_debug_store_on_cpu,
 };
 
 static __init int intel_pmu_init(void)
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index a4e67b99d91c..a330485d14da 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -77,27 +77,29 @@ static void p6_pmu_enable_all(void)
 }
 
 static inline void
-p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+p6_pmu_disable_event(struct perf_event *event)
 {
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct hw_perf_event *hwc = &event->hw;
         u64 val = P6_NOP_EVENT;
 
         if (cpuc->enabled)
                 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-        (void)checking_wrmsrl(hwc->config_base + idx, val);
+        (void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
 }
 
-static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void p6_pmu_enable_event(struct perf_event *event)
 {
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct hw_perf_event *hwc = &event->hw;
         u64 val;
 
         val = hwc->config;
         if (cpuc->enabled)
                 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-        (void)checking_wrmsrl(hwc->config_base + idx, val);
+        (void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
 }
 
 static __initconst struct x86_pmu p6_pmu = {
diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
index 4fd1420faffa..29e5f7c845b2 100644
--- a/arch/x86/kernel/dumpstack.h
+++ b/arch/x86/kernel/dumpstack.h
@@ -29,4 +29,19 @@ struct stack_frame {
         struct stack_frame *next_frame;
         unsigned long return_address;
 };
+
+static inline unsigned long rewind_frame_pointer(int n)
+{
+        struct stack_frame *frame;
+
+        get_bp(frame);
+
+#ifdef CONFIG_FRAME_POINTER
+        while (n--)
+                frame = frame->next_frame;
 #endif
+
+        return (unsigned long)frame;
+}
+
+#endif /* DUMPSTACK_H */
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index d5e2a2ebb627..272c9f1f05f3 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -208,7 +208,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
                         if (ops->stack(data, "IRQ") < 0)
                                 break;
-                        bp = print_context_stack(tinfo, stack, bp,
+                        bp = ops->walk_stack(tinfo, stack, bp,
                                 ops, data, irq_stack_end, &graph);
                         /*
                          * We link to the next stack (which would be
@@ -229,7 +229,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
         /*
          * This handles the process stack:
          */
-        bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
+        bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
         put_cpu();
 }
 EXPORT_SYMBOL(dump_trace);
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 6b7c444ab8f6..c0f4b364c711 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -131,12 +131,12 @@ struct ftrace_event_call {
         void *mod;
         void *data;
 
-        int profile_count;
-        int (*profile_enable)(struct ftrace_event_call *);
-        void (*profile_disable)(struct ftrace_event_call *);
+        int perf_refcount;
+        int (*perf_event_enable)(struct ftrace_event_call *);
+        void (*perf_event_disable)(struct ftrace_event_call *);
 };
 
-#define FTRACE_MAX_PROFILE_SIZE 2048
+#define PERF_MAX_TRACE_SIZE 2048
 
 #define MAX_FILTER_PRED 32
 #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
@@ -187,22 +187,25 @@ do { \
 
 #ifdef CONFIG_PERF_EVENTS
 struct perf_event;
-extern int ftrace_profile_enable(int event_id);
-extern void ftrace_profile_disable(int event_id);
+
+DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
+
+extern int perf_trace_enable(int event_id);
+extern void perf_trace_disable(int event_id);
 extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
                                      char *filter_str);
 extern void ftrace_profile_free_filter(struct perf_event *event);
 extern void *
-ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp,
+perf_trace_buf_prepare(int size, unsigned short type, int *rctxp,
                         unsigned long *irq_flags);
 
 static inline void
-ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr,
-                       u64 count, unsigned long irq_flags)
+perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
+                      u64 count, unsigned long irq_flags, struct pt_regs *regs)
 {
         struct trace_entry *entry = raw_data;
 
-        perf_tp_event(entry->type, addr, count, raw_data, size);
+        perf_tp_event(entry->type, addr, count, raw_data, size, regs);
         perf_swevent_put_recursion_context(rctx);
         local_irq_restore(irq_flags);
 }
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 6f8cd7da1a01..95477038a72a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -452,6 +452,8 @@ enum perf_callchain_context {
 #include <linux/fs.h>
 #include <linux/pid_namespace.h>
 #include <linux/workqueue.h>
+#include <linux/ftrace.h>
+#include <linux/cpu.h>
 #include <asm/atomic.h>
 
 #define PERF_MAX_STACK_DEPTH 255
@@ -847,6 +849,44 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
                 __perf_sw_event(event_id, nr, nmi, regs, addr);
 }
 
+extern void
+perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
+
+/*
+ * Take a snapshot of the regs. Skip ip and frame pointer to
+ * the nth caller. We only need a few of the regs:
+ * - ip for PERF_SAMPLE_IP
+ * - cs for user_mode() tests
+ * - bp for callchains
+ * - eflags, for future purposes, just in case
+ */
+static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
+{
+        unsigned long ip;
+
+        memset(regs, 0, sizeof(*regs));
+
+        switch (skip) {
+        case 1 :
+                ip = CALLER_ADDR0;
+                break;
+        case 2 :
+                ip = CALLER_ADDR1;
+                break;
+        case 3 :
+                ip = CALLER_ADDR2;
+                break;
+        case 4:
+                ip = CALLER_ADDR3;
+                break;
+        /* No need to support further for now */
+        default:
+                ip = 0;
+        }
+
+        return perf_arch_fetch_caller_regs(regs, ip, skip);
+}
+
 extern void __perf_event_mmap(struct vm_area_struct *vma);
 
 static inline void perf_event_mmap(struct vm_area_struct *vma)
@@ -880,7 +920,8 @@ static inline bool perf_paranoid_kernel(void)
 }
 
 extern void perf_event_init(void);
-extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size);
+extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
+                          int entry_size, struct pt_regs *regs);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
@@ -936,5 +977,21 @@ static inline void perf_event_disable(struct perf_event *event) { }
 #define perf_output_put(handle, x) \
         perf_output_copy((handle), &(x), sizeof(x))
 
+/*
+ * This has to have a higher priority than migration_notifier in sched.c.
+ */
+#define perf_cpu_notifier(fn) \
+do { \
+        static struct notifier_block fn##_nb __cpuinitdata = \
+                { .notifier_call = fn, .priority = 20 }; \
+        fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
+                (void *)(unsigned long)smp_processor_id()); \
+        fn(&fn##_nb, (unsigned long)CPU_STARTING, \
+                (void *)(unsigned long)smp_processor_id()); \
+        fn(&fn##_nb, (unsigned long)CPU_ONLINE, \
+                (void *)(unsigned long)smp_processor_id()); \
+        register_cpu_notifier(&fn##_nb); \
+} while (0)
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_PERF_EVENT_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 44f2ad0e8825..f994ae58a002 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -105,18 +105,18 @@ struct perf_event_attr;
 
 #ifdef CONFIG_PERF_EVENTS
 
-#define TRACE_SYS_ENTER_PROFILE_INIT(sname) \
-        .profile_enable = prof_sysenter_enable, \
-        .profile_disable = prof_sysenter_disable,
+#define TRACE_SYS_ENTER_PERF_INIT(sname) \
+        .perf_event_enable = perf_sysenter_enable, \
+        .perf_event_disable = perf_sysenter_disable,
 
-#define TRACE_SYS_EXIT_PROFILE_INIT(sname) \
-        .profile_enable = prof_sysexit_enable, \
-        .profile_disable = prof_sysexit_disable,
+#define TRACE_SYS_EXIT_PERF_INIT(sname) \
+        .perf_event_enable = perf_sysexit_enable, \
+        .perf_event_disable = perf_sysexit_disable,
 #else
-#define TRACE_SYS_ENTER_PROFILE(sname)
-#define TRACE_SYS_ENTER_PROFILE_INIT(sname)
-#define TRACE_SYS_EXIT_PROFILE(sname)
-#define TRACE_SYS_EXIT_PROFILE_INIT(sname)
+#define TRACE_SYS_ENTER_PERF(sname)
+#define TRACE_SYS_ENTER_PERF_INIT(sname)
+#define TRACE_SYS_EXIT_PERF(sname)
+#define TRACE_SYS_EXIT_PERF_INIT(sname)
 #endif /* CONFIG_PERF_EVENTS */
 
 #ifdef CONFIG_FTRACE_SYSCALLS
@@ -153,7 +153,7 @@ struct perf_event_attr;
                 .regfunc = reg_event_syscall_enter, \
                 .unregfunc = unreg_event_syscall_enter, \
                 .data = (void *)&__syscall_meta_##sname,\
-                TRACE_SYS_ENTER_PROFILE_INIT(sname) \
+                TRACE_SYS_ENTER_PERF_INIT(sname) \
         }
 
 #define SYSCALL_TRACE_EXIT_EVENT(sname) \
@@ -175,7 +175,7 @@ struct perf_event_attr;
                 .regfunc = reg_event_syscall_exit, \
                 .unregfunc = unreg_event_syscall_exit, \
                 .data = (void *)&__syscall_meta_##sname,\
-                TRACE_SYS_EXIT_PROFILE_INIT(sname) \
+                TRACE_SYS_EXIT_PERF_INIT(sname) \
         }
 
 #define SYSCALL_METADATA(sname, nb) \
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 601ad7744247..ea6f9d4a20e9 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -401,18 +401,18 @@ static inline notrace int ftrace_get_offsets_##call( \
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args) \
         \
-static void ftrace_profile_##name(proto); \
+static void perf_trace_##name(proto); \
         \
 static notrace int \
-ftrace_profile_enable_##name(struct ftrace_event_call *unused) \
+perf_trace_enable_##name(struct ftrace_event_call *unused) \
 { \
-        return register_trace_##name(ftrace_profile_##name); \
+        return register_trace_##name(perf_trace_##name); \
 } \
         \
 static notrace void \
-ftrace_profile_disable_##name(struct ftrace_event_call *unused) \
+perf_trace_disable_##name(struct ftrace_event_call *unused) \
 { \
-        unregister_trace_##name(ftrace_profile_##name); \
+        unregister_trace_##name(perf_trace_##name); \
 }
 
 #undef DEFINE_EVENT_PRINT
@@ -507,12 +507,12 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused) \
 
 #ifdef CONFIG_PERF_EVENTS
 
-#define _TRACE_PROFILE_INIT(call) \
-        .profile_enable = ftrace_profile_enable_##call, \
-        .profile_disable = ftrace_profile_disable_##call,
+#define _TRACE_PERF_INIT(call) \
+        .perf_event_enable = perf_trace_enable_##call, \
+        .perf_event_disable = perf_trace_disable_##call,
 
 #else
-#define _TRACE_PROFILE_INIT(call)
+#define _TRACE_PERF_INIT(call)
 #endif /* CONFIG_PERF_EVENTS */
 
 #undef __entry
@@ -638,7 +638,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
         .unregfunc = ftrace_raw_unreg_event_##call, \
         .print_fmt = print_fmt_##template, \
         .define_fields = ftrace_define_fields_##template, \
-        _TRACE_PROFILE_INIT(call) \
+        _TRACE_PERF_INIT(call) \
 }
 
 #undef DEFINE_EVENT_PRINT
@@ -657,18 +657,18 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
         .unregfunc = ftrace_raw_unreg_event_##call, \
         .print_fmt = print_fmt_##call, \
         .define_fields = ftrace_define_fields_##template, \
-        _TRACE_PROFILE_INIT(call) \
+        _TRACE_PERF_INIT(call) \
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 /*
- * Define the insertion callback to profile events
+ * Define the insertion callback to perf events
  *
  * The job is very similar to ftrace_raw_event_<call> except that we don't
  * insert in the ring buffer but in a perf counter.
  *
- * static void ftrace_profile_<call>(proto)
+ * static void ftrace_perf_<call>(proto)
  * {
  *        struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *        struct ftrace_event_call *event_call = &event_<call>;
@@ -757,13 +757,14 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
 static notrace void \
-ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
+perf_trace_templ_##call(struct ftrace_event_call *event_call, \
                             proto) \
 { \
         struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
         struct ftrace_raw_##call *entry; \
         u64 __addr = 0, __count = 1; \
         unsigned long irq_flags; \
+        struct pt_regs *__regs; \
         int __entry_size; \
         int __data_size; \
         int rctx; \
@@ -773,10 +774,10 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
                              sizeof(u64)); \
         __entry_size -= sizeof(u32); \
         \
-        if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \
+        if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \
                       "profile buffer not large enough")) \
                 return; \
-        entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare( \
+        entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
                 __entry_size, event_call->id, &rctx, &irq_flags); \
         if (!entry) \
                 return; \
@@ -784,17 +785,20 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
         \
         { assign; } \
         \
-        ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr, \
-                               __count, irq_flags); \
+        __regs = &__get_cpu_var(perf_trace_regs); \
+        perf_fetch_caller_regs(__regs, 2); \
+        \
+        perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
+                               __count, irq_flags, __regs); \
 }
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args) \
-static notrace void ftrace_profile_##call(proto) \
+static notrace void perf_trace_##call(proto) \
 { \
         struct ftrace_event_call *event_call = &event_##call; \
         \
-        ftrace_profile_templ_##template(event_call, args); \
+        perf_trace_templ_##template(event_call, args); \
 }
 
 #undef DEFINE_EVENT_PRINT
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 0387100752f0..e5e5f48dbfb3 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -47,10 +47,10 @@ enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
 #endif
 
 #ifdef CONFIG_PERF_EVENTS
-int prof_sysenter_enable(struct ftrace_event_call *call);
-void prof_sysenter_disable(struct ftrace_event_call *call);
-int prof_sysexit_enable(struct ftrace_event_call *call);
-void prof_sysexit_disable(struct ftrace_event_call *call);
+int perf_sysenter_enable(struct ftrace_event_call *call);
+void perf_sysenter_disable(struct ftrace_event_call *call);
+int perf_sysexit_enable(struct ftrace_event_call *call);
+void perf_sysexit_disable(struct ftrace_event_call *call);
 #endif
 
 #endif /* _TRACE_SYSCALL_H */
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index fa034d29cf73..0ed46f3e51e9 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -259,7 +259,8 @@ static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
         struct kprobe_insn_page *kip;
 
         list_for_each_entry(kip, &c->pages, list) {
-                long idx = ((long)slot - (long)kip->insns) / c->insn_size;
+                long idx = ((long)slot - (long)kip->insns) /
+                                        (c->insn_size * sizeof(kprobe_opcode_t));
                 if (idx >= 0 && idx < slots_per_page(c)) {
                         WARN_ON(kip->slot_used[idx] != SLOT_USED);
                         if (dirty) {
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 681bc2e1e187..c927a549db2c 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
@@ -3211,8 +3211,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
3211 | { | 3211 | { |
3212 | unsigned long flags; | 3212 | unsigned long flags; |
3213 | 3213 | ||
3214 | trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); | ||
3215 | |||
3216 | if (unlikely(current->lockdep_recursion)) | 3214 | if (unlikely(current->lockdep_recursion)) |
3217 | return; | 3215 | return; |
3218 | 3216 | ||
@@ -3220,6 +3218,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
3220 | check_flags(flags); | 3218 | check_flags(flags); |
3221 | 3219 | ||
3222 | current->lockdep_recursion = 1; | 3220 | current->lockdep_recursion = 1; |
3221 | trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); | ||
3223 | __lock_acquire(lock, subclass, trylock, read, check, | 3222 | __lock_acquire(lock, subclass, trylock, read, check, |
3224 | irqs_disabled_flags(flags), nest_lock, ip, 0); | 3223 | irqs_disabled_flags(flags), nest_lock, ip, 0); |
3225 | current->lockdep_recursion = 0; | 3224 | current->lockdep_recursion = 0; |
@@ -3232,14 +3231,13 @@ void lock_release(struct lockdep_map *lock, int nested, | |||
3232 | { | 3231 | { |
3233 | unsigned long flags; | 3232 | unsigned long flags; |
3234 | 3233 | ||
3235 | trace_lock_release(lock, nested, ip); | ||
3236 | |||
3237 | if (unlikely(current->lockdep_recursion)) | 3234 | if (unlikely(current->lockdep_recursion)) |
3238 | return; | 3235 | return; |
3239 | 3236 | ||
3240 | raw_local_irq_save(flags); | 3237 | raw_local_irq_save(flags); |
3241 | check_flags(flags); | 3238 | check_flags(flags); |
3242 | current->lockdep_recursion = 1; | 3239 | current->lockdep_recursion = 1; |
3240 | trace_lock_release(lock, nested, ip); | ||
3243 | __lock_release(lock, nested, ip); | 3241 | __lock_release(lock, nested, ip); |
3244 | current->lockdep_recursion = 0; | 3242 | current->lockdep_recursion = 0; |
3245 | raw_local_irq_restore(flags); | 3243 | raw_local_irq_restore(flags); |
@@ -3413,8 +3411,6 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip) | |||
3413 | { | 3411 | { |
3414 | unsigned long flags; | 3412 | unsigned long flags; |
3415 | 3413 | ||
3416 | trace_lock_contended(lock, ip); | ||
3417 | |||
3418 | if (unlikely(!lock_stat)) | 3414 | if (unlikely(!lock_stat)) |
3419 | return; | 3415 | return; |
3420 | 3416 | ||
@@ -3424,6 +3420,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip) | |||
3424 | raw_local_irq_save(flags); | 3420 | raw_local_irq_save(flags); |
3425 | check_flags(flags); | 3421 | check_flags(flags); |
3426 | current->lockdep_recursion = 1; | 3422 | current->lockdep_recursion = 1; |
3423 | trace_lock_contended(lock, ip); | ||
3427 | __lock_contended(lock, ip); | 3424 | __lock_contended(lock, ip); |
3428 | current->lockdep_recursion = 0; | 3425 | current->lockdep_recursion = 0; |
3429 | raw_local_irq_restore(flags); | 3426 | raw_local_irq_restore(flags); |
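The lockdep hunks move the lock_acquire/lock_release/lock_contended tracepoints inside the lockdep_recursion guard. Tracepoint consumers (perf, for instance) may themselves take locks; with the guard already set, that work can no longer recurse back into lockdep. Condensed shape of the resulting acquire path (same code as above, comments added for illustration):

	raw_local_irq_save(flags);
	check_flags(flags);

	current->lockdep_recursion = 1;
	/* guard is up, so anything the tracepoint triggers (e.g. perf
	 * taking locks to write a sample) cannot re-enter lockdep */
	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
	__lock_acquire(lock, subclass, trylock, read, check,
		       irqs_disabled_flags(flags), nest_lock, ip, 0);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);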
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 4393b9e73740..574ee58a3046 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
@@ -81,10 +81,6 @@ extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
81 | void __weak hw_perf_disable(void) { barrier(); } | 81 | void __weak hw_perf_disable(void) { barrier(); } |
82 | void __weak hw_perf_enable(void) { barrier(); } | 82 | void __weak hw_perf_enable(void) { barrier(); } |
83 | 83 | ||
84 | void __weak hw_perf_event_setup(int cpu) { barrier(); } | ||
85 | void __weak hw_perf_event_setup_online(int cpu) { barrier(); } | ||
86 | void __weak hw_perf_event_setup_offline(int cpu) { barrier(); } | ||
87 | |||
88 | int __weak | 84 | int __weak |
89 | hw_perf_group_sched_in(struct perf_event *group_leader, | 85 | hw_perf_group_sched_in(struct perf_event *group_leader, |
90 | struct perf_cpu_context *cpuctx, | 86 | struct perf_cpu_context *cpuctx, |
@@ -97,25 +93,15 @@ void __weak perf_event_print_debug(void) { } | |||
97 | 93 | ||
98 | static DEFINE_PER_CPU(int, perf_disable_count); | 94 | static DEFINE_PER_CPU(int, perf_disable_count); |
99 | 95 | ||
100 | void __perf_disable(void) | ||
101 | { | ||
102 | __get_cpu_var(perf_disable_count)++; | ||
103 | } | ||
104 | |||
105 | bool __perf_enable(void) | ||
106 | { | ||
107 | return !--__get_cpu_var(perf_disable_count); | ||
108 | } | ||
109 | |||
110 | void perf_disable(void) | 96 | void perf_disable(void) |
111 | { | 97 | { |
112 | __perf_disable(); | 98 | if (!__get_cpu_var(perf_disable_count)++) |
113 | hw_perf_disable(); | 99 | hw_perf_disable(); |
114 | } | 100 | } |
115 | 101 | ||
116 | void perf_enable(void) | 102 | void perf_enable(void) |
117 | { | 103 | { |
118 | if (__perf_enable()) | 104 | if (!--__get_cpu_var(perf_disable_count)) |
119 | hw_perf_enable(); | 105 | hw_perf_enable(); |
120 | } | 106 | } |
121 | 107 | ||
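With the per-cpu counter folded into perf_disable()/perf_enable(), the calls nest and only the outermost transition touches the PMU. Illustrative sequence (not from the patch):

	perf_disable();		/* count 0 -> 1: hw_perf_disable() runs      */
	perf_disable();		/* count 1 -> 2: nested, hardware untouched  */
	/* ... critical section ... */
	perf_enable();		/* count 2 -> 1: PMU stays disabled          */
	perf_enable();		/* count 1 -> 0: hw_perf_enable() runs       */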
@@ -1538,12 +1524,15 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) | |||
1538 | */ | 1524 | */ |
1539 | if (interrupts == MAX_INTERRUPTS) { | 1525 | if (interrupts == MAX_INTERRUPTS) { |
1540 | perf_log_throttle(event, 1); | 1526 | perf_log_throttle(event, 1); |
1527 | perf_disable(); | ||
1541 | event->pmu->unthrottle(event); | 1528 | event->pmu->unthrottle(event); |
1529 | perf_enable(); | ||
1542 | } | 1530 | } |
1543 | 1531 | ||
1544 | if (!event->attr.freq || !event->attr.sample_freq) | 1532 | if (!event->attr.freq || !event->attr.sample_freq) |
1545 | continue; | 1533 | continue; |
1546 | 1534 | ||
1535 | perf_disable(); | ||
1547 | event->pmu->read(event); | 1536 | event->pmu->read(event); |
1548 | now = atomic64_read(&event->count); | 1537 | now = atomic64_read(&event->count); |
1549 | delta = now - hwc->freq_count_stamp; | 1538 | delta = now - hwc->freq_count_stamp; |
@@ -1551,6 +1540,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) | |||
1551 | 1540 | ||
1552 | if (delta > 0) | 1541 | if (delta > 0) |
1553 | perf_adjust_period(event, TICK_NSEC, delta); | 1542 | perf_adjust_period(event, TICK_NSEC, delta); |
1543 | perf_enable(); | ||
1554 | } | 1544 | } |
1555 | raw_spin_unlock(&ctx->lock); | 1545 | raw_spin_unlock(&ctx->lock); |
1556 | } | 1546 | } |
@@ -1560,9 +1550,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) | |||
1560 | */ | 1550 | */ |
1561 | static void rotate_ctx(struct perf_event_context *ctx) | 1551 | static void rotate_ctx(struct perf_event_context *ctx) |
1562 | { | 1552 | { |
1563 | if (!ctx->nr_events) | ||
1564 | return; | ||
1565 | |||
1566 | raw_spin_lock(&ctx->lock); | 1553 | raw_spin_lock(&ctx->lock); |
1567 | 1554 | ||
1568 | /* Rotate the first entry last of non-pinned groups */ | 1555 | /* Rotate the first entry last of non-pinned groups */ |
@@ -1575,19 +1562,28 @@ void perf_event_task_tick(struct task_struct *curr) | |||
1575 | { | 1562 | { |
1576 | struct perf_cpu_context *cpuctx; | 1563 | struct perf_cpu_context *cpuctx; |
1577 | struct perf_event_context *ctx; | 1564 | struct perf_event_context *ctx; |
1565 | int rotate = 0; | ||
1578 | 1566 | ||
1579 | if (!atomic_read(&nr_events)) | 1567 | if (!atomic_read(&nr_events)) |
1580 | return; | 1568 | return; |
1581 | 1569 | ||
1582 | cpuctx = &__get_cpu_var(perf_cpu_context); | 1570 | cpuctx = &__get_cpu_var(perf_cpu_context); |
1583 | ctx = curr->perf_event_ctxp; | 1571 | if (cpuctx->ctx.nr_events && |
1572 | cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) | ||
1573 | rotate = 1; | ||
1584 | 1574 | ||
1585 | perf_disable(); | 1575 | ctx = curr->perf_event_ctxp; |
1576 | if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active) | ||
1577 | rotate = 1; | ||
1586 | 1578 | ||
1587 | perf_ctx_adjust_freq(&cpuctx->ctx); | 1579 | perf_ctx_adjust_freq(&cpuctx->ctx); |
1588 | if (ctx) | 1580 | if (ctx) |
1589 | perf_ctx_adjust_freq(ctx); | 1581 | perf_ctx_adjust_freq(ctx); |
1590 | 1582 | ||
1583 | if (!rotate) | ||
1584 | return; | ||
1585 | |||
1586 | perf_disable(); | ||
1591 | cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); | 1587 | cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); |
1592 | if (ctx) | 1588 | if (ctx) |
1593 | task_ctx_sched_out(ctx, EVENT_FLEXIBLE); | 1589 | task_ctx_sched_out(ctx, EVENT_FLEXIBLE); |
@@ -1599,7 +1595,6 @@ void perf_event_task_tick(struct task_struct *curr) | |||
1599 | cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE); | 1595 | cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE); |
1600 | if (ctx) | 1596 | if (ctx) |
1601 | task_ctx_sched_in(curr, EVENT_FLEXIBLE); | 1597 | task_ctx_sched_in(curr, EVENT_FLEXIBLE); |
1602 | |||
1603 | perf_enable(); | 1598 | perf_enable(); |
1604 | } | 1599 | } |
1605 | 1600 | ||
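perf_event_task_tick() now decides up front whether rotating can change anything: a context only benefits when it has more events than could be scheduled in at once, so the disable/sched_out/rotate/sched_in/enable sequence is skipped otherwise. The test, written out as a standalone predicate (illustrative helper, not part of the patch):

	static int ctx_needs_rotation(struct perf_event_context *ctx)
	{
		/* more events than are currently active: rotating changes the mix */
		return ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active;
	}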
@@ -2791,6 +2786,13 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | |||
2791 | return NULL; | 2786 | return NULL; |
2792 | } | 2787 | } |
2793 | 2788 | ||
2789 | #ifdef CONFIG_EVENT_TRACING | ||
2790 | __weak | ||
2791 | void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) | ||
2792 | { | ||
2793 | } | ||
2794 | #endif | ||
2795 | |||
2794 | /* | 2796 | /* |
2795 | * Output | 2797 | * Output |
2796 | */ | 2798 | */ |
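The new __weak perf_arch_fetch_caller_regs() stub lets an architecture capture a register snapshot at the tracepoint call site. A deliberately minimal, hypothetical override (register names assume x86-64; the real per-arch code is not part of this hunk and would typically also record stack and frame pointers):

	void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
	{
		memset(regs, 0, sizeof(*regs));
		regs->ip = ip;	/* report the instrumented caller, not the glue */
		(void)skip;	/* frames of tracing glue the arch should unwind past */
	}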
@@ -4318,9 +4320,8 @@ static const struct pmu perf_ops_task_clock = { | |||
4318 | #ifdef CONFIG_EVENT_TRACING | 4320 | #ifdef CONFIG_EVENT_TRACING |
4319 | 4321 | ||
4320 | void perf_tp_event(int event_id, u64 addr, u64 count, void *record, | 4322 | void perf_tp_event(int event_id, u64 addr, u64 count, void *record, |
4321 | int entry_size) | 4323 | int entry_size, struct pt_regs *regs) |
4322 | { | 4324 | { |
4323 | struct pt_regs *regs = get_irq_regs(); | ||
4324 | struct perf_sample_data data; | 4325 | struct perf_sample_data data; |
4325 | struct perf_raw_record raw = { | 4326 | struct perf_raw_record raw = { |
4326 | .size = entry_size, | 4327 | .size = entry_size, |
@@ -4330,12 +4331,9 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record, | |||
4330 | perf_sample_data_init(&data, addr); | 4331 | perf_sample_data_init(&data, addr); |
4331 | data.raw = &raw; | 4332 | data.raw = &raw; |
4332 | 4333 | ||
4333 | if (!regs) | ||
4334 | regs = task_pt_regs(current); | ||
4335 | |||
4336 | /* Trace events already protected against recursion */ | 4334 | /* Trace events already protected against recursion */ |
4337 | do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, | 4335 | do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, |
4338 | &data, regs); | 4336 | &data, regs); |
4339 | } | 4337 | } |
4340 | EXPORT_SYMBOL_GPL(perf_tp_event); | 4338 | EXPORT_SYMBOL_GPL(perf_tp_event); |
4341 | 4339 | ||
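perf_tp_event() now takes the pt_regs captured at the tracepoint instead of guessing with get_irq_regs()/task_pt_regs(). The perf_trace_buf_submit() helper used by the tracers is presumably a thin wrapper that forwards them; roughly (reconstructed from the call sites, not quoted from the header):

	static inline void
	perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
			      u64 count, unsigned long irq_flags,
			      struct pt_regs *regs)
	{
		struct trace_entry *entry = raw_data;

		perf_tp_event(entry->type, addr, count, raw_data, size, regs);
		perf_swevent_put_recursion_context(rctx);
		local_irq_restore(irq_flags);
	}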
@@ -4351,7 +4349,7 @@ static int perf_tp_event_match(struct perf_event *event, | |||
4351 | 4349 | ||
4352 | static void tp_perf_event_destroy(struct perf_event *event) | 4350 | static void tp_perf_event_destroy(struct perf_event *event) |
4353 | { | 4351 | { |
4354 | ftrace_profile_disable(event->attr.config); | 4352 | perf_trace_disable(event->attr.config); |
4355 | } | 4353 | } |
4356 | 4354 | ||
4357 | static const struct pmu *tp_perf_event_init(struct perf_event *event) | 4355 | static const struct pmu *tp_perf_event_init(struct perf_event *event) |
@@ -4365,7 +4363,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event) | |||
4365 | !capable(CAP_SYS_ADMIN)) | 4363 | !capable(CAP_SYS_ADMIN)) |
4366 | return ERR_PTR(-EPERM); | 4364 | return ERR_PTR(-EPERM); |
4367 | 4365 | ||
4368 | if (ftrace_profile_enable(event->attr.config)) | 4366 | if (perf_trace_enable(event->attr.config)) |
4369 | return NULL; | 4367 | return NULL; |
4370 | 4368 | ||
4371 | event->destroy = tp_perf_event_destroy; | 4369 | event->destroy = tp_perf_event_destroy; |
@@ -5372,18 +5370,26 @@ int perf_event_init_task(struct task_struct *child) | |||
5372 | return ret; | 5370 | return ret; |
5373 | } | 5371 | } |
5374 | 5372 | ||
5373 | static void __init perf_event_init_all_cpus(void) | ||
5374 | { | ||
5375 | int cpu; | ||
5376 | struct perf_cpu_context *cpuctx; | ||
5377 | |||
5378 | for_each_possible_cpu(cpu) { | ||
5379 | cpuctx = &per_cpu(perf_cpu_context, cpu); | ||
5380 | __perf_event_init_context(&cpuctx->ctx, NULL); | ||
5381 | } | ||
5382 | } | ||
5383 | |||
5375 | static void __cpuinit perf_event_init_cpu(int cpu) | 5384 | static void __cpuinit perf_event_init_cpu(int cpu) |
5376 | { | 5385 | { |
5377 | struct perf_cpu_context *cpuctx; | 5386 | struct perf_cpu_context *cpuctx; |
5378 | 5387 | ||
5379 | cpuctx = &per_cpu(perf_cpu_context, cpu); | 5388 | cpuctx = &per_cpu(perf_cpu_context, cpu); |
5380 | __perf_event_init_context(&cpuctx->ctx, NULL); | ||
5381 | 5389 | ||
5382 | spin_lock(&perf_resource_lock); | 5390 | spin_lock(&perf_resource_lock); |
5383 | cpuctx->max_pertask = perf_max_events - perf_reserved_percpu; | 5391 | cpuctx->max_pertask = perf_max_events - perf_reserved_percpu; |
5384 | spin_unlock(&perf_resource_lock); | 5392 | spin_unlock(&perf_resource_lock); |
5385 | |||
5386 | hw_perf_event_setup(cpu); | ||
5387 | } | 5393 | } |
5388 | 5394 | ||
5389 | #ifdef CONFIG_HOTPLUG_CPU | 5395 | #ifdef CONFIG_HOTPLUG_CPU |
@@ -5423,20 +5429,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) | |||
5423 | perf_event_init_cpu(cpu); | 5429 | perf_event_init_cpu(cpu); |
5424 | break; | 5430 | break; |
5425 | 5431 | ||
5426 | case CPU_ONLINE: | ||
5427 | case CPU_ONLINE_FROZEN: | ||
5428 | hw_perf_event_setup_online(cpu); | ||
5429 | break; | ||
5430 | |||
5431 | case CPU_DOWN_PREPARE: | 5432 | case CPU_DOWN_PREPARE: |
5432 | case CPU_DOWN_PREPARE_FROZEN: | 5433 | case CPU_DOWN_PREPARE_FROZEN: |
5433 | perf_event_exit_cpu(cpu); | 5434 | perf_event_exit_cpu(cpu); |
5434 | break; | 5435 | break; |
5435 | 5436 | ||
5436 | case CPU_DEAD: | ||
5437 | hw_perf_event_setup_offline(cpu); | ||
5438 | break; | ||
5439 | |||
5440 | default: | 5437 | default: |
5441 | break; | 5438 | break; |
5442 | } | 5439 | } |
@@ -5454,6 +5451,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = { | |||
5454 | 5451 | ||
5455 | void __init perf_event_init(void) | 5452 | void __init perf_event_init(void) |
5456 | { | 5453 | { |
5454 | perf_event_init_all_cpus(); | ||
5457 | perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, | 5455 | perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, |
5458 | (void *)(long)smp_processor_id()); | 5456 | (void *)(long)smp_processor_id()); |
5459 | perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE, | 5457 | perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE, |
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index d00c6fe23f54..78edc6490038 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile | |||
@@ -52,7 +52,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events.o | |||
52 | obj-$(CONFIG_EVENT_TRACING) += trace_export.o | 52 | obj-$(CONFIG_EVENT_TRACING) += trace_export.o |
53 | obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o | 53 | obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o |
54 | ifeq ($(CONFIG_PERF_EVENTS),y) | 54 | ifeq ($(CONFIG_PERF_EVENTS),y) |
55 | obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o | 55 | obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o |
56 | endif | 56 | endif |
57 | obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o | 57 | obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o |
58 | obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o | 58 | obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o |
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_perf.c index c1cc3ab633de..81f691eb3a30 100644 --- a/kernel/trace/trace_event_profile.c +++ b/kernel/trace/trace_event_perf.c | |||
@@ -1,32 +1,36 @@ | |||
1 | /* | 1 | /* |
2 | * trace event based perf counter profiling | 2 | * trace event based perf event profiling/tracing |
3 | * | 3 | * |
4 | * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com> | 4 | * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com> |
5 | * | 5 | * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com> |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | #include <linux/kprobes.h> | 9 | #include <linux/kprobes.h> |
10 | #include "trace.h" | 10 | #include "trace.h" |
11 | 11 | ||
12 | DEFINE_PER_CPU(struct pt_regs, perf_trace_regs); | ||
13 | EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs); | ||
14 | |||
15 | EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs); | ||
12 | 16 | ||
13 | static char *perf_trace_buf; | 17 | static char *perf_trace_buf; |
14 | static char *perf_trace_buf_nmi; | 18 | static char *perf_trace_buf_nmi; |
15 | 19 | ||
16 | typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ; | 20 | typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ; |
17 | 21 | ||
18 | /* Count the events in use (per event id, not per instance) */ | 22 | /* Count the events in use (per event id, not per instance) */ |
19 | static int total_profile_count; | 23 | static int total_ref_count; |
20 | 24 | ||
21 | static int ftrace_profile_enable_event(struct ftrace_event_call *event) | 25 | static int perf_trace_event_enable(struct ftrace_event_call *event) |
22 | { | 26 | { |
23 | char *buf; | 27 | char *buf; |
24 | int ret = -ENOMEM; | 28 | int ret = -ENOMEM; |
25 | 29 | ||
26 | if (event->profile_count++ > 0) | 30 | if (event->perf_refcount++ > 0) |
27 | return 0; | 31 | return 0; |
28 | 32 | ||
29 | if (!total_profile_count) { | 33 | if (!total_ref_count) { |
30 | buf = (char *)alloc_percpu(perf_trace_t); | 34 | buf = (char *)alloc_percpu(perf_trace_t); |
31 | if (!buf) | 35 | if (!buf) |
32 | goto fail_buf; | 36 | goto fail_buf; |
@@ -40,35 +44,35 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event) | |||
40 | rcu_assign_pointer(perf_trace_buf_nmi, buf); | 44 | rcu_assign_pointer(perf_trace_buf_nmi, buf); |
41 | } | 45 | } |
42 | 46 | ||
43 | ret = event->profile_enable(event); | 47 | ret = event->perf_event_enable(event); |
44 | if (!ret) { | 48 | if (!ret) { |
45 | total_profile_count++; | 49 | total_ref_count++; |
46 | return 0; | 50 | return 0; |
47 | } | 51 | } |
48 | 52 | ||
49 | fail_buf_nmi: | 53 | fail_buf_nmi: |
50 | if (!total_profile_count) { | 54 | if (!total_ref_count) { |
51 | free_percpu(perf_trace_buf_nmi); | 55 | free_percpu(perf_trace_buf_nmi); |
52 | free_percpu(perf_trace_buf); | 56 | free_percpu(perf_trace_buf); |
53 | perf_trace_buf_nmi = NULL; | 57 | perf_trace_buf_nmi = NULL; |
54 | perf_trace_buf = NULL; | 58 | perf_trace_buf = NULL; |
55 | } | 59 | } |
56 | fail_buf: | 60 | fail_buf: |
57 | event->profile_count--; | 61 | event->perf_refcount--; |
58 | 62 | ||
59 | return ret; | 63 | return ret; |
60 | } | 64 | } |
61 | 65 | ||
62 | int ftrace_profile_enable(int event_id) | 66 | int perf_trace_enable(int event_id) |
63 | { | 67 | { |
64 | struct ftrace_event_call *event; | 68 | struct ftrace_event_call *event; |
65 | int ret = -EINVAL; | 69 | int ret = -EINVAL; |
66 | 70 | ||
67 | mutex_lock(&event_mutex); | 71 | mutex_lock(&event_mutex); |
68 | list_for_each_entry(event, &ftrace_events, list) { | 72 | list_for_each_entry(event, &ftrace_events, list) { |
69 | if (event->id == event_id && event->profile_enable && | 73 | if (event->id == event_id && event->perf_event_enable && |
70 | try_module_get(event->mod)) { | 74 | try_module_get(event->mod)) { |
71 | ret = ftrace_profile_enable_event(event); | 75 | ret = perf_trace_event_enable(event); |
72 | break; | 76 | break; |
73 | } | 77 | } |
74 | } | 78 | } |
@@ -77,16 +81,16 @@ int ftrace_profile_enable(int event_id) | |||
77 | return ret; | 81 | return ret; |
78 | } | 82 | } |
79 | 83 | ||
80 | static void ftrace_profile_disable_event(struct ftrace_event_call *event) | 84 | static void perf_trace_event_disable(struct ftrace_event_call *event) |
81 | { | 85 | { |
82 | char *buf, *nmi_buf; | 86 | char *buf, *nmi_buf; |
83 | 87 | ||
84 | if (--event->profile_count > 0) | 88 | if (--event->perf_refcount > 0) |
85 | return; | 89 | return; |
86 | 90 | ||
87 | event->profile_disable(event); | 91 | event->perf_event_disable(event); |
88 | 92 | ||
89 | if (!--total_profile_count) { | 93 | if (!--total_ref_count) { |
90 | buf = perf_trace_buf; | 94 | buf = perf_trace_buf; |
91 | rcu_assign_pointer(perf_trace_buf, NULL); | 95 | rcu_assign_pointer(perf_trace_buf, NULL); |
92 | 96 | ||
@@ -104,14 +108,14 @@ static void ftrace_profile_disable_event(struct ftrace_event_call *event) | |||
104 | } | 108 | } |
105 | } | 109 | } |
106 | 110 | ||
107 | void ftrace_profile_disable(int event_id) | 111 | void perf_trace_disable(int event_id) |
108 | { | 112 | { |
109 | struct ftrace_event_call *event; | 113 | struct ftrace_event_call *event; |
110 | 114 | ||
111 | mutex_lock(&event_mutex); | 115 | mutex_lock(&event_mutex); |
112 | list_for_each_entry(event, &ftrace_events, list) { | 116 | list_for_each_entry(event, &ftrace_events, list) { |
113 | if (event->id == event_id) { | 117 | if (event->id == event_id) { |
114 | ftrace_profile_disable_event(event); | 118 | perf_trace_event_disable(event); |
115 | module_put(event->mod); | 119 | module_put(event->mod); |
116 | break; | 120 | break; |
117 | } | 121 | } |
@@ -119,8 +123,8 @@ void ftrace_profile_disable(int event_id) | |||
119 | mutex_unlock(&event_mutex); | 123 | mutex_unlock(&event_mutex); |
120 | } | 124 | } |
121 | 125 | ||
122 | __kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type, | 126 | __kprobes void *perf_trace_buf_prepare(int size, unsigned short type, |
123 | int *rctxp, unsigned long *irq_flags) | 127 | int *rctxp, unsigned long *irq_flags) |
124 | { | 128 | { |
125 | struct trace_entry *entry; | 129 | struct trace_entry *entry; |
126 | char *trace_buf, *raw_data; | 130 | char *trace_buf, *raw_data; |
@@ -161,4 +165,4 @@ err_recursion: | |||
161 | local_irq_restore(*irq_flags); | 165 | local_irq_restore(*irq_flags); |
162 | return NULL; | 166 | return NULL; |
163 | } | 167 | } |
164 | EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare); | 168 | EXPORT_SYMBOL_GPL(perf_trace_buf_prepare); |
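Taken together with the rename, every perf-enabled tracer (kprobes, syscalls, the TRACE_EVENT glue) follows the same prepare/fill/submit shape. Skeleton of such a handler, with a made-up record type so the sketch is self-contained (illustrative only):

	struct my_trace_entry {			/* hypothetical payload */
		struct trace_entry	ent;
		unsigned long		value;
	};

	static void my_perf_handler(struct ftrace_event_call *call,
				    struct pt_regs *regs, unsigned long value)
	{
		struct my_trace_entry *entry;
		unsigned long irq_flags;
		int rctx, size;

		/* pad to u64, minus the u32 the buffer code adds back */
		size = ALIGN(sizeof(*entry) + sizeof(u32), sizeof(u64)) - sizeof(u32);
		if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
			      "perf buffer not large enough"))
			return;

		entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
		if (!entry)			/* recursion, or buffers not allocated */
			return;

		entry->value = value;		/* event-specific payload */

		perf_trace_buf_submit(entry, size, rctx, 0, 1, irq_flags, regs);
	}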
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 3f972ad98d04..beab8bf2f310 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -938,7 +938,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, | |||
938 | trace_create_file("enable", 0644, call->dir, call, | 938 | trace_create_file("enable", 0644, call->dir, call, |
939 | enable); | 939 | enable); |
940 | 940 | ||
941 | if (call->id && call->profile_enable) | 941 | if (call->id && call->perf_event_enable) |
942 | trace_create_file("id", 0444, call->dir, call, | 942 | trace_create_file("id", 0444, call->dir, call, |
943 | id); | 943 | id); |
944 | 944 | ||
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 505c92273b1a..1251e367bae9 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -1214,7 +1214,7 @@ static int set_print_fmt(struct trace_probe *tp) | |||
1214 | #ifdef CONFIG_PERF_EVENTS | 1214 | #ifdef CONFIG_PERF_EVENTS |
1215 | 1215 | ||
1216 | /* Kprobe profile handler */ | 1216 | /* Kprobe profile handler */ |
1217 | static __kprobes void kprobe_profile_func(struct kprobe *kp, | 1217 | static __kprobes void kprobe_perf_func(struct kprobe *kp, |
1218 | struct pt_regs *regs) | 1218 | struct pt_regs *regs) |
1219 | { | 1219 | { |
1220 | struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); | 1220 | struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); |
@@ -1227,11 +1227,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp, | |||
1227 | __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); | 1227 | __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); |
1228 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); | 1228 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); |
1229 | size -= sizeof(u32); | 1229 | size -= sizeof(u32); |
1230 | if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, | 1230 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, |
1231 | "profile buffer not large enough")) | 1231 | "profile buffer not large enough")) |
1232 | return; | 1232 | return; |
1233 | 1233 | ||
1234 | entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags); | 1234 | entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags); |
1235 | if (!entry) | 1235 | if (!entry) |
1236 | return; | 1236 | return; |
1237 | 1237 | ||
@@ -1240,11 +1240,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp, | |||
1240 | for (i = 0; i < tp->nr_args; i++) | 1240 | for (i = 0; i < tp->nr_args; i++) |
1241 | entry->args[i] = call_fetch(&tp->args[i].fetch, regs); | 1241 | entry->args[i] = call_fetch(&tp->args[i].fetch, regs); |
1242 | 1242 | ||
1243 | ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags); | 1243 | perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs); |
1244 | } | 1244 | } |
1245 | 1245 | ||
1246 | /* Kretprobe profile handler */ | 1246 | /* Kretprobe profile handler */ |
1247 | static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri, | 1247 | static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, |
1248 | struct pt_regs *regs) | 1248 | struct pt_regs *regs) |
1249 | { | 1249 | { |
1250 | struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); | 1250 | struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); |
@@ -1257,11 +1257,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri, | |||
1257 | __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); | 1257 | __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); |
1258 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); | 1258 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); |
1259 | size -= sizeof(u32); | 1259 | size -= sizeof(u32); |
1260 | if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, | 1260 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, |
1261 | "profile buffer not large enough")) | 1261 | "profile buffer not large enough")) |
1262 | return; | 1262 | return; |
1263 | 1263 | ||
1264 | entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags); | 1264 | entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags); |
1265 | if (!entry) | 1265 | if (!entry) |
1266 | return; | 1266 | return; |
1267 | 1267 | ||
@@ -1271,10 +1271,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri, | |||
1271 | for (i = 0; i < tp->nr_args; i++) | 1271 | for (i = 0; i < tp->nr_args; i++) |
1272 | entry->args[i] = call_fetch(&tp->args[i].fetch, regs); | 1272 | entry->args[i] = call_fetch(&tp->args[i].fetch, regs); |
1273 | 1273 | ||
1274 | ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags); | 1274 | perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, |
1275 | irq_flags, regs); | ||
1275 | } | 1276 | } |
1276 | 1277 | ||
1277 | static int probe_profile_enable(struct ftrace_event_call *call) | 1278 | static int probe_perf_enable(struct ftrace_event_call *call) |
1278 | { | 1279 | { |
1279 | struct trace_probe *tp = (struct trace_probe *)call->data; | 1280 | struct trace_probe *tp = (struct trace_probe *)call->data; |
1280 | 1281 | ||
@@ -1286,7 +1287,7 @@ static int probe_profile_enable(struct ftrace_event_call *call) | |||
1286 | return enable_kprobe(&tp->rp.kp); | 1287 | return enable_kprobe(&tp->rp.kp); |
1287 | } | 1288 | } |
1288 | 1289 | ||
1289 | static void probe_profile_disable(struct ftrace_event_call *call) | 1290 | static void probe_perf_disable(struct ftrace_event_call *call) |
1290 | { | 1291 | { |
1291 | struct trace_probe *tp = (struct trace_probe *)call->data; | 1292 | struct trace_probe *tp = (struct trace_probe *)call->data; |
1292 | 1293 | ||
@@ -1311,7 +1312,7 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) | |||
1311 | kprobe_trace_func(kp, regs); | 1312 | kprobe_trace_func(kp, regs); |
1312 | #ifdef CONFIG_PERF_EVENTS | 1313 | #ifdef CONFIG_PERF_EVENTS |
1313 | if (tp->flags & TP_FLAG_PROFILE) | 1314 | if (tp->flags & TP_FLAG_PROFILE) |
1314 | kprobe_profile_func(kp, regs); | 1315 | kprobe_perf_func(kp, regs); |
1315 | #endif | 1316 | #endif |
1316 | return 0; /* We don't tweek kernel, so just return 0 */ | 1317 | return 0; /* We don't tweek kernel, so just return 0 */ |
1317 | } | 1318 | } |
@@ -1325,7 +1326,7 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) | |||
1325 | kretprobe_trace_func(ri, regs); | 1326 | kretprobe_trace_func(ri, regs); |
1326 | #ifdef CONFIG_PERF_EVENTS | 1327 | #ifdef CONFIG_PERF_EVENTS |
1327 | if (tp->flags & TP_FLAG_PROFILE) | 1328 | if (tp->flags & TP_FLAG_PROFILE) |
1328 | kretprobe_profile_func(ri, regs); | 1329 | kretprobe_perf_func(ri, regs); |
1329 | #endif | 1330 | #endif |
1330 | return 0; /* We don't tweek kernel, so just return 0 */ | 1331 | return 0; /* We don't tweek kernel, so just return 0 */ |
1331 | } | 1332 | } |
@@ -1358,8 +1359,8 @@ static int register_probe_event(struct trace_probe *tp) | |||
1358 | call->unregfunc = probe_event_disable; | 1359 | call->unregfunc = probe_event_disable; |
1359 | 1360 | ||
1360 | #ifdef CONFIG_PERF_EVENTS | 1361 | #ifdef CONFIG_PERF_EVENTS |
1361 | call->profile_enable = probe_profile_enable; | 1362 | call->perf_event_enable = probe_perf_enable; |
1362 | call->profile_disable = probe_profile_disable; | 1363 | call->perf_event_disable = probe_perf_disable; |
1363 | #endif | 1364 | #endif |
1364 | call->data = tp; | 1365 | call->data = tp; |
1365 | ret = trace_add_event_call(call); | 1366 | ret = trace_add_event_call(call); |
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index cba47d7935cc..33c2a5b769dc 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
@@ -428,12 +428,12 @@ core_initcall(init_ftrace_syscalls); | |||
428 | 428 | ||
429 | #ifdef CONFIG_PERF_EVENTS | 429 | #ifdef CONFIG_PERF_EVENTS |
430 | 430 | ||
431 | static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls); | 431 | static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls); |
432 | static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls); | 432 | static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls); |
433 | static int sys_prof_refcount_enter; | 433 | static int sys_perf_refcount_enter; |
434 | static int sys_prof_refcount_exit; | 434 | static int sys_perf_refcount_exit; |
435 | 435 | ||
436 | static void prof_syscall_enter(struct pt_regs *regs, long id) | 436 | static void perf_syscall_enter(struct pt_regs *regs, long id) |
437 | { | 437 | { |
438 | struct syscall_metadata *sys_data; | 438 | struct syscall_metadata *sys_data; |
439 | struct syscall_trace_enter *rec; | 439 | struct syscall_trace_enter *rec; |
@@ -443,7 +443,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) | |||
443 | int size; | 443 | int size; |
444 | 444 | ||
445 | syscall_nr = syscall_get_nr(current, regs); | 445 | syscall_nr = syscall_get_nr(current, regs); |
446 | if (!test_bit(syscall_nr, enabled_prof_enter_syscalls)) | 446 | if (!test_bit(syscall_nr, enabled_perf_enter_syscalls)) |
447 | return; | 447 | return; |
448 | 448 | ||
449 | sys_data = syscall_nr_to_meta(syscall_nr); | 449 | sys_data = syscall_nr_to_meta(syscall_nr); |
@@ -455,11 +455,11 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) | |||
455 | size = ALIGN(size + sizeof(u32), sizeof(u64)); | 455 | size = ALIGN(size + sizeof(u32), sizeof(u64)); |
456 | size -= sizeof(u32); | 456 | size -= sizeof(u32); |
457 | 457 | ||
458 | if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, | 458 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, |
459 | "profile buffer not large enough")) | 459 | "perf buffer not large enough")) |
460 | return; | 460 | return; |
461 | 461 | ||
462 | rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size, | 462 | rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, |
463 | sys_data->enter_event->id, &rctx, &flags); | 463 | sys_data->enter_event->id, &rctx, &flags); |
464 | if (!rec) | 464 | if (!rec) |
465 | return; | 465 | return; |
@@ -467,10 +467,10 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) | |||
467 | rec->nr = syscall_nr; | 467 | rec->nr = syscall_nr; |
468 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, | 468 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, |
469 | (unsigned long *)&rec->args); | 469 | (unsigned long *)&rec->args); |
470 | ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags); | 470 | perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs); |
471 | } | 471 | } |
472 | 472 | ||
473 | int prof_sysenter_enable(struct ftrace_event_call *call) | 473 | int perf_sysenter_enable(struct ftrace_event_call *call) |
474 | { | 474 | { |
475 | int ret = 0; | 475 | int ret = 0; |
476 | int num; | 476 | int num; |
@@ -478,34 +478,34 @@ int prof_sysenter_enable(struct ftrace_event_call *call) | |||
478 | num = ((struct syscall_metadata *)call->data)->syscall_nr; | 478 | num = ((struct syscall_metadata *)call->data)->syscall_nr; |
479 | 479 | ||
480 | mutex_lock(&syscall_trace_lock); | 480 | mutex_lock(&syscall_trace_lock); |
481 | if (!sys_prof_refcount_enter) | 481 | if (!sys_perf_refcount_enter) |
482 | ret = register_trace_sys_enter(prof_syscall_enter); | 482 | ret = register_trace_sys_enter(perf_syscall_enter); |
483 | if (ret) { | 483 | if (ret) { |
484 | pr_info("event trace: Could not activate" | 484 | pr_info("event trace: Could not activate" |
485 | "syscall entry trace point"); | 485 | "syscall entry trace point"); |
486 | } else { | 486 | } else { |
487 | set_bit(num, enabled_prof_enter_syscalls); | 487 | set_bit(num, enabled_perf_enter_syscalls); |
488 | sys_prof_refcount_enter++; | 488 | sys_perf_refcount_enter++; |
489 | } | 489 | } |
490 | mutex_unlock(&syscall_trace_lock); | 490 | mutex_unlock(&syscall_trace_lock); |
491 | return ret; | 491 | return ret; |
492 | } | 492 | } |
493 | 493 | ||
494 | void prof_sysenter_disable(struct ftrace_event_call *call) | 494 | void perf_sysenter_disable(struct ftrace_event_call *call) |
495 | { | 495 | { |
496 | int num; | 496 | int num; |
497 | 497 | ||
498 | num = ((struct syscall_metadata *)call->data)->syscall_nr; | 498 | num = ((struct syscall_metadata *)call->data)->syscall_nr; |
499 | 499 | ||
500 | mutex_lock(&syscall_trace_lock); | 500 | mutex_lock(&syscall_trace_lock); |
501 | sys_prof_refcount_enter--; | 501 | sys_perf_refcount_enter--; |
502 | clear_bit(num, enabled_prof_enter_syscalls); | 502 | clear_bit(num, enabled_perf_enter_syscalls); |
503 | if (!sys_prof_refcount_enter) | 503 | if (!sys_perf_refcount_enter) |
504 | unregister_trace_sys_enter(prof_syscall_enter); | 504 | unregister_trace_sys_enter(perf_syscall_enter); |
505 | mutex_unlock(&syscall_trace_lock); | 505 | mutex_unlock(&syscall_trace_lock); |
506 | } | 506 | } |
507 | 507 | ||
508 | static void prof_syscall_exit(struct pt_regs *regs, long ret) | 508 | static void perf_syscall_exit(struct pt_regs *regs, long ret) |
509 | { | 509 | { |
510 | struct syscall_metadata *sys_data; | 510 | struct syscall_metadata *sys_data; |
511 | struct syscall_trace_exit *rec; | 511 | struct syscall_trace_exit *rec; |
@@ -515,7 +515,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) | |||
515 | int size; | 515 | int size; |
516 | 516 | ||
517 | syscall_nr = syscall_get_nr(current, regs); | 517 | syscall_nr = syscall_get_nr(current, regs); |
518 | if (!test_bit(syscall_nr, enabled_prof_exit_syscalls)) | 518 | if (!test_bit(syscall_nr, enabled_perf_exit_syscalls)) |
519 | return; | 519 | return; |
520 | 520 | ||
521 | sys_data = syscall_nr_to_meta(syscall_nr); | 521 | sys_data = syscall_nr_to_meta(syscall_nr); |
@@ -530,11 +530,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) | |||
530 | * Impossible, but be paranoid with the future | 530 | * Impossible, but be paranoid with the future |
531 | * How to put this check outside runtime? | 531 | * How to put this check outside runtime? |
532 | */ | 532 | */ |
533 | if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, | 533 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, |
534 | "exit event has grown above profile buffer size")) | 534 | "exit event has grown above perf buffer size")) |
535 | return; | 535 | return; |
536 | 536 | ||
537 | rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size, | 537 | rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size, |
538 | sys_data->exit_event->id, &rctx, &flags); | 538 | sys_data->exit_event->id, &rctx, &flags); |
539 | if (!rec) | 539 | if (!rec) |
540 | return; | 540 | return; |
@@ -542,10 +542,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) | |||
542 | rec->nr = syscall_nr; | 542 | rec->nr = syscall_nr; |
543 | rec->ret = syscall_get_return_value(current, regs); | 543 | rec->ret = syscall_get_return_value(current, regs); |
544 | 544 | ||
545 | ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags); | 545 | perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs); |
546 | } | 546 | } |
547 | 547 | ||
548 | int prof_sysexit_enable(struct ftrace_event_call *call) | 548 | int perf_sysexit_enable(struct ftrace_event_call *call) |
549 | { | 549 | { |
550 | int ret = 0; | 550 | int ret = 0; |
551 | int num; | 551 | int num; |
@@ -553,30 +553,30 @@ int prof_sysexit_enable(struct ftrace_event_call *call) | |||
553 | num = ((struct syscall_metadata *)call->data)->syscall_nr; | 553 | num = ((struct syscall_metadata *)call->data)->syscall_nr; |
554 | 554 | ||
555 | mutex_lock(&syscall_trace_lock); | 555 | mutex_lock(&syscall_trace_lock); |
556 | if (!sys_prof_refcount_exit) | 556 | if (!sys_perf_refcount_exit) |
557 | ret = register_trace_sys_exit(prof_syscall_exit); | 557 | ret = register_trace_sys_exit(perf_syscall_exit); |
558 | if (ret) { | 558 | if (ret) { |
559 | pr_info("event trace: Could not activate" | 559 | pr_info("event trace: Could not activate" |
560 | "syscall exit trace point"); | 560 | "syscall exit trace point"); |
561 | } else { | 561 | } else { |
562 | set_bit(num, enabled_prof_exit_syscalls); | 562 | set_bit(num, enabled_perf_exit_syscalls); |
563 | sys_prof_refcount_exit++; | 563 | sys_perf_refcount_exit++; |
564 | } | 564 | } |
565 | mutex_unlock(&syscall_trace_lock); | 565 | mutex_unlock(&syscall_trace_lock); |
566 | return ret; | 566 | return ret; |
567 | } | 567 | } |
568 | 568 | ||
569 | void prof_sysexit_disable(struct ftrace_event_call *call) | 569 | void perf_sysexit_disable(struct ftrace_event_call *call) |
570 | { | 570 | { |
571 | int num; | 571 | int num; |
572 | 572 | ||
573 | num = ((struct syscall_metadata *)call->data)->syscall_nr; | 573 | num = ((struct syscall_metadata *)call->data)->syscall_nr; |
574 | 574 | ||
575 | mutex_lock(&syscall_trace_lock); | 575 | mutex_lock(&syscall_trace_lock); |
576 | sys_prof_refcount_exit--; | 576 | sys_perf_refcount_exit--; |
577 | clear_bit(num, enabled_prof_exit_syscalls); | 577 | clear_bit(num, enabled_perf_exit_syscalls); |
578 | if (!sys_prof_refcount_exit) | 578 | if (!sys_perf_refcount_exit) |
579 | unregister_trace_sys_exit(prof_syscall_exit); | 579 | unregister_trace_sys_exit(perf_syscall_exit); |
580 | mutex_unlock(&syscall_trace_lock); | 580 | mutex_unlock(&syscall_trace_lock); |
581 | } | 581 | } |
582 | 582 | ||
diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile index bdd3b7ecad0a..bd498d496952 100644 --- a/tools/perf/Documentation/Makefile +++ b/tools/perf/Documentation/Makefile | |||
@@ -24,7 +24,10 @@ DOC_MAN1=$(patsubst %.txt,%.1,$(MAN1_TXT)) | |||
24 | DOC_MAN5=$(patsubst %.txt,%.5,$(MAN5_TXT)) | 24 | DOC_MAN5=$(patsubst %.txt,%.5,$(MAN5_TXT)) |
25 | DOC_MAN7=$(patsubst %.txt,%.7,$(MAN7_TXT)) | 25 | DOC_MAN7=$(patsubst %.txt,%.7,$(MAN7_TXT)) |
26 | 26 | ||
27 | # Make the path relative to DESTDIR, not prefix | ||
28 | ifndef DESTDIR | ||
27 | prefix?=$(HOME) | 29 | prefix?=$(HOME) |
30 | endif | ||
28 | bindir?=$(prefix)/bin | 31 | bindir?=$(prefix)/bin |
29 | htmldir?=$(prefix)/share/doc/perf-doc | 32 | htmldir?=$(prefix)/share/doc/perf-doc |
30 | pdfdir?=$(prefix)/share/doc/perf-doc | 33 | pdfdir?=$(prefix)/share/doc/perf-doc |
@@ -32,7 +35,6 @@ mandir?=$(prefix)/share/man | |||
32 | man1dir=$(mandir)/man1 | 35 | man1dir=$(mandir)/man1 |
33 | man5dir=$(mandir)/man5 | 36 | man5dir=$(mandir)/man5 |
34 | man7dir=$(mandir)/man7 | 37 | man7dir=$(mandir)/man7 |
35 | # DESTDIR= | ||
36 | 38 | ||
37 | ASCIIDOC=asciidoc | 39 | ASCIIDOC=asciidoc |
38 | ASCIIDOC_EXTRA = --unsafe | 40 | ASCIIDOC_EXTRA = --unsafe |
diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 2d537382c686..8a8f52db7e38 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile | |||
@@ -216,7 +216,10 @@ STRIP ?= strip | |||
216 | # runtime figures out where they are based on the path to the executable. | 216 | # runtime figures out where they are based on the path to the executable. |
217 | # This can help installing the suite in a relocatable way. | 217 | # This can help installing the suite in a relocatable way. |
218 | 218 | ||
219 | # Make the path relative to DESTDIR, not to prefix | ||
220 | ifndef DESTDIR | ||
219 | prefix = $(HOME) | 221 | prefix = $(HOME) |
222 | endif | ||
220 | bindir_relative = bin | 223 | bindir_relative = bin |
221 | bindir = $(prefix)/$(bindir_relative) | 224 | bindir = $(prefix)/$(bindir_relative) |
222 | mandir = share/man | 225 | mandir = share/man |
@@ -233,7 +236,6 @@ sysconfdir = $(prefix)/etc | |||
233 | ETC_PERFCONFIG = etc/perfconfig | 236 | ETC_PERFCONFIG = etc/perfconfig |
234 | endif | 237 | endif |
235 | lib = lib | 238 | lib = lib |
236 | # DESTDIR= | ||
237 | 239 | ||
238 | export prefix bindir sharedir sysconfdir | 240 | export prefix bindir sharedir sysconfdir |
239 | 241 | ||
@@ -387,6 +389,7 @@ LIB_H += util/thread.h | |||
387 | LIB_H += util/trace-event.h | 389 | LIB_H += util/trace-event.h |
388 | LIB_H += util/probe-finder.h | 390 | LIB_H += util/probe-finder.h |
389 | LIB_H += util/probe-event.h | 391 | LIB_H += util/probe-event.h |
392 | LIB_H += util/cpumap.h | ||
390 | 393 | ||
391 | LIB_OBJS += util/abspath.o | 394 | LIB_OBJS += util/abspath.o |
392 | LIB_OBJS += util/alias.o | 395 | LIB_OBJS += util/alias.o |
@@ -433,6 +436,7 @@ LIB_OBJS += util/sort.o | |||
433 | LIB_OBJS += util/hist.o | 436 | LIB_OBJS += util/hist.o |
434 | LIB_OBJS += util/probe-event.o | 437 | LIB_OBJS += util/probe-event.o |
435 | LIB_OBJS += util/util.o | 438 | LIB_OBJS += util/util.o |
439 | LIB_OBJS += util/cpumap.o | ||
436 | 440 | ||
437 | BUILTIN_OBJS += builtin-annotate.o | 441 | BUILTIN_OBJS += builtin-annotate.o |
438 | 442 | ||
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 5ec5de995872..6ad7148451c5 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c | |||
@@ -116,7 +116,7 @@ static int perf_session__add_hist_entry(struct perf_session *self, | |||
116 | return 0; | 116 | return 0; |
117 | } | 117 | } |
118 | 118 | ||
119 | he = __perf_session__add_hist_entry(self, al, NULL, count, &hit); | 119 | he = __perf_session__add_hist_entry(&self->hists, al, NULL, count, &hit); |
120 | if (he == NULL) | 120 | if (he == NULL) |
121 | return -ENOMEM; | 121 | return -ENOMEM; |
122 | 122 | ||
@@ -564,8 +564,8 @@ static int __cmd_annotate(void) | |||
564 | if (verbose > 2) | 564 | if (verbose > 2) |
565 | dsos__fprintf(stdout); | 565 | dsos__fprintf(stdout); |
566 | 566 | ||
567 | perf_session__collapse_resort(session); | 567 | perf_session__collapse_resort(&session->hists); |
568 | perf_session__output_resort(session, session->event_total[0]); | 568 | perf_session__output_resort(&session->hists, session->event_total[0]); |
569 | perf_session__find_annotations(session); | 569 | perf_session__find_annotations(session); |
570 | out_delete: | 570 | out_delete: |
571 | perf_session__delete(session); | 571 | perf_session__delete(session); |
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index 18b3f505f9db..1ea15d8aeed1 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c | |||
@@ -26,7 +26,8 @@ static int perf_session__add_hist_entry(struct perf_session *self, | |||
26 | struct addr_location *al, u64 count) | 26 | struct addr_location *al, u64 count) |
27 | { | 27 | { |
28 | bool hit; | 28 | bool hit; |
29 | struct hist_entry *he = __perf_session__add_hist_entry(self, al, NULL, | 29 | struct hist_entry *he = __perf_session__add_hist_entry(&self->hists, |
30 | al, NULL, | ||
30 | count, &hit); | 31 | count, &hit); |
31 | if (he == NULL) | 32 | if (he == NULL) |
32 | return -ENOMEM; | 33 | return -ENOMEM; |
@@ -114,7 +115,7 @@ static void perf_session__resort_hist_entries(struct perf_session *self) | |||
114 | 115 | ||
115 | static void perf_session__set_hist_entries_positions(struct perf_session *self) | 116 | static void perf_session__set_hist_entries_positions(struct perf_session *self) |
116 | { | 117 | { |
117 | perf_session__output_resort(self, self->events_stats.total); | 118 | perf_session__output_resort(&self->hists, self->events_stats.total); |
118 | perf_session__resort_hist_entries(self); | 119 | perf_session__resort_hist_entries(self); |
119 | } | 120 | } |
120 | 121 | ||
@@ -166,13 +167,15 @@ static int __cmd_diff(void) | |||
166 | goto out_delete; | 167 | goto out_delete; |
167 | } | 168 | } |
168 | 169 | ||
169 | perf_session__output_resort(session[1], session[1]->events_stats.total); | 170 | perf_session__output_resort(&session[1]->hists, |
171 | session[1]->events_stats.total); | ||
170 | if (show_displacement) | 172 | if (show_displacement) |
171 | perf_session__set_hist_entries_positions(session[0]); | 173 | perf_session__set_hist_entries_positions(session[0]); |
172 | 174 | ||
173 | perf_session__match_hists(session[0], session[1]); | 175 | perf_session__match_hists(session[0], session[1]); |
174 | perf_session__fprintf_hists(session[1], session[0], | 176 | perf_session__fprintf_hists(&session[1]->hists, session[0], |
175 | show_displacement, stdout); | 177 | show_displacement, stdout, |
178 | session[1]->events_stats.total); | ||
176 | out_delete: | 179 | out_delete: |
177 | for (i = 0; i < 2; ++i) | 180 | for (i = 0; i < 2; ++i) |
178 | perf_session__delete(session[i]); | 181 | perf_session__delete(session[i]); |
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 771533ced6a8..3b8b6387c47c 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include "util/debug.h" | 22 | #include "util/debug.h" |
23 | #include "util/session.h" | 23 | #include "util/session.h" |
24 | #include "util/symbol.h" | 24 | #include "util/symbol.h" |
25 | #include "util/cpumap.h" | ||
25 | 26 | ||
26 | #include <unistd.h> | 27 | #include <unistd.h> |
27 | #include <sched.h> | 28 | #include <sched.h> |
@@ -244,6 +245,9 @@ static void create_counter(int counter, int cpu, pid_t pid) | |||
244 | 245 | ||
245 | attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID; | 246 | attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID; |
246 | 247 | ||
248 | if (nr_counters > 1) | ||
249 | attr->sample_type |= PERF_SAMPLE_ID; | ||
250 | |||
247 | if (freq) { | 251 | if (freq) { |
248 | attr->sample_type |= PERF_SAMPLE_PERIOD; | 252 | attr->sample_type |= PERF_SAMPLE_PERIOD; |
249 | attr->freq = 1; | 253 | attr->freq = 1; |
@@ -391,6 +395,9 @@ static int process_buildids(void) | |||
391 | { | 395 | { |
392 | u64 size = lseek(output, 0, SEEK_CUR); | 396 | u64 size = lseek(output, 0, SEEK_CUR); |
393 | 397 | ||
398 | if (size == 0) | ||
399 | return 0; | ||
400 | |||
394 | session->fd = output; | 401 | session->fd = output; |
395 | return __perf_session__process_events(session, post_processing_offset, | 402 | return __perf_session__process_events(session, post_processing_offset, |
396 | size - post_processing_offset, | 403 | size - post_processing_offset, |
@@ -418,9 +425,6 @@ static int __cmd_record(int argc, const char **argv) | |||
418 | char buf; | 425 | char buf; |
419 | 426 | ||
420 | page_size = sysconf(_SC_PAGE_SIZE); | 427 | page_size = sysconf(_SC_PAGE_SIZE); |
421 | nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); | ||
422 | assert(nr_cpus <= MAX_NR_CPUS); | ||
423 | assert(nr_cpus >= 0); | ||
424 | 428 | ||
425 | atexit(sig_atexit); | 429 | atexit(sig_atexit); |
426 | signal(SIGCHLD, sig_handler); | 430 | signal(SIGCHLD, sig_handler); |
@@ -544,8 +548,9 @@ static int __cmd_record(int argc, const char **argv) | |||
544 | if ((!system_wide && !inherit) || profile_cpu != -1) { | 548 | if ((!system_wide && !inherit) || profile_cpu != -1) { |
545 | open_counters(profile_cpu, target_pid); | 549 | open_counters(profile_cpu, target_pid); |
546 | } else { | 550 | } else { |
551 | nr_cpus = read_cpu_map(); | ||
547 | for (i = 0; i < nr_cpus; i++) | 552 | for (i = 0; i < nr_cpus; i++) |
548 | open_counters(i, target_pid); | 553 | open_counters(cpumap[i], target_pid); |
549 | } | 554 | } |
550 | 555 | ||
551 | if (file_new) { | 556 | if (file_new) { |
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index cfc655d40bb7..f815de25d0fc 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c | |||
@@ -45,28 +45,71 @@ static char *pretty_printing_style = default_pretty_printing_style; | |||
45 | 45 | ||
46 | static char callchain_default_opt[] = "fractal,0.5"; | 46 | static char callchain_default_opt[] = "fractal,0.5"; |
47 | 47 | ||
48 | static struct event_stat_id *get_stats(struct perf_session *self, | ||
49 | u64 event_stream, u32 type, u64 config) | ||
50 | { | ||
51 | struct rb_node **p = &self->stats_by_id.rb_node; | ||
52 | struct rb_node *parent = NULL; | ||
53 | struct event_stat_id *iter, *new; | ||
54 | |||
55 | while (*p != NULL) { | ||
56 | parent = *p; | ||
57 | iter = rb_entry(parent, struct event_stat_id, rb_node); | ||
58 | if (iter->config == config) | ||
59 | return iter; | ||
60 | |||
61 | |||
62 | if (config > iter->config) | ||
63 | p = &(*p)->rb_right; | ||
64 | else | ||
65 | p = &(*p)->rb_left; | ||
66 | } | ||
67 | |||
68 | new = malloc(sizeof(struct event_stat_id)); | ||
69 | if (new == NULL) | ||
70 | return NULL; | ||
71 | memset(new, 0, sizeof(struct event_stat_id)); | ||
72 | new->event_stream = event_stream; | ||
73 | new->config = config; | ||
74 | new->type = type; | ||
75 | rb_link_node(&new->rb_node, parent, p); | ||
76 | rb_insert_color(&new->rb_node, &self->stats_by_id); | ||
77 | return new; | ||
78 | } | ||
79 | |||
48 | static int perf_session__add_hist_entry(struct perf_session *self, | 80 | static int perf_session__add_hist_entry(struct perf_session *self, |
49 | struct addr_location *al, | 81 | struct addr_location *al, |
50 | struct ip_callchain *chain, u64 count) | 82 | struct sample_data *data) |
51 | { | 83 | { |
52 | struct symbol **syms = NULL, *parent = NULL; | 84 | struct symbol **syms = NULL, *parent = NULL; |
53 | bool hit; | 85 | bool hit; |
54 | struct hist_entry *he; | 86 | struct hist_entry *he; |
87 | struct event_stat_id *stats; | ||
88 | struct perf_event_attr *attr; | ||
55 | 89 | ||
56 | if ((sort__has_parent || symbol_conf.use_callchain) && chain) | 90 | if ((sort__has_parent || symbol_conf.use_callchain) && data->callchain) |
57 | syms = perf_session__resolve_callchain(self, al->thread, | 91 | syms = perf_session__resolve_callchain(self, al->thread, |
58 | chain, &parent); | 92 | data->callchain, &parent); |
59 | he = __perf_session__add_hist_entry(self, al, parent, count, &hit); | 93 | |
94 | attr = perf_header__find_attr(data->id, &self->header); | ||
95 | if (attr) | ||
96 | stats = get_stats(self, data->id, attr->type, attr->config); | ||
97 | else | ||
98 | stats = get_stats(self, data->id, 0, 0); | ||
99 | if (stats == NULL) | ||
100 | return -ENOMEM; | ||
101 | he = __perf_session__add_hist_entry(&stats->hists, al, parent, | ||
102 | data->period, &hit); | ||
60 | if (he == NULL) | 103 | if (he == NULL) |
61 | return -ENOMEM; | 104 | return -ENOMEM; |
62 | 105 | ||
63 | if (hit) | 106 | if (hit) |
64 | he->count += count; | 107 | he->count += data->period; |
65 | 108 | ||
66 | if (symbol_conf.use_callchain) { | 109 | if (symbol_conf.use_callchain) { |
67 | if (!hit) | 110 | if (!hit) |
68 | callchain_init(&he->callchain); | 111 | callchain_init(&he->callchain); |
69 | append_chain(&he->callchain, chain, syms); | 112 | append_chain(&he->callchain, data->callchain, syms); |
70 | free(syms); | 113 | free(syms); |
71 | } | 114 | } |
72 | 115 | ||
@@ -86,10 +129,30 @@ static int validate_chain(struct ip_callchain *chain, event_t *event) | |||
86 | return 0; | 129 | return 0; |
87 | } | 130 | } |
88 | 131 | ||
132 | static int add_event_total(struct perf_session *session, | ||
133 | struct sample_data *data, | ||
134 | struct perf_event_attr *attr) | ||
135 | { | ||
136 | struct event_stat_id *stats; | ||
137 | |||
138 | if (attr) | ||
139 | stats = get_stats(session, data->id, attr->type, attr->config); | ||
140 | else | ||
141 | stats = get_stats(session, data->id, 0, 0); | ||
142 | |||
143 | if (!stats) | ||
144 | return -ENOMEM; | ||
145 | |||
146 | stats->stats.total += data->period; | ||
147 | session->events_stats.total += data->period; | ||
148 | return 0; | ||
149 | } | ||
150 | |||
89 | static int process_sample_event(event_t *event, struct perf_session *session) | 151 | static int process_sample_event(event_t *event, struct perf_session *session) |
90 | { | 152 | { |
91 | struct sample_data data = { .period = 1, }; | 153 | struct sample_data data = { .period = 1, }; |
92 | struct addr_location al; | 154 | struct addr_location al; |
155 | struct perf_event_attr *attr; | ||
93 | 156 | ||
94 | event__parse_sample(event, session->sample_type, &data); | 157 | event__parse_sample(event, session->sample_type, &data); |
95 | 158 | ||
@@ -123,12 +186,18 @@ static int process_sample_event(event_t *event, struct perf_session *session) | |||
123 | if (al.filtered || (hide_unresolved && al.sym == NULL)) | 186 | if (al.filtered || (hide_unresolved && al.sym == NULL)) |
124 | return 0; | 187 | return 0; |
125 | 188 | ||
126 | if (perf_session__add_hist_entry(session, &al, data.callchain, data.period)) { | 189 | if (perf_session__add_hist_entry(session, &al, &data)) { |
127 | pr_debug("problem incrementing symbol count, skipping event\n"); | 190 | pr_debug("problem incrementing symbol count, skipping event\n"); |
128 | return -1; | 191 | return -1; |
129 | } | 192 | } |
130 | 193 | ||
131 | session->events_stats.total += data.period; | 194 | attr = perf_header__find_attr(data.id, &session->header); |
195 | |||
196 | if (add_event_total(session, &data, attr)) { | ||
197 | pr_debug("problem adding event count\n"); | ||
198 | return -1; | ||
199 | } | ||
200 | |||
132 | return 0; | 201 | return 0; |
133 | } | 202 | } |
134 | 203 | ||
@@ -197,6 +266,7 @@ static int __cmd_report(void) | |||
197 | { | 266 | { |
198 | int ret = -EINVAL; | 267 | int ret = -EINVAL; |
199 | struct perf_session *session; | 268 | struct perf_session *session; |
269 | struct rb_node *next; | ||
200 | 270 | ||
201 | session = perf_session__new(input_name, O_RDONLY, force); | 271 | session = perf_session__new(input_name, O_RDONLY, force); |
202 | if (session == NULL) | 272 | if (session == NULL) |
@@ -224,10 +294,28 @@ static int __cmd_report(void) | |||
224 | if (verbose > 2) | 294 | if (verbose > 2) |
225 | dsos__fprintf(stdout); | 295 | dsos__fprintf(stdout); |
226 | 296 | ||
227 | perf_session__collapse_resort(session); | 297 | next = rb_first(&session->stats_by_id); |
228 | perf_session__output_resort(session, session->events_stats.total); | 298 | while (next) { |
229 | fprintf(stdout, "# Samples: %Ld\n#\n", session->events_stats.total); | 299 | struct event_stat_id *stats; |
230 | perf_session__fprintf_hists(session, NULL, false, stdout); | 300 | |
301 | stats = rb_entry(next, struct event_stat_id, rb_node); | ||
302 | perf_session__collapse_resort(&stats->hists); | ||
303 | perf_session__output_resort(&stats->hists, stats->stats.total); | ||
304 | if (rb_first(&session->stats_by_id) == | ||
305 | rb_last(&session->stats_by_id)) | ||
306 | fprintf(stdout, "# Samples: %Ld\n#\n", | ||
307 | stats->stats.total); | ||
308 | else | ||
309 | fprintf(stdout, "# Samples: %Ld %s\n#\n", | ||
310 | stats->stats.total, | ||
311 | __event_name(stats->type, stats->config)); | ||
312 | |||
313 | perf_session__fprintf_hists(&stats->hists, NULL, false, stdout, | ||
314 | stats->stats.total); | ||
315 | fprintf(stdout, "\n\n"); | ||
316 | next = rb_next(&stats->rb_node); | ||
317 | } | ||
318 | |||
231 | if (sort_order == default_sort_order && | 319 | if (sort_order == default_sort_order && |
232 | parent_pattern == default_parent_pattern) | 320 | parent_pattern == default_parent_pattern) |
233 | fprintf(stdout, "#\n# (For a higher level overview, try: perf report --sort comm,dso)\n#\n"); | 321 | fprintf(stdout, "#\n# (For a higher level overview, try: perf report --sort comm,dso)\n#\n"); |
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index e8c85d5aec41..95db31cff6fd 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include "util/event.h" | 45 | #include "util/event.h" |
46 | #include "util/debug.h" | 46 | #include "util/debug.h" |
47 | #include "util/header.h" | 47 | #include "util/header.h" |
48 | #include "util/cpumap.h" | ||
48 | 49 | ||
49 | #include <sys/prctl.h> | 50 | #include <sys/prctl.h> |
50 | #include <math.h> | 51 | #include <math.h> |
@@ -151,7 +152,7 @@ static void create_perf_stat_counter(int counter, int pid) | |||
151 | unsigned int cpu; | 152 | unsigned int cpu; |
152 | 153 | ||
153 | for (cpu = 0; cpu < nr_cpus; cpu++) { | 154 | for (cpu = 0; cpu < nr_cpus; cpu++) { |
154 | fd[cpu][counter] = sys_perf_event_open(attr, -1, cpu, -1, 0); | 155 | fd[cpu][counter] = sys_perf_event_open(attr, -1, cpumap[cpu], -1, 0); |
155 | if (fd[cpu][counter] < 0 && verbose) | 156 | if (fd[cpu][counter] < 0 && verbose) |
156 | fprintf(stderr, ERR_PERF_OPEN, counter, | 157 | fprintf(stderr, ERR_PERF_OPEN, counter, |
157 | fd[cpu][counter], strerror(errno)); | 158 | fd[cpu][counter], strerror(errno)); |
@@ -519,9 +520,10 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) | |||
519 | nr_counters = ARRAY_SIZE(default_attrs); | 520 | nr_counters = ARRAY_SIZE(default_attrs); |
520 | } | 521 | } |
521 | 522 | ||
522 | nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); | 523 | if (system_wide) |
523 | assert(nr_cpus <= MAX_NR_CPUS); | 524 | nr_cpus = read_cpu_map(); |
524 | assert((int)nr_cpus >= 0); | 525 | else |
526 | nr_cpus = 1; | ||
525 | 527 | ||
526 | /* | 528 | /* |
527 | * We dont want to block the signals - that would cause | 529 | * We dont want to block the signals - that would cause |
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 31f2e597800c..0b719e3dde05 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/rbtree.h> | 28 | #include <linux/rbtree.h> |
29 | #include "util/parse-options.h" | 29 | #include "util/parse-options.h" |
30 | #include "util/parse-events.h" | 30 | #include "util/parse-events.h" |
31 | #include "util/cpumap.h" | ||
31 | 32 | ||
32 | #include "util/debug.h" | 33 | #include "util/debug.h" |
33 | 34 | ||
@@ -1123,7 +1124,7 @@ static void start_counter(int i, int counter) | |||
1123 | 1124 | ||
1124 | cpu = profile_cpu; | 1125 | cpu = profile_cpu; |
1125 | if (target_pid == -1 && profile_cpu == -1) | 1126 | if (target_pid == -1 && profile_cpu == -1) |
1126 | cpu = i; | 1127 | cpu = cpumap[i]; |
1127 | 1128 | ||
1128 | attr = attrs + counter; | 1129 | attr = attrs + counter; |
1129 | 1130 | ||
@@ -1347,12 +1348,10 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) | |||
1347 | attrs[counter].sample_period = default_interval; | 1348 | attrs[counter].sample_period = default_interval; |
1348 | } | 1349 | } |
1349 | 1350 | ||
1350 | nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); | ||
1351 | assert(nr_cpus <= MAX_NR_CPUS); | ||
1352 | assert(nr_cpus >= 0); | ||
1353 | |||
1354 | if (target_pid != -1 || profile_cpu != -1) | 1351 | if (target_pid != -1 || profile_cpu != -1) |
1355 | nr_cpus = 1; | 1352 | nr_cpus = 1; |
1353 | else | ||
1354 | nr_cpus = read_cpu_map(); | ||
1356 | 1355 | ||
1357 | get_term_dimensions(&winsize); | 1356 | get_term_dimensions(&winsize); |
1358 | if (print_entries == 0) { | 1357 | if (print_entries == 0) { |
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c new file mode 100644 index 000000000000..4e01490e51e5 --- /dev/null +++ b/tools/perf/util/cpumap.c | |||
@@ -0,0 +1,59 @@ | |||
1 | #include "util.h" | ||
2 | #include "../perf.h" | ||
3 | #include "cpumap.h" | ||
4 | #include <assert.h> | ||
5 | #include <stdio.h> | ||
6 | |||
7 | int cpumap[MAX_NR_CPUS]; | ||
8 | |||
9 | static int default_cpu_map(void) | ||
10 | { | ||
11 | int nr_cpus, i; | ||
12 | |||
13 | nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); | ||
14 | assert(nr_cpus <= MAX_NR_CPUS); | ||
15 | assert((int)nr_cpus >= 0); | ||
16 | |||
17 | for (i = 0; i < nr_cpus; ++i) | ||
18 | cpumap[i] = i; | ||
19 | |||
20 | return nr_cpus; | ||
21 | } | ||
22 | |||
23 | int read_cpu_map(void) | ||
24 | { | ||
25 | FILE *onlnf; | ||
26 | int nr_cpus = 0; | ||
27 | int n, cpu, prev; | ||
28 | char sep; | ||
29 | |||
30 | onlnf = fopen("/sys/devices/system/cpu/online", "r"); | ||
31 | if (!onlnf) | ||
32 | return default_cpu_map(); | ||
33 | |||
34 | sep = 0; | ||
35 | prev = -1; | ||
36 | for (;;) { | ||
37 | n = fscanf(onlnf, "%u%c", &cpu, &sep); | ||
38 | if (n <= 0) | ||
39 | break; | ||
40 | if (prev >= 0) { | ||
41 | assert(nr_cpus + cpu - prev - 1 < MAX_NR_CPUS); | ||
42 | while (++prev < cpu) | ||
43 | cpumap[nr_cpus++] = prev; | ||
44 | } | ||
45 | assert (nr_cpus < MAX_NR_CPUS); | ||
46 | cpumap[nr_cpus++] = cpu; | ||
47 | if (n == 2 && sep == '-') | ||
48 | prev = cpu; | ||
49 | else | ||
50 | prev = -1; | ||
51 | if (n == 1 || sep == '\n') | ||
52 | break; | ||
53 | } | ||
54 | fclose(onlnf); | ||
55 | if (nr_cpus > 0) | ||
56 | return nr_cpus; | ||
57 | |||
58 | return default_cpu_map(); | ||
59 | } | ||
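The parsing loop in the new read_cpu_map() above is worth unpacking: each fscanf("%u%c") pulls one CPU number plus its trailing separator, and a '-' separator leaves a range open so the next number can fill in the gap. The following standalone sketch (not part of the patch) runs the same loop over an in-memory string via fmemopen(), so the expansion of a value such as "0-2,4,6-7" can be observed directly.

#define _GNU_SOURCE		/* for fmemopen() on older glibc */
#include <assert.h>
#include <stdio.h>

#define MAX_NR_CPUS 4096

static int cpumap[MAX_NR_CPUS];

/* Same structure as the loop in read_cpu_map(), but reading from 'f'. */
static int parse_cpu_list(FILE *f)
{
	int nr_cpus = 0;
	int n, prev = -1;
	unsigned int cpu;
	char sep = 0;

	for (;;) {
		n = fscanf(f, "%u%c", &cpu, &sep);
		if (n <= 0)
			break;
		if (prev >= 0) {	/* close an open "a-b" range */
			assert(nr_cpus + cpu - prev - 1 < MAX_NR_CPUS);
			while (++prev < (int)cpu)
				cpumap[nr_cpus++] = prev;
		}
		assert(nr_cpus < MAX_NR_CPUS);
		cpumap[nr_cpus++] = cpu;
		prev = (n == 2 && sep == '-') ? (int)cpu : -1;
		if (n == 1 || sep == '\n')
			break;
	}
	return nr_cpus;
}

int main(void)
{
	char buf[] = "0-2,4,6-7\n";
	FILE *f = fmemopen(buf, sizeof(buf) - 1, "r");
	int i, nr = parse_cpu_list(f);

	fclose(f);
	for (i = 0; i < nr; i++)
		printf("%d ", cpumap[i]);	/* prints: 0 1 2 4 6 7 */
	printf("\n");
	return 0;
}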
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h new file mode 100644 index 000000000000..86c78bb33098 --- /dev/null +++ b/tools/perf/util/cpumap.h | |||
@@ -0,0 +1,7 @@ | |||
1 | #ifndef __PERF_CPUMAP_H | ||
2 | #define __PERF_CPUMAP_H | ||
3 | |||
4 | extern int read_cpu_map(void); | ||
5 | extern int cpumap[]; | ||
6 | |||
7 | #endif /* __PERF_CPUMAP_H */ | ||
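For context, a minimal, hypothetical consumer of this interface (it assumes linking against the new util/cpumap.c): it opens one hardware-cycles counter per online CPU, which is essentially what the builtin-stat and builtin-top hunks above now do through sys_perf_event_open(). The raw perf_event_open syscall is used here so the sketch does not depend on the rest of the perf tool.

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

extern int read_cpu_map(void);	/* from util/cpumap.c above */
extern int cpumap[];

int main(void)
{
	struct perf_event_attr attr;
	int cpu, fd, nr_cpus = read_cpu_map();

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		/* pid = -1, cpu = cpumap[cpu]: count every task on that CPU */
		fd = syscall(__NR_perf_event_open, &attr, -1, cpumap[cpu], -1, 0);
		if (fd < 0) {
			fprintf(stderr, "CPU %d: perf_event_open failed\n",
				cpumap[cpu]);
			continue;
		}
		close(fd);
	}
	return 0;
}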
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 50a7132887f5..a33b94952e34 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h | |||
@@ -99,6 +99,15 @@ struct events_stats { | |||
99 | u64 lost; | 99 | u64 lost; |
100 | }; | 100 | }; |
101 | 101 | ||
102 | struct event_stat_id { | ||
103 | struct rb_node rb_node; | ||
104 | struct rb_root hists; | ||
105 | struct events_stats stats; | ||
106 | u64 config; | ||
107 | u64 event_stream; | ||
108 | u32 type; | ||
109 | }; | ||
110 | |||
102 | void event__print_totals(void); | 111 | void event__print_totals(void); |
103 | 112 | ||
104 | struct perf_session; | 113 | struct perf_session; |
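The new struct event_stat_id gives each counter in a session its own histogram root and totals, keyed by the attribute's type and config (the same pair later passed to __event_name() in the report loop). The real entries live in the session's stats_by_id rbtree; the sketch below uses a flat array purely to illustrate the keying and aggregation, and get_stats() is a hypothetical helper, not one introduced by the patch.

#include <stdint.h>
#include <stdio.h>

struct stat_id_sketch {
	uint32_t type;		/* e.g. PERF_TYPE_HARDWARE */
	uint64_t config;	/* e.g. PERF_COUNT_HW_CPU_CYCLES */
	uint64_t total;		/* sum of sample periods for this event */
};

static struct stat_id_sketch stats[64];
static int nr_stats;

/* Find or create the per-event entry for (type, config). */
static struct stat_id_sketch *get_stats(uint32_t type, uint64_t config)
{
	int i;

	for (i = 0; i < nr_stats; i++)
		if (stats[i].type == type && stats[i].config == config)
			return &stats[i];

	if (nr_stats == 64)
		return NULL;
	stats[nr_stats].type = type;
	stats[nr_stats].config = config;
	stats[nr_stats].total = 0;
	return &stats[nr_stats++];
}

int main(void)
{
	/* two samples for cycles (0, 0), one for instructions (0, 1) */
	get_stats(0, 0)->total += 1000;
	get_stats(0, 0)->total += 500;
	get_stats(0, 1)->total += 200;

	printf("events aggregated: %d\n", nr_stats);		/* 2 */
	printf("cycles total: %llu\n",
	       (unsigned long long)get_stats(0, 0)->total);	/* 1500 */
	return 0;
}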
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 44408c2621cf..2be33c7dbf03 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c | |||
@@ -12,12 +12,12 @@ struct callchain_param callchain_param = { | |||
12 | * histogram, sorted on item, collects counts | 12 | * histogram, sorted on item, collects counts |
13 | */ | 13 | */ |
14 | 14 | ||
15 | struct hist_entry *__perf_session__add_hist_entry(struct perf_session *self, | 15 | struct hist_entry *__perf_session__add_hist_entry(struct rb_root *hists, |
16 | struct addr_location *al, | 16 | struct addr_location *al, |
17 | struct symbol *sym_parent, | 17 | struct symbol *sym_parent, |
18 | u64 count, bool *hit) | 18 | u64 count, bool *hit) |
19 | { | 19 | { |
20 | struct rb_node **p = &self->hists.rb_node; | 20 | struct rb_node **p = &hists->rb_node; |
21 | struct rb_node *parent = NULL; | 21 | struct rb_node *parent = NULL; |
22 | struct hist_entry *he; | 22 | struct hist_entry *he; |
23 | struct hist_entry entry = { | 23 | struct hist_entry entry = { |
@@ -53,7 +53,7 @@ struct hist_entry *__perf_session__add_hist_entry(struct perf_session *self, | |||
53 | return NULL; | 53 | return NULL; |
54 | *he = entry; | 54 | *he = entry; |
55 | rb_link_node(&he->rb_node, parent, p); | 55 | rb_link_node(&he->rb_node, parent, p); |
56 | rb_insert_color(&he->rb_node, &self->hists); | 56 | rb_insert_color(&he->rb_node, hists); |
57 | *hit = false; | 57 | *hit = false; |
58 | return he; | 58 | return he; |
59 | } | 59 | } |
@@ -130,7 +130,7 @@ static void collapse__insert_entry(struct rb_root *root, struct hist_entry *he) | |||
130 | rb_insert_color(&he->rb_node, root); | 130 | rb_insert_color(&he->rb_node, root); |
131 | } | 131 | } |
132 | 132 | ||
133 | void perf_session__collapse_resort(struct perf_session *self) | 133 | void perf_session__collapse_resort(struct rb_root *hists) |
134 | { | 134 | { |
135 | struct rb_root tmp; | 135 | struct rb_root tmp; |
136 | struct rb_node *next; | 136 | struct rb_node *next; |
@@ -140,17 +140,17 @@ void perf_session__collapse_resort(struct perf_session *self) | |||
140 | return; | 140 | return; |
141 | 141 | ||
142 | tmp = RB_ROOT; | 142 | tmp = RB_ROOT; |
143 | next = rb_first(&self->hists); | 143 | next = rb_first(hists); |
144 | 144 | ||
145 | while (next) { | 145 | while (next) { |
146 | n = rb_entry(next, struct hist_entry, rb_node); | 146 | n = rb_entry(next, struct hist_entry, rb_node); |
147 | next = rb_next(&n->rb_node); | 147 | next = rb_next(&n->rb_node); |
148 | 148 | ||
149 | rb_erase(&n->rb_node, &self->hists); | 149 | rb_erase(&n->rb_node, hists); |
150 | collapse__insert_entry(&tmp, n); | 150 | collapse__insert_entry(&tmp, n); |
151 | } | 151 | } |
152 | 152 | ||
153 | self->hists = tmp; | 153 | *hists = tmp; |
154 | } | 154 | } |
155 | 155 | ||
156 | /* | 156 | /* |
@@ -183,7 +183,7 @@ static void perf_session__insert_output_hist_entry(struct rb_root *root, | |||
183 | rb_insert_color(&he->rb_node, root); | 183 | rb_insert_color(&he->rb_node, root); |
184 | } | 184 | } |
185 | 185 | ||
186 | void perf_session__output_resort(struct perf_session *self, u64 total_samples) | 186 | void perf_session__output_resort(struct rb_root *hists, u64 total_samples) |
187 | { | 187 | { |
188 | struct rb_root tmp; | 188 | struct rb_root tmp; |
189 | struct rb_node *next; | 189 | struct rb_node *next; |
@@ -194,18 +194,18 @@ void perf_session__output_resort(struct perf_session *self, u64 total_samples) | |||
194 | total_samples * (callchain_param.min_percent / 100); | 194 | total_samples * (callchain_param.min_percent / 100); |
195 | 195 | ||
196 | tmp = RB_ROOT; | 196 | tmp = RB_ROOT; |
197 | next = rb_first(&self->hists); | 197 | next = rb_first(hists); |
198 | 198 | ||
199 | while (next) { | 199 | while (next) { |
200 | n = rb_entry(next, struct hist_entry, rb_node); | 200 | n = rb_entry(next, struct hist_entry, rb_node); |
201 | next = rb_next(&n->rb_node); | 201 | next = rb_next(&n->rb_node); |
202 | 202 | ||
203 | rb_erase(&n->rb_node, &self->hists); | 203 | rb_erase(&n->rb_node, hists); |
204 | perf_session__insert_output_hist_entry(&tmp, n, | 204 | perf_session__insert_output_hist_entry(&tmp, n, |
205 | min_callchain_hits); | 205 | min_callchain_hits); |
206 | } | 206 | } |
207 | 207 | ||
208 | self->hists = tmp; | 208 | *hists = tmp; |
209 | } | 209 | } |
210 | 210 | ||
211 | static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin) | 211 | static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin) |
@@ -456,10 +456,10 @@ static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self, | |||
456 | } | 456 | } |
457 | 457 | ||
458 | static size_t hist_entry__fprintf(struct hist_entry *self, | 458 | static size_t hist_entry__fprintf(struct hist_entry *self, |
459 | struct perf_session *session, | ||
460 | struct perf_session *pair_session, | 459 | struct perf_session *pair_session, |
461 | bool show_displacement, | 460 | bool show_displacement, |
462 | long displacement, FILE *fp) | 461 | long displacement, FILE *fp, |
462 | u64 session_total) | ||
463 | { | 463 | { |
464 | struct sort_entry *se; | 464 | struct sort_entry *se; |
465 | u64 count, total; | 465 | u64 count, total; |
@@ -474,7 +474,7 @@ static size_t hist_entry__fprintf(struct hist_entry *self, | |||
474 | total = pair_session->events_stats.total; | 474 | total = pair_session->events_stats.total; |
475 | } else { | 475 | } else { |
476 | count = self->count; | 476 | count = self->count; |
477 | total = session->events_stats.total; | 477 | total = session_total; |
478 | } | 478 | } |
479 | 479 | ||
480 | if (total) | 480 | if (total) |
@@ -496,8 +496,8 @@ static size_t hist_entry__fprintf(struct hist_entry *self, | |||
496 | 496 | ||
497 | if (total > 0) | 497 | if (total > 0) |
498 | old_percent = (count * 100.0) / total; | 498 | old_percent = (count * 100.0) / total; |
499 | if (session->events_stats.total > 0) | 499 | if (session_total > 0) |
500 | new_percent = (self->count * 100.0) / session->events_stats.total; | 500 | new_percent = (self->count * 100.0) / session_total; |
501 | 501 | ||
502 | diff = new_percent - old_percent; | 502 | diff = new_percent - old_percent; |
503 | 503 | ||
@@ -544,16 +544,17 @@ static size_t hist_entry__fprintf(struct hist_entry *self, | |||
544 | left_margin -= thread__comm_len(self->thread); | 544 | left_margin -= thread__comm_len(self->thread); |
545 | } | 545 | } |
546 | 546 | ||
547 | hist_entry_callchain__fprintf(fp, self, session->events_stats.total, | 547 | hist_entry_callchain__fprintf(fp, self, session_total, |
548 | left_margin); | 548 | left_margin); |
549 | } | 549 | } |
550 | 550 | ||
551 | return ret; | 551 | return ret; |
552 | } | 552 | } |
553 | 553 | ||
554 | size_t perf_session__fprintf_hists(struct perf_session *self, | 554 | size_t perf_session__fprintf_hists(struct rb_root *hists, |
555 | struct perf_session *pair, | 555 | struct perf_session *pair, |
556 | bool show_displacement, FILE *fp) | 556 | bool show_displacement, FILE *fp, |
557 | u64 session_total) | ||
557 | { | 558 | { |
558 | struct sort_entry *se; | 559 | struct sort_entry *se; |
559 | struct rb_node *nd; | 560 | struct rb_node *nd; |
@@ -641,7 +642,7 @@ size_t perf_session__fprintf_hists(struct perf_session *self, | |||
641 | fprintf(fp, "\n#\n"); | 642 | fprintf(fp, "\n#\n"); |
642 | 643 | ||
643 | print_entries: | 644 | print_entries: |
644 | for (nd = rb_first(&self->hists); nd; nd = rb_next(nd)) { | 645 | for (nd = rb_first(hists); nd; nd = rb_next(nd)) { |
645 | struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); | 646 | struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); |
646 | 647 | ||
647 | if (show_displacement) { | 648 | if (show_displacement) { |
@@ -652,8 +653,13 @@ print_entries: | |||
652 | displacement = 0; | 653 | displacement = 0; |
653 | ++position; | 654 | ++position; |
654 | } | 655 | } |
655 | ret += hist_entry__fprintf(h, self, pair, show_displacement, | 656 | ret += hist_entry__fprintf(h, pair, show_displacement, |
656 | displacement, fp); | 657 | displacement, fp, session_total); |
658 | if (h->map == NULL && verbose > 1) { | ||
659 | __map_groups__fprintf_maps(&h->thread->mg, | ||
660 | MAP__FUNCTION, fp); | ||
661 | fprintf(fp, "%.10s end\n", graph_dotted_line); | ||
662 | } | ||
657 | } | 663 | } |
658 | 664 | ||
659 | free(rem_sq_bracket); | 665 | free(rem_sq_bracket); |
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index e5f99b24048b..16f360cce5bf 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h | |||
@@ -10,8 +10,9 @@ struct perf_session; | |||
10 | struct hist_entry; | 10 | struct hist_entry; |
11 | struct addr_location; | 11 | struct addr_location; |
12 | struct symbol; | 12 | struct symbol; |
13 | struct rb_root; | ||
13 | 14 | ||
14 | struct hist_entry *__perf_session__add_hist_entry(struct perf_session *self, | 15 | struct hist_entry *__perf_session__add_hist_entry(struct rb_root *hists, |
15 | struct addr_location *al, | 16 | struct addr_location *al, |
16 | struct symbol *parent, | 17 | struct symbol *parent, |
17 | u64 count, bool *hit); | 18 | u64 count, bool *hit); |
@@ -19,9 +20,10 @@ extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *); | |||
19 | extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *); | 20 | extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *); |
20 | void hist_entry__free(struct hist_entry *); | 21 | void hist_entry__free(struct hist_entry *); |
21 | 22 | ||
22 | void perf_session__output_resort(struct perf_session *self, u64 total_samples); | 23 | void perf_session__output_resort(struct rb_root *hists, u64 total_samples); |
23 | void perf_session__collapse_resort(struct perf_session *self); | 24 | void perf_session__collapse_resort(struct rb_root *hists); |
24 | size_t perf_session__fprintf_hists(struct perf_session *self, | 25 | size_t perf_session__fprintf_hists(struct rb_root *hists, |
25 | struct perf_session *pair, | 26 | struct perf_session *pair, |
26 | bool show_displacement, FILE *fp); | 27 | bool show_displacement, FILE *fp, |
28 | u64 session_total); | ||
27 | #endif /* __PERF_HIST_H */ | 29 | #endif /* __PERF_HIST_H */ |
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index e77dc886760e..1e6c65ebbd80 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c | |||
@@ -169,7 +169,7 @@ static const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname) | |||
169 | { | 169 | { |
170 | Dwarf_Files *files; | 170 | Dwarf_Files *files; |
171 | size_t nfiles, i; | 171 | size_t nfiles, i; |
172 | const char *src; | 172 | const char *src = NULL; |
173 | int ret; | 173 | int ret; |
174 | 174 | ||
175 | if (!fname) | 175 | if (!fname) |
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 0de7258e70a5..eed1cb889008 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c | |||
@@ -70,6 +70,7 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc | |||
70 | 70 | ||
71 | memcpy(self->filename, filename, len); | 71 | memcpy(self->filename, filename, len); |
72 | self->threads = RB_ROOT; | 72 | self->threads = RB_ROOT; |
73 | self->stats_by_id = RB_ROOT; | ||
73 | self->last_match = NULL; | 74 | self->last_match = NULL; |
74 | self->mmap_window = 32; | 75 | self->mmap_window = 32; |
75 | self->cwd = NULL; | 76 | self->cwd = NULL; |
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 31950fcd8a4d..5c33417eebb3 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h | |||
@@ -20,6 +20,7 @@ struct perf_session { | |||
20 | struct thread *last_match; | 20 | struct thread *last_match; |
21 | struct map *vmlinux_maps[MAP__NR_TYPES]; | 21 | struct map *vmlinux_maps[MAP__NR_TYPES]; |
22 | struct events_stats events_stats; | 22 | struct events_stats events_stats; |
23 | struct rb_root stats_by_id; | ||
23 | unsigned long event_total[PERF_RECORD_MAX]; | 24 | unsigned long event_total[PERF_RECORD_MAX]; |
24 | unsigned long unknown_events; | 25 | unsigned long unknown_events; |
25 | struct rb_root hists; | 26 | struct rb_root hists; |
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 21b92162282b..fa968312ee7d 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c | |||
@@ -79,8 +79,8 @@ int thread__comm_len(struct thread *self) | |||
79 | return self->comm_len; | 79 | return self->comm_len; |
80 | } | 80 | } |
81 | 81 | ||
82 | static size_t __map_groups__fprintf_maps(struct map_groups *self, | 82 | size_t __map_groups__fprintf_maps(struct map_groups *self, |
83 | enum map_type type, FILE *fp) | 83 | enum map_type type, FILE *fp) |
84 | { | 84 | { |
85 | size_t printed = fprintf(fp, "%s:\n", map_type__name[type]); | 85 | size_t printed = fprintf(fp, "%s:\n", map_type__name[type]); |
86 | struct rb_node *nd; | 86 | struct rb_node *nd; |
@@ -89,7 +89,7 @@ static size_t __map_groups__fprintf_maps(struct map_groups *self, | |||
89 | struct map *pos = rb_entry(nd, struct map, rb_node); | 89 | struct map *pos = rb_entry(nd, struct map, rb_node); |
90 | printed += fprintf(fp, "Map:"); | 90 | printed += fprintf(fp, "Map:"); |
91 | printed += map__fprintf(pos, fp); | 91 | printed += map__fprintf(pos, fp); |
92 | if (verbose > 1) { | 92 | if (verbose > 2) { |
93 | printed += dso__fprintf(pos->dso, type, fp); | 93 | printed += dso__fprintf(pos->dso, type, fp); |
94 | printed += fprintf(fp, "--\n"); | 94 | printed += fprintf(fp, "--\n"); |
95 | } | 95 | } |
@@ -183,8 +183,8 @@ struct thread *perf_session__findnew(struct perf_session *self, pid_t pid) | |||
183 | return th; | 183 | return th; |
184 | } | 184 | } |
185 | 185 | ||
186 | static void map_groups__remove_overlappings(struct map_groups *self, | 186 | static int map_groups__fixup_overlappings(struct map_groups *self, |
187 | struct map *map) | 187 | struct map *map) |
188 | { | 188 | { |
189 | struct rb_root *root = &self->maps[map->type]; | 189 | struct rb_root *root = &self->maps[map->type]; |
190 | struct rb_node *next = rb_first(root); | 190 | struct rb_node *next = rb_first(root); |
@@ -209,7 +209,36 @@ static void map_groups__remove_overlappings(struct map_groups *self, | |||
209 | * list. | 209 | * list. |
210 | */ | 210 | */ |
211 | list_add_tail(&pos->node, &self->removed_maps[map->type]); | 211 | list_add_tail(&pos->node, &self->removed_maps[map->type]); |
212 | /* | ||
213 | * Now check if we need to create new maps for areas not | ||
214 | * overlapped by the new map: | ||
215 | */ | ||
216 | if (map->start > pos->start) { | ||
217 | struct map *before = map__clone(pos); | ||
218 | |||
219 | if (before == NULL) | ||
220 | return -ENOMEM; | ||
221 | |||
222 | before->end = map->start - 1; | ||
223 | map_groups__insert(self, before); | ||
224 | if (verbose >= 2) | ||
225 | map__fprintf(before, stderr); | ||
226 | } | ||
227 | |||
228 | if (map->end < pos->end) { | ||
229 | struct map *after = map__clone(pos); | ||
230 | |||
231 | if (after == NULL) | ||
232 | return -ENOMEM; | ||
233 | |||
234 | after->start = map->end + 1; | ||
235 | map_groups__insert(self, after); | ||
236 | if (verbose >= 2) | ||
237 | map__fprintf(after, stderr); | ||
238 | } | ||
212 | } | 239 | } |
240 | |||
241 | return 0; | ||
213 | } | 242 | } |
214 | 243 | ||
215 | void maps__insert(struct rb_root *maps, struct map *map) | 244 | void maps__insert(struct rb_root *maps, struct map *map) |
@@ -254,7 +283,7 @@ struct map *maps__find(struct rb_root *maps, u64 ip) | |||
254 | 283 | ||
255 | void thread__insert_map(struct thread *self, struct map *map) | 284 | void thread__insert_map(struct thread *self, struct map *map) |
256 | { | 285 | { |
257 | map_groups__remove_overlappings(&self->mg, map); | 286 | map_groups__fixup_overlappings(&self->mg, map); |
258 | map_groups__insert(&self->mg, map); | 287 | map_groups__insert(&self->mg, map); |
259 | } | 288 | } |
260 | 289 | ||
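The map_groups__fixup_overlappings() change above does more than evict an overlapped mapping: it clones the old map so that any address range the new map does not cover survives as a "before" and/or "after" piece. The standalone sketch below (plain structs, no perf types) shows the interval arithmetic on its own, mirroring the start-1/end+1 treatment in the hunk, i.e. assuming inclusive start/end addresses.

#include <stdio.h>

/* Inclusive address range, mirroring how start/end are used above. */
struct range {
	unsigned long start, end;
};

/*
 * Given an existing mapping 'pos' overlapped by a new mapping 'map',
 * write the surviving (non-overlapped) pieces of 'pos' into out[] and
 * return how many there are: 0, 1 or 2.
 */
static int surviving_pieces(struct range pos, struct range map,
			    struct range out[2])
{
	int n = 0;

	if (map.start > pos.start) {		/* piece before the new map */
		out[n].start = pos.start;
		out[n].end = map.start - 1;
		n++;
	}
	if (map.end < pos.end) {		/* piece after the new map */
		out[n].start = map.end + 1;
		out[n].end = pos.end;
		n++;
	}
	return n;
}

int main(void)
{
	struct range pos = { 0x1000, 0x8fff };	/* existing map */
	struct range map = { 0x3000, 0x4fff };	/* new map landing inside it */
	struct range out[2];
	int i, n = surviving_pieces(pos, map, out);

	for (i = 0; i < n; i++)	/* prints 0x1000-0x2fff and 0x5000-0x8fff */
		printf("%#lx-%#lx\n", out[i].start, out[i].end);
	return 0;
}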
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 0a28f39de545..dcf70303e58e 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h | |||
@@ -10,6 +10,9 @@ struct map_groups { | |||
10 | struct list_head removed_maps[MAP__NR_TYPES]; | 10 | struct list_head removed_maps[MAP__NR_TYPES]; |
11 | }; | 11 | }; |
12 | 12 | ||
13 | size_t __map_groups__fprintf_maps(struct map_groups *self, | ||
14 | enum map_type type, FILE *fp); | ||
15 | |||
13 | struct thread { | 16 | struct thread { |
14 | struct rb_node rb_node; | 17 | struct rb_node rb_node; |
15 | struct map_groups mg; | 18 | struct map_groups mg; |