author     Markus Metzger <markus.t.metzger@intel.com>   2009-09-15 07:00:23 -0400
committer  Ingo Molnar <mingo@elte.hu>                   2009-09-18 14:43:20 -0400
commit     5622f295b53fb60dbf9bed3e2c89d182490a8b7f (patch)
tree       8279554bddd1607d53dc06e97f4b5a1d0c085ccd /kernel
parent     4b77a7297795229eca96c41e1709a3c87909fabe (diff)
x86, perf_counter, bts: Optimize BTS overflow handling
Draining the BTS buffer on a buffer overflow interrupt takes too
long, resulting in a kernel lockup when tracing the kernel.
Restructure perf_counter sampling into sample creation and sample
output.
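The split maps onto the new helpers added in this patch: perf_prepare_sample() fills in the perf_sample_data fields and computes the event header size, and perf_output_sample() copies the prepared data into the ring buffer. A minimal sketch of the resulting call sequence (the emit_one_sample() wrapper name is hypothetical; its body follows the restructured perf_counter_output() in the diff below):

/*
 * Sketch of the two-phase sampling path introduced by this patch.
 * Error handling and locking details are omitted.
 */
static void emit_one_sample(struct perf_counter *counter, int nmi,
                            struct perf_sample_data *data,
                            struct pt_regs *regs)
{
        struct perf_output_handle handle;
        struct perf_event_header header;

        /* Phase 1: fill in data->* fields and compute header.size. */
        perf_prepare_sample(&header, data, counter, regs);

        /* Phase 2: reserve buffer space and copy the prepared sample out. */
        if (perf_output_begin(&handle, counter, header.size, nmi, 1))
                return;         /* no space; the sample is dropped */

        perf_output_sample(&handle, &header, data, counter);
        perf_output_end(&handle);
}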
Prepare a single reference sample for BTS sampling and update the
from and to address fields when draining the BTS buffer. Drain the
entire BTS buffer between a single perf_output_begin() /
perf_output_end() pair.
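The x86 BTS side is not part of the kernel/-only diffstat below, so the following is only an illustrative sketch of the scheme described above: the record layout and the drain_bts_buffer() helper are assumptions, while the generic calls match the new API. One reference sample is prepared, space for the whole buffer is reserved once, and only the branch from/to addresses are updated per record:

struct bts_record {
        u64 from;
        u64 to;
        u64 flags;
};

static void drain_bts_buffer(struct perf_counter *counter,
                             struct bts_record *at, struct bts_record *top,
                             struct pt_regs *regs)
{
        struct perf_output_handle handle;
        struct perf_event_header header;
        struct perf_sample_data data;

        /* Prepare a single reference sample for all records in the buffer. */
        data.addr = 0;
        perf_prepare_sample(&header, &data, counter, regs);

        /* Reserve space for every record between one begin()/end() pair... */
        if (perf_output_begin(&handle, counter,
                              header.size * (top - at), 1, 1))
                return;

        /* ...then patch only the from/to address fields per record. */
        for (; at < top; at++) {
                data.ip   = at->from;
                data.addr = at->to;

                perf_output_sample(&handle, &header, &data, counter);
        }

        perf_output_end(&handle);
}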
Signed-off-by: Markus Metzger <markus.t.metzger@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20090915130023.A16204@sedona.ch.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/perf_counter.c | 312
1 file changed, 165 insertions(+), 147 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 29b73b6e8146..215845243a69 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2512,18 +2512,6 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 /*
  * Output
  */
-
-struct perf_output_handle {
-        struct perf_counter     *counter;
-        struct perf_mmap_data   *data;
-        unsigned long           head;
-        unsigned long           offset;
-        int                     nmi;
-        int                     sample;
-        int                     locked;
-        unsigned long           flags;
-};
-
 static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
                               unsigned long offset, unsigned long head)
 {
@@ -2633,8 +2621,8 @@ out:
         local_irq_restore(handle->flags);
 }
 
-static void perf_output_copy(struct perf_output_handle *handle,
-                             const void *buf, unsigned int len)
+void perf_output_copy(struct perf_output_handle *handle,
+                      const void *buf, unsigned int len)
 {
         unsigned int            pages_mask;
         unsigned int            offset;
@@ -2669,12 +2657,9 @@ static void perf_output_copy(struct perf_output_handle *handle,
         WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
 }
 
-#define perf_output_put(handle, x) \
-        perf_output_copy((handle), &(x), sizeof(x))
-
-static int perf_output_begin(struct perf_output_handle *handle,
-                             struct perf_counter *counter, unsigned int size,
-                             int nmi, int sample)
+int perf_output_begin(struct perf_output_handle *handle,
+                      struct perf_counter *counter, unsigned int size,
+                      int nmi, int sample)
 {
         struct perf_counter *output_counter;
         struct perf_mmap_data *data;
@@ -2756,7 +2741,7 @@ out:
         return -ENOSPC;
 }
 
-static void perf_output_end(struct perf_output_handle *handle)
+void perf_output_end(struct perf_output_handle *handle)
 {
         struct perf_counter *counter = handle->counter;
         struct perf_mmap_data *data = handle->data;
@@ -2870,82 +2855,151 @@ static void perf_output_read(struct perf_output_handle *handle,
                 perf_output_read_one(handle, counter);
 }
 
-void perf_counter_output(struct perf_counter *counter, int nmi,
-                         struct perf_sample_data *data)
+void perf_output_sample(struct perf_output_handle *handle,
+                        struct perf_event_header *header,
+                        struct perf_sample_data *data,
+                        struct perf_counter *counter)
+{
+        u64 sample_type = data->type;
+
+        perf_output_put(handle, *header);
+
+        if (sample_type & PERF_SAMPLE_IP)
+                perf_output_put(handle, data->ip);
+
+        if (sample_type & PERF_SAMPLE_TID)
+                perf_output_put(handle, data->tid_entry);
+
+        if (sample_type & PERF_SAMPLE_TIME)
+                perf_output_put(handle, data->time);
+
+        if (sample_type & PERF_SAMPLE_ADDR)
+                perf_output_put(handle, data->addr);
+
+        if (sample_type & PERF_SAMPLE_ID)
+                perf_output_put(handle, data->id);
+
+        if (sample_type & PERF_SAMPLE_STREAM_ID)
+                perf_output_put(handle, data->stream_id);
+
+        if (sample_type & PERF_SAMPLE_CPU)
+                perf_output_put(handle, data->cpu_entry);
+
+        if (sample_type & PERF_SAMPLE_PERIOD)
+                perf_output_put(handle, data->period);
+
+        if (sample_type & PERF_SAMPLE_READ)
+                perf_output_read(handle, counter);
+
+        if (sample_type & PERF_SAMPLE_CALLCHAIN) {
+                if (data->callchain) {
+                        int size = 1;
+
+                        if (data->callchain)
+                                size += data->callchain->nr;
+
+                        size *= sizeof(u64);
+
+                        perf_output_copy(handle, data->callchain, size);
+                } else {
+                        u64 nr = 0;
+                        perf_output_put(handle, nr);
+                }
+        }
+
+        if (sample_type & PERF_SAMPLE_RAW) {
+                if (data->raw) {
+                        perf_output_put(handle, data->raw->size);
+                        perf_output_copy(handle, data->raw->data,
+                                         data->raw->size);
+                } else {
+                        struct {
+                                u32     size;
+                                u32     data;
+                        } raw = {
+                                .size = sizeof(u32),
+                                .data = 0,
+                        };
+                        perf_output_put(handle, raw);
+                }
+        }
+}
+
+void perf_prepare_sample(struct perf_event_header *header,
+                         struct perf_sample_data *data,
+                         struct perf_counter *counter,
+                         struct pt_regs *regs)
 {
-        int ret;
         u64 sample_type = counter->attr.sample_type;
-        struct perf_output_handle handle;
-        struct perf_event_header header;
-        u64 ip;
-        struct {
-                u32 pid, tid;
-        } tid_entry;
-        struct perf_callchain_entry *callchain = NULL;
-        int callchain_size = 0;
-        u64 time;
-        struct {
-                u32 cpu, reserved;
-        } cpu_entry;
 
-        header.type = PERF_EVENT_SAMPLE;
-        header.size = sizeof(header);
+        data->type = sample_type;
 
-        header.misc = 0;
-        header.misc |= perf_misc_flags(data->regs);
+        header->type = PERF_EVENT_SAMPLE;
+        header->size = sizeof(*header);
+
+        header->misc = 0;
+        header->misc |= perf_misc_flags(regs);
 
         if (sample_type & PERF_SAMPLE_IP) {
-                ip = perf_instruction_pointer(data->regs);
-                header.size += sizeof(ip);
+                data->ip = perf_instruction_pointer(regs);
+
+                header->size += sizeof(data->ip);
         }
 
         if (sample_type & PERF_SAMPLE_TID) {
                 /* namespace issues */
-                tid_entry.pid = perf_counter_pid(counter, current);
-                tid_entry.tid = perf_counter_tid(counter, current);
+                data->tid_entry.pid = perf_counter_pid(counter, current);
+                data->tid_entry.tid = perf_counter_tid(counter, current);
 
-                header.size += sizeof(tid_entry);
+                header->size += sizeof(data->tid_entry);
         }
 
         if (sample_type & PERF_SAMPLE_TIME) {
                 /*
                  * Maybe do better on x86 and provide cpu_clock_nmi()
                  */
-                time = sched_clock();
+                data->time = sched_clock();
 
-                header.size += sizeof(u64);
+                header->size += sizeof(data->time);
         }
 
         if (sample_type & PERF_SAMPLE_ADDR)
-                header.size += sizeof(u64);
+                header->size += sizeof(data->addr);
 
-        if (sample_type & PERF_SAMPLE_ID)
-                header.size += sizeof(u64);
+        if (sample_type & PERF_SAMPLE_ID) {
+                data->id = primary_counter_id(counter);
 
-        if (sample_type & PERF_SAMPLE_STREAM_ID)
-                header.size += sizeof(u64);
+                header->size += sizeof(data->id);
+        }
+
+        if (sample_type & PERF_SAMPLE_STREAM_ID) {
+                data->stream_id = counter->id;
+
+                header->size += sizeof(data->stream_id);
+        }
 
         if (sample_type & PERF_SAMPLE_CPU) {
-                header.size += sizeof(cpu_entry);
+                data->cpu_entry.cpu = raw_smp_processor_id();
+                data->cpu_entry.reserved = 0;
 
-                cpu_entry.cpu = raw_smp_processor_id();
-                cpu_entry.reserved = 0;
+                header->size += sizeof(data->cpu_entry);
         }
 
         if (sample_type & PERF_SAMPLE_PERIOD)
-                header.size += sizeof(u64);
+                header->size += sizeof(data->period);
 
         if (sample_type & PERF_SAMPLE_READ)
-                header.size += perf_counter_read_size(counter);
+                header->size += perf_counter_read_size(counter);
 
         if (sample_type & PERF_SAMPLE_CALLCHAIN) {
-                callchain = perf_callchain(data->regs);
+                int size = 1;
 
-                if (callchain) {
-                        callchain_size = (1 + callchain->nr) * sizeof(u64);
-                        header.size += callchain_size;
-                } else
-                        header.size += sizeof(u64);
+                data->callchain = perf_callchain(regs);
+
+                if (data->callchain)
+                        size += data->callchain->nr;
+
+                header->size += size * sizeof(u64);
         }
 
         if (sample_type & PERF_SAMPLE_RAW) {
@@ -2957,69 +3011,23 @@ void perf_counter_output(struct perf_counter *counter, int nmi,
                         size += sizeof(u32);
 
                 WARN_ON_ONCE(size & (sizeof(u64)-1));
-                header.size += size;
-        }
-
-        ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
-        if (ret)
-                return;
-
-        perf_output_put(&handle, header);
-
-        if (sample_type & PERF_SAMPLE_IP)
-                perf_output_put(&handle, ip);
-
-        if (sample_type & PERF_SAMPLE_TID)
-                perf_output_put(&handle, tid_entry);
-
-        if (sample_type & PERF_SAMPLE_TIME)
-                perf_output_put(&handle, time);
-
-        if (sample_type & PERF_SAMPLE_ADDR)
-                perf_output_put(&handle, data->addr);
-
-        if (sample_type & PERF_SAMPLE_ID) {
-                u64 id = primary_counter_id(counter);
-
-                perf_output_put(&handle, id);
+                header->size += size;
         }
+}
 
-        if (sample_type & PERF_SAMPLE_STREAM_ID)
-                perf_output_put(&handle, counter->id);
-
-        if (sample_type & PERF_SAMPLE_CPU)
-                perf_output_put(&handle, cpu_entry);
-
-        if (sample_type & PERF_SAMPLE_PERIOD)
-                perf_output_put(&handle, data->period);
+static void perf_counter_output(struct perf_counter *counter, int nmi,
+                                struct perf_sample_data *data,
+                                struct pt_regs *regs)
+{
+        struct perf_output_handle handle;
+        struct perf_event_header header;
 
-        if (sample_type & PERF_SAMPLE_READ)
-                perf_output_read(&handle, counter);
+        perf_prepare_sample(&header, data, counter, regs);
 
-        if (sample_type & PERF_SAMPLE_CALLCHAIN) {
-                if (callchain)
-                        perf_output_copy(&handle, callchain, callchain_size);
-                else {
-                        u64 nr = 0;
-                        perf_output_put(&handle, nr);
-                }
-        }
+        if (perf_output_begin(&handle, counter, header.size, nmi, 1))
+                return;
 
-        if (sample_type & PERF_SAMPLE_RAW) {
-                if (data->raw) {
-                        perf_output_put(&handle, data->raw->size);
-                        perf_output_copy(&handle, data->raw->data, data->raw->size);
-                } else {
-                        struct {
-                                u32 size;
-                                u32 data;
-                        } raw = {
-                                .size = sizeof(u32),
-                                .data = 0,
-                        };
-                        perf_output_put(&handle, raw);
-                }
-        }
+        perf_output_sample(&handle, &header, data, counter);
 
         perf_output_end(&handle);
 }
@@ -3501,7 +3509,8 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
  */
 
 static int __perf_counter_overflow(struct perf_counter *counter, int nmi,
-                                   int throttle, struct perf_sample_data *data)
+                                   int throttle, struct perf_sample_data *data,
+                                   struct pt_regs *regs)
 {
         int events = atomic_read(&counter->event_limit);
         struct hw_perf_counter *hwc = &counter->hw;
@@ -3557,14 +3566,15 @@ static int __perf_counter_overflow(struct perf_counter *counter, int nmi,
                 perf_counter_disable(counter);
         }
 
-        perf_counter_output(counter, nmi, data);
+        perf_counter_output(counter, nmi, data, regs);
         return ret;
 }
 
 int perf_counter_overflow(struct perf_counter *counter, int nmi,
-                          struct perf_sample_data *data)
+                          struct perf_sample_data *data,
+                          struct pt_regs *regs)
 {
-        return __perf_counter_overflow(counter, nmi, 1, data);
+        return __perf_counter_overflow(counter, nmi, 1, data, regs);
 }
 
 /*
@@ -3602,7 +3612,8 @@ again:
 }
 
 static void perf_swcounter_overflow(struct perf_counter *counter,
-                                    int nmi, struct perf_sample_data *data)
+                                    int nmi, struct perf_sample_data *data,
+                                    struct pt_regs *regs)
 {
         struct hw_perf_counter *hwc = &counter->hw;
         int throttle = 0;
@@ -3615,7 +3626,8 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
                 return;
 
         for (; overflow; overflow--) {
-                if (__perf_counter_overflow(counter, nmi, throttle, data)) {
+                if (__perf_counter_overflow(counter, nmi, throttle,
+                                            data, regs)) {
                         /*
                          * We inhibit the overflow from happening when
                          * hwc->interrupts == MAX_INTERRUPTS.
@@ -3634,7 +3646,8 @@ static void perf_swcounter_unthrottle(struct perf_counter *counter)
 }
 
 static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
-                               int nmi, struct perf_sample_data *data)
+                               int nmi, struct perf_sample_data *data,
+                               struct pt_regs *regs)
 {
         struct hw_perf_counter *hwc = &counter->hw;
 
@@ -3643,11 +3656,11 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
         if (!hwc->sample_period)
                 return;
 
-        if (!data->regs)
+        if (!regs)
                 return;
 
         if (!atomic64_add_negative(nr, &hwc->period_left))
-                perf_swcounter_overflow(counter, nmi, data);
+                perf_swcounter_overflow(counter, nmi, data, regs);
 }
 
 static int perf_swcounter_is_counting(struct perf_counter *counter)
@@ -3706,7 +3719,8 @@ static int perf_swcounter_match(struct perf_counter *counter,
 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
                                      enum perf_type_id type,
                                      u32 event, u64 nr, int nmi,
-                                     struct perf_sample_data *data)
+                                     struct perf_sample_data *data,
+                                     struct pt_regs *regs)
 {
         struct perf_counter *counter;
 
@@ -3715,8 +3729,8 @@ static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
 
         rcu_read_lock();
         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
-                if (perf_swcounter_match(counter, type, event, data->regs))
-                        perf_swcounter_add(counter, nr, nmi, data);
+                if (perf_swcounter_match(counter, type, event, regs))
+                        perf_swcounter_add(counter, nr, nmi, data, regs);
         }
         rcu_read_unlock();
 }
@@ -3737,7 +3751,8 @@ static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
 
 static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
                                     u64 nr, int nmi,
-                                    struct perf_sample_data *data)
+                                    struct perf_sample_data *data,
+                                    struct pt_regs *regs)
 {
         struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
         int *recursion = perf_swcounter_recursion_context(cpuctx);
@@ -3750,7 +3765,7 @@ static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
         barrier();
 
         perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
-                                 nr, nmi, data);
+                                 nr, nmi, data, regs);
         rcu_read_lock();
         /*
          * doesn't really matter which of the child contexts the
@@ -3758,7 +3773,7 @@ static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
          */
         ctx = rcu_dereference(current->perf_counter_ctxp);
         if (ctx)
-                perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data);
+                perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data, regs);
         rcu_read_unlock();
 
         barrier();
@@ -3772,11 +3787,11 @@ void __perf_swcounter_event(u32 event, u64 nr, int nmi,
                             struct pt_regs *regs, u64 addr)
 {
         struct perf_sample_data data = {
-                .regs = regs,
                 .addr = addr,
         };
 
-        do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, &data);
+        do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi,
+                                &data, regs);
 }
 
 static void perf_swcounter_read(struct perf_counter *counter)
@@ -3813,6 +3828,7 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 {
         enum hrtimer_restart ret = HRTIMER_RESTART;
         struct perf_sample_data data;
+        struct pt_regs *regs;
         struct perf_counter *counter;
         u64 period;
 
@@ -3820,17 +3836,17 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
         counter->pmu->read(counter);
 
         data.addr = 0;
-        data.regs = get_irq_regs();
+        regs = get_irq_regs();
         /*
          * In case we exclude kernel IPs or are somehow not in interrupt
          * context, provide the next best thing, the user IP.
         */
-        if ((counter->attr.exclude_kernel || !data.regs) &&
+        if ((counter->attr.exclude_kernel || !regs) &&
                         !counter->attr.exclude_user)
-                data.regs = task_pt_regs(current);
+                regs = task_pt_regs(current);
 
-        if (data.regs) {
-                if (perf_counter_overflow(counter, 0, &data))
+        if (regs) {
+                if (perf_counter_overflow(counter, 0, &data, regs))
                         ret = HRTIMER_NORESTART;
         }
 
@@ -3966,15 +3982,17 @@ void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
         };
 
         struct perf_sample_data data = {
-                .regs = get_irq_regs(),
                 .addr = addr,
                 .raw = &raw,
         };
 
-        if (!data.regs)
-                data.regs = task_pt_regs(current);
+        struct pt_regs *regs = get_irq_regs();
+
+        if (!regs)
+                regs = task_pt_regs(current);
 
-        do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data);
+        do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
+                                &data, regs);
 }
 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
 
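For callers outside this file, the visible change is that pt_regs is now passed explicitly instead of being carried in perf_sample_data. A minimal sketch of a PMU overflow handler using the new perf_counter_overflow() signature; the handler itself and its surrounding context are hypothetical, only the overflow call matches the diff above:

/*
 * Hypothetical PMU interrupt handler fragment: regs is handed to
 * perf_counter_overflow() directly rather than being stored in
 * perf_sample_data as before this patch.
 */
static void pmu_handle_overflow(struct perf_counter *counter,
                                struct pt_regs *regs)
{
        struct perf_sample_data data;

        data.addr   = 0;
        data.period = counter->hw.last_period;

        /*
         * nmi == 1: we are in NMI context.  A non-zero return value asks
         * the caller to stop the counter (event limit reached or throttled).
         */
        if (perf_counter_overflow(counter, 1, &data, regs))
                counter->pmu->disable(counter);
}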