| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-06-29 07:39:17 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-06-29 07:39:17 -0400 |
| commit | 57103eb7c6cad04c0611b7a5767a381b34b8b0ab (patch) | |
| tree | e9691621b8c0be4b1e3f82f6b17948c8140c43f9 | |
| parent | eed7d30e126dc5b883b77f3e26bbca6c5b0f4222 (diff) | |
| parent | 8b12b812f5367c2469fb937da7e28dd321ad8d7b (diff) | |
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
"Various fixes, most of them related to bugs perf fuzzing found in the
x86 code"
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf/x86/regs: Use PERF_REG_EXTENDED_MASK
perf/x86: Remove pmu->pebs_no_xmm_regs
perf/x86: Clean up PEBS_XMM_REGS
perf/x86/regs: Check reserved bits
perf/x86: Disable extended registers for non-supported PMUs
perf/ioctl: Add check for the sample_period value
perf/core: Fix perf_sample_regs_user() mm check
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/x86/events/core.c | 6 |
| -rw-r--r-- | arch/x86/events/intel/ds.c | 9 |
| -rw-r--r-- | arch/x86/events/perf_event.h | 21 |
| -rw-r--r-- | arch/x86/include/uapi/asm/perf_regs.h | 3 |
| -rw-r--r-- | arch/x86/kernel/perf_regs.c | 7 |
| -rw-r--r-- | include/linux/perf_event.h | 1 |
| -rw-r--r-- | include/linux/perf_regs.h | 8 |
| -rw-r--r-- | kernel/events/core.c | 23 |
| -rw-r--r-- | tools/arch/x86/include/uapi/asm/perf_regs.h | 3 |
| -rw-r--r-- | tools/perf/arch/x86/include/perf_regs.h | 1 |
| -rw-r--r-- | tools/perf/arch/x86/util/perf_regs.c | 4 |

11 files changed, 48 insertions, 38 deletions
```diff
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index f315425d8468..52a97463cb24 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -561,14 +561,14 @@ int x86_pmu_hw_config(struct perf_event *event)
         }
 
         /* sample_regs_user never support XMM registers */
-        if (unlikely(event->attr.sample_regs_user & PEBS_XMM_REGS))
+        if (unlikely(event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK))
                 return -EINVAL;
         /*
          * Besides the general purpose registers, XMM registers may
          * be collected in PEBS on some platforms, e.g. Icelake
          */
-        if (unlikely(event->attr.sample_regs_intr & PEBS_XMM_REGS)) {
-                if (x86_pmu.pebs_no_xmm_regs)
+        if (unlikely(event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK)) {
+                if (!(event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS))
                         return -EINVAL;
 
                 if (!event->attr.precise_ip)
```
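For orientation, here is a rough userspace sketch (not part of this series) of the case the x86_pmu_hw_config() check above handles: it opens a precise cycles event asking for the extended (XMM) registers in sample_regs_intr, much like the tools/perf arch__intr_reg_mask() probe further down in this merge. On a PMU without PERF_PMU_CAP_EXTENDED_REGS the open is expected to fail (EOPNOTSUPP or EINVAL); on Icelake-style adaptive PEBS it should succeed. PERF_REG_EXTENDED_MASK comes from asm/perf_regs.h as patched here, so headers from an older kernel will not have it.

```c
/* Hedged sketch: request XMM registers in interrupt samples. */
#include <linux/perf_event.h>
#include <asm/perf_regs.h>              /* PERF_REG_EXTENDED_MASK (patched headers) */
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size               = sizeof(attr);
        attr.type               = PERF_TYPE_HARDWARE;
        attr.config             = PERF_COUNT_HW_CPU_CYCLES;
        attr.sample_period      = 100000;
        attr.sample_type        = PERF_SAMPLE_REGS_INTR;
        attr.sample_regs_intr   = PERF_REG_EXTENDED_MASK;   /* all XMM registers */
        attr.precise_ip         = 1;                         /* PEBS is required for XMM */
        attr.disabled           = 1;
        attr.exclude_kernel     = 1;

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
                perror("perf_event_open (extended regs unsupported here?)");
        else
                close(fd);
        return 0;
}
```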
```diff
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 7acc526b4ad2..505c73dc6a73 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -987,7 +987,7 @@ static u64 pebs_update_adaptive_cfg(struct perf_event *event)
                 pebs_data_cfg |= PEBS_DATACFG_GP;
 
         if ((sample_type & PERF_SAMPLE_REGS_INTR) &&
-            (attr->sample_regs_intr & PEBS_XMM_REGS))
+            (attr->sample_regs_intr & PERF_REG_EXTENDED_MASK))
                 pebs_data_cfg |= PEBS_DATACFG_XMMS;
 
         if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
@@ -1964,10 +1964,9 @@ void __init intel_ds_init(void)
         x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
         x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
         x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
-        if (x86_pmu.version <= 4) {
+        if (x86_pmu.version <= 4)
                 x86_pmu.pebs_no_isolation = 1;
-                x86_pmu.pebs_no_xmm_regs = 1;
-        }
+
         if (x86_pmu.pebs) {
                 char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
                 char *pebs_qual = "";
@@ -2020,9 +2019,9 @@ void __init intel_ds_init(void)
                                         PERF_SAMPLE_TIME;
                                 x86_pmu.flags |= PMU_FL_PEBS_ALL;
                                 pebs_qual = "-baseline";
+                                x86_get_pmu()->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
                         } else {
                                 /* Only basic record supported */
-                                x86_pmu.pebs_no_xmm_regs = 1;
                                 x86_pmu.large_pebs_flags &=
                                         ~(PERF_SAMPLE_ADDR |
                                           PERF_SAMPLE_TIME |
```
```diff
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index a6ac2f4f76fc..4e346856ee19 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -121,24 +121,6 @@ struct amd_nb {
          (1ULL << PERF_REG_X86_R14)   | \
          (1ULL << PERF_REG_X86_R15))
 
-#define PEBS_XMM_REGS                   \
-        ((1ULL << PERF_REG_X86_XMM0)  | \
-         (1ULL << PERF_REG_X86_XMM1)  | \
-         (1ULL << PERF_REG_X86_XMM2)  | \
-         (1ULL << PERF_REG_X86_XMM3)  | \
-         (1ULL << PERF_REG_X86_XMM4)  | \
-         (1ULL << PERF_REG_X86_XMM5)  | \
-         (1ULL << PERF_REG_X86_XMM6)  | \
-         (1ULL << PERF_REG_X86_XMM7)  | \
-         (1ULL << PERF_REG_X86_XMM8)  | \
-         (1ULL << PERF_REG_X86_XMM9)  | \
-         (1ULL << PERF_REG_X86_XMM10) | \
-         (1ULL << PERF_REG_X86_XMM11) | \
-         (1ULL << PERF_REG_X86_XMM12) | \
-         (1ULL << PERF_REG_X86_XMM13) | \
-         (1ULL << PERF_REG_X86_XMM14) | \
-         (1ULL << PERF_REG_X86_XMM15))
-
 /*
  * Per register state.
  */
@@ -668,8 +650,7 @@ struct x86_pmu {
                         pebs_broken             :1,
                         pebs_prec_dist          :1,
                         pebs_no_tlb             :1,
-                        pebs_no_isolation       :1,
-                        pebs_no_xmm_regs        :1;
+                        pebs_no_isolation       :1;
         int             pebs_record_size;
         int             pebs_buffer_size;
         int             max_pebs_events;
```
```diff
diff --git a/arch/x86/include/uapi/asm/perf_regs.h b/arch/x86/include/uapi/asm/perf_regs.h
index ac67bbea10ca..7c9d2bb3833b 100644
--- a/arch/x86/include/uapi/asm/perf_regs.h
+++ b/arch/x86/include/uapi/asm/perf_regs.h
@@ -52,4 +52,7 @@ enum perf_event_x86_regs {
         /* These include both GPRs and XMMX registers */
         PERF_REG_X86_XMM_MAX = PERF_REG_X86_XMM15 + 2,
 };
+
+#define PERF_REG_EXTENDED_MASK  (~((1ULL << PERF_REG_X86_XMM0) - 1))
+
 #endif /* _ASM_X86_PERF_REGS_H */
```
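As a side note (not from the patch itself), a tiny standalone sketch of what this mask selects, assuming PERF_REG_X86_XMM0 is 32 as in this enum: GPR bits fall below the mask, anything at or above the first XMM slot counts as an extended register.

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed value: PERF_REG_X86_XMM0 is 32 in the x86 uapi enum. */
#define PERF_REG_X86_XMM0       32
#define PERF_REG_EXTENDED_MASK  (~((1ULL << PERF_REG_X86_XMM0) - 1))

int main(void)
{
        uint64_t gprs = (1ULL << 0) | (1ULL << 8);              /* e.g. AX and IP */
        uint64_t xmm  = gprs | (1ULL << PERF_REG_X86_XMM0);     /* add XMM0 */

        printf("mask = %#llx\n", (unsigned long long)PERF_REG_EXTENDED_MASK);
        printf("gprs extended? %d\n", !!(gprs & PERF_REG_EXTENDED_MASK));   /* 0 */
        printf("xmm  extended? %d\n", !!(xmm  & PERF_REG_EXTENDED_MASK));   /* 1 */
        return 0;
}
```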
```diff
diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c
index 07c30ee17425..bb7e1132290b 100644
--- a/arch/x86/kernel/perf_regs.c
+++ b/arch/x86/kernel/perf_regs.c
@@ -74,6 +74,9 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
         return regs_get_register(regs, pt_regs_offset[idx]);
 }
 
+#define PERF_REG_X86_RESERVED   (((1ULL << PERF_REG_X86_XMM0) - 1) & \
+                                 ~((1ULL << PERF_REG_X86_MAX) - 1))
+
 #ifdef CONFIG_X86_32
 #define REG_NOSUPPORT ((1ULL << PERF_REG_X86_R8) | \
                        (1ULL << PERF_REG_X86_R9) | \
@@ -86,7 +89,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
 
 int perf_reg_validate(u64 mask)
 {
-        if (!mask || (mask & REG_NOSUPPORT))
+        if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)))
                 return -EINVAL;
 
         return 0;
@@ -112,7 +115,7 @@ void perf_get_regs_user(struct perf_regs *regs_user,
 
 int perf_reg_validate(u64 mask)
 {
-        if (!mask || (mask & REG_NOSUPPORT))
+        if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)))
                 return -EINVAL;
 
         return 0;
```
```diff
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 0ab99c7b652d..2bca72f3028b 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -241,6 +241,7 @@ struct perf_event;
 #define PERF_PMU_CAP_NO_INTERRUPT               0x01
 #define PERF_PMU_CAP_NO_NMI                     0x02
 #define PERF_PMU_CAP_AUX_NO_SG                  0x04
+#define PERF_PMU_CAP_EXTENDED_REGS              0x08
 #define PERF_PMU_CAP_EXCLUSIVE                  0x10
 #define PERF_PMU_CAP_ITRACE                     0x20
 #define PERF_PMU_CAP_HETEROGENEOUS_CPUS         0x40
```
```diff
diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h
index 476747456bca..2d12e97d5e7b 100644
--- a/include/linux/perf_regs.h
+++ b/include/linux/perf_regs.h
@@ -11,6 +11,11 @@ struct perf_regs {
 
 #ifdef CONFIG_HAVE_PERF_REGS
 #include <asm/perf_regs.h>
+
+#ifndef PERF_REG_EXTENDED_MASK
+#define PERF_REG_EXTENDED_MASK  0
+#endif
+
 u64 perf_reg_value(struct pt_regs *regs, int idx);
 int perf_reg_validate(u64 mask);
 u64 perf_reg_abi(struct task_struct *task);
@@ -18,6 +23,9 @@ void perf_get_regs_user(struct perf_regs *regs_user,
                         struct pt_regs *regs,
                         struct pt_regs *regs_user_copy);
 #else
+
+#define PERF_REG_EXTENDED_MASK  0
+
 static inline u64 perf_reg_value(struct pt_regs *regs, int idx)
 {
         return 0;
```
```diff
diff --git a/kernel/events/core.c b/kernel/events/core.c
index abbd4b3b96c2..f85929ce13be 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5005,6 +5005,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
         if (perf_event_check_period(event, value))
                 return -EINVAL;
 
+        if (!event->attr.freq && (value & (1ULL << 63)))
+                return -EINVAL;
+
         event_function_call(event, __perf_event_period, &value);
 
         return 0;
@@ -5923,7 +5926,7 @@ static void perf_sample_regs_user(struct perf_regs *regs_user,
         if (user_mode(regs)) {
                 regs_user->abi = perf_reg_abi(current);
                 regs_user->regs = regs;
-        } else if (current->mm) {
+        } else if (!(current->flags & PF_KTHREAD)) {
                 perf_get_regs_user(regs_user, regs, regs_user_copy);
         } else {
                 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
@@ -10033,6 +10036,12 @@ void perf_pmu_unregister(struct pmu *pmu)
 }
 EXPORT_SYMBOL_GPL(perf_pmu_unregister);
 
+static inline bool has_extended_regs(struct perf_event *event)
+{
+        return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) ||
+               (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK);
+}
+
 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
 {
         struct perf_event_context *ctx = NULL;
@@ -10064,12 +10073,16 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
                 perf_event_ctx_unlock(event->group_leader, ctx);
 
         if (!ret) {
+                if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) &&
+                    has_extended_regs(event))
+                        ret = -EOPNOTSUPP;
+
                 if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE &&
-                    event_has_any_exclude_flag(event)) {
-                        if (event->destroy)
-                                event->destroy(event);
+                    event_has_any_exclude_flag(event))
                         ret = -EINVAL;
-                }
+
+                if (ret && event->destroy)
+                        event->destroy(event);
         }
 
         if (ret)
```
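The perf_event_period() hunk above is the "perf/ioctl: Add check for the sample_period value" fix: for a non-freq event, a period with bit 63 set is now rejected instead of being accepted as a huge, effectively negative value. A hedged userspace sketch of that ioctl path, using an arbitrary software event (may need suitable perf_event_paranoid settings):

```c
/* Hedged sketch: PERF_EVENT_IOC_PERIOD with bit 63 set should now fail with EINVAL. */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
        struct perf_event_attr attr;
        unsigned long long bogus = 1ULL << 63;  /* would go negative inside the kernel */
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size          = sizeof(attr);
        attr.type          = PERF_TYPE_SOFTWARE;
        attr.config        = PERF_COUNT_SW_CPU_CLOCK;
        attr.sample_period = 100000;            /* attr.freq stays 0 */
        attr.disabled      = 1;

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &bogus) < 0)
                perror("PERF_EVENT_IOC_PERIOD");        /* expected: EINVAL after this fix */

        close(fd);
        return 0;
}
```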
```diff
diff --git a/tools/arch/x86/include/uapi/asm/perf_regs.h b/tools/arch/x86/include/uapi/asm/perf_regs.h
index ac67bbea10ca..7c9d2bb3833b 100644
--- a/tools/arch/x86/include/uapi/asm/perf_regs.h
+++ b/tools/arch/x86/include/uapi/asm/perf_regs.h
@@ -52,4 +52,7 @@ enum perf_event_x86_regs {
         /* These include both GPRs and XMMX registers */
         PERF_REG_X86_XMM_MAX = PERF_REG_X86_XMM15 + 2,
 };
+
+#define PERF_REG_EXTENDED_MASK  (~((1ULL << PERF_REG_X86_XMM0) - 1))
+
 #endif /* _ASM_X86_PERF_REGS_H */
```
```diff
diff --git a/tools/perf/arch/x86/include/perf_regs.h b/tools/perf/arch/x86/include/perf_regs.h
index b7cd91a9014f..b7321337d100 100644
--- a/tools/perf/arch/x86/include/perf_regs.h
+++ b/tools/perf/arch/x86/include/perf_regs.h
@@ -9,7 +9,6 @@
 void perf_regs_load(u64 *regs);
 
 #define PERF_REGS_MAX PERF_REG_X86_XMM_MAX
-#define PERF_XMM_REGS_MASK  (~((1ULL << PERF_REG_X86_XMM0) - 1))
 #ifndef HAVE_ARCH_X86_64_SUPPORT
 #define PERF_REGS_MASK ((1ULL << PERF_REG_X86_32_MAX) - 1)
 #define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_32
```
```diff
diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c
index 7886ca5263e3..3666c0076df9 100644
--- a/tools/perf/arch/x86/util/perf_regs.c
+++ b/tools/perf/arch/x86/util/perf_regs.c
@@ -277,7 +277,7 @@ uint64_t arch__intr_reg_mask(void)
                 .type                   = PERF_TYPE_HARDWARE,
                 .config                 = PERF_COUNT_HW_CPU_CYCLES,
                 .sample_type            = PERF_SAMPLE_REGS_INTR,
-                .sample_regs_intr       = PERF_XMM_REGS_MASK,
+                .sample_regs_intr       = PERF_REG_EXTENDED_MASK,
                 .precise_ip             = 1,
                 .disabled               = 1,
                 .exclude_kernel         = 1,
@@ -293,7 +293,7 @@ uint64_t arch__intr_reg_mask(void)
         fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
         if (fd != -1) {
                 close(fd);
-                return (PERF_XMM_REGS_MASK | PERF_REGS_MASK);
+                return (PERF_REG_EXTENDED_MASK | PERF_REGS_MASK);
         }
 
         return PERF_REGS_MASK;
```
