author    Ingo Molnar <mingo@kernel.org>  2017-11-29 01:12:36 -0500
committer Ingo Molnar <mingo@kernel.org>  2017-11-29 01:15:09 -0500
commit    6e948c67c47211afcc65c9ccdeedbd5db5c57077 (patch)
tree      6fa7d1aa3191f1c06cdd866a6bc1982dbfeb5448
parent    4fc31ba13d052c2933bf91095c063cf9a39effd0 (diff)
parent    1b3b5219abfd6a214e99018747e9fe98514b43ca (diff)
Merge branch 'perf/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/urgent
Pull perf tooling fixes from Arnaldo Carvalho de Melo:

 - Fix window dimensions change handling in 'perf top' (Jiri Olsa)

 - Fix 'perf record -c/-F' options for CPU event aliases (Andi Kleen)

 - Generate PERF_RECORD_{MMAP,COMM,EXEC} with 'perf record --delay',
   fixing symbol resolution for processes created, maps put in place
   while --delay happens (Arnaldo Carvalho de Melo)

 - Fix up leftover perf_evsel_stat usage via evsel->priv, plugging a
   SEGV when using event groups as in:

     $ perf stat -e '{cpu-clock,instructions}' workload

 - Fix 'perf script --per-event-dump' for auxtrace synth evsels
   (Arnaldo Carvalho de Melo)

 - Ignore kptr_restrict when not sampling the kernel (Arnaldo Carvalho de Melo)

 - Synchronize kernel ABI headers wrt SPDX tags and ABI changes, taking
   minimal action to handle new syscall args and silencing perf build
   warnings (Arnaldo Carvalho de Melo, Ingo Molnar)

 - Fix header.size for namespace events (Jiri Olsa)

 - Fix a bug during strstart() conversion in 'perf help' (Namhyung Kim)

 - Do not truncate instruction names at 6 chars in 'perf annotate',
   there are really long instruction names in PPC (Ravi Bangoria)

 - Fixup discontiguous/sparse numa nodes in 'perf bench numa'
   (Satheesh Rajendran)

 - Fix an exit code of trace__symbols_init in 'perf trace' (Andrei Vagin)

 - Fix 'perf test' entries on s/390 (Thomas Richter)

 - Bring instruction decoder files used by Intel PT into line with the
   kernel, silencing build warning (Adrian Hunter)

Signed-off-by: Ingo Molnar <mingo@kernel.org>
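For quick reference, the kind of command lines these tooling fixes affect
look like the following. This is an illustrative sketch only: './workload'
and the 500 msec delay are placeholder values; the options themselves are
the ones named in the summary above.

  # Event group syntax that previously hit a SEGV in 'perf stat' via the
  # leftover perf_evsel_stat usage:
  $ perf stat -e '{cpu-clock,instructions}' ./workload

  # 'perf record --delay' now synthesizes PERF_RECORD_{MMAP,COMM,EXEC} for
  # tasks started while the delay elapses, so 'perf report' can resolve
  # their symbols:
  $ perf record --delay 500 -- ./workload
  $ perf report

  # Per-event dump files from 'perf script', now also generated for
  # auxtrace synthesized evsels:
  $ perf script --per-event-dump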
-rw-r--r--  kernel/events/core.c                                   |   5
-rw-r--r--  tools/arch/arm/include/uapi/asm/kvm.h                  |   7
-rw-r--r--  tools/arch/arm64/include/uapi/asm/kvm.h                |   7
-rw-r--r--  tools/arch/x86/include/asm/cpufeatures.h               | 537
-rw-r--r--  tools/arch/x86/include/asm/disabled-features.h         |   8
-rw-r--r--  tools/include/uapi/asm-generic/mman.h                  |   1
-rw-r--r--  tools/include/uapi/drm/drm.h                           |  41
-rw-r--r--  tools/include/uapi/drm/i915_drm.h                      |  33
-rw-r--r--  tools/include/uapi/linux/kcmp.h                        |   1
-rw-r--r--  tools/include/uapi/linux/kvm.h                         |   1
-rw-r--r--  tools/include/uapi/linux/perf_event.h                  |   1
-rw-r--r--  tools/include/uapi/linux/prctl.h                       |  10
-rw-r--r--  tools/perf/bench/numa.c                                |  56
-rw-r--r--  tools/perf/builtin-help.c                              |   4
-rw-r--r--  tools/perf/builtin-record.c                            |  42
-rw-r--r--  tools/perf/builtin-report.c                            |   3
-rw-r--r--  tools/perf/builtin-script.c                            |  31
-rw-r--r--  tools/perf/builtin-top.c                               |  36
-rw-r--r--  tools/perf/builtin-trace.c                             |   6
-rwxr-xr-x  tools/perf/tests/shell/trace+probe_libc_inet_pton.sh  |   7
-rwxr-xr-x  tools/perf/tests/shell/trace+probe_vfs_getname.sh     |   6
-rw-r--r--  tools/perf/tests/task-exit.c                           |   4
-rw-r--r--  tools/perf/trace/beauty/mmap.c                         |   3
-rw-r--r--  tools/perf/util/annotate.c                             |  18
-rw-r--r--  tools/perf/util/evlist.c                               |  14
-rw-r--r--  tools/perf/util/evlist.h                               |   2
-rw-r--r--  tools/perf/util/evsel.c                                |  14
-rw-r--r--  tools/perf/util/evsel.h                                |   1
-rw-r--r--  tools/perf/util/intel-pt-decoder/inat.h                |  10
-rw-r--r--  tools/perf/util/machine.c                              |   3
-rw-r--r--  tools/perf/util/parse-events.c                         |   2
-rw-r--r--  tools/perf/util/parse-events.h                         |   3
-rw-r--r--  tools/perf/util/pmu.c                                  |   5

33 files changed, 593 insertions(+), 329 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6ceb10d87462..494eca1bc760 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6640,6 +6640,7 @@ static void perf_event_namespaces_output(struct perf_event *event,
 	struct perf_namespaces_event *namespaces_event = data;
 	struct perf_output_handle handle;
 	struct perf_sample_data sample;
+	u16 header_size = namespaces_event->event_id.header.size;
 	int ret;
 
 	if (!perf_event_namespaces_match(event))
@@ -6650,7 +6651,7 @@ static void perf_event_namespaces_output(struct perf_event *event,
 	ret = perf_output_begin(&handle, event,
 				namespaces_event->event_id.header.size);
 	if (ret)
-		return;
+		goto out;
 
 	namespaces_event->event_id.pid = perf_event_pid(event,
 						namespaces_event->task);
@@ -6662,6 +6663,8 @@ static void perf_event_namespaces_output(struct perf_event *event,
 	perf_event__output_id_sample(event, &handle, &sample);
 
 	perf_output_end(&handle);
+out:
+	namespaces_event->event_id.header.size = header_size;
 }
 
 static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h
index 1f57bbe82b6f..6edd177bb1c7 100644
--- a/tools/arch/arm/include/uapi/asm/kvm.h
+++ b/tools/arch/arm/include/uapi/asm/kvm.h
@@ -152,6 +152,12 @@ struct kvm_arch_memory_slot {
 	 (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64)
 #define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)
 
+/* PL1 Physical Timer Registers */
+#define KVM_REG_ARM_PTIMER_CTL		ARM_CP15_REG32(0, 14, 2, 1)
+#define KVM_REG_ARM_PTIMER_CNT		ARM_CP15_REG64(0, 14)
+#define KVM_REG_ARM_PTIMER_CVAL		ARM_CP15_REG64(2, 14)
+
+/* Virtual Timer Registers */
 #define KVM_REG_ARM_TIMER_CTL		ARM_CP15_REG32(0, 14, 3, 1)
 #define KVM_REG_ARM_TIMER_CNT		ARM_CP15_REG64(1, 14)
 #define KVM_REG_ARM_TIMER_CVAL		ARM_CP15_REG64(3, 14)
@@ -216,6 +222,7 @@ struct kvm_arch_memory_slot {
 #define KVM_DEV_ARM_ITS_SAVE_TABLES		1
 #define KVM_DEV_ARM_ITS_RESTORE_TABLES		2
 #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES	3
+#define KVM_DEV_ARM_ITS_CTRL_RESET		4
 
 /* KVM_IRQ_LINE irq field index values */
 #define KVM_ARM_IRQ_TYPE_SHIFT		24
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index 51149ec75fe4..9abbf3044654 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -196,6 +196,12 @@ struct kvm_arch_memory_slot {
 
 #define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64)
 
+/* Physical Timer EL0 Registers */
+#define KVM_REG_ARM_PTIMER_CTL		ARM64_SYS_REG(3, 3, 14, 2, 1)
+#define KVM_REG_ARM_PTIMER_CVAL		ARM64_SYS_REG(3, 3, 14, 2, 2)
+#define KVM_REG_ARM_PTIMER_CNT		ARM64_SYS_REG(3, 3, 14, 0, 1)
+
+/* EL0 Virtual Timer Registers */
 #define KVM_REG_ARM_TIMER_CTL		ARM64_SYS_REG(3, 3, 14, 3, 1)
 #define KVM_REG_ARM_TIMER_CNT		ARM64_SYS_REG(3, 3, 14, 3, 2)
 #define KVM_REG_ARM_TIMER_CVAL		ARM64_SYS_REG(3, 3, 14, 0, 2)
@@ -228,6 +234,7 @@ struct kvm_arch_memory_slot {
 #define KVM_DEV_ARM_ITS_SAVE_TABLES		1
 #define KVM_DEV_ARM_ITS_RESTORE_TABLES		2
 #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES	3
+#define KVM_DEV_ARM_ITS_CTRL_RESET		4
 
 /* Device Control API on vcpu fd */
 #define KVM_ARM_VCPU_PMU_V3_CTRL	0
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 793690fbda36..c0b0e9e8aa66 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -13,173 +13,176 @@
13/* 13/*
14 * Defines x86 CPU feature bits 14 * Defines x86 CPU feature bits
15 */ 15 */
16#define NCAPINTS 18 /* N 32-bit words worth of info */ 16#define NCAPINTS 18 /* N 32-bit words worth of info */
17#define NBUGINTS 1 /* N 32-bit bug flags */ 17#define NBUGINTS 1 /* N 32-bit bug flags */
18 18
19/* 19/*
20 * Note: If the comment begins with a quoted string, that string is used 20 * Note: If the comment begins with a quoted string, that string is used
21 * in /proc/cpuinfo instead of the macro name. If the string is "", 21 * in /proc/cpuinfo instead of the macro name. If the string is "",
22 * this feature bit is not displayed in /proc/cpuinfo at all. 22 * this feature bit is not displayed in /proc/cpuinfo at all.
23 *
24 * When adding new features here that depend on other features,
25 * please update the table in kernel/cpu/cpuid-deps.c as well.
23 */ 26 */
24 27
25/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ 28/* Intel-defined CPU features, CPUID level 0x00000001 (EDX), word 0 */
26#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */ 29#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
27#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */ 30#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
28#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */ 31#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
29#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */ 32#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
30#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */ 33#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
31#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */ 34#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
32#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */ 35#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
33#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */ 36#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
34#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */ 37#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
35#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */ 38#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
36#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */ 39#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
37#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */ 40#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
38#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */ 41#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
39#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */ 42#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
40#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */ 43#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */
41 /* (plus FCMOVcc, FCOMI with FPU) */ 44#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
42#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */ 45#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
43#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */ 46#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
44#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */ 47#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
45#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */ 48#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
46#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */ 49#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
47#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */ 50#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
48#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */ 51#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
49#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ 52#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
50#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */ 53#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
51#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */ 54#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
52#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */ 55#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
53#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */ 56#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
54#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */ 57#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
55#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */ 58#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
56#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
57 59
58/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ 60/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
59/* Don't duplicate feature flags which are redundant with Intel! */ 61/* Don't duplicate feature flags which are redundant with Intel! */
60#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */ 62#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
61#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */ 63#define X86_FEATURE_MP ( 1*32+19) /* MP Capable */
62#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */ 64#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
63#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */ 65#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
64#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */ 66#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
65#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */ 67#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
66#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */ 68#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
67#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */ 69#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64, 64-bit support) */
68#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */ 70#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow extensions */
69#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */ 71#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow */
70 72
71/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ 73/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
72#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */ 74#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
73#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */ 75#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
74#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */ 76#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
75 77
76/* Other features, Linux-defined mapping, word 3 */ 78/* Other features, Linux-defined mapping, word 3 */
77/* This range is used for feature bits which conflict or are synthesized */ 79/* This range is used for feature bits which conflict or are synthesized */
78#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */ 80#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
79#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ 81#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
80#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ 82#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
81#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ 83#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
82/* cpu types for specific tunings: */ 84
83#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ 85/* CPU types for specific tunings: */
84#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */ 86#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
85#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ 87#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
86#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ 88#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
87#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ 89#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
88#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */ 90#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
89#define X86_FEATURE_ART ( 3*32+10) /* Platform has always running timer (ART) */ 91#define X86_FEATURE_UP ( 3*32+ 9) /* SMP kernel running on UP */
90#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ 92#define X86_FEATURE_ART ( 3*32+10) /* Always running timer (ART) */
91#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */ 93#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
92#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */ 94#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
93#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */ 95#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
94#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */ 96#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
95#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */ 97#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
96#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */ 98#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
97#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */ 99#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" MFENCE synchronizes RDTSC */
98#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */ 100#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
99#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ 101#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
100#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */ 102#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
101#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */ 103#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
102#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ 104#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* CPU topology enum extensions */
103#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ 105#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
104#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */ 106#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
105#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */ 107#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */
106#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */ 108#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */
107#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */ 109#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */
108#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */ 110#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
109#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */ 111#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
112#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
110 113
111/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ 114/* Intel-defined CPU features, CPUID level 0x00000001 (ECX), word 4 */
112#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */ 115#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
113#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */ 116#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
114#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */ 117#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
115#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */ 118#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" MONITOR/MWAIT support */
116#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */ 119#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */
117#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */ 120#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
118#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */ 121#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer Mode eXtensions */
119#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */ 122#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
120#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ 123#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
121#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ 124#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
122#define X86_FEATURE_CID ( 4*32+10) /* Context ID */ 125#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
123#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */ 126#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
124#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ 127#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
125#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */ 128#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B instruction */
126#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ 129#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
127#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */ 130#define X86_FEATURE_PDCM ( 4*32+15) /* Perf/Debug Capabilities MSR */
128#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */ 131#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
129#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */ 132#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
130#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */ 133#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
131#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */ 134#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
132#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */ 135#define X86_FEATURE_X2APIC ( 4*32+21) /* X2APIC */
133#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */ 136#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
134#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */ 137#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
135#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */ 138#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* TSC deadline timer */
136#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */ 139#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
137#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ 140#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */
138#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */ 141#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE instruction enabled in the OS */
139#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */ 142#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
140#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */ 143#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit FP conversions */
141#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */ 144#define X86_FEATURE_RDRAND ( 4*32+30) /* RDRAND instruction */
142#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ 145#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
143 146
144/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ 147/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
145#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ 148#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
146#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ 149#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
147#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ 150#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
148#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ 151#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
149#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ 152#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
150#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */ 153#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
151#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */ 154#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
152#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ 155#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
153#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ 156#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
154#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ 157#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
155 158
156/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ 159/* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */
157#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ 160#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
158#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */ 161#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
159#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */ 162#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure Virtual Machine */
160#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */ 163#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
161#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */ 164#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
162#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */ 165#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
163#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */ 166#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
164#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */ 167#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
165#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */ 168#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
166#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */ 169#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
167#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */ 170#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
168#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */ 171#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
169#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */ 172#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
170#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */ 173#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
171#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */ 174#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
172#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */ 175#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
173#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */ 176#define X86_FEATURE_TCE ( 6*32+17) /* Translation Cache Extension */
174#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */ 177#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
175#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */ 178#define X86_FEATURE_TBM ( 6*32+21) /* Trailing Bit Manipulations */
176#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */ 179#define X86_FEATURE_TOPOEXT ( 6*32+22) /* Topology extensions CPUID leafs */
177#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */ 180#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* Core performance counter extensions */
178#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ 181#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
179#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */ 182#define X86_FEATURE_BPEXT ( 6*32+26) /* Data breakpoint extension */
180#define X86_FEATURE_PTSC ( 6*32+27) /* performance time-stamp counter */ 183#define X86_FEATURE_PTSC ( 6*32+27) /* Performance time-stamp counter */
181#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */ 184#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */
182#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */ 185#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX instructions) */
183 186
184/* 187/*
185 * Auxiliary flags: Linux defined - For features scattered in various 188 * Auxiliary flags: Linux defined - For features scattered in various
@@ -187,146 +190,154 @@
187 * 190 *
188 * Reuse free bits when adding new feature flags! 191 * Reuse free bits when adding new feature flags!
189 */ 192 */
190#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */ 193#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT instructions */
191#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */ 194#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */
192#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ 195#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
193#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ 196#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
194#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */ 197#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
195#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ 198#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
196#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ 199#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
197 200
198#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ 201#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
199#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ 202#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
200#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ 203#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
201 204
202#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ 205#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
203#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ 206#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
204#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */ 207#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */
205#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */ 208#define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
206 209
207#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ 210#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
208 211
209/* Virtualization flags: Linux defined, word 8 */ 212/* Virtualization flags: Linux defined, word 8 */
210#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ 213#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
211#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ 214#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
212#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */ 215#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
213#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */ 216#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
214#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */ 217#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
215 218
216#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */ 219#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */
217#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ 220#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
218 221
219 222
220/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ 223/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
221#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ 224#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
222#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */ 225#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3B */
223#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ 226#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
224#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ 227#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
225#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ 228#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
226#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ 229#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
227#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */ 230#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
228#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */ 231#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */
229#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ 232#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
230#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ 233#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
231#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ 234#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
232#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ 235#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
233#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */ 236#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
234#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ 237#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
235#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */ 238#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
236#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ 239#define X86_FEATURE_RDSEED ( 9*32+18) /* RDSEED instruction */
237#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ 240#define X86_FEATURE_ADX ( 9*32+19) /* ADCX and ADOX instructions */
238#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ 241#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
239#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */ 242#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
240#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ 243#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
241#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ 244#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
242#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ 245#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
243#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ 246#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
244#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ 247#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
245#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */ 248#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
246#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */ 249#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
247#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */ 250#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
248 251
249/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */ 252/* Extended state features, CPUID level 0x0000000d:1 (EAX), word 10 */
250#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */ 253#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT instruction */
251#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */ 254#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC instruction */
252#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */ 255#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */
253#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */ 256#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */
254 257
255/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */ 258/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */
256#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */ 259#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
257 260
258/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */ 261/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */
259#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */ 262#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring */
260#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */ 263#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
261#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */ 264#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */
262 265
263/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */ 266/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
264#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */ 267#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
265#define X86_FEATURE_IRPERF (13*32+1) /* Instructions Retired Count */ 268#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
266 269
267/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */ 270/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
268#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ 271#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
269#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */ 272#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
270#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */ 273#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
271#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */ 274#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
272#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */ 275#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
273#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */ 276#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
274#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */ 277#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
275#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */ 278#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
276#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */ 279#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
277#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ 280#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
278 281
279/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */ 282/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */
280#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */ 283#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
281#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */ 284#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
282#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */ 285#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
283#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */ 286#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
284#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */ 287#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
285#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */ 288#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
286#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */ 289#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
287#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */ 290#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
288#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */ 291#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
289#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ 292#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
290#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */ 293#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
291#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ 294#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
292#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */ 295#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
293 296
294/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */ 297/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
295#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ 298#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
296#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ 299#define X86_FEATURE_UMIP (16*32+ 2) /* User Mode Instruction Protection */
297#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ 300#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
298#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ 301#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
299#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ 302#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */
300#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ 303#define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */
304#define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */
305#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */
306#define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */
307#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
308#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
309#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
310#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
301 311
302/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */ 312/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
303#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */ 313#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
304#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */ 314#define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */
305#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */ 315#define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */
306 316
307/* 317/*
308 * BUG word(s) 318 * BUG word(s)
309 */ 319 */
310#define X86_BUG(x) (NCAPINTS*32 + (x)) 320#define X86_BUG(x) (NCAPINTS*32 + (x))
311 321
312#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ 322#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
313#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ 323#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
314#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ 324#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
315#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ 325#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
316#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ 326#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
317#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ 327#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
318#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ 328#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
319#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ 329#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
320#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ 330#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
321#ifdef CONFIG_X86_32 331#ifdef CONFIG_X86_32
322/* 332/*
323 * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional 333 * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional
324 * to avoid confusion. 334 * to avoid confusion.
325 */ 335 */
326#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */ 336#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
327#endif 337#endif
328#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */ 338#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
329#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ 339#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
330#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ 340#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
331#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ 341#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
342
332#endif /* _ASM_X86_CPUFEATURES_H */ 343#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index c10c9128f54e..14d6d5007314 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -16,6 +16,12 @@
 # define DISABLE_MPX	(1<<(X86_FEATURE_MPX & 31))
 #endif
 
+#ifdef CONFIG_X86_INTEL_UMIP
+# define DISABLE_UMIP	0
+#else
+# define DISABLE_UMIP	(1<<(X86_FEATURE_UMIP & 31))
+#endif
+
 #ifdef CONFIG_X86_64
 # define DISABLE_VME		(1<<(X86_FEATURE_VME & 31))
 # define DISABLE_K6_MTRR	(1<<(X86_FEATURE_K6_MTRR & 31))
@@ -63,7 +69,7 @@
 #define DISABLED_MASK13	0
 #define DISABLED_MASK14	0
 #define DISABLED_MASK15	0
-#define DISABLED_MASK16	(DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57)
+#define DISABLED_MASK16	(DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP)
 #define DISABLED_MASK17	0
 #define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
 
diff --git a/tools/include/uapi/asm-generic/mman.h b/tools/include/uapi/asm-generic/mman.h
index 2dffcbf705b3..653687d9771b 100644
--- a/tools/include/uapi/asm-generic/mman.h
+++ b/tools/include/uapi/asm-generic/mman.h
@@ -13,6 +13,7 @@
 #define MAP_NONBLOCK	0x10000		/* do not block on IO */
 #define MAP_STACK	0x20000		/* give out an address that is best suited for process/thread stacks */
 #define MAP_HUGETLB	0x40000		/* create a huge page mapping */
+#define MAP_SYNC	0x80000		/* perform synchronous page faults for the mapping */
 
 /* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */
 
diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h
index 97677cd6964d..6fdff5945c8a 100644
--- a/tools/include/uapi/drm/drm.h
+++ b/tools/include/uapi/drm/drm.h
@@ -737,6 +737,28 @@ struct drm_syncobj_array {
 	__u32 pad;
 };
 
+/* Query current scanout sequence number */
+struct drm_crtc_get_sequence {
+	__u32 crtc_id;		/* requested crtc_id */
+	__u32 active;		/* return: crtc output is active */
+	__u64 sequence;		/* return: most recent vblank sequence */
+	__s64 sequence_ns;	/* return: most recent time of first pixel out */
+};
+
+/* Queue event to be delivered at specified sequence. Time stamp marks
+ * when the first pixel of the refresh cycle leaves the display engine
+ * for the display
+ */
+#define DRM_CRTC_SEQUENCE_RELATIVE		0x00000001	/* sequence is relative to current */
+#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS		0x00000002	/* Use next sequence if we've missed */
+
+struct drm_crtc_queue_sequence {
+	__u32 crtc_id;
+	__u32 flags;
+	__u64 sequence;		/* on input, target sequence. on output, actual sequence */
+	__u64 user_data;	/* user data passed to event */
+};
+
 #if defined(__cplusplus)
 }
 #endif
@@ -819,6 +841,9 @@ extern "C" {
 
 #define DRM_IOCTL_WAIT_VBLANK		DRM_IOWR(0x3a, union drm_wait_vblank)
 
+#define DRM_IOCTL_CRTC_GET_SEQUENCE	DRM_IOWR(0x3b, struct drm_crtc_get_sequence)
+#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE	DRM_IOWR(0x3c, struct drm_crtc_queue_sequence)
+
 #define DRM_IOCTL_UPDATE_DRAW		DRM_IOW(0x3f, struct drm_update_draw)
 
 #define DRM_IOCTL_MODE_GETRESOURCES	DRM_IOWR(0xA0, struct drm_mode_card_res)
@@ -863,6 +888,11 @@ extern "C" {
 #define DRM_IOCTL_SYNCOBJ_RESET		DRM_IOWR(0xC4, struct drm_syncobj_array)
 #define DRM_IOCTL_SYNCOBJ_SIGNAL	DRM_IOWR(0xC5, struct drm_syncobj_array)
 
+#define DRM_IOCTL_MODE_CREATE_LEASE	DRM_IOWR(0xC6, struct drm_mode_create_lease)
+#define DRM_IOCTL_MODE_LIST_LESSEES	DRM_IOWR(0xC7, struct drm_mode_list_lessees)
+#define DRM_IOCTL_MODE_GET_LEASE	DRM_IOWR(0xC8, struct drm_mode_get_lease)
+#define DRM_IOCTL_MODE_REVOKE_LEASE	DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
+
 /**
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x9f.
@@ -893,6 +923,7 @@ struct drm_event {
 
 #define DRM_EVENT_VBLANK 0x01
 #define DRM_EVENT_FLIP_COMPLETE 0x02
+#define DRM_EVENT_CRTC_SEQUENCE	0x03
 
 struct drm_event_vblank {
 	struct drm_event base;
@@ -903,6 +934,16 @@ struct drm_event_vblank {
 	__u32 crtc_id; /* 0 on older kernels that do not support this */
 };
 
+/* Event delivered at sequence. Time stamp marks when the first pixel
+ * of the refresh cycle leaves the display engine for the display
+ */
+struct drm_event_crtc_sequence {
+	struct drm_event base;
+	__u64 user_data;
+	__s64 time_ns;
+	__u64 sequence;
+};
+
 /* typedef area */
 #ifndef __KERNEL__
 typedef struct drm_clip_rect drm_clip_rect_t;
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 9816590d3ad2..ac3c6503ca27 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -397,10 +397,20 @@ typedef struct drm_i915_irq_wait {
397#define I915_PARAM_MIN_EU_IN_POOL 39 397#define I915_PARAM_MIN_EU_IN_POOL 39
398#define I915_PARAM_MMAP_GTT_VERSION 40 398#define I915_PARAM_MMAP_GTT_VERSION 40
399 399
400/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution 400/*
401 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
401 * priorities and the driver will attempt to execute batches in priority order. 402 * priorities and the driver will attempt to execute batches in priority order.
403 * The param returns a capability bitmask, nonzero implies that the scheduler
404 * is enabled, with different features present according to the mask.
405 *
406 * The initial priority for each batch is supplied by the context and is
407 * controlled via I915_CONTEXT_PARAM_PRIORITY.
402 */ 408 */
403#define I915_PARAM_HAS_SCHEDULER 41 409#define I915_PARAM_HAS_SCHEDULER 41
410#define I915_SCHEDULER_CAP_ENABLED (1ul << 0)
411#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
412#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
413
404#define I915_PARAM_HUC_STATUS 42 414#define I915_PARAM_HUC_STATUS 42
405 415
406/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of 416/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
@@ -1309,14 +1319,16 @@ struct drm_i915_reg_read {
1309 * be specified 1319 * be specified
1310 */ 1320 */
1311 __u64 offset; 1321 __u64 offset;
1322#define I915_REG_READ_8B_WA (1ul << 0)
1323
1312 __u64 val; /* Return value */ 1324 __u64 val; /* Return value */
1313}; 1325};
1314/* Known registers: 1326/* Known registers:
1315 * 1327 *
1316 * Render engine timestamp - 0x2358 + 64bit - gen7+ 1328 * Render engine timestamp - 0x2358 + 64bit - gen7+
1317 * - Note this register returns an invalid value if using the default 1329 * - Note this register returns an invalid value if using the default
1318 * single instruction 8byte read, in order to workaround that use 1330 * single instruction 8byte read, in order to workaround that pass
1319 * offset (0x2538 | 1) instead. 1331 * flag I915_REG_READ_8B_WA in offset field.
1320 * 1332 *
1321 */ 1333 */
1322 1334
@@ -1359,6 +1371,10 @@ struct drm_i915_gem_context_param {
1359#define I915_CONTEXT_PARAM_GTT_SIZE 0x3 1371#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
1360#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4 1372#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
1361#define I915_CONTEXT_PARAM_BANNABLE 0x5 1373#define I915_CONTEXT_PARAM_BANNABLE 0x5
1374#define I915_CONTEXT_PARAM_PRIORITY 0x6
1375#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
1376#define I915_CONTEXT_DEFAULT_PRIORITY 0
1377#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
1362 __u64 value; 1378 __u64 value;
1363}; 1379};
1364 1380
@@ -1510,9 +1526,14 @@ struct drm_i915_perf_oa_config {
1510 __u32 n_boolean_regs; 1526 __u32 n_boolean_regs;
1511 __u32 n_flex_regs; 1527 __u32 n_flex_regs;
1512 1528
1513 __u64 __user mux_regs_ptr; 1529 /*
1514 __u64 __user boolean_regs_ptr; 1530 * These fields are pointers to tuples of u32 values (register
1515 __u64 __user flex_regs_ptr; 1531 * address, value). For example the expected length of the buffer
1532 * pointed by mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
1533 */
1534 __u64 mux_regs_ptr;
1535 __u64 boolean_regs_ptr;
1536 __u64 flex_regs_ptr;
1516}; 1537};
1517 1538
1518#if defined(__cplusplus) 1539#if defined(__cplusplus)
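
The i915_drm.h hunks expose a scheduler capability bitmask behind I915_PARAM_HAS_SCHEDULER and a per-context I915_CONTEXT_PARAM_PRIORITY. A hedged sketch of how userspace might probe and use them, assuming an open i915 fd and an existing GEM context id (illustrative only, not from this patch):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int set_high_priority(int fd, uint32_t ctx_id)
{
	int caps = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_SCHEDULER,
		.value = &caps,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) < 0 ||
	    !(caps & I915_SCHEDULER_CAP_PRIORITY))
		return -1;	/* scheduler has no priority support */

	struct drm_i915_gem_context_param cp = {
		.ctx_id = ctx_id,
		.param  = I915_CONTEXT_PARAM_PRIORITY,
		/* Raising above the default priority may require CAP_SYS_NICE. */
		.value  = I915_CONTEXT_MAX_USER_PRIORITY,
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &cp);
}
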
diff --git a/tools/include/uapi/linux/kcmp.h b/tools/include/uapi/linux/kcmp.h
index 481e103da78e..ef1305010925 100644
--- a/tools/include/uapi/linux/kcmp.h
+++ b/tools/include/uapi/linux/kcmp.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
1#ifndef _UAPI_LINUX_KCMP_H 2#ifndef _UAPI_LINUX_KCMP_H
2#define _UAPI_LINUX_KCMP_H 3#define _UAPI_LINUX_KCMP_H
3 4
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 7e99999d6236..282d7613fce8 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -931,6 +931,7 @@ struct kvm_ppc_resize_hpt {
931#define KVM_CAP_PPC_SMT_POSSIBLE 147 931#define KVM_CAP_PPC_SMT_POSSIBLE 147
932#define KVM_CAP_HYPERV_SYNIC2 148 932#define KVM_CAP_HYPERV_SYNIC2 148
933#define KVM_CAP_HYPERV_VP_INDEX 149 933#define KVM_CAP_HYPERV_VP_INDEX 149
934#define KVM_CAP_S390_AIS_MIGRATION 150
934 935
935#ifdef KVM_CAP_IRQ_ROUTING 936#ifdef KVM_CAP_IRQ_ROUTING
936 937
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index 362493a2f950..b9a4953018ed 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -942,6 +942,7 @@ enum perf_callchain_context {
942#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */ 942#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
943#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */ 943#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
944#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */ 944#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */
945#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */
945 946
946#define PERF_FLAG_FD_NO_GROUP (1UL << 0) 947#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
947#define PERF_FLAG_FD_OUTPUT (1UL << 1) 948#define PERF_FLAG_FD_OUTPUT (1UL << 1)
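
PERF_AUX_FLAG_COLLISION joins the existing flag bits carried in PERF_RECORD_AUX records. A small sketch of a consumer checking those bits; the local struct mirrors the record layout documented in perf_event.h (sample_id trailer omitted) and is an illustration, not a perf-internal type:

#include <stdint.h>
#include <stdio.h>
#include <linux/perf_event.h>

struct aux_record {
	struct perf_event_header header;
	uint64_t aux_offset;
	uint64_t aux_size;
	uint64_t flags;
};

static void report_aux_flags(const struct aux_record *rec)
{
	if (rec->flags & PERF_AUX_FLAG_TRUNCATED)
		fprintf(stderr, "AUX record was truncated to fit\n");
	if (rec->flags & PERF_AUX_FLAG_PARTIAL)
		fprintf(stderr, "AUX record contains gaps\n");
	if (rec->flags & PERF_AUX_FLAG_COLLISION)
		fprintf(stderr, "AUX sample collided with another\n");
}
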
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
index a8d0759a9e40..af5f8c2df87a 100644
--- a/tools/include/uapi/linux/prctl.h
+++ b/tools/include/uapi/linux/prctl.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
1#ifndef _LINUX_PRCTL_H 2#ifndef _LINUX_PRCTL_H
2#define _LINUX_PRCTL_H 3#define _LINUX_PRCTL_H
3 4
@@ -197,4 +198,13 @@ struct prctl_mm_map {
197# define PR_CAP_AMBIENT_LOWER 3 198# define PR_CAP_AMBIENT_LOWER 3
198# define PR_CAP_AMBIENT_CLEAR_ALL 4 199# define PR_CAP_AMBIENT_CLEAR_ALL 4
199 200
201/* arm64 Scalable Vector Extension controls */
202/* Flag values must be kept in sync with ptrace NT_ARM_SVE interface */
203#define PR_SVE_SET_VL 50 /* set task vector length */
204# define PR_SVE_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */
205#define PR_SVE_GET_VL 51 /* get task vector length */
206/* Bits common to PR_SVE_SET_VL and PR_SVE_GET_VL */
207# define PR_SVE_VL_LEN_MASK 0xffff
208# define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */
209
200#endif /* _LINUX_PRCTL_H */ 210#endif /* _LINUX_PRCTL_H */
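
The new arm64 SVE prctls let a task query and set its vector length; the length lives in the low bits (PR_SVE_VL_LEN_MASK) and flags such as PR_SVE_VL_INHERIT ride above it. A minimal sketch of the calling convention, assuming an SVE-capable arm64 kernel with matching headers:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Ask for a 256-byte vector length, inherited across exec; the kernel
	 * may round the length down to the largest supported value. */
	long ret = prctl(PR_SVE_SET_VL, 256 | PR_SVE_VL_INHERIT, 0, 0, 0);
	if (ret < 0) {
		perror("PR_SVE_SET_VL");
		return 1;
	}

	ret = prctl(PR_SVE_GET_VL, 0, 0, 0, 0);
	if (ret >= 0)
		printf("vector length: %ld bytes, inherit: %s\n",
		       ret & PR_SVE_VL_LEN_MASK,
		       (ret & PR_SVE_VL_INHERIT) ? "yes" : "no");
	return 0;
}
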
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index d95fdcc26f4b..944070e98a2c 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -216,6 +216,47 @@ static const char * const numa_usage[] = {
216 NULL 216 NULL
217}; 217};
218 218
219/*
220 * To get number of numa nodes present.
221 */
222static int nr_numa_nodes(void)
223{
224 int i, nr_nodes = 0;
225
226 for (i = 0; i < g->p.nr_nodes; i++) {
227 if (numa_bitmask_isbitset(numa_nodes_ptr, i))
228 nr_nodes++;
229 }
230
231 return nr_nodes;
232}
233
234/*
235 * To check if given numa node is present.
236 */
237static int is_node_present(int node)
238{
239 return numa_bitmask_isbitset(numa_nodes_ptr, node);
240}
241
242/*
243 * To check given numa node has cpus.
244 */
245static bool node_has_cpus(int node)
246{
247 struct bitmask *cpu = numa_allocate_cpumask();
248 unsigned int i;
249
250 if (cpu && !numa_node_to_cpus(node, cpu)) {
251 for (i = 0; i < cpu->size; i++) {
252 if (numa_bitmask_isbitset(cpu, i))
253 return true;
254 }
255 }
256
257 return false; /* lets fall back to nocpus safely */
258}
259
219static cpu_set_t bind_to_cpu(int target_cpu) 260static cpu_set_t bind_to_cpu(int target_cpu)
220{ 261{
221 cpu_set_t orig_mask, mask; 262 cpu_set_t orig_mask, mask;
@@ -244,12 +285,12 @@ static cpu_set_t bind_to_cpu(int target_cpu)
244 285
245static cpu_set_t bind_to_node(int target_node) 286static cpu_set_t bind_to_node(int target_node)
246{ 287{
247 int cpus_per_node = g->p.nr_cpus/g->p.nr_nodes; 288 int cpus_per_node = g->p.nr_cpus / nr_numa_nodes();
248 cpu_set_t orig_mask, mask; 289 cpu_set_t orig_mask, mask;
249 int cpu; 290 int cpu;
250 int ret; 291 int ret;
251 292
252 BUG_ON(cpus_per_node*g->p.nr_nodes != g->p.nr_cpus); 293 BUG_ON(cpus_per_node * nr_numa_nodes() != g->p.nr_cpus);
253 BUG_ON(!cpus_per_node); 294 BUG_ON(!cpus_per_node);
254 295
255 ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask); 296 ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
@@ -649,7 +690,7 @@ static int parse_setup_node_list(void)
649 int i; 690 int i;
650 691
651 for (i = 0; i < mul; i++) { 692 for (i = 0; i < mul; i++) {
652 if (t >= g->p.nr_tasks) { 693 if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {
653 printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node); 694 printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
654 goto out; 695 goto out;
655 } 696 }
@@ -964,6 +1005,8 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
964 sum = 0; 1005 sum = 0;
965 1006
966 for (node = 0; node < g->p.nr_nodes; node++) { 1007 for (node = 0; node < g->p.nr_nodes; node++) {
1008 if (!is_node_present(node))
1009 continue;
967 nr = nodes[node]; 1010 nr = nodes[node];
968 nr_min = min(nr, nr_min); 1011 nr_min = min(nr, nr_min);
969 nr_max = max(nr, nr_max); 1012 nr_max = max(nr, nr_max);
@@ -984,8 +1027,11 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
984 process_groups = 0; 1027 process_groups = 0;
985 1028
986 for (node = 0; node < g->p.nr_nodes; node++) { 1029 for (node = 0; node < g->p.nr_nodes; node++) {
987 int processes = count_node_processes(node); 1030 int processes;
988 1031
1032 if (!is_node_present(node))
1033 continue;
1034 processes = count_node_processes(node);
989 nr = nodes[node]; 1035 nr = nodes[node];
990 tprintf(" %2d/%-2d", nr, processes); 1036 tprintf(" %2d/%-2d", nr, processes);
991 1037
@@ -1291,7 +1337,7 @@ static void print_summary(void)
1291 1337
1292 printf("\n ###\n"); 1338 printf("\n ###\n");
1293 printf(" # %d %s will execute (on %d nodes, %d CPUs):\n", 1339 printf(" # %d %s will execute (on %d nodes, %d CPUs):\n",
1294 g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", g->p.nr_nodes, g->p.nr_cpus); 1340 g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus);
1295 printf(" # %5dx %5ldMB global shared mem operations\n", 1341 printf(" # %5dx %5ldMB global shared mem operations\n",
1296 g->p.nr_loops, g->p.bytes_global/1024/1024); 1342 g->p.nr_loops, g->p.bytes_global/1024/1024);
1297 printf(" # %5dx %5ldMB process shared mem operations\n", 1343 printf(" # %5dx %5ldMB process shared mem operations\n",
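
The bench/numa changes stop assuming node ids are contiguous: g->p.nr_nodes is the highest node number plus one, which overstates the node count on sparse topologies, so the helpers added above count and test the nodes actually present. A standalone sketch of the distinction with libnuma (link with -lnuma); illustrative, mirroring the helpers rather than replacing them:

#include <stdio.h>
#include <numa.h>

int main(void)
{
	int i, present = 0;
	int max = numa_max_node();	/* highest node id, not a count */

	for (i = 0; i <= max; i++)
		if (numa_bitmask_isbitset(numa_nodes_ptr, i))
			present++;

	/* On a box exposing only nodes {0, 8}: max + 1 == 9 but present == 2,
	 * which is why bind_to_node() now divides by the present-node count. */
	printf("max node id: %d, nodes present: %d\n", max, present);
	return 0;
}
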
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index bd1fedef3d1c..a0f7ed2b869b 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -284,7 +284,7 @@ static int perf_help_config(const char *var, const char *value, void *cb)
284 add_man_viewer(value); 284 add_man_viewer(value);
285 return 0; 285 return 0;
286 } 286 }
287 if (!strstarts(var, "man.")) 287 if (strstarts(var, "man."))
288 return add_man_viewer_info(var, value); 288 return add_man_viewer_info(var, value);
289 289
290 return 0; 290 return 0;
@@ -314,7 +314,7 @@ static const char *cmd_to_page(const char *perf_cmd)
314 314
315 if (!perf_cmd) 315 if (!perf_cmd)
316 return "perf"; 316 return "perf";
317 else if (!strstarts(perf_cmd, "perf")) 317 else if (strstarts(perf_cmd, "perf"))
318 return perf_cmd; 318 return perf_cmd;
319 319
320 return asprintf(&s, "perf-%s", perf_cmd) < 0 ? NULL : s; 320 return asprintf(&s, "perf-%s", perf_cmd) < 0 ? NULL : s;
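
The two builtin-help.c hunks drop a stray negation apparently left over from converting prefixcmp()-style checks (0 means "matches") to strstarts() (true means "matches"). For reference, strstarts() in the tools headers is essentially the following, so the corrected conditions read "if the string carries the prefix":

#include <stdbool.h>
#include <string.h>

/* Sketch of strstarts(): true when str begins with prefix. */
static inline bool strstarts(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

/*
 * strstarts(var, "man.")      -> var is a "man.<viewer>" config key
 * strstarts(perf_cmd, "perf") -> the command name already carries the prefix
 */
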
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 3d7f33e19df2..003255910c05 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -339,6 +339,22 @@ static int record__open(struct record *rec)
339 struct perf_evsel_config_term *err_term; 339 struct perf_evsel_config_term *err_term;
340 int rc = 0; 340 int rc = 0;
341 341
342 /*
343 * For initial_delay we need to add a dummy event so that we can track
344 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
345 * real events, the ones asked by the user.
346 */
347 if (opts->initial_delay) {
348 if (perf_evlist__add_dummy(evlist))
349 return -ENOMEM;
350
351 pos = perf_evlist__first(evlist);
352 pos->tracking = 0;
353 pos = perf_evlist__last(evlist);
354 pos->tracking = 1;
355 pos->attr.enable_on_exec = 1;
356 }
357
342 perf_evlist__config(evlist, opts, &callchain_param); 358 perf_evlist__config(evlist, opts, &callchain_param);
343 359
344 evlist__for_each_entry(evlist, pos) { 360 evlist__for_each_entry(evlist, pos) {
@@ -749,17 +765,19 @@ static int record__synthesize(struct record *rec, bool tail)
749 goto out; 765 goto out;
750 } 766 }
751 767
752 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event, 768 if (!perf_evlist__exclude_kernel(rec->evlist)) {
753 machine); 769 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
754 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n" 770 machine);
755 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n" 771 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
756 "Check /proc/kallsyms permission or run as root.\n"); 772 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
757 773 "Check /proc/kallsyms permission or run as root.\n");
758 err = perf_event__synthesize_modules(tool, process_synthesized_event, 774
759 machine); 775 err = perf_event__synthesize_modules(tool, process_synthesized_event,
760 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n" 776 machine);
761 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n" 777 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
762 "Check /proc/modules permission or run as root.\n"); 778 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
779 "Check /proc/modules permission or run as root.\n");
780 }
763 781
764 if (perf_guest) { 782 if (perf_guest) {
765 machines__process_guests(&session->machines, 783 machines__process_guests(&session->machines,
@@ -1693,7 +1711,7 @@ int cmd_record(int argc, const char **argv)
1693 1711
1694 err = -ENOMEM; 1712 err = -ENOMEM;
1695 1713
1696 if (symbol_conf.kptr_restrict) 1714 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
1697 pr_warning( 1715 pr_warning(
1698"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n" 1716"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
1699"check /proc/sys/kernel/kptr_restrict.\n\n" 1717"check /proc/sys/kernel/kptr_restrict.\n\n"
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 1394cd8d96f7..af5dd038195e 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -441,6 +441,9 @@ static void report__warn_kptr_restrict(const struct report *rep)
441 struct map *kernel_map = machine__kernel_map(&rep->session->machines.host); 441 struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
442 struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL; 442 struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;
443 443
444 if (perf_evlist__exclude_kernel(rep->session->evlist))
445 return;
446
444 if (kernel_map == NULL || 447 if (kernel_map == NULL ||
445 (kernel_map->dso->hit && 448 (kernel_map->dso->hit &&
446 (kernel_kmap->ref_reloc_sym == NULL || 449 (kernel_kmap->ref_reloc_sym == NULL ||
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 68f36dc0344f..9b43bda45a41 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -1955,6 +1955,16 @@ static int perf_script__fopen_per_event_dump(struct perf_script *script)
1955 struct perf_evsel *evsel; 1955 struct perf_evsel *evsel;
1956 1956
1957 evlist__for_each_entry(script->session->evlist, evsel) { 1957 evlist__for_each_entry(script->session->evlist, evsel) {
1958 /*
1959 * Already setup? I.e. we may be called twice in cases like
1960 * Intel PT, one for the intel_pt// and dummy events, then
 1961 * for the evsels synthesized from the auxtrace info.
 1962 *
 1963 * See perf_script__process_auxtrace_info.
1964 */
1965 if (evsel->priv != NULL)
1966 continue;
1967
1958 evsel->priv = perf_evsel_script__new(evsel, script->session->data); 1968 evsel->priv = perf_evsel_script__new(evsel, script->session->data);
1959 if (evsel->priv == NULL) 1969 if (evsel->priv == NULL)
1960 goto out_err_fclose; 1970 goto out_err_fclose;
@@ -2838,6 +2848,25 @@ int process_cpu_map_event(struct perf_tool *tool __maybe_unused,
2838 return set_maps(script); 2848 return set_maps(script);
2839} 2849}
2840 2850
2851#ifdef HAVE_AUXTRACE_SUPPORT
2852static int perf_script__process_auxtrace_info(struct perf_tool *tool,
2853 union perf_event *event,
2854 struct perf_session *session)
2855{
2856 int ret = perf_event__process_auxtrace_info(tool, event, session);
2857
2858 if (ret == 0) {
2859 struct perf_script *script = container_of(tool, struct perf_script, tool);
2860
2861 ret = perf_script__setup_per_event_dump(script);
2862 }
2863
2864 return ret;
2865}
2866#else
2867#define perf_script__process_auxtrace_info 0
2868#endif
2869
2841int cmd_script(int argc, const char **argv) 2870int cmd_script(int argc, const char **argv)
2842{ 2871{
2843 bool show_full_info = false; 2872 bool show_full_info = false;
@@ -2866,7 +2895,7 @@ int cmd_script(int argc, const char **argv)
2866 .feature = perf_event__process_feature, 2895 .feature = perf_event__process_feature,
2867 .build_id = perf_event__process_build_id, 2896 .build_id = perf_event__process_build_id,
2868 .id_index = perf_event__process_id_index, 2897 .id_index = perf_event__process_id_index,
2869 .auxtrace_info = perf_event__process_auxtrace_info, 2898 .auxtrace_info = perf_script__process_auxtrace_info,
2870 .auxtrace = perf_event__process_auxtrace, 2899 .auxtrace = perf_event__process_auxtrace,
2871 .auxtrace_error = perf_event__process_auxtrace_error, 2900 .auxtrace_error = perf_event__process_auxtrace_error,
2872 .stat = perf_event__process_stat_event, 2901 .stat = perf_event__process_stat_event,
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 477a8699f0b5..9e0d2645ae13 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -77,6 +77,7 @@
77#include "sane_ctype.h" 77#include "sane_ctype.h"
78 78
79static volatile int done; 79static volatile int done;
80static volatile int resize;
80 81
81#define HEADER_LINE_NR 5 82#define HEADER_LINE_NR 5
82 83
@@ -85,11 +86,13 @@ static void perf_top__update_print_entries(struct perf_top *top)
85 top->print_entries = top->winsize.ws_row - HEADER_LINE_NR; 86 top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
86} 87}
87 88
88static void perf_top__sig_winch(int sig __maybe_unused, 89static void winch_sig(int sig __maybe_unused)
89 siginfo_t *info __maybe_unused, void *arg)
90{ 90{
91 struct perf_top *top = arg; 91 resize = 1;
92}
92 93
94static void perf_top__resize(struct perf_top *top)
95{
93 get_term_dimensions(&top->winsize); 96 get_term_dimensions(&top->winsize);
94 perf_top__update_print_entries(top); 97 perf_top__update_print_entries(top);
95} 98}
@@ -473,12 +476,8 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
473 case 'e': 476 case 'e':
474 prompt_integer(&top->print_entries, "Enter display entries (lines)"); 477 prompt_integer(&top->print_entries, "Enter display entries (lines)");
475 if (top->print_entries == 0) { 478 if (top->print_entries == 0) {
476 struct sigaction act = { 479 perf_top__resize(top);
477 .sa_sigaction = perf_top__sig_winch, 480 signal(SIGWINCH, winch_sig);
478 .sa_flags = SA_SIGINFO,
479 };
480 perf_top__sig_winch(SIGWINCH, NULL, top);
481 sigaction(SIGWINCH, &act, NULL);
482 } else { 481 } else {
483 signal(SIGWINCH, SIG_DFL); 482 signal(SIGWINCH, SIG_DFL);
484 } 483 }
@@ -732,14 +731,16 @@ static void perf_event__process_sample(struct perf_tool *tool,
732 if (!machine->kptr_restrict_warned && 731 if (!machine->kptr_restrict_warned &&
733 symbol_conf.kptr_restrict && 732 symbol_conf.kptr_restrict &&
734 al.cpumode == PERF_RECORD_MISC_KERNEL) { 733 al.cpumode == PERF_RECORD_MISC_KERNEL) {
735 ui__warning( 734 if (!perf_evlist__exclude_kernel(top->session->evlist)) {
735 ui__warning(
736"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n" 736"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
737"Check /proc/sys/kernel/kptr_restrict.\n\n" 737"Check /proc/sys/kernel/kptr_restrict.\n\n"
738"Kernel%s samples will not be resolved.\n", 738"Kernel%s samples will not be resolved.\n",
739 al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ? 739 al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
740 " modules" : ""); 740 " modules" : "");
741 if (use_browser <= 0) 741 if (use_browser <= 0)
742 sleep(5); 742 sleep(5);
743 }
743 machine->kptr_restrict_warned = true; 744 machine->kptr_restrict_warned = true;
744 } 745 }
745 746
@@ -1030,6 +1031,11 @@ static int __cmd_top(struct perf_top *top)
1030 1031
1031 if (hits == top->samples) 1032 if (hits == top->samples)
1032 ret = perf_evlist__poll(top->evlist, 100); 1033 ret = perf_evlist__poll(top->evlist, 100);
1034
1035 if (resize) {
1036 perf_top__resize(top);
1037 resize = 0;
1038 }
1033 } 1039 }
1034 1040
1035 ret = 0; 1041 ret = 0;
@@ -1352,12 +1358,8 @@ int cmd_top(int argc, const char **argv)
1352 1358
1353 get_term_dimensions(&top.winsize); 1359 get_term_dimensions(&top.winsize);
1354 if (top.print_entries == 0) { 1360 if (top.print_entries == 0) {
1355 struct sigaction act = {
1356 .sa_sigaction = perf_top__sig_winch,
1357 .sa_flags = SA_SIGINFO,
1358 };
1359 perf_top__update_print_entries(&top); 1361 perf_top__update_print_entries(&top);
1360 sigaction(SIGWINCH, &act, NULL); 1362 signal(SIGWINCH, winch_sig);
1361 } 1363 }
1362 1364
1363 status = __cmd_top(&top); 1365 status = __cmd_top(&top);
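
The 'perf top' fix stops doing the resize work inside the SIGWINCH handler: the handler now only sets a flag and the main loop calls perf_top__resize(), keeping the non-trivial terminal querying out of signal context. The general pattern, as a standalone sketch:

#include <signal.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

static volatile sig_atomic_t resize;

static void winch_sig(int sig) { (void)sig; resize = 1; }	/* handler only sets a flag */

int main(void)
{
	struct winsize ws;

	signal(SIGWINCH, winch_sig);
	for (;;) {
		pause();			/* stand-in for the tool's poll loop */
		if (resize) {
			resize = 0;
			/* The real work happens here, in normal context. */
			if (ioctl(STDIN_FILENO, TIOCGWINSZ, &ws) == 0)
				printf("%d rows x %d cols\n", ws.ws_row, ws.ws_col);
		}
	}
}
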
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index f2757d38c7d7..84debdbad327 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1152,12 +1152,14 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
1152 if (trace->host == NULL) 1152 if (trace->host == NULL)
1153 return -ENOMEM; 1153 return -ENOMEM;
1154 1154
1155 if (trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr) < 0) 1155 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
1156 return -errno; 1156 if (err < 0)
1157 goto out;
1157 1158
1158 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target, 1159 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
1159 evlist->threads, trace__tool_process, false, 1160 evlist->threads, trace__tool_process, false,
1160 trace->opts.proc_map_timeout, 1); 1161 trace->opts.proc_map_timeout, 1);
1162out:
1161 if (err) 1163 if (err)
1162 symbol__exit(); 1164 symbol__exit();
1163 1165
diff --git a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
index 7a84d73324e3..8b3da21a08f1 100755
--- a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
@@ -10,8 +10,8 @@
10 10
11. $(dirname $0)/lib/probe.sh 11. $(dirname $0)/lib/probe.sh
12 12
13ld=$(realpath /lib64/ld*.so.* | uniq) 13libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g')
14libc=$(echo $ld | sed 's/ld/libc/g') 14nm -g $libc 2>/dev/null | fgrep -q inet_pton || exit 254
15 15
16trace_libc_inet_pton_backtrace() { 16trace_libc_inet_pton_backtrace() {
17 idx=0 17 idx=0
@@ -37,6 +37,9 @@ trace_libc_inet_pton_backtrace() {
37 done 37 done
38} 38}
39 39
40# Check for IPv6 interface existence
41ip a sh lo | fgrep -q inet6 || exit 2
42
40skip_if_no_perf_probe && \ 43skip_if_no_perf_probe && \
41perf probe -q $libc inet_pton && \ 44perf probe -q $libc inet_pton && \
42trace_libc_inet_pton_backtrace 45trace_libc_inet_pton_backtrace
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
index 2e68c5f120da..2a9ef080efd0 100755
--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -17,8 +17,10 @@ skip_if_no_perf_probe || exit 2
17file=$(mktemp /tmp/temporary_file.XXXXX) 17file=$(mktemp /tmp/temporary_file.XXXXX)
18 18
19trace_open_vfs_getname() { 19trace_open_vfs_getname() {
20 perf trace -e open touch $file 2>&1 | \ 20 test "$(uname -m)" = s390x && { svc="openat"; txt="dfd: +CWD, +"; }
21 egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open\(filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$" 21
22 perf trace -e ${svc:-open} touch $file 2>&1 | \
23 egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ ${svc:-open}\(${txt}filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
22} 24}
23 25
24 26
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index bc4a7344e274..89c8e1604ca7 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -84,7 +84,11 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
84 84
85 evsel = perf_evlist__first(evlist); 85 evsel = perf_evlist__first(evlist);
86 evsel->attr.task = 1; 86 evsel->attr.task = 1;
87#ifdef __s390x__
88 evsel->attr.sample_freq = 1000000;
89#else
87 evsel->attr.sample_freq = 1; 90 evsel->attr.sample_freq = 1;
91#endif
88 evsel->attr.inherit = 0; 92 evsel->attr.inherit = 0;
89 evsel->attr.watermark = 0; 93 evsel->attr.watermark = 0;
90 evsel->attr.wakeup_events = 1; 94 evsel->attr.wakeup_events = 1;
diff --git a/tools/perf/trace/beauty/mmap.c b/tools/perf/trace/beauty/mmap.c
index 9e1668b2c5d7..417e3ecfe9d7 100644
--- a/tools/perf/trace/beauty/mmap.c
+++ b/tools/perf/trace/beauty/mmap.c
@@ -62,6 +62,9 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
62 P_MMAP_FLAG(POPULATE); 62 P_MMAP_FLAG(POPULATE);
63 P_MMAP_FLAG(STACK); 63 P_MMAP_FLAG(STACK);
64 P_MMAP_FLAG(UNINITIALIZED); 64 P_MMAP_FLAG(UNINITIALIZED);
65#ifdef MAP_SYNC
66 P_MMAP_FLAG(SYNC);
67#endif
65#undef P_MMAP_FLAG 68#undef P_MMAP_FLAG
66 69
67 if (flags) 70 if (flags)
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index da1c4c4a0dd8..3369c7830260 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -165,7 +165,7 @@ static void ins__delete(struct ins_operands *ops)
165static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size, 165static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
166 struct ins_operands *ops) 166 struct ins_operands *ops)
167{ 167{
168 return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->raw); 168 return scnprintf(bf, size, "%-6s %s", ins->name, ops->raw);
169} 169}
170 170
171int ins__scnprintf(struct ins *ins, char *bf, size_t size, 171int ins__scnprintf(struct ins *ins, char *bf, size_t size,
@@ -230,12 +230,12 @@ static int call__scnprintf(struct ins *ins, char *bf, size_t size,
230 struct ins_operands *ops) 230 struct ins_operands *ops)
231{ 231{
232 if (ops->target.name) 232 if (ops->target.name)
233 return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->target.name); 233 return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.name);
234 234
235 if (ops->target.addr == 0) 235 if (ops->target.addr == 0)
236 return ins__raw_scnprintf(ins, bf, size, ops); 236 return ins__raw_scnprintf(ins, bf, size, ops);
237 237
238 return scnprintf(bf, size, "%-6.6s *%" PRIx64, ins->name, ops->target.addr); 238 return scnprintf(bf, size, "%-6s *%" PRIx64, ins->name, ops->target.addr);
239} 239}
240 240
241static struct ins_ops call_ops = { 241static struct ins_ops call_ops = {
@@ -299,7 +299,7 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
299 c++; 299 c++;
300 } 300 }
301 301
302 return scnprintf(bf, size, "%-6.6s %.*s%" PRIx64, 302 return scnprintf(bf, size, "%-6s %.*s%" PRIx64,
303 ins->name, c ? c - ops->raw : 0, ops->raw, 303 ins->name, c ? c - ops->raw : 0, ops->raw,
304 ops->target.offset); 304 ops->target.offset);
305} 305}
@@ -372,7 +372,7 @@ static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
372 if (ops->locked.ins.ops == NULL) 372 if (ops->locked.ins.ops == NULL)
373 return ins__raw_scnprintf(ins, bf, size, ops); 373 return ins__raw_scnprintf(ins, bf, size, ops);
374 374
375 printed = scnprintf(bf, size, "%-6.6s ", ins->name); 375 printed = scnprintf(bf, size, "%-6s ", ins->name);
376 return printed + ins__scnprintf(&ops->locked.ins, bf + printed, 376 return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
377 size - printed, ops->locked.ops); 377 size - printed, ops->locked.ops);
378} 378}
@@ -448,7 +448,7 @@ out_free_source:
448static int mov__scnprintf(struct ins *ins, char *bf, size_t size, 448static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
449 struct ins_operands *ops) 449 struct ins_operands *ops)
450{ 450{
451 return scnprintf(bf, size, "%-6.6s %s,%s", ins->name, 451 return scnprintf(bf, size, "%-6s %s,%s", ins->name,
452 ops->source.name ?: ops->source.raw, 452 ops->source.name ?: ops->source.raw,
453 ops->target.name ?: ops->target.raw); 453 ops->target.name ?: ops->target.raw);
454} 454}
@@ -488,7 +488,7 @@ static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops
488static int dec__scnprintf(struct ins *ins, char *bf, size_t size, 488static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
489 struct ins_operands *ops) 489 struct ins_operands *ops)
490{ 490{
491 return scnprintf(bf, size, "%-6.6s %s", ins->name, 491 return scnprintf(bf, size, "%-6s %s", ins->name,
492 ops->target.name ?: ops->target.raw); 492 ops->target.name ?: ops->target.raw);
493} 493}
494 494
@@ -500,7 +500,7 @@ static struct ins_ops dec_ops = {
500static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size, 500static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
501 struct ins_operands *ops __maybe_unused) 501 struct ins_operands *ops __maybe_unused)
502{ 502{
503 return scnprintf(bf, size, "%-6.6s", "nop"); 503 return scnprintf(bf, size, "%-6s", "nop");
504} 504}
505 505
506static struct ins_ops nop_ops = { 506static struct ins_ops nop_ops = {
@@ -924,7 +924,7 @@ void disasm_line__free(struct disasm_line *dl)
924int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw) 924int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
925{ 925{
926 if (raw || !dl->ins.ops) 926 if (raw || !dl->ins.ops)
927 return scnprintf(bf, size, "%-6.6s %s", dl->ins.name, dl->ops.raw); 927 return scnprintf(bf, size, "%-6s %s", dl->ins.name, dl->ops.raw);
928 928
929 return ins__scnprintf(&dl->ins, bf, size, &dl->ops); 929 return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
930} 930}
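
The annotate.c changes replace "%-6.6s" with "%-6s": the ".6" precision makes scnprintf() truncate the instruction name to six characters, which mangles longer mnemonics (PowerPC has plenty), while the width alone only pads short names. A short illustration of the printf semantics, using an arbitrary longer mnemonic as the example:

#include <stdio.h>

int main(void)
{
	printf("[%-6.6s]\n", "addis");      /* [addis ]   - short name, padded   */
	printf("[%-6.6s]\n", "vpermxor");   /* [vpermx]   - precision truncates  */
	printf("[%-6s]\n",   "vpermxor");   /* [vpermxor] - width only, no loss  */
	return 0;
}
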
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index c6c891e154a6..b62e523a7035 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -257,7 +257,7 @@ int perf_evlist__add_dummy(struct perf_evlist *evlist)
257 .config = PERF_COUNT_SW_DUMMY, 257 .config = PERF_COUNT_SW_DUMMY,
258 .size = sizeof(attr), /* to capture ABI version */ 258 .size = sizeof(attr), /* to capture ABI version */
259 }; 259 };
260 struct perf_evsel *evsel = perf_evsel__new(&attr); 260 struct perf_evsel *evsel = perf_evsel__new_idx(&attr, evlist->nr_entries);
261 261
262 if (evsel == NULL) 262 if (evsel == NULL)
263 return -ENOMEM; 263 return -ENOMEM;
@@ -1786,3 +1786,15 @@ void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
1786state_err: 1786state_err:
1787 return; 1787 return;
1788} 1788}
1789
1790bool perf_evlist__exclude_kernel(struct perf_evlist *evlist)
1791{
1792 struct perf_evsel *evsel;
1793
1794 evlist__for_each_entry(evlist, evsel) {
1795 if (!evsel->attr.exclude_kernel)
1796 return false;
1797 }
1798
1799 return true;
1800}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index e72ae64c11ac..491f69542920 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -312,4 +312,6 @@ perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);
312 312
313struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist, 313struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
314 union perf_event *event); 314 union perf_event *event);
315
316bool perf_evlist__exclude_kernel(struct perf_evlist *evlist);
315#endif /* __PERF_EVLIST_H */ 317#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index f894893c203d..d5fbcf8c7aa7 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -733,12 +733,16 @@ static void apply_config_terms(struct perf_evsel *evsel,
733 list_for_each_entry(term, config_terms, list) { 733 list_for_each_entry(term, config_terms, list) {
734 switch (term->type) { 734 switch (term->type) {
735 case PERF_EVSEL__CONFIG_TERM_PERIOD: 735 case PERF_EVSEL__CONFIG_TERM_PERIOD:
736 attr->sample_period = term->val.period; 736 if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
737 attr->freq = 0; 737 attr->sample_period = term->val.period;
738 attr->freq = 0;
739 }
738 break; 740 break;
739 case PERF_EVSEL__CONFIG_TERM_FREQ: 741 case PERF_EVSEL__CONFIG_TERM_FREQ:
740 attr->sample_freq = term->val.freq; 742 if (!(term->weak && opts->user_freq != UINT_MAX)) {
741 attr->freq = 1; 743 attr->sample_freq = term->val.freq;
744 attr->freq = 1;
745 }
742 break; 746 break;
743 case PERF_EVSEL__CONFIG_TERM_TIME: 747 case PERF_EVSEL__CONFIG_TERM_TIME:
744 if (term->val.time) 748 if (term->val.time)
@@ -1371,7 +1375,7 @@ perf_evsel__process_group_data(struct perf_evsel *leader,
1371static int 1375static int
1372perf_evsel__read_group(struct perf_evsel *leader, int cpu, int thread) 1376perf_evsel__read_group(struct perf_evsel *leader, int cpu, int thread)
1373{ 1377{
1374 struct perf_stat_evsel *ps = leader->priv; 1378 struct perf_stat_evsel *ps = leader->stats;
1375 u64 read_format = leader->attr.read_format; 1379 u64 read_format = leader->attr.read_format;
1376 int size = perf_evsel__read_size(leader); 1380 int size = perf_evsel__read_size(leader);
1377 u64 *data = ps->group_data; 1381 u64 *data = ps->group_data;
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 9277df96ffda..157f49e8a772 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -67,6 +67,7 @@ struct perf_evsel_config_term {
67 bool overwrite; 67 bool overwrite;
68 char *branch; 68 char *branch;
69 } val; 69 } val;
70 bool weak;
70}; 71};
71 72
72struct perf_stat_evsel; 73struct perf_stat_evsel;
diff --git a/tools/perf/util/intel-pt-decoder/inat.h b/tools/perf/util/intel-pt-decoder/inat.h
index 125ecd2a300d..52dc8d911173 100644
--- a/tools/perf/util/intel-pt-decoder/inat.h
+++ b/tools/perf/util/intel-pt-decoder/inat.h
@@ -97,6 +97,16 @@
97#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM) 97#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM)
98#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS) 98#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS)
99 99
100/* Identifiers for segment registers */
101#define INAT_SEG_REG_IGNORE 0
102#define INAT_SEG_REG_DEFAULT 1
103#define INAT_SEG_REG_CS 2
104#define INAT_SEG_REG_SS 3
105#define INAT_SEG_REG_DS 4
106#define INAT_SEG_REG_ES 5
107#define INAT_SEG_REG_FS 6
108#define INAT_SEG_REG_GS 7
109
100/* Attribute search APIs */ 110/* Attribute search APIs */
101extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode); 111extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
102extern int inat_get_last_prefix_id(insn_byte_t last_pfx); 112extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 6a8d03c3d9b7..270f3223c6df 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -172,6 +172,9 @@ void machine__exit(struct machine *machine)
172{ 172{
173 int i; 173 int i;
174 174
175 if (machine == NULL)
176 return;
177
175 machine__destroy_kernel_maps(machine); 178 machine__destroy_kernel_maps(machine);
176 map_groups__exit(&machine->kmaps); 179 map_groups__exit(&machine->kmaps);
177 dsos__exit(&machine->dsos); 180 dsos__exit(&machine->dsos);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index a7fcd95961ef..170316795a18 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1116,6 +1116,7 @@ do { \
1116 INIT_LIST_HEAD(&__t->list); \ 1116 INIT_LIST_HEAD(&__t->list); \
1117 __t->type = PERF_EVSEL__CONFIG_TERM_ ## __type; \ 1117 __t->type = PERF_EVSEL__CONFIG_TERM_ ## __type; \
1118 __t->val.__name = __val; \ 1118 __t->val.__name = __val; \
1119 __t->weak = term->weak; \
1119 list_add_tail(&__t->list, head_terms); \ 1120 list_add_tail(&__t->list, head_terms); \
1120} while (0) 1121} while (0)
1121 1122
@@ -2410,6 +2411,7 @@ static int new_term(struct parse_events_term **_term,
2410 2411
2411 *term = *temp; 2412 *term = *temp;
2412 INIT_LIST_HEAD(&term->list); 2413 INIT_LIST_HEAD(&term->list);
2414 term->weak = false;
2413 2415
2414 switch (term->type_val) { 2416 switch (term->type_val) {
2415 case PARSE_EVENTS__TERM_TYPE_NUM: 2417 case PARSE_EVENTS__TERM_TYPE_NUM:
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index be337c266697..88108cd11b4c 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -101,6 +101,9 @@ struct parse_events_term {
101 /* error string indexes for within parsed string */ 101 /* error string indexes for within parsed string */
102 int err_term; 102 int err_term;
103 int err_val; 103 int err_val;
104
105 /* Coming from implicit alias */
106 bool weak;
104}; 107};
105 108
106struct parse_events_error { 109struct parse_events_error {
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 07cb2ac041d7..80fb1593913a 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -405,6 +405,11 @@ static int pmu_alias_terms(struct perf_pmu_alias *alias,
405 parse_events_terms__purge(&list); 405 parse_events_terms__purge(&list);
406 return ret; 406 return ret;
407 } 407 }
408 /*
409 * Weak terms don't override command line options,
410 * which we don't want for implicit terms in aliases.
411 */
412 cloned->weak = true;
408 list_add_tail(&cloned->list, &list); 413 list_add_tail(&cloned->list, &list);
409 } 414 }
410 list_splice(&list, terms); 415 list_splice(&list, terms);
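
Tying the parse-events/evsel/pmu hunks together: terms cloned from a PMU alias are marked weak, and apply_config_terms() skips a weak period/frequency term whenever the user passed -c/-F (opts->user_interval/user_freq are set), so 'perf record -c ...' wins over an alias-supplied period. A condensed sketch of that precedence rule, with the surrounding perf types deliberately simplified to keep it self-contained:

#include <stdbool.h>
#include <stdint.h>

#define NO_USER_INTERVAL UINT64_MAX	/* stand-in for ULLONG_MAX, i.e. -c not given */

struct config_term { bool weak; uint64_t period; };		/* simplified */
struct record_opts { uint64_t user_interval; };			/* simplified */
struct event_attr  { uint64_t sample_period; int freq; };	/* simplified */

static void apply_period_term(struct event_attr *attr,
			      const struct config_term *term,
			      const struct record_opts *opts)
{
	/* A weak (alias-supplied) period must not override an explicit -c. */
	if (term->weak && opts->user_interval != NO_USER_INTERVAL)
		return;

	attr->sample_period = term->period;
	attr->freq = 0;
}
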