path: root/tools
Diffstat (limited to 'tools')
-rw-r--r--  tools/arch/arm/include/uapi/asm/kvm.h | 7
-rw-r--r--  tools/arch/arm64/include/uapi/asm/bpf_perf_event.h | 9
-rw-r--r--  tools/arch/arm64/include/uapi/asm/kvm.h | 7
-rw-r--r--  tools/arch/s390/include/uapi/asm/bpf_perf_event.h | 9
-rw-r--r--  tools/arch/s390/include/uapi/asm/kvm.h | 4
-rw-r--r--  tools/arch/s390/include/uapi/asm/kvm_perf.h | 4
-rw-r--r--  tools/arch/s390/include/uapi/asm/perf_regs.h | 44
-rw-r--r--  tools/arch/s390/include/uapi/asm/ptrace.h | 457
-rw-r--r--  tools/arch/x86/include/asm/cpufeatures.h | 538
-rw-r--r--  tools/arch/x86/include/asm/disabled-features.h | 8
-rw-r--r--  tools/bpf/bpftool/Documentation/Makefile | 2
-rw-r--r--  tools/bpf/bpftool/Makefile | 7
-rw-r--r--  tools/bpf/bpftool/main.c | 36
-rw-r--r--  tools/bpf/bpftool/main.h | 5
-rw-r--r--  tools/bpf/bpftool/map.c | 8
-rw-r--r--  tools/bpf/bpftool/prog.c | 2
-rw-r--r--  tools/hv/hv_kvp_daemon.c | 70
-rw-r--r--  tools/include/linux/compiler.h | 21
-rw-r--r--  tools/include/linux/kmemcheck.h | 1
-rw-r--r--  tools/include/linux/lockdep.h | 1
-rw-r--r--  tools/include/uapi/asm-generic/bpf_perf_event.h | 9
-rw-r--r--  tools/include/uapi/asm-generic/mman.h | 1
-rw-r--r--  tools/include/uapi/asm/bpf_perf_event.h | 7
-rw-r--r--  tools/include/uapi/drm/drm.h | 41
-rw-r--r--  tools/include/uapi/drm/i915_drm.h | 33
-rw-r--r--  tools/include/uapi/linux/bpf_perf_event.h | 6
-rw-r--r--  tools/include/uapi/linux/kcmp.h | 1
-rw-r--r--  tools/include/uapi/linux/kvm.h | 5
-rw-r--r--  tools/include/uapi/linux/perf_event.h | 1
-rw-r--r--  tools/include/uapi/linux/prctl.h | 10
-rwxr-xr-x  tools/kvm/kvm_stat/kvm_stat | 74
-rw-r--r--  tools/kvm/kvm_stat/kvm_stat.txt | 4
-rw-r--r--  tools/objtool/Makefile | 8
-rw-r--r--  tools/objtool/arch/x86/decode.c | 2
-rw-r--r--  tools/objtool/arch/x86/lib/x86-opcode-map.txt | 15
-rw-r--r--  tools/objtool/builtin-orc.c | 4
-rw-r--r--  tools/objtool/orc_dump.c | 7
-rw-r--r--  tools/objtool/orc_gen.c | 2
-rw-r--r--  tools/perf/Makefile.config | 9
-rw-r--r--  tools/perf/arch/s390/Makefile | 1
-rw-r--r--  tools/perf/arch/s390/include/perf_regs.h | 2
-rw-r--r--  tools/perf/arch/s390/util/dwarf-regs.c | 32
-rw-r--r--  tools/perf/bench/numa.c | 56
-rw-r--r--  tools/perf/builtin-help.c | 4
-rw-r--r--  tools/perf/builtin-record.c | 42
-rw-r--r--  tools/perf/builtin-report.c | 3
-rw-r--r--  tools/perf/builtin-script.c | 31
-rw-r--r--  tools/perf/builtin-top.c | 36
-rw-r--r--  tools/perf/builtin-trace.c | 6
-rwxr-xr-x  tools/perf/check-headers.sh | 2
-rw-r--r--  tools/perf/jvmti/jvmti_agent.c | 16
-rw-r--r--  tools/perf/jvmti/jvmti_agent.h | 7
-rw-r--r--  tools/perf/jvmti/libjvmti.c | 147
-rwxr-xr-x  tools/perf/tests/shell/trace+probe_libc_inet_pton.sh | 7
-rwxr-xr-x  tools/perf/tests/shell/trace+probe_vfs_getname.sh | 6
-rw-r--r--  tools/perf/tests/task-exit.c | 4
-rw-r--r--  tools/perf/trace/beauty/mmap.c | 3
-rw-r--r--  tools/perf/util/annotate.c | 18
-rw-r--r--  tools/perf/util/evlist.c | 14
-rw-r--r--  tools/perf/util/evlist.h | 2
-rw-r--r--  tools/perf/util/evsel.c | 14
-rw-r--r--  tools/perf/util/evsel.h | 1
-rw-r--r--  tools/perf/util/intel-pt-decoder/inat.h | 10
-rw-r--r--  tools/perf/util/intel-pt-decoder/x86-opcode-map.txt | 15
-rw-r--r--  tools/perf/util/machine.c | 3
-rw-r--r--  tools/perf/util/mmap.h | 2
-rw-r--r--  tools/perf/util/parse-events.c | 2
-rw-r--r--  tools/perf/util/parse-events.h | 3
-rw-r--r--  tools/perf/util/pmu.c | 5
-rw-r--r--  tools/power/cpupower/bench/system.c | 2
-rw-r--r--  tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c | 9
-rw-r--r--  tools/testing/selftests/bpf/Makefile | 5
-rw-r--r--  tools/testing/selftests/bpf/test_progs.c | 8
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 629
-rw-r--r--  tools/testing/selftests/bpf/test_verifier_log.c | 7
-rw-r--r--  tools/testing/selftests/net/config | 1
-rw-r--r--  tools/testing/selftests/x86/ldt_gdt.c | 12
-rw-r--r--  tools/usb/usbip/libsrc/vhci_driver.c | 24
-rw-r--r--  tools/usb/usbip/src/utils.c | 9
-rw-r--r--  tools/virtio/ringtest/ptr_ring.c | 29
-rw-r--r--  tools/vm/slabinfo-gnuplot.sh | 2
81 files changed, 2108 insertions, 602 deletions
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h
index 1f57bbe82b6f..6edd177bb1c7 100644
--- a/tools/arch/arm/include/uapi/asm/kvm.h
+++ b/tools/arch/arm/include/uapi/asm/kvm.h
@@ -152,6 +152,12 @@ struct kvm_arch_memory_slot {
 			(__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64)
 #define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)
 
+/* PL1 Physical Timer Registers */
+#define KVM_REG_ARM_PTIMER_CTL		ARM_CP15_REG32(0, 14, 2, 1)
+#define KVM_REG_ARM_PTIMER_CNT		ARM_CP15_REG64(0, 14)
+#define KVM_REG_ARM_PTIMER_CVAL		ARM_CP15_REG64(2, 14)
+
+/* Virtual Timer Registers */
 #define KVM_REG_ARM_TIMER_CTL		ARM_CP15_REG32(0, 14, 3, 1)
 #define KVM_REG_ARM_TIMER_CNT		ARM_CP15_REG64(1, 14)
 #define KVM_REG_ARM_TIMER_CVAL		ARM_CP15_REG64(3, 14)
@@ -216,6 +222,7 @@ struct kvm_arch_memory_slot {
 #define KVM_DEV_ARM_ITS_SAVE_TABLES		1
 #define KVM_DEV_ARM_ITS_RESTORE_TABLES		2
 #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES	3
+#define KVM_DEV_ARM_ITS_CTRL_RESET		4
 
 /* KVM_IRQ_LINE irq field index values */
 #define KVM_ARM_IRQ_TYPE_SHIFT		24
diff --git a/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h b/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 000000000000..b551b741653d
--- /dev/null
+++ b/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include <asm/ptrace.h>
+
+typedef struct user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index 51149ec75fe4..9abbf3044654 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -196,6 +196,12 @@ struct kvm_arch_memory_slot {
 
 #define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64)
 
+/* Physical Timer EL0 Registers */
+#define KVM_REG_ARM_PTIMER_CTL		ARM64_SYS_REG(3, 3, 14, 2, 1)
+#define KVM_REG_ARM_PTIMER_CVAL		ARM64_SYS_REG(3, 3, 14, 2, 2)
+#define KVM_REG_ARM_PTIMER_CNT		ARM64_SYS_REG(3, 3, 14, 0, 1)
+
+/* EL0 Virtual Timer Registers */
 #define KVM_REG_ARM_TIMER_CTL		ARM64_SYS_REG(3, 3, 14, 3, 1)
 #define KVM_REG_ARM_TIMER_CNT		ARM64_SYS_REG(3, 3, 14, 3, 2)
 #define KVM_REG_ARM_TIMER_CVAL		ARM64_SYS_REG(3, 3, 14, 0, 2)
@@ -228,6 +234,7 @@ struct kvm_arch_memory_slot {
 #define KVM_DEV_ARM_ITS_SAVE_TABLES	1
 #define KVM_DEV_ARM_ITS_RESTORE_TABLES	2
 #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES	3
+#define KVM_DEV_ARM_ITS_CTRL_RESET	4
 
 /* Device Control API on vcpu fd */
 #define KVM_ARM_VCPU_PMU_V3_CTRL	0
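
The KVM_REG_ARM_PTIMER_* IDs added above (and their 32-bit ARM counterparts earlier in this series) are consumed through KVM's generic one-register interface. As a minimal sketch only, assuming an arm64 build with these headers installed and a hypothetical vcpu_fd obtained via KVM_CREATE_VCPU, userspace might read the guest's physical counter like this:

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>          /* pulls in asm/kvm.h with the new register IDs */

/* Read the EL0 physical counter through the new KVM_REG_ARM_PTIMER_CNT ID.
 * vcpu_fd is assumed to be a vcpu file descriptor created earlier;
 * error handling is reduced to a single check. */
static int read_ptimer_cnt(int vcpu_fd, uint64_t *cnt)
{
        struct kvm_one_reg reg = {
                .id   = KVM_REG_ARM_PTIMER_CNT,
                .addr = (uint64_t)(unsigned long)cnt,
        };

        if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0) {
                perror("KVM_GET_ONE_REG");
                return -1;
        }
        return 0;
}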
diff --git a/tools/arch/s390/include/uapi/asm/bpf_perf_event.h b/tools/arch/s390/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 000000000000..0a8e37a519f2
--- /dev/null
+++ b/tools/arch/s390/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include "ptrace.h"
+
+typedef user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
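
Both new bpf_perf_event.h headers exist so that struct bpf_perf_event_data in the generic UAPI can describe its regs member with an arch-specific bpf_user_pt_regs_t instead of struct pt_regs. As a rough sketch only, assuming clang -target bpf and the updated headers, a perf_event BPF program could read the sampled PSW address on s390 (other architectures expose the instruction pointer through their own register layout):

#include <linux/types.h>
#include <linux/bpf_perf_event.h>

/* A perf_event-type BPF program receives a struct bpf_perf_event_data
 * context; its regs member now has type bpf_user_pt_regs_t, which on
 * s390 is the user_pt_regs structure exported by the header above. */
__attribute__((section("perf_event"), used))
int sample_prog(struct bpf_perf_event_data *ctx)
{
        unsigned long ip = ctx->regs.psw.addr;  /* sampled instruction address */

        /* A real program would typically record ip in a BPF map; the
         * return value is handed back to the perf overflow handler. */
        return ip != 0;
}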
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
index 9ad172dcd912..38535a57fef8 100644
--- a/tools/arch/s390/include/uapi/asm/kvm.h
+++ b/tools/arch/s390/include/uapi/asm/kvm.h
@@ -6,10 +6,6 @@
  *
  * Copyright IBM Corp. 2008
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  *            Christian Borntraeger <borntraeger@de.ibm.com>
  */
diff --git a/tools/arch/s390/include/uapi/asm/kvm_perf.h b/tools/arch/s390/include/uapi/asm/kvm_perf.h
index c36c97ffdc6f..84606b8cc49e 100644
--- a/tools/arch/s390/include/uapi/asm/kvm_perf.h
+++ b/tools/arch/s390/include/uapi/asm/kvm_perf.h
@@ -4,10 +4,6 @@
  *
  * Copyright 2014 IBM Corp.
  * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 
 #ifndef __LINUX_KVM_PERF_S390_H
diff --git a/tools/arch/s390/include/uapi/asm/perf_regs.h b/tools/arch/s390/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000000..d17dd9e5d516
--- /dev/null
+++ b/tools/arch/s390/include/uapi/asm/perf_regs.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_S390_PERF_REGS_H
+#define _ASM_S390_PERF_REGS_H
+
+enum perf_event_s390_regs {
+	PERF_REG_S390_R0,
+	PERF_REG_S390_R1,
+	PERF_REG_S390_R2,
+	PERF_REG_S390_R3,
+	PERF_REG_S390_R4,
+	PERF_REG_S390_R5,
+	PERF_REG_S390_R6,
+	PERF_REG_S390_R7,
+	PERF_REG_S390_R8,
+	PERF_REG_S390_R9,
+	PERF_REG_S390_R10,
+	PERF_REG_S390_R11,
+	PERF_REG_S390_R12,
+	PERF_REG_S390_R13,
+	PERF_REG_S390_R14,
+	PERF_REG_S390_R15,
+	PERF_REG_S390_FP0,
+	PERF_REG_S390_FP1,
+	PERF_REG_S390_FP2,
+	PERF_REG_S390_FP3,
+	PERF_REG_S390_FP4,
+	PERF_REG_S390_FP5,
+	PERF_REG_S390_FP6,
+	PERF_REG_S390_FP7,
+	PERF_REG_S390_FP8,
+	PERF_REG_S390_FP9,
+	PERF_REG_S390_FP10,
+	PERF_REG_S390_FP11,
+	PERF_REG_S390_FP12,
+	PERF_REG_S390_FP13,
+	PERF_REG_S390_FP14,
+	PERF_REG_S390_FP15,
+	PERF_REG_S390_MASK,
+	PERF_REG_S390_PC,
+
+	PERF_REG_S390_MAX
+};
+
+#endif /* _ASM_S390_PERF_REGS_H */
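
perf consumes this enum when user-space registers are requested with PERF_SAMPLE_REGS_USER: each enum value is the bit position in perf_event_attr.sample_regs_user. A small sketch (not part of this series, headers assumed installed) of how a tool might ask for the s390 PSW address and stack pointer with every sample:

#include <linux/perf_event.h>
#include <asm/perf_regs.h>      /* PERF_REG_S390_* from the header above */

/* Request the program counter (PSW address) and r15 (stack pointer)
 * in the user register dump attached to each sample. */
static void request_s390_user_regs(struct perf_event_attr *attr)
{
        attr->sample_type      |= PERF_SAMPLE_REGS_USER;
        attr->sample_regs_user  = (1ULL << PERF_REG_S390_PC) |
                                  (1ULL << PERF_REG_S390_R15);
}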
diff --git a/tools/arch/s390/include/uapi/asm/ptrace.h b/tools/arch/s390/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000000..543dd70e12c8
--- /dev/null
+++ b/tools/arch/s390/include/uapi/asm/ptrace.h
@@ -0,0 +1,457 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2/*
3 * S390 version
4 * Copyright IBM Corp. 1999, 2000
5 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
6 */
7
8#ifndef _UAPI_S390_PTRACE_H
9#define _UAPI_S390_PTRACE_H
10
11/*
12 * Offsets in the user_regs_struct. They are used for the ptrace
13 * system call and in entry.S
14 */
15#ifndef __s390x__
16
17#define PT_PSWMASK 0x00
18#define PT_PSWADDR 0x04
19#define PT_GPR0 0x08
20#define PT_GPR1 0x0C
21#define PT_GPR2 0x10
22#define PT_GPR3 0x14
23#define PT_GPR4 0x18
24#define PT_GPR5 0x1C
25#define PT_GPR6 0x20
26#define PT_GPR7 0x24
27#define PT_GPR8 0x28
28#define PT_GPR9 0x2C
29#define PT_GPR10 0x30
30#define PT_GPR11 0x34
31#define PT_GPR12 0x38
32#define PT_GPR13 0x3C
33#define PT_GPR14 0x40
34#define PT_GPR15 0x44
35#define PT_ACR0 0x48
36#define PT_ACR1 0x4C
37#define PT_ACR2 0x50
38#define PT_ACR3 0x54
39#define PT_ACR4 0x58
40#define PT_ACR5 0x5C
41#define PT_ACR6 0x60
42#define PT_ACR7 0x64
43#define PT_ACR8 0x68
44#define PT_ACR9 0x6C
45#define PT_ACR10 0x70
46#define PT_ACR11 0x74
47#define PT_ACR12 0x78
48#define PT_ACR13 0x7C
49#define PT_ACR14 0x80
50#define PT_ACR15 0x84
51#define PT_ORIGGPR2 0x88
52#define PT_FPC 0x90
53/*
54 * A nasty fact of life that the ptrace api
55 * only supports passing of longs.
56 */
57#define PT_FPR0_HI 0x98
58#define PT_FPR0_LO 0x9C
59#define PT_FPR1_HI 0xA0
60#define PT_FPR1_LO 0xA4
61#define PT_FPR2_HI 0xA8
62#define PT_FPR2_LO 0xAC
63#define PT_FPR3_HI 0xB0
64#define PT_FPR3_LO 0xB4
65#define PT_FPR4_HI 0xB8
66#define PT_FPR4_LO 0xBC
67#define PT_FPR5_HI 0xC0
68#define PT_FPR5_LO 0xC4
69#define PT_FPR6_HI 0xC8
70#define PT_FPR6_LO 0xCC
71#define PT_FPR7_HI 0xD0
72#define PT_FPR7_LO 0xD4
73#define PT_FPR8_HI 0xD8
74#define PT_FPR8_LO 0XDC
75#define PT_FPR9_HI 0xE0
76#define PT_FPR9_LO 0xE4
77#define PT_FPR10_HI 0xE8
78#define PT_FPR10_LO 0xEC
79#define PT_FPR11_HI 0xF0
80#define PT_FPR11_LO 0xF4
81#define PT_FPR12_HI 0xF8
82#define PT_FPR12_LO 0xFC
83#define PT_FPR13_HI 0x100
84#define PT_FPR13_LO 0x104
85#define PT_FPR14_HI 0x108
86#define PT_FPR14_LO 0x10C
87#define PT_FPR15_HI 0x110
88#define PT_FPR15_LO 0x114
89#define PT_CR_9 0x118
90#define PT_CR_10 0x11C
91#define PT_CR_11 0x120
92#define PT_IEEE_IP 0x13C
93#define PT_LASTOFF PT_IEEE_IP
94#define PT_ENDREGS 0x140-1
95
96#define GPR_SIZE 4
97#define CR_SIZE 4
98
99#define STACK_FRAME_OVERHEAD 96 /* size of minimum stack frame */
100
101#else /* __s390x__ */
102
103#define PT_PSWMASK 0x00
104#define PT_PSWADDR 0x08
105#define PT_GPR0 0x10
106#define PT_GPR1 0x18
107#define PT_GPR2 0x20
108#define PT_GPR3 0x28
109#define PT_GPR4 0x30
110#define PT_GPR5 0x38
111#define PT_GPR6 0x40
112#define PT_GPR7 0x48
113#define PT_GPR8 0x50
114#define PT_GPR9 0x58
115#define PT_GPR10 0x60
116#define PT_GPR11 0x68
117#define PT_GPR12 0x70
118#define PT_GPR13 0x78
119#define PT_GPR14 0x80
120#define PT_GPR15 0x88
121#define PT_ACR0 0x90
122#define PT_ACR1 0x94
123#define PT_ACR2 0x98
124#define PT_ACR3 0x9C
125#define PT_ACR4 0xA0
126#define PT_ACR5 0xA4
127#define PT_ACR6 0xA8
128#define PT_ACR7 0xAC
129#define PT_ACR8 0xB0
130#define PT_ACR9 0xB4
131#define PT_ACR10 0xB8
132#define PT_ACR11 0xBC
133#define PT_ACR12 0xC0
134#define PT_ACR13 0xC4
135#define PT_ACR14 0xC8
136#define PT_ACR15 0xCC
137#define PT_ORIGGPR2 0xD0
138#define PT_FPC 0xD8
139#define PT_FPR0 0xE0
140#define PT_FPR1 0xE8
141#define PT_FPR2 0xF0
142#define PT_FPR3 0xF8
143#define PT_FPR4 0x100
144#define PT_FPR5 0x108
145#define PT_FPR6 0x110
146#define PT_FPR7 0x118
147#define PT_FPR8 0x120
148#define PT_FPR9 0x128
149#define PT_FPR10 0x130
150#define PT_FPR11 0x138
151#define PT_FPR12 0x140
152#define PT_FPR13 0x148
153#define PT_FPR14 0x150
154#define PT_FPR15 0x158
155#define PT_CR_9 0x160
156#define PT_CR_10 0x168
157#define PT_CR_11 0x170
158#define PT_IEEE_IP 0x1A8
159#define PT_LASTOFF PT_IEEE_IP
160#define PT_ENDREGS 0x1B0-1
161
162#define GPR_SIZE 8
163#define CR_SIZE 8
164
165#define STACK_FRAME_OVERHEAD 160 /* size of minimum stack frame */
166
167#endif /* __s390x__ */
168
169#define NUM_GPRS 16
170#define NUM_FPRS 16
171#define NUM_CRS 16
172#define NUM_ACRS 16
173
174#define NUM_CR_WORDS 3
175
176#define FPR_SIZE 8
177#define FPC_SIZE 4
178#define FPC_PAD_SIZE 4 /* gcc insists on aligning the fpregs */
179#define ACR_SIZE 4
180
181
182#define PTRACE_OLDSETOPTIONS 21
183
184#ifndef __ASSEMBLY__
185#include <linux/stddef.h>
186#include <linux/types.h>
187
188typedef union {
189 float f;
190 double d;
191 __u64 ui;
192 struct
193 {
194 __u32 hi;
195 __u32 lo;
196 } fp;
197} freg_t;
198
199typedef struct {
200 __u32 fpc;
201 __u32 pad;
202 freg_t fprs[NUM_FPRS];
203} s390_fp_regs;
204
205#define FPC_EXCEPTION_MASK 0xF8000000
206#define FPC_FLAGS_MASK 0x00F80000
207#define FPC_DXC_MASK 0x0000FF00
208#define FPC_RM_MASK 0x00000003
209
210/* this typedef defines how a Program Status Word looks like */
211typedef struct {
212 unsigned long mask;
213 unsigned long addr;
214} __attribute__ ((aligned(8))) psw_t;
215
216#ifndef __s390x__
217
218#define PSW_MASK_PER 0x40000000UL
219#define PSW_MASK_DAT 0x04000000UL
220#define PSW_MASK_IO 0x02000000UL
221#define PSW_MASK_EXT 0x01000000UL
222#define PSW_MASK_KEY 0x00F00000UL
223#define PSW_MASK_BASE 0x00080000UL /* always one */
224#define PSW_MASK_MCHECK 0x00040000UL
225#define PSW_MASK_WAIT 0x00020000UL
226#define PSW_MASK_PSTATE 0x00010000UL
227#define PSW_MASK_ASC 0x0000C000UL
228#define PSW_MASK_CC 0x00003000UL
229#define PSW_MASK_PM 0x00000F00UL
230#define PSW_MASK_RI 0x00000000UL
231#define PSW_MASK_EA 0x00000000UL
232#define PSW_MASK_BA 0x00000000UL
233
234#define PSW_MASK_USER 0x0000FF00UL
235
236#define PSW_ADDR_AMODE 0x80000000UL
237#define PSW_ADDR_INSN 0x7FFFFFFFUL
238
239#define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 20)
240
241#define PSW_ASC_PRIMARY 0x00000000UL
242#define PSW_ASC_ACCREG 0x00004000UL
243#define PSW_ASC_SECONDARY 0x00008000UL
244#define PSW_ASC_HOME 0x0000C000UL
245
246#else /* __s390x__ */
247
248#define PSW_MASK_PER 0x4000000000000000UL
249#define PSW_MASK_DAT 0x0400000000000000UL
250#define PSW_MASK_IO 0x0200000000000000UL
251#define PSW_MASK_EXT 0x0100000000000000UL
252#define PSW_MASK_BASE 0x0000000000000000UL
253#define PSW_MASK_KEY 0x00F0000000000000UL
254#define PSW_MASK_MCHECK 0x0004000000000000UL
255#define PSW_MASK_WAIT 0x0002000000000000UL
256#define PSW_MASK_PSTATE 0x0001000000000000UL
257#define PSW_MASK_ASC 0x0000C00000000000UL
258#define PSW_MASK_CC 0x0000300000000000UL
259#define PSW_MASK_PM 0x00000F0000000000UL
260#define PSW_MASK_RI 0x0000008000000000UL
261#define PSW_MASK_EA 0x0000000100000000UL
262#define PSW_MASK_BA 0x0000000080000000UL
263
264#define PSW_MASK_USER 0x0000FF0180000000UL
265
266#define PSW_ADDR_AMODE 0x0000000000000000UL
267#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL
268
269#define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 52)
270
271#define PSW_ASC_PRIMARY 0x0000000000000000UL
272#define PSW_ASC_ACCREG 0x0000400000000000UL
273#define PSW_ASC_SECONDARY 0x0000800000000000UL
274#define PSW_ASC_HOME 0x0000C00000000000UL
275
276#endif /* __s390x__ */
277
278
279/*
280 * The s390_regs structure is used to define the elf_gregset_t.
281 */
282typedef struct {
283 psw_t psw;
284 unsigned long gprs[NUM_GPRS];
285 unsigned int acrs[NUM_ACRS];
286 unsigned long orig_gpr2;
287} s390_regs;
288
289/*
290 * The user_pt_regs structure exports the beginning of
291 * the in-kernel pt_regs structure to user space.
292 */
293typedef struct {
294 unsigned long args[1];
295 psw_t psw;
296 unsigned long gprs[NUM_GPRS];
297} user_pt_regs;
298
299/*
300 * Now for the user space program event recording (trace) definitions.
301 * The following structures are used only for the ptrace interface, don't
302 * touch or even look at it if you don't want to modify the user-space
303 * ptrace interface. In particular stay away from it for in-kernel PER.
304 */
305typedef struct {
306 unsigned long cr[NUM_CR_WORDS];
307} per_cr_words;
308
309#define PER_EM_MASK 0xE8000000UL
310
311typedef struct {
312#ifdef __s390x__
313 unsigned : 32;
314#endif /* __s390x__ */
315 unsigned em_branching : 1;
316 unsigned em_instruction_fetch : 1;
317 /*
318 * Switching on storage alteration automatically fixes
319 * the storage alteration event bit in the users std.
320 */
321 unsigned em_storage_alteration : 1;
322 unsigned em_gpr_alt_unused : 1;
323 unsigned em_store_real_address : 1;
324 unsigned : 3;
325 unsigned branch_addr_ctl : 1;
326 unsigned : 1;
327 unsigned storage_alt_space_ctl : 1;
328 unsigned : 21;
329 unsigned long starting_addr;
330 unsigned long ending_addr;
331} per_cr_bits;
332
333typedef struct {
334 unsigned short perc_atmid;
335 unsigned long address;
336 unsigned char access_id;
337} per_lowcore_words;
338
339typedef struct {
340 unsigned perc_branching : 1;
341 unsigned perc_instruction_fetch : 1;
342 unsigned perc_storage_alteration : 1;
343 unsigned perc_gpr_alt_unused : 1;
344 unsigned perc_store_real_address : 1;
345 unsigned : 3;
346 unsigned atmid_psw_bit_31 : 1;
347 unsigned atmid_validity_bit : 1;
348 unsigned atmid_psw_bit_32 : 1;
349 unsigned atmid_psw_bit_5 : 1;
350 unsigned atmid_psw_bit_16 : 1;
351 unsigned atmid_psw_bit_17 : 1;
352 unsigned si : 2;
353 unsigned long address;
354 unsigned : 4;
355 unsigned access_id : 4;
356} per_lowcore_bits;
357
358typedef struct {
359 union {
360 per_cr_words words;
361 per_cr_bits bits;
362 } control_regs;
363 /*
364 * The single_step and instruction_fetch bits are obsolete,
365 * the kernel always sets them to zero. To enable single
366 * stepping use ptrace(PTRACE_SINGLESTEP) instead.
367 */
368 unsigned single_step : 1;
369 unsigned instruction_fetch : 1;
370 unsigned : 30;
371 /*
372 * These addresses are copied into cr10 & cr11 if single
373 * stepping is switched off
374 */
375 unsigned long starting_addr;
376 unsigned long ending_addr;
377 union {
378 per_lowcore_words words;
379 per_lowcore_bits bits;
380 } lowcore;
381} per_struct;
382
383typedef struct {
384 unsigned int len;
385 unsigned long kernel_addr;
386 unsigned long process_addr;
387} ptrace_area;
388
389/*
390 * S/390 specific non posix ptrace requests. I chose unusual values so
391 * they are unlikely to clash with future ptrace definitions.
392 */
393#define PTRACE_PEEKUSR_AREA 0x5000
394#define PTRACE_POKEUSR_AREA 0x5001
395#define PTRACE_PEEKTEXT_AREA 0x5002
396#define PTRACE_PEEKDATA_AREA 0x5003
397#define PTRACE_POKETEXT_AREA 0x5004
398#define PTRACE_POKEDATA_AREA 0x5005
399#define PTRACE_GET_LAST_BREAK 0x5006
400#define PTRACE_PEEK_SYSTEM_CALL 0x5007
401#define PTRACE_POKE_SYSTEM_CALL 0x5008
402#define PTRACE_ENABLE_TE 0x5009
403#define PTRACE_DISABLE_TE 0x5010
404#define PTRACE_TE_ABORT_RAND 0x5011
405
406/*
407 * The numbers chosen here are somewhat arbitrary but absolutely MUST
408 * not overlap with any of the number assigned in <linux/ptrace.h>.
409 */
410#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */
411
412/*
413 * PT_PROT definition is loosely based on hppa bsd definition in
414 * gdb/hppab-nat.c
415 */
416#define PTRACE_PROT 21
417
418typedef enum {
419 ptprot_set_access_watchpoint,
420 ptprot_set_write_watchpoint,
421 ptprot_disable_watchpoint
422} ptprot_flags;
423
424typedef struct {
425 unsigned long lowaddr;
426 unsigned long hiaddr;
427 ptprot_flags prot;
428} ptprot_area;
429
430/* Sequence of bytes for breakpoint illegal instruction. */
431#define S390_BREAKPOINT {0x0,0x1}
432#define S390_BREAKPOINT_U16 ((__u16)0x0001)
433#define S390_SYSCALL_OPCODE ((__u16)0x0a00)
434#define S390_SYSCALL_SIZE 2
435
436/*
437 * The user_regs_struct defines the way the user registers are
438 * store on the stack for signal handling.
439 */
440struct user_regs_struct {
441 psw_t psw;
442 unsigned long gprs[NUM_GPRS];
443 unsigned int acrs[NUM_ACRS];
444 unsigned long orig_gpr2;
445 s390_fp_regs fp_regs;
446 /*
447 * These per registers are in here so that gdb can modify them
448 * itself as there is no "official" ptrace interface for hardware
449 * watchpoints. This is the way intel does it.
450 */
451 per_struct per_info;
452 unsigned long ieee_instruction_pointer; /* obsolete, always 0 */
453};
454
455#endif /* __ASSEMBLY__ */
456
457#endif /* _UAPI_S390_PTRACE_H */
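
The PSW_MASK_* constants in this header describe the layout of the s390 Program Status Word. As a rough illustration only (64-bit __s390x__ layout assumed; the 31-bit masks above differ), a debugger-style helper could decode a PSW captured from a traced task like this:

#include <stdio.h>
#include <asm/ptrace.h>         /* psw_t and the PSW_MASK_* constants above */

/* Print a few PSW fields: the instruction address, the condition code
 * (bits 44-45 under the 64-bit layout, hence the shift), and whether the
 * task was running in problem (user) state. */
static void dump_psw(const psw_t *psw)
{
        unsigned int cc = (unsigned int)((psw->mask & PSW_MASK_CC) >> 44);
        int problem_state = (psw->mask & PSW_MASK_PSTATE) != 0;

        printf("addr=%#lx cc=%u %s\n", psw->addr, cc,
               problem_state ? "problem state" : "supervisor state");
}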
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 793690fbda36..800104c8a3ed 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -13,173 +13,176 @@
13/* 13/*
14 * Defines x86 CPU feature bits 14 * Defines x86 CPU feature bits
15 */ 15 */
16#define NCAPINTS 18 /* N 32-bit words worth of info */ 16#define NCAPINTS 18 /* N 32-bit words worth of info */
17#define NBUGINTS 1 /* N 32-bit bug flags */ 17#define NBUGINTS 1 /* N 32-bit bug flags */
18 18
19/* 19/*
20 * Note: If the comment begins with a quoted string, that string is used 20 * Note: If the comment begins with a quoted string, that string is used
21 * in /proc/cpuinfo instead of the macro name. If the string is "", 21 * in /proc/cpuinfo instead of the macro name. If the string is "",
22 * this feature bit is not displayed in /proc/cpuinfo at all. 22 * this feature bit is not displayed in /proc/cpuinfo at all.
23 *
24 * When adding new features here that depend on other features,
25 * please update the table in kernel/cpu/cpuid-deps.c as well.
23 */ 26 */
24 27
25/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ 28/* Intel-defined CPU features, CPUID level 0x00000001 (EDX), word 0 */
26#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */ 29#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
27#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */ 30#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
28#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */ 31#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
29#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */ 32#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
30#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */ 33#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
31#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */ 34#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
32#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */ 35#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
33#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */ 36#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
34#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */ 37#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
35#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */ 38#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
36#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */ 39#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
37#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */ 40#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
38#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */ 41#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
39#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */ 42#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
40#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */ 43#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */
41 /* (plus FCMOVcc, FCOMI with FPU) */ 44#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
42#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */ 45#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
43#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */ 46#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
44#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */ 47#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
45#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */ 48#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
46#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */ 49#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
47#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */ 50#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
48#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */ 51#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
49#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ 52#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
50#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */ 53#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
51#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */ 54#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
52#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */ 55#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
53#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */ 56#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
54#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */ 57#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
55#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */ 58#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
56#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
57 59
58/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ 60/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
59/* Don't duplicate feature flags which are redundant with Intel! */ 61/* Don't duplicate feature flags which are redundant with Intel! */
60#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */ 62#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
61#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */ 63#define X86_FEATURE_MP ( 1*32+19) /* MP Capable */
62#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */ 64#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
63#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */ 65#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
64#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */ 66#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
65#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */ 67#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
66#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */ 68#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
67#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */ 69#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64, 64-bit support) */
68#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */ 70#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow extensions */
69#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */ 71#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow */
70 72
71/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ 73/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
72#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */ 74#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
73#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */ 75#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
74#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */ 76#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
75 77
76/* Other features, Linux-defined mapping, word 3 */ 78/* Other features, Linux-defined mapping, word 3 */
77/* This range is used for feature bits which conflict or are synthesized */ 79/* This range is used for feature bits which conflict or are synthesized */
78#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */ 80#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
79#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ 81#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
80#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ 82#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
81#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ 83#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
82/* cpu types for specific tunings: */ 84
83#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ 85/* CPU types for specific tunings: */
84#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */ 86#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
85#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ 87#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
86#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ 88#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
87#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ 89#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
88#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */ 90#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
89#define X86_FEATURE_ART ( 3*32+10) /* Platform has always running timer (ART) */ 91#define X86_FEATURE_UP ( 3*32+ 9) /* SMP kernel running on UP */
90#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ 92#define X86_FEATURE_ART ( 3*32+10) /* Always running timer (ART) */
91#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */ 93#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
92#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */ 94#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
93#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */ 95#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
94#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */ 96#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
95#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */ 97#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
96#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */ 98#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
97#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */ 99#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" MFENCE synchronizes RDTSC */
98#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */ 100#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
99#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ 101#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
100#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */ 102#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
101#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */ 103#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
102#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ 104#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* CPU topology enum extensions */
103#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ 105#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
104#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */ 106#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
105#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */ 107#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */
106#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */ 108#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */
107#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */ 109#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */
108#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */ 110#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
109#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */ 111#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
112#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
110 113
111/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ 114/* Intel-defined CPU features, CPUID level 0x00000001 (ECX), word 4 */
112#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */ 115#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
113#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */ 116#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
114#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */ 117#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
115#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */ 118#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" MONITOR/MWAIT support */
116#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */ 119#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */
117#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */ 120#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
118#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */ 121#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer Mode eXtensions */
119#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */ 122#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
120#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ 123#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
121#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ 124#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
122#define X86_FEATURE_CID ( 4*32+10) /* Context ID */ 125#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
123#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */ 126#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
124#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ 127#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
125#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */ 128#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B instruction */
126#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ 129#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
127#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */ 130#define X86_FEATURE_PDCM ( 4*32+15) /* Perf/Debug Capabilities MSR */
128#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */ 131#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
129#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */ 132#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
130#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */ 133#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
131#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */ 134#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
132#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */ 135#define X86_FEATURE_X2APIC ( 4*32+21) /* X2APIC */
133#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */ 136#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
134#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */ 137#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
135#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */ 138#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* TSC deadline timer */
136#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */ 139#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
137#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ 140#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */
138#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */ 141#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE instruction enabled in the OS */
139#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */ 142#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
140#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */ 143#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit FP conversions */
141#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */ 144#define X86_FEATURE_RDRAND ( 4*32+30) /* RDRAND instruction */
142#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ 145#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
143 146
144/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ 147/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
145#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ 148#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
146#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ 149#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
147#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ 150#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
148#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ 151#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
149#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ 152#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
150#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */ 153#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
151#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */ 154#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
152#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ 155#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
153#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ 156#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
154#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ 157#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
155 158
156/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ 159/* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */
157#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ 160#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
158#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */ 161#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
159#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */ 162#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure Virtual Machine */
160#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */ 163#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
161#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */ 164#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
162#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */ 165#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
163#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */ 166#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
164#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */ 167#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
165#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */ 168#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
166#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */ 169#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
167#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */ 170#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
168#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */ 171#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
169#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */ 172#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
170#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */ 173#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
171#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */ 174#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
172#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */ 175#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
173#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */ 176#define X86_FEATURE_TCE ( 6*32+17) /* Translation Cache Extension */
174#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */ 177#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
175#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */ 178#define X86_FEATURE_TBM ( 6*32+21) /* Trailing Bit Manipulations */
176#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */ 179#define X86_FEATURE_TOPOEXT ( 6*32+22) /* Topology extensions CPUID leafs */
177#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */ 180#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* Core performance counter extensions */
178#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ 181#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
179#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */ 182#define X86_FEATURE_BPEXT ( 6*32+26) /* Data breakpoint extension */
180#define X86_FEATURE_PTSC ( 6*32+27) /* performance time-stamp counter */ 183#define X86_FEATURE_PTSC ( 6*32+27) /* Performance time-stamp counter */
181#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */ 184#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */
182#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */ 185#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX instructions) */
183 186
184/* 187/*
185 * Auxiliary flags: Linux defined - For features scattered in various 188 * Auxiliary flags: Linux defined - For features scattered in various
@@ -187,146 +190,155 @@
187 * 190 *
188 * Reuse free bits when adding new feature flags! 191 * Reuse free bits when adding new feature flags!
189 */ 192 */
190#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */ 193#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT instructions */
191#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */ 194#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */
192#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ 195#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
193#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ 196#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
194#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */ 197#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
195#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ 198#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
196#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ 199#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
197 200
198#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ 201#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
199#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ 202#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
200#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ 203#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
201 204
202#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ 205#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
203#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ 206#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
204#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */ 207#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */
205#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */ 208#define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
206 209
207#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ 210#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
208 211
209/* Virtualization flags: Linux defined, word 8 */ 212/* Virtualization flags: Linux defined, word 8 */
210#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ 213#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
211#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ 214#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
212#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */ 215#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
213#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */ 216#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
214#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */ 217#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
215 218
216#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */ 219#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */
217#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ 220#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
218 221
219 222
220/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ 223/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
221#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ 224#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
222#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */ 225#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3B */
223#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ 226#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
224#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ 227#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
225#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ 228#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
226#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ 229#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
227#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */ 230#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
228#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */ 231#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */
229#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ 232#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
230#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ 233#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
231#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ 234#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
232#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ 235#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
233#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */ 236#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
234#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ 237#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
235#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */ 238#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
236#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ 239#define X86_FEATURE_RDSEED ( 9*32+18) /* RDSEED instruction */
237#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ 240#define X86_FEATURE_ADX ( 9*32+19) /* ADCX and ADOX instructions */
238#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ 241#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
239#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */ 242#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
240#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ 243#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
241#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ 244#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
242#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ 245#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
243#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ 246#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
244#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ 247#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
245#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */ 248#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
246#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */ 249#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
247#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */ 250#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
248 251
249/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */ 252/* Extended state features, CPUID level 0x0000000d:1 (EAX), word 10 */
250#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */ 253#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT instruction */
251#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */ 254#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC instruction */
252#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */ 255#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */
253#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */ 256#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */
254 257
255/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */ 258/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */
256#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */ 259#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
257 260
258/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */ 261/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */
259#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */ 262#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring */
260#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */ 263#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
261#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */ 264#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */
262 265
263/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */ 266/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
264#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */ 267#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
265#define X86_FEATURE_IRPERF (13*32+1) /* Instructions Retired Count */ 268#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
269#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
266 270
267/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */ 271/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
268#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ 272#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
269#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */ 273#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
270#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */ 274#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
271#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */ 275#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
272#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */ 276#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
273#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */ 277#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
274#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */ 278#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
275#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */ 279#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
276#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */ 280#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
277#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ 281#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
278 282
279/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */ 283/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */
280#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */ 284#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
281#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */ 285#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
282#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */ 286#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
283#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */ 287#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
284#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */ 288#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
285#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */ 289#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
286#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */ 290#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
287#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */ 291#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
288#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */ 292#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
289#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ 293#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
290#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */ 294#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
291#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ 295#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
292#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */ 296#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
293 297
294/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */ 298/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
295#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ 299#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
296#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ 300#define X86_FEATURE_UMIP (16*32+ 2) /* User Mode Instruction Protection */
297#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ 301#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
298#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ 302#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
299#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ 303#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */
300#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ 304#define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */
305#define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */
306#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */
307#define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */
308#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
309#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
310#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
311#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
301 312
302/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */ 313/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
303#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */ 314#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
304#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */ 315#define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */
305#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */ 316#define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */
306 317
307/* 318/*
308 * BUG word(s) 319 * BUG word(s)
309 */ 320 */
310#define X86_BUG(x) (NCAPINTS*32 + (x)) 321#define X86_BUG(x) (NCAPINTS*32 + (x))
311 322
312#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ 323#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
313#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ 324#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
314#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ 325#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
315#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ 326#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
316#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ 327#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
317#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ 328#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
318#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ 329#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
319#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ 330#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
320#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ 331#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
321#ifdef CONFIG_X86_32 332#ifdef CONFIG_X86_32
322/* 333/*
323 * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional 334 * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional
324 * to avoid confusion. 335 * to avoid confusion.
325 */ 336 */
326#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */ 337#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
327#endif 338#endif
328#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */ 339#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
329#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ 340#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
330#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ 341#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
331#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ 342#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
343
332#endif /* _ASM_X86_CPUFEATURES_H */ 344#endif /* _ASM_X86_CPUFEATURES_H */
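
Editor's note: each X86_FEATURE_* value above packs a capability word index and a bit position as (word*32 + bit). A minimal stand-alone sketch of that encoding, assuming nothing beyond the arithmetic visible in this header (the macro names here are illustrative, not kernel ones):

#include <stdio.h>

/* Same packing as the defines above: word index times 32 plus bit position. */
#define FEATURE_WORD(f)	((f) / 32)
#define FEATURE_BIT(f)	((f) % 32)

int main(void)
{
	unsigned int umip = 16 * 32 + 2;	/* X86_FEATURE_UMIP from the hunk above */

	printf("word %u, bit %u, mask 0x%08x\n",
	       FEATURE_WORD(umip), FEATURE_BIT(umip), 1u << FEATURE_BIT(umip));
	return 0;
}
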
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index c10c9128f54e..14d6d5007314 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -16,6 +16,12 @@
16# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31)) 16# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31))
17#endif 17#endif
18 18
19#ifdef CONFIG_X86_INTEL_UMIP
20# define DISABLE_UMIP 0
21#else
22# define DISABLE_UMIP (1<<(X86_FEATURE_UMIP & 31))
23#endif
24
19#ifdef CONFIG_X86_64 25#ifdef CONFIG_X86_64
20# define DISABLE_VME (1<<(X86_FEATURE_VME & 31)) 26# define DISABLE_VME (1<<(X86_FEATURE_VME & 31))
21# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31)) 27# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31))
@@ -63,7 +69,7 @@
63#define DISABLED_MASK13 0 69#define DISABLED_MASK13 0
64#define DISABLED_MASK14 0 70#define DISABLED_MASK14 0
65#define DISABLED_MASK15 0 71#define DISABLED_MASK15 0
66#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57) 72#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP)
67#define DISABLED_MASK17 0 73#define DISABLED_MASK17 0
68#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) 74#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
69 75
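
Editor's note: the DISABLE_UMIP hunk follows the existing pattern in this header: when a feature is configured out, its bit is folded into the DISABLED_MASKn for its word so feature tests can fail at compile time. A hedged sketch of how such a mask test works in principle; the helper name is made up for illustration, the real kernel goes through cpu_feature_enabled() and friends:

#include <stdbool.h>
#include <stdio.h>

#define X86_FEATURE_UMIP	(16 * 32 + 2)		/* word 16, bit 2, as in the diff */
#define DISABLE_UMIP		(1u << (X86_FEATURE_UMIP & 31))
#define DISABLED_MASK16		(DISABLE_UMIP)		/* pretend only UMIP is disabled */

/* Hypothetical helper: is this word-16 feature compiled out? */
static bool word16_feature_disabled(unsigned int feature)
{
	return DISABLED_MASK16 & (1u << (feature & 31));
}

int main(void)
{
	printf("UMIP disabled at build time: %s\n",
	       word16_feature_disabled(X86_FEATURE_UMIP) ? "yes" : "no");
	return 0;
}
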
diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile
index bde77d7c4390..37292bb5ce60 100644
--- a/tools/bpf/bpftool/Documentation/Makefile
+++ b/tools/bpf/bpftool/Documentation/Makefile
@@ -6,7 +6,7 @@ RM ?= rm -f
6 6
7# Make the path relative to DESTDIR, not prefix 7# Make the path relative to DESTDIR, not prefix
8ifndef DESTDIR 8ifndef DESTDIR
9prefix?=$(HOME) 9prefix ?= /usr/local
10endif 10endif
11mandir ?= $(prefix)/share/man 11mandir ?= $(prefix)/share/man
12man8dir = $(mandir)/man8 12man8dir = $(mandir)/man8
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index 813826c50936..ec3052c0b004 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -45,8 +45,8 @@ $(LIBBPF)-clean:
45 $(call QUIET_CLEAN, libbpf) 45 $(call QUIET_CLEAN, libbpf)
46 $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) clean >/dev/null 46 $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) clean >/dev/null
47 47
48prefix = /usr 48prefix = /usr/local
49bash_compdir ?= $(prefix)/share/bash-completion/completions 49bash_compdir ?= /usr/share/bash-completion/completions
50 50
51CC = gcc 51CC = gcc
52 52
@@ -76,6 +76,7 @@ clean: $(LIBBPF)-clean
76 $(Q)rm -rf $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d 76 $(Q)rm -rf $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
77 77
78install: 78install:
79 install -m 0755 -d $(prefix)/sbin
79 install $(OUTPUT)bpftool $(prefix)/sbin/bpftool 80 install $(OUTPUT)bpftool $(prefix)/sbin/bpftool
80 install -m 0755 -d $(bash_compdir) 81 install -m 0755 -d $(bash_compdir)
81 install -m 0644 bash-completion/bpftool $(bash_compdir) 82 install -m 0644 bash-completion/bpftool $(bash_compdir)
@@ -88,5 +89,5 @@ doc-install:
88 89
89FORCE: 90FORCE:
90 91
91.PHONY: all clean FORCE 92.PHONY: all clean FORCE install doc doc-install
92.DEFAULT_GOAL := all 93.DEFAULT_GOAL := all
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index d6e4762170a4..d294bc8168be 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -58,11 +58,19 @@ bool show_pinned;
58struct pinned_obj_table prog_table; 58struct pinned_obj_table prog_table;
59struct pinned_obj_table map_table; 59struct pinned_obj_table map_table;
60 60
61static void __noreturn clean_and_exit(int i)
62{
63 if (json_output)
64 jsonw_destroy(&json_wtr);
65
66 exit(i);
67}
68
61void usage(void) 69void usage(void)
62{ 70{
63 last_do_help(last_argc - 1, last_argv + 1); 71 last_do_help(last_argc - 1, last_argv + 1);
64 72
65 exit(-1); 73 clean_and_exit(-1);
66} 74}
67 75
68static int do_help(int argc, char **argv) 76static int do_help(int argc, char **argv)
@@ -280,6 +288,7 @@ int main(int argc, char **argv)
280 hash_init(prog_table.table); 288 hash_init(prog_table.table);
281 hash_init(map_table.table); 289 hash_init(map_table.table);
282 290
291 opterr = 0;
283 while ((opt = getopt_long(argc, argv, "Vhpjf", 292 while ((opt = getopt_long(argc, argv, "Vhpjf",
284 options, NULL)) >= 0) { 293 options, NULL)) >= 0) {
285 switch (opt) { 294 switch (opt) {
@@ -291,13 +300,25 @@ int main(int argc, char **argv)
291 pretty_output = true; 300 pretty_output = true;
292 /* fall through */ 301 /* fall through */
293 case 'j': 302 case 'j':
294 json_output = true; 303 if (!json_output) {
304 json_wtr = jsonw_new(stdout);
305 if (!json_wtr) {
306 p_err("failed to create JSON writer");
307 return -1;
308 }
309 json_output = true;
310 }
311 jsonw_pretty(json_wtr, pretty_output);
295 break; 312 break;
296 case 'f': 313 case 'f':
297 show_pinned = true; 314 show_pinned = true;
298 break; 315 break;
299 default: 316 default:
300 usage(); 317 p_err("unrecognized option '%s'", argv[optind - 1]);
318 if (json_output)
319 clean_and_exit(-1);
320 else
321 usage();
301 } 322 }
302 } 323 }
303 324
@@ -306,15 +327,6 @@ int main(int argc, char **argv)
306 if (argc < 0) 327 if (argc < 0)
307 usage(); 328 usage();
308 329
309 if (json_output) {
310 json_wtr = jsonw_new(stdout);
311 if (!json_wtr) {
312 p_err("failed to create JSON writer");
313 return -1;
314 }
315 jsonw_pretty(json_wtr, pretty_output);
316 }
317
318 bfd_init(); 330 bfd_init();
319 331
320 ret = cmd_select(cmds, argc, argv, do_help); 332 ret = cmd_select(cmds, argc, argv, do_help);
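
Editor's note: the main.c change creates the JSON writer as soon as -j/-p is parsed, so p_err() can already emit JSON for bad options, and clean_and_exit() tears the writer down on every exit path. A loose stand-alone analogue of that create-early, destroy-on-exit pattern, using a plain FILE * because jsonw_new()/jsonw_destroy() are bpftool internals and only mirrored here:

#include <stdio.h>
#include <stdlib.h>

static FILE *out;			/* stands in for bpftool's json_wtr */

/* Mirror of clean_and_exit(): release the writer before leaving, on every path. */
static void clean_and_exit(int status)
{
	if (out)
		fclose(out);
	exit(status);
}

int main(int argc, char **argv)
{
	/* Create the writer while options are parsed, not afterwards, so that
	 * later error paths can use it too. */
	if (argc > 1) {
		out = fopen(argv[1], "w");
		if (!out) {
			fprintf(stderr, "failed to create writer\n");
			return -1;
		}
	}

	/* ... normal command handling would go here ... */

	clean_and_exit(0);
}
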
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 9c191e222d6f..bff330b49791 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -41,6 +41,7 @@
41#include <stdbool.h> 41#include <stdbool.h>
42#include <stdio.h> 42#include <stdio.h>
43#include <linux/bpf.h> 43#include <linux/bpf.h>
44#include <linux/compiler.h>
44#include <linux/kernel.h> 45#include <linux/kernel.h>
45#include <linux/hashtable.h> 46#include <linux/hashtable.h>
46 47
@@ -50,7 +51,7 @@
50 51
51#define NEXT_ARG() ({ argc--; argv++; if (argc < 0) usage(); }) 52#define NEXT_ARG() ({ argc--; argv++; if (argc < 0) usage(); })
52#define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); }) 53#define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); })
53#define BAD_ARG() ({ p_err("what is '%s'?\n", *argv); -1; }) 54#define BAD_ARG() ({ p_err("what is '%s'?", *argv); -1; })
54 55
55#define ERR_MAX_LEN 1024 56#define ERR_MAX_LEN 1024
56 57
@@ -80,7 +81,7 @@ void p_info(const char *fmt, ...);
80 81
81bool is_prefix(const char *pfx, const char *str); 82bool is_prefix(const char *pfx, const char *str);
82void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep); 83void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep);
83void usage(void) __attribute__((noreturn)); 84void usage(void) __noreturn;
84 85
85struct pinned_obj_table { 86struct pinned_obj_table {
86 DECLARE_HASHTABLE(table, 16); 87 DECLARE_HASHTABLE(table, 16);
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index e2450c8e88e6..a8c3a33dd185 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -523,21 +523,23 @@ static int do_show(int argc, char **argv)
523 break; 523 break;
524 p_err("can't get next map: %s%s", strerror(errno), 524 p_err("can't get next map: %s%s", strerror(errno),
525 errno == EINVAL ? " -- kernel too old?" : ""); 525 errno == EINVAL ? " -- kernel too old?" : "");
526 return -1; 526 break;
527 } 527 }
528 528
529 fd = bpf_map_get_fd_by_id(id); 529 fd = bpf_map_get_fd_by_id(id);
530 if (fd < 0) { 530 if (fd < 0) {
531 if (errno == ENOENT)
532 continue;
531 p_err("can't get map by id (%u): %s", 533 p_err("can't get map by id (%u): %s",
532 id, strerror(errno)); 534 id, strerror(errno));
533 return -1; 535 break;
534 } 536 }
535 537
536 err = bpf_obj_get_info_by_fd(fd, &info, &len); 538 err = bpf_obj_get_info_by_fd(fd, &info, &len);
537 if (err) { 539 if (err) {
538 p_err("can't get map info: %s", strerror(errno)); 540 p_err("can't get map info: %s", strerror(errno));
539 close(fd); 541 close(fd);
540 return -1; 542 break;
541 } 543 }
542 544
543 if (json_output) 545 if (json_output)
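
Editor's note: the map.c and prog.c hunks handle the same race: an object can disappear between the get_next_id and get_fd_by_id calls, in which case the fd lookup fails with ENOENT and the loop should move on rather than abort. A hedged sketch of that iteration pattern against libbpf, assuming a libbpf recent enough to ship these two calls and installed headers under bpf/:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>

int main(void)
{
	__u32 id = 0;
	int fd;

	while (!bpf_map_get_next_id(id, &id)) {
		fd = bpf_map_get_fd_by_id(id);
		if (fd < 0) {
			if (errno == ENOENT)
				continue;	/* map vanished between the two calls */
			perror("bpf_map_get_fd_by_id");
			break;
		}
		printf("map id %u -> fd %d\n", id, fd);
		close(fd);
	}
	return 0;
}
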
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index ad619b96c276..dded77345bfb 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -382,6 +382,8 @@ static int do_show(int argc, char **argv)
382 382
383 fd = bpf_prog_get_fd_by_id(id); 383 fd = bpf_prog_get_fd_by_id(id);
384 if (fd < 0) { 384 if (fd < 0) {
385 if (errno == ENOENT)
386 continue;
385 p_err("can't get prog by id (%u): %s", 387 p_err("can't get prog by id (%u): %s",
386 id, strerror(errno)); 388 id, strerror(errno));
387 err = -1; 389 err = -1;
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index eaa3bec273c8..4c99c57736ce 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -193,11 +193,14 @@ static void kvp_update_mem_state(int pool)
193 for (;;) { 193 for (;;) {
194 readp = &record[records_read]; 194 readp = &record[records_read];
195 records_read += fread(readp, sizeof(struct kvp_record), 195 records_read += fread(readp, sizeof(struct kvp_record),
196 ENTRIES_PER_BLOCK * num_blocks, 196 ENTRIES_PER_BLOCK * num_blocks - records_read,
197 filep); 197 filep);
198 198
199 if (ferror(filep)) { 199 if (ferror(filep)) {
200 syslog(LOG_ERR, "Failed to read file, pool: %d", pool); 200 syslog(LOG_ERR,
201 "Failed to read file, pool: %d; error: %d %s",
202 pool, errno, strerror(errno));
203 kvp_release_lock(pool);
201 exit(EXIT_FAILURE); 204 exit(EXIT_FAILURE);
202 } 205 }
203 206
@@ -210,6 +213,7 @@ static void kvp_update_mem_state(int pool)
210 213
211 if (record == NULL) { 214 if (record == NULL) {
212 syslog(LOG_ERR, "malloc failed"); 215 syslog(LOG_ERR, "malloc failed");
216 kvp_release_lock(pool);
213 exit(EXIT_FAILURE); 217 exit(EXIT_FAILURE);
214 } 218 }
215 continue; 219 continue;
@@ -224,15 +228,11 @@ static void kvp_update_mem_state(int pool)
224 fclose(filep); 228 fclose(filep);
225 kvp_release_lock(pool); 229 kvp_release_lock(pool);
226} 230}
231
227static int kvp_file_init(void) 232static int kvp_file_init(void)
228{ 233{
229 int fd; 234 int fd;
230 FILE *filep;
231 size_t records_read;
232 char *fname; 235 char *fname;
233 struct kvp_record *record;
234 struct kvp_record *readp;
235 int num_blocks;
236 int i; 236 int i;
237 int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK; 237 int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK;
238 238
@@ -246,61 +246,19 @@ static int kvp_file_init(void)
246 246
247 for (i = 0; i < KVP_POOL_COUNT; i++) { 247 for (i = 0; i < KVP_POOL_COUNT; i++) {
248 fname = kvp_file_info[i].fname; 248 fname = kvp_file_info[i].fname;
249 records_read = 0;
250 num_blocks = 1;
251 sprintf(fname, "%s/.kvp_pool_%d", KVP_CONFIG_LOC, i); 249 sprintf(fname, "%s/.kvp_pool_%d", KVP_CONFIG_LOC, i);
252 fd = open(fname, O_RDWR | O_CREAT | O_CLOEXEC, 0644 /* rw-r--r-- */); 250 fd = open(fname, O_RDWR | O_CREAT | O_CLOEXEC, 0644 /* rw-r--r-- */);
253 251
254 if (fd == -1) 252 if (fd == -1)
255 return 1; 253 return 1;
256 254
257
258 filep = fopen(fname, "re");
259 if (!filep) {
260 close(fd);
261 return 1;
262 }
263
264 record = malloc(alloc_unit * num_blocks);
265 if (record == NULL) {
266 fclose(filep);
267 close(fd);
268 return 1;
269 }
270 for (;;) {
271 readp = &record[records_read];
272 records_read += fread(readp, sizeof(struct kvp_record),
273 ENTRIES_PER_BLOCK,
274 filep);
275
276 if (ferror(filep)) {
277 syslog(LOG_ERR, "Failed to read file, pool: %d",
278 i);
279 exit(EXIT_FAILURE);
280 }
281
282 if (!feof(filep)) {
283 /*
284 * We have more data to read.
285 */
286 num_blocks++;
287 record = realloc(record, alloc_unit *
288 num_blocks);
289 if (record == NULL) {
290 fclose(filep);
291 close(fd);
292 return 1;
293 }
294 continue;
295 }
296 break;
297 }
298 kvp_file_info[i].fd = fd; 255 kvp_file_info[i].fd = fd;
299 kvp_file_info[i].num_blocks = num_blocks; 256 kvp_file_info[i].num_blocks = 1;
300 kvp_file_info[i].records = record; 257 kvp_file_info[i].records = malloc(alloc_unit);
301 kvp_file_info[i].num_records = records_read; 258 if (kvp_file_info[i].records == NULL)
302 fclose(filep); 259 return 1;
303 260 kvp_file_info[i].num_records = 0;
261 kvp_update_mem_state(i);
304 } 262 }
305 263
306 return 0; 264 return 0;
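
Editor's note: the functional fix in kvp_update_mem_state() is the fread() count: once records_read entries already sit in the buffer, only ENTRIES_PER_BLOCK * num_blocks - records_read more fit before the next realloc, otherwise the read can overrun the allocation. A small self-contained sketch of that grow-and-refill loop with a generic record type and invented constants, no locking or syslog:

#include <stdio.h>
#include <stdlib.h>

#define ENTRIES_PER_BLOCK 8

struct record { char key[32]; char value[32]; };

static struct record *read_all(FILE *fp, size_t *count)
{
	size_t num_blocks = 1, records_read = 0;
	struct record *buf = malloc(sizeof(*buf) * ENTRIES_PER_BLOCK);

	if (!buf)
		return NULL;

	for (;;) {
		/* Only ask for what still fits in the current allocation. */
		records_read += fread(&buf[records_read], sizeof(*buf),
				      ENTRIES_PER_BLOCK * num_blocks - records_read, fp);
		if (ferror(fp) || feof(fp))
			break;

		/* More data pending: grow by one block and keep reading. */
		num_blocks++;
		struct record *tmp = realloc(buf, sizeof(*buf) * ENTRIES_PER_BLOCK * num_blocks);
		if (!tmp)
			break;
		buf = tmp;
	}

	*count = records_read;
	return buf;
}

int main(void)
{
	size_t n = 0;
	struct record *r = read_all(stdin, &n);

	printf("read %zu records\n", n);
	free(r);
	return 0;
}
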
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index 07fd03c74a77..04e32f965ad7 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -84,8 +84,6 @@
84 84
85#define uninitialized_var(x) x = *(&(x)) 85#define uninitialized_var(x) x = *(&(x))
86 86
87#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
88
89#include <linux/types.h> 87#include <linux/types.h>
90 88
91/* 89/*
@@ -135,20 +133,19 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
135/* 133/*
136 * Prevent the compiler from merging or refetching reads or writes. The 134 * Prevent the compiler from merging or refetching reads or writes. The
137 * compiler is also forbidden from reordering successive instances of 135 * compiler is also forbidden from reordering successive instances of
138 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the 136 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
139 * compiler is aware of some particular ordering. One way to make the 137 * particular ordering. One way to make the compiler aware of ordering is to
140 * compiler aware of ordering is to put the two invocations of READ_ONCE, 138 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
141 * WRITE_ONCE or ACCESS_ONCE() in different C statements. 139 * statements.
142 * 140 *
143 * In contrast to ACCESS_ONCE these two macros will also work on aggregate 141 * These two macros will also work on aggregate data types like structs or
144 * data types like structs or unions. If the size of the accessed data 142 * unions. If the size of the accessed data type exceeds the word size of
145 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) 143 * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
146 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a 144 * fall back to memcpy and print a compile-time warning.
147 * compile-time warning.
148 * 145 *
149 * Their two major use cases are: (1) Mediating communication between 146 * Their two major use cases are: (1) Mediating communication between
150 * process-level code and irq/NMI handlers, all running on the same CPU, 147 * process-level code and irq/NMI handlers, all running on the same CPU,
151 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise 148 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
152 * mutilate accesses that either do not require ordering or that interact 149 * mutilate accesses that either do not require ordering or that interact
153 * with an explicit memory barrier or atomic instruction that provides the 150 * with an explicit memory barrier or atomic instruction that provides the
154 * required ordering. 151 * required ordering.
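
Editor's note: as the reworded comment block says, READ_ONCE()/WRITE_ONCE() stop the compiler from merging, refetching or reordering the marked accesses; they add no memory barriers. A minimal sketch of use case (1), a flag shared between a busy-wait loop and an asynchronous writer. The scalar-only volatile form below is a simplification of the real macros (which fall back to memcpy for large types), and pthreads stand in for the irq/NMI context purely for illustration:

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-ins for the macros documented above (scalar types only). */
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile typeof(x) *)&(x))

static int done;

static void *writer(void *arg)
{
	/* The marked store cannot be elided or deferred past other volatile accesses. */
	WRITE_ONCE(done, 1);
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, writer, NULL);

	/* Without READ_ONCE() the compiler may hoist the load and spin forever. */
	while (!READ_ONCE(done))
		;

	pthread_join(t, NULL);
	puts("done");
	return 0;
}
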
diff --git a/tools/include/linux/kmemcheck.h b/tools/include/linux/kmemcheck.h
deleted file mode 100644
index ea32a7d3cf1b..000000000000
--- a/tools/include/linux/kmemcheck.h
+++ /dev/null
@@ -1 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
diff --git a/tools/include/linux/lockdep.h b/tools/include/linux/lockdep.h
index 940c1b075659..6b0c36a58fcb 100644
--- a/tools/include/linux/lockdep.h
+++ b/tools/include/linux/lockdep.h
@@ -48,6 +48,7 @@ static inline int debug_locks_off(void)
48#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__) 48#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__)
49#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__) 49#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
50#define pr_warn pr_err 50#define pr_warn pr_err
51#define pr_cont pr_err
51 52
52#define list_del_rcu list_del 53#define list_del_rcu list_del
53 54
diff --git a/tools/include/uapi/asm-generic/bpf_perf_event.h b/tools/include/uapi/asm-generic/bpf_perf_event.h
new file mode 100644
index 000000000000..53815d2cd047
--- /dev/null
+++ b/tools/include/uapi/asm-generic/bpf_perf_event.h
@@ -0,0 +1,9 @@
1#ifndef _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__
2#define _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__
3
4#include <linux/ptrace.h>
5
6/* Export kernel pt_regs structure */
7typedef struct pt_regs bpf_user_pt_regs_t;
8
9#endif /* _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__ */
diff --git a/tools/include/uapi/asm-generic/mman.h b/tools/include/uapi/asm-generic/mman.h
index 2dffcbf705b3..653687d9771b 100644
--- a/tools/include/uapi/asm-generic/mman.h
+++ b/tools/include/uapi/asm-generic/mman.h
@@ -13,6 +13,7 @@
13#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 13#define MAP_NONBLOCK 0x10000 /* do not block on IO */
14#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ 14#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
15#define MAP_HUGETLB 0x40000 /* create a huge page mapping */ 15#define MAP_HUGETLB 0x40000 /* create a huge page mapping */
16#define MAP_SYNC 0x80000 /* perform synchronous page faults for the mapping */
16 17
17/* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */ 18/* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */
18 19
diff --git a/tools/include/uapi/asm/bpf_perf_event.h b/tools/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 000000000000..13a58531e6fa
--- /dev/null
+++ b/tools/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,7 @@
1#if defined(__aarch64__)
2#include "../../arch/arm64/include/uapi/asm/bpf_perf_event.h"
3#elif defined(__s390__)
4#include "../../arch/s390/include/uapi/asm/bpf_perf_event.h"
5#else
6#include <uapi/asm-generic/bpf_perf_event.h>
7#endif
diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h
index 97677cd6964d..6fdff5945c8a 100644
--- a/tools/include/uapi/drm/drm.h
+++ b/tools/include/uapi/drm/drm.h
@@ -737,6 +737,28 @@ struct drm_syncobj_array {
737 __u32 pad; 737 __u32 pad;
738}; 738};
739 739
740/* Query current scanout sequence number */
741struct drm_crtc_get_sequence {
742 __u32 crtc_id; /* requested crtc_id */
743 __u32 active; /* return: crtc output is active */
744 __u64 sequence; /* return: most recent vblank sequence */
745 __s64 sequence_ns; /* return: most recent time of first pixel out */
746};
747
748/* Queue event to be delivered at specified sequence. Time stamp marks
749 * when the first pixel of the refresh cycle leaves the display engine
750 * for the display
751 */
752#define DRM_CRTC_SEQUENCE_RELATIVE 0x00000001 /* sequence is relative to current */
753#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS 0x00000002 /* Use next sequence if we've missed */
754
755struct drm_crtc_queue_sequence {
756 __u32 crtc_id;
757 __u32 flags;
758 __u64 sequence; /* on input, target sequence. on output, actual sequence */
759 __u64 user_data; /* user data passed to event */
760};
761
740#if defined(__cplusplus) 762#if defined(__cplusplus)
741} 763}
742#endif 764#endif
@@ -819,6 +841,9 @@ extern "C" {
819 841
820#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank) 842#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
821 843
844#define DRM_IOCTL_CRTC_GET_SEQUENCE DRM_IOWR(0x3b, struct drm_crtc_get_sequence)
845#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE DRM_IOWR(0x3c, struct drm_crtc_queue_sequence)
846
822#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw) 847#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
823 848
824#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res) 849#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
@@ -863,6 +888,11 @@ extern "C" {
863#define DRM_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct drm_syncobj_array) 888#define DRM_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct drm_syncobj_array)
864#define DRM_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct drm_syncobj_array) 889#define DRM_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct drm_syncobj_array)
865 890
891#define DRM_IOCTL_MODE_CREATE_LEASE DRM_IOWR(0xC6, struct drm_mode_create_lease)
892#define DRM_IOCTL_MODE_LIST_LESSEES DRM_IOWR(0xC7, struct drm_mode_list_lessees)
893#define DRM_IOCTL_MODE_GET_LEASE DRM_IOWR(0xC8, struct drm_mode_get_lease)
894#define DRM_IOCTL_MODE_REVOKE_LEASE DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
895
866/** 896/**
867 * Device specific ioctls should only be in their respective headers 897 * Device specific ioctls should only be in their respective headers
868 * The device specific ioctl range is from 0x40 to 0x9f. 898 * The device specific ioctl range is from 0x40 to 0x9f.
@@ -893,6 +923,7 @@ struct drm_event {
893 923
894#define DRM_EVENT_VBLANK 0x01 924#define DRM_EVENT_VBLANK 0x01
895#define DRM_EVENT_FLIP_COMPLETE 0x02 925#define DRM_EVENT_FLIP_COMPLETE 0x02
926#define DRM_EVENT_CRTC_SEQUENCE 0x03
896 927
897struct drm_event_vblank { 928struct drm_event_vblank {
898 struct drm_event base; 929 struct drm_event base;
@@ -903,6 +934,16 @@ struct drm_event_vblank {
903 __u32 crtc_id; /* 0 on older kernels that do not support this */ 934 __u32 crtc_id; /* 0 on older kernels that do not support this */
904}; 935};
905 936
937/* Event delivered at sequence. Time stamp marks when the first pixel
938 * of the refresh cycle leaves the display engine for the display
939 */
940struct drm_event_crtc_sequence {
941 struct drm_event base;
942 __u64 user_data;
943 __s64 time_ns;
944 __u64 sequence;
945};
946
906/* typedef area */ 947/* typedef area */
907#ifndef __KERNEL__ 948#ifndef __KERNEL__
908typedef struct drm_clip_rect drm_clip_rect_t; 949typedef struct drm_clip_rect drm_clip_rect_t;
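
Editor's note: the new DRM_IOCTL_CRTC_GET_SEQUENCE/QUEUE_SEQUENCE ioctls expose the vblank sequence counter directly. A hedged user-space sketch that queries the current sequence for a CRTC and queues an event one refresh later; it assumes exported kernel headers on the include path, and the CRTC id is a placeholder you would normally obtain from drmModeGetResources():

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/drm.h>		/* the structs and ioctls added above */

int main(void)
{
	struct drm_crtc_get_sequence gs;
	struct drm_crtc_queue_sequence qs;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&gs, 0, sizeof(gs));
	gs.crtc_id = 42;			/* placeholder CRTC id */
	if (ioctl(fd, DRM_IOCTL_CRTC_GET_SEQUENCE, &gs) == 0)
		printf("active=%u sequence=%llu ns=%lld\n", gs.active,
		       (unsigned long long)gs.sequence, (long long)gs.sequence_ns);

	memset(&qs, 0, sizeof(qs));
	qs.crtc_id = 42;
	qs.flags = DRM_CRTC_SEQUENCE_RELATIVE;	/* relative to the current sequence */
	qs.sequence = 1;			/* deliver an event on the next vblank */
	qs.user_data = 0x1234;			/* echoed back in drm_event_crtc_sequence */
	ioctl(fd, DRM_IOCTL_CRTC_QUEUE_SEQUENCE, &qs);

	close(fd);
	return 0;
}
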
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 9816590d3ad2..ac3c6503ca27 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -397,10 +397,20 @@ typedef struct drm_i915_irq_wait {
397#define I915_PARAM_MIN_EU_IN_POOL 39 397#define I915_PARAM_MIN_EU_IN_POOL 39
398#define I915_PARAM_MMAP_GTT_VERSION 40 398#define I915_PARAM_MMAP_GTT_VERSION 40
399 399
400/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution 400/*
401 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
401 * priorities and the driver will attempt to execute batches in priority order. 402 * priorities and the driver will attempt to execute batches in priority order.
403 * The param returns a capability bitmask, nonzero implies that the scheduler
404 * is enabled, with different features present according to the mask.
405 *
406 * The initial priority for each batch is supplied by the context and is
407 * controlled via I915_CONTEXT_PARAM_PRIORITY.
402 */ 408 */
403#define I915_PARAM_HAS_SCHEDULER 41 409#define I915_PARAM_HAS_SCHEDULER 41
410#define I915_SCHEDULER_CAP_ENABLED (1ul << 0)
411#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
412#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
413
404#define I915_PARAM_HUC_STATUS 42 414#define I915_PARAM_HUC_STATUS 42
405 415
406/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of 416/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
@@ -1309,14 +1319,16 @@ struct drm_i915_reg_read {
1309 * be specified 1319 * be specified
1310 */ 1320 */
1311 __u64 offset; 1321 __u64 offset;
1322#define I915_REG_READ_8B_WA (1ul << 0)
1323
1312 __u64 val; /* Return value */ 1324 __u64 val; /* Return value */
1313}; 1325};
1314/* Known registers: 1326/* Known registers:
1315 * 1327 *
1316 * Render engine timestamp - 0x2358 + 64bit - gen7+ 1328 * Render engine timestamp - 0x2358 + 64bit - gen7+
1317 * - Note this register returns an invalid value if using the default 1329 * - Note this register returns an invalid value if using the default
1318 * single instruction 8byte read, in order to workaround that use 1330 * single instruction 8byte read, in order to workaround that pass
1319 * offset (0x2538 | 1) instead. 1331 * flag I915_REG_READ_8B_WA in offset field.
1320 * 1332 *
1321 */ 1333 */
1322 1334
@@ -1359,6 +1371,10 @@ struct drm_i915_gem_context_param {
1359#define I915_CONTEXT_PARAM_GTT_SIZE 0x3 1371#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
1360#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4 1372#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
1361#define I915_CONTEXT_PARAM_BANNABLE 0x5 1373#define I915_CONTEXT_PARAM_BANNABLE 0x5
1374#define I915_CONTEXT_PARAM_PRIORITY 0x6
1375#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
1376#define I915_CONTEXT_DEFAULT_PRIORITY 0
1377#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
1362 __u64 value; 1378 __u64 value;
1363}; 1379};
1364 1380
@@ -1510,9 +1526,14 @@ struct drm_i915_perf_oa_config {
1510 __u32 n_boolean_regs; 1526 __u32 n_boolean_regs;
1511 __u32 n_flex_regs; 1527 __u32 n_flex_regs;
1512 1528
1513 __u64 __user mux_regs_ptr; 1529 /*
1514 __u64 __user boolean_regs_ptr; 1530 * These fields are pointers to tuples of u32 values (register
1515 __u64 __user flex_regs_ptr; 1531 * address, value). For example the expected length of the buffer
1532 * pointed by mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
1533 */
1534 __u64 mux_regs_ptr;
1535 __u64 boolean_regs_ptr;
1536 __u64 flex_regs_ptr;
1516}; 1537};
1517 1538
1518#if defined(__cplusplus) 1539#if defined(__cplusplus)
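
Editor's note: I915_PARAM_HAS_SCHEDULER now returns a capability bitmask rather than a boolean. A hedged sketch of checking the new bits from user space; it assumes the usual drm_i915_getparam_t/DRM_IOCTL_I915_GETPARAM interface from the same header, which is not shown in this hunk:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/i915_drm.h>	/* I915_PARAM_HAS_SCHEDULER and the cap bits above */

int main(void)
{
	drm_i915_getparam_t gp = { .param = I915_PARAM_HAS_SCHEDULER };
	int caps = 0;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	gp.value = &caps;
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0) {
		printf("scheduler:  %s\n", caps & I915_SCHEDULER_CAP_ENABLED ? "enabled" : "off");
		printf("priorities: %s\n", caps & I915_SCHEDULER_CAP_PRIORITY ? "yes" : "no");
		printf("preemption: %s\n", caps & I915_SCHEDULER_CAP_PREEMPTION ? "yes" : "no");
	}

	close(fd);
	return 0;
}
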
diff --git a/tools/include/uapi/linux/bpf_perf_event.h b/tools/include/uapi/linux/bpf_perf_event.h
index 067427259820..8f95303f9d80 100644
--- a/tools/include/uapi/linux/bpf_perf_event.h
+++ b/tools/include/uapi/linux/bpf_perf_event.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
1/* Copyright (c) 2016 Facebook 2/* Copyright (c) 2016 Facebook
2 * 3 *
3 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
@@ -7,11 +8,10 @@
7#ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__ 8#ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__
8#define _UAPI__LINUX_BPF_PERF_EVENT_H__ 9#define _UAPI__LINUX_BPF_PERF_EVENT_H__
9 10
10#include <linux/types.h> 11#include <asm/bpf_perf_event.h>
11#include <linux/ptrace.h>
12 12
13struct bpf_perf_event_data { 13struct bpf_perf_event_data {
14 struct pt_regs regs; 14 bpf_user_pt_regs_t regs;
15 __u64 sample_period; 15 __u64 sample_period;
16}; 16};
17 17
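
Editor's note: with this change a perf-event BPF program sees bpf_user_pt_regs_t, which the new asm/bpf_perf_event.h shim maps onto the right per-architecture layout at build time. A hedged sketch of a program consuming this context, written against the usual BPF_PROG_TYPE_PERF_EVENT conventions; the SEC() macro and the clang -target bpf build are assumptions carried over from samples/bpf, not part of this diff:

#include <linux/bpf_perf_event.h>

/* Minimal stand-in for the helper macro normally provided by samples/bpf or libbpf. */
#define SEC(name) __attribute__((section(name), used))

SEC("perf_event")
int filter_small_periods(struct bpf_perf_event_data *ctx)
{
	/* ctx->regs is now bpf_user_pt_regs_t; sample_period is unchanged. */
	if (ctx->sample_period < 1000000)
		return 0;	/* returning 0 suppresses delivery of this sample */
	return 1;		/* nonzero lets the perf core handle it normally */
}

char _license[] SEC("license") = "GPL";
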
diff --git a/tools/include/uapi/linux/kcmp.h b/tools/include/uapi/linux/kcmp.h
index 481e103da78e..ef1305010925 100644
--- a/tools/include/uapi/linux/kcmp.h
+++ b/tools/include/uapi/linux/kcmp.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
1#ifndef _UAPI_LINUX_KCMP_H 2#ifndef _UAPI_LINUX_KCMP_H
2#define _UAPI_LINUX_KCMP_H 3#define _UAPI_LINUX_KCMP_H
3 4
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 7e99999d6236..496e59a2738b 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -630,9 +630,9 @@ struct kvm_s390_irq {
630 630
631struct kvm_s390_irq_state { 631struct kvm_s390_irq_state {
632 __u64 buf; 632 __u64 buf;
633 __u32 flags; 633 __u32 flags; /* will stay unused for compatibility reasons */
634 __u32 len; 634 __u32 len;
635 __u32 reserved[4]; 635 __u32 reserved[4]; /* will stay unused for compatibility reasons */
636}; 636};
637 637
638/* for KVM_SET_GUEST_DEBUG */ 638/* for KVM_SET_GUEST_DEBUG */
@@ -931,6 +931,7 @@ struct kvm_ppc_resize_hpt {
931#define KVM_CAP_PPC_SMT_POSSIBLE 147 931#define KVM_CAP_PPC_SMT_POSSIBLE 147
932#define KVM_CAP_HYPERV_SYNIC2 148 932#define KVM_CAP_HYPERV_SYNIC2 148
933#define KVM_CAP_HYPERV_VP_INDEX 149 933#define KVM_CAP_HYPERV_VP_INDEX 149
934#define KVM_CAP_S390_AIS_MIGRATION 150
934 935
935#ifdef KVM_CAP_IRQ_ROUTING 936#ifdef KVM_CAP_IRQ_ROUTING
936 937
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index 362493a2f950..b9a4953018ed 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -942,6 +942,7 @@ enum perf_callchain_context {
942#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */ 942#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
943#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */ 943#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
944#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */ 944#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */
945#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */
945 946
946#define PERF_FLAG_FD_NO_GROUP (1UL << 0) 947#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
947#define PERF_FLAG_FD_OUTPUT (1UL << 1) 948#define PERF_FLAG_FD_OUTPUT (1UL << 1)
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
index a8d0759a9e40..af5f8c2df87a 100644
--- a/tools/include/uapi/linux/prctl.h
+++ b/tools/include/uapi/linux/prctl.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
1#ifndef _LINUX_PRCTL_H 2#ifndef _LINUX_PRCTL_H
2#define _LINUX_PRCTL_H 3#define _LINUX_PRCTL_H
3 4
@@ -197,4 +198,13 @@ struct prctl_mm_map {
197# define PR_CAP_AMBIENT_LOWER 3 198# define PR_CAP_AMBIENT_LOWER 3
198# define PR_CAP_AMBIENT_CLEAR_ALL 4 199# define PR_CAP_AMBIENT_CLEAR_ALL 4
199 200
201/* arm64 Scalable Vector Extension controls */
202/* Flag values must be kept in sync with ptrace NT_ARM_SVE interface */
203#define PR_SVE_SET_VL 50 /* set task vector length */
204# define PR_SVE_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */
205#define PR_SVE_GET_VL 51 /* get task vector length */
206/* Bits common to PR_SVE_SET_VL and PR_SVE_GET_VL */
207# define PR_SVE_VL_LEN_MASK 0xffff
208# define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */
209
200#endif /* _LINUX_PRCTL_H */ 210#endif /* _LINUX_PRCTL_H */
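
Editor's note: the new PR_SVE_SET_VL/PR_SVE_GET_VL calls let an arm64 task control its SVE vector length; the low 16 bits carry the length in bytes, and PR_SVE_VL_INHERIT/PR_SVE_SET_VL_ONEXEC control how it propagates. A hedged sketch (only meaningful on an SVE-capable arm64 kernel; the 32-byte length is just an example value):

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>	/* PR_SVE_* additions from the hunk above */

int main(void)
{
	int ret;

	/* Request a 32-byte (256-bit) vector length, inherited across exec. */
	ret = prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT, 0, 0, 0);
	if (ret < 0) {
		perror("PR_SVE_SET_VL (no SVE support?)");
		return 1;
	}

	ret = prctl(PR_SVE_GET_VL, 0, 0, 0, 0);
	if (ret >= 0)
		printf("vector length: %d bytes, inherit: %s\n",
		       ret & PR_SVE_VL_LEN_MASK,
		       (ret & PR_SVE_VL_INHERIT) ? "yes" : "no");
	return 0;
}
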
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index 217cf6f95c36..a5684d0968b4 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -478,7 +478,7 @@ class Provider(object):
478 @staticmethod 478 @staticmethod
479 def is_field_wanted(fields_filter, field): 479 def is_field_wanted(fields_filter, field):
480 """Indicate whether field is valid according to fields_filter.""" 480 """Indicate whether field is valid according to fields_filter."""
481 if not fields_filter or fields_filter == "help": 481 if not fields_filter:
482 return True 482 return True
483 return re.match(fields_filter, field) is not None 483 return re.match(fields_filter, field) is not None
484 484
@@ -549,8 +549,8 @@ class TracepointProvider(Provider):
549 549
550 def update_fields(self, fields_filter): 550 def update_fields(self, fields_filter):
551 """Refresh fields, applying fields_filter""" 551 """Refresh fields, applying fields_filter"""
552 self._fields = [field for field in self.get_available_fields() 552 self.fields = [field for field in self.get_available_fields()
553 if self.is_field_wanted(fields_filter, field)] 553 if self.is_field_wanted(fields_filter, field)]
554 554
555 @staticmethod 555 @staticmethod
556 def get_online_cpus(): 556 def get_online_cpus():
@@ -950,7 +950,8 @@ class Tui(object):
950 curses.nocbreak() 950 curses.nocbreak()
951 curses.endwin() 951 curses.endwin()
952 952
953 def get_all_gnames(self): 953 @staticmethod
954 def get_all_gnames():
954 """Returns a list of (pid, gname) tuples of all running guests""" 955 """Returns a list of (pid, gname) tuples of all running guests"""
955 res = [] 956 res = []
956 try: 957 try:
@@ -963,7 +964,7 @@ class Tui(object):
963 # perform a sanity check before calling the more expensive 964 # perform a sanity check before calling the more expensive
964 # function to possibly extract the guest name 965 # function to possibly extract the guest name
965 if ' -name ' in line[1]: 966 if ' -name ' in line[1]:
966 res.append((line[0], self.get_gname_from_pid(line[0]))) 967 res.append((line[0], Tui.get_gname_from_pid(line[0])))
967 child.stdout.close() 968 child.stdout.close()
968 969
969 return res 970 return res
@@ -984,7 +985,8 @@ class Tui(object):
984 except Exception: 985 except Exception:
985 self.screen.addstr(row + 1, 2, 'Not available') 986 self.screen.addstr(row + 1, 2, 'Not available')
986 987
987 def get_pid_from_gname(self, gname): 988 @staticmethod
989 def get_pid_from_gname(gname):
988 """Fuzzy function to convert guest name to QEMU process pid. 990 """Fuzzy function to convert guest name to QEMU process pid.
989 991
990 Returns a list of potential pids, can be empty if no match found. 992 Returns a list of potential pids, can be empty if no match found.
@@ -992,7 +994,7 @@ class Tui(object):
992 994
993 """ 995 """
994 pids = [] 996 pids = []
995 for line in self.get_all_gnames(): 997 for line in Tui.get_all_gnames():
996 if gname == line[1]: 998 if gname == line[1]:
997 pids.append(int(line[0])) 999 pids.append(int(line[0]))
998 1000
@@ -1090,15 +1092,16 @@ class Tui(object):
1090 # sort by totals 1092 # sort by totals
1091 return (0, -stats[x][0]) 1093 return (0, -stats[x][0])
1092 total = 0. 1094 total = 0.
1093 for val in stats.values(): 1095 for key in stats.keys():
1094 total += val[0] 1096 if key.find('(') is -1:
1097 total += stats[key][0]
1095 if self._sorting == SORT_DEFAULT: 1098 if self._sorting == SORT_DEFAULT:
1096 sortkey = sortCurAvg 1099 sortkey = sortCurAvg
1097 else: 1100 else:
1098 sortkey = sortTotal 1101 sortkey = sortTotal
1102 tavg = 0
1099 for key in sorted(stats.keys(), key=sortkey): 1103 for key in sorted(stats.keys(), key=sortkey):
1100 1104 if row >= self.screen.getmaxyx()[0] - 1:
1101 if row >= self.screen.getmaxyx()[0]:
1102 break 1105 break
1103 values = stats[key] 1106 values = stats[key]
1104 if not values[0] and not values[1]: 1107 if not values[0] and not values[1]:
@@ -1110,9 +1113,15 @@ class Tui(object):
1110 self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' % 1113 self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' %
1111 (key, values[0], values[0] * 100 / total, 1114 (key, values[0], values[0] * 100 / total,
1112 cur)) 1115 cur))
1116 if cur is not '' and key.find('(') is -1:
1117 tavg += cur
1113 row += 1 1118 row += 1
1114 if row == 3: 1119 if row == 3:
1115 self.screen.addstr(4, 1, 'No matching events reported yet') 1120 self.screen.addstr(4, 1, 'No matching events reported yet')
1121 else:
1122 self.screen.addstr(row, 1, '%-40s %10d %8s' %
1123 ('Total', total, tavg if tavg else ''),
1124 curses.A_BOLD)
1116 self.screen.refresh() 1125 self.screen.refresh()
1117 1126
1118 def show_msg(self, text): 1127 def show_msg(self, text):
@@ -1358,7 +1367,7 @@ class Tui(object):
1358 if char == 'x': 1367 if char == 'x':
1359 self.update_drilldown() 1368 self.update_drilldown()
1360 # prevents display of current values on next refresh 1369 # prevents display of current values on next refresh
1361 self.stats.get() 1370 self.stats.get(self._display_guests)
1362 except KeyboardInterrupt: 1371 except KeyboardInterrupt:
1363 break 1372 break
1364 except curses.error: 1373 except curses.error:
@@ -1451,16 +1460,13 @@ Press any other key to refresh statistics immediately.
1451 try: 1460 try:
1452 pids = Tui.get_pid_from_gname(val) 1461 pids = Tui.get_pid_from_gname(val)
1453 except: 1462 except:
1454 raise optparse.OptionValueError('Error while searching for guest ' 1463 sys.exit('Error while searching for guest "{}". Use "-p" to '
1455 '"{}", use "-p" to specify a pid ' 1464 'specify a pid instead?'.format(val))
1456 'instead'.format(val))
1457 if len(pids) == 0: 1465 if len(pids) == 0:
1458 raise optparse.OptionValueError('No guest by the name "{}" ' 1466 sys.exit('Error: No guest by the name "{}" found'.format(val))
1459 'found'.format(val))
1460 if len(pids) > 1: 1467 if len(pids) > 1:
1461 raise optparse.OptionValueError('Multiple processes found (pids: ' 1468 sys.exit('Error: Multiple processes found (pids: {}). Use "-p" '
1462 '{}) - use "-p" to specify a pid ' 1469 'to specify the desired pid'.format(" ".join(pids)))
1463 'instead'.format(" ".join(pids)))
1464 parser.values.pid = pids[0] 1470 parser.values.pid = pids[0]
1465 1471
1466 optparser = optparse.OptionParser(description=description_text, 1472 optparser = optparse.OptionParser(description=description_text,
@@ -1518,7 +1524,16 @@ Press any other key to refresh statistics immediately.
1518 help='restrict statistics to guest by name', 1524 help='restrict statistics to guest by name',
1519 callback=cb_guest_to_pid, 1525 callback=cb_guest_to_pid,
1520 ) 1526 )
1521 (options, _) = optparser.parse_args(sys.argv) 1527 options, unkn = optparser.parse_args(sys.argv)
1528 if len(unkn) != 1:
1529 sys.exit('Error: Extra argument(s): ' + ' '.join(unkn[1:]))
1530 try:
1531 # verify that we were passed a valid regex up front
1532 re.compile(options.fields)
1533 except re.error:
1534 sys.exit('Error: "' + options.fields + '" is not a valid regular '
1535 'expression')
1536
1522 return options 1537 return options
1523 1538
1524 1539
@@ -1564,16 +1579,13 @@ def main():
1564 1579
1565 stats = Stats(options) 1580 stats = Stats(options)
1566 1581
1567 if options.fields == "help": 1582 if options.fields == 'help':
1568 event_list = "\n" 1583 stats.fields_filter = None
1569 s = stats.get() 1584 event_list = []
1570 for key in s.keys(): 1585 for key in stats.get().keys():
1571 if key.find('(') != -1: 1586 event_list.append(key.split('(', 1)[0])
1572 key = key[0:key.find('(')] 1587 sys.stdout.write(' ' + '\n '.join(sorted(set(event_list))) + '\n')
1573 if event_list.find('\n' + key + '\n') == -1: 1588 sys.exit(0)
1574 event_list += key + '\n'
1575 sys.stdout.write(event_list)
1576 return ""
1577 1589
1578 if options.log: 1590 if options.log:
1579 log(stats) 1591 log(stats)
diff --git a/tools/kvm/kvm_stat/kvm_stat.txt b/tools/kvm/kvm_stat/kvm_stat.txt
index e5cf836be8a1..b5b3810c9e94 100644
--- a/tools/kvm/kvm_stat/kvm_stat.txt
+++ b/tools/kvm/kvm_stat/kvm_stat.txt
@@ -50,6 +50,8 @@ INTERACTIVE COMMANDS
50*s*:: set update interval 50*s*:: set update interval
51 51
52*x*:: toggle reporting of stats for child trace events 52*x*:: toggle reporting of stats for child trace events
53 :: *Note*: The stats for the parents summarize the respective child trace
54 events
53 55
54Press any other key to refresh statistics immediately. 56Press any other key to refresh statistics immediately.
55 57
@@ -86,7 +88,7 @@ OPTIONS
86 88
87-f<fields>:: 89-f<fields>::
88--fields=<fields>:: 90--fields=<fields>::
89 fields to display (regex) 91 fields to display (regex), "-f help" for a list of available events
90 92
91-h:: 93-h::
92--help:: 94--help::
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 0f94af3ccaaa..ae0272f9a091 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -7,9 +7,11 @@ ARCH := x86
7endif 7endif
8 8
9# always use the host compiler 9# always use the host compiler
10CC = gcc 10HOSTCC ?= gcc
11LD = ld 11HOSTLD ?= ld
12AR = ar 12CC = $(HOSTCC)
13LD = $(HOSTLD)
14AR = ar
13 15
14ifeq ($(srctree),) 16ifeq ($(srctree),)
15srctree := $(patsubst %/,%,$(dir $(CURDIR))) 17srctree := $(patsubst %/,%,$(dir $(CURDIR)))
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 8acfc47af70e..540a209b78ab 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -138,7 +138,7 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
138 *type = INSN_STACK; 138 *type = INSN_STACK;
139 op->src.type = OP_SRC_ADD; 139 op->src.type = OP_SRC_ADD;
140 op->src.reg = op_to_cfi_reg[modrm_reg][rex_r]; 140 op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
141 op->dest.type = OP_SRC_REG; 141 op->dest.type = OP_DEST_REG;
142 op->dest.reg = CFI_SP; 142 op->dest.reg = CFI_SP;
143 } 143 }
144 break; 144 break;
diff --git a/tools/objtool/arch/x86/lib/x86-opcode-map.txt b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
index 12e377184ee4..e0b85930dd77 100644
--- a/tools/objtool/arch/x86/lib/x86-opcode-map.txt
+++ b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
@@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
607fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) 607fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
608fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) 608fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
609fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) 609fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
610ff: 610ff: UD0
611EndTable 611EndTable
612 612
613Table: 3-byte opcode 1 (0x0f 0x38) 613Table: 3-byte opcode 1 (0x0f 0x38)
@@ -717,7 +717,7 @@ AVXcode: 2
7177e: vpermt2d/q Vx,Hx,Wx (66),(ev) 7177e: vpermt2d/q Vx,Hx,Wx (66),(ev)
7187f: vpermt2ps/d Vx,Hx,Wx (66),(ev) 7187f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
71980: INVEPT Gy,Mdq (66) 71980: INVEPT Gy,Mdq (66)
72081: INVPID Gy,Mdq (66) 72081: INVVPID Gy,Mdq (66)
72182: INVPCID Gy,Mdq (66) 72182: INVPCID Gy,Mdq (66)
72283: vpmultishiftqb Vx,Hx,Wx (66),(ev) 72283: vpmultishiftqb Vx,Hx,Wx (66),(ev)
72388: vexpandps/d Vpd,Wpd (66),(ev) 72388: vexpandps/d Vpd,Wpd (66),(ev)
@@ -896,7 +896,7 @@ EndTable
896 896
897GrpTable: Grp3_1 897GrpTable: Grp3_1
8980: TEST Eb,Ib 8980: TEST Eb,Ib
8991: 8991: TEST Eb,Ib
9002: NOT Eb 9002: NOT Eb
9013: NEG Eb 9013: NEG Eb
9024: MUL AL,Eb 9024: MUL AL,Eb
@@ -970,6 +970,15 @@ GrpTable: Grp9
970EndTable 970EndTable
971 971
972GrpTable: Grp10 972GrpTable: Grp10
973# all are UD1
9740: UD1
9751: UD1
9762: UD1
9773: UD1
9784: UD1
9795: UD1
9806: UD1
9817: UD1
973EndTable 982EndTable
974 983
975# Grp11A and Grp11B are expressed as Grp11 in Intel SDM 984# Grp11A and Grp11B are expressed as Grp11 in Intel SDM
diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c
index 4c6b5c9ef073..91e8e19ff5e0 100644
--- a/tools/objtool/builtin-orc.c
+++ b/tools/objtool/builtin-orc.c
@@ -44,6 +44,9 @@ int cmd_orc(int argc, const char **argv)
44 const char *objname; 44 const char *objname;
45 45
46 argc--; argv++; 46 argc--; argv++;
47 if (argc <= 0)
48 usage_with_options(orc_usage, check_options);
49
47 if (!strncmp(argv[0], "gen", 3)) { 50 if (!strncmp(argv[0], "gen", 3)) {
48 argc = parse_options(argc, argv, check_options, orc_usage, 0); 51 argc = parse_options(argc, argv, check_options, orc_usage, 0);
49 if (argc != 1) 52 if (argc != 1)
@@ -52,7 +55,6 @@ int cmd_orc(int argc, const char **argv)
52 objname = argv[0]; 55 objname = argv[0];
53 56
54 return check(objname, no_fp, no_unreachable, true); 57 return check(objname, no_fp, no_unreachable, true);
55
56 } 58 }
57 59
58 if (!strcmp(argv[0], "dump")) { 60 if (!strcmp(argv[0], "dump")) {
diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c
index 36c5bf6a2675..c3343820916a 100644
--- a/tools/objtool/orc_dump.c
+++ b/tools/objtool/orc_dump.c
@@ -76,7 +76,8 @@ int orc_dump(const char *_objname)
76 int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0; 76 int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0;
77 struct orc_entry *orc = NULL; 77 struct orc_entry *orc = NULL;
78 char *name; 78 char *name;
79 unsigned long nr_sections, orc_ip_addr = 0; 79 size_t nr_sections;
80 Elf64_Addr orc_ip_addr = 0;
80 size_t shstrtab_idx; 81 size_t shstrtab_idx;
81 Elf *elf; 82 Elf *elf;
82 Elf_Scn *scn; 83 Elf_Scn *scn;
@@ -187,10 +188,10 @@ int orc_dump(const char *_objname)
187 return -1; 188 return -1;
188 } 189 }
189 190
190 printf("%s+%lx:", name, rela.r_addend); 191 printf("%s+%llx:", name, (unsigned long long)rela.r_addend);
191 192
192 } else { 193 } else {
193 printf("%lx:", orc_ip_addr + (i * sizeof(int)) + orc_ip[i]); 194 printf("%llx:", (unsigned long long)(orc_ip_addr + (i * sizeof(int)) + orc_ip[i]));
194 } 195 }
195 196
196 197
diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c
index e5ca31429c9b..e61fe703197b 100644
--- a/tools/objtool/orc_gen.c
+++ b/tools/objtool/orc_gen.c
@@ -165,6 +165,8 @@ int create_orc_sections(struct objtool_file *file)
165 165
166 /* create .orc_unwind_ip and .rela.orc_unwind_ip sections */ 166 /* create .orc_unwind_ip and .rela.orc_unwind_ip sections */
167 sec = elf_create_section(file->elf, ".orc_unwind_ip", sizeof(int), idx); 167 sec = elf_create_section(file->elf, ".orc_unwind_ip", sizeof(int), idx);
168 if (!sec)
169 return -1;
168 170
169 ip_relasec = elf_create_rela_section(file->elf, sec); 171 ip_relasec = elf_create_rela_section(file->elf, sec);
170 if (!ip_relasec) 172 if (!ip_relasec)
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index ed65e82f034e..0294bfb6c5f8 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -188,9 +188,7 @@ ifdef PYTHON_CONFIG
188 PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) 188 PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
189 PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil 189 PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
190 PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null) 190 PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
191 ifeq ($(CC_NO_CLANG), 1) 191 PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS))
192 PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS))
193 endif
194 FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) 192 FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
195endif 193endif
196 194
@@ -576,14 +574,15 @@ ifndef NO_GTK2
576 endif 574 endif
577endif 575endif
578 576
579
580ifdef NO_LIBPERL 577ifdef NO_LIBPERL
581 CFLAGS += -DNO_LIBPERL 578 CFLAGS += -DNO_LIBPERL
582else 579else
583 PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null) 580 PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null)
584 PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS)) 581 PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS))
585 PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS)) 582 PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
586 PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null` 583 PERL_EMBED_CCOPTS = $(shell perl -MExtUtils::Embed -e ccopts 2>/dev/null)
584 PERL_EMBED_CCOPTS := $(filter-out -specs=%,$(PERL_EMBED_CCOPTS))
585 PERL_EMBED_LDOPTS := $(filter-out -specs=%,$(PERL_EMBED_LDOPTS))
587 FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS) 586 FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
588 587
589 ifneq ($(feature-libperl), 1) 588 ifneq ($(feature-libperl), 1)
diff --git a/tools/perf/arch/s390/Makefile b/tools/perf/arch/s390/Makefile
index 21322e0385b8..09ba923debe8 100644
--- a/tools/perf/arch/s390/Makefile
+++ b/tools/perf/arch/s390/Makefile
@@ -2,3 +2,4 @@ ifndef NO_DWARF
2PERF_HAVE_DWARF_REGS := 1 2PERF_HAVE_DWARF_REGS := 1
3endif 3endif
4HAVE_KVM_STAT_SUPPORT := 1 4HAVE_KVM_STAT_SUPPORT := 1
5PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
diff --git a/tools/perf/arch/s390/include/perf_regs.h b/tools/perf/arch/s390/include/perf_regs.h
index d2df54a6bc5a..bcfbaed78cc2 100644
--- a/tools/perf/arch/s390/include/perf_regs.h
+++ b/tools/perf/arch/s390/include/perf_regs.h
@@ -3,7 +3,7 @@
3 3
4#include <stdlib.h> 4#include <stdlib.h>
5#include <linux/types.h> 5#include <linux/types.h>
6#include <../../../../arch/s390/include/uapi/asm/perf_regs.h> 6#include <asm/perf_regs.h>
7 7
8void perf_regs_load(u64 *regs); 8void perf_regs_load(u64 *regs);
9 9
diff --git a/tools/perf/arch/s390/util/dwarf-regs.c b/tools/perf/arch/s390/util/dwarf-regs.c
index f47576ce13ea..a8ace5cc6301 100644
--- a/tools/perf/arch/s390/util/dwarf-regs.c
+++ b/tools/perf/arch/s390/util/dwarf-regs.c
@@ -2,17 +2,43 @@
2/* 2/*
3 * Mapping of DWARF debug register numbers into register names. 3 * Mapping of DWARF debug register numbers into register names.
4 * 4 *
5 * Copyright IBM Corp. 2010 5 * Copyright IBM Corp. 2010, 2017
6 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, 6 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
7 * Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
7 * 8 *
8 */ 9 */
9 10
11#include <errno.h>
10#include <stddef.h> 12#include <stddef.h>
11#include <dwarf-regs.h> 13#include <stdlib.h>
12#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <asm/ptrace.h>
16#include <string.h>
17#include <dwarf-regs.h>
13#include "dwarf-regs-table.h" 18#include "dwarf-regs-table.h"
14 19
15const char *get_arch_regstr(unsigned int n) 20const char *get_arch_regstr(unsigned int n)
16{ 21{
17 return (n >= ARRAY_SIZE(s390_dwarf_regs)) ? NULL : s390_dwarf_regs[n]; 22 return (n >= ARRAY_SIZE(s390_dwarf_regs)) ? NULL : s390_dwarf_regs[n];
18} 23}
24
25/*
26 * Convert the register name into an offset to struct pt_regs (kernel).
27 * This is required by the BPF prologue generator. The BPF
28 * program is called in the BPF overflow handler in the perf
29 * core.
30 */
31int regs_query_register_offset(const char *name)
32{
33 unsigned long gpr;
34
35 if (!name || strncmp(name, "%r", 2))
36 return -EINVAL;
37
38 errno = 0;
39 gpr = strtoul(name + 2, NULL, 10);
40 if (errno || gpr >= 16)
41 return -EINVAL;
42
43 return offsetof(user_pt_regs, gprs) + 8 * gpr;
44}
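
Editor's note: the new s390 regs_query_register_offset() turns a name such as "%r11" into the byte offset of that GPR inside user_pt_regs, which the BPF prologue generator then uses to pick the value out of the saved registers. A quick illustration of the arithmetic, stand-alone, with a trimmed user_pt_regs used only for the offsetof; the real layout comes from asm/ptrace.h:

#include <stddef.h>
#include <stdio.h>

/* Trimmed copy for illustration only; see the s390 uapi asm/ptrace.h for the real one. */
typedef struct {
	unsigned long args[1];
	unsigned long psw_mask;
	unsigned long psw_addr;
	unsigned long gprs[16];
} user_pt_regs;

int main(void)
{
	/* "%r11" -> offsetof(user_pt_regs, gprs) + 8 * 11, matching the code above. */
	printf("%%r11 offset: %zu\n", offsetof(user_pt_regs, gprs) + 8 * 11);
	return 0;
}
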
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index d95fdcc26f4b..944070e98a2c 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -216,6 +216,47 @@ static const char * const numa_usage[] = {
216 NULL 216 NULL
217}; 217};
218 218
219/*
220 * To get number of numa nodes present.
221 */
222static int nr_numa_nodes(void)
223{
224 int i, nr_nodes = 0;
225
226 for (i = 0; i < g->p.nr_nodes; i++) {
227 if (numa_bitmask_isbitset(numa_nodes_ptr, i))
228 nr_nodes++;
229 }
230
231 return nr_nodes;
232}
233
234/*
235 * Check whether the given NUMA node is present.
236 */
237static int is_node_present(int node)
238{
239 return numa_bitmask_isbitset(numa_nodes_ptr, node);
240}
241
242/*
243 * Check whether the given NUMA node has any CPUs.
244 */
245static bool node_has_cpus(int node)
246{
247 struct bitmask *cpu = numa_allocate_cpumask();
248 unsigned int i;
249
250 if (cpu && !numa_node_to_cpus(node, cpu)) {
251 for (i = 0; i < cpu->size; i++) {
252 if (numa_bitmask_isbitset(cpu, i))
253 return true;
254 }
255 }
256
257 return false; /* let's fall back to "no cpus" safely */
258}
259
219static cpu_set_t bind_to_cpu(int target_cpu) 260static cpu_set_t bind_to_cpu(int target_cpu)
220{ 261{
221 cpu_set_t orig_mask, mask; 262 cpu_set_t orig_mask, mask;
@@ -244,12 +285,12 @@ static cpu_set_t bind_to_cpu(int target_cpu)
244 285
245static cpu_set_t bind_to_node(int target_node) 286static cpu_set_t bind_to_node(int target_node)
246{ 287{
247 int cpus_per_node = g->p.nr_cpus/g->p.nr_nodes; 288 int cpus_per_node = g->p.nr_cpus / nr_numa_nodes();
248 cpu_set_t orig_mask, mask; 289 cpu_set_t orig_mask, mask;
249 int cpu; 290 int cpu;
250 int ret; 291 int ret;
251 292
252 BUG_ON(cpus_per_node*g->p.nr_nodes != g->p.nr_cpus); 293 BUG_ON(cpus_per_node * nr_numa_nodes() != g->p.nr_cpus);
253 BUG_ON(!cpus_per_node); 294 BUG_ON(!cpus_per_node);
254 295
255 ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask); 296 ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
@@ -649,7 +690,7 @@ static int parse_setup_node_list(void)
649 int i; 690 int i;
650 691
651 for (i = 0; i < mul; i++) { 692 for (i = 0; i < mul; i++) {
652 if (t >= g->p.nr_tasks) { 693 if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {
653 printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node); 694 printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
654 goto out; 695 goto out;
655 } 696 }
@@ -964,6 +1005,8 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
964 sum = 0; 1005 sum = 0;
965 1006
966 for (node = 0; node < g->p.nr_nodes; node++) { 1007 for (node = 0; node < g->p.nr_nodes; node++) {
1008 if (!is_node_present(node))
1009 continue;
967 nr = nodes[node]; 1010 nr = nodes[node];
968 nr_min = min(nr, nr_min); 1011 nr_min = min(nr, nr_min);
969 nr_max = max(nr, nr_max); 1012 nr_max = max(nr, nr_max);
@@ -984,8 +1027,11 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
984 process_groups = 0; 1027 process_groups = 0;
985 1028
986 for (node = 0; node < g->p.nr_nodes; node++) { 1029 for (node = 0; node < g->p.nr_nodes; node++) {
987 int processes = count_node_processes(node); 1030 int processes;
988 1031
1032 if (!is_node_present(node))
1033 continue;
1034 processes = count_node_processes(node);
989 nr = nodes[node]; 1035 nr = nodes[node];
990 tprintf(" %2d/%-2d", nr, processes); 1036 tprintf(" %2d/%-2d", nr, processes);
991 1037
@@ -1291,7 +1337,7 @@ static void print_summary(void)
1291 1337
1292 printf("\n ###\n"); 1338 printf("\n ###\n");
1293 printf(" # %d %s will execute (on %d nodes, %d CPUs):\n", 1339 printf(" # %d %s will execute (on %d nodes, %d CPUs):\n",
1294 g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", g->p.nr_nodes, g->p.nr_cpus); 1340 g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus);
1295 printf(" # %5dx %5ldMB global shared mem operations\n", 1341 printf(" # %5dx %5ldMB global shared mem operations\n",
1296 g->p.nr_loops, g->p.bytes_global/1024/1024); 1342 g->p.nr_loops, g->p.bytes_global/1024/1024);
1297 printf(" # %5dx %5ldMB process shared mem operations\n", 1343 printf(" # %5dx %5ldMB process shared mem operations\n",
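
The numa.c changes stop assuming that node IDs 0..nr_nodes-1 are all present and instead consult libnuma's numa_nodes_ptr bitmask, and they skip memory-only nodes when binding tasks. A small standalone sketch of that pattern follows (link with -lnuma; the helper names are illustrative, not perf's):

/* Sketch: count present NUMA nodes and nodes that actually have CPUs (libnuma). */
#include <numa.h>
#include <stdbool.h>
#include <stdio.h>

static bool node_has_cpus(int node)
{
    struct bitmask *cpus = numa_allocate_cpumask();
    bool ret = false;
    unsigned int i;

    if (cpus && !numa_node_to_cpus(node, cpus)) {
        for (i = 0; i < cpus->size; i++) {
            if (numa_bitmask_isbitset(cpus, i)) {
                ret = true;
                break;
            }
        }
    }
    if (cpus)
        numa_free_cpumask(cpus);
    return ret;
}

int main(void)
{
    int node, present = 0;

    if (numa_available() < 0) {
        fprintf(stderr, "libnuma: NUMA not available\n");
        return 1;
    }

    for (node = 0; node <= numa_max_node(); node++) {
        if (!numa_bitmask_isbitset(numa_nodes_ptr, node))
            continue;               /* node ID not present on this system */
        present++;
        printf("node %d: %s\n", node,
               node_has_cpus(node) ? "has cpus" : "memory only");
    }
    printf("%d node(s) present\n", present);
    return 0;
}
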
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index bd1fedef3d1c..a0f7ed2b869b 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -284,7 +284,7 @@ static int perf_help_config(const char *var, const char *value, void *cb)
284 add_man_viewer(value); 284 add_man_viewer(value);
285 return 0; 285 return 0;
286 } 286 }
287 if (!strstarts(var, "man.")) 287 if (strstarts(var, "man."))
288 return add_man_viewer_info(var, value); 288 return add_man_viewer_info(var, value);
289 289
290 return 0; 290 return 0;
@@ -314,7 +314,7 @@ static const char *cmd_to_page(const char *perf_cmd)
314 314
315 if (!perf_cmd) 315 if (!perf_cmd)
316 return "perf"; 316 return "perf";
317 else if (!strstarts(perf_cmd, "perf")) 317 else if (strstarts(perf_cmd, "perf"))
318 return perf_cmd; 318 return perf_cmd;
319 319
320 return asprintf(&s, "perf-%s", perf_cmd) < 0 ? NULL : s; 320 return asprintf(&s, "perf-%s", perf_cmd) < 0 ? NULL : s;
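
The two builtin-help.c hunks fix an inverted condition, apparently left over from converting prefixcmp() to strstarts(): prefixcmp() returned 0 on a match, while strstarts() returns true on a match, so the negation had to be dropped. A tiny demonstration of the difference, with both helpers reimplemented locally for illustration:

/* Sketch: prefixcmp() vs strstarts() return conventions. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static int prefixcmp(const char *str, const char *prefix)
{
    return strncmp(str, prefix, strlen(prefix));        /* 0 means "has prefix" */
}

static bool strstarts(const char *str, const char *prefix)
{
    return strncmp(str, prefix, strlen(prefix)) == 0;   /* true means "has prefix" */
}

int main(void)
{
    const char *var = "man.viewer.info";

    /* old code: if (!prefixcmp(var, "man."))  -> taken on a match */
    printf("!prefixcmp(): %d\n", !prefixcmp(var, "man."));
    /* converted code must lose the '!': if (strstarts(var, "man.")) */
    printf("strstarts():  %d\n", strstarts(var, "man."));
    return 0;
}
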
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 3d7f33e19df2..003255910c05 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -339,6 +339,22 @@ static int record__open(struct record *rec)
339 struct perf_evsel_config_term *err_term; 339 struct perf_evsel_config_term *err_term;
340 int rc = 0; 340 int rc = 0;
341 341
342 /*
343 * For initial_delay we need to add a dummy event so that we can track
344 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
345 * real events, the ones asked by the user.
346 */
347 if (opts->initial_delay) {
348 if (perf_evlist__add_dummy(evlist))
349 return -ENOMEM;
350
351 pos = perf_evlist__first(evlist);
352 pos->tracking = 0;
353 pos = perf_evlist__last(evlist);
354 pos->tracking = 1;
355 pos->attr.enable_on_exec = 1;
356 }
357
342 perf_evlist__config(evlist, opts, &callchain_param); 358 perf_evlist__config(evlist, opts, &callchain_param);
343 359
344 evlist__for_each_entry(evlist, pos) { 360 evlist__for_each_entry(evlist, pos) {
@@ -749,17 +765,19 @@ static int record__synthesize(struct record *rec, bool tail)
749 goto out; 765 goto out;
750 } 766 }
751 767
752 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event, 768 if (!perf_evlist__exclude_kernel(rec->evlist)) {
753 machine); 769 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
754 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n" 770 machine);
755 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n" 771 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
756 "Check /proc/kallsyms permission or run as root.\n"); 772 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
757 773 "Check /proc/kallsyms permission or run as root.\n");
758 err = perf_event__synthesize_modules(tool, process_synthesized_event, 774
759 machine); 775 err = perf_event__synthesize_modules(tool, process_synthesized_event,
760 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n" 776 machine);
761 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n" 777 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
762 "Check /proc/modules permission or run as root.\n"); 778 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
779 "Check /proc/modules permission or run as root.\n");
780 }
763 781
764 if (perf_guest) { 782 if (perf_guest) {
765 machines__process_guests(&session->machines, 783 machines__process_guests(&session->machines,
@@ -1693,7 +1711,7 @@ int cmd_record(int argc, const char **argv)
1693 1711
1694 err = -ENOMEM; 1712 err = -ENOMEM;
1695 1713
1696 if (symbol_conf.kptr_restrict) 1714 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
1697 pr_warning( 1715 pr_warning(
1698"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n" 1716"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
1699"check /proc/sys/kernel/kptr_restrict.\n\n" 1717"check /proc/sys/kernel/kptr_restrict.\n\n"
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 1394cd8d96f7..af5dd038195e 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -441,6 +441,9 @@ static void report__warn_kptr_restrict(const struct report *rep)
441 struct map *kernel_map = machine__kernel_map(&rep->session->machines.host); 441 struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
442 struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL; 442 struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;
443 443
444 if (perf_evlist__exclude_kernel(rep->session->evlist))
445 return;
446
444 if (kernel_map == NULL || 447 if (kernel_map == NULL ||
445 (kernel_map->dso->hit && 448 (kernel_map->dso->hit &&
446 (kernel_kmap->ref_reloc_sym == NULL || 449 (kernel_kmap->ref_reloc_sym == NULL ||
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 68f36dc0344f..9b43bda45a41 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -1955,6 +1955,16 @@ static int perf_script__fopen_per_event_dump(struct perf_script *script)
1955 struct perf_evsel *evsel; 1955 struct perf_evsel *evsel;
1956 1956
1957 evlist__for_each_entry(script->session->evlist, evsel) { 1957 evlist__for_each_entry(script->session->evlist, evsel) {
1958 /*
1959 * Already setup? I.e. we may be called twice in cases like
1960 * Intel PT, one for the intel_pt// and dummy events, then
1961 * for the evsels synthesized from the auxtrace info.
1962 *
1963 * See perf_script__process_auxtrace_info().
1964 */
1965 if (evsel->priv != NULL)
1966 continue;
1967
1958 evsel->priv = perf_evsel_script__new(evsel, script->session->data); 1968 evsel->priv = perf_evsel_script__new(evsel, script->session->data);
1959 if (evsel->priv == NULL) 1969 if (evsel->priv == NULL)
1960 goto out_err_fclose; 1970 goto out_err_fclose;
@@ -2838,6 +2848,25 @@ int process_cpu_map_event(struct perf_tool *tool __maybe_unused,
2838 return set_maps(script); 2848 return set_maps(script);
2839} 2849}
2840 2850
2851#ifdef HAVE_AUXTRACE_SUPPORT
2852static int perf_script__process_auxtrace_info(struct perf_tool *tool,
2853 union perf_event *event,
2854 struct perf_session *session)
2855{
2856 int ret = perf_event__process_auxtrace_info(tool, event, session);
2857
2858 if (ret == 0) {
2859 struct perf_script *script = container_of(tool, struct perf_script, tool);
2860
2861 ret = perf_script__setup_per_event_dump(script);
2862 }
2863
2864 return ret;
2865}
2866#else
2867#define perf_script__process_auxtrace_info 0
2868#endif
2869
2841int cmd_script(int argc, const char **argv) 2870int cmd_script(int argc, const char **argv)
2842{ 2871{
2843 bool show_full_info = false; 2872 bool show_full_info = false;
@@ -2866,7 +2895,7 @@ int cmd_script(int argc, const char **argv)
2866 .feature = perf_event__process_feature, 2895 .feature = perf_event__process_feature,
2867 .build_id = perf_event__process_build_id, 2896 .build_id = perf_event__process_build_id,
2868 .id_index = perf_event__process_id_index, 2897 .id_index = perf_event__process_id_index,
2869 .auxtrace_info = perf_event__process_auxtrace_info, 2898 .auxtrace_info = perf_script__process_auxtrace_info,
2870 .auxtrace = perf_event__process_auxtrace, 2899 .auxtrace = perf_event__process_auxtrace,
2871 .auxtrace_error = perf_event__process_auxtrace_error, 2900 .auxtrace_error = perf_event__process_auxtrace_error,
2872 .stat = perf_event__process_stat_event, 2901 .stat = perf_event__process_stat_event,
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 477a8699f0b5..9e0d2645ae13 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -77,6 +77,7 @@
77#include "sane_ctype.h" 77#include "sane_ctype.h"
78 78
79static volatile int done; 79static volatile int done;
80static volatile int resize;
80 81
81#define HEADER_LINE_NR 5 82#define HEADER_LINE_NR 5
82 83
@@ -85,11 +86,13 @@ static void perf_top__update_print_entries(struct perf_top *top)
85 top->print_entries = top->winsize.ws_row - HEADER_LINE_NR; 86 top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
86} 87}
87 88
88static void perf_top__sig_winch(int sig __maybe_unused, 89static void winch_sig(int sig __maybe_unused)
89 siginfo_t *info __maybe_unused, void *arg)
90{ 90{
91 struct perf_top *top = arg; 91 resize = 1;
92}
92 93
94static void perf_top__resize(struct perf_top *top)
95{
93 get_term_dimensions(&top->winsize); 96 get_term_dimensions(&top->winsize);
94 perf_top__update_print_entries(top); 97 perf_top__update_print_entries(top);
95} 98}
@@ -473,12 +476,8 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
473 case 'e': 476 case 'e':
474 prompt_integer(&top->print_entries, "Enter display entries (lines)"); 477 prompt_integer(&top->print_entries, "Enter display entries (lines)");
475 if (top->print_entries == 0) { 478 if (top->print_entries == 0) {
476 struct sigaction act = { 479 perf_top__resize(top);
477 .sa_sigaction = perf_top__sig_winch, 480 signal(SIGWINCH, winch_sig);
478 .sa_flags = SA_SIGINFO,
479 };
480 perf_top__sig_winch(SIGWINCH, NULL, top);
481 sigaction(SIGWINCH, &act, NULL);
482 } else { 481 } else {
483 signal(SIGWINCH, SIG_DFL); 482 signal(SIGWINCH, SIG_DFL);
484 } 483 }
@@ -732,14 +731,16 @@ static void perf_event__process_sample(struct perf_tool *tool,
732 if (!machine->kptr_restrict_warned && 731 if (!machine->kptr_restrict_warned &&
733 symbol_conf.kptr_restrict && 732 symbol_conf.kptr_restrict &&
734 al.cpumode == PERF_RECORD_MISC_KERNEL) { 733 al.cpumode == PERF_RECORD_MISC_KERNEL) {
735 ui__warning( 734 if (!perf_evlist__exclude_kernel(top->session->evlist)) {
735 ui__warning(
736"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n" 736"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
737"Check /proc/sys/kernel/kptr_restrict.\n\n" 737"Check /proc/sys/kernel/kptr_restrict.\n\n"
738"Kernel%s samples will not be resolved.\n", 738"Kernel%s samples will not be resolved.\n",
739 al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ? 739 al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
740 " modules" : ""); 740 " modules" : "");
741 if (use_browser <= 0) 741 if (use_browser <= 0)
742 sleep(5); 742 sleep(5);
743 }
743 machine->kptr_restrict_warned = true; 744 machine->kptr_restrict_warned = true;
744 } 745 }
745 746
@@ -1030,6 +1031,11 @@ static int __cmd_top(struct perf_top *top)
1030 1031
1031 if (hits == top->samples) 1032 if (hits == top->samples)
1032 ret = perf_evlist__poll(top->evlist, 100); 1033 ret = perf_evlist__poll(top->evlist, 100);
1034
1035 if (resize) {
1036 perf_top__resize(top);
1037 resize = 0;
1038 }
1033 } 1039 }
1034 1040
1035 ret = 0; 1041 ret = 0;
@@ -1352,12 +1358,8 @@ int cmd_top(int argc, const char **argv)
1352 1358
1353 get_term_dimensions(&top.winsize); 1359 get_term_dimensions(&top.winsize);
1354 if (top.print_entries == 0) { 1360 if (top.print_entries == 0) {
1355 struct sigaction act = {
1356 .sa_sigaction = perf_top__sig_winch,
1357 .sa_flags = SA_SIGINFO,
1358 };
1359 perf_top__update_print_entries(&top); 1361 perf_top__update_print_entries(&top);
1360 sigaction(SIGWINCH, &act, NULL); 1362 signal(SIGWINCH, winch_sig);
1361 } 1363 }
1362 1364
1363 status = __cmd_top(&top); 1365 status = __cmd_top(&top);
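
builtin-top.c stops doing real work inside the SIGWINCH handler: the handler now only sets a volatile flag, and the main loop performs the resize, which keeps non-async-signal-safe code out of signal context. The same pattern in isolation (terminal size read via TIOCGWINSZ; illustrative only):

/* Sketch: defer terminal-resize handling from the signal handler to the main loop. */
#include <signal.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

static volatile sig_atomic_t resize;

static void winch_sig(int sig)
{
    (void)sig;
    resize = 1;                 /* async-signal-safe: just set a flag */
}

static void do_resize(void)
{
    struct winsize ws;

    if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) == 0)
        printf("resized to %u rows x %u cols\n", ws.ws_row, ws.ws_col);
}

int main(void)
{
    signal(SIGWINCH, winch_sig);

    for (;;) {
        /* ... normal event-loop work would go here ... */
        pause();                /* wait for a signal */
        if (resize) {
            do_resize();        /* heavy lifting outside the handler */
            resize = 0;
        }
    }
    return 0;
}
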
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index f2757d38c7d7..84debdbad327 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1152,12 +1152,14 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
1152 if (trace->host == NULL) 1152 if (trace->host == NULL)
1153 return -ENOMEM; 1153 return -ENOMEM;
1154 1154
1155 if (trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr) < 0) 1155 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
1156 return -errno; 1156 if (err < 0)
1157 goto out;
1157 1158
1158 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target, 1159 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
1159 evlist->threads, trace__tool_process, false, 1160 evlist->threads, trace__tool_process, false,
1160 trace->opts.proc_map_timeout, 1); 1161 trace->opts.proc_map_timeout, 1);
1162out:
1161 if (err) 1163 if (err)
1162 symbol__exit(); 1164 symbol__exit();
1163 1165
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 77406d25e521..3e64f10b6d66 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -21,6 +21,7 @@ arch/x86/include/asm/cpufeatures.h
21arch/arm/include/uapi/asm/perf_regs.h 21arch/arm/include/uapi/asm/perf_regs.h
22arch/arm64/include/uapi/asm/perf_regs.h 22arch/arm64/include/uapi/asm/perf_regs.h
23arch/powerpc/include/uapi/asm/perf_regs.h 23arch/powerpc/include/uapi/asm/perf_regs.h
24arch/s390/include/uapi/asm/perf_regs.h
24arch/x86/include/uapi/asm/perf_regs.h 25arch/x86/include/uapi/asm/perf_regs.h
25arch/x86/include/uapi/asm/kvm.h 26arch/x86/include/uapi/asm/kvm.h
26arch/x86/include/uapi/asm/kvm_perf.h 27arch/x86/include/uapi/asm/kvm_perf.h
@@ -30,6 +31,7 @@ arch/x86/include/uapi/asm/vmx.h
30arch/powerpc/include/uapi/asm/kvm.h 31arch/powerpc/include/uapi/asm/kvm.h
31arch/s390/include/uapi/asm/kvm.h 32arch/s390/include/uapi/asm/kvm.h
32arch/s390/include/uapi/asm/kvm_perf.h 33arch/s390/include/uapi/asm/kvm_perf.h
34arch/s390/include/uapi/asm/ptrace.h
33arch/s390/include/uapi/asm/sie.h 35arch/s390/include/uapi/asm/sie.h
34arch/arm/include/uapi/asm/kvm.h 36arch/arm/include/uapi/asm/kvm.h
35arch/arm64/include/uapi/asm/kvm.h 37arch/arm64/include/uapi/asm/kvm.h
diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c
index cf36de7ea255..0c6d1002b524 100644
--- a/tools/perf/jvmti/jvmti_agent.c
+++ b/tools/perf/jvmti/jvmti_agent.c
@@ -384,13 +384,13 @@ jvmti_write_code(void *agent, char const *sym,
384} 384}
385 385
386int 386int
387jvmti_write_debug_info(void *agent, uint64_t code, const char *file, 387jvmti_write_debug_info(void *agent, uint64_t code,
388 jvmti_line_info_t *li, int nr_lines) 388 int nr_lines, jvmti_line_info_t *li,
389 const char * const * file_names)
389{ 390{
390 struct jr_code_debug_info rec; 391 struct jr_code_debug_info rec;
391 size_t sret, len, size, flen; 392 size_t sret, len, size, flen = 0;
392 uint64_t addr; 393 uint64_t addr;
393 const char *fn = file;
394 FILE *fp = agent; 394 FILE *fp = agent;
395 int i; 395 int i;
396 396
@@ -405,7 +405,9 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
405 return -1; 405 return -1;
406 } 406 }
407 407
408 flen = strlen(file) + 1; 408 for (i = 0; i < nr_lines; ++i) {
409 flen += strlen(file_names[i]) + 1;
410 }
409 411
410 rec.p.id = JIT_CODE_DEBUG_INFO; 412 rec.p.id = JIT_CODE_DEBUG_INFO;
411 size = sizeof(rec); 413 size = sizeof(rec);
@@ -421,7 +423,7 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
421 * file[] : source file name 423 * file[] : source file name
422 */ 424 */
423 size += nr_lines * sizeof(struct debug_entry); 425 size += nr_lines * sizeof(struct debug_entry);
424 size += flen * nr_lines; 426 size += flen;
425 rec.p.total_size = size; 427 rec.p.total_size = size;
426 428
427 /* 429 /*
@@ -452,7 +454,7 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
452 if (sret != 1) 454 if (sret != 1)
453 goto error; 455 goto error;
454 456
455 sret = fwrite_unlocked(fn, flen, 1, fp); 457 sret = fwrite_unlocked(file_names[i], strlen(file_names[i]) + 1, 1, fp);
456 if (sret != 1) 458 if (sret != 1)
457 goto error; 459 goto error;
458 } 460 }
diff --git a/tools/perf/jvmti/jvmti_agent.h b/tools/perf/jvmti/jvmti_agent.h
index fe32d8344a82..6ed82f6c06dd 100644
--- a/tools/perf/jvmti/jvmti_agent.h
+++ b/tools/perf/jvmti/jvmti_agent.h
@@ -14,6 +14,7 @@ typedef struct {
14 unsigned long pc; 14 unsigned long pc;
15 int line_number; 15 int line_number;
16 int discrim; /* discriminator -- 0 for now */ 16 int discrim; /* discriminator -- 0 for now */
17 jmethodID methodID;
17} jvmti_line_info_t; 18} jvmti_line_info_t;
18 19
19void *jvmti_open(void); 20void *jvmti_open(void);
@@ -22,11 +23,9 @@ int jvmti_write_code(void *agent, char const *symbol_name,
22 uint64_t vma, void const *code, 23 uint64_t vma, void const *code,
23 const unsigned int code_size); 24 const unsigned int code_size);
24 25
25int jvmti_write_debug_info(void *agent, 26int jvmti_write_debug_info(void *agent, uint64_t code, int nr_lines,
26 uint64_t code,
27 const char *file,
28 jvmti_line_info_t *li, 27 jvmti_line_info_t *li,
29 int nr_lines); 28 const char * const * file_names);
30 29
31#if defined(__cplusplus) 30#if defined(__cplusplus)
32} 31}
diff --git a/tools/perf/jvmti/libjvmti.c b/tools/perf/jvmti/libjvmti.c
index c62c9fc9a525..6add3e982614 100644
--- a/tools/perf/jvmti/libjvmti.c
+++ b/tools/perf/jvmti/libjvmti.c
@@ -47,6 +47,7 @@ do_get_line_numbers(jvmtiEnv *jvmti, void *pc, jmethodID m, jint bci,
47 tab[lines].pc = (unsigned long)pc; 47 tab[lines].pc = (unsigned long)pc;
48 tab[lines].line_number = loc_tab[i].line_number; 48 tab[lines].line_number = loc_tab[i].line_number;
49 tab[lines].discrim = 0; /* not yet used */ 49 tab[lines].discrim = 0; /* not yet used */
50 tab[lines].methodID = m;
50 lines++; 51 lines++;
51 } else { 52 } else {
52 break; 53 break;
@@ -125,6 +126,99 @@ get_line_numbers(jvmtiEnv *jvmti, const void *compile_info, jvmti_line_info_t **
125 return JVMTI_ERROR_NONE; 126 return JVMTI_ERROR_NONE;
126} 127}
127 128
129static void
130copy_class_filename(const char * class_sign, const char * file_name, char * result, size_t max_length)
131{
132 /*
133 * Assume path name is class hierarchy, this is a common practice with Java programs
134 */
135 if (*class_sign == 'L') {
136 int j, i = 0;
137 char *p = strrchr(class_sign, '/');
138 if (p) {
139 /* drop the 'L' prefix and copy up to the final '/' */
140 for (i = 0; i < (p - class_sign); i++)
141 result[i] = class_sign[i+1];
142 }
143 /*
144 * append the file name; we use loops rather than string ops to avoid modifying
145 * class_sign which is used later for the symbol name
146 */
147 for (j = 0; i < (max_length - 1) && file_name && j < strlen(file_name); j++, i++)
148 result[i] = file_name[j];
149
150 result[i] = '\0';
151 } else {
152 /* fallback case */
153 size_t file_name_len = strlen(file_name);
154 strncpy(result, file_name, file_name_len < max_length ? file_name_len : max_length);
155 }
156}
157
158static jvmtiError
159get_source_filename(jvmtiEnv *jvmti, jmethodID methodID, char ** buffer)
160{
161 jvmtiError ret;
162 jclass decl_class;
163 char *file_name = NULL;
164 char *class_sign = NULL;
165 char fn[PATH_MAX];
166 size_t len;
167
168 ret = (*jvmti)->GetMethodDeclaringClass(jvmti, methodID, &decl_class);
169 if (ret != JVMTI_ERROR_NONE) {
170 print_error(jvmti, "GetMethodDeclaringClass", ret);
171 return ret;
172 }
173
174 ret = (*jvmti)->GetSourceFileName(jvmti, decl_class, &file_name);
175 if (ret != JVMTI_ERROR_NONE) {
176 print_error(jvmti, "GetSourceFileName", ret);
177 return ret;
178 }
179
180 ret = (*jvmti)->GetClassSignature(jvmti, decl_class, &class_sign, NULL);
181 if (ret != JVMTI_ERROR_NONE) {
182 print_error(jvmti, "GetClassSignature", ret);
183 goto free_file_name_error;
184 }
185
186 copy_class_filename(class_sign, file_name, fn, PATH_MAX);
187 len = strlen(fn);
188 *buffer = malloc((len + 1) * sizeof(char));
189 if (!*buffer) {
190 print_error(jvmti, "GetClassSignature", ret);
191 ret = JVMTI_ERROR_OUT_OF_MEMORY;
192 goto free_class_sign_error;
193 }
194 strcpy(*buffer, fn);
195 ret = JVMTI_ERROR_NONE;
196
197free_class_sign_error:
198 (*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign);
199free_file_name_error:
200 (*jvmti)->Deallocate(jvmti, (unsigned char *)file_name);
201
202 return ret;
203}
204
205static jvmtiError
206fill_source_filenames(jvmtiEnv *jvmti, int nr_lines,
207 const jvmti_line_info_t * line_tab,
208 char ** file_names)
209{
210 int index;
211 jvmtiError ret;
212
213 for (index = 0; index < nr_lines; ++index) {
214 ret = get_source_filename(jvmti, line_tab[index].methodID, &(file_names[index]));
215 if (ret != JVMTI_ERROR_NONE)
216 return ret;
217 }
218
219 return JVMTI_ERROR_NONE;
220}
221
128static void JNICALL 222static void JNICALL
129compiled_method_load_cb(jvmtiEnv *jvmti, 223compiled_method_load_cb(jvmtiEnv *jvmti,
130 jmethodID method, 224 jmethodID method,
@@ -135,16 +229,18 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
135 const void *compile_info) 229 const void *compile_info)
136{ 230{
137 jvmti_line_info_t *line_tab = NULL; 231 jvmti_line_info_t *line_tab = NULL;
232 char ** line_file_names = NULL;
138 jclass decl_class; 233 jclass decl_class;
139 char *class_sign = NULL; 234 char *class_sign = NULL;
140 char *func_name = NULL; 235 char *func_name = NULL;
141 char *func_sign = NULL; 236 char *func_sign = NULL;
142 char *file_name= NULL; 237 char *file_name = NULL;
143 char fn[PATH_MAX]; 238 char fn[PATH_MAX];
144 uint64_t addr = (uint64_t)(uintptr_t)code_addr; 239 uint64_t addr = (uint64_t)(uintptr_t)code_addr;
145 jvmtiError ret; 240 jvmtiError ret;
146 int nr_lines = 0; /* in line_tab[] */ 241 int nr_lines = 0; /* in line_tab[] */
147 size_t len; 242 size_t len;
243 int output_debug_info = 0;
148 244
149 ret = (*jvmti)->GetMethodDeclaringClass(jvmti, method, 245 ret = (*jvmti)->GetMethodDeclaringClass(jvmti, method,
150 &decl_class); 246 &decl_class);
@@ -158,6 +254,19 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
158 if (ret != JVMTI_ERROR_NONE) { 254 if (ret != JVMTI_ERROR_NONE) {
159 warnx("jvmti: cannot get line table for method"); 255 warnx("jvmti: cannot get line table for method");
160 nr_lines = 0; 256 nr_lines = 0;
257 } else if (nr_lines > 0) {
258 line_file_names = malloc(sizeof(char*) * nr_lines);
259 if (!line_file_names) {
260 warnx("jvmti: cannot allocate space for line table method names");
261 } else {
262 memset(line_file_names, 0, sizeof(char*) * nr_lines);
263 ret = fill_source_filenames(jvmti, nr_lines, line_tab, line_file_names);
264 if (ret != JVMTI_ERROR_NONE) {
265 warnx("jvmti: fill_source_filenames failed");
266 } else {
267 output_debug_info = 1;
268 }
269 }
161 } 270 }
162 } 271 }
163 272
@@ -181,33 +290,14 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
181 goto error; 290 goto error;
182 } 291 }
183 292
184 /* 293 copy_class_filename(class_sign, file_name, fn, PATH_MAX);
185 * Assume path name is class hierarchy, this is a common practice with Java programs 294
186 */
187 if (*class_sign == 'L') {
188 int j, i = 0;
189 char *p = strrchr(class_sign, '/');
190 if (p) {
191 /* drop the 'L' prefix and copy up to the final '/' */
192 for (i = 0; i < (p - class_sign); i++)
193 fn[i] = class_sign[i+1];
194 }
195 /*
196 * append file name, we use loops and not string ops to avoid modifying
197 * class_sign which is used later for the symbol name
198 */
199 for (j = 0; i < (PATH_MAX - 1) && file_name && j < strlen(file_name); j++, i++)
200 fn[i] = file_name[j];
201 fn[i] = '\0';
202 } else {
203 /* fallback case */
204 strcpy(fn, file_name);
205 }
206 /* 295 /*
207 * write source line info record if we have it 296 * write source line info record if we have it
208 */ 297 */
209 if (jvmti_write_debug_info(jvmti_agent, addr, fn, line_tab, nr_lines)) 298 if (output_debug_info)
210 warnx("jvmti: write_debug_info() failed"); 299 if (jvmti_write_debug_info(jvmti_agent, addr, nr_lines, line_tab, (const char * const *) line_file_names))
300 warnx("jvmti: write_debug_info() failed");
211 301
212 len = strlen(func_name) + strlen(class_sign) + strlen(func_sign) + 2; 302 len = strlen(func_name) + strlen(class_sign) + strlen(func_sign) + 2;
213 { 303 {
@@ -223,6 +313,13 @@ error:
223 (*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign); 313 (*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign);
224 (*jvmti)->Deallocate(jvmti, (unsigned char *)file_name); 314 (*jvmti)->Deallocate(jvmti, (unsigned char *)file_name);
225 free(line_tab); 315 free(line_tab);
316 while (line_file_names && (nr_lines > 0)) {
317 if (line_file_names[nr_lines - 1]) {
318 free(line_file_names[nr_lines - 1]);
319 }
320 nr_lines -= 1;
321 }
322 free(line_file_names);
226} 323}
227 324
228static void JNICALL 325static void JNICALL
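
copy_class_filename() above derives a source path by combining the JVM class signature reported by GetClassSignature() (e.g. "Ljava/util/HashMap;") with the bare file name from GetSourceFileName() (e.g. "HashMap.java"), yielding "java/util/HashMap.java". A standalone rendering of that transformation, with the same 'L'-prefix handling (simplified; not the agent code itself):

/* Sketch: build "java/util/HashMap.java" from "Ljava/util/HashMap;" + "HashMap.java". */
#include <limits.h>
#include <stdio.h>
#include <string.h>

static void class_to_path(const char *class_sign, const char *file_name,
                          char *out, size_t max_len)
{
    size_t i = 0;

    if (class_sign && *class_sign == 'L') {
        const char *p = strrchr(class_sign, '/');

        if (p) {
            /* drop the leading 'L', keep everything up to the last '/' */
            for (i = 0; i < (size_t)(p - class_sign) && i < max_len - 1; i++)
                out[i] = class_sign[i + 1];
        }
    }
    /* append the plain file name */
    while (file_name && *file_name && i < max_len - 1)
        out[i++] = *file_name++;
    out[i] = '\0';
}

int main(void)
{
    char path[PATH_MAX];

    class_to_path("Ljava/util/HashMap;", "HashMap.java", path, sizeof(path));
    printf("%s\n", path);       /* java/util/HashMap.java */

    class_to_path("HashMap", "HashMap.java", path, sizeof(path));
    printf("%s\n", path);       /* HashMap.java (fallback, no 'L' prefix) */
    return 0;
}
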
diff --git a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
index 7a84d73324e3..8b3da21a08f1 100755
--- a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
@@ -10,8 +10,8 @@
10 10
11. $(dirname $0)/lib/probe.sh 11. $(dirname $0)/lib/probe.sh
12 12
13ld=$(realpath /lib64/ld*.so.* | uniq) 13libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g')
14libc=$(echo $ld | sed 's/ld/libc/g') 14nm -g $libc 2>/dev/null | fgrep -q inet_pton || exit 254
15 15
16trace_libc_inet_pton_backtrace() { 16trace_libc_inet_pton_backtrace() {
17 idx=0 17 idx=0
@@ -37,6 +37,9 @@ trace_libc_inet_pton_backtrace() {
37 done 37 done
38} 38}
39 39
40# Check for IPv6 interface existence
41ip a sh lo | fgrep -q inet6 || exit 2
42
40skip_if_no_perf_probe && \ 43skip_if_no_perf_probe && \
41perf probe -q $libc inet_pton && \ 44perf probe -q $libc inet_pton && \
42trace_libc_inet_pton_backtrace 45trace_libc_inet_pton_backtrace
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
index 2e68c5f120da..2a9ef080efd0 100755
--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -17,8 +17,10 @@ skip_if_no_perf_probe || exit 2
17file=$(mktemp /tmp/temporary_file.XXXXX) 17file=$(mktemp /tmp/temporary_file.XXXXX)
18 18
19trace_open_vfs_getname() { 19trace_open_vfs_getname() {
20 perf trace -e open touch $file 2>&1 | \ 20 test "$(uname -m)" = s390x && { svc="openat"; txt="dfd: +CWD, +"; }
21 egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open\(filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$" 21
22 perf trace -e ${svc:-open} touch $file 2>&1 | \
23 egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ ${svc:-open}\(${txt}filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
22} 24}
23 25
24 26
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index bc4a7344e274..89c8e1604ca7 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -84,7 +84,11 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
84 84
85 evsel = perf_evlist__first(evlist); 85 evsel = perf_evlist__first(evlist);
86 evsel->attr.task = 1; 86 evsel->attr.task = 1;
87#ifdef __s390x__
88 evsel->attr.sample_freq = 1000000;
89#else
87 evsel->attr.sample_freq = 1; 90 evsel->attr.sample_freq = 1;
91#endif
88 evsel->attr.inherit = 0; 92 evsel->attr.inherit = 0;
89 evsel->attr.watermark = 0; 93 evsel->attr.watermark = 0;
90 evsel->attr.wakeup_events = 1; 94 evsel->attr.wakeup_events = 1;
diff --git a/tools/perf/trace/beauty/mmap.c b/tools/perf/trace/beauty/mmap.c
index 9e1668b2c5d7..417e3ecfe9d7 100644
--- a/tools/perf/trace/beauty/mmap.c
+++ b/tools/perf/trace/beauty/mmap.c
@@ -62,6 +62,9 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
62 P_MMAP_FLAG(POPULATE); 62 P_MMAP_FLAG(POPULATE);
63 P_MMAP_FLAG(STACK); 63 P_MMAP_FLAG(STACK);
64 P_MMAP_FLAG(UNINITIALIZED); 64 P_MMAP_FLAG(UNINITIALIZED);
65#ifdef MAP_SYNC
66 P_MMAP_FLAG(SYNC);
67#endif
65#undef P_MMAP_FLAG 68#undef P_MMAP_FLAG
66 69
67 if (flags) 70 if (flags)
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index da1c4c4a0dd8..3369c7830260 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -165,7 +165,7 @@ static void ins__delete(struct ins_operands *ops)
165static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size, 165static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
166 struct ins_operands *ops) 166 struct ins_operands *ops)
167{ 167{
168 return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->raw); 168 return scnprintf(bf, size, "%-6s %s", ins->name, ops->raw);
169} 169}
170 170
171int ins__scnprintf(struct ins *ins, char *bf, size_t size, 171int ins__scnprintf(struct ins *ins, char *bf, size_t size,
@@ -230,12 +230,12 @@ static int call__scnprintf(struct ins *ins, char *bf, size_t size,
230 struct ins_operands *ops) 230 struct ins_operands *ops)
231{ 231{
232 if (ops->target.name) 232 if (ops->target.name)
233 return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->target.name); 233 return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.name);
234 234
235 if (ops->target.addr == 0) 235 if (ops->target.addr == 0)
236 return ins__raw_scnprintf(ins, bf, size, ops); 236 return ins__raw_scnprintf(ins, bf, size, ops);
237 237
238 return scnprintf(bf, size, "%-6.6s *%" PRIx64, ins->name, ops->target.addr); 238 return scnprintf(bf, size, "%-6s *%" PRIx64, ins->name, ops->target.addr);
239} 239}
240 240
241static struct ins_ops call_ops = { 241static struct ins_ops call_ops = {
@@ -299,7 +299,7 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
299 c++; 299 c++;
300 } 300 }
301 301
302 return scnprintf(bf, size, "%-6.6s %.*s%" PRIx64, 302 return scnprintf(bf, size, "%-6s %.*s%" PRIx64,
303 ins->name, c ? c - ops->raw : 0, ops->raw, 303 ins->name, c ? c - ops->raw : 0, ops->raw,
304 ops->target.offset); 304 ops->target.offset);
305} 305}
@@ -372,7 +372,7 @@ static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
372 if (ops->locked.ins.ops == NULL) 372 if (ops->locked.ins.ops == NULL)
373 return ins__raw_scnprintf(ins, bf, size, ops); 373 return ins__raw_scnprintf(ins, bf, size, ops);
374 374
375 printed = scnprintf(bf, size, "%-6.6s ", ins->name); 375 printed = scnprintf(bf, size, "%-6s ", ins->name);
376 return printed + ins__scnprintf(&ops->locked.ins, bf + printed, 376 return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
377 size - printed, ops->locked.ops); 377 size - printed, ops->locked.ops);
378} 378}
@@ -448,7 +448,7 @@ out_free_source:
448static int mov__scnprintf(struct ins *ins, char *bf, size_t size, 448static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
449 struct ins_operands *ops) 449 struct ins_operands *ops)
450{ 450{
451 return scnprintf(bf, size, "%-6.6s %s,%s", ins->name, 451 return scnprintf(bf, size, "%-6s %s,%s", ins->name,
452 ops->source.name ?: ops->source.raw, 452 ops->source.name ?: ops->source.raw,
453 ops->target.name ?: ops->target.raw); 453 ops->target.name ?: ops->target.raw);
454} 454}
@@ -488,7 +488,7 @@ static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops
488static int dec__scnprintf(struct ins *ins, char *bf, size_t size, 488static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
489 struct ins_operands *ops) 489 struct ins_operands *ops)
490{ 490{
491 return scnprintf(bf, size, "%-6.6s %s", ins->name, 491 return scnprintf(bf, size, "%-6s %s", ins->name,
492 ops->target.name ?: ops->target.raw); 492 ops->target.name ?: ops->target.raw);
493} 493}
494 494
@@ -500,7 +500,7 @@ static struct ins_ops dec_ops = {
500static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size, 500static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
501 struct ins_operands *ops __maybe_unused) 501 struct ins_operands *ops __maybe_unused)
502{ 502{
503 return scnprintf(bf, size, "%-6.6s", "nop"); 503 return scnprintf(bf, size, "%-6s", "nop");
504} 504}
505 505
506static struct ins_ops nop_ops = { 506static struct ins_ops nop_ops = {
@@ -924,7 +924,7 @@ void disasm_line__free(struct disasm_line *dl)
924int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw) 924int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
925{ 925{
926 if (raw || !dl->ins.ops) 926 if (raw || !dl->ins.ops)
927 return scnprintf(bf, size, "%-6.6s %s", dl->ins.name, dl->ops.raw); 927 return scnprintf(bf, size, "%-6s %s", dl->ins.name, dl->ops.raw);
928 928
929 return ins__scnprintf(&dl->ins, bf, size, &dl->ops); 929 return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
930} 930}
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index c6c891e154a6..b62e523a7035 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -257,7 +257,7 @@ int perf_evlist__add_dummy(struct perf_evlist *evlist)
257 .config = PERF_COUNT_SW_DUMMY, 257 .config = PERF_COUNT_SW_DUMMY,
258 .size = sizeof(attr), /* to capture ABI version */ 258 .size = sizeof(attr), /* to capture ABI version */
259 }; 259 };
260 struct perf_evsel *evsel = perf_evsel__new(&attr); 260 struct perf_evsel *evsel = perf_evsel__new_idx(&attr, evlist->nr_entries);
261 261
262 if (evsel == NULL) 262 if (evsel == NULL)
263 return -ENOMEM; 263 return -ENOMEM;
@@ -1786,3 +1786,15 @@ void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
1786state_err: 1786state_err:
1787 return; 1787 return;
1788} 1788}
1789
1790bool perf_evlist__exclude_kernel(struct perf_evlist *evlist)
1791{
1792 struct perf_evsel *evsel;
1793
1794 evlist__for_each_entry(evlist, evsel) {
1795 if (!evsel->attr.exclude_kernel)
1796 return false;
1797 }
1798
1799 return true;
1800}
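
perf_evlist__exclude_kernel() returns true only when every event in the evlist sets attr.exclude_kernel, and the record/report/top call sites above use it to skip kptr_restrict warnings and kernel-side synthesis that would be pointless for user-space-only sessions. The same all-of check over plain perf_event_attr structures, outside perf's evlist type and purely for illustration:

/* Sketch: "does every event exclude the kernel?" over an array of attrs. */
#include <linux/perf_event.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool all_exclude_kernel(const struct perf_event_attr *attrs, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        if (!attrs[i].exclude_kernel)
            return false;       /* one kernel-visible event is enough */
    }
    return true;
}

int main(void)
{
    struct perf_event_attr attrs[2];

    memset(attrs, 0, sizeof(attrs));
    attrs[0].exclude_kernel = 1;        /* e.g. cycles:u */
    attrs[1].exclude_kernel = 1;        /* e.g. instructions:u */
    printf("user-space-only session: %s\n",
           all_exclude_kernel(attrs, 2) ? "yes" : "no");

    attrs[1].exclude_kernel = 0;        /* e.g. plain cycles */
    printf("user-space-only session: %s\n",
           all_exclude_kernel(attrs, 2) ? "yes" : "no");
    return 0;
}
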
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index e72ae64c11ac..491f69542920 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -312,4 +312,6 @@ perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);
312 312
313struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist, 313struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
314 union perf_event *event); 314 union perf_event *event);
315
316bool perf_evlist__exclude_kernel(struct perf_evlist *evlist);
315#endif /* __PERF_EVLIST_H */ 317#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index f894893c203d..d5fbcf8c7aa7 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -733,12 +733,16 @@ static void apply_config_terms(struct perf_evsel *evsel,
733 list_for_each_entry(term, config_terms, list) { 733 list_for_each_entry(term, config_terms, list) {
734 switch (term->type) { 734 switch (term->type) {
735 case PERF_EVSEL__CONFIG_TERM_PERIOD: 735 case PERF_EVSEL__CONFIG_TERM_PERIOD:
736 attr->sample_period = term->val.period; 736 if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
737 attr->freq = 0; 737 attr->sample_period = term->val.period;
738 attr->freq = 0;
739 }
738 break; 740 break;
739 case PERF_EVSEL__CONFIG_TERM_FREQ: 741 case PERF_EVSEL__CONFIG_TERM_FREQ:
740 attr->sample_freq = term->val.freq; 742 if (!(term->weak && opts->user_freq != UINT_MAX)) {
741 attr->freq = 1; 743 attr->sample_freq = term->val.freq;
744 attr->freq = 1;
745 }
742 break; 746 break;
743 case PERF_EVSEL__CONFIG_TERM_TIME: 747 case PERF_EVSEL__CONFIG_TERM_TIME:
744 if (term->val.time) 748 if (term->val.time)
@@ -1371,7 +1375,7 @@ perf_evsel__process_group_data(struct perf_evsel *leader,
1371static int 1375static int
1372perf_evsel__read_group(struct perf_evsel *leader, int cpu, int thread) 1376perf_evsel__read_group(struct perf_evsel *leader, int cpu, int thread)
1373{ 1377{
1374 struct perf_stat_evsel *ps = leader->priv; 1378 struct perf_stat_evsel *ps = leader->stats;
1375 u64 read_format = leader->attr.read_format; 1379 u64 read_format = leader->attr.read_format;
1376 int size = perf_evsel__read_size(leader); 1380 int size = perf_evsel__read_size(leader);
1377 u64 *data = ps->group_data; 1381 u64 *data = ps->group_data;
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 9277df96ffda..157f49e8a772 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -67,6 +67,7 @@ struct perf_evsel_config_term {
67 bool overwrite; 67 bool overwrite;
68 char *branch; 68 char *branch;
69 } val; 69 } val;
70 bool weak;
70}; 71};
71 72
72struct perf_stat_evsel; 73struct perf_stat_evsel;
diff --git a/tools/perf/util/intel-pt-decoder/inat.h b/tools/perf/util/intel-pt-decoder/inat.h
index 125ecd2a300d..52dc8d911173 100644
--- a/tools/perf/util/intel-pt-decoder/inat.h
+++ b/tools/perf/util/intel-pt-decoder/inat.h
@@ -97,6 +97,16 @@
97#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM) 97#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM)
98#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS) 98#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS)
99 99
100/* Identifiers for segment registers */
101#define INAT_SEG_REG_IGNORE 0
102#define INAT_SEG_REG_DEFAULT 1
103#define INAT_SEG_REG_CS 2
104#define INAT_SEG_REG_SS 3
105#define INAT_SEG_REG_DS 4
106#define INAT_SEG_REG_ES 5
107#define INAT_SEG_REG_FS 6
108#define INAT_SEG_REG_GS 7
109
100/* Attribute search APIs */ 110/* Attribute search APIs */
101extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode); 111extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
102extern int inat_get_last_prefix_id(insn_byte_t last_pfx); 112extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
diff --git a/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt b/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
index 12e377184ee4..e0b85930dd77 100644
--- a/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
+++ b/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
@@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
607fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) 607fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
608fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) 608fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
609fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) 609fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
610ff: 610ff: UD0
611EndTable 611EndTable
612 612
613Table: 3-byte opcode 1 (0x0f 0x38) 613Table: 3-byte opcode 1 (0x0f 0x38)
@@ -717,7 +717,7 @@ AVXcode: 2
7177e: vpermt2d/q Vx,Hx,Wx (66),(ev) 7177e: vpermt2d/q Vx,Hx,Wx (66),(ev)
7187f: vpermt2ps/d Vx,Hx,Wx (66),(ev) 7187f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
71980: INVEPT Gy,Mdq (66) 71980: INVEPT Gy,Mdq (66)
72081: INVPID Gy,Mdq (66) 72081: INVVPID Gy,Mdq (66)
72182: INVPCID Gy,Mdq (66) 72182: INVPCID Gy,Mdq (66)
72283: vpmultishiftqb Vx,Hx,Wx (66),(ev) 72283: vpmultishiftqb Vx,Hx,Wx (66),(ev)
72388: vexpandps/d Vpd,Wpd (66),(ev) 72388: vexpandps/d Vpd,Wpd (66),(ev)
@@ -896,7 +896,7 @@ EndTable
896 896
897GrpTable: Grp3_1 897GrpTable: Grp3_1
8980: TEST Eb,Ib 8980: TEST Eb,Ib
8991: 8991: TEST Eb,Ib
9002: NOT Eb 9002: NOT Eb
9013: NEG Eb 9013: NEG Eb
9024: MUL AL,Eb 9024: MUL AL,Eb
@@ -970,6 +970,15 @@ GrpTable: Grp9
970EndTable 970EndTable
971 971
972GrpTable: Grp10 972GrpTable: Grp10
973# all are UD1
9740: UD1
9751: UD1
9762: UD1
9773: UD1
9784: UD1
9795: UD1
9806: UD1
9817: UD1
973EndTable 982EndTable
974 983
975# Grp11A and Grp11B are expressed as Grp11 in Intel SDM 984# Grp11A and Grp11B are expressed as Grp11 in Intel SDM
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 6a8d03c3d9b7..270f3223c6df 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -172,6 +172,9 @@ void machine__exit(struct machine *machine)
172{ 172{
173 int i; 173 int i;
174 174
175 if (machine == NULL)
176 return;
177
175 machine__destroy_kernel_maps(machine); 178 machine__destroy_kernel_maps(machine);
176 map_groups__exit(&machine->kmaps); 179 map_groups__exit(&machine->kmaps);
177 dsos__exit(&machine->dsos); 180 dsos__exit(&machine->dsos);
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index efd78b827b05..3a5cb5a6e94a 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -70,7 +70,7 @@ void perf_mmap__read_catchup(struct perf_mmap *md);
70static inline u64 perf_mmap__read_head(struct perf_mmap *mm) 70static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
71{ 71{
72 struct perf_event_mmap_page *pc = mm->base; 72 struct perf_event_mmap_page *pc = mm->base;
73 u64 head = ACCESS_ONCE(pc->data_head); 73 u64 head = READ_ONCE(pc->data_head);
74 rmb(); 74 rmb();
75 return head; 75 return head;
76} 76}
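
The mmap.h change replaces the deprecated ACCESS_ONCE() with READ_ONCE() when sampling data_head from the ring-buffer control page; the load still has to be ordered before the records are consumed, which perf does with the explicit rmb(). In portable user-space code the equivalent is an acquire load. A sketch of the two sides of that handshake against the perf_event_mmap_page layout, using GCC/Clang __atomic builtins rather than perf's own helpers:

/* Sketch: read the perf ring-buffer head/tail with acquire/release semantics.
 * Compile as a translation unit (-c); the layout comes from <linux/perf_event.h>. */
#include <linux/perf_event.h>
#include <stdint.h>

static uint64_t read_data_head(struct perf_event_mmap_page *pc)
{
    /* acquire pairs with the kernel's store to data_head: records written
     * before that store are visible after this load */
    return __atomic_load_n(&pc->data_head, __ATOMIC_ACQUIRE);
}

static void write_data_tail(struct perf_event_mmap_page *pc, uint64_t tail)
{
    /* release: tell the kernel we are done with everything up to 'tail' */
    __atomic_store_n(&pc->data_tail, tail, __ATOMIC_RELEASE);
}
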
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index a7fcd95961ef..170316795a18 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1116,6 +1116,7 @@ do { \
1116 INIT_LIST_HEAD(&__t->list); \ 1116 INIT_LIST_HEAD(&__t->list); \
1117 __t->type = PERF_EVSEL__CONFIG_TERM_ ## __type; \ 1117 __t->type = PERF_EVSEL__CONFIG_TERM_ ## __type; \
1118 __t->val.__name = __val; \ 1118 __t->val.__name = __val; \
1119 __t->weak = term->weak; \
1119 list_add_tail(&__t->list, head_terms); \ 1120 list_add_tail(&__t->list, head_terms); \
1120} while (0) 1121} while (0)
1121 1122
@@ -2410,6 +2411,7 @@ static int new_term(struct parse_events_term **_term,
2410 2411
2411 *term = *temp; 2412 *term = *temp;
2412 INIT_LIST_HEAD(&term->list); 2413 INIT_LIST_HEAD(&term->list);
2414 term->weak = false;
2413 2415
2414 switch (term->type_val) { 2416 switch (term->type_val) {
2415 case PARSE_EVENTS__TERM_TYPE_NUM: 2417 case PARSE_EVENTS__TERM_TYPE_NUM:
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index be337c266697..88108cd11b4c 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -101,6 +101,9 @@ struct parse_events_term {
101 /* error string indexes for within parsed string */ 101 /* error string indexes for within parsed string */
102 int err_term; 102 int err_term;
103 int err_val; 103 int err_val;
104
105 /* Coming from implicit alias */
106 bool weak;
104}; 107};
105 108
106struct parse_events_error { 109struct parse_events_error {
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 07cb2ac041d7..80fb1593913a 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -405,6 +405,11 @@ static int pmu_alias_terms(struct perf_pmu_alias *alias,
405 parse_events_terms__purge(&list); 405 parse_events_terms__purge(&list);
406 return ret; 406 return ret;
407 } 407 }
408 /*
409 * Weak terms don't override command line options,
410 * which we don't want for implicit terms in aliases.
411 */
412 cloned->weak = true;
408 list_add_tail(&cloned->list, &list); 413 list_add_tail(&cloned->list, &list);
409 } 414 }
410 list_splice(&list, terms); 415 list_splice(&list, terms);
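
The evsel/parse-events/pmu changes introduce "weak" config terms: period and freq terms that come from a PMU alias are marked weak so that an explicit -c/-F on the command line still wins, while a term the user typed on the event itself keeps overriding as before. The precedence rule boils down to a small check, sketched here with standalone types whose names are illustrative only:

/* Sketch: a weak (alias-supplied) period must not override a user-supplied one. */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct term {
    unsigned long long period;
    bool weak;                  /* true when it came from a PMU alias */
};

static unsigned long long resolve_period(const struct term *t,
                                         unsigned long long user_period)
{
    /* user_period == ULLONG_MAX means "not given on the command line" */
    if (t->weak && user_period != ULLONG_MAX)
        return user_period;     /* explicit -c wins over the alias */
    return t->period;           /* otherwise the term applies */
}

int main(void)
{
    struct term alias_term = { .period = 100003, .weak = true };
    struct term user_term  = { .period = 50000,  .weak = false };

    printf("alias only      : %llu\n", resolve_period(&alias_term, ULLONG_MAX));
    printf("alias + -c 1234 : %llu\n", resolve_period(&alias_term, 1234));
    /* an explicit per-event term is not weak, so it keeps 50000 even though
     * -c was also given, matching apply_config_terms() above */
    printf("-c on the event : %llu\n", resolve_period(&user_term, 1234));
    return 0;
}
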
diff --git a/tools/power/cpupower/bench/system.c b/tools/power/cpupower/bench/system.c
index c25a74ae51ba..2bb3eef7d5c1 100644
--- a/tools/power/cpupower/bench/system.c
+++ b/tools/power/cpupower/bench/system.c
@@ -61,7 +61,7 @@ int set_cpufreq_governor(char *governor, unsigned int cpu)
61 61
62 dprintf("set %s as cpufreq governor\n", governor); 62 dprintf("set %s as cpufreq governor\n", governor);
63 63
64 if (cpupower_is_cpu_online(cpu) != 0) { 64 if (cpupower_is_cpu_online(cpu) != 1) {
65 perror("cpufreq_cpu_exists"); 65 perror("cpufreq_cpu_exists");
66 fprintf(stderr, "error: cpu %u does not exist\n", cpu); 66 fprintf(stderr, "error: cpu %u does not exist\n", cpu);
67 return -1; 67 return -1;
diff --git a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
index 1b5da0066ebf..5b3205f16217 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
+++ b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
@@ -130,15 +130,18 @@ static struct cpuidle_monitor *cpuidle_register(void)
130{ 130{
131 int num; 131 int num;
132 char *tmp; 132 char *tmp;
133 int this_cpu;
134
135 this_cpu = sched_getcpu();
133 136
134 /* Assume idle state count is the same for all CPUs */ 137 /* Assume idle state count is the same for all CPUs */
135 cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(0); 138 cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(this_cpu);
136 139
137 if (cpuidle_sysfs_monitor.hw_states_num <= 0) 140 if (cpuidle_sysfs_monitor.hw_states_num <= 0)
138 return NULL; 141 return NULL;
139 142
140 for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) { 143 for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) {
141 tmp = cpuidle_state_name(0, num); 144 tmp = cpuidle_state_name(this_cpu, num);
142 if (tmp == NULL) 145 if (tmp == NULL)
143 continue; 146 continue;
144 147
@@ -146,7 +149,7 @@ static struct cpuidle_monitor *cpuidle_register(void)
146 strncpy(cpuidle_cstates[num].name, tmp, CSTATE_NAME_LEN - 1); 149 strncpy(cpuidle_cstates[num].name, tmp, CSTATE_NAME_LEN - 1);
147 free(tmp); 150 free(tmp);
148 151
149 tmp = cpuidle_state_desc(0, num); 152 tmp = cpuidle_state_desc(this_cpu, num);
150 if (tmp == NULL) 153 if (tmp == NULL)
151 continue; 154 continue;
152 strncpy(cpuidle_cstates[num].desc, tmp, CSTATE_DESC_LEN - 1); 155 strncpy(cpuidle_cstates[num].desc, tmp, CSTATE_DESC_LEN - 1);
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 333a48655ee0..9316e648a880 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -1,4 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2
2LIBDIR := ../../../lib 3LIBDIR := ../../../lib
3BPFDIR := $(LIBDIR)/bpf 4BPFDIR := $(LIBDIR)/bpf
4APIDIR := ../../../include/uapi 5APIDIR := ../../../include/uapi
@@ -10,7 +11,7 @@ ifneq ($(wildcard $(GENHDR)),)
10endif 11endif
11 12
12CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include 13CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
13LDLIBS += -lcap -lelf 14LDLIBS += -lcap -lelf -lrt
14 15
15TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ 16TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
16 test_align test_verifier_log test_dev_cgroup 17 test_align test_verifier_log test_dev_cgroup
@@ -38,7 +39,7 @@ $(BPFOBJ): force
38CLANG ?= clang 39CLANG ?= clang
39LLC ?= llc 40LLC ?= llc
40 41
41PROBE := $(shell llc -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1) 42PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
42 43
43# Let newer LLVM versions transparently probe the kernel for availability 44# Let newer LLVM versions transparently probe the kernel for availability
44# of full BPF instruction set. 45# of full BPF instruction set.
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 69427531408d..6761be18a91f 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -351,7 +351,7 @@ static void test_bpf_obj_id(void)
351 info_len != sizeof(struct bpf_map_info) || 351 info_len != sizeof(struct bpf_map_info) ||
352 strcmp((char *)map_infos[i].name, expected_map_name), 352 strcmp((char *)map_infos[i].name, expected_map_name),
353 "get-map-info(fd)", 353 "get-map-info(fd)",
354 "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n", 354 "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
355 err, errno, 355 err, errno,
356 map_infos[i].type, BPF_MAP_TYPE_ARRAY, 356 map_infos[i].type, BPF_MAP_TYPE_ARRAY,
357 info_len, sizeof(struct bpf_map_info), 357 info_len, sizeof(struct bpf_map_info),
@@ -395,7 +395,7 @@ static void test_bpf_obj_id(void)
395 *(int *)prog_infos[i].map_ids != map_infos[i].id || 395 *(int *)prog_infos[i].map_ids != map_infos[i].id ||
396 strcmp((char *)prog_infos[i].name, expected_prog_name), 396 strcmp((char *)prog_infos[i].name, expected_prog_name),
397 "get-prog-info(fd)", 397 "get-prog-info(fd)",
398 "err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n", 398 "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
399 err, errno, i, 399 err, errno, i,
400 prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER, 400 prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
401 info_len, sizeof(struct bpf_prog_info), 401 info_len, sizeof(struct bpf_prog_info),
@@ -463,7 +463,7 @@ static void test_bpf_obj_id(void)
463 memcmp(&prog_info, &prog_infos[i], info_len) || 463 memcmp(&prog_info, &prog_infos[i], info_len) ||
464 *(int *)prog_info.map_ids != saved_map_id, 464 *(int *)prog_info.map_ids != saved_map_id,
465 "get-prog-info(next_id->fd)", 465 "get-prog-info(next_id->fd)",
466 "err %d errno %d info_len %u(%lu) memcmp %d map_id %u(%u)\n", 466 "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
467 err, errno, info_len, sizeof(struct bpf_prog_info), 467 err, errno, info_len, sizeof(struct bpf_prog_info),
468 memcmp(&prog_info, &prog_infos[i], info_len), 468 memcmp(&prog_info, &prog_infos[i], info_len),
469 *(int *)prog_info.map_ids, saved_map_id); 469 *(int *)prog_info.map_ids, saved_map_id);
@@ -509,7 +509,7 @@ static void test_bpf_obj_id(void)
509 memcmp(&map_info, &map_infos[i], info_len) || 509 memcmp(&map_info, &map_infos[i], info_len) ||
510 array_value != array_magic_value, 510 array_value != array_magic_value,
511 "check get-map-info(next_id->fd)", 511 "check get-map-info(next_id->fd)",
512 "err %d errno %d info_len %u(%lu) memcmp %d array_value %llu(%llu)\n", 512 "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
513 err, errno, info_len, sizeof(struct bpf_map_info), 513 err, errno, info_len, sizeof(struct bpf_map_info),
514 memcmp(&map_info, &map_infos[i], info_len), 514 memcmp(&map_info, &map_infos[i], info_len),
515 array_value, array_magic_value); 515 array_value, array_magic_value);
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 3c64f30cf63c..b51017404c62 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -422,9 +422,7 @@ static struct bpf_test tests[] = {
422 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), 422 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
423 BPF_EXIT_INSN(), 423 BPF_EXIT_INSN(),
424 }, 424 },
425 .errstr_unpriv = "R1 subtraction from stack pointer", 425 .errstr = "R1 subtraction from stack pointer",
426 .result_unpriv = REJECT,
427 .errstr = "R1 invalid mem access",
428 .result = REJECT, 426 .result = REJECT,
429 }, 427 },
430 { 428 {
@@ -606,7 +604,6 @@ static struct bpf_test tests[] = {
606 }, 604 },
607 .errstr = "misaligned stack access", 605 .errstr = "misaligned stack access",
608 .result = REJECT, 606 .result = REJECT,
609 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
610 }, 607 },
611 { 608 {
612 "invalid map_fd for function call", 609 "invalid map_fd for function call",
@@ -1797,7 +1794,6 @@ static struct bpf_test tests[] = {
1797 }, 1794 },
1798 .result = REJECT, 1795 .result = REJECT,
1799 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8", 1796 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
1800 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1801 }, 1797 },
1802 { 1798 {
1803 "PTR_TO_STACK store/load - bad alignment on reg", 1799 "PTR_TO_STACK store/load - bad alignment on reg",
@@ -1810,7 +1806,6 @@ static struct bpf_test tests[] = {
1810 }, 1806 },
1811 .result = REJECT, 1807 .result = REJECT,
1812 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8", 1808 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
1813 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1814 }, 1809 },
1815 { 1810 {
1816 "PTR_TO_STACK store/load - out of bounds low", 1811 "PTR_TO_STACK store/load - out of bounds low",
@@ -1862,9 +1857,8 @@ static struct bpf_test tests[] = {
1862 BPF_MOV64_IMM(BPF_REG_0, 0), 1857 BPF_MOV64_IMM(BPF_REG_0, 0),
1863 BPF_EXIT_INSN(), 1858 BPF_EXIT_INSN(),
1864 }, 1859 },
1865 .result = ACCEPT, 1860 .result = REJECT,
1866 .result_unpriv = REJECT, 1861 .errstr = "R1 pointer += pointer",
1867 .errstr_unpriv = "R1 pointer += pointer",
1868 }, 1862 },
1869 { 1863 {
1870 "unpriv: neg pointer", 1864 "unpriv: neg pointer",
@@ -2592,7 +2586,8 @@ static struct bpf_test tests[] = {
2592 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2586 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2593 offsetof(struct __sk_buff, data)), 2587 offsetof(struct __sk_buff, data)),
2594 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4), 2588 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
2595 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), 2589 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2590 offsetof(struct __sk_buff, len)),
2596 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49), 2591 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
2597 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49), 2592 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
2598 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2), 2593 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
@@ -2899,7 +2894,7 @@ static struct bpf_test tests[] = {
2899 BPF_MOV64_IMM(BPF_REG_0, 0), 2894 BPF_MOV64_IMM(BPF_REG_0, 0),
2900 BPF_EXIT_INSN(), 2895 BPF_EXIT_INSN(),
2901 }, 2896 },
2902 .errstr = "invalid access to packet", 2897 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
2903 .result = REJECT, 2898 .result = REJECT,
2904 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2899 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2905 }, 2900 },
@@ -3885,9 +3880,7 @@ static struct bpf_test tests[] = {
3885 BPF_EXIT_INSN(), 3880 BPF_EXIT_INSN(),
3886 }, 3881 },
3887 .fixup_map2 = { 3, 11 }, 3882 .fixup_map2 = { 3, 11 },
3888 .errstr_unpriv = "R0 pointer += pointer", 3883 .errstr = "R0 pointer += pointer",
3889 .errstr = "R0 invalid mem access 'inv'",
3890 .result_unpriv = REJECT,
3891 .result = REJECT, 3884 .result = REJECT,
3892 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 3885 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3893 }, 3886 },
@@ -3928,7 +3921,7 @@ static struct bpf_test tests[] = {
3928 BPF_EXIT_INSN(), 3921 BPF_EXIT_INSN(),
3929 }, 3922 },
3930 .fixup_map1 = { 4 }, 3923 .fixup_map1 = { 4 },
3931 .errstr = "R4 invalid mem access", 3924 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
3932 .result = REJECT, 3925 .result = REJECT,
3933 .prog_type = BPF_PROG_TYPE_SCHED_CLS 3926 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3934 }, 3927 },
@@ -3949,7 +3942,7 @@ static struct bpf_test tests[] = {
3949 BPF_EXIT_INSN(), 3942 BPF_EXIT_INSN(),
3950 }, 3943 },
3951 .fixup_map1 = { 4 }, 3944 .fixup_map1 = { 4 },
3952 .errstr = "R4 invalid mem access", 3945 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
3953 .result = REJECT, 3946 .result = REJECT,
3954 .prog_type = BPF_PROG_TYPE_SCHED_CLS 3947 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3955 }, 3948 },
@@ -3970,7 +3963,7 @@ static struct bpf_test tests[] = {
3970 BPF_EXIT_INSN(), 3963 BPF_EXIT_INSN(),
3971 }, 3964 },
3972 .fixup_map1 = { 4 }, 3965 .fixup_map1 = { 4 },
3973 .errstr = "R4 invalid mem access", 3966 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
3974 .result = REJECT, 3967 .result = REJECT,
3975 .prog_type = BPF_PROG_TYPE_SCHED_CLS 3968 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3976 }, 3969 },
@@ -5195,10 +5188,8 @@ static struct bpf_test tests[] = {
5195 BPF_EXIT_INSN(), 5188 BPF_EXIT_INSN(),
5196 }, 5189 },
5197 .fixup_map2 = { 3 }, 5190 .fixup_map2 = { 3 },
5198 .errstr_unpriv = "R0 bitwise operator &= on pointer", 5191 .errstr = "R0 bitwise operator &= on pointer",
5199 .errstr = "invalid mem access 'inv'",
5200 .result = REJECT, 5192 .result = REJECT,
5201 .result_unpriv = REJECT,
5202 }, 5193 },
5203 { 5194 {
5204 "map element value illegal alu op, 2", 5195 "map element value illegal alu op, 2",
@@ -5214,10 +5205,8 @@ static struct bpf_test tests[] = {
5214 BPF_EXIT_INSN(), 5205 BPF_EXIT_INSN(),
5215 }, 5206 },
5216 .fixup_map2 = { 3 }, 5207 .fixup_map2 = { 3 },
5217 .errstr_unpriv = "R0 32-bit pointer arithmetic prohibited", 5208 .errstr = "R0 32-bit pointer arithmetic prohibited",
5218 .errstr = "invalid mem access 'inv'",
5219 .result = REJECT, 5209 .result = REJECT,
5220 .result_unpriv = REJECT,
5221 }, 5210 },
5222 { 5211 {
5223 "map element value illegal alu op, 3", 5212 "map element value illegal alu op, 3",
@@ -5233,10 +5222,8 @@ static struct bpf_test tests[] = {
5233 BPF_EXIT_INSN(), 5222 BPF_EXIT_INSN(),
5234 }, 5223 },
5235 .fixup_map2 = { 3 }, 5224 .fixup_map2 = { 3 },
5236 .errstr_unpriv = "R0 pointer arithmetic with /= operator", 5225 .errstr = "R0 pointer arithmetic with /= operator",
5237 .errstr = "invalid mem access 'inv'",
5238 .result = REJECT, 5226 .result = REJECT,
5239 .result_unpriv = REJECT,
5240 }, 5227 },
5241 { 5228 {
5242 "map element value illegal alu op, 4", 5229 "map element value illegal alu op, 4",
@@ -6019,8 +6006,7 @@ static struct bpf_test tests[] = {
6019 BPF_EXIT_INSN(), 6006 BPF_EXIT_INSN(),
6020 }, 6007 },
6021 .fixup_map_in_map = { 3 }, 6008 .fixup_map_in_map = { 3 },
6022 .errstr = "R1 type=inv expected=map_ptr", 6009 .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
6023 .errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
6024 .result = REJECT, 6010 .result = REJECT,
6025 }, 6011 },
6026 { 6012 {
@@ -6117,6 +6103,30 @@ static struct bpf_test tests[] = {
6117 .result = ACCEPT, 6103 .result = ACCEPT,
6118 }, 6104 },
6119 { 6105 {
6106 "ld_abs: tests on r6 and skb data reload helper",
6107 .insns = {
6108 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6109 BPF_LD_ABS(BPF_B, 0),
6110 BPF_LD_ABS(BPF_H, 0),
6111 BPF_LD_ABS(BPF_W, 0),
6112 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
6113 BPF_MOV64_IMM(BPF_REG_6, 0),
6114 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
6115 BPF_MOV64_IMM(BPF_REG_2, 1),
6116 BPF_MOV64_IMM(BPF_REG_3, 2),
6117 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6118 BPF_FUNC_skb_vlan_push),
6119 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
6120 BPF_LD_ABS(BPF_B, 0),
6121 BPF_LD_ABS(BPF_H, 0),
6122 BPF_LD_ABS(BPF_W, 0),
6123 BPF_MOV64_IMM(BPF_REG_0, 42),
6124 BPF_EXIT_INSN(),
6125 },
6126 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6127 .result = ACCEPT,
6128 },
6129 {
6120 "ld_ind: check calling conv, r1", 6130 "ld_ind: check calling conv, r1",
6121 .insns = { 6131 .insns = {
6122 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 6132 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
@@ -6300,7 +6310,7 @@ static struct bpf_test tests[] = {
6300 BPF_EXIT_INSN(), 6310 BPF_EXIT_INSN(),
6301 }, 6311 },
6302 .fixup_map1 = { 3 }, 6312 .fixup_map1 = { 3 },
6303 .errstr = "R0 min value is negative", 6313 .errstr = "unbounded min value",
6304 .result = REJECT, 6314 .result = REJECT,
6305 }, 6315 },
6306 { 6316 {
@@ -6324,7 +6334,7 @@ static struct bpf_test tests[] = {
6324 BPF_EXIT_INSN(), 6334 BPF_EXIT_INSN(),
6325 }, 6335 },
6326 .fixup_map1 = { 3 }, 6336 .fixup_map1 = { 3 },
6327 .errstr = "R0 min value is negative", 6337 .errstr = "unbounded min value",
6328 .result = REJECT, 6338 .result = REJECT,
6329 }, 6339 },
6330 { 6340 {
@@ -6350,7 +6360,7 @@ static struct bpf_test tests[] = {
6350 BPF_EXIT_INSN(), 6360 BPF_EXIT_INSN(),
6351 }, 6361 },
6352 .fixup_map1 = { 3 }, 6362 .fixup_map1 = { 3 },
6353 .errstr = "R8 invalid mem access 'inv'", 6363 .errstr = "unbounded min value",
6354 .result = REJECT, 6364 .result = REJECT,
6355 }, 6365 },
6356 { 6366 {
@@ -6375,7 +6385,7 @@ static struct bpf_test tests[] = {
6375 BPF_EXIT_INSN(), 6385 BPF_EXIT_INSN(),
6376 }, 6386 },
6377 .fixup_map1 = { 3 }, 6387 .fixup_map1 = { 3 },
6378 .errstr = "R8 invalid mem access 'inv'", 6388 .errstr = "unbounded min value",
6379 .result = REJECT, 6389 .result = REJECT,
6380 }, 6390 },
6381 { 6391 {
@@ -6423,7 +6433,7 @@ static struct bpf_test tests[] = {
6423 BPF_EXIT_INSN(), 6433 BPF_EXIT_INSN(),
6424 }, 6434 },
6425 .fixup_map1 = { 3 }, 6435 .fixup_map1 = { 3 },
6426 .errstr = "R0 min value is negative", 6436 .errstr = "unbounded min value",
6427 .result = REJECT, 6437 .result = REJECT,
6428 }, 6438 },
6429 { 6439 {
@@ -6494,7 +6504,7 @@ static struct bpf_test tests[] = {
6494 BPF_EXIT_INSN(), 6504 BPF_EXIT_INSN(),
6495 }, 6505 },
6496 .fixup_map1 = { 3 }, 6506 .fixup_map1 = { 3 },
6497 .errstr = "R0 min value is negative", 6507 .errstr = "unbounded min value",
6498 .result = REJECT, 6508 .result = REJECT,
6499 }, 6509 },
6500 { 6510 {
@@ -6545,7 +6555,7 @@ static struct bpf_test tests[] = {
6545 BPF_EXIT_INSN(), 6555 BPF_EXIT_INSN(),
6546 }, 6556 },
6547 .fixup_map1 = { 3 }, 6557 .fixup_map1 = { 3 },
6548 .errstr = "R0 min value is negative", 6558 .errstr = "unbounded min value",
6549 .result = REJECT, 6559 .result = REJECT,
6550 }, 6560 },
6551 { 6561 {
@@ -6572,7 +6582,7 @@ static struct bpf_test tests[] = {
6572 BPF_EXIT_INSN(), 6582 BPF_EXIT_INSN(),
6573 }, 6583 },
6574 .fixup_map1 = { 3 }, 6584 .fixup_map1 = { 3 },
6575 .errstr = "R0 min value is negative", 6585 .errstr = "unbounded min value",
6576 .result = REJECT, 6586 .result = REJECT,
6577 }, 6587 },
6578 { 6588 {
@@ -6598,7 +6608,7 @@ static struct bpf_test tests[] = {
6598 BPF_EXIT_INSN(), 6608 BPF_EXIT_INSN(),
6599 }, 6609 },
6600 .fixup_map1 = { 3 }, 6610 .fixup_map1 = { 3 },
6601 .errstr = "R0 min value is negative", 6611 .errstr = "unbounded min value",
6602 .result = REJECT, 6612 .result = REJECT,
6603 }, 6613 },
6604 { 6614 {
@@ -6627,7 +6637,7 @@ static struct bpf_test tests[] = {
6627 BPF_EXIT_INSN(), 6637 BPF_EXIT_INSN(),
6628 }, 6638 },
6629 .fixup_map1 = { 3 }, 6639 .fixup_map1 = { 3 },
6630 .errstr = "R0 min value is negative", 6640 .errstr = "unbounded min value",
6631 .result = REJECT, 6641 .result = REJECT,
6632 }, 6642 },
6633 { 6643 {
@@ -6657,7 +6667,7 @@ static struct bpf_test tests[] = {
6657 BPF_JMP_IMM(BPF_JA, 0, 0, -7), 6667 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
6658 }, 6668 },
6659 .fixup_map1 = { 4 }, 6669 .fixup_map1 = { 4 },
6660 .errstr = "R0 min value is negative", 6670 .errstr = "unbounded min value",
6661 .result = REJECT, 6671 .result = REJECT,
6662 }, 6672 },
6663 { 6673 {
@@ -6685,8 +6695,7 @@ static struct bpf_test tests[] = {
6685 BPF_EXIT_INSN(), 6695 BPF_EXIT_INSN(),
6686 }, 6696 },
6687 .fixup_map1 = { 3 }, 6697 .fixup_map1 = { 3 },
6688 .errstr_unpriv = "R0 pointer comparison prohibited", 6698 .errstr = "unbounded min value",
6689 .errstr = "R0 min value is negative",
6690 .result = REJECT, 6699 .result = REJECT,
6691 .result_unpriv = REJECT, 6700 .result_unpriv = REJECT,
6692 }, 6701 },
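The rejections above now read "unbounded min value": knowing an unsigned bound for a register says nothing about its signed minimum, because a negative 64-bit value is an enormous number when reinterpreted as unsigned. A small user-space illustration in plain C, not BPF; the value -6 is arbitrary:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int64_t off = -6;       /* arbitrary negative offset */

        /* Reinterpreted as unsigned, -6 is huge, so an unsigned comparison
         * alone cannot establish a useful signed lower bound. */
        printf("signed %lld reads as unsigned %llu\n",
               (long long)off, (unsigned long long)(uint64_t)off);
        printf("(uint64_t)off > 100 evaluates to %d\n",
               (uint64_t)off > 100);
        return 0;
}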
@@ -6742,6 +6751,462 @@ static struct bpf_test tests[] = {
6742 .result = REJECT, 6751 .result = REJECT,
6743 }, 6752 },
6744 { 6753 {
6754 "bounds check based on zero-extended MOV",
6755 .insns = {
6756 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6757 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6758 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6759 BPF_LD_MAP_FD(BPF_REG_1, 0),
6760 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6761 BPF_FUNC_map_lookup_elem),
6762 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6763 /* r2 = 0x0000'0000'ffff'ffff */
6764 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
6765 /* r2 = 0 */
6766 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
6767 /* no-op */
6768 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6769 /* access at offset 0 */
6770 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6771 /* exit */
6772 BPF_MOV64_IMM(BPF_REG_0, 0),
6773 BPF_EXIT_INSN(),
6774 },
6775 .fixup_map1 = { 3 },
6776 .result = ACCEPT
6777 },
6778 {
6779 "bounds check based on sign-extended MOV. test1",
6780 .insns = {
6781 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6782 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6783 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6784 BPF_LD_MAP_FD(BPF_REG_1, 0),
6785 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6786 BPF_FUNC_map_lookup_elem),
6787 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6788 /* r2 = 0xffff'ffff'ffff'ffff */
6789 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
6790 /* r2 = 0xffff'ffff */
6791 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
6792 /* r0 = <oob pointer> */
6793 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6794 /* access to OOB pointer */
6795 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6796 /* exit */
6797 BPF_MOV64_IMM(BPF_REG_0, 0),
6798 BPF_EXIT_INSN(),
6799 },
6800 .fixup_map1 = { 3 },
6801 .errstr = "map_value pointer and 4294967295",
6802 .result = REJECT
6803 },
6804 {
6805 "bounds check based on sign-extended MOV. test2",
6806 .insns = {
6807 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6808 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6809 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6810 BPF_LD_MAP_FD(BPF_REG_1, 0),
6811 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6812 BPF_FUNC_map_lookup_elem),
6813 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6814 /* r2 = 0xffff'ffff'ffff'ffff */
6815 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
6816 /* r2 = 0xfff'ffff */
6817 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
6818 /* r0 = <oob pointer> */
6819 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6820 /* access to OOB pointer */
6821 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6822 /* exit */
6823 BPF_MOV64_IMM(BPF_REG_0, 0),
6824 BPF_EXIT_INSN(),
6825 },
6826 .fixup_map1 = { 3 },
6827 .errstr = "R0 min value is outside of the array range",
6828 .result = REJECT
6829 },
6830 {
6831 "bounds check based on reg_off + var_off + insn_off. test1",
6832 .insns = {
6833 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6834 offsetof(struct __sk_buff, mark)),
6835 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6836 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6837 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6838 BPF_LD_MAP_FD(BPF_REG_1, 0),
6839 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6840 BPF_FUNC_map_lookup_elem),
6841 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6842 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
6843 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
6844 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
6845 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
6846 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
6847 BPF_MOV64_IMM(BPF_REG_0, 0),
6848 BPF_EXIT_INSN(),
6849 },
6850 .fixup_map1 = { 4 },
6851 .errstr = "value_size=8 off=1073741825",
6852 .result = REJECT,
6853 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6854 },
6855 {
6856 "bounds check based on reg_off + var_off + insn_off. test2",
6857 .insns = {
6858 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6859 offsetof(struct __sk_buff, mark)),
6860 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6861 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6862 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6863 BPF_LD_MAP_FD(BPF_REG_1, 0),
6864 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6865 BPF_FUNC_map_lookup_elem),
6866 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6867 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
6868 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
6869 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
6870 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
6871 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
6872 BPF_MOV64_IMM(BPF_REG_0, 0),
6873 BPF_EXIT_INSN(),
6874 },
6875 .fixup_map1 = { 4 },
6876 .errstr = "value 1073741823",
6877 .result = REJECT,
6878 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6879 },
6880 {
6881 "bounds check after truncation of non-boundary-crossing range",
6882 .insns = {
6883 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6884 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6885 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6886 BPF_LD_MAP_FD(BPF_REG_1, 0),
6887 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6888 BPF_FUNC_map_lookup_elem),
6889 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6890 /* r1 = [0x00, 0xff] */
6891 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6892 BPF_MOV64_IMM(BPF_REG_2, 1),
6893 /* r2 = 0x10'0000'0000 */
6894 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
6895 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
6896 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
6897 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
6898 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
6899 /* r1 = [0x00, 0xff] */
6900 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
6901 /* r1 = 0 */
6902 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6903 /* no-op */
6904 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6905 /* access at offset 0 */
6906 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6907 /* exit */
6908 BPF_MOV64_IMM(BPF_REG_0, 0),
6909 BPF_EXIT_INSN(),
6910 },
6911 .fixup_map1 = { 3 },
6912 .result = ACCEPT
6913 },
6914 {
6915 "bounds check after truncation of boundary-crossing range (1)",
6916 .insns = {
6917 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6918 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6919 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6920 BPF_LD_MAP_FD(BPF_REG_1, 0),
6921 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6922 BPF_FUNC_map_lookup_elem),
6923 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6924 /* r1 = [0x00, 0xff] */
6925 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6926 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6927 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
6928 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6929 /* r1 = [0xffff'ff80, 0xffff'ffff] or
6930 * [0x0000'0000, 0x0000'007f]
6931 */
6932 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
6933 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6934 /* r1 = [0x00, 0xff] or
6935 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
6936 */
6937 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6938 /* r1 = 0 or
6939 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
6940 */
6941 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6942 /* no-op or OOB pointer computation */
6943 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6944 /* potentially OOB access */
6945 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6946 /* exit */
6947 BPF_MOV64_IMM(BPF_REG_0, 0),
6948 BPF_EXIT_INSN(),
6949 },
6950 .fixup_map1 = { 3 },
6951 /* not actually fully unbounded, but the bound is very high */
6952 .errstr = "R0 unbounded memory access",
6953 .result = REJECT
6954 },
6955 {
6956 "bounds check after truncation of boundary-crossing range (2)",
6957 .insns = {
6958 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6959 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6960 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6961 BPF_LD_MAP_FD(BPF_REG_1, 0),
6962 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6963 BPF_FUNC_map_lookup_elem),
6964 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6965 /* r1 = [0x00, 0xff] */
6966 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6967 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6968 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
6969 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6970 /* r1 = [0xffff'ff80, 0xffff'ffff] or
6971 * [0x0000'0000, 0x0000'007f]
6972 * difference to previous test: truncation via MOV32
6973 * instead of ALU32.
6974 */
6975 BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
6976 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6977 /* r1 = [0x00, 0xff] or
6978 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
6979 */
6980 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6981 /* r1 = 0 or
6982 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
6983 */
6984 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6985 /* no-op or OOB pointer computation */
6986 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6987 /* potentially OOB access */
6988 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6989 /* exit */
6990 BPF_MOV64_IMM(BPF_REG_0, 0),
6991 BPF_EXIT_INSN(),
6992 },
6993 .fixup_map1 = { 3 },
6994 /* not actually fully unbounded, but the bound is very high */
6995 .errstr = "R0 unbounded memory access",
6996 .result = REJECT
6997 },
6998 {
6999 "bounds check after wrapping 32-bit addition",
7000 .insns = {
7001 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7002 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7003 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7004 BPF_LD_MAP_FD(BPF_REG_1, 0),
7005 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7006 BPF_FUNC_map_lookup_elem),
7007 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7008 /* r1 = 0x7fff'ffff */
7009 BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
7010 /* r1 = 0xffff'fffe */
7011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7012 /* r1 = 0 */
7013 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
7014 /* no-op */
7015 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7016 /* access at offset 0 */
7017 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7018 /* exit */
7019 BPF_MOV64_IMM(BPF_REG_0, 0),
7020 BPF_EXIT_INSN(),
7021 },
7022 .fixup_map1 = { 3 },
7023 .result = ACCEPT
7024 },
7025 {
7026 "bounds check after shift with oversized count operand",
7027 .insns = {
7028 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7029 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7030 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7031 BPF_LD_MAP_FD(BPF_REG_1, 0),
7032 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7033 BPF_FUNC_map_lookup_elem),
7034 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7035 BPF_MOV64_IMM(BPF_REG_2, 32),
7036 BPF_MOV64_IMM(BPF_REG_1, 1),
7037 /* r1 = (u32)1 << (u32)32 = ? */
7038 BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
7039 /* r1 = [0x0000, 0xffff] */
7040 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
7041 /* computes unknown pointer, potentially OOB */
7042 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7043 /* potentially OOB access */
7044 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7045 /* exit */
7046 BPF_MOV64_IMM(BPF_REG_0, 0),
7047 BPF_EXIT_INSN(),
7048 },
7049 .fixup_map1 = { 3 },
7050 .errstr = "R0 max value is outside of the array range",
7051 .result = REJECT
7052 },
7053 {
7054 "bounds check after right shift of maybe-negative number",
7055 .insns = {
7056 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7057 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7058 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7059 BPF_LD_MAP_FD(BPF_REG_1, 0),
7060 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7061 BPF_FUNC_map_lookup_elem),
7062 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7063 /* r1 = [0x00, 0xff] */
7064 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7065 /* r1 = [-0x01, 0xfe] */
7066 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
7067 /* r1 = 0 or 0xff'ffff'ffff'ffff */
7068 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7069 /* r1 = 0 or 0xffff'ffff'ffff */
7070 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7071 /* computes unknown pointer, potentially OOB */
7072 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7073 /* potentially OOB access */
7074 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7075 /* exit */
7076 BPF_MOV64_IMM(BPF_REG_0, 0),
7077 BPF_EXIT_INSN(),
7078 },
7079 .fixup_map1 = { 3 },
7080 .errstr = "R0 unbounded memory access",
7081 .result = REJECT
7082 },
7083 {
7084 "bounds check map access with off+size signed 32bit overflow. test1",
7085 .insns = {
7086 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7087 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7088 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7089 BPF_LD_MAP_FD(BPF_REG_1, 0),
7090 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7091 BPF_FUNC_map_lookup_elem),
7092 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7093 BPF_EXIT_INSN(),
7094 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
7095 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7096 BPF_JMP_A(0),
7097 BPF_EXIT_INSN(),
7098 },
7099 .fixup_map1 = { 3 },
7100 .errstr = "map_value pointer and 2147483646",
7101 .result = REJECT
7102 },
7103 {
7104 "bounds check map access with off+size signed 32bit overflow. test2",
7105 .insns = {
7106 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7107 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7108 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7109 BPF_LD_MAP_FD(BPF_REG_1, 0),
7110 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7111 BPF_FUNC_map_lookup_elem),
7112 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7113 BPF_EXIT_INSN(),
7114 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7115 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7116 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7117 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7118 BPF_JMP_A(0),
7119 BPF_EXIT_INSN(),
7120 },
7121 .fixup_map1 = { 3 },
7122 .errstr = "pointer offset 1073741822",
7123 .result = REJECT
7124 },
7125 {
7126 "bounds check map access with off+size signed 32bit overflow. test3",
7127 .insns = {
7128 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7129 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7130 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7131 BPF_LD_MAP_FD(BPF_REG_1, 0),
7132 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7133 BPF_FUNC_map_lookup_elem),
7134 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7135 BPF_EXIT_INSN(),
7136 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7137 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7138 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7139 BPF_JMP_A(0),
7140 BPF_EXIT_INSN(),
7141 },
7142 .fixup_map1 = { 3 },
7143 .errstr = "pointer offset -1073741822",
7144 .result = REJECT
7145 },
7146 {
7147 "bounds check map access with off+size signed 32bit overflow. test4",
7148 .insns = {
7149 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7150 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7151 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7152 BPF_LD_MAP_FD(BPF_REG_1, 0),
7153 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7154 BPF_FUNC_map_lookup_elem),
7155 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7156 BPF_EXIT_INSN(),
7157 BPF_MOV64_IMM(BPF_REG_1, 1000000),
7158 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
7159 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7160 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7161 BPF_JMP_A(0),
7162 BPF_EXIT_INSN(),
7163 },
7164 .fixup_map1 = { 3 },
7165 .errstr = "map_value pointer and 1000000000000",
7166 .result = REJECT
7167 },
7168 {
7169 "pointer/scalar confusion in state equality check (way 1)",
7170 .insns = {
7171 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7172 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7173 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7174 BPF_LD_MAP_FD(BPF_REG_1, 0),
7175 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7176 BPF_FUNC_map_lookup_elem),
7177 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7178 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7179 BPF_JMP_A(1),
7180 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7181 BPF_JMP_A(0),
7182 BPF_EXIT_INSN(),
7183 },
7184 .fixup_map1 = { 3 },
7185 .result = ACCEPT,
7186 .result_unpriv = REJECT,
7187 .errstr_unpriv = "R0 leaks addr as return value"
7188 },
7189 {
7190 "pointer/scalar confusion in state equality check (way 2)",
7191 .insns = {
7192 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7193 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7194 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7195 BPF_LD_MAP_FD(BPF_REG_1, 0),
7196 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7197 BPF_FUNC_map_lookup_elem),
7198 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
7199 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7200 BPF_JMP_A(1),
7201 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7202 BPF_EXIT_INSN(),
7203 },
7204 .fixup_map1 = { 3 },
7205 .result = ACCEPT,
7206 .result_unpriv = REJECT,
7207 .errstr_unpriv = "R0 leaks addr as return value"
7208 },
7209 {
6745 "variable-offset ctx access", 7210 "variable-offset ctx access",
6746 .insns = { 7211 .insns = {
6747 /* Get an unknown value */ 7212 /* Get an unknown value */
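Several of the new bounds-check tests above turn on how the two MOV widths treat the upper half of a register: a 32-bit move zero-extends its immediate, while a 64-bit move sign-extends it, exactly as the r2 comments in the zero-extended and sign-extended cases annotate. A stand-alone sketch of the same effect in ordinary C, not part of the patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t zext = (uint32_t)0xffffffffu;    /* like BPF_MOV32_IMM */
        int32_t imm32 = -1;                       /* bit pattern 0xffffffff */
        uint64_t sext = (uint64_t)(int64_t)imm32; /* like BPF_MOV64_IMM */

        printf("zero-extended >> 32 = %llx\n",
               (unsigned long long)(zext >> 32));  /* 0 */
        printf("sign-extended  >> 32 = %llx\n",
               (unsigned long long)(sext >> 32));  /* ffffffff */
        return 0;
}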
@@ -6783,6 +7248,71 @@ static struct bpf_test tests[] = {
6783 .prog_type = BPF_PROG_TYPE_LWT_IN, 7248 .prog_type = BPF_PROG_TYPE_LWT_IN,
6784 }, 7249 },
6785 { 7250 {
7251 "indirect variable-offset stack access",
7252 .insns = {
7253 /* Fill the top 8 bytes of the stack */
7254 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7255 /* Get an unknown value */
7256 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7257 /* Make it small and 4-byte aligned */
7258 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7259 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
7260 /* add it to fp. We now have either fp-4 or fp-8, but
7261 * we don't know which
7262 */
7263 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
7264 /* dereference it indirectly */
7265 BPF_LD_MAP_FD(BPF_REG_1, 0),
7266 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7267 BPF_FUNC_map_lookup_elem),
7268 BPF_MOV64_IMM(BPF_REG_0, 0),
7269 BPF_EXIT_INSN(),
7270 },
7271 .fixup_map1 = { 5 },
7272 .errstr = "variable stack read R2",
7273 .result = REJECT,
7274 .prog_type = BPF_PROG_TYPE_LWT_IN,
7275 },
7276 {
7277 "direct stack access with 32-bit wraparound. test1",
7278 .insns = {
7279 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7280 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7281 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7282 BPF_MOV32_IMM(BPF_REG_0, 0),
7283 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7284 BPF_EXIT_INSN()
7285 },
7286 .errstr = "fp pointer and 2147483647",
7287 .result = REJECT
7288 },
7289 {
7290 "direct stack access with 32-bit wraparound. test2",
7291 .insns = {
7292 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7293 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7294 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7295 BPF_MOV32_IMM(BPF_REG_0, 0),
7296 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7297 BPF_EXIT_INSN()
7298 },
7299 .errstr = "fp pointer and 1073741823",
7300 .result = REJECT
7301 },
7302 {
7303 "direct stack access with 32-bit wraparound. test3",
7304 .insns = {
7305 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7306 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7307 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7308 BPF_MOV32_IMM(BPF_REG_0, 0),
7309 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7310 BPF_EXIT_INSN()
7311 },
7312 .errstr = "fp pointer offset 1073741822",
7313 .result = REJECT
7314 },
7315 {
6786 "liveness pruning and write screening", 7316 "liveness pruning and write screening",
6787 .insns = { 7317 .insns = {
6788 /* Get an unknown value */ 7318 /* Get an unknown value */
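The "direct stack access with 32-bit wraparound" cases above, like the earlier "bounds check after wrapping 32-bit addition", come down to 32-bit ALU results being truncated modulo 2^32 while 64-bit additions are not. A stand-alone C sketch of that arithmetic, not part of the patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t a = 0x7fffffffu;

        uint32_t wrapped = a + a + 2u;          /* 32-bit add wraps to 0 */
        uint64_t exact   = (uint64_t)a + a + 2; /* 0x100000000, no wrap */

        printf("32-bit sum: %x\n", wrapped);
        printf("64-bit sum: %llx\n", (unsigned long long)exact);
        return 0;
}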
@@ -7104,6 +7634,19 @@ static struct bpf_test tests[] = {
7104 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 7634 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7105 }, 7635 },
7106 { 7636 {
7637 "pkt_end - pkt_start is allowed",
7638 .insns = {
7639 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7640 offsetof(struct __sk_buff, data_end)),
7641 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7642 offsetof(struct __sk_buff, data)),
7643 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
7644 BPF_EXIT_INSN(),
7645 },
7646 .result = ACCEPT,
7647 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7648 },
7649 {
7107 "XDP pkt read, pkt_end mangling, bad access 1", 7650 "XDP pkt read, pkt_end mangling, bad access 1",
7108 .insns = { 7651 .insns = {
7109 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 7652 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
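The new "pkt_end - pkt_start is allowed" case covers the common idiom of deriving a packet length by subtracting the two packet pointers, while the surrounding tests still reject arithmetic that modifies pkt_end itself. A hedged C-level sketch of the allowed pattern, assuming a hypothetical classifier program built with clang -target bpf against the uapi linux/bpf.h; it is not taken from the patch:

#include <linux/bpf.h>

/* Subtracting data from data_end yields a plain scalar length, which the
 * verifier accepts; adding to data_end, as in the tests below, does not.
 */
int cls_pkt_len(struct __sk_buff *skb)
{
        unsigned char *data     = (unsigned char *)(long)skb->data;
        unsigned char *data_end = (unsigned char *)(long)skb->data_end;
        long len = data_end - data;

        return len > 0 ? 0 : 1;
}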
@@ -7118,7 +7661,7 @@ static struct bpf_test tests[] = {
7118 BPF_MOV64_IMM(BPF_REG_0, 0), 7661 BPF_MOV64_IMM(BPF_REG_0, 0),
7119 BPF_EXIT_INSN(), 7662 BPF_EXIT_INSN(),
7120 }, 7663 },
7121 .errstr = "R1 offset is outside of the packet", 7664 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
7122 .result = REJECT, 7665 .result = REJECT,
7123 .prog_type = BPF_PROG_TYPE_XDP, 7666 .prog_type = BPF_PROG_TYPE_XDP,
7124 }, 7667 },
@@ -7137,7 +7680,7 @@ static struct bpf_test tests[] = {
7137 BPF_MOV64_IMM(BPF_REG_0, 0), 7680 BPF_MOV64_IMM(BPF_REG_0, 0),
7138 BPF_EXIT_INSN(), 7681 BPF_EXIT_INSN(),
7139 }, 7682 },
7140 .errstr = "R1 offset is outside of the packet", 7683 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
7141 .result = REJECT, 7684 .result = REJECT,
7142 .prog_type = BPF_PROG_TYPE_XDP, 7685 .prog_type = BPF_PROG_TYPE_XDP,
7143 }, 7686 },
diff --git a/tools/testing/selftests/bpf/test_verifier_log.c b/tools/testing/selftests/bpf/test_verifier_log.c
index 3cc0b561489e..e9626cf5607a 100644
--- a/tools/testing/selftests/bpf/test_verifier_log.c
+++ b/tools/testing/selftests/bpf/test_verifier_log.c
@@ -3,6 +3,8 @@
3#include <stdio.h> 3#include <stdio.h>
4#include <string.h> 4#include <string.h>
5#include <unistd.h> 5#include <unistd.h>
6#include <sys/time.h>
7#include <sys/resource.h>
6 8
7#include <linux/bpf.h> 9#include <linux/bpf.h>
8#include <linux/filter.h> 10#include <linux/filter.h>
@@ -131,11 +133,16 @@ static void test_log_bad(char *log, size_t log_len, int log_level)
131 133
132int main(int argc, char **argv) 134int main(int argc, char **argv)
133{ 135{
136 struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
134 char full_log[LOG_SIZE]; 137 char full_log[LOG_SIZE];
135 char log[LOG_SIZE]; 138 char log[LOG_SIZE];
136 size_t want_len; 139 size_t want_len;
137 int i; 140 int i;
138 141
142 /* allow unlimited locked memory to have more consistent error code */
143 if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
144 perror("Unable to lift memlock rlimit");
145
139 memset(log, 1, LOG_SIZE); 146 memset(log, 1, LOG_SIZE);
140 147
141 /* Test incorrect attr */ 148 /* Test incorrect attr */
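Lifting RLIMIT_MEMLOCK matters because, on kernels of this generation, BPF program and map allocations are charged against the locked-memory limit, so a small default turns verifier tests into spurious EPERM/ENOMEM failures. A stand-alone sketch of the same pattern, not part of the patch:

#include <stdio.h>
#include <sys/time.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };

        /* Raise the locked-memory limit before creating BPF maps or loading
         * programs so failures reflect the verifier, not memlock accounting. */
        if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
                perror("setrlimit(RLIMIT_MEMLOCK)");

        return 0;
}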
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index e57b4ac40e72..7177bea1fdfa 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -1,3 +1,4 @@
1CONFIG_USER_NS=y 1CONFIG_USER_NS=y
2CONFIG_BPF_SYSCALL=y 2CONFIG_BPF_SYSCALL=y
3CONFIG_TEST_BPF=m 3CONFIG_TEST_BPF=m
4CONFIG_NUMA=y
diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
index 66e5ce5b91f0..1aef72df20a1 100644
--- a/tools/testing/selftests/x86/ldt_gdt.c
+++ b/tools/testing/selftests/x86/ldt_gdt.c
@@ -122,8 +122,7 @@ static void check_valid_segment(uint16_t index, int ldt,
122 * NB: Different Linux versions do different things with the 122 * NB: Different Linux versions do different things with the
123 * accessed bit in set_thread_area(). 123 * accessed bit in set_thread_area().
124 */ 124 */
125 if (ar != expected_ar && 125 if (ar != expected_ar && ar != (expected_ar | AR_ACCESSED)) {
126 (ldt || ar != (expected_ar | AR_ACCESSED))) {
127 printf("[FAIL]\t%s entry %hu has AR 0x%08X but expected 0x%08X\n", 126 printf("[FAIL]\t%s entry %hu has AR 0x%08X but expected 0x%08X\n",
128 (ldt ? "LDT" : "GDT"), index, ar, expected_ar); 127 (ldt ? "LDT" : "GDT"), index, ar, expected_ar);
129 nerrs++; 128 nerrs++;
@@ -627,13 +626,10 @@ static void do_multicpu_tests(void)
627static int finish_exec_test(void) 626static int finish_exec_test(void)
628{ 627{
629 /* 628 /*
630 * In a sensible world, this would be check_invalid_segment(0, 1); 629 * Older kernel versions did inherit the LDT on exec() which is
631 * For better or for worse, though, the LDT is inherited across exec. 630 * wrong because exec() starts from a clean state.
632 * We can probably change this safely, but for now we test it.
633 */ 631 */
634 check_valid_segment(0, 1, 632 check_invalid_segment(0, 1);
635 AR_DPL3 | AR_TYPE_XRCODE | AR_S | AR_P | AR_DB,
636 42, true);
637 633
638 return nerrs ? 1 : 0; 634 return nerrs ? 1 : 0;
639} 635}
diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c
index 5727dfb15a83..c9c81614a66a 100644
--- a/tools/usb/usbip/libsrc/vhci_driver.c
+++ b/tools/usb/usbip/libsrc/vhci_driver.c
@@ -50,14 +50,14 @@ static int parse_status(const char *value)
50 50
51 while (*c != '\0') { 51 while (*c != '\0') {
52 int port, status, speed, devid; 52 int port, status, speed, devid;
53 unsigned long socket; 53 int sockfd;
54 char lbusid[SYSFS_BUS_ID_SIZE]; 54 char lbusid[SYSFS_BUS_ID_SIZE];
55 struct usbip_imported_device *idev; 55 struct usbip_imported_device *idev;
56 char hub[3]; 56 char hub[3];
57 57
58 ret = sscanf(c, "%2s %d %d %d %x %lx %31s\n", 58 ret = sscanf(c, "%2s %d %d %d %x %u %31s\n",
59 hub, &port, &status, &speed, 59 hub, &port, &status, &speed,
60 &devid, &socket, lbusid); 60 &devid, &sockfd, lbusid);
61 61
62 if (ret < 5) { 62 if (ret < 5) {
63 dbg("sscanf failed: %d", ret); 63 dbg("sscanf failed: %d", ret);
@@ -66,7 +66,7 @@ static int parse_status(const char *value)
66 66
67 dbg("hub %s port %d status %d speed %d devid %x", 67 dbg("hub %s port %d status %d speed %d devid %x",
68 hub, port, status, speed, devid); 68 hub, port, status, speed, devid);
69 dbg("socket %lx lbusid %s", socket, lbusid); 69 dbg("sockfd %u lbusid %s", sockfd, lbusid);
70 70
71 /* if a device is connected, look at it */ 71 /* if a device is connected, look at it */
72 idev = &vhci_driver->idev[port]; 72 idev = &vhci_driver->idev[port];
@@ -106,7 +106,7 @@ static int parse_status(const char *value)
106 return 0; 106 return 0;
107} 107}
108 108
109#define MAX_STATUS_NAME 16 109#define MAX_STATUS_NAME 18
110 110
111static int refresh_imported_device_list(void) 111static int refresh_imported_device_list(void)
112{ 112{
@@ -329,9 +329,17 @@ err:
329int usbip_vhci_get_free_port(uint32_t speed) 329int usbip_vhci_get_free_port(uint32_t speed)
330{ 330{
331 for (int i = 0; i < vhci_driver->nports; i++) { 331 for (int i = 0; i < vhci_driver->nports; i++) {
332 if (speed == USB_SPEED_SUPER && 332
333 vhci_driver->idev[i].hub != HUB_SPEED_SUPER) 333 switch (speed) {
334 continue; 334 case USB_SPEED_SUPER:
335 if (vhci_driver->idev[i].hub != HUB_SPEED_SUPER)
336 continue;
337 break;
338 default:
339 if (vhci_driver->idev[i].hub != HUB_SPEED_HIGH)
340 continue;
341 break;
342 }
335 343
336 if (vhci_driver->idev[i].status == VDEV_ST_NULL) 344 if (vhci_driver->idev[i].status == VDEV_ST_NULL)
337 return vhci_driver->idev[i].port; 345 return vhci_driver->idev[i].port;
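The parsing change goes together with the status attribute carrying a socket file descriptor value rather than a kernel pointer, as the socket/%lx to sockfd/%u switch indicates. A stand-alone sketch of parsing one such line; the sample line and its field values are invented, unsigned types are used so every conversion matches exactly, and it is not part of the patch:

#include <stdio.h>

int main(void)
{
        /* hypothetical line: hub port status speed devid sockfd busid */
        const char *line = "hs 0000 004 000 00010002 3 1-1";
        char hub[3], lbusid[32];
        int port, status, speed;
        unsigned int devid, sockfd;

        if (sscanf(line, "%2s %d %d %d %x %u %31s",
                   hub, &port, &status, &speed, &devid, &sockfd, lbusid) == 7)
                printf("hub %s port %d devid %x sockfd %u busid %s\n",
                       hub, port, devid, sockfd, lbusid);

        return 0;
}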
diff --git a/tools/usb/usbip/src/utils.c b/tools/usb/usbip/src/utils.c
index 2b3d6d235015..3d7b42e77299 100644
--- a/tools/usb/usbip/src/utils.c
+++ b/tools/usb/usbip/src/utils.c
@@ -30,6 +30,7 @@ int modify_match_busid(char *busid, int add)
30 char command[SYSFS_BUS_ID_SIZE + 4]; 30 char command[SYSFS_BUS_ID_SIZE + 4];
31 char match_busid_attr_path[SYSFS_PATH_MAX]; 31 char match_busid_attr_path[SYSFS_PATH_MAX];
32 int rc; 32 int rc;
33 int cmd_size;
33 34
34 snprintf(match_busid_attr_path, sizeof(match_busid_attr_path), 35 snprintf(match_busid_attr_path, sizeof(match_busid_attr_path),
35 "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME, 36 "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME,
@@ -37,12 +38,14 @@ int modify_match_busid(char *busid, int add)
37 attr_name); 38 attr_name);
38 39
39 if (add) 40 if (add)
40 snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", busid); 41 cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s",
42 busid);
41 else 43 else
42 snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", busid); 44 cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s",
45 busid);
43 46
44 rc = write_sysfs_attribute(match_busid_attr_path, command, 47 rc = write_sysfs_attribute(match_busid_attr_path, command,
45 sizeof(command)); 48 cmd_size);
46 if (rc < 0) { 49 if (rc < 0) {
47 dbg("failed to write match_busid: %s", strerror(errno)); 50 dbg("failed to write match_busid: %s", strerror(errno));
48 return -1; 51 return -1;
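Passing snprintf()'s return value to write_sysfs_attribute() means only the formatted "add <busid>" or "del <busid>" bytes are written, rather than the whole sizeof(command) buffer including whatever follows the terminator. A stand-alone sketch of the pattern; the buffer size and busid are made up, and it is not part of the patch:

#include <stdio.h>

#define SYSFS_BUS_ID_SIZE 32    /* assumed size, for the sketch only */

int main(void)
{
        char command[SYSFS_BUS_ID_SIZE + 4];
        const char *busid = "1-1";
        int cmd_size;

        /* snprintf() returns the formatted length, excluding the NUL. */
        cmd_size = snprintf(command, sizeof(command), "add %s", busid);

        printf("would write %d of %zu buffer bytes: \"%s\"\n",
               cmd_size, sizeof(command), command);
        return 0;
}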
diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c
index 38bb171aceba..e6e81305ef46 100644
--- a/tools/virtio/ringtest/ptr_ring.c
+++ b/tools/virtio/ringtest/ptr_ring.c
@@ -16,24 +16,41 @@
16#define unlikely(x) (__builtin_expect(!!(x), 0)) 16#define unlikely(x) (__builtin_expect(!!(x), 0))
17#define likely(x) (__builtin_expect(!!(x), 1)) 17#define likely(x) (__builtin_expect(!!(x), 1))
18#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a)) 18#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
19#define SIZE_MAX (~(size_t)0)
20
19typedef pthread_spinlock_t spinlock_t; 21typedef pthread_spinlock_t spinlock_t;
20 22
21typedef int gfp_t; 23typedef int gfp_t;
22static void *kmalloc(unsigned size, gfp_t gfp) 24#define __GFP_ZERO 0x1
23{
24 return memalign(64, size);
25}
26 25
27static void *kzalloc(unsigned size, gfp_t gfp) 26static void *kmalloc(unsigned size, gfp_t gfp)
28{ 27{
29 void *p = memalign(64, size); 28 void *p = memalign(64, size);
30 if (!p) 29 if (!p)
31 return p; 30 return p;
32 memset(p, 0, size);
33 31
32 if (gfp & __GFP_ZERO)
33 memset(p, 0, size);
34 return p; 34 return p;
35} 35}
36 36
37static inline void *kzalloc(unsigned size, gfp_t flags)
38{
39 return kmalloc(size, flags | __GFP_ZERO);
40}
41
42static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
43{
44 if (size != 0 && n > SIZE_MAX / size)
45 return NULL;
46 return kmalloc(n * size, flags);
47}
48
49static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
50{
51 return kmalloc_array(n, size, flags | __GFP_ZERO);
52}
53
37static void kfree(void *p) 54static void kfree(void *p)
38{ 55{
39 if (p) 56 if (p)
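The reworked shims mirror the in-kernel allocators: kmalloc() now honours __GFP_ZERO, kmalloc_array() refuses an n * size product that would overflow, and kcalloc() combines the two. A stand-alone user-space sketch of the same overflow guard on top of calloc(), not part of the patch:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Same guard as the kmalloc_array() shim above, applied to calloc(). */
static void *array_alloc(size_t n, size_t size)
{
        if (size != 0 && n > SIZE_MAX / size)
                return NULL;            /* n * size would overflow */
        return calloc(n, size);
}

int main(void)
{
        void *ok  = array_alloc(1024, sizeof(void *));
        void *bad = array_alloc(SIZE_MAX, 2);

        printf("ok=%p bad=%p\n", ok, bad);
        free(ok);
        return 0;
}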
diff --git a/tools/vm/slabinfo-gnuplot.sh b/tools/vm/slabinfo-gnuplot.sh
index 35b039864b77..0cf28aa6f21c 100644
--- a/tools/vm/slabinfo-gnuplot.sh
+++ b/tools/vm/slabinfo-gnuplot.sh
@@ -1,4 +1,4 @@
1#!/bin/sh 1#!/bin/bash
2 2
3# Sergey Senozhatsky, 2015 3# Sergey Senozhatsky, 2015
4# sergey.senozhatsky.work@gmail.com 4# sergey.senozhatsky.work@gmail.com