diff options
author | Ingo Molnar <mingo@kernel.org> | 2019-07-09 07:22:03 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2019-07-09 07:22:03 -0400 |
commit | d1d59b817939821bee149e870ce7723f61ffb512 (patch) | |
tree | bb9fe7c29717fad3ff3dac63364b63dd86b740ac /tools | |
parent | 552a031ba12a4236be107a5b082a399237758a5d (diff) | |
parent | 686cbe9e5d88ad639bbe26d963e7d5dafa1c1c28 (diff) |
Merge tag 'perf-urgent-for-mingo-5.3-20190708-2' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core
Pull perf/urgent fixes from Arnaldo Carvalho de Melo:
core:
Arnaldo Carvalho de Melo:
- Allow references to thread objects after machine__exit(), fixing a bug with
'perf sched lat' where that happens, i.e. after perf_session__delete() we
still have references to threads that were in a linked list whose head was
freed in perf_session__delete(), causing a segfault, fix it.
Jiri Olsa:
- Do not rely on errno values for precise_ip fallback, fixing the default
use case for 'perf record' on some AMD servers, when no events are specified
and we try to use "cycles:P", i.e. with the maximum precision level.
BPF:
Song Liu:
- Assign proper ff->ph in perf_event__synthesize_features(), fixing a bug
when using pipe mode, i.e. 'perf record -o -'.
tools headers:
Arnaldo Carvalho de Melo:
- Sync kvm headers with the kernel sources
perf tests:
Seeteena Thoufeek:
- Fix record+probe_libc_inet_pton.sh for powerpc64, where without the
debuginfo package for the 'ping' utility we can't resolve its symbols,
so admit getting "[unknown]" for that backtrace line.
perf python:
Arnaldo Carvalho de Melo:
- Remove -fstack-protector-strong if clang doesn't have it, fixing the build
with clang on fedora:30, oraclelinux:7, centos:7.
perf jvmti:
Jiri Olsa:
- Address gcc string overflow warning for strncpy()
build:
Arnaldo Carvalho de Melo:
- Check if gettid() is available before providing helper, as recent
versions of glibc started to provide gettid().
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'tools')
-rw-r--r-- | tools/arch/arm64/include/uapi/asm/kvm.h | 7 | ||||
-rw-r--r-- | tools/arch/x86/include/asm/cpufeatures.h | 21 | ||||
-rw-r--r-- | tools/arch/x86/include/uapi/asm/kvm.h | 31 | ||||
-rwxr-xr-x | tools/perf/tests/shell/record+probe_libc_inet_pton.sh | 2 | ||||
-rw-r--r-- | tools/perf/util/evsel.c | 10 | ||||
-rw-r--r-- | tools/perf/util/header.c | 1 | ||||
-rw-r--r-- | tools/perf/util/machine.c | 25 | ||||
-rw-r--r-- | tools/perf/util/thread.c | 23 |
8 files changed, 90 insertions, 30 deletions
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h index 7b7ac0f6cec9..d819a3e8b552 100644 --- a/tools/arch/arm64/include/uapi/asm/kvm.h +++ b/tools/arch/arm64/include/uapi/asm/kvm.h | |||
@@ -260,6 +260,13 @@ struct kvm_vcpu_events { | |||
260 | KVM_REG_SIZE_U256 | \ | 260 | KVM_REG_SIZE_U256 | \ |
261 | ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1))) | 261 | ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1))) |
262 | 262 | ||
263 | /* | ||
264 | * Register values for KVM_REG_ARM64_SVE_ZREG(), KVM_REG_ARM64_SVE_PREG() and | ||
265 | * KVM_REG_ARM64_SVE_FFR() are represented in memory in an endianness- | ||
266 | * invariant layout which differs from the layout used for the FPSIMD | ||
267 | * V-registers on big-endian systems: see sigcontext.h for more explanation. | ||
268 | */ | ||
269 | |||
263 | #define KVM_ARM64_SVE_VQ_MIN __SVE_VQ_MIN | 270 | #define KVM_ARM64_SVE_VQ_MIN __SVE_VQ_MIN |
264 | #define KVM_ARM64_SVE_VQ_MAX __SVE_VQ_MAX | 271 | #define KVM_ARM64_SVE_VQ_MAX __SVE_VQ_MAX |
265 | 272 | ||
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 75f27ee2c263..998c2cc08363 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h | |||
@@ -239,12 +239,14 @@ | |||
239 | #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ | 239 | #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ |
240 | #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ | 240 | #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ |
241 | #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ | 241 | #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ |
242 | #define X86_FEATURE_FDP_EXCPTN_ONLY ( 9*32+ 6) /* "" FPU data pointer updated only on x87 exceptions */ | ||
242 | #define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ | 243 | #define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ |
243 | #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */ | 244 | #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */ |
244 | #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */ | 245 | #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */ |
245 | #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ | 246 | #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ |
246 | #define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ | 247 | #define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ |
247 | #define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ | 248 | #define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ |
249 | #define X86_FEATURE_ZERO_FCS_FDS ( 9*32+13) /* "" Zero out FPU CS and FPU DS */ | ||
248 | #define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ | 250 | #define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ |
249 | #define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */ | 251 | #define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */ |
250 | #define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ | 252 | #define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ |
@@ -269,13 +271,19 @@ | |||
269 | #define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */ | 271 | #define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */ |
270 | #define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */ | 272 | #define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */ |
271 | 273 | ||
272 | /* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */ | 274 | /* |
273 | #define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */ | 275 | * Extended auxiliary flags: Linux defined - for features scattered in various |
276 | * CPUID levels like 0xf, etc. | ||
277 | * | ||
278 | * Reuse free bits when adding new feature flags! | ||
279 | */ | ||
280 | #define X86_FEATURE_CQM_LLC (11*32+ 0) /* LLC QoS if 1 */ | ||
281 | #define X86_FEATURE_CQM_OCCUP_LLC (11*32+ 1) /* LLC occupancy monitoring */ | ||
282 | #define X86_FEATURE_CQM_MBM_TOTAL (11*32+ 2) /* LLC Total MBM monitoring */ | ||
283 | #define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */ | ||
274 | 284 | ||
275 | /* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */ | 285 | /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ |
276 | #define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring */ | 286 | #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ |
277 | #define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */ | ||
278 | #define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */ | ||
279 | 287 | ||
280 | /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ | 288 | /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ |
281 | #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ | 289 | #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ |
@@ -322,6 +330,7 @@ | |||
322 | #define X86_FEATURE_UMIP (16*32+ 2) /* User Mode Instruction Protection */ | 330 | #define X86_FEATURE_UMIP (16*32+ 2) /* User Mode Instruction Protection */ |
323 | #define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ | 331 | #define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ |
324 | #define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ | 332 | #define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ |
333 | #define X86_FEATURE_WAITPKG (16*32+ 5) /* UMONITOR/UMWAIT/TPAUSE Instructions */ | ||
325 | #define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */ | 334 | #define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */ |
326 | #define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */ | 335 | #define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */ |
327 | #define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */ | 336 | #define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */ |
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h index 24a8cd229df6..d6ab5b4d15e5 100644 --- a/tools/arch/x86/include/uapi/asm/kvm.h +++ b/tools/arch/x86/include/uapi/asm/kvm.h | |||
@@ -383,6 +383,9 @@ struct kvm_sync_regs { | |||
383 | #define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2) | 383 | #define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2) |
384 | #define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3) | 384 | #define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3) |
385 | 385 | ||
386 | #define KVM_STATE_NESTED_FORMAT_VMX 0 | ||
387 | #define KVM_STATE_NESTED_FORMAT_SVM 1 /* unused */ | ||
388 | |||
386 | #define KVM_STATE_NESTED_GUEST_MODE 0x00000001 | 389 | #define KVM_STATE_NESTED_GUEST_MODE 0x00000001 |
387 | #define KVM_STATE_NESTED_RUN_PENDING 0x00000002 | 390 | #define KVM_STATE_NESTED_RUN_PENDING 0x00000002 |
388 | #define KVM_STATE_NESTED_EVMCS 0x00000004 | 391 | #define KVM_STATE_NESTED_EVMCS 0x00000004 |
@@ -390,7 +393,14 @@ struct kvm_sync_regs { | |||
390 | #define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001 | 393 | #define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001 |
391 | #define KVM_STATE_NESTED_SMM_VMXON 0x00000002 | 394 | #define KVM_STATE_NESTED_SMM_VMXON 0x00000002 |
392 | 395 | ||
393 | struct kvm_vmx_nested_state { | 396 | #define KVM_STATE_NESTED_VMX_VMCS_SIZE 0x1000 |
397 | |||
398 | struct kvm_vmx_nested_state_data { | ||
399 | __u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE]; | ||
400 | __u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE]; | ||
401 | }; | ||
402 | |||
403 | struct kvm_vmx_nested_state_hdr { | ||
394 | __u64 vmxon_pa; | 404 | __u64 vmxon_pa; |
395 | __u64 vmcs12_pa; | 405 | __u64 vmcs12_pa; |
396 | 406 | ||
@@ -401,24 +411,25 @@ struct kvm_vmx_nested_state { | |||
401 | 411 | ||
402 | /* for KVM_CAP_NESTED_STATE */ | 412 | /* for KVM_CAP_NESTED_STATE */ |
403 | struct kvm_nested_state { | 413 | struct kvm_nested_state { |
404 | /* KVM_STATE_* flags */ | ||
405 | __u16 flags; | 414 | __u16 flags; |
406 | |||
407 | /* 0 for VMX, 1 for SVM. */ | ||
408 | __u16 format; | 415 | __u16 format; |
409 | |||
410 | /* 128 for SVM, 128 + VMCS size for VMX. */ | ||
411 | __u32 size; | 416 | __u32 size; |
412 | 417 | ||
413 | union { | 418 | union { |
414 | /* VMXON, VMCS */ | 419 | struct kvm_vmx_nested_state_hdr vmx; |
415 | struct kvm_vmx_nested_state vmx; | ||
416 | 420 | ||
417 | /* Pad the header to 128 bytes. */ | 421 | /* Pad the header to 128 bytes. */ |
418 | __u8 pad[120]; | 422 | __u8 pad[120]; |
419 | }; | 423 | } hdr; |
420 | 424 | ||
421 | __u8 data[0]; | 425 | /* |
426 | * Define data region as 0 bytes to preserve backwards-compatability | ||
427 | * to old definition of kvm_nested_state in order to avoid changing | ||
428 | * KVM_{GET,PUT}_NESTED_STATE ioctl values. | ||
429 | */ | ||
430 | union { | ||
431 | struct kvm_vmx_nested_state_data vmx[0]; | ||
432 | } data; | ||
422 | }; | 433 | }; |
423 | 434 | ||
424 | #endif /* _ASM_X86_KVM_H */ | 435 | #endif /* _ASM_X86_KVM_H */ |
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh index 9b7632ff70aa..f12a4e217968 100755 --- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh +++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh | |||
@@ -45,7 +45,7 @@ trace_libc_inet_pton_backtrace() { | |||
45 | eventattr='max-stack=4' | 45 | eventattr='max-stack=4' |
46 | echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected | 46 | echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected |
47 | echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected | 47 | echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected |
48 | echo ".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected | 48 | echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected |
49 | ;; | 49 | ;; |
50 | *) | 50 | *) |
51 | eventattr='max-stack=3' | 51 | eventattr='max-stack=3' |
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 5ab31a4a658d..7fb4ae82f34c 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c | |||
@@ -1800,14 +1800,8 @@ static int perf_event_open(struct perf_evsel *evsel, | |||
1800 | if (fd >= 0) | 1800 | if (fd >= 0) |
1801 | break; | 1801 | break; |
1802 | 1802 | ||
1803 | /* | 1803 | /* Do not try less precise if not requested. */ |
1804 | * Do quick precise_ip fallback if: | 1804 | if (!evsel->precise_max) |
1805 | * - there is precise_ip set in perf_event_attr | ||
1806 | * - maximum precise is requested | ||
1807 | * - sys_perf_event_open failed with ENOTSUP error, | ||
1808 | * which is associated with wrong precise_ip | ||
1809 | */ | ||
1810 | if (!precise_ip || !evsel->precise_max || (errno != ENOTSUP)) | ||
1811 | break; | 1805 | break; |
1812 | 1806 | ||
1813 | /* | 1807 | /* |
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index bf26dc85eaaa..6a93ff5d8db5 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c | |||
@@ -3683,6 +3683,7 @@ int perf_event__synthesize_features(struct perf_tool *tool, | |||
3683 | return -ENOMEM; | 3683 | return -ENOMEM; |
3684 | 3684 | ||
3685 | ff.size = sz - sz_hdr; | 3685 | ff.size = sz - sz_hdr; |
3686 | ff.ph = &session->header; | ||
3686 | 3687 | ||
3687 | for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { | 3688 | for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { |
3688 | if (!feat_ops[feat].synthesize) { | 3689 | if (!feat_ops[feat].synthesize) { |
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 1b3d7265bca9..147ed85ea2bc 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c | |||
@@ -210,6 +210,18 @@ void machine__exit(struct machine *machine) | |||
210 | 210 | ||
211 | for (i = 0; i < THREADS__TABLE_SIZE; i++) { | 211 | for (i = 0; i < THREADS__TABLE_SIZE; i++) { |
212 | struct threads *threads = &machine->threads[i]; | 212 | struct threads *threads = &machine->threads[i]; |
213 | struct thread *thread, *n; | ||
214 | /* | ||
215 | * Forget about the dead, at this point whatever threads were | ||
216 | * left in the dead lists better have a reference count taken | ||
217 | * by who is using them, and then, when they drop those references | ||
218 | * and it finally hits zero, thread__put() will check and see that | ||
219 | * its not in the dead threads list and will not try to remove it | ||
220 | * from there, just calling thread__delete() straight away. | ||
221 | */ | ||
222 | list_for_each_entry_safe(thread, n, &threads->dead, node) | ||
223 | list_del_init(&thread->node); | ||
224 | |||
213 | exit_rwsem(&threads->lock); | 225 | exit_rwsem(&threads->lock); |
214 | } | 226 | } |
215 | } | 227 | } |
@@ -1759,9 +1771,11 @@ static void __machine__remove_thread(struct machine *machine, struct thread *th, | |||
1759 | if (threads->last_match == th) | 1771 | if (threads->last_match == th) |
1760 | threads__set_last_match(threads, NULL); | 1772 | threads__set_last_match(threads, NULL); |
1761 | 1773 | ||
1762 | BUG_ON(refcount_read(&th->refcnt) == 0); | ||
1763 | if (lock) | 1774 | if (lock) |
1764 | down_write(&threads->lock); | 1775 | down_write(&threads->lock); |
1776 | |||
1777 | BUG_ON(refcount_read(&th->refcnt) == 0); | ||
1778 | |||
1765 | rb_erase_cached(&th->rb_node, &threads->entries); | 1779 | rb_erase_cached(&th->rb_node, &threads->entries); |
1766 | RB_CLEAR_NODE(&th->rb_node); | 1780 | RB_CLEAR_NODE(&th->rb_node); |
1767 | --threads->nr; | 1781 | --threads->nr; |
@@ -1771,9 +1785,16 @@ static void __machine__remove_thread(struct machine *machine, struct thread *th, | |||
1771 | * will be called and we will remove it from the dead_threads list. | 1785 | * will be called and we will remove it from the dead_threads list. |
1772 | */ | 1786 | */ |
1773 | list_add_tail(&th->node, &threads->dead); | 1787 | list_add_tail(&th->node, &threads->dead); |
1788 | |||
1789 | /* | ||
1790 | * We need to do the put here because if this is the last refcount, | ||
1791 | * then we will be touching the threads->dead head when removing the | ||
1792 | * thread. | ||
1793 | */ | ||
1794 | thread__put(th); | ||
1795 | |||
1774 | if (lock) | 1796 | if (lock) |
1775 | up_write(&threads->lock); | 1797 | up_write(&threads->lock); |
1776 | thread__put(th); | ||
1777 | } | 1798 | } |
1778 | 1799 | ||
1779 | void machine__remove_thread(struct machine *machine, struct thread *th) | 1800 | void machine__remove_thread(struct machine *machine, struct thread *th) |
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index aab7807d445f..3e29a4e8b5e6 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c | |||
@@ -125,10 +125,27 @@ void thread__put(struct thread *thread) | |||
125 | { | 125 | { |
126 | if (thread && refcount_dec_and_test(&thread->refcnt)) { | 126 | if (thread && refcount_dec_and_test(&thread->refcnt)) { |
127 | /* | 127 | /* |
128 | * Remove it from the dead_threads list, as last reference | 128 | * Remove it from the dead threads list, as last reference is |
129 | * is gone. | 129 | * gone, if it is in a dead threads list. |
130 | * | ||
131 | * We may not be there anymore if say, the machine where it was | ||
132 | * stored was already deleted, so we already removed it from | ||
133 | * the dead threads and some other piece of code still keeps a | ||
134 | * reference. | ||
135 | * | ||
136 | * This is what 'perf sched' does and finally drops it in | ||
137 | * perf_sched__lat(), where it calls perf_sched__read_events(), | ||
138 | * that processes the events by creating a session and deleting | ||
139 | * it, which ends up destroying the list heads for the dead | ||
140 | * threads, but before it does that it removes all threads from | ||
141 | * it using list_del_init(). | ||
142 | * | ||
143 | * So we need to check here if it is in a dead threads list and | ||
144 | * if so, remove it before finally deleting the thread, to avoid | ||
145 | * an use after free situation. | ||
130 | */ | 146 | */ |
131 | list_del_init(&thread->node); | 147 | if (!list_empty(&thread->node)) |
148 | list_del_init(&thread->node); | ||
132 | thread__delete(thread); | 149 | thread__delete(thread); |
133 | } | 150 | } |
134 | } | 151 | } |