Diffstat (limited to 'tools')
-rw-r--r--tools/Makefile22
-rw-r--r--tools/arch/arm/include/uapi/asm/kvm.h7
-rw-r--r--tools/arch/arm64/include/uapi/asm/bpf_perf_event.h9
-rw-r--r--tools/arch/arm64/include/uapi/asm/kvm.h7
-rw-r--r--tools/arch/s390/include/uapi/asm/bpf_perf_event.h9
-rw-r--r--tools/arch/s390/include/uapi/asm/kvm.h4
-rw-r--r--tools/arch/s390/include/uapi/asm/kvm_perf.h4
-rw-r--r--tools/arch/s390/include/uapi/asm/perf_regs.h44
-rw-r--r--tools/arch/s390/include/uapi/asm/ptrace.h457
-rw-r--r--tools/arch/x86/include/asm/atomic.h2
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h538
-rw-r--r--tools/arch/x86/include/asm/disabled-features.h8
-rw-r--r--tools/bpf/Makefile (renamed from tools/net/Makefile)18
-rw-r--r--tools/bpf/bpf_asm.c (renamed from tools/net/bpf_asm.c)0
-rw-r--r--tools/bpf/bpf_dbg.c (renamed from tools/net/bpf_dbg.c)0
-rw-r--r--tools/bpf/bpf_exp.l (renamed from tools/net/bpf_exp.l)0
-rw-r--r--tools/bpf/bpf_exp.y (renamed from tools/net/bpf_exp.y)0
-rw-r--r--tools/bpf/bpf_jit_disasm.c (renamed from tools/net/bpf_jit_disasm.c)3
-rw-r--r--tools/bpf/bpftool/Documentation/Makefile34
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-map.rst131
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst150
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool.rst56
-rw-r--r--tools/bpf/bpftool/Makefile93
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool354
-rw-r--r--tools/bpf/bpftool/common.c405
-rw-r--r--tools/bpf/bpftool/jit_disasm.c162
-rw-r--r--tools/bpf/bpftool/json_writer.c356
-rw-r--r--tools/bpf/bpftool/json_writer.h72
-rw-r--r--tools/bpf/bpftool/main.c343
-rw-r--r--tools/bpf/bpftool/main.h123
-rw-r--r--tools/bpf/bpftool/map.c901
-rw-r--r--tools/bpf/bpftool/prog.c674
-rw-r--r--tools/gpio/gpio-utils.c17
-rw-r--r--tools/hv/hv_kvp_daemon.c70
-rw-r--r--tools/include/asm-generic/atomic-gcc.h2
-rw-r--r--tools/include/linux/compiler.h21
-rw-r--r--tools/include/linux/kmemcheck.h9
-rw-r--r--tools/include/linux/lockdep.h1
-rw-r--r--tools/include/linux/poison.h5
-rw-r--r--tools/include/uapi/asm-generic/bpf_perf_event.h9
-rw-r--r--tools/include/uapi/asm-generic/mman-common.h1
-rw-r--r--tools/include/uapi/asm-generic/mman.h1
-rw-r--r--tools/include/uapi/asm/bpf_perf_event.h7
-rw-r--r--tools/include/uapi/drm/drm.h41
-rw-r--r--tools/include/uapi/drm/i915_drm.h33
-rw-r--r--tools/include/uapi/linux/bpf.h152
-rw-r--r--tools/include/uapi/linux/bpf_perf_event.h6
-rw-r--r--tools/include/uapi/linux/kcmp.h28
-rw-r--r--tools/include/uapi/linux/kvm.h5
-rw-r--r--tools/include/uapi/linux/perf_event.h1
-rw-r--r--tools/include/uapi/linux/prctl.h210
-rwxr-xr-xtools/kvm/kvm_stat/kvm_stat104
-rw-r--r--tools/kvm/kvm_stat/kvm_stat.txt4
-rw-r--r--tools/lib/bpf/bpf.c89
-rw-r--r--tools/lib/bpf/bpf.h27
-rw-r--r--tools/lib/bpf/libbpf.c179
-rw-r--r--tools/lib/bpf/libbpf.h1
-rw-r--r--tools/lib/traceevent/parse-filter.c6
-rw-r--r--tools/objtool/.gitignore2
-rw-r--r--tools/objtool/Makefile30
-rw-r--r--tools/objtool/arch/x86/Build10
-rw-r--r--tools/objtool/arch/x86/decode.c8
-rw-r--r--tools/objtool/arch/x86/include/asm/inat.h (renamed from tools/objtool/arch/x86/insn/inat.h)12
-rw-r--r--tools/objtool/arch/x86/include/asm/inat_types.h (renamed from tools/objtool/arch/x86/insn/inat_types.h)0
-rw-r--r--tools/objtool/arch/x86/include/asm/insn.h (renamed from tools/objtool/arch/x86/insn/insn.h)2
-rw-r--r--tools/objtool/arch/x86/include/asm/orc_types.h (renamed from tools/objtool/orc_types.h)0
-rw-r--r--tools/objtool/arch/x86/lib/inat.c (renamed from tools/objtool/arch/x86/insn/inat.c)2
-rw-r--r--tools/objtool/arch/x86/lib/insn.c (renamed from tools/objtool/arch/x86/insn/insn.c)4
-rw-r--r--tools/objtool/arch/x86/lib/x86-opcode-map.txt (renamed from tools/objtool/arch/x86/insn/x86-opcode-map.txt)15
-rw-r--r--tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk (renamed from tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk)0
-rw-r--r--tools/objtool/builtin-orc.c4
-rw-r--r--tools/objtool/orc.h2
-rw-r--r--tools/objtool/orc_dump.c7
-rw-r--r--tools/objtool/orc_gen.c2
-rwxr-xr-xtools/objtool/sync-check.sh29
-rw-r--r--tools/perf/Documentation/perf-list.txt11
-rw-r--r--tools/perf/Documentation/perf-record.txt2
-rw-r--r--tools/perf/Documentation/perf-report.txt3
-rw-r--r--tools/perf/Documentation/perf-sched.txt8
-rw-r--r--tools/perf/Documentation/perf-script.txt11
-rw-r--r--tools/perf/Documentation/perf-stat.txt7
-rw-r--r--tools/perf/Documentation/perf-top.txt3
-rw-r--r--tools/perf/Makefile.config15
-rw-r--r--tools/perf/Makefile.perf39
-rw-r--r--tools/perf/arch/arm/annotate/instructions.c3
-rw-r--r--tools/perf/arch/arm64/annotate/instructions.c3
-rw-r--r--tools/perf/arch/powerpc/annotate/instructions.c4
-rw-r--r--tools/perf/arch/s390/Makefile1
-rw-r--r--tools/perf/arch/s390/annotate/instructions.c4
-rw-r--r--tools/perf/arch/s390/include/dwarf-regs-table.h71
-rw-r--r--tools/perf/arch/s390/include/perf_regs.h95
-rw-r--r--tools/perf/arch/s390/util/Build3
-rw-r--r--tools/perf/arch/s390/util/auxtrace.c118
-rw-r--r--tools/perf/arch/s390/util/dwarf-regs.c41
-rw-r--r--tools/perf/arch/s390/util/unwind-libdw.c63
-rw-r--r--tools/perf/arch/x86/annotate/instructions.c14
-rw-r--r--tools/perf/arch/x86/include/arch-tests.h1
-rw-r--r--tools/perf/arch/x86/tests/Build1
-rw-r--r--tools/perf/arch/x86/tests/arch-tests.c4
-rw-r--r--tools/perf/bench/numa.c56
-rw-r--r--tools/perf/builtin-annotate.c10
-rw-r--r--tools/perf/builtin-buildid-cache.c8
-rw-r--r--tools/perf/builtin-buildid-list.c16
-rw-r--r--tools/perf/builtin-c2c.c11
-rw-r--r--tools/perf/builtin-config.c22
-rw-r--r--tools/perf/builtin-diff.c18
-rw-r--r--tools/perf/builtin-evlist.c12
-rw-r--r--tools/perf/builtin-help.c4
-rw-r--r--tools/perf/builtin-inject.c36
-rw-r--r--tools/perf/builtin-kmem.c13
-rw-r--r--tools/perf/builtin-kvm.c18
-rw-r--r--tools/perf/builtin-list.c7
-rw-r--r--tools/perf/builtin-lock.c12
-rw-r--r--tools/perf/builtin-mem.c13
-rw-r--r--tools/perf/builtin-record.c201
-rw-r--r--tools/perf/builtin-report.c17
-rw-r--r--tools/perf/builtin-sched.c28
-rw-r--r--tools/perf/builtin-script.c745
-rw-r--r--tools/perf/builtin-stat.c121
-rw-r--r--tools/perf/builtin-timechart.c18
-rw-r--r--tools/perf/builtin-top.c49
-rw-r--r--tools/perf/builtin-trace.c82
-rwxr-xr-xtools/perf/check-headers.sh9
-rw-r--r--tools/perf/jvmti/jvmti_agent.c16
-rw-r--r--tools/perf/jvmti/jvmti_agent.h7
-rw-r--r--tools/perf/jvmti/libjvmti.c147
-rw-r--r--tools/perf/perf.h1
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json164
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json164
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json164
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/cache.json1453
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json62
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/memory.json38
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/other.json98
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json544
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json218
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json158
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json158
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json164
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json164
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json140
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv1
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json140
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json164
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json164
-rw-r--r--tools/perf/pmu-events/jevents.c24
-rw-r--r--tools/perf/pmu-events/jevents.h2
-rw-r--r--tools/perf/pmu-events/pmu-events.h1
-rw-r--r--tools/perf/tests/attr.c2
-rw-r--r--tools/perf/tests/attr.py6
-rw-r--r--tools/perf/tests/attr/base-record2
-rw-r--r--tools/perf/tests/attr/test-record-group1
-rw-r--r--tools/perf/tests/attr/test-record-group-sampling2
-rw-r--r--tools/perf/tests/attr/test-record-group11
-rw-r--r--tools/perf/tests/attr/test-stat-C01
-rw-r--r--tools/perf/tests/attr/test-stat-basic1
-rw-r--r--tools/perf/tests/attr/test-stat-default4
-rw-r--r--tools/perf/tests/attr/test-stat-detailed-18
-rw-r--r--tools/perf/tests/attr/test-stat-detailed-213
-rw-r--r--tools/perf/tests/attr/test-stat-detailed-313
-rw-r--r--tools/perf/tests/attr/test-stat-group2
-rw-r--r--tools/perf/tests/attr/test-stat-group12
-rw-r--r--tools/perf/tests/attr/test-stat-no-inherit1
-rw-r--r--tools/perf/tests/builtin-test.c1
-rw-r--r--tools/perf/tests/mmap-thread-lookup.c2
-rwxr-xr-xtools/perf/tests/shell/trace+probe_libc_inet_pton.sh7
-rwxr-xr-xtools/perf/tests/shell/trace+probe_vfs_getname.sh6
-rw-r--r--tools/perf/tests/task-exit.c4
-rw-r--r--tools/perf/tests/topology.c22
-rw-r--r--tools/perf/trace/beauty/Build2
-rw-r--r--tools/perf/trace/beauty/beauty.h18
-rw-r--r--tools/perf/trace/beauty/kcmp.c44
-rwxr-xr-xtools/perf/trace/beauty/kcmp_type.sh10
-rwxr-xr-xtools/perf/trace/beauty/madvise_behavior.sh10
-rw-r--r--tools/perf/trace/beauty/mmap.c41
-rw-r--r--tools/perf/trace/beauty/prctl.c82
-rwxr-xr-xtools/perf/trace/beauty/prctl_option.sh17
-rw-r--r--tools/perf/ui/browsers/hists.c180
-rw-r--r--tools/perf/ui/progress.c6
-rw-r--r--tools/perf/ui/progress.h12
-rw-r--r--tools/perf/ui/stdio/hist.c77
-rw-r--r--tools/perf/ui/tui/progress.c32
-rw-r--r--tools/perf/util/Build3
-rw-r--r--tools/perf/util/annotate.c28
-rw-r--r--tools/perf/util/auxtrace.c4
-rw-r--r--tools/perf/util/auxtrace.h4
-rw-r--r--tools/perf/util/callchain.c179
-rw-r--r--tools/perf/util/callchain.h6
-rw-r--r--tools/perf/util/comm.c18
-rw-r--r--tools/perf/util/config.c5
-rw-r--r--tools/perf/util/data-convert-bt.c12
-rw-r--r--tools/perf/util/data.c95
-rw-r--r--tools/perf/util/data.h38
-rw-r--r--tools/perf/util/debug.c31
-rw-r--r--tools/perf/util/dso.c20
-rw-r--r--tools/perf/util/dso.h6
-rw-r--r--tools/perf/util/event.c162
-rw-r--r--tools/perf/util/event.h3
-rw-r--r--tools/perf/util/evlist.c262
-rw-r--r--tools/perf/util/evlist.h79
-rw-r--r--tools/perf/util/evsel.c21
-rw-r--r--tools/perf/util/evsel.h5
-rw-r--r--tools/perf/util/evsel_fprintf.c37
-rw-r--r--tools/perf/util/header.c20
-rw-r--r--tools/perf/util/hist.c7
-rw-r--r--tools/perf/util/intel-bts.c6
-rw-r--r--tools/perf/util/intel-pt-decoder/inat.h10
-rw-r--r--tools/perf/util/intel-pt-decoder/x86-opcode-map.txt15
-rw-r--r--tools/perf/util/intel-pt.c6
-rw-r--r--tools/perf/util/jit.h2
-rw-r--r--tools/perf/util/jitdump.c10
-rw-r--r--tools/perf/util/machine.c231
-rw-r--r--tools/perf/util/machine.h33
-rw-r--r--tools/perf/util/map.c34
-rw-r--r--tools/perf/util/map.h3
-rw-r--r--tools/perf/util/metricgroup.c490
-rw-r--r--tools/perf/util/metricgroup.h31
-rw-r--r--tools/perf/util/mmap.c352
-rw-r--r--tools/perf/util/mmap.h97
-rw-r--r--tools/perf/util/namespaces.c1
-rw-r--r--tools/perf/util/namespaces.h5
-rw-r--r--tools/perf/util/parse-events.c31
-rw-r--r--tools/perf/util/parse-events.h6
-rw-r--r--tools/perf/util/parse-events.l3
-rw-r--r--tools/perf/util/pmu.c60
-rw-r--r--tools/perf/util/pmu.h2
-rw-r--r--tools/perf/util/print_binary.c30
-rw-r--r--tools/perf/util/print_binary.h18
-rw-r--r--tools/perf/util/probe-file.c1
-rw-r--r--tools/perf/util/python-ext-sources1
-rw-r--r--tools/perf/util/rb_resort.h5
-rw-r--r--tools/perf/util/rwsem.c32
-rw-r--r--tools/perf/util/rwsem.h19
-rw-r--r--tools/perf/util/session.c46
-rw-r--r--tools/perf/util/session.h6
-rw-r--r--tools/perf/util/sort.c6
-rw-r--r--tools/perf/util/sort.h1
-rw-r--r--tools/perf/util/srcline.c296
-rw-r--r--tools/perf/util/srcline.h26
-rw-r--r--tools/perf/util/stat-shadow.c158
-rw-r--r--tools/perf/util/stat.c24
-rw-r--r--tools/perf/util/stat.h6
-rw-r--r--tools/perf/util/symbol.c9
-rw-r--r--tools/perf/util/symbol.h2
-rw-r--r--tools/perf/util/thread.c57
-rw-r--r--tools/perf/util/thread.h3
-rw-r--r--tools/perf/util/top.h1
-rw-r--r--tools/perf/util/trace-event-info.c1
-rw-r--r--tools/perf/util/trace-event-read.c1
-rw-r--r--tools/perf/util/util.c16
-rw-r--r--tools/perf/util/util.h7
-rw-r--r--tools/perf/util/vdso.c4
-rw-r--r--tools/perf/util/zlib.c1
-rw-r--r--tools/power/acpi/tools/acpidump/Makefile1
-rw-r--r--tools/power/acpi/tools/acpidump/apdump.c3
-rw-r--r--tools/power/acpi/tools/acpidump/apmain.c4
-rw-r--r--tools/power/cpupower/.gitignore3
-rw-r--r--tools/power/cpupower/Makefile8
-rw-r--r--tools/power/cpupower/bench/system.c2
-rw-r--r--tools/power/cpupower/utils/cpufreq-info.c2
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c9
-rw-r--r--tools/scripts/Makefile.include2
-rw-r--r--tools/testing/nvdimm/Kbuild1
-rw-r--r--tools/testing/nvdimm/test/nfit.c319
-rw-r--r--tools/testing/nvdimm/test/nfit_test.h52
-rw-r--r--tools/testing/radix-tree/multiorder.c2
-rw-r--r--tools/testing/scatterlist/Makefile30
-rw-r--r--tools/testing/scatterlist/linux/mm.h125
-rw-r--r--tools/testing/scatterlist/main.c79
-rw-r--r--tools/testing/selftests/Makefile3
-rw-r--r--tools/testing/selftests/android/Makefile46
-rw-r--r--tools/testing/selftests/android/ion/.gitignore2
-rw-r--r--tools/testing/selftests/android/ion/Makefile16
-rw-r--r--tools/testing/selftests/android/ion/README101
-rw-r--r--tools/testing/selftests/android/ion/config4
-rw-r--r--tools/testing/selftests/android/ion/ion.h143
-rwxr-xr-xtools/testing/selftests/android/ion/ion_test.sh55
-rw-r--r--tools/testing/selftests/android/ion/ionapp_export.c135
-rw-r--r--tools/testing/selftests/android/ion/ionapp_import.c88
-rw-r--r--tools/testing/selftests/android/ion/ionutils.c259
-rw-r--r--tools/testing/selftests/android/ion/ionutils.h55
-rw-r--r--tools/testing/selftests/android/ion/ipcsocket.c227
-rw-r--r--tools/testing/selftests/android/ion/ipcsocket.h35
-rwxr-xr-xtools/testing/selftests/android/run.sh3
-rw-r--r--tools/testing/selftests/bpf/Makefile28
-rw-r--r--tools/testing/selftests/bpf/bpf_helpers.h67
-rw-r--r--tools/testing/selftests/bpf/cgroup_helpers.c178
-rw-r--r--tools/testing/selftests/bpf/cgroup_helpers.h17
-rw-r--r--tools/testing/selftests/bpf/dev_cgroup.c60
-rw-r--r--tools/testing/selftests/bpf/sockmap_parse_prog.c3
-rw-r--r--tools/testing/selftests/bpf/sockmap_verdict_prog.c2
-rw-r--r--tools/testing/selftests/bpf/test_dev_cgroup.c93
-rw-r--r--tools/testing/selftests/bpf/test_lpm_map.c201
-rw-r--r--tools/testing/selftests/bpf/test_maps.c48
-rw-r--r--tools/testing/selftests/bpf/test_progs.c197
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c1623
-rw-r--r--tools/testing/selftests/bpf/test_verifier_log.c178
-rw-r--r--tools/testing/selftests/bpf/test_xdp_meta.c53
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_meta.sh51
-rw-r--r--tools/testing/selftests/breakpoints/breakpoint_test_arm64.c1
-rw-r--r--tools/testing/selftests/cpu-hotplug/config1
-rw-r--r--tools/testing/selftests/exec/execveat.c27
-rwxr-xr-xtools/testing/selftests/firmware/fw_fallback.sh38
-rwxr-xr-xtools/testing/selftests/firmware/fw_filesystem.sh34
-rw-r--r--tools/testing/selftests/ftrace/config4
-rwxr-xr-xtools/testing/selftests/ftrace/ftracetest7
-rw-r--r--tools/testing/selftests/ftrace/test.d/00basic/basic4.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/event/event-enable.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/event/event-pid.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter-stack.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc5
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_profiler.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc4
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/instances/instance-event.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/instances/instance.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/busy_check.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_args.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/template1
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-eventonoff.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-filter.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-stacktrace.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-traceonoff.tc2
-rw-r--r--tools/testing/selftests/memfd/memfd_test.c4
-rw-r--r--tools/testing/selftests/memory-hotplug/Makefile4
-rw-r--r--tools/testing/selftests/net/config1
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.sh271
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/context_switch.c17
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr.h2
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_default_test.c2
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c6
-rw-r--r--tools/testing/selftests/powerpc/tm/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/tm/Makefile3
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-unavailable.c371
-rw-r--r--tools/testing/selftests/powerpc/tm/tm.h5
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/config_override.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configcheck.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configinit.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-build.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh4
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-build.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-torture.sh2
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h5
-rw-r--r--tools/testing/selftests/seccomp/.gitignore1
-rw-r--r--tools/testing/selftests/tc-testing/.gitignore1
-rw-r--r--tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt12
-rw-r--r--tools/testing/selftests/tc-testing/creating-testcases/example.json55
-rw-r--r--tools/testing/selftests/tc-testing/creating-testcases/template.json15
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/gact.json469
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/ife.json52
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json223
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/police.json527
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/simple.json130
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json320
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json372
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/tests.json1165
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.py29
-rw-r--r--tools/testing/selftests/tc-testing/tdc_config.py14
-rw-r--r--tools/testing/selftests/tc-testing/tdc_config_local_template.py23
-rw-r--r--tools/testing/selftests/tc-testing/tdc_helper.py4
-rw-r--r--tools/testing/selftests/timers/.gitignore2
-rw-r--r--tools/testing/selftests/vDSO/vdso_test.c19
-rw-r--r--tools/testing/selftests/vm/.gitignore2
-rw-r--r--tools/testing/selftests/vm/Makefile1
-rw-r--r--tools/testing/selftests/vm/gup_benchmark.c91
-rw-r--r--tools/testing/selftests/x86/5lvl.c177
-rw-r--r--tools/testing/selftests/x86/Makefile2
-rw-r--r--tools/testing/selftests/x86/entry_from_vm86.c89
-rw-r--r--tools/testing/selftests/x86/mpx-hw.h4
-rw-r--r--tools/testing/selftests/x86/pkey-helpers.h5
-rw-r--r--tools/testing/selftests/x86/protection_keys.c10
-rw-r--r--tools/testing/vsock/.gitignore2
-rw-r--r--tools/testing/vsock/Makefile9
-rw-r--r--tools/testing/vsock/README36
-rw-r--r--tools/testing/vsock/control.c219
-rw-r--r--tools/testing/vsock/control.h13
-rw-r--r--tools/testing/vsock/timeout.c64
-rw-r--r--tools/testing/vsock/timeout.h14
-rw-r--r--tools/testing/vsock/vsock_diag_test.c681
-rw-r--r--tools/thermal/tmon/Makefile18
-rw-r--r--tools/usb/usbip/Makefile.am3
-rw-r--r--tools/usb/usbip/libsrc/vhci_driver.c24
-rw-r--r--tools/usb/usbip/src/utils.c9
-rw-r--r--tools/virtio/ringtest/main.h4
-rw-r--r--tools/virtio/ringtest/ptr_ring.c29
-rw-r--r--tools/vm/slabinfo-gnuplot.sh2
-rw-r--r--tools/vm/slabinfo.c11
-rw-r--r--tools/wmi/Makefile18
-rw-r--r--tools/wmi/dell-smbios-example.c210
400 files changed, 23163 insertions, 3943 deletions
diff --git a/tools/Makefile b/tools/Makefile
index c4f41ef9a7a7..be02c8b904db 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -20,7 +20,7 @@ help:
 	@echo ' kvm_stat - top-like utility for displaying kvm statistics'
 	@echo ' leds - LEDs tools'
 	@echo ' liblockdep - user-space wrapper for kernel locking-validator'
-	@echo ' net - misc networking tools'
+	@echo ' bpf - misc BPF tools'
 	@echo ' perf - Linux performance measurement and analysis tool'
 	@echo ' selftests - various kernel selftests'
 	@echo ' spi - spi tools'
@@ -30,6 +30,7 @@ help:
 	@echo ' usb - USB testing tools'
 	@echo ' virtio - vhost test module'
 	@echo ' vm - misc vm tools'
+	@echo ' wmi - WMI interface examples'
 	@echo ' x86_energy_perf_policy - Intel energy policy tool'
 	@echo ''
 	@echo 'You can do:'
@@ -58,7 +59,7 @@ acpi: FORCE
 cpupower: FORCE
 	$(call descend,power/$@)
 
-cgroup firewire hv guest spi usb virtio vm net iio gpio objtool leds: FORCE
+cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds wmi: FORCE
 	$(call descend,$@)
 
 liblockdep: FORCE
@@ -92,8 +93,8 @@ kvm_stat: FORCE
 
 all: acpi cgroup cpupower gpio hv firewire liblockdep \
 	perf selftests spi turbostat usb \
-	virtio vm net x86_energy_perf_policy \
-	tmon freefall iio objtool kvm_stat
+	virtio vm bpf x86_energy_perf_policy \
+	tmon freefall iio objtool kvm_stat wmi
 
 acpi_install:
 	$(call descend,power/$(@:_install=),install)
@@ -101,7 +102,7 @@ acpi_install:
 cpupower_install:
 	$(call descend,power/$(@:_install=),install)
 
-cgroup_install firewire_install gpio_install hv_install iio_install perf_install spi_install usb_install virtio_install vm_install net_install objtool_install:
+cgroup_install firewire_install gpio_install hv_install iio_install perf_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install:
 	$(call descend,$(@:_install=),install)
 
 liblockdep_install:
@@ -125,8 +126,9 @@ kvm_stat_install:
 install: acpi_install cgroup_install cpupower_install gpio_install \
 	hv_install firewire_install iio_install liblockdep_install \
 	perf_install selftests_install turbostat_install usb_install \
-	virtio_install vm_install net_install x86_energy_perf_policy_install \
-	tmon_install freefall_install objtool_install kvm_stat_install
+	virtio_install vm_install bpf_install x86_energy_perf_policy_install \
+	tmon_install freefall_install objtool_install kvm_stat_install \
+	wmi_install
 
 acpi_clean:
 	$(call descend,power/acpi,clean)
@@ -134,7 +136,7 @@ acpi_clean:
 cpupower_clean:
 	$(call descend,power/cpupower,clean)
 
-cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean net_clean iio_clean gpio_clean objtool_clean leds_clean:
+cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean:
 	$(call descend,$(@:_clean=),clean)
 
 liblockdep_clean:
@@ -170,8 +172,8 @@ build_clean:
 
 clean: acpi_clean cgroup_clean cpupower_clean hv_clean firewire_clean \
 	perf_clean selftests_clean turbostat_clean spi_clean usb_clean virtio_clean \
-	vm_clean net_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
+	vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
 	freefall_clean build_clean libbpf_clean libsubcmd_clean liblockdep_clean \
-	gpio_clean objtool_clean leds_clean
+	gpio_clean objtool_clean leds_clean wmi_clean
 
 .PHONY: FORCE
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h
index 1f57bbe82b6f..6edd177bb1c7 100644
--- a/tools/arch/arm/include/uapi/asm/kvm.h
+++ b/tools/arch/arm/include/uapi/asm/kvm.h
@@ -152,6 +152,12 @@ struct kvm_arch_memory_slot {
 	(__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64)
 #define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)
 
+/* PL1 Physical Timer Registers */
+#define KVM_REG_ARM_PTIMER_CTL ARM_CP15_REG32(0, 14, 2, 1)
+#define KVM_REG_ARM_PTIMER_CNT ARM_CP15_REG64(0, 14)
+#define KVM_REG_ARM_PTIMER_CVAL ARM_CP15_REG64(2, 14)
+
+/* Virtual Timer Registers */
 #define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1)
 #define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14)
 #define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14)
@@ -216,6 +222,7 @@ struct kvm_arch_memory_slot {
 #define KVM_DEV_ARM_ITS_SAVE_TABLES 1
 #define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
 #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
+#define KVM_DEV_ARM_ITS_CTRL_RESET 4
 
 /* KVM_IRQ_LINE irq field index values */
 #define KVM_ARM_IRQ_TYPE_SHIFT 24
diff --git a/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h b/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 000000000000..b551b741653d
--- /dev/null
+++ b/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include <asm/ptrace.h>
+
+typedef struct user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index 51149ec75fe4..9abbf3044654 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -196,6 +196,12 @@ struct kvm_arch_memory_slot {
 
 #define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64)
 
+/* Physical Timer EL0 Registers */
+#define KVM_REG_ARM_PTIMER_CTL ARM64_SYS_REG(3, 3, 14, 2, 1)
+#define KVM_REG_ARM_PTIMER_CVAL ARM64_SYS_REG(3, 3, 14, 2, 2)
+#define KVM_REG_ARM_PTIMER_CNT ARM64_SYS_REG(3, 3, 14, 0, 1)
+
+/* EL0 Virtual Timer Registers */
 #define KVM_REG_ARM_TIMER_CTL ARM64_SYS_REG(3, 3, 14, 3, 1)
 #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
 #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
@@ -228,6 +234,7 @@ struct kvm_arch_memory_slot {
 #define KVM_DEV_ARM_ITS_SAVE_TABLES 1
 #define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
 #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
+#define KVM_DEV_ARM_ITS_CTRL_RESET 4
 
 /* Device Control API on vcpu fd */
 #define KVM_ARM_VCPU_PMU_V3_CTRL 0
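For context (not part of the patch): the KVM_REG_ARM_PTIMER_* IDs added above are ordinary one-reg identifiers, so user space reads them through the existing KVM_GET_ONE_REG ioctl. A minimal sketch, assuming "vcpu_fd" is an already-created KVM vCPU file descriptor:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Read the guest view of the physical counter via the new register ID. */
	static int read_guest_ptimer_cnt(int vcpu_fd, uint64_t *val)
	{
		struct kvm_one_reg reg = {
			.id   = KVM_REG_ARM_PTIMER_CNT,
			.addr = (uint64_t)(unsigned long)val,
		};

		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	}
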
diff --git a/tools/arch/s390/include/uapi/asm/bpf_perf_event.h b/tools/arch/s390/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 000000000000..0a8e37a519f2
--- /dev/null
+++ b/tools/arch/s390/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include "ptrace.h"
+
+typedef user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
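These per-architecture stubs exist so the generic UAPI header can describe the register context handed to perf-event BPF programs in an arch-neutral way; the matching update to tools/include/uapi/linux/bpf_perf_event.h appears in the diffstat above. Roughly, the consumer side looks like this (a simplified sketch, not the verbatim header):

	#include <linux/types.h>
	#include <asm/bpf_perf_event.h>

	/* Context passed to BPF_PROG_TYPE_PERF_EVENT programs: the saved user
	 * registers are now typed per architecture via bpf_user_pt_regs_t. */
	struct bpf_perf_event_data {
		bpf_user_pt_regs_t	regs;
		__u64			sample_period;
	};
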
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
index 9ad172dcd912..38535a57fef8 100644
--- a/tools/arch/s390/include/uapi/asm/kvm.h
+++ b/tools/arch/s390/include/uapi/asm/kvm.h
@@ -6,10 +6,6 @@
  *
  * Copyright IBM Corp. 2008
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  * Christian Borntraeger <borntraeger@de.ibm.com>
  */
diff --git a/tools/arch/s390/include/uapi/asm/kvm_perf.h b/tools/arch/s390/include/uapi/asm/kvm_perf.h
index c36c97ffdc6f..84606b8cc49e 100644
--- a/tools/arch/s390/include/uapi/asm/kvm_perf.h
+++ b/tools/arch/s390/include/uapi/asm/kvm_perf.h
@@ -4,10 +4,6 @@
  *
  * Copyright 2014 IBM Corp.
  * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  */
 
 #ifndef __LINUX_KVM_PERF_S390_H
diff --git a/tools/arch/s390/include/uapi/asm/perf_regs.h b/tools/arch/s390/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000000..d17dd9e5d516
--- /dev/null
+++ b/tools/arch/s390/include/uapi/asm/perf_regs.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_S390_PERF_REGS_H
+#define _ASM_S390_PERF_REGS_H
+
+enum perf_event_s390_regs {
+	PERF_REG_S390_R0,
+	PERF_REG_S390_R1,
+	PERF_REG_S390_R2,
+	PERF_REG_S390_R3,
+	PERF_REG_S390_R4,
+	PERF_REG_S390_R5,
+	PERF_REG_S390_R6,
+	PERF_REG_S390_R7,
+	PERF_REG_S390_R8,
+	PERF_REG_S390_R9,
+	PERF_REG_S390_R10,
+	PERF_REG_S390_R11,
+	PERF_REG_S390_R12,
+	PERF_REG_S390_R13,
+	PERF_REG_S390_R14,
+	PERF_REG_S390_R15,
+	PERF_REG_S390_FP0,
+	PERF_REG_S390_FP1,
+	PERF_REG_S390_FP2,
+	PERF_REG_S390_FP3,
+	PERF_REG_S390_FP4,
+	PERF_REG_S390_FP5,
+	PERF_REG_S390_FP6,
+	PERF_REG_S390_FP7,
+	PERF_REG_S390_FP8,
+	PERF_REG_S390_FP9,
+	PERF_REG_S390_FP10,
+	PERF_REG_S390_FP11,
+	PERF_REG_S390_FP12,
+	PERF_REG_S390_FP13,
+	PERF_REG_S390_FP14,
+	PERF_REG_S390_FP15,
+	PERF_REG_S390_MASK,
+	PERF_REG_S390_PC,
+
+	PERF_REG_S390_MAX
+};
+
+#endif /* _ASM_S390_PERF_REGS_H */
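The enum above mirrors the kernel's s390 perf_regs.h so that perf can name sampled user registers; each value is a bit index into perf_event_attr.sample_regs_user. A minimal sketch of how a tool would request two of these registers (illustrative only, not taken from the patch):

	#include <linux/perf_event.h>
	#include <asm/perf_regs.h>

	/* Sample the stack pointer (r15) and the PSW address with each event. */
	struct perf_event_attr attr = {
		.type             = PERF_TYPE_HARDWARE,
		.config           = PERF_COUNT_HW_CPU_CYCLES,
		.size             = sizeof(struct perf_event_attr),
		.sample_type      = PERF_SAMPLE_REGS_USER,
		.sample_regs_user = (1ULL << PERF_REG_S390_R15) |
				    (1ULL << PERF_REG_S390_PC),
	};
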
diff --git a/tools/arch/s390/include/uapi/asm/ptrace.h b/tools/arch/s390/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000000..543dd70e12c8
--- /dev/null
+++ b/tools/arch/s390/include/uapi/asm/ptrace.h
@@ -0,0 +1,457 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2/*
3 * S390 version
4 * Copyright IBM Corp. 1999, 2000
5 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
6 */
7
8#ifndef _UAPI_S390_PTRACE_H
9#define _UAPI_S390_PTRACE_H
10
11/*
12 * Offsets in the user_regs_struct. They are used for the ptrace
13 * system call and in entry.S
14 */
15#ifndef __s390x__
16
17#define PT_PSWMASK 0x00
18#define PT_PSWADDR 0x04
19#define PT_GPR0 0x08
20#define PT_GPR1 0x0C
21#define PT_GPR2 0x10
22#define PT_GPR3 0x14
23#define PT_GPR4 0x18
24#define PT_GPR5 0x1C
25#define PT_GPR6 0x20
26#define PT_GPR7 0x24
27#define PT_GPR8 0x28
28#define PT_GPR9 0x2C
29#define PT_GPR10 0x30
30#define PT_GPR11 0x34
31#define PT_GPR12 0x38
32#define PT_GPR13 0x3C
33#define PT_GPR14 0x40
34#define PT_GPR15 0x44
35#define PT_ACR0 0x48
36#define PT_ACR1 0x4C
37#define PT_ACR2 0x50
38#define PT_ACR3 0x54
39#define PT_ACR4 0x58
40#define PT_ACR5 0x5C
41#define PT_ACR6 0x60
42#define PT_ACR7 0x64
43#define PT_ACR8 0x68
44#define PT_ACR9 0x6C
45#define PT_ACR10 0x70
46#define PT_ACR11 0x74
47#define PT_ACR12 0x78
48#define PT_ACR13 0x7C
49#define PT_ACR14 0x80
50#define PT_ACR15 0x84
51#define PT_ORIGGPR2 0x88
52#define PT_FPC 0x90
53/*
54 * A nasty fact of life that the ptrace api
55 * only supports passing of longs.
56 */
57#define PT_FPR0_HI 0x98
58#define PT_FPR0_LO 0x9C
59#define PT_FPR1_HI 0xA0
60#define PT_FPR1_LO 0xA4
61#define PT_FPR2_HI 0xA8
62#define PT_FPR2_LO 0xAC
63#define PT_FPR3_HI 0xB0
64#define PT_FPR3_LO 0xB4
65#define PT_FPR4_HI 0xB8
66#define PT_FPR4_LO 0xBC
67#define PT_FPR5_HI 0xC0
68#define PT_FPR5_LO 0xC4
69#define PT_FPR6_HI 0xC8
70#define PT_FPR6_LO 0xCC
71#define PT_FPR7_HI 0xD0
72#define PT_FPR7_LO 0xD4
73#define PT_FPR8_HI 0xD8
74#define PT_FPR8_LO 0XDC
75#define PT_FPR9_HI 0xE0
76#define PT_FPR9_LO 0xE4
77#define PT_FPR10_HI 0xE8
78#define PT_FPR10_LO 0xEC
79#define PT_FPR11_HI 0xF0
80#define PT_FPR11_LO 0xF4
81#define PT_FPR12_HI 0xF8
82#define PT_FPR12_LO 0xFC
83#define PT_FPR13_HI 0x100
84#define PT_FPR13_LO 0x104
85#define PT_FPR14_HI 0x108
86#define PT_FPR14_LO 0x10C
87#define PT_FPR15_HI 0x110
88#define PT_FPR15_LO 0x114
89#define PT_CR_9 0x118
90#define PT_CR_10 0x11C
91#define PT_CR_11 0x120
92#define PT_IEEE_IP 0x13C
93#define PT_LASTOFF PT_IEEE_IP
94#define PT_ENDREGS 0x140-1
95
96#define GPR_SIZE 4
97#define CR_SIZE 4
98
99#define STACK_FRAME_OVERHEAD 96 /* size of minimum stack frame */
100
101#else /* __s390x__ */
102
103#define PT_PSWMASK 0x00
104#define PT_PSWADDR 0x08
105#define PT_GPR0 0x10
106#define PT_GPR1 0x18
107#define PT_GPR2 0x20
108#define PT_GPR3 0x28
109#define PT_GPR4 0x30
110#define PT_GPR5 0x38
111#define PT_GPR6 0x40
112#define PT_GPR7 0x48
113#define PT_GPR8 0x50
114#define PT_GPR9 0x58
115#define PT_GPR10 0x60
116#define PT_GPR11 0x68
117#define PT_GPR12 0x70
118#define PT_GPR13 0x78
119#define PT_GPR14 0x80
120#define PT_GPR15 0x88
121#define PT_ACR0 0x90
122#define PT_ACR1 0x94
123#define PT_ACR2 0x98
124#define PT_ACR3 0x9C
125#define PT_ACR4 0xA0
126#define PT_ACR5 0xA4
127#define PT_ACR6 0xA8
128#define PT_ACR7 0xAC
129#define PT_ACR8 0xB0
130#define PT_ACR9 0xB4
131#define PT_ACR10 0xB8
132#define PT_ACR11 0xBC
133#define PT_ACR12 0xC0
134#define PT_ACR13 0xC4
135#define PT_ACR14 0xC8
136#define PT_ACR15 0xCC
137#define PT_ORIGGPR2 0xD0
138#define PT_FPC 0xD8
139#define PT_FPR0 0xE0
140#define PT_FPR1 0xE8
141#define PT_FPR2 0xF0
142#define PT_FPR3 0xF8
143#define PT_FPR4 0x100
144#define PT_FPR5 0x108
145#define PT_FPR6 0x110
146#define PT_FPR7 0x118
147#define PT_FPR8 0x120
148#define PT_FPR9 0x128
149#define PT_FPR10 0x130
150#define PT_FPR11 0x138
151#define PT_FPR12 0x140
152#define PT_FPR13 0x148
153#define PT_FPR14 0x150
154#define PT_FPR15 0x158
155#define PT_CR_9 0x160
156#define PT_CR_10 0x168
157#define PT_CR_11 0x170
158#define PT_IEEE_IP 0x1A8
159#define PT_LASTOFF PT_IEEE_IP
160#define PT_ENDREGS 0x1B0-1
161
162#define GPR_SIZE 8
163#define CR_SIZE 8
164
165#define STACK_FRAME_OVERHEAD 160 /* size of minimum stack frame */
166
167#endif /* __s390x__ */
168
169#define NUM_GPRS 16
170#define NUM_FPRS 16
171#define NUM_CRS 16
172#define NUM_ACRS 16
173
174#define NUM_CR_WORDS 3
175
176#define FPR_SIZE 8
177#define FPC_SIZE 4
178#define FPC_PAD_SIZE 4 /* gcc insists on aligning the fpregs */
179#define ACR_SIZE 4
180
181
182#define PTRACE_OLDSETOPTIONS 21
183
184#ifndef __ASSEMBLY__
185#include <linux/stddef.h>
186#include <linux/types.h>
187
188typedef union {
189 float f;
190 double d;
191 __u64 ui;
192 struct
193 {
194 __u32 hi;
195 __u32 lo;
196 } fp;
197} freg_t;
198
199typedef struct {
200 __u32 fpc;
201 __u32 pad;
202 freg_t fprs[NUM_FPRS];
203} s390_fp_regs;
204
205#define FPC_EXCEPTION_MASK 0xF8000000
206#define FPC_FLAGS_MASK 0x00F80000
207#define FPC_DXC_MASK 0x0000FF00
208#define FPC_RM_MASK 0x00000003
209
210/* this typedef defines how a Program Status Word looks like */
211typedef struct {
212 unsigned long mask;
213 unsigned long addr;
214} __attribute__ ((aligned(8))) psw_t;
215
216#ifndef __s390x__
217
218#define PSW_MASK_PER 0x40000000UL
219#define PSW_MASK_DAT 0x04000000UL
220#define PSW_MASK_IO 0x02000000UL
221#define PSW_MASK_EXT 0x01000000UL
222#define PSW_MASK_KEY 0x00F00000UL
223#define PSW_MASK_BASE 0x00080000UL /* always one */
224#define PSW_MASK_MCHECK 0x00040000UL
225#define PSW_MASK_WAIT 0x00020000UL
226#define PSW_MASK_PSTATE 0x00010000UL
227#define PSW_MASK_ASC 0x0000C000UL
228#define PSW_MASK_CC 0x00003000UL
229#define PSW_MASK_PM 0x00000F00UL
230#define PSW_MASK_RI 0x00000000UL
231#define PSW_MASK_EA 0x00000000UL
232#define PSW_MASK_BA 0x00000000UL
233
234#define PSW_MASK_USER 0x0000FF00UL
235
236#define PSW_ADDR_AMODE 0x80000000UL
237#define PSW_ADDR_INSN 0x7FFFFFFFUL
238
239#define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 20)
240
241#define PSW_ASC_PRIMARY 0x00000000UL
242#define PSW_ASC_ACCREG 0x00004000UL
243#define PSW_ASC_SECONDARY 0x00008000UL
244#define PSW_ASC_HOME 0x0000C000UL
245
246#else /* __s390x__ */
247
248#define PSW_MASK_PER 0x4000000000000000UL
249#define PSW_MASK_DAT 0x0400000000000000UL
250#define PSW_MASK_IO 0x0200000000000000UL
251#define PSW_MASK_EXT 0x0100000000000000UL
252#define PSW_MASK_BASE 0x0000000000000000UL
253#define PSW_MASK_KEY 0x00F0000000000000UL
254#define PSW_MASK_MCHECK 0x0004000000000000UL
255#define PSW_MASK_WAIT 0x0002000000000000UL
256#define PSW_MASK_PSTATE 0x0001000000000000UL
257#define PSW_MASK_ASC 0x0000C00000000000UL
258#define PSW_MASK_CC 0x0000300000000000UL
259#define PSW_MASK_PM 0x00000F0000000000UL
260#define PSW_MASK_RI 0x0000008000000000UL
261#define PSW_MASK_EA 0x0000000100000000UL
262#define PSW_MASK_BA 0x0000000080000000UL
263
264#define PSW_MASK_USER 0x0000FF0180000000UL
265
266#define PSW_ADDR_AMODE 0x0000000000000000UL
267#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL
268
269#define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 52)
270
271#define PSW_ASC_PRIMARY 0x0000000000000000UL
272#define PSW_ASC_ACCREG 0x0000400000000000UL
273#define PSW_ASC_SECONDARY 0x0000800000000000UL
274#define PSW_ASC_HOME 0x0000C00000000000UL
275
276#endif /* __s390x__ */
277
278
279/*
280 * The s390_regs structure is used to define the elf_gregset_t.
281 */
282typedef struct {
283 psw_t psw;
284 unsigned long gprs[NUM_GPRS];
285 unsigned int acrs[NUM_ACRS];
286 unsigned long orig_gpr2;
287} s390_regs;
288
289/*
290 * The user_pt_regs structure exports the beginning of
291 * the in-kernel pt_regs structure to user space.
292 */
293typedef struct {
294 unsigned long args[1];
295 psw_t psw;
296 unsigned long gprs[NUM_GPRS];
297} user_pt_regs;
298
299/*
300 * Now for the user space program event recording (trace) definitions.
301 * The following structures are used only for the ptrace interface, don't
302 * touch or even look at it if you don't want to modify the user-space
303 * ptrace interface. In particular stay away from it for in-kernel PER.
304 */
305typedef struct {
306 unsigned long cr[NUM_CR_WORDS];
307} per_cr_words;
308
309#define PER_EM_MASK 0xE8000000UL
310
311typedef struct {
312#ifdef __s390x__
313 unsigned : 32;
314#endif /* __s390x__ */
315 unsigned em_branching : 1;
316 unsigned em_instruction_fetch : 1;
317 /*
318 * Switching on storage alteration automatically fixes
319 * the storage alteration event bit in the users std.
320 */
321 unsigned em_storage_alteration : 1;
322 unsigned em_gpr_alt_unused : 1;
323 unsigned em_store_real_address : 1;
324 unsigned : 3;
325 unsigned branch_addr_ctl : 1;
326 unsigned : 1;
327 unsigned storage_alt_space_ctl : 1;
328 unsigned : 21;
329 unsigned long starting_addr;
330 unsigned long ending_addr;
331} per_cr_bits;
332
333typedef struct {
334 unsigned short perc_atmid;
335 unsigned long address;
336 unsigned char access_id;
337} per_lowcore_words;
338
339typedef struct {
340 unsigned perc_branching : 1;
341 unsigned perc_instruction_fetch : 1;
342 unsigned perc_storage_alteration : 1;
343 unsigned perc_gpr_alt_unused : 1;
344 unsigned perc_store_real_address : 1;
345 unsigned : 3;
346 unsigned atmid_psw_bit_31 : 1;
347 unsigned atmid_validity_bit : 1;
348 unsigned atmid_psw_bit_32 : 1;
349 unsigned atmid_psw_bit_5 : 1;
350 unsigned atmid_psw_bit_16 : 1;
351 unsigned atmid_psw_bit_17 : 1;
352 unsigned si : 2;
353 unsigned long address;
354 unsigned : 4;
355 unsigned access_id : 4;
356} per_lowcore_bits;
357
358typedef struct {
359 union {
360 per_cr_words words;
361 per_cr_bits bits;
362 } control_regs;
363 /*
364 * The single_step and instruction_fetch bits are obsolete,
365 * the kernel always sets them to zero. To enable single
366 * stepping use ptrace(PTRACE_SINGLESTEP) instead.
367 */
368 unsigned single_step : 1;
369 unsigned instruction_fetch : 1;
370 unsigned : 30;
371 /*
372 * These addresses are copied into cr10 & cr11 if single
373 * stepping is switched off
374 */
375 unsigned long starting_addr;
376 unsigned long ending_addr;
377 union {
378 per_lowcore_words words;
379 per_lowcore_bits bits;
380 } lowcore;
381} per_struct;
382
383typedef struct {
384 unsigned int len;
385 unsigned long kernel_addr;
386 unsigned long process_addr;
387} ptrace_area;
388
389/*
390 * S/390 specific non posix ptrace requests. I chose unusual values so
391 * they are unlikely to clash with future ptrace definitions.
392 */
393#define PTRACE_PEEKUSR_AREA 0x5000
394#define PTRACE_POKEUSR_AREA 0x5001
395#define PTRACE_PEEKTEXT_AREA 0x5002
396#define PTRACE_PEEKDATA_AREA 0x5003
397#define PTRACE_POKETEXT_AREA 0x5004
398#define PTRACE_POKEDATA_AREA 0x5005
399#define PTRACE_GET_LAST_BREAK 0x5006
400#define PTRACE_PEEK_SYSTEM_CALL 0x5007
401#define PTRACE_POKE_SYSTEM_CALL 0x5008
402#define PTRACE_ENABLE_TE 0x5009
403#define PTRACE_DISABLE_TE 0x5010
404#define PTRACE_TE_ABORT_RAND 0x5011
405
406/*
407 * The numbers chosen here are somewhat arbitrary but absolutely MUST
408 * not overlap with any of the number assigned in <linux/ptrace.h>.
409 */
410#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */
411
412/*
413 * PT_PROT definition is loosely based on hppa bsd definition in
414 * gdb/hppab-nat.c
415 */
416#define PTRACE_PROT 21
417
418typedef enum {
419 ptprot_set_access_watchpoint,
420 ptprot_set_write_watchpoint,
421 ptprot_disable_watchpoint
422} ptprot_flags;
423
424typedef struct {
425 unsigned long lowaddr;
426 unsigned long hiaddr;
427 ptprot_flags prot;
428} ptprot_area;
429
430/* Sequence of bytes for breakpoint illegal instruction. */
431#define S390_BREAKPOINT {0x0,0x1}
432#define S390_BREAKPOINT_U16 ((__u16)0x0001)
433#define S390_SYSCALL_OPCODE ((__u16)0x0a00)
434#define S390_SYSCALL_SIZE 2
435
436/*
437 * The user_regs_struct defines the way the user registers are
438 * store on the stack for signal handling.
439 */
440struct user_regs_struct {
441 psw_t psw;
442 unsigned long gprs[NUM_GPRS];
443 unsigned int acrs[NUM_ACRS];
444 unsigned long orig_gpr2;
445 s390_fp_regs fp_regs;
446 /*
447 * These per registers are in here so that gdb can modify them
448 * itself as there is no "official" ptrace interface for hardware
449 * watchpoints. This is the way intel does it.
450 */
451 per_struct per_info;
452 unsigned long ieee_instruction_pointer; /* obsolete, always 0 */
453};
454
455#endif /* __ASSEMBLY__ */
456
457#endif /* _UAPI_S390_PTRACE_H */
diff --git a/tools/arch/x86/include/asm/atomic.h b/tools/arch/x86/include/asm/atomic.h
index 7d8c3261a50d..1f5e26aae9fc 100644
--- a/tools/arch/x86/include/asm/atomic.h
+++ b/tools/arch/x86/include/asm/atomic.h
@@ -25,7 +25,7 @@
  */
 static inline int atomic_read(const atomic_t *v)
 {
-	return ACCESS_ONCE((v)->counter);
+	return READ_ONCE((v)->counter);
 }
 
 /**
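The only change here is the switch from the deprecated ACCESS_ONCE() to READ_ONCE() in the tools copy of the x86 atomics. For context, a minimal sketch of why the marker matters (illustration only, assuming the tools/include compiler helpers and atomic_t typedef are on the include path):

	#include <linux/compiler.h>
	#include <linux/types.h>

	/* READ_ONCE() forces a fresh, non-torn load on every iteration, so the
	 * compiler cannot hoist the read out of the loop as it could with a
	 * plain v->counter access. */
	static void wait_for_nonzero(const atomic_t *v)
	{
		while (READ_ONCE(v->counter) == 0)
			;	/* spin until another thread publishes a value */
	}
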
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 793690fbda36..800104c8a3ed 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -13,173 +13,176 @@
13/* 13/*
14 * Defines x86 CPU feature bits 14 * Defines x86 CPU feature bits
15 */ 15 */
16#define NCAPINTS 18 /* N 32-bit words worth of info */ 16#define NCAPINTS 18 /* N 32-bit words worth of info */
17#define NBUGINTS 1 /* N 32-bit bug flags */ 17#define NBUGINTS 1 /* N 32-bit bug flags */
18 18
19/* 19/*
20 * Note: If the comment begins with a quoted string, that string is used 20 * Note: If the comment begins with a quoted string, that string is used
21 * in /proc/cpuinfo instead of the macro name. If the string is "", 21 * in /proc/cpuinfo instead of the macro name. If the string is "",
22 * this feature bit is not displayed in /proc/cpuinfo at all. 22 * this feature bit is not displayed in /proc/cpuinfo at all.
23 *
24 * When adding new features here that depend on other features,
25 * please update the table in kernel/cpu/cpuid-deps.c as well.
23 */ 26 */
24 27
25/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ 28/* Intel-defined CPU features, CPUID level 0x00000001 (EDX), word 0 */
26#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */ 29#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
27#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */ 30#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
28#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */ 31#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
29#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */ 32#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
30#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */ 33#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
31#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */ 34#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
32#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */ 35#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
33#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */ 36#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
34#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */ 37#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
35#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */ 38#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
36#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */ 39#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
37#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */ 40#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
38#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */ 41#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
39#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */ 42#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
40#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */ 43#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */
41 /* (plus FCMOVcc, FCOMI with FPU) */ 44#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
42#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */ 45#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
43#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */ 46#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
44#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */ 47#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
45#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */ 48#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
46#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */ 49#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
47#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */ 50#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
48#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */ 51#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
49#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ 52#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
50#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */ 53#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
51#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */ 54#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
52#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */ 55#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
53#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */ 56#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
54#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */ 57#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
55#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */ 58#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
56#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
57 59
58/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ 60/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
59/* Don't duplicate feature flags which are redundant with Intel! */ 61/* Don't duplicate feature flags which are redundant with Intel! */
60#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */ 62#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
61#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */ 63#define X86_FEATURE_MP ( 1*32+19) /* MP Capable */
62#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */ 64#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
63#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */ 65#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
64#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */ 66#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
65#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */ 67#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
66#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */ 68#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
67#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */ 69#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64, 64-bit support) */
68#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */ 70#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow extensions */
69#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */ 71#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow */
70 72
71/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ 73/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
72#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */ 74#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
73#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */ 75#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
74#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */ 76#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
75 77
76/* Other features, Linux-defined mapping, word 3 */ 78/* Other features, Linux-defined mapping, word 3 */
77/* This range is used for feature bits which conflict or are synthesized */ 79/* This range is used for feature bits which conflict or are synthesized */
78#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */ 80#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
79#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ 81#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
80#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ 82#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
81#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ 83#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
82/* cpu types for specific tunings: */ 84
83#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ 85/* CPU types for specific tunings: */
84#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */ 86#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
85#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ 87#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
86#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ 88#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
87#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ 89#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
88#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */ 90#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
89#define X86_FEATURE_ART ( 3*32+10) /* Platform has always running timer (ART) */ 91#define X86_FEATURE_UP ( 3*32+ 9) /* SMP kernel running on UP */
90#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ 92#define X86_FEATURE_ART ( 3*32+10) /* Always running timer (ART) */
91#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */ 93#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
92#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */ 94#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
93#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */ 95#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
94#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */ 96#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
95#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */ 97#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
96#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */ 98#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
97#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */ 99#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" MFENCE synchronizes RDTSC */
98#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */ 100#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
99#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ 101#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
100#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */ 102#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
101#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */ 103#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
102#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ 104#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* CPU topology enum extensions */
103#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ 105#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
104#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */ 106#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
105#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */ 107#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */
106#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */ 108#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */
107#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */ 109#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */
108#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */ 110#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
109#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */ 111#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
112#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
110 113
111/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ 114/* Intel-defined CPU features, CPUID level 0x00000001 (ECX), word 4 */
112#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */ 115#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
113#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */ 116#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
114#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */ 117#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
115#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */ 118#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" MONITOR/MWAIT support */
116#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */ 119#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */
117#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */ 120#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
118#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */ 121#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer Mode eXtensions */
119#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */ 122#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
120#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ 123#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
121#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ 124#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
122#define X86_FEATURE_CID ( 4*32+10) /* Context ID */ 125#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
123#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */ 126#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
124#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ 127#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
125#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */ 128#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B instruction */
126#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ 129#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
127#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */ 130#define X86_FEATURE_PDCM ( 4*32+15) /* Perf/Debug Capabilities MSR */
128#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */ 131#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
129#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */ 132#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
130#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */ 133#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
131#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */ 134#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
132#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */ 135#define X86_FEATURE_X2APIC ( 4*32+21) /* X2APIC */
133#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */ 136#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
134#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */ 137#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
135#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */ 138#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* TSC deadline timer */
136#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */ 139#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
137#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ 140#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */
138#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */ 141#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE instruction enabled in the OS */
139#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */ 142#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
140#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */ 143#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit FP conversions */
141#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */ 144#define X86_FEATURE_RDRAND ( 4*32+30) /* RDRAND instruction */
142#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ 145#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
143 146
144/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ 147/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
145#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ 148#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
146#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ 149#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
147#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ 150#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
148#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ 151#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
149#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ 152#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
150#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */ 153#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
151#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */ 154#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
152#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ 155#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
153#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ 156#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
154#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ 157#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
155 158
156/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ 159/* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */
157#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ 160#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
158#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */ 161#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
159#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */ 162#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure Virtual Machine */
160#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */ 163#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
161#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */ 164#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
162#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */ 165#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
163#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */ 166#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
164#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */ 167#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
165#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */ 168#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
166#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */ 169#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
167#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */ 170#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
168#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */ 171#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
169#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */ 172#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
170#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */ 173#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
171#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */ 174#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
172#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */ 175#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
173#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */ 176#define X86_FEATURE_TCE ( 6*32+17) /* Translation Cache Extension */
174#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */ 177#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
175#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */ 178#define X86_FEATURE_TBM ( 6*32+21) /* Trailing Bit Manipulations */
176#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */ 179#define X86_FEATURE_TOPOEXT ( 6*32+22) /* Topology extensions CPUID leafs */
177#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */ 180#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* Core performance counter extensions */
178#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ 181#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
179#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */ 182#define X86_FEATURE_BPEXT ( 6*32+26) /* Data breakpoint extension */
180#define X86_FEATURE_PTSC ( 6*32+27) /* performance time-stamp counter */ 183#define X86_FEATURE_PTSC ( 6*32+27) /* Performance time-stamp counter */
181#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */ 184#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */
182#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */ 185#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX instructions) */
183 186
184/* 187/*
185 * Auxiliary flags: Linux defined - For features scattered in various 188 * Auxiliary flags: Linux defined - For features scattered in various
@@ -187,146 +190,155 @@
187 * 190 *
188 * Reuse free bits when adding new feature flags! 191 * Reuse free bits when adding new feature flags!
189 */ 192 */
190#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */ 193#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT instructions */
191#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */ 194#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */
192#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ 195#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
193#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ 196#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
194#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */ 197#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
195#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ 198#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
196#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ 199#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
197 200
198#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ 201#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
199#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ 202#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
200#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ 203#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
201 204
202#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ 205#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
203#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ 206#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
204#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */ 207#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */
205#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */ 208#define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
206 209
207#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ 210#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
208 211
209/* Virtualization flags: Linux defined, word 8 */ 212/* Virtualization flags: Linux defined, word 8 */
210#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ 213#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
211#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ 214#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
212#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */ 215#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
213#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */ 216#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
214#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */ 217#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
215 218
216#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */ 219#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */
217#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ 220#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
218 221
219 222
220/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ 223/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
221#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ 224#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
222#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */ 225#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3B */
223#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ 226#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
224#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ 227#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
225#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ 228#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
226#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ 229#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
227#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */ 230#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
228#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */ 231#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */
229#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ 232#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
230#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ 233#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
231#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ 234#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
232#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ 235#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
233#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */ 236#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
234#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ 237#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
235#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */ 238#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
236#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ 239#define X86_FEATURE_RDSEED ( 9*32+18) /* RDSEED instruction */
237#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ 240#define X86_FEATURE_ADX ( 9*32+19) /* ADCX and ADOX instructions */
238#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ 241#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
239#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */ 242#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
240#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ 243#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
241#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ 244#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
242#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ 245#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
243#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ 246#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
244#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ 247#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
245#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */ 248#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
246#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */ 249#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
247#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */ 250#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
248 251
249/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */ 252/* Extended state features, CPUID level 0x0000000d:1 (EAX), word 10 */
250#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */ 253#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT instruction */
251#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */ 254#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC instruction */
252#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */ 255#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */
253#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */ 256#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */
254 257
255/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */ 258/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */
256#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */ 259#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
257 260
258/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */ 261/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */
259#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */ 262#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring */
260#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */ 263#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
261#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */ 264#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */
262 265
263/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */ 266/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
264#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */ 267#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
265#define X86_FEATURE_IRPERF (13*32+1) /* Instructions Retired Count */ 268#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
269#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
266 270
267/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */ 271/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
268#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ 272#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
269#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */ 273#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
270#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */ 274#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
271#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */ 275#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
272#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */ 276#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
273#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */ 277#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
274#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */ 278#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
275#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */ 279#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
276#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */ 280#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
277#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ 281#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
278 282
279/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */ 283/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */
280#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */ 284#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
281#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */ 285#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
282#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */ 286#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
283#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */ 287#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
284#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */ 288#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
285#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */ 289#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
286#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */ 290#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
287#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */ 291#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
288#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */ 292#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
289#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ 293#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
290#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */ 294#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
291#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ 295#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
292#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */ 296#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
293 297
294/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */ 298/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
295#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ 299#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
296#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ 300#define X86_FEATURE_UMIP (16*32+ 2) /* User Mode Instruction Protection */
297#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ 301#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
298#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ 302#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
299#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ 303#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */
300#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ 304#define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */
305#define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */
306#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */
307#define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */
308#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
309#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
310#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
311#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
301 312
302/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */ 313/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
303#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */ 314#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
304#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */ 315#define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */
305#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */ 316#define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */
306 317
307/* 318/*
308 * BUG word(s) 319 * BUG word(s)
309 */ 320 */
310#define X86_BUG(x) (NCAPINTS*32 + (x)) 321#define X86_BUG(x) (NCAPINTS*32 + (x))
311 322
312#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ 323#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
313#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ 324#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
314#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ 325#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
315#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ 326#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
316#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ 327#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
317#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ 328#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
318#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ 329#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
319#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ 330#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
320#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ 331#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
321#ifdef CONFIG_X86_32 332#ifdef CONFIG_X86_32
322/* 333/*
323 * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional 334 * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional
324 * to avoid confusion. 335 * to avoid confusion.
325 */ 336 */
326#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */ 337#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
327#endif 338#endif
328#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */ 339#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
329#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ 340#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
330#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ 341#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
331#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ 342#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
343
332#endif /* _ASM_X86_CPUFEATURES_H */ 344#endif /* _ASM_X86_CPUFEATURES_H */
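Every X86_FEATURE_* constant above encodes (capability word * 32 + bit), which is why the header asks that new flags reuse free bits of existing words. A minimal sketch of how a userspace consumer of this header could test one of these flags against a filled-in capability array (the caps[] array and helper name are illustrative, not part of the header):

#include <stdbool.h>
#include <stdint.h>

#define NCAPINTS 18     /* number of 32-bit capability words in this header version */

/* feature is an X86_FEATURE_* value, e.g. X86_FEATURE_RDPID == 16*32 + 22 */
static bool cpu_has(const uint32_t caps[NCAPINTS], unsigned int feature)
{
        unsigned int word = feature / 32;   /* which capability word  */
        unsigned int bit  = feature % 32;   /* which bit within it    */

        return word < NCAPINTS && ((caps[word] >> bit) & 1);
}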
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index c10c9128f54e..14d6d5007314 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -16,6 +16,12 @@
16# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31)) 16# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31))
17#endif 17#endif
18 18
19#ifdef CONFIG_X86_INTEL_UMIP
20# define DISABLE_UMIP 0
21#else
22# define DISABLE_UMIP (1<<(X86_FEATURE_UMIP & 31))
23#endif
24
19#ifdef CONFIG_X86_64 25#ifdef CONFIG_X86_64
20# define DISABLE_VME (1<<(X86_FEATURE_VME & 31)) 26# define DISABLE_VME (1<<(X86_FEATURE_VME & 31))
21# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31)) 27# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31))
@@ -63,7 +69,7 @@
63#define DISABLED_MASK13 0 69#define DISABLED_MASK13 0
64#define DISABLED_MASK14 0 70#define DISABLED_MASK14 0
65#define DISABLED_MASK15 0 71#define DISABLED_MASK15 0
66#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57) 72#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP)
67#define DISABLED_MASK17 0 73#define DISABLED_MASK17 0
68#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) 74#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
69 75
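DISABLE_UMIP follows the file's existing pattern: when CONFIG_X86_INTEL_UMIP is off, the feature's bit is baked into DISABLED_MASK16 so compile-time checks can treat the feature as absent. A rough sketch of that idea (macro names below, other than DISABLED_MASK16, are illustrative; the kernel's real plumbing goes through cpu_feature_enabled()):

/* Word 16 only, for illustration; the real check spans all DISABLED_MASK* words. */
#define FEATURE_WORD(f) ((f) / 32)
#define FEATURE_BIT(f)  ((f) % 32)

/* Constant-folds to 1 for a config-disabled feature such as X86_FEATURE_UMIP,
 * so the compiler can drop the runtime capability test altogether.
 */
#define FEATURE_DISABLED_BY_CONFIG(f) \
        (FEATURE_WORD(f) == 16 && (DISABLED_MASK16 & (1U << FEATURE_BIT(f))))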
diff --git a/tools/net/Makefile b/tools/bpf/Makefile
index 5830670feae1..07a6697466ef 100644
--- a/tools/net/Makefile
+++ b/tools/bpf/Makefile
@@ -4,6 +4,7 @@ prefix = /usr
4CC = gcc 4CC = gcc
5LEX = flex 5LEX = flex
6YACC = bison 6YACC = bison
7MAKE = make
7 8
8CFLAGS += -Wall -O2 9CFLAGS += -Wall -O2
9CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include 10CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
@@ -14,7 +15,7 @@ CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
14%.lex.c: %.l 15%.lex.c: %.l
15 $(LEX) -o $@ $< 16 $(LEX) -o $@ $<
16 17
17all : bpf_jit_disasm bpf_dbg bpf_asm 18all: bpf_jit_disasm bpf_dbg bpf_asm bpftool
18 19
19bpf_jit_disasm : CFLAGS += -DPACKAGE='bpf_jit_disasm' 20bpf_jit_disasm : CFLAGS += -DPACKAGE='bpf_jit_disasm'
20bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl 21bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl
@@ -27,10 +28,21 @@ bpf_asm : LDLIBS =
27bpf_asm : bpf_asm.o bpf_exp.yacc.o bpf_exp.lex.o 28bpf_asm : bpf_asm.o bpf_exp.yacc.o bpf_exp.lex.o
28bpf_exp.lex.o : bpf_exp.yacc.c 29bpf_exp.lex.o : bpf_exp.yacc.c
29 30
30clean : 31clean: bpftool_clean
31 rm -rf *.o bpf_jit_disasm bpf_dbg bpf_asm bpf_exp.yacc.* bpf_exp.lex.* 32 rm -rf *.o bpf_jit_disasm bpf_dbg bpf_asm bpf_exp.yacc.* bpf_exp.lex.*
32 33
33install : 34install: bpftool_install
34 install bpf_jit_disasm $(prefix)/bin/bpf_jit_disasm 35 install bpf_jit_disasm $(prefix)/bin/bpf_jit_disasm
35 install bpf_dbg $(prefix)/bin/bpf_dbg 36 install bpf_dbg $(prefix)/bin/bpf_dbg
36 install bpf_asm $(prefix)/bin/bpf_asm 37 install bpf_asm $(prefix)/bin/bpf_asm
38
39bpftool:
40 $(MAKE) -C bpftool
41
42bpftool_install:
43 $(MAKE) -C bpftool install
44
45bpftool_clean:
46 $(MAKE) -C bpftool clean
47
48.PHONY: bpftool FORCE
diff --git a/tools/net/bpf_asm.c b/tools/bpf/bpf_asm.c
index c15aef097b04..c15aef097b04 100644
--- a/tools/net/bpf_asm.c
+++ b/tools/bpf/bpf_asm.c
diff --git a/tools/net/bpf_dbg.c b/tools/bpf/bpf_dbg.c
index 4f254bcc4423..4f254bcc4423 100644
--- a/tools/net/bpf_dbg.c
+++ b/tools/bpf/bpf_dbg.c
diff --git a/tools/net/bpf_exp.l b/tools/bpf/bpf_exp.l
index bd83149e7be0..bd83149e7be0 100644
--- a/tools/net/bpf_exp.l
+++ b/tools/bpf/bpf_exp.l
diff --git a/tools/net/bpf_exp.y b/tools/bpf/bpf_exp.y
index 56ba1de50784..56ba1de50784 100644
--- a/tools/net/bpf_exp.y
+++ b/tools/bpf/bpf_exp.y
diff --git a/tools/net/bpf_jit_disasm.c b/tools/bpf/bpf_jit_disasm.c
index 422d9abd666a..75bf526a0168 100644
--- a/tools/net/bpf_jit_disasm.c
+++ b/tools/bpf/bpf_jit_disasm.c
@@ -27,6 +27,7 @@
27#include <sys/klog.h> 27#include <sys/klog.h>
28#include <sys/types.h> 28#include <sys/types.h>
29#include <sys/stat.h> 29#include <sys/stat.h>
30#include <limits.h>
30 31
31#define CMD_ACTION_SIZE_BUFFER 10 32#define CMD_ACTION_SIZE_BUFFER 10
32#define CMD_ACTION_READ_ALL 3 33#define CMD_ACTION_READ_ALL 3
@@ -51,7 +52,7 @@ static void get_exec_path(char *tpath, size_t size)
51static void get_asm_insns(uint8_t *image, size_t len, int opcodes) 52static void get_asm_insns(uint8_t *image, size_t len, int opcodes)
52{ 53{
53 int count, i, pc = 0; 54 int count, i, pc = 0;
54 char tpath[256]; 55 char tpath[PATH_MAX];
55 struct disassemble_info info; 56 struct disassemble_info info;
56 disassembler_ftype disassemble; 57 disassembler_ftype disassemble;
57 bfd *bfdf; 58 bfd *bfdf;
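The hunk above grows the on-stack path buffer from 256 bytes to PATH_MAX, matching what get_exec_path() can legitimately produce for the running binary's path. A hedged sketch of such a helper (the readlink-based body is an assumption about the surrounding code, shown only to illustrate why PATH_MAX is the right bound):

#include <limits.h>
#include <unistd.h>

static void get_exec_path(char *tpath, size_t size)
{
        ssize_t len;

        /* /proc/self/exe can resolve to any path up to PATH_MAX bytes */
        len = readlink("/proc/self/exe", tpath, size - 1);
        if (len < 0)
                len = 0;
        tpath[len] = '\0';
}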
diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile
new file mode 100644
index 000000000000..37292bb5ce60
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/Makefile
@@ -0,0 +1,34 @@
1include ../../../scripts/Makefile.include
2include ../../../scripts/utilities.mak
3
4INSTALL ?= install
5RM ?= rm -f
6
7# Make the path relative to DESTDIR, not prefix
8ifndef DESTDIR
9prefix ?= /usr/local
10endif
11mandir ?= $(prefix)/share/man
12man8dir = $(mandir)/man8
13
14MAN8_RST = $(wildcard *.rst)
15
16_DOC_MAN8 = $(patsubst %.rst,%.8,$(MAN8_RST))
17DOC_MAN8 = $(addprefix $(OUTPUT),$(_DOC_MAN8))
18
19man: man8
20man8: $(DOC_MAN8)
21
22$(OUTPUT)%.8: %.rst
23 rst2man $< > $@
24
25clean:
26 $(call QUIET_CLEAN, Documentation) $(RM) $(DOC_MAN8)
27
28install: man
29 $(call QUIET_INSTALL, Documentation-man) \
30 $(INSTALL) -d -m 755 $(DESTDIR)$(man8dir); \
31 $(INSTALL) -m 644 $(DOC_MAN8) $(DESTDIR)$(man8dir);
32
33.PHONY: man man8 clean install
34.DEFAULT_GOAL := man
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
new file mode 100644
index 000000000000..9f51a268eb06
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -0,0 +1,131 @@
1================
2bpftool-map
3================
4-------------------------------------------------------------------------------
5tool for inspection and simple manipulation of eBPF maps
6-------------------------------------------------------------------------------
7
8:Manual section: 8
9
10SYNOPSIS
11========
12
13 **bpftool** [*OPTIONS*] **map** *COMMAND*
14
15 *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
16
17 *COMMANDS* :=
18 { **show** | **dump** | **update** | **lookup** | **getnext** | **delete**
19 | **pin** | **help** }
20
21MAP COMMANDS
22=============
23
24| **bpftool** **map show** [*MAP*]
25| **bpftool** **map dump** *MAP*
26| **bpftool** **map update** *MAP* **key** *BYTES* **value** *VALUE* [*UPDATE_FLAGS*]
27| **bpftool** **map lookup** *MAP* **key** *BYTES*
28| **bpftool** **map getnext** *MAP* [**key** *BYTES*]
29| **bpftool** **map delete** *MAP* **key** *BYTES*
30| **bpftool** **map pin** *MAP* *FILE*
31| **bpftool** **map help**
32|
33| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
34| *VALUE* := { *BYTES* | *MAP* | *PROGRAM* }
35| *UPDATE_FLAGS* := { **any** | **exist** | **noexist** }
36
37DESCRIPTION
38===========
39 **bpftool map show** [*MAP*]
40 Show information about loaded maps. If *MAP* is specified,
41 show information only about the given map, otherwise list all
42 maps currently loaded on the system.
43
44 Output will start with map ID followed by map type and
45 zero or more named attributes (depending on kernel version).
46
47 **bpftool map dump** *MAP*
48 Dump all entries in a given *MAP*.
49
50 **bpftool map update** *MAP* **key** *BYTES* **value** *VALUE* [*UPDATE_FLAGS*]
51 Update map entry for a given *KEY*.
52
53 *UPDATE_FLAGS* can be one of: **any** update existing entry
54 or add if it doesn't exist; **exist** update only if entry already
55 exists; **noexist** update only if entry doesn't exist.
56
57 **bpftool map lookup** *MAP* **key** *BYTES*
58 Lookup **key** in the map.
59
60 **bpftool map getnext** *MAP* [**key** *BYTES*]
61 Get next key. If *key* is not specified, get first key.
62
63 **bpftool map delete** *MAP* **key** *BYTES*
64 Remove entry from the map.
65
66 **bpftool map pin** *MAP* *FILE*
67 Pin map *MAP* as *FILE*.
68
69 Note: *FILE* must be located in a *bpffs* mount.
70
71 **bpftool map help**
72 Print short help message.
73
74OPTIONS
75=======
76 -h, --help
77 Print short generic help message (similar to **bpftool help**).
78
79 -v, --version
80 Print version number (similar to **bpftool version**).
81
82 -j, --json
83 Generate JSON output. For commands that cannot produce JSON, this
84 option has no effect.
85
86 -p, --pretty
87 Generate human-readable JSON output. Implies **-j**.
88
89 -f, --bpffs
90 Show file names of pinned maps.
91
92EXAMPLES
93========
94**# bpftool map show**
95::
96
97 10: hash name some_map flags 0x0
98 key 4B value 8B max_entries 2048 memlock 167936B
99
100**# bpftool map update id 10 key 13 00 07 00 value 02 00 00 00 01 02 03 04**
101
102**# bpftool map lookup id 10 key 0 1 2 3**
103
104::
105
106 key: 00 01 02 03 value: 00 01 02 03 04 05 06 07
107
108
109**# bpftool map dump id 10**
110::
111
112 key: 00 01 02 03 value: 00 01 02 03 04 05 06 07
113 key: 0d 00 07 00 value: 02 00 00 00 01 02 03 04
114 Found 2 elements
115
116**# bpftool map getnext id 10 key 0 1 2 3**
117::
118
119 key:
120 00 01 02 03
121 next key:
122 0d 00 07 00
123
124|
125| **# mount -t bpf none /sys/fs/bpf/**
126| **# bpftool map pin id 10 /sys/fs/bpf/map**
127| **# bpftool map del pinned /sys/fs/bpf/map key 13 00 07 00**
128
129SEE ALSO
130========
131 **bpftool**\ (8), **bpftool-prog**\ (8)
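All of the map commands documented above reduce to bpf() syscall operations on a map file descriptor. A minimal sketch of the lookup and update paths using the tools/lib/bpf wrappers (the map id, key width and value width below are illustrative and must match the real map, as in the hash map example output above):

#include <string.h>
#include <unistd.h>
#include <bpf.h>                /* tools/lib/bpf wrappers */

static int demo_map_access(unsigned int id)
{
        unsigned char key[4] = { 0x0d, 0x00, 0x07, 0x00 };      /* 4-byte key   */
        unsigned char value[8] = { 0 };                         /* 8-byte value */
        int fd, err;

        fd = bpf_map_get_fd_by_id(id);          /* resolve the "id ID" handle */
        if (fd < 0)
                return fd;

        err = bpf_map_lookup_elem(fd, key, value);      /* bpftool map lookup */
        if (!err)
                err = bpf_map_update_elem(fd, key, value, BPF_ANY); /* "any" flag */

        close(fd);
        return err;
}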
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
new file mode 100644
index 000000000000..36e8d1c3c40d
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -0,0 +1,150 @@
1================
2bpftool-prog
3================
4-------------------------------------------------------------------------------
5tool for inspection and simple manipulation of eBPF progs
6-------------------------------------------------------------------------------
7
8:Manual section: 8
9
10SYNOPSIS
11========
12
13 **bpftool** [*OPTIONS*] **prog** *COMMAND*
14
15 *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
16
17 *COMMANDS* :=
18 { **show** | **dump xlated** | **dump jited** | **pin** | **help** }
19
20PROG COMMANDS
21=============
22
23| **bpftool** **prog show** [*PROG*]
24| **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes**}]
25| **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes**}]
26| **bpftool** **prog pin** *PROG* *FILE*
27| **bpftool** **prog help**
28|
29| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
30
31DESCRIPTION
32===========
33 **bpftool prog show** [*PROG*]
34 Show information about loaded programs. If *PROG* is
35 specified show information only about given program, otherwise
36 list all programs currently loaded on the system.
37
38 Output will start with program ID followed by program type and
39 zero or more named attributes (depending on kernel version).
40
41 **bpftool prog dump xlated** *PROG* [{ **file** *FILE* | **opcodes** }]
42 Dump eBPF instructions of the program from the kernel.
43 If *FILE* is specified, the image will be written to a file,
44 otherwise it will be disassembled and printed to stdout.
45
46 **opcodes** controls whether raw opcodes are printed.
47
48 **bpftool prog dump jited** *PROG* [{ **file** *FILE* | **opcodes** }]
49 Dump jited image (host machine code) of the program.
50 If *FILE* is specified, the image will be written to a file,
51 otherwise it will be disassembled and printed to stdout.
52
53 **opcodes** controls whether raw opcodes are printed.
54
55 **bpftool prog pin** *PROG* *FILE*
56 Pin program *PROG* as *FILE*.
57
58 Note: *FILE* must be located in a *bpffs* mount.
59
60 **bpftool prog help**
61 Print short help message.
62
63OPTIONS
64=======
65 -h, --help
66 Print short generic help message (similar to **bpftool help**).
67
68 -v, --version
69 Print version number (similar to **bpftool version**).
70
71 -j, --json
72 Generate JSON output. For commands that cannot produce JSON, this
73 option has no effect.
74
75 -p, --pretty
76 Generate human-readable JSON output. Implies **-j**.
77
78 -f, --bpffs
79 Show file names of pinned programs.
80
81EXAMPLES
82========
83**# bpftool prog show**
84::
85
86 10: xdp name some_prog tag 005a3d2123620c8b
87 loaded_at Sep 29/20:11 uid 0
88 xlated 528B jited 370B memlock 4096B map_ids 10
89
90**# bpftool --json --pretty prog show**
91
92::
93
94 {
95 "programs": [{
96 "id": 10,
97 "type": "xdp",
98 "tag": "005a3d2123620c8b",
99 "loaded_at": "Sep 29/20:11",
100 "uid": 0,
101 "bytes_xlated": 528,
102 "jited": true,
103 "bytes_jited": 370,
104 "bytes_memlock": 4096,
105 "map_ids": [10
106 ]
107 }
108 ]
109 }
110
111|
112| **# bpftool prog dump xlated id 10 file /tmp/t**
113| **# ls -l /tmp/t**
114| -rw------- 1 root root 560 Jul 22 01:42 /tmp/t
115
116**# bpftool prog dump jited tag 005a3d2123620c8b**
117
118::
119
120 push %rbp
121 mov %rsp,%rbp
122 sub $0x228,%rsp
123 sub $0x28,%rbp
124 mov %rbx,0x0(%rbp)
125
126|
127| **# mount -t bpf none /sys/fs/bpf/**
128| **# bpftool prog pin id 10 /sys/fs/bpf/prog**
129| **# ls -l /sys/fs/bpf/**
130| -rw------- 1 root root 0 Jul 22 01:43 prog
131
132**# bpftool prog dump jited pinned /sys/fs/bpf/prog opcodes**
133
134::
135
136 push %rbp
137 55
138 mov %rsp,%rbp
139 48 89 e5
140 sub $0x228,%rsp
141 48 81 ec 28 02 00 00
142 sub $0x28,%rbp
143 48 83 ed 28
144 mov %rbx,0x0(%rbp)
145 48 89 5d 00
146
147
148SEE ALSO
149========
150 **bpftool**\ (8), **bpftool-map**\ (8)
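prog show and the dump commands rely on BPF_OBJ_GET_INFO_BY_FD to retrieve a struct bpf_prog_info for the program. A short sketch of querying the sizes printed above, assuming the bpf_prog_get_fd_by_id() and bpf_obj_get_info_by_fd() wrappers exported by tools/lib/bpf:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <bpf.h>

static int demo_prog_info(unsigned int id)
{
        struct bpf_prog_info info;
        __u32 len = sizeof(info);
        int fd, err;

        memset(&info, 0, sizeof(info));

        fd = bpf_prog_get_fd_by_id(id);
        if (fd < 0)
                return fd;

        /* With no user buffers set, the kernel only reports lengths; bpftool
         * uses these to size its xlated/jited dump buffers.
         */
        err = bpf_obj_get_info_by_fd(fd, &info, &len);
        if (!err)
                printf("%u: xlated %uB jited %uB\n",
                       info.id, info.xlated_prog_len, info.jited_prog_len);

        close(fd);
        return err;
}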
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst
new file mode 100644
index 000000000000..926c03d5a8da
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool.rst
@@ -0,0 +1,56 @@
1================
2BPFTOOL
3================
4-------------------------------------------------------------------------------
5tool for inspection and simple manipulation of eBPF programs and maps
6-------------------------------------------------------------------------------
7
8:Manual section: 8
9
10SYNOPSIS
11========
12
13 **bpftool** [*OPTIONS*] *OBJECT* { *COMMAND* | **help** }
14
15 **bpftool** **batch file** *FILE*
16
17 **bpftool** **version**
18
19 *OBJECT* := { **map** | **prog** }
20
21 *OPTIONS* := { { **-V** | **--version** } | { **-h** | **--help** }
22 | { **-j** | **--json** } [{ **-p** | **--pretty** }] }
23
24 *MAP-COMMANDS* :=
25 { **show** | **dump** | **update** | **lookup** | **getnext** | **delete**
26 | **pin** | **help** }
27
28 *PROG-COMMANDS* := { **show** | **dump jited** | **dump xlated** | **pin**
29 | **help** }
30
31DESCRIPTION
32===========
33 *bpftool* allows for inspection and simple modification of BPF objects
34 on the system.
35
36 Note that the format of the output of all tools is not guaranteed to be
37 stable and should not be depended upon.
38
39OPTIONS
40=======
41 -h, --help
42 Print short help message (similar to **bpftool help**).
43
44 -v, --version
45 Print version number (similar to **bpftool version**).
46
47 -j, --json
48 Generate JSON output. For commands that cannot produce JSON, this
49 option has no effect.
50
51 -p, --pretty
52 Generate human-readable JSON output. Implies **-j**.
53
54SEE ALSO
55========
56 **bpftool-map**\ (8), **bpftool-prog**\ (8)
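The -j/-p options shared by all objects funnel output through a small JSON writer (common.c below uses its jsonw_* calls for error reporting). A rough sketch of emitting one such object, under the assumption that bpftool's json_writer.h keeps the iproute2-style helpers (jsonw_new(), jsonw_pretty(), jsonw_string() and jsonw_destroy() are assumptions here; only the object/name calls appear verbatim in this patch):

#include <stdbool.h>
#include <stdio.h>
#include "json_writer.h"

static void emit_json_error(const char *msg, bool pretty)
{
        json_writer_t *wtr = jsonw_new(stdout);         /* assumed constructor */

        if (pretty)
                jsonw_pretty(wtr, true);                /* assumed pretty-print toggle */

        jsonw_start_object(wtr);                        /* {            */
        jsonw_name(wtr, "error");                       /*   "error":   */
        jsonw_string(wtr, msg);                         /*   "<msg>"    */
        jsonw_end_object(wtr);                          /* }            */

        jsonw_destroy(&wtr);                            /* assumed flush + free */
}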
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
new file mode 100644
index 000000000000..ec3052c0b004
--- /dev/null
+++ b/tools/bpf/bpftool/Makefile
@@ -0,0 +1,93 @@
1include ../../scripts/Makefile.include
2
3include ../../scripts/utilities.mak
4
5ifeq ($(srctree),)
6srctree := $(patsubst %/,%,$(dir $(CURDIR)))
7srctree := $(patsubst %/,%,$(dir $(srctree)))
8srctree := $(patsubst %/,%,$(dir $(srctree)))
9#$(info Determined 'srctree' to be $(srctree))
10endif
11
12ifneq ($(objtree),)
13#$(info Determined 'objtree' to be $(objtree))
14endif
15
16ifneq ($(OUTPUT),)
17#$(info Determined 'OUTPUT' to be $(OUTPUT))
18# Adding $(OUTPUT) as a directory to look for source files,
19# because we use generated output files as source dependencies
20# for flex/bison parsers.
21VPATH += $(OUTPUT)
22export VPATH
23endif
24
25ifeq ($(V),1)
26 Q =
27else
28 Q = @
29endif
30
31BPF_DIR = $(srctree)/tools/lib/bpf/
32
33ifneq ($(OUTPUT),)
34 BPF_PATH=$(OUTPUT)
35else
36 BPF_PATH=$(BPF_DIR)
37endif
38
39LIBBPF = $(BPF_PATH)libbpf.a
40
41$(LIBBPF): FORCE
42 $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT)
43
44$(LIBBPF)-clean:
45 $(call QUIET_CLEAN, libbpf)
46 $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) clean >/dev/null
47
48prefix = /usr/local
49bash_compdir ?= /usr/share/bash-completion/completions
50
51CC = gcc
52
53CFLAGS += -O2
54CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wshadow
55CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/tools/include/uapi -I$(srctree)/tools/include -I$(srctree)/tools/lib/bpf -I$(srctree)/kernel/bpf/
56LIBS = -lelf -lbfd -lopcodes $(LIBBPF)
57
58include $(wildcard *.d)
59
60all: $(OUTPUT)bpftool
61
62SRCS=$(wildcard *.c)
63OBJS=$(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
64
65$(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
66 $(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
67
68$(OUTPUT)bpftool: $(OBJS) $(LIBBPF)
69 $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $^ $(LIBS)
70
71$(OUTPUT)%.o: %.c
72 $(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
73
74clean: $(LIBBPF)-clean
75 $(call QUIET_CLEAN, bpftool)
76 $(Q)rm -rf $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
77
78install:
79 install -m 0755 -d $(prefix)/sbin
80 install $(OUTPUT)bpftool $(prefix)/sbin/bpftool
81 install -m 0755 -d $(bash_compdir)
82 install -m 0644 bash-completion/bpftool $(bash_compdir)
83
84doc:
85 $(Q)$(MAKE) -C Documentation/
86
87doc-install:
88 $(Q)$(MAKE) -C Documentation/ install
89
90FORCE:
91
92.PHONY: all clean FORCE install doc doc-install
93.DEFAULT_GOAL := all
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
new file mode 100644
index 000000000000..7febee05c8e7
--- /dev/null
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -0,0 +1,354 @@
1# bpftool(8) bash completion -*- shell-script -*-
2#
3# Copyright (C) 2017 Netronome Systems, Inc.
4#
5# This software is dual licensed under the GNU General License
6# Version 2, June 1991 as shown in the file COPYING in the top-level
7# directory of this source tree or the BSD 2-Clause License provided
8# below. You have the option to license this software under the
9# complete terms of either license.
10#
11# The BSD 2-Clause License:
12#
13# Redistribution and use in source and binary forms, with or
14# without modification, are permitted provided that the following
15# conditions are met:
16#
17# 1. Redistributions of source code must retain the above
18# copyright notice, this list of conditions and the following
19# disclaimer.
20#
21# 2. Redistributions in binary form must reproduce the above
22# copyright notice, this list of conditions and the following
23# disclaimer in the documentation and/or other materials
24# provided with the distribution.
25#
26# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33# SOFTWARE.
34#
35# Author: Quentin Monnet <quentin.monnet@netronome.com>
36
37# Takes a list of words as arguments; each one of them is added to COMPREPLY if
38# it is not already present on the command line. Returns no value.
39_bpftool_once_attr()
40{
41 local w idx found
42 for w in $*; do
43 found=0
44 for (( idx=3; idx < ${#words[@]}-1; idx++ )); do
45 if [[ $w == ${words[idx]} ]]; then
46 found=1
47 break
48 fi
49 done
50 [[ $found -eq 0 ]] && \
51 COMPREPLY+=( $( compgen -W "$w" -- "$cur" ) )
52 done
53}
54
55# Takes a list of words as arguments; adds them all to COMPREPLY if none of them
56# is already present on the command line. Returns no value.
57_bpftool_one_of_list()
58{
59 local w idx
60 for w in $*; do
61 for (( idx=3; idx < ${#words[@]}-1; idx++ )); do
62 [[ $w == ${words[idx]} ]] && return 1
63 done
64 done
65 COMPREPLY+=( $( compgen -W "$*" -- "$cur" ) )
66}
67
68_bpftool_get_map_ids()
69{
70 COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
71 command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) )
72}
73
74_bpftool_get_prog_ids()
75{
76 COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \
77 command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) )
78}
79
80_bpftool_get_prog_tags()
81{
82 COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \
83 command sed -n 's/.*"tag": "\(.*\)",$/\1/p' )" -- "$cur" ) )
84}
85
86# For bpftool map update: retrieve type of the map to update.
87_bpftool_map_update_map_type()
88{
89 local keyword ref
90 for (( idx=3; idx < ${#words[@]}-1; idx++ )); do
91 if [[ ${words[$((idx-2))]} == "update" ]]; then
92 keyword=${words[$((idx-1))]}
93 ref=${words[$((idx))]}
94 fi
95 done
96 [[ -z $ref ]] && return 0
97
98 local type
99 type=$(bpftool -jp map show $keyword $ref | \
100 command sed -n 's/.*"type": "\(.*\)",$/\1/p')
101 printf $type
102}
103
104_bpftool_map_update_get_id()
105{
106 # Is it the map to update, or a map to insert into the map to update?
107 # Search for "value" keyword.
108 local idx value
109 for (( idx=7; idx < ${#words[@]}-1; idx++ )); do
110 if [[ ${words[idx]} == "value" ]]; then
111 value=1
112 break
113 fi
114 done
115 [[ $value -eq 0 ]] && _bpftool_get_map_ids && return 0
116
117 # The id to complete is for a value. It can be either a prog id or a map id. This
118 # depends on the type of the map to update.
119 local type=$(_bpftool_map_update_map_type)
120 case $type in
121 array_of_maps|hash_of_maps)
122 _bpftool_get_map_ids
123 return 0
124 ;;
125 prog_array)
126 _bpftool_get_prog_ids
127 return 0
128 ;;
129 *)
130 return 0
131 ;;
132 esac
133}
134
135_bpftool()
136{
137 local cur prev words objword
138 _init_completion || return
139
140 # Deal with simplest keywords
141 case $prev in
142 help|key|opcodes)
143 return 0
144 ;;
145 tag)
146 _bpftool_get_prog_tags
147 return 0
148 ;;
149 file|pinned)
150 _filedir
151 return 0
152 ;;
153 batch)
154 COMPREPLY=( $( compgen -W 'file' -- "$cur" ) )
155 return 0
156 ;;
157 esac
158
159 # Search for object and command
160 local object command cmdword
161 for (( cmdword=1; cmdword < ${#words[@]}-1; cmdword++ )); do
162 [[ -n $object ]] && command=${words[cmdword]} && break
163 [[ ${words[cmdword]} != -* ]] && object=${words[cmdword]}
164 done
165
166 if [[ -z $object ]]; then
167 case $cur in
168 -*)
169 local c='--version --json --pretty'
170 COMPREPLY=( $( compgen -W "$c" -- "$cur" ) )
171 return 0
172 ;;
173 *)
174 COMPREPLY=( $( compgen -W "$( bpftool help 2>&1 | \
175 command sed \
176 -e '/OBJECT := /!d' \
177 -e 's/.*{//' \
178 -e 's/}.*//' \
179 -e 's/|//g' )" -- "$cur" ) )
180 COMPREPLY+=( $( compgen -W 'batch help' -- "$cur" ) )
181 return 0
182 ;;
183 esac
184 fi
185
186 [[ $command == help ]] && return 0
187
188 # Completion depends on object and command in use
189 case $object in
190 prog)
191 case $prev in
192 id)
193 _bpftool_get_prog_ids
194 return 0
195 ;;
196 esac
197
198 local PROG_TYPE='id pinned tag'
199 case $command in
200 show)
201 [[ $prev != "$command" ]] && return 0
202 COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
203 return 0
204 ;;
205 dump)
206 case $prev in
207 $command)
208 COMPREPLY+=( $( compgen -W "xlated jited" -- \
209 "$cur" ) )
210 return 0
211 ;;
212 xlated|jited)
213 COMPREPLY=( $( compgen -W "$PROG_TYPE" -- \
214 "$cur" ) )
215 return 0
216 ;;
217 *)
218 _bpftool_once_attr 'file'
219 COMPREPLY+=( $( compgen -W 'opcodes' -- \
220 "$cur" ) )
221 return 0
222 ;;
223 esac
224 ;;
225 pin)
226 if [[ $prev == "$command" ]]; then
227 COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
228 else
229 _filedir
230 fi
231 return 0
232 ;;
233 *)
234 [[ $prev == $object ]] && \
235 COMPREPLY=( $( compgen -W 'dump help pin show' -- \
236 "$cur" ) )
237 ;;
238 esac
239 ;;
240 map)
241 local MAP_TYPE='id pinned'
242 case $command in
243 show|dump)
244 case $prev in
245 $command)
246 COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
247 return 0
248 ;;
249 id)
250 _bpftool_get_map_ids
251 return 0
252 ;;
253 *)
254 return 0
255 ;;
256 esac
257 ;;
258 lookup|getnext|delete)
259 case $prev in
260 $command)
261 COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
262 return 0
263 ;;
264 id)
265 _bpftool_get_map_ids
266 return 0
267 ;;
268 key)
269 return 0
270 ;;
271 *)
272 _bpftool_once_attr 'key'
273 return 0
274 ;;
275 esac
276 ;;
277 update)
278 case $prev in
279 $command)
280 COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
281 return 0
282 ;;
283 id)
284 _bpftool_map_update_get_id
285 return 0
286 ;;
287 key)
288 return 0
289 ;;
290 value)
291 # We can have bytes, or references to a prog or a
292 # map, depending on the type of the map to update.
293 case $(_bpftool_map_update_map_type) in
294 array_of_maps|hash_of_maps)
295 local MAP_TYPE='id pinned'
296 COMPREPLY+=( $( compgen -W "$MAP_TYPE" \
297 -- "$cur" ) )
298 return 0
299 ;;
300 prog_array)
301 local PROG_TYPE='id pinned tag'
302 COMPREPLY+=( $( compgen -W "$PROG_TYPE" \
303 -- "$cur" ) )
304 return 0
305 ;;
306 *)
307 return 0
308 ;;
309 esac
310 return 0
311 ;;
312 *)
313 _bpftool_once_attr 'key'
314 local UPDATE_FLAGS='any exist noexist'
315 for (( idx=3; idx < ${#words[@]}-1; idx++ )); do
316 if [[ ${words[idx]} == 'value' ]]; then
317 # 'value' is present, but is not the last
318 # word i.e. we can now have UPDATE_FLAGS.
319 _bpftool_one_of_list "$UPDATE_FLAGS"
320 return 0
321 fi
322 done
323 for (( idx=3; idx < ${#words[@]}-1; idx++ )); do
324 if [[ ${words[idx]} == 'key' ]]; then
325 # 'key' is present, but is not the last
326 # word i.e. we can now have 'value'.
327 _bpftool_once_attr 'value'
328 return 0
329 fi
330 done
331 return 0
332 ;;
333 esac
334 ;;
335 pin)
336 if [[ $prev == "$command" ]]; then
337 COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
338 else
339 _filedir
340 fi
341 return 0
342 ;;
343 *)
344 [[ $prev == $object ]] && \
345 COMPREPLY=( $( compgen -W 'delete dump getnext help \
346 lookup pin show update' -- "$cur" ) )
347 ;;
348 esac
349 ;;
350 esac
351} &&
352complete -F _bpftool bpftool
353
354# ex: ts=4 sw=4 et filetype=sh
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
new file mode 100644
index 000000000000..2bd3b280e6dd
--- /dev/null
+++ b/tools/bpf/bpftool/common.c
@@ -0,0 +1,405 @@
1/*
2 * Copyright (C) 2017 Netronome Systems, Inc.
3 *
4 * This software is dual licensed under the GNU General Public License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
8 *
9 * The BSD 2-Clause License:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34/* Author: Jakub Kicinski <kubakici@wp.pl> */
35
36#include <errno.h>
37#include <fts.h>
38#include <libgen.h>
39#include <mntent.h>
40#include <stdbool.h>
41#include <stdio.h>
42#include <stdlib.h>
43#include <string.h>
44#include <unistd.h>
45#include <linux/limits.h>
46#include <linux/magic.h>
47#include <sys/mount.h>
48#include <sys/types.h>
49#include <sys/vfs.h>
50
51#include <bpf.h>
52
53#include "main.h"
54
55void p_err(const char *fmt, ...)
56{
57 va_list ap;
58
59 va_start(ap, fmt);
60 if (json_output) {
61 jsonw_start_object(json_wtr);
62 jsonw_name(json_wtr, "error");
63 jsonw_vprintf_enquote(json_wtr, fmt, ap);
64 jsonw_end_object(json_wtr);
65 } else {
66 fprintf(stderr, "Error: ");
67 vfprintf(stderr, fmt, ap);
68 fprintf(stderr, "\n");
69 }
70 va_end(ap);
71}
72
73void p_info(const char *fmt, ...)
74{
75 va_list ap;
76
77 if (json_output)
78 return;
79
80 va_start(ap, fmt);
81 vfprintf(stderr, fmt, ap);
82 fprintf(stderr, "\n");
83 va_end(ap);
84}
85
86static bool is_bpffs(char *path)
87{
88 struct statfs st_fs;
89
90 if (statfs(path, &st_fs) < 0)
91 return false;
92
93 return (unsigned long)st_fs.f_type == BPF_FS_MAGIC;
94}
95
96static int mnt_bpffs(const char *target, char *buff, size_t bufflen)
97{
98 bool bind_done = false;
99
100 while (mount("", target, "none", MS_PRIVATE | MS_REC, NULL)) {
101 if (errno != EINVAL || bind_done) {
102 snprintf(buff, bufflen,
103 "mount --make-private %s failed: %s",
104 target, strerror(errno));
105 return -1;
106 }
107
108 if (mount(target, target, "none", MS_BIND, NULL)) {
109 snprintf(buff, bufflen,
110 "mount --bind %s %s failed: %s",
111 target, target, strerror(errno));
112 return -1;
113 }
114
115 bind_done = true;
116 }
117
118 if (mount("bpf", target, "bpf", 0, "mode=0700")) {
119 snprintf(buff, bufflen, "mount -t bpf bpf %s failed: %s",
120 target, strerror(errno));
121 return -1;
122 }
123
124 return 0;
125}
126
127int open_obj_pinned(char *path)
128{
129 int fd;
130
131 fd = bpf_obj_get(path);
132 if (fd < 0) {
133 p_err("bpf obj get (%s): %s", path,
134 errno == EACCES && !is_bpffs(dirname(path)) ?
135 "directory not in bpf file system (bpffs)" :
136 strerror(errno));
137 return -1;
138 }
139
140 return fd;
141}
142
143int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type)
144{
145 enum bpf_obj_type type;
146 int fd;
147
148 fd = open_obj_pinned(path);
149 if (fd < 0)
150 return -1;
151
152 type = get_fd_type(fd);
153 if (type < 0) {
154 close(fd);
155 return type;
156 }
157 if (type != exp_type) {
158 p_err("incorrect object type: %s", get_fd_type_name(type));
159 close(fd);
160 return -1;
161 }
162
163 return fd;
164}
165
166int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
167{
168 char err_str[ERR_MAX_LEN];
169 unsigned int id;
170 char *endptr;
171 char *file;
172 char *dir;
173 int err;
174 int fd;
175
176 if (!is_prefix(*argv, "id")) {
177 p_err("expected 'id' got %s", *argv);
178 return -1;
179 }
180 NEXT_ARG();
181
182 id = strtoul(*argv, &endptr, 0);
183 if (*endptr) {
184 p_err("can't parse %s as ID", *argv);
185 return -1;
186 }
187 NEXT_ARG();
188
189 if (argc != 1)
190 usage();
191
192 fd = get_fd_by_id(id);
193 if (fd < 0) {
194 		p_err("can't get object by id (%u): %s", id, strerror(errno));
195 return -1;
196 }
197
198 err = bpf_obj_pin(fd, *argv);
199 if (!err)
200 goto out_close;
201
202 file = malloc(strlen(*argv) + 1);
203 strcpy(file, *argv);
204 dir = dirname(file);
205
206 if (errno != EPERM || is_bpffs(dir)) {
207 p_err("can't pin the object (%s): %s", *argv, strerror(errno));
208 goto out_free;
209 }
210
211 /* Attempt to mount bpffs, then retry pinning. */
212 err = mnt_bpffs(dir, err_str, ERR_MAX_LEN);
213 if (!err) {
214 err = bpf_obj_pin(fd, *argv);
215 if (err)
216 p_err("can't pin the object (%s): %s", *argv,
217 strerror(errno));
218 } else {
219 err_str[ERR_MAX_LEN - 1] = '\0';
220 p_err("can't mount BPF file system to pin the object (%s): %s",
221 *argv, err_str);
222 }
223
224out_free:
225 free(file);
226out_close:
227 close(fd);
228 return err;
229}
230
231const char *get_fd_type_name(enum bpf_obj_type type)
232{
233 static const char * const names[] = {
234 [BPF_OBJ_UNKNOWN] = "unknown",
235 [BPF_OBJ_PROG] = "prog",
236 [BPF_OBJ_MAP] = "map",
237 };
238
239 if (type < 0 || type >= ARRAY_SIZE(names) || !names[type])
240 return names[BPF_OBJ_UNKNOWN];
241
242 return names[type];
243}
244
245int get_fd_type(int fd)
246{
247 char path[PATH_MAX];
248 char buf[512];
249 ssize_t n;
250
251 snprintf(path, sizeof(path), "/proc/%d/fd/%d", getpid(), fd);
252
253 n = readlink(path, buf, sizeof(buf));
254 if (n < 0) {
255 p_err("can't read link type: %s", strerror(errno));
256 return -1;
257 }
258 	if (n == sizeof(buf)) {
259 p_err("can't read link type: path too long!");
260 return -1;
261 }
262
263 if (strstr(buf, "bpf-map"))
264 return BPF_OBJ_MAP;
265 else if (strstr(buf, "bpf-prog"))
266 return BPF_OBJ_PROG;
267
268 return BPF_OBJ_UNKNOWN;
269}
270
271char *get_fdinfo(int fd, const char *key)
272{
273 char path[PATH_MAX];
274 char *line = NULL;
275 size_t line_n = 0;
276 ssize_t n;
277 FILE *fdi;
278
279 snprintf(path, sizeof(path), "/proc/%d/fdinfo/%d", getpid(), fd);
280
281 fdi = fopen(path, "r");
282 if (!fdi) {
283 p_err("can't open fdinfo: %s", strerror(errno));
284 return NULL;
285 }
286
287 	while ((n = getline(&line, &line_n, fdi)) > 0) {
288 char *value;
289 int len;
290
291 if (!strstr(line, key))
292 continue;
293
294 fclose(fdi);
295
296 value = strchr(line, '\t');
297 if (!value || !value[1]) {
298 p_err("malformed fdinfo!?");
299 free(line);
300 return NULL;
301 }
302 value++;
303
304 len = strlen(value);
305 memmove(line, value, len);
306 line[len - 1] = '\0';
307
308 return line;
309 }
310
311 p_err("key '%s' not found in fdinfo", key);
312 free(line);
313 fclose(fdi);
314 return NULL;
315}
316
317void print_hex_data_json(uint8_t *data, size_t len)
318{
319 unsigned int i;
320
321 jsonw_start_array(json_wtr);
322 for (i = 0; i < len; i++)
323 jsonw_printf(json_wtr, "\"0x%02hhx\"", data[i]);
324 jsonw_end_array(json_wtr);
325}
326
327int build_pinned_obj_table(struct pinned_obj_table *tab,
328 enum bpf_obj_type type)
329{
330 struct bpf_prog_info pinned_info = {};
331 struct pinned_obj *obj_node = NULL;
332 __u32 len = sizeof(pinned_info);
333 struct mntent *mntent = NULL;
334 enum bpf_obj_type objtype;
335 FILE *mntfile = NULL;
336 FTSENT *ftse = NULL;
337 FTS *fts = NULL;
338 int fd, err;
339
340 mntfile = setmntent("/proc/mounts", "r");
341 if (!mntfile)
342 return -1;
343
344 while ((mntent = getmntent(mntfile))) {
345 char *path[] = { mntent->mnt_dir, NULL };
346
347 if (strncmp(mntent->mnt_type, "bpf", 3) != 0)
348 continue;
349
350 fts = fts_open(path, 0, NULL);
351 if (!fts)
352 continue;
353
354 while ((ftse = fts_read(fts))) {
355 if (!(ftse->fts_info & FTS_F))
356 continue;
357 fd = open_obj_pinned(ftse->fts_path);
358 if (fd < 0)
359 continue;
360
361 objtype = get_fd_type(fd);
362 if (objtype != type) {
363 close(fd);
364 continue;
365 }
366 memset(&pinned_info, 0, sizeof(pinned_info));
367 err = bpf_obj_get_info_by_fd(fd, &pinned_info, &len);
368 if (err) {
369 close(fd);
370 continue;
371 }
372
373 obj_node = malloc(sizeof(*obj_node));
374 if (!obj_node) {
375 close(fd);
376 fts_close(fts);
377 fclose(mntfile);
378 return -1;
379 }
380
381 memset(obj_node, 0, sizeof(*obj_node));
382 obj_node->id = pinned_info.id;
383 obj_node->path = strdup(ftse->fts_path);
384 hash_add(tab->table, &obj_node->hash, obj_node->id);
385
386 close(fd);
387 }
388 fts_close(fts);
389 }
390 fclose(mntfile);
391 return 0;
392}
393
394void delete_pinned_obj_table(struct pinned_obj_table *tab)
395{
396 struct pinned_obj *obj;
397 struct hlist_node *tmp;
398 unsigned int bkt;
399
400 hash_for_each_safe(tab->table, bkt, tmp, obj, hash) {
401 hash_del(&obj->hash);
402 free(obj->path);
403 free(obj);
404 }
405}
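
For orientation, the pinning path above reduces to: try bpf_obj_pin(); if that fails with EPERM and the target directory is not already on a bpffs mount, mount a BPF filesystem there (mnt_bpffs() above additionally makes the mount point private, bind-mounting it onto itself first when needed) and pin again. The helper below is a condensed, hypothetical sketch of that flow using only libbpf's bpf_obj_pin(); it is not part of the patch.

/* Sketch only: the pin-or-mount-and-retry flow of do_pin_any(), reduced to
 * its essence. The real code also consults is_bpffs() and reports errors
 * through p_err().
 */
#include <errno.h>
#include <libgen.h>
#include <limits.h>
#include <string.h>
#include <sys/mount.h>

#include <bpf.h>			/* libbpf: bpf_obj_pin() */

static int pin_with_fallback(int fd, const char *path)
{
	char dir[PATH_MAX];

	if (!bpf_obj_pin(fd, path))
		return 0;		/* pinned on the first try */

	if (errno != EPERM)
		return -1;		/* only EPERM hints at "no bpffs here" */

	/* dirname() may modify its argument, so work on a copy. */
	strncpy(dir, path, sizeof(dir) - 1);
	dir[sizeof(dir) - 1] = '\0';

	if (mount("bpf", dirname(dir), "bpf", 0, "mode=0700"))
		return -1;		/* could not mount bpffs */

	return bpf_obj_pin(fd, path);	/* retry now that bpffs is mounted */
}
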
diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c
new file mode 100644
index 000000000000..1551d3918d4c
--- /dev/null
+++ b/tools/bpf/bpftool/jit_disasm.c
@@ -0,0 +1,162 @@
1/*
2 * Based on:
3 *
4 * Minimal BPF JIT image disassembler
5 *
6 * Disassembles BPF JIT compiler emitted opcodes back to asm insn's for
7 * debugging or verification purposes.
8 *
9 * Copyright 2013 Daniel Borkmann <daniel@iogearbox.net>
10 * Licensed under the GNU General Public License, version 2.0 (GPLv2)
11 */
12
13#include <stdarg.h>
14#include <stdint.h>
15#include <stdio.h>
16#include <stdlib.h>
17#include <assert.h>
18#include <unistd.h>
19#include <string.h>
20#include <bfd.h>
21#include <dis-asm.h>
22#include <sys/types.h>
23#include <sys/stat.h>
24#include <limits.h>
25
26#include "json_writer.h"
27#include "main.h"
28
29static void get_exec_path(char *tpath, size_t size)
30{
31 ssize_t len;
32 char *path;
33
34 snprintf(tpath, size, "/proc/%d/exe", (int) getpid());
35 tpath[size - 1] = 0;
36
37 path = strdup(tpath);
38 assert(path);
39
40 len = readlink(path, tpath, size - 1);
41 assert(len > 0);
42 tpath[len] = 0;
43
44 free(path);
45}
46
47static int oper_count;
48static int fprintf_json(void *out, const char *fmt, ...)
49{
50 va_list ap;
51 char *s;
52
53 va_start(ap, fmt);
54 if (!oper_count) {
55 int i;
56
57 s = va_arg(ap, char *);
58
59 /* Strip trailing spaces */
60 i = strlen(s) - 1;
61 while (s[i] == ' ')
62 s[i--] = '\0';
63
64 jsonw_string_field(json_wtr, "operation", s);
65 jsonw_name(json_wtr, "operands");
66 jsonw_start_array(json_wtr);
67 oper_count++;
68 } else if (!strcmp(fmt, ",")) {
69 /* Skip */
70 } else {
71 s = va_arg(ap, char *);
72 jsonw_string(json_wtr, s);
73 oper_count++;
74 }
75 va_end(ap);
76 return 0;
77}
78
79void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes)
80{
81 disassembler_ftype disassemble;
82 struct disassemble_info info;
83 int count, i, pc = 0;
84 char tpath[PATH_MAX];
85 bfd *bfdf;
86
87 if (!len)
88 return;
89
90 memset(tpath, 0, sizeof(tpath));
91 get_exec_path(tpath, sizeof(tpath));
92
93 bfdf = bfd_openr(tpath, NULL);
94 assert(bfdf);
95 assert(bfd_check_format(bfdf, bfd_object));
96
97 if (json_output)
98 init_disassemble_info(&info, stdout,
99 (fprintf_ftype) fprintf_json);
100 else
101 init_disassemble_info(&info, stdout,
102 (fprintf_ftype) fprintf);
103 info.arch = bfd_get_arch(bfdf);
104 info.mach = bfd_get_mach(bfdf);
105 info.buffer = image;
106 info.buffer_length = len;
107
108 disassemble_init_for_target(&info);
109
110 disassemble = disassembler(bfdf);
111 assert(disassemble);
112
113 if (json_output)
114 jsonw_start_array(json_wtr);
115 do {
116 if (json_output) {
117 jsonw_start_object(json_wtr);
118 oper_count = 0;
119 jsonw_name(json_wtr, "pc");
120 jsonw_printf(json_wtr, "\"0x%x\"", pc);
121 } else {
122 printf("%4x:\t", pc);
123 }
124
125 count = disassemble(pc, &info);
126 if (json_output) {
127 			/* The operand array was started in fprintf_json(). Before
128 			 * closing it, make sure we have a _null_ value if no
129 			 * operand other than the operation code was present.
130 */
131 if (oper_count == 1)
132 jsonw_null(json_wtr);
133 jsonw_end_array(json_wtr);
134 }
135
136 if (opcodes) {
137 if (json_output) {
138 jsonw_name(json_wtr, "opcodes");
139 jsonw_start_array(json_wtr);
140 for (i = 0; i < count; ++i)
141 jsonw_printf(json_wtr, "\"0x%02hhx\"",
142 (uint8_t)image[pc + i]);
143 jsonw_end_array(json_wtr);
144 } else {
145 printf("\n\t");
146 for (i = 0; i < count; ++i)
147 printf("%02x ",
148 (uint8_t)image[pc + i]);
149 }
150 }
151 if (json_output)
152 jsonw_end_object(json_wtr);
153 else
154 printf("\n");
155
156 pc += count;
157 } while (count > 0 && pc < len);
158 if (json_output)
159 jsonw_end_array(json_wtr);
160
161 bfd_close(bfdf);
162}
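
disasm_print_insn() above only needs a buffer holding the JITed image and its length. A hedged sketch of how such a buffer is typically obtained follows: the usual two-call bpf_obj_get_info_by_fd() pattern, assuming libbpf's bpf_prog_get_fd_by_id() and the jited_prog_insns/jited_prog_len members of struct bpf_prog_info from this series; disasm_prog_id() is a made-up helper name, not part of the patch.

/* Sketch: fetch a program's JITed image and hand it to disasm_print_insn(). */
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <bpf.h>
#include "main.h"		/* ptr_to_u64(), disasm_print_insn() */

static int disasm_prog_id(__u32 id, int opcodes)
{
	struct bpf_prog_info info = {};
	__u32 len = sizeof(info);
	unsigned char *image;
	unsigned int img_len;
	int fd, err = -1;

	fd = bpf_prog_get_fd_by_id(id);
	if (fd < 0)
		return -1;

	/* First call: only learn how large the JITed image is. */
	if (bpf_obj_get_info_by_fd(fd, &info, &len) || !info.jited_prog_len)
		goto out_close;
	img_len = info.jited_prog_len;

	image = malloc(img_len);
	if (!image)
		goto out_close;

	/* Second call: point the kernel at our buffer so it copies the image. */
	memset(&info, 0, sizeof(info));
	info.jited_prog_insns = ptr_to_u64(image);
	info.jited_prog_len = img_len;
	len = sizeof(info);
	if (bpf_obj_get_info_by_fd(fd, &info, &len))
		goto out_free;

	disasm_print_insn(image, info.jited_prog_len, opcodes);
	err = 0;
out_free:
	free(image);
out_close:
	close(fd);
	return err;
}
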
diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c
new file mode 100644
index 000000000000..c6eef76322ae
--- /dev/null
+++ b/tools/bpf/bpftool/json_writer.c
@@ -0,0 +1,356 @@
1/*
2 * Simple streaming JSON writer
3 *
4 * This takes care of the annoying bits of JSON syntax like the commas
5 * after elements
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * Authors: Stephen Hemminger <stephen@networkplumber.org>
13 */
14
15#include <stdio.h>
16#include <stdbool.h>
17#include <stdarg.h>
18#include <assert.h>
19#include <malloc.h>
20#include <inttypes.h>
21#include <stdint.h>
22
23#include "json_writer.h"
24
25struct json_writer {
26 FILE *out; /* output file */
27 unsigned depth; /* nesting */
28 	bool pretty; /* optional whitespace */
29 char sep; /* either nul or comma */
30};
31
32/* indentation for pretty print */
33static void jsonw_indent(json_writer_t *self)
34{
35 unsigned i;
36 for (i = 0; i < self->depth; ++i)
37 fputs(" ", self->out);
38}
39
40/* end current line and indent if pretty printing */
41static void jsonw_eol(json_writer_t *self)
42{
43 if (!self->pretty)
44 return;
45
46 putc('\n', self->out);
47 jsonw_indent(self);
48}
49
50/* If current object is not empty print a comma */
51static void jsonw_eor(json_writer_t *self)
52{
53 if (self->sep != '\0')
54 putc(self->sep, self->out);
55 self->sep = ',';
56}
57
58
59/* Output JSON encoded string */
60/* Handles C escapes, does not do Unicode */
61static void jsonw_puts(json_writer_t *self, const char *str)
62{
63 putc('"', self->out);
64 for (; *str; ++str)
65 switch (*str) {
66 case '\t':
67 fputs("\\t", self->out);
68 break;
69 case '\n':
70 fputs("\\n", self->out);
71 break;
72 case '\r':
73 fputs("\\r", self->out);
74 break;
75 case '\f':
76 fputs("\\f", self->out);
77 break;
78 case '\b':
79 fputs("\\b", self->out);
80 break;
81 case '\\':
82 			fputs("\\\\", self->out);
83 break;
84 case '"':
85 fputs("\\\"", self->out);
86 break;
87 case '\'':
88 			putc('\'', self->out);
89 break;
90 default:
91 putc(*str, self->out);
92 }
93 putc('"', self->out);
94}
95
96/* Create a new JSON stream */
97json_writer_t *jsonw_new(FILE *f)
98{
99 json_writer_t *self = malloc(sizeof(*self));
100 if (self) {
101 self->out = f;
102 self->depth = 0;
103 self->pretty = false;
104 self->sep = '\0';
105 }
106 return self;
107}
108
109/* End output to JSON stream */
110void jsonw_destroy(json_writer_t **self_p)
111{
112 json_writer_t *self = *self_p;
113
114 assert(self->depth == 0);
115 fputs("\n", self->out);
116 fflush(self->out);
117 free(self);
118 *self_p = NULL;
119}
120
121void jsonw_pretty(json_writer_t *self, bool on)
122{
123 self->pretty = on;
124}
125
126/* Basic blocks */
127static void jsonw_begin(json_writer_t *self, int c)
128{
129 jsonw_eor(self);
130 putc(c, self->out);
131 ++self->depth;
132 self->sep = '\0';
133}
134
135static void jsonw_end(json_writer_t *self, int c)
136{
137 assert(self->depth > 0);
138
139 --self->depth;
140 if (self->sep != '\0')
141 jsonw_eol(self);
142 putc(c, self->out);
143 self->sep = ',';
144}
145
146
147/* Add a JSON property name */
148void jsonw_name(json_writer_t *self, const char *name)
149{
150 jsonw_eor(self);
151 jsonw_eol(self);
152 self->sep = '\0';
153 jsonw_puts(self, name);
154 putc(':', self->out);
155 if (self->pretty)
156 putc(' ', self->out);
157}
158
159void jsonw_vprintf_enquote(json_writer_t *self, const char *fmt, va_list ap)
160{
161 jsonw_eor(self);
162 putc('"', self->out);
163 vfprintf(self->out, fmt, ap);
164 putc('"', self->out);
165}
166
167void jsonw_printf(json_writer_t *self, const char *fmt, ...)
168{
169 va_list ap;
170
171 va_start(ap, fmt);
172 jsonw_eor(self);
173 vfprintf(self->out, fmt, ap);
174 va_end(ap);
175}
176
177/* Collections */
178void jsonw_start_object(json_writer_t *self)
179{
180 jsonw_begin(self, '{');
181}
182
183void jsonw_end_object(json_writer_t *self)
184{
185 jsonw_end(self, '}');
186}
187
188void jsonw_start_array(json_writer_t *self)
189{
190 jsonw_begin(self, '[');
191}
192
193void jsonw_end_array(json_writer_t *self)
194{
195 jsonw_end(self, ']');
196}
197
198/* JSON value types */
199void jsonw_string(json_writer_t *self, const char *value)
200{
201 jsonw_eor(self);
202 jsonw_puts(self, value);
203}
204
205void jsonw_bool(json_writer_t *self, bool val)
206{
207 jsonw_printf(self, "%s", val ? "true" : "false");
208}
209
210void jsonw_null(json_writer_t *self)
211{
212 jsonw_printf(self, "null");
213}
214
215void jsonw_float_fmt(json_writer_t *self, const char *fmt, double num)
216{
217 jsonw_printf(self, fmt, num);
218}
219
220#ifdef notused
221void jsonw_float(json_writer_t *self, double num)
222{
223 jsonw_printf(self, "%g", num);
224}
225#endif
226
227void jsonw_hu(json_writer_t *self, unsigned short num)
228{
229 jsonw_printf(self, "%hu", num);
230}
231
232void jsonw_uint(json_writer_t *self, uint64_t num)
233{
234 jsonw_printf(self, "%"PRIu64, num);
235}
236
237void jsonw_lluint(json_writer_t *self, unsigned long long int num)
238{
239 jsonw_printf(self, "%llu", num);
240}
241
242void jsonw_int(json_writer_t *self, int64_t num)
243{
244 jsonw_printf(self, "%"PRId64, num);
245}
246
247/* Basic name/value objects */
248void jsonw_string_field(json_writer_t *self, const char *prop, const char *val)
249{
250 jsonw_name(self, prop);
251 jsonw_string(self, val);
252}
253
254void jsonw_bool_field(json_writer_t *self, const char *prop, bool val)
255{
256 jsonw_name(self, prop);
257 jsonw_bool(self, val);
258}
259
260#ifdef notused
261void jsonw_float_field(json_writer_t *self, const char *prop, double val)
262{
263 jsonw_name(self, prop);
264 jsonw_float(self, val);
265}
266#endif
267
268void jsonw_float_field_fmt(json_writer_t *self,
269 const char *prop,
270 const char *fmt,
271 double val)
272{
273 jsonw_name(self, prop);
274 jsonw_float_fmt(self, fmt, val);
275}
276
277void jsonw_uint_field(json_writer_t *self, const char *prop, uint64_t num)
278{
279 jsonw_name(self, prop);
280 jsonw_uint(self, num);
281}
282
283void jsonw_hu_field(json_writer_t *self, const char *prop, unsigned short num)
284{
285 jsonw_name(self, prop);
286 jsonw_hu(self, num);
287}
288
289void jsonw_lluint_field(json_writer_t *self,
290 const char *prop,
291 unsigned long long int num)
292{
293 jsonw_name(self, prop);
294 jsonw_lluint(self, num);
295}
296
297void jsonw_int_field(json_writer_t *self, const char *prop, int64_t num)
298{
299 jsonw_name(self, prop);
300 jsonw_int(self, num);
301}
302
303void jsonw_null_field(json_writer_t *self, const char *prop)
304{
305 jsonw_name(self, prop);
306 jsonw_null(self);
307}
308
309#ifdef TEST
310int main(int argc, char **argv)
311{
312 json_writer_t *wr = jsonw_new(stdout);
313
314 jsonw_start_object(wr);
315 jsonw_pretty(wr, true);
316 jsonw_name(wr, "Vyatta");
317 jsonw_start_object(wr);
318 jsonw_string_field(wr, "url", "http://vyatta.com");
319 jsonw_uint_field(wr, "downloads", 2000000ul);
320 jsonw_float_field(wr, "stock", 8.16);
321
322 jsonw_name(wr, "ARGV");
323 jsonw_start_array(wr);
324 while (--argc)
325 jsonw_string(wr, *++argv);
326 jsonw_end_array(wr);
327
328 jsonw_name(wr, "empty");
329 jsonw_start_array(wr);
330 jsonw_end_array(wr);
331
332 jsonw_name(wr, "NIL");
333 jsonw_start_object(wr);
334 jsonw_end_object(wr);
335
336 jsonw_null_field(wr, "my_null");
337
338 jsonw_name(wr, "special chars");
339 jsonw_start_array(wr);
340 jsonw_string_field(wr, "slash", "/");
341 jsonw_string_field(wr, "newline", "\n");
342 jsonw_string_field(wr, "tab", "\t");
343 jsonw_string_field(wr, "ff", "\f");
344 jsonw_string_field(wr, "quote", "\"");
345 jsonw_string_field(wr, "tick", "\'");
346 jsonw_string_field(wr, "backslash", "\\");
347 jsonw_end_array(wr);
348
349 jsonw_end_object(wr);
350
351 jsonw_end_object(wr);
352 jsonw_destroy(&wr);
353 return 0;
354}
355
356#endif
diff --git a/tools/bpf/bpftool/json_writer.h b/tools/bpf/bpftool/json_writer.h
new file mode 100644
index 000000000000..0fa2fb1b6351
--- /dev/null
+++ b/tools/bpf/bpftool/json_writer.h
@@ -0,0 +1,72 @@
1/*
2 * Simple streaming JSON writer
3 *
4 * This takes care of the annoying bits of JSON syntax like the commas
5 * after elements
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * Authors: Stephen Hemminger <stephen@networkplumber.org>
13 */
14
15#ifndef _JSON_WRITER_H_
16#define _JSON_WRITER_H_
17
18#include <stdbool.h>
19#include <stdint.h>
20#include <stdarg.h>
21
22/* Opaque class structure */
23typedef struct json_writer json_writer_t;
24
25/* Create a new JSON stream */
26json_writer_t *jsonw_new(FILE *f);
27/* End output to JSON stream */
28void jsonw_destroy(json_writer_t **self_p);
29
30/* Cause output to have pretty whitespace */
31void jsonw_pretty(json_writer_t *self, bool on);
32
33/* Add property name */
34void jsonw_name(json_writer_t *self, const char *name);
35
36/* Add value */
37void jsonw_vprintf_enquote(json_writer_t *self, const char *fmt, va_list ap);
38void jsonw_printf(json_writer_t *self, const char *fmt, ...);
39void jsonw_string(json_writer_t *self, const char *value);
40void jsonw_bool(json_writer_t *self, bool value);
41void jsonw_float(json_writer_t *self, double number);
42void jsonw_float_fmt(json_writer_t *self, const char *fmt, double num);
43void jsonw_uint(json_writer_t *self, uint64_t number);
44void jsonw_hu(json_writer_t *self, unsigned short number);
45void jsonw_int(json_writer_t *self, int64_t number);
46void jsonw_null(json_writer_t *self);
47void jsonw_lluint(json_writer_t *self, unsigned long long int num);
48
49/* Useful Combinations of name and value */
50void jsonw_string_field(json_writer_t *self, const char *prop, const char *val);
51void jsonw_bool_field(json_writer_t *self, const char *prop, bool value);
52void jsonw_float_field(json_writer_t *self, const char *prop, double num);
53void jsonw_uint_field(json_writer_t *self, const char *prop, uint64_t num);
54void jsonw_hu_field(json_writer_t *self, const char *prop, unsigned short num);
55void jsonw_int_field(json_writer_t *self, const char *prop, int64_t num);
56void jsonw_null_field(json_writer_t *self, const char *prop);
57void jsonw_lluint_field(json_writer_t *self, const char *prop,
58 unsigned long long int num);
59void jsonw_float_field_fmt(json_writer_t *self, const char *prop,
60 const char *fmt, double val);
61
62/* Collections */
63void jsonw_start_object(json_writer_t *self);
64void jsonw_end_object(json_writer_t *self);
65
66void jsonw_start_array(json_writer_t *self);
67void jsonw_end_array(json_writer_t *self);
68
69/* Override default exception handling */
70typedef void (jsonw_err_handler_fn)(const char *);
71
72#endif /* _JSON_WRITER_H_ */
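
As a quick illustration of the interface above (a sketch, not part of the patch): the writer tracks nesting depth and the pending separator itself, so callers only name fields and values and never emit braces or commas by hand.

/* Sketch: emits {"prog": {"id": 7, "loaded": true}} using the API above. */
#include <stdio.h>

#include "json_writer.h"

int main(void)
{
	json_writer_t *w = jsonw_new(stdout);

	if (!w)
		return 1;
	jsonw_pretty(w, true);			/* optional indentation */
	jsonw_start_object(w);			/* outer object */
	jsonw_name(w, "prog");
	jsonw_start_object(w);			/* nested object */
	jsonw_uint_field(w, "id", 7);
	jsonw_bool_field(w, "loaded", true);	/* comma before it is automatic */
	jsonw_end_object(w);
	jsonw_end_object(w);
	jsonw_destroy(&w);			/* trailing newline, flush, free */
	return 0;
}
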
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
new file mode 100644
index 000000000000..d294bc8168be
--- /dev/null
+++ b/tools/bpf/bpftool/main.c
@@ -0,0 +1,343 @@
1/*
2 * Copyright (C) 2017 Netronome Systems, Inc.
3 *
4 * This software is dual licensed under the GNU General Public License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
8 *
9 * The BSD 2-Clause License:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34/* Author: Jakub Kicinski <kubakici@wp.pl> */
35
36#include <bfd.h>
37#include <ctype.h>
38#include <errno.h>
39#include <getopt.h>
40#include <linux/bpf.h>
41#include <linux/version.h>
42#include <stdio.h>
43#include <stdlib.h>
44#include <string.h>
45
46#include <bpf.h>
47
48#include "main.h"
49
50const char *bin_name;
51static int last_argc;
52static char **last_argv;
53static int (*last_do_help)(int argc, char **argv);
54json_writer_t *json_wtr;
55bool pretty_output;
56bool json_output;
57bool show_pinned;
58struct pinned_obj_table prog_table;
59struct pinned_obj_table map_table;
60
61static void __noreturn clean_and_exit(int i)
62{
63 if (json_output)
64 jsonw_destroy(&json_wtr);
65
66 exit(i);
67}
68
69void usage(void)
70{
71 last_do_help(last_argc - 1, last_argv + 1);
72
73 clean_and_exit(-1);
74}
75
76static int do_help(int argc, char **argv)
77{
78 if (json_output) {
79 jsonw_null(json_wtr);
80 return 0;
81 }
82
83 fprintf(stderr,
84 "Usage: %s [OPTIONS] OBJECT { COMMAND | help }\n"
85 " %s batch file FILE\n"
86 " %s version\n"
87 "\n"
88 " OBJECT := { prog | map }\n"
89 " " HELP_SPEC_OPTIONS "\n"
90 "",
91 bin_name, bin_name, bin_name);
92
93 return 0;
94}
95
96static int do_version(int argc, char **argv)
97{
98 unsigned int version[3];
99
100 version[0] = LINUX_VERSION_CODE >> 16;
101 	version[1] = LINUX_VERSION_CODE >> 8 & 0xff;
102 	version[2] = LINUX_VERSION_CODE & 0xff;
103
104 if (json_output) {
105 jsonw_start_object(json_wtr);
106 jsonw_name(json_wtr, "version");
107 jsonw_printf(json_wtr, "\"%u.%u.%u\"",
108 version[0], version[1], version[2]);
109 jsonw_end_object(json_wtr);
110 } else {
111 printf("%s v%u.%u.%u\n", bin_name,
112 version[0], version[1], version[2]);
113 }
114 return 0;
115}
116
117int cmd_select(const struct cmd *cmds, int argc, char **argv,
118 int (*help)(int argc, char **argv))
119{
120 unsigned int i;
121
122 last_argc = argc;
123 last_argv = argv;
124 last_do_help = help;
125
126 if (argc < 1 && cmds[0].func)
127 return cmds[0].func(argc, argv);
128
129 for (i = 0; cmds[i].func; i++)
130 if (is_prefix(*argv, cmds[i].cmd))
131 return cmds[i].func(argc - 1, argv + 1);
132
133 help(argc - 1, argv + 1);
134
135 return -1;
136}
137
138bool is_prefix(const char *pfx, const char *str)
139{
140 if (!pfx)
141 return false;
142 if (strlen(str) < strlen(pfx))
143 return false;
144
145 return !memcmp(str, pfx, strlen(pfx));
146}
147
148void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep)
149{
150 unsigned char *data = arg;
151 unsigned int i;
152
153 for (i = 0; i < n; i++) {
154 const char *pfx = "";
155
156 if (!i)
157 /* nothing */;
158 else if (!(i % 16))
159 fprintf(f, "\n");
160 else if (!(i % 8))
161 fprintf(f, " ");
162 else
163 pfx = sep;
164
165 fprintf(f, "%s%02hhx", i ? pfx : "", data[i]);
166 }
167}
168
169static int do_batch(int argc, char **argv);
170
171static const struct cmd cmds[] = {
172 { "help", do_help },
173 { "batch", do_batch },
174 { "prog", do_prog },
175 { "map", do_map },
176 { "version", do_version },
177 { 0 }
178};
179
180static int do_batch(int argc, char **argv)
181{
182 unsigned int lines = 0;
183 char *n_argv[4096];
184 char buf[65536];
185 int n_argc;
186 FILE *fp;
187 int err;
188 int i;
189
190 if (argc < 2) {
191 p_err("too few parameters for batch");
192 return -1;
193 } else if (!is_prefix(*argv, "file")) {
194 p_err("expected 'file', got: %s", *argv);
195 return -1;
196 } else if (argc > 2) {
197 p_err("too many parameters for batch");
198 return -1;
199 }
200 NEXT_ARG();
201
202 fp = fopen(*argv, "r");
203 if (!fp) {
204 p_err("Can't open file (%s): %s", *argv, strerror(errno));
205 return -1;
206 }
207
208 if (json_output)
209 jsonw_start_array(json_wtr);
210 while (fgets(buf, sizeof(buf), fp)) {
211 if (strlen(buf) == sizeof(buf) - 1) {
212 errno = E2BIG;
213 break;
214 }
215
216 n_argc = 0;
217 n_argv[n_argc] = strtok(buf, " \t\n");
218
219 while (n_argv[n_argc]) {
220 n_argc++;
221 if (n_argc == ARRAY_SIZE(n_argv)) {
222 p_err("line %d has too many arguments, skip",
223 lines);
224 n_argc = 0;
225 break;
226 }
227 n_argv[n_argc] = strtok(NULL, " \t\n");
228 }
229
230 if (!n_argc)
231 continue;
232
233 if (json_output) {
234 jsonw_start_object(json_wtr);
235 jsonw_name(json_wtr, "command");
236 jsonw_start_array(json_wtr);
237 for (i = 0; i < n_argc; i++)
238 jsonw_string(json_wtr, n_argv[i]);
239 jsonw_end_array(json_wtr);
240 jsonw_name(json_wtr, "output");
241 }
242
243 err = cmd_select(cmds, n_argc, n_argv, do_help);
244
245 if (json_output)
246 jsonw_end_object(json_wtr);
247
248 if (err)
249 goto err_close;
250
251 lines++;
252 }
253
254 if (errno && errno != ENOENT) {
255 perror("reading batch file failed");
256 err = -1;
257 } else {
258 p_info("processed %d lines", lines);
259 err = 0;
260 }
261err_close:
262 fclose(fp);
263
264 if (json_output)
265 jsonw_end_array(json_wtr);
266
267 return err;
268}
269
270int main(int argc, char **argv)
271{
272 static const struct option options[] = {
273 { "json", no_argument, NULL, 'j' },
274 { "help", no_argument, NULL, 'h' },
275 { "pretty", no_argument, NULL, 'p' },
276 { "version", no_argument, NULL, 'V' },
277 { "bpffs", no_argument, NULL, 'f' },
278 { 0 }
279 };
280 int opt, ret;
281
282 last_do_help = do_help;
283 pretty_output = false;
284 json_output = false;
285 show_pinned = false;
286 bin_name = argv[0];
287
288 hash_init(prog_table.table);
289 hash_init(map_table.table);
290
291 opterr = 0;
292 while ((opt = getopt_long(argc, argv, "Vhpjf",
293 options, NULL)) >= 0) {
294 switch (opt) {
295 case 'V':
296 return do_version(argc, argv);
297 case 'h':
298 return do_help(argc, argv);
299 case 'p':
300 pretty_output = true;
301 /* fall through */
302 case 'j':
303 if (!json_output) {
304 json_wtr = jsonw_new(stdout);
305 if (!json_wtr) {
306 p_err("failed to create JSON writer");
307 return -1;
308 }
309 json_output = true;
310 }
311 jsonw_pretty(json_wtr, pretty_output);
312 break;
313 case 'f':
314 show_pinned = true;
315 break;
316 default:
317 p_err("unrecognized option '%s'", argv[optind - 1]);
318 if (json_output)
319 clean_and_exit(-1);
320 else
321 usage();
322 }
323 }
324
325 argc -= optind;
326 argv += optind;
327 if (argc < 0)
328 usage();
329
330 bfd_init();
331
332 ret = cmd_select(cmds, argc, argv, do_help);
333
334 if (json_output)
335 jsonw_destroy(&json_wtr);
336
337 if (show_pinned) {
338 delete_pinned_obj_table(&prog_table);
339 delete_pinned_obj_table(&map_table);
340 }
341
342 return ret;
343}
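
cmd_select() above dispatches on a command prefix against a NULL-terminated struct cmd table, and do_batch() reuses the same mechanism for every line of a batch file. Below is a self-contained sketch of that prefix-dispatch pattern with a made-up two-entry table (struct command, do_greet and do_usage are illustrative names, not bpftool's).

/* Sketch: first-match prefix dispatch in the style of cmd_select(). */
#include <stdio.h>
#include <string.h>

struct command {
	const char *name;
	int (*func)(int argc, char **argv);
};

static int do_greet(int argc, char **argv)
{
	printf("hello, %s\n", argc ? argv[0] : "world");
	return 0;
}

static int do_usage(int argc, char **argv)
{
	printf("commands: greet, usage\n");
	return 0;
}

static const struct command commands[] = {
	{ "greet", do_greet },
	{ "usage", do_usage },
	{ 0 }			/* sentinel, like the { 0 } entries above */
};

int main(int argc, char **argv)
{
	int i;

	if (argc < 2)
		return do_usage(0, NULL);

	/* "g", "gr" or "greet" all pick the "greet" entry; first match wins. */
	for (i = 0; commands[i].func; i++)
		if (!strncmp(argv[1], commands[i].name, strlen(argv[1])))
			return commands[i].func(argc - 2, argv + 2);

	return do_usage(0, NULL);
}
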
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
new file mode 100644
index 000000000000..bff330b49791
--- /dev/null
+++ b/tools/bpf/bpftool/main.h
@@ -0,0 +1,123 @@
1/*
2 * Copyright (C) 2017 Netronome Systems, Inc.
3 *
4 * This software is dual licensed under the GNU General Public License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
8 *
9 * The BSD 2-Clause License:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34/* Author: Jakub Kicinski <kubakici@wp.pl> */
35
36#ifndef __BPF_TOOL_H
37#define __BPF_TOOL_H
38
39/* BFD and kernel.h both define GCC_VERSION, differently */
40#undef GCC_VERSION
41#include <stdbool.h>
42#include <stdio.h>
43#include <linux/bpf.h>
44#include <linux/compiler.h>
45#include <linux/kernel.h>
46#include <linux/hashtable.h>
47
48#include "json_writer.h"
49
50#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
51
52#define NEXT_ARG() ({ argc--; argv++; if (argc < 0) usage(); })
53#define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); })
54#define BAD_ARG() ({ p_err("what is '%s'?", *argv); -1; })
55
56#define ERR_MAX_LEN 1024
57
58#define BPF_TAG_FMT "%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx"
59
60#define HELP_SPEC_PROGRAM \
61 "PROG := { id PROG_ID | pinned FILE | tag PROG_TAG }"
62#define HELP_SPEC_OPTIONS \
63 "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-f|--bpffs} }"
64
65enum bpf_obj_type {
66 BPF_OBJ_UNKNOWN,
67 BPF_OBJ_PROG,
68 BPF_OBJ_MAP,
69};
70
71extern const char *bin_name;
72
73extern json_writer_t *json_wtr;
74extern bool json_output;
75extern bool show_pinned;
76extern struct pinned_obj_table prog_table;
77extern struct pinned_obj_table map_table;
78
79void p_err(const char *fmt, ...);
80void p_info(const char *fmt, ...);
81
82bool is_prefix(const char *pfx, const char *str);
83void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep);
84void usage(void) __noreturn;
85
86struct pinned_obj_table {
87 DECLARE_HASHTABLE(table, 16);
88};
89
90struct pinned_obj {
91 __u32 id;
92 char *path;
93 struct hlist_node hash;
94};
95
96int build_pinned_obj_table(struct pinned_obj_table *table,
97 enum bpf_obj_type type);
98void delete_pinned_obj_table(struct pinned_obj_table *tab);
99
100struct cmd {
101 const char *cmd;
102 int (*func)(int argc, char **argv);
103};
104
105int cmd_select(const struct cmd *cmds, int argc, char **argv,
106 int (*help)(int argc, char **argv));
107
108int get_fd_type(int fd);
109const char *get_fd_type_name(enum bpf_obj_type type);
110char *get_fdinfo(int fd, const char *key);
111int open_obj_pinned(char *path);
112int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type);
113int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32));
114
115int do_prog(int argc, char **arg);
116int do_map(int argc, char **arg);
117
118int prog_parse_fd(int *argc, char ***argv);
119
120void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes);
121void print_hex_data_json(uint8_t *data, size_t len);
122
123#endif
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
new file mode 100644
index 000000000000..a8c3a33dd185
--- /dev/null
+++ b/tools/bpf/bpftool/map.c
@@ -0,0 +1,901 @@
1/*
2 * Copyright (C) 2017 Netronome Systems, Inc.
3 *
4 * This software is dual licensed under the GNU General Public License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
8 *
9 * The BSD 2-Clause License:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34/* Author: Jakub Kicinski <kubakici@wp.pl> */
35
36#include <assert.h>
37#include <ctype.h>
38#include <errno.h>
39#include <fcntl.h>
40#include <stdbool.h>
41#include <stdio.h>
42#include <stdlib.h>
43#include <string.h>
44#include <unistd.h>
45#include <sys/types.h>
46#include <sys/stat.h>
47
48#include <bpf.h>
49
50#include "main.h"
51
52static const char * const map_type_name[] = {
53 [BPF_MAP_TYPE_UNSPEC] = "unspec",
54 [BPF_MAP_TYPE_HASH] = "hash",
55 [BPF_MAP_TYPE_ARRAY] = "array",
56 [BPF_MAP_TYPE_PROG_ARRAY] = "prog_array",
57 [BPF_MAP_TYPE_PERF_EVENT_ARRAY] = "perf_event_array",
58 [BPF_MAP_TYPE_PERCPU_HASH] = "percpu_hash",
59 [BPF_MAP_TYPE_PERCPU_ARRAY] = "percpu_array",
60 [BPF_MAP_TYPE_STACK_TRACE] = "stack_trace",
61 [BPF_MAP_TYPE_CGROUP_ARRAY] = "cgroup_array",
62 [BPF_MAP_TYPE_LRU_HASH] = "lru_hash",
63 [BPF_MAP_TYPE_LRU_PERCPU_HASH] = "lru_percpu_hash",
64 [BPF_MAP_TYPE_LPM_TRIE] = "lpm_trie",
65 [BPF_MAP_TYPE_ARRAY_OF_MAPS] = "array_of_maps",
66 [BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps",
67 [BPF_MAP_TYPE_DEVMAP] = "devmap",
68 [BPF_MAP_TYPE_SOCKMAP] = "sockmap",
69};
70
71static unsigned int get_possible_cpus(void)
72{
73 static unsigned int result;
74 char buf[128];
75 long int n;
76 char *ptr;
77 int fd;
78
79 if (result)
80 return result;
81
82 fd = open("/sys/devices/system/cpu/possible", O_RDONLY);
83 if (fd < 0) {
84 p_err("can't open sysfs possible cpus");
85 exit(-1);
86 }
87
88 n = read(fd, buf, sizeof(buf));
89 if (n < 2) {
90 p_err("can't read sysfs possible cpus");
91 exit(-1);
92 }
93 close(fd);
94
95 if (n == sizeof(buf)) {
96 p_err("read sysfs possible cpus overflow");
97 exit(-1);
98 }
99
100 ptr = buf;
101 n = 0;
102 while (*ptr && *ptr != '\n') {
103 unsigned int a, b;
104
105 if (sscanf(ptr, "%u-%u", &a, &b) == 2) {
106 n += b - a + 1;
107
108 ptr = strchr(ptr, '-') + 1;
109 } else if (sscanf(ptr, "%u", &a) == 1) {
110 n++;
111 } else {
112 assert(0);
113 }
114
115 while (isdigit(*ptr))
116 ptr++;
117 if (*ptr == ',')
118 ptr++;
119 }
120
121 result = n;
122
123 return result;
124}
125
126static bool map_is_per_cpu(__u32 type)
127{
128 return type == BPF_MAP_TYPE_PERCPU_HASH ||
129 type == BPF_MAP_TYPE_PERCPU_ARRAY ||
130 type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
131}
132
133static bool map_is_map_of_maps(__u32 type)
134{
135 return type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
136 type == BPF_MAP_TYPE_HASH_OF_MAPS;
137}
138
139static bool map_is_map_of_progs(__u32 type)
140{
141 return type == BPF_MAP_TYPE_PROG_ARRAY;
142}
143
144static void *alloc_value(struct bpf_map_info *info)
145{
146 if (map_is_per_cpu(info->type))
147 return malloc(info->value_size * get_possible_cpus());
148 else
149 return malloc(info->value_size);
150}
151
152static int map_parse_fd(int *argc, char ***argv)
153{
154 int fd;
155
156 if (is_prefix(**argv, "id")) {
157 unsigned int id;
158 char *endptr;
159
160 NEXT_ARGP();
161
162 id = strtoul(**argv, &endptr, 0);
163 if (*endptr) {
164 p_err("can't parse %s as ID", **argv);
165 return -1;
166 }
167 NEXT_ARGP();
168
169 fd = bpf_map_get_fd_by_id(id);
170 if (fd < 0)
171 p_err("get map by id (%u): %s", id, strerror(errno));
172 return fd;
173 } else if (is_prefix(**argv, "pinned")) {
174 char *path;
175
176 NEXT_ARGP();
177
178 path = **argv;
179 NEXT_ARGP();
180
181 return open_obj_pinned_any(path, BPF_OBJ_MAP);
182 }
183
184 p_err("expected 'id' or 'pinned', got: '%s'?", **argv);
185 return -1;
186}
187
188static int
189map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len)
190{
191 int err;
192 int fd;
193
194 fd = map_parse_fd(argc, argv);
195 if (fd < 0)
196 return -1;
197
198 err = bpf_obj_get_info_by_fd(fd, info, info_len);
199 if (err) {
200 p_err("can't get map info: %s", strerror(errno));
201 close(fd);
202 return err;
203 }
204
205 return fd;
206}
207
208static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
209 unsigned char *value)
210{
211 jsonw_start_object(json_wtr);
212
213 if (!map_is_per_cpu(info->type)) {
214 jsonw_name(json_wtr, "key");
215 print_hex_data_json(key, info->key_size);
216 jsonw_name(json_wtr, "value");
217 print_hex_data_json(value, info->value_size);
218 } else {
219 unsigned int i, n;
220
221 n = get_possible_cpus();
222
223 jsonw_name(json_wtr, "key");
224 print_hex_data_json(key, info->key_size);
225
226 jsonw_name(json_wtr, "values");
227 jsonw_start_array(json_wtr);
228 for (i = 0; i < n; i++) {
229 jsonw_start_object(json_wtr);
230
231 jsonw_int_field(json_wtr, "cpu", i);
232
233 jsonw_name(json_wtr, "value");
234 print_hex_data_json(value + i * info->value_size,
235 info->value_size);
236
237 jsonw_end_object(json_wtr);
238 }
239 jsonw_end_array(json_wtr);
240 }
241
242 jsonw_end_object(json_wtr);
243}
244
245static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
246 unsigned char *value)
247{
248 if (!map_is_per_cpu(info->type)) {
249 bool single_line, break_names;
250
251 break_names = info->key_size > 16 || info->value_size > 16;
252 single_line = info->key_size + info->value_size <= 24 &&
253 !break_names;
254
255 printf("key:%c", break_names ? '\n' : ' ');
256 fprint_hex(stdout, key, info->key_size, " ");
257
258 printf(single_line ? " " : "\n");
259
260 printf("value:%c", break_names ? '\n' : ' ');
261 fprint_hex(stdout, value, info->value_size, " ");
262
263 printf("\n");
264 } else {
265 unsigned int i, n;
266
267 n = get_possible_cpus();
268
269 printf("key:\n");
270 fprint_hex(stdout, key, info->key_size, " ");
271 printf("\n");
272 for (i = 0; i < n; i++) {
273 printf("value (CPU %02d):%c",
274 i, info->value_size > 16 ? '\n' : ' ');
275 fprint_hex(stdout, value + i * info->value_size,
276 info->value_size, " ");
277 printf("\n");
278 }
279 }
280}
281
282static char **parse_bytes(char **argv, const char *name, unsigned char *val,
283 unsigned int n)
284{
285 unsigned int i = 0;
286 char *endptr;
287
288 while (i < n && argv[i]) {
289 val[i] = strtoul(argv[i], &endptr, 0);
290 if (*endptr) {
291 p_err("error parsing byte: %s", argv[i]);
292 return NULL;
293 }
294 i++;
295 }
296
297 if (i != n) {
298 p_err("%s expected %d bytes got %d", name, n, i);
299 return NULL;
300 }
301
302 return argv + i;
303}
304
305static int parse_elem(char **argv, struct bpf_map_info *info,
306 void *key, void *value, __u32 key_size, __u32 value_size,
307 __u32 *flags, __u32 **value_fd)
308{
309 if (!*argv) {
310 if (!key && !value)
311 return 0;
312 p_err("did not find %s", key ? "key" : "value");
313 return -1;
314 }
315
316 if (is_prefix(*argv, "key")) {
317 if (!key) {
318 if (key_size)
319 p_err("duplicate key");
320 else
321 p_err("unnecessary key");
322 return -1;
323 }
324
325 argv = parse_bytes(argv + 1, "key", key, key_size);
326 if (!argv)
327 return -1;
328
329 return parse_elem(argv, info, NULL, value, key_size, value_size,
330 flags, value_fd);
331 } else if (is_prefix(*argv, "value")) {
332 int fd;
333
334 if (!value) {
335 if (value_size)
336 p_err("duplicate value");
337 else
338 p_err("unnecessary value");
339 return -1;
340 }
341
342 argv++;
343
344 if (map_is_map_of_maps(info->type)) {
345 int argc = 2;
346
347 if (value_size != 4) {
348 p_err("value smaller than 4B for map in map?");
349 return -1;
350 }
351 if (!argv[0] || !argv[1]) {
352 p_err("not enough value arguments for map in map");
353 return -1;
354 }
355
356 fd = map_parse_fd(&argc, &argv);
357 if (fd < 0)
358 return -1;
359
360 *value_fd = value;
361 **value_fd = fd;
362 } else if (map_is_map_of_progs(info->type)) {
363 int argc = 2;
364
365 if (value_size != 4) {
366 p_err("value smaller than 4B for map of progs?");
367 return -1;
368 }
369 if (!argv[0] || !argv[1]) {
370 p_err("not enough value arguments for map of progs");
371 return -1;
372 }
373
374 fd = prog_parse_fd(&argc, &argv);
375 if (fd < 0)
376 return -1;
377
378 *value_fd = value;
379 **value_fd = fd;
380 } else {
381 argv = parse_bytes(argv, "value", value, value_size);
382 if (!argv)
383 return -1;
384 }
385
386 return parse_elem(argv, info, key, NULL, key_size, value_size,
387 flags, NULL);
388 } else if (is_prefix(*argv, "any") || is_prefix(*argv, "noexist") ||
389 is_prefix(*argv, "exist")) {
390 if (!flags) {
391 p_err("flags specified multiple times: %s", *argv);
392 return -1;
393 }
394
395 if (is_prefix(*argv, "any"))
396 *flags = BPF_ANY;
397 else if (is_prefix(*argv, "noexist"))
398 *flags = BPF_NOEXIST;
399 else if (is_prefix(*argv, "exist"))
400 *flags = BPF_EXIST;
401
402 return parse_elem(argv + 1, info, key, value, key_size,
403 value_size, NULL, value_fd);
404 }
405
406 p_err("expected key or value, got: %s", *argv);
407 return -1;
408}
409
410static int show_map_close_json(int fd, struct bpf_map_info *info)
411{
412 char *memlock;
413
414 memlock = get_fdinfo(fd, "memlock");
415 close(fd);
416
417 jsonw_start_object(json_wtr);
418
419 jsonw_uint_field(json_wtr, "id", info->id);
420 if (info->type < ARRAY_SIZE(map_type_name))
421 jsonw_string_field(json_wtr, "type",
422 map_type_name[info->type]);
423 else
424 jsonw_uint_field(json_wtr, "type", info->type);
425
426 if (*info->name)
427 jsonw_string_field(json_wtr, "name", info->name);
428
429 jsonw_name(json_wtr, "flags");
430 jsonw_printf(json_wtr, "%#x", info->map_flags);
431 jsonw_uint_field(json_wtr, "bytes_key", info->key_size);
432 jsonw_uint_field(json_wtr, "bytes_value", info->value_size);
433 jsonw_uint_field(json_wtr, "max_entries", info->max_entries);
434
435 if (memlock)
436 jsonw_int_field(json_wtr, "bytes_memlock", atoi(memlock));
437 free(memlock);
438
439 if (!hash_empty(map_table.table)) {
440 struct pinned_obj *obj;
441
442 jsonw_name(json_wtr, "pinned");
443 jsonw_start_array(json_wtr);
444 hash_for_each_possible(map_table.table, obj, hash, info->id) {
445 if (obj->id == info->id)
446 jsonw_string(json_wtr, obj->path);
447 }
448 jsonw_end_array(json_wtr);
449 }
450
451 jsonw_end_object(json_wtr);
452
453 return 0;
454}
455
456static int show_map_close_plain(int fd, struct bpf_map_info *info)
457{
458 char *memlock;
459
460 memlock = get_fdinfo(fd, "memlock");
461 close(fd);
462
463 printf("%u: ", info->id);
464 if (info->type < ARRAY_SIZE(map_type_name))
465 printf("%s ", map_type_name[info->type]);
466 else
467 printf("type %u ", info->type);
468
469 if (*info->name)
470 printf("name %s ", info->name);
471
472 printf("flags 0x%x\n", info->map_flags);
473 printf("\tkey %uB value %uB max_entries %u",
474 info->key_size, info->value_size, info->max_entries);
475
476 if (memlock)
477 printf(" memlock %sB", memlock);
478 free(memlock);
479
480 printf("\n");
481 if (!hash_empty(map_table.table)) {
482 struct pinned_obj *obj;
483
484 hash_for_each_possible(map_table.table, obj, hash, info->id) {
485 if (obj->id == info->id)
486 printf("\tpinned %s\n", obj->path);
487 }
488 }
489 return 0;
490}
491
492static int do_show(int argc, char **argv)
493{
494 struct bpf_map_info info = {};
495 __u32 len = sizeof(info);
496 __u32 id = 0;
497 int err;
498 int fd;
499
500 if (show_pinned)
501 build_pinned_obj_table(&map_table, BPF_OBJ_MAP);
502
503 if (argc == 2) {
504 fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
505 if (fd < 0)
506 return -1;
507
508 if (json_output)
509 return show_map_close_json(fd, &info);
510 else
511 return show_map_close_plain(fd, &info);
512 }
513
514 if (argc)
515 return BAD_ARG();
516
517 if (json_output)
518 jsonw_start_array(json_wtr);
519 while (true) {
520 err = bpf_map_get_next_id(id, &id);
521 if (err) {
522 if (errno == ENOENT)
523 break;
524 p_err("can't get next map: %s%s", strerror(errno),
525 errno == EINVAL ? " -- kernel too old?" : "");
526 break;
527 }
528
529 fd = bpf_map_get_fd_by_id(id);
530 if (fd < 0) {
531 if (errno == ENOENT)
532 continue;
533 p_err("can't get map by id (%u): %s",
534 id, strerror(errno));
535 break;
536 }
537
538 err = bpf_obj_get_info_by_fd(fd, &info, &len);
539 if (err) {
540 p_err("can't get map info: %s", strerror(errno));
541 close(fd);
542 break;
543 }
544
545 if (json_output)
546 show_map_close_json(fd, &info);
547 else
548 show_map_close_plain(fd, &info);
549 }
550 if (json_output)
551 jsonw_end_array(json_wtr);
552
553 return errno == ENOENT ? 0 : -1;
554}
555
556static int do_dump(int argc, char **argv)
557{
558 void *key, *value, *prev_key;
559 unsigned int num_elems = 0;
560 struct bpf_map_info info = {};
561 __u32 len = sizeof(info);
562 int err;
563 int fd;
564
565 if (argc != 2)
566 usage();
567
568 fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
569 if (fd < 0)
570 return -1;
571
572 if (map_is_map_of_maps(info.type) || map_is_map_of_progs(info.type)) {
573 p_err("Dumping maps of maps and program maps not supported");
574 close(fd);
575 return -1;
576 }
577
578 key = malloc(info.key_size);
579 value = alloc_value(&info);
580 if (!key || !value) {
581 p_err("mem alloc failed");
582 err = -1;
583 goto exit_free;
584 }
585
586 prev_key = NULL;
587 if (json_output)
588 jsonw_start_array(json_wtr);
589 while (true) {
590 err = bpf_map_get_next_key(fd, prev_key, key);
591 if (err) {
592 if (errno == ENOENT)
593 err = 0;
594 break;
595 }
596
597 if (!bpf_map_lookup_elem(fd, key, value)) {
598 if (json_output)
599 print_entry_json(&info, key, value);
600 else
601 print_entry_plain(&info, key, value);
602 } else {
603 if (json_output) {
604 jsonw_name(json_wtr, "key");
605 print_hex_data_json(key, info.key_size);
606 jsonw_name(json_wtr, "value");
607 jsonw_start_object(json_wtr);
608 jsonw_string_field(json_wtr, "error",
609 "can't lookup element");
610 jsonw_end_object(json_wtr);
611 } else {
612 p_info("can't lookup element with key: ");
613 fprint_hex(stderr, key, info.key_size, " ");
614 fprintf(stderr, "\n");
615 }
616 }
617
618 prev_key = key;
619 num_elems++;
620 }
621
622 if (json_output)
623 jsonw_end_array(json_wtr);
624 else
625 printf("Found %u element%s\n", num_elems,
626 num_elems != 1 ? "s" : "");
627
628exit_free:
629 free(key);
630 free(value);
631 close(fd);
632
633 return err;
634}
635
636static int do_update(int argc, char **argv)
637{
638 struct bpf_map_info info = {};
639 __u32 len = sizeof(info);
640 __u32 *value_fd = NULL;
641 __u32 flags = BPF_ANY;
642 void *key, *value;
643 int fd, err;
644
645 if (argc < 2)
646 usage();
647
648 fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
649 if (fd < 0)
650 return -1;
651
652 key = malloc(info.key_size);
653 value = alloc_value(&info);
654 if (!key || !value) {
655 p_err("mem alloc failed");
656 err = -1;
657 goto exit_free;
658 }
659
660 err = parse_elem(argv, &info, key, value, info.key_size,
661 info.value_size, &flags, &value_fd);
662 if (err)
663 goto exit_free;
664
665 err = bpf_map_update_elem(fd, key, value, flags);
666 if (err) {
667 p_err("update failed: %s", strerror(errno));
668 goto exit_free;
669 }
670
671exit_free:
672 if (value_fd)
673 close(*value_fd);
674 free(key);
675 free(value);
676 close(fd);
677
678 if (!err && json_output)
679 jsonw_null(json_wtr);
680 return err;
681}
682
683static int do_lookup(int argc, char **argv)
684{
685 struct bpf_map_info info = {};
686 __u32 len = sizeof(info);
687 void *key, *value;
688 int err;
689 int fd;
690
691 if (argc < 2)
692 usage();
693
694 fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
695 if (fd < 0)
696 return -1;
697
698 key = malloc(info.key_size);
699 value = alloc_value(&info);
700 if (!key || !value) {
701 p_err("mem alloc failed");
702 err = -1;
703 goto exit_free;
704 }
705
706 err = parse_elem(argv, &info, key, NULL, info.key_size, 0, NULL, NULL);
707 if (err)
708 goto exit_free;
709
710 err = bpf_map_lookup_elem(fd, key, value);
711 if (!err) {
712 if (json_output)
713 print_entry_json(&info, key, value);
714 else
715 print_entry_plain(&info, key, value);
716 } else if (errno == ENOENT) {
717 if (json_output) {
718 jsonw_null(json_wtr);
719 } else {
720 printf("key:\n");
721 fprint_hex(stdout, key, info.key_size, " ");
722 printf("\n\nNot found\n");
723 }
724 } else {
725 p_err("lookup failed: %s", strerror(errno));
726 }
727
728exit_free:
729 free(key);
730 free(value);
731 close(fd);
732
733 return err;
734}
735
736static int do_getnext(int argc, char **argv)
737{
738 struct bpf_map_info info = {};
739 __u32 len = sizeof(info);
740 void *key, *nextkey;
741 int err;
742 int fd;
743
744 if (argc < 2)
745 usage();
746
747 fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
748 if (fd < 0)
749 return -1;
750
751 key = malloc(info.key_size);
752 nextkey = malloc(info.key_size);
753 if (!key || !nextkey) {
754 p_err("mem alloc failed");
755 err = -1;
756 goto exit_free;
757 }
758
759 if (argc) {
760 err = parse_elem(argv, &info, key, NULL, info.key_size, 0,
761 NULL, NULL);
762 if (err)
763 goto exit_free;
764 } else {
765 free(key);
766 key = NULL;
767 }
768
769 err = bpf_map_get_next_key(fd, key, nextkey);
770 if (err) {
771 p_err("can't get next key: %s", strerror(errno));
772 goto exit_free;
773 }
774
775 if (json_output) {
776 jsonw_start_object(json_wtr);
777 if (key) {
778 jsonw_name(json_wtr, "key");
779 print_hex_data_json(key, info.key_size);
780 } else {
781 jsonw_null_field(json_wtr, "key");
782 }
783 jsonw_name(json_wtr, "next_key");
784 print_hex_data_json(nextkey, info.key_size);
785 jsonw_end_object(json_wtr);
786 } else {
787 if (key) {
788 printf("key:\n");
789 fprint_hex(stdout, key, info.key_size, " ");
790 printf("\n");
791 } else {
792 printf("key: None\n");
793 }
794 printf("next key:\n");
795 fprint_hex(stdout, nextkey, info.key_size, " ");
796 printf("\n");
797 }
798
799exit_free:
800 free(nextkey);
801 free(key);
802 close(fd);
803
804 return err;
805}
806
807static int do_delete(int argc, char **argv)
808{
809 struct bpf_map_info info = {};
810 __u32 len = sizeof(info);
811 void *key;
812 int err;
813 int fd;
814
815 if (argc < 2)
816 usage();
817
818 fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
819 if (fd < 0)
820 return -1;
821
822 key = malloc(info.key_size);
823 if (!key) {
824 p_err("mem alloc failed");
825 err = -1;
826 goto exit_free;
827 }
828
829 err = parse_elem(argv, &info, key, NULL, info.key_size, 0, NULL, NULL);
830 if (err)
831 goto exit_free;
832
833 err = bpf_map_delete_elem(fd, key);
834 if (err)
835 p_err("delete failed: %s", strerror(errno));
836
837exit_free:
838 free(key);
839 close(fd);
840
841 if (!err && json_output)
842 jsonw_null(json_wtr);
843 return err;
844}
845
846static int do_pin(int argc, char **argv)
847{
848 int err;
849
850 err = do_pin_any(argc, argv, bpf_map_get_fd_by_id);
851 if (!err && json_output)
852 jsonw_null(json_wtr);
853 return err;
854}
855
856static int do_help(int argc, char **argv)
857{
858 if (json_output) {
859 jsonw_null(json_wtr);
860 return 0;
861 }
862
863 fprintf(stderr,
864 "Usage: %s %s show [MAP]\n"
865 " %s %s dump MAP\n"
866 " %s %s update MAP key BYTES value VALUE [UPDATE_FLAGS]\n"
867 " %s %s lookup MAP key BYTES\n"
868 " %s %s getnext MAP [key BYTES]\n"
869 " %s %s delete MAP key BYTES\n"
870 " %s %s pin MAP FILE\n"
871 " %s %s help\n"
872 "\n"
873 " MAP := { id MAP_ID | pinned FILE }\n"
874 " " HELP_SPEC_PROGRAM "\n"
875 " VALUE := { BYTES | MAP | PROG }\n"
876 " UPDATE_FLAGS := { any | exist | noexist }\n"
877 " " HELP_SPEC_OPTIONS "\n"
878 "",
879 bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
880 bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
881 bin_name, argv[-2], bin_name, argv[-2]);
882
883 return 0;
884}
885
886static const struct cmd cmds[] = {
887 { "show", do_show },
888 { "help", do_help },
889 { "dump", do_dump },
890 { "update", do_update },
891 { "lookup", do_lookup },
892 { "getnext", do_getnext },
893 { "delete", do_delete },
894 { "pin", do_pin },
895 { 0 }
896};
897
898int do_map(int argc, char **argv)
899{
900 return cmd_select(cmds, argc, argv, do_help);
901}
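
Not part of the commit: a minimal user-space sketch of the map-walking pattern that do_getnext() and do_lookup() above wrap. It assumes <bpf.h> from tools/lib/bpf is on the include path and an ordinary (non-per-CPU) map; the map id is a placeholder.

/* Sketch: walk all entries of a map by id using the same libbpf calls
 * bpftool's getnext/lookup commands are built on.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <bpf.h>

static int walk_map(unsigned int id)
{
	struct bpf_map_info info = {};
	__u32 len = sizeof(info);
	void *key, *next, *value;
	int fd, err = -1;

	fd = bpf_map_get_fd_by_id(id);
	if (fd < 0)
		return -1;
	if (bpf_obj_get_info_by_fd(fd, &info, &len))
		goto out_close;

	key = malloc(info.key_size);
	next = malloc(info.key_size);
	value = malloc(info.value_size);	/* per-CPU maps need more, cf. alloc_value() */
	if (!key || !next || !value)
		goto out_free;

	/* a NULL key asks the kernel for the first key in the map */
	err = bpf_map_get_next_key(fd, NULL, next);
	while (!err) {
		if (!bpf_map_lookup_elem(fd, next, value))
			printf("entry with %u byte value\n", info.value_size);
		memcpy(key, next, info.key_size);
		err = bpf_map_get_next_key(fd, key, next); /* ENOENT ends the walk */
	}
	err = 0;
out_free:
	free(key);
	free(next);
	free(value);
out_close:
	close(fd);
	return err;
}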
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
new file mode 100644
index 000000000000..dded77345bfb
--- /dev/null
+++ b/tools/bpf/bpftool/prog.c
@@ -0,0 +1,674 @@
1/*
2 * Copyright (C) 2017 Netronome Systems, Inc.
3 *
4 * This software is dual licensed under the GNU General License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
8 *
9 * The BSD 2-Clause License:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34/* Author: Jakub Kicinski <kubakici@wp.pl> */
35
36#include <errno.h>
37#include <fcntl.h>
38#include <stdarg.h>
39#include <stdio.h>
40#include <stdlib.h>
41#include <string.h>
42#include <time.h>
43#include <unistd.h>
44#include <sys/types.h>
45#include <sys/stat.h>
46
47#include <bpf.h>
48
49#include "main.h"
50#include "disasm.h"
51
52static const char * const prog_type_name[] = {
53 [BPF_PROG_TYPE_UNSPEC] = "unspec",
54 [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
55 [BPF_PROG_TYPE_KPROBE] = "kprobe",
56 [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls",
57 [BPF_PROG_TYPE_SCHED_ACT] = "sched_act",
58 [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint",
59 [BPF_PROG_TYPE_XDP] = "xdp",
60 [BPF_PROG_TYPE_PERF_EVENT] = "perf_event",
61 [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb",
62 [BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock",
63 [BPF_PROG_TYPE_LWT_IN] = "lwt_in",
64 [BPF_PROG_TYPE_LWT_OUT] = "lwt_out",
65 [BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
66 [BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
67 [BPF_PROG_TYPE_SK_SKB] = "sk_skb",
68};
69
70static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
71{
72 struct timespec real_time_ts, boot_time_ts;
73 time_t wallclock_secs;
74 struct tm load_tm;
75
76 buf[--size] = '\0';
77
78 if (clock_gettime(CLOCK_REALTIME, &real_time_ts) ||
79 clock_gettime(CLOCK_BOOTTIME, &boot_time_ts)) {
80 perror("Can't read clocks");
81 snprintf(buf, size, "%llu", nsecs / 1000000000);
82 return;
83 }
84
85 wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
86 nsecs / 1000000000;
87
88 if (!localtime_r(&wallclock_secs, &load_tm)) {
89 snprintf(buf, size, "%llu", nsecs / 1000000000);
90 return;
91 }
92
93 strftime(buf, size, "%b %d/%H:%M", &load_tm);
94}
95
96static int prog_fd_by_tag(unsigned char *tag)
97{
98 struct bpf_prog_info info = {};
99 __u32 len = sizeof(info);
100 unsigned int id = 0;
101 int err;
102 int fd;
103
104 while (true) {
105 err = bpf_prog_get_next_id(id, &id);
106 if (err) {
107 p_err("%s", strerror(errno));
108 return -1;
109 }
110
111 fd = bpf_prog_get_fd_by_id(id);
112 if (fd < 0) {
113 p_err("can't get prog by id (%u): %s",
114 id, strerror(errno));
115 return -1;
116 }
117
118 err = bpf_obj_get_info_by_fd(fd, &info, &len);
119 if (err) {
120 p_err("can't get prog info (%u): %s",
121 id, strerror(errno));
122 close(fd);
123 return -1;
124 }
125
126 if (!memcmp(tag, info.tag, BPF_TAG_SIZE))
127 return fd;
128
129 close(fd);
130 }
131}
132
133int prog_parse_fd(int *argc, char ***argv)
134{
135 int fd;
136
137 if (is_prefix(**argv, "id")) {
138 unsigned int id;
139 char *endptr;
140
141 NEXT_ARGP();
142
143 id = strtoul(**argv, &endptr, 0);
144 if (*endptr) {
145 p_err("can't parse %s as ID", **argv);
146 return -1;
147 }
148 NEXT_ARGP();
149
150 fd = bpf_prog_get_fd_by_id(id);
151 if (fd < 0)
152 p_err("get by id (%u): %s", id, strerror(errno));
153 return fd;
154 } else if (is_prefix(**argv, "tag")) {
155 unsigned char tag[BPF_TAG_SIZE];
156
157 NEXT_ARGP();
158
159 if (sscanf(**argv, BPF_TAG_FMT, tag, tag + 1, tag + 2,
160 tag + 3, tag + 4, tag + 5, tag + 6, tag + 7)
161 != BPF_TAG_SIZE) {
162 p_err("can't parse tag");
163 return -1;
164 }
165 NEXT_ARGP();
166
167 return prog_fd_by_tag(tag);
168 } else if (is_prefix(**argv, "pinned")) {
169 char *path;
170
171 NEXT_ARGP();
172
173 path = **argv;
174 NEXT_ARGP();
175
176 return open_obj_pinned_any(path, BPF_OBJ_PROG);
177 }
178
179 p_err("expected 'id', 'tag' or 'pinned', got: '%s'?", **argv);
180 return -1;
181}
182
183static void show_prog_maps(int fd, u32 num_maps)
184{
185 struct bpf_prog_info info = {};
186 __u32 len = sizeof(info);
187 __u32 map_ids[num_maps];
188 unsigned int i;
189 int err;
190
191 info.nr_map_ids = num_maps;
192 info.map_ids = ptr_to_u64(map_ids);
193
194 err = bpf_obj_get_info_by_fd(fd, &info, &len);
195 if (err || !info.nr_map_ids)
196 return;
197
198 if (json_output) {
199 jsonw_name(json_wtr, "map_ids");
200 jsonw_start_array(json_wtr);
201 for (i = 0; i < info.nr_map_ids; i++)
202 jsonw_uint(json_wtr, map_ids[i]);
203 jsonw_end_array(json_wtr);
204 } else {
205 printf(" map_ids ");
206 for (i = 0; i < info.nr_map_ids; i++)
207 printf("%u%s", map_ids[i],
208 i == info.nr_map_ids - 1 ? "" : ",");
209 }
210}
211
212static void print_prog_json(struct bpf_prog_info *info, int fd)
213{
214 char *memlock;
215
216 jsonw_start_object(json_wtr);
217 jsonw_uint_field(json_wtr, "id", info->id);
218 if (info->type < ARRAY_SIZE(prog_type_name))
219 jsonw_string_field(json_wtr, "type",
220 prog_type_name[info->type]);
221 else
222 jsonw_uint_field(json_wtr, "type", info->type);
223
224 if (*info->name)
225 jsonw_string_field(json_wtr, "name", info->name);
226
227 jsonw_name(json_wtr, "tag");
228 jsonw_printf(json_wtr, "\"" BPF_TAG_FMT "\"",
229 info->tag[0], info->tag[1], info->tag[2], info->tag[3],
230 info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
231
232 if (info->load_time) {
233 char buf[32];
234
235 print_boot_time(info->load_time, buf, sizeof(buf));
236
237 /* Piggy back on load_time, since 0 uid is a valid one */
238 jsonw_string_field(json_wtr, "loaded_at", buf);
239 jsonw_uint_field(json_wtr, "uid", info->created_by_uid);
240 }
241
242 jsonw_uint_field(json_wtr, "bytes_xlated", info->xlated_prog_len);
243
244 if (info->jited_prog_len) {
245 jsonw_bool_field(json_wtr, "jited", true);
246 jsonw_uint_field(json_wtr, "bytes_jited", info->jited_prog_len);
247 } else {
248 jsonw_bool_field(json_wtr, "jited", false);
249 }
250
251 memlock = get_fdinfo(fd, "memlock");
252 if (memlock)
253 jsonw_int_field(json_wtr, "bytes_memlock", atoi(memlock));
254 free(memlock);
255
256 if (info->nr_map_ids)
257 show_prog_maps(fd, info->nr_map_ids);
258
259 if (!hash_empty(prog_table.table)) {
260 struct pinned_obj *obj;
261
262 jsonw_name(json_wtr, "pinned");
263 jsonw_start_array(json_wtr);
264 hash_for_each_possible(prog_table.table, obj, hash, info->id) {
265 if (obj->id == info->id)
266 jsonw_string(json_wtr, obj->path);
267 }
268 jsonw_end_array(json_wtr);
269 }
270
271 jsonw_end_object(json_wtr);
272}
273
274static void print_prog_plain(struct bpf_prog_info *info, int fd)
275{
276 char *memlock;
277
278 printf("%u: ", info->id);
279 if (info->type < ARRAY_SIZE(prog_type_name))
280 printf("%s ", prog_type_name[info->type]);
281 else
282 printf("type %u ", info->type);
283
284 if (*info->name)
285 printf("name %s ", info->name);
286
287 printf("tag ");
288 fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
289 printf("\n");
290
291 if (info->load_time) {
292 char buf[32];
293
294 print_boot_time(info->load_time, buf, sizeof(buf));
295
296 /* Piggy back on load_time, since 0 uid is a valid one */
297 printf("\tloaded_at %s uid %u\n", buf, info->created_by_uid);
298 }
299
300 printf("\txlated %uB", info->xlated_prog_len);
301
302 if (info->jited_prog_len)
303 printf(" jited %uB", info->jited_prog_len);
304 else
305 printf(" not jited");
306
307 memlock = get_fdinfo(fd, "memlock");
308 if (memlock)
309 printf(" memlock %sB", memlock);
310 free(memlock);
311
312 if (info->nr_map_ids)
313 show_prog_maps(fd, info->nr_map_ids);
314
315 if (!hash_empty(prog_table.table)) {
316 struct pinned_obj *obj;
317
318 printf("\n");
319 hash_for_each_possible(prog_table.table, obj, hash, info->id) {
320 if (obj->id == info->id)
321 printf("\tpinned %s\n", obj->path);
322 }
323 }
324
325 printf("\n");
326}
327
328static int show_prog(int fd)
329{
330 struct bpf_prog_info info = {};
331 __u32 len = sizeof(info);
332 int err;
333
334 err = bpf_obj_get_info_by_fd(fd, &info, &len);
335 if (err) {
336 p_err("can't get prog info: %s", strerror(errno));
337 return -1;
338 }
339
340 if (json_output)
341 print_prog_json(&info, fd);
342 else
343 print_prog_plain(&info, fd);
344
345 return 0;
346}
347
348static int do_show(int argc, char **argv)
349{
350 __u32 id = 0;
351 int err;
352 int fd;
353
354 if (show_pinned)
355 build_pinned_obj_table(&prog_table, BPF_OBJ_PROG);
356
357 if (argc == 2) {
358 fd = prog_parse_fd(&argc, &argv);
359 if (fd < 0)
360 return -1;
361
362 return show_prog(fd);
363 }
364
365 if (argc)
366 return BAD_ARG();
367
368 if (json_output)
369 jsonw_start_array(json_wtr);
370 while (true) {
371 err = bpf_prog_get_next_id(id, &id);
372 if (err) {
373 if (errno == ENOENT) {
374 err = 0;
375 break;
376 }
377 p_err("can't get next program: %s%s", strerror(errno),
378 errno == EINVAL ? " -- kernel too old?" : "");
379 err = -1;
380 break;
381 }
382
383 fd = bpf_prog_get_fd_by_id(id);
384 if (fd < 0) {
385 if (errno == ENOENT)
386 continue;
387 p_err("can't get prog by id (%u): %s",
388 id, strerror(errno));
389 err = -1;
390 break;
391 }
392
393 err = show_prog(fd);
394 close(fd);
395 if (err)
396 break;
397 }
398
399 if (json_output)
400 jsonw_end_array(json_wtr);
401
402 return err;
403}
404
405static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
406{
407 va_list args;
408
409 va_start(args, fmt);
410 vprintf(fmt, args);
411 va_end(args);
412}
413
414static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes)
415{
416 struct bpf_insn *insn = buf;
417 bool double_insn = false;
418 unsigned int i;
419
420 for (i = 0; i < len / sizeof(*insn); i++) {
421 if (double_insn) {
422 double_insn = false;
423 continue;
424 }
425
426 double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
427
428 printf("% 4d: ", i);
429 print_bpf_insn(print_insn, NULL, insn + i, true);
430
431 if (opcodes) {
432 printf(" ");
433 fprint_hex(stdout, insn + i, 8, " ");
434 if (double_insn && i < len - 1) {
435 printf(" ");
436 fprint_hex(stdout, insn + i + 1, 8, " ");
437 }
438 printf("\n");
439 }
440 }
441}
442
443static void print_insn_json(struct bpf_verifier_env *env, const char *fmt, ...)
444{
445 unsigned int l = strlen(fmt);
446 char chomped_fmt[l];
447 va_list args;
448
449 va_start(args, fmt);
450 if (l > 0) {
451 strncpy(chomped_fmt, fmt, l - 1);
452 chomped_fmt[l - 1] = '\0';
453 }
454 jsonw_vprintf_enquote(json_wtr, chomped_fmt, args);
455 va_end(args);
456}
457
458static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
459{
460 struct bpf_insn *insn = buf;
461 bool double_insn = false;
462 unsigned int i;
463
464 jsonw_start_array(json_wtr);
465 for (i = 0; i < len / sizeof(*insn); i++) {
466 if (double_insn) {
467 double_insn = false;
468 continue;
469 }
470 double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
471
472 jsonw_start_object(json_wtr);
473 jsonw_name(json_wtr, "disasm");
474 print_bpf_insn(print_insn_json, NULL, insn + i, true);
475
476 if (opcodes) {
477 jsonw_name(json_wtr, "opcodes");
478 jsonw_start_object(json_wtr);
479
480 jsonw_name(json_wtr, "code");
481 jsonw_printf(json_wtr, "\"0x%02hhx\"", insn[i].code);
482
483 jsonw_name(json_wtr, "src_reg");
484 jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].src_reg);
485
486 jsonw_name(json_wtr, "dst_reg");
487 jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].dst_reg);
488
489 jsonw_name(json_wtr, "off");
490 print_hex_data_json((uint8_t *)(&insn[i].off), 2);
491
492 jsonw_name(json_wtr, "imm");
493 if (double_insn && i < len - 1)
494 print_hex_data_json((uint8_t *)(&insn[i].imm),
495 12);
496 else
497 print_hex_data_json((uint8_t *)(&insn[i].imm),
498 4);
499 jsonw_end_object(json_wtr);
500 }
501 jsonw_end_object(json_wtr);
502 }
503 jsonw_end_array(json_wtr);
504}
505
506static int do_dump(int argc, char **argv)
507{
508 struct bpf_prog_info info = {};
509 __u32 len = sizeof(info);
510 unsigned int buf_size;
511 char *filepath = NULL;
512 bool opcodes = false;
513 unsigned char *buf;
514 __u32 *member_len;
515 __u64 *member_ptr;
516 ssize_t n;
517 int err;
518 int fd;
519
520 if (is_prefix(*argv, "jited")) {
521 member_len = &info.jited_prog_len;
522 member_ptr = &info.jited_prog_insns;
523 } else if (is_prefix(*argv, "xlated")) {
524 member_len = &info.xlated_prog_len;
525 member_ptr = &info.xlated_prog_insns;
526 } else {
527 p_err("expected 'xlated' or 'jited', got: %s", *argv);
528 return -1;
529 }
530 NEXT_ARG();
531
532 if (argc < 2)
533 usage();
534
535 fd = prog_parse_fd(&argc, &argv);
536 if (fd < 0)
537 return -1;
538
539 if (is_prefix(*argv, "file")) {
540 NEXT_ARG();
541 if (!argc) {
542 p_err("expected file path");
543 return -1;
544 }
545
546 filepath = *argv;
547 NEXT_ARG();
548 } else if (is_prefix(*argv, "opcodes")) {
549 opcodes = true;
550 NEXT_ARG();
551 }
552
553 if (argc) {
554 usage();
555 return -1;
556 }
557
558 err = bpf_obj_get_info_by_fd(fd, &info, &len);
559 if (err) {
560 p_err("can't get prog info: %s", strerror(errno));
561 return -1;
562 }
563
564 if (!*member_len) {
565 p_info("no instructions returned");
566 close(fd);
567 return 0;
568 }
569
570 buf_size = *member_len;
571
572 buf = malloc(buf_size);
573 if (!buf) {
574 p_err("mem alloc failed");
575 close(fd);
576 return -1;
577 }
578
579 memset(&info, 0, sizeof(info));
580
581 *member_ptr = ptr_to_u64(buf);
582 *member_len = buf_size;
583
584 err = bpf_obj_get_info_by_fd(fd, &info, &len);
585 close(fd);
586 if (err) {
587 p_err("can't get prog info: %s", strerror(errno));
588 goto err_free;
589 }
590
591 if (*member_len > buf_size) {
592 p_err("too many instructions returned");
593 goto err_free;
594 }
595
596 if (filepath) {
597 fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
598 if (fd < 0) {
599 p_err("can't open file %s: %s", filepath,
600 strerror(errno));
601 goto err_free;
602 }
603
604 n = write(fd, buf, *member_len);
605 close(fd);
606 if (n != *member_len) {
607 p_err("error writing output file: %s",
608 n < 0 ? strerror(errno) : "short write");
609 goto err_free;
610 }
611 } else {
612 if (member_len == &info.jited_prog_len)
613 disasm_print_insn(buf, *member_len, opcodes);
614 else
615 if (json_output)
616 dump_xlated_json(buf, *member_len, opcodes);
617 else
618 dump_xlated_plain(buf, *member_len, opcodes);
619 }
620
621 free(buf);
622
623 return 0;
624
625err_free:
626 free(buf);
627 return -1;
628}
629
630static int do_pin(int argc, char **argv)
631{
632 int err;
633
634 err = do_pin_any(argc, argv, bpf_prog_get_fd_by_id);
635 if (!err && json_output)
636 jsonw_null(json_wtr);
637 return err;
638}
639
640static int do_help(int argc, char **argv)
641{
642 if (json_output) {
643 jsonw_null(json_wtr);
644 return 0;
645 }
646
647 fprintf(stderr,
648 "Usage: %s %s show [PROG]\n"
649 " %s %s dump xlated PROG [{ file FILE | opcodes }]\n"
650 " %s %s dump jited PROG [{ file FILE | opcodes }]\n"
651 " %s %s pin PROG FILE\n"
652 " %s %s help\n"
653 "\n"
654 " " HELP_SPEC_PROGRAM "\n"
655 " " HELP_SPEC_OPTIONS "\n"
656 "",
657 bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
658 bin_name, argv[-2], bin_name, argv[-2]);
659
660 return 0;
661}
662
663static const struct cmd cmds[] = {
664 { "show", do_show },
665 { "help", do_help },
666 { "dump", do_dump },
667 { "pin", do_pin },
668 { 0 }
669};
670
671int do_prog(int argc, char **argv)
672{
673 return cmd_select(cmds, argc, argv, do_help);
674}
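
Outside the patch itself: do_dump() above relies on calling bpf_obj_get_info_by_fd() twice, once to learn the instruction length and once with a user buffer wired into the info struct. A stripped-down sketch of that pattern, with a placeholder program id and <bpf.h> assumed to come from tools/lib/bpf as in the includes above:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <bpf.h>

static int fetch_xlated(unsigned int id)
{
	struct bpf_prog_info info = {};
	__u32 len = sizeof(info);
	unsigned char *buf;
	__u32 insns_len;
	int fd, err;

	fd = bpf_prog_get_fd_by_id(id);
	if (fd < 0)
		return -1;

	/* first call: only the sizes are of interest */
	err = bpf_obj_get_info_by_fd(fd, &info, &len);
	if (err || !info.xlated_prog_len)
		goto out_close;

	insns_len = info.xlated_prog_len;
	buf = malloc(insns_len);
	if (!buf) {
		err = -1;
		goto out_close;
	}

	/* second call: point the kernel at our buffer and re-query */
	memset(&info, 0, sizeof(info));
	info.xlated_prog_insns = (__u64)(unsigned long)buf;
	info.xlated_prog_len = insns_len;
	err = bpf_obj_get_info_by_fd(fd, &info, &len);
	if (!err)
		printf("%u bytes, %zu instructions\n", info.xlated_prog_len,
		       (size_t)info.xlated_prog_len / sizeof(struct bpf_insn));

	free(buf);
out_close:
	close(fd);
	return err;
}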
diff --git a/tools/gpio/gpio-utils.c b/tools/gpio/gpio-utils.c
index b86a32d90d88..cf7e2f3419ee 100644
--- a/tools/gpio/gpio-utils.c
+++ b/tools/gpio/gpio-utils.c
@@ -76,7 +76,8 @@ int gpiotools_request_linehandle(const char *device_name, unsigned int *lines,
76 fd = open(chrdev_name, 0); 76 fd = open(chrdev_name, 0);
77 if (fd == -1) { 77 if (fd == -1) {
78 ret = -errno; 78 ret = -errno;
79 fprintf(stderr, "Failed to open %s\n", chrdev_name); 79 fprintf(stderr, "Failed to open %s, %s\n",
80 chrdev_name, strerror(errno));
80 goto exit_close_error; 81 goto exit_close_error;
81 } 82 }
82 83
@@ -92,8 +93,8 @@ int gpiotools_request_linehandle(const char *device_name, unsigned int *lines,
92 ret = ioctl(fd, GPIO_GET_LINEHANDLE_IOCTL, &req); 93 ret = ioctl(fd, GPIO_GET_LINEHANDLE_IOCTL, &req);
93 if (ret == -1) { 94 if (ret == -1) {
94 ret = -errno; 95 ret = -errno;
95 fprintf(stderr, "Failed to issue GET LINEHANDLE IOCTL (%d)\n", 96 fprintf(stderr, "Failed to issue %s (%d), %s\n",
96 ret); 97 "GPIO_GET_LINEHANDLE_IOCTL", ret, strerror(errno));
97 } 98 }
98 99
99exit_close_error: 100exit_close_error:
@@ -118,8 +119,9 @@ int gpiotools_set_values(const int fd, struct gpiohandle_data *data)
118 ret = ioctl(fd, GPIOHANDLE_SET_LINE_VALUES_IOCTL, data); 119 ret = ioctl(fd, GPIOHANDLE_SET_LINE_VALUES_IOCTL, data);
119 if (ret == -1) { 120 if (ret == -1) {
120 ret = -errno; 121 ret = -errno;
121 fprintf(stderr, "Failed to issue %s (%d)\n", 122 fprintf(stderr, "Failed to issue %s (%d), %s\n",
122 "GPIOHANDLE_SET_LINE_VALUES_IOCTL", ret); 123 "GPIOHANDLE_SET_LINE_VALUES_IOCTL", ret,
124 strerror(errno));
123 } 125 }
124 126
125 return ret; 127 return ret;
@@ -141,8 +143,9 @@ int gpiotools_get_values(const int fd, struct gpiohandle_data *data)
141 ret = ioctl(fd, GPIOHANDLE_GET_LINE_VALUES_IOCTL, data); 143 ret = ioctl(fd, GPIOHANDLE_GET_LINE_VALUES_IOCTL, data);
142 if (ret == -1) { 144 if (ret == -1) {
143 ret = -errno; 145 ret = -errno;
144 fprintf(stderr, "Failed to issue %s (%d)\n", 146 fprintf(stderr, "Failed to issue %s (%d), %s\n",
145 "GPIOHANDLE_GET_LINE_VALUES_IOCTL", ret); 147 "GPIOHANDLE_GET_LINE_VALUES_IOCTL", ret,
148 strerror(errno));
146 } 149 }
147 150
148 return ret; 151 return ret;
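
For context (not from the patch): the ioctl sequence these helpers wrap, with the strerror()-style reporting the hunks above add. The character-device path and line offset are example values.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/gpio.h>

static int set_one_line(const char *chrdev, unsigned int offset, int value)
{
	struct gpiohandle_request req = {};
	struct gpiohandle_data data = {};
	int fd, ret;

	fd = open(chrdev, 0);
	if (fd == -1) {
		fprintf(stderr, "Failed to open %s, %s\n", chrdev, strerror(errno));
		return -errno;
	}

	req.lineoffsets[0] = offset;
	req.lines = 1;
	req.flags = GPIOHANDLE_REQUEST_OUTPUT;
	strcpy(req.consumer_label, "gpio-utils-example");

	ret = ioctl(fd, GPIO_GET_LINEHANDLE_IOCTL, &req);
	if (ret == -1) {
		ret = -errno;
		fprintf(stderr, "Failed to issue %s (%d), %s\n",
			"GPIO_GET_LINEHANDLE_IOCTL", ret, strerror(errno));
		close(fd);
		return ret;
	}
	close(fd);	/* the line handle lives in req.fd from now on */

	data.values[0] = !!value;
	ret = ioctl(req.fd, GPIOHANDLE_SET_LINE_VALUES_IOCTL, &data);
	if (ret == -1) {
		ret = -errno;
		fprintf(stderr, "Failed to issue %s (%d), %s\n",
			"GPIOHANDLE_SET_LINE_VALUES_IOCTL", ret, strerror(errno));
	}

	close(req.fd);
	return ret;
}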
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index eaa3bec273c8..4c99c57736ce 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -193,11 +193,14 @@ static void kvp_update_mem_state(int pool)
193 for (;;) { 193 for (;;) {
194 readp = &record[records_read]; 194 readp = &record[records_read];
195 records_read += fread(readp, sizeof(struct kvp_record), 195 records_read += fread(readp, sizeof(struct kvp_record),
196 ENTRIES_PER_BLOCK * num_blocks, 196 ENTRIES_PER_BLOCK * num_blocks - records_read,
197 filep); 197 filep);
198 198
199 if (ferror(filep)) { 199 if (ferror(filep)) {
200 syslog(LOG_ERR, "Failed to read file, pool: %d", pool); 200 syslog(LOG_ERR,
201 "Failed to read file, pool: %d; error: %d %s",
202 pool, errno, strerror(errno));
203 kvp_release_lock(pool);
201 exit(EXIT_FAILURE); 204 exit(EXIT_FAILURE);
202 } 205 }
203 206
@@ -210,6 +213,7 @@ static void kvp_update_mem_state(int pool)
210 213
211 if (record == NULL) { 214 if (record == NULL) {
212 syslog(LOG_ERR, "malloc failed"); 215 syslog(LOG_ERR, "malloc failed");
216 kvp_release_lock(pool);
213 exit(EXIT_FAILURE); 217 exit(EXIT_FAILURE);
214 } 218 }
215 continue; 219 continue;
@@ -224,15 +228,11 @@ static void kvp_update_mem_state(int pool)
224 fclose(filep); 228 fclose(filep);
225 kvp_release_lock(pool); 229 kvp_release_lock(pool);
226} 230}
231
227static int kvp_file_init(void) 232static int kvp_file_init(void)
228{ 233{
229 int fd; 234 int fd;
230 FILE *filep;
231 size_t records_read;
232 char *fname; 235 char *fname;
233 struct kvp_record *record;
234 struct kvp_record *readp;
235 int num_blocks;
236 int i; 236 int i;
237 int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK; 237 int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK;
238 238
@@ -246,61 +246,19 @@ static int kvp_file_init(void)
246 246
247 for (i = 0; i < KVP_POOL_COUNT; i++) { 247 for (i = 0; i < KVP_POOL_COUNT; i++) {
248 fname = kvp_file_info[i].fname; 248 fname = kvp_file_info[i].fname;
249 records_read = 0;
250 num_blocks = 1;
251 sprintf(fname, "%s/.kvp_pool_%d", KVP_CONFIG_LOC, i); 249 sprintf(fname, "%s/.kvp_pool_%d", KVP_CONFIG_LOC, i);
252 fd = open(fname, O_RDWR | O_CREAT | O_CLOEXEC, 0644 /* rw-r--r-- */); 250 fd = open(fname, O_RDWR | O_CREAT | O_CLOEXEC, 0644 /* rw-r--r-- */);
253 251
254 if (fd == -1) 252 if (fd == -1)
255 return 1; 253 return 1;
256 254
257
258 filep = fopen(fname, "re");
259 if (!filep) {
260 close(fd);
261 return 1;
262 }
263
264 record = malloc(alloc_unit * num_blocks);
265 if (record == NULL) {
266 fclose(filep);
267 close(fd);
268 return 1;
269 }
270 for (;;) {
271 readp = &record[records_read];
272 records_read += fread(readp, sizeof(struct kvp_record),
273 ENTRIES_PER_BLOCK,
274 filep);
275
276 if (ferror(filep)) {
277 syslog(LOG_ERR, "Failed to read file, pool: %d",
278 i);
279 exit(EXIT_FAILURE);
280 }
281
282 if (!feof(filep)) {
283 /*
284 * We have more data to read.
285 */
286 num_blocks++;
287 record = realloc(record, alloc_unit *
288 num_blocks);
289 if (record == NULL) {
290 fclose(filep);
291 close(fd);
292 return 1;
293 }
294 continue;
295 }
296 break;
297 }
298 kvp_file_info[i].fd = fd; 255 kvp_file_info[i].fd = fd;
299 kvp_file_info[i].num_blocks = num_blocks; 256 kvp_file_info[i].num_blocks = 1;
300 kvp_file_info[i].records = record; 257 kvp_file_info[i].records = malloc(alloc_unit);
301 kvp_file_info[i].num_records = records_read; 258 if (kvp_file_info[i].records == NULL)
302 fclose(filep); 259 return 1;
303 260 kvp_file_info[i].num_records = 0;
261 kvp_update_mem_state(i);
304 } 262 }
305 263
306 return 0; 264 return 0;
diff --git a/tools/include/asm-generic/atomic-gcc.h b/tools/include/asm-generic/atomic-gcc.h
index 40b231fb95bd..4c1966f7c77a 100644
--- a/tools/include/asm-generic/atomic-gcc.h
+++ b/tools/include/asm-generic/atomic-gcc.h
@@ -22,7 +22,7 @@
22 */ 22 */
23static inline int atomic_read(const atomic_t *v) 23static inline int atomic_read(const atomic_t *v)
24{ 24{
25 return ACCESS_ONCE((v)->counter); 25 return READ_ONCE((v)->counter);
26} 26}
27 27
28/** 28/**
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index 07fd03c74a77..04e32f965ad7 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -84,8 +84,6 @@
84 84
85#define uninitialized_var(x) x = *(&(x)) 85#define uninitialized_var(x) x = *(&(x))
86 86
87#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
88
89#include <linux/types.h> 87#include <linux/types.h>
90 88
91/* 89/*
@@ -135,20 +133,19 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
135/* 133/*
136 * Prevent the compiler from merging or refetching reads or writes. The 134 * Prevent the compiler from merging or refetching reads or writes. The
137 * compiler is also forbidden from reordering successive instances of 135 * compiler is also forbidden from reordering successive instances of
138 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the 136 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
139 * compiler is aware of some particular ordering. One way to make the 137 * particular ordering. One way to make the compiler aware of ordering is to
140 * compiler aware of ordering is to put the two invocations of READ_ONCE, 138 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
141 * WRITE_ONCE or ACCESS_ONCE() in different C statements. 139 * statements.
142 * 140 *
143 * In contrast to ACCESS_ONCE these two macros will also work on aggregate 141 * These two macros will also work on aggregate data types like structs or
144 * data types like structs or unions. If the size of the accessed data 142 * unions. If the size of the accessed data type exceeds the word size of
145 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) 143 * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
146 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a 144 * fall back to memcpy and print a compile-time warning.
147 * compile-time warning.
148 * 145 *
149 * Their two major use cases are: (1) Mediating communication between 146 * Their two major use cases are: (1) Mediating communication between
150 * process-level code and irq/NMI handlers, all running on the same CPU, 147 * process-level code and irq/NMI handlers, all running on the same CPU,
151 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise 148 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
152 * mutilate accesses that either do not require ordering or that interact 149 * mutilate accesses that either do not require ordering or that interact
153 * with an explicit memory barrier or atomic instruction that provides the 150 * with an explicit memory barrier or atomic instruction that provides the
154 * required ordering. 151 * required ordering.
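
A toy illustration of the rule described in the comment above, assuming tools/include is on the include path: both sides of a racy flag are marked so the compiler can neither merge nor refetch the accesses.

#include <linux/compiler.h>

static int done;

static void producer(void)
{
	WRITE_ONCE(done, 1);		/* single, untorn store */
}

static void wait_for_done(void)
{
	while (!READ_ONCE(done))	/* must be re-read on every iteration */
		;
}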
diff --git a/tools/include/linux/kmemcheck.h b/tools/include/linux/kmemcheck.h
deleted file mode 100644
index 2bccd2c7b897..000000000000
--- a/tools/include/linux/kmemcheck.h
+++ /dev/null
@@ -1,9 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LIBLOCKDEP_LINUX_KMEMCHECK_H_
3#define _LIBLOCKDEP_LINUX_KMEMCHECK_H_
4
5static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
6{
7}
8
9#endif
diff --git a/tools/include/linux/lockdep.h b/tools/include/linux/lockdep.h
index 940c1b075659..6b0c36a58fcb 100644
--- a/tools/include/linux/lockdep.h
+++ b/tools/include/linux/lockdep.h
@@ -48,6 +48,7 @@ static inline int debug_locks_off(void)
48#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__) 48#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__)
49#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__) 49#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
50#define pr_warn pr_err 50#define pr_warn pr_err
51#define pr_cont pr_err
51 52
52#define list_del_rcu list_del 53#define list_del_rcu list_del
53 54
diff --git a/tools/include/linux/poison.h b/tools/include/linux/poison.h
index 4bf6777a8a03..9fdcd3eaac3b 100644
--- a/tools/include/linux/poison.h
+++ b/tools/include/linux/poison.h
@@ -15,6 +15,10 @@
15# define POISON_POINTER_DELTA 0 15# define POISON_POINTER_DELTA 0
16#endif 16#endif
17 17
18#ifdef __cplusplus
19#define LIST_POISON1 NULL
20#define LIST_POISON2 NULL
21#else
18/* 22/*
19 * These are non-NULL pointers that will result in page faults 23 * These are non-NULL pointers that will result in page faults
20 * under normal circumstances, used to verify that nobody uses 24 * under normal circumstances, used to verify that nobody uses
@@ -22,6 +26,7 @@
22 */ 26 */
23#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA) 27#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
24#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA) 28#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)
29#endif
25 30
26/********** include/linux/timer.h **********/ 31/********** include/linux/timer.h **********/
27/* 32/*
diff --git a/tools/include/uapi/asm-generic/bpf_perf_event.h b/tools/include/uapi/asm-generic/bpf_perf_event.h
new file mode 100644
index 000000000000..53815d2cd047
--- /dev/null
+++ b/tools/include/uapi/asm-generic/bpf_perf_event.h
@@ -0,0 +1,9 @@
1#ifndef _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__
2#define _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__
3
4#include <linux/ptrace.h>
5
6/* Export kernel pt_regs structure */
7typedef struct pt_regs bpf_user_pt_regs_t;
8
9#endif /* _UAPI__ASM_GENERIC_BPF_PERF_EVENT_H__ */
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h
index 6d319c46fd90..f8b134f5608f 100644
--- a/tools/include/uapi/asm-generic/mman-common.h
+++ b/tools/include/uapi/asm-generic/mman-common.h
@@ -17,6 +17,7 @@
17 17
18#define MAP_SHARED 0x01 /* Share changes */ 18#define MAP_SHARED 0x01 /* Share changes */
19#define MAP_PRIVATE 0x02 /* Changes are private */ 19#define MAP_PRIVATE 0x02 /* Changes are private */
20#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
20#define MAP_TYPE 0x0f /* Mask for type of mapping */ 21#define MAP_TYPE 0x0f /* Mask for type of mapping */
21#define MAP_FIXED 0x10 /* Interpret addr exactly */ 22#define MAP_FIXED 0x10 /* Interpret addr exactly */
22#define MAP_ANONYMOUS 0x20 /* don't use a file */ 23#define MAP_ANONYMOUS 0x20 /* don't use a file */
diff --git a/tools/include/uapi/asm-generic/mman.h b/tools/include/uapi/asm-generic/mman.h
index 2dffcbf705b3..653687d9771b 100644
--- a/tools/include/uapi/asm-generic/mman.h
+++ b/tools/include/uapi/asm-generic/mman.h
@@ -13,6 +13,7 @@
13#define MAP_NONBLOCK 0x10000 /* do not block on IO */ 13#define MAP_NONBLOCK 0x10000 /* do not block on IO */
14#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ 14#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
15#define MAP_HUGETLB 0x40000 /* create a huge page mapping */ 15#define MAP_HUGETLB 0x40000 /* create a huge page mapping */
16#define MAP_SYNC 0x80000 /* perform synchronous page faults for the mapping */
16 17
17/* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */ 18/* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */
18 19
diff --git a/tools/include/uapi/asm/bpf_perf_event.h b/tools/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 000000000000..13a58531e6fa
--- /dev/null
+++ b/tools/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,7 @@
1#if defined(__aarch64__)
2#include "../../arch/arm64/include/uapi/asm/bpf_perf_event.h"
3#elif defined(__s390__)
4#include "../../arch/s390/include/uapi/asm/bpf_perf_event.h"
5#else
6#include <uapi/asm-generic/bpf_perf_event.h>
7#endif
diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h
index 97677cd6964d..6fdff5945c8a 100644
--- a/tools/include/uapi/drm/drm.h
+++ b/tools/include/uapi/drm/drm.h
@@ -737,6 +737,28 @@ struct drm_syncobj_array {
737 __u32 pad; 737 __u32 pad;
738}; 738};
739 739
740/* Query current scanout sequence number */
741struct drm_crtc_get_sequence {
742 __u32 crtc_id; /* requested crtc_id */
743 __u32 active; /* return: crtc output is active */
744 __u64 sequence; /* return: most recent vblank sequence */
745 __s64 sequence_ns; /* return: most recent time of first pixel out */
746};
747
748/* Queue event to be delivered at specified sequence. Time stamp marks
749 * when the first pixel of the refresh cycle leaves the display engine
750 * for the display
751 */
752#define DRM_CRTC_SEQUENCE_RELATIVE 0x00000001 /* sequence is relative to current */
753#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS 0x00000002 /* Use next sequence if we've missed */
754
755struct drm_crtc_queue_sequence {
756 __u32 crtc_id;
757 __u32 flags;
758 __u64 sequence; /* on input, target sequence. on output, actual sequence */
759 __u64 user_data; /* user data passed to event */
760};
761
740#if defined(__cplusplus) 762#if defined(__cplusplus)
741} 763}
742#endif 764#endif
@@ -819,6 +841,9 @@ extern "C" {
819 841
820#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank) 842#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
821 843
844#define DRM_IOCTL_CRTC_GET_SEQUENCE DRM_IOWR(0x3b, struct drm_crtc_get_sequence)
845#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE DRM_IOWR(0x3c, struct drm_crtc_queue_sequence)
846
822#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw) 847#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
823 848
824#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res) 849#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
@@ -863,6 +888,11 @@ extern "C" {
863#define DRM_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct drm_syncobj_array) 888#define DRM_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct drm_syncobj_array)
864#define DRM_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct drm_syncobj_array) 889#define DRM_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct drm_syncobj_array)
865 890
891#define DRM_IOCTL_MODE_CREATE_LEASE DRM_IOWR(0xC6, struct drm_mode_create_lease)
892#define DRM_IOCTL_MODE_LIST_LESSEES DRM_IOWR(0xC7, struct drm_mode_list_lessees)
893#define DRM_IOCTL_MODE_GET_LEASE DRM_IOWR(0xC8, struct drm_mode_get_lease)
894#define DRM_IOCTL_MODE_REVOKE_LEASE DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
895
866/** 896/**
867 * Device specific ioctls should only be in their respective headers 897 * Device specific ioctls should only be in their respective headers
868 * The device specific ioctl range is from 0x40 to 0x9f. 898 * The device specific ioctl range is from 0x40 to 0x9f.
@@ -893,6 +923,7 @@ struct drm_event {
893 923
894#define DRM_EVENT_VBLANK 0x01 924#define DRM_EVENT_VBLANK 0x01
895#define DRM_EVENT_FLIP_COMPLETE 0x02 925#define DRM_EVENT_FLIP_COMPLETE 0x02
926#define DRM_EVENT_CRTC_SEQUENCE 0x03
896 927
897struct drm_event_vblank { 928struct drm_event_vblank {
898 struct drm_event base; 929 struct drm_event base;
@@ -903,6 +934,16 @@ struct drm_event_vblank {
903 __u32 crtc_id; /* 0 on older kernels that do not support this */ 934 __u32 crtc_id; /* 0 on older kernels that do not support this */
904}; 935};
905 936
937/* Event delivered at sequence. Time stamp marks when the first pixel
938 * of the refresh cycle leaves the display engine for the display
939 */
940struct drm_event_crtc_sequence {
941 struct drm_event base;
942 __u64 user_data;
943 __s64 time_ns;
944 __u64 sequence;
945};
946
906/* typedef area */ 947/* typedef area */
907#ifndef __KERNEL__ 948#ifndef __KERNEL__
908typedef struct drm_clip_rect drm_clip_rect_t; 949typedef struct drm_clip_rect drm_clip_rect_t;
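
As a rough usage sketch (not in the commit): querying the new scanout sequence ioctl. The device node and crtc_id below are placeholders and error handling is trimmed.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

int main(void)
{
	struct drm_crtc_get_sequence seq = { .crtc_id = 42 /* example id */ };
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;
	if (!ioctl(fd, DRM_IOCTL_CRTC_GET_SEQUENCE, &seq))
		printf("crtc %u: active=%u sequence=%llu at %lld ns\n",
		       seq.crtc_id, seq.active,
		       (unsigned long long)seq.sequence,
		       (long long)seq.sequence_ns);
	close(fd);
	return 0;
}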
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 9816590d3ad2..ac3c6503ca27 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -397,10 +397,20 @@ typedef struct drm_i915_irq_wait {
397#define I915_PARAM_MIN_EU_IN_POOL 39 397#define I915_PARAM_MIN_EU_IN_POOL 39
398#define I915_PARAM_MMAP_GTT_VERSION 40 398#define I915_PARAM_MMAP_GTT_VERSION 40
399 399
400/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution 400/*
401 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
401 * priorities and the driver will attempt to execute batches in priority order. 402 * priorities and the driver will attempt to execute batches in priority order.
403 * The param returns a capability bitmask, nonzero implies that the scheduler
404 * is enabled, with different features present according to the mask.
405 *
406 * The initial priority for each batch is supplied by the context and is
407 * controlled via I915_CONTEXT_PARAM_PRIORITY.
402 */ 408 */
403#define I915_PARAM_HAS_SCHEDULER 41 409#define I915_PARAM_HAS_SCHEDULER 41
410#define I915_SCHEDULER_CAP_ENABLED (1ul << 0)
411#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
412#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
413
404#define I915_PARAM_HUC_STATUS 42 414#define I915_PARAM_HUC_STATUS 42
405 415
406/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of 416/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
@@ -1309,14 +1319,16 @@ struct drm_i915_reg_read {
1309 * be specified 1319 * be specified
1310 */ 1320 */
1311 __u64 offset; 1321 __u64 offset;
1322#define I915_REG_READ_8B_WA (1ul << 0)
1323
1312 __u64 val; /* Return value */ 1324 __u64 val; /* Return value */
1313}; 1325};
1314/* Known registers: 1326/* Known registers:
1315 * 1327 *
1316 * Render engine timestamp - 0x2358 + 64bit - gen7+ 1328 * Render engine timestamp - 0x2358 + 64bit - gen7+
1317 * - Note this register returns an invalid value if using the default 1329 * - Note this register returns an invalid value if using the default
1318 * single instruction 8byte read, in order to workaround that use 1330 * single instruction 8byte read, in order to workaround that pass
1319 * offset (0x2538 | 1) instead. 1331 * flag I915_REG_READ_8B_WA in offset field.
1320 * 1332 *
1321 */ 1333 */
1322 1334
@@ -1359,6 +1371,10 @@ struct drm_i915_gem_context_param {
1359#define I915_CONTEXT_PARAM_GTT_SIZE 0x3 1371#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
1360#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4 1372#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
1361#define I915_CONTEXT_PARAM_BANNABLE 0x5 1373#define I915_CONTEXT_PARAM_BANNABLE 0x5
1374#define I915_CONTEXT_PARAM_PRIORITY 0x6
1375#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
1376#define I915_CONTEXT_DEFAULT_PRIORITY 0
1377#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
1362 __u64 value; 1378 __u64 value;
1363}; 1379};
1364 1380
@@ -1510,9 +1526,14 @@ struct drm_i915_perf_oa_config {
1510 __u32 n_boolean_regs; 1526 __u32 n_boolean_regs;
1511 __u32 n_flex_regs; 1527 __u32 n_flex_regs;
1512 1528
1513 __u64 __user mux_regs_ptr; 1529 /*
1514 __u64 __user boolean_regs_ptr; 1530 * These fields are pointers to tuples of u32 values (register
1515 __u64 __user flex_regs_ptr; 1531 * address, value). For example the expected length of the buffer
1532 * pointed by mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
1533 */
1534 __u64 mux_regs_ptr;
1535 __u64 boolean_regs_ptr;
1536 __u64 flex_regs_ptr;
1516}; 1537};
1517 1538
1518#if defined(__cplusplus) 1539#if defined(__cplusplus)
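
Presumed usage of the new context-priority parameter (not from the patch): bumping the default context (ctx_id 0) of an already-open i915 fd; the priority value is arbitrary within the documented bounds.

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int set_ctx_priority(int drm_fd, unsigned int prio)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = 0,				/* default context */
		.param = I915_CONTEXT_PARAM_PRIORITY,
		.value = prio,				/* within MIN/MAX above */
	};

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}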
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 30f2ce76b517..4c223ab30293 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -93,6 +93,7 @@ enum bpf_cmd {
93 BPF_PROG_GET_FD_BY_ID, 93 BPF_PROG_GET_FD_BY_ID,
94 BPF_MAP_GET_FD_BY_ID, 94 BPF_MAP_GET_FD_BY_ID,
95 BPF_OBJ_GET_INFO_BY_FD, 95 BPF_OBJ_GET_INFO_BY_FD,
96 BPF_PROG_QUERY,
96}; 97};
97 98
98enum bpf_map_type { 99enum bpf_map_type {
@@ -112,6 +113,7 @@ enum bpf_map_type {
112 BPF_MAP_TYPE_HASH_OF_MAPS, 113 BPF_MAP_TYPE_HASH_OF_MAPS,
113 BPF_MAP_TYPE_DEVMAP, 114 BPF_MAP_TYPE_DEVMAP,
114 BPF_MAP_TYPE_SOCKMAP, 115 BPF_MAP_TYPE_SOCKMAP,
116 BPF_MAP_TYPE_CPUMAP,
115}; 117};
116 118
117enum bpf_prog_type { 119enum bpf_prog_type {
@@ -130,6 +132,7 @@ enum bpf_prog_type {
130 BPF_PROG_TYPE_LWT_XMIT, 132 BPF_PROG_TYPE_LWT_XMIT,
131 BPF_PROG_TYPE_SOCK_OPS, 133 BPF_PROG_TYPE_SOCK_OPS,
132 BPF_PROG_TYPE_SK_SKB, 134 BPF_PROG_TYPE_SK_SKB,
135 BPF_PROG_TYPE_CGROUP_DEVICE,
133}; 136};
134 137
135enum bpf_attach_type { 138enum bpf_attach_type {
@@ -139,16 +142,53 @@ enum bpf_attach_type {
139 BPF_CGROUP_SOCK_OPS, 142 BPF_CGROUP_SOCK_OPS,
140 BPF_SK_SKB_STREAM_PARSER, 143 BPF_SK_SKB_STREAM_PARSER,
141 BPF_SK_SKB_STREAM_VERDICT, 144 BPF_SK_SKB_STREAM_VERDICT,
145 BPF_CGROUP_DEVICE,
142 __MAX_BPF_ATTACH_TYPE 146 __MAX_BPF_ATTACH_TYPE
143}; 147};
144 148
145#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE 149#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
146 150
147/* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command 151/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
148 * to the given target_fd cgroup the descendent cgroup will be able to 152 *
149 * override effective bpf program that was inherited from this cgroup 153 * NONE(default): No further bpf programs allowed in the subtree.
154 *
155 * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
156 * the program in this cgroup yields to sub-cgroup program.
157 *
158 * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
159 * that cgroup program gets run in addition to the program in this cgroup.
160 *
161 * Only one program is allowed to be attached to a cgroup with
162 * NONE or BPF_F_ALLOW_OVERRIDE flag.
163 * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
 164 * release old program and attach the new one. Attach flags have to match.
165 *
166 * Multiple programs are allowed to be attached to a cgroup with
167 * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
168 * (those that were attached first, run first)
169 * The programs of sub-cgroup are executed first, then programs of
170 * this cgroup and then programs of parent cgroup.
 171 * When a child program makes a decision (like picking TCP CA or sock bind),
 172 * the parent program has a chance to override it.
173 *
174 * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
175 * A cgroup with NONE doesn't allow any programs in sub-cgroups.
176 * Ex1:
177 * cgrp1 (MULTI progs A, B) ->
178 * cgrp2 (OVERRIDE prog C) ->
179 * cgrp3 (MULTI prog D) ->
180 * cgrp4 (OVERRIDE prog E) ->
181 * cgrp5 (NONE prog F)
182 * the event in cgrp5 triggers execution of F,D,A,B in that order.
183 * if prog F is detached, the execution is E,D,A,B
184 * if prog F and D are detached, the execution is E,A,B
185 * if prog F, E and D are detached, the execution is C,A,B
186 *
187 * All eligible programs are executed regardless of return code from
188 * earlier programs.
150 */ 189 */
151#define BPF_F_ALLOW_OVERRIDE (1U << 0) 190#define BPF_F_ALLOW_OVERRIDE (1U << 0)
191#define BPF_F_ALLOW_MULTI (1U << 1)
152 192
153/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the 193/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
154 * verifier will perform strict alignment checking as if the kernel 194 * verifier will perform strict alignment checking as if the kernel
@@ -176,6 +216,15 @@ enum bpf_attach_type {
176/* Specify numa node during map creation */ 216/* Specify numa node during map creation */
177#define BPF_F_NUMA_NODE (1U << 2) 217#define BPF_F_NUMA_NODE (1U << 2)
178 218
219/* flags for BPF_PROG_QUERY */
220#define BPF_F_QUERY_EFFECTIVE (1U << 0)
221
222#define BPF_OBJ_NAME_LEN 16U
223
224/* Flags for accessing BPF object */
225#define BPF_F_RDONLY (1U << 3)
226#define BPF_F_WRONLY (1U << 4)
227
179union bpf_attr { 228union bpf_attr {
180 struct { /* anonymous struct used by BPF_MAP_CREATE command */ 229 struct { /* anonymous struct used by BPF_MAP_CREATE command */
181 __u32 map_type; /* one of enum bpf_map_type */ 230 __u32 map_type; /* one of enum bpf_map_type */
@@ -189,6 +238,7 @@ union bpf_attr {
189 __u32 numa_node; /* numa node (effective only if 238 __u32 numa_node; /* numa node (effective only if
190 * BPF_F_NUMA_NODE is set). 239 * BPF_F_NUMA_NODE is set).
191 */ 240 */
241 char map_name[BPF_OBJ_NAME_LEN];
192 }; 242 };
193 243
194 struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ 244 struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -211,11 +261,14 @@ union bpf_attr {
211 __aligned_u64 log_buf; /* user supplied buffer */ 261 __aligned_u64 log_buf; /* user supplied buffer */
212 __u32 kern_version; /* checked when prog_type=kprobe */ 262 __u32 kern_version; /* checked when prog_type=kprobe */
213 __u32 prog_flags; 263 __u32 prog_flags;
264 char prog_name[BPF_OBJ_NAME_LEN];
265 __u32 prog_ifindex; /* ifindex of netdev to prep for */
214 }; 266 };
215 267
216 struct { /* anonymous struct used by BPF_OBJ_* commands */ 268 struct { /* anonymous struct used by BPF_OBJ_* commands */
217 __aligned_u64 pathname; 269 __aligned_u64 pathname;
218 __u32 bpf_fd; 270 __u32 bpf_fd;
271 __u32 file_flags;
219 }; 272 };
220 273
221 struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */ 274 struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
@@ -243,6 +296,7 @@ union bpf_attr {
243 __u32 map_id; 296 __u32 map_id;
244 }; 297 };
245 __u32 next_id; 298 __u32 next_id;
299 __u32 open_flags;
246 }; 300 };
247 301
248 struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */ 302 struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
@@ -250,6 +304,15 @@ union bpf_attr {
250 __u32 info_len; 304 __u32 info_len;
251 __aligned_u64 info; 305 __aligned_u64 info;
252 } info; 306 } info;
307
308 struct { /* anonymous struct used by BPF_PROG_QUERY command */
309 __u32 target_fd; /* container object to query */
310 __u32 attach_type;
311 __u32 query_flags;
312 __u32 attach_flags;
313 __aligned_u64 prog_ids;
314 __u32 prog_cnt;
315 } query;
253} __attribute__((aligned(8))); 316} __attribute__((aligned(8)));
254 317
255/* BPF helper function descriptions: 318/* BPF helper function descriptions:
@@ -554,12 +617,22 @@ union bpf_attr {
554 * int bpf_setsockopt(bpf_socket, level, optname, optval, optlen) 617 * int bpf_setsockopt(bpf_socket, level, optname, optval, optlen)
555 * Calls setsockopt. Not all opts are available, only those with 618 * Calls setsockopt. Not all opts are available, only those with
556 * integer optvals plus TCP_CONGESTION. 619 * integer optvals plus TCP_CONGESTION.
557 * Supported levels: SOL_SOCKET and IPROTO_TCP 620 * Supported levels: SOL_SOCKET and IPPROTO_TCP
621 * @bpf_socket: pointer to bpf_socket
622 * @level: SOL_SOCKET or IPPROTO_TCP
623 * @optname: option name
624 * @optval: pointer to option value
625 * @optlen: length of optval in bytes
626 * Return: 0 or negative error
627 *
628 * int bpf_getsockopt(bpf_socket, level, optname, optval, optlen)
629 * Calls getsockopt. Not all opts are available.
630 * Supported levels: IPPROTO_TCP
558 * @bpf_socket: pointer to bpf_socket 631 * @bpf_socket: pointer to bpf_socket
559 * @level: SOL_SOCKET or IPROTO_TCP 632 * @level: IPPROTO_TCP
560 * @optname: option name 633 * @optname: option name
561 * @optval: pointer to option value 634 * @optval: pointer to option value
562 * @optlen: length of optval in byes 635 * @optlen: length of optval in bytes
563 * Return: 0 or negative error 636 * Return: 0 or negative error
564 * 637 *
565 * int bpf_skb_adjust_room(skb, len_diff, mode, flags) 638 * int bpf_skb_adjust_room(skb, len_diff, mode, flags)
@@ -583,6 +656,27 @@ union bpf_attr {
583 * @map: pointer to sockmap to update 656 * @map: pointer to sockmap to update
584 * @key: key to insert/update sock in map 657 * @key: key to insert/update sock in map
585 * @flags: same flags as map update elem 658 * @flags: same flags as map update elem
659 *
660 * int bpf_xdp_adjust_meta(xdp_md, delta)
661 * Adjust the xdp_md.data_meta by delta
662 * @xdp_md: pointer to xdp_md
 663 * @delta: A positive/negative integer to be added to xdp_md.data_meta
664 * Return: 0 on success or negative on error
665 *
666 * int bpf_perf_event_read_value(map, flags, buf, buf_size)
667 * read perf event counter value and perf event enabled/running time
668 * @map: pointer to perf_event_array map
669 * @flags: index of event in the map or bitmask flags
670 * @buf: buf to fill
671 * @buf_size: size of the buf
672 * Return: 0 on success or negative error code
673 *
674 * int bpf_perf_prog_read_value(ctx, buf, buf_size)
675 * read perf prog attached perf event counter and enabled/running time
676 * @ctx: pointer to ctx
677 * @buf: buf to fill
678 * @buf_size: size of the buf
679 * Return : 0 on success or negative error code
586 */ 680 */
587#define __BPF_FUNC_MAPPER(FN) \ 681#define __BPF_FUNC_MAPPER(FN) \
588 FN(unspec), \ 682 FN(unspec), \
@@ -639,6 +733,10 @@ union bpf_attr {
639 FN(redirect_map), \ 733 FN(redirect_map), \
640 FN(sk_redirect_map), \ 734 FN(sk_redirect_map), \
641 FN(sock_map_update), \ 735 FN(sock_map_update), \
736 FN(xdp_adjust_meta), \
737 FN(perf_event_read_value), \
738 FN(perf_prog_read_value), \
739 FN(getsockopt),
642 740
643/* integer value in 'imm' field of BPF_CALL instruction selects which helper 741/* integer value in 'imm' field of BPF_CALL instruction selects which helper
644 * function eBPF program intends to call 742 * function eBPF program intends to call
@@ -682,7 +780,9 @@ enum bpf_func_id {
682#define BPF_F_ZERO_CSUM_TX (1ULL << 1) 780#define BPF_F_ZERO_CSUM_TX (1ULL << 1)
683#define BPF_F_DONT_FRAGMENT (1ULL << 2) 781#define BPF_F_DONT_FRAGMENT (1ULL << 2)
684 782
685/* BPF_FUNC_perf_event_output and BPF_FUNC_perf_event_read flags. */ 783/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
784 * BPF_FUNC_perf_event_read_value flags.
785 */
686#define BPF_F_INDEX_MASK 0xffffffffULL 786#define BPF_F_INDEX_MASK 0xffffffffULL
687#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK 787#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK
688/* BPF_FUNC_perf_event_output for sk_buff input context. */ 788/* BPF_FUNC_perf_event_output for sk_buff input context. */
@@ -716,7 +816,7 @@ struct __sk_buff {
716 __u32 data_end; 816 __u32 data_end;
717 __u32 napi_id; 817 __u32 napi_id;
718 818
719 /* accessed by BPF_PROG_TYPE_sk_skb types */ 819 /* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */
720 __u32 family; 820 __u32 family;
721 __u32 remote_ip4; /* Stored in network byte order */ 821 __u32 remote_ip4; /* Stored in network byte order */
722 __u32 local_ip4; /* Stored in network byte order */ 822 __u32 local_ip4; /* Stored in network byte order */
@@ -724,6 +824,9 @@ struct __sk_buff {
724 __u32 local_ip6[4]; /* Stored in network byte order */ 824 __u32 local_ip6[4]; /* Stored in network byte order */
725 __u32 remote_port; /* Stored in network byte order */ 825 __u32 remote_port; /* Stored in network byte order */
726 __u32 local_port; /* stored in host byte order */ 826 __u32 local_port; /* stored in host byte order */
827 /* ... here. */
828
829 __u32 data_meta;
727}; 830};
728 831
729struct bpf_tunnel_key { 832struct bpf_tunnel_key {
@@ -784,6 +887,7 @@ enum xdp_action {
784struct xdp_md { 887struct xdp_md {
785 __u32 data; 888 __u32 data;
786 __u32 data_end; 889 __u32 data_end;
890 __u32 data_meta;
787}; 891};
788 892
789enum sk_action { 893enum sk_action {
@@ -801,6 +905,11 @@ struct bpf_prog_info {
801 __u32 xlated_prog_len; 905 __u32 xlated_prog_len;
802 __aligned_u64 jited_prog_insns; 906 __aligned_u64 jited_prog_insns;
803 __aligned_u64 xlated_prog_insns; 907 __aligned_u64 xlated_prog_insns;
908 __u64 load_time; /* ns since boottime */
909 __u32 created_by_uid;
910 __u32 nr_map_ids;
911 __aligned_u64 map_ids;
912 char name[BPF_OBJ_NAME_LEN];
804} __attribute__((aligned(8))); 913} __attribute__((aligned(8)));
805 914
806struct bpf_map_info { 915struct bpf_map_info {
@@ -810,6 +919,7 @@ struct bpf_map_info {
810 __u32 value_size; 919 __u32 value_size;
811 __u32 max_entries; 920 __u32 max_entries;
812 __u32 map_flags; 921 __u32 map_flags;
922 char name[BPF_OBJ_NAME_LEN];
813} __attribute__((aligned(8))); 923} __attribute__((aligned(8)));
814 924
815/* User bpf_sock_ops struct to access socket values and specify request ops 925/* User bpf_sock_ops struct to access socket values and specify request ops
@@ -859,9 +969,35 @@ enum {
859 BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control 969 BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control
860 * needs ECN 970 * needs ECN
861 */ 971 */
972 BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is
973 * based on the path and may be
974 * dependent on the congestion control
975 * algorithm. In general it indicates
976 * a congestion threshold. RTTs above
977 * this indicate congestion
978 */
862}; 979};
863 980
864#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */ 981#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */
865#define TCP_BPF_SNDCWND_CLAMP 1002 /* Set sndcwnd_clamp */ 982#define TCP_BPF_SNDCWND_CLAMP 1002 /* Set sndcwnd_clamp */
866 983
984struct bpf_perf_event_value {
985 __u64 counter;
986 __u64 enabled;
987 __u64 running;
988};
989
990#define BPF_DEVCG_ACC_MKNOD (1ULL << 0)
991#define BPF_DEVCG_ACC_READ (1ULL << 1)
992#define BPF_DEVCG_ACC_WRITE (1ULL << 2)
993
994#define BPF_DEVCG_DEV_BLOCK (1ULL << 0)
995#define BPF_DEVCG_DEV_CHAR (1ULL << 1)
996
997struct bpf_cgroup_dev_ctx {
998 __u32 access_type; /* (access << 16) | type */
999 __u32 major;
1000 __u32 minor;
1001};
1002
867#endif /* _UAPI__LINUX_BPF_H__ */ 1003#endif /* _UAPI__LINUX_BPF_H__ */
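
Not part of the diff: a minimal sketch of the BPF_F_ALLOW_MULTI behaviour documented above, attaching an already-loaded program to a cgroup so that sub-cgroup programs run in addition to it. The program id, cgroup path, and attach type are placeholders; <bpf.h> is assumed to come from tools/lib/bpf.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <bpf.h>

static int attach_multi(unsigned int prog_id, const char *cgroup_path)
{
	int prog_fd, cg_fd, err;

	prog_fd = bpf_prog_get_fd_by_id(prog_id);
	if (prog_fd < 0)
		return -1;

	cg_fd = open(cgroup_path, O_RDONLY);
	if (cg_fd < 0) {
		close(prog_fd);
		return -1;
	}

	/* with ALLOW_MULTI the program is appended; existing ones keep running */
	err = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_INGRESS,
			      BPF_F_ALLOW_MULTI);
	if (err)
		perror("BPF_PROG_ATTACH");

	close(cg_fd);
	close(prog_fd);
	return err;
}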
diff --git a/tools/include/uapi/linux/bpf_perf_event.h b/tools/include/uapi/linux/bpf_perf_event.h
index 067427259820..8f95303f9d80 100644
--- a/tools/include/uapi/linux/bpf_perf_event.h
+++ b/tools/include/uapi/linux/bpf_perf_event.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
1/* Copyright (c) 2016 Facebook 2/* Copyright (c) 2016 Facebook
2 * 3 *
3 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
@@ -7,11 +8,10 @@
7#ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__ 8#ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__
8#define _UAPI__LINUX_BPF_PERF_EVENT_H__ 9#define _UAPI__LINUX_BPF_PERF_EVENT_H__
9 10
10#include <linux/types.h> 11#include <asm/bpf_perf_event.h>
11#include <linux/ptrace.h>
12 12
13struct bpf_perf_event_data { 13struct bpf_perf_event_data {
14 struct pt_regs regs; 14 bpf_user_pt_regs_t regs;
15 __u64 sample_period; 15 __u64 sample_period;
16}; 16};
17 17
diff --git a/tools/include/uapi/linux/kcmp.h b/tools/include/uapi/linux/kcmp.h
new file mode 100644
index 000000000000..ef1305010925
--- /dev/null
+++ b/tools/include/uapi/linux/kcmp.h
@@ -0,0 +1,28 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef _UAPI_LINUX_KCMP_H
3#define _UAPI_LINUX_KCMP_H
4
5#include <linux/types.h>
6
7/* Comparison type */
8enum kcmp_type {
9 KCMP_FILE,
10 KCMP_VM,
11 KCMP_FILES,
12 KCMP_FS,
13 KCMP_SIGHAND,
14 KCMP_IO,
15 KCMP_SYSVSEM,
16 KCMP_EPOLL_TFD,
17
18 KCMP_TYPES,
19};
20
21/* Slot for KCMP_EPOLL_TFD */
22struct kcmp_epoll_slot {
23 __u32 efd; /* epoll file descriptor */
24 __u32 tfd; /* target file number */
25 __u32 toff; /* target offset within same numbered sequence */
26};
27
28#endif /* _UAPI_LINUX_KCMP_H */
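
A hypothetical consumer of the header above: kcmp() has no glibc wrapper, so it is called through syscall(2) (SYS_kcmp assumed to be provided by the libc headers); KCMP_FILE asks whether two descriptors refer to the same struct file.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/kcmp.h>

static long same_file(pid_t pid, int fd1, int fd2)
{
	/* 0 means identical; 1-3 are ordering results; negative is an error */
	return syscall(SYS_kcmp, pid, pid, KCMP_FILE, fd1, fd2);
}

int main(void)
{
	int dup_fd = dup(STDOUT_FILENO);

	printf("stdout and its dup %s the same file\n",
	       same_file(getpid(), STDOUT_FILENO, dup_fd) == 0 ? "share" : "do not share");
	return 0;
}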
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 7e99999d6236..496e59a2738b 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -630,9 +630,9 @@ struct kvm_s390_irq {
630 630
631struct kvm_s390_irq_state { 631struct kvm_s390_irq_state {
632 __u64 buf; 632 __u64 buf;
633 __u32 flags; 633 __u32 flags; /* will stay unused for compatibility reasons */
634 __u32 len; 634 __u32 len;
635 __u32 reserved[4]; 635 __u32 reserved[4]; /* will stay unused for compatibility reasons */
636}; 636};
637 637
638/* for KVM_SET_GUEST_DEBUG */ 638/* for KVM_SET_GUEST_DEBUG */
@@ -931,6 +931,7 @@ struct kvm_ppc_resize_hpt {
931#define KVM_CAP_PPC_SMT_POSSIBLE 147 931#define KVM_CAP_PPC_SMT_POSSIBLE 147
932#define KVM_CAP_HYPERV_SYNIC2 148 932#define KVM_CAP_HYPERV_SYNIC2 148
933#define KVM_CAP_HYPERV_VP_INDEX 149 933#define KVM_CAP_HYPERV_VP_INDEX 149
934#define KVM_CAP_S390_AIS_MIGRATION 150
934 935
935#ifdef KVM_CAP_IRQ_ROUTING 936#ifdef KVM_CAP_IRQ_ROUTING
936 937
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index 362493a2f950..b9a4953018ed 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -942,6 +942,7 @@ enum perf_callchain_context {
942#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */ 942#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
943#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */ 943#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
944#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */ 944#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */
945#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */
945 946
946#define PERF_FLAG_FD_NO_GROUP (1UL << 0) 947#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
947#define PERF_FLAG_FD_OUTPUT (1UL << 1) 948#define PERF_FLAG_FD_OUTPUT (1UL << 1)
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
new file mode 100644
index 000000000000..af5f8c2df87a
--- /dev/null
+++ b/tools/include/uapi/linux/prctl.h
@@ -0,0 +1,210 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef _LINUX_PRCTL_H
3#define _LINUX_PRCTL_H
4
5#include <linux/types.h>
6
7/* Values to pass as first argument to prctl() */
8
9#define PR_SET_PDEATHSIG 1 /* Second arg is a signal */
10#define PR_GET_PDEATHSIG 2 /* Second arg is a ptr to return the signal */
11
12/* Get/set current->mm->dumpable */
13#define PR_GET_DUMPABLE 3
14#define PR_SET_DUMPABLE 4
15
16/* Get/set unaligned access control bits (if meaningful) */
17#define PR_GET_UNALIGN 5
18#define PR_SET_UNALIGN 6
19# define PR_UNALIGN_NOPRINT 1 /* silently fix up unaligned user accesses */
20# define PR_UNALIGN_SIGBUS 2 /* generate SIGBUS on unaligned user access */
21
22/* Get/set whether or not to drop capabilities on setuid() away from
23 * uid 0 (as per security/commoncap.c) */
24#define PR_GET_KEEPCAPS 7
25#define PR_SET_KEEPCAPS 8
26
27/* Get/set floating-point emulation control bits (if meaningful) */
28#define PR_GET_FPEMU 9
29#define PR_SET_FPEMU 10
30# define PR_FPEMU_NOPRINT 1 /* silently emulate fp operations accesses */
31# define PR_FPEMU_SIGFPE 2 /* don't emulate fp operations, send SIGFPE instead */
32
33/* Get/set floating-point exception mode (if meaningful) */
34#define PR_GET_FPEXC 11
35#define PR_SET_FPEXC 12
36# define PR_FP_EXC_SW_ENABLE 0x80 /* Use FPEXC for FP exception enables */
37# define PR_FP_EXC_DIV 0x010000 /* floating point divide by zero */
38# define PR_FP_EXC_OVF 0x020000 /* floating point overflow */
39# define PR_FP_EXC_UND 0x040000 /* floating point underflow */
40# define PR_FP_EXC_RES 0x080000 /* floating point inexact result */
41# define PR_FP_EXC_INV 0x100000 /* floating point invalid operation */
42# define PR_FP_EXC_DISABLED 0 /* FP exceptions disabled */
43# define PR_FP_EXC_NONRECOV 1 /* async non-recoverable exc. mode */
44# define PR_FP_EXC_ASYNC 2 /* async recoverable exception mode */
45# define PR_FP_EXC_PRECISE 3 /* precise exception mode */
46
47/* Get/set whether we use statistical process timing or accurate timestamp
48 * based process timing */
49#define PR_GET_TIMING 13
50#define PR_SET_TIMING 14
51# define PR_TIMING_STATISTICAL 0 /* Normal, traditional,
52 statistical process timing */
53# define PR_TIMING_TIMESTAMP 1 /* Accurate timestamp based
54 process timing */
55
56#define PR_SET_NAME 15 /* Set process name */
57#define PR_GET_NAME 16 /* Get process name */
58
59/* Get/set process endian */
60#define PR_GET_ENDIAN 19
61#define PR_SET_ENDIAN 20
62# define PR_ENDIAN_BIG 0
63# define PR_ENDIAN_LITTLE 1 /* True little endian mode */
64# define PR_ENDIAN_PPC_LITTLE 2 /* "PowerPC" pseudo little endian */
65
66/* Get/set process seccomp mode */
67#define PR_GET_SECCOMP 21
68#define PR_SET_SECCOMP 22
69
70/* Get/set the capability bounding set (as per security/commoncap.c) */
71#define PR_CAPBSET_READ 23
72#define PR_CAPBSET_DROP 24
73
74/* Get/set the process' ability to use the timestamp counter instruction */
75#define PR_GET_TSC 25
76#define PR_SET_TSC 26
77# define PR_TSC_ENABLE 1 /* allow the use of the timestamp counter */
78# define PR_TSC_SIGSEGV 2 /* throw a SIGSEGV instead of reading the TSC */
79
80/* Get/set securebits (as per security/commoncap.c) */
81#define PR_GET_SECUREBITS 27
82#define PR_SET_SECUREBITS 28
83
84/*
85 * Get/set the timerslack as used by poll/select/nanosleep
86 * A value of 0 means "use default"
87 */
88#define PR_SET_TIMERSLACK 29
89#define PR_GET_TIMERSLACK 30
90
91#define PR_TASK_PERF_EVENTS_DISABLE 31
92#define PR_TASK_PERF_EVENTS_ENABLE 32
93
94/*
95 * Set early/late kill mode for hwpoison memory corruption.
96 * This influences when the process gets killed on a memory corruption.
97 */
98#define PR_MCE_KILL 33
99# define PR_MCE_KILL_CLEAR 0
100# define PR_MCE_KILL_SET 1
101
102# define PR_MCE_KILL_LATE 0
103# define PR_MCE_KILL_EARLY 1
104# define PR_MCE_KILL_DEFAULT 2
105
106#define PR_MCE_KILL_GET 34
107
108/*
109 * Tune up process memory map specifics.
110 */
111#define PR_SET_MM 35
112# define PR_SET_MM_START_CODE 1
113# define PR_SET_MM_END_CODE 2
114# define PR_SET_MM_START_DATA 3
115# define PR_SET_MM_END_DATA 4
116# define PR_SET_MM_START_STACK 5
117# define PR_SET_MM_START_BRK 6
118# define PR_SET_MM_BRK 7
119# define PR_SET_MM_ARG_START 8
120# define PR_SET_MM_ARG_END 9
121# define PR_SET_MM_ENV_START 10
122# define PR_SET_MM_ENV_END 11
123# define PR_SET_MM_AUXV 12
124# define PR_SET_MM_EXE_FILE 13
125# define PR_SET_MM_MAP 14
126# define PR_SET_MM_MAP_SIZE 15
127
128/*
129 * This structure provides new memory descriptor
130 * map which mostly modifies /proc/pid/stat[m]
131 * output for a task. This mostly done in a
132 * sake of checkpoint/restore functionality.
133 */
134struct prctl_mm_map {
135 __u64 start_code; /* code section bounds */
136 __u64 end_code;
137 __u64 start_data; /* data section bounds */
138 __u64 end_data;
139 __u64 start_brk; /* heap for brk() syscall */
140 __u64 brk;
141 __u64 start_stack; /* stack starts at */
142 __u64 arg_start; /* command line arguments bounds */
143 __u64 arg_end;
144 __u64 env_start; /* environment variables bounds */
145 __u64 env_end;
146 __u64 *auxv; /* auxiliary vector */
147 __u32 auxv_size; /* vector size */
148 __u32 exe_fd; /* /proc/$pid/exe link file */
149};
150
151/*
152 * Set specific pid that is allowed to ptrace the current task.
153 * A value of 0 mean "no process".
154 */
155#define PR_SET_PTRACER 0x59616d61
156# define PR_SET_PTRACER_ANY ((unsigned long)-1)
157
158#define PR_SET_CHILD_SUBREAPER 36
159#define PR_GET_CHILD_SUBREAPER 37
160
161/*
162 * If no_new_privs is set, then operations that grant new privileges (i.e.
163 * execve) will either fail or not grant them. This affects suid/sgid,
164 * file capabilities, and LSMs.
165 *
166 * Operations that merely manipulate or drop existing privileges (setresuid,
167 * capset, etc.) will still work. Drop those privileges if you want them gone.
168 *
169 * Changing LSM security domain is considered a new privilege. So, for example,
170 * asking selinux for a specific new context (e.g. with runcon) will result
171 * in execve returning -EPERM.
172 *
173 * See Documentation/prctl/no_new_privs.txt for more details.
174 */
175#define PR_SET_NO_NEW_PRIVS 38
176#define PR_GET_NO_NEW_PRIVS 39
177
178#define PR_GET_TID_ADDRESS 40
179
180#define PR_SET_THP_DISABLE 41
181#define PR_GET_THP_DISABLE 42
182
183/*
184 * Tell the kernel to start/stop helping userspace manage bounds tables.
185 */
186#define PR_MPX_ENABLE_MANAGEMENT 43
187#define PR_MPX_DISABLE_MANAGEMENT 44
188
189#define PR_SET_FP_MODE 45
190#define PR_GET_FP_MODE 46
191# define PR_FP_MODE_FR (1 << 0) /* 64b FP registers */
192# define PR_FP_MODE_FRE (1 << 1) /* 32b compatibility */
193
194/* Control the ambient capability set */
195#define PR_CAP_AMBIENT 47
196# define PR_CAP_AMBIENT_IS_SET 1
197# define PR_CAP_AMBIENT_RAISE 2
198# define PR_CAP_AMBIENT_LOWER 3
199# define PR_CAP_AMBIENT_CLEAR_ALL 4
200
201/* arm64 Scalable Vector Extension controls */
202/* Flag values must be kept in sync with ptrace NT_ARM_SVE interface */
203#define PR_SVE_SET_VL 50 /* set task vector length */
204# define PR_SVE_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */
205#define PR_SVE_GET_VL 51 /* get task vector length */
206/* Bits common to PR_SVE_SET_VL and PR_SVE_GET_VL */
207# define PR_SVE_VL_LEN_MASK 0xffff
208# define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */
209
210#endif /* _LINUX_PRCTL_H */
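A short usage sketch of two of the requests above (illustrative only: it assumes the updated uapi header is on the include path, the SVE call succeeds only on SVE-capable arm64 kernels, and the 64-byte vector length is an arbitrary example):

    /* Sketch: set no_new_privs, then request an SVE vector length
     * that is inherited across exec. */
    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
            /* Irreversible for the lifetime of the task. */
            if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                    perror("PR_SET_NO_NEW_PRIVS");

            /* arg2 packs the vector length in bytes with the flag bits. */
            if (prctl(PR_SVE_SET_VL, 64 | PR_SVE_VL_INHERIT, 0, 0, 0) < 0)
                    perror("PR_SVE_SET_VL (expected to fail off arm64)");
            return 0;
    }
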
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index 32283d88701a..a5684d0968b4 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -19,9 +19,11 @@ Three different ways of output formatting are available:
19 19
20The data is sampled from the KVM's debugfs entries and its perf events. 20The data is sampled from the KVM's debugfs entries and its perf events.
21""" 21"""
22from __future__ import print_function
22 23
23import curses 24import curses
24import sys 25import sys
26import locale
25import os 27import os
26import time 28import time
27import optparse 29import optparse
@@ -225,6 +227,8 @@ IOCTL_NUMBERS = {
225 'RESET': 0x00002403, 227 'RESET': 0x00002403,
226} 228}
227 229
230ENCODING = locale.getpreferredencoding(False)
231
228 232
229class Arch(object): 233class Arch(object):
230 """Encapsulates global architecture specific data. 234 """Encapsulates global architecture specific data.
@@ -474,7 +478,7 @@ class Provider(object):
474 @staticmethod 478 @staticmethod
475 def is_field_wanted(fields_filter, field): 479 def is_field_wanted(fields_filter, field):
476 """Indicate whether field is valid according to fields_filter.""" 480 """Indicate whether field is valid according to fields_filter."""
477 if not fields_filter or fields_filter == "help": 481 if not fields_filter:
478 return True 482 return True
479 return re.match(fields_filter, field) is not None 483 return re.match(fields_filter, field) is not None
480 484
@@ -545,8 +549,8 @@ class TracepointProvider(Provider):
545 549
546 def update_fields(self, fields_filter): 550 def update_fields(self, fields_filter):
547 """Refresh fields, applying fields_filter""" 551 """Refresh fields, applying fields_filter"""
548 self._fields = [field for field in self.get_available_fields() 552 self.fields = [field for field in self.get_available_fields()
549 if self.is_field_wanted(fields_filter, field)] 553 if self.is_field_wanted(fields_filter, field)]
550 554
551 @staticmethod 555 @staticmethod
552 def get_online_cpus(): 556 def get_online_cpus():
@@ -666,7 +670,7 @@ class TracepointProvider(Provider):
666 """Returns 'event name: current value' for all enabled events.""" 670 """Returns 'event name: current value' for all enabled events."""
667 ret = defaultdict(int) 671 ret = defaultdict(int)
668 for group in self.group_leaders: 672 for group in self.group_leaders:
669 for name, val in group.read().iteritems(): 673 for name, val in group.read().items():
670 if name in self._fields: 674 if name in self._fields:
671 ret[name] += val 675 ret[name] += val
672 return ret 676 return ret
@@ -946,7 +950,8 @@ class Tui(object):
946 curses.nocbreak() 950 curses.nocbreak()
947 curses.endwin() 951 curses.endwin()
948 952
949 def get_all_gnames(self): 953 @staticmethod
954 def get_all_gnames():
950 """Returns a list of (pid, gname) tuples of all running guests""" 955 """Returns a list of (pid, gname) tuples of all running guests"""
951 res = [] 956 res = []
952 try: 957 try:
@@ -955,11 +960,11 @@ class Tui(object):
955 except: 960 except:
956 raise Exception 961 raise Exception
957 for line in child.stdout: 962 for line in child.stdout:
958 line = line.lstrip().split(' ', 1) 963 line = line.decode(ENCODING).lstrip().split(' ', 1)
959 # perform a sanity check before calling the more expensive 964 # perform a sanity check before calling the more expensive
960 # function to possibly extract the guest name 965 # function to possibly extract the guest name
961 if ' -name ' in line[1]: 966 if ' -name ' in line[1]:
962 res.append((line[0], self.get_gname_from_pid(line[0]))) 967 res.append((line[0], Tui.get_gname_from_pid(line[0])))
963 child.stdout.close() 968 child.stdout.close()
964 969
965 return res 970 return res
@@ -980,7 +985,8 @@ class Tui(object):
980 except Exception: 985 except Exception:
981 self.screen.addstr(row + 1, 2, 'Not available') 986 self.screen.addstr(row + 1, 2, 'Not available')
982 987
983 def get_pid_from_gname(self, gname): 988 @staticmethod
989 def get_pid_from_gname(gname):
984 """Fuzzy function to convert guest name to QEMU process pid. 990 """Fuzzy function to convert guest name to QEMU process pid.
985 991
986 Returns a list of potential pids, can be empty if no match found. 992 Returns a list of potential pids, can be empty if no match found.
@@ -988,7 +994,7 @@ class Tui(object):
988 994
989 """ 995 """
990 pids = [] 996 pids = []
991 for line in self.get_all_gnames(): 997 for line in Tui.get_all_gnames():
992 if gname == line[1]: 998 if gname == line[1]:
993 pids.append(int(line[0])) 999 pids.append(int(line[0]))
994 1000
@@ -1005,7 +1011,7 @@ class Tui(object):
1005 name = '' 1011 name = ''
1006 try: 1012 try:
1007 line = open('/proc/{}/cmdline' 1013 line = open('/proc/{}/cmdline'
1008 .format(pid), 'rb').read().split('\0') 1014 .format(pid), 'r').read().split('\0')
1009 parms = line[line.index('-name') + 1].split(',') 1015 parms = line[line.index('-name') + 1].split(',')
1010 while '' in parms: 1016 while '' in parms:
1011 # commas are escaped (i.e. ',,'), hence e.g. 'foo,bar' results 1017 # commas are escaped (i.e. ',,'), hence e.g. 'foo,bar' results
@@ -1086,15 +1092,16 @@ class Tui(object):
1086 # sort by totals 1092 # sort by totals
1087 return (0, -stats[x][0]) 1093 return (0, -stats[x][0])
1088 total = 0. 1094 total = 0.
1089 for val in stats.values(): 1095 for key in stats.keys():
1090 total += val[0] 1096 if key.find('(') is -1:
1097 total += stats[key][0]
1091 if self._sorting == SORT_DEFAULT: 1098 if self._sorting == SORT_DEFAULT:
1092 sortkey = sortCurAvg 1099 sortkey = sortCurAvg
1093 else: 1100 else:
1094 sortkey = sortTotal 1101 sortkey = sortTotal
1102 tavg = 0
1095 for key in sorted(stats.keys(), key=sortkey): 1103 for key in sorted(stats.keys(), key=sortkey):
1096 1104 if row >= self.screen.getmaxyx()[0] - 1:
1097 if row >= self.screen.getmaxyx()[0]:
1098 break 1105 break
1099 values = stats[key] 1106 values = stats[key]
1100 if not values[0] and not values[1]: 1107 if not values[0] and not values[1]:
@@ -1106,9 +1113,15 @@ class Tui(object):
1106 self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' % 1113 self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' %
1107 (key, values[0], values[0] * 100 / total, 1114 (key, values[0], values[0] * 100 / total,
1108 cur)) 1115 cur))
1116 if cur is not '' and key.find('(') is -1:
1117 tavg += cur
1109 row += 1 1118 row += 1
1110 if row == 3: 1119 if row == 3:
1111 self.screen.addstr(4, 1, 'No matching events reported yet') 1120 self.screen.addstr(4, 1, 'No matching events reported yet')
1121 else:
1122 self.screen.addstr(row, 1, '%-40s %10d %8s' %
1123 ('Total', total, tavg if tavg else ''),
1124 curses.A_BOLD)
1112 self.screen.refresh() 1125 self.screen.refresh()
1113 1126
1114 def show_msg(self, text): 1127 def show_msg(self, text):
@@ -1170,7 +1183,7 @@ class Tui(object):
1170 .format(self.stats.fields_filter)) 1183 .format(self.stats.fields_filter))
1171 self.screen.addstr(3, 0, "New regex: ") 1184 self.screen.addstr(3, 0, "New regex: ")
1172 curses.echo() 1185 curses.echo()
1173 regex = self.screen.getstr() 1186 regex = self.screen.getstr().decode(ENCODING)
1174 curses.noecho() 1187 curses.noecho()
1175 if len(regex) == 0: 1188 if len(regex) == 0:
1176 self.stats.fields_filter = DEFAULT_REGEX 1189 self.stats.fields_filter = DEFAULT_REGEX
@@ -1204,7 +1217,7 @@ class Tui(object):
1204 1217
1205 curses.echo() 1218 curses.echo()
1206 self.screen.addstr(3, 0, "Pid [0 or pid]: ") 1219 self.screen.addstr(3, 0, "Pid [0 or pid]: ")
1207 pid = self.screen.getstr() 1220 pid = self.screen.getstr().decode(ENCODING)
1208 curses.noecho() 1221 curses.noecho()
1209 1222
1210 try: 1223 try:
@@ -1233,7 +1246,7 @@ class Tui(object):
1233 self.screen.addstr(2, 0, 'Change delay from %.1fs to ' % 1246 self.screen.addstr(2, 0, 'Change delay from %.1fs to ' %
1234 self._delay_regular) 1247 self._delay_regular)
1235 curses.echo() 1248 curses.echo()
1236 val = self.screen.getstr() 1249 val = self.screen.getstr().decode(ENCODING)
1237 curses.noecho() 1250 curses.noecho()
1238 1251
1239 try: 1252 try:
@@ -1273,7 +1286,7 @@ class Tui(object):
1273 self.print_all_gnames(7) 1286 self.print_all_gnames(7)
1274 curses.echo() 1287 curses.echo()
1275 self.screen.addstr(3, 0, "Guest [ENTER or guest]: ") 1288 self.screen.addstr(3, 0, "Guest [ENTER or guest]: ")
1276 gname = self.screen.getstr() 1289 gname = self.screen.getstr().decode(ENCODING)
1277 curses.noecho() 1290 curses.noecho()
1278 1291
1279 if not gname: 1292 if not gname:
@@ -1354,7 +1367,7 @@ class Tui(object):
1354 if char == 'x': 1367 if char == 'x':
1355 self.update_drilldown() 1368 self.update_drilldown()
1356 # prevents display of current values on next refresh 1369 # prevents display of current values on next refresh
1357 self.stats.get() 1370 self.stats.get(self._display_guests)
1358 except KeyboardInterrupt: 1371 except KeyboardInterrupt:
1359 break 1372 break
1360 except curses.error: 1373 except curses.error:
@@ -1369,25 +1382,25 @@ def batch(stats):
1369 s = stats.get() 1382 s = stats.get()
1370 for key in sorted(s.keys()): 1383 for key in sorted(s.keys()):
1371 values = s[key] 1384 values = s[key]
1372 print '%-42s%10d%10d' % (key, values[0], values[1]) 1385 print('%-42s%10d%10d' % (key, values[0], values[1]))
1373 except KeyboardInterrupt: 1386 except KeyboardInterrupt:
1374 pass 1387 pass
1375 1388
1376 1389
1377def log(stats): 1390def log(stats):
1378 """Prints statistics as reiterating key block, multiple value blocks.""" 1391 """Prints statistics as reiterating key block, multiple value blocks."""
1379 keys = sorted(stats.get().iterkeys()) 1392 keys = sorted(stats.get().keys())
1380 1393
1381 def banner(): 1394 def banner():
1382 for k in keys: 1395 for k in keys:
1383 print '%s' % k, 1396 print(k, end=' ')
1384 print 1397 print()
1385 1398
1386 def statline(): 1399 def statline():
1387 s = stats.get() 1400 s = stats.get()
1388 for k in keys: 1401 for k in keys:
1389 print ' %9d' % s[k][1], 1402 print(' %9d' % s[k][1], end=' ')
1390 print 1403 print()
1391 line = 0 1404 line = 0
1392 banner_repeat = 20 1405 banner_repeat = 20
1393 while True: 1406 while True:
@@ -1447,16 +1460,13 @@ Press any other key to refresh statistics immediately.
1447 try: 1460 try:
1448 pids = Tui.get_pid_from_gname(val) 1461 pids = Tui.get_pid_from_gname(val)
1449 except: 1462 except:
1450 raise optparse.OptionValueError('Error while searching for guest ' 1463 sys.exit('Error while searching for guest "{}". Use "-p" to '
1451 '"{}", use "-p" to specify a pid ' 1464 'specify a pid instead?'.format(val))
1452 'instead'.format(val))
1453 if len(pids) == 0: 1465 if len(pids) == 0:
1454 raise optparse.OptionValueError('No guest by the name "{}" ' 1466 sys.exit('Error: No guest by the name "{}" found'.format(val))
1455 'found'.format(val))
1456 if len(pids) > 1: 1467 if len(pids) > 1:
1457 raise optparse.OptionValueError('Multiple processes found (pids: ' 1468 sys.exit('Error: Multiple processes found (pids: {}). Use "-p" '
1458 '{}) - use "-p" to specify a pid ' 1469 'to specify the desired pid'.format(" ".join(pids)))
1459 'instead'.format(" ".join(pids)))
1460 parser.values.pid = pids[0] 1470 parser.values.pid = pids[0]
1461 1471
1462 optparser = optparse.OptionParser(description=description_text, 1472 optparser = optparse.OptionParser(description=description_text,
@@ -1514,7 +1524,16 @@ Press any other key to refresh statistics immediately.
1514 help='restrict statistics to guest by name', 1524 help='restrict statistics to guest by name',
1515 callback=cb_guest_to_pid, 1525 callback=cb_guest_to_pid,
1516 ) 1526 )
1517 (options, _) = optparser.parse_args(sys.argv) 1527 options, unkn = optparser.parse_args(sys.argv)
1528 if len(unkn) != 1:
1529 sys.exit('Error: Extra argument(s): ' + ' '.join(unkn[1:]))
1530 try:
1531 # verify that we were passed a valid regex up front
1532 re.compile(options.fields)
1533 except re.error:
1534 sys.exit('Error: "' + options.fields + '" is not a valid regular '
1535 'expression')
1536
1518 return options 1537 return options
1519 1538
1520 1539
@@ -1560,16 +1579,13 @@ def main():
1560 1579
1561 stats = Stats(options) 1580 stats = Stats(options)
1562 1581
1563 if options.fields == "help": 1582 if options.fields == 'help':
1564 event_list = "\n" 1583 stats.fields_filter = None
1565 s = stats.get() 1584 event_list = []
1566 for key in s.keys(): 1585 for key in stats.get().keys():
1567 if key.find('(') != -1: 1586 event_list.append(key.split('(', 1)[0])
1568 key = key[0:key.find('(')] 1587 sys.stdout.write(' ' + '\n '.join(sorted(set(event_list))) + '\n')
1569 if event_list.find('\n' + key + '\n') == -1: 1588 sys.exit(0)
1570 event_list += key + '\n'
1571 sys.stdout.write(event_list)
1572 return ""
1573 1589
1574 if options.log: 1590 if options.log:
1575 log(stats) 1591 log(stats)
diff --git a/tools/kvm/kvm_stat/kvm_stat.txt b/tools/kvm/kvm_stat/kvm_stat.txt
index e5cf836be8a1..b5b3810c9e94 100644
--- a/tools/kvm/kvm_stat/kvm_stat.txt
+++ b/tools/kvm/kvm_stat/kvm_stat.txt
@@ -50,6 +50,8 @@ INTERACTIVE COMMANDS
50*s*:: set update interval 50*s*:: set update interval
51 51
52*x*:: toggle reporting of stats for child trace events 52*x*:: toggle reporting of stats for child trace events
53 :: *Note*: The stats for the parents summarize the respective child trace
54 events
53 55
54Press any other key to refresh statistics immediately. 56Press any other key to refresh statistics immediately.
55 57
@@ -86,7 +88,7 @@ OPTIONS
86 88
87-f<fields>:: 89-f<fields>::
88--fields=<fields>:: 90--fields=<fields>::
89 fields to display (regex) 91 fields to display (regex), "-f help" for a list of available events
90 92
91-h:: 93-h::
92--help:: 94--help::
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 1d6907d379c9..5128677e4117 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -46,6 +46,8 @@
46# endif 46# endif
47#endif 47#endif
48 48
49#define min(x, y) ((x) < (y) ? (x) : (y))
50
49static inline __u64 ptr_to_u64(const void *ptr) 51static inline __u64 ptr_to_u64(const void *ptr)
50{ 52{
51 return (__u64) (unsigned long) ptr; 53 return (__u64) (unsigned long) ptr;
@@ -57,10 +59,11 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
57 return syscall(__NR_bpf, cmd, attr, size); 59 return syscall(__NR_bpf, cmd, attr, size);
58} 60}
59 61
60int bpf_create_map_node(enum bpf_map_type map_type, int key_size, 62int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
61 int value_size, int max_entries, __u32 map_flags, 63 int key_size, int value_size, int max_entries,
62 int node) 64 __u32 map_flags, int node)
63{ 65{
66 __u32 name_len = name ? strlen(name) : 0;
64 union bpf_attr attr; 67 union bpf_attr attr;
65 68
66 memset(&attr, '\0', sizeof(attr)); 69 memset(&attr, '\0', sizeof(attr));
@@ -70,6 +73,8 @@ int bpf_create_map_node(enum bpf_map_type map_type, int key_size,
70 attr.value_size = value_size; 73 attr.value_size = value_size;
71 attr.max_entries = max_entries; 74 attr.max_entries = max_entries;
72 attr.map_flags = map_flags; 75 attr.map_flags = map_flags;
76 memcpy(attr.map_name, name, min(name_len, BPF_OBJ_NAME_LEN - 1));
77
73 if (node >= 0) { 78 if (node >= 0) {
74 attr.map_flags |= BPF_F_NUMA_NODE; 79 attr.map_flags |= BPF_F_NUMA_NODE;
75 attr.numa_node = node; 80 attr.numa_node = node;
@@ -81,14 +86,23 @@ int bpf_create_map_node(enum bpf_map_type map_type, int key_size,
81int bpf_create_map(enum bpf_map_type map_type, int key_size, 86int bpf_create_map(enum bpf_map_type map_type, int key_size,
82 int value_size, int max_entries, __u32 map_flags) 87 int value_size, int max_entries, __u32 map_flags)
83{ 88{
84 return bpf_create_map_node(map_type, key_size, value_size, 89 return bpf_create_map_node(map_type, NULL, key_size, value_size,
85 max_entries, map_flags, -1); 90 max_entries, map_flags, -1);
86} 91}
87 92
88int bpf_create_map_in_map_node(enum bpf_map_type map_type, int key_size, 93int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
89 int inner_map_fd, int max_entries, 94 int key_size, int value_size, int max_entries,
95 __u32 map_flags)
96{
97 return bpf_create_map_node(map_type, name, key_size, value_size,
98 max_entries, map_flags, -1);
99}
100
101int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
102 int key_size, int inner_map_fd, int max_entries,
90 __u32 map_flags, int node) 103 __u32 map_flags, int node)
91{ 104{
105 __u32 name_len = name ? strlen(name) : 0;
92 union bpf_attr attr; 106 union bpf_attr attr;
93 107
94 memset(&attr, '\0', sizeof(attr)); 108 memset(&attr, '\0', sizeof(attr));
@@ -99,6 +113,8 @@ int bpf_create_map_in_map_node(enum bpf_map_type map_type, int key_size,
99 attr.inner_map_fd = inner_map_fd; 113 attr.inner_map_fd = inner_map_fd;
100 attr.max_entries = max_entries; 114 attr.max_entries = max_entries;
101 attr.map_flags = map_flags; 115 attr.map_flags = map_flags;
116 memcpy(attr.map_name, name, min(name_len, BPF_OBJ_NAME_LEN - 1));
117
102 if (node >= 0) { 118 if (node >= 0) {
103 attr.map_flags |= BPF_F_NUMA_NODE; 119 attr.map_flags |= BPF_F_NUMA_NODE;
104 attr.numa_node = node; 120 attr.numa_node = node;
@@ -107,19 +123,24 @@ int bpf_create_map_in_map_node(enum bpf_map_type map_type, int key_size,
107 return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); 123 return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
108} 124}
109 125
110int bpf_create_map_in_map(enum bpf_map_type map_type, int key_size, 126int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
111 int inner_map_fd, int max_entries, __u32 map_flags) 127 int key_size, int inner_map_fd, int max_entries,
128 __u32 map_flags)
112{ 129{
113 return bpf_create_map_in_map_node(map_type, key_size, inner_map_fd, 130 return bpf_create_map_in_map_node(map_type, name, key_size,
114 max_entries, map_flags, -1); 131 inner_map_fd, max_entries, map_flags,
132 -1);
115} 133}
116 134
117int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, 135int bpf_load_program_name(enum bpf_prog_type type, const char *name,
118 size_t insns_cnt, const char *license, 136 const struct bpf_insn *insns,
119 __u32 kern_version, char *log_buf, size_t log_buf_sz) 137 size_t insns_cnt, const char *license,
138 __u32 kern_version, char *log_buf,
139 size_t log_buf_sz)
120{ 140{
121 int fd; 141 int fd;
122 union bpf_attr attr; 142 union bpf_attr attr;
143 __u32 name_len = name ? strlen(name) : 0;
123 144
124 bzero(&attr, sizeof(attr)); 145 bzero(&attr, sizeof(attr));
125 attr.prog_type = type; 146 attr.prog_type = type;
@@ -130,6 +151,7 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
130 attr.log_size = 0; 151 attr.log_size = 0;
131 attr.log_level = 0; 152 attr.log_level = 0;
132 attr.kern_version = kern_version; 153 attr.kern_version = kern_version;
154 memcpy(attr.prog_name, name, min(name_len, BPF_OBJ_NAME_LEN - 1));
133 155
134 fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 156 fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
135 if (fd >= 0 || !log_buf || !log_buf_sz) 157 if (fd >= 0 || !log_buf || !log_buf_sz)
@@ -143,6 +165,15 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
143 return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 165 return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
144} 166}
145 167
168int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
169 size_t insns_cnt, const char *license,
170 __u32 kern_version, char *log_buf,
171 size_t log_buf_sz)
172{
173 return bpf_load_program_name(type, NULL, insns, insns_cnt, license,
174 kern_version, log_buf, log_buf_sz);
175}
176
146int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns, 177int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
147 size_t insns_cnt, int strict_alignment, 178 size_t insns_cnt, int strict_alignment,
148 const char *license, __u32 kern_version, 179 const char *license, __u32 kern_version,
@@ -260,6 +291,38 @@ int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
260 return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr)); 291 return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
261} 292}
262 293
294int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
295{
296 union bpf_attr attr;
297
298 bzero(&attr, sizeof(attr));
299 attr.target_fd = target_fd;
300 attr.attach_bpf_fd = prog_fd;
301 attr.attach_type = type;
302
303 return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
304}
305
306int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
307 __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
308{
309 union bpf_attr attr;
310 int ret;
311
312 bzero(&attr, sizeof(attr));
313 attr.query.target_fd = target_fd;
314 attr.query.attach_type = type;
315 attr.query.query_flags = query_flags;
316 attr.query.prog_cnt = *prog_cnt;
317 attr.query.prog_ids = ptr_to_u64(prog_ids);
318
319 ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));
320 if (attach_flags)
321 *attach_flags = attr.query.attach_flags;
322 *prog_cnt = attr.query.prog_cnt;
323 return ret;
324}
325
263int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size, 326int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
264 void *data_out, __u32 *size_out, __u32 *retval, 327 void *data_out, __u32 *size_out, __u32 *retval,
265 __u32 *duration) 328 __u32 *duration)
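To make the new bpf_prog_query() wrapper concrete, a hedged sketch of a caller enumerating programs attached to a cgroup; the cgroup path argument and the BPF_CGROUP_INET_INGRESS attach type are placeholder assumptions:

    /* Sketch: list the IDs of programs attached to a cgroup. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <linux/bpf.h>
    #include "bpf.h"

    int dump_attached_progs(const char *cgroup_path)
    {
            __u32 prog_ids[64], prog_cnt = 64, attach_flags = 0;
            int err, cg_fd = open(cgroup_path, O_RDONLY);

            if (cg_fd < 0)
                    return -1;
            /* prog_cnt is in/out: capacity going in, count coming back. */
            err = bpf_prog_query(cg_fd, BPF_CGROUP_INET_INGRESS, 0,
                                 &attach_flags, prog_ids, &prog_cnt);
            if (!err)
                    for (__u32 i = 0; i < prog_cnt; i++)
                            printf("attached prog id %u\n", prog_ids[i]);
            close(cg_fd);
            return err;
    }
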
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index b8ea5843c39e..6534889e2b2f 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -24,19 +24,28 @@
24#include <linux/bpf.h> 24#include <linux/bpf.h>
25#include <stddef.h> 25#include <stddef.h>
26 26
27int bpf_create_map_node(enum bpf_map_type map_type, int key_size, 27int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
28 int value_size, int max_entries, __u32 map_flags, 28 int key_size, int value_size, int max_entries,
29 int node); 29 __u32 map_flags, int node);
30int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
31 int key_size, int value_size, int max_entries,
32 __u32 map_flags);
30int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size, 33int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
31 int max_entries, __u32 map_flags); 34 int max_entries, __u32 map_flags);
32int bpf_create_map_in_map_node(enum bpf_map_type map_type, int key_size, 35int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
33 int inner_map_fd, int max_entries, 36 int key_size, int inner_map_fd, int max_entries,
34 __u32 map_flags, int node); 37 __u32 map_flags, int node);
35int bpf_create_map_in_map(enum bpf_map_type map_type, int key_size, 38int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
36 int inner_map_fd, int max_entries, __u32 map_flags); 39 int key_size, int inner_map_fd, int max_entries,
40 __u32 map_flags);
37 41
38/* Recommend log buffer size */ 42/* Recommend log buffer size */
39#define BPF_LOG_BUF_SIZE 65536 43#define BPF_LOG_BUF_SIZE 65536
44int bpf_load_program_name(enum bpf_prog_type type, const char *name,
45 const struct bpf_insn *insns,
46 size_t insns_cnt, const char *license,
47 __u32 kern_version, char *log_buf,
48 size_t log_buf_sz);
40int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, 49int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
41 size_t insns_cnt, const char *license, 50 size_t insns_cnt, const char *license,
42 __u32 kern_version, char *log_buf, 51 __u32 kern_version, char *log_buf,
@@ -57,6 +66,7 @@ int bpf_obj_get(const char *pathname);
57int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type, 66int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type,
58 unsigned int flags); 67 unsigned int flags);
59int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type); 68int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
69int bpf_prog_detach2(int prog_fd, int attachable_fd, enum bpf_attach_type type);
60int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size, 70int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
61 void *data_out, __u32 *size_out, __u32 *retval, 71 void *data_out, __u32 *size_out, __u32 *retval,
62 __u32 *duration); 72 __u32 *duration);
@@ -65,5 +75,6 @@ int bpf_map_get_next_id(__u32 start_id, __u32 *next_id);
65int bpf_prog_get_fd_by_id(__u32 id); 75int bpf_prog_get_fd_by_id(__u32 id);
66int bpf_map_get_fd_by_id(__u32 id); 76int bpf_map_get_fd_by_id(__u32 id);
67int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len); 77int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len);
68 78int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
79 __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt);
69#endif 80#endif
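A hedged sketch of the new *_name() entry points declared above (the object names and sizes are arbitrary; names longer than BPF_OBJ_NAME_LEN - 1 are truncated by the wrappers):

    /* Sketch: create a named array map and load a trivial named
     * program ("return 0") so both show up with readable names in
     * introspection tools. */
    #include <linux/bpf.h>
    #include "bpf.h"

    int create_named_objects(void)
    {
            struct bpf_insn insns[] = {
                    { .code = BPF_ALU64 | BPF_MOV | BPF_K,
                      .dst_reg = BPF_REG_0, .imm = 0 },   /* r0 = 0 */
                    { .code = BPF_JMP | BPF_EXIT },        /* exit   */
            };
            int map_fd, prog_fd;

            map_fd = bpf_create_map_name(BPF_MAP_TYPE_ARRAY, "example_map",
                                         sizeof(int), sizeof(long long),
                                         16, 0);
            prog_fd = bpf_load_program_name(BPF_PROG_TYPE_SOCKET_FILTER,
                                            "example_prog", insns, 2, "GPL",
                                            0, NULL, 0);
            return (map_fd < 0 || prog_fd < 0) ? -1 : 0;
    }
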
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 35f6dfcdc565..5aa45f89da93 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -171,6 +171,7 @@ int libbpf_strerror(int err, char *buf, size_t size)
171struct bpf_program { 171struct bpf_program {
172 /* Index in elf obj file, for relocation use. */ 172 /* Index in elf obj file, for relocation use. */
173 int idx; 173 int idx;
174 char *name;
174 char *section_name; 175 char *section_name;
175 struct bpf_insn *insns; 176 struct bpf_insn *insns;
176 size_t insns_cnt; 177 size_t insns_cnt;
@@ -283,6 +284,7 @@ static void bpf_program__exit(struct bpf_program *prog)
283 prog->clear_priv = NULL; 284 prog->clear_priv = NULL;
284 285
285 bpf_program__unload(prog); 286 bpf_program__unload(prog);
287 zfree(&prog->name);
286 zfree(&prog->section_name); 288 zfree(&prog->section_name);
287 zfree(&prog->insns); 289 zfree(&prog->insns);
288 zfree(&prog->reloc_desc); 290 zfree(&prog->reloc_desc);
@@ -293,26 +295,27 @@ static void bpf_program__exit(struct bpf_program *prog)
293} 295}
294 296
295static int 297static int
296bpf_program__init(void *data, size_t size, char *name, int idx, 298bpf_program__init(void *data, size_t size, char *section_name, int idx,
297 struct bpf_program *prog) 299 struct bpf_program *prog)
298{ 300{
299 if (size < sizeof(struct bpf_insn)) { 301 if (size < sizeof(struct bpf_insn)) {
300 pr_warning("corrupted section '%s'\n", name); 302 pr_warning("corrupted section '%s'\n", section_name);
301 return -EINVAL; 303 return -EINVAL;
302 } 304 }
303 305
304 bzero(prog, sizeof(*prog)); 306 bzero(prog, sizeof(*prog));
305 307
306 prog->section_name = strdup(name); 308 prog->section_name = strdup(section_name);
307 if (!prog->section_name) { 309 if (!prog->section_name) {
308 pr_warning("failed to alloc name for prog %s\n", 310 pr_warning("failed to alloc name for prog under section %s\n",
309 name); 311 section_name);
310 goto errout; 312 goto errout;
311 } 313 }
312 314
313 prog->insns = malloc(size); 315 prog->insns = malloc(size);
314 if (!prog->insns) { 316 if (!prog->insns) {
315 pr_warning("failed to alloc insns for %s\n", name); 317 pr_warning("failed to alloc insns for prog under section %s\n",
318 section_name);
316 goto errout; 319 goto errout;
317 } 320 }
318 prog->insns_cnt = size / sizeof(struct bpf_insn); 321 prog->insns_cnt = size / sizeof(struct bpf_insn);
@@ -331,12 +334,12 @@ errout:
331 334
332static int 335static int
333bpf_object__add_program(struct bpf_object *obj, void *data, size_t size, 336bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
334 char *name, int idx) 337 char *section_name, int idx)
335{ 338{
336 struct bpf_program prog, *progs; 339 struct bpf_program prog, *progs;
337 int nr_progs, err; 340 int nr_progs, err;
338 341
339 err = bpf_program__init(data, size, name, idx, &prog); 342 err = bpf_program__init(data, size, section_name, idx, &prog);
340 if (err) 343 if (err)
341 return err; 344 return err;
342 345
@@ -350,8 +353,8 @@ bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
350 * is still valid, so don't need special treat for 353 * is still valid, so don't need special treat for
351 * bpf_close_object(). 354 * bpf_close_object().
352 */ 355 */
353 pr_warning("failed to alloc a new program '%s'\n", 356 pr_warning("failed to alloc a new program under section '%s'\n",
354 name); 357 section_name);
355 bpf_program__exit(&prog); 358 bpf_program__exit(&prog);
356 return -ENOMEM; 359 return -ENOMEM;
357 } 360 }
@@ -364,6 +367,54 @@ bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
364 return 0; 367 return 0;
365} 368}
366 369
370static int
371bpf_object__init_prog_names(struct bpf_object *obj)
372{
373 Elf_Data *symbols = obj->efile.symbols;
374 struct bpf_program *prog;
375 size_t pi, si;
376
377 for (pi = 0; pi < obj->nr_programs; pi++) {
378 char *name = NULL;
379
380 prog = &obj->programs[pi];
381
382 for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
383 si++) {
384 GElf_Sym sym;
385
386 if (!gelf_getsym(symbols, si, &sym))
387 continue;
388 if (sym.st_shndx != prog->idx)
389 continue;
390
391 name = elf_strptr(obj->efile.elf,
392 obj->efile.strtabidx,
393 sym.st_name);
394 if (!name) {
395 pr_warning("failed to get sym name string for prog %s\n",
396 prog->section_name);
397 return -LIBBPF_ERRNO__LIBELF;
398 }
399 }
400
401 if (!name) {
402 pr_warning("failed to find sym for prog %s\n",
403 prog->section_name);
404 return -EINVAL;
405 }
406
407 prog->name = strdup(name);
408 if (!prog->name) {
409 pr_warning("failed to allocate memory for prog sym %s\n",
410 name);
411 return -ENOMEM;
412 }
413 }
414
415 return 0;
416}
417
367static struct bpf_object *bpf_object__new(const char *path, 418static struct bpf_object *bpf_object__new(const char *path,
368 void *obj_buf, 419 void *obj_buf,
369 size_t obj_buf_sz) 420 size_t obj_buf_sz)
@@ -528,31 +579,6 @@ bpf_object__init_kversion(struct bpf_object *obj,
528 return 0; 579 return 0;
529} 580}
530 581
531static int
532bpf_object__validate_maps(struct bpf_object *obj)
533{
534 int i;
535
536 /*
537 * If there's only 1 map, the only error case should have been
538 * catched in bpf_object__init_maps().
539 */
540 if (!obj->maps || !obj->nr_maps || (obj->nr_maps == 1))
541 return 0;
542
543 for (i = 1; i < obj->nr_maps; i++) {
544 const struct bpf_map *a = &obj->maps[i - 1];
545 const struct bpf_map *b = &obj->maps[i];
546
547 if (b->offset - a->offset < sizeof(struct bpf_map_def)) {
548 pr_warning("corrupted map section in %s: map \"%s\" too small\n",
549 obj->path, a->name);
550 return -EINVAL;
551 }
552 }
553 return 0;
554}
555
556static int compare_bpf_map(const void *_a, const void *_b) 582static int compare_bpf_map(const void *_a, const void *_b)
557{ 583{
558 const struct bpf_map *a = _a; 584 const struct bpf_map *a = _a;
@@ -564,7 +590,7 @@ static int compare_bpf_map(const void *_a, const void *_b)
564static int 590static int
565bpf_object__init_maps(struct bpf_object *obj) 591bpf_object__init_maps(struct bpf_object *obj)
566{ 592{
567 int i, map_idx, nr_maps = 0; 593 int i, map_idx, map_def_sz, nr_maps = 0;
568 Elf_Scn *scn; 594 Elf_Scn *scn;
569 Elf_Data *data; 595 Elf_Data *data;
570 Elf_Data *symbols = obj->efile.symbols; 596 Elf_Data *symbols = obj->efile.symbols;
@@ -607,6 +633,15 @@ bpf_object__init_maps(struct bpf_object *obj)
607 if (!nr_maps) 633 if (!nr_maps)
608 return 0; 634 return 0;
609 635
636 /* Assume equally sized map definitions */
637 map_def_sz = data->d_size / nr_maps;
638 if (!data->d_size || (data->d_size % nr_maps) != 0) {
639 pr_warning("unable to determine map definition size "
640 "section %s, %d maps in %zd bytes\n",
641 obj->path, nr_maps, data->d_size);
642 return -EINVAL;
643 }
644
610 obj->maps = calloc(nr_maps, sizeof(obj->maps[0])); 645 obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
611 if (!obj->maps) { 646 if (!obj->maps) {
612 pr_warning("alloc maps for object failed\n"); 647 pr_warning("alloc maps for object failed\n");
@@ -639,7 +674,7 @@ bpf_object__init_maps(struct bpf_object *obj)
639 obj->efile.strtabidx, 674 obj->efile.strtabidx,
640 sym.st_name); 675 sym.st_name);
641 obj->maps[map_idx].offset = sym.st_value; 676 obj->maps[map_idx].offset = sym.st_value;
642 if (sym.st_value + sizeof(struct bpf_map_def) > data->d_size) { 677 if (sym.st_value + map_def_sz > data->d_size) {
643 pr_warning("corrupted maps section in %s: last map \"%s\" too small\n", 678 pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
644 obj->path, map_name); 679 obj->path, map_name);
645 return -EINVAL; 680 return -EINVAL;
@@ -653,12 +688,40 @@ bpf_object__init_maps(struct bpf_object *obj)
653 pr_debug("map %d is \"%s\"\n", map_idx, 688 pr_debug("map %d is \"%s\"\n", map_idx,
654 obj->maps[map_idx].name); 689 obj->maps[map_idx].name);
655 def = (struct bpf_map_def *)(data->d_buf + sym.st_value); 690 def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
656 obj->maps[map_idx].def = *def; 691 /*
692 * If the definition of the map in the object file fits in
693 * bpf_map_def, copy it. Any extra fields in our version
694 * of bpf_map_def will default to zero as a result of the
695 * calloc above.
696 */
697 if (map_def_sz <= sizeof(struct bpf_map_def)) {
698 memcpy(&obj->maps[map_idx].def, def, map_def_sz);
699 } else {
700 /*
701 * Here the map structure being read is bigger than what
702 * we expect, truncate if the excess bits are all zero.
703 * If they are not zero, reject this map as
704 * incompatible.
705 */
706 char *b;
707 for (b = ((char *)def) + sizeof(struct bpf_map_def);
708 b < ((char *)def) + map_def_sz; b++) {
709 if (*b != 0) {
710 pr_warning("maps section in %s: \"%s\" "
711 "has unrecognized, non-zero "
712 "options\n",
713 obj->path, map_name);
714 return -EINVAL;
715 }
716 }
717 memcpy(&obj->maps[map_idx].def, def,
718 sizeof(struct bpf_map_def));
719 }
657 map_idx++; 720 map_idx++;
658 } 721 }
659 722
660 qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map); 723 qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
661 return bpf_object__validate_maps(obj); 724 return 0;
662} 725}
663 726
664static int bpf_object__elf_collect(struct bpf_object *obj) 727static int bpf_object__elf_collect(struct bpf_object *obj)
@@ -766,8 +829,12 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
766 pr_warning("Corrupted ELF file: index of strtab invalid\n"); 829 pr_warning("Corrupted ELF file: index of strtab invalid\n");
767 return LIBBPF_ERRNO__FORMAT; 830 return LIBBPF_ERRNO__FORMAT;
768 } 831 }
769 if (obj->efile.maps_shndx >= 0) 832 if (obj->efile.maps_shndx >= 0) {
770 err = bpf_object__init_maps(obj); 833 err = bpf_object__init_maps(obj);
834 if (err)
835 goto out;
836 }
837 err = bpf_object__init_prog_names(obj);
771out: 838out:
772 return err; 839 return err;
773} 840}
@@ -870,11 +937,12 @@ bpf_object__create_maps(struct bpf_object *obj)
870 struct bpf_map_def *def = &obj->maps[i].def; 937 struct bpf_map_def *def = &obj->maps[i].def;
871 int *pfd = &obj->maps[i].fd; 938 int *pfd = &obj->maps[i].fd;
872 939
873 *pfd = bpf_create_map(def->type, 940 *pfd = bpf_create_map_name(def->type,
874 def->key_size, 941 obj->maps[i].name,
875 def->value_size, 942 def->key_size,
876 def->max_entries, 943 def->value_size,
877 0); 944 def->max_entries,
945 def->map_flags);
878 if (*pfd < 0) { 946 if (*pfd < 0) {
879 size_t j; 947 size_t j;
880 int err = *pfd; 948 int err = *pfd;
@@ -982,7 +1050,7 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
982} 1050}
983 1051
984static int 1052static int
985load_program(enum bpf_prog_type type, struct bpf_insn *insns, 1053load_program(enum bpf_prog_type type, const char *name, struct bpf_insn *insns,
986 int insns_cnt, char *license, u32 kern_version, int *pfd) 1054 int insns_cnt, char *license, u32 kern_version, int *pfd)
987{ 1055{
988 int ret; 1056 int ret;
@@ -995,8 +1063,8 @@ load_program(enum bpf_prog_type type, struct bpf_insn *insns,
995 if (!log_buf) 1063 if (!log_buf)
996 pr_warning("Alloc log buffer for bpf loader error, continue without log\n"); 1064 pr_warning("Alloc log buffer for bpf loader error, continue without log\n");
997 1065
998 ret = bpf_load_program(type, insns, insns_cnt, license, 1066 ret = bpf_load_program_name(type, name, insns, insns_cnt, license,
999 kern_version, log_buf, BPF_LOG_BUF_SIZE); 1067 kern_version, log_buf, BPF_LOG_BUF_SIZE);
1000 1068
1001 if (ret >= 0) { 1069 if (ret >= 0) {
1002 *pfd = ret; 1070 *pfd = ret;
@@ -1021,9 +1089,9 @@ load_program(enum bpf_prog_type type, struct bpf_insn *insns,
1021 if (type != BPF_PROG_TYPE_KPROBE) { 1089 if (type != BPF_PROG_TYPE_KPROBE) {
1022 int fd; 1090 int fd;
1023 1091
1024 fd = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns, 1092 fd = bpf_load_program_name(BPF_PROG_TYPE_KPROBE, name,
1025 insns_cnt, license, kern_version, 1093 insns, insns_cnt, license,
1026 NULL, 0); 1094 kern_version, NULL, 0);
1027 if (fd >= 0) { 1095 if (fd >= 0) {
1028 close(fd); 1096 close(fd);
1029 ret = -LIBBPF_ERRNO__PROGTYPE; 1097 ret = -LIBBPF_ERRNO__PROGTYPE;
@@ -1067,8 +1135,8 @@ bpf_program__load(struct bpf_program *prog,
1067 pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n", 1135 pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
1068 prog->section_name, prog->instances.nr); 1136 prog->section_name, prog->instances.nr);
1069 } 1137 }
1070 err = load_program(prog->type, prog->insns, prog->insns_cnt, 1138 err = load_program(prog->type, prog->name, prog->insns,
1071 license, kern_version, &fd); 1139 prog->insns_cnt, license, kern_version, &fd);
1072 if (!err) 1140 if (!err)
1073 prog->instances.fds[0] = fd; 1141 prog->instances.fds[0] = fd;
1074 goto out; 1142 goto out;
@@ -1096,7 +1164,8 @@ bpf_program__load(struct bpf_program *prog,
1096 continue; 1164 continue;
1097 } 1165 }
1098 1166
1099 err = load_program(prog->type, result.new_insn_ptr, 1167 err = load_program(prog->type, prog->name,
1168 result.new_insn_ptr,
1100 result.new_insn_cnt, 1169 result.new_insn_cnt,
1101 license, kern_version, &fd); 1170 license, kern_version, &fd);
1102 1171
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 7959086eb9c9..6e20003109e0 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -207,6 +207,7 @@ struct bpf_map_def {
207 unsigned int key_size; 207 unsigned int key_size;
208 unsigned int value_size; 208 unsigned int value_size;
209 unsigned int max_entries; 209 unsigned int max_entries;
210 unsigned int map_flags;
210}; 211};
211 212
212/* 213/*
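With map_flags now part of struct bpf_map_def, a map definition inside a BPF object can carry flags through to the kernel; a hedged sketch follows (the local struct mirrors libbpf's layout, as BPF-side sample code conventionally does when built with clang -target bpf):

    /* Sketch: hash map declared in a BPF object file, with
     * BPF_F_NO_PREALLOC carried in the new map_flags member. */
    #include <linux/bpf.h>

    struct bpf_map_def {
            unsigned int type;
            unsigned int key_size;
            unsigned int value_size;
            unsigned int max_entries;
            unsigned int map_flags;
    };

    struct bpf_map_def flows __attribute__((section("maps"), used)) = {
            .type        = BPF_MAP_TYPE_HASH,
            .key_size    = sizeof(unsigned int),
            .value_size  = sizeof(unsigned long long),
            .max_entries = 1024,
            .map_flags   = BPF_F_NO_PREALLOC,
    };
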
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index 7c214ceb9386..315df0a70265 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -436,13 +436,13 @@ create_arg_exp(enum filter_exp_type etype)
436 return NULL; 436 return NULL;
437 437
438 arg->type = FILTER_ARG_EXP; 438 arg->type = FILTER_ARG_EXP;
439 arg->op.type = etype; 439 arg->exp.type = etype;
440 440
441 return arg; 441 return arg;
442} 442}
443 443
444static struct filter_arg * 444static struct filter_arg *
445create_arg_cmp(enum filter_exp_type etype) 445create_arg_cmp(enum filter_cmp_type ctype)
446{ 446{
447 struct filter_arg *arg; 447 struct filter_arg *arg;
448 448
@@ -452,7 +452,7 @@ create_arg_cmp(enum filter_exp_type etype)
452 452
453 /* Use NUM and change if necessary */ 453 /* Use NUM and change if necessary */
454 arg->type = FILTER_ARG_NUM; 454 arg->type = FILTER_ARG_NUM;
455 arg->op.type = etype; 455 arg->num.type = ctype;
456 456
457 return arg; 457 return arg;
458} 458}
diff --git a/tools/objtool/.gitignore b/tools/objtool/.gitignore
index d3102c865a95..914cff12899b 100644
--- a/tools/objtool/.gitignore
+++ b/tools/objtool/.gitignore
@@ -1,3 +1,3 @@
1arch/x86/insn/inat-tables.c 1arch/x86/lib/inat-tables.c
2objtool 2objtool
3fixdep 3fixdep
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 424b1965d06f..ae0272f9a091 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -7,9 +7,11 @@ ARCH := x86
7endif 7endif
8 8
9# always use the host compiler 9# always use the host compiler
10CC = gcc 10HOSTCC ?= gcc
11LD = ld 11HOSTLD ?= ld
12AR = ar 12CC = $(HOSTCC)
13LD = $(HOSTLD)
14AR = ar
13 15
14ifeq ($(srctree),) 16ifeq ($(srctree),)
15srctree := $(patsubst %/,%,$(dir $(CURDIR))) 17srctree := $(patsubst %/,%,$(dir $(CURDIR)))
@@ -25,7 +27,9 @@ OBJTOOL_IN := $(OBJTOOL)-in.o
25 27
26all: $(OBJTOOL) 28all: $(OBJTOOL)
27 29
28INCLUDES := -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi 30INCLUDES := -I$(srctree)/tools/include \
31 -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
32 -I$(srctree)/tools/objtool/arch/$(ARCH)/include
29WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed 33WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
30CFLAGS += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES) 34CFLAGS += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
31LDFLAGS += -lelf $(LIBSUBCMD) 35LDFLAGS += -lelf $(LIBSUBCMD)
@@ -41,22 +45,8 @@ include $(srctree)/tools/build/Makefile.include
41$(OBJTOOL_IN): fixdep FORCE 45$(OBJTOOL_IN): fixdep FORCE
42 @$(MAKE) $(build)=objtool 46 @$(MAKE) $(build)=objtool
43 47
44# Busybox's diff doesn't have -I, avoid warning in that case
45#
46$(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN) 48$(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN)
47 @(diff -I 2>&1 | grep -q 'option requires an argument' && \ 49 @./sync-check.sh
48 test -d ../../kernel -a -d ../../tools -a -d ../objtool && (( \
49 diff -I'^#include' arch/x86/insn/insn.c ../../arch/x86/lib/insn.c >/dev/null && \
50 diff -I'^#include' arch/x86/insn/inat.c ../../arch/x86/lib/inat.c >/dev/null && \
51 diff arch/x86/insn/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null && \
52 diff arch/x86/insn/gen-insn-attr-x86.awk ../../arch/x86/tools/gen-insn-attr-x86.awk >/dev/null && \
53 diff -I'^#include' arch/x86/insn/insn.h ../../arch/x86/include/asm/insn.h >/dev/null && \
54 diff -I'^#include' arch/x86/insn/inat.h ../../arch/x86/include/asm/inat.h >/dev/null && \
55 diff -I'^#include' arch/x86/insn/inat_types.h ../../arch/x86/include/asm/inat_types.h >/dev/null) \
56 || echo "warning: objtool: x86 instruction decoder differs from kernel" >&2 )) || true
57 @(test -d ../../kernel -a -d ../../tools -a -d ../objtool && (( \
58 diff ../../arch/x86/include/asm/orc_types.h orc_types.h >/dev/null) \
59 || echo "warning: objtool: orc_types.h differs from kernel" >&2 )) || true
60 $(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@ 50 $(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@
61 51
62 52
@@ -66,7 +56,7 @@ $(LIBSUBCMD): fixdep FORCE
66clean: 56clean:
67 $(call QUIET_CLEAN, objtool) $(RM) $(OBJTOOL) 57 $(call QUIET_CLEAN, objtool) $(RM) $(OBJTOOL)
68 $(Q)find $(OUTPUT) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete 58 $(Q)find $(OUTPUT) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
69 $(Q)$(RM) $(OUTPUT)arch/x86/insn/inat-tables.c $(OUTPUT)fixdep 59 $(Q)$(RM) $(OUTPUT)arch/x86/lib/inat-tables.c $(OUTPUT)fixdep
70 60
71FORCE: 61FORCE:
72 62
diff --git a/tools/objtool/arch/x86/Build b/tools/objtool/arch/x86/Build
index debbdb0b5c43..b998412c017d 100644
--- a/tools/objtool/arch/x86/Build
+++ b/tools/objtool/arch/x86/Build
@@ -1,12 +1,12 @@
1objtool-y += decode.o 1objtool-y += decode.o
2 2
3inat_tables_script = arch/x86/insn/gen-insn-attr-x86.awk 3inat_tables_script = arch/x86/tools/gen-insn-attr-x86.awk
4inat_tables_maps = arch/x86/insn/x86-opcode-map.txt 4inat_tables_maps = arch/x86/lib/x86-opcode-map.txt
5 5
6$(OUTPUT)arch/x86/insn/inat-tables.c: $(inat_tables_script) $(inat_tables_maps) 6$(OUTPUT)arch/x86/lib/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
7 $(call rule_mkdir) 7 $(call rule_mkdir)
8 $(Q)$(call echo-cmd,gen)$(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ 8 $(Q)$(call echo-cmd,gen)$(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@
9 9
10$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/insn/inat-tables.c 10$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/lib/inat-tables.c
11 11
12CFLAGS_decode.o += -I$(OUTPUT)arch/x86/insn 12CFLAGS_decode.o += -I$(OUTPUT)arch/x86/lib
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 34a579f806e3..540a209b78ab 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -19,9 +19,9 @@
19#include <stdlib.h> 19#include <stdlib.h>
20 20
21#define unlikely(cond) (cond) 21#define unlikely(cond) (cond)
22#include "insn/insn.h" 22#include <asm/insn.h>
23#include "insn/inat.c" 23#include "lib/inat.c"
24#include "insn/insn.c" 24#include "lib/insn.c"
25 25
26#include "../../elf.h" 26#include "../../elf.h"
27#include "../../arch.h" 27#include "../../arch.h"
@@ -138,7 +138,7 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
138 *type = INSN_STACK; 138 *type = INSN_STACK;
139 op->src.type = OP_SRC_ADD; 139 op->src.type = OP_SRC_ADD;
140 op->src.reg = op_to_cfi_reg[modrm_reg][rex_r]; 140 op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
141 op->dest.type = OP_SRC_REG; 141 op->dest.type = OP_DEST_REG;
142 op->dest.reg = CFI_SP; 142 op->dest.reg = CFI_SP;
143 } 143 }
144 break; 144 break;
diff --git a/tools/objtool/arch/x86/insn/inat.h b/tools/objtool/arch/x86/include/asm/inat.h
index 125ecd2a300d..1c78580e58be 100644
--- a/tools/objtool/arch/x86/insn/inat.h
+++ b/tools/objtool/arch/x86/include/asm/inat.h
@@ -20,7 +20,7 @@
20 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 20 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 * 21 *
22 */ 22 */
23#include "inat_types.h" 23#include <asm/inat_types.h>
24 24
25/* 25/*
26 * Internal bits. Don't use bitmasks directly, because these bits are 26 * Internal bits. Don't use bitmasks directly, because these bits are
@@ -97,6 +97,16 @@
97#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM) 97#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM)
98#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS) 98#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS)
99 99
100/* Identifiers for segment registers */
101#define INAT_SEG_REG_IGNORE 0
102#define INAT_SEG_REG_DEFAULT 1
103#define INAT_SEG_REG_CS 2
104#define INAT_SEG_REG_SS 3
105#define INAT_SEG_REG_DS 4
106#define INAT_SEG_REG_ES 5
107#define INAT_SEG_REG_FS 6
108#define INAT_SEG_REG_GS 7
109
100/* Attribute search APIs */ 110/* Attribute search APIs */
101extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode); 111extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
102extern int inat_get_last_prefix_id(insn_byte_t last_pfx); 112extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
diff --git a/tools/objtool/arch/x86/insn/inat_types.h b/tools/objtool/arch/x86/include/asm/inat_types.h
index cb3c20ce39cf..cb3c20ce39cf 100644
--- a/tools/objtool/arch/x86/insn/inat_types.h
+++ b/tools/objtool/arch/x86/include/asm/inat_types.h
diff --git a/tools/objtool/arch/x86/insn/insn.h b/tools/objtool/arch/x86/include/asm/insn.h
index e23578c7b1be..b3e32b010ab1 100644
--- a/tools/objtool/arch/x86/insn/insn.h
+++ b/tools/objtool/arch/x86/include/asm/insn.h
@@ -21,7 +21,7 @@
21 */ 21 */
22 22
23/* insn_attr_t is defined in inat.h */ 23/* insn_attr_t is defined in inat.h */
24#include "inat.h" 24#include <asm/inat.h>
25 25
26struct insn_field { 26struct insn_field {
27 union { 27 union {
diff --git a/tools/objtool/orc_types.h b/tools/objtool/arch/x86/include/asm/orc_types.h
index 9c9dc579bd7d..9c9dc579bd7d 100644
--- a/tools/objtool/orc_types.h
+++ b/tools/objtool/arch/x86/include/asm/orc_types.h
diff --git a/tools/objtool/arch/x86/insn/inat.c b/tools/objtool/arch/x86/lib/inat.c
index e4bf28e6f4c7..c1f01a8e9f65 100644
--- a/tools/objtool/arch/x86/insn/inat.c
+++ b/tools/objtool/arch/x86/lib/inat.c
@@ -18,7 +18,7 @@
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 * 19 *
20 */ 20 */
21#include "insn.h" 21#include <asm/insn.h>
22 22
23/* Attribute tables are generated from opcode map */ 23/* Attribute tables are generated from opcode map */
24#include "inat-tables.c" 24#include "inat-tables.c"
diff --git a/tools/objtool/arch/x86/insn/insn.c b/tools/objtool/arch/x86/lib/insn.c
index ca983e2bea8b..1088eb8f3a5f 100644
--- a/tools/objtool/arch/x86/insn/insn.c
+++ b/tools/objtool/arch/x86/lib/insn.c
@@ -23,8 +23,8 @@
23#else 23#else
24#include <string.h> 24#include <string.h>
25#endif 25#endif
26#include "inat.h" 26#include <asm/inat.h>
27#include "insn.h" 27#include <asm/insn.h>
28 28
29/* Verify next sizeof(t) bytes can be on the same instruction */ 29/* Verify next sizeof(t) bytes can be on the same instruction */
30#define validate_next(t, insn, n) \ 30#define validate_next(t, insn, n) \
diff --git a/tools/objtool/arch/x86/insn/x86-opcode-map.txt b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
index 12e377184ee4..e0b85930dd77 100644
--- a/tools/objtool/arch/x86/insn/x86-opcode-map.txt
+++ b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
@@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
607fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) 607fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
608fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) 608fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
609fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) 609fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
610ff: 610ff: UD0
611EndTable 611EndTable
612 612
613Table: 3-byte opcode 1 (0x0f 0x38) 613Table: 3-byte opcode 1 (0x0f 0x38)
@@ -717,7 +717,7 @@ AVXcode: 2
7177e: vpermt2d/q Vx,Hx,Wx (66),(ev) 7177e: vpermt2d/q Vx,Hx,Wx (66),(ev)
7187f: vpermt2ps/d Vx,Hx,Wx (66),(ev) 7187f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
71980: INVEPT Gy,Mdq (66) 71980: INVEPT Gy,Mdq (66)
72081: INVPID Gy,Mdq (66) 72081: INVVPID Gy,Mdq (66)
72182: INVPCID Gy,Mdq (66) 72182: INVPCID Gy,Mdq (66)
72283: vpmultishiftqb Vx,Hx,Wx (66),(ev) 72283: vpmultishiftqb Vx,Hx,Wx (66),(ev)
72388: vexpandps/d Vpd,Wpd (66),(ev) 72388: vexpandps/d Vpd,Wpd (66),(ev)
@@ -896,7 +896,7 @@ EndTable
896 896
897GrpTable: Grp3_1 897GrpTable: Grp3_1
8980: TEST Eb,Ib 8980: TEST Eb,Ib
8991: 8991: TEST Eb,Ib
9002: NOT Eb 9002: NOT Eb
9013: NEG Eb 9013: NEG Eb
9024: MUL AL,Eb 9024: MUL AL,Eb
@@ -970,6 +970,15 @@ GrpTable: Grp9
970EndTable 970EndTable
971 971
972GrpTable: Grp10 972GrpTable: Grp10
973# all are UD1
9740: UD1
9751: UD1
9762: UD1
9773: UD1
9784: UD1
9795: UD1
9806: UD1
9817: UD1
973EndTable 982EndTable
974 983
975# Grp11A and Grp11B are expressed as Grp11 in Intel SDM 984# Grp11A and Grp11B are expressed as Grp11 in Intel SDM
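For context on the opcode-map changes above: 0f ff is now named UD0 and every ModRM encoding of Grp10 (0f b9) is UD1; together with UD2 these are the architecturally defined undefined-instruction opcodes that raise #UD. A minimal byte-level illustration (encodings are standard x86, not copied from the patch):

	/* The three "intentionally undefined" opcodes, as raw opcode bytes: */
	static const unsigned char ud0[] = { 0x0f, 0xff }; /* UD0, 2-byte map ff: */
	static const unsigned char ud1[] = { 0x0f, 0xb9 }; /* UD1, Grp10          */
	static const unsigned char ud2[] = { 0x0f, 0x0b }; /* UD2                 */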
diff --git a/tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
index b02a36b2c14f..b02a36b2c14f 100644
--- a/tools/objtool/arch/x86/insn/gen-insn-attr-x86.awk
+++ b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c
index 4c6b5c9ef073..91e8e19ff5e0 100644
--- a/tools/objtool/builtin-orc.c
+++ b/tools/objtool/builtin-orc.c
@@ -44,6 +44,9 @@ int cmd_orc(int argc, const char **argv)
44 const char *objname; 44 const char *objname;
45 45
46 argc--; argv++; 46 argc--; argv++;
47 if (argc <= 0)
48 usage_with_options(orc_usage, check_options);
49
47 if (!strncmp(argv[0], "gen", 3)) { 50 if (!strncmp(argv[0], "gen", 3)) {
48 argc = parse_options(argc, argv, check_options, orc_usage, 0); 51 argc = parse_options(argc, argv, check_options, orc_usage, 0);
49 if (argc != 1) 52 if (argc != 1)
@@ -52,7 +55,6 @@ int cmd_orc(int argc, const char **argv)
52 objname = argv[0]; 55 objname = argv[0];
53 56
54 return check(objname, no_fp, no_unreachable, true); 57 return check(objname, no_fp, no_unreachable, true);
55
56 } 58 }
57 59
58 if (!strcmp(argv[0], "dump")) { 60 if (!strcmp(argv[0], "dump")) {
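For context (not from the patch text itself): cmd_orc() dispatches on its first argument, so the added argc check makes a bare invocation print usage instead of passing a presumably NULL argv[0] to strncmp(). The two subcommands visible in this hunk are, roughly:

	objtool orc gen  <object.o>    # generate ORC unwind sections
	objtool orc dump <object.o>    # dump the ORC tables of an object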
diff --git a/tools/objtool/orc.h b/tools/objtool/orc.h
index a4139e386ef3..b0e92a6d0903 100644
--- a/tools/objtool/orc.h
+++ b/tools/objtool/orc.h
@@ -18,7 +18,7 @@
18#ifndef _ORC_H 18#ifndef _ORC_H
19#define _ORC_H 19#define _ORC_H
20 20
21#include "orc_types.h" 21#include <asm/orc_types.h>
22 22
23struct objtool_file; 23struct objtool_file;
24 24
diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c
index 36c5bf6a2675..c3343820916a 100644
--- a/tools/objtool/orc_dump.c
+++ b/tools/objtool/orc_dump.c
@@ -76,7 +76,8 @@ int orc_dump(const char *_objname)
76 int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0; 76 int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0;
77 struct orc_entry *orc = NULL; 77 struct orc_entry *orc = NULL;
78 char *name; 78 char *name;
79 unsigned long nr_sections, orc_ip_addr = 0; 79 size_t nr_sections;
80 Elf64_Addr orc_ip_addr = 0;
80 size_t shstrtab_idx; 81 size_t shstrtab_idx;
81 Elf *elf; 82 Elf *elf;
82 Elf_Scn *scn; 83 Elf_Scn *scn;
@@ -187,10 +188,10 @@ int orc_dump(const char *_objname)
187 return -1; 188 return -1;
188 } 189 }
189 190
190 printf("%s+%lx:", name, rela.r_addend); 191 printf("%s+%llx:", name, (unsigned long long)rela.r_addend);
191 192
192 } else { 193 } else {
193 printf("%lx:", orc_ip_addr + (i * sizeof(int)) + orc_ip[i]); 194 printf("%llx:", (unsigned long long)(orc_ip_addr + (i * sizeof(int)) + orc_ip[i]));
194 } 195 }
195 196
196 197
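A minimal standalone sketch of the format change above (the values are made up; the point is the cast to unsigned long long so that %llx stays correct regardless of how the 64-bit ELF types are defined on the host, e.g. on 32-bit builds):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t addend = 0x1234;                 /* stand-in for rela.r_addend */
		uint64_t ip     = 0xffffffff81000000ULL;  /* stand-in for orc_ip_addr   */

		printf("sym+%llx:\n", (unsigned long long)addend);
		printf("%llx:\n", (unsigned long long)ip);
		return 0;
	}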
diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c
index e5ca31429c9b..e61fe703197b 100644
--- a/tools/objtool/orc_gen.c
+++ b/tools/objtool/orc_gen.c
@@ -165,6 +165,8 @@ int create_orc_sections(struct objtool_file *file)
165 165
166 /* create .orc_unwind_ip and .rela.orc_unwind_ip sections */ 166 /* create .orc_unwind_ip and .rela.orc_unwind_ip sections */
167 sec = elf_create_section(file->elf, ".orc_unwind_ip", sizeof(int), idx); 167 sec = elf_create_section(file->elf, ".orc_unwind_ip", sizeof(int), idx);
168 if (!sec)
169 return -1;
168 170
169 ip_relasec = elf_create_rela_section(file->elf, sec); 171 ip_relasec = elf_create_rela_section(file->elf, sec);
170 if (!ip_relasec) 172 if (!ip_relasec)
diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh
new file mode 100755
index 000000000000..1470e74e9d66
--- /dev/null
+++ b/tools/objtool/sync-check.sh
@@ -0,0 +1,29 @@
1#!/bin/sh
2# SPDX-License-Identifier: GPL-2.0
3
4FILES='
5arch/x86/lib/insn.c
6arch/x86/lib/inat.c
7arch/x86/lib/x86-opcode-map.txt
8arch/x86/tools/gen-insn-attr-x86.awk
9arch/x86/include/asm/insn.h
10arch/x86/include/asm/inat.h
11arch/x86/include/asm/inat_types.h
12arch/x86/include/asm/orc_types.h
13'
14
15check()
16{
17 local file=$1
18
19 diff $file ../../$file > /dev/null ||
20 echo "Warning: synced file at 'tools/objtool/$file' differs from latest kernel version at '$file'"
21}
22
23if [ ! -d ../../kernel ] || [ ! -d ../../tools ] || [ ! -d ../objtool ]; then
24 exit 0
25fi
26
27for i in $FILES; do
28 check $i
29done
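In short, the new script diffs each file listed in FILES against its master copy at the kernel tree root and, when run from inside a kernel tree, prints one warning per file that has drifted, e.g.:

	Warning: synced file at 'tools/objtool/arch/x86/lib/insn.c' differs from latest kernel version at 'arch/x86/lib/insn.c'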
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index f709de54707b..e2a897ae3596 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -8,7 +8,8 @@ perf-list - List all symbolic event types
8SYNOPSIS 8SYNOPSIS
9-------- 9--------
10[verse] 10[verse]
11'perf list' [--no-desc] [--long-desc] [hw|sw|cache|tracepoint|pmu|sdt|event_glob] 11'perf list' [--no-desc] [--long-desc]
12 [hw|sw|cache|tracepoint|pmu|sdt|metric|metricgroup|event_glob]
12 13
13DESCRIPTION 14DESCRIPTION
14----------- 15-----------
@@ -47,6 +48,8 @@ counted. The following modifiers exist:
47 P - use maximum detected precise level 48 P - use maximum detected precise level
48 S - read sample value (PERF_SAMPLE_READ) 49 S - read sample value (PERF_SAMPLE_READ)
49 D - pin the event to the PMU 50 D - pin the event to the PMU
 51 W - group is weak and will fall back to non-group if not schedulable,
52 only supported in 'perf stat' for now.
50 53
51The 'p' modifier can be used for specifying how precise the instruction 54The 'p' modifier can be used for specifying how precise the instruction
52address should be. The 'p' modifier can be specified multiple times: 55address should be. The 'p' modifier can be specified multiple times:
@@ -201,7 +204,7 @@ For example Intel Core CPUs typically have four generic performance counters
201for the core, plus three fixed counters for instructions, cycles and 204for the core, plus three fixed counters for instructions, cycles and
202ref-cycles. Some special events have restrictions on which counter they 205ref-cycles. Some special events have restrictions on which counter they
203can schedule, and may not support multiple instances in a single group. 206can schedule, and may not support multiple instances in a single group.
204When too many events are specified in the group none of them will not 207When too many events are specified in the group some of them will not
205be measured. 208be measured.
206 209
207Globally pinned events can limit the number of counters available for 210Globally pinned events can limit the number of counters available for
@@ -246,6 +249,10 @@ To limit the list use:
246 249
247. 'sdt' to list all Statically Defined Tracepoint events. 250. 'sdt' to list all Statically Defined Tracepoint events.
248 251
252. 'metric' to list metrics
253
254. 'metricgroup' to list metricgroups with metrics.
255
249. If none of the above is matched, it will apply the supplied glob to all 256. If none of the above is matched, it will apply the supplied glob to all
250 events, printing the ones that match. 257 events, printing the ones that match.
251 258
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 63526f4416ea..5a626ef666c2 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -377,6 +377,8 @@ symbolic names, e.g. on x86, ax, si. To list the available registers use
377--intr-regs=\?. To name registers, pass a comma separated list such as 377--intr-regs=\?. To name registers, pass a comma separated list such as
378--intr-regs=ax,bx. The list of registers is architecture dependent. 378--intr-regs=ax,bx. The list of registers is architecture dependent.
379 379
380--user-regs::
381Capture user registers at sample time. Same arguments as -I.
380 382
381--running-time:: 383--running-time::
382Record running and enabled time for read events (:S) 384Record running and enabled time for read events (:S)
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 383a98d992ed..ddde2b54af57 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -434,7 +434,8 @@ include::itrace.txt[]
434 434
435--inline:: 435--inline::
436 If a callgraph address belongs to an inlined function, the inline stack 436 If a callgraph address belongs to an inlined function, the inline stack
437 will be printed. Each entry is function name or file/line. 437 will be printed. Each entry is function name or file/line. Enabled by
438 default, disable with --no-inline.
438 439
439include::callchain-overhead-calculation.txt[] 440include::callchain-overhead-calculation.txt[]
440 441
diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt
index a092a2499e8f..55b67338548e 100644
--- a/tools/perf/Documentation/perf-sched.txt
+++ b/tools/perf/Documentation/perf-sched.txt
@@ -106,6 +106,14 @@ OPTIONS for 'perf sched timehist'
106--max-stack:: 106--max-stack::
107 Maximum number of functions to display in backtrace, default 5. 107 Maximum number of functions to display in backtrace, default 5.
108 108
109-p=::
110--pid=::
111 Only show events for given process ID (comma separated list).
112
113-t=::
114--tid=::
115 Only show events for given thread ID (comma separated list).
116
109-s:: 117-s::
110--summary:: 118--summary::
111 Show only a summary of scheduling by thread with min, max, and average 119 Show only a summary of scheduling by thread with min, max, and average
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 18dfcfa38454..2811fcf684cb 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -116,8 +116,8 @@ OPTIONS
116--fields:: 116--fields::
117 Comma separated list of fields to print. Options are: 117 Comma separated list of fields to print. Options are:
118 comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff, 118 comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
119 srcline, period, iregs, brstack, brstacksym, flags, bpf-output, brstackinsn, brstackoff, 119 srcline, period, iregs, uregs, brstack, brstacksym, flags, bpf-output, brstackinsn,
120 callindent, insn, insnlen, synth, phys_addr. 120 brstackoff, callindent, insn, insnlen, synth, phys_addr.
121 Field list can be prepended with the type, trace, sw or hw, 121 Field list can be prepended with the type, trace, sw or hw,
122 to indicate to which event type the field list applies. 122 to indicate to which event type the field list applies.
123 e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace 123 e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace
@@ -325,9 +325,14 @@ include::itrace.txt[]
325 Set the maximum number of program blocks to print with brstackasm for 325 Set the maximum number of program blocks to print with brstackasm for
326 each sample. 326 each sample.
327 327
328--per-event-dump::
329 Create per event files with a "perf.data.EVENT.dump" name instead of
330 printing to stdout, useful, for instance, for generating flamegraphs.
331
328--inline:: 332--inline::
329 If a callgraph address belongs to an inlined function, the inline stack 333 If a callgraph address belongs to an inlined function, the inline stack
330 will be printed. Each entry has function name and file/line. 334 will be printed. Each entry has function name and file/line. Enabled by
335 default, disable with --no-inline.
331 336
332SEE ALSO 337SEE ALSO
333-------- 338--------
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index c37d61682dfb..823fce7674bb 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -199,6 +199,13 @@ Aggregate counts per processor socket for system-wide mode measurements.
199--per-core:: 199--per-core::
200Aggregate counts per physical processor for system-wide mode measurements. 200Aggregate counts per physical processor for system-wide mode measurements.
201 201
202-M::
203--metrics::
204Print metrics or metricgroups specified in a comma separated list.
205For a group, all metrics from the group are added.
206The events from the metrics are automatically measured.
207See perf list output for the possible metrics and metricgroups.
208
202-A:: 209-A::
203--no-aggr:: 210--no-aggr::
204Do not aggregate counts across all monitored CPUs. 211Do not aggregate counts across all monitored CPUs.
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index d864ea6fd367..4353262bc462 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -240,6 +240,9 @@ Default is to monitor all CPUS.
240--force:: 240--force::
241 Don't do ownership validation. 241 Don't do ownership validation.
242 242
243--num-thread-synthesize::
244 The number of threads to run when synthesizing events for existing processes.
 245 By default, the number of threads equals the number of online CPUs.
243 246
244INTERACTIVE PROMPTING KEYS 247INTERACTIVE PROMPTING KEYS
245-------------------------- 248--------------------------
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 63f534a0902f..0294bfb6c5f8 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -53,6 +53,10 @@ ifeq ($(SRCARCH),arm64)
53 LIBUNWIND_LIBS = -lunwind -lunwind-aarch64 53 LIBUNWIND_LIBS = -lunwind -lunwind-aarch64
54endif 54endif
55 55
56ifeq ($(ARCH),s390)
57 NO_PERF_REGS := 0
58endif
59
56ifeq ($(NO_PERF_REGS),0) 60ifeq ($(NO_PERF_REGS),0)
57 $(call detected,CONFIG_PERF_REGS) 61 $(call detected,CONFIG_PERF_REGS)
58endif 62endif
@@ -61,7 +65,7 @@ endif
61# Disable it on all other architectures in case libdw unwind 65# Disable it on all other architectures in case libdw unwind
62# support is detected in system. Add supported architectures 66# support is detected in system. Add supported architectures
63# to the check. 67# to the check.
64ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm powerpc)) 68ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm powerpc s390))
65 NO_LIBDW_DWARF_UNWIND := 1 69 NO_LIBDW_DWARF_UNWIND := 1
66endif 70endif
67 71
@@ -184,9 +188,7 @@ ifdef PYTHON_CONFIG
184 PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) 188 PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
185 PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil 189 PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
186 PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null) 190 PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
187 ifeq ($(CC_NO_CLANG), 1) 191 PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS))
188 PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS))
189 endif
190 FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) 192 FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
191endif 193endif
192 194
@@ -572,14 +574,15 @@ ifndef NO_GTK2
572 endif 574 endif
573endif 575endif
574 576
575
576ifdef NO_LIBPERL 577ifdef NO_LIBPERL
577 CFLAGS += -DNO_LIBPERL 578 CFLAGS += -DNO_LIBPERL
578else 579else
579 PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null) 580 PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null)
580 PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS)) 581 PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS))
581 PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS)) 582 PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
582 PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null` 583 PERL_EMBED_CCOPTS = $(shell perl -MExtUtils::Embed -e ccopts 2>/dev/null)
584 PERL_EMBED_CCOPTS := $(filter-out -specs=%,$(PERL_EMBED_CCOPTS))
585 PERL_EMBED_LDOPTS := $(filter-out -specs=%,$(PERL_EMBED_LDOPTS))
583 FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS) 586 FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
584 587
585 ifneq ($(feature-libperl), 1) 588 ifneq ($(feature-libperl), 1)
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 91ef44bfaf3e..68cf1360a3f3 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -173,7 +173,7 @@ AWK = awk
173# non-config cases 173# non-config cases
174config := 1 174config := 1
175 175
176NON_CONFIG_TARGETS := clean TAGS tags cscope help install-doc install-man install-html install-info install-pdf doc man html info pdf 176NON_CONFIG_TARGETS := clean python-clean TAGS tags cscope help install-doc install-man install-html install-info install-pdf doc man html info pdf
177 177
178ifdef MAKECMDGOALS 178ifdef MAKECMDGOALS
179ifeq ($(filter-out $(NON_CONFIG_TARGETS),$(MAKECMDGOALS)),) 179ifeq ($(filter-out $(NON_CONFIG_TARGETS),$(MAKECMDGOALS)),)
@@ -420,6 +420,13 @@ sndrv_pcm_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh
420$(sndrv_pcm_ioctl_array): $(sndrv_pcm_hdr_dir)/asound.h $(sndrv_pcm_ioctl_tbl) 420$(sndrv_pcm_ioctl_array): $(sndrv_pcm_hdr_dir)/asound.h $(sndrv_pcm_ioctl_tbl)
421 $(Q)$(SHELL) '$(sndrv_pcm_ioctl_tbl)' $(sndrv_pcm_hdr_dir) > $@ 421 $(Q)$(SHELL) '$(sndrv_pcm_ioctl_tbl)' $(sndrv_pcm_hdr_dir) > $@
422 422
423kcmp_type_array := $(beauty_outdir)/kcmp_type_array.c
424kcmp_hdr_dir := $(srctree)/tools/include/uapi/linux/
425kcmp_type_tbl := $(srctree)/tools/perf/trace/beauty/kcmp_type.sh
426
427$(kcmp_type_array): $(kcmp_hdr_dir)/kcmp.h $(kcmp_type_tbl)
428 $(Q)$(SHELL) '$(kcmp_type_tbl)' $(kcmp_hdr_dir) > $@
429
423kvm_ioctl_array := $(beauty_ioctl_outdir)/kvm_ioctl_array.c 430kvm_ioctl_array := $(beauty_ioctl_outdir)/kvm_ioctl_array.c
424kvm_hdr_dir := $(srctree)/tools/include/uapi/linux 431kvm_hdr_dir := $(srctree)/tools/include/uapi/linux
425kvm_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/kvm_ioctl.sh 432kvm_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/kvm_ioctl.sh
@@ -441,6 +448,20 @@ perf_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/perf_ioctl.sh
441$(perf_ioctl_array): $(perf_hdr_dir)/perf_event.h $(perf_ioctl_tbl) 448$(perf_ioctl_array): $(perf_hdr_dir)/perf_event.h $(perf_ioctl_tbl)
442 $(Q)$(SHELL) '$(perf_ioctl_tbl)' $(perf_hdr_dir) > $@ 449 $(Q)$(SHELL) '$(perf_ioctl_tbl)' $(perf_hdr_dir) > $@
443 450
451madvise_behavior_array := $(beauty_outdir)/madvise_behavior_array.c
452madvise_hdr_dir := $(srctree)/tools/include/uapi/asm-generic/
453madvise_behavior_tbl := $(srctree)/tools/perf/trace/beauty/madvise_behavior.sh
454
455$(madvise_behavior_array): $(madvise_hdr_dir)/mman-common.h $(madvise_behavior_tbl)
456 $(Q)$(SHELL) '$(madvise_behavior_tbl)' $(madvise_hdr_dir) > $@
457
458prctl_option_array := $(beauty_outdir)/prctl_option_array.c
459prctl_hdr_dir := $(srctree)/tools/include/uapi/linux/
460prctl_option_tbl := $(srctree)/tools/perf/trace/beauty/prctl_option.sh
461
462$(prctl_option_array): $(prctl_hdr_dir)/prctl.h $(prctl_option_tbl)
463 $(Q)$(SHELL) '$(prctl_option_tbl)' $(prctl_hdr_dir) > $@
464
444all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS) 465all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS)
445 466
446$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST) 467$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST)
@@ -539,9 +560,12 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioc
539 $(pkey_alloc_access_rights_array) \ 560 $(pkey_alloc_access_rights_array) \
540 $(sndrv_pcm_ioctl_array) \ 561 $(sndrv_pcm_ioctl_array) \
541 $(sndrv_ctl_ioctl_array) \ 562 $(sndrv_ctl_ioctl_array) \
563 $(kcmp_type_array) \
542 $(kvm_ioctl_array) \ 564 $(kvm_ioctl_array) \
543 $(vhost_virtio_ioctl_array) \ 565 $(vhost_virtio_ioctl_array) \
544 $(perf_ioctl_array) 566 $(madvise_behavior_array) \
567 $(perf_ioctl_array) \
568 $(prctl_option_array)
545 569
546$(OUTPUT)%.o: %.c prepare FORCE 570$(OUTPUT)%.o: %.c prepare FORCE
547 $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@ 571 $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
@@ -802,7 +826,10 @@ config-clean:
802 $(call QUIET_CLEAN, config) 826 $(call QUIET_CLEAN, config)
803 $(Q)$(MAKE) -C $(srctree)/tools/build/feature/ $(if $(OUTPUT),OUTPUT=$(OUTPUT)feature/,) clean >/dev/null 827 $(Q)$(MAKE) -C $(srctree)/tools/build/feature/ $(if $(OUTPUT),OUTPUT=$(OUTPUT)feature/,) clean >/dev/null
804 828
805clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean config-clean fixdep-clean 829python-clean:
830 $(python-clean)
831
832clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean config-clean fixdep-clean python-clean
806 $(call QUIET_CLEAN, core-objs) $(RM) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS) 833 $(call QUIET_CLEAN, core-objs) $(RM) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS)
807 $(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete 834 $(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
808 $(Q)$(RM) $(OUTPUT).config-detected 835 $(Q)$(RM) $(OUTPUT).config-detected
@@ -811,15 +838,17 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea
811 $(OUTPUT)util/intel-pt-decoder/inat-tables.c \ 838 $(OUTPUT)util/intel-pt-decoder/inat-tables.c \
812 $(OUTPUT)tests/llvm-src-{base,kbuild,prologue,relocation}.c \ 839 $(OUTPUT)tests/llvm-src-{base,kbuild,prologue,relocation}.c \
813 $(OUTPUT)pmu-events/pmu-events.c \ 840 $(OUTPUT)pmu-events/pmu-events.c \
841 $(OUTPUT)$(madvise_behavior_array) \
814 $(OUTPUT)$(drm_ioctl_array) \ 842 $(OUTPUT)$(drm_ioctl_array) \
815 $(OUTPUT)$(pkey_alloc_access_rights_array) \ 843 $(OUTPUT)$(pkey_alloc_access_rights_array) \
816 $(OUTPUT)$(sndrv_ctl_ioctl_array) \ 844 $(OUTPUT)$(sndrv_ctl_ioctl_array) \
817 $(OUTPUT)$(sndrv_pcm_ioctl_array) \ 845 $(OUTPUT)$(sndrv_pcm_ioctl_array) \
818 $(OUTPUT)$(kvm_ioctl_array) \ 846 $(OUTPUT)$(kvm_ioctl_array) \
847 $(OUTPUT)$(kcmp_type_array) \
819 $(OUTPUT)$(vhost_virtio_ioctl_array) \ 848 $(OUTPUT)$(vhost_virtio_ioctl_array) \
820 $(OUTPUT)$(perf_ioctl_array) 849 $(OUTPUT)$(perf_ioctl_array) \
850 $(OUTPUT)$(prctl_option_array)
821 $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean 851 $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
822 $(python-clean)
823 852
824# 853#
825# To provide FEATURE-DUMP into $(FEATURE_DUMP_COPY) 854# To provide FEATURE-DUMP into $(FEATURE_DUMP_COPY)
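The new kcmp/madvise/prctl rules above follow the existing "beauty" pattern: a small shell script scrapes a UAPI header and emits a C string table that the syscall-argument beautifiers compile in. Roughly what such a generated table looks like (the array name and entries below are illustrative, not the actual generator output):

	/* Illustrative only -- shape of a generated beauty table: */
	static const char *kcmp_types[] = {
		[0] = "FILE",
		[1] = "VM",
		[2] = "FILES",
		[3] = "FS",
		/* ... one entry per KCMP_* value in uapi/linux/kcmp.h ... */
	};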
diff --git a/tools/perf/arch/arm/annotate/instructions.c b/tools/perf/arch/arm/annotate/instructions.c
index b39b16395aac..f64516d5b23e 100644
--- a/tools/perf/arch/arm/annotate/instructions.c
+++ b/tools/perf/arch/arm/annotate/instructions.c
@@ -1,4 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include <linux/compiler.h>
2#include <sys/types.h> 3#include <sys/types.h>
3#include <regex.h> 4#include <regex.h>
4 5
@@ -24,7 +25,7 @@ static struct ins_ops *arm__associate_instruction_ops(struct arch *arch, const c
24 return ops; 25 return ops;
25} 26}
26 27
27static int arm__annotate_init(struct arch *arch) 28static int arm__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
28{ 29{
29 struct arm_annotate *arm; 30 struct arm_annotate *arm;
30 int err; 31 int err;
diff --git a/tools/perf/arch/arm64/annotate/instructions.c b/tools/perf/arch/arm64/annotate/instructions.c
index 9a3e0523e2c9..6688977e4ac7 100644
--- a/tools/perf/arch/arm64/annotate/instructions.c
+++ b/tools/perf/arch/arm64/annotate/instructions.c
@@ -1,4 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include <linux/compiler.h>
2#include <sys/types.h> 3#include <sys/types.h>
3#include <regex.h> 4#include <regex.h>
4 5
@@ -26,7 +27,7 @@ static struct ins_ops *arm64__associate_instruction_ops(struct arch *arch, const
26 return ops; 27 return ops;
27} 28}
28 29
29static int arm64__annotate_init(struct arch *arch) 30static int arm64__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
30{ 31{
31 struct arm64_annotate *arm; 32 struct arm64_annotate *arm;
32 int err; 33 int err;
diff --git a/tools/perf/arch/powerpc/annotate/instructions.c b/tools/perf/arch/powerpc/annotate/instructions.c
index b7bc04980fe8..a3f423c27cae 100644
--- a/tools/perf/arch/powerpc/annotate/instructions.c
+++ b/tools/perf/arch/powerpc/annotate/instructions.c
@@ -1,4 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include <linux/compiler.h>
3
2static struct ins_ops *powerpc__associate_instruction_ops(struct arch *arch, const char *name) 4static struct ins_ops *powerpc__associate_instruction_ops(struct arch *arch, const char *name)
3{ 5{
4 int i; 6 int i;
@@ -47,7 +49,7 @@ static struct ins_ops *powerpc__associate_instruction_ops(struct arch *arch, con
47 return ops; 49 return ops;
48} 50}
49 51
50static int powerpc__annotate_init(struct arch *arch) 52static int powerpc__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
51{ 53{
52 if (!arch->initialized) { 54 if (!arch->initialized) {
53 arch->initialized = true; 55 arch->initialized = true;
diff --git a/tools/perf/arch/s390/Makefile b/tools/perf/arch/s390/Makefile
index 21322e0385b8..09ba923debe8 100644
--- a/tools/perf/arch/s390/Makefile
+++ b/tools/perf/arch/s390/Makefile
@@ -2,3 +2,4 @@ ifndef NO_DWARF
2PERF_HAVE_DWARF_REGS := 1 2PERF_HAVE_DWARF_REGS := 1
3endif 3endif
4HAVE_KVM_STAT_SUPPORT := 1 4HAVE_KVM_STAT_SUPPORT := 1
5PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
diff --git a/tools/perf/arch/s390/annotate/instructions.c b/tools/perf/arch/s390/annotate/instructions.c
index c9a81673e8aa..e0e466c650df 100644
--- a/tools/perf/arch/s390/annotate/instructions.c
+++ b/tools/perf/arch/s390/annotate/instructions.c
@@ -1,4 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include <linux/compiler.h>
3
2static struct ins_ops *s390__associate_ins_ops(struct arch *arch, const char *name) 4static struct ins_ops *s390__associate_ins_ops(struct arch *arch, const char *name)
3{ 5{
4 struct ins_ops *ops = NULL; 6 struct ins_ops *ops = NULL;
@@ -20,7 +22,7 @@ static struct ins_ops *s390__associate_ins_ops(struct arch *arch, const char *na
20 return ops; 22 return ops;
21} 23}
22 24
23static int s390__annotate_init(struct arch *arch) 25static int s390__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
24{ 26{
25 if (!arch->initialized) { 27 if (!arch->initialized) {
26 arch->initialized = true; 28 arch->initialized = true;
diff --git a/tools/perf/arch/s390/include/dwarf-regs-table.h b/tools/perf/arch/s390/include/dwarf-regs-table.h
index 792d4c277225..671553525f41 100644
--- a/tools/perf/arch/s390/include/dwarf-regs-table.h
+++ b/tools/perf/arch/s390/include/dwarf-regs-table.h
@@ -1,9 +1,72 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2#ifdef DEFINE_DWARF_REGSTR_TABLE 2#ifndef S390_DWARF_REGS_TABLE_H
3/* This is included in perf/util/dwarf-regs.c */ 3#define S390_DWARF_REGS_TABLE_H
4 4
5static const char * const s390_regstr_tbl[] = { 5#define REG_DWARFNUM_NAME(reg, idx) [idx] = "%" #reg
6
7/*
8 * For reference, see DWARF register mapping:
9 * http://refspecs.linuxfoundation.org/ELF/zSeries/lzsabi0_s390/x1542.html
10 */
11static const char * const s390_dwarf_regs[] = {
6 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", 12 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
7 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", 13 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
14 REG_DWARFNUM_NAME(f0, 16),
15 REG_DWARFNUM_NAME(f1, 20),
16 REG_DWARFNUM_NAME(f2, 17),
17 REG_DWARFNUM_NAME(f3, 21),
18 REG_DWARFNUM_NAME(f4, 18),
19 REG_DWARFNUM_NAME(f5, 22),
20 REG_DWARFNUM_NAME(f6, 19),
21 REG_DWARFNUM_NAME(f7, 23),
22 REG_DWARFNUM_NAME(f8, 24),
23 REG_DWARFNUM_NAME(f9, 28),
24 REG_DWARFNUM_NAME(f10, 25),
25 REG_DWARFNUM_NAME(f11, 29),
26 REG_DWARFNUM_NAME(f12, 26),
27 REG_DWARFNUM_NAME(f13, 30),
28 REG_DWARFNUM_NAME(f14, 27),
29 REG_DWARFNUM_NAME(f15, 31),
30 REG_DWARFNUM_NAME(c0, 32),
31 REG_DWARFNUM_NAME(c1, 33),
32 REG_DWARFNUM_NAME(c2, 34),
33 REG_DWARFNUM_NAME(c3, 35),
34 REG_DWARFNUM_NAME(c4, 36),
35 REG_DWARFNUM_NAME(c5, 37),
36 REG_DWARFNUM_NAME(c6, 38),
37 REG_DWARFNUM_NAME(c7, 39),
38 REG_DWARFNUM_NAME(c8, 40),
39 REG_DWARFNUM_NAME(c9, 41),
40 REG_DWARFNUM_NAME(c10, 42),
41 REG_DWARFNUM_NAME(c11, 43),
42 REG_DWARFNUM_NAME(c12, 44),
43 REG_DWARFNUM_NAME(c13, 45),
44 REG_DWARFNUM_NAME(c14, 46),
45 REG_DWARFNUM_NAME(c15, 47),
46 REG_DWARFNUM_NAME(a0, 48),
47 REG_DWARFNUM_NAME(a1, 49),
48 REG_DWARFNUM_NAME(a2, 50),
49 REG_DWARFNUM_NAME(a3, 51),
50 REG_DWARFNUM_NAME(a4, 52),
51 REG_DWARFNUM_NAME(a5, 53),
52 REG_DWARFNUM_NAME(a6, 54),
53 REG_DWARFNUM_NAME(a7, 55),
54 REG_DWARFNUM_NAME(a8, 56),
55 REG_DWARFNUM_NAME(a9, 57),
56 REG_DWARFNUM_NAME(a10, 58),
57 REG_DWARFNUM_NAME(a11, 59),
58 REG_DWARFNUM_NAME(a12, 60),
59 REG_DWARFNUM_NAME(a13, 61),
60 REG_DWARFNUM_NAME(a14, 62),
61 REG_DWARFNUM_NAME(a15, 63),
62 REG_DWARFNUM_NAME(pswm, 64),
63 REG_DWARFNUM_NAME(pswa, 65),
8}; 64};
9#endif 65
66#ifdef DEFINE_DWARF_REGSTR_TABLE
67/* This is included in perf/util/dwarf-regs.c */
68
69#define s390_regstr_tbl s390_dwarf_regs
70
71#endif /* DEFINE_DWARF_REGSTR_TABLE */
72#endif /* S390_DWARF_REGS_TABLE_H */
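The REG_DWARFNUM_NAME() helper combines a designated initializer with stringification, so sparse and out-of-order DWARF register numbers stay readable; for example:

	REG_DWARFNUM_NAME(f0, 16)    /* expands to:  [16] = "%f0"   */
	REG_DWARFNUM_NAME(pswa, 65)  /* expands to:  [65] = "%pswa" */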
diff --git a/tools/perf/arch/s390/include/perf_regs.h b/tools/perf/arch/s390/include/perf_regs.h
new file mode 100644
index 000000000000..bcfbaed78cc2
--- /dev/null
+++ b/tools/perf/arch/s390/include/perf_regs.h
@@ -0,0 +1,95 @@
1#ifndef ARCH_PERF_REGS_H
2#define ARCH_PERF_REGS_H
3
4#include <stdlib.h>
5#include <linux/types.h>
6#include <asm/perf_regs.h>
7
8void perf_regs_load(u64 *regs);
9
10#define PERF_REGS_MASK ((1ULL << PERF_REG_S390_MAX) - 1)
11#define PERF_REGS_MAX PERF_REG_S390_MAX
12#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_64
13
14#define PERF_REG_IP PERF_REG_S390_PC
15#define PERF_REG_SP PERF_REG_S390_R15
16
17static inline const char *perf_reg_name(int id)
18{
19 switch (id) {
20 case PERF_REG_S390_R0:
21 return "R0";
22 case PERF_REG_S390_R1:
23 return "R1";
24 case PERF_REG_S390_R2:
25 return "R2";
26 case PERF_REG_S390_R3:
27 return "R3";
28 case PERF_REG_S390_R4:
29 return "R4";
30 case PERF_REG_S390_R5:
31 return "R5";
32 case PERF_REG_S390_R6:
33 return "R6";
34 case PERF_REG_S390_R7:
35 return "R7";
36 case PERF_REG_S390_R8:
37 return "R8";
38 case PERF_REG_S390_R9:
39 return "R9";
40 case PERF_REG_S390_R10:
41 return "R10";
42 case PERF_REG_S390_R11:
43 return "R11";
44 case PERF_REG_S390_R12:
45 return "R12";
46 case PERF_REG_S390_R13:
47 return "R13";
48 case PERF_REG_S390_R14:
49 return "R14";
50 case PERF_REG_S390_R15:
51 return "R15";
52 case PERF_REG_S390_FP0:
53 return "FP0";
54 case PERF_REG_S390_FP1:
55 return "FP1";
56 case PERF_REG_S390_FP2:
57 return "FP2";
58 case PERF_REG_S390_FP3:
59 return "FP3";
60 case PERF_REG_S390_FP4:
61 return "FP4";
62 case PERF_REG_S390_FP5:
63 return "FP5";
64 case PERF_REG_S390_FP6:
65 return "FP6";
66 case PERF_REG_S390_FP7:
67 return "FP7";
68 case PERF_REG_S390_FP8:
69 return "FP8";
70 case PERF_REG_S390_FP9:
71 return "FP9";
72 case PERF_REG_S390_FP10:
73 return "FP10";
74 case PERF_REG_S390_FP11:
75 return "FP11";
76 case PERF_REG_S390_FP12:
77 return "FP12";
78 case PERF_REG_S390_FP13:
79 return "FP13";
80 case PERF_REG_S390_FP14:
81 return "FP14";
82 case PERF_REG_S390_FP15:
83 return "FP15";
84 case PERF_REG_S390_MASK:
85 return "MASK";
86 case PERF_REG_S390_PC:
87 return "PC";
88 default:
89 return NULL;
90 }
91
92 return NULL;
93}
94
95#endif /* ARCH_PERF_REGS_H */
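A quick usage sketch for the new header (the commented values follow from the code above; PERF_REG_S390_* and PERF_REG_S390_MAX themselves come from the UAPI asm/perf_regs.h):

	/* Sketch, assuming the header above is included: */
	const char *sp_name = perf_reg_name(PERF_REG_SP);   /* "R15" */
	const char *ip_name = perf_reg_name(PERF_REG_IP);   /* "PC"  */
	unsigned long long mask = PERF_REGS_MASK;           /* one bit per s390 sample register */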
diff --git a/tools/perf/arch/s390/util/Build b/tools/perf/arch/s390/util/Build
index 5bd7b9260cc0..4a233683c684 100644
--- a/tools/perf/arch/s390/util/Build
+++ b/tools/perf/arch/s390/util/Build
@@ -2,5 +2,8 @@ libperf-y += header.o
2libperf-y += kvm-stat.o 2libperf-y += kvm-stat.o
3 3
4libperf-$(CONFIG_DWARF) += dwarf-regs.o 4libperf-$(CONFIG_DWARF) += dwarf-regs.o
5libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
5 6
6libperf-y += machine.o 7libperf-y += machine.o
8
9libperf-$(CONFIG_AUXTRACE) += auxtrace.o
diff --git a/tools/perf/arch/s390/util/auxtrace.c b/tools/perf/arch/s390/util/auxtrace.c
new file mode 100644
index 000000000000..6cb48e4cffd9
--- /dev/null
+++ b/tools/perf/arch/s390/util/auxtrace.c
@@ -0,0 +1,118 @@
1#include <stdbool.h>
2#include <linux/kernel.h>
3#include <linux/types.h>
4#include <linux/bitops.h>
5#include <linux/log2.h>
6
7#include "../../util/evlist.h"
8#include "../../util/auxtrace.h"
9#include "../../util/evsel.h"
10
11#define PERF_EVENT_CPUM_SF 0xB0000 /* Event: Basic-sampling */
12#define PERF_EVENT_CPUM_SF_DIAG 0xBD000 /* Event: Combined-sampling */
13#define DEFAULT_AUX_PAGES 128
14#define DEFAULT_FREQ 4000
15
16static void cpumsf_free(struct auxtrace_record *itr)
17{
18 free(itr);
19}
20
21static size_t cpumsf_info_priv_size(struct auxtrace_record *itr __maybe_unused,
22 struct perf_evlist *evlist __maybe_unused)
23{
24 return 0;
25}
26
27static int
28cpumsf_info_fill(struct auxtrace_record *itr __maybe_unused,
29 struct perf_session *session __maybe_unused,
30 struct auxtrace_info_event *auxtrace_info __maybe_unused,
31 size_t priv_size __maybe_unused)
32{
33 return 0;
34}
35
36static unsigned long
37cpumsf_reference(struct auxtrace_record *itr __maybe_unused)
38{
39 return 0;
40}
41
42static int
43cpumsf_recording_options(struct auxtrace_record *ar __maybe_unused,
44 struct perf_evlist *evlist __maybe_unused,
45 struct record_opts *opts)
46{
47 unsigned int factor = 1;
48 unsigned int pages;
49
50 opts->full_auxtrace = true;
51
52 /*
53 * The AUX buffer size should be set properly to avoid
54 * overflow of samples if it is not set explicitly.
 55 * DEFAULT_AUX_PAGES is a proper size when the sampling frequency
56 * is DEFAULT_FREQ. It is expected to hold about 1/2 second
57 * of sampling data. The size used for AUX buffer will scale
58 * according to the specified frequency and DEFAULT_FREQ.
59 */
60 if (!opts->auxtrace_mmap_pages) {
61 if (opts->user_freq != UINT_MAX)
62 factor = (opts->user_freq + DEFAULT_FREQ
63 - 1) / DEFAULT_FREQ;
64 pages = DEFAULT_AUX_PAGES * factor;
65 opts->auxtrace_mmap_pages = roundup_pow_of_two(pages);
66 }
67
68 return 0;
69}
70
71static int
72cpumsf_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
73 struct record_opts *opts __maybe_unused,
74 const char *str __maybe_unused)
75{
76 return 0;
77}
78
79/*
 80 * auxtrace_record__init is called by perf record to
 81 * check whether the event really needs auxtrace
82 */
83struct auxtrace_record *auxtrace_record__init(struct perf_evlist *evlist,
84 int *err)
85{
86 struct auxtrace_record *aux;
87 struct perf_evsel *pos;
88 int diagnose = 0;
89
90 if (evlist->nr_entries == 0)
91 return NULL;
92
93 evlist__for_each_entry(evlist, pos) {
94 if (pos->attr.config == PERF_EVENT_CPUM_SF_DIAG) {
95 diagnose = 1;
96 break;
97 }
98 }
99
100 if (!diagnose)
101 return NULL;
102
103 /* sampling in diagnose mode. alloc aux buffer */
104 aux = zalloc(sizeof(*aux));
105 if (aux == NULL) {
106 *err = -ENOMEM;
107 return NULL;
108 }
109
110 aux->parse_snapshot_options = cpumsf_parse_snapshot_options;
111 aux->recording_options = cpumsf_recording_options;
112 aux->info_priv_size = cpumsf_info_priv_size;
113 aux->info_fill = cpumsf_info_fill;
114 aux->free = cpumsf_free;
115 aux->reference = cpumsf_reference;
116
117 return aux;
118}
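Worked example of the AUX buffer sizing in cpumsf_recording_options(), assuming the user asked for a 16000 Hz sampling rate and did not set an AUX mmap size explicitly:

	/*
	 * factor = (16000 + 4000 - 1) / 4000                  = 4
	 * pages  = DEFAULT_AUX_PAGES * factor = 128 * 4       = 512
	 * opts->auxtrace_mmap_pages = roundup_pow_of_two(512) = 512
	 */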
diff --git a/tools/perf/arch/s390/util/dwarf-regs.c b/tools/perf/arch/s390/util/dwarf-regs.c
index 0dff5b2ed1e5..a8ace5cc6301 100644
--- a/tools/perf/arch/s390/util/dwarf-regs.c
+++ b/tools/perf/arch/s390/util/dwarf-regs.c
@@ -2,22 +2,43 @@
2/* 2/*
3 * Mapping of DWARF debug register numbers into register names. 3 * Mapping of DWARF debug register numbers into register names.
4 * 4 *
5 * Copyright IBM Corp. 2010 5 * Copyright IBM Corp. 2010, 2017
6 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, 6 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
7 * Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
7 * 8 *
8 */ 9 */
9 10
11#include <errno.h>
10#include <stddef.h> 12#include <stddef.h>
13#include <stdlib.h>
14#include <linux/kernel.h>
15#include <asm/ptrace.h>
16#include <string.h>
11#include <dwarf-regs.h> 17#include <dwarf-regs.h>
12 18#include "dwarf-regs-table.h"
13#define NUM_GPRS 16
14
15static const char *gpr_names[NUM_GPRS] = {
16 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
17 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
18};
19 19
20const char *get_arch_regstr(unsigned int n) 20const char *get_arch_regstr(unsigned int n)
21{ 21{
22 return (n >= NUM_GPRS) ? NULL : gpr_names[n]; 22 return (n >= ARRAY_SIZE(s390_dwarf_regs)) ? NULL : s390_dwarf_regs[n];
23}
24
25/*
26 * Convert the register name into an offset to struct pt_regs (kernel).
27 * This is required by the BPF prologue generator. The BPF
28 * program is called in the BPF overflow handler in the perf
29 * core.
30 */
31int regs_query_register_offset(const char *name)
32{
33 unsigned long gpr;
34
35 if (!name || strncmp(name, "%r", 2))
36 return -EINVAL;
37
38 errno = 0;
39 gpr = strtoul(name + 2, NULL, 10);
40 if (errno || gpr >= 16)
41 return -EINVAL;
42
43 return offsetof(user_pt_regs, gprs) + 8 * gpr;
23} 44}
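For illustration, the new regs_query_register_offset() accepts only the sixteen GPR names; both results below follow directly from the function above:

	regs_query_register_offset("%r5");   /* offsetof(user_pt_regs, gprs) + 8 * 5 */
	regs_query_register_offset("%f0");   /* -EINVAL: only "%r0".."%r15" are accepted */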
diff --git a/tools/perf/arch/s390/util/unwind-libdw.c b/tools/perf/arch/s390/util/unwind-libdw.c
new file mode 100644
index 000000000000..387c698cdd1b
--- /dev/null
+++ b/tools/perf/arch/s390/util/unwind-libdw.c
@@ -0,0 +1,63 @@
1#include <linux/kernel.h>
2#include <elfutils/libdwfl.h>
3#include "../../util/unwind-libdw.h"
4#include "../../util/perf_regs.h"
5#include "../../util/event.h"
6#include "dwarf-regs-table.h"
7
8
9bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
10{
11 struct unwind_info *ui = arg;
12 struct regs_dump *user_regs = &ui->sample->user_regs;
13 Dwarf_Word dwarf_regs[ARRAY_SIZE(s390_dwarf_regs)];
14
15#define REG(r) ({ \
16 Dwarf_Word val = 0; \
17 perf_reg_value(&val, user_regs, PERF_REG_S390_##r); \
18 val; \
19})
20 /*
21 * For DWARF register mapping details,
22 * see also perf/arch/s390/include/dwarf-regs-table.h
23 */
24 dwarf_regs[0] = REG(R0);
25 dwarf_regs[1] = REG(R1);
26 dwarf_regs[2] = REG(R2);
27 dwarf_regs[3] = REG(R3);
28 dwarf_regs[4] = REG(R4);
29 dwarf_regs[5] = REG(R5);
30 dwarf_regs[6] = REG(R6);
31 dwarf_regs[7] = REG(R7);
32 dwarf_regs[8] = REG(R8);
33 dwarf_regs[9] = REG(R9);
34 dwarf_regs[10] = REG(R10);
35 dwarf_regs[11] = REG(R11);
36 dwarf_regs[12] = REG(R12);
37 dwarf_regs[13] = REG(R13);
38 dwarf_regs[14] = REG(R14);
39 dwarf_regs[15] = REG(R15);
40
41 dwarf_regs[16] = REG(FP0);
42 dwarf_regs[17] = REG(FP2);
43 dwarf_regs[18] = REG(FP4);
44 dwarf_regs[19] = REG(FP6);
45 dwarf_regs[20] = REG(FP1);
46 dwarf_regs[21] = REG(FP3);
47 dwarf_regs[22] = REG(FP5);
48 dwarf_regs[23] = REG(FP7);
49 dwarf_regs[24] = REG(FP8);
50 dwarf_regs[25] = REG(FP10);
51 dwarf_regs[26] = REG(FP12);
52 dwarf_regs[27] = REG(FP14);
53 dwarf_regs[28] = REG(FP9);
54 dwarf_regs[29] = REG(FP11);
55 dwarf_regs[30] = REG(FP13);
56 dwarf_regs[31] = REG(FP15);
57
58 dwarf_regs[64] = REG(MASK);
59 dwarf_regs[65] = REG(PC);
60
61 dwfl_thread_state_register_pc(thread, dwarf_regs[65]);
62 return dwfl_thread_state_registers(thread, 0, 32, dwarf_regs);
63}
diff --git a/tools/perf/arch/x86/annotate/instructions.c b/tools/perf/arch/x86/annotate/instructions.c
index 4adfb4ce2864..5bd1ba8c0282 100644
--- a/tools/perf/arch/x86/annotate/instructions.c
+++ b/tools/perf/arch/x86/annotate/instructions.c
@@ -123,3 +123,17 @@ static int x86__cpuid_parse(struct arch *arch, char *cpuid)
123 123
124 return -1; 124 return -1;
125} 125}
126
127static int x86__annotate_init(struct arch *arch, char *cpuid)
128{
129 int err = 0;
130
131 if (arch->initialized)
132 return 0;
133
134 if (cpuid)
135 err = x86__cpuid_parse(arch, cpuid);
136
137 arch->initialized = true;
138 return err;
139}
diff --git a/tools/perf/arch/x86/include/arch-tests.h b/tools/perf/arch/x86/include/arch-tests.h
index 9834fdc7c59e..c1bd979b957b 100644
--- a/tools/perf/arch/x86/include/arch-tests.h
+++ b/tools/perf/arch/x86/include/arch-tests.h
@@ -9,7 +9,6 @@ struct test;
9int test__rdpmc(struct test *test __maybe_unused, int subtest); 9int test__rdpmc(struct test *test __maybe_unused, int subtest);
10int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest); 10int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest);
11int test__insn_x86(struct test *test __maybe_unused, int subtest); 11int test__insn_x86(struct test *test __maybe_unused, int subtest);
12int test__intel_cqm_count_nmi_context(struct test *test __maybe_unused, int subtest);
13 12
14#ifdef HAVE_DWARF_UNWIND_SUPPORT 13#ifdef HAVE_DWARF_UNWIND_SUPPORT
15struct thread; 14struct thread;
diff --git a/tools/perf/arch/x86/tests/Build b/tools/perf/arch/x86/tests/Build
index cbb7e978166b..8e2c5a38c3b9 100644
--- a/tools/perf/arch/x86/tests/Build
+++ b/tools/perf/arch/x86/tests/Build
@@ -5,4 +5,3 @@ libperf-y += arch-tests.o
5libperf-y += rdpmc.o 5libperf-y += rdpmc.o
6libperf-y += perf-time-to-tsc.o 6libperf-y += perf-time-to-tsc.o
7libperf-$(CONFIG_AUXTRACE) += insn-x86.o 7libperf-$(CONFIG_AUXTRACE) += insn-x86.o
8libperf-y += intel-cqm.o
diff --git a/tools/perf/arch/x86/tests/arch-tests.c b/tools/perf/arch/x86/tests/arch-tests.c
index 34a078136a47..cc1802ff5410 100644
--- a/tools/perf/arch/x86/tests/arch-tests.c
+++ b/tools/perf/arch/x86/tests/arch-tests.c
@@ -25,10 +25,6 @@ struct test arch_tests[] = {
25 }, 25 },
26#endif 26#endif
27 { 27 {
28 .desc = "Intel cqm nmi context read",
29 .func = test__intel_cqm_count_nmi_context,
30 },
31 {
32 .func = NULL, 28 .func = NULL,
33 }, 29 },
34 30
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index d95fdcc26f4b..944070e98a2c 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -216,6 +216,47 @@ static const char * const numa_usage[] = {
216 NULL 216 NULL
217}; 217};
218 218
219/*
 220 * To get the number of numa nodes present.
221 */
222static int nr_numa_nodes(void)
223{
224 int i, nr_nodes = 0;
225
226 for (i = 0; i < g->p.nr_nodes; i++) {
227 if (numa_bitmask_isbitset(numa_nodes_ptr, i))
228 nr_nodes++;
229 }
230
231 return nr_nodes;
232}
233
234/*
235 * To check if given numa node is present.
236 */
237static int is_node_present(int node)
238{
239 return numa_bitmask_isbitset(numa_nodes_ptr, node);
240}
241
242/*
243 * To check given numa node has cpus.
244 */
245static bool node_has_cpus(int node)
246{
247 struct bitmask *cpu = numa_allocate_cpumask();
248 unsigned int i;
249
250 if (cpu && !numa_node_to_cpus(node, cpu)) {
251 for (i = 0; i < cpu->size; i++) {
252 if (numa_bitmask_isbitset(cpu, i))
253 return true;
254 }
255 }
256
 257 return false; /* let's fall back to nocpus safely */
258}
259
219static cpu_set_t bind_to_cpu(int target_cpu) 260static cpu_set_t bind_to_cpu(int target_cpu)
220{ 261{
221 cpu_set_t orig_mask, mask; 262 cpu_set_t orig_mask, mask;
@@ -244,12 +285,12 @@ static cpu_set_t bind_to_cpu(int target_cpu)
244 285
245static cpu_set_t bind_to_node(int target_node) 286static cpu_set_t bind_to_node(int target_node)
246{ 287{
247 int cpus_per_node = g->p.nr_cpus/g->p.nr_nodes; 288 int cpus_per_node = g->p.nr_cpus / nr_numa_nodes();
248 cpu_set_t orig_mask, mask; 289 cpu_set_t orig_mask, mask;
249 int cpu; 290 int cpu;
250 int ret; 291 int ret;
251 292
252 BUG_ON(cpus_per_node*g->p.nr_nodes != g->p.nr_cpus); 293 BUG_ON(cpus_per_node * nr_numa_nodes() != g->p.nr_cpus);
253 BUG_ON(!cpus_per_node); 294 BUG_ON(!cpus_per_node);
254 295
255 ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask); 296 ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
@@ -649,7 +690,7 @@ static int parse_setup_node_list(void)
649 int i; 690 int i;
650 691
651 for (i = 0; i < mul; i++) { 692 for (i = 0; i < mul; i++) {
652 if (t >= g->p.nr_tasks) { 693 if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {
653 printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node); 694 printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
654 goto out; 695 goto out;
655 } 696 }
@@ -964,6 +1005,8 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
964 sum = 0; 1005 sum = 0;
965 1006
966 for (node = 0; node < g->p.nr_nodes; node++) { 1007 for (node = 0; node < g->p.nr_nodes; node++) {
1008 if (!is_node_present(node))
1009 continue;
967 nr = nodes[node]; 1010 nr = nodes[node];
968 nr_min = min(nr, nr_min); 1011 nr_min = min(nr, nr_min);
969 nr_max = max(nr, nr_max); 1012 nr_max = max(nr, nr_max);
@@ -984,8 +1027,11 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
984 process_groups = 0; 1027 process_groups = 0;
985 1028
986 for (node = 0; node < g->p.nr_nodes; node++) { 1029 for (node = 0; node < g->p.nr_nodes; node++) {
987 int processes = count_node_processes(node); 1030 int processes;
988 1031
1032 if (!is_node_present(node))
1033 continue;
1034 processes = count_node_processes(node);
989 nr = nodes[node]; 1035 nr = nodes[node];
990 tprintf(" %2d/%-2d", nr, processes); 1036 tprintf(" %2d/%-2d", nr, processes);
991 1037
@@ -1291,7 +1337,7 @@ static void print_summary(void)
1291 1337
1292 printf("\n ###\n"); 1338 printf("\n ###\n");
1293 printf(" # %d %s will execute (on %d nodes, %d CPUs):\n", 1339 printf(" # %d %s will execute (on %d nodes, %d CPUs):\n",
1294 g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", g->p.nr_nodes, g->p.nr_cpus); 1340 g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus);
1295 printf(" # %5dx %5ldMB global shared mem operations\n", 1341 printf(" # %5dx %5ldMB global shared mem operations\n",
1296 g->p.nr_loops, g->p.bytes_global/1024/1024); 1342 g->p.nr_loops, g->p.bytes_global/1024/1024);
1297 printf(" # %5dx %5ldMB process shared mem operations\n", 1343 printf(" # %5dx %5ldMB process shared mem operations\n",
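A hedged worked example of why the helpers above matter on machines with sparse node numbering (the layout here is hypothetical): with only nodes 0 and 4 online and 32 CPUs, g->p.nr_nodes, which elsewhere in this file is presumably derived from the highest node number, would be 5, while nr_numa_nodes() counts the bits actually set in numa_nodes_ptr:

	/*
	 * g->p.nr_nodes   = 5             (highest node number + 1)
	 * nr_numa_nodes() = 2             (nodes 0 and 4 present)
	 * cpus_per_node   = 32 / 2 = 16   (previously 32 / 5 = 6, tripping the BUG_ON)
	 */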
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 3d32aa45016d..f15731a3d438 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -357,7 +357,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
357 } 357 }
358 358
359 if (total_nr_samples == 0) { 359 if (total_nr_samples == 0) {
360 ui__error("The %s file has no samples!\n", session->file->path); 360 ui__error("The %s file has no samples!\n", session->data->file.path);
361 goto out; 361 goto out;
362 } 362 }
363 363
@@ -401,7 +401,7 @@ int cmd_annotate(int argc, const char **argv)
401 .ordering_requires_timestamps = true, 401 .ordering_requires_timestamps = true,
402 }, 402 },
403 }; 403 };
404 struct perf_data_file file = { 404 struct perf_data data = {
405 .mode = PERF_DATA_MODE_READ, 405 .mode = PERF_DATA_MODE_READ,
406 }; 406 };
407 struct option options[] = { 407 struct option options[] = {
@@ -411,7 +411,7 @@ int cmd_annotate(int argc, const char **argv)
411 "only consider symbols in these dsos"), 411 "only consider symbols in these dsos"),
412 OPT_STRING('s', "symbol", &annotate.sym_hist_filter, "symbol", 412 OPT_STRING('s', "symbol", &annotate.sym_hist_filter, "symbol",
413 "symbol to annotate"), 413 "symbol to annotate"),
414 OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"), 414 OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
415 OPT_INCR('v', "verbose", &verbose, 415 OPT_INCR('v', "verbose", &verbose,
416 "be more verbose (show symbol address, etc)"), 416 "be more verbose (show symbol address, etc)"),
 417 OPT_BOOLEAN('q', "quiet", &quiet, "do not show any message"), 417 OPT_BOOLEAN('q', "quiet", &quiet, "do not show any message"),
@@ -483,9 +483,9 @@ int cmd_annotate(int argc, const char **argv)
483 if (quiet) 483 if (quiet)
484 perf_quiet_option(); 484 perf_quiet_option();
485 485
486 file.path = input_name; 486 data.file.path = input_name;
487 487
488 annotate.session = perf_session__new(&file, false, &annotate.tool); 488 annotate.session = perf_session__new(&data, false, &annotate.tool);
489 if (annotate.session == NULL) 489 if (annotate.session == NULL)
490 return -1; 490 return -1;
491 491
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 5f53a7ad5ef3..3d354ba6e9c5 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -312,7 +312,7 @@ int cmd_buildid_cache(int argc, const char **argv)
312 *kcore_filename = NULL; 312 *kcore_filename = NULL;
313 char sbuf[STRERR_BUFSIZE]; 313 char sbuf[STRERR_BUFSIZE];
314 314
315 struct perf_data_file file = { 315 struct perf_data data = {
316 .mode = PERF_DATA_MODE_READ, 316 .mode = PERF_DATA_MODE_READ,
317 }; 317 };
318 struct perf_session *session = NULL; 318 struct perf_session *session = NULL;
@@ -353,10 +353,10 @@ int cmd_buildid_cache(int argc, const char **argv)
353 nsi = nsinfo__new(ns_id); 353 nsi = nsinfo__new(ns_id);
354 354
355 if (missing_filename) { 355 if (missing_filename) {
356 file.path = missing_filename; 356 data.file.path = missing_filename;
357 file.force = force; 357 data.force = force;
358 358
359 session = perf_session__new(&file, false, NULL); 359 session = perf_session__new(&data, false, NULL);
360 if (session == NULL) 360 if (session == NULL)
361 return -1; 361 return -1;
362 } 362 }
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index ec2f327cd79d..78abbe8d9d5f 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -51,10 +51,12 @@ static bool dso__skip_buildid(struct dso *dso, int with_hits)
51static int perf_session__list_build_ids(bool force, bool with_hits) 51static int perf_session__list_build_ids(bool force, bool with_hits)
52{ 52{
53 struct perf_session *session; 53 struct perf_session *session;
54 struct perf_data_file file = { 54 struct perf_data data = {
55 .path = input_name, 55 .file = {
56 .mode = PERF_DATA_MODE_READ, 56 .path = input_name,
57 .force = force, 57 },
58 .mode = PERF_DATA_MODE_READ,
59 .force = force,
58 }; 60 };
59 61
60 symbol__elf_init(); 62 symbol__elf_init();
@@ -64,7 +66,7 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
64 if (filename__fprintf_build_id(input_name, stdout) > 0) 66 if (filename__fprintf_build_id(input_name, stdout) > 0)
65 goto out; 67 goto out;
66 68
67 session = perf_session__new(&file, false, &build_id__mark_dso_hit_ops); 69 session = perf_session__new(&data, false, &build_id__mark_dso_hit_ops);
68 if (session == NULL) 70 if (session == NULL)
69 return -1; 71 return -1;
70 72
@@ -72,7 +74,7 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
72 * We take all buildids when the file contains AUX area tracing data 74 * We take all buildids when the file contains AUX area tracing data
73 * because we do not decode the trace because it would take too long. 75 * because we do not decode the trace because it would take too long.
74 */ 76 */
75 if (!perf_data_file__is_pipe(&file) && 77 if (!perf_data__is_pipe(&data) &&
76 perf_header__has_feat(&session->header, HEADER_AUXTRACE)) 78 perf_header__has_feat(&session->header, HEADER_AUXTRACE))
77 with_hits = false; 79 with_hits = false;
78 80
@@ -80,7 +82,7 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
80 * in pipe-mode, the only way to get the buildids is to parse 82 * in pipe-mode, the only way to get the buildids is to parse
81 * the record stream. Buildids are stored as RECORD_HEADER_BUILD_ID 83 * the record stream. Buildids are stored as RECORD_HEADER_BUILD_ID
82 */ 84 */
83 if (with_hits || perf_data_file__is_pipe(&file)) 85 if (with_hits || perf_data__is_pipe(&data))
84 perf_session__process_events(session); 86 perf_session__process_events(session);
85 87
86 perf_session__fprintf_dsos_buildid(session, stdout, dso__skip_buildid, with_hits); 88 perf_session__fprintf_dsos_buildid(session, stdout, dso__skip_buildid, with_hits);
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index fd32ad08c6d4..17855c4626a0 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -2524,7 +2524,7 @@ static int perf_c2c__report(int argc, const char **argv)
2524{ 2524{
2525 struct perf_session *session; 2525 struct perf_session *session;
2526 struct ui_progress prog; 2526 struct ui_progress prog;
2527 struct perf_data_file file = { 2527 struct perf_data data = {
2528 .mode = PERF_DATA_MODE_READ, 2528 .mode = PERF_DATA_MODE_READ,
2529 }; 2529 };
2530 char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT; 2530 char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
@@ -2573,8 +2573,8 @@ static int perf_c2c__report(int argc, const char **argv)
2573 if (!input_name || !strlen(input_name)) 2573 if (!input_name || !strlen(input_name))
2574 input_name = "perf.data"; 2574 input_name = "perf.data";
2575 2575
2576 file.path = input_name; 2576 data.file.path = input_name;
2577 file.force = symbol_conf.force; 2577 data.force = symbol_conf.force;
2578 2578
2579 err = setup_display(display); 2579 err = setup_display(display);
2580 if (err) 2580 if (err)
@@ -2592,7 +2592,7 @@ static int perf_c2c__report(int argc, const char **argv)
2592 goto out; 2592 goto out;
2593 } 2593 }
2594 2594
2595 session = perf_session__new(&file, 0, &c2c.tool); 2595 session = perf_session__new(&data, 0, &c2c.tool);
2596 if (session == NULL) { 2596 if (session == NULL) {
2597 pr_debug("No memory for session\n"); 2597 pr_debug("No memory for session\n");
2598 goto out; 2598 goto out;
@@ -2612,7 +2612,7 @@ static int perf_c2c__report(int argc, const char **argv)
2612 goto out_session; 2612 goto out_session;
2613 2613
2614 /* No pipe support at the moment. */ 2614 /* No pipe support at the moment. */
2615 if (perf_data_file__is_pipe(session->file)) { 2615 if (perf_data__is_pipe(session->data)) {
2616 pr_debug("No pipe support at the moment.\n"); 2616 pr_debug("No pipe support at the moment.\n");
2617 goto out_session; 2617 goto out_session;
2618 } 2618 }
@@ -2733,6 +2733,7 @@ static int perf_c2c__record(int argc, const char **argv)
2733 if (!perf_mem_events[j].supported) { 2733 if (!perf_mem_events[j].supported) {
2734 pr_err("failed: event '%s' not supported\n", 2734 pr_err("failed: event '%s' not supported\n",
2735 perf_mem_events[j].name); 2735 perf_mem_events[j].name);
2736 free(rec_argv);
2736 return -1; 2737 return -1;
2737 } 2738 }
2738 2739
diff --git a/tools/perf/builtin-config.c b/tools/perf/builtin-config.c
index abfa49eaf7fd..514f70f95b57 100644
--- a/tools/perf/builtin-config.c
+++ b/tools/perf/builtin-config.c
@@ -35,8 +35,7 @@ static struct option config_options[] = {
35 OPT_END() 35 OPT_END()
36}; 36};
37 37
38static int set_config(struct perf_config_set *set, const char *file_name, 38static int set_config(struct perf_config_set *set, const char *file_name)
39 const char *var, const char *value)
40{ 39{
41 struct perf_config_section *section = NULL; 40 struct perf_config_section *section = NULL;
42 struct perf_config_item *item = NULL; 41 struct perf_config_item *item = NULL;
@@ -50,7 +49,6 @@ static int set_config(struct perf_config_set *set, const char *file_name,
50 if (!fp) 49 if (!fp)
51 return -1; 50 return -1;
52 51
53 perf_config_set__collect(set, file_name, var, value);
54 fprintf(fp, "%s\n", first_line); 52 fprintf(fp, "%s\n", first_line);
55 53
56 /* overwrite configvariables */ 54 /* overwrite configvariables */
@@ -162,6 +160,7 @@ int cmd_config(int argc, const char **argv)
162 struct perf_config_set *set; 160 struct perf_config_set *set;
163 char *user_config = mkpath("%s/.perfconfig", getenv("HOME")); 161 char *user_config = mkpath("%s/.perfconfig", getenv("HOME"));
164 const char *config_filename; 162 const char *config_filename;
163 bool changed = false;
165 164
166 argc = parse_options(argc, argv, config_options, config_usage, 165 argc = parse_options(argc, argv, config_options, config_usage,
167 PARSE_OPT_STOP_AT_NON_OPTION); 166 PARSE_OPT_STOP_AT_NON_OPTION);
@@ -232,15 +231,26 @@ int cmd_config(int argc, const char **argv)
232 goto out_err; 231 goto out_err;
233 } 232 }
234 } else { 233 } else {
235 if (set_config(set, config_filename, var, value) < 0) { 234 if (perf_config_set__collect(set, config_filename,
236 pr_err("Failed to set '%s=%s' on %s\n", 235 var, value) < 0) {
237 var, value, config_filename); 236 pr_err("Failed to add '%s=%s'\n",
237 var, value);
238 free(arg); 238 free(arg);
239 goto out_err; 239 goto out_err;
240 } 240 }
241 changed = true;
241 } 242 }
242 free(arg); 243 free(arg);
243 } 244 }
245
246 if (!changed)
247 break;
248
249 if (set_config(set, config_filename) < 0) {
250 pr_err("Failed to set the configs on %s\n",
251 config_filename);
252 goto out_err;
253 }
244 } 254 }
245 255
246 ret = 0; 256 ret = 0;
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 56223bdfa205..d660cb7b222b 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -48,7 +48,7 @@ struct diff_hpp_fmt {
48 48
49struct data__file { 49struct data__file {
50 struct perf_session *session; 50 struct perf_session *session;
51 struct perf_data_file file; 51 struct perf_data data;
52 int idx; 52 int idx;
53 struct hists *hists; 53 struct hists *hists;
54 struct diff_hpp_fmt fmt[PERF_HPP_DIFF__MAX_INDEX]; 54 struct diff_hpp_fmt fmt[PERF_HPP_DIFF__MAX_INDEX];
@@ -708,7 +708,7 @@ static void data__fprintf(void)
708 708
709 data__for_each_file(i, d) 709 data__for_each_file(i, d)
710 fprintf(stdout, "# [%d] %s %s\n", 710 fprintf(stdout, "# [%d] %s %s\n",
711 d->idx, d->file.path, 711 d->idx, d->data.file.path,
712 !d->idx ? "(Baseline)" : ""); 712 !d->idx ? "(Baseline)" : "");
713 713
714 fprintf(stdout, "#\n"); 714 fprintf(stdout, "#\n");
@@ -777,16 +777,16 @@ static int __cmd_diff(void)
777 int ret = -EINVAL, i; 777 int ret = -EINVAL, i;
778 778
779 data__for_each_file(i, d) { 779 data__for_each_file(i, d) {
780 d->session = perf_session__new(&d->file, false, &tool); 780 d->session = perf_session__new(&d->data, false, &tool);
781 if (!d->session) { 781 if (!d->session) {
782 pr_err("Failed to open %s\n", d->file.path); 782 pr_err("Failed to open %s\n", d->data.file.path);
783 ret = -1; 783 ret = -1;
784 goto out_delete; 784 goto out_delete;
785 } 785 }
786 786
787 ret = perf_session__process_events(d->session); 787 ret = perf_session__process_events(d->session);
788 if (ret) { 788 if (ret) {
789 pr_err("Failed to process %s\n", d->file.path); 789 pr_err("Failed to process %s\n", d->data.file.path);
790 goto out_delete; 790 goto out_delete;
791 } 791 }
792 792
@@ -1287,11 +1287,11 @@ static int data_init(int argc, const char **argv)
1287 return -ENOMEM; 1287 return -ENOMEM;
1288 1288
1289 data__for_each_file(i, d) { 1289 data__for_each_file(i, d) {
1290 struct perf_data_file *file = &d->file; 1290 struct perf_data *data = &d->data;
1291 1291
1292 file->path = use_default ? defaults[i] : argv[i]; 1292 data->file.path = use_default ? defaults[i] : argv[i];
1293 file->mode = PERF_DATA_MODE_READ, 1293 data->mode = PERF_DATA_MODE_READ,
1294 file->force = force, 1294 data->force = force,
1295 1295
1296 d->idx = i; 1296 d->idx = i;
1297 } 1297 }
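
Most hunks in this series are the same mechanical rename: struct perf_data_file becomes a file member nested inside struct perf_data, so call sites move from file.path to data.file.path while mode/force/size stay on the outer struct. The layout implied by the diff is roughly the following (a sketch of the shape, not the actual tools/perf/util/data.h contents):

    #include <stdbool.h>

    enum perf_data_mode { PERF_DATA_MODE_WRITE, PERF_DATA_MODE_READ };

    struct perf_data_file {
            const char      *path;
            int              fd;
    };

    struct perf_data {
            struct perf_data_file    file;
            bool                     is_pipe;
            bool                     force;
            unsigned long            size;
            enum perf_data_mode      mode;
    };

    /* Nested designated initializers keep the call sites readable: */
    static struct perf_data data = {
            .file = { .path = "perf.data", },
            .mode = PERF_DATA_MODE_READ,
    };
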
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index cdd145613f60..e06e822ce634 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -22,14 +22,16 @@ static int __cmd_evlist(const char *file_name, struct perf_attr_details *details
22{ 22{
23 struct perf_session *session; 23 struct perf_session *session;
24 struct perf_evsel *pos; 24 struct perf_evsel *pos;
25 struct perf_data_file file = { 25 struct perf_data data = {
26 .path = file_name, 26 .file = {
27 .mode = PERF_DATA_MODE_READ, 27 .path = file_name,
28 .force = details->force, 28 },
29 .mode = PERF_DATA_MODE_READ,
30 .force = details->force,
29 }; 31 };
30 bool has_tracepoint = false; 32 bool has_tracepoint = false;
31 33
32 session = perf_session__new(&file, 0, NULL); 34 session = perf_session__new(&data, 0, NULL);
33 if (session == NULL) 35 if (session == NULL)
34 return -1; 36 return -1;
35 37
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index bd1fedef3d1c..a0f7ed2b869b 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -284,7 +284,7 @@ static int perf_help_config(const char *var, const char *value, void *cb)
284 add_man_viewer(value); 284 add_man_viewer(value);
285 return 0; 285 return 0;
286 } 286 }
287 if (!strstarts(var, "man.")) 287 if (strstarts(var, "man."))
288 return add_man_viewer_info(var, value); 288 return add_man_viewer_info(var, value);
289 289
290 return 0; 290 return 0;
@@ -314,7 +314,7 @@ static const char *cmd_to_page(const char *perf_cmd)
314 314
315 if (!perf_cmd) 315 if (!perf_cmd)
316 return "perf"; 316 return "perf";
317 else if (!strstarts(perf_cmd, "perf")) 317 else if (strstarts(perf_cmd, "perf"))
318 return perf_cmd; 318 return perf_cmd;
319 319
320 return asprintf(&s, "perf-%s", perf_cmd) < 0 ? NULL : s; 320 return asprintf(&s, "perf-%s", perf_cmd) < 0 ? NULL : s;
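
The two one-character changes above flip the sense of a test: strstarts(str, prefix) returns true when str begins with prefix, the opposite convention of the old prefixcmp() that returned 0 on a match, so the earlier conversion had the logic inverted. Restating the helper for reference (this mirrors the tools/include string helper; treat it as a sketch):

    #include <stdbool.h>
    #include <string.h>

    static inline bool strstarts(const char *str, const char *prefix)
    {
            return strncmp(str, prefix, strlen(prefix)) == 0;
    }

    /*
     * strstarts("man.viewer", "man.")  -> true: handled as a man.* config key
     * strstarts("perf-report", "perf") -> true: already a full command name,
     *                                          returned unchanged
     */
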
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 3e0e73b0dc67..16a28547ca86 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -36,7 +36,7 @@ struct perf_inject {
36 bool strip; 36 bool strip;
37 bool jit_mode; 37 bool jit_mode;
38 const char *input_name; 38 const char *input_name;
39 struct perf_data_file output; 39 struct perf_data output;
40 u64 bytes_written; 40 u64 bytes_written;
41 u64 aux_id; 41 u64 aux_id;
42 struct list_head samples; 42 struct list_head samples;
@@ -53,7 +53,7 @@ static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
53{ 53{
54 ssize_t size; 54 ssize_t size;
55 55
56 size = perf_data_file__write(&inject->output, buf, sz); 56 size = perf_data__write(&inject->output, buf, sz);
57 if (size < 0) 57 if (size < 0)
58 return -errno; 58 return -errno;
59 59
@@ -146,7 +146,7 @@ static s64 perf_event__repipe_auxtrace(struct perf_tool *tool,
146 if (!inject->output.is_pipe) { 146 if (!inject->output.is_pipe) {
147 off_t offset; 147 off_t offset;
148 148
149 offset = lseek(inject->output.fd, 0, SEEK_CUR); 149 offset = lseek(inject->output.file.fd, 0, SEEK_CUR);
150 if (offset == -1) 150 if (offset == -1)
151 return -errno; 151 return -errno;
152 ret = auxtrace_index__auxtrace_event(&session->auxtrace_index, 152 ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
@@ -155,11 +155,11 @@ static s64 perf_event__repipe_auxtrace(struct perf_tool *tool,
155 return ret; 155 return ret;
156 } 156 }
157 157
158 if (perf_data_file__is_pipe(session->file) || !session->one_mmap) { 158 if (perf_data__is_pipe(session->data) || !session->one_mmap) {
159 ret = output_bytes(inject, event, event->header.size); 159 ret = output_bytes(inject, event, event->header.size);
160 if (ret < 0) 160 if (ret < 0)
161 return ret; 161 return ret;
162 ret = copy_bytes(inject, perf_data_file__fd(session->file), 162 ret = copy_bytes(inject, perf_data__fd(session->data),
163 event->auxtrace.size); 163 event->auxtrace.size);
164 } else { 164 } else {
165 ret = output_bytes(inject, event, 165 ret = output_bytes(inject, event,
@@ -638,8 +638,8 @@ static int __cmd_inject(struct perf_inject *inject)
638{ 638{
639 int ret = -EINVAL; 639 int ret = -EINVAL;
640 struct perf_session *session = inject->session; 640 struct perf_session *session = inject->session;
641 struct perf_data_file *file_out = &inject->output; 641 struct perf_data *data_out = &inject->output;
642 int fd = perf_data_file__fd(file_out); 642 int fd = perf_data__fd(data_out);
643 u64 output_data_offset; 643 u64 output_data_offset;
644 644
645 signal(SIGINT, sig_handler); 645 signal(SIGINT, sig_handler);
@@ -694,14 +694,14 @@ static int __cmd_inject(struct perf_inject *inject)
694 if (!inject->itrace_synth_opts.set) 694 if (!inject->itrace_synth_opts.set)
695 auxtrace_index__free(&session->auxtrace_index); 695 auxtrace_index__free(&session->auxtrace_index);
696 696
697 if (!file_out->is_pipe) 697 if (!data_out->is_pipe)
698 lseek(fd, output_data_offset, SEEK_SET); 698 lseek(fd, output_data_offset, SEEK_SET);
699 699
700 ret = perf_session__process_events(session); 700 ret = perf_session__process_events(session);
701 if (ret) 701 if (ret)
702 return ret; 702 return ret;
703 703
704 if (!file_out->is_pipe) { 704 if (!data_out->is_pipe) {
705 if (inject->build_ids) 705 if (inject->build_ids)
706 perf_header__set_feat(&session->header, 706 perf_header__set_feat(&session->header,
707 HEADER_BUILD_ID); 707 HEADER_BUILD_ID);
@@ -776,11 +776,13 @@ int cmd_inject(int argc, const char **argv)
776 .input_name = "-", 776 .input_name = "-",
777 .samples = LIST_HEAD_INIT(inject.samples), 777 .samples = LIST_HEAD_INIT(inject.samples),
778 .output = { 778 .output = {
779 .path = "-", 779 .file = {
780 .mode = PERF_DATA_MODE_WRITE, 780 .path = "-",
781 },
782 .mode = PERF_DATA_MODE_WRITE,
781 }, 783 },
782 }; 784 };
783 struct perf_data_file file = { 785 struct perf_data data = {
784 .mode = PERF_DATA_MODE_READ, 786 .mode = PERF_DATA_MODE_READ,
785 }; 787 };
786 int ret; 788 int ret;
@@ -790,7 +792,7 @@ int cmd_inject(int argc, const char **argv)
790 "Inject build-ids into the output stream"), 792 "Inject build-ids into the output stream"),
791 OPT_STRING('i', "input", &inject.input_name, "file", 793 OPT_STRING('i', "input", &inject.input_name, "file",
792 "input file name"), 794 "input file name"),
793 OPT_STRING('o', "output", &inject.output.path, "file", 795 OPT_STRING('o', "output", &inject.output.file.path, "file",
794 "output file name"), 796 "output file name"),
795 OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat, 797 OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
796 "Merge sched-stat and sched-switch for getting events " 798 "Merge sched-stat and sched-switch for getting events "
@@ -802,7 +804,7 @@ int cmd_inject(int argc, const char **argv)
802 "be more verbose (show build ids, etc)"), 804 "be more verbose (show build ids, etc)"),
803 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file", 805 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
804 "kallsyms pathname"), 806 "kallsyms pathname"),
805 OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"), 807 OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
806 OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts, 808 OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
807 NULL, "opts", "Instruction Tracing options", 809 NULL, "opts", "Instruction Tracing options",
808 itrace_parse_synth_opts), 810 itrace_parse_synth_opts),
@@ -830,15 +832,15 @@ int cmd_inject(int argc, const char **argv)
830 return -1; 832 return -1;
831 } 833 }
832 834
833 if (perf_data_file__open(&inject.output)) { 835 if (perf_data__open(&inject.output)) {
834 perror("failed to create output file"); 836 perror("failed to create output file");
835 return -1; 837 return -1;
836 } 838 }
837 839
838 inject.tool.ordered_events = inject.sched_stat; 840 inject.tool.ordered_events = inject.sched_stat;
839 841
840 file.path = inject.input_name; 842 data.file.path = inject.input_name;
841 inject.session = perf_session__new(&file, true, &inject.tool); 843 inject.session = perf_session__new(&data, true, &inject.tool);
842 if (inject.session == NULL) 844 if (inject.session == NULL)
843 return -1; 845 return -1;
844 846
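
builtin-inject.c keeps the earlier split between output kinds after the rename: a regular output file can be lseek()ed back to patch the header and reposition at the data offset, while a pipe has to be written strictly in order, which is why most of the fixups stay guarded by is_pipe. A tiny, illustrative sketch of that distinction:

    #include <stdbool.h>
    #include <unistd.h>

    /* Jump past the reserved header area, but only if the fd is seekable. */
    static int start_data_section(int fd, bool is_pipe, off_t data_offset)
    {
            if (is_pipe)
                    return 0;       /* header was already streamed in order */

            if (lseek(fd, data_offset, SEEK_SET) == (off_t)-1)
                    return -1;

            return 0;
    }
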
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 35d4b9c9a9e8..ae11e4c3516a 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -641,7 +641,6 @@ static const struct {
641 { "__GFP_ATOMIC", "_A" }, 641 { "__GFP_ATOMIC", "_A" },
642 { "__GFP_IO", "I" }, 642 { "__GFP_IO", "I" },
643 { "__GFP_FS", "F" }, 643 { "__GFP_FS", "F" },
644 { "__GFP_COLD", "CO" },
645 { "__GFP_NOWARN", "NWR" }, 644 { "__GFP_NOWARN", "NWR" },
646 { "__GFP_RETRY_MAYFAIL", "R" }, 645 { "__GFP_RETRY_MAYFAIL", "R" },
647 { "__GFP_NOFAIL", "NF" }, 646 { "__GFP_NOFAIL", "NF" },
@@ -655,7 +654,6 @@ static const struct {
655 { "__GFP_RECLAIMABLE", "RC" }, 654 { "__GFP_RECLAIMABLE", "RC" },
656 { "__GFP_MOVABLE", "M" }, 655 { "__GFP_MOVABLE", "M" },
657 { "__GFP_ACCOUNT", "AC" }, 656 { "__GFP_ACCOUNT", "AC" },
658 { "__GFP_NOTRACK", "NT" },
659 { "__GFP_WRITE", "WR" }, 657 { "__GFP_WRITE", "WR" },
660 { "__GFP_RECLAIM", "R" }, 658 { "__GFP_RECLAIM", "R" },
661 { "__GFP_DIRECT_RECLAIM", "DR" }, 659 { "__GFP_DIRECT_RECLAIM", "DR" },
@@ -1894,7 +1892,7 @@ int cmd_kmem(int argc, const char **argv)
1894{ 1892{
1895 const char * const default_slab_sort = "frag,hit,bytes"; 1893 const char * const default_slab_sort = "frag,hit,bytes";
1896 const char * const default_page_sort = "bytes,hit"; 1894 const char * const default_page_sort = "bytes,hit";
1897 struct perf_data_file file = { 1895 struct perf_data data = {
1898 .mode = PERF_DATA_MODE_READ, 1896 .mode = PERF_DATA_MODE_READ,
1899 }; 1897 };
1900 const struct option kmem_options[] = { 1898 const struct option kmem_options[] = {
@@ -1910,7 +1908,7 @@ int cmd_kmem(int argc, const char **argv)
1910 "page, order, migtype, gfp", parse_sort_opt), 1908 "page, order, migtype, gfp", parse_sort_opt),
1911 OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt), 1909 OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
1912 OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"), 1910 OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
1913 OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"), 1911 OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
1914 OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator", 1912 OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator",
1915 parse_slab_opt), 1913 parse_slab_opt),
1916 OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator", 1914 OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
@@ -1950,9 +1948,9 @@ int cmd_kmem(int argc, const char **argv)
1950 return __cmd_record(argc, argv); 1948 return __cmd_record(argc, argv);
1951 } 1949 }
1952 1950
1953 file.path = input_name; 1951 data.file.path = input_name;
1954 1952
1955 kmem_session = session = perf_session__new(&file, false, &perf_kmem); 1953 kmem_session = session = perf_session__new(&data, false, &perf_kmem);
1956 if (session == NULL) 1954 if (session == NULL)
1957 return -1; 1955 return -1;
1958 1956
@@ -1984,7 +1982,8 @@ int cmd_kmem(int argc, const char **argv)
1984 1982
1985 if (perf_time__parse_str(&ptime, time_str) != 0) { 1983 if (perf_time__parse_str(&ptime, time_str) != 0) {
1986 pr_err("Invalid time string\n"); 1984 pr_err("Invalid time string\n");
1987 return -EINVAL; 1985 ret = -EINVAL;
1986 goto out_delete;
1988 } 1987 }
1989 1988
1990 if (!strcmp(argv[0], "stat")) { 1989 if (!strcmp(argv[0], "stat")) {
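
The last kmem hunk is an error-path fix: the time-string check used to return -EINVAL directly and leak the session created just above; it now sets ret and jumps to the common out_delete label. The shape of that idiom, with stand-ins instead of perf_session__new()/__delete():

    #include <errno.h>
    #include <stdlib.h>

    struct session { int dummy; };

    static struct session *session_new(void)      { return calloc(1, sizeof(struct session)); }
    static void session_delete(struct session *s) { free(s); }
    static int parse_time(const char *s)          { return (s && *s) ? 0 : -1; }

    static int cmd(const char *time_str)
    {
            struct session *s = session_new();
            int ret = 0;

            if (!s)
                    return -1;

            if (parse_time(time_str) != 0) {
                    ret = -EINVAL;          /* was: return -EINVAL, leaking s */
                    goto out_delete;
            }

            /* ... normal work on the session ... */

    out_delete:
            session_delete(s);              /* single exit path releases it */
            return ret;
    }
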
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 5fb40368d5d1..0c36f2ac6a0e 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -35,7 +35,6 @@
35#include <termios.h> 35#include <termios.h>
36#include <semaphore.h> 36#include <semaphore.h>
37#include <signal.h> 37#include <signal.h>
38#include <pthread.h>
39#include <math.h> 38#include <math.h>
40 39
41static const char *get_filename_for_perf_kvm(void) 40static const char *get_filename_for_perf_kvm(void)
@@ -1069,10 +1068,12 @@ static int read_events(struct perf_kvm_stat *kvm)
1069 .namespaces = perf_event__process_namespaces, 1068 .namespaces = perf_event__process_namespaces,
1070 .ordered_events = true, 1069 .ordered_events = true,
1071 }; 1070 };
1072 struct perf_data_file file = { 1071 struct perf_data file = {
1073 .path = kvm->file_name, 1072 .file = {
1074 .mode = PERF_DATA_MODE_READ, 1073 .path = kvm->file_name,
1075 .force = kvm->force, 1074 },
1075 .mode = PERF_DATA_MODE_READ,
1076 .force = kvm->force,
1076 }; 1077 };
1077 1078
1078 kvm->tool = eops; 1079 kvm->tool = eops;
@@ -1360,7 +1361,7 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
1360 "perf kvm stat live [<options>]", 1361 "perf kvm stat live [<options>]",
1361 NULL 1362 NULL
1362 }; 1363 };
1363 struct perf_data_file file = { 1364 struct perf_data data = {
1364 .mode = PERF_DATA_MODE_WRITE, 1365 .mode = PERF_DATA_MODE_WRITE,
1365 }; 1366 };
1366 1367
@@ -1434,7 +1435,7 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
1434 /* 1435 /*
1435 * perf session 1436 * perf session
1436 */ 1437 */
1437 kvm->session = perf_session__new(&file, false, &kvm->tool); 1438 kvm->session = perf_session__new(&data, false, &kvm->tool);
1438 if (kvm->session == NULL) { 1439 if (kvm->session == NULL) {
1439 err = -1; 1440 err = -1;
1440 goto out; 1441 goto out;
@@ -1443,7 +1444,8 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
1443 perf_session__set_id_hdr_size(kvm->session); 1444 perf_session__set_id_hdr_size(kvm->session);
1444 ordered_events__set_copy_on_queue(&kvm->session->ordered_events, true); 1445 ordered_events__set_copy_on_queue(&kvm->session->ordered_events, true);
1445 machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target, 1446 machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target,
1446 kvm->evlist->threads, false, kvm->opts.proc_map_timeout); 1447 kvm->evlist->threads, false,
1448 kvm->opts.proc_map_timeout, 1);
1447 err = kvm_live_open_events(kvm); 1449 err = kvm_live_open_events(kvm);
1448 if (err) 1450 if (err)
1449 goto out; 1451 goto out;
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index eeedbe433776..ead221e49f00 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -16,6 +16,7 @@
16#include "util/cache.h" 16#include "util/cache.h"
17#include "util/pmu.h" 17#include "util/pmu.h"
18#include "util/debug.h" 18#include "util/debug.h"
19#include "util/metricgroup.h"
19#include <subcmd/parse-options.h> 20#include <subcmd/parse-options.h>
20 21
21static bool desc_flag = true; 22static bool desc_flag = true;
@@ -80,6 +81,10 @@ int cmd_list(int argc, const char **argv)
80 long_desc_flag, details_flag); 81 long_desc_flag, details_flag);
81 else if (strcmp(argv[i], "sdt") == 0) 82 else if (strcmp(argv[i], "sdt") == 0)
82 print_sdt_events(NULL, NULL, raw_dump); 83 print_sdt_events(NULL, NULL, raw_dump);
84 else if (strcmp(argv[i], "metric") == 0)
85 metricgroup__print(true, false, NULL, raw_dump);
86 else if (strcmp(argv[i], "metricgroup") == 0)
87 metricgroup__print(false, true, NULL, raw_dump);
83 else if ((sep = strchr(argv[i], ':')) != NULL) { 88 else if ((sep = strchr(argv[i], ':')) != NULL) {
84 int sep_idx; 89 int sep_idx;
85 90
@@ -97,6 +102,7 @@ int cmd_list(int argc, const char **argv)
97 s[sep_idx] = '\0'; 102 s[sep_idx] = '\0';
98 print_tracepoint_events(s, s + sep_idx + 1, raw_dump); 103 print_tracepoint_events(s, s + sep_idx + 1, raw_dump);
99 print_sdt_events(s, s + sep_idx + 1, raw_dump); 104 print_sdt_events(s, s + sep_idx + 1, raw_dump);
105 metricgroup__print(true, true, s, raw_dump);
100 free(s); 106 free(s);
101 } else { 107 } else {
102 if (asprintf(&s, "*%s*", argv[i]) < 0) { 108 if (asprintf(&s, "*%s*", argv[i]) < 0) {
@@ -113,6 +119,7 @@ int cmd_list(int argc, const char **argv)
113 details_flag); 119 details_flag);
114 print_tracepoint_events(NULL, s, raw_dump); 120 print_tracepoint_events(NULL, s, raw_dump);
115 print_sdt_events(NULL, s, raw_dump); 121 print_sdt_events(NULL, s, raw_dump);
122 metricgroup__print(true, true, NULL, raw_dump);
116 free(s); 123 free(s);
117 } 124 }
118 } 125 }
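
With the metricgroup hooks in place, `perf list metric` and `perf list metricgroup` become recognized keywords, and a bare glob argument now matches metrics alongside events, tracepoints and SDT markers. The new branches slot into the existing keyword dispatch, roughly like this (the print helper is a stand-in for metricgroup__print()):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static void print_metrics(bool metrics, bool groups, const char *filter)
    {
            printf("metrics=%d metricgroups=%d filter=%s\n",
                   metrics, groups, filter ? filter : "*");
    }

    static void list_one(const char *arg)
    {
            if (strcmp(arg, "metric") == 0)
                    print_metrics(true, false, NULL);   /* metric names only */
            else if (strcmp(arg, "metricgroup") == 0)
                    print_metrics(false, true, NULL);   /* group names only */
            else
                    print_metrics(true, true, arg);     /* filter both by glob */
    }
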
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index fe69cd6b89e1..6e0189df2b3b 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -865,13 +865,15 @@ static int __cmd_report(bool display_info)
865 .namespaces = perf_event__process_namespaces, 865 .namespaces = perf_event__process_namespaces,
866 .ordered_events = true, 866 .ordered_events = true,
867 }; 867 };
868 struct perf_data_file file = { 868 struct perf_data data = {
869 .path = input_name, 869 .file = {
870 .mode = PERF_DATA_MODE_READ, 870 .path = input_name,
871 .force = force, 871 },
872 .mode = PERF_DATA_MODE_READ,
873 .force = force,
872 }; 874 };
873 875
874 session = perf_session__new(&file, false, &eops); 876 session = perf_session__new(&data, false, &eops);
875 if (!session) { 877 if (!session) {
876 pr_err("Initializing perf session failed\n"); 878 pr_err("Initializing perf session failed\n");
877 return -1; 879 return -1;
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 4db960085273..506564651cda 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -113,6 +113,7 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
113 if (!perf_mem_events[j].supported) { 113 if (!perf_mem_events[j].supported) {
114 pr_err("failed: event '%s' not supported\n", 114 pr_err("failed: event '%s' not supported\n",
115 perf_mem_events__name(j)); 115 perf_mem_events__name(j));
116 free(rec_argv);
116 return -1; 117 return -1;
117 } 118 }
118 119
@@ -236,13 +237,15 @@ static int process_sample_event(struct perf_tool *tool,
236 237
237static int report_raw_events(struct perf_mem *mem) 238static int report_raw_events(struct perf_mem *mem)
238{ 239{
239 struct perf_data_file file = { 240 struct perf_data data = {
240 .path = input_name, 241 .file = {
241 .mode = PERF_DATA_MODE_READ, 242 .path = input_name,
242 .force = mem->force, 243 },
244 .mode = PERF_DATA_MODE_READ,
245 .force = mem->force,
243 }; 246 };
244 int ret; 247 int ret;
245 struct perf_session *session = perf_session__new(&file, false, 248 struct perf_session *session = perf_session__new(&data, false,
246 &mem->tool); 249 &mem->tool);
247 250
248 if (session == NULL) 251 if (session == NULL)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 0c95ffefb6cc..003255910c05 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -67,7 +67,7 @@ struct record {
67 struct perf_tool tool; 67 struct perf_tool tool;
68 struct record_opts opts; 68 struct record_opts opts;
69 u64 bytes_written; 69 u64 bytes_written;
70 struct perf_data_file file; 70 struct perf_data data;
71 struct auxtrace_record *itr; 71 struct auxtrace_record *itr;
72 struct perf_evlist *evlist; 72 struct perf_evlist *evlist;
73 struct perf_session *session; 73 struct perf_session *session;
@@ -108,7 +108,7 @@ static bool switch_output_time(struct record *rec)
108 108
109static int record__write(struct record *rec, void *bf, size_t size) 109static int record__write(struct record *rec, void *bf, size_t size)
110{ 110{
111 if (perf_data_file__write(rec->session->file, bf, size) < 0) { 111 if (perf_data__write(rec->session->data, bf, size) < 0) {
112 pr_err("failed to write perf data, error: %m\n"); 112 pr_err("failed to write perf data, error: %m\n");
113 return -1; 113 return -1;
114 } 114 }
@@ -130,107 +130,12 @@ static int process_synthesized_event(struct perf_tool *tool,
130 return record__write(rec, event, event->header.size); 130 return record__write(rec, event, event->header.size);
131} 131}
132 132
133static int 133static int record__pushfn(void *to, void *bf, size_t size)
134backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
135{ 134{
136 struct perf_event_header *pheader; 135 struct record *rec = to;
137 u64 evt_head = head;
138 int size = mask + 1;
139
140 pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
141 pheader = (struct perf_event_header *)(buf + (head & mask));
142 *start = head;
143 while (true) {
144 if (evt_head - head >= (unsigned int)size) {
145 pr_debug("Finished reading backward ring buffer: rewind\n");
146 if (evt_head - head > (unsigned int)size)
147 evt_head -= pheader->size;
148 *end = evt_head;
149 return 0;
150 }
151
152 pheader = (struct perf_event_header *)(buf + (evt_head & mask));
153
154 if (pheader->size == 0) {
155 pr_debug("Finished reading backward ring buffer: get start\n");
156 *end = evt_head;
157 return 0;
158 }
159
160 evt_head += pheader->size;
161 pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
162 }
163 WARN_ONCE(1, "Shouldn't get here\n");
164 return -1;
165}
166
167static int
168rb_find_range(void *data, int mask, u64 head, u64 old,
169 u64 *start, u64 *end, bool backward)
170{
171 if (!backward) {
172 *start = old;
173 *end = head;
174 return 0;
175 }
176
177 return backward_rb_find_range(data, mask, head, start, end);
178}
179
180static int
181record__mmap_read(struct record *rec, struct perf_mmap *md,
182 bool overwrite, bool backward)
183{
184 u64 head = perf_mmap__read_head(md);
185 u64 old = md->prev;
186 u64 end = head, start = old;
187 unsigned char *data = md->base + page_size;
188 unsigned long size;
189 void *buf;
190 int rc = 0;
191
192 if (rb_find_range(data, md->mask, head,
193 old, &start, &end, backward))
194 return -1;
195
196 if (start == end)
197 return 0;
198 136
199 rec->samples++; 137 rec->samples++;
200 138 return record__write(rec, bf, size);
201 size = end - start;
202 if (size > (unsigned long)(md->mask) + 1) {
203 WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
204
205 md->prev = head;
206 perf_mmap__consume(md, overwrite || backward);
207 return 0;
208 }
209
210 if ((start & md->mask) + size != (end & md->mask)) {
211 buf = &data[start & md->mask];
212 size = md->mask + 1 - (start & md->mask);
213 start += size;
214
215 if (record__write(rec, buf, size) < 0) {
216 rc = -1;
217 goto out;
218 }
219 }
220
221 buf = &data[start & md->mask];
222 size = end - start;
223 start += size;
224
225 if (record__write(rec, buf, size) < 0) {
226 rc = -1;
227 goto out;
228 }
229
230 md->prev = head;
231 perf_mmap__consume(md, overwrite || backward);
232out:
233 return rc;
234} 139}
235 140
236static volatile int done; 141static volatile int done;
@@ -269,13 +174,13 @@ static int record__process_auxtrace(struct perf_tool *tool,
269 size_t len1, void *data2, size_t len2) 174 size_t len1, void *data2, size_t len2)
270{ 175{
271 struct record *rec = container_of(tool, struct record, tool); 176 struct record *rec = container_of(tool, struct record, tool);
272 struct perf_data_file *file = &rec->file; 177 struct perf_data *data = &rec->data;
273 size_t padding; 178 size_t padding;
274 u8 pad[8] = {0}; 179 u8 pad[8] = {0};
275 180
276 if (!perf_data_file__is_pipe(file)) { 181 if (!perf_data__is_pipe(data)) {
277 off_t file_offset; 182 off_t file_offset;
278 int fd = perf_data_file__fd(file); 183 int fd = perf_data__fd(data);
279 int err; 184 int err;
280 185
281 file_offset = lseek(fd, 0, SEEK_CUR); 186 file_offset = lseek(fd, 0, SEEK_CUR);
@@ -434,6 +339,22 @@ static int record__open(struct record *rec)
434 struct perf_evsel_config_term *err_term; 339 struct perf_evsel_config_term *err_term;
435 int rc = 0; 340 int rc = 0;
436 341
342 /*
343 * For initial_delay we need to add a dummy event so that we can track
344 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
345 * real events, the ones asked by the user.
346 */
347 if (opts->initial_delay) {
348 if (perf_evlist__add_dummy(evlist))
349 return -ENOMEM;
350
351 pos = perf_evlist__first(evlist);
352 pos->tracking = 0;
353 pos = perf_evlist__last(evlist);
354 pos->tracking = 1;
355 pos->attr.enable_on_exec = 1;
356 }
357
437 perf_evlist__config(evlist, opts, &callchain_param); 358 perf_evlist__config(evlist, opts, &callchain_param);
438 359
439 evlist__for_each_entry(evlist, pos) { 360 evlist__for_each_entry(evlist, pos) {
@@ -494,10 +415,10 @@ static int process_sample_event(struct perf_tool *tool,
494 415
495static int process_buildids(struct record *rec) 416static int process_buildids(struct record *rec)
496{ 417{
497 struct perf_data_file *file = &rec->file; 418 struct perf_data *data = &rec->data;
498 struct perf_session *session = rec->session; 419 struct perf_session *session = rec->session;
499 420
500 if (file->size == 0) 421 if (data->size == 0)
501 return 0; 422 return 0;
502 423
503 /* 424 /*
@@ -577,8 +498,7 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
577 struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap; 498 struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap;
578 499
579 if (maps[i].base) { 500 if (maps[i].base) {
580 if (record__mmap_read(rec, &maps[i], 501 if (perf_mmap__push(&maps[i], evlist->overwrite, backward, rec, record__pushfn) != 0) {
581 evlist->overwrite, backward) != 0) {
582 rc = -1; 502 rc = -1;
583 goto out; 503 goto out;
584 } 504 }
@@ -641,14 +561,14 @@ static void record__init_features(struct record *rec)
641static void 561static void
642record__finish_output(struct record *rec) 562record__finish_output(struct record *rec)
643{ 563{
644 struct perf_data_file *file = &rec->file; 564 struct perf_data *data = &rec->data;
645 int fd = perf_data_file__fd(file); 565 int fd = perf_data__fd(data);
646 566
647 if (file->is_pipe) 567 if (data->is_pipe)
648 return; 568 return;
649 569
650 rec->session->header.data_size += rec->bytes_written; 570 rec->session->header.data_size += rec->bytes_written;
651 file->size = lseek(perf_data_file__fd(file), 0, SEEK_CUR); 571 data->size = lseek(perf_data__fd(data), 0, SEEK_CUR);
652 572
653 if (!rec->no_buildid) { 573 if (!rec->no_buildid) {
654 process_buildids(rec); 574 process_buildids(rec);
@@ -687,7 +607,7 @@ static int record__synthesize(struct record *rec, bool tail);
687static int 607static int
688record__switch_output(struct record *rec, bool at_exit) 608record__switch_output(struct record *rec, bool at_exit)
689{ 609{
690 struct perf_data_file *file = &rec->file; 610 struct perf_data *data = &rec->data;
691 int fd, err; 611 int fd, err;
692 612
693 /* Same Size: "2015122520103046"*/ 613 /* Same Size: "2015122520103046"*/
@@ -705,7 +625,7 @@ record__switch_output(struct record *rec, bool at_exit)
705 return -EINVAL; 625 return -EINVAL;
706 } 626 }
707 627
708 fd = perf_data_file__switch(file, timestamp, 628 fd = perf_data__switch(data, timestamp,
709 rec->session->header.data_offset, 629 rec->session->header.data_offset,
710 at_exit); 630 at_exit);
711 if (fd >= 0 && !at_exit) { 631 if (fd >= 0 && !at_exit) {
@@ -715,7 +635,7 @@ record__switch_output(struct record *rec, bool at_exit)
715 635
716 if (!quiet) 636 if (!quiet)
717 fprintf(stderr, "[ perf record: Dump %s.%s ]\n", 637 fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
718 file->path, timestamp); 638 data->file.path, timestamp);
719 639
720 /* Output tracking events */ 640 /* Output tracking events */
721 if (!at_exit) { 641 if (!at_exit) {
@@ -790,16 +710,16 @@ static int record__synthesize(struct record *rec, bool tail)
790{ 710{
791 struct perf_session *session = rec->session; 711 struct perf_session *session = rec->session;
792 struct machine *machine = &session->machines.host; 712 struct machine *machine = &session->machines.host;
793 struct perf_data_file *file = &rec->file; 713 struct perf_data *data = &rec->data;
794 struct record_opts *opts = &rec->opts; 714 struct record_opts *opts = &rec->opts;
795 struct perf_tool *tool = &rec->tool; 715 struct perf_tool *tool = &rec->tool;
796 int fd = perf_data_file__fd(file); 716 int fd = perf_data__fd(data);
797 int err = 0; 717 int err = 0;
798 718
799 if (rec->opts.tail_synthesize != tail) 719 if (rec->opts.tail_synthesize != tail)
800 return 0; 720 return 0;
801 721
802 if (file->is_pipe) { 722 if (data->is_pipe) {
803 err = perf_event__synthesize_features( 723 err = perf_event__synthesize_features(
804 tool, session, rec->evlist, process_synthesized_event); 724 tool, session, rec->evlist, process_synthesized_event);
805 if (err < 0) { 725 if (err < 0) {
@@ -845,17 +765,19 @@ static int record__synthesize(struct record *rec, bool tail)
845 goto out; 765 goto out;
846 } 766 }
847 767
848 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event, 768 if (!perf_evlist__exclude_kernel(rec->evlist)) {
849 machine); 769 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
850 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n" 770 machine);
851 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n" 771 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
852 "Check /proc/kallsyms permission or run as root.\n"); 772 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
853 773 "Check /proc/kallsyms permission or run as root.\n");
854 err = perf_event__synthesize_modules(tool, process_synthesized_event, 774
855 machine); 775 err = perf_event__synthesize_modules(tool, process_synthesized_event,
856 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n" 776 machine);
857 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n" 777 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
858 "Check /proc/modules permission or run as root.\n"); 778 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
779 "Check /proc/modules permission or run as root.\n");
780 }
859 781
860 if (perf_guest) { 782 if (perf_guest) {
861 machines__process_guests(&session->machines, 783 machines__process_guests(&session->machines,
@@ -864,7 +786,7 @@ static int record__synthesize(struct record *rec, bool tail)
864 786
865 err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads, 787 err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
866 process_synthesized_event, opts->sample_address, 788 process_synthesized_event, opts->sample_address,
867 opts->proc_map_timeout); 789 opts->proc_map_timeout, 1);
868out: 790out:
869 return err; 791 return err;
870} 792}
@@ -878,7 +800,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
878 struct machine *machine; 800 struct machine *machine;
879 struct perf_tool *tool = &rec->tool; 801 struct perf_tool *tool = &rec->tool;
880 struct record_opts *opts = &rec->opts; 802 struct record_opts *opts = &rec->opts;
881 struct perf_data_file *file = &rec->file; 803 struct perf_data *data = &rec->data;
882 struct perf_session *session; 804 struct perf_session *session;
883 bool disabled = false, draining = false; 805 bool disabled = false, draining = false;
884 int fd; 806 int fd;
@@ -904,20 +826,20 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
904 signal(SIGUSR2, SIG_IGN); 826 signal(SIGUSR2, SIG_IGN);
905 } 827 }
906 828
907 session = perf_session__new(file, false, tool); 829 session = perf_session__new(data, false, tool);
908 if (session == NULL) { 830 if (session == NULL) {
909 pr_err("Perf session creation failed.\n"); 831 pr_err("Perf session creation failed.\n");
910 return -1; 832 return -1;
911 } 833 }
912 834
913 fd = perf_data_file__fd(file); 835 fd = perf_data__fd(data);
914 rec->session = session; 836 rec->session = session;
915 837
916 record__init_features(rec); 838 record__init_features(rec);
917 839
918 if (forks) { 840 if (forks) {
919 err = perf_evlist__prepare_workload(rec->evlist, &opts->target, 841 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
920 argv, file->is_pipe, 842 argv, data->is_pipe,
921 workload_exec_failed_signal); 843 workload_exec_failed_signal);
922 if (err < 0) { 844 if (err < 0) {
923 pr_err("Couldn't run the workload!\n"); 845 pr_err("Couldn't run the workload!\n");
@@ -953,7 +875,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
953 if (!rec->evlist->nr_groups) 875 if (!rec->evlist->nr_groups)
954 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC); 876 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
955 877
956 if (file->is_pipe) { 878 if (data->is_pipe) {
957 err = perf_header__write_pipe(fd); 879 err = perf_header__write_pipe(fd);
958 if (err < 0) 880 if (err < 0)
959 goto out_child; 881 goto out_child;
@@ -1214,8 +1136,8 @@ out_child:
1214 samples[0] = '\0'; 1136 samples[0] = '\0';
1215 1137
1216 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n", 1138 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
1217 perf_data_file__size(file) / 1024.0 / 1024.0, 1139 perf_data__size(data) / 1024.0 / 1024.0,
1218 file->path, postfix, samples); 1140 data->file.path, postfix, samples);
1219 } 1141 }
1220 1142
1221out_delete_session: 1143out_delete_session:
@@ -1579,7 +1501,7 @@ static struct option __record_options[] = {
1579 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu", 1501 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
1580 "list of cpus to monitor"), 1502 "list of cpus to monitor"),
1581 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"), 1503 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
1582 OPT_STRING('o', "output", &record.file.path, "file", 1504 OPT_STRING('o', "output", &record.data.file.path, "file",
1583 "output file name"), 1505 "output file name"),
1584 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit, 1506 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
1585 &record.opts.no_inherit_set, 1507 &record.opts.no_inherit_set,
@@ -1644,6 +1566,9 @@ static struct option __record_options[] = {
1644 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register", 1566 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
1645 "sample selected machine registers on interrupt," 1567 "sample selected machine registers on interrupt,"
1646 " use -I ? to list register names", parse_regs), 1568 " use -I ? to list register names", parse_regs),
1569 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
1570 "sample selected machine registers on interrupt,"
1571 " use -I ? to list register names", parse_regs),
1647 OPT_BOOLEAN(0, "running-time", &record.opts.running_time, 1572 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
1648 "Record running/enabled time of read (:S) events"), 1573 "Record running/enabled time of read (:S) events"),
1649 OPT_CALLBACK('k', "clockid", &record.opts, 1574 OPT_CALLBACK('k', "clockid", &record.opts,
@@ -1786,7 +1711,7 @@ int cmd_record(int argc, const char **argv)
1786 1711
1787 err = -ENOMEM; 1712 err = -ENOMEM;
1788 1713
1789 if (symbol_conf.kptr_restrict) 1714 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
1790 pr_warning( 1715 pr_warning(
1791"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n" 1716"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
1792"check /proc/sys/kernel/kptr_restrict.\n\n" 1717"check /proc/sys/kernel/kptr_restrict.\n\n"
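
The bulk of the builtin-record.c diff moves ring-buffer draining out of the builtin: backward_rb_find_range() and record__mmap_read() disappear, and the mmap loop now calls perf_mmap__push() with record__pushfn() as the sink, so the wrap-around handling lives next to the mmap code instead of being duplicated per tool. (The same series also adds a dummy tracking event when initial_delay is set and skips kernel/module synthesis when all events exclude the kernel.) A freestanding sketch of the push-callback idea, with illustrative types:

    #include <stddef.h>
    #include <stdint.h>

    typedef int (*push_fn)(void *to, void *buf, size_t size);

    struct ring {
            unsigned char   *base;  /* data area of mask + 1 bytes (power of two) */
            uint64_t         mask;
            uint64_t         head;  /* producer position */
            uint64_t         prev;  /* last position already consumed */
    };

    static int ring_push(struct ring *r, void *to, push_fn push)
    {
            uint64_t start = r->prev, end = r->head;
            size_t size;

            if (start == end)
                    return 0;

            if ((start & r->mask) + (end - start) != (end & r->mask)) {
                    /* readable region wraps: emit the tail chunk first */
                    size = r->mask + 1 - (start & r->mask);
                    if (push(to, &r->base[start & r->mask], size) < 0)
                            return -1;
                    start += size;
            }

            size = end - start;
            if (size && push(to, &r->base[start & r->mask], size) < 0)
                    return -1;

            r->prev = r->head;      /* everything up to head is now consumed */
            return 0;
    }

record__pushfn() in the diff is then just a thin wrapper that bumps rec->samples and forwards each chunk to record__write().
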
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index fae4b0340750..af5dd038195e 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -258,7 +258,7 @@ static int report__setup_sample_type(struct report *rep)
258{ 258{
259 struct perf_session *session = rep->session; 259 struct perf_session *session = rep->session;
260 u64 sample_type = perf_evlist__combined_sample_type(session->evlist); 260 u64 sample_type = perf_evlist__combined_sample_type(session->evlist);
261 bool is_pipe = perf_data_file__is_pipe(session->file); 261 bool is_pipe = perf_data__is_pipe(session->data);
262 262
263 if (session->itrace_synth_opts->callchain || 263 if (session->itrace_synth_opts->callchain ||
264 (!is_pipe && 264 (!is_pipe &&
@@ -441,6 +441,9 @@ static void report__warn_kptr_restrict(const struct report *rep)
441 struct map *kernel_map = machine__kernel_map(&rep->session->machines.host); 441 struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
442 struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL; 442 struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;
443 443
444 if (perf_evlist__exclude_kernel(rep->session->evlist))
445 return;
446
444 if (kernel_map == NULL || 447 if (kernel_map == NULL ||
445 (kernel_map->dso->hit && 448 (kernel_map->dso->hit &&
446 (kernel_kmap->ref_reloc_sym == NULL || 449 (kernel_kmap->ref_reloc_sym == NULL ||
@@ -569,7 +572,7 @@ static int __cmd_report(struct report *rep)
569 int ret; 572 int ret;
570 struct perf_session *session = rep->session; 573 struct perf_session *session = rep->session;
571 struct perf_evsel *pos; 574 struct perf_evsel *pos;
572 struct perf_data_file *file = session->file; 575 struct perf_data *data = session->data;
573 576
574 signal(SIGINT, sig_handler); 577 signal(SIGINT, sig_handler);
575 578
@@ -638,7 +641,7 @@ static int __cmd_report(struct report *rep)
638 rep->nr_entries += evsel__hists(pos)->nr_entries; 641 rep->nr_entries += evsel__hists(pos)->nr_entries;
639 642
640 if (rep->nr_entries == 0) { 643 if (rep->nr_entries == 0) {
641 ui__error("The %s file has no samples!\n", file->path); 644 ui__error("The %s file has no samples!\n", data->file.path);
642 return 0; 645 return 0;
643 } 646 }
644 647
@@ -880,7 +883,7 @@ int cmd_report(int argc, const char **argv)
880 "Show inline function"), 883 "Show inline function"),
881 OPT_END() 884 OPT_END()
882 }; 885 };
883 struct perf_data_file file = { 886 struct perf_data data = {
884 .mode = PERF_DATA_MODE_READ, 887 .mode = PERF_DATA_MODE_READ,
885 }; 888 };
886 int ret = hists__init(); 889 int ret = hists__init();
@@ -941,11 +944,11 @@ int cmd_report(int argc, const char **argv)
941 input_name = "perf.data"; 944 input_name = "perf.data";
942 } 945 }
943 946
944 file.path = input_name; 947 data.file.path = input_name;
945 file.force = symbol_conf.force; 948 data.force = symbol_conf.force;
946 949
947repeat: 950repeat:
948 session = perf_session__new(&file, false, &report.tool); 951 session = perf_session__new(&data, false, &report.tool);
949 if (session == NULL) 952 if (session == NULL)
950 return -1; 953 return -1;
951 954
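
report__warn_kptr_restrict() now bails out early when every selected event excludes the kernel, since a warning about unresolvable kernel maps is noise in that case (cmd_record grew the mirror-image check a few hunks up). The predicate behind perf_evlist__exclude_kernel() amounts to an all-of test over the event list, roughly (illustrative types, not perf's evlist):

    #include <stdbool.h>
    #include <stddef.h>

    struct evsel {
            bool             exclude_kernel;        /* from perf_event_attr */
            struct evsel    *next;
    };

    static bool all_exclude_kernel(const struct evsel *head)
    {
            const struct evsel *e;

            for (e = head; e; e = e->next)
                    if (!e->exclude_kernel)
                            return false;   /* someone still samples the kernel */

            return true;
    }
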
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index f380d91ee609..83283fedb00f 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1701,14 +1701,16 @@ static int perf_sched__read_events(struct perf_sched *sched)
1701 { "sched:sched_migrate_task", process_sched_migrate_task_event, }, 1701 { "sched:sched_migrate_task", process_sched_migrate_task_event, },
1702 }; 1702 };
1703 struct perf_session *session; 1703 struct perf_session *session;
1704 struct perf_data_file file = { 1704 struct perf_data data = {
1705 .path = input_name, 1705 .file = {
1706 .mode = PERF_DATA_MODE_READ, 1706 .path = input_name,
1707 .force = sched->force, 1707 },
1708 .mode = PERF_DATA_MODE_READ,
1709 .force = sched->force,
1708 }; 1710 };
1709 int rc = -1; 1711 int rc = -1;
1710 1712
1711 session = perf_session__new(&file, false, &sched->tool); 1713 session = perf_session__new(&data, false, &sched->tool);
1712 if (session == NULL) { 1714 if (session == NULL) {
1713 pr_debug("No Memory for session\n"); 1715 pr_debug("No Memory for session\n");
1714 return -1; 1716 return -1;
@@ -2903,10 +2905,12 @@ static int perf_sched__timehist(struct perf_sched *sched)
2903 const struct perf_evsel_str_handler migrate_handlers[] = { 2905 const struct perf_evsel_str_handler migrate_handlers[] = {
2904 { "sched:sched_migrate_task", timehist_migrate_task_event, }, 2906 { "sched:sched_migrate_task", timehist_migrate_task_event, },
2905 }; 2907 };
2906 struct perf_data_file file = { 2908 struct perf_data data = {
2907 .path = input_name, 2909 .file = {
2908 .mode = PERF_DATA_MODE_READ, 2910 .path = input_name,
2909 .force = sched->force, 2911 },
2912 .mode = PERF_DATA_MODE_READ,
2913 .force = sched->force,
2910 }; 2914 };
2911 2915
2912 struct perf_session *session; 2916 struct perf_session *session;
@@ -2931,7 +2935,7 @@ static int perf_sched__timehist(struct perf_sched *sched)
2931 2935
2932 symbol_conf.use_callchain = sched->show_callchain; 2936 symbol_conf.use_callchain = sched->show_callchain;
2933 2937
2934 session = perf_session__new(&file, false, &sched->tool); 2938 session = perf_session__new(&data, false, &sched->tool);
2935 if (session == NULL) 2939 if (session == NULL)
2936 return -ENOMEM; 2940 return -ENOMEM;
2937 2941
@@ -3364,6 +3368,10 @@ int cmd_sched(int argc, const char **argv)
3364 OPT_STRING(0, "time", &sched.time_str, "str", 3368 OPT_STRING(0, "time", &sched.time_str, "str",
3365 "Time span for analysis (start,stop)"), 3369 "Time span for analysis (start,stop)"),
3366 OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"), 3370 OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"),
3371 OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
3372 "analyze events only for given process id(s)"),
3373 OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
3374 "analyze events only for given thread id(s)"),
3367 OPT_PARENT(sched_options) 3375 OPT_PARENT(sched_options)
3368 }; 3376 };
3369 3377
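
perf sched picks up -p/--pid and -t/--tid, feeding comma-separated id lists into symbol_conf.pid_list_str / tid_list_str so the analysis can be restricted to the given tasks (e.g. `perf sched timehist -p 1234,5678`). Parsing a list of that form is the usual split-on-comma loop; a minimal standalone version (perf itself goes through its intlist/thread-map helpers):

    #include <stdlib.h>
    #include <string.h>

    static int parse_id_list(const char *str, int *out, int max)
    {
            char *copy = strdup(str), *tok, *save = NULL;
            int n = 0;

            if (!copy)
                    return -1;

            for (tok = strtok_r(copy, ",", &save); tok && n < max;
                 tok = strtok_r(NULL, ",", &save))
                    out[n++] = atoi(tok);

            free(copy);
            return n;               /* number of ids parsed */
    }
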
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 0fe02758de7d..9b43bda45a41 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -89,6 +89,7 @@ enum perf_output_field {
89 PERF_OUTPUT_BRSTACKOFF = 1U << 24, 89 PERF_OUTPUT_BRSTACKOFF = 1U << 24,
90 PERF_OUTPUT_SYNTH = 1U << 25, 90 PERF_OUTPUT_SYNTH = 1U << 25,
91 PERF_OUTPUT_PHYS_ADDR = 1U << 26, 91 PERF_OUTPUT_PHYS_ADDR = 1U << 26,
92 PERF_OUTPUT_UREGS = 1U << 27,
92}; 93};
93 94
94struct output_option { 95struct output_option {
@@ -110,6 +111,7 @@ struct output_option {
110 {.str = "srcline", .field = PERF_OUTPUT_SRCLINE}, 111 {.str = "srcline", .field = PERF_OUTPUT_SRCLINE},
111 {.str = "period", .field = PERF_OUTPUT_PERIOD}, 112 {.str = "period", .field = PERF_OUTPUT_PERIOD},
112 {.str = "iregs", .field = PERF_OUTPUT_IREGS}, 113 {.str = "iregs", .field = PERF_OUTPUT_IREGS},
114 {.str = "uregs", .field = PERF_OUTPUT_UREGS},
113 {.str = "brstack", .field = PERF_OUTPUT_BRSTACK}, 115 {.str = "brstack", .field = PERF_OUTPUT_BRSTACK},
114 {.str = "brstacksym", .field = PERF_OUTPUT_BRSTACKSYM}, 116 {.str = "brstacksym", .field = PERF_OUTPUT_BRSTACKSYM},
115 {.str = "data_src", .field = PERF_OUTPUT_DATA_SRC}, 117 {.str = "data_src", .field = PERF_OUTPUT_DATA_SRC},
@@ -209,6 +211,51 @@ static struct {
209 }, 211 },
210}; 212};
211 213
214struct perf_evsel_script {
215 char *filename;
216 FILE *fp;
217 u64 samples;
218};
219
220static struct perf_evsel_script *perf_evsel_script__new(struct perf_evsel *evsel,
221 struct perf_data *data)
222{
223 struct perf_evsel_script *es = malloc(sizeof(*es));
224
225 if (es != NULL) {
226 if (asprintf(&es->filename, "%s.%s.dump", data->file.path, perf_evsel__name(evsel)) < 0)
227 goto out_free;
228 es->fp = fopen(es->filename, "w");
229 if (es->fp == NULL)
230 goto out_free_filename;
231 es->samples = 0;
232 }
233
234 return es;
235out_free_filename:
236 zfree(&es->filename);
237out_free:
238 free(es);
239 return NULL;
240}
241
242static void perf_evsel_script__delete(struct perf_evsel_script *es)
243{
244 zfree(&es->filename);
245 fclose(es->fp);
246 es->fp = NULL;
247 free(es);
248}
249
250static int perf_evsel_script__fprintf(struct perf_evsel_script *es, FILE *fp)
251{
252 struct stat st;
253
254 fstat(fileno(es->fp), &st);
255 return fprintf(fp, "[ perf script: Wrote %.3f MB %s (%" PRIu64 " samples) ]\n",
256 st.st_size / 1024.0 / 1024.0, es->filename, es->samples);
257}
258
212static inline int output_type(unsigned int type) 259static inline int output_type(unsigned int type)
213{ 260{
214 switch (type) { 261 switch (type) {
@@ -386,6 +433,11 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
386 PERF_OUTPUT_IREGS)) 433 PERF_OUTPUT_IREGS))
387 return -EINVAL; 434 return -EINVAL;
388 435
436 if (PRINT_FIELD(UREGS) &&
437 perf_evsel__check_stype(evsel, PERF_SAMPLE_REGS_USER, "UREGS",
438 PERF_OUTPUT_UREGS))
439 return -EINVAL;
440
389 if (PRINT_FIELD(PHYS_ADDR) && 441 if (PRINT_FIELD(PHYS_ADDR) &&
390 perf_evsel__check_stype(evsel, PERF_SAMPLE_PHYS_ADDR, "PHYS_ADDR", 442 perf_evsel__check_stype(evsel, PERF_SAMPLE_PHYS_ADDR, "PHYS_ADDR",
391 PERF_OUTPUT_PHYS_ADDR)) 443 PERF_OUTPUT_PHYS_ADDR))
@@ -494,51 +546,76 @@ out:
494 return 0; 546 return 0;
495} 547}
496 548
497static void print_sample_iregs(struct perf_sample *sample, 549static int perf_sample__fprintf_iregs(struct perf_sample *sample,
498 struct perf_event_attr *attr) 550 struct perf_event_attr *attr, FILE *fp)
499{ 551{
500 struct regs_dump *regs = &sample->intr_regs; 552 struct regs_dump *regs = &sample->intr_regs;
501 uint64_t mask = attr->sample_regs_intr; 553 uint64_t mask = attr->sample_regs_intr;
502 unsigned i = 0, r; 554 unsigned i = 0, r;
555 int printed = 0;
503 556
504 if (!regs) 557 if (!regs)
505 return; 558 return 0;
506 559
507 for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) { 560 for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) {
508 u64 val = regs->regs[i++]; 561 u64 val = regs->regs[i++];
509 printf("%5s:0x%"PRIx64" ", perf_reg_name(r), val); 562 printed += fprintf(fp, "%5s:0x%"PRIx64" ", perf_reg_name(r), val);
510 } 563 }
564
565 return printed;
511} 566}
512 567
513static void print_sample_start(struct perf_sample *sample, 568static int perf_sample__fprintf_uregs(struct perf_sample *sample,
514 struct thread *thread, 569 struct perf_event_attr *attr, FILE *fp)
515 struct perf_evsel *evsel) 570{
571 struct regs_dump *regs = &sample->user_regs;
572 uint64_t mask = attr->sample_regs_user;
573 unsigned i = 0, r;
574 int printed = 0;
575
576 if (!regs || !regs->regs)
577 return 0;
578
579 printed += fprintf(fp, " ABI:%" PRIu64 " ", regs->abi);
580
581 for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) {
582 u64 val = regs->regs[i++];
583 printed += fprintf(fp, "%5s:0x%"PRIx64" ", perf_reg_name(r), val);
584 }
585
586 return printed;
587}
588
589static int perf_sample__fprintf_start(struct perf_sample *sample,
590 struct thread *thread,
591 struct perf_evsel *evsel, FILE *fp)
516{ 592{
517 struct perf_event_attr *attr = &evsel->attr; 593 struct perf_event_attr *attr = &evsel->attr;
518 unsigned long secs; 594 unsigned long secs;
519 unsigned long long nsecs; 595 unsigned long long nsecs;
596 int printed = 0;
520 597
521 if (PRINT_FIELD(COMM)) { 598 if (PRINT_FIELD(COMM)) {
522 if (latency_format) 599 if (latency_format)
523 printf("%8.8s ", thread__comm_str(thread)); 600 printed += fprintf(fp, "%8.8s ", thread__comm_str(thread));
524 else if (PRINT_FIELD(IP) && symbol_conf.use_callchain) 601 else if (PRINT_FIELD(IP) && symbol_conf.use_callchain)
525 printf("%s ", thread__comm_str(thread)); 602 printed += fprintf(fp, "%s ", thread__comm_str(thread));
526 else 603 else
527 printf("%16s ", thread__comm_str(thread)); 604 printed += fprintf(fp, "%16s ", thread__comm_str(thread));
528 } 605 }
529 606
530 if (PRINT_FIELD(PID) && PRINT_FIELD(TID)) 607 if (PRINT_FIELD(PID) && PRINT_FIELD(TID))
531 printf("%5d/%-5d ", sample->pid, sample->tid); 608 printed += fprintf(fp, "%5d/%-5d ", sample->pid, sample->tid);
532 else if (PRINT_FIELD(PID)) 609 else if (PRINT_FIELD(PID))
533 printf("%5d ", sample->pid); 610 printed += fprintf(fp, "%5d ", sample->pid);
534 else if (PRINT_FIELD(TID)) 611 else if (PRINT_FIELD(TID))
535 printf("%5d ", sample->tid); 612 printed += fprintf(fp, "%5d ", sample->tid);
536 613
537 if (PRINT_FIELD(CPU)) { 614 if (PRINT_FIELD(CPU)) {
538 if (latency_format) 615 if (latency_format)
539 printf("%3d ", sample->cpu); 616 printed += fprintf(fp, "%3d ", sample->cpu);
540 else 617 else
541 printf("[%03d] ", sample->cpu); 618 printed += fprintf(fp, "[%03d] ", sample->cpu);
542 } 619 }
543 620
544 if (PRINT_FIELD(TIME)) { 621 if (PRINT_FIELD(TIME)) {
@@ -547,13 +624,15 @@ static void print_sample_start(struct perf_sample *sample,
547 nsecs -= secs * NSEC_PER_SEC; 624 nsecs -= secs * NSEC_PER_SEC;
548 625
549 if (nanosecs) 626 if (nanosecs)
550 printf("%5lu.%09llu: ", secs, nsecs); 627 printed += fprintf(fp, "%5lu.%09llu: ", secs, nsecs);
551 else { 628 else {
552 char sample_time[32]; 629 char sample_time[32];
553 timestamp__scnprintf_usec(sample->time, sample_time, sizeof(sample_time)); 630 timestamp__scnprintf_usec(sample->time, sample_time, sizeof(sample_time));
554 printf("%12s: ", sample_time); 631 printed += fprintf(fp, "%12s: ", sample_time);
555 } 632 }
556 } 633 }
634
635 return printed;
557} 636}
558 637
559static inline char 638static inline char
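
The new uregs output field mirrors the existing iregs one: perf_evsel__check_attr() requires PERF_SAMPLE_REGS_USER, and the printer walks attr->sample_regs_user bit by bit, consuming one packed value from the register dump per set bit, while the wider printf-to-fprintf conversion threads a FILE * and accumulates the byte count so output can be routed to the new per-event dump files. A freestanding version of that mask walk (plain register indices instead of perf_reg_name()):

    #include <inttypes.h>
    #include <stdio.h>

    static int print_regs(FILE *fp, uint64_t mask, const uint64_t *vals)
    {
            int printed = 0, i = 0;
            unsigned int r;

            for (r = 0; r < 64; r++) {
                    if (!(mask & (1ULL << r)))
                            continue;
                    /* vals[] is packed: one entry per set bit, in bit order */
                    printed += fprintf(fp, "r%u:0x%" PRIx64 " ", r, vals[i++]);
            }

            return printed;         /* bytes written, like the fprintf helpers */
    }
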
@@ -565,16 +644,17 @@ mispred_str(struct branch_entry *br)
565 return br->flags.predicted ? 'P' : 'M'; 644 return br->flags.predicted ? 'P' : 'M';
566} 645}
567 646
568static void print_sample_brstack(struct perf_sample *sample, 647static int perf_sample__fprintf_brstack(struct perf_sample *sample,
569 struct thread *thread, 648 struct thread *thread,
570 struct perf_event_attr *attr) 649 struct perf_event_attr *attr, FILE *fp)
571{ 650{
572 struct branch_stack *br = sample->branch_stack; 651 struct branch_stack *br = sample->branch_stack;
573 struct addr_location alf, alt; 652 struct addr_location alf, alt;
574 u64 i, from, to; 653 u64 i, from, to;
654 int printed = 0;
575 655
576 if (!(br && br->nr)) 656 if (!(br && br->nr))
577 return; 657 return 0;
578 658
579 for (i = 0; i < br->nr; i++) { 659 for (i = 0; i < br->nr; i++) {
580 from = br->entries[i].from; 660 from = br->entries[i].from;
@@ -587,38 +667,41 @@ static void print_sample_brstack(struct perf_sample *sample,
587 thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt); 667 thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt);
588 } 668 }
589 669
590 printf(" 0x%"PRIx64, from); 670 printed += fprintf(fp, " 0x%"PRIx64, from);
591 if (PRINT_FIELD(DSO)) { 671 if (PRINT_FIELD(DSO)) {
592 printf("("); 672 printed += fprintf(fp, "(");
593 map__fprintf_dsoname(alf.map, stdout); 673 printed += map__fprintf_dsoname(alf.map, fp);
594 printf(")"); 674 printed += fprintf(fp, ")");
595 } 675 }
596 676
597 printf("/0x%"PRIx64, to); 677 printed += fprintf(fp, "/0x%"PRIx64, to);
598 if (PRINT_FIELD(DSO)) { 678 if (PRINT_FIELD(DSO)) {
599 printf("("); 679 printed += fprintf(fp, "(");
600 map__fprintf_dsoname(alt.map, stdout); 680 printed += map__fprintf_dsoname(alt.map, fp);
601 printf(")"); 681 printed += fprintf(fp, ")");
602 } 682 }
603 683
604 printf("/%c/%c/%c/%d ", 684 printed += fprintf(fp, "/%c/%c/%c/%d ",
605 mispred_str( br->entries + i), 685 mispred_str( br->entries + i),
606 br->entries[i].flags.in_tx? 'X' : '-', 686 br->entries[i].flags.in_tx? 'X' : '-',
607 br->entries[i].flags.abort? 'A' : '-', 687 br->entries[i].flags.abort? 'A' : '-',
608 br->entries[i].flags.cycles); 688 br->entries[i].flags.cycles);
609 } 689 }
690
691 return printed;
610} 692}
611 693
612static void print_sample_brstacksym(struct perf_sample *sample, 694static int perf_sample__fprintf_brstacksym(struct perf_sample *sample,
613 struct thread *thread, 695 struct thread *thread,
614 struct perf_event_attr *attr) 696 struct perf_event_attr *attr, FILE *fp)
615{ 697{
616 struct branch_stack *br = sample->branch_stack; 698 struct branch_stack *br = sample->branch_stack;
617 struct addr_location alf, alt; 699 struct addr_location alf, alt;
618 u64 i, from, to; 700 u64 i, from, to;
701 int printed = 0;
619 702
620 if (!(br && br->nr)) 703 if (!(br && br->nr))
621 return; 704 return 0;
622 705
623 for (i = 0; i < br->nr; i++) { 706 for (i = 0; i < br->nr; i++) {
624 707
@@ -635,37 +718,40 @@ static void print_sample_brstacksym(struct perf_sample *sample,
635 if (alt.map) 718 if (alt.map)
636 alt.sym = map__find_symbol(alt.map, alt.addr); 719 alt.sym = map__find_symbol(alt.map, alt.addr);
637 720
638 symbol__fprintf_symname_offs(alf.sym, &alf, stdout); 721 printed += symbol__fprintf_symname_offs(alf.sym, &alf, fp);
639 if (PRINT_FIELD(DSO)) { 722 if (PRINT_FIELD(DSO)) {
640 printf("("); 723 printed += fprintf(fp, "(");
641 map__fprintf_dsoname(alf.map, stdout); 724 printed += map__fprintf_dsoname(alf.map, fp);
642 printf(")"); 725 printed += fprintf(fp, ")");
643 } 726 }
644 putchar('/'); 727 printed += fprintf(fp, "%c", '/');
645 symbol__fprintf_symname_offs(alt.sym, &alt, stdout); 728 printed += symbol__fprintf_symname_offs(alt.sym, &alt, fp);
646 if (PRINT_FIELD(DSO)) { 729 if (PRINT_FIELD(DSO)) {
647 printf("("); 730 printed += fprintf(fp, "(");
648 map__fprintf_dsoname(alt.map, stdout); 731 printed += map__fprintf_dsoname(alt.map, fp);
649 printf(")"); 732 printed += fprintf(fp, ")");
650 } 733 }
651 printf("/%c/%c/%c/%d ", 734 printed += fprintf(fp, "/%c/%c/%c/%d ",
652 mispred_str( br->entries + i), 735 mispred_str( br->entries + i),
653 br->entries[i].flags.in_tx? 'X' : '-', 736 br->entries[i].flags.in_tx? 'X' : '-',
654 br->entries[i].flags.abort? 'A' : '-', 737 br->entries[i].flags.abort? 'A' : '-',
655 br->entries[i].flags.cycles); 738 br->entries[i].flags.cycles);
656 } 739 }
740
741 return printed;
657} 742}
658 743
659static void print_sample_brstackoff(struct perf_sample *sample, 744static int perf_sample__fprintf_brstackoff(struct perf_sample *sample,
660 struct thread *thread, 745 struct thread *thread,
661 struct perf_event_attr *attr) 746 struct perf_event_attr *attr, FILE *fp)
662{ 747{
663 struct branch_stack *br = sample->branch_stack; 748 struct branch_stack *br = sample->branch_stack;
664 struct addr_location alf, alt; 749 struct addr_location alf, alt;
665 u64 i, from, to; 750 u64 i, from, to;
751 int printed = 0;
666 752
667 if (!(br && br->nr)) 753 if (!(br && br->nr))
668 return; 754 return 0;
669 755
670 for (i = 0; i < br->nr; i++) { 756 for (i = 0; i < br->nr; i++) {
671 757
@@ -682,24 +768,26 @@ static void print_sample_brstackoff(struct perf_sample *sample,
682 if (alt.map && !alt.map->dso->adjust_symbols) 768 if (alt.map && !alt.map->dso->adjust_symbols)
683 to = map__map_ip(alt.map, to); 769 to = map__map_ip(alt.map, to);
684 770
685 printf(" 0x%"PRIx64, from); 771 printed += fprintf(fp, " 0x%"PRIx64, from);
686 if (PRINT_FIELD(DSO)) { 772 if (PRINT_FIELD(DSO)) {
687 printf("("); 773 printed += fprintf(fp, "(");
688 map__fprintf_dsoname(alf.map, stdout); 774 printed += map__fprintf_dsoname(alf.map, fp);
689 printf(")"); 775 printed += fprintf(fp, ")");
690 } 776 }
691 printf("/0x%"PRIx64, to); 777 printed += fprintf(fp, "/0x%"PRIx64, to);
692 if (PRINT_FIELD(DSO)) { 778 if (PRINT_FIELD(DSO)) {
693 printf("("); 779 printed += fprintf(fp, "(");
694 map__fprintf_dsoname(alt.map, stdout); 780 printed += map__fprintf_dsoname(alt.map, fp);
695 printf(")"); 781 printed += fprintf(fp, ")");
696 } 782 }
697 printf("/%c/%c/%c/%d ", 783 printed += fprintf(fp, "/%c/%c/%c/%d ",
698 mispred_str(br->entries + i), 784 mispred_str(br->entries + i),
699 br->entries[i].flags.in_tx ? 'X' : '-', 785 br->entries[i].flags.in_tx ? 'X' : '-',
700 br->entries[i].flags.abort ? 'A' : '-', 786 br->entries[i].flags.abort ? 'A' : '-',
701 br->entries[i].flags.cycles); 787 br->entries[i].flags.cycles);
702 } 788 }
789
790 return printed;
703} 791}
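
The three branch-stack printers above all follow the same mechanical conversion: each printf() becomes an fprintf(fp, ...) whose return value is accumulated into a local `printed` counter that the function hands back, so callers can redirect output to any FILE * and still know how many characters were written. A minimal standalone sketch of that pattern, using made-up names rather than the real perf helpers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for one branch_stack entry. */
struct br_ent {
	uint64_t from, to;
	int mispred;
};

/*
 * Print one entry to fp and return how many characters were written,
 * mirroring the void/printf -> int/fprintf conversion in the patch.
 */
static int br_ent__fprintf(const struct br_ent *e, FILE *fp)
{
	int printed = 0;

	printed += fprintf(fp, " 0x%" PRIx64, e->from);
	printed += fprintf(fp, "/0x%" PRIx64, e->to);
	printed += fprintf(fp, "/%c ", e->mispred ? 'M' : 'P');

	return printed;
}

int main(void)
{
	struct br_ent e = { 0x400100, 0x400200, 1 };
	int n = br_ent__fprintf(&e, stdout);

	fprintf(stdout, "\n(%d characters)\n", n);
	return 0;
}
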
704#define MAXBB 16384UL 792#define MAXBB 16384UL
705 793
@@ -727,27 +815,26 @@ static int grab_bb(u8 *buffer, u64 start, u64 end,
727 * but the exit is not. Let the caller patch it up. 815 * but the exit is not. Let the caller patch it up.
728 */ 816 */
729 if (kernel != machine__kernel_ip(machine, end)) { 817 if (kernel != machine__kernel_ip(machine, end)) {
730 printf("\tblock %" PRIx64 "-%" PRIx64 " transfers between kernel and user\n", 818 pr_debug("\tblock %" PRIx64 "-%" PRIx64 " transfers between kernel and user\n", start, end);
731 start, end);
732 return -ENXIO; 819 return -ENXIO;
733 } 820 }
734 821
735 memset(&al, 0, sizeof(al)); 822 memset(&al, 0, sizeof(al));
736 if (end - start > MAXBB - MAXINSN) { 823 if (end - start > MAXBB - MAXINSN) {
737 if (last) 824 if (last)
738 printf("\tbrstack does not reach to final jump (%" PRIx64 "-%" PRIx64 ")\n", start, end); 825 pr_debug("\tbrstack does not reach to final jump (%" PRIx64 "-%" PRIx64 ")\n", start, end);
739 else 826 else
740 printf("\tblock %" PRIx64 "-%" PRIx64 " (%" PRIu64 ") too long to dump\n", start, end, end - start); 827 pr_debug("\tblock %" PRIx64 "-%" PRIx64 " (%" PRIu64 ") too long to dump\n", start, end, end - start);
741 return 0; 828 return 0;
742 } 829 }
743 830
744 thread__find_addr_map(thread, *cpumode, MAP__FUNCTION, start, &al); 831 thread__find_addr_map(thread, *cpumode, MAP__FUNCTION, start, &al);
745 if (!al.map || !al.map->dso) { 832 if (!al.map || !al.map->dso) {
746 printf("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end); 833 pr_debug("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end);
747 return 0; 834 return 0;
748 } 835 }
749 if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR) { 836 if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR) {
750 printf("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end); 837 pr_debug("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end);
751 return 0; 838 return 0;
752 } 839 }
753 840
@@ -760,36 +847,35 @@ static int grab_bb(u8 *buffer, u64 start, u64 end,
760 847
761 *is64bit = al.map->dso->is_64_bit; 848 *is64bit = al.map->dso->is_64_bit;
762 if (len <= 0) 849 if (len <= 0)
763 printf("\tcannot fetch code for block at %" PRIx64 "-%" PRIx64 "\n", 850 pr_debug("\tcannot fetch code for block at %" PRIx64 "-%" PRIx64 "\n",
764 start, end); 851 start, end);
765 return len; 852 return len;
766} 853}
767 854
768static void print_jump(uint64_t ip, struct branch_entry *en, 855static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en,
769 struct perf_insn *x, u8 *inbuf, int len, 856 struct perf_insn *x, u8 *inbuf, int len,
770 int insn) 857 int insn, FILE *fp)
771{ 858{
772 printf("\t%016" PRIx64 "\t%-30s\t#%s%s%s%s", 859 int printed = fprintf(fp, "\t%016" PRIx64 "\t%-30s\t#%s%s%s%s", ip,
773 ip, 860 dump_insn(x, ip, inbuf, len, NULL),
774 dump_insn(x, ip, inbuf, len, NULL), 861 en->flags.predicted ? " PRED" : "",
775 en->flags.predicted ? " PRED" : "", 862 en->flags.mispred ? " MISPRED" : "",
776 en->flags.mispred ? " MISPRED" : "", 863 en->flags.in_tx ? " INTX" : "",
777 en->flags.in_tx ? " INTX" : "", 864 en->flags.abort ? " ABORT" : "");
778 en->flags.abort ? " ABORT" : "");
779 if (en->flags.cycles) { 865 if (en->flags.cycles) {
780 printf(" %d cycles", en->flags.cycles); 866 printed += fprintf(fp, " %d cycles", en->flags.cycles);
781 if (insn) 867 if (insn)
782 printf(" %.2f IPC", (float)insn / en->flags.cycles); 868 printed += fprintf(fp, " %.2f IPC", (float)insn / en->flags.cycles);
783 } 869 }
784 putchar('\n'); 870 return printed + fprintf(fp, "\n");
785} 871}
786 872
787static void print_ip_sym(struct thread *thread, u8 cpumode, int cpu, 873static int ip__fprintf_sym(uint64_t addr, struct thread *thread,
788 uint64_t addr, struct symbol **lastsym, 874 u8 cpumode, int cpu, struct symbol **lastsym,
789 struct perf_event_attr *attr) 875 struct perf_event_attr *attr, FILE *fp)
790{ 876{
791 struct addr_location al; 877 struct addr_location al;
792 int off; 878 int off, printed = 0;
793 879
794 memset(&al, 0, sizeof(al)); 880 memset(&al, 0, sizeof(al));
795 881
@@ -798,7 +884,7 @@ static void print_ip_sym(struct thread *thread, u8 cpumode, int cpu,
798 thread__find_addr_map(thread, cpumode, MAP__VARIABLE, 884 thread__find_addr_map(thread, cpumode, MAP__VARIABLE,
799 addr, &al); 885 addr, &al);
800 if ((*lastsym) && al.addr >= (*lastsym)->start && al.addr < (*lastsym)->end) 886 if ((*lastsym) && al.addr >= (*lastsym)->start && al.addr < (*lastsym)->end)
801 return; 887 return 0;
802 888
803 al.cpu = cpu; 889 al.cpu = cpu;
804 al.sym = NULL; 890 al.sym = NULL;
@@ -806,37 +892,39 @@ static void print_ip_sym(struct thread *thread, u8 cpumode, int cpu,
806 al.sym = map__find_symbol(al.map, al.addr); 892 al.sym = map__find_symbol(al.map, al.addr);
807 893
808 if (!al.sym) 894 if (!al.sym)
809 return; 895 return 0;
810 896
811 if (al.addr < al.sym->end) 897 if (al.addr < al.sym->end)
812 off = al.addr - al.sym->start; 898 off = al.addr - al.sym->start;
813 else 899 else
814 off = al.addr - al.map->start - al.sym->start; 900 off = al.addr - al.map->start - al.sym->start;
815 printf("\t%s", al.sym->name); 901 printed += fprintf(fp, "\t%s", al.sym->name);
816 if (off) 902 if (off)
817 printf("%+d", off); 903 printed += fprintf(fp, "%+d", off);
818 putchar(':'); 904 printed += fprintf(fp, ":");
819 if (PRINT_FIELD(SRCLINE)) 905 if (PRINT_FIELD(SRCLINE))
820 map__fprintf_srcline(al.map, al.addr, "\t", stdout); 906 printed += map__fprintf_srcline(al.map, al.addr, "\t", fp);
821 putchar('\n'); 907 printed += fprintf(fp, "\n");
822 *lastsym = al.sym; 908 *lastsym = al.sym;
909
910 return printed;
823} 911}
824 912
825static void print_sample_brstackinsn(struct perf_sample *sample, 913static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
826 struct thread *thread, 914 struct thread *thread,
827 struct perf_event_attr *attr, 915 struct perf_event_attr *attr,
828 struct machine *machine) 916 struct machine *machine, FILE *fp)
829{ 917{
830 struct branch_stack *br = sample->branch_stack; 918 struct branch_stack *br = sample->branch_stack;
831 u64 start, end; 919 u64 start, end;
832 int i, insn, len, nr, ilen; 920 int i, insn, len, nr, ilen, printed = 0;
833 struct perf_insn x; 921 struct perf_insn x;
834 u8 buffer[MAXBB]; 922 u8 buffer[MAXBB];
835 unsigned off; 923 unsigned off;
836 struct symbol *lastsym = NULL; 924 struct symbol *lastsym = NULL;
837 925
838 if (!(br && br->nr)) 926 if (!(br && br->nr))
839 return; 927 return 0;
840 nr = br->nr; 928 nr = br->nr;
841 if (max_blocks && nr > max_blocks + 1) 929 if (max_blocks && nr > max_blocks + 1)
842 nr = max_blocks + 1; 930 nr = max_blocks + 1;
@@ -844,17 +932,17 @@ static void print_sample_brstackinsn(struct perf_sample *sample,
844 x.thread = thread; 932 x.thread = thread;
845 x.cpu = sample->cpu; 933 x.cpu = sample->cpu;
846 934
847 putchar('\n'); 935 printed += fprintf(fp, "%c", '\n');
848 936
849 /* Handle first from jump, of which we don't know the entry. */ 937 /* Handle first from jump, of which we don't know the entry. */
850 len = grab_bb(buffer, br->entries[nr-1].from, 938 len = grab_bb(buffer, br->entries[nr-1].from,
851 br->entries[nr-1].from, 939 br->entries[nr-1].from,
852 machine, thread, &x.is64bit, &x.cpumode, false); 940 machine, thread, &x.is64bit, &x.cpumode, false);
853 if (len > 0) { 941 if (len > 0) {
854 print_ip_sym(thread, x.cpumode, x.cpu, 942 printed += ip__fprintf_sym(br->entries[nr - 1].from, thread,
855 br->entries[nr - 1].from, &lastsym, attr); 943 x.cpumode, x.cpu, &lastsym, attr, fp);
856 print_jump(br->entries[nr - 1].from, &br->entries[nr - 1], 944 printed += ip__fprintf_jump(br->entries[nr - 1].from, &br->entries[nr - 1],
857 &x, buffer, len, 0); 945 &x, buffer, len, 0, fp);
858 } 946 }
859 947
860 /* Print all blocks */ 948 /* Print all blocks */
@@ -880,13 +968,13 @@ static void print_sample_brstackinsn(struct perf_sample *sample,
880 for (off = 0;; off += ilen) { 968 for (off = 0;; off += ilen) {
881 uint64_t ip = start + off; 969 uint64_t ip = start + off;
882 970
883 print_ip_sym(thread, x.cpumode, x.cpu, ip, &lastsym, attr); 971 printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
884 if (ip == end) { 972 if (ip == end) {
885 print_jump(ip, &br->entries[i], &x, buffer + off, len - off, insn); 973 printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, insn, fp);
886 break; 974 break;
887 } else { 975 } else {
888 printf("\t%016" PRIx64 "\t%s\n", ip, 976 printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", ip,
889 dump_insn(&x, ip, buffer + off, len - off, &ilen)); 977 dump_insn(&x, ip, buffer + off, len - off, &ilen));
890 if (ilen == 0) 978 if (ilen == 0)
891 break; 979 break;
892 insn++; 980 insn++;
@@ -899,9 +987,9 @@ static void print_sample_brstackinsn(struct perf_sample *sample,
899 * has not been executed yet. 987 * has not been executed yet.
900 */ 988 */
901 if (br->entries[0].from == sample->ip) 989 if (br->entries[0].from == sample->ip)
902 return; 990 goto out;
903 if (br->entries[0].flags.abort) 991 if (br->entries[0].flags.abort)
904 return; 992 goto out;
905 993
906 /* 994 /*
907 * Print final block up to sample 995
@@ -909,58 +997,61 @@ static void print_sample_brstackinsn(struct perf_sample *sample,
909 start = br->entries[0].to; 997 start = br->entries[0].to;
910 end = sample->ip; 998 end = sample->ip;
911 len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, true); 999 len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, true);
912 print_ip_sym(thread, x.cpumode, x.cpu, start, &lastsym, attr); 1000 printed += ip__fprintf_sym(start, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
913 if (len <= 0) { 1001 if (len <= 0) {
914 /* Print at least last IP if basic block did not work */ 1002 /* Print at least last IP if basic block did not work */
915 len = grab_bb(buffer, sample->ip, sample->ip, 1003 len = grab_bb(buffer, sample->ip, sample->ip,
916 machine, thread, &x.is64bit, &x.cpumode, false); 1004 machine, thread, &x.is64bit, &x.cpumode, false);
917 if (len <= 0) 1005 if (len <= 0)
918 return; 1006 goto out;
919 1007
920 printf("\t%016" PRIx64 "\t%s\n", sample->ip, 1008 printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", sample->ip,
921 dump_insn(&x, sample->ip, buffer, len, NULL)); 1009 dump_insn(&x, sample->ip, buffer, len, NULL));
922 return; 1010 goto out;
923 } 1011 }
924 for (off = 0; off <= end - start; off += ilen) { 1012 for (off = 0; off <= end - start; off += ilen) {
925 printf("\t%016" PRIx64 "\t%s\n", start + off, 1013 printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", start + off,
926 dump_insn(&x, start + off, buffer + off, len - off, &ilen)); 1014 dump_insn(&x, start + off, buffer + off, len - off, &ilen));
927 if (ilen == 0) 1015 if (ilen == 0)
928 break; 1016 break;
929 } 1017 }
1018out:
1019 return printed;
930} 1020}
931 1021
932static void print_sample_addr(struct perf_sample *sample, 1022static int perf_sample__fprintf_addr(struct perf_sample *sample,
933 struct thread *thread, 1023 struct thread *thread,
934 struct perf_event_attr *attr) 1024 struct perf_event_attr *attr, FILE *fp)
935{ 1025{
936 struct addr_location al; 1026 struct addr_location al;
937 1027 int printed = fprintf(fp, "%16" PRIx64, sample->addr);
938 printf("%16" PRIx64, sample->addr);
939 1028
940 if (!sample_addr_correlates_sym(attr)) 1029 if (!sample_addr_correlates_sym(attr))
941 return; 1030 goto out;
942 1031
943 thread__resolve(thread, &al, sample); 1032 thread__resolve(thread, &al, sample);
944 1033
945 if (PRINT_FIELD(SYM)) { 1034 if (PRINT_FIELD(SYM)) {
946 printf(" "); 1035 printed += fprintf(fp, " ");
947 if (PRINT_FIELD(SYMOFFSET)) 1036 if (PRINT_FIELD(SYMOFFSET))
948 symbol__fprintf_symname_offs(al.sym, &al, stdout); 1037 printed += symbol__fprintf_symname_offs(al.sym, &al, fp);
949 else 1038 else
950 symbol__fprintf_symname(al.sym, stdout); 1039 printed += symbol__fprintf_symname(al.sym, fp);
951 } 1040 }
952 1041
953 if (PRINT_FIELD(DSO)) { 1042 if (PRINT_FIELD(DSO)) {
954 printf(" ("); 1043 printed += fprintf(fp, " (");
955 map__fprintf_dsoname(al.map, stdout); 1044 printed += map__fprintf_dsoname(al.map, fp);
956 printf(")"); 1045 printed += fprintf(fp, ")");
957 } 1046 }
1047out:
1048 return printed;
958} 1049}
959 1050
960static void print_sample_callindent(struct perf_sample *sample, 1051static int perf_sample__fprintf_callindent(struct perf_sample *sample,
961 struct perf_evsel *evsel, 1052 struct perf_evsel *evsel,
962 struct thread *thread, 1053 struct thread *thread,
963 struct addr_location *al) 1054 struct addr_location *al, FILE *fp)
964{ 1055{
965 struct perf_event_attr *attr = &evsel->attr; 1056 struct perf_event_attr *attr = &evsel->attr;
966 size_t depth = thread_stack__depth(thread); 1057 size_t depth = thread_stack__depth(thread);
@@ -995,12 +1086,12 @@ static void print_sample_callindent(struct perf_sample *sample,
995 } 1086 }
996 1087
997 if (name) 1088 if (name)
998 len = printf("%*s%s", (int)depth * 4, "", name); 1089 len = fprintf(fp, "%*s%s", (int)depth * 4, "", name);
999 else if (ip) 1090 else if (ip)
1000 len = printf("%*s%16" PRIx64, (int)depth * 4, "", ip); 1091 len = fprintf(fp, "%*s%16" PRIx64, (int)depth * 4, "", ip);
1001 1092
1002 if (len < 0) 1093 if (len < 0)
1003 return; 1094 return len;
1004 1095
1005 /* 1096 /*
1006 * Try to keep the output length from changing frequently so that the 1097 * Try to keep the output length from changing frequently so that the
@@ -1010,39 +1101,46 @@ static void print_sample_callindent(struct perf_sample *sample,
1010 spacing = round_up(len + 4, 32); 1101 spacing = round_up(len + 4, 32);
1011 1102
1012 if (len < spacing) 1103 if (len < spacing)
1013 printf("%*s", spacing - len, ""); 1104 len += fprintf(fp, "%*s", spacing - len, "");
1105
1106 return len;
1014} 1107}
1015 1108
1016static void print_insn(struct perf_sample *sample, 1109static int perf_sample__fprintf_insn(struct perf_sample *sample,
1017 struct perf_event_attr *attr, 1110 struct perf_event_attr *attr,
1018 struct thread *thread, 1111 struct thread *thread,
1019 struct machine *machine) 1112 struct machine *machine, FILE *fp)
1020{ 1113{
1114 int printed = 0;
1115
1021 if (PRINT_FIELD(INSNLEN)) 1116 if (PRINT_FIELD(INSNLEN))
1022 printf(" ilen: %d", sample->insn_len); 1117 printed += fprintf(fp, " ilen: %d", sample->insn_len);
1023 if (PRINT_FIELD(INSN)) { 1118 if (PRINT_FIELD(INSN)) {
1024 int i; 1119 int i;
1025 1120
1026 printf(" insn:"); 1121 printed += fprintf(fp, " insn:");
1027 for (i = 0; i < sample->insn_len; i++) 1122 for (i = 0; i < sample->insn_len; i++)
1028 printf(" %02x", (unsigned char)sample->insn[i]); 1123 printed += fprintf(fp, " %02x", (unsigned char)sample->insn[i]);
1029 } 1124 }
1030 if (PRINT_FIELD(BRSTACKINSN)) 1125 if (PRINT_FIELD(BRSTACKINSN))
1031 print_sample_brstackinsn(sample, thread, attr, machine); 1126 printed += perf_sample__fprintf_brstackinsn(sample, thread, attr, machine, fp);
1127
1128 return printed;
1032} 1129}
1033 1130
1034static void print_sample_bts(struct perf_sample *sample, 1131static int perf_sample__fprintf_bts(struct perf_sample *sample,
1035 struct perf_evsel *evsel, 1132 struct perf_evsel *evsel,
1036 struct thread *thread, 1133 struct thread *thread,
1037 struct addr_location *al, 1134 struct addr_location *al,
1038 struct machine *machine) 1135 struct machine *machine, FILE *fp)
1039{ 1136{
1040 struct perf_event_attr *attr = &evsel->attr; 1137 struct perf_event_attr *attr = &evsel->attr;
1041 unsigned int type = output_type(attr->type); 1138 unsigned int type = output_type(attr->type);
1042 bool print_srcline_last = false; 1139 bool print_srcline_last = false;
1140 int printed = 0;
1043 1141
1044 if (PRINT_FIELD(CALLINDENT)) 1142 if (PRINT_FIELD(CALLINDENT))
1045 print_sample_callindent(sample, evsel, thread, al); 1143 printed += perf_sample__fprintf_callindent(sample, evsel, thread, al, fp);
1046 1144
1047 /* print branch_from information */ 1145 /* print branch_from information */
1048 if (PRINT_FIELD(IP)) { 1146 if (PRINT_FIELD(IP)) {
@@ -1055,31 +1153,30 @@ static void print_sample_bts(struct perf_sample *sample,
1055 cursor = &callchain_cursor; 1153 cursor = &callchain_cursor;
1056 1154
1057 if (cursor == NULL) { 1155 if (cursor == NULL) {
1058 putchar(' '); 1156 printed += fprintf(fp, " ");
1059 if (print_opts & EVSEL__PRINT_SRCLINE) { 1157 if (print_opts & EVSEL__PRINT_SRCLINE) {
1060 print_srcline_last = true; 1158 print_srcline_last = true;
1061 print_opts &= ~EVSEL__PRINT_SRCLINE; 1159 print_opts &= ~EVSEL__PRINT_SRCLINE;
1062 } 1160 }
1063 } else 1161 } else
1064 putchar('\n'); 1162 printed += fprintf(fp, "\n");
1065 1163
1066 sample__fprintf_sym(sample, al, 0, print_opts, cursor, stdout); 1164 printed += sample__fprintf_sym(sample, al, 0, print_opts, cursor, fp);
1067 } 1165 }
1068 1166
1069 /* print branch_to information */ 1167 /* print branch_to information */
1070 if (PRINT_FIELD(ADDR) || 1168 if (PRINT_FIELD(ADDR) ||
1071 ((evsel->attr.sample_type & PERF_SAMPLE_ADDR) && 1169 ((evsel->attr.sample_type & PERF_SAMPLE_ADDR) &&
1072 !output[type].user_set)) { 1170 !output[type].user_set)) {
1073 printf(" => "); 1171 printed += fprintf(fp, " => ");
1074 print_sample_addr(sample, thread, attr); 1172 printed += perf_sample__fprintf_addr(sample, thread, attr, fp);
1075 } 1173 }
1076 1174
1077 if (print_srcline_last) 1175 if (print_srcline_last)
1078 map__fprintf_srcline(al->map, al->addr, "\n ", stdout); 1176 printed += map__fprintf_srcline(al->map, al->addr, "\n ", fp);
1079
1080 print_insn(sample, attr, thread, machine);
1081 1177
1082 printf("\n"); 1178 printed += perf_sample__fprintf_insn(sample, attr, thread, machine, fp);
1179 return printed + fprintf(fp, "\n");
1083} 1180}
1084 1181
1085static struct { 1182static struct {
@@ -1102,7 +1199,7 @@ static struct {
1102 {0, NULL} 1199 {0, NULL}
1103}; 1200};
1104 1201
1105static void print_sample_flags(u32 flags) 1202static int perf_sample__fprintf_flags(u32 flags, FILE *fp)
1106{ 1203{
1107 const char *chars = PERF_IP_FLAG_CHARS; 1204 const char *chars = PERF_IP_FLAG_CHARS;
1108 const int n = strlen(PERF_IP_FLAG_CHARS); 1205 const int n = strlen(PERF_IP_FLAG_CHARS);
@@ -1129,9 +1226,9 @@ static void print_sample_flags(u32 flags)
1129 str[pos] = 0; 1226 str[pos] = 0;
1130 1227
1131 if (name) 1228 if (name)
1132 printf(" %-7s%4s ", name, in_tx ? "(x)" : ""); 1229 return fprintf(fp, " %-7s%4s ", name, in_tx ? "(x)" : "");
1133 else 1230
1134 printf(" %-11s ", str); 1231 return fprintf(fp, " %-11s ", str);
1135} 1232}
1136 1233
1137struct printer_data { 1234struct printer_data {
@@ -1140,40 +1237,40 @@ struct printer_data {
1140 bool is_printable; 1237 bool is_printable;
1141}; 1238};
1142 1239
1143static void 1240static int sample__fprintf_bpf_output(enum binary_printer_ops op,
1144print_sample_bpf_output_printer(enum binary_printer_ops op, 1241 unsigned int val,
1145 unsigned int val, 1242 void *extra, FILE *fp)
1146 void *extra)
1147{ 1243{
1148 unsigned char ch = (unsigned char)val; 1244 unsigned char ch = (unsigned char)val;
1149 struct printer_data *printer_data = extra; 1245 struct printer_data *printer_data = extra;
1246 int printed = 0;
1150 1247
1151 switch (op) { 1248 switch (op) {
1152 case BINARY_PRINT_DATA_BEGIN: 1249 case BINARY_PRINT_DATA_BEGIN:
1153 printf("\n"); 1250 printed += fprintf(fp, "\n");
1154 break; 1251 break;
1155 case BINARY_PRINT_LINE_BEGIN: 1252 case BINARY_PRINT_LINE_BEGIN:
1156 printf("%17s", !printer_data->line_no ? "BPF output:" : 1253 printed += fprintf(fp, "%17s", !printer_data->line_no ? "BPF output:" :
1157 " "); 1254 " ");
1158 break; 1255 break;
1159 case BINARY_PRINT_ADDR: 1256 case BINARY_PRINT_ADDR:
1160 printf(" %04x:", val); 1257 printed += fprintf(fp, " %04x:", val);
1161 break; 1258 break;
1162 case BINARY_PRINT_NUM_DATA: 1259 case BINARY_PRINT_NUM_DATA:
1163 printf(" %02x", val); 1260 printed += fprintf(fp, " %02x", val);
1164 break; 1261 break;
1165 case BINARY_PRINT_NUM_PAD: 1262 case BINARY_PRINT_NUM_PAD:
1166 printf(" "); 1263 printed += fprintf(fp, " ");
1167 break; 1264 break;
1168 case BINARY_PRINT_SEP: 1265 case BINARY_PRINT_SEP:
1169 printf(" "); 1266 printed += fprintf(fp, " ");
1170 break; 1267 break;
1171 case BINARY_PRINT_CHAR_DATA: 1268 case BINARY_PRINT_CHAR_DATA:
1172 if (printer_data->hit_nul && ch) 1269 if (printer_data->hit_nul && ch)
1173 printer_data->is_printable = false; 1270 printer_data->is_printable = false;
1174 1271
1175 if (!isprint(ch)) { 1272 if (!isprint(ch)) {
1176 printf("%c", '.'); 1273 printed += fprintf(fp, "%c", '.');
1177 1274
1178 if (!printer_data->is_printable) 1275 if (!printer_data->is_printable)
1179 break; 1276 break;
@@ -1183,154 +1280,154 @@ print_sample_bpf_output_printer(enum binary_printer_ops op,
1183 else 1280 else
1184 printer_data->is_printable = false; 1281 printer_data->is_printable = false;
1185 } else { 1282 } else {
1186 printf("%c", ch); 1283 printed += fprintf(fp, "%c", ch);
1187 } 1284 }
1188 break; 1285 break;
1189 case BINARY_PRINT_CHAR_PAD: 1286 case BINARY_PRINT_CHAR_PAD:
1190 printf(" "); 1287 printed += fprintf(fp, " ");
1191 break; 1288 break;
1192 case BINARY_PRINT_LINE_END: 1289 case BINARY_PRINT_LINE_END:
1193 printf("\n"); 1290 printed += fprintf(fp, "\n");
1194 printer_data->line_no++; 1291 printer_data->line_no++;
1195 break; 1292 break;
1196 case BINARY_PRINT_DATA_END: 1293 case BINARY_PRINT_DATA_END:
1197 default: 1294 default:
1198 break; 1295 break;
1199 } 1296 }
1297
1298 return printed;
1200} 1299}
1201 1300
1202static void print_sample_bpf_output(struct perf_sample *sample) 1301static int perf_sample__fprintf_bpf_output(struct perf_sample *sample, FILE *fp)
1203{ 1302{
1204 unsigned int nr_bytes = sample->raw_size; 1303 unsigned int nr_bytes = sample->raw_size;
1205 struct printer_data printer_data = {0, false, true}; 1304 struct printer_data printer_data = {0, false, true};
1206 1305 int printed = binary__fprintf(sample->raw_data, nr_bytes, 8,
1207 print_binary(sample->raw_data, nr_bytes, 8, 1306 sample__fprintf_bpf_output, &printer_data, fp);
1208 print_sample_bpf_output_printer, &printer_data);
1209 1307
1210 if (printer_data.is_printable && printer_data.hit_nul) 1308 if (printer_data.is_printable && printer_data.hit_nul)
1211 printf("%17s \"%s\"\n", "BPF string:", 1309 printed += fprintf(fp, "%17s \"%s\"\n", "BPF string:", (char *)(sample->raw_data));
1212 (char *)(sample->raw_data)); 1310
1311 return printed;
1213} 1312}
1214 1313
1215static void print_sample_spacing(int len, int spacing) 1314static int perf_sample__fprintf_spacing(int len, int spacing, FILE *fp)
1216{ 1315{
1217 if (len > 0 && len < spacing) 1316 if (len > 0 && len < spacing)
1218 printf("%*s", spacing - len, ""); 1317 return fprintf(fp, "%*s", spacing - len, "");
1318
1319 return 0;
1219} 1320}
1220 1321
1221static void print_sample_pt_spacing(int len) 1322static int perf_sample__fprintf_pt_spacing(int len, FILE *fp)
1222{ 1323{
1223 print_sample_spacing(len, 34); 1324 return perf_sample__fprintf_spacing(len, 34, fp);
1224} 1325}
1225 1326
1226static void print_sample_synth_ptwrite(struct perf_sample *sample) 1327static int perf_sample__fprintf_synth_ptwrite(struct perf_sample *sample, FILE *fp)
1227{ 1328{
1228 struct perf_synth_intel_ptwrite *data = perf_sample__synth_ptr(sample); 1329 struct perf_synth_intel_ptwrite *data = perf_sample__synth_ptr(sample);
1229 int len; 1330 int len;
1230 1331
1231 if (perf_sample__bad_synth_size(sample, *data)) 1332 if (perf_sample__bad_synth_size(sample, *data))
1232 return; 1333 return 0;
1233 1334
1234 len = printf(" IP: %u payload: %#" PRIx64 " ", 1335 len = fprintf(fp, " IP: %u payload: %#" PRIx64 " ",
1235 data->ip, le64_to_cpu(data->payload)); 1336 data->ip, le64_to_cpu(data->payload));
1236 print_sample_pt_spacing(len); 1337 return len + perf_sample__fprintf_pt_spacing(len, fp);
1237} 1338}
1238 1339
1239static void print_sample_synth_mwait(struct perf_sample *sample) 1340static int perf_sample__fprintf_synth_mwait(struct perf_sample *sample, FILE *fp)
1240{ 1341{
1241 struct perf_synth_intel_mwait *data = perf_sample__synth_ptr(sample); 1342 struct perf_synth_intel_mwait *data = perf_sample__synth_ptr(sample);
1242 int len; 1343 int len;
1243 1344
1244 if (perf_sample__bad_synth_size(sample, *data)) 1345 if (perf_sample__bad_synth_size(sample, *data))
1245 return; 1346 return 0;
1246 1347
1247 len = printf(" hints: %#x extensions: %#x ", 1348 len = fprintf(fp, " hints: %#x extensions: %#x ",
1248 data->hints, data->extensions); 1349 data->hints, data->extensions);
1249 print_sample_pt_spacing(len); 1350 return len + perf_sample__fprintf_pt_spacing(len, fp);
1250} 1351}
1251 1352
1252static void print_sample_synth_pwre(struct perf_sample *sample) 1353static int perf_sample__fprintf_synth_pwre(struct perf_sample *sample, FILE *fp)
1253{ 1354{
1254 struct perf_synth_intel_pwre *data = perf_sample__synth_ptr(sample); 1355 struct perf_synth_intel_pwre *data = perf_sample__synth_ptr(sample);
1255 int len; 1356 int len;
1256 1357
1257 if (perf_sample__bad_synth_size(sample, *data)) 1358 if (perf_sample__bad_synth_size(sample, *data))
1258 return; 1359 return 0;
1259 1360
1260 len = printf(" hw: %u cstate: %u sub-cstate: %u ", 1361 len = fprintf(fp, " hw: %u cstate: %u sub-cstate: %u ",
1261 data->hw, data->cstate, data->subcstate); 1362 data->hw, data->cstate, data->subcstate);
1262 print_sample_pt_spacing(len); 1363 return len + perf_sample__fprintf_pt_spacing(len, fp);
1263} 1364}
1264 1365
1265static void print_sample_synth_exstop(struct perf_sample *sample) 1366static int perf_sample__fprintf_synth_exstop(struct perf_sample *sample, FILE *fp)
1266{ 1367{
1267 struct perf_synth_intel_exstop *data = perf_sample__synth_ptr(sample); 1368 struct perf_synth_intel_exstop *data = perf_sample__synth_ptr(sample);
1268 int len; 1369 int len;
1269 1370
1270 if (perf_sample__bad_synth_size(sample, *data)) 1371 if (perf_sample__bad_synth_size(sample, *data))
1271 return; 1372 return 0;
1272 1373
1273 len = printf(" IP: %u ", data->ip); 1374 len = fprintf(fp, " IP: %u ", data->ip);
1274 print_sample_pt_spacing(len); 1375 return len + perf_sample__fprintf_pt_spacing(len, fp);
1275} 1376}
1276 1377
1277static void print_sample_synth_pwrx(struct perf_sample *sample) 1378static int perf_sample__fprintf_synth_pwrx(struct perf_sample *sample, FILE *fp)
1278{ 1379{
1279 struct perf_synth_intel_pwrx *data = perf_sample__synth_ptr(sample); 1380 struct perf_synth_intel_pwrx *data = perf_sample__synth_ptr(sample);
1280 int len; 1381 int len;
1281 1382
1282 if (perf_sample__bad_synth_size(sample, *data)) 1383 if (perf_sample__bad_synth_size(sample, *data))
1283 return; 1384 return 0;
1284 1385
1285 len = printf(" deepest cstate: %u last cstate: %u wake reason: %#x ", 1386 len = fprintf(fp, " deepest cstate: %u last cstate: %u wake reason: %#x ",
1286 data->deepest_cstate, data->last_cstate, 1387 data->deepest_cstate, data->last_cstate,
1287 data->wake_reason); 1388 data->wake_reason);
1288 print_sample_pt_spacing(len); 1389 return len + perf_sample__fprintf_pt_spacing(len, fp);
1289} 1390}
1290 1391
1291static void print_sample_synth_cbr(struct perf_sample *sample) 1392static int perf_sample__fprintf_synth_cbr(struct perf_sample *sample, FILE *fp)
1292{ 1393{
1293 struct perf_synth_intel_cbr *data = perf_sample__synth_ptr(sample); 1394 struct perf_synth_intel_cbr *data = perf_sample__synth_ptr(sample);
1294 unsigned int percent, freq; 1395 unsigned int percent, freq;
1295 int len; 1396 int len;
1296 1397
1297 if (perf_sample__bad_synth_size(sample, *data)) 1398 if (perf_sample__bad_synth_size(sample, *data))
1298 return; 1399 return 0;
1299 1400
1300 freq = (le32_to_cpu(data->freq) + 500) / 1000; 1401 freq = (le32_to_cpu(data->freq) + 500) / 1000;
1301 len = printf(" cbr: %2u freq: %4u MHz ", data->cbr, freq); 1402 len = fprintf(fp, " cbr: %2u freq: %4u MHz ", data->cbr, freq);
1302 if (data->max_nonturbo) { 1403 if (data->max_nonturbo) {
1303 percent = (5 + (1000 * data->cbr) / data->max_nonturbo) / 10; 1404 percent = (5 + (1000 * data->cbr) / data->max_nonturbo) / 10;
1304 len += printf("(%3u%%) ", percent); 1405 len += fprintf(fp, "(%3u%%) ", percent);
1305 } 1406 }
1306 print_sample_pt_spacing(len); 1407 return len + perf_sample__fprintf_pt_spacing(len, fp);
1307} 1408}
1308 1409
1309static void print_sample_synth(struct perf_sample *sample, 1410static int perf_sample__fprintf_synth(struct perf_sample *sample,
1310 struct perf_evsel *evsel) 1411 struct perf_evsel *evsel, FILE *fp)
1311{ 1412{
1312 switch (evsel->attr.config) { 1413 switch (evsel->attr.config) {
1313 case PERF_SYNTH_INTEL_PTWRITE: 1414 case PERF_SYNTH_INTEL_PTWRITE:
1314 print_sample_synth_ptwrite(sample); 1415 return perf_sample__fprintf_synth_ptwrite(sample, fp);
1315 break;
1316 case PERF_SYNTH_INTEL_MWAIT: 1416 case PERF_SYNTH_INTEL_MWAIT:
1317 print_sample_synth_mwait(sample); 1417 return perf_sample__fprintf_synth_mwait(sample, fp);
1318 break;
1319 case PERF_SYNTH_INTEL_PWRE: 1418 case PERF_SYNTH_INTEL_PWRE:
1320 print_sample_synth_pwre(sample); 1419 return perf_sample__fprintf_synth_pwre(sample, fp);
1321 break;
1322 case PERF_SYNTH_INTEL_EXSTOP: 1420 case PERF_SYNTH_INTEL_EXSTOP:
1323 print_sample_synth_exstop(sample); 1421 return perf_sample__fprintf_synth_exstop(sample, fp);
1324 break;
1325 case PERF_SYNTH_INTEL_PWRX: 1422 case PERF_SYNTH_INTEL_PWRX:
1326 print_sample_synth_pwrx(sample); 1423 return perf_sample__fprintf_synth_pwrx(sample, fp);
1327 break;
1328 case PERF_SYNTH_INTEL_CBR: 1424 case PERF_SYNTH_INTEL_CBR:
1329 print_sample_synth_cbr(sample); 1425 return perf_sample__fprintf_synth_cbr(sample, fp);
1330 break;
1331 default: 1426 default:
1332 break; 1427 break;
1333 } 1428 }
1429
1430 return 0;
1334} 1431}
1335 1432
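
The Intel PT synth printers above pad whatever they emit out to a fixed 34-character column, so the fields printed after them stay aligned regardless of how wide the payload text was. A small illustration of that padding helper, outside of perf and with illustrative names:

#include <stdio.h>

/* Pad with spaces until 'spacing' columns have been used after 'len' characters. */
static int fprintf_spacing(int len, int spacing, FILE *fp)
{
	if (len > 0 && len < spacing)
		return fprintf(fp, "%*s", spacing - len, "");
	return 0;
}

int main(void)
{
	int len = fprintf(stdout, " hints: %#x extensions: %#x ", 0x1, 0x2);

	len += fprintf_spacing(len, 34, stdout);
	fprintf(stdout, "| next field starts at column %d\n", len + 1);
	return 0;
}
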
1336struct perf_script { 1433struct perf_script {
@@ -1341,6 +1438,7 @@ struct perf_script {
1341 bool show_switch_events; 1438 bool show_switch_events;
1342 bool show_namespace_events; 1439 bool show_namespace_events;
1343 bool allocated; 1440 bool allocated;
1441 bool per_event_dump;
1344 struct cpu_map *cpus; 1442 struct cpu_map *cpus;
1345 struct thread_map *threads; 1443 struct thread_map *threads;
1346 int name_width; 1444 int name_width;
@@ -1362,7 +1460,7 @@ static int perf_evlist__max_name_len(struct perf_evlist *evlist)
1362 return max; 1460 return max;
1363} 1461}
1364 1462
1365static size_t data_src__printf(u64 data_src) 1463static int data_src__fprintf(u64 data_src, FILE *fp)
1366{ 1464{
1367 struct mem_info mi = { .data_src.val = data_src }; 1465 struct mem_info mi = { .data_src.val = data_src };
1368 char decode[100]; 1466 char decode[100];
@@ -1376,7 +1474,7 @@ static size_t data_src__printf(u64 data_src)
1376 if (maxlen < len) 1474 if (maxlen < len)
1377 maxlen = len; 1475 maxlen = len;
1378 1476
1379 return printf("%-*s", maxlen, out); 1477 return fprintf(fp, "%-*s", maxlen, out);
1380} 1478}
1381 1479
1382static void process_event(struct perf_script *script, 1480static void process_event(struct perf_script *script,
@@ -1387,14 +1485,18 @@ static void process_event(struct perf_script *script,
1387 struct thread *thread = al->thread; 1485 struct thread *thread = al->thread;
1388 struct perf_event_attr *attr = &evsel->attr; 1486 struct perf_event_attr *attr = &evsel->attr;
1389 unsigned int type = output_type(attr->type); 1487 unsigned int type = output_type(attr->type);
1488 struct perf_evsel_script *es = evsel->priv;
1489 FILE *fp = es->fp;
1390 1490
1391 if (output[type].fields == 0) 1491 if (output[type].fields == 0)
1392 return; 1492 return;
1393 1493
1394 print_sample_start(sample, thread, evsel); 1494 ++es->samples;
1495
1496 perf_sample__fprintf_start(sample, thread, evsel, fp);
1395 1497
1396 if (PRINT_FIELD(PERIOD)) 1498 if (PRINT_FIELD(PERIOD))
1397 printf("%10" PRIu64 " ", sample->period); 1499 fprintf(fp, "%10" PRIu64 " ", sample->period);
1398 1500
1399 if (PRINT_FIELD(EVNAME)) { 1501 if (PRINT_FIELD(EVNAME)) {
1400 const char *evname = perf_evsel__name(evsel); 1502 const char *evname = perf_evsel__name(evsel);
@@ -1402,33 +1504,33 @@ static void process_event(struct perf_script *script,
1402 if (!script->name_width) 1504 if (!script->name_width)
1403 script->name_width = perf_evlist__max_name_len(script->session->evlist); 1505 script->name_width = perf_evlist__max_name_len(script->session->evlist);
1404 1506
1405 printf("%*s: ", script->name_width, 1507 fprintf(fp, "%*s: ", script->name_width, evname ?: "[unknown]");
1406 evname ? evname : "[unknown]");
1407 } 1508 }
1408 1509
1409 if (print_flags) 1510 if (print_flags)
1410 print_sample_flags(sample->flags); 1511 perf_sample__fprintf_flags(sample->flags, fp);
1411 1512
1412 if (is_bts_event(attr)) { 1513 if (is_bts_event(attr)) {
1413 print_sample_bts(sample, evsel, thread, al, machine); 1514 perf_sample__fprintf_bts(sample, evsel, thread, al, machine, fp);
1414 return; 1515 return;
1415 } 1516 }
1416 1517
1417 if (PRINT_FIELD(TRACE)) 1518 if (PRINT_FIELD(TRACE)) {
1418 event_format__print(evsel->tp_format, sample->cpu, 1519 event_format__fprintf(evsel->tp_format, sample->cpu,
1419 sample->raw_data, sample->raw_size); 1520 sample->raw_data, sample->raw_size, fp);
1521 }
1420 1522
1421 if (attr->type == PERF_TYPE_SYNTH && PRINT_FIELD(SYNTH)) 1523 if (attr->type == PERF_TYPE_SYNTH && PRINT_FIELD(SYNTH))
1422 print_sample_synth(sample, evsel); 1524 perf_sample__fprintf_synth(sample, evsel, fp);
1423 1525
1424 if (PRINT_FIELD(ADDR)) 1526 if (PRINT_FIELD(ADDR))
1425 print_sample_addr(sample, thread, attr); 1527 perf_sample__fprintf_addr(sample, thread, attr, fp);
1426 1528
1427 if (PRINT_FIELD(DATA_SRC)) 1529 if (PRINT_FIELD(DATA_SRC))
1428 data_src__printf(sample->data_src); 1530 data_src__fprintf(sample->data_src, fp);
1429 1531
1430 if (PRINT_FIELD(WEIGHT)) 1532 if (PRINT_FIELD(WEIGHT))
1431 printf("%16" PRIu64, sample->weight); 1533 fprintf(fp, "%16" PRIu64, sample->weight);
1432 1534
1433 if (PRINT_FIELD(IP)) { 1535 if (PRINT_FIELD(IP)) {
1434 struct callchain_cursor *cursor = NULL; 1536 struct callchain_cursor *cursor = NULL;
@@ -1438,27 +1540,30 @@ static void process_event(struct perf_script *script,
1438 sample, NULL, NULL, scripting_max_stack) == 0) 1540 sample, NULL, NULL, scripting_max_stack) == 0)
1439 cursor = &callchain_cursor; 1541 cursor = &callchain_cursor;
1440 1542
1441 putchar(cursor ? '\n' : ' '); 1543 fputc(cursor ? '\n' : ' ', fp);
1442 sample__fprintf_sym(sample, al, 0, output[type].print_ip_opts, cursor, stdout); 1544 sample__fprintf_sym(sample, al, 0, output[type].print_ip_opts, cursor, fp);
1443 } 1545 }
1444 1546
1445 if (PRINT_FIELD(IREGS)) 1547 if (PRINT_FIELD(IREGS))
1446 print_sample_iregs(sample, attr); 1548 perf_sample__fprintf_iregs(sample, attr, fp);
1549
1550 if (PRINT_FIELD(UREGS))
1551 perf_sample__fprintf_uregs(sample, attr, fp);
1447 1552
1448 if (PRINT_FIELD(BRSTACK)) 1553 if (PRINT_FIELD(BRSTACK))
1449 print_sample_brstack(sample, thread, attr); 1554 perf_sample__fprintf_brstack(sample, thread, attr, fp);
1450 else if (PRINT_FIELD(BRSTACKSYM)) 1555 else if (PRINT_FIELD(BRSTACKSYM))
1451 print_sample_brstacksym(sample, thread, attr); 1556 perf_sample__fprintf_brstacksym(sample, thread, attr, fp);
1452 else if (PRINT_FIELD(BRSTACKOFF)) 1557 else if (PRINT_FIELD(BRSTACKOFF))
1453 print_sample_brstackoff(sample, thread, attr); 1558 perf_sample__fprintf_brstackoff(sample, thread, attr, fp);
1454 1559
1455 if (perf_evsel__is_bpf_output(evsel) && PRINT_FIELD(BPF_OUTPUT)) 1560 if (perf_evsel__is_bpf_output(evsel) && PRINT_FIELD(BPF_OUTPUT))
1456 print_sample_bpf_output(sample); 1561 perf_sample__fprintf_bpf_output(sample, fp);
1457 print_insn(sample, attr, thread, machine); 1562 perf_sample__fprintf_insn(sample, attr, thread, machine, fp);
1458 1563
1459 if (PRINT_FIELD(PHYS_ADDR)) 1564 if (PRINT_FIELD(PHYS_ADDR))
1460 printf("%16" PRIx64, sample->phys_addr); 1565 fprintf(fp, "%16" PRIx64, sample->phys_addr);
1461 printf("\n"); 1566 fprintf(fp, "\n");
1462} 1567}
1463 1568
1464static struct scripting_ops *scripting_ops; 1569static struct scripting_ops *scripting_ops;
@@ -1632,7 +1737,7 @@ static int process_comm_event(struct perf_tool *tool,
1632 sample->tid = event->comm.tid; 1737 sample->tid = event->comm.tid;
1633 sample->pid = event->comm.pid; 1738 sample->pid = event->comm.pid;
1634 } 1739 }
1635 print_sample_start(sample, thread, evsel); 1740 perf_sample__fprintf_start(sample, thread, evsel, stdout);
1636 perf_event__fprintf(event, stdout); 1741 perf_event__fprintf(event, stdout);
1637 ret = 0; 1742 ret = 0;
1638out: 1743out:
@@ -1667,7 +1772,7 @@ static int process_namespaces_event(struct perf_tool *tool,
1667 sample->tid = event->namespaces.tid; 1772 sample->tid = event->namespaces.tid;
1668 sample->pid = event->namespaces.pid; 1773 sample->pid = event->namespaces.pid;
1669 } 1774 }
1670 print_sample_start(sample, thread, evsel); 1775 perf_sample__fprintf_start(sample, thread, evsel, stdout);
1671 perf_event__fprintf(event, stdout); 1776 perf_event__fprintf(event, stdout);
1672 ret = 0; 1777 ret = 0;
1673out: 1778out:
@@ -1700,7 +1805,7 @@ static int process_fork_event(struct perf_tool *tool,
1700 sample->tid = event->fork.tid; 1805 sample->tid = event->fork.tid;
1701 sample->pid = event->fork.pid; 1806 sample->pid = event->fork.pid;
1702 } 1807 }
1703 print_sample_start(sample, thread, evsel); 1808 perf_sample__fprintf_start(sample, thread, evsel, stdout);
1704 perf_event__fprintf(event, stdout); 1809 perf_event__fprintf(event, stdout);
1705 thread__put(thread); 1810 thread__put(thread);
1706 1811
@@ -1729,7 +1834,7 @@ static int process_exit_event(struct perf_tool *tool,
1729 sample->tid = event->fork.tid; 1834 sample->tid = event->fork.tid;
1730 sample->pid = event->fork.pid; 1835 sample->pid = event->fork.pid;
1731 } 1836 }
1732 print_sample_start(sample, thread, evsel); 1837 perf_sample__fprintf_start(sample, thread, evsel, stdout);
1733 perf_event__fprintf(event, stdout); 1838 perf_event__fprintf(event, stdout);
1734 1839
1735 if (perf_event__process_exit(tool, event, sample, machine) < 0) 1840 if (perf_event__process_exit(tool, event, sample, machine) < 0)
@@ -1764,7 +1869,7 @@ static int process_mmap_event(struct perf_tool *tool,
1764 sample->tid = event->mmap.tid; 1869 sample->tid = event->mmap.tid;
1765 sample->pid = event->mmap.pid; 1870 sample->pid = event->mmap.pid;
1766 } 1871 }
1767 print_sample_start(sample, thread, evsel); 1872 perf_sample__fprintf_start(sample, thread, evsel, stdout);
1768 perf_event__fprintf(event, stdout); 1873 perf_event__fprintf(event, stdout);
1769 thread__put(thread); 1874 thread__put(thread);
1770 return 0; 1875 return 0;
@@ -1795,7 +1900,7 @@ static int process_mmap2_event(struct perf_tool *tool,
1795 sample->tid = event->mmap2.tid; 1900 sample->tid = event->mmap2.tid;
1796 sample->pid = event->mmap2.pid; 1901 sample->pid = event->mmap2.pid;
1797 } 1902 }
1798 print_sample_start(sample, thread, evsel); 1903 perf_sample__fprintf_start(sample, thread, evsel, stdout);
1799 perf_event__fprintf(event, stdout); 1904 perf_event__fprintf(event, stdout);
1800 thread__put(thread); 1905 thread__put(thread);
1801 return 0; 1906 return 0;
@@ -1821,7 +1926,7 @@ static int process_switch_event(struct perf_tool *tool,
1821 return -1; 1926 return -1;
1822 } 1927 }
1823 1928
1824 print_sample_start(sample, thread, evsel); 1929 perf_sample__fprintf_start(sample, thread, evsel, stdout);
1825 perf_event__fprintf(event, stdout); 1930 perf_event__fprintf(event, stdout);
1826 thread__put(thread); 1931 thread__put(thread);
1827 return 0; 1932 return 0;
@@ -1832,6 +1937,75 @@ static void sig_handler(int sig __maybe_unused)
1832 session_done = 1; 1937 session_done = 1;
1833} 1938}
1834 1939
1940static void perf_script__fclose_per_event_dump(struct perf_script *script)
1941{
1942 struct perf_evlist *evlist = script->session->evlist;
1943 struct perf_evsel *evsel;
1944
1945 evlist__for_each_entry(evlist, evsel) {
1946 if (!evsel->priv)
1947 break;
1948 perf_evsel_script__delete(evsel->priv);
1949 evsel->priv = NULL;
1950 }
1951}
1952
1953static int perf_script__fopen_per_event_dump(struct perf_script *script)
1954{
1955 struct perf_evsel *evsel;
1956
1957 evlist__for_each_entry(script->session->evlist, evsel) {
1958 /*
 1959 * Already set up? I.e. we may be called twice in cases like
 1960 * Intel PT, one for the intel_pt// and dummy events, then
 1961 * for the evsels synthesized from the auxtrace info.
 1962 *
 1963 * See perf_script__process_auxtrace_info.
1964 */
1965 if (evsel->priv != NULL)
1966 continue;
1967
1968 evsel->priv = perf_evsel_script__new(evsel, script->session->data);
1969 if (evsel->priv == NULL)
1970 goto out_err_fclose;
1971 }
1972
1973 return 0;
1974
1975out_err_fclose:
1976 perf_script__fclose_per_event_dump(script);
1977 return -1;
1978}
1979
1980static int perf_script__setup_per_event_dump(struct perf_script *script)
1981{
1982 struct perf_evsel *evsel;
1983 static struct perf_evsel_script es_stdout;
1984
1985 if (script->per_event_dump)
1986 return perf_script__fopen_per_event_dump(script);
1987
1988 es_stdout.fp = stdout;
1989
1990 evlist__for_each_entry(script->session->evlist, evsel)
1991 evsel->priv = &es_stdout;
1992
1993 return 0;
1994}
1995
1996static void perf_script__exit_per_event_dump_stats(struct perf_script *script)
1997{
1998 struct perf_evsel *evsel;
1999
2000 evlist__for_each_entry(script->session->evlist, evsel) {
2001 struct perf_evsel_script *es = evsel->priv;
2002
2003 perf_evsel_script__fprintf(es, stdout);
2004 perf_evsel_script__delete(es);
2005 evsel->priv = NULL;
2006 }
2007}
2008
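The helpers above implement --per-event-dump: each evsel carries a small private struct holding a FILE * and a sample count in ->priv, so process_event() writes through that pointer instead of stdout, and teardown reports how many samples each event produced. A rough, self-contained sketch of the same idea with hypothetical types and file naming (not the actual perf_evsel_script API):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for perf_evsel / perf_evsel_script. */
struct evsel_script {
	FILE *fp;
	unsigned long samples;
};

struct evsel {
	const char *name;
	struct evsel_script *priv;
};

/* Open one dump file per event; the naming scheme here is only illustrative. */
static int open_per_event_dumps(struct evsel *evsels, int nr, const char *base)
{
	char path[256];
	int i;

	for (i = 0; i < nr; i++) {
		struct evsel_script *es = calloc(1, sizeof(*es));

		if (!es)
			return -1;
		snprintf(path, sizeof(path), "%s-%s.dump", base, evsels[i].name);
		es->fp = fopen(path, "w");
		if (!es->fp) {
			free(es);
			return -1;
		}
		evsels[i].priv = es;
	}
	return 0;
}

/* Print per-event sample counts, then release the files, like the exit path above. */
static void close_per_event_dumps(struct evsel *evsels, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		struct evsel_script *es = evsels[i].priv;

		if (!es)
			continue;
		fprintf(stdout, "%s: %lu samples\n", evsels[i].name, es->samples);
		fclose(es->fp);
		free(es);
		evsels[i].priv = NULL;
	}
}

int main(void)
{
	struct evsel ev[2] = { { "cycles", NULL }, { "instructions", NULL } };

	if (open_per_event_dumps(ev, 2, "perf.data") == 0) {
		ev[0].priv->samples++;	/* one fake sample routed to its own file */
		fprintf(ev[0].priv->fp, "sample for %s\n", ev[0].name);
	}
	close_per_event_dumps(ev, 2);
	return 0;
}
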
1835static int __cmd_script(struct perf_script *script) 2009static int __cmd_script(struct perf_script *script)
1836{ 2010{
1837 int ret; 2011 int ret;
@@ -1853,8 +2027,16 @@ static int __cmd_script(struct perf_script *script)
1853 if (script->show_namespace_events) 2027 if (script->show_namespace_events)
1854 script->tool.namespaces = process_namespaces_event; 2028 script->tool.namespaces = process_namespaces_event;
1855 2029
2030 if (perf_script__setup_per_event_dump(script)) {
2031 pr_err("Couldn't create the per event dump files\n");
2032 return -1;
2033 }
2034
1856 ret = perf_session__process_events(script->session); 2035 ret = perf_session__process_events(script->session);
1857 2036
2037 if (script->per_event_dump)
2038 perf_script__exit_per_event_dump_stats(script);
2039
1858 if (debug_mode) 2040 if (debug_mode)
1859 pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered); 2041 pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered);
1860 2042
@@ -2419,14 +2601,16 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
2419 char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN]; 2601 char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN];
2420 DIR *scripts_dir, *lang_dir; 2602 DIR *scripts_dir, *lang_dir;
2421 struct perf_session *session; 2603 struct perf_session *session;
2422 struct perf_data_file file = { 2604 struct perf_data data = {
2423 .path = input_name, 2605 .file = {
2424 .mode = PERF_DATA_MODE_READ, 2606 .path = input_name,
2607 },
2608 .mode = PERF_DATA_MODE_READ,
2425 }; 2609 };
2426 char *temp; 2610 char *temp;
2427 int i = 0; 2611 int i = 0;
2428 2612
2429 session = perf_session__new(&file, false, NULL); 2613 session = perf_session__new(&data, false, NULL);
2430 if (!session) 2614 if (!session)
2431 return -1; 2615 return -1;
2432 2616
@@ -2664,6 +2848,25 @@ int process_cpu_map_event(struct perf_tool *tool __maybe_unused,
2664 return set_maps(script); 2848 return set_maps(script);
2665} 2849}
2666 2850
2851#ifdef HAVE_AUXTRACE_SUPPORT
2852static int perf_script__process_auxtrace_info(struct perf_tool *tool,
2853 union perf_event *event,
2854 struct perf_session *session)
2855{
2856 int ret = perf_event__process_auxtrace_info(tool, event, session);
2857
2858 if (ret == 0) {
2859 struct perf_script *script = container_of(tool, struct perf_script, tool);
2860
2861 ret = perf_script__setup_per_event_dump(script);
2862 }
2863
2864 return ret;
2865}
2866#else
2867#define perf_script__process_auxtrace_info 0
2868#endif
2869
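When perf is built without auxtrace support, the wrapper above is simply defined to 0, which makes the .auxtrace_info initializer a null pointer and leaves the callback unset. A tiny sketch of that conditional-compilation trick with made-up names:

#include <stdio.h>

struct tool {
	int (*auxtrace_info)(int arg);
};

#ifdef HAVE_FEATURE
static int feature_auxtrace_info(int arg)
{
	printf("processing auxtrace info %d\n", arg);
	return 0;
}
#else
/* Compiled out: the name expands to 0, so the callback slot stays NULL. */
#define feature_auxtrace_info 0
#endif

int main(void)
{
	struct tool t = { .auxtrace_info = feature_auxtrace_info };

	if (t.auxtrace_info)
		return t.auxtrace_info(1);

	printf("feature not built in, callback left NULL\n");
	return 0;
}
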
2667int cmd_script(int argc, const char **argv) 2870int cmd_script(int argc, const char **argv)
2668{ 2871{
2669 bool show_full_info = false; 2872 bool show_full_info = false;
@@ -2692,7 +2895,7 @@ int cmd_script(int argc, const char **argv)
2692 .feature = perf_event__process_feature, 2895 .feature = perf_event__process_feature,
2693 .build_id = perf_event__process_build_id, 2896 .build_id = perf_event__process_build_id,
2694 .id_index = perf_event__process_id_index, 2897 .id_index = perf_event__process_id_index,
2695 .auxtrace_info = perf_event__process_auxtrace_info, 2898 .auxtrace_info = perf_script__process_auxtrace_info,
2696 .auxtrace = perf_event__process_auxtrace, 2899 .auxtrace = perf_event__process_auxtrace,
2697 .auxtrace_error = perf_event__process_auxtrace_error, 2900 .auxtrace_error = perf_event__process_auxtrace_error,
2698 .stat = perf_event__process_stat_event, 2901 .stat = perf_event__process_stat_event,
@@ -2704,7 +2907,7 @@ int cmd_script(int argc, const char **argv)
2704 .ordering_requires_timestamps = true, 2907 .ordering_requires_timestamps = true,
2705 }, 2908 },
2706 }; 2909 };
2707 struct perf_data_file file = { 2910 struct perf_data data = {
2708 .mode = PERF_DATA_MODE_READ, 2911 .mode = PERF_DATA_MODE_READ,
2709 }; 2912 };
2710 const struct option options[] = { 2913 const struct option options[] = {
@@ -2740,7 +2943,7 @@ int cmd_script(int argc, const char **argv)
2740 "+field to add and -field to remove." 2943 "+field to add and -field to remove."
2741 "Valid types: hw,sw,trace,raw,synth. " 2944 "Valid types: hw,sw,trace,raw,synth. "
2742 "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso," 2945 "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
2743 "addr,symoff,period,iregs,brstack,brstacksym,flags," 2946 "addr,symoff,period,iregs,uregs,brstack,brstacksym,flags,"
2744 "bpf-output,callindent,insn,insnlen,brstackinsn,synth,phys_addr", 2947 "bpf-output,callindent,insn,insnlen,brstackinsn,synth,phys_addr",
2745 parse_output_fields), 2948 parse_output_fields),
2746 OPT_BOOLEAN('a', "all-cpus", &system_wide, 2949 OPT_BOOLEAN('a', "all-cpus", &system_wide,
@@ -2772,6 +2975,8 @@ int cmd_script(int argc, const char **argv)
2772 "Show context switch events (if recorded)"), 2975 "Show context switch events (if recorded)"),
2773 OPT_BOOLEAN('\0', "show-namespace-events", &script.show_namespace_events, 2976 OPT_BOOLEAN('\0', "show-namespace-events", &script.show_namespace_events,
2774 "Show namespace events (if recorded)"), 2977 "Show namespace events (if recorded)"),
2978 OPT_BOOLEAN('\0', "per-event-dump", &script.per_event_dump,
2979 "Dump trace output to files named by the monitored events"),
2775 OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"), 2980 OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
2776 OPT_INTEGER(0, "max-blocks", &max_blocks, 2981 OPT_INTEGER(0, "max-blocks", &max_blocks,
2777 "Maximum number of code blocks to dump with brstackinsn"), 2982 "Maximum number of code blocks to dump with brstackinsn"),
@@ -2802,13 +3007,15 @@ int cmd_script(int argc, const char **argv)
2802 NULL 3007 NULL
2803 }; 3008 };
2804 3009
3010 perf_set_singlethreaded();
3011
2805 setup_scripting(); 3012 setup_scripting();
2806 3013
2807 argc = parse_options_subcommand(argc, argv, options, script_subcommands, script_usage, 3014 argc = parse_options_subcommand(argc, argv, options, script_subcommands, script_usage,
2808 PARSE_OPT_STOP_AT_NON_OPTION); 3015 PARSE_OPT_STOP_AT_NON_OPTION);
2809 3016
2810 file.path = input_name; 3017 data.file.path = input_name;
2811 file.force = symbol_conf.force; 3018 data.force = symbol_conf.force;
2812 3019
2813 if (argc > 1 && !strncmp(argv[0], "rec", strlen("rec"))) { 3020 if (argc > 1 && !strncmp(argv[0], "rec", strlen("rec"))) {
2814 rec_script_path = get_script_path(argv[1], RECORD_SUFFIX); 3021 rec_script_path = get_script_path(argv[1], RECORD_SUFFIX);
@@ -2975,7 +3182,7 @@ int cmd_script(int argc, const char **argv)
2975 if (!script_name) 3182 if (!script_name)
2976 setup_pager(); 3183 setup_pager();
2977 3184
2978 session = perf_session__new(&file, false, &script.tool); 3185 session = perf_session__new(&data, false, &script.tool);
2979 if (session == NULL) 3186 if (session == NULL)
2980 return -1; 3187 return -1;
2981 3188
@@ -3016,7 +3223,8 @@ int cmd_script(int argc, const char **argv)
3016 machine__resolve_kernel_addr, 3223 machine__resolve_kernel_addr,
3017 &session->machines.host) < 0) { 3224 &session->machines.host) < 0) {
3018 pr_err("%s: failed to set libtraceevent function resolver\n", __func__); 3225 pr_err("%s: failed to set libtraceevent function resolver\n", __func__);
3019 return -1; 3226 err = -1;
3227 goto out_delete;
3020 } 3228 }
3021 3229
3022 if (generate_script_lang) { 3230 if (generate_script_lang) {
@@ -3030,7 +3238,7 @@ int cmd_script(int argc, const char **argv)
3030 goto out_delete; 3238 goto out_delete;
3031 } 3239 }
3032 3240
3033 input = open(file.path, O_RDONLY); /* input_name */ 3241 input = open(data.file.path, O_RDONLY); /* input_name */
3034 if (input < 0) { 3242 if (input < 0) {
3035 err = -errno; 3243 err = -errno;
3036 perror("failed to open file"); 3244 perror("failed to open file");
@@ -3076,7 +3284,8 @@ int cmd_script(int argc, const char **argv)
3076 /* needs to be parsed after looking up reference time */ 3284 /* needs to be parsed after looking up reference time */
3077 if (perf_time__parse_str(&script.ptime, script.time_str) != 0) { 3285 if (perf_time__parse_str(&script.ptime, script.time_str) != 0) {
3078 pr_err("Invalid time string\n"); 3286 pr_err("Invalid time string\n");
3079 return -EINVAL; 3287 err = -EINVAL;
3288 goto out_delete;
3080 } 3289 }
3081 3290
3082 err = __cmd_script(&script); 3291 err = __cmd_script(&script);
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 69523ed55894..59af5a8419e2 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -65,6 +65,7 @@
65#include "util/tool.h" 65#include "util/tool.h"
66#include "util/group.h" 66#include "util/group.h"
67#include "util/string2.h" 67#include "util/string2.h"
68#include "util/metricgroup.h"
68#include "asm/bug.h" 69#include "asm/bug.h"
69 70
70#include <linux/time64.h> 71#include <linux/time64.h>
@@ -133,6 +134,8 @@ static const char *smi_cost_attrs = {
133 134
134static struct perf_evlist *evsel_list; 135static struct perf_evlist *evsel_list;
135 136
137static struct rblist metric_events;
138
136static struct target target = { 139static struct target target = {
137 .uid = UINT_MAX, 140 .uid = UINT_MAX,
138}; 141};
@@ -172,7 +175,7 @@ static int print_free_counters_hint;
172 175
173struct perf_stat { 176struct perf_stat {
174 bool record; 177 bool record;
175 struct perf_data_file file; 178 struct perf_data data;
176 struct perf_session *session; 179 struct perf_session *session;
177 u64 bytes_written; 180 u64 bytes_written;
178 struct perf_tool tool; 181 struct perf_tool tool;
@@ -192,6 +195,11 @@ static struct perf_stat_config stat_config = {
192 .scale = true, 195 .scale = true,
193}; 196};
194 197
198static bool is_duration_time(struct perf_evsel *evsel)
199{
200 return !strcmp(evsel->name, "duration_time");
201}
202
195static inline void diff_timespec(struct timespec *r, struct timespec *a, 203static inline void diff_timespec(struct timespec *r, struct timespec *a,
196 struct timespec *b) 204 struct timespec *b)
197{ 205{
@@ -245,7 +253,7 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
245 * by attr->sample_type != 0, and we can't run it on 253 * by attr->sample_type != 0, and we can't run it on
246 * stat sessions. 254 * stat sessions.
247 */ 255 */
248 if (!(STAT_RECORD && perf_stat.file.is_pipe)) 256 if (!(STAT_RECORD && perf_stat.data.is_pipe))
249 attr->sample_type = PERF_SAMPLE_IDENTIFIER; 257 attr->sample_type = PERF_SAMPLE_IDENTIFIER;
250 258
251 /* 259 /*
@@ -287,7 +295,7 @@ static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
287 struct perf_sample *sample __maybe_unused, 295 struct perf_sample *sample __maybe_unused,
288 struct machine *machine __maybe_unused) 296 struct machine *machine __maybe_unused)
289{ 297{
290 if (perf_data_file__write(&perf_stat.file, event, event->header.size) < 0) { 298 if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) {
291 pr_err("failed to write perf data, error: %m\n"); 299 pr_err("failed to write perf data, error: %m\n");
292 return -1; 300 return -1;
293 } 301 }
@@ -407,6 +415,8 @@ static void process_interval(void)
407 pr_err("failed to write stat round event\n"); 415 pr_err("failed to write stat round event\n");
408 } 416 }
409 417
418 init_stats(&walltime_nsecs_stats);
419 update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000);
410 print_counters(&rs, 0, NULL); 420 print_counters(&rs, 0, NULL);
411} 421}
412 422
@@ -582,6 +592,32 @@ static bool perf_evsel__should_store_id(struct perf_evsel *counter)
582 return STAT_RECORD || counter->attr.read_format & PERF_FORMAT_ID; 592 return STAT_RECORD || counter->attr.read_format & PERF_FORMAT_ID;
583} 593}
584 594
595static struct perf_evsel *perf_evsel__reset_weak_group(struct perf_evsel *evsel)
596{
597 struct perf_evsel *c2, *leader;
598 bool is_open = true;
599
600 leader = evsel->leader;
601 pr_debug("Weak group for %s/%d failed\n",
602 leader->name, leader->nr_members);
603
604 /*
605 * for_each_group_member doesn't work here because it doesn't
606 * include the first entry.
607 */
608 evlist__for_each_entry(evsel_list, c2) {
609 if (c2 == evsel)
610 is_open = false;
611 if (c2->leader == leader) {
612 if (is_open)
613 perf_evsel__close(c2);
614 c2->leader = c2;
615 c2->nr_members = 0;
616 }
617 }
618 return leader;
619}
620
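perf_evsel__reset_weak_group() walks the whole event list rather than using for_each_group_member() because, as its comment notes, that iterator skips the group leader itself; members opened before the failing one get closed, and every member becomes its own leader. A self-contained sketch of the same walk over a toy counter type (the names are illustrative, not the perf API):

#include <stdio.h>

struct counter {
	const char *name;
	struct counter *leader;	/* points to itself when not grouped */
	int nr_members;
	int open;
};

/*
 * Break up the group that 'failed' belongs to: close the members that were
 * already opened before the failure and make every member its own leader.
 */
static struct counter *reset_weak_group(struct counter *list, int nr,
					struct counter *failed)
{
	struct counter *leader = failed->leader;
	int is_open = 1;
	int i;

	for (i = 0; i < nr; i++) {
		struct counter *c = &list[i];

		if (c == failed)
			is_open = 0;	/* this one and everything after never opened */
		if (c->leader == leader) {
			if (is_open)
				c->open = 0;	/* stands in for perf_evsel__close() */
			c->leader = c;
			c->nr_members = 0;
		}
	}
	return leader;
}

int main(void)
{
	struct counter ev[3] = {
		{ "cycles",       NULL, 3, 1 },
		{ "instructions", NULL, 0, 1 },
		{ "branches",     NULL, 0, 0 },	/* this one failed to open */
	};
	int i;

	for (i = 0; i < 3; i++)
		ev[i].leader = &ev[0];	/* one weak group led by "cycles" */

	reset_weak_group(ev, 3, &ev[2]);
	for (i = 0; i < 3; i++)
		printf("%s: leader=%s open=%d\n", ev[i].name,
		       ev[i].leader->name, ev[i].open);
	return 0;
}
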
585static int __run_perf_stat(int argc, const char **argv) 621static int __run_perf_stat(int argc, const char **argv)
586{ 622{
587 int interval = stat_config.interval; 623 int interval = stat_config.interval;
@@ -592,7 +628,7 @@ static int __run_perf_stat(int argc, const char **argv)
592 size_t l; 628 size_t l;
593 int status = 0; 629 int status = 0;
594 const bool forks = (argc > 0); 630 const bool forks = (argc > 0);
595 bool is_pipe = STAT_RECORD ? perf_stat.file.is_pipe : false; 631 bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
596 struct perf_evsel_config_term *err_term; 632 struct perf_evsel_config_term *err_term;
597 633
598 if (interval) { 634 if (interval) {
@@ -618,6 +654,15 @@ static int __run_perf_stat(int argc, const char **argv)
618 evlist__for_each_entry(evsel_list, counter) { 654 evlist__for_each_entry(evsel_list, counter) {
619try_again: 655try_again:
620 if (create_perf_stat_counter(counter) < 0) { 656 if (create_perf_stat_counter(counter) < 0) {
657
658 /* Weak group failed. Reset the group. */
659 if ((errno == EINVAL || errno == EBADF) &&
660 counter->leader != counter &&
661 counter->weak_group) {
662 counter = perf_evsel__reset_weak_group(counter);
663 goto try_again;
664 }
665
621 /* 666 /*
622 * PPC returns ENXIO for HW counters until 2.6.37 667 * PPC returns ENXIO for HW counters until 2.6.37
623 * (behavior changed with commit b0a873e). 668 * (behavior changed with commit b0a873e).
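Together with perf_evsel__reset_weak_group() above, this hunk implements the fallback for weak event groups: if opening a member fails with EINVAL or EBADF, the siblings that were already opened are closed, every event in the group becomes its own leader, and the open is retried from the leader as independent events. A condensed sketch of the reset step, assuming perf's internal evsel/evlist types:

    /* Demote a failed weak group: each member becomes its own leader. */
    struct perf_evsel *leader = evsel->leader, *c;
    bool already_open = true;                /* members before 'evsel' were opened */

    evlist__for_each_entry(evsel_list, c) {
            if (c == evsel)
                    already_open = false;
            if (c->leader != leader)
                    continue;
            if (already_open)
                    perf_evsel__close(c);    /* undo the successful opens */
            c->leader = c;
            c->nr_members = 0;
    }
    /* the caller retries create_perf_stat_counter() starting from 'leader' */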
@@ -674,10 +719,10 @@ try_again:
674 } 719 }
675 720
676 if (STAT_RECORD) { 721 if (STAT_RECORD) {
677 int err, fd = perf_data_file__fd(&perf_stat.file); 722 int err, fd = perf_data__fd(&perf_stat.data);
678 723
679 if (is_pipe) { 724 if (is_pipe) {
680 err = perf_header__write_pipe(perf_data_file__fd(&perf_stat.file)); 725 err = perf_header__write_pipe(perf_data__fd(&perf_stat.data));
681 } else { 726 } else {
682 err = perf_session__write_header(perf_stat.session, evsel_list, 727 err = perf_session__write_header(perf_stat.session, evsel_list,
683 fd, false); 728 fd, false);
@@ -800,7 +845,7 @@ static void print_noise(struct perf_evsel *evsel, double avg)
800 if (run_count == 1) 845 if (run_count == 1)
801 return; 846 return;
802 847
803 ps = evsel->priv; 848 ps = evsel->stats;
804 print_noise_pct(stddev_stats(&ps->res_stats[0]), avg); 849 print_noise_pct(stddev_stats(&ps->res_stats[0]), avg);
805} 850}
806 851
@@ -1199,7 +1244,7 @@ static void printout(int id, int nr, struct perf_evsel *counter, double uval,
1199 1244
1200 perf_stat__print_shadow_stats(counter, uval, 1245 perf_stat__print_shadow_stats(counter, uval,
1201 first_shadow_cpu(counter, id), 1246 first_shadow_cpu(counter, id),
1202 &out); 1247 &out, &metric_events);
1203 if (!csv_output && !metric_only) { 1248 if (!csv_output && !metric_only) {
1204 print_noise(counter, noise); 1249 print_noise(counter, noise);
1205 print_running(run, ena); 1250 print_running(run, ena);
@@ -1222,8 +1267,7 @@ static void aggr_update_shadow(void)
1222 continue; 1267 continue;
1223 val += perf_counts(counter->counts, cpu, 0)->val; 1268 val += perf_counts(counter->counts, cpu, 0)->val;
1224 } 1269 }
1225 val = val * counter->scale; 1270 perf_stat__update_shadow_stats(counter, val,
1226 perf_stat__update_shadow_stats(counter, &val,
1227 first_shadow_cpu(counter, id)); 1271 first_shadow_cpu(counter, id));
1228 } 1272 }
1229 } 1273 }
@@ -1325,6 +1369,9 @@ static void print_aggr(char *prefix)
1325 ad.id = id = aggr_map->map[s]; 1369 ad.id = id = aggr_map->map[s];
1326 first = true; 1370 first = true;
1327 evlist__for_each_entry(evsel_list, counter) { 1371 evlist__for_each_entry(evsel_list, counter) {
1372 if (is_duration_time(counter))
1373 continue;
1374
1328 ad.val = ad.ena = ad.run = 0; 1375 ad.val = ad.ena = ad.run = 0;
1329 ad.nr = 0; 1376 ad.nr = 0;
1330 if (!collect_data(counter, aggr_cb, &ad)) 1377 if (!collect_data(counter, aggr_cb, &ad))
@@ -1384,7 +1431,7 @@ static void counter_aggr_cb(struct perf_evsel *counter, void *data,
1384 bool first __maybe_unused) 1431 bool first __maybe_unused)
1385{ 1432{
1386 struct caggr_data *cd = data; 1433 struct caggr_data *cd = data;
1387 struct perf_stat_evsel *ps = counter->priv; 1434 struct perf_stat_evsel *ps = counter->stats;
1388 1435
1389 cd->avg += avg_stats(&ps->res_stats[0]); 1436 cd->avg += avg_stats(&ps->res_stats[0]);
1390 cd->avg_enabled += avg_stats(&ps->res_stats[1]); 1437 cd->avg_enabled += avg_stats(&ps->res_stats[1]);
@@ -1468,6 +1515,8 @@ static void print_no_aggr_metric(char *prefix)
1468 if (prefix) 1515 if (prefix)
1469 fputs(prefix, stat_config.output); 1516 fputs(prefix, stat_config.output);
1470 evlist__for_each_entry(evsel_list, counter) { 1517 evlist__for_each_entry(evsel_list, counter) {
1518 if (is_duration_time(counter))
1519 continue;
1471 if (first) { 1520 if (first) {
1472 aggr_printout(counter, cpu, 0); 1521 aggr_printout(counter, cpu, 0);
1473 first = false; 1522 first = false;
@@ -1522,6 +1571,8 @@ static void print_metric_headers(const char *prefix, bool no_indent)
1522 1571
1523 /* Print metrics headers only */ 1572 /* Print metrics headers only */
1524 evlist__for_each_entry(evsel_list, counter) { 1573 evlist__for_each_entry(evsel_list, counter) {
1574 if (is_duration_time(counter))
1575 continue;
1525 os.evsel = counter; 1576 os.evsel = counter;
1526 out.ctx = &os; 1577 out.ctx = &os;
1527 out.print_metric = print_metric_header; 1578 out.print_metric = print_metric_header;
@@ -1530,7 +1581,8 @@ static void print_metric_headers(const char *prefix, bool no_indent)
1530 os.evsel = counter; 1581 os.evsel = counter;
1531 perf_stat__print_shadow_stats(counter, 0, 1582 perf_stat__print_shadow_stats(counter, 0,
1532 0, 1583 0,
1533 &out); 1584 &out,
1585 &metric_events);
1534 } 1586 }
1535 fputc('\n', stat_config.output); 1587 fputc('\n', stat_config.output);
1536} 1588}
@@ -1643,7 +1695,7 @@ static void print_counters(struct timespec *ts, int argc, const char **argv)
1643 char buf[64], *prefix = NULL; 1695 char buf[64], *prefix = NULL;
1644 1696
1645 /* Do not print anything if we record to the pipe. */ 1697 /* Do not print anything if we record to the pipe. */
1646 if (STAT_RECORD && perf_stat.file.is_pipe) 1698 if (STAT_RECORD && perf_stat.data.is_pipe)
1647 return; 1699 return;
1648 1700
1649 if (interval) 1701 if (interval)
@@ -1668,12 +1720,18 @@ static void print_counters(struct timespec *ts, int argc, const char **argv)
1668 print_aggr(prefix); 1720 print_aggr(prefix);
1669 break; 1721 break;
1670 case AGGR_THREAD: 1722 case AGGR_THREAD:
1671 evlist__for_each_entry(evsel_list, counter) 1723 evlist__for_each_entry(evsel_list, counter) {
1724 if (is_duration_time(counter))
1725 continue;
1672 print_aggr_thread(counter, prefix); 1726 print_aggr_thread(counter, prefix);
1727 }
1673 break; 1728 break;
1674 case AGGR_GLOBAL: 1729 case AGGR_GLOBAL:
1675 evlist__for_each_entry(evsel_list, counter) 1730 evlist__for_each_entry(evsel_list, counter) {
1731 if (is_duration_time(counter))
1732 continue;
1676 print_counter_aggr(counter, prefix); 1733 print_counter_aggr(counter, prefix);
1734 }
1677 if (metric_only) 1735 if (metric_only)
1678 fputc('\n', stat_config.output); 1736 fputc('\n', stat_config.output);
1679 break; 1737 break;
@@ -1681,8 +1739,11 @@ static void print_counters(struct timespec *ts, int argc, const char **argv)
1681 if (metric_only) 1739 if (metric_only)
1682 print_no_aggr_metric(prefix); 1740 print_no_aggr_metric(prefix);
1683 else { 1741 else {
1684 evlist__for_each_entry(evsel_list, counter) 1742 evlist__for_each_entry(evsel_list, counter) {
1743 if (is_duration_time(counter))
1744 continue;
1685 print_counter(counter, prefix); 1745 print_counter(counter, prefix);
1746 }
1686 } 1747 }
1687 break; 1748 break;
1688 case AGGR_UNSET: 1749 case AGGR_UNSET:
@@ -1754,6 +1815,13 @@ static int enable_metric_only(const struct option *opt __maybe_unused,
1754 return 0; 1815 return 0;
1755} 1816}
1756 1817
1818static int parse_metric_groups(const struct option *opt,
1819 const char *str,
1820 int unset __maybe_unused)
1821{
1822 return metricgroup__parse_groups(opt, str, &metric_events);
1823}
1824
1757static const struct option stat_options[] = { 1825static const struct option stat_options[] = {
1758 OPT_BOOLEAN('T', "transaction", &transaction_run, 1826 OPT_BOOLEAN('T', "transaction", &transaction_run,
1759 "hardware transaction statistics"), 1827 "hardware transaction statistics"),
@@ -1819,6 +1887,9 @@ static const struct option stat_options[] = {
1819 "measure topdown level 1 statistics"), 1887 "measure topdown level 1 statistics"),
1820 OPT_BOOLEAN(0, "smi-cost", &smi_cost, 1888 OPT_BOOLEAN(0, "smi-cost", &smi_cost,
1821 "measure SMI cost"), 1889 "measure SMI cost"),
1890 OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
1891 "monitor specified metrics or metric groups (separated by ,)",
1892 parse_metric_groups),
1822 OPT_END() 1893 OPT_END()
1823}; 1894};
1824 1895
@@ -2334,20 +2405,20 @@ static void init_features(struct perf_session *session)
2334static int __cmd_record(int argc, const char **argv) 2405static int __cmd_record(int argc, const char **argv)
2335{ 2406{
2336 struct perf_session *session; 2407 struct perf_session *session;
2337 struct perf_data_file *file = &perf_stat.file; 2408 struct perf_data *data = &perf_stat.data;
2338 2409
2339 argc = parse_options(argc, argv, stat_options, stat_record_usage, 2410 argc = parse_options(argc, argv, stat_options, stat_record_usage,
2340 PARSE_OPT_STOP_AT_NON_OPTION); 2411 PARSE_OPT_STOP_AT_NON_OPTION);
2341 2412
2342 if (output_name) 2413 if (output_name)
2343 file->path = output_name; 2414 data->file.path = output_name;
2344 2415
2345 if (run_count != 1 || forever) { 2416 if (run_count != 1 || forever) {
2346 pr_err("Cannot use -r option with perf stat record.\n"); 2417 pr_err("Cannot use -r option with perf stat record.\n");
2347 return -1; 2418 return -1;
2348 } 2419 }
2349 2420
2350 session = perf_session__new(file, false, NULL); 2421 session = perf_session__new(data, false, NULL);
2351 if (session == NULL) { 2422 if (session == NULL) {
2352 pr_err("Perf session creation failed.\n"); 2423 pr_err("Perf session creation failed.\n");
2353 return -1; 2424 return -1;
@@ -2405,7 +2476,7 @@ int process_stat_config_event(struct perf_tool *tool,
2405 if (st->aggr_mode != AGGR_UNSET) 2476 if (st->aggr_mode != AGGR_UNSET)
2406 stat_config.aggr_mode = st->aggr_mode; 2477 stat_config.aggr_mode = st->aggr_mode;
2407 2478
2408 if (perf_stat.file.is_pipe) 2479 if (perf_stat.data.is_pipe)
2409 perf_stat_init_aggr_mode(); 2480 perf_stat_init_aggr_mode();
2410 else 2481 else
2411 perf_stat_init_aggr_mode_file(st); 2482 perf_stat_init_aggr_mode_file(st);
@@ -2513,10 +2584,10 @@ static int __cmd_report(int argc, const char **argv)
2513 input_name = "perf.data"; 2584 input_name = "perf.data";
2514 } 2585 }
2515 2586
2516 perf_stat.file.path = input_name; 2587 perf_stat.data.file.path = input_name;
2517 perf_stat.file.mode = PERF_DATA_MODE_READ; 2588 perf_stat.data.mode = PERF_DATA_MODE_READ;
2518 2589
2519 session = perf_session__new(&perf_stat.file, false, &perf_stat.tool); 2590 session = perf_session__new(&perf_stat.data, false, &perf_stat.tool);
2520 if (session == NULL) 2591 if (session == NULL)
2521 return -1; 2592 return -1;
2522 2593
@@ -2787,7 +2858,7 @@ int cmd_stat(int argc, const char **argv)
2787 * records, but the need to suppress the kptr_restrict messages in older 2858 * records, but the need to suppress the kptr_restrict messages in older
2788 * tools remain -acme 2859 * tools remain -acme
2789 */ 2860 */
2790 int fd = perf_data_file__fd(&perf_stat.file); 2861 int fd = perf_data__fd(&perf_stat.data);
2791 int err = perf_event__synthesize_kernel_mmap((void *)&perf_stat, 2862 int err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
2792 process_synthesized_event, 2863 process_synthesized_event,
2793 &perf_stat.session->machines.host); 2864 &perf_stat.session->machines.host);
@@ -2801,7 +2872,7 @@ int cmd_stat(int argc, const char **argv)
2801 pr_err("failed to write stat round event\n"); 2872 pr_err("failed to write stat round event\n");
2802 } 2873 }
2803 2874
2804 if (!perf_stat.file.is_pipe) { 2875 if (!perf_stat.data.is_pipe) {
2805 perf_stat.session->header.data_size += perf_stat.bytes_written; 2876 perf_stat.session->header.data_size += perf_stat.bytes_written;
2806 perf_session__write_header(perf_stat.session, evsel_list, fd, true); 2877 perf_session__write_header(perf_stat.session, evsel_list, fd, true);
2807 } 2878 }
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 4e2e61695986..813698a9b8c7 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -1601,13 +1601,15 @@ static int __cmd_timechart(struct timechart *tchart, const char *output_name)
1601 { "syscalls:sys_exit_pselect6", process_exit_poll }, 1601 { "syscalls:sys_exit_pselect6", process_exit_poll },
1602 { "syscalls:sys_exit_select", process_exit_poll }, 1602 { "syscalls:sys_exit_select", process_exit_poll },
1603 }; 1603 };
1604 struct perf_data_file file = { 1604 struct perf_data data = {
1605 .path = input_name, 1605 .file = {
1606 .mode = PERF_DATA_MODE_READ, 1606 .path = input_name,
1607 .force = tchart->force, 1607 },
1608 .mode = PERF_DATA_MODE_READ,
1609 .force = tchart->force,
1608 }; 1610 };
1609 1611
1610 struct perf_session *session = perf_session__new(&file, false, 1612 struct perf_session *session = perf_session__new(&data, false,
1611 &tchart->tool); 1613 &tchart->tool);
1612 int ret = -EINVAL; 1614 int ret = -EINVAL;
1613 1615
@@ -1617,7 +1619,7 @@ static int __cmd_timechart(struct timechart *tchart, const char *output_name)
1617 symbol__init(&session->header.env); 1619 symbol__init(&session->header.env);
1618 1620
1619 (void)perf_header__process_sections(&session->header, 1621 (void)perf_header__process_sections(&session->header,
1620 perf_data_file__fd(session->file), 1622 perf_data__fd(session->data),
1621 tchart, 1623 tchart,
1622 process_header); 1624 process_header);
1623 1625
@@ -1732,8 +1734,10 @@ static int timechart__io_record(int argc, const char **argv)
1732 if (rec_argv == NULL) 1734 if (rec_argv == NULL)
1733 return -ENOMEM; 1735 return -ENOMEM;
1734 1736
1735 if (asprintf(&filter, "common_pid != %d", getpid()) < 0) 1737 if (asprintf(&filter, "common_pid != %d", getpid()) < 0) {
1738 free(rec_argv);
1736 return -ENOMEM; 1739 return -ENOMEM;
1740 }
1737 1741
1738 p = rec_argv; 1742 p = rec_argv;
1739 for (i = 0; i < common_args_nr; i++) 1743 for (i = 0; i < common_args_nr; i++)
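The two-line timechart fix above (mirrored later in builtin-trace.c) plugs a leak on the error path: rec_argv had already been allocated, but an asprintf() failure returned without freeing it. The pattern, as a standalone sketch with a hypothetical build_filter_args() helper:

    #define _GNU_SOURCE          /* for asprintf() */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    static int build_filter_args(char ***out_argv, char **out_filter)
    {
            char **rec_argv = calloc(8, sizeof(char *));
            char *filter;

            if (rec_argv == NULL)
                    return -ENOMEM;

            if (asprintf(&filter, "common_pid != %d", getpid()) < 0) {
                    free(rec_argv);              /* do not leak the argv array */
                    return -ENOMEM;
            }

            *out_argv = rec_argv;
            *out_filter = filter;
            return 0;
    }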
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index ee954bde7e3e..9e0d2645ae13 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -77,6 +77,7 @@
77#include "sane_ctype.h" 77#include "sane_ctype.h"
78 78
79static volatile int done; 79static volatile int done;
80static volatile int resize;
80 81
81#define HEADER_LINE_NR 5 82#define HEADER_LINE_NR 5
82 83
@@ -85,11 +86,13 @@ static void perf_top__update_print_entries(struct perf_top *top)
85 top->print_entries = top->winsize.ws_row - HEADER_LINE_NR; 86 top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
86} 87}
87 88
88static void perf_top__sig_winch(int sig __maybe_unused, 89static void winch_sig(int sig __maybe_unused)
89 siginfo_t *info __maybe_unused, void *arg)
90{ 90{
91 struct perf_top *top = arg; 91 resize = 1;
92}
92 93
94static void perf_top__resize(struct perf_top *top)
95{
93 get_term_dimensions(&top->winsize); 96 get_term_dimensions(&top->winsize);
94 perf_top__update_print_entries(top); 97 perf_top__update_print_entries(top);
95} 98}
@@ -473,12 +476,8 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
473 case 'e': 476 case 'e':
474 prompt_integer(&top->print_entries, "Enter display entries (lines)"); 477 prompt_integer(&top->print_entries, "Enter display entries (lines)");
475 if (top->print_entries == 0) { 478 if (top->print_entries == 0) {
476 struct sigaction act = { 479 perf_top__resize(top);
477 .sa_sigaction = perf_top__sig_winch, 480 signal(SIGWINCH, winch_sig);
478 .sa_flags = SA_SIGINFO,
479 };
480 perf_top__sig_winch(SIGWINCH, NULL, top);
481 sigaction(SIGWINCH, &act, NULL);
482 } else { 481 } else {
483 signal(SIGWINCH, SIG_DFL); 482 signal(SIGWINCH, SIG_DFL);
484 } 483 }
@@ -732,14 +731,16 @@ static void perf_event__process_sample(struct perf_tool *tool,
732 if (!machine->kptr_restrict_warned && 731 if (!machine->kptr_restrict_warned &&
733 symbol_conf.kptr_restrict && 732 symbol_conf.kptr_restrict &&
734 al.cpumode == PERF_RECORD_MISC_KERNEL) { 733 al.cpumode == PERF_RECORD_MISC_KERNEL) {
735 ui__warning( 734 if (!perf_evlist__exclude_kernel(top->session->evlist)) {
735 ui__warning(
736"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n" 736"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
737"Check /proc/sys/kernel/kptr_restrict.\n\n" 737"Check /proc/sys/kernel/kptr_restrict.\n\n"
738"Kernel%s samples will not be resolved.\n", 738"Kernel%s samples will not be resolved.\n",
739 al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ? 739 al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
740 " modules" : ""); 740 " modules" : "");
741 if (use_browser <= 0) 741 if (use_browser <= 0)
742 sleep(5); 742 sleep(5);
743 }
743 machine->kptr_restrict_warned = true; 744 machine->kptr_restrict_warned = true;
744 } 745 }
745 746
@@ -958,8 +959,16 @@ static int __cmd_top(struct perf_top *top)
958 if (perf_session__register_idle_thread(top->session) < 0) 959 if (perf_session__register_idle_thread(top->session) < 0)
959 goto out_delete; 960 goto out_delete;
960 961
962 if (top->nr_threads_synthesize > 1)
963 perf_set_multithreaded();
964
961 machine__synthesize_threads(&top->session->machines.host, &opts->target, 965 machine__synthesize_threads(&top->session->machines.host, &opts->target,
962 top->evlist->threads, false, opts->proc_map_timeout); 966 top->evlist->threads, false,
967 opts->proc_map_timeout,
968 top->nr_threads_synthesize);
969
970 if (top->nr_threads_synthesize > 1)
971 perf_set_singlethreaded();
963 972
964 if (perf_hpp_list.socket) { 973 if (perf_hpp_list.socket) {
965 ret = perf_env__read_cpu_topology_map(&perf_env); 974 ret = perf_env__read_cpu_topology_map(&perf_env);
@@ -1022,6 +1031,11 @@ static int __cmd_top(struct perf_top *top)
1022 1031
1023 if (hits == top->samples) 1032 if (hits == top->samples)
1024 ret = perf_evlist__poll(top->evlist, 100); 1033 ret = perf_evlist__poll(top->evlist, 100);
1034
1035 if (resize) {
1036 perf_top__resize(top);
1037 resize = 0;
1038 }
1025 } 1039 }
1026 1040
1027 ret = 0; 1041 ret = 0;
@@ -1112,6 +1126,7 @@ int cmd_top(int argc, const char **argv)
1112 }, 1126 },
1113 .max_stack = sysctl_perf_event_max_stack, 1127 .max_stack = sysctl_perf_event_max_stack,
1114 .sym_pcnt_filter = 5, 1128 .sym_pcnt_filter = 5,
1129 .nr_threads_synthesize = UINT_MAX,
1115 }; 1130 };
1116 struct record_opts *opts = &top.record_opts; 1131 struct record_opts *opts = &top.record_opts;
1117 struct target *target = &opts->target; 1132 struct target *target = &opts->target;
@@ -1221,6 +1236,8 @@ int cmd_top(int argc, const char **argv)
1221 OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy, 1236 OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
1222 "Show entries in a hierarchy"), 1237 "Show entries in a hierarchy"),
1223 OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"), 1238 OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"),
1239 OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
1240 "number of thread to run event synthesize"),
1224 OPT_END() 1241 OPT_END()
1225 }; 1242 };
1226 const char * const top_usage[] = { 1243 const char * const top_usage[] = {
@@ -1341,12 +1358,8 @@ int cmd_top(int argc, const char **argv)
1341 1358
1342 get_term_dimensions(&top.winsize); 1359 get_term_dimensions(&top.winsize);
1343 if (top.print_entries == 0) { 1360 if (top.print_entries == 0) {
1344 struct sigaction act = {
1345 .sa_sigaction = perf_top__sig_winch,
1346 .sa_flags = SA_SIGINFO,
1347 };
1348 perf_top__update_print_entries(&top); 1361 perf_top__update_print_entries(&top);
1349 sigaction(SIGWINCH, &act, NULL); 1362 signal(SIGWINCH, winch_sig);
1350 } 1363 }
1351 1364
1352 status = __cmd_top(&top); 1365 status = __cmd_top(&top);
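The builtin-top.c rework above replaces a SA_SIGINFO handler that resized the display from signal context with the conventional pattern: the handler only sets a flag, and the main loop notices the flag and does the actual work. A standalone sketch of that pattern (using sig_atomic_t, the standard idiom for such flags):

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static volatile sig_atomic_t resize;

    static void winch_sig(int sig)
    {
            (void)sig;
            resize = 1;                  /* only async-signal-safe work here */
    }

    int main(void)
    {
            signal(SIGWINCH, winch_sig);

            for (;;) {
                    pause();             /* stand-in for the real poll loop */
                    if (resize) {
                            resize = 0;
                            /* re-read terminal size, recompute print entries */
                            puts("terminal resized");
                    }
            }
    }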
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index d5d7fff1c211..84debdbad327 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -578,7 +578,6 @@ static struct syscall_fmt {
578} syscall_fmts[] = { 578} syscall_fmts[] = {
579 { .name = "access", 579 { .name = "access",
580 .arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, }, 580 .arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
581 { .name = "arch_prctl", .alias = "prctl", },
582 { .name = "bpf", 581 { .name = "bpf",
583 .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, }, 582 .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
584 { .name = "brk", .hexret = true, 583 { .name = "brk", .hexret = true,
@@ -634,6 +633,12 @@ static struct syscall_fmt {
634#else 633#else
635 [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, }, 634 [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
636#endif 635#endif
636 { .name = "kcmp", .nr_args = 5,
637 .arg = { [0] = { .name = "pid1", .scnprintf = SCA_PID, },
638 [1] = { .name = "pid2", .scnprintf = SCA_PID, },
639 [2] = { .name = "type", .scnprintf = SCA_KCMP_TYPE, },
640 [3] = { .name = "idx1", .scnprintf = SCA_KCMP_IDX, },
641 [4] = { .name = "idx2", .scnprintf = SCA_KCMP_IDX, }, }, },
637 { .name = "keyctl", 642 { .name = "keyctl",
638 .arg = { [0] = STRARRAY(option, keyctl_options), }, }, 643 .arg = { [0] = STRARRAY(option, keyctl_options), }, },
639 { .name = "kill", 644 { .name = "kill",
@@ -703,6 +708,10 @@ static struct syscall_fmt {
703 [3] = { .scnprintf = SCA_INT, /* pkey */ }, }, }, 708 [3] = { .scnprintf = SCA_INT, /* pkey */ }, }, },
704 { .name = "poll", .timeout = true, }, 709 { .name = "poll", .timeout = true, },
705 { .name = "ppoll", .timeout = true, }, 710 { .name = "ppoll", .timeout = true, },
711 { .name = "prctl", .alias = "arch_prctl",
712 .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */ },
713 [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
714 [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
706 { .name = "pread", .alias = "pread64", }, 715 { .name = "pread", .alias = "pread64", },
707 { .name = "preadv", .alias = "pread", }, 716 { .name = "preadv", .alias = "pread", },
708 { .name = "prlimit64", 717 { .name = "prlimit64",
@@ -985,6 +994,23 @@ size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
985 return printed; 994 return printed;
986} 995}
987 996
997size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
998{
999 size_t printed = scnprintf(bf, size, "%d", fd);
1000 struct thread *thread = machine__find_thread(trace->host, pid, pid);
1001
1002 if (thread) {
1003 const char *path = thread__fd_path(thread, fd, trace);
1004
1005 if (path)
1006 printed += scnprintf(bf + printed, size - printed, "<%s>", path);
1007
1008 thread__put(thread);
1009 }
1010
1011 return printed;
1012}
1013
988static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size, 1014static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
989 struct syscall_arg *arg) 1015 struct syscall_arg *arg)
990{ 1016{
@@ -1126,12 +1152,14 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
1126 if (trace->host == NULL) 1152 if (trace->host == NULL)
1127 return -ENOMEM; 1153 return -ENOMEM;
1128 1154
1129 if (trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr) < 0) 1155 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
1130 return -errno; 1156 if (err < 0)
1157 goto out;
1131 1158
1132 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target, 1159 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
1133 evlist->threads, trace__tool_process, false, 1160 evlist->threads, trace__tool_process, false,
1134 trace->opts.proc_map_timeout); 1161 trace->opts.proc_map_timeout, 1);
1162out:
1135 if (err) 1163 if (err)
1136 symbol__exit(); 1164 symbol__exit();
1137 1165
@@ -1836,16 +1864,14 @@ out_dump:
1836 goto out_put; 1864 goto out_put;
1837} 1865}
1838 1866
1839static void bpf_output__printer(enum binary_printer_ops op, 1867static int bpf_output__printer(enum binary_printer_ops op,
1840 unsigned int val, void *extra) 1868 unsigned int val, void *extra __maybe_unused, FILE *fp)
1841{ 1869{
1842 FILE *output = extra;
1843 unsigned char ch = (unsigned char)val; 1870 unsigned char ch = (unsigned char)val;
1844 1871
1845 switch (op) { 1872 switch (op) {
1846 case BINARY_PRINT_CHAR_DATA: 1873 case BINARY_PRINT_CHAR_DATA:
1847 fprintf(output, "%c", isprint(ch) ? ch : '.'); 1874 return fprintf(fp, "%c", isprint(ch) ? ch : '.');
1848 break;
1849 case BINARY_PRINT_DATA_BEGIN: 1875 case BINARY_PRINT_DATA_BEGIN:
1850 case BINARY_PRINT_LINE_BEGIN: 1876 case BINARY_PRINT_LINE_BEGIN:
1851 case BINARY_PRINT_ADDR: 1877 case BINARY_PRINT_ADDR:
@@ -1858,13 +1884,15 @@ static void bpf_output__printer(enum binary_printer_ops op,
1858 default: 1884 default:
1859 break; 1885 break;
1860 } 1886 }
1887
1888 return 0;
1861} 1889}
1862 1890
1863static void bpf_output__fprintf(struct trace *trace, 1891static void bpf_output__fprintf(struct trace *trace,
1864 struct perf_sample *sample) 1892 struct perf_sample *sample)
1865{ 1893{
1866 print_binary(sample->raw_data, sample->raw_size, 8, 1894 binary__fprintf(sample->raw_data, sample->raw_size, 8,
1867 bpf_output__printer, trace->output); 1895 bpf_output__printer, NULL, trace->output);
1868} 1896}
1869 1897
1870static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel, 1898static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
@@ -2086,6 +2114,7 @@ static int trace__record(struct trace *trace, int argc, const char **argv)
2086 rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit"; 2114 rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
2087 else { 2115 else {
2088 pr_err("Neither raw_syscalls nor syscalls events exist.\n"); 2116 pr_err("Neither raw_syscalls nor syscalls events exist.\n");
2117 free(rec_argv);
2089 return -1; 2118 return -1;
2090 } 2119 }
2091 } 2120 }
@@ -2538,10 +2567,12 @@ static int trace__replay(struct trace *trace)
2538 const struct perf_evsel_str_handler handlers[] = { 2567 const struct perf_evsel_str_handler handlers[] = {
2539 { "probe:vfs_getname", trace__vfs_getname, }, 2568 { "probe:vfs_getname", trace__vfs_getname, },
2540 }; 2569 };
2541 struct perf_data_file file = { 2570 struct perf_data data = {
2542 .path = input_name, 2571 .file = {
2543 .mode = PERF_DATA_MODE_READ, 2572 .path = input_name,
2544 .force = trace->force, 2573 },
2574 .mode = PERF_DATA_MODE_READ,
2575 .force = trace->force,
2545 }; 2576 };
2546 struct perf_session *session; 2577 struct perf_session *session;
2547 struct perf_evsel *evsel; 2578 struct perf_evsel *evsel;
@@ -2564,7 +2595,7 @@ static int trace__replay(struct trace *trace)
2564 /* add tid to output */ 2595 /* add tid to output */
2565 trace->multiple_threads = true; 2596 trace->multiple_threads = true;
2566 2597
2567 session = perf_session__new(&file, false, &trace->tool); 2598 session = perf_session__new(&data, false, &trace->tool);
2568 if (session == NULL) 2599 if (session == NULL)
2569 return -1; 2600 return -1;
2570 2601
@@ -2740,20 +2771,23 @@ DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_event
2740 2771
2741static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp) 2772static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
2742{ 2773{
2743 DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host);
2744 size_t printed = trace__fprintf_threads_header(fp); 2774 size_t printed = trace__fprintf_threads_header(fp);
2745 struct rb_node *nd; 2775 struct rb_node *nd;
2776 int i;
2746 2777
2747 if (threads == NULL) { 2778 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
2748 fprintf(fp, "%s", "Error sorting output by nr_events!\n"); 2779 DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);
2749 return 0;
2750 }
2751 2780
2752 resort_rb__for_each_entry(nd, threads) 2781 if (threads == NULL) {
2753 printed += trace__fprintf_thread(fp, threads_entry->thread, trace); 2782 fprintf(fp, "%s", "Error sorting output by nr_events!\n");
2783 return 0;
2784 }
2754 2785
2755 resort_rb__delete(threads); 2786 resort_rb__for_each_entry(nd, threads)
2787 printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
2756 2788
2789 resort_rb__delete(threads);
2790 }
2757 return printed; 2791 return printed;
2758} 2792}
2759 2793
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 50cd6228f506..3e64f10b6d66 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -5,8 +5,10 @@ HEADERS='
5include/uapi/drm/drm.h 5include/uapi/drm/drm.h
6include/uapi/drm/i915_drm.h 6include/uapi/drm/i915_drm.h
7include/uapi/linux/fcntl.h 7include/uapi/linux/fcntl.h
8include/uapi/linux/kcmp.h
8include/uapi/linux/kvm.h 9include/uapi/linux/kvm.h
9include/uapi/linux/perf_event.h 10include/uapi/linux/perf_event.h
11include/uapi/linux/prctl.h
10include/uapi/linux/sched.h 12include/uapi/linux/sched.h
11include/uapi/linux/stat.h 13include/uapi/linux/stat.h
12include/uapi/linux/vhost.h 14include/uapi/linux/vhost.h
@@ -19,6 +21,7 @@ arch/x86/include/asm/cpufeatures.h
19arch/arm/include/uapi/asm/perf_regs.h 21arch/arm/include/uapi/asm/perf_regs.h
20arch/arm64/include/uapi/asm/perf_regs.h 22arch/arm64/include/uapi/asm/perf_regs.h
21arch/powerpc/include/uapi/asm/perf_regs.h 23arch/powerpc/include/uapi/asm/perf_regs.h
24arch/s390/include/uapi/asm/perf_regs.h
22arch/x86/include/uapi/asm/perf_regs.h 25arch/x86/include/uapi/asm/perf_regs.h
23arch/x86/include/uapi/asm/kvm.h 26arch/x86/include/uapi/asm/kvm.h
24arch/x86/include/uapi/asm/kvm_perf.h 27arch/x86/include/uapi/asm/kvm_perf.h
@@ -28,6 +31,7 @@ arch/x86/include/uapi/asm/vmx.h
28arch/powerpc/include/uapi/asm/kvm.h 31arch/powerpc/include/uapi/asm/kvm.h
29arch/s390/include/uapi/asm/kvm.h 32arch/s390/include/uapi/asm/kvm.h
30arch/s390/include/uapi/asm/kvm_perf.h 33arch/s390/include/uapi/asm/kvm_perf.h
34arch/s390/include/uapi/asm/ptrace.h
31arch/s390/include/uapi/asm/sie.h 35arch/s390/include/uapi/asm/sie.h
32arch/arm/include/uapi/asm/kvm.h 36arch/arm/include/uapi/asm/kvm.h
33arch/arm64/include/uapi/asm/kvm.h 37arch/arm64/include/uapi/asm/kvm.h
@@ -58,6 +62,11 @@ check () {
58} 62}
59 63
60 64
65# Check if we have the kernel headers (tools/perf/../../include), else
66# we're probably on a detached tarball, so no point in trying to check
67# differences.
68test -d ../../include || exit 0
69
61# simple diff check 70# simple diff check
62for i in $HEADERS; do 71for i in $HEADERS; do
63 check $i -B 72 check $i -B
diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c
index cf36de7ea255..0c6d1002b524 100644
--- a/tools/perf/jvmti/jvmti_agent.c
+++ b/tools/perf/jvmti/jvmti_agent.c
@@ -384,13 +384,13 @@ jvmti_write_code(void *agent, char const *sym,
384} 384}
385 385
386int 386int
387jvmti_write_debug_info(void *agent, uint64_t code, const char *file, 387jvmti_write_debug_info(void *agent, uint64_t code,
388 jvmti_line_info_t *li, int nr_lines) 388 int nr_lines, jvmti_line_info_t *li,
389 const char * const * file_names)
389{ 390{
390 struct jr_code_debug_info rec; 391 struct jr_code_debug_info rec;
391 size_t sret, len, size, flen; 392 size_t sret, len, size, flen = 0;
392 uint64_t addr; 393 uint64_t addr;
393 const char *fn = file;
394 FILE *fp = agent; 394 FILE *fp = agent;
395 int i; 395 int i;
396 396
@@ -405,7 +405,9 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
405 return -1; 405 return -1;
406 } 406 }
407 407
408 flen = strlen(file) + 1; 408 for (i = 0; i < nr_lines; ++i) {
409 flen += strlen(file_names[i]) + 1;
410 }
409 411
410 rec.p.id = JIT_CODE_DEBUG_INFO; 412 rec.p.id = JIT_CODE_DEBUG_INFO;
411 size = sizeof(rec); 413 size = sizeof(rec);
@@ -421,7 +423,7 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
421 * file[] : source file name 423 * file[] : source file name
422 */ 424 */
423 size += nr_lines * sizeof(struct debug_entry); 425 size += nr_lines * sizeof(struct debug_entry);
424 size += flen * nr_lines; 426 size += flen;
425 rec.p.total_size = size; 427 rec.p.total_size = size;
426 428
427 /* 429 /*
@@ -452,7 +454,7 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
452 if (sret != 1) 454 if (sret != 1)
453 goto error; 455 goto error;
454 456
455 sret = fwrite_unlocked(fn, flen, 1, fp); 457 sret = fwrite_unlocked(file_names[i], strlen(file_names[i]) + 1, 1, fp);
456 if (sret != 1) 458 if (sret != 1)
457 goto error; 459 goto error;
458 } 460 }
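The jvmti_write_debug_info() rework above changes how the record is sized: instead of one shared source file name implicitly repeated for every line entry, each entry now carries its own NUL-terminated name, so flen becomes the sum of the individual string lengths plus terminators and the record grows by exactly flen rather than flen * nr_lines. A standalone sketch of that size computation (debug_names_size() is a made-up name):

    #include <stddef.h>
    #include <string.h>

    /* Bytes needed to store one NUL-terminated file name per line entry. */
    static size_t debug_names_size(int nr_lines, const char * const *file_names)
    {
            size_t flen = 0;
            int i;

            for (i = 0; i < nr_lines; i++)
                    flen += strlen(file_names[i]) + 1;   /* include the '\0' */

            return flen;
    }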
diff --git a/tools/perf/jvmti/jvmti_agent.h b/tools/perf/jvmti/jvmti_agent.h
index fe32d8344a82..6ed82f6c06dd 100644
--- a/tools/perf/jvmti/jvmti_agent.h
+++ b/tools/perf/jvmti/jvmti_agent.h
@@ -14,6 +14,7 @@ typedef struct {
14 unsigned long pc; 14 unsigned long pc;
15 int line_number; 15 int line_number;
16 int discrim; /* discriminator -- 0 for now */ 16 int discrim; /* discriminator -- 0 for now */
17 jmethodID methodID;
17} jvmti_line_info_t; 18} jvmti_line_info_t;
18 19
19void *jvmti_open(void); 20void *jvmti_open(void);
@@ -22,11 +23,9 @@ int jvmti_write_code(void *agent, char const *symbol_name,
22 uint64_t vma, void const *code, 23 uint64_t vma, void const *code,
23 const unsigned int code_size); 24 const unsigned int code_size);
24 25
25int jvmti_write_debug_info(void *agent, 26int jvmti_write_debug_info(void *agent, uint64_t code, int nr_lines,
26 uint64_t code,
27 const char *file,
28 jvmti_line_info_t *li, 27 jvmti_line_info_t *li,
29 int nr_lines); 28 const char * const * file_names);
30 29
31#if defined(__cplusplus) 30#if defined(__cplusplus)
32} 31}
diff --git a/tools/perf/jvmti/libjvmti.c b/tools/perf/jvmti/libjvmti.c
index c62c9fc9a525..6add3e982614 100644
--- a/tools/perf/jvmti/libjvmti.c
+++ b/tools/perf/jvmti/libjvmti.c
@@ -47,6 +47,7 @@ do_get_line_numbers(jvmtiEnv *jvmti, void *pc, jmethodID m, jint bci,
47 tab[lines].pc = (unsigned long)pc; 47 tab[lines].pc = (unsigned long)pc;
48 tab[lines].line_number = loc_tab[i].line_number; 48 tab[lines].line_number = loc_tab[i].line_number;
49 tab[lines].discrim = 0; /* not yet used */ 49 tab[lines].discrim = 0; /* not yet used */
50 tab[lines].methodID = m;
50 lines++; 51 lines++;
51 } else { 52 } else {
52 break; 53 break;
@@ -125,6 +126,99 @@ get_line_numbers(jvmtiEnv *jvmti, const void *compile_info, jvmti_line_info_t **
125 return JVMTI_ERROR_NONE; 126 return JVMTI_ERROR_NONE;
126} 127}
127 128
129static void
130copy_class_filename(const char * class_sign, const char * file_name, char * result, size_t max_length)
131{
132 /*
133 * Assume path name is class hierarchy, this is a common practice with Java programs
134 */
135 if (*class_sign == 'L') {
136 int j, i = 0;
137 char *p = strrchr(class_sign, '/');
138 if (p) {
139 /* drop the 'L' prefix and copy up to the final '/' */
140 for (i = 0; i < (p - class_sign); i++)
141 result[i] = class_sign[i+1];
142 }
143 /*
144 * append file name, we use loops and not string ops to avoid modifying
145 * class_sign which is used later for the symbol name
146 */
147 for (j = 0; i < (max_length - 1) && file_name && j < strlen(file_name); j++, i++)
148 result[i] = file_name[j];
149
150 result[i] = '\0';
151 } else {
152 /* fallback case */
153 size_t file_name_len = strlen(file_name);
154 strncpy(result, file_name, file_name_len < max_length ? file_name_len : max_length);
155 }
156}
157
158static jvmtiError
159get_source_filename(jvmtiEnv *jvmti, jmethodID methodID, char ** buffer)
160{
161 jvmtiError ret;
162 jclass decl_class;
163 char *file_name = NULL;
164 char *class_sign = NULL;
165 char fn[PATH_MAX];
166 size_t len;
167
168 ret = (*jvmti)->GetMethodDeclaringClass(jvmti, methodID, &decl_class);
169 if (ret != JVMTI_ERROR_NONE) {
170 print_error(jvmti, "GetMethodDeclaringClass", ret);
171 return ret;
172 }
173
174 ret = (*jvmti)->GetSourceFileName(jvmti, decl_class, &file_name);
175 if (ret != JVMTI_ERROR_NONE) {
176 print_error(jvmti, "GetSourceFileName", ret);
177 return ret;
178 }
179
180 ret = (*jvmti)->GetClassSignature(jvmti, decl_class, &class_sign, NULL);
181 if (ret != JVMTI_ERROR_NONE) {
182 print_error(jvmti, "GetClassSignature", ret);
183 goto free_file_name_error;
184 }
185
186 copy_class_filename(class_sign, file_name, fn, PATH_MAX);
187 len = strlen(fn);
188 *buffer = malloc((len + 1) * sizeof(char));
189 if (!*buffer) {
190 print_error(jvmti, "GetClassSignature", ret);
191 ret = JVMTI_ERROR_OUT_OF_MEMORY;
192 goto free_class_sign_error;
193 }
194 strcpy(*buffer, fn);
195 ret = JVMTI_ERROR_NONE;
196
197free_class_sign_error:
198 (*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign);
199free_file_name_error:
200 (*jvmti)->Deallocate(jvmti, (unsigned char *)file_name);
201
202 return ret;
203}
204
205static jvmtiError
206fill_source_filenames(jvmtiEnv *jvmti, int nr_lines,
207 const jvmti_line_info_t * line_tab,
208 char ** file_names)
209{
210 int index;
211 jvmtiError ret;
212
213 for (index = 0; index < nr_lines; ++index) {
214 ret = get_source_filename(jvmti, line_tab[index].methodID, &(file_names[index]));
215 if (ret != JVMTI_ERROR_NONE)
216 return ret;
217 }
218
219 return JVMTI_ERROR_NONE;
220}
221
128static void JNICALL 222static void JNICALL
129compiled_method_load_cb(jvmtiEnv *jvmti, 223compiled_method_load_cb(jvmtiEnv *jvmti,
130 jmethodID method, 224 jmethodID method,
@@ -135,16 +229,18 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
135 const void *compile_info) 229 const void *compile_info)
136{ 230{
137 jvmti_line_info_t *line_tab = NULL; 231 jvmti_line_info_t *line_tab = NULL;
232 char ** line_file_names = NULL;
138 jclass decl_class; 233 jclass decl_class;
139 char *class_sign = NULL; 234 char *class_sign = NULL;
140 char *func_name = NULL; 235 char *func_name = NULL;
141 char *func_sign = NULL; 236 char *func_sign = NULL;
142 char *file_name= NULL; 237 char *file_name = NULL;
143 char fn[PATH_MAX]; 238 char fn[PATH_MAX];
144 uint64_t addr = (uint64_t)(uintptr_t)code_addr; 239 uint64_t addr = (uint64_t)(uintptr_t)code_addr;
145 jvmtiError ret; 240 jvmtiError ret;
146 int nr_lines = 0; /* in line_tab[] */ 241 int nr_lines = 0; /* in line_tab[] */
147 size_t len; 242 size_t len;
243 int output_debug_info = 0;
148 244
149 ret = (*jvmti)->GetMethodDeclaringClass(jvmti, method, 245 ret = (*jvmti)->GetMethodDeclaringClass(jvmti, method,
150 &decl_class); 246 &decl_class);
@@ -158,6 +254,19 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
158 if (ret != JVMTI_ERROR_NONE) { 254 if (ret != JVMTI_ERROR_NONE) {
159 warnx("jvmti: cannot get line table for method"); 255 warnx("jvmti: cannot get line table for method");
160 nr_lines = 0; 256 nr_lines = 0;
257 } else if (nr_lines > 0) {
258 line_file_names = malloc(sizeof(char*) * nr_lines);
259 if (!line_file_names) {
260 warnx("jvmti: cannot allocate space for line table method names");
261 } else {
262 memset(line_file_names, 0, sizeof(char*) * nr_lines);
263 ret = fill_source_filenames(jvmti, nr_lines, line_tab, line_file_names);
264 if (ret != JVMTI_ERROR_NONE) {
265 warnx("jvmti: fill_source_filenames failed");
266 } else {
267 output_debug_info = 1;
268 }
269 }
161 } 270 }
162 } 271 }
163 272
@@ -181,33 +290,14 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
181 goto error; 290 goto error;
182 } 291 }
183 292
184 /* 293 copy_class_filename(class_sign, file_name, fn, PATH_MAX);
185 * Assume path name is class hierarchy, this is a common practice with Java programs 294
186 */
187 if (*class_sign == 'L') {
188 int j, i = 0;
189 char *p = strrchr(class_sign, '/');
190 if (p) {
191 /* drop the 'L' prefix and copy up to the final '/' */
192 for (i = 0; i < (p - class_sign); i++)
193 fn[i] = class_sign[i+1];
194 }
195 /*
196 * append file name, we use loops and not string ops to avoid modifying
197 * class_sign which is used later for the symbol name
198 */
199 for (j = 0; i < (PATH_MAX - 1) && file_name && j < strlen(file_name); j++, i++)
200 fn[i] = file_name[j];
201 fn[i] = '\0';
202 } else {
203 /* fallback case */
204 strcpy(fn, file_name);
205 }
206 /* 295 /*
207 * write source line info record if we have it 296 * write source line info record if we have it
208 */ 297 */
209 if (jvmti_write_debug_info(jvmti_agent, addr, fn, line_tab, nr_lines)) 298 if (output_debug_info)
210 warnx("jvmti: write_debug_info() failed"); 299 if (jvmti_write_debug_info(jvmti_agent, addr, nr_lines, line_tab, (const char * const *) line_file_names))
300 warnx("jvmti: write_debug_info() failed");
211 301
212 len = strlen(func_name) + strlen(class_sign) + strlen(func_sign) + 2; 302 len = strlen(func_name) + strlen(class_sign) + strlen(func_sign) + 2;
213 { 303 {
@@ -223,6 +313,13 @@ error:
223 (*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign); 313 (*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign);
224 (*jvmti)->Deallocate(jvmti, (unsigned char *)file_name); 314 (*jvmti)->Deallocate(jvmti, (unsigned char *)file_name);
225 free(line_tab); 315 free(line_tab);
316 while (line_file_names && (nr_lines > 0)) {
317 if (line_file_names[nr_lines - 1]) {
318 free(line_file_names[nr_lines - 1]);
319 }
320 nr_lines -= 1;
321 }
322 free(line_file_names);
226} 323}
227 324
228static void JNICALL 325static void JNICALL
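The refactor above extracts the class-signature-to-path logic into copy_class_filename() so it can also be applied per line entry via get_source_filename(): for a JVM class signature such as "Ljava/lang/String;" and a source file "String.java", the resulting path is "java/lang/String.java" (package directories taken from the signature, file name appended). A standalone sketch of that behavior, not the exact perf implementation:

    #include <stdio.h>
    #include <string.h>

    /* Build "pkg/path/File.java" from "Lpkg/path/Class;" + "File.java". */
    static void class_filename(const char *class_sign, const char *file_name,
                               char *out, size_t max_len)
    {
            const char *slash = strrchr(class_sign, '/');

            if (class_sign[0] == 'L' && slash) {
                    size_t pkg_len = (size_t)(slash - class_sign);   /* drops 'L', keeps trailing '/' */

                    if (pkg_len >= max_len)
                            pkg_len = max_len - 1;
                    memcpy(out, class_sign + 1, pkg_len);
                    out[pkg_len] = '\0';
                    strncat(out, file_name, max_len - pkg_len - 1);
            } else {
                    snprintf(out, max_len, "%s", file_name);         /* fallback case */
            }
    }

    int main(void)
    {
            char fn[256];

            class_filename("Ljava/lang/String;", "String.java", fn, sizeof(fn));
            printf("%s\n", fn);          /* prints: java/lang/String.java */
            return 0;
    }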
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index f75f3dec7485..2357f4ccc9c7 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -66,6 +66,7 @@ struct record_opts {
66 unsigned int user_freq; 66 unsigned int user_freq;
67 u64 branch_stack; 67 u64 branch_stack;
68 u64 sample_intr_regs; 68 u64 sample_intr_regs;
69 u64 sample_user_regs;
69 u64 default_interval; 70 u64 default_interval;
70 u64 user_interval; 71 u64 user_interval;
71 size_t auxtrace_snapshot_size; 72 size_t auxtrace_snapshot_size;
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
new file mode 100644
index 000000000000..00bfdb5c5acb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
@@ -0,0 +1,164 @@
1[
2 {
3 "BriefDescription": "Instructions Per Cycle (per logical thread)",
4 "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
5 "MetricGroup": "TopDownL1",
6 "MetricName": "IPC"
7 },
8 {
9 "BriefDescription": "Uops Per Instruction",
10 "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
11 "MetricGroup": "Pipeline",
12 "MetricName": "UPI"
13 },
14 {
15 "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
16 "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
17 "MetricGroup": "Frontend",
18 "MetricName": "IFetch_Line_Utilization"
19 },
20 {
21 "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
22 "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
23 "MetricGroup": "DSB; Frontend_Bandwidth",
24 "MetricName": "DSB_Coverage"
25 },
26 {
27 "BriefDescription": "Cycles Per Instruction (threaded)",
28 "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
29 "MetricGroup": "Pipeline;Summary",
30 "MetricName": "CPI"
31 },
32 {
33 "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
34 "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
35 "MetricGroup": "Summary",
36 "MetricName": "CLKS"
37 },
38 {
39 "BriefDescription": "Total issue-pipeline slots",
40 "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
41 "MetricGroup": "TopDownL1",
42 "MetricName": "SLOTS"
43 },
44 {
45 "BriefDescription": "Total number of retired Instructions",
46 "MetricExpr": "INST_RETIRED.ANY",
47 "MetricGroup": "Summary",
48 "MetricName": "Instructions"
49 },
50 {
51 "BriefDescription": "Instructions Per Cycle (per physical core)",
52 "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
53 "MetricGroup": "SMT",
54 "MetricName": "CoreIPC"
55 },
56 {
57 "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
58 "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
59 "MetricGroup": "Pipeline;Ports_Utilization",
60 "MetricName": "ILP"
61 },
62 {
63 "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
64 "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED )) ) / RS_EVENTS.EMPTY_END)",
65 "MetricGroup": "Unknown_Branches",
66 "MetricName": "BAClear_Cost"
67 },
68 {
69 "BriefDescription": "Core actual clocks when any thread is active on the physical core",
70 "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
71 "MetricGroup": "SMT",
72 "MetricName": "CORE_CLKS"
73 },
74 {
75 "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
76 "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
77 "MetricGroup": "Memory_Bound;Memory_Lat",
78 "MetricName": "Load_Miss_Real_Latency"
79 },
80 {
81 "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
82 "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
83 "MetricGroup": "Memory_Bound;Memory_BW",
84 "MetricName": "MLP"
85 },
86 {
87 "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
88 "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED)) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
89 "MetricGroup": "TLB",
90 "MetricName": "Page_Walks_Utilization"
91 },
92 {
93 "BriefDescription": "Average CPU Utilization",
94 "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
95 "MetricGroup": "Summary",
96 "MetricName": "CPU_Utilization"
97 },
98 {
99 "BriefDescription": "Giga Floating Point Operations Per Second",
100 "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
101 "MetricGroup": "FLOPS;Summary",
102 "MetricName": "GFLOPs"
103 },
104 {
105 "BriefDescription": "Average Frequency Utilization relative nominal frequency",
106 "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
107 "MetricGroup": "Power",
108 "MetricName": "Turbo_Utilization"
109 },
110 {
111 "BriefDescription": "Fraction of cycles where both hardware threads were active",
112 "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
113 "MetricGroup": "SMT;Summary",
114 "MetricName": "SMT_2T_Utilization"
115 },
116 {
117 "BriefDescription": "Fraction of cycles spent in Kernel mode",
118 "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
119 "MetricGroup": "Summary",
120 "MetricName": "Kernel_Utilization"
121 },
122 {
123 "BriefDescription": "C3 residency percent per core",
124 "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
125 "MetricGroup": "Power",
126 "MetricName": "C3_Core_Residency"
127 },
128 {
129 "BriefDescription": "C6 residency percent per core",
130 "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
131 "MetricGroup": "Power",
132 "MetricName": "C6_Core_Residency"
133 },
134 {
135 "BriefDescription": "C7 residency percent per core",
136 "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
137 "MetricGroup": "Power",
138 "MetricName": "C7_Core_Residency"
139 },
140 {
141 "BriefDescription": "C2 residency percent per package",
142 "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
143 "MetricGroup": "Power",
144 "MetricName": "C2_Pkg_Residency"
145 },
146 {
147 "BriefDescription": "C3 residency percent per package",
148 "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
149 "MetricGroup": "Power",
150 "MetricName": "C3_Pkg_Residency"
151 },
152 {
153 "BriefDescription": "C6 residency percent per package",
154 "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
155 "MetricGroup": "Power",
156 "MetricName": "C6_Pkg_Residency"
157 },
158 {
159 "BriefDescription": "C7 residency percent per package",
160 "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
161 "MetricGroup": "Power",
162 "MetricName": "C7_Pkg_Residency"
163 }
164]
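This JSON file (and the near-identical ones added below for the other Broadwell variants) defines derived metrics as expressions over raw PMU event counts, which perf stat evaluates after reading the counters; the new -M option selects them by MetricName or MetricGroup. As a rough standalone illustration of what the simplest expressions compute, using hypothetical counter values:

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical raw counts from one measurement interval */
            double inst_retired_any        = 1.2e9;   /* INST_RETIRED.ANY */
            double cpu_clk_unhalted_thread = 1.0e9;   /* CPU_CLK_UNHALTED.THREAD */

            double ipc   = inst_retired_any / cpu_clk_unhalted_thread;   /* "IPC" */
            double cpi   = 1.0 / ipc;                                    /* "CPI" */
            double slots = 4.0 * cpu_clk_unhalted_thread;                /* "SLOTS", non-SMT case */

            printf("IPC=%.2f CPI=%.2f SLOTS=%.0f\n", ipc, cpi, slots);
            return 0;
    }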
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
new file mode 100644
index 000000000000..49c5f123d811
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
@@ -0,0 +1,164 @@
1[
2 {
3 "BriefDescription": "Instructions Per Cycle (per logical thread)",
4 "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
5 "MetricGroup": "TopDownL1",
6 "MetricName": "IPC"
7 },
8 {
9 "BriefDescription": "Uops Per Instruction",
10 "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
11 "MetricGroup": "Pipeline",
12 "MetricName": "UPI"
13 },
14 {
15 "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
16 "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
17 "MetricGroup": "Frontend",
18 "MetricName": "IFetch_Line_Utilization"
19 },
20 {
21 "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
22 "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
23 "MetricGroup": "DSB; Frontend_Bandwidth",
24 "MetricName": "DSB_Coverage"
25 },
26 {
27 "BriefDescription": "Cycles Per Instruction (threaded)",
28 "MetricExpr": "1 / INST_RETIRED.ANY / cycles",
29 "MetricGroup": "Pipeline;Summary",
30 "MetricName": "CPI"
31 },
32 {
33 "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
34 "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
35 "MetricGroup": "Summary",
36 "MetricName": "CLKS"
37 },
38 {
39 "BriefDescription": "Total issue-pipeline slots",
40 "MetricExpr": "4*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
41 "MetricGroup": "TopDownL1",
42 "MetricName": "SLOTS"
43 },
44 {
45 "BriefDescription": "Total number of retired Instructions",
46 "MetricExpr": "INST_RETIRED.ANY",
47 "MetricGroup": "Summary",
48 "MetricName": "Instructions"
49 },
50 {
51 "BriefDescription": "Instructions Per Cycle (per physical core)",
52 "MetricExpr": "INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
53 "MetricGroup": "SMT",
54 "MetricName": "CoreIPC"
55 },
56 {
57 "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
58 "MetricExpr": "UOPS_EXECUTED.THREAD / ( cpu@uops_executed.core\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
59 "MetricGroup": "Pipeline;Ports_Utilization",
60 "MetricName": "ILP"
61 },
62 {
63 "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
64 "MetricExpr": "2* ( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - ( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED ) ) / RS_EVENTS.EMPTY_END",
65 "MetricGroup": "Unknown_Branches",
66 "MetricName": "BAClear_Cost"
67 },
68 {
69 "BriefDescription": "Core actual clocks when any thread is active on the physical core",
70 "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
71 "MetricGroup": "SMT",
72 "MetricName": "CORE_CLKS"
73 },
74 {
75 "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
76 "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
77 "MetricGroup": "Memory_Bound;Memory_Lat",
78 "MetricName": "Load_Miss_Real_Latency"
79 },
80 {
81 "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
82 "MetricExpr": "L1D_PEND_MISS.PENDING / ( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES",
83 "MetricGroup": "Memory_Bound;Memory_BW",
84 "MetricName": "MLP"
85 },
86 {
87 "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
88 "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED)) / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
89 "MetricGroup": "TLB",
90 "MetricName": "Page_Walks_Utilization"
91 },
92 {
93 "BriefDescription": "Average CPU Utilization",
94 "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
95 "MetricGroup": "Summary",
96 "MetricName": "CPU_Utilization"
97 },
98 {
99 "BriefDescription": "Giga Floating Point Operations Per Second",
100 "MetricExpr": "( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE ) / 1000000000 / duration_time",
101 "MetricGroup": "FLOPS;Summary",
102 "MetricName": "GFLOPs"
103 },
104 {
105 "BriefDescription": "Average Frequency Utilization relative nominal frequency",
106 "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
107 "MetricGroup": "Power",
108 "MetricName": "Turbo_Utilization"
109 },
110 {
111 "BriefDescription": "Fraction of cycles where both hardware threads were active",
112 "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
113 "MetricGroup": "SMT;Summary",
114 "MetricName": "SMT_2T_Utilization"
115 },
116 {
117 "BriefDescription": "Fraction of cycles spent in Kernel mode",
118 "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
119 "MetricGroup": "Summary",
120 "MetricName": "Kernel_Utilization"
121 },
122 {
123 "BriefDescription": "C3 residency percent per core",
124 "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
125 "MetricGroup": "Power",
126 "MetricName": "C3_Core_Residency"
127 },
128 {
129 "BriefDescription": "C6 residency percent per core",
130 "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
131 "MetricGroup": "Power",
132 "MetricName": "C6_Core_Residency"
133 },
134 {
135 "BriefDescription": "C7 residency percent per core",
136 "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
137 "MetricGroup": "Power",
138 "MetricName": "C7_Core_Residency"
139 },
140 {
141 "BriefDescription": "C2 residency percent per package",
142 "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
143 "MetricGroup": "Power",
144 "MetricName": "C2_Pkg_Residency"
145 },
146 {
147 "BriefDescription": "C3 residency percent per package",
148 "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
149 "MetricGroup": "Power",
150 "MetricName": "C3_Pkg_Residency"
151 },
152 {
153 "BriefDescription": "C6 residency percent per package",
154 "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
155 "MetricGroup": "Power",
156 "MetricName": "C6_Pkg_Residency"
157 },
158 {
159 "BriefDescription": "C7 residency percent per package",
160 "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
161 "MetricGroup": "Power",
162 "MetricName": "C7_Pkg_Residency"
163 }
164]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
new file mode 100644
index 000000000000..5a7f1ec24200
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
@@ -0,0 +1,164 @@
1[
2 {
3 "BriefDescription": "Instructions Per Cycle (per logical thread)",
4 "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
5 "MetricGroup": "TopDownL1",
6 "MetricName": "IPC"
7 },
8 {
9 "BriefDescription": "Uops Per Instruction",
10 "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
11 "MetricGroup": "Pipeline",
12 "MetricName": "UPI"
13 },
14 {
15 "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
16 "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
17 "MetricGroup": "Frontend",
18 "MetricName": "IFetch_Line_Utilization"
19 },
20 {
21 "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
22 "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
23 "MetricGroup": "DSB;Frontend_Bandwidth",
24 "MetricName": "DSB_Coverage"
25 },
26 {
27 "BriefDescription": "Cycles Per Instruction (threaded)",
28 "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
29 "MetricGroup": "Pipeline;Summary",
30 "MetricName": "CPI"
31 },
32 {
33 "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
34 "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
35 "MetricGroup": "Summary",
36 "MetricName": "CLKS"
37 },
38 {
39 "BriefDescription": "Total issue-pipeline slots",
40 "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
41 "MetricGroup": "TopDownL1",
42 "MetricName": "SLOTS"
43 },
44 {
45 "BriefDescription": "Total number of retired Instructions",
46 "MetricExpr": "INST_RETIRED.ANY",
47 "MetricGroup": "Summary",
48 "MetricName": "Instructions"
49 },
50 {
51 "BriefDescription": "Instructions Per Cycle (per physical core)",
52 "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
53 "MetricGroup": "SMT",
54 "MetricName": "CoreIPC"
55 },
56 {
57 "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
58 "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
59 "MetricGroup": "Pipeline;Ports_Utilization",
60 "MetricName": "ILP"
61 },
62 {
63 "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
64 "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED )) ) / RS_EVENTS.EMPTY_END)",
65 "MetricGroup": "Unknown_Branches",
66 "MetricName": "BAClear_Cost"
67 },
68 {
69 "BriefDescription": "Core actual clocks when any thread is active on the physical core",
70 "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
71 "MetricGroup": "SMT",
72 "MetricName": "CORE_CLKS"
73 },
74 {
75 "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
76 "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
77 "MetricGroup": "Memory_Bound;Memory_Lat",
78 "MetricName": "Load_Miss_Real_Latency"
79 },
80 {
81 "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand loads when there is at least 1 such miss)",
82 "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
83 "MetricGroup": "Memory_Bound;Memory_BW",
84 "MetricName": "MLP"
85 },
86 {
87 "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
88 "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED) ) / (2*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles))",
89 "MetricGroup": "TLB",
90 "MetricName": "Page_Walks_Utilization"
91 },
92 {
93 "BriefDescription": "Average CPU Utilization",
94 "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
95 "MetricGroup": "Summary",
96 "MetricName": "CPU_Utilization"
97 },
98 {
99 "BriefDescription": "Giga Floating Point Operations Per Second",
100 "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
101 "MetricGroup": "FLOPS;Summary",
102 "MetricName": "GFLOPs"
103 },
104 {
105 "BriefDescription": "Average Frequency Utilization relative to nominal frequency",
106 "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
107 "MetricGroup": "Power",
108 "MetricName": "Turbo_Utilization"
109 },
110 {
111 "BriefDescription": "Fraction of cycles where both hardware threads were active",
112 "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
113 "MetricGroup": "SMT;Summary",
114 "MetricName": "SMT_2T_Utilization"
115 },
116 {
117 "BriefDescription": "Fraction of cycles spent in Kernel mode",
118 "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:k / CPU_CLK_UNHALTED.REF_TSC",
119 "MetricGroup": "Summary",
120 "MetricName": "Kernel_Utilization"
121 },
122 {
123 "BriefDescription": "C3 residency percent per core",
124 "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
125 "MetricGroup": "Power",
126 "MetricName": "C3_Core_Residency"
127 },
128 {
129 "BriefDescription": "C6 residency percent per core",
130 "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
131 "MetricGroup": "Power",
132 "MetricName": "C6_Core_Residency"
133 },
134 {
135 "BriefDescription": "C7 residency percent per core",
136 "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
137 "MetricGroup": "Power",
138 "MetricName": "C7_Core_Residency"
139 },
140 {
141 "BriefDescription": "C2 residency percent per package",
142 "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
143 "MetricGroup": "Power",
144 "MetricName": "C2_Pkg_Residency"
145 },
146 {
147 "BriefDescription": "C3 residency percent per package",
148 "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
149 "MetricGroup": "Power",
150 "MetricName": "C3_Pkg_Residency"
151 },
152 {
153 "BriefDescription": "C6 residency percent per package",
154 "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
155 "MetricGroup": "Power",
156 "MetricName": "C6_Pkg_Residency"
157 },
158 {
159 "BriefDescription": "C7 residency percent per package",
160 "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
161 "MetricGroup": "Power",
162 "MetricName": "C7_Pkg_Residency"
163 }
164]
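
Several expressions in the file above (SLOTS, CoreIPC, CORE_CLKS) pick between CPU_CLK_UNHALTED.THREAD_ANY / 2 and plain thread cycles depending on the #SMT_on literal, which perf resolves from the running machine's topology at evaluation time. The rough Python sketch below mirrors that selection with fabricated counter values; the function names are illustrative and smt_on stands in for #SMT_on.

# Sketch of the SMT-aware SLOTS / CoreIPC expressions above (illustrative).
def core_clks(counts, smt_on):
    # CORE_CLKS: per-core clocks; with SMT the "any thread" count is halved.
    if smt_on:
        return counts["CPU_CLK_UNHALTED.THREAD_ANY"] / 2
    return counts["CPU_CLK_UNHALTED.THREAD"]  # the MetricExpr's plain "cycles"

def slots(counts, smt_on):
    # SLOTS: 4 issue-pipeline slots per core clock.
    return 4 * core_clks(counts, smt_on)

def core_ipc(counts, smt_on):
    # CoreIPC: retired instructions per core clock.
    return counts["INST_RETIRED.ANY"] / core_clks(counts, smt_on)

sample = {  # fabricated counter values
    "CPU_CLK_UNHALTED.THREAD": 1_000_000,
    "CPU_CLK_UNHALTED.THREAD_ANY": 1_800_000,
    "INST_RETIRED.ANY": 1_500_000,
}
print(slots(sample, smt_on=True), core_ipc(sample, smt_on=True))
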
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json b/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json
new file mode 100644
index 000000000000..b4791b443a66
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json
@@ -0,0 +1,1453 @@
1[
2 {
3 "CollectPEBSRecord": "1",
4 "PublicDescription": "Counts memory requests originating from the core that miss in the L2 cache.",
5 "EventCode": "0x2E",
6 "Counter": "0,1,2,3",
7 "UMask": "0x41",
8 "PEBScounters": "0,1,2,3",
9 "EventName": "LONGEST_LAT_CACHE.MISS",
10 "PDIR_COUNTER": "na",
11 "SampleAfterValue": "200003",
12 "BriefDescription": "L2 cache request misses"
13 },
14 {
15 "CollectPEBSRecord": "1",
16 "PublicDescription": "Counts memory requests originating from the core that reference a cache line in the L2 cache.",
17 "EventCode": "0x2E",
18 "Counter": "0,1,2,3",
19 "UMask": "0x4f",
20 "PEBScounters": "0,1,2,3",
21 "EventName": "LONGEST_LAT_CACHE.REFERENCE",
22 "PDIR_COUNTER": "na",
23 "SampleAfterValue": "200003",
24 "BriefDescription": "L2 cache requests"
25 },
26 {
27 "CollectPEBSRecord": "1",
28 "PublicDescription": "Counts the number of demand and prefetch transactions that the L2 XQ rejects due to a full or near full condition which likely indicates back pressure from the intra-die interconnect (IDI) fabric. The XQ may reject transactions from the L2Q (non-cacheable requests), L2 misses and L2 write-back victims.",
29 "EventCode": "0x30",
30 "Counter": "0,1,2,3",
31 "UMask": "0x0",
32 "PEBScounters": "0,1,2,3",
33 "EventName": "L2_REJECT_XQ.ALL",
34 "PDIR_COUNTER": "na",
35 "SampleAfterValue": "200003",
36 "BriefDescription": "Requests rejected by the XQ"
37 },
38 {
39 "CollectPEBSRecord": "1",
40 "PublicDescription": "Counts the number of demand and L1 prefetcher requests rejected by the L2Q due to a full or nearly full condition which likely indicates back pressure from the L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to ensure fairness between cores, or to delay a core's dirty eviction when the address conflicts with incoming external snoops.",
41 "EventCode": "0x31",
42 "Counter": "0,1,2,3",
43 "UMask": "0x0",
44 "PEBScounters": "0,1,2,3",
45 "EventName": "CORE_REJECT_L2Q.ALL",
46 "PDIR_COUNTER": "na",
47 "SampleAfterValue": "200003",
48 "BriefDescription": "Requests rejected by the L2Q"
49 },
50 {
51 "CollectPEBSRecord": "1",
52 "PublicDescription": "Counts when a modified (dirty) cache line is evicted from the data L1 cache and needs to be written back to memory. No count will occur if the evicted line is clean, and hence does not require a writeback.",
53 "EventCode": "0x51",
54 "Counter": "0,1,2,3",
55 "UMask": "0x1",
56 "PEBScounters": "0,1,2,3",
57 "EventName": "DL1.REPLACEMENT",
58 "PDIR_COUNTER": "na",
59 "SampleAfterValue": "200003",
60 "BriefDescription": "L1 Cache evictions for dirty data"
61 },
62 {
63 "CollectPEBSRecord": "1",
64 "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.",
65 "EventCode": "0x86",
66 "Counter": "0,1,2,3",
67 "UMask": "0x2",
68 "PEBScounters": "0,1,2,3",
69 "EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
70 "PDIR_COUNTER": "na",
71 "SampleAfterValue": "200003",
72 "BriefDescription": "Cycles code-fetch stalled due to an outstanding ICache miss."
73 },
74 {
75 "CollectPEBSRecord": "1",
76 "EventCode": "0xB7",
77 "Counter": "0,1,2,3",
78 "UMask": "0x1",
79 "PEBScounters": "0,1,2,3",
80 "EventName": "OFFCORE_RESPONSE",
81 "PDIR_COUNTER": "na",
82 "SampleAfterValue": "100007",
83 "BriefDescription": "Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)"
84 },
85 {
86 "PEBS": "2",
87 "CollectPEBSRecord": "2",
88 "PublicDescription": "Counts locked memory uops retired. This includes regular locks and bus locks. (To specifically count bus locks only, see the Offcore response event.) A locked access is one with a lock prefix, or an exchange to memory. See the SDM for a complete description of which memory load accesses are locks.",
89 "EventCode": "0xD0",
90 "Counter": "0,1,2,3",
91 "UMask": "0x21",
92 "PEBScounters": "0,1,2,3",
93 "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
94 "SampleAfterValue": "200003",
95 "BriefDescription": "Locked load uops retired (Precise event capable)"
96 },
97 {
98 "PEBS": "2",
99 "CollectPEBSRecord": "2",
100 "PublicDescription": "Counts load uops retired where the data requested spans a 64 byte cache line boundary.",
101 "EventCode": "0xD0",
102 "Counter": "0,1,2,3",
103 "UMask": "0x41",
104 "PEBScounters": "0,1,2,3",
105 "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
106 "SampleAfterValue": "200003",
107 "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)"
108 },
109 {
110 "PEBS": "2",
111 "CollectPEBSRecord": "2",
112 "PublicDescription": "Counts store uops retired where the data requested spans a 64 byte cache line boundary.",
113 "EventCode": "0xD0",
114 "Counter": "0,1,2,3",
115 "UMask": "0x42",
116 "PEBScounters": "0,1,2,3",
117 "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
118 "SampleAfterValue": "200003",
119 "BriefDescription": "Store uops retired that split a cache-line (Precise event capable)"
120 },
121 {
122 "PEBS": "2",
123 "CollectPEBSRecord": "2",
124 "PublicDescription": "Counts memory uops retired where the data requested spans a 64 byte cache line boundary.",
125 "EventCode": "0xD0",
126 "Counter": "0,1,2,3",
127 "UMask": "0x43",
128 "PEBScounters": "0,1,2,3",
129 "EventName": "MEM_UOPS_RETIRED.SPLIT",
130 "SampleAfterValue": "200003",
131 "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)"
132 },
133 {
134 "PEBS": "2",
135 "CollectPEBSRecord": "2",
136 "PublicDescription": "Counts the number of load uops retired.",
137 "EventCode": "0xD0",
138 "Counter": "0,1,2,3",
139 "UMask": "0x81",
140 "PEBScounters": "0,1,2,3",
141 "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
142 "SampleAfterValue": "200003",
143 "BriefDescription": "Load uops retired (Precise event capable)"
144 },
145 {
146 "PEBS": "2",
147 "CollectPEBSRecord": "2",
148 "PublicDescription": "Counts the number of store uops retired.",
149 "EventCode": "0xD0",
150 "Counter": "0,1,2,3",
151 "UMask": "0x82",
152 "PEBScounters": "0,1,2,3",
153 "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
154 "SampleAfterValue": "200003",
155 "BriefDescription": "Store uops retired (Precise event capable)"
156 },
157 {
158 "PEBS": "2",
159 "CollectPEBSRecord": "2",
160 "PublicDescription": "Counts the number of memory uops retired that are either a load, a store, or both.",
161 "EventCode": "0xD0",
162 "Counter": "0,1,2,3",
163 "UMask": "0x83",
164 "PEBScounters": "0,1,2,3",
165 "EventName": "MEM_UOPS_RETIRED.ALL",
166 "SampleAfterValue": "200003",
167 "BriefDescription": "Memory uops retired (Precise event capable)"
168 },
169 {
170 "PEBS": "2",
171 "CollectPEBSRecord": "2",
172 "PublicDescription": "Counts load uops retired that hit the L1 data cache.",
173 "EventCode": "0xD1",
174 "Counter": "0,1,2,3",
175 "UMask": "0x1",
176 "PEBScounters": "0,1,2,3",
177 "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
178 "SampleAfterValue": "200003",
179 "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)"
180 },
181 {
182 "PEBS": "2",
183 "CollectPEBSRecord": "2",
184 "PublicDescription": "Counts load uops retired that hit in the L2 cache.",
185 "EventCode": "0xD1",
186 "Counter": "0,1,2,3",
187 "UMask": "0x2",
188 "PEBScounters": "0,1,2,3",
189 "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
190 "SampleAfterValue": "200003",
191 "BriefDescription": "Load uops retired that hit L2 (Precise event capable)"
192 },
193 {
194 "PEBS": "2",
195 "CollectPEBSRecord": "2",
196 "PublicDescription": "Counts load uops retired that miss the L1 data cache.",
197 "EventCode": "0xD1",
198 "Counter": "0,1,2,3",
199 "UMask": "0x8",
200 "PEBScounters": "0,1,2,3",
201 "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
202 "SampleAfterValue": "200003",
203 "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)"
204 },
205 {
206 "PEBS": "2",
207 "CollectPEBSRecord": "2",
208 "PublicDescription": "Counts load uops retired that miss in the L2 cache.",
209 "EventCode": "0xD1",
210 "Counter": "0,1,2,3",
211 "UMask": "0x10",
212 "PEBScounters": "0,1,2,3",
213 "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
214 "SampleAfterValue": "200003",
215 "BriefDescription": "Load uops retired that missed L2 (Precise event capable)"
216 },
217 {
218 "PEBS": "2",
219 "CollectPEBSRecord": "2",
220 "PublicDescription": "Counts load uops retired where the cache line containing the data was in the modified state of another core's or module's cache (HITM). More specifically, this means that when the load address was checked by other caching agents (typically another processor) in the system, one of those caching agents indicated that they had a dirty copy of the data. Loads that obtain a HITM response incur greater latency than is typical for a load. In addition, since HITM indicates that some other processor had this data in its cache, it implies that the data was shared between processors, or potentially was a lock or semaphore value. This event is useful for locating sharing, false sharing, and contended locks.",
221 "EventCode": "0xD1",
222 "Counter": "0,1,2,3",
223 "UMask": "0x20",
224 "PEBScounters": "0,1,2,3",
225 "EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
226 "SampleAfterValue": "200003",
227 "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)"
228 },
229 {
230 "PEBS": "2",
231 "CollectPEBSRecord": "2",
232 "PublicDescription": "Counts memory load uops retired where the data is retrieved from the WCB (or fill buffer), indicating that the load found its data while that data was in the process of being brought into the L1 cache. Typically a load will receive this indication when some other load or prefetch missed the L1 cache and was in the process of retrieving the cache line containing the data, but that process had not yet finished (and written the data back to the cache). For example, consider loads X and Y, both referencing the same cache line that is not in the L1 cache. If load X misses the cache first, it obtains a WCB (or fill buffer) and begins the process of requesting the data. When load Y requests the data, it will hit either the WCB or the L1 cache, depending on exactly when Y's request occurs.",
233 "EventCode": "0xD1",
234 "Counter": "0,1,2,3",
235 "UMask": "0x40",
236 "PEBScounters": "0,1,2,3",
237 "EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT",
238 "SampleAfterValue": "200003",
239 "BriefDescription": "Loads retired that hit WCB (Precise event capable)"
240 },
241 {
242 "PEBS": "2",
243 "CollectPEBSRecord": "2",
244 "PublicDescription": "Counts memory load uops retired where the data is retrieved from DRAM. The event is counted at retirement, so speculative loads are ignored. A memory load can hit (or miss) the L1 cache, hit (or miss) the L2 cache, hit DRAM, hit in the WCB, or receive a HITM response.",
245 "EventCode": "0xD1",
246 "Counter": "0,1,2,3",
247 "UMask": "0x80",
248 "PEBScounters": "0,1,2,3",
249 "EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
250 "SampleAfterValue": "200003",
251 "BriefDescription": "Loads retired that came from DRAM (Precise event capable)"
252 },
253 {
254 "CollectPEBSRecord": "1",
255 "PublicDescription": "Counts demand cacheable data reads of full cache lines have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
256 "EventCode": "0xB7",
257 "MSRValue": "0x0000010001",
258 "Counter": "0,1,2,3",
259 "UMask": "0x1",
260 "PEBScounters": "0,1,2,3",
261 "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
262 "PDIR_COUNTER": "na",
263 "MSRIndex": "0x1a6, 0x1a7",
264 "SampleAfterValue": "100007",
265 "BriefDescription": "Counts demand cacheable data reads of full cache lines have any transaction responses from the uncore subsystem.",
266 "Offcore": "1"
267 },
268 {
269 "CollectPEBSRecord": "1",
270 "PublicDescription": "Counts demand cacheable data reads of full cache lines hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
271 "EventCode": "0xB7",
272 "MSRValue": "0x0000040001",
273 "Counter": "0,1,2,3",
274 "UMask": "0x1",
275 "PEBScounters": "0,1,2,3",
276 "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT",
277 "PDIR_COUNTER": "na",
278 "MSRIndex": "0x1a6, 0x1a7",
279 "SampleAfterValue": "100007",
280 "BriefDescription": "Counts demand cacheable data reads of full cache lines hit the L2 cache.",
281 "Offcore": "1"
282 },
283 {
284 "CollectPEBSRecord": "1",
285 "PublicDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
286 "EventCode": "0xB7",
287 "MSRValue": "0x0200000001",
288 "Counter": "0,1,2,3",
289 "UMask": "0x1",
290 "PEBScounters": "0,1,2,3",
291 "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
292 "PDIR_COUNTER": "na",
293 "MSRIndex": "0x1a6, 0x1a7",
294 "SampleAfterValue": "100007",
295 "BriefDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module.",
296 "Offcore": "1"
297 },
298 {
299 "CollectPEBSRecord": "1",
300 "PublicDescription": "Counts demand cacheable data reads of full cache lines miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
301 "EventCode": "0xB7",
302 "MSRValue": "0x1000000001",
303 "Counter": "0,1,2,3",
304 "UMask": "0x1",
305 "PEBScounters": "0,1,2,3",
306 "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HITM_OTHER_CORE",
307 "PDIR_COUNTER": "na",
308 "MSRIndex": "0x1a6, 0x1a7",
309 "SampleAfterValue": "100007",
310 "BriefDescription": "Counts demand cacheable data reads of full cache lines miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
311 "Offcore": "1"
312 },
313 {
314 "CollectPEBSRecord": "1",
315 "PublicDescription": "Counts demand cacheable data reads of full cache lines outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
316 "EventCode": "0xB7",
317 "MSRValue": "0x4000000001",
318 "Counter": "0,1,2,3",
319 "UMask": "0x1",
320 "PEBScounters": "0,1,2,3",
321 "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
322 "PDIR_COUNTER": "na",
323 "MSRIndex": "0x1a6",
324 "SampleAfterValue": "100007",
325 "BriefDescription": "Counts demand cacheable data reads of full cache lines outstanding, per cycle, from the time of the L2 miss to when any response is received.",
326 "Offcore": "1"
327 },
328 {
329 "CollectPEBSRecord": "1",
330 "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
331 "EventCode": "0xB7",
332 "MSRValue": "0x0000010002",
333 "Counter": "0,1,2,3",
334 "UMask": "0x1",
335 "PEBScounters": "0,1,2,3",
336 "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
337 "PDIR_COUNTER": "na",
338 "MSRIndex": "0x1a6, 0x1a7",
339 "SampleAfterValue": "100007",
340 "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line have any transaction responses from the uncore subsystem.",
341 "Offcore": "1"
342 },
343 {
344 "CollectPEBSRecord": "1",
345 "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
346 "EventCode": "0xB7",
347 "MSRValue": "0x0000040002",
348 "Counter": "0,1,2,3",
349 "UMask": "0x1",
350 "PEBScounters": "0,1,2,3",
351 "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT",
352 "PDIR_COUNTER": "na",
353 "MSRIndex": "0x1a6, 0x1a7",
354 "SampleAfterValue": "100007",
355 "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line hit the L2 cache.",
356 "Offcore": "1"
357 },
358 {
359 "CollectPEBSRecord": "1",
360 "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
361 "EventCode": "0xB7",
362 "MSRValue": "0x0200000002",
363 "Counter": "0,1,2,3",
364 "UMask": "0x1",
365 "PEBScounters": "0,1,2,3",
366 "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
367 "PDIR_COUNTER": "na",
368 "MSRIndex": "0x1a6, 0x1a7",
369 "SampleAfterValue": "100007",
370 "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module.",
371 "Offcore": "1"
372 },
373 {
374 "CollectPEBSRecord": "1",
375 "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
376 "EventCode": "0xB7",
377 "MSRValue": "0x1000000002",
378 "Counter": "0,1,2,3",
379 "UMask": "0x1",
380 "PEBScounters": "0,1,2,3",
381 "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HITM_OTHER_CORE",
382 "PDIR_COUNTER": "na",
383 "MSRIndex": "0x1a6, 0x1a7",
384 "SampleAfterValue": "100007",
385 "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
386 "Offcore": "1"
387 },
388 {
389 "CollectPEBSRecord": "1",
390 "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
391 "EventCode": "0xB7",
392 "MSRValue": "0x4000000002",
393 "Counter": "0,1,2,3",
394 "UMask": "0x1",
395 "PEBScounters": "0,1,2,3",
396 "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
397 "PDIR_COUNTER": "na",
398 "MSRIndex": "0x1a6",
399 "SampleAfterValue": "100007",
400 "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line outstanding, per cycle, from the time of the L2 miss to when any response is received.",
401 "Offcore": "1"
402 },
403 {
404 "CollectPEBSRecord": "1",
405 "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
406 "EventCode": "0xB7",
407 "MSRValue": "0x0000010004",
408 "Counter": "0,1,2,3",
409 "UMask": "0x1",
410 "PEBScounters": "0,1,2,3",
411 "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
412 "PDIR_COUNTER": "na",
413 "MSRIndex": "0x1a6, 0x1a7",
414 "SampleAfterValue": "100007",
415 "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache have any transaction responses from the uncore subsystem.",
416 "Offcore": "1"
417 },
418 {
419 "CollectPEBSRecord": "1",
420 "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
421 "EventCode": "0xB7",
422 "MSRValue": "0x0000040004",
423 "Counter": "0,1,2,3",
424 "UMask": "0x1",
425 "PEBScounters": "0,1,2,3",
426 "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT",
427 "PDIR_COUNTER": "na",
428 "MSRIndex": "0x1a6, 0x1a7",
429 "SampleAfterValue": "100007",
430 "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache hit the L2 cache.",
431 "Offcore": "1"
432 },
433 {
434 "CollectPEBSRecord": "1",
435 "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
436 "EventCode": "0xB7",
437 "MSRValue": "0x0200000004",
438 "Counter": "0,1,2,3",
439 "UMask": "0x1",
440 "PEBScounters": "0,1,2,3",
441 "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
442 "PDIR_COUNTER": "na",
443 "MSRIndex": "0x1a6, 0x1a7",
444 "SampleAfterValue": "100007",
445 "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module.",
446 "Offcore": "1"
447 },
448 {
449 "CollectPEBSRecord": "1",
450 "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
451 "EventCode": "0xB7",
452 "MSRValue": "0x1000000004",
453 "Counter": "0,1,2,3",
454 "UMask": "0x1",
455 "PEBScounters": "0,1,2,3",
456 "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HITM_OTHER_CORE",
457 "PDIR_COUNTER": "na",
458 "MSRIndex": "0x1a6, 0x1a7",
459 "SampleAfterValue": "100007",
460 "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
461 "Offcore": "1"
462 },
463 {
464 "CollectPEBSRecord": "1",
465 "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
466 "EventCode": "0xB7",
467 "MSRValue": "0x4000000004",
468 "Counter": "0,1,2,3",
469 "UMask": "0x1",
470 "PEBScounters": "0,1,2,3",
471 "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
472 "PDIR_COUNTER": "na",
473 "MSRIndex": "0x1a6",
474 "SampleAfterValue": "100007",
475 "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache outstanding, per cycle, from the time of the L2 miss to when any response is received.",
476 "Offcore": "1"
477 },
478 {
479 "CollectPEBSRecord": "1",
480 "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
481 "EventCode": "0xB7",
482 "MSRValue": "0x0000010008",
483 "Counter": "0,1,2,3",
484 "UMask": "0x1",
485 "PEBScounters": "0,1,2,3",
486 "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
487 "PDIR_COUNTER": "na",
488 "MSRIndex": "0x1a6, 0x1a7",
489 "SampleAfterValue": "100007",
490 "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions have any transaction responses from the uncore subsystem.",
491 "Offcore": "1"
492 },
493 {
494 "CollectPEBSRecord": "1",
495 "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
496 "EventCode": "0xB7",
497 "MSRValue": "0x0000040008",
498 "Counter": "0,1,2,3",
499 "UMask": "0x1",
500 "PEBScounters": "0,1,2,3",
501 "EventName": "OFFCORE_RESPONSE.COREWB.L2_HIT",
502 "PDIR_COUNTER": "na",
503 "MSRIndex": "0x1a6, 0x1a7",
504 "SampleAfterValue": "100007",
505 "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions hit the L2 cache.",
506 "Offcore": "1"
507 },
508 {
509 "CollectPEBSRecord": "1",
510 "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
511 "EventCode": "0xB7",
512 "MSRValue": "0x0200000008",
513 "Counter": "0,1,2,3",
514 "UMask": "0x1",
515 "PEBScounters": "0,1,2,3",
516 "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
517 "PDIR_COUNTER": "na",
518 "MSRIndex": "0x1a6, 0x1a7",
519 "SampleAfterValue": "100007",
520 "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module.",
521 "Offcore": "1"
522 },
523 {
524 "CollectPEBSRecord": "1",
525 "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
526 "EventCode": "0xB7",
527 "MSRValue": "0x1000000008",
528 "Counter": "0,1,2,3",
529 "UMask": "0x1",
530 "PEBScounters": "0,1,2,3",
531 "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HITM_OTHER_CORE",
532 "PDIR_COUNTER": "na",
533 "MSRIndex": "0x1a6, 0x1a7",
534 "SampleAfterValue": "100007",
535 "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
536 "Offcore": "1"
537 },
538 {
539 "CollectPEBSRecord": "1",
540 "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
541 "EventCode": "0xB7",
542 "MSRValue": "0x4000000008",
543 "Counter": "0,1,2,3",
544 "UMask": "0x1",
545 "PEBScounters": "0,1,2,3",
546 "EventName": "OFFCORE_RESPONSE.COREWB.OUTSTANDING",
547 "PDIR_COUNTER": "na",
548 "MSRIndex": "0x1a6",
549 "SampleAfterValue": "100007",
550 "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions outstanding, per cycle, from the time of the L2 miss to when any response is received.",
551 "Offcore": "1"
552 },
553 {
554 "CollectPEBSRecord": "1",
555 "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
556 "EventCode": "0xB7",
557 "MSRValue": "0x0000010010",
558 "Counter": "0,1,2,3",
559 "UMask": "0x1",
560 "PEBScounters": "0,1,2,3",
561 "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
562 "PDIR_COUNTER": "na",
563 "MSRIndex": "0x1a6, 0x1a7",
564 "SampleAfterValue": "100007",
565 "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher have any transaction responses from the uncore subsystem.",
566 "Offcore": "1"
567 },
568 {
569 "CollectPEBSRecord": "1",
570 "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
571 "EventCode": "0xB7",
572 "MSRValue": "0x0000040010",
573 "Counter": "0,1,2,3",
574 "UMask": "0x1",
575 "PEBScounters": "0,1,2,3",
576 "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_HIT",
577 "PDIR_COUNTER": "na",
578 "MSRIndex": "0x1a6, 0x1a7",
579 "SampleAfterValue": "100007",
580 "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher hit the L2 cache.",
581 "Offcore": "1"
582 },
583 {
584 "CollectPEBSRecord": "1",
585 "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
586 "EventCode": "0xB7",
587 "MSRValue": "0x0200000010",
588 "Counter": "0,1,2,3",
589 "UMask": "0x1",
590 "PEBScounters": "0,1,2,3",
591 "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
592 "PDIR_COUNTER": "na",
593 "MSRIndex": "0x1a6, 0x1a7",
594 "SampleAfterValue": "100007",
595 "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
596 "Offcore": "1"
597 },
598 {
599 "CollectPEBSRecord": "1",
600 "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
601 "EventCode": "0xB7",
602 "MSRValue": "0x1000000010",
603 "Counter": "0,1,2,3",
604 "UMask": "0x1",
605 "PEBScounters": "0,1,2,3",
606 "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HITM_OTHER_CORE",
607 "PDIR_COUNTER": "na",
608 "MSRIndex": "0x1a6, 0x1a7",
609 "SampleAfterValue": "100007",
610 "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
611 "Offcore": "1"
612 },
613 {
614 "CollectPEBSRecord": "1",
615 "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
616 "EventCode": "0xB7",
617 "MSRValue": "0x4000000010",
618 "Counter": "0,1,2,3",
619 "UMask": "0x1",
620 "PEBScounters": "0,1,2,3",
621 "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.OUTSTANDING",
622 "PDIR_COUNTER": "na",
623 "MSRIndex": "0x1a6",
624 "SampleAfterValue": "100007",
625 "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.",
626 "Offcore": "1"
627 },
628 {
629 "CollectPEBSRecord": "1",
630 "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
631 "EventCode": "0xB7",
632 "MSRValue": "0x0000010020",
633 "Counter": "0,1,2,3",
634 "UMask": "0x1",
635 "PEBScounters": "0,1,2,3",
636 "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
637 "PDIR_COUNTER": "na",
638 "MSRIndex": "0x1a6, 0x1a7",
639 "SampleAfterValue": "100007",
640 "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher have any transaction responses from the uncore subsystem.",
641 "Offcore": "1"
642 },
643 {
644 "CollectPEBSRecord": "1",
645 "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
646 "EventCode": "0xB7",
647 "MSRValue": "0x0000040020",
648 "Counter": "0,1,2,3",
649 "UMask": "0x1",
650 "PEBScounters": "0,1,2,3",
651 "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT",
652 "PDIR_COUNTER": "na",
653 "MSRIndex": "0x1a6, 0x1a7",
654 "SampleAfterValue": "100007",
655 "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher hit the L2 cache.",
656 "Offcore": "1"
657 },
658 {
659 "CollectPEBSRecord": "1",
660 "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
661 "EventCode": "0xB7",
662 "MSRValue": "0x0200000020",
663 "Counter": "0,1,2,3",
664 "UMask": "0x1",
665 "PEBScounters": "0,1,2,3",
666 "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
667 "PDIR_COUNTER": "na",
668 "MSRIndex": "0x1a6, 0x1a7",
669 "SampleAfterValue": "100007",
670 "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
671 "Offcore": "1"
672 },
673 {
674 "CollectPEBSRecord": "1",
675 "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
676 "EventCode": "0xB7",
677 "MSRValue": "0x1000000020",
678 "Counter": "0,1,2,3",
679 "UMask": "0x1",
680 "PEBScounters": "0,1,2,3",
681 "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HITM_OTHER_CORE",
682 "PDIR_COUNTER": "na",
683 "MSRIndex": "0x1a6, 0x1a7",
684 "SampleAfterValue": "100007",
685 "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
686 "Offcore": "1"
687 },
688 {
689 "CollectPEBSRecord": "1",
690 "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
691 "EventCode": "0xB7",
692 "MSRValue": "0x4000000020",
693 "Counter": "0,1,2,3",
694 "UMask": "0x1",
695 "PEBScounters": "0,1,2,3",
696 "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.OUTSTANDING",
697 "PDIR_COUNTER": "na",
698 "MSRIndex": "0x1a6",
699 "SampleAfterValue": "100007",
700 "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.",
701 "Offcore": "1"
702 },
703 {
704 "CollectPEBSRecord": "1",
705 "PublicDescription": "Counts bus lock and split lock requests have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
706 "EventCode": "0xB7",
707 "MSRValue": "0x0000010400",
708 "Counter": "0,1,2,3",
709 "UMask": "0x1",
710 "PEBScounters": "0,1,2,3",
711 "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE",
712 "PDIR_COUNTER": "na",
713 "MSRIndex": "0x1a6, 0x1a7",
714 "SampleAfterValue": "100007",
715 "BriefDescription": "Counts bus lock and split lock requests have any transaction responses from the uncore subsystem.",
716 "Offcore": "1"
717 },
718 {
719 "CollectPEBSRecord": "1",
720 "PublicDescription": "Counts bus lock and split lock requests hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
721 "EventCode": "0xB7",
722 "MSRValue": "0x0000040400",
723 "Counter": "0,1,2,3",
724 "UMask": "0x1",
725 "PEBScounters": "0,1,2,3",
726 "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT",
727 "PDIR_COUNTER": "na",
728 "MSRIndex": "0x1a6, 0x1a7",
729 "SampleAfterValue": "100007",
730 "BriefDescription": "Counts bus lock and split lock requests hit the L2 cache.",
731 "Offcore": "1"
732 },
733 {
734 "CollectPEBSRecord": "1",
735 "PublicDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
736 "EventCode": "0xB7",
737 "MSRValue": "0x0200000400",
738 "Counter": "0,1,2,3",
739 "UMask": "0x1",
740 "PEBScounters": "0,1,2,3",
741 "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
742 "PDIR_COUNTER": "na",
743 "MSRIndex": "0x1a6, 0x1a7",
744 "SampleAfterValue": "100007",
745 "BriefDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module.",
746 "Offcore": "1"
747 },
748 {
749 "CollectPEBSRecord": "1",
750 "PublicDescription": "Counts bus lock and split lock requests miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
751 "EventCode": "0xB7",
752 "MSRValue": "0x1000000400",
753 "Counter": "0,1,2,3",
754 "UMask": "0x1",
755 "PEBScounters": "0,1,2,3",
756 "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.HITM_OTHER_CORE",
757 "PDIR_COUNTER": "na",
758 "MSRIndex": "0x1a6, 0x1a7",
759 "SampleAfterValue": "100007",
760 "BriefDescription": "Counts bus lock and split lock requests miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
761 "Offcore": "1"
762 },
763 {
764 "CollectPEBSRecord": "1",
765 "PublicDescription": "Counts bus lock and split lock requests outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
766 "EventCode": "0xB7",
767 "MSRValue": "0x4000000400",
768 "Counter": "0,1,2,3",
769 "UMask": "0x1",
770 "PEBScounters": "0,1,2,3",
771 "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING",
772 "PDIR_COUNTER": "na",
773 "MSRIndex": "0x1a6",
774 "SampleAfterValue": "100007",
775 "BriefDescription": "Counts bus lock and split lock requests outstanding, per cycle, from the time of the L2 miss to when any response is received.",
776 "Offcore": "1"
777 },
778 {
779 "CollectPEBSRecord": "1",
780 "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
781 "EventCode": "0xB7",
782 "MSRValue": "0x0000010800",
783 "Counter": "0,1,2,3",
784 "UMask": "0x1",
785 "PEBScounters": "0,1,2,3",
786 "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE",
787 "PDIR_COUNTER": "na",
788 "MSRIndex": "0x1a6, 0x1a7",
789 "SampleAfterValue": "100007",
790 "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes have any transaction responses from the uncore subsystem.",
791 "Offcore": "1"
792 },
793 {
794 "CollectPEBSRecord": "1",
795 "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
796 "EventCode": "0xB7",
797 "MSRValue": "0x0000040800",
798 "Counter": "0,1,2,3",
799 "UMask": "0x1",
800 "PEBScounters": "0,1,2,3",
801 "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_HIT",
802 "PDIR_COUNTER": "na",
803 "MSRIndex": "0x1a6, 0x1a7",
804 "SampleAfterValue": "100007",
805 "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes hit the L2 cache.",
806 "Offcore": "1"
807 },
808 {
809 "CollectPEBSRecord": "1",
810 "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
811 "EventCode": "0xB7",
812 "MSRValue": "0x0200000800",
813 "Counter": "0,1,2,3",
814 "UMask": "0x1",
815 "PEBScounters": "0,1,2,3",
816 "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
817 "PDIR_COUNTER": "na",
818 "MSRIndex": "0x1a6, 0x1a7",
819 "SampleAfterValue": "100007",
820 "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module.",
821 "Offcore": "1"
822 },
823 {
824 "CollectPEBSRecord": "1",
825 "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
826 "EventCode": "0xB7",
827 "MSRValue": "0x1000000800",
828 "Counter": "0,1,2,3",
829 "UMask": "0x1",
830 "PEBScounters": "0,1,2,3",
831 "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
832 "PDIR_COUNTER": "na",
833 "MSRIndex": "0x1a6, 0x1a7",
834 "SampleAfterValue": "100007",
835 "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
836 "Offcore": "1"
837 },
838 {
839 "CollectPEBSRecord": "1",
840 "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
841 "EventCode": "0xB7",
842 "MSRValue": "0x4000000800",
843 "Counter": "0,1,2,3",
844 "UMask": "0x1",
845 "PEBScounters": "0,1,2,3",
846 "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.OUTSTANDING",
847 "PDIR_COUNTER": "na",
848 "MSRIndex": "0x1a6",
849 "SampleAfterValue": "100007",
850 "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes outstanding, per cycle, from the time of the L2 miss to when any response is received.",
851 "Offcore": "1"
852 },
853 {
854 "CollectPEBSRecord": "1",
855 "PublicDescription": "Counts data cache lines requests by software prefetch instructions have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
856 "EventCode": "0xB7",
857 "MSRValue": "0x0000011000",
858 "Counter": "0,1,2,3",
859 "UMask": "0x1",
860 "PEBScounters": "0,1,2,3",
861 "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.ANY_RESPONSE",
862 "PDIR_COUNTER": "na",
863 "MSRIndex": "0x1a6, 0x1a7",
864 "SampleAfterValue": "100007",
865 "BriefDescription": "Counts data cache lines requests by software prefetch instructions have any transaction responses from the uncore subsystem.",
866 "Offcore": "1"
867 },
868 {
869 "CollectPEBSRecord": "1",
870 "PublicDescription": "Counts data cache lines requests by software prefetch instructions hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
871 "EventCode": "0xB7",
872 "MSRValue": "0x0000041000",
873 "Counter": "0,1,2,3",
874 "UMask": "0x1",
875 "PEBScounters": "0,1,2,3",
876 "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_HIT",
877 "PDIR_COUNTER": "na",
878 "MSRIndex": "0x1a6, 0x1a7",
879 "SampleAfterValue": "100007",
880 "BriefDescription": "Counts data cache lines requests by software prefetch instructions hit the L2 cache.",
881 "Offcore": "1"
882 },
883 {
884 "CollectPEBSRecord": "1",
885 "PublicDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
886 "EventCode": "0xB7",
887 "MSRValue": "0x0200001000",
888 "Counter": "0,1,2,3",
889 "UMask": "0x1",
890 "PEBScounters": "0,1,2,3",
891 "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
892 "PDIR_COUNTER": "na",
893 "MSRIndex": "0x1a6, 0x1a7",
894 "SampleAfterValue": "100007",
895 "BriefDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module.",
896 "Offcore": "1"
897 },
898 {
899 "CollectPEBSRecord": "1",
900 "PublicDescription": "Counts data cache lines requests by software prefetch instructions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
901 "EventCode": "0xB7",
902 "MSRValue": "0x1000001000",
903 "Counter": "0,1,2,3",
904 "UMask": "0x1",
905 "PEBScounters": "0,1,2,3",
906 "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HITM_OTHER_CORE",
907 "PDIR_COUNTER": "na",
908 "MSRIndex": "0x1a6, 0x1a7",
909 "SampleAfterValue": "100007",
910 "BriefDescription": "Counts data cache lines requests by software prefetch instructions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
911 "Offcore": "1"
912 },
913 {
914 "CollectPEBSRecord": "1",
915 "PublicDescription": "Counts data cache lines requests by software prefetch instructions outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
916 "EventCode": "0xB7",
917 "MSRValue": "0x4000001000",
918 "Counter": "0,1,2,3",
919 "UMask": "0x1",
920 "PEBScounters": "0,1,2,3",
921 "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.OUTSTANDING",
922 "PDIR_COUNTER": "na",
923 "MSRIndex": "0x1a6",
924 "SampleAfterValue": "100007",
925 "BriefDescription": "Counts data cache lines requests by software prefetch instructions outstanding, per cycle, from the time of the L2 miss to when any response is received.",
926 "Offcore": "1"
927 },
928 {
929 "CollectPEBSRecord": "1",
930 "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
931 "EventCode": "0xB7",
932 "MSRValue": "0x0000012000",
933 "Counter": "0,1,2,3",
934 "UMask": "0x1",
935 "PEBScounters": "0,1,2,3",
936 "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
937 "PDIR_COUNTER": "na",
938 "MSRIndex": "0x1a6, 0x1a7",
939 "SampleAfterValue": "100007",
940 "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher have any transaction responses from the uncore subsystem.",
941 "Offcore": "1"
942 },
943 {
944 "CollectPEBSRecord": "1",
945 "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
946 "EventCode": "0xB7",
947 "MSRValue": "0x0000042000",
948 "Counter": "0,1,2,3",
949 "UMask": "0x1",
950 "PEBScounters": "0,1,2,3",
951 "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT",
952 "PDIR_COUNTER": "na",
953 "MSRIndex": "0x1a6, 0x1a7",
954 "SampleAfterValue": "100007",
955 "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher hit the L2 cache.",
956 "Offcore": "1"
957 },
958 {
959 "CollectPEBSRecord": "1",
960 "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
961 "EventCode": "0xB7",
962 "MSRValue": "0x0200002000",
963 "Counter": "0,1,2,3",
964 "UMask": "0x1",
965 "PEBScounters": "0,1,2,3",
966 "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
967 "PDIR_COUNTER": "na",
968 "MSRIndex": "0x1a6, 0x1a7",
969 "SampleAfterValue": "100007",
970 "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
971 "Offcore": "1"
972 },
973 {
974 "CollectPEBSRecord": "1",
975 "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
976 "EventCode": "0xB7",
977 "MSRValue": "0x1000002000",
978 "Counter": "0,1,2,3",
979 "UMask": "0x1",
980 "PEBScounters": "0,1,2,3",
981 "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HITM_OTHER_CORE",
982 "PDIR_COUNTER": "na",
983 "MSRIndex": "0x1a6, 0x1a7",
984 "SampleAfterValue": "100007",
985 "BriefDescription": "Counts data cache line reads generated by the hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, where data forwarding is required.",
986 "Offcore": "1"
987 },
988 {
989 "CollectPEBSRecord": "1",
990 "PublicDescription": "Counts data cache line reads generated by the hardware L1 data cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
991 "EventCode": "0xB7",
992 "MSRValue": "0x4000002000",
993 "Counter": "0,1,2,3",
994 "UMask": "0x1",
995 "PEBScounters": "0,1,2,3",
996 "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING",
997 "PDIR_COUNTER": "na",
998 "MSRIndex": "0x1a6",
999 "SampleAfterValue": "100007",
1000 "BriefDescription": "Counts data cache line reads generated by the hardware L1 data cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
1001 "Offcore": "1"
1002 },
1003 {
1004 "CollectPEBSRecord": "1",
1005 "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory regions that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1006 "EventCode": "0xB7",
1007 "MSRValue": "0x0000014800",
1008 "Counter": "0,1,2,3",
1009 "UMask": "0x1",
1010 "PEBScounters": "0,1,2,3",
1011 "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
1012 "PDIR_COUNTER": "na",
1013 "MSRIndex": "0x1a6, 0x1a7",
1014 "SampleAfterValue": "100007",
1015 "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory regions that have any transaction responses from the uncore subsystem.",
1016 "Offcore": "1"
1017 },
1018 {
1019 "CollectPEBSRecord": "1",
1020 "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory regions that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1021 "EventCode": "0xB7",
1022 "MSRValue": "0x0000044800",
1023 "Counter": "0,1,2,3",
1024 "UMask": "0x1",
1025 "PEBScounters": "0,1,2,3",
1026 "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_HIT",
1027 "PDIR_COUNTER": "na",
1028 "MSRIndex": "0x1a6, 0x1a7",
1029 "SampleAfterValue": "100007",
1030 "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory regions that hit the L2 cache.",
1031 "Offcore": "1"
1032 },
1033 {
1034 "CollectPEBSRecord": "1",
1035 "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory regions that were a true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1036 "EventCode": "0xB7",
1037 "MSRValue": "0x0200004800",
1038 "Counter": "0,1,2,3",
1039 "UMask": "0x1",
1040 "PEBScounters": "0,1,2,3",
1041 "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
1042 "PDIR_COUNTER": "na",
1043 "MSRIndex": "0x1a6, 0x1a7",
1044 "SampleAfterValue": "100007",
1045 "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory regions that were a true miss for the L2 cache with a snoop miss in the other processor module.",
1046 "Offcore": "1"
1047 },
1048 {
1049 "CollectPEBSRecord": "1",
1050 "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory regions that miss the L2 cache with a snoop hit in the other processor module, where data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1051 "EventCode": "0xB7",
1052 "MSRValue": "0x1000004800",
1053 "Counter": "0,1,2,3",
1054 "UMask": "0x1",
1055 "PEBScounters": "0,1,2,3",
1056 "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
1057 "PDIR_COUNTER": "na",
1058 "MSRIndex": "0x1a6, 0x1a7",
1059 "SampleAfterValue": "100007",
1060 "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory regions that miss the L2 cache with a snoop hit in the other processor module, where data forwarding is required.",
1061 "Offcore": "1"
1062 },
1063 {
1064 "CollectPEBSRecord": "1",
1065 "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory regions that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1066 "EventCode": "0xB7",
1067 "MSRValue": "0x4000004800",
1068 "Counter": "0,1,2,3",
1069 "UMask": "0x1",
1070 "PEBScounters": "0,1,2,3",
1071 "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.OUTSTANDING",
1072 "PDIR_COUNTER": "na",
1073 "MSRIndex": "0x1a6",
1074 "SampleAfterValue": "100007",
1075 "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory regions that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
1076 "Offcore": "1"
1077 },
1078 {
1079 "CollectPEBSRecord": "1",
1080 "PublicDescription": "Counts requests to the uncore subsystem that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1081 "EventCode": "0xB7",
1082 "MSRValue": "0x0000018000",
1083 "Counter": "0,1,2,3",
1084 "UMask": "0x1",
1085 "PEBScounters": "0,1,2,3",
1086 "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
1087 "PDIR_COUNTER": "na",
1088 "MSRIndex": "0x1a6, 0x1a7",
1089 "SampleAfterValue": "100007",
1090 "BriefDescription": "Counts requests to the uncore subsystem that have any transaction responses from the uncore subsystem.",
1091 "Offcore": "1"
1092 },
1093 {
1094 "CollectPEBSRecord": "1",
1095 "PublicDescription": "Counts requests to the uncore subsystem that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1096 "EventCode": "0xB7",
1097 "MSRValue": "0x0000048000",
1098 "Counter": "0,1,2,3",
1099 "UMask": "0x1",
1100 "PEBScounters": "0,1,2,3",
1101 "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT",
1102 "PDIR_COUNTER": "na",
1103 "MSRIndex": "0x1a6, 0x1a7",
1104 "SampleAfterValue": "100007",
1105 "BriefDescription": "Counts requests to the uncore subsystem that hit the L2 cache.",
1106 "Offcore": "1"
1107 },
1108 {
1109 "CollectPEBSRecord": "1",
1110 "PublicDescription": "Counts requests to the uncore subsystem that were a true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1111 "EventCode": "0xB7",
1112 "MSRValue": "0x0200008000",
1113 "Counter": "0,1,2,3",
1114 "UMask": "0x1",
1115 "PEBScounters": "0,1,2,3",
1116 "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
1117 "PDIR_COUNTER": "na",
1118 "MSRIndex": "0x1a6, 0x1a7",
1119 "SampleAfterValue": "100007",
1120 "BriefDescription": "Counts requests to the uncore subsystem that were a true miss for the L2 cache with a snoop miss in the other processor module.",
1121 "Offcore": "1"
1122 },
1123 {
1124 "CollectPEBSRecord": "1",
1125 "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, where data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1126 "EventCode": "0xB7",
1127 "MSRValue": "0x1000008000",
1128 "Counter": "0,1,2,3",
1129 "UMask": "0x1",
1130 "PEBScounters": "0,1,2,3",
1131 "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HITM_OTHER_CORE",
1132 "PDIR_COUNTER": "na",
1133 "MSRIndex": "0x1a6, 0x1a7",
1134 "SampleAfterValue": "100007",
1135 "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, where data forwarding is required.",
1136 "Offcore": "1"
1137 },
1138 {
1139 "CollectPEBSRecord": "1",
1140 "PublicDescription": "Counts requests to the uncore subsystem that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1141 "EventCode": "0xB7",
1142 "MSRValue": "0x4000008000",
1143 "Counter": "0,1,2,3",
1144 "UMask": "0x1",
1145 "PEBScounters": "0,1,2,3",
1146 "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
1147 "PDIR_COUNTER": "na",
1148 "MSRIndex": "0x1a6",
1149 "SampleAfterValue": "100007",
1150 "BriefDescription": "Counts requests to the uncore subsystem that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
1151 "Offcore": "1"
1152 },
1153 {
1154 "CollectPEBSRecord": "1",
1155 "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1156 "EventCode": "0xB7",
1157 "MSRValue": "0x0000013010",
1158 "Counter": "0,1,2,3",
1159 "UMask": "0x1",
1160 "PEBScounters": "0,1,2,3",
1161 "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.ANY_RESPONSE",
1162 "PDIR_COUNTER": "na",
1163 "MSRIndex": "0x1a6, 0x1a7",
1164 "SampleAfterValue": "100007",
1165 "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that have any transaction responses from the uncore subsystem.",
1166 "Offcore": "1"
1167 },
1168 {
1169 "CollectPEBSRecord": "1",
1170 "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1171 "EventCode": "0xB7",
1172 "MSRValue": "0x0000043010",
1173 "Counter": "0,1,2,3",
1174 "UMask": "0x1",
1175 "PEBScounters": "0,1,2,3",
1176 "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_HIT",
1177 "PDIR_COUNTER": "na",
1178 "MSRIndex": "0x1a6, 0x1a7",
1179 "SampleAfterValue": "100007",
1180 "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that hit the L2 cache.",
1181 "Offcore": "1"
1182 },
1183 {
1184 "CollectPEBSRecord": "1",
1185 "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that were a true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1186 "EventCode": "0xB7",
1187 "MSRValue": "0x0200003010",
1188 "Counter": "0,1,2,3",
1189 "UMask": "0x1",
1190 "PEBScounters": "0,1,2,3",
1191 "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
1192 "PDIR_COUNTER": "na",
1193 "MSRIndex": "0x1a6, 0x1a7",
1194 "SampleAfterValue": "100007",
1195 "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that were a true miss for the L2 cache with a snoop miss in the other processor module.",
1196 "Offcore": "1"
1197 },
1198 {
1199 "CollectPEBSRecord": "1",
1200 "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, where data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1201 "EventCode": "0xB7",
1202 "MSRValue": "0x1000003010",
1203 "Counter": "0,1,2,3",
1204 "UMask": "0x1",
1205 "PEBScounters": "0,1,2,3",
1206 "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HITM_OTHER_CORE",
1207 "PDIR_COUNTER": "na",
1208 "MSRIndex": "0x1a6, 0x1a7",
1209 "SampleAfterValue": "100007",
1210 "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, where data forwarding is required.",
1211 "Offcore": "1"
1212 },
1213 {
1214 "CollectPEBSRecord": "1",
1215 "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1216 "EventCode": "0xB7",
1217 "MSRValue": "0x4000003010",
1218 "Counter": "0,1,2,3",
1219 "UMask": "0x1",
1220 "PEBScounters": "0,1,2,3",
1221 "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.OUTSTANDING",
1222 "PDIR_COUNTER": "na",
1223 "MSRIndex": "0x1a6",
1224 "SampleAfterValue": "100007",
1225 "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
1226 "Offcore": "1"
1227 },
1228 {
1229 "CollectPEBSRecord": "1",
1230 "PublicDescription": "Counts data reads (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1231 "EventCode": "0xB7",
1232 "MSRValue": "0x0000013091",
1233 "Counter": "0,1,2,3",
1234 "UMask": "0x1",
1235 "PEBScounters": "0,1,2,3",
1236 "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
1237 "PDIR_COUNTER": "na",
1238 "MSRIndex": "0x1a6, 0x1a7",
1239 "SampleAfterValue": "100007",
1240 "BriefDescription": "Counts data reads (demand & prefetch) that have any transaction responses from the uncore subsystem.",
1241 "Offcore": "1"
1242 },
1243 {
1244 "CollectPEBSRecord": "1",
1245 "PublicDescription": "Counts data reads (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1246 "EventCode": "0xB7",
1247 "MSRValue": "0x0000043091",
1248 "Counter": "0,1,2,3",
1249 "UMask": "0x1",
1250 "PEBScounters": "0,1,2,3",
1251 "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT",
1252 "PDIR_COUNTER": "na",
1253 "MSRIndex": "0x1a6, 0x1a7",
1254 "SampleAfterValue": "100007",
1255 "BriefDescription": "Counts data reads (demand & prefetch) that hit the L2 cache.",
1256 "Offcore": "1"
1257 },
1258 {
1259 "CollectPEBSRecord": "1",
1260 "PublicDescription": "Counts data reads (demand & prefetch) that were a true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1261 "EventCode": "0xB7",
1262 "MSRValue": "0x0200003091",
1263 "Counter": "0,1,2,3",
1264 "UMask": "0x1",
1265 "PEBScounters": "0,1,2,3",
1266 "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
1267 "PDIR_COUNTER": "na",
1268 "MSRIndex": "0x1a6, 0x1a7",
1269 "SampleAfterValue": "100007",
1270 "BriefDescription": "Counts data reads (demand & prefetch) that were a true miss for the L2 cache with a snoop miss in the other processor module.",
1271 "Offcore": "1"
1272 },
1273 {
1274 "CollectPEBSRecord": "1",
1275 "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, where data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1276 "EventCode": "0xB7",
1277 "MSRValue": "0x1000003091",
1278 "Counter": "0,1,2,3",
1279 "UMask": "0x1",
1280 "PEBScounters": "0,1,2,3",
1281 "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HITM_OTHER_CORE",
1282 "PDIR_COUNTER": "na",
1283 "MSRIndex": "0x1a6, 0x1a7",
1284 "SampleAfterValue": "100007",
1285 "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, where data forwarding is required.",
1286 "Offcore": "1"
1287 },
1288 {
1289 "CollectPEBSRecord": "1",
1290 "PublicDescription": "Counts data reads (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1291 "EventCode": "0xB7",
1292 "MSRValue": "0x4000003091",
1293 "Counter": "0,1,2,3",
1294 "UMask": "0x1",
1295 "PEBScounters": "0,1,2,3",
1296 "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING",
1297 "PDIR_COUNTER": "na",
1298 "MSRIndex": "0x1a6",
1299 "SampleAfterValue": "100007",
1300 "BriefDescription": "Counts data reads (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
1301 "Offcore": "1"
1302 },
1303 {
1304 "CollectPEBSRecord": "1",
1305 "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1306 "EventCode": "0xB7",
1307 "MSRValue": "0x0000010022",
1308 "Counter": "0,1,2,3",
1309 "UMask": "0x1",
1310 "PEBScounters": "0,1,2,3",
1311 "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
1312 "PDIR_COUNTER": "na",
1313 "MSRIndex": "0x1a6, 0x1a7",
1314 "SampleAfterValue": "100007",
1315 "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem.",
1316 "Offcore": "1"
1317 },
1318 {
1319 "CollectPEBSRecord": "1",
1320 "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1321 "EventCode": "0xB7",
1322 "MSRValue": "0x0000040022",
1323 "Counter": "0,1,2,3",
1324 "UMask": "0x1",
1325 "PEBScounters": "0,1,2,3",
1326 "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT",
1327 "PDIR_COUNTER": "na",
1328 "MSRIndex": "0x1a6, 0x1a7",
1329 "SampleAfterValue": "100007",
1330 "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that hit the L2 cache.",
1331 "Offcore": "1"
1332 },
1333 {
1334 "CollectPEBSRecord": "1",
1335 "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that were a true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1336 "EventCode": "0xB7",
1337 "MSRValue": "0x0200000022",
1338 "Counter": "0,1,2,3",
1339 "UMask": "0x1",
1340 "PEBScounters": "0,1,2,3",
1341 "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
1342 "PDIR_COUNTER": "na",
1343 "MSRIndex": "0x1a6, 0x1a7",
1344 "SampleAfterValue": "100007",
1345 "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that were a true miss for the L2 cache with a snoop miss in the other processor module.",
1346 "Offcore": "1"
1347 },
1348 {
1349 "CollectPEBSRecord": "1",
1350 "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, where data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1351 "EventCode": "0xB7",
1352 "MSRValue": "0x1000000022",
1353 "Counter": "0,1,2,3",
1354 "UMask": "0x1",
1355 "PEBScounters": "0,1,2,3",
1356 "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HITM_OTHER_CORE",
1357 "PDIR_COUNTER": "na",
1358 "MSRIndex": "0x1a6, 0x1a7",
1359 "SampleAfterValue": "100007",
1360 "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, where data forwarding is required.",
1361 "Offcore": "1"
1362 },
1363 {
1364 "CollectPEBSRecord": "1",
1365 "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1366 "EventCode": "0xB7",
1367 "MSRValue": "0x4000000022",
1368 "Counter": "0,1,2,3",
1369 "UMask": "0x1",
1370 "PEBScounters": "0,1,2,3",
1371 "EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING",
1372 "PDIR_COUNTER": "na",
1373 "MSRIndex": "0x1a6",
1374 "SampleAfterValue": "100007",
1375 "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
1376 "Offcore": "1"
1377 },
1378 {
1379 "CollectPEBSRecord": "1",
1380 "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1381 "EventCode": "0xB7",
1382 "MSRValue": "0x00000132b7",
1383 "Counter": "0,1,2,3",
1384 "UMask": "0x1",
1385 "PEBScounters": "0,1,2,3",
1386 "EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE",
1387 "PDIR_COUNTER": "na",
1388 "MSRIndex": "0x1a6, 0x1a7",
1389 "SampleAfterValue": "100007",
1390 "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem.",
1391 "Offcore": "1"
1392 },
1393 {
1394 "CollectPEBSRecord": "1",
1395 "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1396 "EventCode": "0xB7",
1397 "MSRValue": "0x00000432b7",
1398 "Counter": "0,1,2,3",
1399 "UMask": "0x1",
1400 "PEBScounters": "0,1,2,3",
1401 "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT",
1402 "PDIR_COUNTER": "na",
1403 "MSRIndex": "0x1a6, 0x1a7",
1404 "SampleAfterValue": "100007",
1405 "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that hit the L2 cache.",
1406 "Offcore": "1"
1407 },
1408 {
1409 "CollectPEBSRecord": "1",
1410 "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that were a true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1411 "EventCode": "0xB7",
1412 "MSRValue": "0x02000032b7",
1413 "Counter": "0,1,2,3",
1414 "UMask": "0x1",
1415 "PEBScounters": "0,1,2,3",
1416 "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
1417 "PDIR_COUNTER": "na",
1418 "MSRIndex": "0x1a6, 0x1a7",
1419 "SampleAfterValue": "100007",
1420 "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that were a true miss for the L2 cache with a snoop miss in the other processor module.",
1421 "Offcore": "1"
1422 },
1423 {
1424 "CollectPEBSRecord": "1",
1425 "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, where data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1426 "EventCode": "0xB7",
1427 "MSRValue": "0x10000032b7",
1428 "Counter": "0,1,2,3",
1429 "UMask": "0x1",
1430 "PEBScounters": "0,1,2,3",
1431 "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HITM_OTHER_CORE",
1432 "PDIR_COUNTER": "na",
1433 "MSRIndex": "0x1a6, 0x1a7",
1434 "SampleAfterValue": "100007",
1435 "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, where data forwarding is required.",
1436 "Offcore": "1"
1437 },
1438 {
1439 "CollectPEBSRecord": "1",
1440 "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
1441 "EventCode": "0xB7",
1442 "MSRValue": "0x40000032b7",
1443 "Counter": "0,1,2,3",
1444 "UMask": "0x1",
1445 "PEBScounters": "0,1,2,3",
1446 "EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING",
1447 "PDIR_COUNTER": "na",
1448 "MSRIndex": "0x1a6",
1449 "SampleAfterValue": "100007",
1450 "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
1451 "Offcore": "1"
1452 }
1453] \ No newline at end of file
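The OFFCORE_RESPONSE entries above all share EventCode 0xB7 and UMask 0x1; what distinguishes them is the MSRValue that perf writes into the off-core response MSR(s) named by MSRIndex (0x1a6 and, where listed, 0x1a7). The Python sketch below splits each MSRValue into its request-type and response-type parts so the encoding is easier to inspect. Two assumptions are worth flagging: the JSON path is hypothetical (the file name of this hunk is not repeated here), and the split at bit 16 is inferred from the values in the entries above (request masks such as 0x2000 and 0x8000 sit in the low 16 bits, response masks such as 0x10000 and 0x4000000000 sit above them), not taken from an authoritative encoding description.

#!/usr/bin/env python3
# Sketch: group the OFFCORE_RESPONSE entries by request and response mask.
import json
from collections import defaultdict

PATH = "offcore_events.json"  # hypothetical: point this at the JSON file from this hunk

with open(PATH) as f:
    events = json.load(f)

by_request = defaultdict(list)
for ev in events:
    if not ev.get("EventName", "").startswith("OFFCORE_RESPONSE."):
        continue
    msr = int(ev["MSRValue"], 16)
    request_mask = msr & 0xFFFF    # e.g. 0x2000 for PF_L1_DATA_RD, 0x8000 for ANY_REQUEST
    response_mask = msr & ~0xFFFF  # e.g. 0x10000 for ANY_RESPONSE, 0x4000000000 for OUTSTANDING
    by_request[request_mask].append((ev["EventName"], response_mask, ev["MSRIndex"]))

for req, rows in sorted(by_request.items()):
    print(f"request mask {req:#x}:")
    for name, resp, msridx in rows:
        print(f"  {name:55s} response={resp:#14x} MSR(s)={msridx}")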
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json b/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json
new file mode 100644
index 000000000000..a7878965ceab
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json
@@ -0,0 +1,62 @@
1[
2 {
3 "CollectPEBSRecord": "1",
4 "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is in the ICache (hit). The event strives to count on a cache line basis, so that multiple accesses which hit in a single cache line count as one ICACHE.HIT. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
5 "EventCode": "0x80",
6 "Counter": "0,1,2,3",
7 "UMask": "0x1",
8 "PEBScounters": "0,1,2,3",
9 "EventName": "ICACHE.HIT",
10 "PDIR_COUNTER": "na",
11 "SampleAfterValue": "200003",
12 "BriefDescription": "References per ICache line that are available in the ICache (hit). This event counts differently than Intel processors based on Silvermont microarchitecture"
13 },
14 {
15 "CollectPEBSRecord": "1",
16 "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is not in the ICache (miss). The event strives to count on a cache line basis, so that multiple accesses which miss in a single cache line count as one ICACHE.MISS. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is not in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
17 "EventCode": "0x80",
18 "Counter": "0,1,2,3",
19 "UMask": "0x2",
20 "PEBScounters": "0,1,2,3",
21 "EventName": "ICACHE.MISSES",
22 "PDIR_COUNTER": "na",
23 "SampleAfterValue": "200003",
24 "BriefDescription": "References per ICache line that are not available in the ICache (miss). This event counts differently than Intel processors based on Silvermont microarchitecture"
25 },
26 {
27 "CollectPEBSRecord": "1",
28 "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line. The event strives to count on a cache line basis, so that multiple fetches to a single cache line count as one ICACHE.ACCESS. Specifically, the event counts when accesses from straight line code cross the cache line boundary, or when a branch target is to a new line. This event counts differently than Intel processors based on Silvermont microarchitecture.",
29 "EventCode": "0x80",
30 "Counter": "0,1,2,3",
31 "UMask": "0x3",
32 "PEBScounters": "0,1,2,3",
33 "EventName": "ICACHE.ACCESSES",
34 "PDIR_COUNTER": "na",
35 "SampleAfterValue": "200003",
36 "BriefDescription": "References per ICache line. This event counts differently than Intel processors based on Silvermont microarchitecture"
37 },
38 {
39 "CollectPEBSRecord": "1",
40 "PublicDescription": "Counts the number of times the Microcode Sequencer (MS) starts a flow of uops from the MSROM. It does not count every time a uop is read from the MSROM. The most common case that this counts is when a micro-coded instruction is encountered by the front end of the machine. Other cases include when an instruction encounters a fault, trap, or microcode assist of any sort that initiates a flow of uops. The event will count MS startups for uops that are speculative, and subsequently cleared by branch mispredict or a machine clear.",
41 "EventCode": "0xE7",
42 "Counter": "0,1,2,3",
43 "UMask": "0x1",
44 "PEBScounters": "0,1,2,3",
45 "EventName": "MS_DECODED.MS_ENTRY",
46 "PDIR_COUNTER": "na",
47 "SampleAfterValue": "200003",
48 "BriefDescription": "MS decode starts"
49 },
50 {
51 "CollectPEBSRecord": "1",
52 "PublicDescription": "Counts the number of times the prediction (from the predecode cache) for instruction length is incorrect.",
53 "EventCode": "0xE9",
54 "Counter": "0,1,2,3",
55 "UMask": "0x1",
56 "PEBScounters": "0,1,2,3",
57 "EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
58 "PDIR_COUNTER": "na",
59 "SampleAfterValue": "200003",
60 "BriefDescription": "Decode restrictions due to predicting wrong instruction length"
61 }
62] \ No newline at end of file
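The three ICACHE entries above are described as counting per cache line, and their UMasks suggest that ICACHE.ACCESSES (0x3) is simply the union of ICACHE.HIT (0x1) and ICACHE.MISSES (0x2). A small Python sanity check of that relationship against the file added by this hunk (path taken from the diff header) might look like the following; it is only a sketch of how the JSON can be consumed, not part of the perf build.

#!/usr/bin/env python3
# Sketch: confirm that the ACCESSES umask covers the HIT and MISSES umasks.
import json

path = "tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json"
with open(path) as f:
    events = {ev["EventName"]: ev for ev in json.load(f)}

hit = int(events["ICACHE.HIT"]["UMask"], 16)
miss = int(events["ICACHE.MISSES"]["UMask"], 16)
accesses = int(events["ICACHE.ACCESSES"]["UMask"], 16)
assert accesses == hit | miss, "ICACHE.ACCESSES umask should be HIT | MISSES"
print(f"hit={hit:#x} miss={miss:#x} accesses={accesses:#x}")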
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json b/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json
new file mode 100644
index 000000000000..91e0815f3ffb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json
@@ -0,0 +1,38 @@
1[
2 {
3 "PEBS": "2",
4 "CollectPEBSRecord": "2",
5 "PublicDescription": "Counts when a memory load uop that spans a page boundary (a split) is retired.",
6 "EventCode": "0x13",
7 "Counter": "0,1,2,3",
8 "UMask": "0x2",
9 "PEBScounters": "0,1,2,3",
10 "EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
11 "SampleAfterValue": "200003",
12 "BriefDescription": "Load uops that split a page (Precise event capable)"
13 },
14 {
15 "PEBS": "2",
16 "CollectPEBSRecord": "2",
17 "PublicDescription": "Counts when a memory store uop that spans a page boundary (a split) is retired.",
18 "EventCode": "0x13",
19 "Counter": "0,1,2,3",
20 "UMask": "0x4",
21 "PEBScounters": "0,1,2,3",
22 "EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
23 "SampleAfterValue": "200003",
24 "BriefDescription": "Store uops that split a page (Precise event capable)"
25 },
26 {
27 "CollectPEBSRecord": "1",
28 "PublicDescription": "Counts machine clears due to memory ordering issues. This occurs when a snoop request happens and the machine is uncertain whether memory ordering will be preserved, because another core is in the process of modifying the data.",
29 "EventCode": "0xC3",
30 "Counter": "0,1,2,3",
31 "UMask": "0x2",
32 "PEBScounters": "0,1,2,3",
33 "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
34 "PDIR_COUNTER": "na",
35 "SampleAfterValue": "20003",
36 "BriefDescription": "Machine clears due to memory ordering issue"
37 }
38] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/other.json b/tools/perf/pmu-events/arch/x86/goldmontplus/other.json
new file mode 100644
index 000000000000..b860374418ab
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/other.json
@@ -0,0 +1,98 @@
1[
2 {
3 "CollectPEBSRecord": "1",
4 "PublicDescription": "Counts cycles that fetch is stalled for any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This includes cycles due to an ITLB miss, an ICache miss, and other events.",
5 "EventCode": "0x86",
6 "Counter": "0,1,2,3",
7 "UMask": "0x0",
8 "PEBScounters": "0,1,2,3",
9 "EventName": "FETCH_STALL.ALL",
10 "PDIR_COUNTER": "na",
11 "SampleAfterValue": "200003",
12 "BriefDescription": "Cycles code-fetch stalled due to any reason."
13 },
14 {
15 "CollectPEBSRecord": "1",
16 "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ITLB miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ITLB miss. Note: this event is not the same as page walk cycles to retrieve an instruction translation.",
17 "EventCode": "0x86",
18 "Counter": "0,1,2,3",
19 "UMask": "0x1",
20 "PEBScounters": "0,1,2,3",
21 "EventName": "FETCH_STALL.ITLB_FILL_PENDING_CYCLES",
22 "PDIR_COUNTER": "na",
23 "SampleAfterValue": "200003",
24 "BriefDescription": "Cycles the code-fetch stalls and an ITLB miss is outstanding."
25 },
26 {
27 "CollectPEBSRecord": "1",
28 "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend due to either a full resource in the backend (RESOURCE_FULL) or due to the processor recovering from some event (RECOVERY).",
29 "EventCode": "0xCA",
30 "Counter": "0,1,2,3",
31 "UMask": "0x0",
32 "PEBScounters": "0,1,2,3",
33 "EventName": "ISSUE_SLOTS_NOT_CONSUMED.ANY",
34 "PDIR_COUNTER": "na",
35 "SampleAfterValue": "200003",
36 "BriefDescription": "Unfilled issue slots per cycle"
37 },
38 {
39 "CollectPEBSRecord": "1",
40 "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. This includes, but is not limited to, resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.",
41 "EventCode": "0xCA",
42 "Counter": "0,1,2,3",
43 "UMask": "0x1",
44 "PEBScounters": "0,1,2,3",
45 "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
46 "PDIR_COUNTER": "na",
47 "SampleAfterValue": "200003",
48 "BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend"
49 },
50 {
51 "CollectPEBSRecord": "1",
52 "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g. the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window including slots where uops were not available in the Instruction Queue.",
53 "EventCode": "0xCA",
54 "Counter": "0,1,2,3",
55 "UMask": "0x2",
56 "PEBScounters": "0,1,2,3",
57 "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
58 "PDIR_COUNTER": "na",
59 "SampleAfterValue": "200003",
60 "BriefDescription": "Unfilled issue slots per cycle due to recovery"
61 },
62 {
63 "CollectPEBSRecord": "2",
64 "PublicDescription": "Counts hardware interrupts received by the processor.",
65 "EventCode": "0xCB",
66 "Counter": "0,1,2,3",
67 "UMask": "0x1",
68 "PEBScounters": "0,1,2,3",
69 "EventName": "HW_INTERRUPTS.RECEIVED",
70 "PDIR_COUNTER": "na",
71 "SampleAfterValue": "203",
72 "BriefDescription": "Hardware interrupts received"
73 },
74 {
75 "CollectPEBSRecord": "2",
76 "PublicDescription": "Counts the number of core cycles during which interrupts are masked (disabled). Increments by 1 each core cycle that EFLAGS.IF is 0, regardless of whether interrupts are pending or not.",
77 "EventCode": "0xCB",
78 "Counter": "0,1,2,3",
79 "UMask": "0x2",
80 "PEBScounters": "0,1,2,3",
81 "EventName": "HW_INTERRUPTS.MASKED",
82 "PDIR_COUNTER": "na",
83 "SampleAfterValue": "200003",
84 "BriefDescription": "Cycles hardware interrupts are masked"
85 },
86 {
87 "CollectPEBSRecord": "2",
88 "PublicDescription": "Counts core cycles during which there are pending interrupts, but interrupts are masked (EFLAGS.IF = 0).",
89 "EventCode": "0xCB",
90 "Counter": "0,1,2,3",
91 "UMask": "0x4",
92 "PEBScounters": "0,1,2,3",
93 "EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
94 "PDIR_COUNTER": "na",
95 "SampleAfterValue": "200003",
96 "BriefDescription": "Cycles pending interrupts are masked"
97 }
98] \ No newline at end of file
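The ISSUE_SLOTS_NOT_CONSUMED entries above are defined in issue slots per core cycle, so turning raw counts into a fraction needs the allocation width. A hedged Python sketch follows; the 3-wide allocation is an assumption taken from the "3 uops are requested" wording in the UOPS_NOT_DELIVERED.ANY description later in this series, and the counter readings in the example are made up.

# Sketch: convert ISSUE_SLOTS_NOT_CONSUMED.ANY and CPU_CLK_UNHALTED.CORE counts
# into the fraction of issue slots that went unfilled.
def unfilled_slot_fraction(slots_not_consumed: int, core_cycles: int,
                           alloc_width: int = 3) -> float:
    """Fraction of issue slots left unfilled, between 0.0 and 1.0."""
    total_slots = core_cycles * alloc_width
    return slots_not_consumed / total_slots if total_slots else 0.0

# Example with made-up counter readings:
print(unfilled_slot_fraction(slots_not_consumed=1_200_000, core_cycles=1_000_000))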
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json b/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
new file mode 100644
index 000000000000..ccf1aed69197
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
@@ -0,0 +1,544 @@
1[
2 {
3 "PEBS": "2",
4 "CollectPEBSRecord": "1",
5 "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBS record for this event.",
6 "EventCode": "0x00",
7 "Counter": "Fixed counter 0",
8 "UMask": "0x1",
9 "PEBScounters": "32",
10 "EventName": "INST_RETIRED.ANY",
11 "PDIR_COUNTER": "na",
12 "SampleAfterValue": "2000003",
13 "BriefDescription": "Instructions retired (Fixed event)"
14 },
15 {
16 "CollectPEBSRecord": "1",
17 "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regard to time. This event uses fixed counter 1. You cannot collect a PEBS record for this event.",
18 "EventCode": "0x00",
19 "Counter": "Fixed counter 1",
20 "UMask": "0x2",
21 "PEBScounters": "33",
22 "EventName": "CPU_CLK_UNHALTED.CORE",
23 "PDIR_COUNTER": "na",
24 "SampleAfterValue": "2000003",
25 "BriefDescription": "Core cycles when core is not halted (Fixed event)"
26 },
27 {
28 "CollectPEBSRecord": "1",
29 "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBS record for this event.",
30 "EventCode": "0x00",
31 "Counter": "Fixed counter 2",
32 "UMask": "0x3",
33 "PEBScounters": "34",
34 "EventName": "CPU_CLK_UNHALTED.REF_TSC",
35 "PDIR_COUNTER": "na",
36 "SampleAfterValue": "2000003",
37 "BriefDescription": "Reference cycles when core is not halted (Fixed event)"
38 },
39 {
40 "PEBS": "2",
41 "CollectPEBSRecord": "2",
42 "PublicDescription": "Counts a load blocked from using a store forward; the forward did not occur because the store data was not available at the right time. The forward might occur subsequently when the data is available.",
43 "EventCode": "0x03",
44 "Counter": "0,1,2,3",
45 "UMask": "0x1",
46 "PEBScounters": "0,1,2,3",
47 "EventName": "LD_BLOCKS.DATA_UNKNOWN",
48 "SampleAfterValue": "200003",
49 "BriefDescription": "Loads blocked due to store data not ready (Precise event capable)"
50 },
51 {
52 "PEBS": "2",
53 "CollectPEBSRecord": "2",
54 "PublicDescription": "Counts a load blocked from using a store forward because of an address/size mismatch; only one of the loads blocked by each store will be counted.",
55 "EventCode": "0x03",
56 "Counter": "0,1,2,3",
57 "UMask": "0x2",
58 "PEBScounters": "0,1,2,3",
59 "EventName": "LD_BLOCKS.STORE_FORWARD",
60 "SampleAfterValue": "200003",
61 "BriefDescription": "Loads blocked due to store forward restriction (Precise event capable)"
62 },
63 {
64 "PEBS": "2",
65 "CollectPEBSRecord": "2",
66 "PublicDescription": "Counts loads that block because their address modulo 4K matches a pending store.",
67 "EventCode": "0x03",
68 "Counter": "0,1,2,3",
69 "UMask": "0x4",
70 "PEBScounters": "0,1,2,3",
71 "EventName": "LD_BLOCKS.4K_ALIAS",
72 "SampleAfterValue": "200003",
73 "BriefDescription": "Loads blocked because address has 4k partial address false dependence (Precise event capable)"
74 },
75 {
76 "PEBS": "2",
77 "CollectPEBSRecord": "2",
78 "PublicDescription": "Counts loads blocked because they are unable to find their physical address in the micro TLB (UTLB).",
79 "EventCode": "0x03",
80 "Counter": "0,1,2,3",
81 "UMask": "0x8",
82 "PEBScounters": "0,1,2,3",
83 "EventName": "LD_BLOCKS.UTLB_MISS",
84 "SampleAfterValue": "200003",
85 "BriefDescription": "Loads blocked because the address is not in the UTLB (Precise event capable)"
86 },
87 {
88 "PEBS": "2",
89 "CollectPEBSRecord": "2",
90 "PublicDescription": "Counts anytime a load that retires is blocked for any reason.",
91 "EventCode": "0x03",
92 "Counter": "0,1,2,3",
93 "UMask": "0x10",
94 "PEBScounters": "0,1,2,3",
95 "EventName": "LD_BLOCKS.ALL_BLOCK",
96 "SampleAfterValue": "200003",
97 "BriefDescription": "Loads blocked (Precise event capable)"
98 },
99 {
100 "CollectPEBSRecord": "1",
101 "PublicDescription": "Counts uops issued by the front end and allocated into the back end of the machine. This event counts uops that retire as well as uops that were speculatively executed but didn't retire. The sort of speculative uops that might be counted includes, but is not limited to, uops issued in the shadow of a mispredicted branch, uops that are inserted during an assist (such as for a denormal floating point result), and (previously allocated) uops that might be canceled during a machine clear.",
102 "EventCode": "0x0E",
103 "Counter": "0,1,2,3",
104 "UMask": "0x0",
105 "PEBScounters": "0,1,2,3",
106 "EventName": "UOPS_ISSUED.ANY",
107 "PDIR_COUNTER": "na",
108 "SampleAfterValue": "200003",
109 "BriefDescription": "Uops issued to the back end per cycle"
110 },
111 {
112 "CollectPEBSRecord": "1",
113 "PublicDescription": "Core cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
114 "EventCode": "0x3C",
115 "Counter": "0,1,2,3",
116 "UMask": "0x0",
117 "PEBScounters": "0,1,2,3",
118 "EventName": "CPU_CLK_UNHALTED.CORE_P",
119 "PDIR_COUNTER": "na",
120 "SampleAfterValue": "2000003",
121 "BriefDescription": "Core cycles when core is not halted"
122 },
123 {
124 "CollectPEBSRecord": "1",
125 "PublicDescription": "Reference cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
126 "EventCode": "0x3C",
127 "Counter": "0,1,2,3",
128 "UMask": "0x1",
129 "PEBScounters": "0,1,2,3",
130 "EventName": "CPU_CLK_UNHALTED.REF",
131 "PDIR_COUNTER": "na",
132 "SampleAfterValue": "2000003",
133 "BriefDescription": "Reference cycles when core is not halted"
134 },
135 {
136 "CollectPEBSRecord": "1",
137 "PublicDescription": "This event is used to measure front-end inefficiencies, i.e. when the front-end of the machine is not delivering uops to the back-end and the back-end is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. The front-end is responsible for fetching the instruction, decoding it into uops in a machine-understandable format and putting them into a uop queue to be consumed by the back end. The back-end then takes these uops and allocates the required resources. When all resources are ready, uops are executed. If the back-end is not ready to accept uops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls that eventually force the front-end to wait until the back-end is ready to receive more uops. This event counts only when the back-end is requesting more uops and the front-end is not able to provide them. When 3 uops are requested and no uops are delivered, the event counts 3. When 3 are requested, and only 1 is delivered, the event counts 2. When only 2 are delivered, the event counts 1. Alternatively stated, the event will not count if 3 uops are delivered, or if the back end is stalled and not requesting any uops at all. Counts indicate missed opportunities for the front-end to deliver a uop to the back end. Some examples of conditions that cause front-end inefficiencies are: ICache misses, ITLB misses, and decoder restrictions that limit the front-end bandwidth. Known Issues: Some uops require multiple allocation slots. These uops will not be charged as a front end 'not delivered' opportunity, and will be regarded as a back end problem. For example, the INC instruction has one uop that requires 2 issue slots. A stream of INC instructions will not count as UOPS_NOT_DELIVERED, even though only one instruction can be issued per clock. The low uop issue rate for a stream of INC instructions is considered to be a back end issue.",
138 "EventCode": "0x9C",
139 "Counter": "0,1,2,3",
140 "UMask": "0x0",
141 "PEBScounters": "0,1,2,3",
142 "EventName": "UOPS_NOT_DELIVERED.ANY",
143 "PDIR_COUNTER": "na",
144 "SampleAfterValue": "200003",
145 "BriefDescription": "Uops requested but not-delivered to the back-end per cycle"
146 },
147 {
148 "PEBS": "2",
149 "CollectPEBSRecord": "1",
150 "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The event continues counting during hardware interrupts, traps, and inside interrupt handlers. This is an architectural performance event. This event uses a (_P)rogrammable general purpose performance counter. *This event is Precise Event capable: The EventingRIP field in the PEBS record is precise to the address of the instruction which caused the event. Note: Because PEBS records can be collected only on IA32_PMC0, only one event can use the PEBS facility at a time.",
151 "EventCode": "0xC0",
152 "Counter": "0,1,2,3",
153 "UMask": "0x0",
154 "PEBScounters": "0,1,2,3",
155 "EventName": "INST_RETIRED.ANY_P",
156 "SampleAfterValue": "2000003",
157 "BriefDescription": "Instructions retired (Precise event capable)"
158 },
159 {
160 "PEBS": "2",
161 "CollectPEBSRecord": "2",
162 "PublicDescription": "Counts INST_RETIRED.ANY using the Reduced Skid PEBS feature that reduces the shadow in which events aren't counted, allowing for a more unbiased distribution of samples across instructions retired.",
163 "EventCode": "0xC0",
164 "Counter": "0,1,2,3",
165 "UMask": "0x0",
166 "EventName": "INST_RETIRED.PREC_DIST",
167 "SampleAfterValue": "2000003",
168 "BriefDescription": "Instructions retired - using Reduced Skid PEBS feature"
169 },
170 {
171 "PEBS": "2",
172 "CollectPEBSRecord": "2",
173 "PublicDescription": "Counts uops which retired.",
174 "EventCode": "0xC2",
175 "Counter": "0,1,2,3",
176 "UMask": "0x0",
177 "PEBScounters": "0,1,2,3",
178 "EventName": "UOPS_RETIRED.ANY",
179 "PDIR_COUNTER": "na",
180 "SampleAfterValue": "2000003",
181 "BriefDescription": "Uops retired (Precise event capable)"
182 },
183 {
184 "PEBS": "2",
185 "CollectPEBSRecord": "2",
186 "PublicDescription": "Counts uops retired that are from the complex flows issued by the micro-sequencer (MS). Counts both the uops from a micro-coded instruction, and the uops that might be generated from a micro-coded assist.",
187 "EventCode": "0xC2",
188 "Counter": "0,1,2,3",
189 "UMask": "0x1",
190 "PEBScounters": "0,1,2,3",
191 "EventName": "UOPS_RETIRED.MS",
192 "PDIR_COUNTER": "na",
193 "SampleAfterValue": "2000003",
194 "BriefDescription": "MS uops retired (Precise event capable)"
195 },
196 {
197 "PEBS": "2",
198 "CollectPEBSRecord": "1",
199 "PublicDescription": "Counts the number of floating point divide uops retired.",
200 "EventCode": "0xC2",
201 "Counter": "0,1,2,3",
202 "UMask": "0x8",
203 "PEBScounters": "0,1,2,3",
204 "EventName": "UOPS_RETIRED.FPDIV",
205 "SampleAfterValue": "2000003",
206 "BriefDescription": "Floating point divide uops retired (Precise Event Capable)"
207 },
208 {
209 "PEBS": "2",
210 "CollectPEBSRecord": "1",
211 "PublicDescription": "Counts the number of integer divide uops retired.",
212 "EventCode": "0xC2",
213 "Counter": "0,1,2,3",
214 "UMask": "0x10",
215 "PEBScounters": "0,1,2,3",
216 "EventName": "UOPS_RETIRED.IDIV",
217 "SampleAfterValue": "2000003",
218 "BriefDescription": "Integer divide uops retired (Precise Event Capable)"
219 },
220 {
221 "CollectPEBSRecord": "1",
222 "PublicDescription": "Counts machine clears for any reason.",
223 "EventCode": "0xC3",
224 "Counter": "0,1,2,3",
225 "UMask": "0x0",
226 "PEBScounters": "0,1,2,3",
227 "EventName": "MACHINE_CLEARS.ALL",
228 "PDIR_COUNTER": "na",
229 "SampleAfterValue": "20003",
230 "BriefDescription": "All machine clears"
231 },
232 {
233 "CollectPEBSRecord": "1",
234 "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel architecture processors.",
235 "EventCode": "0xC3",
236 "Counter": "0,1,2,3",
237 "UMask": "0x1",
238 "PEBScounters": "0,1,2,3",
239 "EventName": "MACHINE_CLEARS.SMC",
240 "PDIR_COUNTER": "na",
241 "SampleAfterValue": "20003",
242 "BriefDescription": "Self-Modifying Code detected"
243 },
244 {
245 "CollectPEBSRecord": "1",
246 "PublicDescription": "Counts machine clears due to floating point (FP) operations needing assists. For instance, if the result was a floating point denormal, the hardware clears the pipeline and reissues uops to produce the correct IEEE compliant denormal result.",
247 "EventCode": "0xC3",
248 "Counter": "0,1,2,3",
249 "UMask": "0x4",
250 "PEBScounters": "0,1,2,3",
251 "EventName": "MACHINE_CLEARS.FP_ASSIST",
252 "PDIR_COUNTER": "na",
253 "SampleAfterValue": "20003",
254 "BriefDescription": "Machine clears due to FP assists"
255 },
256 {
257 "CollectPEBSRecord": "1",
258 "PublicDescription": "Counts machine clears due to memory disambiguation. Memory disambiguation happens when a load which has been issued conflicts with a previous unretired store in the pipeline whose address was not known at issue time, but is later resolved to be the same as the load address.",
259 "EventCode": "0xC3",
260 "Counter": "0,1,2,3",
261 "UMask": "0x8",
262 "PEBScounters": "0,1,2,3",
263 "EventName": "MACHINE_CLEARS.DISAMBIGUATION",
264 "PDIR_COUNTER": "na",
265 "SampleAfterValue": "20003",
266 "BriefDescription": "Machine clears due to memory disambiguation"
267 },
268 {
269 "CollectPEBSRecord": "1",
270 "PublicDescription": "Counts the number of times that the machine clears due to a page fault. Covers both I-side and D-side (Loads/Stores) page faults. A page fault occurs when either the page is not present or there is an access violation.",
271 "EventCode": "0xC3",
272 "Counter": "0,1,2,3",
273 "UMask": "0x20",
274 "PEBScounters": "0,1,2,3",
275 "EventName": "MACHINE_CLEARS.PAGE_FAULT",
276 "PDIR_COUNTER": "na",
277 "SampleAfterValue": "20003",
278 "BriefDescription": "Machine clears due to a page fault"
279 },
280 {
281 "PEBS": "2",
282 "CollectPEBSRecord": "2",
283 "PublicDescription": "Counts branch instructions retired for all branch types. This is an architectural performance event.",
284 "EventCode": "0xC4",
285 "Counter": "0,1,2,3",
286 "UMask": "0x0",
287 "PEBScounters": "0,1,2,3",
288 "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
289 "SampleAfterValue": "200003",
290 "BriefDescription": "Retired branch instructions (Precise event capable)"
291 },
292 {
293 "PEBS": "2",
294 "CollectPEBSRecord": "2",
295 "PublicDescription": "Counts Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was taken and when it was not taken.",
296 "EventCode": "0xC4",
297 "Counter": "0,1,2,3",
298 "UMask": "0x7e",
299 "PEBScounters": "0,1,2,3",
300 "EventName": "BR_INST_RETIRED.JCC",
301 "SampleAfterValue": "200003",
302 "BriefDescription": "Retired conditional branch instructions (Precise event capable)"
303 },
304 {
305 "PEBS": "2",
306 "CollectPEBSRecord": "2",
307 "PublicDescription": "Counts the number of taken branch instructions retired.",
308 "EventCode": "0xC4",
309 "Counter": "0,1,2,3",
310 "UMask": "0x80",
311 "PEBScounters": "0,1,2,3",
312 "EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES",
313 "SampleAfterValue": "200003",
314 "BriefDescription": "Retired taken branch instructions (Precise event capable)"
315 },
316 {
317 "PEBS": "2",
318 "CollectPEBSRecord": "2",
319 "PublicDescription": "Counts far branch instructions retired. This includes far jump, far call and return, and Interrupt call and return.",
320 "EventCode": "0xC4",
321 "Counter": "0,1,2,3",
322 "UMask": "0xbf",
323 "PEBScounters": "0,1,2,3",
324 "EventName": "BR_INST_RETIRED.FAR_BRANCH",
325 "SampleAfterValue": "200003",
326 "BriefDescription": "Retired far branch instructions (Precise event capable)"
327 },
328 {
329 "PEBS": "2",
330 "CollectPEBSRecord": "2",
331 "PublicDescription": "Counts near indirect call or near indirect jmp branch instructions retired.",
332 "EventCode": "0xC4",
333 "Counter": "0,1,2,3",
334 "UMask": "0xeb",
335 "PEBScounters": "0,1,2,3",
336 "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
337 "SampleAfterValue": "200003",
338 "BriefDescription": "Retired instructions of near indirect Jmp or call (Precise event capable)"
339 },
340 {
341 "PEBS": "2",
342 "CollectPEBSRecord": "2",
343 "PublicDescription": "Counts near return branch instructions retired.",
344 "EventCode": "0xC4",
345 "Counter": "0,1,2,3",
346 "UMask": "0xf7",
347 "PEBScounters": "0,1,2,3",
348 "EventName": "BR_INST_RETIRED.RETURN",
349 "SampleAfterValue": "200003",
350 "BriefDescription": "Retired near return instructions (Precise event capable)"
351 },
352 {
353 "PEBS": "2",
354 "CollectPEBSRecord": "2",
355 "PublicDescription": "Counts near CALL branch instructions retired.",
356 "EventCode": "0xC4",
357 "Counter": "0,1,2,3",
358 "UMask": "0xf9",
359 "PEBScounters": "0,1,2,3",
360 "EventName": "BR_INST_RETIRED.CALL",
361 "SampleAfterValue": "200003",
362 "BriefDescription": "Retired near call instructions (Precise event capable)"
363 },
364 {
365 "PEBS": "2",
366 "CollectPEBSRecord": "2",
367 "PublicDescription": "Counts near indirect CALL branch instructions retired.",
368 "EventCode": "0xC4",
369 "Counter": "0,1,2,3",
370 "UMask": "0xfb",
371 "PEBScounters": "0,1,2,3",
372 "EventName": "BR_INST_RETIRED.IND_CALL",
373 "SampleAfterValue": "200003",
374 "BriefDescription": "Retired near indirect call instructions (Precise event capable)"
375 },
376 {
377 "PEBS": "2",
378 "CollectPEBSRecord": "2",
379 "PublicDescription": "Counts near relative CALL branch instructions retired.",
380 "EventCode": "0xC4",
381 "Counter": "0,1,2,3",
382 "UMask": "0xfd",
383 "PEBScounters": "0,1,2,3",
384 "EventName": "BR_INST_RETIRED.REL_CALL",
385 "SampleAfterValue": "200003",
386 "BriefDescription": "Retired near relative call instructions (Precise event capable)"
387 },
388 {
389 "PEBS": "2",
390 "CollectPEBSRecord": "2",
391 "PublicDescription": "Counts Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were taken; it does not count Jcc branch instructions that were not taken.",
392 "EventCode": "0xC4",
393 "Counter": "0,1,2,3",
394 "UMask": "0xfe",
395 "PEBScounters": "0,1,2,3",
396 "EventName": "BR_INST_RETIRED.TAKEN_JCC",
397 "SampleAfterValue": "200003",
398 "BriefDescription": "Retired conditional branch instructions that were taken (Precise event capable)"
399 },
400 {
401 "PEBS": "2",
402 "CollectPEBSRecord": "2",
403 "PublicDescription": "Counts mispredicted branch instructions retired including all branch types.",
404 "EventCode": "0xC5",
405 "Counter": "0,1,2,3",
406 "UMask": "0x0",
407 "PEBScounters": "0,1,2,3",
408 "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
409 "SampleAfterValue": "200003",
410 "BriefDescription": "Retired mispredicted branch instructions (Precise event capable)"
411 },
412 {
413 "PEBS": "2",
414 "CollectPEBSRecord": "2",
415 "PublicDescription": "Counts mispredicted Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was supposed to be taken and when it was not supposed to be taken (but the processor predicted the opposite condition).",
416 "EventCode": "0xC5",
417 "Counter": "0,1,2,3",
418 "UMask": "0x7e",
419 "PEBScounters": "0,1,2,3",
420 "EventName": "BR_MISP_RETIRED.JCC",
421 "SampleAfterValue": "200003",
422 "BriefDescription": "Retired mispredicted conditional branch instructions (Precise event capable)"
423 },
424 {
425 "PEBS": "2",
426 "CollectPEBSRecord": "2",
427 "PublicDescription": "Counts mispredicted branch instructions retired that were near indirect call or near indirect jmp, where the target address taken was not what the processor predicted.",
428 "EventCode": "0xC5",
429 "Counter": "0,1,2,3",
430 "UMask": "0xeb",
431 "PEBScounters": "0,1,2,3",
432 "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
433 "SampleAfterValue": "200003",
434 "BriefDescription": "Retired mispredicted instructions of near indirect Jmp or near indirect call (Precise event capable)"
435 },
436 {
437 "PEBS": "2",
438 "CollectPEBSRecord": "2",
439 "PublicDescription": "Counts mispredicted near RET branch instructions retired, where the return address taken was not what the processor predicted.",
440 "EventCode": "0xC5",
441 "Counter": "0,1,2,3",
442 "UMask": "0xf7",
443 "PEBScounters": "0,1,2,3",
444 "EventName": "BR_MISP_RETIRED.RETURN",
445 "SampleAfterValue": "200003",
446 "BriefDescription": "Retired mispredicted near return instructions (Precise event capable)"
447 },
448 {
449 "PEBS": "2",
450 "CollectPEBSRecord": "2",
451 "PublicDescription": "Counts mispredicted near indirect CALL branch instructions retired, where the target address taken was not what the processor predicted.",
452 "EventCode": "0xC5",
453 "Counter": "0,1,2,3",
454 "UMask": "0xfb",
455 "PEBScounters": "0,1,2,3",
456 "EventName": "BR_MISP_RETIRED.IND_CALL",
457 "SampleAfterValue": "200003",
458 "BriefDescription": "Retired mispredicted near indirect call instructions (Precise event capable)"
459 },
460 {
461 "PEBS": "2",
462 "CollectPEBSRecord": "2",
463    "PublicDescription": "Counts mispredicted Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were supposed to be taken but the processor predicted that they would not be taken.",
464 "EventCode": "0xC5",
465 "Counter": "0,1,2,3",
466 "UMask": "0xfe",
467 "PEBScounters": "0,1,2,3",
468 "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
469 "SampleAfterValue": "200003",
470 "BriefDescription": "Retired mispredicted conditional branch instructions that were taken (Precise event capable)"
471 },
472 {
473 "CollectPEBSRecord": "1",
474 "PublicDescription": "Counts core cycles if either divide unit is busy.",
475 "EventCode": "0xCD",
476 "Counter": "0,1,2,3",
477 "UMask": "0x0",
478 "PEBScounters": "0,1,2,3",
479 "EventName": "CYCLES_DIV_BUSY.ALL",
480 "PDIR_COUNTER": "na",
481 "SampleAfterValue": "2000003",
482 "BriefDescription": "Cycles a divider is busy"
483 },
484 {
485 "CollectPEBSRecord": "1",
486 "PublicDescription": "Counts core cycles the integer divide unit is busy.",
487 "EventCode": "0xCD",
488 "Counter": "0,1,2,3",
489 "UMask": "0x1",
490 "PEBScounters": "0,1,2,3",
491 "EventName": "CYCLES_DIV_BUSY.IDIV",
492 "PDIR_COUNTER": "na",
493 "SampleAfterValue": "200003",
494 "BriefDescription": "Cycles the integer divide unit is busy"
495 },
496 {
497 "CollectPEBSRecord": "1",
498 "PublicDescription": "Counts core cycles the floating point divide unit is busy.",
499 "EventCode": "0xCD",
500 "Counter": "0,1,2,3",
501 "UMask": "0x2",
502 "PEBScounters": "0,1,2,3",
503 "EventName": "CYCLES_DIV_BUSY.FPDIV",
504 "PDIR_COUNTER": "na",
505 "SampleAfterValue": "200003",
506 "BriefDescription": "Cycles the FP divide unit is busy"
507 },
508 {
509 "CollectPEBSRecord": "1",
510 "PublicDescription": "Counts the number of times a BACLEAR is signaled for any reason, including, but not limited to indirect branch/call, Jcc (Jump on Conditional Code/Jump if Condition is Met) branch, unconditional branch/call, and returns.",
511 "EventCode": "0xE6",
512 "Counter": "0,1,2,3",
513 "UMask": "0x1",
514 "PEBScounters": "0,1,2,3",
515 "EventName": "BACLEARS.ALL",
516 "PDIR_COUNTER": "na",
517 "SampleAfterValue": "200003",
518 "BriefDescription": "BACLEARs asserted for any branch type"
519 },
520 {
521 "CollectPEBSRecord": "1",
522 "PublicDescription": "Counts BACLEARS on return instructions.",
523 "EventCode": "0xE6",
524 "Counter": "0,1,2,3",
525 "UMask": "0x8",
526 "PEBScounters": "0,1,2,3",
527 "EventName": "BACLEARS.RETURN",
528 "PDIR_COUNTER": "na",
529 "SampleAfterValue": "200003",
530 "BriefDescription": "BACLEARs asserted for return branch"
531 },
532 {
533 "CollectPEBSRecord": "1",
534 "PublicDescription": "Counts BACLEARS on Jcc (Jump on Conditional Code/Jump if Condition is Met) branches.",
535 "EventCode": "0xE6",
536 "Counter": "0,1,2,3",
537 "UMask": "0x10",
538 "PEBScounters": "0,1,2,3",
539 "EventName": "BACLEARS.COND",
540 "PDIR_COUNTER": "na",
541 "SampleAfterValue": "200003",
542 "BriefDescription": "BACLEARs asserted for conditional branch"
543 }
544] \ No newline at end of file
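The divider-busy and branch-misprediction events above are usually consumed as simple ratios rather than raw counts. As a rough, hypothetical sketch (not part of the perf tooling), the Python below derives a few such ratios from counter values that might come out of a perf stat run; the numbers and the counts dict are invented for illustration.

# Hypothetical counter readings; in practice these would come from a tool
# such as perf stat on a Goldmont Plus part.
counts = {
    "cycles": 1_000_000_000,
    "CYCLES_DIV_BUSY.ALL": 45_000_000,
    "CYCLES_DIV_BUSY.IDIV": 30_000_000,
    "CYCLES_DIV_BUSY.FPDIV": 15_000_000,
    "BR_MISP_RETIRED.ALL_BRANCHES": 2_500_000,
    "BR_MISP_RETIRED.JCC": 1_800_000,
}

# Fraction of core cycles with either divide unit busy, and how those busy
# cycles break down between the integer and FP dividers.
div_busy = counts["CYCLES_DIV_BUSY.ALL"] / counts["cycles"]
idiv_share = counts["CYCLES_DIV_BUSY.IDIV"] / counts["CYCLES_DIV_BUSY.ALL"]
fpdiv_share = counts["CYCLES_DIV_BUSY.FPDIV"] / counts["CYCLES_DIV_BUSY.ALL"]

# Share of all retired mispredictions that were conditional (Jcc) branches.
jcc_misp_share = counts["BR_MISP_RETIRED.JCC"] / counts["BR_MISP_RETIRED.ALL_BRANCHES"]

print(f"divider busy: {div_busy:.1%} of cycles "
      f"(idiv {idiv_share:.0%}, fpdiv {fpdiv_share:.0%})")
print(f"Jcc share of retired mispredictions: {jcc_misp_share:.1%}")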
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json b/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json
new file mode 100644
index 000000000000..0b53a3b0dfb8
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json
@@ -0,0 +1,218 @@
1[
2 {
3 "CollectPEBSRecord": "1",
4 "PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 4K pages. The page walks can end with or without a page fault.",
5 "EventCode": "0x08",
6 "Counter": "0,1,2,3",
7 "UMask": "0x2",
8 "PEBScounters": "0,1,2,3",
9 "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
10 "PDIR_COUNTER": "na",
11 "SampleAfterValue": "200003",
12 "BriefDescription": "Page walk completed due to a demand load to a 4K page"
13 },
14 {
15 "CollectPEBSRecord": "1",
16 "PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
17 "EventCode": "0x08",
18 "Counter": "0,1,2,3",
19 "UMask": "0x4",
20 "PEBScounters": "0,1,2,3",
21 "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
22 "PDIR_COUNTER": "na",
23 "SampleAfterValue": "200003",
24 "BriefDescription": "Page walk completed due to a demand load to a 2M or 4M page"
25 },
26 {
27 "CollectPEBSRecord": "1",
28 "PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 1GB pages. The page walks can end with or without a page fault.",
29 "EventCode": "0x08",
30 "Counter": "0,1,2,3",
31 "UMask": "0x8",
32 "PEBScounters": "0,1,2,3",
33 "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1GB",
34 "PDIR_COUNTER": "na",
35 "SampleAfterValue": "200003",
36 "BriefDescription": "Page walk completed due to a demand load to a 1GB page"
37 },
38 {
39 "CollectPEBSRecord": "1",
40 "PublicDescription": "Counts once per cycle for each page walk occurring due to a load (demand data loads or SW prefetches). Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
41 "EventCode": "0x08",
42 "Counter": "0,1,2,3",
43 "UMask": "0x10",
44 "PEBScounters": "0,1,2,3",
45 "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
46 "PDIR_COUNTER": "na",
47 "SampleAfterValue": "200003",
48 "BriefDescription": "Page walks outstanding due to a demand load every cycle."
49 },
50 {
51 "CollectPEBSRecord": "1",
52 "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
53 "EventCode": "0x49",
54 "Counter": "0,1,2,3",
55 "UMask": "0x2",
56 "PEBScounters": "0,1,2,3",
57 "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
58 "PDIR_COUNTER": "na",
59 "SampleAfterValue": "2000003",
60 "BriefDescription": "Page walk completed due to a demand data store to a 4K page"
61 },
62 {
63 "CollectPEBSRecord": "1",
64 "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
65 "EventCode": "0x49",
66 "Counter": "0,1,2,3",
67 "UMask": "0x4",
68 "PEBScounters": "0,1,2,3",
69 "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
70 "PDIR_COUNTER": "na",
71 "SampleAfterValue": "2000003",
72 "BriefDescription": "Page walk completed due to a demand data store to a 2M or 4M page"
73 },
74 {
75 "CollectPEBSRecord": "1",
76 "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1GB pages. The page walks can end with or without a page fault.",
77 "EventCode": "0x49",
78 "Counter": "0,1,2,3",
79 "UMask": "0x8",
80 "PEBScounters": "0,1,2,3",
81 "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1GB",
82 "PDIR_COUNTER": "na",
83 "SampleAfterValue": "2000003",
84 "BriefDescription": "Page walk completed due to a demand data store to a 1GB page"
85 },
86 {
87 "CollectPEBSRecord": "1",
88 "PublicDescription": "Counts once per cycle for each page walk occurring due to a demand data store. Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
89 "EventCode": "0x49",
90 "Counter": "0,1,2,3",
91 "UMask": "0x10",
92 "PEBScounters": "0,1,2,3",
93 "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
94 "PDIR_COUNTER": "na",
95 "SampleAfterValue": "200003",
96 "BriefDescription": "Page walks outstanding due to a demand data store every cycle."
97 },
98 {
99 "CollectPEBSRecord": "1",
100    "PublicDescription": "Counts once per cycle for each page walk only while traversing the Extended Page Table (EPT), and does not count during the rest of the translation. The EPT is used for translating Guest-Physical Addresses to Physical Addresses for Virtual Machine Monitors (VMMs). Average cycles per walk can be calculated by dividing the count by the number of walks.",
101 "EventCode": "0x4F",
102 "Counter": "0,1,2,3",
103 "UMask": "0x10",
104 "PEBScounters": "0,1,2,3",
105 "EventName": "EPT.WALK_PENDING",
106 "PDIR_COUNTER": "na",
107 "SampleAfterValue": "200003",
108 "BriefDescription": "Page walks outstanding due to walking the EPT every cycle"
109 },
110 {
111 "CollectPEBSRecord": "1",
112    "PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) for a linear address of an instruction fetch. It counts when new translations are filled into the ITLB. The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.",
113 "EventCode": "0x81",
114 "Counter": "0,1,2,3",
115 "UMask": "0x4",
116 "PEBScounters": "0,1,2,3",
117 "EventName": "ITLB.MISS",
118 "PDIR_COUNTER": "na",
119 "SampleAfterValue": "200003",
120 "BriefDescription": "ITLB misses"
121 },
122 {
123 "CollectPEBSRecord": "1",
124 "PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
125 "EventCode": "0x85",
126 "Counter": "0,1,2,3",
127 "UMask": "0x2",
128 "PEBScounters": "0,1,2,3",
129 "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
130 "PDIR_COUNTER": "na",
131 "SampleAfterValue": "2000003",
132 "BriefDescription": "Page walk completed due to an instruction fetch in a 4K page"
133 },
134 {
135 "CollectPEBSRecord": "1",
136 "PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
137 "EventCode": "0x85",
138 "Counter": "0,1,2,3",
139 "UMask": "0x4",
140 "PEBScounters": "0,1,2,3",
141 "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
142 "PDIR_COUNTER": "na",
143 "SampleAfterValue": "2000003",
144 "BriefDescription": "Page walk completed due to an instruction fetch in a 2M or 4M page"
145 },
146 {
147 "CollectPEBSRecord": "1",
148 "PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 1GB pages. The page walks can end with or without a page fault.",
149 "EventCode": "0x85",
150 "Counter": "0,1,2,3",
151 "UMask": "0x8",
152 "PEBScounters": "0,1,2,3",
153 "EventName": "ITLB_MISSES.WALK_COMPLETED_1GB",
154 "PDIR_COUNTER": "na",
155 "SampleAfterValue": "2000003",
156 "BriefDescription": "Page walk completed due to an instruction fetch in a 1GB page"
157 },
158 {
159 "CollectPEBSRecord": "1",
160 "PublicDescription": "Counts once per cycle for each page walk occurring due to an instruction fetch. Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
161 "EventCode": "0x85",
162 "Counter": "0,1,2,3",
163 "UMask": "0x10",
164 "PEBScounters": "0,1,2,3",
165 "EventName": "ITLB_MISSES.WALK_PENDING",
166 "PDIR_COUNTER": "na",
167 "SampleAfterValue": "200003",
168 "BriefDescription": "Page walks outstanding due to an instruction fetch every cycle."
169 },
170 {
171 "CollectPEBSRecord": "1",
172 "PublicDescription": "Counts STLB flushes. The TLBs are flushed on instructions like INVLPG and MOV to CR3.",
173 "EventCode": "0xBD",
174 "Counter": "0,1,2,3",
175 "UMask": "0x20",
176 "PEBScounters": "0,1,2,3",
177 "EventName": "TLB_FLUSHES.STLB_ANY",
178 "PDIR_COUNTER": "na",
179 "SampleAfterValue": "20003",
180 "BriefDescription": "STLB flushes"
181 },
182 {
183 "PEBS": "2",
184 "CollectPEBSRecord": "2",
185 "PublicDescription": "Counts load uops retired that caused a DTLB miss.",
186 "EventCode": "0xD0",
187 "Counter": "0,1,2,3",
188 "UMask": "0x11",
189 "PEBScounters": "0,1,2,3",
190 "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
191 "SampleAfterValue": "200003",
192 "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)"
193 },
194 {
195 "PEBS": "2",
196 "CollectPEBSRecord": "2",
197 "PublicDescription": "Counts store uops retired that caused a DTLB miss.",
198 "EventCode": "0xD0",
199 "Counter": "0,1,2,3",
200 "UMask": "0x12",
201 "PEBScounters": "0,1,2,3",
202 "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
203 "SampleAfterValue": "200003",
204 "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)"
205 },
206 {
207 "PEBS": "2",
208 "CollectPEBSRecord": "2",
209    "PublicDescription": "Counts uops retired that had a DTLB miss on a load, a store, or either. Note that when two distinct memory operations to the same page miss the DTLB, only one of them will be recorded as a DTLB miss.",
210 "EventCode": "0xD0",
211 "Counter": "0,1,2,3",
212 "UMask": "0x13",
213 "PEBScounters": "0,1,2,3",
214 "EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
215 "SampleAfterValue": "200003",
216 "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)"
217 }
218] \ No newline at end of file
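Several of the descriptions above note that the average cycles per page walk can be obtained by dividing the WALK_PENDING count by the number of walks. A minimal sketch of that arithmetic, assuming hypothetical counter values and using the completed-walk events as an approximation of the total number of walks:

# Hypothetical readings of the DTLB load-walk events defined above.
walk_pending_cycles = 8_400_000     # DTLB_LOAD_MISSES.WALK_PENDING
walks_completed = (
    120_000                         # DTLB_LOAD_MISSES.WALK_COMPLETED_4K
    + 9_000                         # DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M
    + 150                           # DTLB_LOAD_MISSES.WALK_COMPLETED_1GB
)

# WALK_PENDING increments once per cycle per outstanding walk, so dividing by
# the number of walks approximates the average walk latency in cycles.
avg_cycles_per_walk = walk_pending_cycles / walks_completed
print(f"average demand-load page walk: {avg_cycles_per_walk:.1f} cycles")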
diff --git a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
new file mode 100644
index 000000000000..5ab5c78fe580
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
@@ -0,0 +1,158 @@
1[
2 {
3 "BriefDescription": "Instructions Per Cycle (per logical thread)",
4 "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
5 "MetricGroup": "TopDownL1",
6 "MetricName": "IPC"
7 },
8 {
9 "BriefDescription": "Uops Per Instruction",
10 "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
11 "MetricGroup": "Pipeline",
12 "MetricName": "UPI"
13 },
14 {
15    "BriefDescription": "Rough estimation of the fraction of fetched-line bytes that were likely consumed by program instructions",
16 "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
17 "MetricGroup": "Frontend",
18 "MetricName": "IFetch_Line_Utilization"
19 },
20 {
21 "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
22 "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
23 "MetricGroup": "DSB; Frontend_Bandwidth",
24 "MetricName": "DSB_Coverage"
25 },
26 {
27 "BriefDescription": "Cycles Per Instruction (threaded)",
28 "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
29 "MetricGroup": "Pipeline;Summary",
30 "MetricName": "CPI"
31 },
32 {
33 "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
34 "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
35 "MetricGroup": "Summary",
36 "MetricName": "CLKS"
37 },
38 {
39 "BriefDescription": "Total issue-pipeline slots",
40 "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
41 "MetricGroup": "TopDownL1",
42 "MetricName": "SLOTS"
43 },
44 {
45 "BriefDescription": "Total number of retired Instructions",
46 "MetricExpr": "INST_RETIRED.ANY",
47 "MetricGroup": "Summary",
48 "MetricName": "Instructions"
49 },
50 {
51 "BriefDescription": "Instructions Per Cycle (per physical core)",
52 "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
53 "MetricGroup": "SMT",
54 "MetricName": "CoreIPC"
55 },
56 {
57 "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
58 "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
59 "MetricGroup": "Pipeline;Ports_Utilization",
60 "MetricName": "ILP"
61 },
62 {
63 "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
64 "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION )) ) / RS_EVENTS.EMPTY_END)",
65 "MetricGroup": "Unknown_Branches",
66 "MetricName": "BAClear_Cost"
67 },
68 {
69 "BriefDescription": "Core actual clocks when any thread is active on the physical core",
70 "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
71 "MetricGroup": "SMT",
72 "MetricName": "CORE_CLKS"
73 },
74 {
75 "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
76 "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
77 "MetricGroup": "Memory_Bound;Memory_Lat",
78 "MetricName": "Load_Miss_Real_Latency"
79 },
80 {
81    "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand loads when there is at least 1 such miss)",
82 "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
83 "MetricGroup": "Memory_Bound;Memory_BW",
84 "MetricName": "MLP"
85 },
86 {
87 "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
88 "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
89 "MetricGroup": "TLB",
90 "MetricName": "Page_Walks_Utilization"
91 },
92 {
93 "BriefDescription": "Average CPU Utilization",
94 "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
95 "MetricGroup": "Summary",
96 "MetricName": "CPU_Utilization"
97 },
98 {
99    "BriefDescription": "Average Frequency Utilization relative to nominal frequency",
100 "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
101 "MetricGroup": "Power",
102 "MetricName": "Turbo_Utilization"
103 },
104 {
105 "BriefDescription": "Fraction of cycles where both hardware threads were active",
106 "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
107 "MetricGroup": "SMT;Summary",
108 "MetricName": "SMT_2T_Utilization"
109 },
110 {
111 "BriefDescription": "Fraction of cycles spent in Kernel mode",
112 "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
113 "MetricGroup": "Summary",
114 "MetricName": "Kernel_Utilization"
115 },
116 {
117 "BriefDescription": "C3 residency percent per core",
118 "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
119 "MetricGroup": "Power",
120 "MetricName": "C3_Core_Residency"
121 },
122 {
123 "BriefDescription": "C6 residency percent per core",
124 "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
125 "MetricGroup": "Power",
126 "MetricName": "C6_Core_Residency"
127 },
128 {
129 "BriefDescription": "C7 residency percent per core",
130 "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
131 "MetricGroup": "Power",
132 "MetricName": "C7_Core_Residency"
133 },
134 {
135 "BriefDescription": "C2 residency percent per package",
136 "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
137 "MetricGroup": "Power",
138 "MetricName": "C2_Pkg_Residency"
139 },
140 {
141 "BriefDescription": "C3 residency percent per package",
142 "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
143 "MetricGroup": "Power",
144 "MetricName": "C3_Pkg_Residency"
145 },
146 {
147 "BriefDescription": "C6 residency percent per package",
148 "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
149 "MetricGroup": "Power",
150 "MetricName": "C6_Pkg_Residency"
151 },
152 {
153 "BriefDescription": "C7 residency percent per package",
154 "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
155 "MetricGroup": "Power",
156 "MetricName": "C7_Pkg_Residency"
157 }
158]
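The MetricExpr strings in this file are evaluated by perf's metric code over the events they name. As a rough illustration only (not perf's implementation), the sketch below shows how a few of them (IPC, CPI, SLOTS, CORE_CLKS, CoreIPC) resolve once counter values are in hand, with "cycles" taken here as CPU_CLK_UNHALTED.THREAD and all numbers invented.

# Illustrative evaluation of a few Haswell metrics; not perf's implementation.
smt_on = True                                  # corresponds to #SMT_on
inst_retired_any = 3_200_000_000               # INST_RETIRED.ANY
clk_unhalted_thread = 2_000_000_000            # CPU_CLK_UNHALTED.THREAD ("cycles")
clk_unhalted_thread_any = 3_600_000_000        # CPU_CLK_UNHALTED.THREAD_ANY

# CORE_CLKS: halve the any-thread count when both hardware threads are on.
core_clks = clk_unhalted_thread_any / 2 if smt_on else clk_unhalted_thread

ipc = inst_retired_any / clk_unhalted_thread   # "IPC"
cpi = 1 / ipc                                  # "CPI"
slots = 4 * core_clks                          # "SLOTS" (4-wide issue)
core_ipc = inst_retired_any / core_clks        # "CoreIPC"

print(f"IPC={ipc:.2f} CPI={cpi:.2f} CoreIPC={core_ipc:.2f} SLOTS={slots:.3e}")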
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
new file mode 100644
index 000000000000..5ab5c78fe580
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
@@ -0,0 +1,158 @@
1[
2 {
3 "BriefDescription": "Instructions Per Cycle (per logical thread)",
4 "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
5 "MetricGroup": "TopDownL1",
6 "MetricName": "IPC"
7 },
8 {
9 "BriefDescription": "Uops Per Instruction",
10 "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
11 "MetricGroup": "Pipeline",
12 "MetricName": "UPI"
13 },
14 {
15    "BriefDescription": "Rough estimation of the fraction of fetched-line bytes that were likely consumed by program instructions",
16 "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
17 "MetricGroup": "Frontend",
18 "MetricName": "IFetch_Line_Utilization"
19 },
20 {
21 "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
22 "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
23 "MetricGroup": "DSB; Frontend_Bandwidth",
24 "MetricName": "DSB_Coverage"
25 },
26 {
27 "BriefDescription": "Cycles Per Instruction (threaded)",
28 "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
29 "MetricGroup": "Pipeline;Summary",
30 "MetricName": "CPI"
31 },
32 {
33 "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
34 "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
35 "MetricGroup": "Summary",
36 "MetricName": "CLKS"
37 },
38 {
39 "BriefDescription": "Total issue-pipeline slots",
40 "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
41 "MetricGroup": "TopDownL1",
42 "MetricName": "SLOTS"
43 },
44 {
45 "BriefDescription": "Total number of retired Instructions",
46 "MetricExpr": "INST_RETIRED.ANY",
47 "MetricGroup": "Summary",
48 "MetricName": "Instructions"
49 },
50 {
51 "BriefDescription": "Instructions Per Cycle (per physical core)",
52 "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
53 "MetricGroup": "SMT",
54 "MetricName": "CoreIPC"
55 },
56 {
57 "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
58 "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
59 "MetricGroup": "Pipeline;Ports_Utilization",
60 "MetricName": "ILP"
61 },
62 {
63 "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
64 "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION )) ) / RS_EVENTS.EMPTY_END)",
65 "MetricGroup": "Unknown_Branches",
66 "MetricName": "BAClear_Cost"
67 },
68 {
69 "BriefDescription": "Core actual clocks when any thread is active on the physical core",
70 "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
71 "MetricGroup": "SMT",
72 "MetricName": "CORE_CLKS"
73 },
74 {
75 "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
76 "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
77 "MetricGroup": "Memory_Bound;Memory_Lat",
78 "MetricName": "Load_Miss_Real_Latency"
79 },
80 {
81    "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand loads when there is at least 1 such miss)",
82 "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
83 "MetricGroup": "Memory_Bound;Memory_BW",
84 "MetricName": "MLP"
85 },
86 {
87 "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
88 "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
89 "MetricGroup": "TLB",
90 "MetricName": "Page_Walks_Utilization"
91 },
92 {
93 "BriefDescription": "Average CPU Utilization",
94 "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
95 "MetricGroup": "Summary",
96 "MetricName": "CPU_Utilization"
97 },
98 {
99    "BriefDescription": "Average Frequency Utilization relative to nominal frequency",
100 "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
101 "MetricGroup": "Power",
102 "MetricName": "Turbo_Utilization"
103 },
104 {
105 "BriefDescription": "Fraction of cycles where both hardware threads were active",
106 "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
107 "MetricGroup": "SMT;Summary",
108 "MetricName": "SMT_2T_Utilization"
109 },
110 {
111 "BriefDescription": "Fraction of cycles spent in Kernel mode",
112 "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
113 "MetricGroup": "Summary",
114 "MetricName": "Kernel_Utilization"
115 },
116 {
117 "BriefDescription": "C3 residency percent per core",
118 "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
119 "MetricGroup": "Power",
120 "MetricName": "C3_Core_Residency"
121 },
122 {
123 "BriefDescription": "C6 residency percent per core",
124 "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
125 "MetricGroup": "Power",
126 "MetricName": "C6_Core_Residency"
127 },
128 {
129 "BriefDescription": "C7 residency percent per core",
130 "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
131 "MetricGroup": "Power",
132 "MetricName": "C7_Core_Residency"
133 },
134 {
135 "BriefDescription": "C2 residency percent per package",
136 "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
137 "MetricGroup": "Power",
138 "MetricName": "C2_Pkg_Residency"
139 },
140 {
141 "BriefDescription": "C3 residency percent per package",
142 "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
143 "MetricGroup": "Power",
144 "MetricName": "C3_Pkg_Residency"
145 },
146 {
147 "BriefDescription": "C6 residency percent per package",
148 "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
149 "MetricGroup": "Power",
150 "MetricName": "C6_Pkg_Residency"
151 },
152 {
153 "BriefDescription": "C7 residency percent per package",
154 "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
155 "MetricGroup": "Power",
156 "MetricName": "C7_Pkg_Residency"
157 }
158]
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
new file mode 100644
index 000000000000..7c2679514efb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
@@ -0,0 +1,164 @@
1[
2 {
3 "BriefDescription": "Instructions Per Cycle (per logical thread)",
4 "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
5 "MetricGroup": "TopDownL1",
6 "MetricName": "IPC"
7 },
8 {
9 "BriefDescription": "Uops Per Instruction",
10 "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
11 "MetricGroup": "Pipeline",
12 "MetricName": "UPI"
13 },
14 {
15    "BriefDescription": "Rough estimation of the fraction of fetched-line bytes that were likely consumed by program instructions",
16 "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
17 "MetricGroup": "Frontend",
18 "MetricName": "IFetch_Line_Utilization"
19 },
20 {
21 "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
22 "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
23 "MetricGroup": "DSB; Frontend_Bandwidth",
24 "MetricName": "DSB_Coverage"
25 },
26 {
27 "BriefDescription": "Cycles Per Instruction (threaded)",
28 "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
29 "MetricGroup": "Pipeline;Summary",
30 "MetricName": "CPI"
31 },
32 {
33 "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
34 "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
35 "MetricGroup": "Summary",
36 "MetricName": "CLKS"
37 },
38 {
39 "BriefDescription": "Total issue-pipeline slots",
40 "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
41 "MetricGroup": "TopDownL1",
42 "MetricName": "SLOTS"
43 },
44 {
45 "BriefDescription": "Total number of retired Instructions",
46 "MetricExpr": "INST_RETIRED.ANY",
47 "MetricGroup": "Summary",
48 "MetricName": "Instructions"
49 },
50 {
51 "BriefDescription": "Instructions Per Cycle (per physical core)",
52 "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
53 "MetricGroup": "SMT",
54 "MetricName": "CoreIPC"
55 },
56 {
57 "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
58 "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
59 "MetricGroup": "Pipeline;Ports_Utilization",
60 "MetricName": "ILP"
61 },
62 {
63 "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
64 "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFETCH_STALL ) / RS_EVENTS.EMPTY_END)",
65 "MetricGroup": "Unknown_Branches",
66 "MetricName": "BAClear_Cost"
67 },
68 {
69 "BriefDescription": "Core actual clocks when any thread is active on the physical core",
70 "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
71 "MetricGroup": "SMT",
72 "MetricName": "CORE_CLKS"
73 },
74 {
75 "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
76 "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
77 "MetricGroup": "Memory_Bound;Memory_Lat",
78 "MetricName": "Load_Miss_Real_Latency"
79 },
80 {
81    "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand loads when there is at least 1 such miss)",
82 "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
83 "MetricGroup": "Memory_Bound;Memory_BW",
84 "MetricName": "MLP"
85 },
86 {
87 "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
88 "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
89 "MetricGroup": "TLB",
90 "MetricName": "Page_Walks_Utilization"
91 },
92 {
93 "BriefDescription": "Average CPU Utilization",
94 "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
95 "MetricGroup": "Summary",
96 "MetricName": "CPU_Utilization"
97 },
98 {
99 "BriefDescription": "Giga Floating Point Operations Per Second",
100 "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
101 "MetricGroup": "FLOPS;Summary",
102 "MetricName": "GFLOPs"
103 },
104 {
105    "BriefDescription": "Average Frequency Utilization relative to nominal frequency",
106 "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
107 "MetricGroup": "Power",
108 "MetricName": "Turbo_Utilization"
109 },
110 {
111 "BriefDescription": "Fraction of cycles where both hardware threads were active",
112 "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
113 "MetricGroup": "SMT;Summary",
114 "MetricName": "SMT_2T_Utilization"
115 },
116 {
117 "BriefDescription": "Fraction of cycles spent in Kernel mode",
118 "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
119 "MetricGroup": "Summary",
120 "MetricName": "Kernel_Utilization"
121 },
122 {
123 "BriefDescription": "C3 residency percent per core",
124 "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
125 "MetricGroup": "Power",
126 "MetricName": "C3_Core_Residency"
127 },
128 {
129 "BriefDescription": "C6 residency percent per core",
130 "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
131 "MetricGroup": "Power",
132 "MetricName": "C6_Core_Residency"
133 },
134 {
135 "BriefDescription": "C7 residency percent per core",
136 "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
137 "MetricGroup": "Power",
138 "MetricName": "C7_Core_Residency"
139 },
140 {
141 "BriefDescription": "C2 residency percent per package",
142 "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
143 "MetricGroup": "Power",
144 "MetricName": "C2_Pkg_Residency"
145 },
146 {
147 "BriefDescription": "C3 residency percent per package",
148 "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
149 "MetricGroup": "Power",
150 "MetricName": "C3_Pkg_Residency"
151 },
152 {
153 "BriefDescription": "C6 residency percent per package",
154 "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
155 "MetricGroup": "Power",
156 "MetricName": "C6_Pkg_Residency"
157 },
158 {
159 "BriefDescription": "C7 residency percent per package",
160 "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
161 "MetricGroup": "Power",
162 "MetricName": "C7_Pkg_Residency"
163 }
164]
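The GFLOPs expression above weights each FP_COMP_OPS_EXE / SIMD_FP_256 event by the number of floating-point elements per operation and then normalizes by wall-clock time. A worked sketch of that formula, with invented counts:

# Invented event counts for the GFLOPs expression; weights follow the formula
# above: scalar ops carry 1 element, 128-bit packed double 2, 128-bit packed
# single and 256-bit packed double 4, 256-bit packed single 8.
sse_scalar_single = 4.0e9
sse_scalar_double = 1.0e9
sse_packed_double = 2.0e9
sse_packed_single = 3.0e9
simd_fp_256_packed_double = 0.5e9
simd_fp_256_packed_single = 1.5e9
duration_time = 2.0                 # seconds of wall-clock time

flops = (1 * (sse_scalar_single + sse_scalar_double)
         + 2 * sse_packed_double
         + 4 * (sse_packed_single + simd_fp_256_packed_double)
         + 8 * simd_fp_256_packed_single)
gflops = flops / 1_000_000_000 / duration_time
print(f"GFLOPs = {gflops:.1f}")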
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
new file mode 100644
index 000000000000..7c2679514efb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
@@ -0,0 +1,164 @@
1[
2 {
3 "BriefDescription": "Instructions Per Cycle (per logical thread)",
4 "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
5 "MetricGroup": "TopDownL1",
6 "MetricName": "IPC"
7 },
8 {
9 "BriefDescription": "Uops Per Instruction",
10 "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
11 "MetricGroup": "Pipeline",
12 "MetricName": "UPI"
13 },
14 {
15    "BriefDescription": "Rough estimation of the fraction of fetched-line bytes that were likely consumed by program instructions",
16 "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
17 "MetricGroup": "Frontend",
18 "MetricName": "IFetch_Line_Utilization"
19 },
20 {
21 "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
22 "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
23 "MetricGroup": "DSB; Frontend_Bandwidth",
24 "MetricName": "DSB_Coverage"
25 },
26 {
27 "BriefDescription": "Cycles Per Instruction (threaded)",
28 "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
29 "MetricGroup": "Pipeline;Summary",
30 "MetricName": "CPI"
31 },
32 {
33 "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
34 "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
35 "MetricGroup": "Summary",
36 "MetricName": "CLKS"
37 },
38 {
39 "BriefDescription": "Total issue-pipeline slots",
40 "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
41 "MetricGroup": "TopDownL1",
42 "MetricName": "SLOTS"
43 },
44 {
45 "BriefDescription": "Total number of retired Instructions",
46 "MetricExpr": "INST_RETIRED.ANY",
47 "MetricGroup": "Summary",
48 "MetricName": "Instructions"
49 },
50 {
51 "BriefDescription": "Instructions Per Cycle (per physical core)",
52 "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
53 "MetricGroup": "SMT",
54 "MetricName": "CoreIPC"
55 },
56 {
57 "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
58 "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
59 "MetricGroup": "Pipeline;Ports_Utilization",
60 "MetricName": "ILP"
61 },
62 {
63 "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
64 "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFETCH_STALL ) / RS_EVENTS.EMPTY_END)",
65 "MetricGroup": "Unknown_Branches",
66 "MetricName": "BAClear_Cost"
67 },
68 {
69 "BriefDescription": "Core actual clocks when any thread is active on the physical core",
70 "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
71 "MetricGroup": "SMT",
72 "MetricName": "CORE_CLKS"
73 },
74 {
75 "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
76 "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
77 "MetricGroup": "Memory_Bound;Memory_Lat",
78 "MetricName": "Load_Miss_Real_Latency"
79 },
80 {
81    "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand loads when there is at least 1 such miss)",
82 "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
83 "MetricGroup": "Memory_Bound;Memory_BW",
84 "MetricName": "MLP"
85 },
86 {
87 "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
88 "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
89 "MetricGroup": "TLB",
90 "MetricName": "Page_Walks_Utilization"
91 },
92 {
93 "BriefDescription": "Average CPU Utilization",
94 "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
95 "MetricGroup": "Summary",
96 "MetricName": "CPU_Utilization"
97 },
98 {
99 "BriefDescription": "Giga Floating Point Operations Per Second",
100 "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
101 "MetricGroup": "FLOPS;Summary",
102 "MetricName": "GFLOPs"
103 },
104 {
105    "BriefDescription": "Average Frequency Utilization relative to nominal frequency",
106 "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
107 "MetricGroup": "Power",
108 "MetricName": "Turbo_Utilization"
109 },
110 {
111 "BriefDescription": "Fraction of cycles where both hardware threads were active",
112 "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
113 "MetricGroup": "SMT;Summary",
114 "MetricName": "SMT_2T_Utilization"
115 },
116 {
117 "BriefDescription": "Fraction of cycles spent in Kernel mode",
118 "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
119 "MetricGroup": "Summary",
120 "MetricName": "Kernel_Utilization"
121 },
122 {
123 "BriefDescription": "C3 residency percent per core",
124 "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
125 "MetricGroup": "Power",
126 "MetricName": "C3_Core_Residency"
127 },
128 {
129 "BriefDescription": "C6 residency percent per core",
130 "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
131 "MetricGroup": "Power",
132 "MetricName": "C6_Core_Residency"
133 },
134 {
135 "BriefDescription": "C7 residency percent per core",
136 "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
137 "MetricGroup": "Power",
138 "MetricName": "C7_Core_Residency"
139 },
140 {
141 "BriefDescription": "C2 residency percent per package",
142 "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
143 "MetricGroup": "Power",
144 "MetricName": "C2_Pkg_Residency"
145 },
146 {
147 "BriefDescription": "C3 residency percent per package",
148 "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
149 "MetricGroup": "Power",
150 "MetricName": "C3_Pkg_Residency"
151 },
152 {
153 "BriefDescription": "C6 residency percent per package",
154 "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
155 "MetricGroup": "Power",
156 "MetricName": "C6_Pkg_Residency"
157 },
158 {
159 "BriefDescription": "C7 residency percent per package",
160 "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
161 "MetricGroup": "Power",
162 "MetricName": "C7_Pkg_Residency"
163 }
164]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
new file mode 100644
index 000000000000..fd7d7c438226
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
@@ -0,0 +1,140 @@
1[
2 {
3 "BriefDescription": "Instructions Per Cycle (per logical thread)",
4 "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
5 "MetricGroup": "TopDownL1",
6 "MetricName": "IPC"
7 },
8 {
9 "BriefDescription": "Uops Per Instruction",
10 "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
11 "MetricGroup": "Pipeline",
12 "MetricName": "UPI"
13 },
14 {
15    "BriefDescription": "Rough estimation of the fraction of fetched-line bytes that were likely consumed by program instructions",
16 "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
17 "MetricGroup": "Frontend",
18 "MetricName": "IFetch_Line_Utilization"
19 },
20 {
21 "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
22 "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
23 "MetricGroup": "DSB; Frontend_Bandwidth",
24 "MetricName": "DSB_Coverage"
25 },
26 {
27 "BriefDescription": "Cycles Per Instruction (threaded)",
28 "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
29 "MetricGroup": "Pipeline;Summary",
30 "MetricName": "CPI"
31 },
32 {
33 "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
34 "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
35 "MetricGroup": "Summary",
36 "MetricName": "CLKS"
37 },
38 {
39 "BriefDescription": "Total issue-pipeline slots",
40 "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
41 "MetricGroup": "TopDownL1",
42 "MetricName": "SLOTS"
43 },
44 {
45 "BriefDescription": "Total number of retired Instructions",
46 "MetricExpr": "INST_RETIRED.ANY",
47 "MetricGroup": "Summary",
48 "MetricName": "Instructions"
49 },
50 {
51 "BriefDescription": "Instructions Per Cycle (per physical core)",
52 "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
53 "MetricGroup": "SMT",
54 "MetricName": "CoreIPC"
55 },
56 {
57 "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
58 "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
59 "MetricGroup": "Pipeline;Ports_Utilization",
60 "MetricName": "ILP"
61 },
62 {
63 "BriefDescription": "Core actual clocks when any thread is active on the physical core",
64 "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
65 "MetricGroup": "SMT",
66 "MetricName": "CORE_CLKS"
67 },
68 {
69 "BriefDescription": "Average CPU Utilization",
70 "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
71 "MetricGroup": "Summary",
72 "MetricName": "CPU_Utilization"
73 },
74 {
75 "BriefDescription": "Giga Floating Point Operations Per Second",
76 "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
77 "MetricGroup": "FLOPS;Summary",
78 "MetricName": "GFLOPs"
79 },
80 {
81    "BriefDescription": "Average Frequency Utilization relative to nominal frequency",
82 "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
83 "MetricGroup": "Power",
84 "MetricName": "Turbo_Utilization"
85 },
86 {
87 "BriefDescription": "Fraction of cycles where both hardware threads were active",
88 "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
89 "MetricGroup": "SMT;Summary",
90 "MetricName": "SMT_2T_Utilization"
91 },
92 {
93 "BriefDescription": "Fraction of cycles spent in Kernel mode",
94 "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
95 "MetricGroup": "Summary",
96 "MetricName": "Kernel_Utilization"
97 },
98 {
99 "BriefDescription": "C3 residency percent per core",
100 "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
101 "MetricGroup": "Power",
102 "MetricName": "C3_Core_Residency"
103 },
104 {
105 "BriefDescription": "C6 residency percent per core",
106 "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
107 "MetricGroup": "Power",
108 "MetricName": "C6_Core_Residency"
109 },
110 {
111 "BriefDescription": "C7 residency percent per core",
112 "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
113 "MetricGroup": "Power",
114 "MetricName": "C7_Core_Residency"
115 },
116 {
117 "BriefDescription": "C2 residency percent per package",
118 "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
119 "MetricGroup": "Power",
120 "MetricName": "C2_Pkg_Residency"
121 },
122 {
123 "BriefDescription": "C3 residency percent per package",
124 "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
125 "MetricGroup": "Power",
126 "MetricName": "C3_Pkg_Residency"
127 },
128 {
129 "BriefDescription": "C6 residency percent per package",
130 "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
131 "MetricGroup": "Power",
132 "MetricName": "C6_Pkg_Residency"
133 },
134 {
135 "BriefDescription": "C7 residency percent per package",
136 "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
137 "MetricGroup": "Power",
138 "MetricName": "C7_Pkg_Residency"
139 }
140]
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index 4ea068366c3e..fe1a2c47cabf 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -9,6 +9,7 @@ GenuineIntel-6-27,v4,bonnell,core
9 GenuineIntel-6-36,v4,bonnell,core
10 GenuineIntel-6-35,v4,bonnell,core
11 GenuineIntel-6-5C,v8,goldmont,core
12 +GenuineIntel-6-7A,v1,goldmontplus,core
13 GenuineIntel-6-3C,v24,haswell,core
14 GenuineIntel-6-45,v24,haswell,core
15 GenuineIntel-6-46,v24,haswell,core
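The new mapfile row is what routes CPUs reporting family 6, model 0x7A to the goldmontplus event tables. Conceptually, the first column is a pattern matched against the running CPU's vendor-family-model string; the Python below is only a hypothetical sketch of that lookup (perf's actual lookup is C code generated from this CSV at build time).

import csv
import re

def event_dir_for(cpuid, mapfile="tools/perf/pmu-events/arch/x86/mapfile.csv"):
    # Hypothetical re-implementation of the mapfile lookup for illustration:
    # the first CSV column is treated as a pattern over the cpuid string.
    with open(mapfile, newline="") as f:
        for row in csv.reader(f):
            if not row or row[0].startswith("Family"):   # skip the header row
                continue
            pattern, _version, directory, _event_type = row[:4]
            if re.fullmatch(pattern, cpuid):
                return directory
    return None

# A Goldmont Plus part now resolves to the newly added tables:
print(event_dir_for("GenuineIntel-6-7A"))    # -> "goldmontplus"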
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
new file mode 100644
index 000000000000..fd7d7c438226
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
@@ -0,0 +1,140 @@
1[
2 {
3 "BriefDescription": "Instructions Per Cycle (per logical thread)",
4 "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
5 "MetricGroup": "TopDownL1",
6 "MetricName": "IPC"
7 },
8 {
9 "BriefDescription": "Uops Per Instruction",
10 "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
11 "MetricGroup": "Pipeline",
12 "MetricName": "UPI"
13 },
14 {
15    "BriefDescription": "Rough estimation of the fraction of fetched-line bytes that were likely consumed by program instructions",
16 "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
17 "MetricGroup": "Frontend",
18 "MetricName": "IFetch_Line_Utilization"
19 },
20 {
21 "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
22 "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
23 "MetricGroup": "DSB; Frontend_Bandwidth",
24 "MetricName": "DSB_Coverage"
25 },
26 {
27 "BriefDescription": "Cycles Per Instruction (threaded)",
28 "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
29 "MetricGroup": "Pipeline;Summary",
30 "MetricName": "CPI"
31 },
32 {
33 "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
34 "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
35 "MetricGroup": "Summary",
36 "MetricName": "CLKS"
37 },
38 {
39 "BriefDescription": "Total issue-pipeline slots",
40 "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
41 "MetricGroup": "TopDownL1",
42 "MetricName": "SLOTS"
43 },
44 {
45 "BriefDescription": "Total number of retired Instructions",
46 "MetricExpr": "INST_RETIRED.ANY",
47 "MetricGroup": "Summary",
48 "MetricName": "Instructions"
49 },
50 {
51 "BriefDescription": "Instructions Per Cycle (per physical core)",
52 "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
53 "MetricGroup": "SMT",
54 "MetricName": "CoreIPC"
55 },
56 {
57 "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
58 "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
59 "MetricGroup": "Pipeline;Ports_Utilization",
60 "MetricName": "ILP"
61 },
62 {
63 "BriefDescription": "Core actual clocks when any thread is active on the physical core",
64 "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
65 "MetricGroup": "SMT",
66 "MetricName": "CORE_CLKS"
67 },
68 {
69 "BriefDescription": "Average CPU Utilization",
70 "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
71 "MetricGroup": "Summary",
72 "MetricName": "CPU_Utilization"
73 },
74 {
75 "BriefDescription": "Giga Floating Point Operations Per Second",
76 "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
77 "MetricGroup": "FLOPS;Summary",
78 "MetricName": "GFLOPs"
79 },
80 {
81    "BriefDescription": "Average Frequency Utilization relative to nominal frequency",
82 "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
83 "MetricGroup": "Power",
84 "MetricName": "Turbo_Utilization"
85 },
86 {
87 "BriefDescription": "Fraction of cycles where both hardware threads were active",
88 "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
89 "MetricGroup": "SMT;Summary",
90 "MetricName": "SMT_2T_Utilization"
91 },
92 {
93 "BriefDescription": "Fraction of cycles spent in Kernel mode",
94 "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
95 "MetricGroup": "Summary",
96 "MetricName": "Kernel_Utilization"
97 },
98 {
99 "BriefDescription": "C3 residency percent per core",
100 "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
101 "MetricGroup": "Power",
102 "MetricName": "C3_Core_Residency"
103 },
104 {
105 "BriefDescription": "C6 residency percent per core",
106 "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
107 "MetricGroup": "Power",
108 "MetricName": "C6_Core_Residency"
109 },
110 {
111 "BriefDescription": "C7 residency percent per core",
112 "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
113 "MetricGroup": "Power",
114 "MetricName": "C7_Core_Residency"
115 },
116 {
117 "BriefDescription": "C2 residency percent per package",
118 "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
119 "MetricGroup": "Power",
120 "MetricName": "C2_Pkg_Residency"
121 },
122 {
123 "BriefDescription": "C3 residency percent per package",
124 "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
125 "MetricGroup": "Power",
126 "MetricName": "C3_Pkg_Residency"
127 },
128 {
129 "BriefDescription": "C6 residency percent per package",
130 "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
131 "MetricGroup": "Power",
132 "MetricName": "C6_Pkg_Residency"
133 },
134 {
135 "BriefDescription": "C7 residency percent per package",
136 "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
137 "MetricGroup": "Power",
138 "MetricName": "C7_Pkg_Residency"
139 }
140]
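Each of the C-state residency metrics above has the same shape: a residency counter expressed as a percentage of TSC ticks over the same interval. A tiny sketch with made-up readings of cstate_core/c6-residency/ and msr/tsc/:

# Made-up counter readings taken over one measurement interval.
c6_residency_ticks = 1.2e9
tsc_ticks = 4.8e9

c6_core_residency = (c6_residency_ticks / tsc_ticks) * 100   # percent
print(f"C6_Core_Residency = {c6_core_residency:.1f}%")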
diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
new file mode 100644
index 000000000000..36c903faed0b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
@@ -0,0 +1,164 @@
1[
2 {
3 "BriefDescription": "Instructions Per Cycle (per logical thread)",
4 "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
5 "MetricGroup": "TopDownL1",
6 "MetricName": "IPC"
7 },
8 {
9 "BriefDescription": "Uops Per Instruction",
10 "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
11 "MetricGroup": "Pipeline",
12 "MetricName": "UPI"
13 },
14 {
15    "BriefDescription": "Rough estimation of the fraction of fetched-line bytes that were likely consumed by program instructions",
16 "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ((UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )",
17 "MetricGroup": "Frontend",
18 "MetricName": "IFetch_Line_Utilization"
19 },
20 {
21 "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
22 "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
23 "MetricGroup": "DSB; Frontend_Bandwidth",
24 "MetricName": "DSB_Coverage"
25 },
26 {
27 "BriefDescription": "Cycles Per Instruction (threaded)",
28 "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
29 "MetricGroup": "Pipeline;Summary",
30 "MetricName": "CPI"
31 },
32 {
33 "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
34 "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
35 "MetricGroup": "Summary",
36 "MetricName": "CLKS"
37 },
38 {
39 "BriefDescription": "Total issue-pipeline slots",
40 "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
41 "MetricGroup": "TopDownL1",
42 "MetricName": "SLOTS"
43 },
44 {
45 "BriefDescription": "Total number of retired Instructions",
46 "MetricExpr": "INST_RETIRED.ANY",
47 "MetricGroup": "Summary",
48 "MetricName": "Instructions"
49 },
50 {
51 "BriefDescription": "Instructions Per Cycle (per physical core)",
52 "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
53 "MetricGroup": "SMT",
54 "MetricName": "CoreIPC"
55 },
56 {
57 "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
58 "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
59 "MetricGroup": "Pipeline;Ports_Utilization",
60 "MetricName": "ILP"
61 },
62 {
63 "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
64 "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END)",
65 "MetricGroup": "Unknown_Branches",
66 "MetricName": "BAClear_Cost"
67 },
68 {
69 "BriefDescription": "Core actual clocks when any thread is active on the physical core",
70 "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
71 "MetricGroup": "SMT",
72 "MetricName": "CORE_CLKS"
73 },
74 {
75 "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
76 "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
77 "MetricGroup": "Memory_Bound;Memory_Lat",
78 "MetricName": "Load_Miss_Real_Latency"
79 },
80 {
81 "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
82 "MetricExpr": "L1D_PEND_MISS.PENDING / (( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
83 "MetricGroup": "Memory_Bound;Memory_BW",
84 "MetricName": "MLP"
85 },
86 {
87 "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
88 "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles) )",
89 "MetricGroup": "TLB",
90 "MetricName": "Page_Walks_Utilization"
91 },
92 {
93 "BriefDescription": "Average CPU Utilization",
94 "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
95 "MetricGroup": "Summary",
96 "MetricName": "CPU_Utilization"
97 },
98 {
99 "BriefDescription": "Giga Floating Point Operations Per Second",
100 "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
101 "MetricGroup": "FLOPS;Summary",
102 "MetricName": "GFLOPs"
103 },
104 {
105 "BriefDescription": "Average Frequency Utilization relative nominal frequency",
106 "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
107 "MetricGroup": "Power",
108 "MetricName": "Turbo_Utilization"
109 },
110 {
111 "BriefDescription": "Fraction of cycles where both hardware threads were active",
112 "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
113 "MetricGroup": "SMT;Summary",
114 "MetricName": "SMT_2T_Utilization"
115 },
116 {
117 "BriefDescription": "Fraction of cycles spent in Kernel mode",
118 "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
119 "MetricGroup": "Summary",
120 "MetricName": "Kernel_Utilization"
121 },
122 {
123 "BriefDescription": "C3 residency percent per core",
124 "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
125 "MetricGroup": "Power",
126 "MetricName": "C3_Core_Residency"
127 },
128 {
129 "BriefDescription": "C6 residency percent per core",
130 "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
131 "MetricGroup": "Power",
132 "MetricName": "C6_Core_Residency"
133 },
134 {
135 "BriefDescription": "C7 residency percent per core",
136 "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
137 "MetricGroup": "Power",
138 "MetricName": "C7_Core_Residency"
139 },
140 {
141 "BriefDescription": "C2 residency percent per package",
142 "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
143 "MetricGroup": "Power",
144 "MetricName": "C2_Pkg_Residency"
145 },
146 {
147 "BriefDescription": "C3 residency percent per package",
148 "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
149 "MetricGroup": "Power",
150 "MetricName": "C3_Pkg_Residency"
151 },
152 {
153 "BriefDescription": "C6 residency percent per package",
154 "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
155 "MetricGroup": "Power",
156 "MetricName": "C6_Pkg_Residency"
157 },
158 {
159 "BriefDescription": "C7 residency percent per package",
160 "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
161 "MetricGroup": "Power",
162 "MetricName": "C7_Pkg_Residency"
163 }
164]
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
new file mode 100644
index 000000000000..36c903faed0b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
@@ -0,0 +1,164 @@
1[
2 {
3 "BriefDescription": "Instructions Per Cycle (per logical thread)",
4 "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
5 "MetricGroup": "TopDownL1",
6 "MetricName": "IPC"
7 },
8 {
9 "BriefDescription": "Uops Per Instruction",
10 "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
11 "MetricGroup": "Pipeline",
12 "MetricName": "UPI"
13 },
14 {
15 "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
16 "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ((UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )",
17 "MetricGroup": "Frontend",
18 "MetricName": "IFetch_Line_Utilization"
19 },
20 {
21 "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
22 "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
23 "MetricGroup": "DSB; Frontend_Bandwidth",
24 "MetricName": "DSB_Coverage"
25 },
26 {
27 "BriefDescription": "Cycles Per Instruction (threaded)",
28 "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
29 "MetricGroup": "Pipeline;Summary",
30 "MetricName": "CPI"
31 },
32 {
33 "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
34 "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
35 "MetricGroup": "Summary",
36 "MetricName": "CLKS"
37 },
38 {
39 "BriefDescription": "Total issue-pipeline slots",
40 "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
41 "MetricGroup": "TopDownL1",
42 "MetricName": "SLOTS"
43 },
44 {
45 "BriefDescription": "Total number of retired Instructions",
46 "MetricExpr": "INST_RETIRED.ANY",
47 "MetricGroup": "Summary",
48 "MetricName": "Instructions"
49 },
50 {
51 "BriefDescription": "Instructions Per Cycle (per physical core)",
52 "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
53 "MetricGroup": "SMT",
54 "MetricName": "CoreIPC"
55 },
56 {
57 "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
58 "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
59 "MetricGroup": "Pipeline;Ports_Utilization",
60 "MetricName": "ILP"
61 },
62 {
63 "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
64 "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END)",
65 "MetricGroup": "Unknown_Branches",
66 "MetricName": "BAClear_Cost"
67 },
68 {
69 "BriefDescription": "Core actual clocks when any thread is active on the physical core",
70 "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
71 "MetricGroup": "SMT",
72 "MetricName": "CORE_CLKS"
73 },
74 {
75 "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
76 "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
77 "MetricGroup": "Memory_Bound;Memory_Lat",
78 "MetricName": "Load_Miss_Real_Latency"
79 },
80 {
81 "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
82 "MetricExpr": "L1D_PEND_MISS.PENDING / (( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
83 "MetricGroup": "Memory_Bound;Memory_BW",
84 "MetricName": "MLP"
85 },
86 {
87 "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
88 "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles) )",
89 "MetricGroup": "TLB",
90 "MetricName": "Page_Walks_Utilization"
91 },
92 {
93 "BriefDescription": "Average CPU Utilization",
94 "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
95 "MetricGroup": "Summary",
96 "MetricName": "CPU_Utilization"
97 },
98 {
99 "BriefDescription": "Giga Floating Point Operations Per Second",
100 "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
101 "MetricGroup": "FLOPS;Summary",
102 "MetricName": "GFLOPs"
103 },
104 {
105 "BriefDescription": "Average Frequency Utilization relative nominal frequency",
106 "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
107 "MetricGroup": "Power",
108 "MetricName": "Turbo_Utilization"
109 },
110 {
111 "BriefDescription": "Fraction of cycles where both hardware threads were active",
112 "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
113 "MetricGroup": "SMT;Summary",
114 "MetricName": "SMT_2T_Utilization"
115 },
116 {
117 "BriefDescription": "Fraction of cycles spent in Kernel mode",
118 "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
119 "MetricGroup": "Summary",
120 "MetricName": "Kernel_Utilization"
121 },
122 {
123 "BriefDescription": "C3 residency percent per core",
124 "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
125 "MetricGroup": "Power",
126 "MetricName": "C3_Core_Residency"
127 },
128 {
129 "BriefDescription": "C6 residency percent per core",
130 "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
131 "MetricGroup": "Power",
132 "MetricName": "C6_Core_Residency"
133 },
134 {
135 "BriefDescription": "C7 residency percent per core",
136 "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
137 "MetricGroup": "Power",
138 "MetricName": "C7_Core_Residency"
139 },
140 {
141 "BriefDescription": "C2 residency percent per package",
142 "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
143 "MetricGroup": "Power",
144 "MetricName": "C2_Pkg_Residency"
145 },
146 {
147 "BriefDescription": "C3 residency percent per package",
148 "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
149 "MetricGroup": "Power",
150 "MetricName": "C3_Pkg_Residency"
151 },
152 {
153 "BriefDescription": "C6 residency percent per package",
154 "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
155 "MetricGroup": "Power",
156 "MetricName": "C6_Pkg_Residency"
157 },
158 {
159 "BriefDescription": "C7 residency percent per package",
160 "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
161 "MetricGroup": "Power",
162 "MetricName": "C7_Pkg_Residency"
163 }
164]
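Several expressions in both metric files are SMT-aware: when #SMT_on holds they use CPU_CLK_UNHALTED.THREAD_ANY / 2 so that core-scoped metrics (SLOTS, CoreIPC, CORE_CLKS, Page_Walks_Utilization) are not double-counted across the two hardware threads. A standalone C sketch of that conditional, with hypothetical counter values:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: the "( THREAD_ANY / 2 ) if #SMT_on else cycles"
 * pattern used by SLOTS, CoreIPC and CORE_CLKS above.
 */
static double core_clks(unsigned long long clk_thread_any,
                        unsigned long long clk_thread, bool smt_on)
{
        return smt_on ? clk_thread_any / 2.0 : (double)clk_thread;
}

int main(void)
{
        unsigned long long thread_any = 8000000000ULL; /* hypothetical */
        unsigned long long thread     = 4100000000ULL; /* hypothetical */
        unsigned long long inst       = 9000000000ULL; /* hypothetical */
        double clks = core_clks(thread_any, thread, true);

        printf("SLOTS   = %.0f\n", 4.0 * clks);
        printf("CoreIPC = %.2f\n", (double)inst / clks);
        return 0;
}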
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index d51dc9ca8861..9eb7047bafe4 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -292,7 +292,7 @@ static int print_events_table_entry(void *data, char *name, char *event,
292 char *desc, char *long_desc, 292 char *desc, char *long_desc,
293 char *pmu, char *unit, char *perpkg, 293 char *pmu, char *unit, char *perpkg,
294 char *metric_expr, 294 char *metric_expr,
295 char *metric_name) 295 char *metric_name, char *metric_group)
296{ 296{
297 struct perf_entry_data *pd = data; 297 struct perf_entry_data *pd = data;
298 FILE *outfp = pd->outfp; 298 FILE *outfp = pd->outfp;
@@ -304,8 +304,10 @@ static int print_events_table_entry(void *data, char *name, char *event,
304 */ 304 */
305 fprintf(outfp, "{\n"); 305 fprintf(outfp, "{\n");
306 306
307 fprintf(outfp, "\t.name = \"%s\",\n", name); 307 if (name)
308 fprintf(outfp, "\t.event = \"%s\",\n", event); 308 fprintf(outfp, "\t.name = \"%s\",\n", name);
309 if (event)
310 fprintf(outfp, "\t.event = \"%s\",\n", event);
309 fprintf(outfp, "\t.desc = \"%s\",\n", desc); 311 fprintf(outfp, "\t.desc = \"%s\",\n", desc);
310 fprintf(outfp, "\t.topic = \"%s\",\n", topic); 312 fprintf(outfp, "\t.topic = \"%s\",\n", topic);
311 if (long_desc && long_desc[0]) 313 if (long_desc && long_desc[0])
@@ -320,6 +322,8 @@ static int print_events_table_entry(void *data, char *name, char *event,
320 fprintf(outfp, "\t.metric_expr = \"%s\",\n", metric_expr); 322 fprintf(outfp, "\t.metric_expr = \"%s\",\n", metric_expr);
321 if (metric_name) 323 if (metric_name)
322 fprintf(outfp, "\t.metric_name = \"%s\",\n", metric_name); 324 fprintf(outfp, "\t.metric_name = \"%s\",\n", metric_name);
325 if (metric_group)
326 fprintf(outfp, "\t.metric_group = \"%s\",\n", metric_group);
323 fprintf(outfp, "},\n"); 327 fprintf(outfp, "},\n");
324 328
325 return 0; 329 return 0;
@@ -357,6 +361,9 @@ static char *real_event(const char *name, char *event)
357{ 361{
358 int i; 362 int i;
359 363
364 if (!name)
365 return NULL;
366
360 for (i = 0; fixed[i].name; i++) 367 for (i = 0; fixed[i].name; i++)
361 if (!strcasecmp(name, fixed[i].name)) 368 if (!strcasecmp(name, fixed[i].name))
362 return (char *)fixed[i].event; 369 return (char *)fixed[i].event;
@@ -369,7 +376,7 @@ int json_events(const char *fn,
369 char *long_desc, 376 char *long_desc,
370 char *pmu, char *unit, char *perpkg, 377 char *pmu, char *unit, char *perpkg,
371 char *metric_expr, 378 char *metric_expr,
372 char *metric_name), 379 char *metric_name, char *metric_group),
373 void *data) 380 void *data)
374{ 381{
375 int err = -EIO; 382 int err = -EIO;
@@ -397,6 +404,7 @@ int json_events(const char *fn,
397 char *unit = NULL; 404 char *unit = NULL;
398 char *metric_expr = NULL; 405 char *metric_expr = NULL;
399 char *metric_name = NULL; 406 char *metric_name = NULL;
407 char *metric_group = NULL;
400 unsigned long long eventcode = 0; 408 unsigned long long eventcode = 0;
401 struct msrmap *msr = NULL; 409 struct msrmap *msr = NULL;
402 jsmntok_t *msrval = NULL; 410 jsmntok_t *msrval = NULL;
@@ -476,6 +484,8 @@ int json_events(const char *fn,
476 addfield(map, &perpkg, "", "", val); 484 addfield(map, &perpkg, "", "", val);
477 } else if (json_streq(map, field, "MetricName")) { 485 } else if (json_streq(map, field, "MetricName")) {
478 addfield(map, &metric_name, "", "", val); 486 addfield(map, &metric_name, "", "", val);
487 } else if (json_streq(map, field, "MetricGroup")) {
488 addfield(map, &metric_group, "", "", val);
479 } else if (json_streq(map, field, "MetricExpr")) { 489 } else if (json_streq(map, field, "MetricExpr")) {
480 addfield(map, &metric_expr, "", "", val); 490 addfield(map, &metric_expr, "", "", val);
481 for (s = metric_expr; *s; s++) 491 for (s = metric_expr; *s; s++)
@@ -501,10 +511,11 @@ int json_events(const char *fn,
501 addfield(map, &event, ",", filter, NULL); 511 addfield(map, &event, ",", filter, NULL);
502 if (msr != NULL) 512 if (msr != NULL)
503 addfield(map, &event, ",", msr->pname, msrval); 513 addfield(map, &event, ",", msr->pname, msrval);
504 fixname(name); 514 if (name)
515 fixname(name);
505 516
506 err = func(data, name, real_event(name, event), desc, long_desc, 517 err = func(data, name, real_event(name, event), desc, long_desc,
507 pmu, unit, perpkg, metric_expr, metric_name); 518 pmu, unit, perpkg, metric_expr, metric_name, metric_group);
508 free(event); 519 free(event);
509 free(desc); 520 free(desc);
510 free(name); 521 free(name);
@@ -516,6 +527,7 @@ int json_events(const char *fn,
516 free(unit); 527 free(unit);
517 free(metric_expr); 528 free(metric_expr);
518 free(metric_name); 529 free(metric_name);
530 free(metric_group);
519 if (err) 531 if (err)
520 break; 532 break;
521 tok += j; 533 tok += j;
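With the NULL checks on name and event above, a JSON entry that carries only MetricName, MetricExpr and MetricGroup (no EventName or EventCode) can flow through print_events_table_entry() without emitting bogus .name/.event fields. Assuming the IPC entry from skl-metrics.json above, the generated table entry would look roughly like the following C fragment (the .topic string is derived from the file name and is shown here only as an illustration):

{
        .desc = "Instructions Per Cycle (per logical thread)",
        .topic = "skl metrics",
        .metric_expr = "inst_retired.any / cpu_clk_unhalted.thread",
        .metric_name = "IPC",
        .metric_group = "TopDownL1",
},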
diff --git a/tools/perf/pmu-events/jevents.h b/tools/perf/pmu-events/jevents.h
index d87efd2685b8..4684c673c445 100644
--- a/tools/perf/pmu-events/jevents.h
+++ b/tools/perf/pmu-events/jevents.h
@@ -7,7 +7,7 @@ int json_events(const char *fn,
7 char *long_desc, 7 char *long_desc,
8 char *pmu, 8 char *pmu,
9 char *unit, char *perpkg, char *metric_expr, 9 char *unit, char *perpkg, char *metric_expr,
10 char *metric_name), 10 char *metric_name, char *metric_group),
11 void *data); 11 void *data);
12char *get_cpu_str(void); 12char *get_cpu_str(void);
13 13
diff --git a/tools/perf/pmu-events/pmu-events.h b/tools/perf/pmu-events/pmu-events.h
index e08789ddfe6c..92a4d15ee0b9 100644
--- a/tools/perf/pmu-events/pmu-events.h
+++ b/tools/perf/pmu-events/pmu-events.h
@@ -16,6 +16,7 @@ struct pmu_event {
16 const char *perpkg; 16 const char *perpkg;
17 const char *metric_expr; 17 const char *metric_expr;
18 const char *metric_name; 18 const char *metric_name;
19 const char *metric_group;
19}; 20};
20 21
21/* 22/*
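The new metric_group member gives each table entry an optional group label, so front ends can list or select related metrics together. A standalone C sketch of that kind of consumer, with a local struct mirroring just the fields of interest and made-up table contents:

#include <stdio.h>
#include <string.h>

/* Illustrative only: filter a pmu_event-style table by metric_group.
 * The struct mirrors a subset of the fields in pmu-events.h; the table
 * below is example data, not the generated perf tables.
 */
struct example_pmu_event {
        const char *metric_expr;
        const char *metric_name;
        const char *metric_group;
};

static const struct example_pmu_event table[] = {
        { "inst_retired.any / cpu_clk_unhalted.thread",   "IPC", "TopDownL1" },
        { "uops_retired.retire_slots / inst_retired.any", "UPI", "Pipeline"  },
        { NULL, NULL, NULL },
};

int main(void)
{
        const struct example_pmu_event *pe;

        for (pe = table; pe->metric_name; pe++)
                if (pe->metric_group && !strcmp(pe->metric_group, "TopDownL1"))
                        printf("%s: %s\n", pe->metric_name, pe->metric_expr);
        return 0;
}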
diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
index c180bbcdbef6..0e1367f90af5 100644
--- a/tools/perf/tests/attr.c
+++ b/tools/perf/tests/attr.c
@@ -167,7 +167,7 @@ static int run_dir(const char *d, const char *perf)
167 snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s", 167 snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s",
168 d, d, perf, vcnt, v); 168 d, d, perf, vcnt, v);
169 169
170 return system(cmd); 170 return system(cmd) ? TEST_FAIL : TEST_OK;
171} 171}
172 172
173int test__attr(struct test *test __maybe_unused, int subtest __maybe_unused) 173int test__attr(struct test *test __maybe_unused, int subtest __maybe_unused)
diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py
index 907b1b2f56ad..ff9b60b99f52 100644
--- a/tools/perf/tests/attr.py
+++ b/tools/perf/tests/attr.py
@@ -238,6 +238,7 @@ class Test(object):
238 # events in result. Fail if there's not any. 238 # events in result. Fail if there's not any.
239 for exp_name, exp_event in expect.items(): 239 for exp_name, exp_event in expect.items():
240 exp_list = [] 240 exp_list = []
241 res_event = {}
241 log.debug(" matching [%s]" % exp_name) 242 log.debug(" matching [%s]" % exp_name)
242 for res_name, res_event in result.items(): 243 for res_name, res_event in result.items():
243 log.debug(" to [%s]" % res_name) 244 log.debug(" to [%s]" % res_name)
@@ -254,7 +255,10 @@ class Test(object):
254 if exp_event.optional(): 255 if exp_event.optional():
255 log.debug(" %s does not match, but is optional" % exp_name) 256 log.debug(" %s does not match, but is optional" % exp_name)
256 else: 257 else:
257 exp_event.diff(res_event) 258 if not res_event:
259 log.debug(" res_event is empty");
260 else:
261 exp_event.diff(res_event)
258 raise Fail(self, 'match failure'); 262 raise Fail(self, 'match failure');
259 263
260 match[exp_name] = exp_list 264 match[exp_name] = exp_list
diff --git a/tools/perf/tests/attr/base-record b/tools/perf/tests/attr/base-record
index 31e0b1da830b..37940665f736 100644
--- a/tools/perf/tests/attr/base-record
+++ b/tools/perf/tests/attr/base-record
@@ -23,7 +23,7 @@ comm=1
23freq=1 23freq=1
24inherit_stat=0 24inherit_stat=0
25enable_on_exec=1 25enable_on_exec=1
26task=0 26task=1
27watermark=0 27watermark=0
28precise_ip=0|1|2|3 28precise_ip=0|1|2|3
29mmap_data=0 29mmap_data=0
diff --git a/tools/perf/tests/attr/test-record-group b/tools/perf/tests/attr/test-record-group
index 6e7961f6f7a5..618ba1c17474 100644
--- a/tools/perf/tests/attr/test-record-group
+++ b/tools/perf/tests/attr/test-record-group
@@ -17,5 +17,6 @@ sample_type=327
17read_format=4 17read_format=4
18mmap=0 18mmap=0
19comm=0 19comm=0
20task=0
20enable_on_exec=0 21enable_on_exec=0
21disabled=0 22disabled=0
diff --git a/tools/perf/tests/attr/test-record-group-sampling b/tools/perf/tests/attr/test-record-group-sampling
index ef59afd6d635..f906b793196f 100644
--- a/tools/perf/tests/attr/test-record-group-sampling
+++ b/tools/perf/tests/attr/test-record-group-sampling
@@ -23,7 +23,7 @@ sample_type=343
23 23
24# PERF_FORMAT_ID | PERF_FORMAT_GROUP 24# PERF_FORMAT_ID | PERF_FORMAT_GROUP
25read_format=12 25read_format=12
26 26task=0
27mmap=0 27mmap=0
28comm=0 28comm=0
29enable_on_exec=0 29enable_on_exec=0
diff --git a/tools/perf/tests/attr/test-record-group1 b/tools/perf/tests/attr/test-record-group1
index 87a222d014d8..48e8bd12fe46 100644
--- a/tools/perf/tests/attr/test-record-group1
+++ b/tools/perf/tests/attr/test-record-group1
@@ -18,5 +18,6 @@ sample_type=327
18read_format=4 18read_format=4
19mmap=0 19mmap=0
20comm=0 20comm=0
21task=0
21enable_on_exec=0 22enable_on_exec=0
22disabled=0 23disabled=0
diff --git a/tools/perf/tests/attr/test-stat-C0 b/tools/perf/tests/attr/test-stat-C0
index 67717fe6a65d..a2c76d10b2bb 100644
--- a/tools/perf/tests/attr/test-stat-C0
+++ b/tools/perf/tests/attr/test-stat-C0
@@ -7,3 +7,4 @@ ret = 1
7# events are disabled by default when attached to cpu 7# events are disabled by default when attached to cpu
8disabled=1 8disabled=1
9enable_on_exec=0 9enable_on_exec=0
10optional=1
diff --git a/tools/perf/tests/attr/test-stat-basic b/tools/perf/tests/attr/test-stat-basic
index 74e17881f2ba..69867d049fda 100644
--- a/tools/perf/tests/attr/test-stat-basic
+++ b/tools/perf/tests/attr/test-stat-basic
@@ -4,3 +4,4 @@ args = -e cycles kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event:base-stat] 6[event:base-stat]
7optional=1
diff --git a/tools/perf/tests/attr/test-stat-default b/tools/perf/tests/attr/test-stat-default
index e911dbd4eb47..d9e99b3f77e6 100644
--- a/tools/perf/tests/attr/test-stat-default
+++ b/tools/perf/tests/attr/test-stat-default
@@ -32,6 +32,7 @@ config=2
32fd=5 32fd=5
33type=0 33type=0
34config=0 34config=0
35optional=1
35 36
36# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND 37# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
37[event6:base-stat] 38[event6:base-stat]
@@ -52,15 +53,18 @@ optional=1
52fd=8 53fd=8
53type=0 54type=0
54config=1 55config=1
56optional=1
55 57
56# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS 58# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
57[event9:base-stat] 59[event9:base-stat]
58fd=9 60fd=9
59type=0 61type=0
60config=4 62config=4
63optional=1
61 64
62# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES 65# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
63[event10:base-stat] 66[event10:base-stat]
64fd=10 67fd=10
65type=0 68type=0
66config=5 69config=5
70optional=1
diff --git a/tools/perf/tests/attr/test-stat-detailed-1 b/tools/perf/tests/attr/test-stat-detailed-1
index b39270a08e74..8b04a055d154 100644
--- a/tools/perf/tests/attr/test-stat-detailed-1
+++ b/tools/perf/tests/attr/test-stat-detailed-1
@@ -33,6 +33,7 @@ config=2
33fd=5 33fd=5
34type=0 34type=0
35config=0 35config=0
36optional=1
36 37
37# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND 38# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
38[event6:base-stat] 39[event6:base-stat]
@@ -53,18 +54,21 @@ optional=1
53fd=8 54fd=8
54type=0 55type=0
55config=1 56config=1
57optional=1
56 58
57# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS 59# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
58[event9:base-stat] 60[event9:base-stat]
59fd=9 61fd=9
60type=0 62type=0
61config=4 63config=4
64optional=1
62 65
63# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES 66# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
64[event10:base-stat] 67[event10:base-stat]
65fd=10 68fd=10
66type=0 69type=0
67config=5 70config=5
71optional=1
68 72
69# PERF_TYPE_HW_CACHE / 73# PERF_TYPE_HW_CACHE /
70# PERF_COUNT_HW_CACHE_L1D << 0 | 74# PERF_COUNT_HW_CACHE_L1D << 0 |
@@ -74,6 +78,7 @@ config=5
74fd=11 78fd=11
75type=3 79type=3
76config=0 80config=0
81optional=1
77 82
78# PERF_TYPE_HW_CACHE / 83# PERF_TYPE_HW_CACHE /
79# PERF_COUNT_HW_CACHE_L1D << 0 | 84# PERF_COUNT_HW_CACHE_L1D << 0 |
@@ -83,6 +88,7 @@ config=0
83fd=12 88fd=12
84type=3 89type=3
85config=65536 90config=65536
91optional=1
86 92
87# PERF_TYPE_HW_CACHE / 93# PERF_TYPE_HW_CACHE /
88# PERF_COUNT_HW_CACHE_LL << 0 | 94# PERF_COUNT_HW_CACHE_LL << 0 |
@@ -92,6 +98,7 @@ config=65536
92fd=13 98fd=13
93type=3 99type=3
94config=2 100config=2
101optional=1
95 102
96# PERF_TYPE_HW_CACHE, 103# PERF_TYPE_HW_CACHE,
97# PERF_COUNT_HW_CACHE_LL << 0 | 104# PERF_COUNT_HW_CACHE_LL << 0 |
@@ -101,3 +108,4 @@ config=2
101fd=14 108fd=14
102type=3 109type=3
103config=65538 110config=65538
111optional=1
diff --git a/tools/perf/tests/attr/test-stat-detailed-2 b/tools/perf/tests/attr/test-stat-detailed-2
index 45f8e6ea34f8..4fca9f1bfbf8 100644
--- a/tools/perf/tests/attr/test-stat-detailed-2
+++ b/tools/perf/tests/attr/test-stat-detailed-2
@@ -33,6 +33,7 @@ config=2
33fd=5 33fd=5
34type=0 34type=0
35config=0 35config=0
36optional=1
36 37
37# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND 38# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
38[event6:base-stat] 39[event6:base-stat]
@@ -53,18 +54,21 @@ optional=1
53fd=8 54fd=8
54type=0 55type=0
55config=1 56config=1
57optional=1
56 58
57# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS 59# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
58[event9:base-stat] 60[event9:base-stat]
59fd=9 61fd=9
60type=0 62type=0
61config=4 63config=4
64optional=1
62 65
63# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES 66# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
64[event10:base-stat] 67[event10:base-stat]
65fd=10 68fd=10
66type=0 69type=0
67config=5 70config=5
71optional=1
68 72
69# PERF_TYPE_HW_CACHE / 73# PERF_TYPE_HW_CACHE /
70# PERF_COUNT_HW_CACHE_L1D << 0 | 74# PERF_COUNT_HW_CACHE_L1D << 0 |
@@ -74,6 +78,7 @@ config=5
74fd=11 78fd=11
75type=3 79type=3
76config=0 80config=0
81optional=1
77 82
78# PERF_TYPE_HW_CACHE / 83# PERF_TYPE_HW_CACHE /
79# PERF_COUNT_HW_CACHE_L1D << 0 | 84# PERF_COUNT_HW_CACHE_L1D << 0 |
@@ -83,6 +88,7 @@ config=0
83fd=12 88fd=12
84type=3 89type=3
85config=65536 90config=65536
91optional=1
86 92
87# PERF_TYPE_HW_CACHE / 93# PERF_TYPE_HW_CACHE /
88# PERF_COUNT_HW_CACHE_LL << 0 | 94# PERF_COUNT_HW_CACHE_LL << 0 |
@@ -92,6 +98,7 @@ config=65536
92fd=13 98fd=13
93type=3 99type=3
94config=2 100config=2
101optional=1
95 102
96# PERF_TYPE_HW_CACHE, 103# PERF_TYPE_HW_CACHE,
97# PERF_COUNT_HW_CACHE_LL << 0 | 104# PERF_COUNT_HW_CACHE_LL << 0 |
@@ -101,6 +108,7 @@ config=2
101fd=14 108fd=14
102type=3 109type=3
103config=65538 110config=65538
111optional=1
104 112
105# PERF_TYPE_HW_CACHE, 113# PERF_TYPE_HW_CACHE,
106# PERF_COUNT_HW_CACHE_L1I << 0 | 114# PERF_COUNT_HW_CACHE_L1I << 0 |
@@ -120,6 +128,7 @@ optional=1
120fd=16 128fd=16
121type=3 129type=3
122config=65537 130config=65537
131optional=1
123 132
124# PERF_TYPE_HW_CACHE, 133# PERF_TYPE_HW_CACHE,
125# PERF_COUNT_HW_CACHE_DTLB << 0 | 134# PERF_COUNT_HW_CACHE_DTLB << 0 |
@@ -129,6 +138,7 @@ config=65537
129fd=17 138fd=17
130type=3 139type=3
131config=3 140config=3
141optional=1
132 142
133# PERF_TYPE_HW_CACHE, 143# PERF_TYPE_HW_CACHE,
134# PERF_COUNT_HW_CACHE_DTLB << 0 | 144# PERF_COUNT_HW_CACHE_DTLB << 0 |
@@ -138,6 +148,7 @@ config=3
138fd=18 148fd=18
139type=3 149type=3
140config=65539 150config=65539
151optional=1
141 152
142# PERF_TYPE_HW_CACHE, 153# PERF_TYPE_HW_CACHE,
143# PERF_COUNT_HW_CACHE_ITLB << 0 | 154# PERF_COUNT_HW_CACHE_ITLB << 0 |
@@ -147,6 +158,7 @@ config=65539
147fd=19 158fd=19
148type=3 159type=3
149config=4 160config=4
161optional=1
150 162
151# PERF_TYPE_HW_CACHE, 163# PERF_TYPE_HW_CACHE,
152# PERF_COUNT_HW_CACHE_ITLB << 0 | 164# PERF_COUNT_HW_CACHE_ITLB << 0 |
@@ -156,3 +168,4 @@ config=4
156fd=20 168fd=20
157type=3 169type=3
158config=65540 170config=65540
171optional=1
diff --git a/tools/perf/tests/attr/test-stat-detailed-3 b/tools/perf/tests/attr/test-stat-detailed-3
index 30ae0fb7a3fd..4bb58e1c82a6 100644
--- a/tools/perf/tests/attr/test-stat-detailed-3
+++ b/tools/perf/tests/attr/test-stat-detailed-3
@@ -33,6 +33,7 @@ config=2
33fd=5 33fd=5
34type=0 34type=0
35config=0 35config=0
36optional=1
36 37
37# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND 38# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
38[event6:base-stat] 39[event6:base-stat]
@@ -53,18 +54,21 @@ optional=1
53fd=8 54fd=8
54type=0 55type=0
55config=1 56config=1
57optional=1
56 58
57# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS 59# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
58[event9:base-stat] 60[event9:base-stat]
59fd=9 61fd=9
60type=0 62type=0
61config=4 63config=4
64optional=1
62 65
63# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES 66# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
64[event10:base-stat] 67[event10:base-stat]
65fd=10 68fd=10
66type=0 69type=0
67config=5 70config=5
71optional=1
68 72
69# PERF_TYPE_HW_CACHE / 73# PERF_TYPE_HW_CACHE /
70# PERF_COUNT_HW_CACHE_L1D << 0 | 74# PERF_COUNT_HW_CACHE_L1D << 0 |
@@ -74,6 +78,7 @@ config=5
74fd=11 78fd=11
75type=3 79type=3
76config=0 80config=0
81optional=1
77 82
78# PERF_TYPE_HW_CACHE / 83# PERF_TYPE_HW_CACHE /
79# PERF_COUNT_HW_CACHE_L1D << 0 | 84# PERF_COUNT_HW_CACHE_L1D << 0 |
@@ -83,6 +88,7 @@ config=0
83fd=12 88fd=12
84type=3 89type=3
85config=65536 90config=65536
91optional=1
86 92
87# PERF_TYPE_HW_CACHE / 93# PERF_TYPE_HW_CACHE /
88# PERF_COUNT_HW_CACHE_LL << 0 | 94# PERF_COUNT_HW_CACHE_LL << 0 |
@@ -92,6 +98,7 @@ config=65536
92fd=13 98fd=13
93type=3 99type=3
94config=2 100config=2
101optional=1
95 102
96# PERF_TYPE_HW_CACHE, 103# PERF_TYPE_HW_CACHE,
97# PERF_COUNT_HW_CACHE_LL << 0 | 104# PERF_COUNT_HW_CACHE_LL << 0 |
@@ -101,6 +108,7 @@ config=2
101fd=14 108fd=14
102type=3 109type=3
103config=65538 110config=65538
111optional=1
104 112
105# PERF_TYPE_HW_CACHE, 113# PERF_TYPE_HW_CACHE,
106# PERF_COUNT_HW_CACHE_L1I << 0 | 114# PERF_COUNT_HW_CACHE_L1I << 0 |
@@ -120,6 +128,7 @@ optional=1
120fd=16 128fd=16
121type=3 129type=3
122config=65537 130config=65537
131optional=1
123 132
124# PERF_TYPE_HW_CACHE, 133# PERF_TYPE_HW_CACHE,
125# PERF_COUNT_HW_CACHE_DTLB << 0 | 134# PERF_COUNT_HW_CACHE_DTLB << 0 |
@@ -129,6 +138,7 @@ config=65537
129fd=17 138fd=17
130type=3 139type=3
131config=3 140config=3
141optional=1
132 142
133# PERF_TYPE_HW_CACHE, 143# PERF_TYPE_HW_CACHE,
134# PERF_COUNT_HW_CACHE_DTLB << 0 | 144# PERF_COUNT_HW_CACHE_DTLB << 0 |
@@ -138,6 +148,7 @@ config=3
138fd=18 148fd=18
139type=3 149type=3
140config=65539 150config=65539
151optional=1
141 152
142# PERF_TYPE_HW_CACHE, 153# PERF_TYPE_HW_CACHE,
143# PERF_COUNT_HW_CACHE_ITLB << 0 | 154# PERF_COUNT_HW_CACHE_ITLB << 0 |
@@ -147,6 +158,7 @@ config=65539
147fd=19 158fd=19
148type=3 159type=3
149config=4 160config=4
161optional=1
150 162
151# PERF_TYPE_HW_CACHE, 163# PERF_TYPE_HW_CACHE,
152# PERF_COUNT_HW_CACHE_ITLB << 0 | 164# PERF_COUNT_HW_CACHE_ITLB << 0 |
@@ -156,6 +168,7 @@ config=4
156fd=20 168fd=20
157type=3 169type=3
158config=65540 170config=65540
171optional=1
159 172
160# PERF_TYPE_HW_CACHE, 173# PERF_TYPE_HW_CACHE,
161# PERF_COUNT_HW_CACHE_L1D << 0 | 174# PERF_COUNT_HW_CACHE_L1D << 0 |
diff --git a/tools/perf/tests/attr/test-stat-group b/tools/perf/tests/attr/test-stat-group
index fdc1596a8862..e15d6946e9b3 100644
--- a/tools/perf/tests/attr/test-stat-group
+++ b/tools/perf/tests/attr/test-stat-group
@@ -6,6 +6,7 @@ ret = 1
6[event-1:base-stat] 6[event-1:base-stat]
7fd=1 7fd=1
8group_fd=-1 8group_fd=-1
9read_format=3|15
9 10
10[event-2:base-stat] 11[event-2:base-stat]
11fd=2 12fd=2
@@ -13,3 +14,4 @@ group_fd=1
13config=1 14config=1
14disabled=0 15disabled=0
15enable_on_exec=0 16enable_on_exec=0
17read_format=3|15
diff --git a/tools/perf/tests/attr/test-stat-group1 b/tools/perf/tests/attr/test-stat-group1
index 2a1f86e4a904..1746751123dc 100644
--- a/tools/perf/tests/attr/test-stat-group1
+++ b/tools/perf/tests/attr/test-stat-group1
@@ -6,6 +6,7 @@ ret = 1
6[event-1:base-stat] 6[event-1:base-stat]
7fd=1 7fd=1
8group_fd=-1 8group_fd=-1
9read_format=3|15
9 10
10[event-2:base-stat] 11[event-2:base-stat]
11fd=2 12fd=2
@@ -13,3 +14,4 @@ group_fd=1
13config=1 14config=1
14disabled=0 15disabled=0
15enable_on_exec=0 16enable_on_exec=0
17read_format=3|15
diff --git a/tools/perf/tests/attr/test-stat-no-inherit b/tools/perf/tests/attr/test-stat-no-inherit
index d54b2a1e3e28..924fbb9300d1 100644
--- a/tools/perf/tests/attr/test-stat-no-inherit
+++ b/tools/perf/tests/attr/test-stat-no-inherit
@@ -5,3 +5,4 @@ ret = 1
5 5
6[event:base-stat] 6[event:base-stat]
7inherit=0 7inherit=0
8optional=1
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 53d06f37406a..766573e236e4 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -4,6 +4,7 @@
4 * 4 *
5 * Builtin regression testing command: ever growing number of sanity tests 5 * Builtin regression testing command: ever growing number of sanity tests
6 */ 6 */
7#include <fcntl.h>
7#include <errno.h> 8#include <errno.h>
8#include <unistd.h> 9#include <unistd.h>
9#include <string.h> 10#include <string.h>
diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
index 3c3f3e029e33..868d82b501f4 100644
--- a/tools/perf/tests/mmap-thread-lookup.c
+++ b/tools/perf/tests/mmap-thread-lookup.c
@@ -132,7 +132,7 @@ static int synth_all(struct machine *machine)
132{ 132{
133 return perf_event__synthesize_threads(NULL, 133 return perf_event__synthesize_threads(NULL,
134 perf_event__process, 134 perf_event__process,
135 machine, 0, 500); 135 machine, 0, 500, 1);
136} 136}
137 137
138static int synth_process(struct machine *machine) 138static int synth_process(struct machine *machine)
diff --git a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
index 7a84d73324e3..8b3da21a08f1 100755
--- a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
@@ -10,8 +10,8 @@
10 10
11. $(dirname $0)/lib/probe.sh 11. $(dirname $0)/lib/probe.sh
12 12
13ld=$(realpath /lib64/ld*.so.* | uniq) 13libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g')
14libc=$(echo $ld | sed 's/ld/libc/g') 14nm -g $libc 2>/dev/null | fgrep -q inet_pton || exit 254
15 15
16trace_libc_inet_pton_backtrace() { 16trace_libc_inet_pton_backtrace() {
17 idx=0 17 idx=0
@@ -37,6 +37,9 @@ trace_libc_inet_pton_backtrace() {
37 done 37 done
38} 38}
39 39
40# Check for IPv6 interface existence
41ip a sh lo | fgrep -q inet6 || exit 2
42
40skip_if_no_perf_probe && \ 43skip_if_no_perf_probe && \
41perf probe -q $libc inet_pton && \ 44perf probe -q $libc inet_pton && \
42trace_libc_inet_pton_backtrace 45trace_libc_inet_pton_backtrace
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
index 2e68c5f120da..2a9ef080efd0 100755
--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -17,8 +17,10 @@ skip_if_no_perf_probe || exit 2
17file=$(mktemp /tmp/temporary_file.XXXXX) 17file=$(mktemp /tmp/temporary_file.XXXXX)
18 18
19trace_open_vfs_getname() { 19trace_open_vfs_getname() {
20 perf trace -e open touch $file 2>&1 | \ 20 test "$(uname -m)" = s390x && { svc="openat"; txt="dfd: +CWD, +"; }
21 egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open\(filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$" 21
22 perf trace -e ${svc:-open} touch $file 2>&1 | \
23 egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ ${svc:-open}\(${txt}filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
22} 24}
23 25
24 26
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index bc4a7344e274..89c8e1604ca7 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -84,7 +84,11 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
84 84
85 evsel = perf_evlist__first(evlist); 85 evsel = perf_evlist__first(evlist);
86 evsel->attr.task = 1; 86 evsel->attr.task = 1;
87#ifdef __s390x__
88 evsel->attr.sample_freq = 1000000;
89#else
87 evsel->attr.sample_freq = 1; 90 evsel->attr.sample_freq = 1;
91#endif
88 evsel->attr.inherit = 0; 92 evsel->attr.inherit = 0;
89 evsel->attr.watermark = 0; 93 evsel->attr.watermark = 0;
90 evsel->attr.wakeup_events = 1; 94 evsel->attr.wakeup_events = 1;
diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
index a59db7c45a65..17cb1bb3448c 100644
--- a/tools/perf/tests/topology.c
+++ b/tools/perf/tests/topology.c
@@ -30,12 +30,14 @@ static int get_temp(char *path)
30static int session_write_header(char *path) 30static int session_write_header(char *path)
31{ 31{
32 struct perf_session *session; 32 struct perf_session *session;
33 struct perf_data_file file = { 33 struct perf_data data = {
34 .path = path, 34 .file = {
35 .mode = PERF_DATA_MODE_WRITE, 35 .path = path,
36 },
37 .mode = PERF_DATA_MODE_WRITE,
36 }; 38 };
37 39
38 session = perf_session__new(&file, false, NULL); 40 session = perf_session__new(&data, false, NULL);
39 TEST_ASSERT_VAL("can't get session", session); 41 TEST_ASSERT_VAL("can't get session", session);
40 42
41 session->evlist = perf_evlist__new_default(); 43 session->evlist = perf_evlist__new_default();
@@ -47,7 +49,7 @@ static int session_write_header(char *path)
47 session->header.data_size += DATA_SIZE; 49 session->header.data_size += DATA_SIZE;
48 50
49 TEST_ASSERT_VAL("failed to write header", 51 TEST_ASSERT_VAL("failed to write header",
50 !perf_session__write_header(session, session->evlist, file.fd, true)); 52 !perf_session__write_header(session, session->evlist, data.file.fd, true));
51 53
52 perf_session__delete(session); 54 perf_session__delete(session);
53 55
@@ -57,13 +59,15 @@ static int session_write_header(char *path)
57static int check_cpu_topology(char *path, struct cpu_map *map) 59static int check_cpu_topology(char *path, struct cpu_map *map)
58{ 60{
59 struct perf_session *session; 61 struct perf_session *session;
60 struct perf_data_file file = { 62 struct perf_data data = {
61 .path = path, 63 .file = {
62 .mode = PERF_DATA_MODE_READ, 64 .path = path,
65 },
66 .mode = PERF_DATA_MODE_READ,
63 }; 67 };
64 int i; 68 int i;
65 69
66 session = perf_session__new(&file, false, NULL); 70 session = perf_session__new(&data, false, NULL);
67 TEST_ASSERT_VAL("can't get session", session); 71 TEST_ASSERT_VAL("can't get session", session);
68 72
69 for (i = 0; i < session->header.env.nr_cpus_avail; i++) { 73 for (i = 0; i < session->header.env.nr_cpus_avail; i++) {
diff --git a/tools/perf/trace/beauty/Build b/tools/perf/trace/beauty/Build
index 175d633c6b49..066bbf0f4a74 100644
--- a/tools/perf/trace/beauty/Build
+++ b/tools/perf/trace/beauty/Build
@@ -3,5 +3,7 @@ libperf-y += fcntl.o
3ifeq ($(SRCARCH),$(filter $(SRCARCH),x86)) 3ifeq ($(SRCARCH),$(filter $(SRCARCH),x86))
4libperf-y += ioctl.o 4libperf-y += ioctl.o
5endif 5endif
6libperf-y += kcmp.o
6libperf-y += pkey_alloc.o 7libperf-y += pkey_alloc.o
8libperf-y += prctl.o
7libperf-y += statx.o 9libperf-y += statx.o
diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h
index d80655cd1881..a6dfd04beaee 100644
--- a/tools/perf/trace/beauty/beauty.h
+++ b/tools/perf/trace/beauty/beauty.h
@@ -4,6 +4,7 @@
4 4
5#include <linux/kernel.h> 5#include <linux/kernel.h>
6#include <linux/types.h> 6#include <linux/types.h>
7#include <sys/types.h>
7 8
8struct strarray { 9struct strarray {
9 int offset; 10 int offset;
@@ -27,6 +28,8 @@ size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const cha
27struct trace; 28struct trace;
28struct thread; 29struct thread;
29 30
31size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size);
32
30/** 33/**
31 * @val: value of syscall argument being formatted 34 * @val: value of syscall argument being formatted
32 * @args: All the args, use syscall_args__val(arg, nth) to access one 35 * @args: All the args, use syscall_args__val(arg, nth) to access one
@@ -79,12 +82,27 @@ size_t syscall_arg__scnprintf_fcntl_arg(char *bf, size_t size, struct syscall_ar
79size_t syscall_arg__scnprintf_ioctl_cmd(char *bf, size_t size, struct syscall_arg *arg); 82size_t syscall_arg__scnprintf_ioctl_cmd(char *bf, size_t size, struct syscall_arg *arg);
80#define SCA_IOCTL_CMD syscall_arg__scnprintf_ioctl_cmd 83#define SCA_IOCTL_CMD syscall_arg__scnprintf_ioctl_cmd
81 84
85size_t syscall_arg__scnprintf_kcmp_type(char *bf, size_t size, struct syscall_arg *arg);
86#define SCA_KCMP_TYPE syscall_arg__scnprintf_kcmp_type
87
88size_t syscall_arg__scnprintf_kcmp_idx(char *bf, size_t size, struct syscall_arg *arg);
89#define SCA_KCMP_IDX syscall_arg__scnprintf_kcmp_idx
90
82size_t syscall_arg__scnprintf_pkey_alloc_access_rights(char *bf, size_t size, struct syscall_arg *arg); 91size_t syscall_arg__scnprintf_pkey_alloc_access_rights(char *bf, size_t size, struct syscall_arg *arg);
83#define SCA_PKEY_ALLOC_ACCESS_RIGHTS syscall_arg__scnprintf_pkey_alloc_access_rights 92#define SCA_PKEY_ALLOC_ACCESS_RIGHTS syscall_arg__scnprintf_pkey_alloc_access_rights
84 93
85size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size, struct syscall_arg *arg); 94size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size, struct syscall_arg *arg);
86#define SCA_OPEN_FLAGS syscall_arg__scnprintf_open_flags 95#define SCA_OPEN_FLAGS syscall_arg__scnprintf_open_flags
87 96
97size_t syscall_arg__scnprintf_prctl_option(char *bf, size_t size, struct syscall_arg *arg);
98#define SCA_PRCTL_OPTION syscall_arg__scnprintf_prctl_option
99
100size_t syscall_arg__scnprintf_prctl_arg2(char *bf, size_t size, struct syscall_arg *arg);
101#define SCA_PRCTL_ARG2 syscall_arg__scnprintf_prctl_arg2
102
103size_t syscall_arg__scnprintf_prctl_arg3(char *bf, size_t size, struct syscall_arg *arg);
104#define SCA_PRCTL_ARG3 syscall_arg__scnprintf_prctl_arg3
105
88size_t syscall_arg__scnprintf_statx_flags(char *bf, size_t size, struct syscall_arg *arg); 106size_t syscall_arg__scnprintf_statx_flags(char *bf, size_t size, struct syscall_arg *arg);
89#define SCA_STATX_FLAGS syscall_arg__scnprintf_statx_flags 107#define SCA_STATX_FLAGS syscall_arg__scnprintf_statx_flags
90 108
diff --git a/tools/perf/trace/beauty/kcmp.c b/tools/perf/trace/beauty/kcmp.c
new file mode 100644
index 000000000000..f62040eb9d5c
--- /dev/null
+++ b/tools/perf/trace/beauty/kcmp.c
@@ -0,0 +1,44 @@
1/*
2 * trace/beauty/kcmp.c
3 *
4 * Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
5 *
6 * Released under the GPL v2. (and only v2, not any later version)
7 */
8
9#include "trace/beauty/beauty.h"
10#include <linux/kernel.h>
11#include <sys/types.h>
12#include <machine.h>
13#include <uapi/linux/kcmp.h>
14
15#include "trace/beauty/generated/kcmp_type_array.c"
16
17size_t syscall_arg__scnprintf_kcmp_idx(char *bf, size_t size, struct syscall_arg *arg)
18{
19 unsigned long fd = arg->val;
20 int type = syscall_arg__val(arg, 2);
21 pid_t pid;
22
23 if (type != KCMP_FILE)
24 return syscall_arg__scnprintf_long(bf, size, arg);
25
26 pid = syscall_arg__val(arg, arg->idx == 3 ? 0 : 1); /* idx1 -> pid1, idx2 -> pid2 */
27 return pid__scnprintf_fd(arg->trace, pid, fd, bf, size);
28}
29
30static size_t kcmp__scnprintf_type(int type, char *bf, size_t size)
31{
32 static DEFINE_STRARRAY(kcmp_types);
33 return strarray__scnprintf(&strarray__kcmp_types, bf, size, "%d", type);
34}
35
36size_t syscall_arg__scnprintf_kcmp_type(char *bf, size_t size, struct syscall_arg *arg)
37{
38 unsigned long type = arg->val;
39
40 if (type != KCMP_FILE)
41 arg->mask |= (1 << 3) | (1 << 4); /* Ignore idx1 and idx2 */
42
43 return kcmp__scnprintf_type(type, bf, size);
44}
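kcmp(2) compares kernel resources of two tasks; the beautifier above prints the type argument symbolically and, for KCMP_FILE, resolves idx1/idx2 as file descriptors belonging to pid1/pid2 (other types get their idx arguments masked off). A minimal C sketch of the kind of call this would decorate in 'perf trace' output, comparing a task's own stdout with itself:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/kcmp.h>

/* Illustrative only: raw kcmp() invocation; availability of SYS_kcmp and
 * <linux/kcmp.h> depends on the kernel headers and libc in use.
 */
int main(void)
{
        pid_t pid = getpid();
        long ret = syscall(SYS_kcmp, pid, pid, KCMP_FILE, 1, 1);

        printf("kcmp(%d, %d, KCMP_FILE, 1, 1) = %ld\n", pid, pid, ret);
        return 0;
}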
diff --git a/tools/perf/trace/beauty/kcmp_type.sh b/tools/perf/trace/beauty/kcmp_type.sh
new file mode 100755
index 000000000000..40d063b8c082
--- /dev/null
+++ b/tools/perf/trace/beauty/kcmp_type.sh
@@ -0,0 +1,10 @@
1#!/bin/sh
2
3header_dir=$1
4
5printf "static const char *kcmp_types[] = {\n"
6regex='^[[:space:]]+(KCMP_(\w+)),'
7egrep $regex ${header_dir}/kcmp.h | grep -v KCMP_TYPES, | \
8 sed -r "s/$regex/\1 \2/g" | \
9 xargs printf "\t[%s]\t= \"%s\",\n"
10printf "};\n"
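The script scrapes the KCMP_* enumerators out of the uapi header and turns them into a string table indexed by value; the generated trace/beauty/generated/kcmp_type_array.c that kcmp.c includes would look roughly like this (the exact entries depend on the kcmp.h seen at build time):

static const char *kcmp_types[] = {
        [KCMP_FILE]    = "FILE",
        [KCMP_VM]      = "VM",
        [KCMP_FILES]   = "FILES",
        [KCMP_FS]      = "FS",
        [KCMP_SIGHAND] = "SIGHAND",
        [KCMP_IO]      = "IO",
        [KCMP_SYSVSEM] = "SYSVSEM",
        /* ...any further KCMP_* types present in the header... */
};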
diff --git a/tools/perf/trace/beauty/madvise_behavior.sh b/tools/perf/trace/beauty/madvise_behavior.sh
new file mode 100755
index 000000000000..60ef8640ee70
--- /dev/null
+++ b/tools/perf/trace/beauty/madvise_behavior.sh
@@ -0,0 +1,10 @@
1#!/bin/sh
2
3header_dir=$1
4
5printf "static const char *madvise_advices[] = {\n"
6regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MADV_([[:alnum:]_]+)[[:space:]]+([[:digit:]]+)[[:space:]]*.*'
7egrep $regex ${header_dir}/mman-common.h | \
8 sed -r "s/$regex/\2 \1/g" | \
9 sort -n | xargs printf "\t[%s] = \"%s\",\n"
10printf "};\n"
diff --git a/tools/perf/trace/beauty/mmap.c b/tools/perf/trace/beauty/mmap.c
index 51f1cea406f5..417e3ecfe9d7 100644
--- a/tools/perf/trace/beauty/mmap.c
+++ b/tools/perf/trace/beauty/mmap.c
@@ -62,6 +62,9 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
62 P_MMAP_FLAG(POPULATE); 62 P_MMAP_FLAG(POPULATE);
63 P_MMAP_FLAG(STACK); 63 P_MMAP_FLAG(STACK);
64 P_MMAP_FLAG(UNINITIALIZED); 64 P_MMAP_FLAG(UNINITIALIZED);
65#ifdef MAP_SYNC
66 P_MMAP_FLAG(SYNC);
67#endif
65#undef P_MMAP_FLAG 68#undef P_MMAP_FLAG
66 69
67 if (flags) 70 if (flags)
@@ -95,35 +98,21 @@ static size_t syscall_arg__scnprintf_mremap_flags(char *bf, size_t size,
95 98
96#define SCA_MREMAP_FLAGS syscall_arg__scnprintf_mremap_flags 99#define SCA_MREMAP_FLAGS syscall_arg__scnprintf_mremap_flags
97 100
101static size_t madvise__scnprintf_behavior(int behavior, char *bf, size_t size)
102{
103#include "trace/beauty/generated/madvise_behavior_array.c"
104 static DEFINE_STRARRAY(madvise_advices);
105
106 if (behavior < strarray__madvise_advices.nr_entries && strarray__madvise_advices.entries[behavior] != NULL)
107 return scnprintf(bf, size, "MADV_%s", strarray__madvise_advices.entries[behavior]);
108
109 return scnprintf(bf, size, "%#", behavior);
110}
111
98static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size, 112static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size,
99 struct syscall_arg *arg) 113 struct syscall_arg *arg)
100{ 114{
101 int behavior = arg->val; 115 return madvise__scnprintf_behavior(arg->val, bf, size);
102
103 switch (behavior) {
104#define P_MADV_BHV(n) case MADV_##n: return scnprintf(bf, size, #n)
105 P_MADV_BHV(NORMAL);
106 P_MADV_BHV(RANDOM);
107 P_MADV_BHV(SEQUENTIAL);
108 P_MADV_BHV(WILLNEED);
109 P_MADV_BHV(DONTNEED);
110 P_MADV_BHV(FREE);
111 P_MADV_BHV(REMOVE);
112 P_MADV_BHV(DONTFORK);
113 P_MADV_BHV(DOFORK);
114 P_MADV_BHV(HWPOISON);
115 P_MADV_BHV(SOFT_OFFLINE);
116 P_MADV_BHV(MERGEABLE);
117 P_MADV_BHV(UNMERGEABLE);
118 P_MADV_BHV(HUGEPAGE);
119 P_MADV_BHV(NOHUGEPAGE);
120 P_MADV_BHV(DONTDUMP);
121 P_MADV_BHV(DODUMP);
122#undef P_MADV_BHV
123 default: break;
124 }
125
126 return scnprintf(bf, size, "%#x", behavior);
127} 116}
128 117
129#define SCA_MADV_BHV syscall_arg__scnprintf_madvise_behavior 118#define SCA_MADV_BHV syscall_arg__scnprintf_madvise_behavior
diff --git a/tools/perf/trace/beauty/prctl.c b/tools/perf/trace/beauty/prctl.c
new file mode 100644
index 000000000000..246130dad6c4
--- /dev/null
+++ b/tools/perf/trace/beauty/prctl.c
@@ -0,0 +1,82 @@
1/*
2 * trace/beauty/prctl.c
3 *
4 * Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
5 *
6 * Released under the GPL v2. (and only v2, not any later version)
7 */
8
9#include "trace/beauty/beauty.h"
10#include <linux/kernel.h>
11#include <uapi/linux/prctl.h>
12
13#include "trace/beauty/generated/prctl_option_array.c"
14
15static size_t prctl__scnprintf_option(int option, char *bf, size_t size)
16{
17 static DEFINE_STRARRAY(prctl_options);
18 return strarray__scnprintf(&strarray__prctl_options, bf, size, "%d", option);
19}
20
21static size_t prctl__scnprintf_set_mm(int option, char *bf, size_t size)
22{
23 static DEFINE_STRARRAY(prctl_set_mm_options);
24 return strarray__scnprintf(&strarray__prctl_set_mm_options, bf, size, "%d", option);
25}
26
27size_t syscall_arg__scnprintf_prctl_arg2(char *bf, size_t size, struct syscall_arg *arg)
28{
29 int option = syscall_arg__val(arg, 0);
30
31 if (option == PR_SET_MM)
32 return prctl__scnprintf_set_mm(arg->val, bf, size);
33 /*
34 * We still don't grab the contents of pointers on entry or exit,
35 * so just print them as hex numbers
36 */
37 if (option == PR_SET_NAME)
38 return syscall_arg__scnprintf_hex(bf, size, arg);
39
40 return syscall_arg__scnprintf_long(bf, size, arg);
41}
42
43size_t syscall_arg__scnprintf_prctl_arg3(char *bf, size_t size, struct syscall_arg *arg)
44{
45 int option = syscall_arg__val(arg, 0);
46
47 if (option == PR_SET_MM)
48 return syscall_arg__scnprintf_hex(bf, size, arg);
49
50 return syscall_arg__scnprintf_long(bf, size, arg);
51}
52
53size_t syscall_arg__scnprintf_prctl_option(char *bf, size_t size, struct syscall_arg *arg)
54{
55 unsigned long option = arg->val;
56 enum {
57 SPO_ARG2 = (1 << 1),
58 SPO_ARG3 = (1 << 2),
59 SPO_ARG4 = (1 << 3),
60 SPO_ARG5 = (1 << 4),
61 SPO_ARG6 = (1 << 5),
62 };
63 const u8 all_but2 = SPO_ARG3 | SPO_ARG4 | SPO_ARG5 | SPO_ARG6;
64 const u8 all = SPO_ARG2 | all_but2;
65 const u8 masks[] = {
66 [PR_GET_DUMPABLE] = all,
67 [PR_SET_DUMPABLE] = all_but2,
68 [PR_SET_NAME] = all_but2,
69 [PR_GET_CHILD_SUBREAPER] = all_but2,
70 [PR_SET_CHILD_SUBREAPER] = all_but2,
71 [PR_GET_SECUREBITS] = all,
72 [PR_SET_SECUREBITS] = all_but2,
73 [PR_SET_MM] = SPO_ARG4 | SPO_ARG5 | SPO_ARG6,
74 [PR_GET_PDEATHSIG] = all,
75 [PR_SET_PDEATHSIG] = all_but2,
76 };
77
78 if (option < ARRAY_SIZE(masks))
79 arg->mask |= masks[option];
80
81 return prctl__scnprintf_option(option, bf, size);
82}
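The masks table above is what keeps 'perf trace' from printing arguments a given prctl option never looks at: PR_GET_DUMPABLE suppresses arg2 through arg6 entirely, while PR_SET_NAME keeps only its arg2 (printed as a hex pointer, since the string contents are not captured). A small C sketch of calls this beautifier would decorate:

#include <stdio.h>
#include <sys/prctl.h>

/* Illustrative only: prctl() calls of the kind handled above.  In
 * 'perf trace' the first would appear roughly as prctl(SET_NAME, 0x...)
 * with the remaining arguments masked off.
 */
int main(void)
{
        char name[16] = "";

        prctl(PR_SET_NAME, "demo-thread");
        prctl(PR_GET_NAME, name);
        printf("comm is now '%s'\n", name);
        return 0;
}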
diff --git a/tools/perf/trace/beauty/prctl_option.sh b/tools/perf/trace/beauty/prctl_option.sh
new file mode 100755
index 000000000000..0be4138fbe71
--- /dev/null
+++ b/tools/perf/trace/beauty/prctl_option.sh
@@ -0,0 +1,17 @@
1#!/bin/sh
2
3header_dir=$1
4
5printf "static const char *prctl_options[] = {\n"
6regex='^#define[[:space:]]+PR_([GS]ET\w+)[[:space:]]*([[:xdigit:]]+).*'
7egrep $regex ${header_dir}/prctl.h | grep -v PR_SET_PTRACER | \
8 sed -r "s/$regex/\2 \1/g" | \
9 sort -n | xargs printf "\t[%s] = \"%s\",\n"
10printf "};\n"
11
12printf "static const char *prctl_set_mm_options[] = {\n"
13regex='^#[[:space:]]+define[[:space:]]+PR_SET_MM_(\w+)[[:space:]]*([[:digit:]]+).*'
14egrep $regex ${header_dir}/prctl.h | \
15 sed -r "s/$regex/\2 \1/g" | \
16 sort -n | xargs printf "\t[%s] = \"%s\",\n"
17printf "};\n"
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 628ad5f7eddb..68146f4620a5 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -155,57 +155,9 @@ static void callchain_list__set_folding(struct callchain_list *cl, bool unfold)
155 cl->unfolded = unfold ? cl->has_children : false; 155 cl->unfolded = unfold ? cl->has_children : false;
156} 156}
157 157
158static struct inline_node *inline_node__create(struct map *map, u64 ip)
159{
160 struct dso *dso;
161 struct inline_node *node;
162
163 if (map == NULL)
164 return NULL;
165
166 dso = map->dso;
167 if (dso == NULL)
168 return NULL;
169
170 node = dso__parse_addr_inlines(dso,
171 map__rip_2objdump(map, ip));
172
173 return node;
174}
175
176static int inline__count_rows(struct inline_node *node)
177{
178 struct inline_list *ilist;
179 int i = 0;
180
181 if (node == NULL)
182 return 0;
183
184 list_for_each_entry(ilist, &node->val, list) {
185 if ((ilist->filename != NULL) || (ilist->funcname != NULL))
186 i++;
187 }
188
189 return i;
190}
191
192static int callchain_list__inline_rows(struct callchain_list *chain)
193{
194 struct inline_node *node;
195 int rows;
196
197 node = inline_node__create(chain->ms.map, chain->ip);
198 if (node == NULL)
199 return 0;
200
201 rows = inline__count_rows(node);
202 inline_node__delete(node);
203 return rows;
204}
205
206static int callchain_node__count_rows_rb_tree(struct callchain_node *node) 158static int callchain_node__count_rows_rb_tree(struct callchain_node *node)
207{ 159{
208 int n = 0, inline_rows; 160 int n = 0;
209 struct rb_node *nd; 161 struct rb_node *nd;
210 162
211 for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) { 163 for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
@@ -216,12 +168,6 @@ static int callchain_node__count_rows_rb_tree(struct callchain_node *node)
216 list_for_each_entry(chain, &child->val, list) { 168 list_for_each_entry(chain, &child->val, list) {
217 ++n; 169 ++n;
218 170
219 if (symbol_conf.inline_name) {
220 inline_rows =
221 callchain_list__inline_rows(chain);
222 n += inline_rows;
223 }
224
225 /* We need this because we may not have children */ 171 /* We need this because we may not have children */
226 folded_sign = callchain_list__folded(chain); 172 folded_sign = callchain_list__folded(chain);
227 if (folded_sign == '+') 173 if (folded_sign == '+')
@@ -273,7 +219,7 @@ static int callchain_node__count_rows(struct callchain_node *node)
273{ 219{
274 struct callchain_list *chain; 220 struct callchain_list *chain;
275 bool unfolded = false; 221 bool unfolded = false;
276 int n = 0, inline_rows; 222 int n = 0;
277 223
278 if (callchain_param.mode == CHAIN_FLAT) 224 if (callchain_param.mode == CHAIN_FLAT)
279 return callchain_node__count_flat_rows(node); 225 return callchain_node__count_flat_rows(node);
@@ -282,10 +228,6 @@ static int callchain_node__count_rows(struct callchain_node *node)
282 228
283 list_for_each_entry(chain, &node->val, list) { 229 list_for_each_entry(chain, &node->val, list) {
284 ++n; 230 ++n;
285 if (symbol_conf.inline_name) {
286 inline_rows = callchain_list__inline_rows(chain);
287 n += inline_rows;
288 }
289 231
290 unfolded = chain->unfolded; 232 unfolded = chain->unfolded;
291 } 233 }
@@ -433,19 +375,6 @@ static void hist_entry__init_have_children(struct hist_entry *he)
433 he->init_have_children = true; 375 he->init_have_children = true;
434} 376}
435 377
436static void hist_entry_init_inline_node(struct hist_entry *he)
437{
438 if (he->inline_node)
439 return;
440
441 he->inline_node = inline_node__create(he->ms.map, he->ip);
442
443 if (he->inline_node == NULL)
444 return;
445
446 he->has_children = true;
447}
448
449static bool hist_browser__toggle_fold(struct hist_browser *browser) 378static bool hist_browser__toggle_fold(struct hist_browser *browser)
450{ 379{
451 struct hist_entry *he = browser->he_selection; 380 struct hist_entry *he = browser->he_selection;
@@ -477,12 +406,8 @@ static bool hist_browser__toggle_fold(struct hist_browser *browser)
477 406
478 if (he->unfolded) { 407 if (he->unfolded) {
479 if (he->leaf) 408 if (he->leaf)
480 if (he->inline_node) 409 he->nr_rows = callchain__count_rows(
481 he->nr_rows = inline__count_rows( 410 &he->sorted_chain);
482 he->inline_node);
483 else
484 he->nr_rows = callchain__count_rows(
485 &he->sorted_chain);
486 else 411 else
487 he->nr_rows = hierarchy_count_rows(browser, he, false); 412 he->nr_rows = hierarchy_count_rows(browser, he, false);
488 413
@@ -842,71 +767,6 @@ static bool hist_browser__check_dump_full(struct hist_browser *browser __maybe_u
842 767
843#define LEVEL_OFFSET_STEP 3 768#define LEVEL_OFFSET_STEP 3
844 769
845static int hist_browser__show_inline(struct hist_browser *browser,
846 struct inline_node *node,
847 unsigned short row,
848 int offset)
849{
850 struct inline_list *ilist;
851 char buf[1024];
852 int color, width, first_row;
853
854 first_row = row;
855 width = browser->b.width - (LEVEL_OFFSET_STEP + 2);
856 list_for_each_entry(ilist, &node->val, list) {
857 if ((ilist->filename != NULL) || (ilist->funcname != NULL)) {
858 color = HE_COLORSET_NORMAL;
859 if (ui_browser__is_current_entry(&browser->b, row))
860 color = HE_COLORSET_SELECTED;
861
862 if (callchain_param.key == CCKEY_ADDRESS ||
863 callchain_param.key == CCKEY_SRCLINE) {
864 if (ilist->filename != NULL)
865 scnprintf(buf, sizeof(buf),
866 "%s:%d (inline)",
867 ilist->filename,
868 ilist->line_nr);
869 else
870 scnprintf(buf, sizeof(buf), "??");
871 } else if (ilist->funcname != NULL)
872 scnprintf(buf, sizeof(buf), "%s (inline)",
873 ilist->funcname);
874 else if (ilist->filename != NULL)
875 scnprintf(buf, sizeof(buf),
876 "%s:%d (inline)",
877 ilist->filename,
878 ilist->line_nr);
879 else
880 scnprintf(buf, sizeof(buf), "??");
881
882 ui_browser__set_color(&browser->b, color);
883 hist_browser__gotorc(browser, row, 0);
884 ui_browser__write_nstring(&browser->b, " ",
885 LEVEL_OFFSET_STEP + offset);
886 ui_browser__write_nstring(&browser->b, buf, width);
887 row++;
888 }
889 }
890
891 return row - first_row;
892}
893
894static size_t show_inline_list(struct hist_browser *browser, struct map *map,
895 u64 ip, int row, int offset)
896{
897 struct inline_node *node;
898 int ret;
899
900 node = inline_node__create(map, ip);
901 if (node == NULL)
902 return 0;
903
904 ret = hist_browser__show_inline(browser, node, row, offset);
905
906 inline_node__delete(node);
907 return ret;
908}
909
910static int hist_browser__show_callchain_list(struct hist_browser *browser, 770static int hist_browser__show_callchain_list(struct hist_browser *browser,
911 struct callchain_node *node, 771 struct callchain_node *node,
912 struct callchain_list *chain, 772 struct callchain_list *chain,
@@ -918,7 +778,7 @@ static int hist_browser__show_callchain_list(struct hist_browser *browser,
918 char bf[1024], *alloc_str; 778 char bf[1024], *alloc_str;
919 char buf[64], *alloc_str2; 779 char buf[64], *alloc_str2;
920 const char *str; 780 const char *str;
921 int inline_rows = 0, ret = 1; 781 int ret = 1;
922 782
923 if (arg->row_offset != 0) { 783 if (arg->row_offset != 0) {
924 arg->row_offset--; 784 arg->row_offset--;
@@ -955,12 +815,7 @@ static int hist_browser__show_callchain_list(struct hist_browser *browser,
955 free(alloc_str); 815 free(alloc_str);
956 free(alloc_str2); 816 free(alloc_str2);
957 817
958 if (symbol_conf.inline_name) { 818 return ret;
959 inline_rows = show_inline_list(browser, chain->ms.map,
960 chain->ip, row + 1, offset);
961 }
962
963 return ret + inline_rows;
964} 819}
965 820
966static bool check_percent_display(struct rb_node *node, u64 parent_total) 821static bool check_percent_display(struct rb_node *node, u64 parent_total)
@@ -1384,12 +1239,6 @@ static int hist_browser__show_entry(struct hist_browser *browser,
1384 folded_sign = hist_entry__folded(entry); 1239 folded_sign = hist_entry__folded(entry);
1385 } 1240 }
1386 1241
1387 if (symbol_conf.inline_name &&
1388 (!entry->has_children)) {
1389 hist_entry_init_inline_node(entry);
1390 folded_sign = hist_entry__folded(entry);
1391 }
1392
1393 if (row_offset == 0) { 1242 if (row_offset == 0) {
1394 struct hpp_arg arg = { 1243 struct hpp_arg arg = {
1395 .b = &browser->b, 1244 .b = &browser->b,
@@ -1421,8 +1270,7 @@ static int hist_browser__show_entry(struct hist_browser *browser,
1421 } 1270 }
1422 1271
1423 if (first) { 1272 if (first) {
1424 if (symbol_conf.use_callchain || 1273 if (symbol_conf.use_callchain) {
1425 symbol_conf.inline_name) {
1426 ui_browser__printf(&browser->b, "%c ", folded_sign); 1274 ui_browser__printf(&browser->b, "%c ", folded_sign);
1427 width -= 2; 1275 width -= 2;
1428 } 1276 }
@@ -1464,15 +1312,11 @@ static int hist_browser__show_entry(struct hist_browser *browser,
1464 .is_current_entry = current_entry, 1312 .is_current_entry = current_entry,
1465 }; 1313 };
1466 1314
1467 if (entry->inline_node) 1315 printed += hist_browser__show_callchain(browser,
1468 printed += hist_browser__show_inline(browser, 1316 entry, 1, row,
1469 entry->inline_node, row, 0); 1317 hist_browser__show_callchain_entry,
1470 else 1318 &arg,
1471 printed += hist_browser__show_callchain(browser, 1319 hist_browser__check_output_full);
1472 entry, 1, row,
1473 hist_browser__show_callchain_entry,
1474 &arg,
1475 hist_browser__check_output_full);
1476 } 1320 }
1477 1321
1478 return printed; 1322 return printed;
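
The browser-local inline rendering above goes away because inlined frames are now delivered as ordinary callchain entries whose symbol carries an inlined flag, so the shared formatter handles them. A minimal sketch under that assumption (the trailing show_dso bool of callchain_list__sym_name() is not visible in this diff and is assumed here; headers: stdio.h and "callchain.h"):

    static void show_frame_name(struct callchain_list *chain)
    {
            char bf[256];

            /* an inlined frame is a plain entry with ms.sym->inlined set;
             * the shared formatter appends " (inlined)" for it */
            callchain_list__sym_name(chain, bf, sizeof(bf), false);
            puts(bf);
    }
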
diff --git a/tools/perf/ui/progress.c b/tools/perf/ui/progress.c
index b5a5df14d702..bbfbc91a0fa4 100644
--- a/tools/perf/ui/progress.c
+++ b/tools/perf/ui/progress.c
@@ -28,13 +28,17 @@ void ui_progress__update(struct ui_progress *p, u64 adv)
28 } 28 }
29} 29}
30 30
31void ui_progress__init(struct ui_progress *p, u64 total, const char *title) 31void __ui_progress__init(struct ui_progress *p, u64 total,
32 const char *title, bool size)
32{ 33{
33 p->curr = 0; 34 p->curr = 0;
34 p->next = p->step = total / 16 ?: 1; 35 p->next = p->step = total / 16 ?: 1;
35 p->total = total; 36 p->total = total;
36 p->title = title; 37 p->title = title;
38 p->size = size;
37 39
40 if (ui_progress__ops->init)
41 ui_progress__ops->init(p);
38} 42}
39 43
40void ui_progress__finish(void) 44void ui_progress__finish(void)
diff --git a/tools/perf/ui/progress.h b/tools/perf/ui/progress.h
index 594bbe6935dd..4f52c37b2f09 100644
--- a/tools/perf/ui/progress.h
+++ b/tools/perf/ui/progress.h
@@ -9,12 +9,22 @@ void ui_progress__finish(void);
9struct ui_progress { 9struct ui_progress {
10 const char *title; 10 const char *title;
11 u64 curr, next, step, total; 11 u64 curr, next, step, total;
12 bool size;
12}; 13};
13 14
14void ui_progress__init(struct ui_progress *p, u64 total, const char *title); 15void __ui_progress__init(struct ui_progress *p, u64 total,
16 const char *title, bool size);
17
18#define ui_progress__init(p, total, title) \
19 __ui_progress__init(p, total, title, false)
20
21#define ui_progress__init_size(p, total, title) \
22 __ui_progress__init(p, total, title, true)
23
15void ui_progress__update(struct ui_progress *p, u64 adv); 24void ui_progress__update(struct ui_progress *p, u64 adv);
16 25
17struct ui_progress_ops { 26struct ui_progress_ops {
27 void (*init)(struct ui_progress *p);
18 void (*update)(struct ui_progress *p); 28 void (*update)(struct ui_progress *p);
19 void (*finish)(void); 29 void (*finish)(void);
20}; 30};
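
A short usage sketch for the two init flavours declared above; the function name and the 4 KiB step are illustrative, only the ui_progress__* calls come from this header (u64 is from linux/types.h):

    static void process_bytes(u64 nr_bytes)
    {
            struct ui_progress prog;
            u64 done;

            /* size-aware variant: the TUI title gets a human-readable [cur/total] */
            ui_progress__init_size(&prog, nr_bytes, "Processing events");

            for (done = 0; done < nr_bytes; done += 4096)
                    ui_progress__update(&prog, 4096);

            ui_progress__finish();
    }

A plain counter would use ui_progress__init() instead, which keeps the old behaviour (size == false).
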
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index de2810ae16be..25dd1e0ecc58 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -22,64 +22,6 @@ static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
22 return ret; 22 return ret;
23} 23}
24 24
25static size_t inline__fprintf(struct map *map, u64 ip, int left_margin,
26 int depth, int depth_mask, FILE *fp)
27{
28 struct dso *dso;
29 struct inline_node *node;
30 struct inline_list *ilist;
31 int ret = 0, i;
32
33 if (map == NULL)
34 return 0;
35
36 dso = map->dso;
37 if (dso == NULL)
38 return 0;
39
40 node = dso__parse_addr_inlines(dso,
41 map__rip_2objdump(map, ip));
42 if (node == NULL)
43 return 0;
44
45 list_for_each_entry(ilist, &node->val, list) {
46 if ((ilist->filename != NULL) || (ilist->funcname != NULL)) {
47 ret += callchain__fprintf_left_margin(fp, left_margin);
48
49 for (i = 0; i < depth; i++) {
50 if (depth_mask & (1 << i))
51 ret += fprintf(fp, "|");
52 else
53 ret += fprintf(fp, " ");
54 ret += fprintf(fp, " ");
55 }
56
57 if (callchain_param.key == CCKEY_ADDRESS ||
58 callchain_param.key == CCKEY_SRCLINE) {
59 if (ilist->filename != NULL)
60 ret += fprintf(fp, "%s:%d (inline)",
61 ilist->filename,
62 ilist->line_nr);
63 else
64 ret += fprintf(fp, "??");
65 } else if (ilist->funcname != NULL)
66 ret += fprintf(fp, "%s (inline)",
67 ilist->funcname);
68 else if (ilist->filename != NULL)
69 ret += fprintf(fp, "%s:%d (inline)",
70 ilist->filename,
71 ilist->line_nr);
72 else
73 ret += fprintf(fp, "??");
74
75 ret += fprintf(fp, "\n");
76 }
77 }
78
79 inline_node__delete(node);
80 return ret;
81}
82
83static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask, 25static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
84 int left_margin) 26 int left_margin)
85{ 27{
@@ -138,9 +80,6 @@ static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
138 fputc('\n', fp); 80 fputc('\n', fp);
139 free(alloc_str); 81 free(alloc_str);
140 82
141 if (symbol_conf.inline_name)
142 ret += inline__fprintf(chain->ms.map, chain->ip,
143 left_margin, depth, depth_mask, fp);
144 return ret; 83 return ret;
145} 84}
146 85
@@ -315,13 +254,6 @@ static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
315 254
316 if (++entries_printed == callchain_param.print_limit) 255 if (++entries_printed == callchain_param.print_limit)
317 break; 256 break;
318
319 if (symbol_conf.inline_name)
320 ret += inline__fprintf(chain->ms.map,
321 chain->ip,
322 left_margin,
323 0, 0,
324 fp);
325 } 257 }
326 root = &cnode->rb_root; 258 root = &cnode->rb_root;
327 } 259 }
@@ -601,7 +533,6 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
601{ 533{
602 int ret; 534 int ret;
603 int callchain_ret = 0; 535 int callchain_ret = 0;
604 int inline_ret = 0;
605 struct perf_hpp hpp = { 536 struct perf_hpp hpp = {
606 .buf = bf, 537 .buf = bf,
607 .size = size, 538 .size = size,
@@ -623,13 +554,7 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
623 callchain_ret = hist_entry_callchain__fprintf(he, total_period, 554 callchain_ret = hist_entry_callchain__fprintf(he, total_period,
624 0, fp); 555 0, fp);
625 556
626 if (callchain_ret == 0 && symbol_conf.inline_name) { 557 ret += callchain_ret;
627 inline_ret = inline__fprintf(he->ms.map, he->ip, 0, 0, 0, fp);
628 ret += inline_ret;
629 if (inline_ret > 0)
630 ret += fprintf(fp, "\n");
631 } else
632 ret += callchain_ret;
633 558
634 return ret; 559 return ret;
635} 560}
diff --git a/tools/perf/ui/tui/progress.c b/tools/perf/ui/tui/progress.c
index 236bcb620ae4..bc134b82829d 100644
--- a/tools/perf/ui/tui/progress.c
+++ b/tools/perf/ui/tui/progress.c
@@ -1,13 +1,34 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include <linux/kernel.h>
2#include "../cache.h" 3#include "../cache.h"
3#include "../progress.h" 4#include "../progress.h"
4#include "../libslang.h" 5#include "../libslang.h"
5#include "../ui.h" 6#include "../ui.h"
6#include "tui.h" 7#include "tui.h"
8#include "units.h"
7#include "../browser.h" 9#include "../browser.h"
8 10
11static void __tui_progress__init(struct ui_progress *p)
12{
13 p->next = p->step = p->total / (SLtt_Screen_Cols - 2) ?: 1;
14}
15
16static int get_title(struct ui_progress *p, char *buf, size_t size)
17{
18 char buf_cur[20];
19 char buf_tot[20];
20 int ret;
21
22 ret = unit_number__scnprintf(buf_cur, sizeof(buf_cur), p->curr);
23 ret += unit_number__scnprintf(buf_tot, sizeof(buf_tot), p->total);
24
25 return ret + scnprintf(buf, size, "%s [%s/%s]",
26 p->title, buf_cur, buf_tot);
27}
28
9static void tui_progress__update(struct ui_progress *p) 29static void tui_progress__update(struct ui_progress *p)
10{ 30{
31 char buf[100], *title = (char *) p->title;
11 int bar, y; 32 int bar, y;
12 /* 33 /*
13 * FIXME: We should have a per UI backend way of showing progress, 34 * FIXME: We should have a per UI backend way of showing progress,
@@ -19,13 +40,18 @@ static void tui_progress__update(struct ui_progress *p)
19 if (p->total == 0) 40 if (p->total == 0)
20 return; 41 return;
21 42
43 if (p->size) {
44 get_title(p, buf, sizeof(buf));
45 title = buf;
46 }
47
22 ui__refresh_dimensions(false); 48 ui__refresh_dimensions(false);
23 pthread_mutex_lock(&ui__lock); 49 pthread_mutex_lock(&ui__lock);
24 y = SLtt_Screen_Rows / 2 - 2; 50 y = SLtt_Screen_Rows / 2 - 2;
25 SLsmg_set_color(0); 51 SLsmg_set_color(0);
26 SLsmg_draw_box(y, 0, 3, SLtt_Screen_Cols); 52 SLsmg_draw_box(y, 0, 3, SLtt_Screen_Cols);
27 SLsmg_gotorc(y++, 1); 53 SLsmg_gotorc(y++, 1);
28 SLsmg_write_string((char *)p->title); 54 SLsmg_write_string(title);
29 SLsmg_fill_region(y, 1, 1, SLtt_Screen_Cols - 2, ' '); 55 SLsmg_fill_region(y, 1, 1, SLtt_Screen_Cols - 2, ' ');
30 SLsmg_set_color(HE_COLORSET_SELECTED); 56 SLsmg_set_color(HE_COLORSET_SELECTED);
31 bar = ((SLtt_Screen_Cols - 2) * p->curr) / p->total; 57 bar = ((SLtt_Screen_Cols - 2) * p->curr) / p->total;
@@ -50,8 +76,8 @@ static void tui_progress__finish(void)
50 pthread_mutex_unlock(&ui__lock); 76 pthread_mutex_unlock(&ui__lock);
51} 77}
52 78
53static struct ui_progress_ops tui_progress__ops = 79static struct ui_progress_ops tui_progress__ops = {
54{ 80 .init = __tui_progress__init,
55 .update = tui_progress__update, 81 .update = tui_progress__update,
56 .finish = tui_progress__finish, 82 .finish = tui_progress__finish,
57}; 83};
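
For a size-based progress bar, the title built by get_title() above ends up looking roughly like this; get_title() is file-local, so this is only a conceptual sketch with made-up numbers, and the exact unit rounding is whatever unit_number__scnprintf() produces:

    struct ui_progress p = {
            .title = "Processing events",
            .curr  = 12ULL << 20,   /* ~12M processed so far */
            .total = 2ULL << 30,    /* ~2G perf.data file    */
            .size  = true,
    };
    char buf[100];

    get_title(&p, buf, sizeof(buf));
    /* buf now holds something like "Processing events [12M/2G]" */
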
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 94518c1bf8b6..a3de7916fe63 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -13,6 +13,7 @@ libperf-y += find_bit.o
13libperf-y += kallsyms.o 13libperf-y += kallsyms.o
14libperf-y += levenshtein.o 14libperf-y += levenshtein.o
15libperf-y += llvm-utils.o 15libperf-y += llvm-utils.o
16libperf-y += mmap.o
16libperf-y += memswap.o 17libperf-y += memswap.o
17libperf-y += parse-events.o 18libperf-y += parse-events.o
18libperf-y += perf_regs.o 19libperf-y += perf_regs.o
@@ -34,6 +35,7 @@ libperf-y += dso.o
34libperf-y += symbol.o 35libperf-y += symbol.o
35libperf-y += symbol_fprintf.o 36libperf-y += symbol_fprintf.o
36libperf-y += color.o 37libperf-y += color.o
38libperf-y += metricgroup.o
37libperf-y += header.o 39libperf-y += header.o
38libperf-y += callchain.o 40libperf-y += callchain.o
39libperf-y += values.o 41libperf-y += values.o
@@ -78,6 +80,7 @@ libperf-y += data.o
78libperf-y += tsc.o 80libperf-y += tsc.o
79libperf-y += cloexec.o 81libperf-y += cloexec.o
80libperf-y += call-path.o 82libperf-y += call-path.o
83libperf-y += rwsem.o
81libperf-y += thread-stack.o 84libperf-y += thread-stack.o
82libperf-$(CONFIG_AUXTRACE) += auxtrace.o 85libperf-$(CONFIG_AUXTRACE) += auxtrace.o
83libperf-$(CONFIG_AUXTRACE) += intel-pt-decoder/ 86libperf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index aa66791b1bfc..3369c7830260 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -49,10 +49,9 @@ struct arch {
49 void *priv; 49 void *priv;
50 unsigned int model; 50 unsigned int model;
51 unsigned int family; 51 unsigned int family;
52 int (*init)(struct arch *arch); 52 int (*init)(struct arch *arch, char *cpuid);
53 bool (*ins_is_fused)(struct arch *arch, const char *ins1, 53 bool (*ins_is_fused)(struct arch *arch, const char *ins1,
54 const char *ins2); 54 const char *ins2);
55 int (*cpuid_parse)(struct arch *arch, char *cpuid);
56 struct { 55 struct {
57 char comment_char; 56 char comment_char;
58 char skip_functions_char; 57 char skip_functions_char;
@@ -132,10 +131,10 @@ static struct arch architectures[] = {
132 }, 131 },
133 { 132 {
134 .name = "x86", 133 .name = "x86",
134 .init = x86__annotate_init,
135 .instructions = x86__instructions, 135 .instructions = x86__instructions,
136 .nr_instructions = ARRAY_SIZE(x86__instructions), 136 .nr_instructions = ARRAY_SIZE(x86__instructions),
137 .ins_is_fused = x86__ins_is_fused, 137 .ins_is_fused = x86__ins_is_fused,
138 .cpuid_parse = x86__cpuid_parse,
139 .objdump = { 138 .objdump = {
140 .comment_char = '#', 139 .comment_char = '#',
141 }, 140 },
@@ -166,7 +165,7 @@ static void ins__delete(struct ins_operands *ops)
166static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size, 165static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
167 struct ins_operands *ops) 166 struct ins_operands *ops)
168{ 167{
169 return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->raw); 168 return scnprintf(bf, size, "%-6s %s", ins->name, ops->raw);
170} 169}
171 170
172int ins__scnprintf(struct ins *ins, char *bf, size_t size, 171int ins__scnprintf(struct ins *ins, char *bf, size_t size,
@@ -231,12 +230,12 @@ static int call__scnprintf(struct ins *ins, char *bf, size_t size,
231 struct ins_operands *ops) 230 struct ins_operands *ops)
232{ 231{
233 if (ops->target.name) 232 if (ops->target.name)
234 return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->target.name); 233 return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.name);
235 234
236 if (ops->target.addr == 0) 235 if (ops->target.addr == 0)
237 return ins__raw_scnprintf(ins, bf, size, ops); 236 return ins__raw_scnprintf(ins, bf, size, ops);
238 237
239 return scnprintf(bf, size, "%-6.6s *%" PRIx64, ins->name, ops->target.addr); 238 return scnprintf(bf, size, "%-6s *%" PRIx64, ins->name, ops->target.addr);
240} 239}
241 240
242static struct ins_ops call_ops = { 241static struct ins_ops call_ops = {
@@ -300,7 +299,7 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
300 c++; 299 c++;
301 } 300 }
302 301
303 return scnprintf(bf, size, "%-6.6s %.*s%" PRIx64, 302 return scnprintf(bf, size, "%-6s %.*s%" PRIx64,
304 ins->name, c ? c - ops->raw : 0, ops->raw, 303 ins->name, c ? c - ops->raw : 0, ops->raw,
305 ops->target.offset); 304 ops->target.offset);
306} 305}
@@ -373,7 +372,7 @@ static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
373 if (ops->locked.ins.ops == NULL) 372 if (ops->locked.ins.ops == NULL)
374 return ins__raw_scnprintf(ins, bf, size, ops); 373 return ins__raw_scnprintf(ins, bf, size, ops);
375 374
376 printed = scnprintf(bf, size, "%-6.6s ", ins->name); 375 printed = scnprintf(bf, size, "%-6s ", ins->name);
377 return printed + ins__scnprintf(&ops->locked.ins, bf + printed, 376 return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
378 size - printed, ops->locked.ops); 377 size - printed, ops->locked.ops);
379} 378}
@@ -449,7 +448,7 @@ out_free_source:
449static int mov__scnprintf(struct ins *ins, char *bf, size_t size, 448static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
450 struct ins_operands *ops) 449 struct ins_operands *ops)
451{ 450{
452 return scnprintf(bf, size, "%-6.6s %s,%s", ins->name, 451 return scnprintf(bf, size, "%-6s %s,%s", ins->name,
453 ops->source.name ?: ops->source.raw, 452 ops->source.name ?: ops->source.raw,
454 ops->target.name ?: ops->target.raw); 453 ops->target.name ?: ops->target.raw);
455} 454}
@@ -489,7 +488,7 @@ static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops
489static int dec__scnprintf(struct ins *ins, char *bf, size_t size, 488static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
490 struct ins_operands *ops) 489 struct ins_operands *ops)
491{ 490{
492 return scnprintf(bf, size, "%-6.6s %s", ins->name, 491 return scnprintf(bf, size, "%-6s %s", ins->name,
493 ops->target.name ?: ops->target.raw); 492 ops->target.name ?: ops->target.raw);
494} 493}
495 494
@@ -501,7 +500,7 @@ static struct ins_ops dec_ops = {
501static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size, 500static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
502 struct ins_operands *ops __maybe_unused) 501 struct ins_operands *ops __maybe_unused)
503{ 502{
504 return scnprintf(bf, size, "%-6.6s", "nop"); 503 return scnprintf(bf, size, "%-6s", "nop");
505} 504}
506 505
507static struct ins_ops nop_ops = { 506static struct ins_ops nop_ops = {
@@ -925,7 +924,7 @@ void disasm_line__free(struct disasm_line *dl)
925int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw) 924int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
926{ 925{
927 if (raw || !dl->ins.ops) 926 if (raw || !dl->ins.ops)
928 return scnprintf(bf, size, "%-6.6s %s", dl->ins.name, dl->ops.raw); 927 return scnprintf(bf, size, "%-6s %s", dl->ins.name, dl->ops.raw);
929 928
930 return ins__scnprintf(&dl->ins, bf, size, &dl->ops); 929 return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
931} 930}
@@ -1457,16 +1456,13 @@ int symbol__disassemble(struct symbol *sym, struct map *map,
1457 *parch = arch; 1456 *parch = arch;
1458 1457
1459 if (arch->init) { 1458 if (arch->init) {
1460 err = arch->init(arch); 1459 err = arch->init(arch, cpuid);
1461 if (err) { 1460 if (err) {
1462 pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name); 1461 pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
1463 return err; 1462 return err;
1464 } 1463 }
1465 } 1464 }
1466 1465
1467 if (arch->cpuid_parse && cpuid)
1468 arch->cpuid_parse(arch, cpuid);
1469
1470 pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__, 1466 pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
1471 symfs_filename, sym->name, map->unmap_ip(map, sym->start), 1467 symfs_filename, sym->name, map->unmap_ip(map, sym->start),
1472 map->unmap_ip(map, sym->end)); 1468 map->unmap_ip(map, sym->end));
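
The separate ->cpuid_parse hook is folded into ->init(), which now receives the cpuid string directly. A hypothetical callback under the new signature; parse_cpuid() is a made-up stand-in for whatever the old ->cpuid_parse body did with the string:

    static int foo__annotate_init(struct arch *arch, char *cpuid)
    {
            /* cpuid may be NULL, e.g. for data files recorded without it */
            if (cpuid && parse_cpuid(cpuid, &arch->family, &arch->model) < 0)
                    return -1;      /* symbol__disassemble() reports the failure */

            return 0;
    }
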
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 5547457566a7..a33491416400 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -208,7 +208,7 @@ static int auxtrace_queues__grow(struct auxtrace_queues *queues,
208 208
209static void *auxtrace_copy_data(u64 size, struct perf_session *session) 209static void *auxtrace_copy_data(u64 size, struct perf_session *session)
210{ 210{
211 int fd = perf_data_file__fd(session->file); 211 int fd = perf_data__fd(session->data);
212 void *p; 212 void *p;
213 ssize_t ret; 213 ssize_t ret;
214 214
@@ -305,7 +305,7 @@ static int auxtrace_queues__add_event_buffer(struct auxtrace_queues *queues,
305 if (session->one_mmap) { 305 if (session->one_mmap) {
306 buffer->data = buffer->data_offset - session->one_mmap_offset + 306 buffer->data = buffer->data_offset - session->one_mmap_offset +
307 session->one_mmap_addr; 307 session->one_mmap_addr;
308 } else if (perf_data_file__is_pipe(session->file)) { 308 } else if (perf_data__is_pipe(session->data)) {
309 buffer->data = auxtrace_copy_data(buffer->size, session); 309 buffer->data = auxtrace_copy_data(buffer->size, session);
310 if (!buffer->data) 310 if (!buffer->data)
311 return -ENOMEM; 311 return -ENOMEM;
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 33b5e6cdf38c..d19e11b68de7 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -378,7 +378,7 @@ struct addr_filters {
378static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm) 378static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm)
379{ 379{
380 struct perf_event_mmap_page *pc = mm->userpg; 380 struct perf_event_mmap_page *pc = mm->userpg;
381 u64 head = ACCESS_ONCE(pc->aux_head); 381 u64 head = READ_ONCE(pc->aux_head);
382 382
383 /* Ensure all reads are done after we read the head */ 383 /* Ensure all reads are done after we read the head */
384 rmb(); 384 rmb();
@@ -389,7 +389,7 @@ static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
389{ 389{
390 struct perf_event_mmap_page *pc = mm->userpg; 390 struct perf_event_mmap_page *pc = mm->userpg;
391#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT) 391#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
392 u64 head = ACCESS_ONCE(pc->aux_head); 392 u64 head = READ_ONCE(pc->aux_head);
393#else 393#else
394 u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0); 394 u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0);
395#endif 395#endif
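
READ_ONCE() is the tools/include replacement for the removed ACCESS_ONCE(); the lockless-reader pairing stays the same as before, i.e. a single untorn load of the head followed by a read barrier, sketched here with an illustrative helper name (struct perf_event_mmap_page comes from linux/perf_event.h):

    static inline u64 read_aux_head(struct perf_event_mmap_page *pc)
    {
            u64 head = READ_ONCE(pc->aux_head);     /* single, untorn load */

            rmb();  /* order the head load before reads of the AUX data */
            return head;
    }
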
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 6031933d811c..082505d08d72 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -567,6 +567,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
567 call->ip = cursor_node->ip; 567 call->ip = cursor_node->ip;
568 call->ms.sym = cursor_node->sym; 568 call->ms.sym = cursor_node->sym;
569 call->ms.map = map__get(cursor_node->map); 569 call->ms.map = map__get(cursor_node->map);
570 call->srcline = cursor_node->srcline;
570 571
571 if (cursor_node->branch) { 572 if (cursor_node->branch) {
572 call->branch_count = 1; 573 call->branch_count = 1;
@@ -645,103 +646,120 @@ enum match_result {
645 MATCH_GT, 646 MATCH_GT,
646}; 647};
647 648
648static enum match_result match_chain_srcline(struct callchain_cursor_node *node, 649static enum match_result match_chain_strings(const char *left,
649 struct callchain_list *cnode) 650 const char *right)
650{ 651{
651 char *left = NULL;
652 char *right = NULL;
653 enum match_result ret = MATCH_EQ; 652 enum match_result ret = MATCH_EQ;
654 int cmp; 653 int cmp;
655 654
656 if (cnode->ms.map)
657 left = get_srcline(cnode->ms.map->dso,
658 map__rip_2objdump(cnode->ms.map, cnode->ip),
659 cnode->ms.sym, true, false);
660 if (node->map)
661 right = get_srcline(node->map->dso,
662 map__rip_2objdump(node->map, node->ip),
663 node->sym, true, false);
664
665 if (left && right) 655 if (left && right)
666 cmp = strcmp(left, right); 656 cmp = strcmp(left, right);
667 else if (!left && right) 657 else if (!left && right)
668 cmp = 1; 658 cmp = 1;
669 else if (left && !right) 659 else if (left && !right)
670 cmp = -1; 660 cmp = -1;
671 else if (cnode->ip == node->ip)
672 cmp = 0;
673 else 661 else
674 cmp = (cnode->ip < node->ip) ? -1 : 1; 662 return MATCH_ERROR;
675 663
676 if (cmp != 0) 664 if (cmp != 0)
677 ret = cmp < 0 ? MATCH_LT : MATCH_GT; 665 ret = cmp < 0 ? MATCH_LT : MATCH_GT;
678 666
679 free_srcline(left);
680 free_srcline(right);
681 return ret; 667 return ret;
682} 668}
683 669
670/*
671 * We need to always use relative addresses because we're aggregating
672 * callchains from multiple threads, i.e. different address spaces, so
673 * comparing absolute addresses makes no sense as a symbol in a DSO may end up
674 * in a different address when used in a different binary or even the same
675 * binary but with some sort of address randomization technique, thus we need
676 * to compare just relative addresses. -acme
677 */
678static enum match_result match_chain_dso_addresses(struct map *left_map, u64 left_ip,
679 struct map *right_map, u64 right_ip)
680{
681 struct dso *left_dso = left_map ? left_map->dso : NULL;
682 struct dso *right_dso = right_map ? right_map->dso : NULL;
683
684 if (left_dso != right_dso)
685 return left_dso < right_dso ? MATCH_LT : MATCH_GT;
686
687 if (left_ip != right_ip)
688 return left_ip < right_ip ? MATCH_LT : MATCH_GT;
689
690 return MATCH_EQ;
691}
692
684static enum match_result match_chain(struct callchain_cursor_node *node, 693static enum match_result match_chain(struct callchain_cursor_node *node,
685 struct callchain_list *cnode) 694 struct callchain_list *cnode)
686{ 695{
687 struct symbol *sym = node->sym; 696 enum match_result match = MATCH_ERROR;
688 u64 left, right;
689 struct dso *left_dso = NULL;
690 struct dso *right_dso = NULL;
691
692 if (callchain_param.key == CCKEY_SRCLINE) {
693 enum match_result match = match_chain_srcline(node, cnode);
694 697
698 switch (callchain_param.key) {
699 case CCKEY_SRCLINE:
700 match = match_chain_strings(cnode->srcline, node->srcline);
695 if (match != MATCH_ERROR) 701 if (match != MATCH_ERROR)
696 return match; 702 break;
697 } 703 /* otherwise fall-back to symbol-based comparison below */
698 704 __fallthrough;
699 if (cnode->ms.sym && sym && callchain_param.key == CCKEY_FUNCTION) { 705 case CCKEY_FUNCTION:
700 left = cnode->ms.sym->start; 706 if (node->sym && cnode->ms.sym) {
701 right = sym->start; 707 /*
702 left_dso = cnode->ms.map->dso; 708 * Compare inlined frames based on their symbol name
703 right_dso = node->map->dso; 709 * because different inlined frames will have the same
704 } else { 710 * symbol start. Otherwise do a faster comparison based
705 left = cnode->ip; 711 * on the symbol start address.
706 right = node->ip; 712 */
713 if (cnode->ms.sym->inlined || node->sym->inlined) {
714 match = match_chain_strings(cnode->ms.sym->name,
715 node->sym->name);
716 if (match != MATCH_ERROR)
717 break;
718 } else {
719 match = match_chain_dso_addresses(cnode->ms.map, cnode->ms.sym->start,
720 node->map, node->sym->start);
721 break;
722 }
723 }
724 /* otherwise fall-back to IP-based comparison below */
725 __fallthrough;
726 case CCKEY_ADDRESS:
727 default:
728 match = match_chain_dso_addresses(cnode->ms.map, cnode->ip, node->map, node->ip);
729 break;
707 } 730 }
708 731
709 if (left == right && left_dso == right_dso) { 732 if (match == MATCH_EQ && node->branch) {
710 if (node->branch) { 733 cnode->branch_count++;
711 cnode->branch_count++;
712 734
713 if (node->branch_from) { 735 if (node->branch_from) {
714 /* 736 /*
715 * It's "to" of a branch 737 * It's "to" of a branch
716 */ 738 */
717 cnode->brtype_stat.branch_to = true; 739 cnode->brtype_stat.branch_to = true;
718 740
719 if (node->branch_flags.predicted) 741 if (node->branch_flags.predicted)
720 cnode->predicted_count++; 742 cnode->predicted_count++;
721 743
722 if (node->branch_flags.abort) 744 if (node->branch_flags.abort)
723 cnode->abort_count++; 745 cnode->abort_count++;
724 746
725 branch_type_count(&cnode->brtype_stat, 747 branch_type_count(&cnode->brtype_stat,
726 &node->branch_flags, 748 &node->branch_flags,
727 node->branch_from, 749 node->branch_from,
728 node->ip); 750 node->ip);
729 } else { 751 } else {
730 /* 752 /*
731 * It's "from" of a branch 753 * It's "from" of a branch
732 */ 754 */
733 cnode->brtype_stat.branch_to = false; 755 cnode->brtype_stat.branch_to = false;
734 cnode->cycles_count += 756 cnode->cycles_count += node->branch_flags.cycles;
735 node->branch_flags.cycles; 757 cnode->iter_count += node->nr_loop_iter;
736 cnode->iter_count += node->nr_loop_iter; 758 cnode->iter_cycles += node->iter_cycles;
737 cnode->iter_cycles += node->iter_cycles;
738 }
739 } 759 }
740
741 return MATCH_EQ;
742 } 760 }
743 761
744 return left > right ? MATCH_GT : MATCH_LT; 762 return match;
745} 763}
746 764
747/* 765/*
@@ -970,7 +988,7 @@ merge_chain_branch(struct callchain_cursor *cursor,
970 list_for_each_entry_safe(list, next_list, &src->val, list) { 988 list_for_each_entry_safe(list, next_list, &src->val, list) {
971 callchain_cursor_append(cursor, list->ip, 989 callchain_cursor_append(cursor, list->ip,
972 list->ms.map, list->ms.sym, 990 list->ms.map, list->ms.sym,
973 false, NULL, 0, 0, 0); 991 false, NULL, 0, 0, 0, list->srcline);
974 list_del(&list->list); 992 list_del(&list->list);
975 map__zput(list->ms.map); 993 map__zput(list->ms.map);
976 free(list); 994 free(list);
@@ -1010,7 +1028,8 @@ int callchain_merge(struct callchain_cursor *cursor,
1010int callchain_cursor_append(struct callchain_cursor *cursor, 1028int callchain_cursor_append(struct callchain_cursor *cursor,
1011 u64 ip, struct map *map, struct symbol *sym, 1029 u64 ip, struct map *map, struct symbol *sym,
1012 bool branch, struct branch_flags *flags, 1030 bool branch, struct branch_flags *flags,
1013 int nr_loop_iter, u64 iter_cycles, u64 branch_from) 1031 int nr_loop_iter, u64 iter_cycles, u64 branch_from,
1032 const char *srcline)
1014{ 1033{
1015 struct callchain_cursor_node *node = *cursor->last; 1034 struct callchain_cursor_node *node = *cursor->last;
1016 1035
@@ -1029,6 +1048,7 @@ int callchain_cursor_append(struct callchain_cursor *cursor,
1029 node->branch = branch; 1048 node->branch = branch;
1030 node->nr_loop_iter = nr_loop_iter; 1049 node->nr_loop_iter = nr_loop_iter;
1031 node->iter_cycles = iter_cycles; 1050 node->iter_cycles = iter_cycles;
1051 node->srcline = srcline;
1032 1052
1033 if (flags) 1053 if (flags)
1034 memcpy(&node->branch_flags, flags, 1054 memcpy(&node->branch_flags, flags,
@@ -1071,10 +1091,8 @@ int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *
1071{ 1091{
1072 al->map = node->map; 1092 al->map = node->map;
1073 al->sym = node->sym; 1093 al->sym = node->sym;
1074 if (node->map) 1094 al->srcline = node->srcline;
1075 al->addr = node->map->map_ip(node->map, node->ip); 1095 al->addr = node->ip;
1076 else
1077 al->addr = node->ip;
1078 1096
1079 if (al->sym == NULL) { 1097 if (al->sym == NULL) {
1080 if (hide_unresolved) 1098 if (hide_unresolved)
@@ -1116,16 +1134,15 @@ char *callchain_list__sym_name(struct callchain_list *cl,
1116 int printed; 1134 int printed;
1117 1135
1118 if (cl->ms.sym) { 1136 if (cl->ms.sym) {
1119 if (show_srcline && cl->ms.map && !cl->srcline) 1137 const char *inlined = cl->ms.sym->inlined ? " (inlined)" : "";
1120 cl->srcline = get_srcline(cl->ms.map->dso, 1138
1121 map__rip_2objdump(cl->ms.map, 1139 if (show_srcline && cl->srcline)
1122 cl->ip), 1140 printed = scnprintf(bf, bfsize, "%s %s%s",
1123 cl->ms.sym, false, show_addr); 1141 cl->ms.sym->name, cl->srcline,
1124 if (cl->srcline) 1142 inlined);
1125 printed = scnprintf(bf, bfsize, "%s %s",
1126 cl->ms.sym->name, cl->srcline);
1127 else 1143 else
1128 printed = scnprintf(bf, bfsize, "%s", cl->ms.sym->name); 1144 printed = scnprintf(bf, bfsize, "%s%s",
1145 cl->ms.sym->name, inlined);
1129 } else 1146 } else
1130 printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip); 1147 printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip);
1131 1148
@@ -1533,7 +1550,7 @@ int callchain_cursor__copy(struct callchain_cursor *dst,
1533 node->branch, &node->branch_flags, 1550 node->branch, &node->branch_flags,
1534 node->nr_loop_iter, 1551 node->nr_loop_iter,
1535 node->iter_cycles, 1552 node->iter_cycles,
1536 node->branch_from); 1553 node->branch_from, node->srcline);
1537 if (rc) 1554 if (rc)
1538 break; 1555 break;
1539 1556
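
Callers now hand the source line to the cursor when appending a frame, instead of match_chain() resolving it with get_srcline() on every comparison. A sketch of the updated append call, using the usual addr_location fields as an example (NULL is fine when no srcline is known):

    /* branch = false, no branch flags, no loop-iteration info; the cursor
     * only stores the pointer, so the string must outlive the chain */
    callchain_cursor_append(cursor, al->addr, al->map, al->sym,
                            false, NULL, 0, 0, 0, al->srcline);
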
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index f967aa47d0a1..b79ef2478a57 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -122,7 +122,7 @@ struct callchain_list {
122 u64 iter_count; 122 u64 iter_count;
123 u64 iter_cycles; 123 u64 iter_cycles;
124 struct branch_type_stat brtype_stat; 124 struct branch_type_stat brtype_stat;
125 char *srcline; 125 const char *srcline;
126 struct list_head list; 126 struct list_head list;
127}; 127};
128 128
@@ -136,6 +136,7 @@ struct callchain_cursor_node {
136 u64 ip; 136 u64 ip;
137 struct map *map; 137 struct map *map;
138 struct symbol *sym; 138 struct symbol *sym;
139 const char *srcline;
139 bool branch; 140 bool branch;
140 struct branch_flags branch_flags; 141 struct branch_flags branch_flags;
141 u64 branch_from; 142 u64 branch_from;
@@ -202,7 +203,8 @@ static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
202int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip, 203int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
203 struct map *map, struct symbol *sym, 204 struct map *map, struct symbol *sym,
204 bool branch, struct branch_flags *flags, 205 bool branch, struct branch_flags *flags,
205 int nr_loop_iter, u64 iter_cycles, u64 branch_from); 206 int nr_loop_iter, u64 iter_cycles, u64 branch_from,
207 const char *srcline);
206 208
207/* Close a cursor writing session. Initialize for the reader */ 209/* Close a cursor writing session. Initialize for the reader */
208static inline void callchain_cursor_commit(struct callchain_cursor *cursor) 210static inline void callchain_cursor_commit(struct callchain_cursor *cursor)
diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c
index 8808570f8e9c..7798a2cc8a86 100644
--- a/tools/perf/util/comm.c
+++ b/tools/perf/util/comm.c
@@ -6,6 +6,7 @@
6#include <stdio.h> 6#include <stdio.h>
7#include <string.h> 7#include <string.h>
8#include <linux/refcount.h> 8#include <linux/refcount.h>
9#include "rwsem.h"
9 10
10struct comm_str { 11struct comm_str {
11 char *str; 12 char *str;
@@ -15,6 +16,7 @@ struct comm_str {
15 16
16/* Should perhaps be moved to struct machine */ 17/* Should perhaps be moved to struct machine */
17static struct rb_root comm_str_root; 18static struct rb_root comm_str_root;
19static struct rw_semaphore comm_str_lock = {.lock = PTHREAD_RWLOCK_INITIALIZER,};
18 20
19static struct comm_str *comm_str__get(struct comm_str *cs) 21static struct comm_str *comm_str__get(struct comm_str *cs)
20{ 22{
@@ -26,7 +28,9 @@ static struct comm_str *comm_str__get(struct comm_str *cs)
26static void comm_str__put(struct comm_str *cs) 28static void comm_str__put(struct comm_str *cs)
27{ 29{
28 if (cs && refcount_dec_and_test(&cs->refcnt)) { 30 if (cs && refcount_dec_and_test(&cs->refcnt)) {
31 down_write(&comm_str_lock);
29 rb_erase(&cs->rb_node, &comm_str_root); 32 rb_erase(&cs->rb_node, &comm_str_root);
33 up_write(&comm_str_lock);
30 zfree(&cs->str); 34 zfree(&cs->str);
31 free(cs); 35 free(cs);
32 } 36 }
@@ -51,7 +55,8 @@ static struct comm_str *comm_str__alloc(const char *str)
51 return cs; 55 return cs;
52} 56}
53 57
54static struct comm_str *comm_str__findnew(const char *str, struct rb_root *root) 58static
59struct comm_str *__comm_str__findnew(const char *str, struct rb_root *root)
55{ 60{
56 struct rb_node **p = &root->rb_node; 61 struct rb_node **p = &root->rb_node;
57 struct rb_node *parent = NULL; 62 struct rb_node *parent = NULL;
@@ -82,6 +87,17 @@ static struct comm_str *comm_str__findnew(const char *str, struct rb_root *root)
82 return new; 87 return new;
83} 88}
84 89
90static struct comm_str *comm_str__findnew(const char *str, struct rb_root *root)
91{
92 struct comm_str *cs;
93
94 down_write(&comm_str_lock);
95 cs = __comm_str__findnew(str, root);
96 up_write(&comm_str_lock);
97
98 return cs;
99}
100
85struct comm *comm__new(const char *str, u64 timestamp, bool exec) 101struct comm *comm__new(const char *str, u64 timestamp, bool exec)
86{ 102{
87 struct comm *comm = zalloc(sizeof(*comm)); 103 struct comm *comm = zalloc(sizeof(*comm));
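
down_read()/down_write() come from the new tools/perf/util/rwsem.h wrapper (rwsem.o was added to util/Build above). Judging by the PTHREAD_RWLOCK_INITIALIZER used for comm_str_lock, it is roughly the following thin shim over pthread rwlocks; the real header may differ in details:

    #include <pthread.h>

    struct rw_semaphore {
            pthread_rwlock_t lock;
    };

    static inline int down_read(struct rw_semaphore *sem)  { return pthread_rwlock_rdlock(&sem->lock); }
    static inline int up_read(struct rw_semaphore *sem)    { return pthread_rwlock_unlock(&sem->lock); }
    static inline int down_write(struct rw_semaphore *sem) { return pthread_rwlock_wrlock(&sem->lock); }
    static inline int up_write(struct rw_semaphore *sem)   { return pthread_rwlock_unlock(&sem->lock); }

Both the lookup (__comm_str__findnew()) and the final rb_erase() in comm_str__put() take the write side, since either path can modify the tree.
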
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 4b893c622236..84eb9393c7db 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -701,10 +701,7 @@ struct perf_config_set *perf_config_set__new(void)
701 701
702 if (set) { 702 if (set) {
703 INIT_LIST_HEAD(&set->sections); 703 INIT_LIST_HEAD(&set->sections);
704 if (perf_config_set__init(set) < 0) { 704 perf_config_set__init(set);
705 perf_config_set__delete(set);
706 set = NULL;
707 }
708 } 705 }
709 706
710 return set; 707 return set;
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index 2346cecb8ea2..5744c12641a5 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -1577,10 +1577,10 @@ int bt_convert__perf2ctf(const char *input, const char *path,
1577 struct perf_data_convert_opts *opts) 1577 struct perf_data_convert_opts *opts)
1578{ 1578{
1579 struct perf_session *session; 1579 struct perf_session *session;
1580 struct perf_data_file file = { 1580 struct perf_data data = {
1581 .path = input, 1581 .file.path = input,
1582 .mode = PERF_DATA_MODE_READ, 1582 .mode = PERF_DATA_MODE_READ,
1583 .force = opts->force, 1583 .force = opts->force,
1584 }; 1584 };
1585 struct convert c = { 1585 struct convert c = {
1586 .tool = { 1586 .tool = {
@@ -1619,7 +1619,7 @@ int bt_convert__perf2ctf(const char *input, const char *path,
1619 1619
1620 err = -1; 1620 err = -1;
1621 /* perf.data session */ 1621 /* perf.data session */
1622 session = perf_session__new(&file, 0, &c.tool); 1622 session = perf_session__new(&data, 0, &c.tool);
1623 if (!session) 1623 if (!session)
1624 goto free_writer; 1624 goto free_writer;
1625 1625
@@ -1650,7 +1650,7 @@ int bt_convert__perf2ctf(const char *input, const char *path,
1650 1650
1651 fprintf(stderr, 1651 fprintf(stderr,
1652 "[ perf data convert: Converted '%s' into CTF data '%s' ]\n", 1652 "[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
1653 file.path, path); 1653 data.file.path, path);
1654 1654
1655 fprintf(stderr, 1655 fprintf(stderr,
1656 "[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples", 1656 "[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index 79192758bdb3..48094fde0a68 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -4,6 +4,7 @@
4#include <sys/types.h> 4#include <sys/types.h>
5#include <sys/stat.h> 5#include <sys/stat.h>
6#include <errno.h> 6#include <errno.h>
7#include <fcntl.h>
7#include <unistd.h> 8#include <unistd.h>
8#include <string.h> 9#include <string.h>
9 10
@@ -21,56 +22,56 @@
21#endif 22#endif
22#endif 23#endif
23 24
24static bool check_pipe(struct perf_data_file *file) 25static bool check_pipe(struct perf_data *data)
25{ 26{
26 struct stat st; 27 struct stat st;
27 bool is_pipe = false; 28 bool is_pipe = false;
28 int fd = perf_data_file__is_read(file) ? 29 int fd = perf_data__is_read(data) ?
29 STDIN_FILENO : STDOUT_FILENO; 30 STDIN_FILENO : STDOUT_FILENO;
30 31
31 if (!file->path) { 32 if (!data->file.path) {
32 if (!fstat(fd, &st) && S_ISFIFO(st.st_mode)) 33 if (!fstat(fd, &st) && S_ISFIFO(st.st_mode))
33 is_pipe = true; 34 is_pipe = true;
34 } else { 35 } else {
35 if (!strcmp(file->path, "-")) 36 if (!strcmp(data->file.path, "-"))
36 is_pipe = true; 37 is_pipe = true;
37 } 38 }
38 39
39 if (is_pipe) 40 if (is_pipe)
40 file->fd = fd; 41 data->file.fd = fd;
41 42
42 return file->is_pipe = is_pipe; 43 return data->is_pipe = is_pipe;
43} 44}
44 45
45static int check_backup(struct perf_data_file *file) 46static int check_backup(struct perf_data *data)
46{ 47{
47 struct stat st; 48 struct stat st;
48 49
49 if (!stat(file->path, &st) && st.st_size) { 50 if (!stat(data->file.path, &st) && st.st_size) {
50 /* TODO check errors properly */ 51 /* TODO check errors properly */
51 char oldname[PATH_MAX]; 52 char oldname[PATH_MAX];
52 snprintf(oldname, sizeof(oldname), "%s.old", 53 snprintf(oldname, sizeof(oldname), "%s.old",
53 file->path); 54 data->file.path);
54 unlink(oldname); 55 unlink(oldname);
55 rename(file->path, oldname); 56 rename(data->file.path, oldname);
56 } 57 }
57 58
58 return 0; 59 return 0;
59} 60}
60 61
61static int open_file_read(struct perf_data_file *file) 62static int open_file_read(struct perf_data *data)
62{ 63{
63 struct stat st; 64 struct stat st;
64 int fd; 65 int fd;
65 char sbuf[STRERR_BUFSIZE]; 66 char sbuf[STRERR_BUFSIZE];
66 67
67 fd = open(file->path, O_RDONLY); 68 fd = open(data->file.path, O_RDONLY);
68 if (fd < 0) { 69 if (fd < 0) {
69 int err = errno; 70 int err = errno;
70 71
71 pr_err("failed to open %s: %s", file->path, 72 pr_err("failed to open %s: %s", data->file.path,
72 str_error_r(err, sbuf, sizeof(sbuf))); 73 str_error_r(err, sbuf, sizeof(sbuf)));
73 if (err == ENOENT && !strcmp(file->path, "perf.data")) 74 if (err == ENOENT && !strcmp(data->file.path, "perf.data"))
74 pr_err(" (try 'perf record' first)"); 75 pr_err(" (try 'perf record' first)");
75 pr_err("\n"); 76 pr_err("\n");
76 return -err; 77 return -err;
@@ -79,19 +80,19 @@ static int open_file_read(struct perf_data_file *file)
79 if (fstat(fd, &st) < 0) 80 if (fstat(fd, &st) < 0)
80 goto out_close; 81 goto out_close;
81 82
82 if (!file->force && st.st_uid && (st.st_uid != geteuid())) { 83 if (!data->force && st.st_uid && (st.st_uid != geteuid())) {
83 pr_err("File %s not owned by current user or root (use -f to override)\n", 84 pr_err("File %s not owned by current user or root (use -f to override)\n",
84 file->path); 85 data->file.path);
85 goto out_close; 86 goto out_close;
86 } 87 }
87 88
88 if (!st.st_size) { 89 if (!st.st_size) {
89 pr_info("zero-sized file (%s), nothing to do!\n", 90 pr_info("zero-sized data (%s), nothing to do!\n",
90 file->path); 91 data->file.path);
91 goto out_close; 92 goto out_close;
92 } 93 }
93 94
94 file->size = st.st_size; 95 data->size = st.st_size;
95 return fd; 96 return fd;
96 97
97 out_close: 98 out_close:
@@ -99,49 +100,49 @@ static int open_file_read(struct perf_data_file *file)
99 return -1; 100 return -1;
100} 101}
101 102
102static int open_file_write(struct perf_data_file *file) 103static int open_file_write(struct perf_data *data)
103{ 104{
104 int fd; 105 int fd;
105 char sbuf[STRERR_BUFSIZE]; 106 char sbuf[STRERR_BUFSIZE];
106 107
107 if (check_backup(file)) 108 if (check_backup(data))
108 return -1; 109 return -1;
109 110
110 fd = open(file->path, O_CREAT|O_RDWR|O_TRUNC|O_CLOEXEC, 111 fd = open(data->file.path, O_CREAT|O_RDWR|O_TRUNC|O_CLOEXEC,
111 S_IRUSR|S_IWUSR); 112 S_IRUSR|S_IWUSR);
112 113
113 if (fd < 0) 114 if (fd < 0)
114 pr_err("failed to open %s : %s\n", file->path, 115 pr_err("failed to open %s : %s\n", data->file.path,
115 str_error_r(errno, sbuf, sizeof(sbuf))); 116 str_error_r(errno, sbuf, sizeof(sbuf)));
116 117
117 return fd; 118 return fd;
118} 119}
119 120
120static int open_file(struct perf_data_file *file) 121static int open_file(struct perf_data *data)
121{ 122{
122 int fd; 123 int fd;
123 124
124 fd = perf_data_file__is_read(file) ? 125 fd = perf_data__is_read(data) ?
125 open_file_read(file) : open_file_write(file); 126 open_file_read(data) : open_file_write(data);
126 127
127 file->fd = fd; 128 data->file.fd = fd;
128 return fd < 0 ? -1 : 0; 129 return fd < 0 ? -1 : 0;
129} 130}
130 131
131int perf_data_file__open(struct perf_data_file *file) 132int perf_data__open(struct perf_data *data)
132{ 133{
133 if (check_pipe(file)) 134 if (check_pipe(data))
134 return 0; 135 return 0;
135 136
136 if (!file->path) 137 if (!data->file.path)
137 file->path = "perf.data"; 138 data->file.path = "perf.data";
138 139
139 return open_file(file); 140 return open_file(data);
140} 141}
141 142
142void perf_data_file__close(struct perf_data_file *file) 143void perf_data__close(struct perf_data *data)
143{ 144{
144 close(file->fd); 145 close(data->file.fd);
145} 146}
146 147
147ssize_t perf_data_file__write(struct perf_data_file *file, 148ssize_t perf_data_file__write(struct perf_data_file *file,
@@ -150,42 +151,48 @@ ssize_t perf_data_file__write(struct perf_data_file *file,
150 return writen(file->fd, buf, size); 151 return writen(file->fd, buf, size);
151} 152}
152 153
153int perf_data_file__switch(struct perf_data_file *file, 154ssize_t perf_data__write(struct perf_data *data,
155 void *buf, size_t size)
156{
157 return perf_data_file__write(&data->file, buf, size);
158}
159
160int perf_data__switch(struct perf_data *data,
154 const char *postfix, 161 const char *postfix,
155 size_t pos, bool at_exit) 162 size_t pos, bool at_exit)
156{ 163{
157 char *new_filepath; 164 char *new_filepath;
158 int ret; 165 int ret;
159 166
160 if (check_pipe(file)) 167 if (check_pipe(data))
161 return -EINVAL; 168 return -EINVAL;
162 if (perf_data_file__is_read(file)) 169 if (perf_data__is_read(data))
163 return -EINVAL; 170 return -EINVAL;
164 171
165 if (asprintf(&new_filepath, "%s.%s", file->path, postfix) < 0) 172 if (asprintf(&new_filepath, "%s.%s", data->file.path, postfix) < 0)
166 return -ENOMEM; 173 return -ENOMEM;
167 174
168 /* 175 /*
169 * Only fire a warning, don't return error, continue fill 176 * Only fire a warning, don't return error, continue fill
170 * original file. 177 * original file.
171 */ 178 */
172 if (rename(file->path, new_filepath)) 179 if (rename(data->file.path, new_filepath))
173 pr_warning("Failed to rename %s to %s\n", file->path, new_filepath); 180 pr_warning("Failed to rename %s to %s\n", data->file.path, new_filepath);
174 181
175 if (!at_exit) { 182 if (!at_exit) {
176 close(file->fd); 183 close(data->file.fd);
177 ret = perf_data_file__open(file); 184 ret = perf_data__open(data);
178 if (ret < 0) 185 if (ret < 0)
179 goto out; 186 goto out;
180 187
181 if (lseek(file->fd, pos, SEEK_SET) == (off_t)-1) { 188 if (lseek(data->file.fd, pos, SEEK_SET) == (off_t)-1) {
182 ret = -errno; 189 ret = -errno;
183 pr_debug("Failed to lseek to %zu: %s", 190 pr_debug("Failed to lseek to %zu: %s",
184 pos, strerror(errno)); 191 pos, strerror(errno));
185 goto out; 192 goto out;
186 } 193 }
187 } 194 }
188 ret = file->fd; 195 ret = data->file.fd;
189out: 196out:
190 free(new_filepath); 197 free(new_filepath);
191 return ret; 198 return ret;
diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
index 80241ba78101..4828f7feea89 100644
--- a/tools/perf/util/data.h
+++ b/tools/perf/util/data.h
@@ -10,51 +10,57 @@ enum perf_data_mode {
10}; 10};
11 11
12struct perf_data_file { 12struct perf_data_file {
13 const char *path; 13 const char *path;
14 int fd; 14 int fd;
15};
16
17struct perf_data {
18 struct perf_data_file file;
15 bool is_pipe; 19 bool is_pipe;
16 bool force; 20 bool force;
17 unsigned long size; 21 unsigned long size;
18 enum perf_data_mode mode; 22 enum perf_data_mode mode;
19}; 23};
20 24
21static inline bool perf_data_file__is_read(struct perf_data_file *file) 25static inline bool perf_data__is_read(struct perf_data *data)
22{ 26{
23 return file->mode == PERF_DATA_MODE_READ; 27 return data->mode == PERF_DATA_MODE_READ;
24} 28}
25 29
26static inline bool perf_data_file__is_write(struct perf_data_file *file) 30static inline bool perf_data__is_write(struct perf_data *data)
27{ 31{
28 return file->mode == PERF_DATA_MODE_WRITE; 32 return data->mode == PERF_DATA_MODE_WRITE;
29} 33}
30 34
31static inline int perf_data_file__is_pipe(struct perf_data_file *file) 35static inline int perf_data__is_pipe(struct perf_data *data)
32{ 36{
33 return file->is_pipe; 37 return data->is_pipe;
34} 38}
35 39
36static inline int perf_data_file__fd(struct perf_data_file *file) 40static inline int perf_data__fd(struct perf_data *data)
37{ 41{
38 return file->fd; 42 return data->file.fd;
39} 43}
40 44
41static inline unsigned long perf_data_file__size(struct perf_data_file *file) 45static inline unsigned long perf_data__size(struct perf_data *data)
42{ 46{
43 return file->size; 47 return data->size;
44} 48}
45 49
46int perf_data_file__open(struct perf_data_file *file); 50int perf_data__open(struct perf_data *data);
47void perf_data_file__close(struct perf_data_file *file); 51void perf_data__close(struct perf_data *data);
52ssize_t perf_data__write(struct perf_data *data,
53 void *buf, size_t size);
48ssize_t perf_data_file__write(struct perf_data_file *file, 54ssize_t perf_data_file__write(struct perf_data_file *file,
49 void *buf, size_t size); 55 void *buf, size_t size);
50/* 56/*
51 * If at_exit is set, only rename current perf.data to 57 * If at_exit is set, only rename current perf.data to
52 * perf.data.<postfix>, continue write on original file. 58 * perf.data.<postfix>, continue write on original data.
53 * Set at_exit when flushing the last output. 59 * Set at_exit when flushing the last output.
54 * 60 *
55 * Return value is fd of new output. 61 * Return value is fd of new output.
56 */ 62 */
57int perf_data_file__switch(struct perf_data_file *file, 63int perf_data__switch(struct perf_data *data,
58 const char *postfix, 64 const char *postfix,
59 size_t pos, bool at_exit); 65 size_t pos, bool at_exit);
60#endif /* __PERF_DATA_H */ 66#endif /* __PERF_DATA_H */
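
A usage sketch for the renamed structure, using only the members and helpers declared above (the path is illustrative and the snippet assumes it runs inside a function that can return an error):

    struct perf_data data = {
            .file  = { .path = "perf.data" },
            .mode  = PERF_DATA_MODE_READ,
            .force = false,
    };

    if (perf_data__open(&data) < 0)
            return -1;

    /* ... read the header and events through perf_data__fd(&data) ... */

    perf_data__close(&data);
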
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index dc8b53b6950e..f3a71db83947 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -112,50 +112,53 @@ int dump_printf(const char *fmt, ...)
112 return ret; 112 return ret;
113} 113}
114 114
115static void trace_event_printer(enum binary_printer_ops op, 115static int trace_event_printer(enum binary_printer_ops op,
116 unsigned int val, void *extra) 116 unsigned int val, void *extra, FILE *fp)
117{ 117{
118 const char *color = PERF_COLOR_BLUE; 118 const char *color = PERF_COLOR_BLUE;
119 union perf_event *event = (union perf_event *)extra; 119 union perf_event *event = (union perf_event *)extra;
120 unsigned char ch = (unsigned char)val; 120 unsigned char ch = (unsigned char)val;
121 int printed = 0;
121 122
122 switch (op) { 123 switch (op) {
123 case BINARY_PRINT_DATA_BEGIN: 124 case BINARY_PRINT_DATA_BEGIN:
124 printf("."); 125 printed += fprintf(fp, ".");
125 color_fprintf(stdout, color, "\n. ... raw event: size %d bytes\n", 126 printed += color_fprintf(fp, color, "\n. ... raw event: size %d bytes\n",
126 event->header.size); 127 event->header.size);
127 break; 128 break;
128 case BINARY_PRINT_LINE_BEGIN: 129 case BINARY_PRINT_LINE_BEGIN:
129 printf("."); 130 printed += fprintf(fp, ".");
130 break; 131 break;
131 case BINARY_PRINT_ADDR: 132 case BINARY_PRINT_ADDR:
132 color_fprintf(stdout, color, " %04x: ", val); 133 printed += color_fprintf(fp, color, " %04x: ", val);
133 break; 134 break;
134 case BINARY_PRINT_NUM_DATA: 135 case BINARY_PRINT_NUM_DATA:
135 color_fprintf(stdout, color, " %02x", val); 136 printed += color_fprintf(fp, color, " %02x", val);
136 break; 137 break;
137 case BINARY_PRINT_NUM_PAD: 138 case BINARY_PRINT_NUM_PAD:
138 color_fprintf(stdout, color, " "); 139 printed += color_fprintf(fp, color, " ");
139 break; 140 break;
140 case BINARY_PRINT_SEP: 141 case BINARY_PRINT_SEP:
141 color_fprintf(stdout, color, " "); 142 printed += color_fprintf(fp, color, " ");
142 break; 143 break;
143 case BINARY_PRINT_CHAR_DATA: 144 case BINARY_PRINT_CHAR_DATA:
144 color_fprintf(stdout, color, "%c", 145 printed += color_fprintf(fp, color, "%c",
145 isprint(ch) ? ch : '.'); 146 isprint(ch) ? ch : '.');
146 break; 147 break;
147 case BINARY_PRINT_CHAR_PAD: 148 case BINARY_PRINT_CHAR_PAD:
148 color_fprintf(stdout, color, " "); 149 printed += color_fprintf(fp, color, " ");
149 break; 150 break;
150 case BINARY_PRINT_LINE_END: 151 case BINARY_PRINT_LINE_END:
151 color_fprintf(stdout, color, "\n"); 152 printed += color_fprintf(fp, color, "\n");
152 break; 153 break;
153 case BINARY_PRINT_DATA_END: 154 case BINARY_PRINT_DATA_END:
154 printf("\n"); 155 printed += fprintf(fp, "\n");
155 break; 156 break;
156 default: 157 default:
157 break; 158 break;
158 } 159 }
160
161 return printed;
159} 162}
160 163
161void trace_event(union perf_event *event) 164void trace_event(union perf_event *event)
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 00c98c968cb1..d5b6f7f5baff 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -7,9 +7,11 @@
7#include <sys/stat.h> 7#include <sys/stat.h>
8#include <unistd.h> 8#include <unistd.h>
9#include <errno.h> 9#include <errno.h>
10#include <fcntl.h>
10#include "compress.h" 11#include "compress.h"
11#include "path.h" 12#include "path.h"
12#include "symbol.h" 13#include "symbol.h"
14#include "srcline.h"
13#include "dso.h" 15#include "dso.h"
14#include "machine.h" 16#include "machine.h"
15#include "auxtrace.h" 17#include "auxtrace.h"
@@ -1201,6 +1203,8 @@ struct dso *dso__new(const char *name)
1201 for (i = 0; i < MAP__NR_TYPES; ++i) 1203 for (i = 0; i < MAP__NR_TYPES; ++i)
1202 dso->symbols[i] = dso->symbol_names[i] = RB_ROOT; 1204 dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
1203 dso->data.cache = RB_ROOT; 1205 dso->data.cache = RB_ROOT;
1206 dso->inlined_nodes = RB_ROOT;
1207 dso->srclines = RB_ROOT;
1204 dso->data.fd = -1; 1208 dso->data.fd = -1;
1205 dso->data.status = DSO_DATA_STATUS_UNKNOWN; 1209 dso->data.status = DSO_DATA_STATUS_UNKNOWN;
1206 dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND; 1210 dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
@@ -1232,6 +1236,10 @@ void dso__delete(struct dso *dso)
1232 if (!RB_EMPTY_NODE(&dso->rb_node)) 1236 if (!RB_EMPTY_NODE(&dso->rb_node))
1233 pr_err("DSO %s is still in rbtree when being deleted!\n", 1237 pr_err("DSO %s is still in rbtree when being deleted!\n",
1234 dso->long_name); 1238 dso->long_name);
1239
1240 /* free inlines first, as they reference symbols */
1241 inlines__tree_delete(&dso->inlined_nodes);
1242 srcline__tree_delete(&dso->srclines);
1235 for (i = 0; i < MAP__NR_TYPES; ++i) 1243 for (i = 0; i < MAP__NR_TYPES; ++i)
1236 symbols__delete(&dso->symbols[i]); 1244 symbols__delete(&dso->symbols[i]);
1237 1245
@@ -1366,9 +1374,9 @@ void __dsos__add(struct dsos *dsos, struct dso *dso)
1366 1374
1367void dsos__add(struct dsos *dsos, struct dso *dso) 1375void dsos__add(struct dsos *dsos, struct dso *dso)
1368{ 1376{
1369 pthread_rwlock_wrlock(&dsos->lock); 1377 down_write(&dsos->lock);
1370 __dsos__add(dsos, dso); 1378 __dsos__add(dsos, dso);
1371 pthread_rwlock_unlock(&dsos->lock); 1379 up_write(&dsos->lock);
1372} 1380}
1373 1381
1374struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short) 1382struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
@@ -1387,9 +1395,9 @@ struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
1387struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short) 1395struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
1388{ 1396{
1389 struct dso *dso; 1397 struct dso *dso;
1390 pthread_rwlock_rdlock(&dsos->lock); 1398 down_read(&dsos->lock);
1391 dso = __dsos__find(dsos, name, cmp_short); 1399 dso = __dsos__find(dsos, name, cmp_short);
1392 pthread_rwlock_unlock(&dsos->lock); 1400 up_read(&dsos->lock);
1393 return dso; 1401 return dso;
1394} 1402}
1395 1403
@@ -1416,9 +1424,9 @@ struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
1416struct dso *dsos__findnew(struct dsos *dsos, const char *name) 1424struct dso *dsos__findnew(struct dsos *dsos, const char *name)
1417{ 1425{
1418 struct dso *dso; 1426 struct dso *dso;
1419 pthread_rwlock_wrlock(&dsos->lock); 1427 down_write(&dsos->lock);
1420 dso = dso__get(__dsos__findnew(dsos, name)); 1428 dso = dso__get(__dsos__findnew(dsos, name));
1421 pthread_rwlock_unlock(&dsos->lock); 1429 up_write(&dsos->lock);
1422 return dso; 1430 return dso;
1423} 1431}
1424 1432
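
The two new rb_roots give each DSO a cache for resolved source lines and inline nodes, torn down in one place in dso__delete() above. A hypothetical lookup-or-compute pattern for the srcline side; the *_tree_find()/*_tree_insert() helper names are assumptions, only the *_tree_delete() calls are visible in this diff:

    char *srcline = srcline__tree_find(&dso->srclines, addr);       /* assumed helper */

    if (!srcline) {
            srcline = do_resolve_srcline(dso, addr);                 /* stand-in for the real work */
            srcline__tree_insert(&dso->srclines, addr, srcline);     /* assumed helper */
    }
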
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 926ff2e7f668..c229dbe0277a 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -7,7 +7,7 @@
7#include <linux/rbtree.h> 7#include <linux/rbtree.h>
8#include <sys/types.h> 8#include <sys/types.h>
9#include <stdbool.h> 9#include <stdbool.h>
10#include <pthread.h> 10#include "rwsem.h"
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/bitops.h> 12#include <linux/bitops.h>
13#include "map.h" 13#include "map.h"
@@ -130,7 +130,7 @@ struct dso_cache {
130struct dsos { 130struct dsos {
131 struct list_head head; 131 struct list_head head;
132 struct rb_root root; /* rbtree root sorted by long name */ 132 struct rb_root root; /* rbtree root sorted by long name */
133 pthread_rwlock_t lock; 133 struct rw_semaphore lock;
134}; 134};
135 135
136struct auxtrace_cache; 136struct auxtrace_cache;
@@ -142,6 +142,8 @@ struct dso {
142 struct rb_root *root; /* root of rbtree that rb_node is in */ 142 struct rb_root *root; /* root of rbtree that rb_node is in */
143 struct rb_root symbols[MAP__NR_TYPES]; 143 struct rb_root symbols[MAP__NR_TYPES];
144 struct rb_root symbol_names[MAP__NR_TYPES]; 144 struct rb_root symbol_names[MAP__NR_TYPES];
145 struct rb_root inlined_nodes;
146 struct rb_root srclines;
145 struct { 147 struct {
146 u64 addr; 148 u64 addr;
147 struct symbol *symbol; 149 struct symbol *symbol;
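The dso changes above replace the pthread_rwlock_t guarding struct dsos with perf's own struct rw_semaphore from "rwsem.h", and add two per-DSO rb_root caches, inlined_nodes and srclines, which the machine.c hunks later in this diff populate from the callchain code and which dso__delete() now tears down before the symbol trees. The rwsem wrapper itself is not part of this diff; given the init_rwsem()/down_read()/down_write()/up_*()/exit_rwsem() calls used throughout the patch, a plausible shape -- an assumption, not the actual header -- is a thin layer over pthread_rwlock_t that keeps the kernel's naming so the call sites read like kernel code:

#include <pthread.h>

/* Sketch of a kernel-style rwsem wrapper over pthread_rwlock_t;
 * the real tools/perf/util/rwsem.h may differ. */
struct rw_semaphore {
	pthread_rwlock_t lock;
};

static inline int init_rwsem(struct rw_semaphore *sem)
{
	return pthread_rwlock_init(&sem->lock, NULL);
}

static inline int exit_rwsem(struct rw_semaphore *sem)
{
	return pthread_rwlock_destroy(&sem->lock);
}

static inline int down_read(struct rw_semaphore *sem)
{
	return pthread_rwlock_rdlock(&sem->lock);
}

static inline int up_read(struct rw_semaphore *sem)
{
	return pthread_rwlock_unlock(&sem->lock);
}

static inline int down_write(struct rw_semaphore *sem)
{
	return pthread_rwlock_wrlock(&sem->lock);
}

static inline int up_write(struct rw_semaphore *sem)
{
	return pthread_rwlock_unlock(&sem->lock);
}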
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index fc690fecbfd6..97a8ef9980db 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -1,6 +1,7 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include <dirent.h> 2#include <dirent.h>
3#include <errno.h> 3#include <errno.h>
4#include <fcntl.h>
4#include <inttypes.h> 5#include <inttypes.h>
5#include <linux/kernel.h> 6#include <linux/kernel.h>
6#include <linux/types.h> 7#include <linux/types.h>
@@ -678,21 +679,21 @@ out:
678 return err; 679 return err;
679} 680}
680 681
681int perf_event__synthesize_threads(struct perf_tool *tool, 682static int __perf_event__synthesize_threads(struct perf_tool *tool,
682 perf_event__handler_t process, 683 perf_event__handler_t process,
683 struct machine *machine, 684 struct machine *machine,
684 bool mmap_data, 685 bool mmap_data,
685 unsigned int proc_map_timeout) 686 unsigned int proc_map_timeout,
687 struct dirent **dirent,
688 int start,
689 int num)
686{ 690{
687 DIR *proc;
688 char proc_path[PATH_MAX];
689 struct dirent *dirent;
690 union perf_event *comm_event, *mmap_event, *fork_event; 691 union perf_event *comm_event, *mmap_event, *fork_event;
691 union perf_event *namespaces_event; 692 union perf_event *namespaces_event;
692 int err = -1; 693 int err = -1;
693 694 char *end;
694 if (machine__is_default_guest(machine)) 695 pid_t pid;
695 return 0; 696 int i;
696 697
697 comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); 698 comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
698 if (comm_event == NULL) 699 if (comm_event == NULL)
@@ -712,31 +713,25 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
712 if (namespaces_event == NULL) 713 if (namespaces_event == NULL)
713 goto out_free_fork; 714 goto out_free_fork;
714 715
715 snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir); 716 for (i = start; i < start + num; i++) {
716 proc = opendir(proc_path); 717 if (!isdigit(dirent[i]->d_name[0]))
717 718 continue;
718 if (proc == NULL)
719 goto out_free_namespaces;
720
721 while ((dirent = readdir(proc)) != NULL) {
722 char *end;
723 pid_t pid = strtol(dirent->d_name, &end, 10);
724 719
725 if (*end) /* only interested in proper numerical dirents */ 720 pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
721 /* only interested in proper numerical dirents */
722 if (*end)
726 continue; 723 continue;
727 /* 724 /*
728 * We may race with exiting thread, so don't stop just because 725 * We may race with exiting thread, so don't stop just because
729 * one thread couldn't be synthesized. 726 * one thread couldn't be synthesized.
730 */ 727 */
731 __event__synthesize_thread(comm_event, mmap_event, fork_event, 728 __event__synthesize_thread(comm_event, mmap_event, fork_event,
732 namespaces_event, pid, 1, process, 729 namespaces_event, pid, 1, process,
733 tool, machine, mmap_data, 730 tool, machine, mmap_data,
734 proc_map_timeout); 731 proc_map_timeout);
735 } 732 }
736
737 err = 0; 733 err = 0;
738 closedir(proc); 734
739out_free_namespaces:
740 free(namespaces_event); 735 free(namespaces_event);
741out_free_fork: 736out_free_fork:
742 free(fork_event); 737 free(fork_event);
@@ -748,6 +743,118 @@ out:
748 return err; 743 return err;
749} 744}
750 745
746struct synthesize_threads_arg {
747 struct perf_tool *tool;
748 perf_event__handler_t process;
749 struct machine *machine;
750 bool mmap_data;
751 unsigned int proc_map_timeout;
752 struct dirent **dirent;
753 int num;
754 int start;
755};
756
757static void *synthesize_threads_worker(void *arg)
758{
759 struct synthesize_threads_arg *args = arg;
760
761 __perf_event__synthesize_threads(args->tool, args->process,
762 args->machine, args->mmap_data,
763 args->proc_map_timeout, args->dirent,
764 args->start, args->num);
765 return NULL;
766}
767
768int perf_event__synthesize_threads(struct perf_tool *tool,
769 perf_event__handler_t process,
770 struct machine *machine,
771 bool mmap_data,
772 unsigned int proc_map_timeout,
773 unsigned int nr_threads_synthesize)
774{
775 struct synthesize_threads_arg *args = NULL;
776 pthread_t *synthesize_threads = NULL;
777 char proc_path[PATH_MAX];
778 struct dirent **dirent;
779 int num_per_thread;
780 int m, n, i, j;
781 int thread_nr;
782 int base = 0;
783 int err = -1;
784
785
786 if (machine__is_default_guest(machine))
787 return 0;
788
789 snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
790 n = scandir(proc_path, &dirent, 0, alphasort);
791 if (n < 0)
792 return err;
793
794 if (nr_threads_synthesize == UINT_MAX)
795 thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
796 else
797 thread_nr = nr_threads_synthesize;
798
799 if (thread_nr <= 1) {
800 err = __perf_event__synthesize_threads(tool, process,
801 machine, mmap_data,
802 proc_map_timeout,
803 dirent, base, n);
804 goto free_dirent;
805 }
806 if (thread_nr > n)
807 thread_nr = n;
808
809 synthesize_threads = calloc(sizeof(pthread_t), thread_nr);
810 if (synthesize_threads == NULL)
811 goto free_dirent;
812
813 args = calloc(sizeof(*args), thread_nr);
814 if (args == NULL)
815 goto free_threads;
816
817 num_per_thread = n / thread_nr;
818 m = n % thread_nr;
819 for (i = 0; i < thread_nr; i++) {
820 args[i].tool = tool;
821 args[i].process = process;
822 args[i].machine = machine;
823 args[i].mmap_data = mmap_data;
824 args[i].proc_map_timeout = proc_map_timeout;
825 args[i].dirent = dirent;
826 }
827 for (i = 0; i < m; i++) {
828 args[i].num = num_per_thread + 1;
829 args[i].start = i * args[i].num;
830 }
831 if (i != 0)
832 base = args[i-1].start + args[i-1].num;
833 for (j = i; j < thread_nr; j++) {
834 args[j].num = num_per_thread;
835 args[j].start = base + (j - i) * args[i].num;
836 }
837
838 for (i = 0; i < thread_nr; i++) {
839 if (pthread_create(&synthesize_threads[i], NULL,
840 synthesize_threads_worker, &args[i]))
841 goto out_join;
842 }
843 err = 0;
844out_join:
845 for (i = 0; i < thread_nr; i++)
846 pthread_join(synthesize_threads[i], NULL);
847 free(args);
848free_threads:
849 free(synthesize_threads);
850free_dirent:
851 for (i = 0; i < n; i++)
852 free(dirent[i]);
853 free(dirent);
854
855 return err;
856}
857
751struct process_symbol_args { 858struct process_symbol_args {
752 const char *name; 859 const char *name;
753 u64 start; 860 u64 start;
@@ -1498,6 +1605,7 @@ int machine__resolve(struct machine *machine, struct addr_location *al,
1498 al->sym = NULL; 1605 al->sym = NULL;
1499 al->cpu = sample->cpu; 1606 al->cpu = sample->cpu;
1500 al->socket = -1; 1607 al->socket = -1;
1608 al->srcline = NULL;
1501 1609
1502 if (al->cpu >= 0) { 1610 if (al->cpu >= 0) {
1503 struct perf_env *env = machine->env; 1611 struct perf_env *env = machine->env;
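perf_event__synthesize_threads() is split in two above: __perf_event__synthesize_threads() walks one slice of a scandir()-ed /proc listing, and the public entry point partitions the dirent array across nr_threads_synthesize pthreads (one per online CPU when UINT_MAX is passed, capped at the number of entries, and falling back to the old single-threaded walk for a count of one or less) and joins them. The first n % thread_nr workers take one extra entry so the slices cover the whole array. A self-contained sketch of that partitioning arithmetic, with hypothetical names:

#include <stdio.h>

/* Slice [*start, *start + *num) of n entries handled by worker i out of
 * thread_nr; mirrors the remainder distribution in
 * perf_event__synthesize_threads() above. */
static void worker_slice(int n, int thread_nr, int i, int *start, int *num)
{
	int per = n / thread_nr;
	int rem = n % thread_nr;

	*num   = per + (i < rem ? 1 : 0);
	*start = i * per + (i < rem ? i : rem);
}

int main(void)
{
	int start, num, i;

	for (i = 0; i < 4; i++) {	/* e.g. 10 /proc entries, 4 workers */
		worker_slice(10, 4, i, &start, &num);
		printf("worker %d: start=%d num=%d\n", i, start, num);
	}
	return 0;
}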
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 5524ee69279c..1ae95efbfb95 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -681,7 +681,8 @@ int perf_event__synthesize_cpu_map(struct perf_tool *tool,
681int perf_event__synthesize_threads(struct perf_tool *tool, 681int perf_event__synthesize_threads(struct perf_tool *tool,
682 perf_event__handler_t process, 682 perf_event__handler_t process,
683 struct machine *machine, bool mmap_data, 683 struct machine *machine, bool mmap_data,
684 unsigned int proc_map_timeout); 684 unsigned int proc_map_timeout,
685 unsigned int nr_threads_synthesize);
685int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, 686int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
686 perf_event__handler_t process, 687 perf_event__handler_t process,
687 struct machine *machine); 688 struct machine *machine);
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 6a0d7ffbeba0..b62e523a7035 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -33,9 +33,6 @@
33#include <linux/log2.h> 33#include <linux/log2.h>
34#include <linux/err.h> 34#include <linux/err.h>
35 35
36static void perf_mmap__munmap(struct perf_mmap *map);
37static void perf_mmap__put(struct perf_mmap *map);
38
39#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) 36#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
40#define SID(e, x, y) xyarray__entry(e->sample_id, x, y) 37#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
41 38
@@ -260,7 +257,7 @@ int perf_evlist__add_dummy(struct perf_evlist *evlist)
260 .config = PERF_COUNT_SW_DUMMY, 257 .config = PERF_COUNT_SW_DUMMY,
261 .size = sizeof(attr), /* to capture ABI version */ 258 .size = sizeof(attr), /* to capture ABI version */
262 }; 259 };
263 struct perf_evsel *evsel = perf_evsel__new(&attr); 260 struct perf_evsel *evsel = perf_evsel__new_idx(&attr, evlist->nr_entries);
264 261
265 if (evsel == NULL) 262 if (evsel == NULL)
266 return -ENOMEM; 263 return -ENOMEM;
@@ -704,129 +701,6 @@ static int perf_evlist__resume(struct perf_evlist *evlist)
704 return perf_evlist__set_paused(evlist, false); 701 return perf_evlist__set_paused(evlist, false);
705} 702}
706 703
707/* When check_messup is true, 'end' must points to a good entry */
708static union perf_event *
709perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
710 u64 end, u64 *prev)
711{
712 unsigned char *data = md->base + page_size;
713 union perf_event *event = NULL;
714 int diff = end - start;
715
716 if (check_messup) {
717 /*
718 * If we're further behind than half the buffer, there's a chance
719 * the writer will bite our tail and mess up the samples under us.
720 *
721 * If we somehow ended up ahead of the 'end', we got messed up.
722 *
723 * In either case, truncate and restart at 'end'.
724 */
725 if (diff > md->mask / 2 || diff < 0) {
726 fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
727
728 /*
729 * 'end' points to a known good entry, start there.
730 */
731 start = end;
732 diff = 0;
733 }
734 }
735
736 if (diff >= (int)sizeof(event->header)) {
737 size_t size;
738
739 event = (union perf_event *)&data[start & md->mask];
740 size = event->header.size;
741
742 if (size < sizeof(event->header) || diff < (int)size) {
743 event = NULL;
744 goto broken_event;
745 }
746
747 /*
748 * Event straddles the mmap boundary -- header should always
749 * be inside due to u64 alignment of output.
750 */
751 if ((start & md->mask) + size != ((start + size) & md->mask)) {
752 unsigned int offset = start;
753 unsigned int len = min(sizeof(*event), size), cpy;
754 void *dst = md->event_copy;
755
756 do {
757 cpy = min(md->mask + 1 - (offset & md->mask), len);
758 memcpy(dst, &data[offset & md->mask], cpy);
759 offset += cpy;
760 dst += cpy;
761 len -= cpy;
762 } while (len);
763
764 event = (union perf_event *) md->event_copy;
765 }
766
767 start += size;
768 }
769
770broken_event:
771 if (prev)
772 *prev = start;
773
774 return event;
775}
776
777union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup)
778{
779 u64 head;
780 u64 old = md->prev;
781
782 /*
783 * Check if event was unmapped due to a POLLHUP/POLLERR.
784 */
785 if (!refcount_read(&md->refcnt))
786 return NULL;
787
788 head = perf_mmap__read_head(md);
789
790 return perf_mmap__read(md, check_messup, old, head, &md->prev);
791}
792
793union perf_event *
794perf_mmap__read_backward(struct perf_mmap *md)
795{
796 u64 head, end;
797 u64 start = md->prev;
798
799 /*
800 * Check if event was unmapped due to a POLLHUP/POLLERR.
801 */
802 if (!refcount_read(&md->refcnt))
803 return NULL;
804
805 head = perf_mmap__read_head(md);
806 if (!head)
807 return NULL;
808
809 /*
810 * 'head' pointer starts from 0. Kernel minus sizeof(record) form
811 * it each time when kernel writes to it, so in fact 'head' is
812 * negative. 'end' pointer is made manually by adding the size of
813 * the ring buffer to 'head' pointer, means the validate data can
814 * read is the whole ring buffer. If 'end' is positive, the ring
815 * buffer has not fully filled, so we must adjust 'end' to 0.
816 *
817 * However, since both 'head' and 'end' is unsigned, we can't
818 * simply compare 'end' against 0. Here we compare '-head' and
819 * the size of the ring buffer, where -head is the number of bytes
820 * kernel write to the ring buffer.
821 */
822 if (-head < (u64)(md->mask + 1))
823 end = 0;
824 else
825 end = head + md->mask + 1;
826
827 return perf_mmap__read(md, false, start, end, &md->prev);
828}
829
830union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx) 704union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
831{ 705{
832 struct perf_mmap *md = &evlist->mmap[idx]; 706 struct perf_mmap *md = &evlist->mmap[idx];
@@ -857,96 +731,16 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
857 return perf_evlist__mmap_read_forward(evlist, idx); 731 return perf_evlist__mmap_read_forward(evlist, idx);
858} 732}
859 733
860void perf_mmap__read_catchup(struct perf_mmap *md)
861{
862 u64 head;
863
864 if (!refcount_read(&md->refcnt))
865 return;
866
867 head = perf_mmap__read_head(md);
868 md->prev = head;
869}
870
871void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx) 734void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
872{ 735{
873 perf_mmap__read_catchup(&evlist->mmap[idx]); 736 perf_mmap__read_catchup(&evlist->mmap[idx]);
874} 737}
875 738
876static bool perf_mmap__empty(struct perf_mmap *md)
877{
878 return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
879}
880
881static void perf_mmap__get(struct perf_mmap *map)
882{
883 refcount_inc(&map->refcnt);
884}
885
886static void perf_mmap__put(struct perf_mmap *md)
887{
888 BUG_ON(md->base && refcount_read(&md->refcnt) == 0);
889
890 if (refcount_dec_and_test(&md->refcnt))
891 perf_mmap__munmap(md);
892}
893
894void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
895{
896 if (!overwrite) {
897 u64 old = md->prev;
898
899 perf_mmap__write_tail(md, old);
900 }
901
902 if (refcount_read(&md->refcnt) == 1 && perf_mmap__empty(md))
903 perf_mmap__put(md);
904}
905
906void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx) 739void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
907{ 740{
908 perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite); 741 perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
909} 742}
910 743
911int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
912 struct auxtrace_mmap_params *mp __maybe_unused,
913 void *userpg __maybe_unused,
914 int fd __maybe_unused)
915{
916 return 0;
917}
918
919void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
920{
921}
922
923void __weak auxtrace_mmap_params__init(
924 struct auxtrace_mmap_params *mp __maybe_unused,
925 off_t auxtrace_offset __maybe_unused,
926 unsigned int auxtrace_pages __maybe_unused,
927 bool auxtrace_overwrite __maybe_unused)
928{
929}
930
931void __weak auxtrace_mmap_params__set_idx(
932 struct auxtrace_mmap_params *mp __maybe_unused,
933 struct perf_evlist *evlist __maybe_unused,
934 int idx __maybe_unused,
935 bool per_cpu __maybe_unused)
936{
937}
938
939static void perf_mmap__munmap(struct perf_mmap *map)
940{
941 if (map->base != NULL) {
942 munmap(map->base, perf_mmap__mmap_len(map));
943 map->base = NULL;
944 map->fd = -1;
945 refcount_set(&map->refcnt, 0);
946 }
947 auxtrace_mmap__munmap(&map->auxtrace_mmap);
948}
949
950static void perf_evlist__munmap_nofree(struct perf_evlist *evlist) 744static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
951{ 745{
952 int i; 746 int i;
@@ -995,48 +789,6 @@ static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
995 return map; 789 return map;
996} 790}
997 791
998struct mmap_params {
999 int prot;
1000 int mask;
1001 struct auxtrace_mmap_params auxtrace_mp;
1002};
1003
1004static int perf_mmap__mmap(struct perf_mmap *map,
1005 struct mmap_params *mp, int fd)
1006{
1007 /*
1008 * The last one will be done at perf_evlist__mmap_consume(), so that we
1009 * make sure we don't prevent tools from consuming every last event in
1010 * the ring buffer.
1011 *
1012 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
1013 * anymore, but the last events for it are still in the ring buffer,
1014 * waiting to be consumed.
1015 *
1016 * Tools can chose to ignore this at their own discretion, but the
1017 * evlist layer can't just drop it when filtering events in
1018 * perf_evlist__filter_pollfd().
1019 */
1020 refcount_set(&map->refcnt, 2);
1021 map->prev = 0;
1022 map->mask = mp->mask;
1023 map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
1024 MAP_SHARED, fd, 0);
1025 if (map->base == MAP_FAILED) {
1026 pr_debug2("failed to mmap perf event ring buffer, error %d\n",
1027 errno);
1028 map->base = NULL;
1029 return -1;
1030 }
1031 map->fd = fd;
1032
1033 if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
1034 &mp->auxtrace_mp, map->base, fd))
1035 return -1;
1036
1037 return 0;
1038}
1039
1040static bool 792static bool
1041perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused, 793perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
1042 struct perf_evsel *evsel) 794 struct perf_evsel *evsel)
@@ -2034,3 +1786,15 @@ void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
2034state_err: 1786state_err:
2035 return; 1787 return;
2036} 1788}
1789
1790bool perf_evlist__exclude_kernel(struct perf_evlist *evlist)
1791{
1792 struct perf_evsel *evsel;
1793
1794 evlist__for_each_entry(evlist, evsel) {
1795 if (!evsel->attr.exclude_kernel)
1796 return false;
1797 }
1798
1799 return true;
1800}
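Most of this evlist.c hunk removes the perf_mmap ring-buffer machinery (read, read_backward, catchup, get/put, consume, mmap/munmap); evlist.h below starts including "mmap.h", so these helpers presumably continue to live in a dedicated mmap layer rather than going away. The two functional changes are that the dummy event now receives a proper index via perf_evsel__new_idx(&attr, evlist->nr_entries), and that a new perf_evlist__exclude_kernel() helper reports whether every event in the list sets attr.exclude_kernel. For reference, the subtlest part of the removed code is copying an event that straddles the end of the ring buffer; a standalone sketch of that wrap-around copy (buffer size is mask + 1, a power of two):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Copy len bytes starting at logical position offset out of a
 * power-of-two ring buffer data[] of size mask + 1, splitting the copy
 * where it wraps -- the same loop the removed perf_mmap__read() used for
 * events crossing the mmap boundary. */
static void ring_copy(void *dest, const unsigned char *data,
		      uint64_t offset, size_t len, uint64_t mask)
{
	unsigned char *dst = dest;

	while (len) {
		size_t cpy = mask + 1 - (offset & mask);

		if (cpy > len)
			cpy = len;
		memcpy(dst, &data[offset & mask], cpy);
		offset += cpy;
		dst    += cpy;
		len    -= cpy;
	}
}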
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index c1750a400bb7..491f69542920 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -7,12 +7,13 @@
7#include <linux/refcount.h> 7#include <linux/refcount.h>
8#include <linux/list.h> 8#include <linux/list.h>
9#include <api/fd/array.h> 9#include <api/fd/array.h>
10#include <fcntl.h>
10#include <stdio.h> 11#include <stdio.h>
11#include "../perf.h" 12#include "../perf.h"
12#include "event.h" 13#include "event.h"
13#include "evsel.h" 14#include "evsel.h"
15#include "mmap.h"
14#include "util.h" 16#include "util.h"
15#include "auxtrace.h"
16#include <signal.h> 17#include <signal.h>
17#include <unistd.h> 18#include <unistd.h>
18 19
@@ -24,55 +25,6 @@ struct record_opts;
24#define PERF_EVLIST__HLIST_BITS 8 25#define PERF_EVLIST__HLIST_BITS 8
25#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS) 26#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
26 27
27/**
28 * struct perf_mmap - perf's ring buffer mmap details
29 *
30 * @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
31 */
32struct perf_mmap {
33 void *base;
34 int mask;
35 int fd;
36 refcount_t refcnt;
37 u64 prev;
38 struct auxtrace_mmap auxtrace_mmap;
39 char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
40};
41
42static inline size_t
43perf_mmap__mmap_len(struct perf_mmap *map)
44{
45 return map->mask + 1 + page_size;
46}
47
48/*
49 * State machine of bkw_mmap_state:
50 *
51 * .________________(forbid)_____________.
52 * | V
53 * NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
54 * ^ ^ | ^ |
55 * | |__(forbid)____/ |___(forbid)___/|
56 * | |
57 * \_________________(3)_______________/
58 *
59 * NOTREADY : Backward ring buffers are not ready
60 * RUNNING : Backward ring buffers are recording
61 * DATA_PENDING : We are required to collect data from backward ring buffers
62 * EMPTY : We have collected data from backward ring buffers.
63 *
64 * (0): Setup backward ring buffer
65 * (1): Pause ring buffers for reading
66 * (2): Read from ring buffers
67 * (3): Resume ring buffers for recording
68 */
69enum bkw_mmap_state {
70 BKW_MMAP_NOTREADY,
71 BKW_MMAP_RUNNING,
72 BKW_MMAP_DATA_PENDING,
73 BKW_MMAP_EMPTY,
74};
75
76struct perf_evlist { 28struct perf_evlist {
77 struct list_head entries; 29 struct list_head entries;
78 struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; 30 struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
@@ -177,12 +129,6 @@ struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
177 129
178void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, enum bkw_mmap_state state); 130void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, enum bkw_mmap_state state);
179 131
180union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup);
181union perf_event *perf_mmap__read_backward(struct perf_mmap *map);
182
183void perf_mmap__read_catchup(struct perf_mmap *md);
184void perf_mmap__consume(struct perf_mmap *md, bool overwrite);
185
186union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx); 132union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);
187 133
188union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, 134union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist,
@@ -286,25 +232,6 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);
286int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size); 232int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
287int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size); 233int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size);
288 234
289static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
290{
291 struct perf_event_mmap_page *pc = mm->base;
292 u64 head = ACCESS_ONCE(pc->data_head);
293 rmb();
294 return head;
295}
296
297static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
298{
299 struct perf_event_mmap_page *pc = md->base;
300
301 /*
302 * ensure all reads are done before we write the tail out.
303 */
304 mb();
305 pc->data_tail = tail;
306}
307
308bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str); 235bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str);
309void perf_evlist__to_front(struct perf_evlist *evlist, 236void perf_evlist__to_front(struct perf_evlist *evlist,
310 struct perf_evsel *move_evsel); 237 struct perf_evsel *move_evsel);
@@ -385,4 +312,6 @@ perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);
385 312
386struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist, 313struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
387 union perf_event *event); 314 union perf_event *event);
315
316bool perf_evlist__exclude_kernel(struct perf_evlist *evlist);
388#endif /* __PERF_EVLIST_H */ 317#endif /* __PERF_EVLIST_H */
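struct perf_mmap, the bkw_mmap_state state machine and the perf_mmap__read_head()/perf_mmap__write_tail() inlines all leave evlist.h here; with the new #include "mmap.h" above they presumably reappear there, since the toggle_bkw_mmap() prototype still uses enum bkw_mmap_state. The ordering contract those two inlines encode is worth keeping in mind when reading the rest of the diff: load data_head, read barrier, parse records, full barrier, then publish data_tail so the kernel may reuse the space. A sketch, assuming the ACCESS_ONCE/rmb()/mb() macros from the tools headers as used in the removed code:

#include <linux/compiler.h>	/* ACCESS_ONCE (tools/include) */
#include <linux/types.h>	/* u64 (tools/include) */
#include <linux/perf_event.h>	/* struct perf_event_mmap_page */
/* rmb()/mb() come from the per-arch tools barrier headers (assumed) */

static inline u64 mmap_read_head(struct perf_event_mmap_page *pc)
{
	u64 head = ACCESS_ONCE(pc->data_head);

	rmb();	/* no record may be read before the data_head load */
	return head;
}

static inline void mmap_write_tail(struct perf_event_mmap_page *pc, u64 tail)
{
	mb();	/* all record reads must complete before freeing the space */
	pc->data_tail = tail;
}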
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 0dccdb89572c..d5fbcf8c7aa7 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -683,7 +683,7 @@ void perf_evsel__config_callchain(struct perf_evsel *evsel,
683 if (!function) { 683 if (!function) {
684 perf_evsel__set_sample_bit(evsel, REGS_USER); 684 perf_evsel__set_sample_bit(evsel, REGS_USER);
685 perf_evsel__set_sample_bit(evsel, STACK_USER); 685 perf_evsel__set_sample_bit(evsel, STACK_USER);
686 attr->sample_regs_user = PERF_REGS_MASK; 686 attr->sample_regs_user |= PERF_REGS_MASK;
687 attr->sample_stack_user = param->dump_size; 687 attr->sample_stack_user = param->dump_size;
688 attr->exclude_callchain_user = 1; 688 attr->exclude_callchain_user = 1;
689 } else { 689 } else {
@@ -733,12 +733,16 @@ static void apply_config_terms(struct perf_evsel *evsel,
733 list_for_each_entry(term, config_terms, list) { 733 list_for_each_entry(term, config_terms, list) {
734 switch (term->type) { 734 switch (term->type) {
735 case PERF_EVSEL__CONFIG_TERM_PERIOD: 735 case PERF_EVSEL__CONFIG_TERM_PERIOD:
736 attr->sample_period = term->val.period; 736 if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
737 attr->freq = 0; 737 attr->sample_period = term->val.period;
738 attr->freq = 0;
739 }
738 break; 740 break;
739 case PERF_EVSEL__CONFIG_TERM_FREQ: 741 case PERF_EVSEL__CONFIG_TERM_FREQ:
740 attr->sample_freq = term->val.freq; 742 if (!(term->weak && opts->user_freq != UINT_MAX)) {
741 attr->freq = 1; 743 attr->sample_freq = term->val.freq;
744 attr->freq = 1;
745 }
742 break; 746 break;
743 case PERF_EVSEL__CONFIG_TERM_TIME: 747 case PERF_EVSEL__CONFIG_TERM_TIME:
744 if (term->val.time) 748 if (term->val.time)
@@ -936,6 +940,11 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
936 perf_evsel__set_sample_bit(evsel, REGS_INTR); 940 perf_evsel__set_sample_bit(evsel, REGS_INTR);
937 } 941 }
938 942
943 if (opts->sample_user_regs) {
944 attr->sample_regs_user |= opts->sample_user_regs;
945 perf_evsel__set_sample_bit(evsel, REGS_USER);
946 }
947
939 if (target__has_cpu(&opts->target) || opts->sample_cpu) 948 if (target__has_cpu(&opts->target) || opts->sample_cpu)
940 perf_evsel__set_sample_bit(evsel, CPU); 949 perf_evsel__set_sample_bit(evsel, CPU);
941 950
@@ -1366,7 +1375,7 @@ perf_evsel__process_group_data(struct perf_evsel *leader,
1366static int 1375static int
1367perf_evsel__read_group(struct perf_evsel *leader, int cpu, int thread) 1376perf_evsel__read_group(struct perf_evsel *leader, int cpu, int thread)
1368{ 1377{
1369 struct perf_stat_evsel *ps = leader->priv; 1378 struct perf_stat_evsel *ps = leader->stats;
1370 u64 read_format = leader->attr.read_format; 1379 u64 read_format = leader->attr.read_format;
1371 int size = perf_evsel__read_size(leader); 1380 int size = perf_evsel__read_size(leader);
1372 u64 *data = ps->group_data; 1381 u64 *data = ps->group_data;
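Two behavioural changes ride along with moving the leader's perf_stat_evsel from ->priv to the dedicated ->stats pointer (added to struct perf_evsel in the evsel.h hunk below): sample_regs_user is now OR-ed instead of assigned, so a register mask requested via opts->sample_user_regs survives callchain configuration, and period/freq config terms gained a weak flag -- a weak term only takes effect when the user did not already pin the period (-c) or frequency (-F) on the command line. The precedence rule in both branches reduces to one predicate:

#include <stdbool.h>

/* user_set corresponds to opts->user_interval != ULLONG_MAX for period
 * terms and opts->user_freq != UINT_MAX for frequency terms. */
static bool config_term_applies(bool term_is_weak, bool user_set)
{
	return !(term_is_weak && user_set);
}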
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index b4df79d72329..157f49e8a772 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -67,8 +67,11 @@ struct perf_evsel_config_term {
67 bool overwrite; 67 bool overwrite;
68 char *branch; 68 char *branch;
69 } val; 69 } val;
70 bool weak;
70}; 71};
71 72
73struct perf_stat_evsel;
74
72/** struct perf_evsel - event selector 75/** struct perf_evsel - event selector
73 * 76 *
74 * @evlist - evlist this evsel is in, if it is in one. 77 * @evlist - evlist this evsel is in, if it is in one.
@@ -102,6 +105,7 @@ struct perf_evsel {
102 const char *unit; 105 const char *unit;
103 struct event_format *tp_format; 106 struct event_format *tp_format;
104 off_t id_offset; 107 off_t id_offset;
108 struct perf_stat_evsel *stats;
105 void *priv; 109 void *priv;
106 u64 db_id; 110 u64 db_id;
107 struct cgroup_sel *cgrp; 111 struct cgroup_sel *cgrp;
@@ -138,6 +142,7 @@ struct perf_evsel {
138 const char * metric_name; 142 const char * metric_name;
139 struct perf_evsel **metric_events; 143 struct perf_evsel **metric_events;
140 bool collect_stat; 144 bool collect_stat;
145 bool weak_group;
141}; 146};
142 147
143union u64_swap { 148union u64_swap {
diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c
index 1fd7c2e46db2..06dfb027879d 100644
--- a/tools/perf/util/evsel_fprintf.c
+++ b/tools/perf/util/evsel_fprintf.c
@@ -158,7 +158,7 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
158 } 158 }
159 } 159 }
160 160
161 if (print_dso) { 161 if (print_dso && (!node->sym || !node->sym->inlined)) {
162 printed += fprintf(fp, " ("); 162 printed += fprintf(fp, " (");
163 printed += map__fprintf_dsoname(node->map, fp); 163 printed += map__fprintf_dsoname(node->map, fp);
164 printed += fprintf(fp, ")"); 164 printed += fprintf(fp, ")");
@@ -167,41 +167,12 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
167 if (print_srcline) 167 if (print_srcline)
168 printed += map__fprintf_srcline(node->map, addr, "\n ", fp); 168 printed += map__fprintf_srcline(node->map, addr, "\n ", fp);
169 169
170 if (node->sym && node->sym->inlined)
171 printed += fprintf(fp, " (inlined)");
172
170 if (!print_oneline) 173 if (!print_oneline)
171 printed += fprintf(fp, "\n"); 174 printed += fprintf(fp, "\n");
172 175
173 if (symbol_conf.inline_name && node->map) {
174 struct inline_node *inode;
175
176 addr = map__rip_2objdump(node->map, node->ip),
177 inode = dso__parse_addr_inlines(node->map->dso, addr);
178
179 if (inode) {
180 struct inline_list *ilist;
181
182 list_for_each_entry(ilist, &inode->val, list) {
183 if (print_arrow)
184 printed += fprintf(fp, " <-");
185
186 /* IP is same, just skip it */
187 if (print_ip)
188 printed += fprintf(fp, "%c%16s",
189 s, "");
190 if (print_sym)
191 printed += fprintf(fp, " %s",
192 ilist->funcname);
193 if (print_srcline)
194 printed += fprintf(fp, "\n %s:%d",
195 ilist->filename,
196 ilist->line_nr);
197 if (!print_oneline)
198 printed += fprintf(fp, "\n");
199 }
200
201 inline_node__delete(inode);
202 }
203 }
204
205 if (symbol_conf.bt_stop_list && 176 if (symbol_conf.bt_stop_list &&
206 node->sym && 177 node->sym &&
207 strlist__has_entry(symbol_conf.bt_stop_list, 178 strlist__has_entry(symbol_conf.bt_stop_list,
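The print-time inline expansion (dso__parse_addr_inlines() per callchain node) is gone from the printer; with the machine.c changes later in this diff, inlined frames arrive as regular callchain entries whose symbol carries sym->inlined, so the printer only tags them and suppresses the DSO suffix. Roughly, a callchain that the removed loop used to synthesize now prints like the following (illustrative only -- the symbol and DSO names are made up):

	inlined_helper (inlined)
	outer_function (inlined)
	cmd_record (/usr/bin/perf)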
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index ba0cea8fef72..7c0e9d587bfa 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1763,7 +1763,7 @@ process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
1763 1763
1764 session = container_of(ff->ph, struct perf_session, header); 1764 session = container_of(ff->ph, struct perf_session, header);
1765 1765
1766 if (session->file->is_pipe) { 1766 if (session->data->is_pipe) {
1767 /* Save events for reading later by print_event_desc, 1767 /* Save events for reading later by print_event_desc,
1768 * since they can't be read again in pipe mode. */ 1768 * since they can't be read again in pipe mode. */
1769 ff->events = events; 1769 ff->events = events;
@@ -1772,7 +1772,7 @@ process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
1772 for (evsel = events; evsel->attr.size; evsel++) 1772 for (evsel = events; evsel->attr.size; evsel++)
1773 perf_evlist__set_event_name(session->evlist, evsel); 1773 perf_evlist__set_event_name(session->evlist, evsel);
1774 1774
1775 if (!session->file->is_pipe) 1775 if (!session->data->is_pipe)
1776 free_event_desc(events); 1776 free_event_desc(events);
1777 1777
1778 return 0; 1778 return 0;
@@ -2249,7 +2249,7 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2249{ 2249{
2250 struct header_print_data hd; 2250 struct header_print_data hd;
2251 struct perf_header *header = &session->header; 2251 struct perf_header *header = &session->header;
2252 int fd = perf_data_file__fd(session->file); 2252 int fd = perf_data__fd(session->data);
2253 struct stat st; 2253 struct stat st;
2254 int ret, bit; 2254 int ret, bit;
2255 2255
@@ -2265,7 +2265,7 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2265 perf_header__process_sections(header, fd, &hd, 2265 perf_header__process_sections(header, fd, &hd,
2266 perf_file_section__fprintf_info); 2266 perf_file_section__fprintf_info);
2267 2267
2268 if (session->file->is_pipe) 2268 if (session->data->is_pipe)
2269 return 0; 2269 return 0;
2270 2270
2271 fprintf(fp, "# missing features: "); 2271 fprintf(fp, "# missing features: ");
@@ -2758,7 +2758,7 @@ static int perf_header__read_pipe(struct perf_session *session)
2758 struct perf_pipe_file_header f_header; 2758 struct perf_pipe_file_header f_header;
2759 2759
2760 if (perf_file_header__read_pipe(&f_header, header, 2760 if (perf_file_header__read_pipe(&f_header, header,
2761 perf_data_file__fd(session->file), 2761 perf_data__fd(session->data),
2762 session->repipe) < 0) { 2762 session->repipe) < 0) {
2763 pr_debug("incompatible file format\n"); 2763 pr_debug("incompatible file format\n");
2764 return -EINVAL; 2764 return -EINVAL;
@@ -2861,13 +2861,13 @@ static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
2861 2861
2862int perf_session__read_header(struct perf_session *session) 2862int perf_session__read_header(struct perf_session *session)
2863{ 2863{
2864 struct perf_data_file *file = session->file; 2864 struct perf_data *data = session->data;
2865 struct perf_header *header = &session->header; 2865 struct perf_header *header = &session->header;
2866 struct perf_file_header f_header; 2866 struct perf_file_header f_header;
2867 struct perf_file_attr f_attr; 2867 struct perf_file_attr f_attr;
2868 u64 f_id; 2868 u64 f_id;
2869 int nr_attrs, nr_ids, i, j; 2869 int nr_attrs, nr_ids, i, j;
2870 int fd = perf_data_file__fd(file); 2870 int fd = perf_data__fd(data);
2871 2871
2872 session->evlist = perf_evlist__new(); 2872 session->evlist = perf_evlist__new();
2873 if (session->evlist == NULL) 2873 if (session->evlist == NULL)
@@ -2875,7 +2875,7 @@ int perf_session__read_header(struct perf_session *session)
2875 2875
2876 session->evlist->env = &header->env; 2876 session->evlist->env = &header->env;
2877 session->machines.host.env = &header->env; 2877 session->machines.host.env = &header->env;
2878 if (perf_data_file__is_pipe(file)) 2878 if (perf_data__is_pipe(data))
2879 return perf_header__read_pipe(session); 2879 return perf_header__read_pipe(session);
2880 2880
2881 if (perf_file_header__read(&f_header, header, fd) < 0) 2881 if (perf_file_header__read(&f_header, header, fd) < 0)
@@ -2890,7 +2890,7 @@ int perf_session__read_header(struct perf_session *session)
2890 if (f_header.data.size == 0) { 2890 if (f_header.data.size == 0) {
2891 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n" 2891 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
2892 "Was the 'perf record' command properly terminated?\n", 2892 "Was the 'perf record' command properly terminated?\n",
2893 file->path); 2893 data->file.path);
2894 } 2894 }
2895 2895
2896 nr_attrs = f_header.attrs.size / f_header.attr_size; 2896 nr_attrs = f_header.attrs.size / f_header.attr_size;
@@ -3398,7 +3398,7 @@ int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
3398 struct perf_session *session) 3398 struct perf_session *session)
3399{ 3399{
3400 ssize_t size_read, padding, size = event->tracing_data.size; 3400 ssize_t size_read, padding, size = event->tracing_data.size;
3401 int fd = perf_data_file__fd(session->file); 3401 int fd = perf_data__fd(session->data);
3402 off_t offset = lseek(fd, 0, SEEK_CUR); 3402 off_t offset = lseek(fd, 0, SEEK_CUR);
3403 char buf[BUFSIZ]; 3403 char buf[BUFSIZ];
3404 3404
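All of the header.c changes (and the intel-bts.c, intel-pt.c, jit.h and jitdump.c ones further down) are one mechanical rename: struct perf_data_file becomes struct perf_data with an embedded file member, and the accessors drop the _file infix. Collected in one place, the mapping used throughout the rest of this diff (the embedded-struct layout is inferred from data->file.path above):

/* old API                               new API
 * ------------------------------------  -----------------------------------
 * struct perf_data_file *file;          struct perf_data *data;
 * perf_data_file__fd(session->file)     perf_data__fd(session->data)
 * perf_data_file__is_pipe(file)         perf_data__is_pipe(data)
 * perf_data_file__write(out, ev, size)  perf_data__write(out, ev, size)
 * file->path                            data->file.path
 */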
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 097473600d94..b6140950301e 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -597,6 +597,7 @@ __hists__add_entry(struct hists *hists,
597 .map = al->map, 597 .map = al->map,
598 .sym = al->sym, 598 .sym = al->sym,
599 }, 599 },
600 .srcline = al->srcline ? strdup(al->srcline) : NULL,
600 .socket = al->socket, 601 .socket = al->socket,
601 .cpu = al->cpu, 602 .cpu = al->cpu,
602 .cpumode = al->cpumode, 603 .cpumode = al->cpumode,
@@ -951,6 +952,7 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
951 .map = al->map, 952 .map = al->map,
952 .sym = al->sym, 953 .sym = al->sym,
953 }, 954 },
955 .srcline = al->srcline ? strdup(al->srcline) : NULL,
954 .parent = iter->parent, 956 .parent = iter->parent,
955 .raw_data = sample->raw_data, 957 .raw_data = sample->raw_data,
956 .raw_size = sample->raw_size, 958 .raw_size = sample->raw_size,
@@ -1142,11 +1144,6 @@ void hist_entry__delete(struct hist_entry *he)
1142 zfree(&he->mem_info); 1144 zfree(&he->mem_info);
1143 } 1145 }
1144 1146
1145 if (he->inline_node) {
1146 inline_node__delete(he->inline_node);
1147 he->inline_node = NULL;
1148 }
1149
1150 zfree(&he->stat_acc); 1147 zfree(&he->stat_acc);
1151 free_srcline(he->srcline); 1148 free_srcline(he->srcline);
1152 if (he->srcfile && he->srcfile[0]) 1149 if (he->srcfile && he->srcfile[0])
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index 218ee2bac9a5..5325e65f9711 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -500,7 +500,7 @@ static int intel_bts_process_queue(struct intel_bts_queue *btsq, u64 *timestamp)
500 } 500 }
501 501
502 if (!buffer->data) { 502 if (!buffer->data) {
503 int fd = perf_data_file__fd(btsq->bts->session->file); 503 int fd = perf_data__fd(btsq->bts->session->data);
504 504
505 buffer->data = auxtrace_buffer__get_data(buffer, fd); 505 buffer->data = auxtrace_buffer__get_data(buffer, fd);
506 if (!buffer->data) { 506 if (!buffer->data) {
@@ -664,10 +664,10 @@ static int intel_bts_process_auxtrace_event(struct perf_session *session,
664 if (!bts->data_queued) { 664 if (!bts->data_queued) {
665 struct auxtrace_buffer *buffer; 665 struct auxtrace_buffer *buffer;
666 off_t data_offset; 666 off_t data_offset;
667 int fd = perf_data_file__fd(session->file); 667 int fd = perf_data__fd(session->data);
668 int err; 668 int err;
669 669
670 if (perf_data_file__is_pipe(session->file)) { 670 if (perf_data__is_pipe(session->data)) {
671 data_offset = 0; 671 data_offset = 0;
672 } else { 672 } else {
673 data_offset = lseek(fd, 0, SEEK_CUR); 673 data_offset = lseek(fd, 0, SEEK_CUR);
diff --git a/tools/perf/util/intel-pt-decoder/inat.h b/tools/perf/util/intel-pt-decoder/inat.h
index 125ecd2a300d..52dc8d911173 100644
--- a/tools/perf/util/intel-pt-decoder/inat.h
+++ b/tools/perf/util/intel-pt-decoder/inat.h
@@ -97,6 +97,16 @@
97#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM) 97#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM)
98#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS) 98#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS)
99 99
100/* Identifiers for segment registers */
101#define INAT_SEG_REG_IGNORE 0
102#define INAT_SEG_REG_DEFAULT 1
103#define INAT_SEG_REG_CS 2
104#define INAT_SEG_REG_SS 3
105#define INAT_SEG_REG_DS 4
106#define INAT_SEG_REG_ES 5
107#define INAT_SEG_REG_FS 6
108#define INAT_SEG_REG_GS 7
109
100/* Attribute search APIs */ 110/* Attribute search APIs */
101extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode); 111extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
102extern int inat_get_last_prefix_id(insn_byte_t last_pfx); 112extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
diff --git a/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt b/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
index 12e377184ee4..e0b85930dd77 100644
--- a/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
+++ b/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
@@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
607fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) 607fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
608fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) 608fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
609fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) 609fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
610ff: 610ff: UD0
611EndTable 611EndTable
612 612
613Table: 3-byte opcode 1 (0x0f 0x38) 613Table: 3-byte opcode 1 (0x0f 0x38)
@@ -717,7 +717,7 @@ AVXcode: 2
7177e: vpermt2d/q Vx,Hx,Wx (66),(ev) 7177e: vpermt2d/q Vx,Hx,Wx (66),(ev)
7187f: vpermt2ps/d Vx,Hx,Wx (66),(ev) 7187f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
71980: INVEPT Gy,Mdq (66) 71980: INVEPT Gy,Mdq (66)
72081: INVPID Gy,Mdq (66) 72081: INVVPID Gy,Mdq (66)
72182: INVPCID Gy,Mdq (66) 72182: INVPCID Gy,Mdq (66)
72283: vpmultishiftqb Vx,Hx,Wx (66),(ev) 72283: vpmultishiftqb Vx,Hx,Wx (66),(ev)
72388: vexpandps/d Vpd,Wpd (66),(ev) 72388: vexpandps/d Vpd,Wpd (66),(ev)
@@ -896,7 +896,7 @@ EndTable
896 896
897GrpTable: Grp3_1 897GrpTable: Grp3_1
8980: TEST Eb,Ib 8980: TEST Eb,Ib
8991: 8991: TEST Eb,Ib
9002: NOT Eb 9002: NOT Eb
9013: NEG Eb 9013: NEG Eb
9024: MUL AL,Eb 9024: MUL AL,Eb
@@ -970,6 +970,15 @@ GrpTable: Grp9
970EndTable 970EndTable
971 971
972GrpTable: Grp10 972GrpTable: Grp10
973# all are UD1
9740: UD1
9751: UD1
9762: UD1
9773: UD1
9784: UD1
9795: UD1
9806: UD1
9817: UD1
973EndTable 982EndTable
974 983
975# Grp11A and Grp11B are expressed as Grp11 in Intel SDM 984# Grp11A and Grp11B are expressed as Grp11 in Intel SDM
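The x86 opcode map fixes above fill in entries the decoder previously left blank and correct one name: 0f ff is UD0, every ModRM.reg value of Grp10 (0f b9) is UD1, Grp3_1 /1 is the undocumented TEST Eb,Ib alias of /0, and INVPID is corrected to INVVPID. A quick reference for what the regenerated tables now decode (standard encodings; the operand forms are illustrative):

/*   0f ff             ud0                  (entry was previously empty)
 *   0f b9 /r          ud1                  (Grp10, any reg field)
 *   f6 /1 ib          test r/m8, imm8      (alias of f6 /0)
 *   66 0f 38 81 /r    invvpid r64, m128    (name fixed from INVPID)
 */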
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index b58f9fd1e2ee..23f9ba676df0 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -271,7 +271,7 @@ next:
271 ptq->buffer = buffer; 271 ptq->buffer = buffer;
272 272
273 if (!buffer->data) { 273 if (!buffer->data) {
274 int fd = perf_data_file__fd(ptq->pt->session->file); 274 int fd = perf_data__fd(ptq->pt->session->data);
275 275
276 buffer->data = auxtrace_buffer__get_data(buffer, fd); 276 buffer->data = auxtrace_buffer__get_data(buffer, fd);
277 if (!buffer->data) 277 if (!buffer->data)
@@ -2084,10 +2084,10 @@ static int intel_pt_process_auxtrace_event(struct perf_session *session,
2084 if (!pt->data_queued) { 2084 if (!pt->data_queued) {
2085 struct auxtrace_buffer *buffer; 2085 struct auxtrace_buffer *buffer;
2086 off_t data_offset; 2086 off_t data_offset;
2087 int fd = perf_data_file__fd(session->file); 2087 int fd = perf_data__fd(session->data);
2088 int err; 2088 int err;
2089 2089
2090 if (perf_data_file__is_pipe(session->file)) { 2090 if (perf_data__is_pipe(session->data)) {
2091 data_offset = 0; 2091 data_offset = 0;
2092 } else { 2092 } else {
2093 data_offset = lseek(fd, 0, SEEK_CUR); 2093 data_offset = lseek(fd, 0, SEEK_CUR);
diff --git a/tools/perf/util/jit.h b/tools/perf/util/jit.h
index c2582fa9fe21..6817ffc2a059 100644
--- a/tools/perf/util/jit.h
+++ b/tools/perf/util/jit.h
@@ -4,7 +4,7 @@
4 4
5#include <data.h> 5#include <data.h>
6 6
7int jit_process(struct perf_session *session, struct perf_data_file *output, 7int jit_process(struct perf_session *session, struct perf_data *output,
8 struct machine *machine, char *filename, pid_t pid, u64 *nbytes); 8 struct machine *machine, char *filename, pid_t pid, u64 *nbytes);
9 9
10int jit_inject_record(const char *filename); 10int jit_inject_record(const char *filename);
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index 36483db032e8..a1863000e972 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -30,7 +30,7 @@
30#include "sane_ctype.h" 30#include "sane_ctype.h"
31 31
32struct jit_buf_desc { 32struct jit_buf_desc {
33 struct perf_data_file *output; 33 struct perf_data *output;
34 struct perf_session *session; 34 struct perf_session *session;
35 struct machine *machine; 35 struct machine *machine;
36 union jr_entry *entry; 36 union jr_entry *entry;
@@ -61,8 +61,8 @@ struct debug_line_info {
61 61
62struct jit_tool { 62struct jit_tool {
63 struct perf_tool tool; 63 struct perf_tool tool;
64 struct perf_data_file output; 64 struct perf_data output;
65 struct perf_data_file input; 65 struct perf_data input;
66 u64 bytes_written; 66 u64 bytes_written;
67}; 67};
68 68
@@ -357,7 +357,7 @@ jit_inject_event(struct jit_buf_desc *jd, union perf_event *event)
357{ 357{
358 ssize_t size; 358 ssize_t size;
359 359
360 size = perf_data_file__write(jd->output, event, event->header.size); 360 size = perf_data__write(jd->output, event, event->header.size);
361 if (size < 0) 361 if (size < 0)
362 return -1; 362 return -1;
363 363
@@ -752,7 +752,7 @@ jit_detect(char *mmap_name, pid_t pid)
752 752
753int 753int
754jit_process(struct perf_session *session, 754jit_process(struct perf_session *session,
755 struct perf_data_file *output, 755 struct perf_data *output,
756 struct machine *machine, 756 struct machine *machine,
757 char *filename, 757 char *filename,
758 pid_t pid, 758 pid_t pid,
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index bd5d5b5e2218..270f3223c6df 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -31,7 +31,21 @@ static void dsos__init(struct dsos *dsos)
31{ 31{
32 INIT_LIST_HEAD(&dsos->head); 32 INIT_LIST_HEAD(&dsos->head);
33 dsos->root = RB_ROOT; 33 dsos->root = RB_ROOT;
34 pthread_rwlock_init(&dsos->lock, NULL); 34 init_rwsem(&dsos->lock);
35}
36
37static void machine__threads_init(struct machine *machine)
38{
39 int i;
40
41 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
42 struct threads *threads = &machine->threads[i];
43 threads->entries = RB_ROOT;
44 init_rwsem(&threads->lock);
45 threads->nr = 0;
46 INIT_LIST_HEAD(&threads->dead);
47 threads->last_match = NULL;
48 }
35} 49}
36 50
37int machine__init(struct machine *machine, const char *root_dir, pid_t pid) 51int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
@@ -41,11 +55,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
41 RB_CLEAR_NODE(&machine->rb_node); 55 RB_CLEAR_NODE(&machine->rb_node);
42 dsos__init(&machine->dsos); 56 dsos__init(&machine->dsos);
43 57
44 machine->threads = RB_ROOT; 58 machine__threads_init(machine);
45 pthread_rwlock_init(&machine->threads_lock, NULL);
46 machine->nr_threads = 0;
47 INIT_LIST_HEAD(&machine->dead_threads);
48 machine->last_match = NULL;
49 59
50 machine->vdso_info = NULL; 60 machine->vdso_info = NULL;
51 machine->env = NULL; 61 machine->env = NULL;
@@ -121,7 +131,7 @@ static void dsos__purge(struct dsos *dsos)
121{ 131{
122 struct dso *pos, *n; 132 struct dso *pos, *n;
123 133
124 pthread_rwlock_wrlock(&dsos->lock); 134 down_write(&dsos->lock);
125 135
126 list_for_each_entry_safe(pos, n, &dsos->head, node) { 136 list_for_each_entry_safe(pos, n, &dsos->head, node) {
127 RB_CLEAR_NODE(&pos->rb_node); 137 RB_CLEAR_NODE(&pos->rb_node);
@@ -130,39 +140,52 @@ static void dsos__purge(struct dsos *dsos)
130 dso__put(pos); 140 dso__put(pos);
131 } 141 }
132 142
133 pthread_rwlock_unlock(&dsos->lock); 143 up_write(&dsos->lock);
134} 144}
135 145
136static void dsos__exit(struct dsos *dsos) 146static void dsos__exit(struct dsos *dsos)
137{ 147{
138 dsos__purge(dsos); 148 dsos__purge(dsos);
139 pthread_rwlock_destroy(&dsos->lock); 149 exit_rwsem(&dsos->lock);
140} 150}
141 151
142void machine__delete_threads(struct machine *machine) 152void machine__delete_threads(struct machine *machine)
143{ 153{
144 struct rb_node *nd; 154 struct rb_node *nd;
155 int i;
145 156
146 pthread_rwlock_wrlock(&machine->threads_lock); 157 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
147 nd = rb_first(&machine->threads); 158 struct threads *threads = &machine->threads[i];
148 while (nd) { 159 down_write(&threads->lock);
149 struct thread *t = rb_entry(nd, struct thread, rb_node); 160 nd = rb_first(&threads->entries);
161 while (nd) {
162 struct thread *t = rb_entry(nd, struct thread, rb_node);
150 163
151 nd = rb_next(nd); 164 nd = rb_next(nd);
152 __machine__remove_thread(machine, t, false); 165 __machine__remove_thread(machine, t, false);
166 }
167 up_write(&threads->lock);
153 } 168 }
154 pthread_rwlock_unlock(&machine->threads_lock);
155} 169}
156 170
157void machine__exit(struct machine *machine) 171void machine__exit(struct machine *machine)
158{ 172{
173 int i;
174
175 if (machine == NULL)
176 return;
177
159 machine__destroy_kernel_maps(machine); 178 machine__destroy_kernel_maps(machine);
160 map_groups__exit(&machine->kmaps); 179 map_groups__exit(&machine->kmaps);
161 dsos__exit(&machine->dsos); 180 dsos__exit(&machine->dsos);
162 machine__exit_vdso(machine); 181 machine__exit_vdso(machine);
163 zfree(&machine->root_dir); 182 zfree(&machine->root_dir);
164 zfree(&machine->current_tid); 183 zfree(&machine->current_tid);
165 pthread_rwlock_destroy(&machine->threads_lock); 184
185 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
186 struct threads *threads = &machine->threads[i];
187 exit_rwsem(&threads->lock);
188 }
166} 189}
167 190
168void machine__delete(struct machine *machine) 191void machine__delete(struct machine *machine)
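machine->threads stops being one rbtree behind one global rwlock: it becomes THREADS__TABLE_SIZE independent buckets, each with its own entries tree, rw_semaphore, nr counter, dead list and last_match cache, so lookups and insertions for unrelated tids no longer serialize on a single lock (the per-bucket locking shows up in the findnew/find/remove hunks below). The machine__threads() bucket selector used by those hunks is declared elsewhere; a plausible shape, assuming it simply hashes the tid, is:

/* Hypothetical sketch of the bucket selector; the real helper in
 * machine.h may hash differently. */
static inline struct threads *machine__threads(struct machine *machine,
					       pid_t tid)
{
	/* the cast keeps tid == -1 and pid 0 inside a valid bucket */
	return &machine->threads[(unsigned int)tid % THREADS__TABLE_SIZE];
}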
@@ -380,10 +403,11 @@ out_err:
380 * lookup/new thread inserted. 403 * lookup/new thread inserted.
381 */ 404 */
382static struct thread *____machine__findnew_thread(struct machine *machine, 405static struct thread *____machine__findnew_thread(struct machine *machine,
406 struct threads *threads,
383 pid_t pid, pid_t tid, 407 pid_t pid, pid_t tid,
384 bool create) 408 bool create)
385{ 409{
386 struct rb_node **p = &machine->threads.rb_node; 410 struct rb_node **p = &threads->entries.rb_node;
387 struct rb_node *parent = NULL; 411 struct rb_node *parent = NULL;
388 struct thread *th; 412 struct thread *th;
389 413
@@ -392,14 +416,14 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
392 * so most of the time we dont have to look up 416 * so most of the time we dont have to look up
393 * the full rbtree: 417 * the full rbtree:
394 */ 418 */
395 th = machine->last_match; 419 th = threads->last_match;
396 if (th != NULL) { 420 if (th != NULL) {
397 if (th->tid == tid) { 421 if (th->tid == tid) {
398 machine__update_thread_pid(machine, th, pid); 422 machine__update_thread_pid(machine, th, pid);
399 return thread__get(th); 423 return thread__get(th);
400 } 424 }
401 425
402 machine->last_match = NULL; 426 threads->last_match = NULL;
403 } 427 }
404 428
405 while (*p != NULL) { 429 while (*p != NULL) {
@@ -407,7 +431,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
407 th = rb_entry(parent, struct thread, rb_node); 431 th = rb_entry(parent, struct thread, rb_node);
408 432
409 if (th->tid == tid) { 433 if (th->tid == tid) {
410 machine->last_match = th; 434 threads->last_match = th;
411 machine__update_thread_pid(machine, th, pid); 435 machine__update_thread_pid(machine, th, pid);
412 return thread__get(th); 436 return thread__get(th);
413 } 437 }
@@ -424,7 +448,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
424 th = thread__new(pid, tid); 448 th = thread__new(pid, tid);
425 if (th != NULL) { 449 if (th != NULL) {
426 rb_link_node(&th->rb_node, parent, p); 450 rb_link_node(&th->rb_node, parent, p);
427 rb_insert_color(&th->rb_node, &machine->threads); 451 rb_insert_color(&th->rb_node, &threads->entries);
428 452
429 /* 453 /*
430 * We have to initialize map_groups separately 454 * We have to initialize map_groups separately
@@ -435,7 +459,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
435 * leader and that would screwed the rb tree. 459 * leader and that would screwed the rb tree.
436 */ 460 */
437 if (thread__init_map_groups(th, machine)) { 461 if (thread__init_map_groups(th, machine)) {
438 rb_erase_init(&th->rb_node, &machine->threads); 462 rb_erase_init(&th->rb_node, &threads->entries);
439 RB_CLEAR_NODE(&th->rb_node); 463 RB_CLEAR_NODE(&th->rb_node);
440 thread__put(th); 464 thread__put(th);
441 return NULL; 465 return NULL;
@@ -444,8 +468,8 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
444 * It is now in the rbtree, get a ref 468 * It is now in the rbtree, get a ref
445 */ 469 */
446 thread__get(th); 470 thread__get(th);
447 machine->last_match = th; 471 threads->last_match = th;
448 ++machine->nr_threads; 472 ++threads->nr;
449 } 473 }
450 474
451 return th; 475 return th;
@@ -453,27 +477,30 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
453 477
454struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid) 478struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
455{ 479{
456 return ____machine__findnew_thread(machine, pid, tid, true); 480 return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
457} 481}
458 482
459struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, 483struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
460 pid_t tid) 484 pid_t tid)
461{ 485{
486 struct threads *threads = machine__threads(machine, tid);
462 struct thread *th; 487 struct thread *th;
463 488
464 pthread_rwlock_wrlock(&machine->threads_lock); 489 down_write(&threads->lock);
465 th = __machine__findnew_thread(machine, pid, tid); 490 th = __machine__findnew_thread(machine, pid, tid);
466 pthread_rwlock_unlock(&machine->threads_lock); 491 up_write(&threads->lock);
467 return th; 492 return th;
468} 493}
469 494
470struct thread *machine__find_thread(struct machine *machine, pid_t pid, 495struct thread *machine__find_thread(struct machine *machine, pid_t pid,
471 pid_t tid) 496 pid_t tid)
472{ 497{
498 struct threads *threads = machine__threads(machine, tid);
473 struct thread *th; 499 struct thread *th;
474 pthread_rwlock_rdlock(&machine->threads_lock); 500
475 th = ____machine__findnew_thread(machine, pid, tid, false); 501 down_read(&threads->lock);
476 pthread_rwlock_unlock(&machine->threads_lock); 502 th = ____machine__findnew_thread(machine, threads, pid, tid, false);
503 up_read(&threads->lock);
477 return th; 504 return th;
478} 505}
479 506
@@ -565,7 +592,7 @@ static struct dso *machine__findnew_module_dso(struct machine *machine,
565{ 592{
566 struct dso *dso; 593 struct dso *dso;
567 594
568 pthread_rwlock_wrlock(&machine->dsos.lock); 595 down_write(&machine->dsos.lock);
569 596
570 dso = __dsos__find(&machine->dsos, m->name, true); 597 dso = __dsos__find(&machine->dsos, m->name, true);
571 if (!dso) { 598 if (!dso) {
@@ -579,7 +606,7 @@ static struct dso *machine__findnew_module_dso(struct machine *machine,
579 606
580 dso__get(dso); 607 dso__get(dso);
581out_unlock: 608out_unlock:
582 pthread_rwlock_unlock(&machine->dsos.lock); 609 up_write(&machine->dsos.lock);
583 return dso; 610 return dso;
584} 611}
585 612
@@ -720,21 +747,25 @@ size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
720 747
721size_t machine__fprintf(struct machine *machine, FILE *fp) 748size_t machine__fprintf(struct machine *machine, FILE *fp)
722{ 749{
723 size_t ret;
724 struct rb_node *nd; 750 struct rb_node *nd;
751 size_t ret;
752 int i;
725 753
726 pthread_rwlock_rdlock(&machine->threads_lock); 754 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
755 struct threads *threads = &machine->threads[i];
727 756
728 ret = fprintf(fp, "Threads: %u\n", machine->nr_threads); 757 down_read(&threads->lock);
729 758
730 for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { 759 ret = fprintf(fp, "Threads: %u\n", threads->nr);
731 struct thread *pos = rb_entry(nd, struct thread, rb_node);
732 760
733 ret += thread__fprintf(pos, fp); 761 for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
734 } 762 struct thread *pos = rb_entry(nd, struct thread, rb_node);
735 763
736 pthread_rwlock_unlock(&machine->threads_lock); 764 ret += thread__fprintf(pos, fp);
765 }
737 766
767 up_read(&threads->lock);
768 }
738 return ret; 769 return ret;
739} 770}
740 771
@@ -1293,7 +1324,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
1293 struct dso *kernel = NULL; 1324 struct dso *kernel = NULL;
1294 struct dso *dso; 1325 struct dso *dso;
1295 1326
1296 pthread_rwlock_rdlock(&machine->dsos.lock); 1327 down_read(&machine->dsos.lock);
1297 1328
1298 list_for_each_entry(dso, &machine->dsos.head, node) { 1329 list_for_each_entry(dso, &machine->dsos.head, node) {
1299 1330
@@ -1323,7 +1354,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
1323 break; 1354 break;
1324 } 1355 }
1325 1356
1326 pthread_rwlock_unlock(&machine->dsos.lock); 1357 up_read(&machine->dsos.lock);
1327 1358
1328 if (kernel == NULL) 1359 if (kernel == NULL)
1329 kernel = machine__findnew_dso(machine, kmmap_prefix); 1360 kernel = machine__findnew_dso(machine, kmmap_prefix);
@@ -1480,23 +1511,25 @@ out_problem:
1480 1511
1481static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock) 1512static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
1482{ 1513{
1483 if (machine->last_match == th) 1514 struct threads *threads = machine__threads(machine, th->tid);
1484 machine->last_match = NULL; 1515
1516 if (threads->last_match == th)
1517 threads->last_match = NULL;
1485 1518
1486 BUG_ON(refcount_read(&th->refcnt) == 0); 1519 BUG_ON(refcount_read(&th->refcnt) == 0);
1487 if (lock) 1520 if (lock)
1488 pthread_rwlock_wrlock(&machine->threads_lock); 1521 down_write(&threads->lock);
1489 rb_erase_init(&th->rb_node, &machine->threads); 1522 rb_erase_init(&th->rb_node, &threads->entries);
1490 RB_CLEAR_NODE(&th->rb_node); 1523 RB_CLEAR_NODE(&th->rb_node);
1491 --machine->nr_threads; 1524 --threads->nr;
1492 /* 1525 /*
1493 * Move it first to the dead_threads list, then drop the reference, 1526 * Move it first to the dead_threads list, then drop the reference,
1494 * if this is the last reference, then the thread__delete destructor 1527 * if this is the last reference, then the thread__delete destructor
1495 * will be called and we will remove it from the dead_threads list. 1528 * will be called and we will remove it from the dead_threads list.
1496 */ 1529 */
1497 list_add_tail(&th->node, &machine->dead_threads); 1530 list_add_tail(&th->node, &threads->dead);
1498 if (lock) 1531 if (lock)
1499 pthread_rwlock_unlock(&machine->threads_lock); 1532 up_write(&threads->lock);
1500 thread__put(th); 1533 thread__put(th);
1501} 1534}
1502 1535
@@ -1680,6 +1713,26 @@ struct mem_info *sample__resolve_mem(struct perf_sample *sample,
1680 return mi; 1713 return mi;
1681} 1714}
1682 1715
1716static char *callchain_srcline(struct map *map, struct symbol *sym, u64 ip)
1717{
1718 char *srcline = NULL;
1719
1720 if (!map || callchain_param.key == CCKEY_FUNCTION)
1721 return srcline;
1722
1723 srcline = srcline__tree_find(&map->dso->srclines, ip);
1724 if (!srcline) {
1725 bool show_sym = false;
1726 bool show_addr = callchain_param.key == CCKEY_ADDRESS;
1727
1728 srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
1729 sym, show_sym, show_addr);
1730 srcline__tree_insert(&map->dso->srclines, ip, srcline);
1731 }
1732
1733 return srcline;
1734}
1735
1683struct iterations { 1736struct iterations {
1684 int nr_loop_iter; 1737 int nr_loop_iter;
1685 u64 cycles; 1738 u64 cycles;
@@ -1699,6 +1752,7 @@ static int add_callchain_ip(struct thread *thread,
1699 struct addr_location al; 1752 struct addr_location al;
1700 int nr_loop_iter = 0; 1753 int nr_loop_iter = 0;
1701 u64 iter_cycles = 0; 1754 u64 iter_cycles = 0;
1755 const char *srcline = NULL;
1702 1756
1703 al.filtered = 0; 1757 al.filtered = 0;
1704 al.sym = NULL; 1758 al.sym = NULL;
@@ -1754,9 +1808,10 @@ static int add_callchain_ip(struct thread *thread,
1754 iter_cycles = iter->cycles; 1808 iter_cycles = iter->cycles;
1755 } 1809 }
1756 1810
1811 srcline = callchain_srcline(al.map, al.sym, al.addr);
1757 return callchain_cursor_append(cursor, al.addr, al.map, al.sym, 1812 return callchain_cursor_append(cursor, al.addr, al.map, al.sym,
1758 branch, flags, nr_loop_iter, 1813 branch, flags, nr_loop_iter,
1759 iter_cycles, branch_from); 1814 iter_cycles, branch_from, srcline);
1760} 1815}
1761 1816
1762struct branch_info *sample__resolve_bstack(struct perf_sample *sample, 1817struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
@@ -2069,15 +2124,54 @@ check_calls:
2069 return 0; 2124 return 0;
2070} 2125}
2071 2126
2127static int append_inlines(struct callchain_cursor *cursor,
2128 struct map *map, struct symbol *sym, u64 ip)
2129{
2130 struct inline_node *inline_node;
2131 struct inline_list *ilist;
2132 u64 addr;
2133 int ret = 1;
2134
2135 if (!symbol_conf.inline_name || !map || !sym)
2136 return ret;
2137
2138 addr = map__rip_2objdump(map, ip);
2139
2140 inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
2141 if (!inline_node) {
2142 inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
2143 if (!inline_node)
2144 return ret;
2145 inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
2146 }
2147
2148 list_for_each_entry(ilist, &inline_node->val, list) {
2149 ret = callchain_cursor_append(cursor, ip, map,
2150 ilist->symbol, false,
2151 NULL, 0, 0, 0, ilist->srcline);
2152
2153 if (ret != 0)
2154 return ret;
2155 }
2156
2157 return ret;
2158}
2159
2072static int unwind_entry(struct unwind_entry *entry, void *arg) 2160static int unwind_entry(struct unwind_entry *entry, void *arg)
2073{ 2161{
2074 struct callchain_cursor *cursor = arg; 2162 struct callchain_cursor *cursor = arg;
2163 const char *srcline = NULL;
2075 2164
2076 if (symbol_conf.hide_unresolved && entry->sym == NULL) 2165 if (symbol_conf.hide_unresolved && entry->sym == NULL)
2077 return 0; 2166 return 0;
2167
2168 if (append_inlines(cursor, entry->map, entry->sym, entry->ip) == 0)
2169 return 0;
2170
2171 srcline = callchain_srcline(entry->map, entry->sym, entry->ip);
2078 return callchain_cursor_append(cursor, entry->ip, 2172 return callchain_cursor_append(cursor, entry->ip,
2079 entry->map, entry->sym, 2173 entry->map, entry->sym,
2080 false, NULL, 0, 0, 0); 2174 false, NULL, 0, 0, 0, srcline);
2081} 2175}
2082 2176
2083static int thread__resolve_callchain_unwind(struct thread *thread, 2177static int thread__resolve_callchain_unwind(struct thread *thread,
@@ -2141,21 +2235,26 @@ int machine__for_each_thread(struct machine *machine,
2141 int (*fn)(struct thread *thread, void *p), 2235 int (*fn)(struct thread *thread, void *p),
2142 void *priv) 2236 void *priv)
2143{ 2237{
2238 struct threads *threads;
2144 struct rb_node *nd; 2239 struct rb_node *nd;
2145 struct thread *thread; 2240 struct thread *thread;
2146 int rc = 0; 2241 int rc = 0;
2242 int i;
2147 2243
2148 for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { 2244 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
2149 thread = rb_entry(nd, struct thread, rb_node); 2245 threads = &machine->threads[i];
2150 rc = fn(thread, priv); 2246 for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
2151 if (rc != 0) 2247 thread = rb_entry(nd, struct thread, rb_node);
2152 return rc; 2248 rc = fn(thread, priv);
2153 } 2249 if (rc != 0)
2250 return rc;
2251 }
2154 2252
2155 list_for_each_entry(thread, &machine->dead_threads, node) { 2253 list_for_each_entry(thread, &threads->dead, node) {
2156 rc = fn(thread, priv); 2254 rc = fn(thread, priv);
2157 if (rc != 0) 2255 if (rc != 0)
2158 return rc; 2256 return rc;
2257 }
2159 } 2258 }
2160 return rc; 2259 return rc;
2161} 2260}
@@ -2184,12 +2283,16 @@ int machines__for_each_thread(struct machines *machines,
2184int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, 2283int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
2185 struct target *target, struct thread_map *threads, 2284 struct target *target, struct thread_map *threads,
2186 perf_event__handler_t process, bool data_mmap, 2285 perf_event__handler_t process, bool data_mmap,
2187 unsigned int proc_map_timeout) 2286 unsigned int proc_map_timeout,
2287 unsigned int nr_threads_synthesize)
2188{ 2288{
2189 if (target__has_task(target)) 2289 if (target__has_task(target))
2190 return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap, proc_map_timeout); 2290 return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap, proc_map_timeout);
2191 else if (target__has_cpu(target)) 2291 else if (target__has_cpu(target))
2192 return perf_event__synthesize_threads(tool, process, machine, data_mmap, proc_map_timeout); 2292 return perf_event__synthesize_threads(tool, process,
2293 machine, data_mmap,
2294 proc_map_timeout,
2295 nr_threads_synthesize);
2193 /* command specified */ 2296 /* command specified */
2194 return 0; 2297 return 0;
2195} 2298}
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index d551aa80a59b..5ce860b64c74 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -7,6 +7,7 @@
7#include "map.h" 7#include "map.h"
8#include "dso.h" 8#include "dso.h"
9#include "event.h" 9#include "event.h"
10#include "rwsem.h"
10 11
11struct addr_location; 12struct addr_location;
12struct branch_stack; 13struct branch_stack;
@@ -24,6 +25,17 @@ extern const char *ref_reloc_sym_names[];
24 25
25struct vdso_info; 26struct vdso_info;
26 27
28#define THREADS__TABLE_BITS 8
29#define THREADS__TABLE_SIZE (1 << THREADS__TABLE_BITS)
30
31struct threads {
32 struct rb_root entries;
33 struct rw_semaphore lock;
34 unsigned int nr;
35 struct list_head dead;
36 struct thread *last_match;
37};
38
27struct machine { 39struct machine {
28 struct rb_node rb_node; 40 struct rb_node rb_node;
29 pid_t pid; 41 pid_t pid;
@@ -31,11 +43,7 @@ struct machine {
31 bool comm_exec; 43 bool comm_exec;
32 bool kptr_restrict_warned; 44 bool kptr_restrict_warned;
33 char *root_dir; 45 char *root_dir;
34 struct rb_root threads; 46 struct threads threads[THREADS__TABLE_SIZE];
35 pthread_rwlock_t threads_lock;
36 unsigned int nr_threads;
37 struct list_head dead_threads;
38 struct thread *last_match;
39 struct vdso_info *vdso_info; 47 struct vdso_info *vdso_info;
40 struct perf_env *env; 48 struct perf_env *env;
41 struct dsos dsos; 49 struct dsos dsos;
@@ -49,6 +57,12 @@ struct machine {
49 }; 57 };
50}; 58};
51 59
60static inline struct threads *machine__threads(struct machine *machine, pid_t tid)
61{
62 /* Cast it to handle tid == -1 */
63 return &machine->threads[(unsigned int)tid % THREADS__TABLE_SIZE];
64}
65
52static inline 66static inline
53struct map *__machine__kernel_map(struct machine *machine, enum map_type type) 67struct map *__machine__kernel_map(struct machine *machine, enum map_type type)
54{ 68{
@@ -244,15 +258,18 @@ int machines__for_each_thread(struct machines *machines,
244int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, 258int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
245 struct target *target, struct thread_map *threads, 259 struct target *target, struct thread_map *threads,
246 perf_event__handler_t process, bool data_mmap, 260 perf_event__handler_t process, bool data_mmap,
247 unsigned int proc_map_timeout); 261 unsigned int proc_map_timeout,
262 unsigned int nr_threads_synthesize);
248static inline 263static inline
249int machine__synthesize_threads(struct machine *machine, struct target *target, 264int machine__synthesize_threads(struct machine *machine, struct target *target,
250 struct thread_map *threads, bool data_mmap, 265 struct thread_map *threads, bool data_mmap,
251 unsigned int proc_map_timeout) 266 unsigned int proc_map_timeout,
267 unsigned int nr_threads_synthesize)
252{ 268{
253 return __machine__synthesize_threads(machine, NULL, target, threads, 269 return __machine__synthesize_threads(machine, NULL, target, threads,
254 perf_event__process, data_mmap, 270 perf_event__process, data_mmap,
255 proc_map_timeout); 271 proc_map_timeout,
272 nr_threads_synthesize);
256} 273}
257 274
258pid_t machine__get_current_tid(struct machine *machine, int cpu); 275pid_t machine__get_current_tid(struct machine *machine, int cpu);
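A quick stand-alone sketch of the bucket selection done by machine__threads() above, assuming nothing beyond the macros shown; the cast makes tid == -1 land in a valid slot instead of indexing with a negative value:

#include <stdio.h>

#define THREADS__TABLE_BITS	8
#define THREADS__TABLE_SIZE	(1 << THREADS__TABLE_BITS)

/* Same bucket selection as machine__threads(): hash the tid into one of
 * 256 rb-tree buckets, each protected by its own lock. */
static unsigned int threads_bucket(int tid)
{
	return (unsigned int)tid % THREADS__TABLE_SIZE;
}

int main(void)
{
	int tids[] = { -1, 0, 1, 255, 256, 4096 };

	for (size_t i = 0; i < sizeof(tids) / sizeof(tids[0]); i++)
		printf("tid %5d -> bucket %u\n", tids[i], threads_bucket(tids[i]));
	return 0;
}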
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 4e7bd2750122..6d40efd74402 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -489,7 +489,7 @@ u64 map__objdump_2mem(struct map *map, u64 ip)
489static void maps__init(struct maps *maps) 489static void maps__init(struct maps *maps)
490{ 490{
491 maps->entries = RB_ROOT; 491 maps->entries = RB_ROOT;
492 pthread_rwlock_init(&maps->lock, NULL); 492 init_rwsem(&maps->lock);
493} 493}
494 494
495void map_groups__init(struct map_groups *mg, struct machine *machine) 495void map_groups__init(struct map_groups *mg, struct machine *machine)
@@ -518,9 +518,9 @@ static void __maps__purge(struct maps *maps)
518 518
519static void maps__exit(struct maps *maps) 519static void maps__exit(struct maps *maps)
520{ 520{
521 pthread_rwlock_wrlock(&maps->lock); 521 down_write(&maps->lock);
522 __maps__purge(maps); 522 __maps__purge(maps);
523 pthread_rwlock_unlock(&maps->lock); 523 up_write(&maps->lock);
524} 524}
525 525
526void map_groups__exit(struct map_groups *mg) 526void map_groups__exit(struct map_groups *mg)
@@ -587,7 +587,7 @@ struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
587 struct symbol *sym; 587 struct symbol *sym;
588 struct rb_node *nd; 588 struct rb_node *nd;
589 589
590 pthread_rwlock_rdlock(&maps->lock); 590 down_read(&maps->lock);
591 591
592 for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) { 592 for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
593 struct map *pos = rb_entry(nd, struct map, rb_node); 593 struct map *pos = rb_entry(nd, struct map, rb_node);
@@ -603,7 +603,7 @@ struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
603 603
604 sym = NULL; 604 sym = NULL;
605out: 605out:
606 pthread_rwlock_unlock(&maps->lock); 606 up_read(&maps->lock);
607 return sym; 607 return sym;
608} 608}
609 609
@@ -639,7 +639,7 @@ static size_t maps__fprintf(struct maps *maps, FILE *fp)
639 size_t printed = 0; 639 size_t printed = 0;
640 struct rb_node *nd; 640 struct rb_node *nd;
641 641
642 pthread_rwlock_rdlock(&maps->lock); 642 down_read(&maps->lock);
643 643
644 for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) { 644 for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
645 struct map *pos = rb_entry(nd, struct map, rb_node); 645 struct map *pos = rb_entry(nd, struct map, rb_node);
@@ -651,7 +651,7 @@ static size_t maps__fprintf(struct maps *maps, FILE *fp)
651 } 651 }
652 } 652 }
653 653
654 pthread_rwlock_unlock(&maps->lock); 654 up_read(&maps->lock);
655 655
656 return printed; 656 return printed;
657} 657}
@@ -683,7 +683,7 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp
683 struct rb_node *next; 683 struct rb_node *next;
684 int err = 0; 684 int err = 0;
685 685
686 pthread_rwlock_wrlock(&maps->lock); 686 down_write(&maps->lock);
687 687
688 root = &maps->entries; 688 root = &maps->entries;
689 next = rb_first(root); 689 next = rb_first(root);
@@ -751,7 +751,7 @@ put_map:
751 751
752 err = 0; 752 err = 0;
753out: 753out:
754 pthread_rwlock_unlock(&maps->lock); 754 up_write(&maps->lock);
755 return err; 755 return err;
756} 756}
757 757
@@ -772,7 +772,7 @@ int map_groups__clone(struct thread *thread,
772 struct map *map; 772 struct map *map;
773 struct maps *maps = &parent->maps[type]; 773 struct maps *maps = &parent->maps[type];
774 774
775 pthread_rwlock_rdlock(&maps->lock); 775 down_read(&maps->lock);
776 776
777 for (map = maps__first(maps); map; map = map__next(map)) { 777 for (map = maps__first(maps); map; map = map__next(map)) {
778 struct map *new = map__clone(map); 778 struct map *new = map__clone(map);
@@ -789,7 +789,7 @@ int map_groups__clone(struct thread *thread,
789 789
790 err = 0; 790 err = 0;
791out_unlock: 791out_unlock:
792 pthread_rwlock_unlock(&maps->lock); 792 up_read(&maps->lock);
793 return err; 793 return err;
794} 794}
795 795
@@ -816,9 +816,9 @@ static void __maps__insert(struct maps *maps, struct map *map)
816 816
817void maps__insert(struct maps *maps, struct map *map) 817void maps__insert(struct maps *maps, struct map *map)
818{ 818{
819 pthread_rwlock_wrlock(&maps->lock); 819 down_write(&maps->lock);
820 __maps__insert(maps, map); 820 __maps__insert(maps, map);
821 pthread_rwlock_unlock(&maps->lock); 821 up_write(&maps->lock);
822} 822}
823 823
824static void __maps__remove(struct maps *maps, struct map *map) 824static void __maps__remove(struct maps *maps, struct map *map)
@@ -829,9 +829,9 @@ static void __maps__remove(struct maps *maps, struct map *map)
829 829
830void maps__remove(struct maps *maps, struct map *map) 830void maps__remove(struct maps *maps, struct map *map)
831{ 831{
832 pthread_rwlock_wrlock(&maps->lock); 832 down_write(&maps->lock);
833 __maps__remove(maps, map); 833 __maps__remove(maps, map);
834 pthread_rwlock_unlock(&maps->lock); 834 up_write(&maps->lock);
835} 835}
836 836
837struct map *maps__find(struct maps *maps, u64 ip) 837struct map *maps__find(struct maps *maps, u64 ip)
@@ -839,7 +839,7 @@ struct map *maps__find(struct maps *maps, u64 ip)
839 struct rb_node **p, *parent = NULL; 839 struct rb_node **p, *parent = NULL;
840 struct map *m; 840 struct map *m;
841 841
842 pthread_rwlock_rdlock(&maps->lock); 842 down_read(&maps->lock);
843 843
844 p = &maps->entries.rb_node; 844 p = &maps->entries.rb_node;
845 while (*p != NULL) { 845 while (*p != NULL) {
@@ -855,7 +855,7 @@ struct map *maps__find(struct maps *maps, u64 ip)
855 855
856 m = NULL; 856 m = NULL;
857out: 857out:
858 pthread_rwlock_unlock(&maps->lock); 858 up_read(&maps->lock);
859 return m; 859 return m;
860} 860}
861 861
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 1fb9b8589adc..edeb7291c8e1 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -10,6 +10,7 @@
10#include <stdio.h> 10#include <stdio.h>
11#include <stdbool.h> 11#include <stdbool.h>
12#include <linux/types.h> 12#include <linux/types.h>
13#include "rwsem.h"
13 14
14enum map_type { 15enum map_type {
15 MAP__FUNCTION = 0, 16 MAP__FUNCTION = 0,
@@ -62,7 +63,7 @@ struct kmap {
62 63
63struct maps { 64struct maps {
64 struct rb_root entries; 65 struct rb_root entries;
65 pthread_rwlock_t lock; 66 struct rw_semaphore lock;
66}; 67};
67 68
68struct map_groups { 69struct map_groups {
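The pthread_rwlock_t to rw_semaphore conversion above follows one pattern throughout machine.c and map.c; here is a minimal sketch of that pattern with a hypothetical structure standing in for struct maps (rwsem.h is the wrapper header these files now include):

#include "rwsem.h"

struct lookup_table {			/* hypothetical, for illustration */
	struct rw_semaphore lock;
	/* ... protected state ... */
};

static void lookup_table__init(struct lookup_table *t)
{
	init_rwsem(&t->lock);
}

static void lookup_table__find(struct lookup_table *t)
{
	down_read(&t->lock);		/* read side, as in maps__find() */
	/* lookup under the lock */
	up_read(&t->lock);
}

static void lookup_table__insert(struct lookup_table *t)
{
	down_write(&t->lock);		/* write side, as in maps__insert() */
	/* modify under the lock */
	up_write(&t->lock);
}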
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
new file mode 100644
index 000000000000..0ddd9c199227
--- /dev/null
+++ b/tools/perf/util/metricgroup.c
@@ -0,0 +1,490 @@
1/*
2 * Copyright (c) 2017, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 */
14
15/* Manage metrics and groups of metrics from JSON files */
16
17#include "metricgroup.h"
18#include "evlist.h"
19#include "strbuf.h"
20#include "pmu.h"
21#include "expr.h"
22#include "rblist.h"
23#include "pmu.h"
24#include <string.h>
25#include <stdbool.h>
26#include <errno.h>
27#include "pmu-events/pmu-events.h"
28#include "strbuf.h"
29#include "strlist.h"
30#include <assert.h>
31#include <ctype.h>
32
33struct metric_event *metricgroup__lookup(struct rblist *metric_events,
34 struct perf_evsel *evsel,
35 bool create)
36{
37 struct rb_node *nd;
38 struct metric_event me = {
39 .evsel = evsel
40 };
41 nd = rblist__find(metric_events, &me);
42 if (nd)
43 return container_of(nd, struct metric_event, nd);
44 if (create) {
45 rblist__add_node(metric_events, &me);
46 nd = rblist__find(metric_events, &me);
47 if (nd)
48 return container_of(nd, struct metric_event, nd);
49 }
50 return NULL;
51}
52
53static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
54{
55 struct metric_event *a = container_of(rb_node,
56 struct metric_event,
57 nd);
58 const struct metric_event *b = entry;
59
60 if (a->evsel == b->evsel)
61 return 0;
62 if ((char *)a->evsel < (char *)b->evsel)
63 return -1;
64 return +1;
65}
66
67static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
68 const void *entry)
69{
70 struct metric_event *me = malloc(sizeof(struct metric_event));
71
72 if (!me)
73 return NULL;
74 memcpy(me, entry, sizeof(struct metric_event));
75 me->evsel = ((struct metric_event *)entry)->evsel;
76 INIT_LIST_HEAD(&me->head);
77 return &me->nd;
78}
79
80static void metricgroup__rblist_init(struct rblist *metric_events)
81{
82 rblist__init(metric_events);
83 metric_events->node_cmp = metric_event_cmp;
84 metric_events->node_new = metric_event_new;
85}
86
87struct egroup {
88 struct list_head nd;
89 int idnum;
90 const char **ids;
91 const char *metric_name;
92 const char *metric_expr;
93};
94
95static struct perf_evsel *find_evsel(struct perf_evlist *perf_evlist,
96 const char **ids,
97 int idnum,
98 struct perf_evsel **metric_events)
99{
100 struct perf_evsel *ev, *start = NULL;
101 int ind = 0;
102
103 evlist__for_each_entry (perf_evlist, ev) {
104 if (!strcmp(ev->name, ids[ind])) {
105 metric_events[ind] = ev;
106 if (ind == 0)
107 start = ev;
108 if (++ind == idnum) {
109 metric_events[ind] = NULL;
110 return start;
111 }
112 } else {
113 ind = 0;
114 start = NULL;
115 }
116 }
117 /*
118 * This can happen when an alias expands to multiple
119 * events, like for uncore events.
120 * We don't support this case for now.
121 */
122 return NULL;
123}
124
125static int metricgroup__setup_events(struct list_head *groups,
126 struct perf_evlist *perf_evlist,
127 struct rblist *metric_events_list)
128{
129 struct metric_event *me;
130 struct metric_expr *expr;
131 int i = 0;
132 int ret = 0;
133 struct egroup *eg;
134 struct perf_evsel *evsel;
135
136 list_for_each_entry (eg, groups, nd) {
137 struct perf_evsel **metric_events;
138
139 metric_events = calloc(sizeof(void *), eg->idnum + 1);
140 if (!metric_events) {
141 ret = -ENOMEM;
142 break;
143 }
144 evsel = find_evsel(perf_evlist, eg->ids, eg->idnum,
145 metric_events);
146 if (!evsel) {
147 pr_debug("Cannot resolve %s: %s\n",
148 eg->metric_name, eg->metric_expr);
149 continue;
150 }
151 for (i = 0; i < eg->idnum; i++)
152 metric_events[i]->collect_stat = true;
153 me = metricgroup__lookup(metric_events_list, evsel, true);
154 if (!me) {
155 ret = -ENOMEM;
156 break;
157 }
158 expr = malloc(sizeof(struct metric_expr));
159 if (!expr) {
160 ret = -ENOMEM;
161 break;
162 }
163 expr->metric_expr = eg->metric_expr;
164 expr->metric_name = eg->metric_name;
165 expr->metric_events = metric_events;
166 list_add(&expr->nd, &me->head);
167 }
168 return ret;
169}
170
171static bool match_metric(const char *n, const char *list)
172{
173 int len;
174 char *m;
175
176 if (!list)
177 return false;
178 if (!strcmp(list, "all"))
179 return true;
180 if (!n)
181 return !strcasecmp(list, "No_group");
182 len = strlen(list);
183 m = strcasestr(n, list);
184 if (!m)
185 return false;
186 if ((m == n || m[-1] == ';' || m[-1] == ' ') &&
187 (m[len] == 0 || m[len] == ';'))
188 return true;
189 return false;
190}
191
192struct mep {
193 struct rb_node nd;
194 const char *name;
195 struct strlist *metrics;
196};
197
198static int mep_cmp(struct rb_node *rb_node, const void *entry)
199{
200 struct mep *a = container_of(rb_node, struct mep, nd);
201 struct mep *b = (struct mep *)entry;
202
203 return strcmp(a->name, b->name);
204}
205
206static struct rb_node *mep_new(struct rblist *rl __maybe_unused,
207 const void *entry)
208{
209 struct mep *me = malloc(sizeof(struct mep));
210
211 if (!me)
212 return NULL;
213 memcpy(me, entry, sizeof(struct mep));
214 me->name = strdup(me->name);
215 if (!me->name)
216 goto out_me;
217 me->metrics = strlist__new(NULL, NULL);
218 if (!me->metrics)
219 goto out_name;
220 return &me->nd;
221out_name:
222 free((char *)me->name);
223out_me:
224 free(me);
225 return NULL;
226}
227
228static struct mep *mep_lookup(struct rblist *groups, const char *name)
229{
230 struct rb_node *nd;
231 struct mep me = {
232 .name = name
233 };
234 nd = rblist__find(groups, &me);
235 if (nd)
236 return container_of(nd, struct mep, nd);
237 rblist__add_node(groups, &me);
238 nd = rblist__find(groups, &me);
239 if (nd)
240 return container_of(nd, struct mep, nd);
241 return NULL;
242}
243
244static void mep_delete(struct rblist *rl __maybe_unused,
245 struct rb_node *nd)
246{
247 struct mep *me = container_of(nd, struct mep, nd);
248
249 strlist__delete(me->metrics);
250 free((void *)me->name);
251 free(me);
252}
253
254static void metricgroup__print_strlist(struct strlist *metrics, bool raw)
255{
256 struct str_node *sn;
257 int n = 0;
258
259 strlist__for_each_entry (sn, metrics) {
260 if (raw)
261 printf("%s%s", n > 0 ? " " : "", sn->s);
262 else
263 printf(" %s\n", sn->s);
264 n++;
265 }
266 if (raw)
267 putchar('\n');
268}
269
270void metricgroup__print(bool metrics, bool metricgroups, char *filter,
271 bool raw)
272{
273 struct pmu_events_map *map = perf_pmu__find_map();
274 struct pmu_event *pe;
275 int i;
276 struct rblist groups;
277 struct rb_node *node, *next;
278 struct strlist *metriclist = NULL;
279
280 if (!map)
281 return;
282
283 if (!metricgroups) {
284 metriclist = strlist__new(NULL, NULL);
285 if (!metriclist)
286 return;
287 }
288
289 rblist__init(&groups);
290 groups.node_new = mep_new;
291 groups.node_cmp = mep_cmp;
292 groups.node_delete = mep_delete;
293 for (i = 0; ; i++) {
294 const char *g;
295 pe = &map->table[i];
296
297 if (!pe->name && !pe->metric_group && !pe->metric_name)
298 break;
299 if (!pe->metric_expr)
300 continue;
301 g = pe->metric_group;
302 if (!g && pe->metric_name) {
303 if (pe->name)
304 continue;
305 g = "No_group";
306 }
307 if (g) {
308 char *omg;
309 char *mg = strdup(g);
310
311 if (!mg)
312 return;
313 omg = mg;
314 while ((g = strsep(&mg, ";")) != NULL) {
315 struct mep *me;
316 char *s;
317
318 if (*g == 0)
319 g = "No_group";
320 while (isspace(*g))
321 g++;
322 if (filter && !strstr(g, filter))
323 continue;
324 if (raw)
325 s = (char *)pe->metric_name;
326 else {
327 if (asprintf(&s, "%s\n\t[%s]",
328 pe->metric_name, pe->desc) < 0)
329 return;
330 }
331
332 if (!s)
333 continue;
334
335 if (!metricgroups) {
336 strlist__add(metriclist, s);
337 } else {
338 me = mep_lookup(&groups, g);
339 if (!me)
340 continue;
341 strlist__add(me->metrics, s);
342 }
343 }
344 free(omg);
345 }
346 }
347
348 if (metricgroups && !raw)
349 printf("\nMetric Groups:\n\n");
350 else if (metrics && !raw)
351 printf("\nMetrics:\n\n");
352
353 for (node = rb_first(&groups.entries); node; node = next) {
354 struct mep *me = container_of(node, struct mep, nd);
355
356 if (metricgroups)
357 printf("%s%s%s", me->name, metrics ? ":" : "", raw ? " " : "\n");
358 if (metrics)
359 metricgroup__print_strlist(me->metrics, raw);
360 next = rb_next(node);
361 rblist__remove_node(&groups, node);
362 }
363 if (!metricgroups)
364 metricgroup__print_strlist(metriclist, raw);
365 strlist__delete(metriclist);
366}
367
368static int metricgroup__add_metric(const char *metric, struct strbuf *events,
369 struct list_head *group_list)
370{
371 struct pmu_events_map *map = perf_pmu__find_map();
372 struct pmu_event *pe;
373 int ret = -EINVAL;
374 int i, j;
375
376 if (!map)
377 return 0;
378
379 for (i = 0; ; i++) {
380 pe = &map->table[i];
381
382 if (!pe->name && !pe->metric_group && !pe->metric_name)
383 break;
384 if (!pe->metric_expr)
385 continue;
386 if (match_metric(pe->metric_group, metric) ||
387 match_metric(pe->metric_name, metric)) {
388 const char **ids;
389 int idnum;
390 struct egroup *eg;
391
392 pr_debug("metric expr %s for %s\n", pe->metric_expr, pe->metric_name);
393
394 if (expr__find_other(pe->metric_expr,
395 NULL, &ids, &idnum) < 0)
396 continue;
397 if (events->len > 0)
398 strbuf_addf(events, ",");
399 for (j = 0; j < idnum; j++) {
400 pr_debug("found event %s\n", ids[j]);
401 strbuf_addf(events, "%s%s",
402 j == 0 ? "{" : ",",
403 ids[j]);
404 }
405 strbuf_addf(events, "}:W");
406
407 eg = malloc(sizeof(struct egroup));
408 if (!eg) {
409 ret = -ENOMEM;
410 break;
411 }
412 eg->ids = ids;
413 eg->idnum = idnum;
414 eg->metric_name = pe->metric_name;
415 eg->metric_expr = pe->metric_expr;
416 list_add_tail(&eg->nd, group_list);
417 ret = 0;
418 }
419 }
420 return ret;
421}
422
423static int metricgroup__add_metric_list(const char *list, struct strbuf *events,
424 struct list_head *group_list)
425{
426 char *llist, *nlist, *p;
427 int ret = -EINVAL;
428
429 nlist = strdup(list);
430 if (!nlist)
431 return -ENOMEM;
432 llist = nlist;
433
434 strbuf_init(events, 100);
435 strbuf_addf(events, "%s", "");
436
437 while ((p = strsep(&llist, ",")) != NULL) {
438 ret = metricgroup__add_metric(p, events, group_list);
439 if (ret == -EINVAL) {
440 fprintf(stderr, "Cannot find metric or group `%s'\n",
441 p);
442 break;
443 }
444 }
445 free(nlist);
446 return ret;
447}
448
449static void metricgroup__free_egroups(struct list_head *group_list)
450{
451 struct egroup *eg, *egtmp;
452 int i;
453
454 list_for_each_entry_safe (eg, egtmp, group_list, nd) {
455 for (i = 0; i < eg->idnum; i++)
456 free((char *)eg->ids[i]);
457 free(eg->ids);
458 free(eg);
459 }
460}
461
462int metricgroup__parse_groups(const struct option *opt,
463 const char *str,
464 struct rblist *metric_events)
465{
466 struct parse_events_error parse_error;
467 struct perf_evlist *perf_evlist = *(struct perf_evlist **)opt->value;
468 struct strbuf extra_events;
469 LIST_HEAD(group_list);
470 int ret;
471
472 if (metric_events->nr_entries == 0)
473 metricgroup__rblist_init(metric_events);
474 ret = metricgroup__add_metric_list(str, &extra_events, &group_list);
475 if (ret)
476 return ret;
477 pr_debug("adding %s\n", extra_events.buf);
478 memset(&parse_error, 0, sizeof(struct parse_events_error));
479 ret = parse_events(perf_evlist, extra_events.buf, &parse_error);
480 if (ret) {
481 parse_events_print_error(&parse_error, extra_events.buf);
482 goto out;
483 }
484 strbuf_release(&extra_events);
485 ret = metricgroup__setup_events(&group_list, perf_evlist,
486 metric_events);
487out:
488 metricgroup__free_egroups(&group_list);
489 return ret;
490}
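For illustration, the event-group string that metricgroup__add_metric() builds can be reproduced stand-alone; the event ids below are made up, only the "{...}:W" shape matches what the function emits:

#include <stdio.h>

int main(void)
{
	const char *ids[] = { "event_a", "event_b" };	/* hypothetical ids */
	char events[256];
	int len = 0;

	/* same shape as the strbuf_addf() calls in metricgroup__add_metric() */
	for (int j = 0; j < 2; j++)
		len += snprintf(events + len, sizeof(events) - len, "%s%s",
				j == 0 ? "{" : ",", ids[j]);
	snprintf(events + len, sizeof(events) - len, "}:W");

	printf("%s\n", events);		/* prints {event_a,event_b}:W */
	return 0;
}

The trailing ':W' is the weak-group modifier handled by the parse-events changes further down, so each metric is passed to parse_events() as a single weak group.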
diff --git a/tools/perf/util/metricgroup.h b/tools/perf/util/metricgroup.h
new file mode 100644
index 000000000000..06854e125ee7
--- /dev/null
+++ b/tools/perf/util/metricgroup.h
@@ -0,0 +1,31 @@
1#ifndef METRICGROUP_H
2#define METRICGROUP_H 1
3
4#include "linux/list.h"
5#include "rblist.h"
6#include <subcmd/parse-options.h>
7#include "evlist.h"
8#include "strbuf.h"
9
10struct metric_event {
11 struct rb_node nd;
12 struct perf_evsel *evsel;
13 struct list_head head; /* list of metric_expr */
14};
15
16struct metric_expr {
17 struct list_head nd;
18 const char *metric_expr;
19 const char *metric_name;
20 struct perf_evsel **metric_events;
21};
22
23struct metric_event *metricgroup__lookup(struct rblist *metric_events,
24 struct perf_evsel *evsel,
25 bool create);
26int metricgroup__parse_groups(const struct option *opt,
27 const char *str,
28 struct rblist *metric_events);
29
30void metricgroup__print(bool metrics, bool groups, char *filter, bool raw);
31#endif
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
new file mode 100644
index 000000000000..9fe5f9c7d577
--- /dev/null
+++ b/tools/perf/util/mmap.c
@@ -0,0 +1,352 @@
1/*
2 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3 *
4 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
5 * copyright notes.
6 *
7 * Released under the GPL v2. (and only v2, not any later version)
8 */
9
10#include <sys/mman.h>
11#include <inttypes.h>
12#include <asm/bug.h>
13#include "debug.h"
14#include "event.h"
15#include "mmap.h"
16#include "util.h" /* page_size */
17
18size_t perf_mmap__mmap_len(struct perf_mmap *map)
19{
20 return map->mask + 1 + page_size;
21}
22
23/* When check_messup is true, 'end' must point to a good entry */
24static union perf_event *perf_mmap__read(struct perf_mmap *map, bool check_messup,
25 u64 start, u64 end, u64 *prev)
26{
27 unsigned char *data = map->base + page_size;
28 union perf_event *event = NULL;
29 int diff = end - start;
30
31 if (check_messup) {
32 /*
33 * If we're further behind than half the buffer, there's a chance
34 * the writer will bite our tail and mess up the samples under us.
35 *
36 * If we somehow ended up ahead of the 'end', we got messed up.
37 *
38 * In either case, truncate and restart at 'end'.
39 */
40 if (diff > map->mask / 2 || diff < 0) {
41 fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
42
43 /*
44 * 'end' points to a known good entry, start there.
45 */
46 start = end;
47 diff = 0;
48 }
49 }
50
51 if (diff >= (int)sizeof(event->header)) {
52 size_t size;
53
54 event = (union perf_event *)&data[start & map->mask];
55 size = event->header.size;
56
57 if (size < sizeof(event->header) || diff < (int)size) {
58 event = NULL;
59 goto broken_event;
60 }
61
62 /*
63 * Event straddles the mmap boundary -- header should always
64 * be inside due to u64 alignment of output.
65 */
66 if ((start & map->mask) + size != ((start + size) & map->mask)) {
67 unsigned int offset = start;
68 unsigned int len = min(sizeof(*event), size), cpy;
69 void *dst = map->event_copy;
70
71 do {
72 cpy = min(map->mask + 1 - (offset & map->mask), len);
73 memcpy(dst, &data[offset & map->mask], cpy);
74 offset += cpy;
75 dst += cpy;
76 len -= cpy;
77 } while (len);
78
79 event = (union perf_event *)map->event_copy;
80 }
81
82 start += size;
83 }
84
85broken_event:
86 if (prev)
87 *prev = start;
88
89 return event;
90}
91
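The straddling-event handling above can be shown in isolation: copy a range out of a power-of-two ring buffer, splitting into two memcpy() calls when the range wraps. A stand-alone sketch of that idea (not the patch's code; the buffer and offsets are invented):

#include <stdio.h>
#include <string.h>

static void ring_copy(void *dst, const unsigned char *ring, size_t mask,
		      size_t start, size_t len)
{
	size_t first = len;

	/* does the range run past the end of the buffer? */
	if ((start & mask) + len > mask + 1)
		first = mask + 1 - (start & mask);

	memcpy(dst, ring + (start & mask), first);		 /* tail part */
	memcpy((unsigned char *)dst + first, ring, len - first); /* wrapped part */
}

int main(void)
{
	unsigned char ring[8] = "ABCDEFGH";
	char out[6] = { 0 };

	ring_copy(out, ring, sizeof(ring) - 1, 6, 5);
	printf("%s\n", out);		/* prints GHABC */
	return 0;
}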
92union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup)
93{
94 u64 head;
95 u64 old = map->prev;
96
97 /*
98 * Check if event was unmapped due to a POLLHUP/POLLERR.
99 */
100 if (!refcount_read(&map->refcnt))
101 return NULL;
102
103 head = perf_mmap__read_head(map);
104
105 return perf_mmap__read(map, check_messup, old, head, &map->prev);
106}
107
108union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
109{
110 u64 head, end;
111 u64 start = map->prev;
112
113 /*
114 * Check if event was unmapped due to a POLLHUP/POLLERR.
115 */
116 if (!refcount_read(&map->refcnt))
117 return NULL;
118
119 head = perf_mmap__read_head(map);
120 if (!head)
121 return NULL;
122
123 /*
 124	 * The 'head' pointer starts at 0 and the kernel subtracts the record
 125	 * size from it on every write, so in fact 'head' is
 126	 * negative. The 'end' pointer is made manually by adding the size of
 127	 * the ring buffer to the 'head' pointer, meaning the valid data we
 128	 * can read is the whole ring buffer. If 'end' is positive, the ring
 129	 * buffer has not been fully filled, so we must adjust 'end' to 0.
 130	 *
 131	 * However, since both 'head' and 'end' are unsigned, we can't
 132	 * simply compare 'end' against 0. Here we compare '-head' with
 133	 * the size of the ring buffer, where -head is the number of bytes
 134	 * the kernel has written to the ring buffer.
135 */
136 if (-head < (u64)(map->mask + 1))
137 end = 0;
138 else
139 end = head + map->mask + 1;
140
141 return perf_mmap__read(map, false, start, end, &map->prev);
142}
143
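To make the arithmetic in the comment above concrete, here is the same 'end' computation in stand-alone form, with the ring size passed explicitly (size == mask + 1); the head values in main() are invented:

#include <stdio.h>
#include <stdint.h>

static uint64_t backward_end(uint64_t head, uint64_t size)
{
	if ((uint64_t)-head < size)	/* less than one full buffer written */
		return 0;		/* valid data starts at offset 0 */
	return head + size;		/* else: oldest still-valid byte */
}

int main(void)
{
	uint64_t size = 4096;
	uint64_t heads[] = { (uint64_t)-100, (uint64_t)-12288 };

	for (int i = 0; i < 2; i++) {
		uint64_t end = backward_end(heads[i], size);

		/* readable window: 100 bytes, then the full 4096 bytes */
		printf("readable: %llu bytes\n",
		       (unsigned long long)(end - heads[i]));
	}
	return 0;
}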
144void perf_mmap__read_catchup(struct perf_mmap *map)
145{
146 u64 head;
147
148 if (!refcount_read(&map->refcnt))
149 return;
150
151 head = perf_mmap__read_head(map);
152 map->prev = head;
153}
154
155static bool perf_mmap__empty(struct perf_mmap *map)
156{
157 return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
158}
159
160void perf_mmap__get(struct perf_mmap *map)
161{
162 refcount_inc(&map->refcnt);
163}
164
165void perf_mmap__put(struct perf_mmap *map)
166{
167 BUG_ON(map->base && refcount_read(&map->refcnt) == 0);
168
169 if (refcount_dec_and_test(&map->refcnt))
170 perf_mmap__munmap(map);
171}
172
173void perf_mmap__consume(struct perf_mmap *map, bool overwrite)
174{
175 if (!overwrite) {
176 u64 old = map->prev;
177
178 perf_mmap__write_tail(map, old);
179 }
180
181 if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
182 perf_mmap__put(map);
183}
184
185int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
186 struct auxtrace_mmap_params *mp __maybe_unused,
187 void *userpg __maybe_unused,
188 int fd __maybe_unused)
189{
190 return 0;
191}
192
193void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
194{
195}
196
197void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
198 off_t auxtrace_offset __maybe_unused,
199 unsigned int auxtrace_pages __maybe_unused,
200 bool auxtrace_overwrite __maybe_unused)
201{
202}
203
204void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
205 struct perf_evlist *evlist __maybe_unused,
206 int idx __maybe_unused,
207 bool per_cpu __maybe_unused)
208{
209}
210
211void perf_mmap__munmap(struct perf_mmap *map)
212{
213 if (map->base != NULL) {
214 munmap(map->base, perf_mmap__mmap_len(map));
215 map->base = NULL;
216 map->fd = -1;
217 refcount_set(&map->refcnt, 0);
218 }
219 auxtrace_mmap__munmap(&map->auxtrace_mmap);
220}
221
222int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
223{
224 /*
225 * The last one will be done at perf_evlist__mmap_consume(), so that we
226 * make sure we don't prevent tools from consuming every last event in
227 * the ring buffer.
228 *
229 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
230 * anymore, but the last events for it are still in the ring buffer,
231 * waiting to be consumed.
232 *
 233	 * Tools can choose to ignore this at their own discretion, but the
234 * evlist layer can't just drop it when filtering events in
235 * perf_evlist__filter_pollfd().
236 */
237 refcount_set(&map->refcnt, 2);
238 map->prev = 0;
239 map->mask = mp->mask;
240 map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
241 MAP_SHARED, fd, 0);
242 if (map->base == MAP_FAILED) {
243 pr_debug2("failed to mmap perf event ring buffer, error %d\n",
244 errno);
245 map->base = NULL;
246 return -1;
247 }
248 map->fd = fd;
249
250 if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
251 &mp->auxtrace_mp, map->base, fd))
252 return -1;
253
254 return 0;
255}
256
257static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
258{
259 struct perf_event_header *pheader;
260 u64 evt_head = head;
261 int size = mask + 1;
262
263 pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
264 pheader = (struct perf_event_header *)(buf + (head & mask));
265 *start = head;
266 while (true) {
267 if (evt_head - head >= (unsigned int)size) {
268 pr_debug("Finished reading backward ring buffer: rewind\n");
269 if (evt_head - head > (unsigned int)size)
270 evt_head -= pheader->size;
271 *end = evt_head;
272 return 0;
273 }
274
275 pheader = (struct perf_event_header *)(buf + (evt_head & mask));
276
277 if (pheader->size == 0) {
278 pr_debug("Finished reading backward ring buffer: get start\n");
279 *end = evt_head;
280 return 0;
281 }
282
283 evt_head += pheader->size;
284 pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
285 }
286 WARN_ONCE(1, "Shouldn't get here\n");
287 return -1;
288}
289
290static int rb_find_range(void *data, int mask, u64 head, u64 old,
291 u64 *start, u64 *end, bool backward)
292{
293 if (!backward) {
294 *start = old;
295 *end = head;
296 return 0;
297 }
298
299 return backward_rb_find_range(data, mask, head, start, end);
300}
301
302int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
303 void *to, int push(void *to, void *buf, size_t size))
304{
305 u64 head = perf_mmap__read_head(md);
306 u64 old = md->prev;
307 u64 end = head, start = old;
308 unsigned char *data = md->base + page_size;
309 unsigned long size;
310 void *buf;
311 int rc = 0;
312
313 if (rb_find_range(data, md->mask, head, old, &start, &end, backward))
314 return -1;
315
316 if (start == end)
317 return 0;
318
319 size = end - start;
320 if (size > (unsigned long)(md->mask) + 1) {
321 WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
322
323 md->prev = head;
324 perf_mmap__consume(md, overwrite || backward);
325 return 0;
326 }
327
328 if ((start & md->mask) + size != (end & md->mask)) {
329 buf = &data[start & md->mask];
330 size = md->mask + 1 - (start & md->mask);
331 start += size;
332
333 if (push(to, buf, size) < 0) {
334 rc = -1;
335 goto out;
336 }
337 }
338
339 buf = &data[start & md->mask];
340 size = end - start;
341 start += size;
342
343 if (push(to, buf, size) < 0) {
344 rc = -1;
345 goto out;
346 }
347
348 md->prev = head;
349 perf_mmap__consume(md, overwrite || backward);
350out:
351 return rc;
352}
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
new file mode 100644
index 000000000000..3a5cb5a6e94a
--- /dev/null
+++ b/tools/perf/util/mmap.h
@@ -0,0 +1,97 @@
1#ifndef __PERF_MMAP_H
2#define __PERF_MMAP_H 1
3
4#include <linux/compiler.h>
5#include <linux/refcount.h>
6#include <linux/types.h>
7#include <asm/barrier.h>
8#include <stdbool.h>
9#include "auxtrace.h"
10#include "event.h"
11
12/**
13 * struct perf_mmap - perf's ring buffer mmap details
14 *
15 * @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
16 */
17struct perf_mmap {
18 void *base;
19 int mask;
20 int fd;
21 refcount_t refcnt;
22 u64 prev;
23 struct auxtrace_mmap auxtrace_mmap;
24 char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
25};
26
27/*
28 * State machine of bkw_mmap_state:
29 *
30 * .________________(forbid)_____________.
31 * | V
32 * NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
33 * ^ ^ | ^ |
34 * | |__(forbid)____/ |___(forbid)___/|
35 * | |
36 * \_________________(3)_______________/
37 *
38 * NOTREADY : Backward ring buffers are not ready
39 * RUNNING : Backward ring buffers are recording
40 * DATA_PENDING : We are required to collect data from backward ring buffers
41 * EMPTY : We have collected data from backward ring buffers.
42 *
43 * (0): Setup backward ring buffer
44 * (1): Pause ring buffers for reading
45 * (2): Read from ring buffers
46 * (3): Resume ring buffers for recording
47 */
48enum bkw_mmap_state {
49 BKW_MMAP_NOTREADY,
50 BKW_MMAP_RUNNING,
51 BKW_MMAP_DATA_PENDING,
52 BKW_MMAP_EMPTY,
53};
54
55struct mmap_params {
56 int prot, mask;
57 struct auxtrace_mmap_params auxtrace_mp;
58};
59
60int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd);
61void perf_mmap__munmap(struct perf_mmap *map);
62
63void perf_mmap__get(struct perf_mmap *map);
64void perf_mmap__put(struct perf_mmap *map);
65
66void perf_mmap__consume(struct perf_mmap *map, bool overwrite);
67
68void perf_mmap__read_catchup(struct perf_mmap *md);
69
70static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
71{
72 struct perf_event_mmap_page *pc = mm->base;
73 u64 head = READ_ONCE(pc->data_head);
74 rmb();
75 return head;
76}
77
78static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
79{
80 struct perf_event_mmap_page *pc = md->base;
81
82 /*
83 * ensure all reads are done before we write the tail out.
84 */
85 mb();
86 pc->data_tail = tail;
87}
88
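The ordering that rmb() and mb() provide in the two helpers above can also be written with C11 atomics as an analogy -- this is not what the tools/ barrier macros expand to, only the acquire/release pairing they stand in for:

#include <stdatomic.h>
#include <stdint.h>

/* Consumer side of the ring: acquire the producer's data_head before reading
 * samples, release data_tail only after those reads are done. */
static uint64_t ring_read_head(_Atomic uint64_t *data_head)
{
	return atomic_load_explicit(data_head, memory_order_acquire);
}

static void ring_write_tail(_Atomic uint64_t *data_tail, uint64_t tail)
{
	atomic_store_explicit(data_tail, tail, memory_order_release);
}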
89union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup);
90union perf_event *perf_mmap__read_backward(struct perf_mmap *map);
91
92int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
93 void *to, int push(void *to, void *buf, size_t size));
94
95size_t perf_mmap__mmap_len(struct perf_mmap *map);
96
97#endif /*__PERF_MMAP_H */
diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c
index a58e91197729..5be021701f34 100644
--- a/tools/perf/util/namespaces.c
+++ b/tools/perf/util/namespaces.c
@@ -11,6 +11,7 @@
11#include "event.h" 11#include "event.h"
12#include <sys/types.h> 12#include <sys/types.h>
13#include <sys/stat.h> 13#include <sys/stat.h>
14#include <fcntl.h>
14#include <limits.h> 15#include <limits.h>
15#include <sched.h> 16#include <sched.h>
16#include <stdlib.h> 17#include <stdlib.h>
diff --git a/tools/perf/util/namespaces.h b/tools/perf/util/namespaces.h
index 05d82601c9a6..760558dcfd18 100644
--- a/tools/perf/util/namespaces.h
+++ b/tools/perf/util/namespaces.h
@@ -9,9 +9,10 @@
9#ifndef __PERF_NAMESPACES_H 9#ifndef __PERF_NAMESPACES_H
10#define __PERF_NAMESPACES_H 10#define __PERF_NAMESPACES_H
11 11
12#include "../perf.h" 12#include <sys/types.h>
13#include <linux/list.h> 13#include <linux/perf_event.h>
14#include <linux/refcount.h> 14#include <linux/refcount.h>
15#include <linux/types.h>
15 16
16struct namespaces_event; 17struct namespaces_event;
17 18
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 56694e3409ea..170316795a18 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -29,6 +29,7 @@
29#include "probe-file.h" 29#include "probe-file.h"
30#include "asm/bug.h" 30#include "asm/bug.h"
31#include "util/parse-branch-options.h" 31#include "util/parse-branch-options.h"
32#include "metricgroup.h"
32 33
33#define MAX_NAME_LEN 100 34#define MAX_NAME_LEN 100
34 35
@@ -1115,6 +1116,7 @@ do { \
1115 INIT_LIST_HEAD(&__t->list); \ 1116 INIT_LIST_HEAD(&__t->list); \
1116 __t->type = PERF_EVSEL__CONFIG_TERM_ ## __type; \ 1117 __t->type = PERF_EVSEL__CONFIG_TERM_ ## __type; \
1117 __t->val.__name = __val; \ 1118 __t->val.__name = __val; \
1119 __t->weak = term->weak; \
1118 list_add_tail(&__t->list, head_terms); \ 1120 list_add_tail(&__t->list, head_terms); \
1119} while (0) 1121} while (0)
1120 1122
@@ -1220,11 +1222,17 @@ static int __parse_events_add_pmu(struct parse_events_state *parse_state,
1220 struct perf_pmu_info info; 1222 struct perf_pmu_info info;
1221 struct perf_pmu *pmu; 1223 struct perf_pmu *pmu;
1222 struct perf_evsel *evsel; 1224 struct perf_evsel *evsel;
1225 struct parse_events_error *err = parse_state->error;
1223 LIST_HEAD(config_terms); 1226 LIST_HEAD(config_terms);
1224 1227
1225 pmu = perf_pmu__find(name); 1228 pmu = perf_pmu__find(name);
1226 if (!pmu) 1229 if (!pmu) {
1230 if (asprintf(&err->str,
1231 "Cannot find PMU `%s'. Missing kernel support?",
1232 name) < 0)
1233 err->str = NULL;
1227 return -EINVAL; 1234 return -EINVAL;
1235 }
1228 1236
1229 if (pmu->default_config) { 1237 if (pmu->default_config) {
1230 memcpy(&attr, pmu->default_config, 1238 memcpy(&attr, pmu->default_config,
@@ -1368,6 +1376,7 @@ struct event_modifier {
1368 int exclude_GH; 1376 int exclude_GH;
1369 int sample_read; 1377 int sample_read;
1370 int pinned; 1378 int pinned;
1379 int weak;
1371}; 1380};
1372 1381
1373static int get_event_modifier(struct event_modifier *mod, char *str, 1382static int get_event_modifier(struct event_modifier *mod, char *str,
@@ -1386,6 +1395,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
1386 1395
1387 int exclude = eu | ek | eh; 1396 int exclude = eu | ek | eh;
1388 int exclude_GH = evsel ? evsel->exclude_GH : 0; 1397 int exclude_GH = evsel ? evsel->exclude_GH : 0;
1398 int weak = 0;
1389 1399
1390 memset(mod, 0, sizeof(*mod)); 1400 memset(mod, 0, sizeof(*mod));
1391 1401
@@ -1423,6 +1433,8 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
1423 sample_read = 1; 1433 sample_read = 1;
1424 } else if (*str == 'D') { 1434 } else if (*str == 'D') {
1425 pinned = 1; 1435 pinned = 1;
1436 } else if (*str == 'W') {
1437 weak = 1;
1426 } else 1438 } else
1427 break; 1439 break;
1428 1440
@@ -1453,6 +1465,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
1453 mod->exclude_GH = exclude_GH; 1465 mod->exclude_GH = exclude_GH;
1454 mod->sample_read = sample_read; 1466 mod->sample_read = sample_read;
1455 mod->pinned = pinned; 1467 mod->pinned = pinned;
1468 mod->weak = weak;
1456 1469
1457 return 0; 1470 return 0;
1458} 1471}
@@ -1466,7 +1479,7 @@ static int check_modifier(char *str)
1466 char *p = str; 1479 char *p = str;
1467 1480
1468 /* The sizeof includes 0 byte as well. */ 1481 /* The sizeof includes 0 byte as well. */
1469 if (strlen(str) > (sizeof("ukhGHpppPSDI") - 1)) 1482 if (strlen(str) > (sizeof("ukhGHpppPSDIW") - 1))
1470 return -1; 1483 return -1;
1471 1484
1472 while (*p) { 1485 while (*p) {
@@ -1506,6 +1519,7 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add)
1506 evsel->exclude_GH = mod.exclude_GH; 1519 evsel->exclude_GH = mod.exclude_GH;
1507 evsel->sample_read = mod.sample_read; 1520 evsel->sample_read = mod.sample_read;
1508 evsel->precise_max = mod.precise_max; 1521 evsel->precise_max = mod.precise_max;
1522 evsel->weak_group = mod.weak;
1509 1523
1510 if (perf_evsel__is_group_leader(evsel)) 1524 if (perf_evsel__is_group_leader(evsel))
1511 evsel->attr.pinned = mod.pinned; 1525 evsel->attr.pinned = mod.pinned;
@@ -1728,8 +1742,8 @@ static int get_term_width(void)
1728 return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col; 1742 return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
1729} 1743}
1730 1744
1731static void parse_events_print_error(struct parse_events_error *err, 1745void parse_events_print_error(struct parse_events_error *err,
1732 const char *event) 1746 const char *event)
1733{ 1747{
1734 const char *str = "invalid or unsupported event: "; 1748 const char *str = "invalid or unsupported event: ";
1735 char _buf[MAX_WIDTH]; 1749 char _buf[MAX_WIDTH];
@@ -1784,8 +1798,6 @@ static void parse_events_print_error(struct parse_events_error *err,
1784 zfree(&err->str); 1798 zfree(&err->str);
1785 zfree(&err->help); 1799 zfree(&err->help);
1786 } 1800 }
1787
1788 fprintf(stderr, "Run 'perf list' for a list of valid events\n");
1789} 1801}
1790 1802
1791#undef MAX_WIDTH 1803#undef MAX_WIDTH
@@ -1797,8 +1809,10 @@ int parse_events_option(const struct option *opt, const char *str,
1797 struct parse_events_error err = { .idx = 0, }; 1809 struct parse_events_error err = { .idx = 0, };
1798 int ret = parse_events(evlist, str, &err); 1810 int ret = parse_events(evlist, str, &err);
1799 1811
1800 if (ret) 1812 if (ret) {
1801 parse_events_print_error(&err, str); 1813 parse_events_print_error(&err, str);
1814 fprintf(stderr, "Run 'perf list' for a list of valid events\n");
1815 }
1802 1816
1803 return ret; 1817 return ret;
1804} 1818}
@@ -2376,6 +2390,8 @@ void print_events(const char *event_glob, bool name_only, bool quiet_flag,
2376 print_tracepoint_events(NULL, NULL, name_only); 2390 print_tracepoint_events(NULL, NULL, name_only);
2377 2391
2378 print_sdt_events(NULL, NULL, name_only); 2392 print_sdt_events(NULL, NULL, name_only);
2393
2394 metricgroup__print(true, true, NULL, name_only);
2379} 2395}
2380 2396
2381int parse_events__is_hardcoded_term(struct parse_events_term *term) 2397int parse_events__is_hardcoded_term(struct parse_events_term *term)
@@ -2395,6 +2411,7 @@ static int new_term(struct parse_events_term **_term,
2395 2411
2396 *term = *temp; 2412 *term = *temp;
2397 INIT_LIST_HEAD(&term->list); 2413 INIT_LIST_HEAD(&term->list);
2414 term->weak = false;
2398 2415
2399 switch (term->type_val) { 2416 switch (term->type_val) {
2400 case PARSE_EVENTS__TERM_TYPE_NUM: 2417 case PARSE_EVENTS__TERM_TYPE_NUM:
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index eed50b54bab3..88108cd11b4c 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -101,6 +101,9 @@ struct parse_events_term {
101 /* error string indexes for within parsed string */ 101 /* error string indexes for within parsed string */
102 int err_term; 102 int err_term;
103 int err_val; 103 int err_val;
104
105 /* Coming from implicit alias */
106 bool weak;
104}; 107};
105 108
106struct parse_events_error { 109struct parse_events_error {
@@ -203,6 +206,9 @@ int is_valid_tracepoint(const char *event_string);
203int valid_event_mount(const char *eventfs); 206int valid_event_mount(const char *eventfs);
204char *parse_events_formats_error_string(char *additional_terms); 207char *parse_events_formats_error_string(char *additional_terms);
205 208
209void parse_events_print_error(struct parse_events_error *err,
210 const char *event);
211
206#ifdef HAVE_LIBELF_SUPPORT 212#ifdef HAVE_LIBELF_SUPPORT
207/* 213/*
208 * If the probe point starts with '%', 214 * If the probe point starts with '%',
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 025729510525..655ecff636a8 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -179,7 +179,7 @@ name [a-zA-Z_*?][a-zA-Z0-9_*?.]*
179name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?.:]* 179name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?.:]*
180drv_cfg_term [a-zA-Z0-9_\.]+(=[a-zA-Z0-9_*?\.:]+)? 180drv_cfg_term [a-zA-Z0-9_\.]+(=[a-zA-Z0-9_*?\.:]+)?
181/* If you add a modifier you need to update check_modifier() */ 181/* If you add a modifier you need to update check_modifier() */
182modifier_event [ukhpPGHSDI]+ 182modifier_event [ukhpPGHSDIW]+
183modifier_bp [rwx]{1,3} 183modifier_bp [rwx]{1,3}
184 184
185%% 185%%
@@ -306,6 +306,7 @@ cpu-migrations|migrations { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COU
306alignment-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); } 306alignment-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); }
307emulation-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); } 307emulation-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
308dummy { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); } 308dummy { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); }
309duration_time { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); }
309bpf-output { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_BPF_OUTPUT); } 310bpf-output { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_BPF_OUTPUT); }
310 311
311 /* 312 /*
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index b10b35a63138..80fb1593913a 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -3,6 +3,7 @@
3#include <linux/compiler.h> 3#include <linux/compiler.h>
4#include <sys/types.h> 4#include <sys/types.h>
5#include <errno.h> 5#include <errno.h>
6#include <fcntl.h>
6#include <sys/stat.h> 7#include <sys/stat.h>
7#include <unistd.h> 8#include <unistd.h>
8#include <stdio.h> 9#include <stdio.h>
@@ -404,6 +405,11 @@ static int pmu_alias_terms(struct perf_pmu_alias *alias,
404 parse_events_terms__purge(&list); 405 parse_events_terms__purge(&list);
405 return ret; 406 return ret;
406 } 407 }
408 /*
 409	 * Weak terms don't override command line options, which is the
 410	 * behaviour we want for implicit terms coming from aliases.
411 */
412 cloned->weak = true;
407 list_add_tail(&cloned->list, &list); 413 list_add_tail(&cloned->list, &list);
408 } 414 }
409 list_splice(&list, terms); 415 list_splice(&list, terms);
@@ -541,16 +547,8 @@ char * __weak get_cpuid_str(void)
541 return NULL; 547 return NULL;
542} 548}
543 549
544/* 550static char *perf_pmu__getcpuid(void)
545 * From the pmu_events_map, find the table of PMU events that corresponds
546 * to the current running CPU. Then, add all PMU events from that table
547 * as aliases.
548 */
549static void pmu_add_cpu_aliases(struct list_head *head, const char *name)
550{ 551{
551 int i;
552 struct pmu_events_map *map;
553 struct pmu_event *pe;
554 char *cpuid; 552 char *cpuid;
555 static bool printed; 553 static bool printed;
556 554
@@ -560,22 +558,50 @@ static void pmu_add_cpu_aliases(struct list_head *head, const char *name)
560 if (!cpuid) 558 if (!cpuid)
561 cpuid = get_cpuid_str(); 559 cpuid = get_cpuid_str();
562 if (!cpuid) 560 if (!cpuid)
563 return; 561 return NULL;
564 562
565 if (!printed) { 563 if (!printed) {
566 pr_debug("Using CPUID %s\n", cpuid); 564 pr_debug("Using CPUID %s\n", cpuid);
567 printed = true; 565 printed = true;
568 } 566 }
567 return cpuid;
568}
569
570struct pmu_events_map *perf_pmu__find_map(void)
571{
572 struct pmu_events_map *map;
573 char *cpuid = perf_pmu__getcpuid();
574 int i;
569 575
570 i = 0; 576 i = 0;
571 while (1) { 577 for (;;) {
572 map = &pmu_events_map[i++]; 578 map = &pmu_events_map[i++];
573 if (!map->table) 579 if (!map->table) {
574 goto out; 580 map = NULL;
581 break;
582 }
575 583
576 if (!strcmp(map->cpuid, cpuid)) 584 if (!strcmp(map->cpuid, cpuid))
577 break; 585 break;
578 } 586 }
587 free(cpuid);
588 return map;
589}
590
591/*
592 * From the pmu_events_map, find the table of PMU events that corresponds
593 * to the current running CPU. Then, add all PMU events from that table
594 * as aliases.
595 */
596static void pmu_add_cpu_aliases(struct list_head *head, const char *name)
597{
598 int i;
599 struct pmu_events_map *map;
600 struct pmu_event *pe;
601
602 map = perf_pmu__find_map();
603 if (!map)
604 return;
579 605
580 /* 606 /*
581 * Found a matching PMU events table. Create aliases 607 * Found a matching PMU events table. Create aliases
@@ -585,8 +611,11 @@ static void pmu_add_cpu_aliases(struct list_head *head, const char *name)
585 const char *pname; 611 const char *pname;
586 612
587 pe = &map->table[i++]; 613 pe = &map->table[i++];
588 if (!pe->name) 614 if (!pe->name) {
615 if (pe->metric_group || pe->metric_name)
616 continue;
589 break; 617 break;
618 }
590 619
591 pname = pe->pmu ? pe->pmu : "cpu"; 620 pname = pe->pmu ? pe->pmu : "cpu";
592 if (strncmp(pname, name, strlen(pname))) 621 if (strncmp(pname, name, strlen(pname)))
@@ -600,9 +629,6 @@ static void pmu_add_cpu_aliases(struct list_head *head, const char *name)
600 (char *)pe->metric_expr, 629 (char *)pe->metric_expr,
601 (char *)pe->metric_name); 630 (char *)pe->metric_name);
602 } 631 }
603
604out:
605 free(cpuid);
606} 632}
607 633
608struct perf_event_attr * __weak 634struct perf_event_attr * __weak
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index eca99435f4a0..27c75e635866 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -92,4 +92,6 @@ int perf_pmu__test(void);
92 92
93struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu); 93struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu);
94 94
95struct pmu_events_map *perf_pmu__find_map(void);
96
95#endif /* __PMU_H */ 97#endif /* __PMU_H */
diff --git a/tools/perf/util/print_binary.c b/tools/perf/util/print_binary.c
index 779e35c9e566..23e367063446 100644
--- a/tools/perf/util/print_binary.c
+++ b/tools/perf/util/print_binary.c
@@ -3,40 +3,42 @@
3#include <linux/log2.h> 3#include <linux/log2.h>
4#include "sane_ctype.h" 4#include "sane_ctype.h"
5 5
6void print_binary(unsigned char *data, size_t len, 6int binary__fprintf(unsigned char *data, size_t len,
7 size_t bytes_per_line, print_binary_t printer, 7 size_t bytes_per_line, binary__fprintf_t printer,
8 void *extra) 8 void *extra, FILE *fp)
9{ 9{
10 size_t i, j, mask; 10 size_t i, j, mask;
11 int printed = 0;
11 12
12 if (!printer) 13 if (!printer)
13 return; 14 return 0;
14 15
15 bytes_per_line = roundup_pow_of_two(bytes_per_line); 16 bytes_per_line = roundup_pow_of_two(bytes_per_line);
16 mask = bytes_per_line - 1; 17 mask = bytes_per_line - 1;
17 18
18 printer(BINARY_PRINT_DATA_BEGIN, 0, extra); 19 printed += printer(BINARY_PRINT_DATA_BEGIN, 0, extra, fp);
19 for (i = 0; i < len; i++) { 20 for (i = 0; i < len; i++) {
20 if ((i & mask) == 0) { 21 if ((i & mask) == 0) {
21 printer(BINARY_PRINT_LINE_BEGIN, -1, extra); 22 printed += printer(BINARY_PRINT_LINE_BEGIN, -1, extra, fp);
22 printer(BINARY_PRINT_ADDR, i, extra); 23 printed += printer(BINARY_PRINT_ADDR, i, extra, fp);
23 } 24 }
24 25
25 printer(BINARY_PRINT_NUM_DATA, data[i], extra); 26 printed += printer(BINARY_PRINT_NUM_DATA, data[i], extra, fp);
26 27
27 if (((i & mask) == mask) || i == len - 1) { 28 if (((i & mask) == mask) || i == len - 1) {
28 for (j = 0; j < mask-(i & mask); j++) 29 for (j = 0; j < mask-(i & mask); j++)
29 printer(BINARY_PRINT_NUM_PAD, -1, extra); 30 printed += printer(BINARY_PRINT_NUM_PAD, -1, extra, fp);
30 31
31 printer(BINARY_PRINT_SEP, i, extra); 32 printer(BINARY_PRINT_SEP, i, extra, fp);
32 for (j = i & ~mask; j <= i; j++) 33 for (j = i & ~mask; j <= i; j++)
33 printer(BINARY_PRINT_CHAR_DATA, data[j], extra); 34 printed += printer(BINARY_PRINT_CHAR_DATA, data[j], extra, fp);
34 for (j = 0; j < mask-(i & mask); j++) 35 for (j = 0; j < mask-(i & mask); j++)
35 printer(BINARY_PRINT_CHAR_PAD, i, extra); 36 printed += printer(BINARY_PRINT_CHAR_PAD, i, extra, fp);
36 printer(BINARY_PRINT_LINE_END, -1, extra); 37 printed += printer(BINARY_PRINT_LINE_END, -1, extra, fp);
37 } 38 }
38 } 39 }
39 printer(BINARY_PRINT_DATA_END, -1, extra); 40 printed += printer(BINARY_PRINT_DATA_END, -1, extra, fp);
41 return printed;
40} 42}
41 43
42int is_printable_array(char *p, unsigned int len) 44int is_printable_array(char *p, unsigned int len)
diff --git a/tools/perf/util/print_binary.h b/tools/perf/util/print_binary.h
index 2be3075e2b05..2a1554afc957 100644
--- a/tools/perf/util/print_binary.h
+++ b/tools/perf/util/print_binary.h
@@ -3,6 +3,7 @@
3#define PERF_PRINT_BINARY_H 3#define PERF_PRINT_BINARY_H
4 4
5#include <stddef.h> 5#include <stddef.h>
6#include <stdio.h>
6 7
7enum binary_printer_ops { 8enum binary_printer_ops {
8 BINARY_PRINT_DATA_BEGIN, 9 BINARY_PRINT_DATA_BEGIN,
@@ -17,12 +18,19 @@ enum binary_printer_ops {
17 BINARY_PRINT_DATA_END, 18 BINARY_PRINT_DATA_END,
18}; 19};
19 20
20typedef void (*print_binary_t)(enum binary_printer_ops op, 21typedef int (*binary__fprintf_t)(enum binary_printer_ops op,
21 unsigned int val, void *extra); 22 unsigned int val, void *extra, FILE *fp);
22 23
23void print_binary(unsigned char *data, size_t len, 24int binary__fprintf(unsigned char *data, size_t len,
24 size_t bytes_per_line, print_binary_t printer, 25 size_t bytes_per_line, binary__fprintf_t printer,
25 void *extra); 26 void *extra, FILE *fp);
27
28static inline void print_binary(unsigned char *data, size_t len,
29 size_t bytes_per_line, binary__fprintf_t printer,
30 void *extra)
31{
32 binary__fprintf(data, len, bytes_per_line, printer, extra, stdout);
33}
26 34
27int is_printable_array(char *p, unsigned int len); 35int is_printable_array(char *p, unsigned int len);
28 36
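The conversion above replaces the void print_binary() path with binary__fprintf(), which threads an explicit FILE * through every callback and sums the characters printed. A rough sketch of a callback written against the new binary__fprintf_t signature; the two-op hex layout is invented for illustration and is not the formatting perf itself uses:

#include <stdio.h>
#include "print_binary.h"

static int hex_printer(enum binary_printer_ops op, unsigned int val,
		       void *extra, FILE *fp)
{
	(void)extra;	/* unused in this sketch */

	switch (op) {
	case BINARY_PRINT_NUM_DATA:
		return fprintf(fp, " %02x", val);
	case BINARY_PRINT_LINE_END:
		return fprintf(fp, "\n");
	default:
		return 0;	/* ignore the remaining ops */
	}
}

/* dump len bytes, 16 per line, to stderr; returns characters written */
static int dump_bytes(unsigned char *data, size_t len)
{
	return binary__fprintf(data, len, 16, hex_printer, NULL, stderr);
}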
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index cdf8d83a484c..4ae1123c6794 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -15,6 +15,7 @@
15 * 15 *
16 */ 16 */
17#include <errno.h> 17#include <errno.h>
18#include <fcntl.h>
18#include <sys/stat.h> 19#include <sys/stat.h>
19#include <sys/types.h> 20#include <sys/types.h>
20#include <sys/uio.h> 21#include <sys/uio.h>
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index e66dc495809a..b4f2f06722a7 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -10,6 +10,7 @@ util/ctype.c
10util/evlist.c 10util/evlist.c
11util/evsel.c 11util/evsel.c
12util/cpumap.c 12util/cpumap.c
13util/mmap.c
13util/namespaces.c 14util/namespaces.c
14../lib/bitmap.c 15../lib/bitmap.c
15../lib/find_bit.c 16../lib/find_bit.c
diff --git a/tools/perf/util/rb_resort.h b/tools/perf/util/rb_resort.h
index 7d8972b33f6b..a920f702a74d 100644
--- a/tools/perf/util/rb_resort.h
+++ b/tools/perf/util/rb_resort.h
@@ -144,7 +144,8 @@ struct __name##_sorted *__name = __name##_sorted__new
144 __ilist->rblist.nr_entries) 144 __ilist->rblist.nr_entries)
145 145
146/* For 'struct machine->threads' */ 146/* For 'struct machine->threads' */
147#define DECLARE_RESORT_RB_MACHINE_THREADS(__name, __machine) \ 147#define DECLARE_RESORT_RB_MACHINE_THREADS(__name, __machine, hash_bucket) \
148 DECLARE_RESORT_RB(__name)(&__machine->threads, __machine->nr_threads) 148 DECLARE_RESORT_RB(__name)(&__machine->threads[hash_bucket].entries, \
149 __machine->threads[hash_bucket].nr)
149 150
150#endif /* _PERF_RESORT_RB_H_ */ 151#endif /* _PERF_RESORT_RB_H_ */
diff --git a/tools/perf/util/rwsem.c b/tools/perf/util/rwsem.c
new file mode 100644
index 000000000000..5e52e7baa7b6
--- /dev/null
+++ b/tools/perf/util/rwsem.c
@@ -0,0 +1,32 @@
1#include "util.h"
2#include "rwsem.h"
3
4int init_rwsem(struct rw_semaphore *sem)
5{
6 return pthread_rwlock_init(&sem->lock, NULL);
7}
8
9int exit_rwsem(struct rw_semaphore *sem)
10{
11 return pthread_rwlock_destroy(&sem->lock);
12}
13
14int down_read(struct rw_semaphore *sem)
15{
16 return perf_singlethreaded ? 0 : pthread_rwlock_rdlock(&sem->lock);
17}
18
19int up_read(struct rw_semaphore *sem)
20{
21 return perf_singlethreaded ? 0 : pthread_rwlock_unlock(&sem->lock);
22}
23
24int down_write(struct rw_semaphore *sem)
25{
26 return perf_singlethreaded ? 0 : pthread_rwlock_wrlock(&sem->lock);
27}
28
29int up_write(struct rw_semaphore *sem)
30{
31 return perf_singlethreaded ? 0 : pthread_rwlock_unlock(&sem->lock);
32}
diff --git a/tools/perf/util/rwsem.h b/tools/perf/util/rwsem.h
new file mode 100644
index 000000000000..94565ad4d494
--- /dev/null
+++ b/tools/perf/util/rwsem.h
@@ -0,0 +1,19 @@
1#ifndef _PERF_RWSEM_H
2#define _PERF_RWSEM_H
3
4#include <pthread.h>
5
6struct rw_semaphore {
7 pthread_rwlock_t lock;
8};
9
10int init_rwsem(struct rw_semaphore *sem);
11int exit_rwsem(struct rw_semaphore *sem);
12
13int down_read(struct rw_semaphore *sem);
14int up_read(struct rw_semaphore *sem);
15
16int down_write(struct rw_semaphore *sem);
17int up_write(struct rw_semaphore *sem);
18
19#endif /* _PERF_RWSEM_H */
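The two new files add a thin rw_semaphore wrapper around pthread_rwlock_t whose read and write lock paths become no-ops once perf_singlethreaded is set, so single-threaded tools pay nothing for the locking. A hedged usage sketch; struct cache and its fields are made up for illustration:

#include "rwsem.h"

struct cache {
	struct rw_semaphore lock;
	int value;
};

static void cache__init(struct cache *c)
{
	init_rwsem(&c->lock);
}

static int cache__read(struct cache *c)
{
	int v;

	down_read(&c->lock);	/* no-op when perf_singlethreaded is set */
	v = c->value;
	up_read(&c->lock);
	return v;
}

static void cache__write(struct cache *c, int v)
{
	down_write(&c->lock);
	c->value = v;
	up_write(&c->lock);
}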
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index da55081aefc6..5c412310f266 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -33,14 +33,14 @@ static int perf_session__deliver_event(struct perf_session *session,
33 33
34static int perf_session__open(struct perf_session *session) 34static int perf_session__open(struct perf_session *session)
35{ 35{
36 struct perf_data_file *file = session->file; 36 struct perf_data *data = session->data;
37 37
38 if (perf_session__read_header(session) < 0) { 38 if (perf_session__read_header(session) < 0) {
39 pr_err("incompatible file format (rerun with -v to learn more)\n"); 39 pr_err("incompatible file format (rerun with -v to learn more)\n");
40 return -1; 40 return -1;
41 } 41 }
42 42
43 if (perf_data_file__is_pipe(file)) 43 if (perf_data__is_pipe(data))
44 return 0; 44 return 0;
45 45
46 if (perf_header__has_feat(&session->header, HEADER_STAT)) 46 if (perf_header__has_feat(&session->header, HEADER_STAT))
@@ -121,7 +121,7 @@ static int ordered_events__deliver_event(struct ordered_events *oe,
121 session->tool, event->file_offset); 121 session->tool, event->file_offset);
122} 122}
123 123
124struct perf_session *perf_session__new(struct perf_data_file *file, 124struct perf_session *perf_session__new(struct perf_data *data,
125 bool repipe, struct perf_tool *tool) 125 bool repipe, struct perf_tool *tool)
126{ 126{
127 struct perf_session *session = zalloc(sizeof(*session)); 127 struct perf_session *session = zalloc(sizeof(*session));
@@ -135,13 +135,13 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
135 machines__init(&session->machines); 135 machines__init(&session->machines);
136 ordered_events__init(&session->ordered_events, ordered_events__deliver_event); 136 ordered_events__init(&session->ordered_events, ordered_events__deliver_event);
137 137
138 if (file) { 138 if (data) {
139 if (perf_data_file__open(file)) 139 if (perf_data__open(data))
140 goto out_delete; 140 goto out_delete;
141 141
142 session->file = file; 142 session->data = data;
143 143
144 if (perf_data_file__is_read(file)) { 144 if (perf_data__is_read(data)) {
145 if (perf_session__open(session) < 0) 145 if (perf_session__open(session) < 0)
146 goto out_close; 146 goto out_close;
147 147
@@ -149,7 +149,7 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
149 * set session attributes that are present in perf.data 149 * set session attributes that are present in perf.data
150 * but not in pipe-mode. 150 * but not in pipe-mode.
151 */ 151 */
152 if (!file->is_pipe) { 152 if (!data->is_pipe) {
153 perf_session__set_id_hdr_size(session); 153 perf_session__set_id_hdr_size(session);
154 perf_session__set_comm_exec(session); 154 perf_session__set_comm_exec(session);
155 } 155 }
@@ -158,7 +158,7 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
158 session->machines.host.env = &perf_env; 158 session->machines.host.env = &perf_env;
159 } 159 }
160 160
161 if (!file || perf_data_file__is_write(file)) { 161 if (!data || perf_data__is_write(data)) {
162 /* 162 /*
163 * In O_RDONLY mode this will be performed when reading the 163 * In O_RDONLY mode this will be performed when reading the
164 * kernel MMAP event, in perf_event__process_mmap(). 164 * kernel MMAP event, in perf_event__process_mmap().
@@ -171,7 +171,7 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
171 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is 171 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
172 * processed, so perf_evlist__sample_id_all is not meaningful here. 172 * processed, so perf_evlist__sample_id_all is not meaningful here.
173 */ 173 */
174 if ((!file || !file->is_pipe) && tool && tool->ordering_requires_timestamps && 174 if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
175 tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) { 175 tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
176 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); 176 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
177 tool->ordered_events = false; 177 tool->ordered_events = false;
@@ -180,7 +180,7 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
180 return session; 180 return session;
181 181
182 out_close: 182 out_close:
183 perf_data_file__close(file); 183 perf_data__close(data);
184 out_delete: 184 out_delete:
185 perf_session__delete(session); 185 perf_session__delete(session);
186 out: 186 out:
@@ -202,8 +202,8 @@ void perf_session__delete(struct perf_session *session)
202 perf_session__delete_threads(session); 202 perf_session__delete_threads(session);
203 perf_env__exit(&session->header.env); 203 perf_env__exit(&session->header.env);
204 machines__exit(&session->machines); 204 machines__exit(&session->machines);
205 if (session->file) 205 if (session->data)
206 perf_data_file__close(session->file); 206 perf_data__close(session->data);
207 free(session); 207 free(session);
208} 208}
209 209
@@ -291,8 +291,8 @@ static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
291 __maybe_unused) 291 __maybe_unused)
292{ 292{
293 dump_printf(": unhandled!\n"); 293 dump_printf(": unhandled!\n");
294 if (perf_data_file__is_pipe(session->file)) 294 if (perf_data__is_pipe(session->data))
295 skipn(perf_data_file__fd(session->file), event->auxtrace.size); 295 skipn(perf_data__fd(session->data), event->auxtrace.size);
296 return event->auxtrace.size; 296 return event->auxtrace.size;
297} 297}
298 298
@@ -1350,7 +1350,7 @@ static s64 perf_session__process_user_event(struct perf_session *session,
1350{ 1350{
1351 struct ordered_events *oe = &session->ordered_events; 1351 struct ordered_events *oe = &session->ordered_events;
1352 struct perf_tool *tool = session->tool; 1352 struct perf_tool *tool = session->tool;
1353 int fd = perf_data_file__fd(session->file); 1353 int fd = perf_data__fd(session->data);
1354 int err; 1354 int err;
1355 1355
1356 dump_event(session->evlist, event, file_offset, NULL); 1356 dump_event(session->evlist, event, file_offset, NULL);
@@ -1450,10 +1450,10 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1450 goto out_parse_sample; 1450 goto out_parse_sample;
1451 } 1451 }
1452 1452
1453 if (perf_data_file__is_pipe(session->file)) 1453 if (perf_data__is_pipe(session->data))
1454 return -1; 1454 return -1;
1455 1455
1456 fd = perf_data_file__fd(session->file); 1456 fd = perf_data__fd(session->data);
1457 hdr_sz = sizeof(struct perf_event_header); 1457 hdr_sz = sizeof(struct perf_event_header);
1458 1458
1459 if (buf_sz < hdr_sz) 1459 if (buf_sz < hdr_sz)
@@ -1688,7 +1688,7 @@ static int __perf_session__process_pipe_events(struct perf_session *session)
1688{ 1688{
1689 struct ordered_events *oe = &session->ordered_events; 1689 struct ordered_events *oe = &session->ordered_events;
1690 struct perf_tool *tool = session->tool; 1690 struct perf_tool *tool = session->tool;
1691 int fd = perf_data_file__fd(session->file); 1691 int fd = perf_data__fd(session->data);
1692 union perf_event *event; 1692 union perf_event *event;
1693 uint32_t size, cur_size = 0; 1693 uint32_t size, cur_size = 0;
1694 void *buf = NULL; 1694 void *buf = NULL;
@@ -1829,7 +1829,7 @@ static int __perf_session__process_events(struct perf_session *session,
1829{ 1829{
1830 struct ordered_events *oe = &session->ordered_events; 1830 struct ordered_events *oe = &session->ordered_events;
1831 struct perf_tool *tool = session->tool; 1831 struct perf_tool *tool = session->tool;
1832 int fd = perf_data_file__fd(session->file); 1832 int fd = perf_data__fd(session->data);
1833 u64 head, page_offset, file_offset, file_pos, size; 1833 u64 head, page_offset, file_offset, file_pos, size;
1834 int err, mmap_prot, mmap_flags, map_idx = 0; 1834 int err, mmap_prot, mmap_flags, map_idx = 0;
1835 size_t mmap_size; 1835 size_t mmap_size;
@@ -1850,7 +1850,7 @@ static int __perf_session__process_events(struct perf_session *session,
1850 if (data_offset + data_size < file_size) 1850 if (data_offset + data_size < file_size)
1851 file_size = data_offset + data_size; 1851 file_size = data_offset + data_size;
1852 1852
1853 ui_progress__init(&prog, file_size, "Processing events..."); 1853 ui_progress__init_size(&prog, file_size, "Processing events...");
1854 1854
1855 mmap_size = MMAP_SIZE; 1855 mmap_size = MMAP_SIZE;
1856 if (mmap_size > file_size) { 1856 if (mmap_size > file_size) {
@@ -1946,13 +1946,13 @@ out_err:
1946 1946
1947int perf_session__process_events(struct perf_session *session) 1947int perf_session__process_events(struct perf_session *session)
1948{ 1948{
1949 u64 size = perf_data_file__size(session->file); 1949 u64 size = perf_data__size(session->data);
1950 int err; 1950 int err;
1951 1951
1952 if (perf_session__register_idle_thread(session) < 0) 1952 if (perf_session__register_idle_thread(session) < 0)
1953 return -ENOMEM; 1953 return -ENOMEM;
1954 1954
1955 if (!perf_data_file__is_pipe(session->file)) 1955 if (!perf_data__is_pipe(session->data))
1956 err = __perf_session__process_events(session, 1956 err = __perf_session__process_events(session,
1957 session->header.data_offset, 1957 session->header.data_offset,
1958 session->header.data_size, size); 1958 session->header.data_size, size);
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 41caa098ed15..da1434a7c120 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -33,13 +33,13 @@ struct perf_session {
33 void *one_mmap_addr; 33 void *one_mmap_addr;
34 u64 one_mmap_offset; 34 u64 one_mmap_offset;
35 struct ordered_events ordered_events; 35 struct ordered_events ordered_events;
36 struct perf_data_file *file; 36 struct perf_data *data;
37 struct perf_tool *tool; 37 struct perf_tool *tool;
38}; 38};
39 39
40struct perf_tool; 40struct perf_tool;
41 41
42struct perf_session *perf_session__new(struct perf_data_file *file, 42struct perf_session *perf_session__new(struct perf_data *data,
43 bool repipe, struct perf_tool *tool); 43 bool repipe, struct perf_tool *tool);
44void perf_session__delete(struct perf_session *session); 44void perf_session__delete(struct perf_session *session);
45 45
@@ -114,7 +114,7 @@ int __perf_session__set_tracepoints_handlers(struct perf_session *session,
114 114
115extern volatile int session_done; 115extern volatile int session_done;
116 116
117#define session_done() ACCESS_ONCE(session_done) 117#define session_done() READ_ONCE(session_done)
118 118
119int perf_session__deliver_synth_event(struct perf_session *session, 119int perf_session__deliver_synth_event(struct perf_session *session,
120 union perf_event *event, 120 union perf_event *event,
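The rename from struct perf_data_file to struct perf_data runs through all of session.c: the session owns a perf_data and the perf_data__*() accessors replace perf_data_file__*(). A minimal read-side sketch using only the calls visible above; the data.h/tool.h include names and however the perf_data's path and mode get filled in are assumptions, since neither is shown in these hunks:

#include "data.h"	/* assumed home of struct perf_data */
#include "session.h"
#include "tool.h"	/* assumed home of struct perf_tool */

static int replay(struct perf_data *data, struct perf_tool *tool)
{
	/* perf_session__new() opens the perf_data itself when it is in
	 * read mode, per the hunk above */
	struct perf_session *session = perf_session__new(data, false, tool);
	int err;

	if (session == NULL)
		return -1;

	err = perf_session__process_events(session);
	perf_session__delete(session);	/* also closes the perf_data */
	return err;
}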
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 84a33f1e9ec9..a00eacdf02ed 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -226,6 +226,9 @@ static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
226 if (sym_l == sym_r) 226 if (sym_l == sym_r)
227 return 0; 227 return 0;
228 228
229 if (sym_l->inlined || sym_r->inlined)
230 return strcmp(sym_l->name, sym_r->name);
231
229 if (sym_l->start != sym_r->start) 232 if (sym_l->start != sym_r->start)
230 return (int64_t)(sym_r->start - sym_l->start); 233 return (int64_t)(sym_r->start - sym_l->start);
231 234
@@ -284,6 +287,9 @@ static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
284 ret += repsep_snprintf(bf + ret, size - ret, "%.*s", 287 ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
285 width - ret, 288 width - ret,
286 sym->name); 289 sym->name);
290 if (sym->inlined)
291 ret += repsep_snprintf(bf + ret, size - ret,
292 " (inlined)");
287 } 293 }
288 } else { 294 } else {
289 size_t len = BITS_PER_LONG / 4; 295 size_t len = BITS_PER_LONG / 4;
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index b2b55e5149a7..f5901c10a563 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -130,7 +130,6 @@ struct hist_entry {
130 }; 130 };
131 char *srcline; 131 char *srcline;
132 char *srcfile; 132 char *srcfile;
133 struct inline_node *inline_node;
134 struct symbol *parent; 133 struct symbol *parent;
135 struct branch_info *branch_info; 134 struct branch_info *branch_info;
136 struct hists *hists; 135 struct hists *hists;
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index 4105682afc7a..d19f05c56de6 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -11,7 +11,7 @@
11#include "util/debug.h" 11#include "util/debug.h"
12#include "util/callchain.h" 12#include "util/callchain.h"
13#include "srcline.h" 13#include "srcline.h"
14 14#include "string2.h"
15#include "symbol.h" 15#include "symbol.h"
16 16
17bool srcline_full_filename; 17bool srcline_full_filename;
@@ -34,28 +34,17 @@ static const char *dso__name(struct dso *dso)
34 return dso_name; 34 return dso_name;
35} 35}
36 36
37static int inline_list__append(char *filename, char *funcname, int line_nr, 37static int inline_list__append(struct symbol *symbol, char *srcline,
38 struct inline_node *node, struct dso *dso) 38 struct inline_node *node)
39{ 39{
40 struct inline_list *ilist; 40 struct inline_list *ilist;
41 char *demangled;
42 41
43 ilist = zalloc(sizeof(*ilist)); 42 ilist = zalloc(sizeof(*ilist));
44 if (ilist == NULL) 43 if (ilist == NULL)
45 return -1; 44 return -1;
46 45
47 ilist->filename = filename; 46 ilist->symbol = symbol;
48 ilist->line_nr = line_nr; 47 ilist->srcline = srcline;
49
50 if (dso != NULL) {
51 demangled = dso__demangle_sym(dso, 0, funcname);
52 if (demangled == NULL) {
53 ilist->funcname = funcname;
54 } else {
55 ilist->funcname = demangled;
56 free(funcname);
57 }
58 }
59 48
60 if (callchain_param.order == ORDER_CALLEE) 49 if (callchain_param.order == ORDER_CALLEE)
61 list_add_tail(&ilist->list, &node->val); 50 list_add_tail(&ilist->list, &node->val);
@@ -65,6 +54,65 @@ static int inline_list__append(char *filename, char *funcname, int line_nr,
65 return 0; 54 return 0;
66} 55}
67 56
57/* basename version that takes a const input string */
58static const char *gnu_basename(const char *path)
59{
60 const char *base = strrchr(path, '/');
61
62 return base ? base + 1 : path;
63}
64
65static char *srcline_from_fileline(const char *file, unsigned int line)
66{
67 char *srcline;
68
69 if (!file)
70 return NULL;
71
72 if (!srcline_full_filename)
73 file = gnu_basename(file);
74
75 if (asprintf(&srcline, "%s:%u", file, line) < 0)
76 return NULL;
77
78 return srcline;
79}
80
81static struct symbol *new_inline_sym(struct dso *dso,
82 struct symbol *base_sym,
83 const char *funcname)
84{
85 struct symbol *inline_sym;
86 char *demangled = NULL;
87
88 if (dso) {
89 demangled = dso__demangle_sym(dso, 0, funcname);
90 if (demangled)
91 funcname = demangled;
92 }
93
94 if (base_sym && strcmp(funcname, base_sym->name) == 0) {
95 /* reuse the real, existing symbol */
96 inline_sym = base_sym;
97 /* ensure that we don't alias an inlined symbol, which could
98 * lead to double frees in inline_node__delete
99 */
100 assert(!base_sym->inlined);
101 } else {
102 /* create a fake symbol for the inline frame */
103 inline_sym = symbol__new(base_sym ? base_sym->start : 0,
104 base_sym ? base_sym->end : 0,
105 base_sym ? base_sym->binding : 0,
106 funcname);
107 if (inline_sym)
108 inline_sym->inlined = 1;
109 }
110
111 free(demangled);
112
113 return inline_sym;
114}
115
68#ifdef HAVE_LIBBFD_SUPPORT 116#ifdef HAVE_LIBBFD_SUPPORT
69 117
70/* 118/*
@@ -208,18 +256,23 @@ static void addr2line_cleanup(struct a2l_data *a2l)
208#define MAX_INLINE_NEST 1024 256#define MAX_INLINE_NEST 1024
209 257
210static int inline_list__append_dso_a2l(struct dso *dso, 258static int inline_list__append_dso_a2l(struct dso *dso,
211 struct inline_node *node) 259 struct inline_node *node,
260 struct symbol *sym)
212{ 261{
213 struct a2l_data *a2l = dso->a2l; 262 struct a2l_data *a2l = dso->a2l;
214 char *funcname = a2l->funcname ? strdup(a2l->funcname) : NULL; 263 struct symbol *inline_sym = new_inline_sym(dso, sym, a2l->funcname);
215 char *filename = a2l->filename ? strdup(a2l->filename) : NULL; 264 char *srcline = NULL;
216 265
217 return inline_list__append(filename, funcname, a2l->line, node, dso); 266 if (a2l->filename)
267 srcline = srcline_from_fileline(a2l->filename, a2l->line);
268
269 return inline_list__append(inline_sym, srcline, node);
218} 270}
219 271
220static int addr2line(const char *dso_name, u64 addr, 272static int addr2line(const char *dso_name, u64 addr,
221 char **file, unsigned int *line, struct dso *dso, 273 char **file, unsigned int *line, struct dso *dso,
222 bool unwind_inlines, struct inline_node *node) 274 bool unwind_inlines, struct inline_node *node,
275 struct symbol *sym)
223{ 276{
224 int ret = 0; 277 int ret = 0;
225 struct a2l_data *a2l = dso->a2l; 278 struct a2l_data *a2l = dso->a2l;
@@ -245,7 +298,7 @@ static int addr2line(const char *dso_name, u64 addr,
245 if (unwind_inlines) { 298 if (unwind_inlines) {
246 int cnt = 0; 299 int cnt = 0;
247 300
248 if (node && inline_list__append_dso_a2l(dso, node)) 301 if (node && inline_list__append_dso_a2l(dso, node, sym))
249 return 0; 302 return 0;
250 303
251 while (bfd_find_inliner_info(a2l->abfd, &a2l->filename, 304 while (bfd_find_inliner_info(a2l->abfd, &a2l->filename,
@@ -256,7 +309,7 @@ static int addr2line(const char *dso_name, u64 addr,
256 a2l->filename = NULL; 309 a2l->filename = NULL;
257 310
258 if (node != NULL) { 311 if (node != NULL) {
259 if (inline_list__append_dso_a2l(dso, node)) 312 if (inline_list__append_dso_a2l(dso, node, sym))
260 return 0; 313 return 0;
261 // found at least one inline frame 314 // found at least one inline frame
262 ret = 1; 315 ret = 1;
@@ -288,7 +341,7 @@ void dso__free_a2l(struct dso *dso)
288} 341}
289 342
290static struct inline_node *addr2inlines(const char *dso_name, u64 addr, 343static struct inline_node *addr2inlines(const char *dso_name, u64 addr,
291 struct dso *dso) 344 struct dso *dso, struct symbol *sym)
292{ 345{
293 struct inline_node *node; 346 struct inline_node *node;
294 347
@@ -301,17 +354,8 @@ static struct inline_node *addr2inlines(const char *dso_name, u64 addr,
301 INIT_LIST_HEAD(&node->val); 354 INIT_LIST_HEAD(&node->val);
302 node->addr = addr; 355 node->addr = addr;
303 356
304 if (!addr2line(dso_name, addr, NULL, NULL, dso, TRUE, node)) 357 addr2line(dso_name, addr, NULL, NULL, dso, true, node, sym);
305 goto out_free_inline_node;
306
307 if (list_empty(&node->val))
308 goto out_free_inline_node;
309
310 return node; 358 return node;
311
312out_free_inline_node:
313 inline_node__delete(node);
314 return NULL;
315} 359}
316 360
317#else /* HAVE_LIBBFD_SUPPORT */ 361#else /* HAVE_LIBBFD_SUPPORT */
@@ -341,7 +385,8 @@ static int addr2line(const char *dso_name, u64 addr,
341 char **file, unsigned int *line_nr, 385 char **file, unsigned int *line_nr,
342 struct dso *dso __maybe_unused, 386 struct dso *dso __maybe_unused,
343 bool unwind_inlines __maybe_unused, 387 bool unwind_inlines __maybe_unused,
344 struct inline_node *node __maybe_unused) 388 struct inline_node *node __maybe_unused,
389 struct symbol *sym __maybe_unused)
345{ 390{
346 FILE *fp; 391 FILE *fp;
347 char cmd[PATH_MAX]; 392 char cmd[PATH_MAX];
@@ -381,16 +426,18 @@ void dso__free_a2l(struct dso *dso __maybe_unused)
381} 426}
382 427
383static struct inline_node *addr2inlines(const char *dso_name, u64 addr, 428static struct inline_node *addr2inlines(const char *dso_name, u64 addr,
384 struct dso *dso __maybe_unused) 429 struct dso *dso __maybe_unused,
430 struct symbol *sym)
385{ 431{
386 FILE *fp; 432 FILE *fp;
387 char cmd[PATH_MAX]; 433 char cmd[PATH_MAX];
388 struct inline_node *node; 434 struct inline_node *node;
389 char *filename = NULL; 435 char *filename = NULL;
390 size_t len; 436 char *funcname = NULL;
437 size_t filelen, funclen;
391 unsigned int line_nr = 0; 438 unsigned int line_nr = 0;
392 439
393 scnprintf(cmd, sizeof(cmd), "addr2line -e %s -i %016"PRIx64, 440 scnprintf(cmd, sizeof(cmd), "addr2line -e %s -i -f %016"PRIx64,
394 dso_name, addr); 441 dso_name, addr);
395 442
396 fp = popen(cmd, "r"); 443 fp = popen(cmd, "r");
@@ -408,26 +455,34 @@ static struct inline_node *addr2inlines(const char *dso_name, u64 addr,
408 INIT_LIST_HEAD(&node->val); 455 INIT_LIST_HEAD(&node->val);
409 node->addr = addr; 456 node->addr = addr;
410 457
411 while (getline(&filename, &len, fp) != -1) { 458 /* addr2line -f generates two lines for each inlined function */
412 if (filename_split(filename, &line_nr) != 1) { 459 while (getline(&funcname, &funclen, fp) != -1) {
413 free(filename); 460 char *srcline;
461 struct symbol *inline_sym;
462
463 rtrim(funcname);
464
465 if (getline(&filename, &filelen, fp) == -1)
414 goto out; 466 goto out;
415 }
416 467
417 if (inline_list__append(filename, NULL, line_nr, node, 468 if (filename_split(filename, &line_nr) != 1)
418 NULL) != 0)
419 goto out; 469 goto out;
420 470
421 filename = NULL; 471 srcline = srcline_from_fileline(filename, line_nr);
472 inline_sym = new_inline_sym(dso, sym, funcname);
473
474 if (inline_list__append(inline_sym, srcline, node) != 0) {
475 free(srcline);
476 if (inline_sym && inline_sym->inlined)
477 symbol__delete(inline_sym);
478 goto out;
479 }
422 } 480 }
423 481
424out: 482out:
425 pclose(fp); 483 pclose(fp);
426 484 free(filename);
427 if (list_empty(&node->val)) { 485 free(funcname);
428 inline_node__delete(node);
429 return NULL;
430 }
431 486
432 return node; 487 return node;
433} 488}
@@ -455,19 +510,18 @@ char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
455 if (dso_name == NULL) 510 if (dso_name == NULL)
456 goto out; 511 goto out;
457 512
458 if (!addr2line(dso_name, addr, &file, &line, dso, unwind_inlines, NULL)) 513 if (!addr2line(dso_name, addr, &file, &line, dso,
514 unwind_inlines, NULL, sym))
459 goto out; 515 goto out;
460 516
461 if (asprintf(&srcline, "%s:%u", 517 srcline = srcline_from_fileline(file, line);
462 srcline_full_filename ? file : basename(file), 518 free(file);
463 line) < 0) { 519
464 free(file); 520 if (!srcline)
465 goto out; 521 goto out;
466 }
467 522
468 dso->a2l_fails = 0; 523 dso->a2l_fails = 0;
469 524
470 free(file);
471 return srcline; 525 return srcline;
472 526
473out: 527out:
@@ -501,7 +555,74 @@ char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
501 return __get_srcline(dso, addr, sym, show_sym, show_addr, false); 555 return __get_srcline(dso, addr, sym, show_sym, show_addr, false);
502} 556}
503 557
504struct inline_node *dso__parse_addr_inlines(struct dso *dso, u64 addr) 558struct srcline_node {
559 u64 addr;
560 char *srcline;
561 struct rb_node rb_node;
562};
563
564void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline)
565{
566 struct rb_node **p = &tree->rb_node;
567 struct rb_node *parent = NULL;
568 struct srcline_node *i, *node;
569
570 node = zalloc(sizeof(struct srcline_node));
571 if (!node) {
572 perror("not enough memory for the srcline node");
573 return;
574 }
575
576 node->addr = addr;
577 node->srcline = srcline;
578
579 while (*p != NULL) {
580 parent = *p;
581 i = rb_entry(parent, struct srcline_node, rb_node);
582 if (addr < i->addr)
583 p = &(*p)->rb_left;
584 else
585 p = &(*p)->rb_right;
586 }
587 rb_link_node(&node->rb_node, parent, p);
588 rb_insert_color(&node->rb_node, tree);
589}
590
591char *srcline__tree_find(struct rb_root *tree, u64 addr)
592{
593 struct rb_node *n = tree->rb_node;
594
595 while (n) {
596 struct srcline_node *i = rb_entry(n, struct srcline_node,
597 rb_node);
598
599 if (addr < i->addr)
600 n = n->rb_left;
601 else if (addr > i->addr)
602 n = n->rb_right;
603 else
604 return i->srcline;
605 }
606
607 return NULL;
608}
609
610void srcline__tree_delete(struct rb_root *tree)
611{
612 struct srcline_node *pos;
613 struct rb_node *next = rb_first(tree);
614
615 while (next) {
616 pos = rb_entry(next, struct srcline_node, rb_node);
617 next = rb_next(&pos->rb_node);
618 rb_erase(&pos->rb_node, tree);
619 free_srcline(pos->srcline);
620 zfree(&pos);
621 }
622}
623
624struct inline_node *dso__parse_addr_inlines(struct dso *dso, u64 addr,
625 struct symbol *sym)
505{ 626{
506 const char *dso_name; 627 const char *dso_name;
507 628
@@ -509,7 +630,7 @@ struct inline_node *dso__parse_addr_inlines(struct dso *dso, u64 addr)
509 if (dso_name == NULL) 630 if (dso_name == NULL)
510 return NULL; 631 return NULL;
511 632
512 return addr2inlines(dso_name, addr, dso); 633 return addr2inlines(dso_name, addr, dso, sym);
513} 634}
514 635
515void inline_node__delete(struct inline_node *node) 636void inline_node__delete(struct inline_node *node)
@@ -518,10 +639,63 @@ void inline_node__delete(struct inline_node *node)
518 639
519 list_for_each_entry_safe(ilist, tmp, &node->val, list) { 640 list_for_each_entry_safe(ilist, tmp, &node->val, list) {
520 list_del_init(&ilist->list); 641 list_del_init(&ilist->list);
521 zfree(&ilist->filename); 642 free_srcline(ilist->srcline);
522 zfree(&ilist->funcname); 643 /* only the inlined symbols are owned by the list */
644 if (ilist->symbol && ilist->symbol->inlined)
645 symbol__delete(ilist->symbol);
523 free(ilist); 646 free(ilist);
524 } 647 }
525 648
526 free(node); 649 free(node);
527} 650}
651
652void inlines__tree_insert(struct rb_root *tree, struct inline_node *inlines)
653{
654 struct rb_node **p = &tree->rb_node;
655 struct rb_node *parent = NULL;
656 const u64 addr = inlines->addr;
657 struct inline_node *i;
658
659 while (*p != NULL) {
660 parent = *p;
661 i = rb_entry(parent, struct inline_node, rb_node);
662 if (addr < i->addr)
663 p = &(*p)->rb_left;
664 else
665 p = &(*p)->rb_right;
666 }
667 rb_link_node(&inlines->rb_node, parent, p);
668 rb_insert_color(&inlines->rb_node, tree);
669}
670
671struct inline_node *inlines__tree_find(struct rb_root *tree, u64 addr)
672{
673 struct rb_node *n = tree->rb_node;
674
675 while (n) {
676 struct inline_node *i = rb_entry(n, struct inline_node,
677 rb_node);
678
679 if (addr < i->addr)
680 n = n->rb_left;
681 else if (addr > i->addr)
682 n = n->rb_right;
683 else
684 return i;
685 }
686
687 return NULL;
688}
689
690void inlines__tree_delete(struct rb_root *tree)
691{
692 struct inline_node *pos;
693 struct rb_node *next = rb_first(tree);
694
695 while (next) {
696 pos = rb_entry(next, struct inline_node, rb_node);
697 next = rb_next(&pos->rb_node);
698 rb_erase(&pos->rb_node, tree);
699 inline_node__delete(pos);
700 }
701}
diff --git a/tools/perf/util/srcline.h b/tools/perf/util/srcline.h
index 8e73f607dfa3..847b7086182c 100644
--- a/tools/perf/util/srcline.h
+++ b/tools/perf/util/srcline.h
@@ -3,6 +3,7 @@
3#define PERF_SRCLINE_H 3#define PERF_SRCLINE_H
4 4
5#include <linux/list.h> 5#include <linux/list.h>
6#include <linux/rbtree.h>
6#include <linux/types.h> 7#include <linux/types.h>
7 8
8struct dso; 9struct dso;
@@ -15,21 +16,38 @@ char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
15 bool show_sym, bool show_addr, bool unwind_inlines); 16 bool show_sym, bool show_addr, bool unwind_inlines);
16void free_srcline(char *srcline); 17void free_srcline(char *srcline);
17 18
19/* insert the srcline into the DSO, which will take ownership */
20void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline);
21/* find previously inserted srcline */
22char *srcline__tree_find(struct rb_root *tree, u64 addr);
23/* delete all srclines within the tree */
24void srcline__tree_delete(struct rb_root *tree);
25
18#define SRCLINE_UNKNOWN ((char *) "??:0") 26#define SRCLINE_UNKNOWN ((char *) "??:0")
19 27
20struct inline_list { 28struct inline_list {
21 char *filename; 29 struct symbol *symbol;
22 char *funcname; 30 char *srcline;
23 unsigned int line_nr;
24 struct list_head list; 31 struct list_head list;
25}; 32};
26 33
27struct inline_node { 34struct inline_node {
28 u64 addr; 35 u64 addr;
29 struct list_head val; 36 struct list_head val;
37 struct rb_node rb_node;
30}; 38};
31 39
32struct inline_node *dso__parse_addr_inlines(struct dso *dso, u64 addr); 40/* parse inlined frames for the given address */
41struct inline_node *dso__parse_addr_inlines(struct dso *dso, u64 addr,
42 struct symbol *sym);
 43/* free resources associated with the inline node list */
33void inline_node__delete(struct inline_node *node); 44void inline_node__delete(struct inline_node *node);
34 45
46/* insert the inline node list into the DSO, which will take ownership */
47void inlines__tree_insert(struct rb_root *tree, struct inline_node *inlines);
48/* find previously inserted inline node list */
49struct inline_node *inlines__tree_find(struct rb_root *tree, u64 addr);
 50/* delete all nodes within the tree of inline_node entries */
51void inlines__tree_delete(struct rb_root *tree);
52
35#endif /* PERF_SRCLINE_H */ 53#endif /* PERF_SRCLINE_H */
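The new srcline__tree_*() helpers let callers cache (addr, srcline) pairs in an rb_root instead of re-running addr2line for every sample. A hedged sketch of such a cache; in this series the tree actually lives in struct dso, which is not shown in these hunks, so the static rb_root here is purely illustrative:

#include <linux/rbtree.h>
#include "srcline.h"

static struct rb_root srcline_cache = RB_ROOT;

static char *cached_srcline(struct dso *dso, struct symbol *sym, u64 addr)
{
	char *srcline = srcline__tree_find(&srcline_cache, addr);

	if (!srcline) {
		srcline = get_srcline(dso, addr, sym, true, false);
		/* the tree takes ownership of the string */
		srcline__tree_insert(&srcline_cache, addr, srcline);
	}
	return srcline;
}

/* teardown: srcline__tree_delete(&srcline_cache) frees every cached string */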
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 37363869c9a1..855e35cbb1dc 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -7,6 +7,7 @@
7#include "rblist.h" 7#include "rblist.h"
8#include "evlist.h" 8#include "evlist.h"
9#include "expr.h" 9#include "expr.h"
10#include "metricgroup.h"
10 11
11enum { 12enum {
12 CTX_BIT_USER = 1 << 0, 13 CTX_BIT_USER = 1 << 0,
@@ -56,7 +57,6 @@ struct saved_value {
56 struct rb_node rb_node; 57 struct rb_node rb_node;
57 struct perf_evsel *evsel; 58 struct perf_evsel *evsel;
58 int cpu; 59 int cpu;
59 int ctx;
60 struct stats stats; 60 struct stats stats;
61}; 61};
62 62
@@ -67,8 +67,6 @@ static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
67 rb_node); 67 rb_node);
68 const struct saved_value *b = entry; 68 const struct saved_value *b = entry;
69 69
70 if (a->ctx != b->ctx)
71 return a->ctx - b->ctx;
72 if (a->cpu != b->cpu) 70 if (a->cpu != b->cpu)
73 return a->cpu - b->cpu; 71 return a->cpu - b->cpu;
74 if (a->evsel == b->evsel) 72 if (a->evsel == b->evsel)
@@ -90,13 +88,12 @@ static struct rb_node *saved_value_new(struct rblist *rblist __maybe_unused,
90} 88}
91 89
92static struct saved_value *saved_value_lookup(struct perf_evsel *evsel, 90static struct saved_value *saved_value_lookup(struct perf_evsel *evsel,
93 int cpu, int ctx, 91 int cpu,
94 bool create) 92 bool create)
95{ 93{
96 struct rb_node *nd; 94 struct rb_node *nd;
97 struct saved_value dm = { 95 struct saved_value dm = {
98 .cpu = cpu, 96 .cpu = cpu,
99 .ctx = ctx,
100 .evsel = evsel, 97 .evsel = evsel,
101 }; 98 };
102 nd = rblist__find(&runtime_saved_values, &dm); 99 nd = rblist__find(&runtime_saved_values, &dm);
@@ -182,59 +179,60 @@ void perf_stat__reset_shadow_stats(void)
182 * more semantic information such as miss/hit ratios, 179 * more semantic information such as miss/hit ratios,
183 * instruction rates, etc: 180 * instruction rates, etc:
184 */ 181 */
185void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count, 182void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
186 int cpu) 183 int cpu)
187{ 184{
188 int ctx = evsel_context(counter); 185 int ctx = evsel_context(counter);
189 186
187 count *= counter->scale;
188
190 if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) || 189 if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) ||
191 perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK)) 190 perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK))
192 update_stats(&runtime_nsecs_stats[cpu], count[0]); 191 update_stats(&runtime_nsecs_stats[cpu], count);
193 else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) 192 else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
194 update_stats(&runtime_cycles_stats[ctx][cpu], count[0]); 193 update_stats(&runtime_cycles_stats[ctx][cpu], count);
195 else if (perf_stat_evsel__is(counter, CYCLES_IN_TX)) 194 else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
196 update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]); 195 update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count);
197 else if (perf_stat_evsel__is(counter, TRANSACTION_START)) 196 else if (perf_stat_evsel__is(counter, TRANSACTION_START))
198 update_stats(&runtime_transaction_stats[ctx][cpu], count[0]); 197 update_stats(&runtime_transaction_stats[ctx][cpu], count);
199 else if (perf_stat_evsel__is(counter, ELISION_START)) 198 else if (perf_stat_evsel__is(counter, ELISION_START))
200 update_stats(&runtime_elision_stats[ctx][cpu], count[0]); 199 update_stats(&runtime_elision_stats[ctx][cpu], count);
201 else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS)) 200 else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
202 update_stats(&runtime_topdown_total_slots[ctx][cpu], count[0]); 201 update_stats(&runtime_topdown_total_slots[ctx][cpu], count);
203 else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED)) 202 else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
204 update_stats(&runtime_topdown_slots_issued[ctx][cpu], count[0]); 203 update_stats(&runtime_topdown_slots_issued[ctx][cpu], count);
205 else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED)) 204 else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
206 update_stats(&runtime_topdown_slots_retired[ctx][cpu], count[0]); 205 update_stats(&runtime_topdown_slots_retired[ctx][cpu], count);
207 else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES)) 206 else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
208 update_stats(&runtime_topdown_fetch_bubbles[ctx][cpu],count[0]); 207 update_stats(&runtime_topdown_fetch_bubbles[ctx][cpu], count);
209 else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES)) 208 else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
210 update_stats(&runtime_topdown_recovery_bubbles[ctx][cpu], count[0]); 209 update_stats(&runtime_topdown_recovery_bubbles[ctx][cpu], count);
211 else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) 210 else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
212 update_stats(&runtime_stalled_cycles_front_stats[ctx][cpu], count[0]); 211 update_stats(&runtime_stalled_cycles_front_stats[ctx][cpu], count);
213 else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND)) 212 else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
214 update_stats(&runtime_stalled_cycles_back_stats[ctx][cpu], count[0]); 213 update_stats(&runtime_stalled_cycles_back_stats[ctx][cpu], count);
215 else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS)) 214 else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
216 update_stats(&runtime_branches_stats[ctx][cpu], count[0]); 215 update_stats(&runtime_branches_stats[ctx][cpu], count);
217 else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES)) 216 else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
218 update_stats(&runtime_cacherefs_stats[ctx][cpu], count[0]); 217 update_stats(&runtime_cacherefs_stats[ctx][cpu], count);
219 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D)) 218 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
220 update_stats(&runtime_l1_dcache_stats[ctx][cpu], count[0]); 219 update_stats(&runtime_l1_dcache_stats[ctx][cpu], count);
221 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I)) 220 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
222 update_stats(&runtime_ll_cache_stats[ctx][cpu], count[0]); 221 update_stats(&runtime_ll_cache_stats[ctx][cpu], count);
223 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL)) 222 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
224 update_stats(&runtime_ll_cache_stats[ctx][cpu], count[0]); 223 update_stats(&runtime_ll_cache_stats[ctx][cpu], count);
225 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB)) 224 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
226 update_stats(&runtime_dtlb_cache_stats[ctx][cpu], count[0]); 225 update_stats(&runtime_dtlb_cache_stats[ctx][cpu], count);
227 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB)) 226 else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
228 update_stats(&runtime_itlb_cache_stats[ctx][cpu], count[0]); 227 update_stats(&runtime_itlb_cache_stats[ctx][cpu], count);
229 else if (perf_stat_evsel__is(counter, SMI_NUM)) 228 else if (perf_stat_evsel__is(counter, SMI_NUM))
230 update_stats(&runtime_smi_num_stats[ctx][cpu], count[0]); 229 update_stats(&runtime_smi_num_stats[ctx][cpu], count);
231 else if (perf_stat_evsel__is(counter, APERF)) 230 else if (perf_stat_evsel__is(counter, APERF))
232 update_stats(&runtime_aperf_stats[ctx][cpu], count[0]); 231 update_stats(&runtime_aperf_stats[ctx][cpu], count);
233 232
234 if (counter->collect_stat) { 233 if (counter->collect_stat) {
235 struct saved_value *v = saved_value_lookup(counter, cpu, ctx, 234 struct saved_value *v = saved_value_lookup(counter, cpu, true);
236 true); 235 update_stats(&v->stats, count);
237 update_stats(&v->stats, count[0]);
238 } 236 }
239} 237}
240 238
@@ -628,15 +626,68 @@ static void print_smi_cost(int cpu, struct perf_evsel *evsel,
628 out->print_metric(out->ctx, NULL, "%4.0f", "SMI#", smi_num); 626 out->print_metric(out->ctx, NULL, "%4.0f", "SMI#", smi_num);
629} 627}
630 628
629static void generic_metric(const char *metric_expr,
630 struct perf_evsel **metric_events,
631 char *name,
632 const char *metric_name,
633 double avg,
634 int cpu,
635 struct perf_stat_output_ctx *out)
636{
637 print_metric_t print_metric = out->print_metric;
638 struct parse_ctx pctx;
639 double ratio;
640 int i;
641 void *ctxp = out->ctx;
642
643 expr__ctx_init(&pctx);
644 expr__add_id(&pctx, name, avg);
645 for (i = 0; metric_events[i]; i++) {
646 struct saved_value *v;
647 struct stats *stats;
648 double scale;
649
650 if (!strcmp(metric_events[i]->name, "duration_time")) {
651 stats = &walltime_nsecs_stats;
652 scale = 1e-9;
653 } else {
654 v = saved_value_lookup(metric_events[i], cpu, false);
655 if (!v)
656 break;
657 stats = &v->stats;
658 scale = 1.0;
659 }
660 expr__add_id(&pctx, metric_events[i]->name, avg_stats(stats)*scale);
661 }
662 if (!metric_events[i]) {
663 const char *p = metric_expr;
664
665 if (expr__parse(&ratio, &pctx, &p) == 0)
666 print_metric(ctxp, NULL, "%8.1f",
667 metric_name ?
668 metric_name :
669 out->force_header ? name : "",
670 ratio);
671 else
672 print_metric(ctxp, NULL, NULL,
673 out->force_header ?
674 (metric_name ? metric_name : name) : "", 0);
675 } else
676 print_metric(ctxp, NULL, NULL, "", 0);
677}
678
631void perf_stat__print_shadow_stats(struct perf_evsel *evsel, 679void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
632 double avg, int cpu, 680 double avg, int cpu,
633 struct perf_stat_output_ctx *out) 681 struct perf_stat_output_ctx *out,
682 struct rblist *metric_events)
634{ 683{
635 void *ctxp = out->ctx; 684 void *ctxp = out->ctx;
636 print_metric_t print_metric = out->print_metric; 685 print_metric_t print_metric = out->print_metric;
637 double total, ratio = 0.0, total2; 686 double total, ratio = 0.0, total2;
638 const char *color = NULL; 687 const char *color = NULL;
639 int ctx = evsel_context(evsel); 688 int ctx = evsel_context(evsel);
689 struct metric_event *me;
690 int num = 1;
640 691
641 if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) { 692 if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
642 total = avg_stats(&runtime_cycles_stats[ctx][cpu]); 693 total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
@@ -820,33 +871,8 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
820 else 871 else
821 print_metric(ctxp, NULL, NULL, name, 0); 872 print_metric(ctxp, NULL, NULL, name, 0);
822 } else if (evsel->metric_expr) { 873 } else if (evsel->metric_expr) {
823 struct parse_ctx pctx; 874 generic_metric(evsel->metric_expr, evsel->metric_events, evsel->name,
824 int i; 875 evsel->metric_name, avg, cpu, out);
825
826 expr__ctx_init(&pctx);
827 expr__add_id(&pctx, evsel->name, avg);
828 for (i = 0; evsel->metric_events[i]; i++) {
829 struct saved_value *v;
830
831 v = saved_value_lookup(evsel->metric_events[i], cpu, ctx, false);
832 if (!v)
833 break;
834 expr__add_id(&pctx, evsel->metric_events[i]->name,
835 avg_stats(&v->stats));
836 }
837 if (!evsel->metric_events[i]) {
838 const char *p = evsel->metric_expr;
839
840 if (expr__parse(&ratio, &pctx, &p) == 0)
841 print_metric(ctxp, NULL, "%8.1f",
842 evsel->metric_name ?
843 evsel->metric_name :
844 out->force_header ? evsel->name : "",
845 ratio);
846 else
847 print_metric(ctxp, NULL, NULL, "", 0);
848 } else
849 print_metric(ctxp, NULL, NULL, "", 0);
850 } else if (runtime_nsecs_stats[cpu].n != 0) { 876 } else if (runtime_nsecs_stats[cpu].n != 0) {
851 char unit = 'M'; 877 char unit = 'M';
852 char unit_buf[10]; 878 char unit_buf[10];
@@ -864,6 +890,20 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
864 } else if (perf_stat_evsel__is(evsel, SMI_NUM)) { 890 } else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
865 print_smi_cost(cpu, evsel, out); 891 print_smi_cost(cpu, evsel, out);
866 } else { 892 } else {
867 print_metric(ctxp, NULL, NULL, NULL, 0); 893 num = 0;
868 } 894 }
895
896 if ((me = metricgroup__lookup(metric_events, evsel, false)) != NULL) {
897 struct metric_expr *mexp;
898
899 list_for_each_entry (mexp, &me->head, nd) {
900 if (num++ > 0)
901 out->new_line(ctxp);
902 generic_metric(mexp->metric_expr, mexp->metric_events,
903 evsel->name, mexp->metric_name,
904 avg, cpu, out);
905 }
906 }
907 if (num == 0)
908 print_metric(ctxp, NULL, NULL, NULL, 0);
869} 909}
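generic_metric() above lifts the expression evaluation out of perf_stat__print_shadow_stats() so the metric_expr entries found via metricgroup__lookup() can reuse it: each event's averaged count is registered as an identifier, then the expression string is parsed. A stripped-down sketch of that flow, using only the expr calls visible in the hunk; the "instructions / cycles" metric string and the eval_ipc() wrapper are made up for illustration:

#include "expr.h"

static double eval_ipc(double instructions, double cycles)
{
	struct parse_ctx pctx;
	const char *expr = "instructions / cycles";
	double ratio;

	expr__ctx_init(&pctx);
	expr__add_id(&pctx, "instructions", instructions);
	expr__add_id(&pctx, "cycles", cycles);

	if (expr__parse(&ratio, &pctx, &expr) != 0)
		return 0.0;	/* parse error or unknown identifier */
	return ratio;
}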
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index c9bae5fb8b47..151e9efd7286 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -70,7 +70,7 @@ double rel_stddev_stats(double stddev, double avg)
70bool __perf_evsel_stat__is(struct perf_evsel *evsel, 70bool __perf_evsel_stat__is(struct perf_evsel *evsel,
71 enum perf_stat_evsel_id id) 71 enum perf_stat_evsel_id id)
72{ 72{
73 struct perf_stat_evsel *ps = evsel->priv; 73 struct perf_stat_evsel *ps = evsel->stats;
74 74
75 return ps->id == id; 75 return ps->id == id;
76} 76}
@@ -94,7 +94,7 @@ static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
94 94
95void perf_stat_evsel_id_init(struct perf_evsel *evsel) 95void perf_stat_evsel_id_init(struct perf_evsel *evsel)
96{ 96{
97 struct perf_stat_evsel *ps = evsel->priv; 97 struct perf_stat_evsel *ps = evsel->stats;
98 int i; 98 int i;
99 99
100 /* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */ 100 /* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */
@@ -110,7 +110,7 @@ void perf_stat_evsel_id_init(struct perf_evsel *evsel)
110static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel) 110static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
111{ 111{
112 int i; 112 int i;
113 struct perf_stat_evsel *ps = evsel->priv; 113 struct perf_stat_evsel *ps = evsel->stats;
114 114
115 for (i = 0; i < 3; i++) 115 for (i = 0; i < 3; i++)
116 init_stats(&ps->res_stats[i]); 116 init_stats(&ps->res_stats[i]);
@@ -120,8 +120,8 @@ static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
120 120
121static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel) 121static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
122{ 122{
123 evsel->priv = zalloc(sizeof(struct perf_stat_evsel)); 123 evsel->stats = zalloc(sizeof(struct perf_stat_evsel));
124 if (evsel->priv == NULL) 124 if (evsel->stats == NULL)
125 return -ENOMEM; 125 return -ENOMEM;
126 perf_evsel__reset_stat_priv(evsel); 126 perf_evsel__reset_stat_priv(evsel);
127 return 0; 127 return 0;
@@ -129,11 +129,11 @@ static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
129 129
130static void perf_evsel__free_stat_priv(struct perf_evsel *evsel) 130static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
131{ 131{
132 struct perf_stat_evsel *ps = evsel->priv; 132 struct perf_stat_evsel *ps = evsel->stats;
133 133
134 if (ps) 134 if (ps)
135 free(ps->group_data); 135 free(ps->group_data);
136 zfree(&evsel->priv); 136 zfree(&evsel->stats);
137} 137}
138 138
139static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel, 139static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
@@ -278,7 +278,9 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
278 perf_evsel__compute_deltas(evsel, cpu, thread, count); 278 perf_evsel__compute_deltas(evsel, cpu, thread, count);
279 perf_counts_values__scale(count, config->scale, NULL); 279 perf_counts_values__scale(count, config->scale, NULL);
280 if (config->aggr_mode == AGGR_NONE) 280 if (config->aggr_mode == AGGR_NONE)
281 perf_stat__update_shadow_stats(evsel, count->values, cpu); 281 perf_stat__update_shadow_stats(evsel, count->val, cpu);
282 if (config->aggr_mode == AGGR_THREAD)
283 perf_stat__update_shadow_stats(evsel, count->val, 0);
282 break; 284 break;
283 case AGGR_GLOBAL: 285 case AGGR_GLOBAL:
284 aggr->val += count->val; 286 aggr->val += count->val;
@@ -319,9 +321,8 @@ int perf_stat_process_counter(struct perf_stat_config *config,
319 struct perf_evsel *counter) 321 struct perf_evsel *counter)
320{ 322{
321 struct perf_counts_values *aggr = &counter->counts->aggr; 323 struct perf_counts_values *aggr = &counter->counts->aggr;
322 struct perf_stat_evsel *ps = counter->priv; 324 struct perf_stat_evsel *ps = counter->stats;
323 u64 *count = counter->counts->aggr.values; 325 u64 *count = counter->counts->aggr.values;
324 u64 val;
325 int i, ret; 326 int i, ret;
326 327
327 aggr->val = aggr->ena = aggr->run = 0; 328 aggr->val = aggr->ena = aggr->run = 0;
@@ -361,8 +362,7 @@ int perf_stat_process_counter(struct perf_stat_config *config,
361 /* 362 /*
362 * Save the full runtime - to allow normalization during printout: 363 * Save the full runtime - to allow normalization during printout:
363 */ 364 */
364 val = counter->scale * *count; 365 perf_stat__update_shadow_stats(counter, *count, 0);
365 perf_stat__update_shadow_stats(counter, &val, 0);
366 366
367 return 0; 367 return 0;
368} 368}
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 96326b1f9443..eefca5c981fd 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -83,7 +83,7 @@ typedef void (*new_line_t )(void *ctx);
83 83
84void perf_stat__init_shadow_stats(void); 84void perf_stat__init_shadow_stats(void);
85void perf_stat__reset_shadow_stats(void); 85void perf_stat__reset_shadow_stats(void);
86void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count, 86void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
87 int cpu); 87 int cpu);
88struct perf_stat_output_ctx { 88struct perf_stat_output_ctx {
89 void *ctx; 89 void *ctx;
@@ -92,9 +92,11 @@ struct perf_stat_output_ctx {
92 bool force_header; 92 bool force_header;
93}; 93};
94 94
95struct rblist;
95void perf_stat__print_shadow_stats(struct perf_evsel *evsel, 96void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
96 double avg, int cpu, 97 double avg, int cpu,
97 struct perf_stat_output_ctx *out); 98 struct perf_stat_output_ctx *out,
99 struct rblist *metric_events);
98void perf_stat__collect_metric_expr(struct perf_evlist *); 100void perf_stat__collect_metric_expr(struct perf_evlist *);
99 101
100int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw); 102int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 6492ef38b090..1b67a8639dfe 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -46,6 +46,7 @@ struct symbol_conf symbol_conf = {
46 .show_hist_headers = true, 46 .show_hist_headers = true,
47 .symfs = "", 47 .symfs = "",
48 .event_group = true, 48 .event_group = true,
49 .inline_name = true,
49}; 50};
50 51
51static enum dso_binary_type binary_type_symtab[] = { 52static enum dso_binary_type binary_type_symtab[] = {
@@ -227,7 +228,7 @@ void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
227 struct maps *maps = &mg->maps[type]; 228 struct maps *maps = &mg->maps[type];
228 struct map *next, *curr; 229 struct map *next, *curr;
229 230
230 pthread_rwlock_wrlock(&maps->lock); 231 down_write(&maps->lock);
231 232
232 curr = maps__first(maps); 233 curr = maps__first(maps);
233 if (curr == NULL) 234 if (curr == NULL)
@@ -247,7 +248,7 @@ void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
247 curr->end = ~0ULL; 248 curr->end = ~0ULL;
248 249
249out_unlock: 250out_unlock:
250 pthread_rwlock_unlock(&maps->lock); 251 up_write(&maps->lock);
251} 252}
252 253
253struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name) 254struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
@@ -1672,7 +1673,7 @@ struct map *map_groups__find_by_name(struct map_groups *mg,
1672 struct maps *maps = &mg->maps[type]; 1673 struct maps *maps = &mg->maps[type];
1673 struct map *map; 1674 struct map *map;
1674 1675
1675 pthread_rwlock_rdlock(&maps->lock); 1676 down_read(&maps->lock);
1676 1677
1677 for (map = maps__first(maps); map; map = map__next(map)) { 1678 for (map = maps__first(maps); map; map = map__next(map)) {
1678 if (map->dso && strcmp(map->dso->short_name, name) == 0) 1679 if (map->dso && strcmp(map->dso->short_name, name) == 0)
@@ -1682,7 +1683,7 @@ struct map *map_groups__find_by_name(struct map_groups *mg,
1682 map = NULL; 1683 map = NULL;
1683 1684
1684out_unlock: 1685out_unlock:
1685 pthread_rwlock_unlock(&maps->lock); 1686 up_read(&maps->lock);
1686 return map; 1687 return map;
1687} 1688}
1688 1689
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 6352022593c6..a4f0075b4e5c 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -60,6 +60,7 @@ struct symbol {
60 u8 binding; 60 u8 binding;
61 u8 idle:1; 61 u8 idle:1;
62 u8 ignore:1; 62 u8 ignore:1;
63 u8 inlined:1;
63 u8 arch_sym; 64 u8 arch_sym;
64 char name[0]; 65 char name[0];
65}; 66};
@@ -209,6 +210,7 @@ struct addr_location {
209 struct thread *thread; 210 struct thread *thread;
210 struct map *map; 211 struct map *map;
211 struct symbol *sym; 212 struct symbol *sym;
213 const char *srcline;
212 u64 addr; 214 u64 addr;
213 char level; 215 char level;
214 u8 filtered; 216 u8 filtered;
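With the srcline rework, an inline_list entry now carries a struct symbol (possibly a fake one marked with the new sym->inlined bit added to struct symbol above) plus a preformatted srcline string, instead of raw filename/funcname/line fields. A hedged sketch of consuming the frames returned by dso__parse_addr_inlines(); the printf formatting is illustrative only:

#include <stdio.h>
#include <linux/list.h>
#include "srcline.h"
#include "symbol.h"

static void print_inline_frames(struct dso *dso, u64 addr, struct symbol *sym)
{
	struct inline_node *node = dso__parse_addr_inlines(dso, addr, sym);
	struct inline_list *ilist;

	if (node == NULL)
		return;

	list_for_each_entry(ilist, &node->val, list) {
		printf("%s %s%s\n",
		       ilist->symbol ? ilist->symbol->name : "??",
		       ilist->srcline ? ilist->srcline : SRCLINE_UNKNOWN,
		       ilist->symbol && ilist->symbol->inlined ?
		       " (inlined)" : "");
	}

	/* frees the fake inlined symbols and srclines the list owns */
	inline_node__delete(node);
}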
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 1dbcd3c8dee0..68b65b10579b 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -46,6 +46,8 @@ struct thread *thread__new(pid_t pid, pid_t tid)
46 thread->cpu = -1; 46 thread->cpu = -1;
47 INIT_LIST_HEAD(&thread->namespaces_list); 47 INIT_LIST_HEAD(&thread->namespaces_list);
48 INIT_LIST_HEAD(&thread->comm_list); 48 INIT_LIST_HEAD(&thread->comm_list);
49 init_rwsem(&thread->namespaces_lock);
50 init_rwsem(&thread->comm_lock);
49 51
50 comm_str = malloc(32); 52 comm_str = malloc(32);
51 if (!comm_str) 53 if (!comm_str)
@@ -84,18 +86,26 @@ void thread__delete(struct thread *thread)
84 map_groups__put(thread->mg); 86 map_groups__put(thread->mg);
85 thread->mg = NULL; 87 thread->mg = NULL;
86 } 88 }
89 down_write(&thread->namespaces_lock);
87 list_for_each_entry_safe(namespaces, tmp_namespaces, 90 list_for_each_entry_safe(namespaces, tmp_namespaces,
88 &thread->namespaces_list, list) { 91 &thread->namespaces_list, list) {
89 list_del(&namespaces->list); 92 list_del(&namespaces->list);
90 namespaces__free(namespaces); 93 namespaces__free(namespaces);
91 } 94 }
95 up_write(&thread->namespaces_lock);
96
97 down_write(&thread->comm_lock);
92 list_for_each_entry_safe(comm, tmp_comm, &thread->comm_list, list) { 98 list_for_each_entry_safe(comm, tmp_comm, &thread->comm_list, list) {
93 list_del(&comm->list); 99 list_del(&comm->list);
94 comm__free(comm); 100 comm__free(comm);
95 } 101 }
102 up_write(&thread->comm_lock);
103
96 unwind__finish_access(thread); 104 unwind__finish_access(thread);
97 nsinfo__zput(thread->nsinfo); 105 nsinfo__zput(thread->nsinfo);
98 106
107 exit_rwsem(&thread->namespaces_lock);
108 exit_rwsem(&thread->comm_lock);
99 free(thread); 109 free(thread);
100} 110}
101 111
@@ -126,8 +136,8 @@ struct namespaces *thread__namespaces(const struct thread *thread)
126 return list_first_entry(&thread->namespaces_list, struct namespaces, list); 136 return list_first_entry(&thread->namespaces_list, struct namespaces, list);
127} 137}
128 138
129int thread__set_namespaces(struct thread *thread, u64 timestamp, 139static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
130 struct namespaces_event *event) 140 struct namespaces_event *event)
131{ 141{
132 struct namespaces *new, *curr = thread__namespaces(thread); 142 struct namespaces *new, *curr = thread__namespaces(thread);
133 143
@@ -150,6 +160,17 @@ int thread__set_namespaces(struct thread *thread, u64 timestamp,
150 return 0; 160 return 0;
151} 161}
152 162
163int thread__set_namespaces(struct thread *thread, u64 timestamp,
164 struct namespaces_event *event)
165{
166 int ret;
167
168 down_write(&thread->namespaces_lock);
169 ret = __thread__set_namespaces(thread, timestamp, event);
170 up_write(&thread->namespaces_lock);
171 return ret;
172}
173
153struct comm *thread__comm(const struct thread *thread) 174struct comm *thread__comm(const struct thread *thread)
154{ 175{
155 if (list_empty(&thread->comm_list)) 176 if (list_empty(&thread->comm_list))
@@ -171,8 +192,8 @@ struct comm *thread__exec_comm(const struct thread *thread)
171 return last; 192 return last;
172} 193}
173 194
174int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp, 195static int ____thread__set_comm(struct thread *thread, const char *str,
175 bool exec) 196 u64 timestamp, bool exec)
176{ 197{
177 struct comm *new, *curr = thread__comm(thread); 198 struct comm *new, *curr = thread__comm(thread);
178 199
@@ -196,6 +217,17 @@ int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
196 return 0; 217 return 0;
197} 218}
198 219
220int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
221 bool exec)
222{
223 int ret;
224
225 down_write(&thread->comm_lock);
226 ret = ____thread__set_comm(thread, str, timestamp, exec);
227 up_write(&thread->comm_lock);
228 return ret;
229}
230
199int thread__set_comm_from_proc(struct thread *thread) 231int thread__set_comm_from_proc(struct thread *thread)
200{ 232{
201 char path[64]; 233 char path[64];
@@ -213,7 +245,7 @@ int thread__set_comm_from_proc(struct thread *thread)
213 return err; 245 return err;
214} 246}
215 247
216const char *thread__comm_str(const struct thread *thread) 248static const char *__thread__comm_str(const struct thread *thread)
217{ 249{
218 const struct comm *comm = thread__comm(thread); 250 const struct comm *comm = thread__comm(thread);
219 251
@@ -223,6 +255,17 @@ const char *thread__comm_str(const struct thread *thread)
223 return comm__str(comm); 255 return comm__str(comm);
224} 256}
225 257
258const char *thread__comm_str(const struct thread *thread)
259{
260 const char *str;
261
262 down_read((struct rw_semaphore *)&thread->comm_lock);
263 str = __thread__comm_str(thread);
264 up_read((struct rw_semaphore *)&thread->comm_lock);
265
266 return str;
267}
268
226/* CHECKME: it should probably better return the max comm len from its comm list */ 269/* CHECKME: it should probably better return the max comm len from its comm list */
227int thread__comm_len(struct thread *thread) 270int thread__comm_len(struct thread *thread)
228{ 271{
@@ -265,7 +308,7 @@ static int __thread__prepare_access(struct thread *thread)
265 struct maps *maps = &thread->mg->maps[i]; 308 struct maps *maps = &thread->mg->maps[i];
266 struct map *map; 309 struct map *map;
267 310
268 pthread_rwlock_rdlock(&maps->lock); 311 down_read(&maps->lock);
269 312
270 for (map = maps__first(maps); map; map = map__next(map)) { 313 for (map = maps__first(maps); map; map = map__next(map)) {
271 err = unwind__prepare_access(thread, map, &initialized); 314 err = unwind__prepare_access(thread, map, &initialized);
@@ -273,7 +316,7 @@ static int __thread__prepare_access(struct thread *thread)
273 break; 316 break;
274 } 317 }
275 318
276 pthread_rwlock_unlock(&maps->lock); 319 up_read(&maps->lock);
277 } 320 }
278 321
279 return err; 322 return err;
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index fdcea7c0cac1..40cfa36c022a 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -10,6 +10,7 @@
10#include "symbol.h" 10#include "symbol.h"
11#include <strlist.h> 11#include <strlist.h>
12#include <intlist.h> 12#include <intlist.h>
13#include "rwsem.h"
13 14
14struct thread_stack; 15struct thread_stack;
15struct unwind_libunwind_ops; 16struct unwind_libunwind_ops;
@@ -30,7 +31,9 @@ struct thread {
30 int comm_len; 31 int comm_len;
31 bool dead; /* if set thread has exited */ 32 bool dead; /* if set thread has exited */
32 struct list_head namespaces_list; 33 struct list_head namespaces_list;
34 struct rw_semaphore namespaces_lock;
33 struct list_head comm_list; 35 struct list_head comm_list;
36 struct rw_semaphore comm_lock;
34 u64 db_id; 37 u64 db_id;
35 38
36 void *priv; 39 void *priv;
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index 506150a75bd0..9892323cdd7c 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -38,6 +38,7 @@ struct perf_top {
38 int sym_pcnt_filter; 38 int sym_pcnt_filter;
39 const char *sym_filter; 39 const char *sym_filter;
40 float min_percent; 40 float min_percent;
41 unsigned int nr_threads_synthesize;
41}; 42};
42 43
43#define CONSOLE_CLEAR "" 44#define CONSOLE_CLEAR ""
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index e7d60d05596d..d7f2113462fb 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -28,7 +28,6 @@
28#include <sys/types.h> 28#include <sys/types.h>
29#include <sys/stat.h> 29#include <sys/stat.h>
30#include <sys/wait.h> 30#include <sys/wait.h>
31#include <pthread.h>
32#include <fcntl.h> 31#include <fcntl.h>
33#include <unistd.h> 32#include <unistd.h>
34#include <errno.h> 33#include <errno.h>
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index 8a9a677f7576..40b425949aa3 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -27,7 +27,6 @@
27#include <sys/stat.h> 27#include <sys/stat.h>
28#include <sys/wait.h> 28#include <sys/wait.h>
29#include <sys/mman.h> 29#include <sys/mman.h>
30#include <pthread.h>
31#include <fcntl.h> 30#include <fcntl.h>
32#include <unistd.h> 31#include <unistd.h>
33#include <errno.h> 32#include <errno.h>
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 3687b720327a..a789f952b3e9 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -7,6 +7,7 @@
7#include <sys/stat.h> 7#include <sys/stat.h>
8#include <sys/utsname.h> 8#include <sys/utsname.h>
9#include <dirent.h> 9#include <dirent.h>
10#include <fcntl.h>
10#include <inttypes.h> 11#include <inttypes.h>
11#include <signal.h> 12#include <signal.h>
12#include <stdio.h> 13#include <stdio.h>
@@ -23,6 +24,19 @@
23/* 24/*
24 * XXX We need to find a better place for these things... 25 * XXX We need to find a better place for these things...
25 */ 26 */
27
28bool perf_singlethreaded = true;
29
30void perf_set_singlethreaded(void)
31{
32 perf_singlethreaded = true;
33}
34
35void perf_set_multithreaded(void)
36{
37 perf_singlethreaded = false;
38}
39
26unsigned int page_size; 40unsigned int page_size;
27int cacheline_size; 41int cacheline_size;
28 42
@@ -175,7 +189,7 @@ out:
175 return err; 189 return err;
176} 190}
177 191
178int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size) 192static int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size)
179{ 193{
180 void *ptr; 194 void *ptr;
181 loff_t pgoff; 195 loff_t pgoff;
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index b52765e6d7b4..01434509c2e9 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -6,7 +6,6 @@
6/* glibc 2.20 deprecates _BSD_SOURCE in favour of _DEFAULT_SOURCE */ 6/* glibc 2.20 deprecates _BSD_SOURCE in favour of _DEFAULT_SOURCE */
7#define _DEFAULT_SOURCE 1 7#define _DEFAULT_SOURCE 1
8 8
9#include <fcntl.h>
10#include <stdbool.h> 9#include <stdbool.h>
11#include <stddef.h> 10#include <stddef.h>
12#include <stdlib.h> 11#include <stdlib.h>
@@ -36,7 +35,6 @@ bool lsdir_no_dot_filter(const char *name, struct dirent *d);
36int copyfile(const char *from, const char *to); 35int copyfile(const char *from, const char *to);
37int copyfile_mode(const char *from, const char *to, mode_t mode); 36int copyfile_mode(const char *from, const char *to, mode_t mode);
38int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi); 37int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi);
39int copyfile_offset(int fromfd, loff_t from_ofs, int tofd, loff_t to_ofs, u64 size);
40 38
41ssize_t readn(int fd, void *buf, size_t n); 39ssize_t readn(int fd, void *buf, size_t n);
42ssize_t writen(int fd, const void *buf, size_t n); 40ssize_t writen(int fd, const void *buf, size_t n);
@@ -65,4 +63,9 @@ int sched_getcpu(void);
65int setns(int fd, int nstype); 63int setns(int fd, int nstype);
66#endif 64#endif
67 65
66extern bool perf_singlethreaded;
67
68void perf_set_singlethreaded(void);
69void perf_set_multithreaded(void);
70
68#endif /* GIT_COMPAT_UTIL_H */ 71#endif /* GIT_COMPAT_UTIL_H */
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
index cffcda448c28..0acb1ec0e2f0 100644
--- a/tools/perf/util/vdso.c
+++ b/tools/perf/util/vdso.c
@@ -320,7 +320,7 @@ struct dso *machine__findnew_vdso(struct machine *machine,
320 struct vdso_info *vdso_info; 320 struct vdso_info *vdso_info;
321 struct dso *dso = NULL; 321 struct dso *dso = NULL;
322 322
323 pthread_rwlock_wrlock(&machine->dsos.lock); 323 down_write(&machine->dsos.lock);
324 if (!machine->vdso_info) 324 if (!machine->vdso_info)
325 machine->vdso_info = vdso_info__new(); 325 machine->vdso_info = vdso_info__new();
326 326
@@ -348,7 +348,7 @@ struct dso *machine__findnew_vdso(struct machine *machine,
348 348
349out_unlock: 349out_unlock:
350 dso__get(dso); 350 dso__get(dso);
351 pthread_rwlock_unlock(&machine->dsos.lock); 351 up_write(&machine->dsos.lock);
352 return dso; 352 return dso;
353} 353}
354 354
diff --git a/tools/perf/util/zlib.c b/tools/perf/util/zlib.c
index 008fe68d7b76..a725b958cf31 100644
--- a/tools/perf/util/zlib.c
+++ b/tools/perf/util/zlib.c
@@ -1,4 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include <fcntl.h>
2#include <stdio.h> 3#include <stdio.h>
3#include <unistd.h> 4#include <unistd.h>
4#include <sys/stat.h> 5#include <sys/stat.h>
diff --git a/tools/power/acpi/tools/acpidump/Makefile b/tools/power/acpi/tools/acpidump/Makefile
index f7c7af1f9258..b436f8675f6a 100644
--- a/tools/power/acpi/tools/acpidump/Makefile
+++ b/tools/power/acpi/tools/acpidump/Makefile
@@ -39,6 +39,7 @@ TOOL_OBJS = \
39 utnonansi.o\ 39 utnonansi.o\
40 utprint.o\ 40 utprint.o\
41 utstring.o\ 41 utstring.o\
42 utstrsuppt.o\
42 utstrtoul64.o\ 43 utstrtoul64.o\
43 utxferror.o\ 44 utxferror.o\
44 oslinuxtbl.o\ 45 oslinuxtbl.o\
diff --git a/tools/power/acpi/tools/acpidump/apdump.c b/tools/power/acpi/tools/acpidump/apdump.c
index 60df1fbd4a77..0634449156d8 100644
--- a/tools/power/acpi/tools/acpidump/apdump.c
+++ b/tools/power/acpi/tools/acpidump/apdump.c
@@ -287,8 +287,7 @@ int ap_dump_table_by_address(char *ascii_address)
287 287
288 /* Convert argument to an integer physical address */ 288 /* Convert argument to an integer physical address */
289 289
290 status = acpi_ut_strtoul64(ascii_address, ACPI_STRTOUL_64BIT, 290 status = acpi_ut_strtoul64(ascii_address, &long_address);
291 &long_address);
292 if (ACPI_FAILURE(status)) { 291 if (ACPI_FAILURE(status)) {
293 fprintf(stderr, "%s: Could not convert to a physical address\n", 292 fprintf(stderr, "%s: Could not convert to a physical address\n",
294 ascii_address); 293 ascii_address);
diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
index 943b6b614683..22c3b4ee1617 100644
--- a/tools/power/acpi/tools/acpidump/apmain.c
+++ b/tools/power/acpi/tools/acpidump/apmain.c
@@ -208,9 +208,7 @@ static int ap_do_options(int argc, char **argv)
208 case 'r': /* Dump tables from specified RSDP */ 208 case 'r': /* Dump tables from specified RSDP */
209 209
210 status = 210 status =
211 acpi_ut_strtoul64(acpi_gbl_optarg, 211 acpi_ut_strtoul64(acpi_gbl_optarg, &gbl_rsdp_base);
212 ACPI_STRTOUL_64BIT,
213 &gbl_rsdp_base);
214 if (ACPI_FAILURE(status)) { 212 if (ACPI_FAILURE(status)) {
215 fprintf(stderr, 213 fprintf(stderr,
216 "%s: Could not convert to a physical address\n", 214 "%s: Could not convert to a physical address\n",
diff --git a/tools/power/cpupower/.gitignore b/tools/power/cpupower/.gitignore
index d42073f12609..1f9977cc609c 100644
--- a/tools/power/cpupower/.gitignore
+++ b/tools/power/cpupower/.gitignore
@@ -1,7 +1,6 @@
1.libs 1.libs
2libcpupower.so 2libcpupower.so
3libcpupower.so.0 3libcpupower.so.*
4libcpupower.so.0.0.0
5build/ccdv 4build/ccdv
6cpufreq-info 5cpufreq-info
7cpufreq-set 6cpufreq-set
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index d6e1c02ddcfe..1dd5f4fcffd5 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -26,10 +26,12 @@ endif
26 26
27ifneq ($(OUTPUT),) 27ifneq ($(OUTPUT),)
28# check that the output directory actually exists 28# check that the output directory actually exists
29OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd) 29OUTDIR := $(shell cd $(OUTPUT) && pwd)
30$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist)) 30$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
31endif 31endif
32 32
33include ../../scripts/Makefile.arch
34
33# --- CONFIGURATION BEGIN --- 35# --- CONFIGURATION BEGIN ---
34 36
35# Set the following to `true' to make a unstripped, unoptimized 37# Set the following to `true' to make a unstripped, unoptimized
@@ -79,7 +81,11 @@ bindir ?= /usr/bin
79sbindir ?= /usr/sbin 81sbindir ?= /usr/sbin
80mandir ?= /usr/man 82mandir ?= /usr/man
81includedir ?= /usr/include 83includedir ?= /usr/include
84ifeq ($(IS_64_BIT), 1)
85libdir ?= /usr/lib64
86else
82libdir ?= /usr/lib 87libdir ?= /usr/lib
88endif
83localedir ?= /usr/share/locale 89localedir ?= /usr/share/locale
84docdir ?= /usr/share/doc/packages/cpupower 90docdir ?= /usr/share/doc/packages/cpupower
85confdir ?= /etc/ 91confdir ?= /etc/
diff --git a/tools/power/cpupower/bench/system.c b/tools/power/cpupower/bench/system.c
index c25a74ae51ba..2bb3eef7d5c1 100644
--- a/tools/power/cpupower/bench/system.c
+++ b/tools/power/cpupower/bench/system.c
@@ -61,7 +61,7 @@ int set_cpufreq_governor(char *governor, unsigned int cpu)
61 61
62 dprintf("set %s as cpufreq governor\n", governor); 62 dprintf("set %s as cpufreq governor\n", governor);
63 63
64 if (cpupower_is_cpu_online(cpu) != 0) { 64 if (cpupower_is_cpu_online(cpu) != 1) {
65 perror("cpufreq_cpu_exists"); 65 perror("cpufreq_cpu_exists");
66 fprintf(stderr, "error: cpu %u does not exist\n", cpu); 66 fprintf(stderr, "error: cpu %u does not exist\n", cpu);
67 return -1; 67 return -1;
diff --git a/tools/power/cpupower/utils/cpufreq-info.c b/tools/power/cpupower/utils/cpufreq-info.c
index 3e701f0e9c14..df43cd45d810 100644
--- a/tools/power/cpupower/utils/cpufreq-info.c
+++ b/tools/power/cpupower/utils/cpufreq-info.c
@@ -93,8 +93,6 @@ static void print_speed(unsigned long speed)
93 if (speed > 1000000) 93 if (speed > 1000000)
94 printf("%u.%06u GHz", ((unsigned int) speed/1000000), 94 printf("%u.%06u GHz", ((unsigned int) speed/1000000),
95 ((unsigned int) speed%1000000)); 95 ((unsigned int) speed%1000000));
96 else if (speed > 100000)
97 printf("%u MHz", (unsigned int) speed);
98 else if (speed > 1000) 96 else if (speed > 1000)
99 printf("%u.%03u MHz", ((unsigned int) speed/1000), 97 printf("%u.%03u MHz", ((unsigned int) speed/1000),
100 (unsigned int) (speed%1000)); 98 (unsigned int) (speed%1000));
diff --git a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
index 1b5da0066ebf..5b3205f16217 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
+++ b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
@@ -130,15 +130,18 @@ static struct cpuidle_monitor *cpuidle_register(void)
130{ 130{
131 int num; 131 int num;
132 char *tmp; 132 char *tmp;
133 int this_cpu;
134
135 this_cpu = sched_getcpu();
133 136
134 /* Assume idle state count is the same for all CPUs */ 137 /* Assume idle state count is the same for all CPUs */
135 cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(0); 138 cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(this_cpu);
136 139
137 if (cpuidle_sysfs_monitor.hw_states_num <= 0) 140 if (cpuidle_sysfs_monitor.hw_states_num <= 0)
138 return NULL; 141 return NULL;
139 142
140 for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) { 143 for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) {
141 tmp = cpuidle_state_name(0, num); 144 tmp = cpuidle_state_name(this_cpu, num);
142 if (tmp == NULL) 145 if (tmp == NULL)
143 continue; 146 continue;
144 147
@@ -146,7 +149,7 @@ static struct cpuidle_monitor *cpuidle_register(void)
146 strncpy(cpuidle_cstates[num].name, tmp, CSTATE_NAME_LEN - 1); 149 strncpy(cpuidle_cstates[num].name, tmp, CSTATE_NAME_LEN - 1);
147 free(tmp); 150 free(tmp);
148 151
149 tmp = cpuidle_state_desc(0, num); 152 tmp = cpuidle_state_desc(this_cpu, num);
150 if (tmp == NULL) 153 if (tmp == NULL)
151 continue; 154 continue;
152 strncpy(cpuidle_cstates[num].desc, tmp, CSTATE_DESC_LEN - 1); 155 strncpy(cpuidle_cstates[num].desc, tmp, CSTATE_DESC_LEN - 1);
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 654efd9768fd..3fab179b1aba 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -13,7 +13,7 @@ endif
13 13
14# check that the output directory actually exists 14# check that the output directory actually exists
15ifneq ($(OUTPUT),) 15ifneq ($(OUTPUT),)
16OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd) 16OUTDIR := $(shell cd $(OUTPUT) && pwd)
17$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist)) 17$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
18endif 18endif
19 19
diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild
index 65368d9027f5..db33b28c5ef3 100644
--- a/tools/testing/nvdimm/Kbuild
+++ b/tools/testing/nvdimm/Kbuild
@@ -70,6 +70,7 @@ libnvdimm-y += $(NVDIMM_SRC)/region_devs.o
70libnvdimm-y += $(NVDIMM_SRC)/region.o 70libnvdimm-y += $(NVDIMM_SRC)/region.o
71libnvdimm-y += $(NVDIMM_SRC)/namespace_devs.o 71libnvdimm-y += $(NVDIMM_SRC)/namespace_devs.o
72libnvdimm-y += $(NVDIMM_SRC)/label.o 72libnvdimm-y += $(NVDIMM_SRC)/label.o
73libnvdimm-y += $(NVDIMM_SRC)/badrange.o
73libnvdimm-$(CONFIG_ND_CLAIM) += $(NVDIMM_SRC)/claim.o 74libnvdimm-$(CONFIG_ND_CLAIM) += $(NVDIMM_SRC)/claim.o
74libnvdimm-$(CONFIG_BTT) += $(NVDIMM_SRC)/btt_devs.o 75libnvdimm-$(CONFIG_BTT) += $(NVDIMM_SRC)/btt_devs.o
75libnvdimm-$(CONFIG_NVDIMM_PFN) += $(NVDIMM_SRC)/pfn_devs.o 76libnvdimm-$(CONFIG_NVDIMM_PFN) += $(NVDIMM_SRC)/pfn_devs.o
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index bef419d4266d..7217b2b953b5 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -168,8 +168,12 @@ struct nfit_test {
168 spinlock_t lock; 168 spinlock_t lock;
169 } ars_state; 169 } ars_state;
170 struct device *dimm_dev[NUM_DCR]; 170 struct device *dimm_dev[NUM_DCR];
171 struct badrange badrange;
172 struct work_struct work;
171}; 173};
172 174
175static struct workqueue_struct *nfit_wq;
176
173static struct nfit_test *to_nfit_test(struct device *dev) 177static struct nfit_test *to_nfit_test(struct device *dev)
174{ 178{
175 struct platform_device *pdev = to_platform_device(dev); 179 struct platform_device *pdev = to_platform_device(dev);
@@ -234,48 +238,68 @@ static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
234 return rc; 238 return rc;
235} 239}
236 240
237#define NFIT_TEST_ARS_RECORDS 4
238#define NFIT_TEST_CLEAR_ERR_UNIT 256 241#define NFIT_TEST_CLEAR_ERR_UNIT 256
239 242
240static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd, 243static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
241 unsigned int buf_len) 244 unsigned int buf_len)
242{ 245{
246 int ars_recs;
247
243 if (buf_len < sizeof(*nd_cmd)) 248 if (buf_len < sizeof(*nd_cmd))
244 return -EINVAL; 249 return -EINVAL;
245 250
251 /* for testing, only store up to n records that fit within 4k */
252 ars_recs = SZ_4K / sizeof(struct nd_ars_record);
253
246 nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status) 254 nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
247 + NFIT_TEST_ARS_RECORDS * sizeof(struct nd_ars_record); 255 + ars_recs * sizeof(struct nd_ars_record);
248 nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16; 256 nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
249 nd_cmd->clear_err_unit = NFIT_TEST_CLEAR_ERR_UNIT; 257 nd_cmd->clear_err_unit = NFIT_TEST_CLEAR_ERR_UNIT;
250 258
251 return 0; 259 return 0;
252} 260}
253 261
254/* 262static void post_ars_status(struct ars_state *ars_state,
255 * Initialize the ars_state to return an ars_result 1 second in the future with 263 struct badrange *badrange, u64 addr, u64 len)
256 * a 4K error range in the middle of the requested address range.
257 */
258static void post_ars_status(struct ars_state *ars_state, u64 addr, u64 len)
259{ 264{
260 struct nd_cmd_ars_status *ars_status; 265 struct nd_cmd_ars_status *ars_status;
261 struct nd_ars_record *ars_record; 266 struct nd_ars_record *ars_record;
267 struct badrange_entry *be;
268 u64 end = addr + len - 1;
269 int i = 0;
262 270
263 ars_state->deadline = jiffies + 1*HZ; 271 ars_state->deadline = jiffies + 1*HZ;
264 ars_status = ars_state->ars_status; 272 ars_status = ars_state->ars_status;
265 ars_status->status = 0; 273 ars_status->status = 0;
266 ars_status->out_length = sizeof(struct nd_cmd_ars_status)
267 + sizeof(struct nd_ars_record);
268 ars_status->address = addr; 274 ars_status->address = addr;
269 ars_status->length = len; 275 ars_status->length = len;
270 ars_status->type = ND_ARS_PERSISTENT; 276 ars_status->type = ND_ARS_PERSISTENT;
271 ars_status->num_records = 1; 277
272 ars_record = &ars_status->records[0]; 278 spin_lock(&badrange->lock);
273 ars_record->handle = 0; 279 list_for_each_entry(be, &badrange->list, list) {
274 ars_record->err_address = addr + len / 2; 280 u64 be_end = be->start + be->length - 1;
275 ars_record->length = SZ_4K; 281 u64 rstart, rend;
282
283 /* skip entries outside the range */
284 if (be_end < addr || be->start > end)
285 continue;
286
287 rstart = (be->start < addr) ? addr : be->start;
288 rend = (be_end < end) ? be_end : end;
289 ars_record = &ars_status->records[i];
290 ars_record->handle = 0;
291 ars_record->err_address = rstart;
292 ars_record->length = rend - rstart + 1;
293 i++;
294 }
295 spin_unlock(&badrange->lock);
296 ars_status->num_records = i;
297 ars_status->out_length = sizeof(struct nd_cmd_ars_status)
298 + i * sizeof(struct nd_ars_record);
276} 299}
277 300
278static int nfit_test_cmd_ars_start(struct ars_state *ars_state, 301static int nfit_test_cmd_ars_start(struct nfit_test *t,
302 struct ars_state *ars_state,
279 struct nd_cmd_ars_start *ars_start, unsigned int buf_len, 303 struct nd_cmd_ars_start *ars_start, unsigned int buf_len,
280 int *cmd_rc) 304 int *cmd_rc)
281{ 305{
@@ -289,7 +313,7 @@ static int nfit_test_cmd_ars_start(struct ars_state *ars_state,
289 } else { 313 } else {
290 ars_start->status = 0; 314 ars_start->status = 0;
291 ars_start->scrub_time = 1; 315 ars_start->scrub_time = 1;
292 post_ars_status(ars_state, ars_start->address, 316 post_ars_status(ars_state, &t->badrange, ars_start->address,
293 ars_start->length); 317 ars_start->length);
294 *cmd_rc = 0; 318 *cmd_rc = 0;
295 } 319 }
@@ -320,7 +344,8 @@ static int nfit_test_cmd_ars_status(struct ars_state *ars_state,
320 return 0; 344 return 0;
321} 345}
322 346
323static int nfit_test_cmd_clear_error(struct nd_cmd_clear_error *clear_err, 347static int nfit_test_cmd_clear_error(struct nfit_test *t,
348 struct nd_cmd_clear_error *clear_err,
324 unsigned int buf_len, int *cmd_rc) 349 unsigned int buf_len, int *cmd_rc)
325{ 350{
326 const u64 mask = NFIT_TEST_CLEAR_ERR_UNIT - 1; 351 const u64 mask = NFIT_TEST_CLEAR_ERR_UNIT - 1;
@@ -330,18 +355,91 @@ static int nfit_test_cmd_clear_error(struct nd_cmd_clear_error *clear_err,
330 if ((clear_err->address & mask) || (clear_err->length & mask)) 355 if ((clear_err->address & mask) || (clear_err->length & mask))
331 return -EINVAL; 356 return -EINVAL;
332 357
333 /* 358 badrange_forget(&t->badrange, clear_err->address, clear_err->length);
334 * Report 'all clear' success for all commands even though a new
335 * scrub will find errors again. This is enough to have the
336 * error removed from the 'badblocks' tracking in the pmem
337 * driver.
338 */
339 clear_err->status = 0; 359 clear_err->status = 0;
340 clear_err->cleared = clear_err->length; 360 clear_err->cleared = clear_err->length;
341 *cmd_rc = 0; 361 *cmd_rc = 0;
342 return 0; 362 return 0;
343} 363}
344 364
365struct region_search_spa {
366 u64 addr;
367 struct nd_region *region;
368};
369
370static int is_region_device(struct device *dev)
371{
372 return !strncmp(dev->kobj.name, "region", 6);
373}
374
375static int nfit_test_search_region_spa(struct device *dev, void *data)
376{
377 struct region_search_spa *ctx = data;
378 struct nd_region *nd_region;
379 resource_size_t ndr_end;
380
381 if (!is_region_device(dev))
382 return 0;
383
384 nd_region = to_nd_region(dev);
385 ndr_end = nd_region->ndr_start + nd_region->ndr_size;
386
387 if (ctx->addr >= nd_region->ndr_start && ctx->addr < ndr_end) {
388 ctx->region = nd_region;
389 return 1;
390 }
391
392 return 0;
393}
394
395static int nfit_test_search_spa(struct nvdimm_bus *bus,
396 struct nd_cmd_translate_spa *spa)
397{
398 int ret;
399 struct nd_region *nd_region = NULL;
400 struct nvdimm *nvdimm = NULL;
401 struct nd_mapping *nd_mapping = NULL;
402 struct region_search_spa ctx = {
403 .addr = spa->spa,
404 .region = NULL,
405 };
406 u64 dpa;
407
408 ret = device_for_each_child(&bus->dev, &ctx,
409 nfit_test_search_region_spa);
410
411 if (!ret)
412 return -ENODEV;
413
414 nd_region = ctx.region;
415
416 dpa = ctx.addr - nd_region->ndr_start;
417
418 /*
419 * last dimm is selected for test
420 */
421 nd_mapping = &nd_region->mapping[nd_region->ndr_mappings - 1];
422 nvdimm = nd_mapping->nvdimm;
423
424 spa->devices[0].nfit_device_handle = handle[nvdimm->id];
425 spa->num_nvdimms = 1;
426 spa->devices[0].dpa = dpa;
427
428 return 0;
429}
430
431static int nfit_test_cmd_translate_spa(struct nvdimm_bus *bus,
432 struct nd_cmd_translate_spa *spa, unsigned int buf_len)
433{
434 if (buf_len < spa->translate_length)
435 return -EINVAL;
436
437 if (nfit_test_search_spa(bus, spa) < 0 || !spa->num_nvdimms)
438 spa->status = 2;
439
440 return 0;
441}
442
345static int nfit_test_cmd_smart(struct nd_cmd_smart *smart, unsigned int buf_len) 443static int nfit_test_cmd_smart(struct nd_cmd_smart *smart, unsigned int buf_len)
346{ 444{
347 static const struct nd_smart_payload smart_data = { 445 static const struct nd_smart_payload smart_data = {
@@ -378,6 +476,93 @@ static int nfit_test_cmd_smart_threshold(struct nd_cmd_smart_threshold *smart_t,
378 return 0; 476 return 0;
379} 477}
380 478
479static void uc_error_notify(struct work_struct *work)
480{
481 struct nfit_test *t = container_of(work, typeof(*t), work);
482
483 __acpi_nfit_notify(&t->pdev.dev, t, NFIT_NOTIFY_UC_MEMORY_ERROR);
484}
485
486static int nfit_test_cmd_ars_error_inject(struct nfit_test *t,
487 struct nd_cmd_ars_err_inj *err_inj, unsigned int buf_len)
488{
489 int rc;
490
491 if (buf_len != sizeof(*err_inj)) {
492 rc = -EINVAL;
493 goto err;
494 }
495
496 if (err_inj->err_inj_spa_range_length <= 0) {
497 rc = -EINVAL;
498 goto err;
499 }
500
501 rc = badrange_add(&t->badrange, err_inj->err_inj_spa_range_base,
502 err_inj->err_inj_spa_range_length);
503 if (rc < 0)
504 goto err;
505
506 if (err_inj->err_inj_options & (1 << ND_ARS_ERR_INJ_OPT_NOTIFY))
507 queue_work(nfit_wq, &t->work);
508
509 err_inj->status = 0;
510 return 0;
511
512err:
513 err_inj->status = NFIT_ARS_INJECT_INVALID;
514 return rc;
515}
516
517static int nfit_test_cmd_ars_inject_clear(struct nfit_test *t,
518 struct nd_cmd_ars_err_inj_clr *err_clr, unsigned int buf_len)
519{
520 int rc;
521
522 if (buf_len != sizeof(*err_clr)) {
523 rc = -EINVAL;
524 goto err;
525 }
526
527 if (err_clr->err_inj_clr_spa_range_length <= 0) {
528 rc = -EINVAL;
529 goto err;
530 }
531
532 badrange_forget(&t->badrange, err_clr->err_inj_clr_spa_range_base,
533 err_clr->err_inj_clr_spa_range_length);
534
535 err_clr->status = 0;
536 return 0;
537
538err:
539 err_clr->status = NFIT_ARS_INJECT_INVALID;
540 return rc;
541}
542
543static int nfit_test_cmd_ars_inject_status(struct nfit_test *t,
544 struct nd_cmd_ars_err_inj_stat *err_stat,
545 unsigned int buf_len)
546{
547 struct badrange_entry *be;
548 int max = SZ_4K / sizeof(struct nd_error_stat_query_record);
549 int i = 0;
550
551 err_stat->status = 0;
552 spin_lock(&t->badrange.lock);
553 list_for_each_entry(be, &t->badrange.list, list) {
554 err_stat->record[i].err_inj_stat_spa_range_base = be->start;
555 err_stat->record[i].err_inj_stat_spa_range_length = be->length;
556 i++;
557 if (i > max)
558 break;
559 }
560 spin_unlock(&t->badrange.lock);
561 err_stat->inj_err_rec_count = i;
562
563 return 0;
564}
565
381static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc, 566static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
382 struct nvdimm *nvdimm, unsigned int cmd, void *buf, 567 struct nvdimm *nvdimm, unsigned int cmd, void *buf,
383 unsigned int buf_len, int *cmd_rc) 568 unsigned int buf_len, int *cmd_rc)
@@ -449,6 +634,38 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
449 } 634 }
450 } else { 635 } else {
451 struct ars_state *ars_state = &t->ars_state; 636 struct ars_state *ars_state = &t->ars_state;
637 struct nd_cmd_pkg *call_pkg = buf;
638
639 if (!nd_desc)
640 return -ENOTTY;
641
642 if (cmd == ND_CMD_CALL) {
643 func = call_pkg->nd_command;
644
645 buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
646 buf = (void *) call_pkg->nd_payload;
647
648 switch (func) {
649 case NFIT_CMD_TRANSLATE_SPA:
650 rc = nfit_test_cmd_translate_spa(
651 acpi_desc->nvdimm_bus, buf, buf_len);
652 return rc;
653 case NFIT_CMD_ARS_INJECT_SET:
654 rc = nfit_test_cmd_ars_error_inject(t, buf,
655 buf_len);
656 return rc;
657 case NFIT_CMD_ARS_INJECT_CLEAR:
658 rc = nfit_test_cmd_ars_inject_clear(t, buf,
659 buf_len);
660 return rc;
661 case NFIT_CMD_ARS_INJECT_GET:
662 rc = nfit_test_cmd_ars_inject_status(t, buf,
663 buf_len);
664 return rc;
665 default:
666 return -ENOTTY;
667 }
668 }
452 669
453 if (!nd_desc || !test_bit(cmd, &nd_desc->cmd_mask)) 670 if (!nd_desc || !test_bit(cmd, &nd_desc->cmd_mask))
454 return -ENOTTY; 671 return -ENOTTY;
@@ -458,15 +675,15 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
458 rc = nfit_test_cmd_ars_cap(buf, buf_len); 675 rc = nfit_test_cmd_ars_cap(buf, buf_len);
459 break; 676 break;
460 case ND_CMD_ARS_START: 677 case ND_CMD_ARS_START:
461 rc = nfit_test_cmd_ars_start(ars_state, buf, buf_len, 678 rc = nfit_test_cmd_ars_start(t, ars_state, buf,
462 cmd_rc); 679 buf_len, cmd_rc);
463 break; 680 break;
464 case ND_CMD_ARS_STATUS: 681 case ND_CMD_ARS_STATUS:
465 rc = nfit_test_cmd_ars_status(ars_state, buf, buf_len, 682 rc = nfit_test_cmd_ars_status(ars_state, buf, buf_len,
466 cmd_rc); 683 cmd_rc);
467 break; 684 break;
468 case ND_CMD_CLEAR_ERROR: 685 case ND_CMD_CLEAR_ERROR:
469 rc = nfit_test_cmd_clear_error(buf, buf_len, cmd_rc); 686 rc = nfit_test_cmd_clear_error(t, buf, buf_len, cmd_rc);
470 break; 687 break;
471 default: 688 default:
472 return -ENOTTY; 689 return -ENOTTY;
@@ -566,10 +783,9 @@ static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
566 783
567static int ars_state_init(struct device *dev, struct ars_state *ars_state) 784static int ars_state_init(struct device *dev, struct ars_state *ars_state)
568{ 785{
786 /* for testing, only store up to n records that fit within 4k */
569 ars_state->ars_status = devm_kzalloc(dev, 787 ars_state->ars_status = devm_kzalloc(dev,
570 sizeof(struct nd_cmd_ars_status) 788 sizeof(struct nd_cmd_ars_status) + SZ_4K, GFP_KERNEL);
571 + sizeof(struct nd_ars_record) * NFIT_TEST_ARS_RECORDS,
572 GFP_KERNEL);
573 if (!ars_state->ars_status) 789 if (!ars_state->ars_status)
574 return -ENOMEM; 790 return -ENOMEM;
575 spin_lock_init(&ars_state->lock); 791 spin_lock_init(&ars_state->lock);
@@ -1419,7 +1635,8 @@ static void nfit_test0_setup(struct nfit_test *t)
1419 + i * sizeof(u64); 1635 + i * sizeof(u64);
1420 } 1636 }
1421 1637
1422 post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA0_SIZE); 1638 post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
1639 SPA0_SIZE);
1423 1640
1424 acpi_desc = &t->acpi_desc; 1641 acpi_desc = &t->acpi_desc;
1425 set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en); 1642 set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
@@ -1430,7 +1647,12 @@ static void nfit_test0_setup(struct nfit_test *t)
1430 set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en); 1647 set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
1431 set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en); 1648 set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
1432 set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en); 1649 set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
1650 set_bit(ND_CMD_CALL, &acpi_desc->bus_cmd_force_en);
1433 set_bit(ND_CMD_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en); 1651 set_bit(ND_CMD_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
1652 set_bit(NFIT_CMD_TRANSLATE_SPA, &acpi_desc->bus_nfit_cmd_force_en);
1653 set_bit(NFIT_CMD_ARS_INJECT_SET, &acpi_desc->bus_nfit_cmd_force_en);
1654 set_bit(NFIT_CMD_ARS_INJECT_CLEAR, &acpi_desc->bus_nfit_cmd_force_en);
1655 set_bit(NFIT_CMD_ARS_INJECT_GET, &acpi_desc->bus_nfit_cmd_force_en);
1434} 1656}
1435 1657
1436static void nfit_test1_setup(struct nfit_test *t) 1658static void nfit_test1_setup(struct nfit_test *t)
@@ -1520,7 +1742,8 @@ static void nfit_test1_setup(struct nfit_test *t)
1520 dcr->code = NFIT_FIC_BYTE; 1742 dcr->code = NFIT_FIC_BYTE;
1521 dcr->windows = 0; 1743 dcr->windows = 0;
1522 1744
1523 post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA2_SIZE); 1745 post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
1746 SPA2_SIZE);
1524 1747
1525 acpi_desc = &t->acpi_desc; 1748 acpi_desc = &t->acpi_desc;
1526 set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en); 1749 set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
@@ -1589,6 +1812,7 @@ static int nfit_ctl_test(struct device *dev)
1589 unsigned long mask, cmd_size, offset; 1812 unsigned long mask, cmd_size, offset;
1590 union { 1813 union {
1591 struct nd_cmd_get_config_size cfg_size; 1814 struct nd_cmd_get_config_size cfg_size;
1815 struct nd_cmd_clear_error clear_err;
1592 struct nd_cmd_ars_status ars_stat; 1816 struct nd_cmd_ars_status ars_stat;
1593 struct nd_cmd_ars_cap ars_cap; 1817 struct nd_cmd_ars_cap ars_cap;
1594 char buf[sizeof(struct nd_cmd_ars_status) 1818 char buf[sizeof(struct nd_cmd_ars_status)
@@ -1613,10 +1837,15 @@ static int nfit_ctl_test(struct device *dev)
1613 .cmd_mask = 1UL << ND_CMD_ARS_CAP 1837 .cmd_mask = 1UL << ND_CMD_ARS_CAP
1614 | 1UL << ND_CMD_ARS_START 1838 | 1UL << ND_CMD_ARS_START
1615 | 1UL << ND_CMD_ARS_STATUS 1839 | 1UL << ND_CMD_ARS_STATUS
1616 | 1UL << ND_CMD_CLEAR_ERROR, 1840 | 1UL << ND_CMD_CLEAR_ERROR
1841 | 1UL << ND_CMD_CALL,
1617 .module = THIS_MODULE, 1842 .module = THIS_MODULE,
1618 .provider_name = "ACPI.NFIT", 1843 .provider_name = "ACPI.NFIT",
1619 .ndctl = acpi_nfit_ctl, 1844 .ndctl = acpi_nfit_ctl,
1845 .bus_dsm_mask = 1UL << NFIT_CMD_TRANSLATE_SPA
1846 | 1UL << NFIT_CMD_ARS_INJECT_SET
1847 | 1UL << NFIT_CMD_ARS_INJECT_CLEAR
1848 | 1UL << NFIT_CMD_ARS_INJECT_GET,
1620 }, 1849 },
1621 .dev = &adev->dev, 1850 .dev = &adev->dev,
1622 }; 1851 };
@@ -1767,6 +1996,23 @@ static int nfit_ctl_test(struct device *dev)
1767 return -EIO; 1996 return -EIO;
1768 } 1997 }
1769 1998
1999 /* test clear error */
2000 cmd_size = sizeof(cmds.clear_err);
2001 cmds.clear_err = (struct nd_cmd_clear_error) {
2002 .length = 512,
2003 .cleared = 512,
2004 };
2005 rc = setup_result(cmds.buf, cmd_size);
2006 if (rc)
2007 return rc;
2008 rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_CLEAR_ERROR,
2009 cmds.buf, cmd_size, &cmd_rc);
2010 if (rc < 0 || cmd_rc) {
2011 dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
2012 __func__, __LINE__, rc, cmd_rc);
2013 return -EIO;
2014 }
2015
1770 return 0; 2016 return 0;
1771} 2017}
1772 2018
@@ -1915,6 +2161,10 @@ static __init int nfit_test_init(void)
1915 2161
1916 nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm); 2162 nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm);
1917 2163
2164 nfit_wq = create_singlethread_workqueue("nfit");
2165 if (!nfit_wq)
2166 return -ENOMEM;
2167
1918 nfit_test_dimm = class_create(THIS_MODULE, "nfit_test_dimm"); 2168 nfit_test_dimm = class_create(THIS_MODULE, "nfit_test_dimm");
1919 if (IS_ERR(nfit_test_dimm)) { 2169 if (IS_ERR(nfit_test_dimm)) {
1920 rc = PTR_ERR(nfit_test_dimm); 2170 rc = PTR_ERR(nfit_test_dimm);
@@ -1931,6 +2181,7 @@ static __init int nfit_test_init(void)
1931 goto err_register; 2181 goto err_register;
1932 } 2182 }
1933 INIT_LIST_HEAD(&nfit_test->resources); 2183 INIT_LIST_HEAD(&nfit_test->resources);
2184 badrange_init(&nfit_test->badrange);
1934 switch (i) { 2185 switch (i) {
1935 case 0: 2186 case 0:
1936 nfit_test->num_pm = NUM_PM; 2187 nfit_test->num_pm = NUM_PM;
@@ -1966,6 +2217,7 @@ static __init int nfit_test_init(void)
1966 goto err_register; 2217 goto err_register;
1967 2218
1968 instances[i] = nfit_test; 2219 instances[i] = nfit_test;
2220 INIT_WORK(&nfit_test->work, uc_error_notify);
1969 } 2221 }
1970 2222
1971 rc = platform_driver_register(&nfit_test_driver); 2223 rc = platform_driver_register(&nfit_test_driver);
@@ -1974,6 +2226,7 @@ static __init int nfit_test_init(void)
1974 return 0; 2226 return 0;
1975 2227
1976 err_register: 2228 err_register:
2229 destroy_workqueue(nfit_wq);
1977 for (i = 0; i < NUM_NFITS; i++) 2230 for (i = 0; i < NUM_NFITS; i++)
1978 if (instances[i]) 2231 if (instances[i])
1979 platform_device_unregister(&instances[i]->pdev); 2232 platform_device_unregister(&instances[i]->pdev);
@@ -1989,6 +2242,8 @@ static __exit void nfit_test_exit(void)
1989{ 2242{
1990 int i; 2243 int i;
1991 2244
2245 flush_workqueue(nfit_wq);
2246 destroy_workqueue(nfit_wq);
1992 for (i = 0; i < NUM_NFITS; i++) 2247 for (i = 0; i < NUM_NFITS; i++)
1993 platform_device_unregister(&instances[i]->pdev); 2248 platform_device_unregister(&instances[i]->pdev);
1994 platform_driver_unregister(&nfit_test_driver); 2249 platform_driver_unregister(&nfit_test_driver);
diff --git a/tools/testing/nvdimm/test/nfit_test.h b/tools/testing/nvdimm/test/nfit_test.h
index d3d63dd5ed38..113b44675a71 100644
--- a/tools/testing/nvdimm/test/nfit_test.h
+++ b/tools/testing/nvdimm/test/nfit_test.h
@@ -32,6 +32,58 @@ struct nfit_test_resource {
32 void *buf; 32 void *buf;
33}; 33};
34 34
35#define ND_TRANSLATE_SPA_STATUS_INVALID_SPA 2
36#define NFIT_ARS_INJECT_INVALID 2
37
38enum err_inj_options {
39 ND_ARS_ERR_INJ_OPT_NOTIFY = 0,
40};
41
42/* nfit commands */
43enum nfit_cmd_num {
44 NFIT_CMD_TRANSLATE_SPA = 5,
45 NFIT_CMD_ARS_INJECT_SET = 7,
46 NFIT_CMD_ARS_INJECT_CLEAR = 8,
47 NFIT_CMD_ARS_INJECT_GET = 9,
48};
49
50struct nd_cmd_translate_spa {
51 __u64 spa;
52 __u32 status;
53 __u8 flags;
54 __u8 _reserved[3];
55 __u64 translate_length;
56 __u32 num_nvdimms;
57 struct nd_nvdimm_device {
58 __u32 nfit_device_handle;
59 __u32 _reserved;
60 __u64 dpa;
61 } __packed devices[0];
62
63} __packed;
64
65struct nd_cmd_ars_err_inj {
66 __u64 err_inj_spa_range_base;
67 __u64 err_inj_spa_range_length;
68 __u8 err_inj_options;
69 __u32 status;
70} __packed;
71
72struct nd_cmd_ars_err_inj_clr {
73 __u64 err_inj_clr_spa_range_base;
74 __u64 err_inj_clr_spa_range_length;
75 __u32 status;
76} __packed;
77
78struct nd_cmd_ars_err_inj_stat {
79 __u32 status;
80 __u32 inj_err_rec_count;
81 struct nd_error_stat_query_record {
82 __u64 err_inj_stat_spa_range_base;
83 __u64 err_inj_stat_spa_range_length;
84 } __packed record[0];
85} __packed;
86
35union acpi_object; 87union acpi_object;
36typedef void *acpi_handle; 88typedef void *acpi_handle;
37 89
diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
index 06c71178d07d..59245b3d587c 100644
--- a/tools/testing/radix-tree/multiorder.c
+++ b/tools/testing/radix-tree/multiorder.c
@@ -618,7 +618,7 @@ static void multiorder_account(void)
618 __radix_tree_insert(&tree, 1 << 5, 5, (void *)0x12); 618 __radix_tree_insert(&tree, 1 << 5, 5, (void *)0x12);
619 __radix_tree_lookup(&tree, 1 << 5, &node, &slot); 619 __radix_tree_lookup(&tree, 1 << 5, &node, &slot);
620 assert(node->count == node->exceptional * 2); 620 assert(node->count == node->exceptional * 2);
621 __radix_tree_replace(&tree, node, slot, NULL, NULL, NULL); 621 __radix_tree_replace(&tree, node, slot, NULL, NULL);
622 assert(node->exceptional == 0); 622 assert(node->exceptional == 0);
623 623
624 item_kill_tree(&tree); 624 item_kill_tree(&tree);
diff --git a/tools/testing/scatterlist/Makefile b/tools/testing/scatterlist/Makefile
new file mode 100644
index 000000000000..933c3a6e4d77
--- /dev/null
+++ b/tools/testing/scatterlist/Makefile
@@ -0,0 +1,30 @@
1CFLAGS += -I. -I../../include -g -O2 -Wall -fsanitize=address
2LDFLAGS += -fsanitize=address -fsanitize=undefined
3TARGETS = main
4OFILES = main.o scatterlist.o
5
6ifeq ($(BUILD), 32)
7 CFLAGS += -m32
8 LDFLAGS += -m32
9endif
10
11targets: include $(TARGETS)
12
13main: $(OFILES)
14
15clean:
16 $(RM) $(TARGETS) $(OFILES) scatterlist.c linux/scatterlist.h linux/highmem.h linux/kmemleak.h asm/io.h
17 @rmdir asm
18
19scatterlist.c: ../../../lib/scatterlist.c
20 @sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
21
22.PHONY: include
23
24include: ../../../include/linux/scatterlist.h
25 @mkdir -p linux
26 @mkdir -p asm
27 @touch asm/io.h
28 @touch linux/highmem.h
29 @touch linux/kmemleak.h
30 @cp $< linux/scatterlist.h
diff --git a/tools/testing/scatterlist/linux/mm.h b/tools/testing/scatterlist/linux/mm.h
new file mode 100644
index 000000000000..6f9ac14aa800
--- /dev/null
+++ b/tools/testing/scatterlist/linux/mm.h
@@ -0,0 +1,125 @@
1#ifndef _LINUX_MM_H
2#define _LINUX_MM_H
3
4#include <assert.h>
5#include <string.h>
6#include <stdlib.h>
7#include <errno.h>
8#include <limits.h>
9#include <stdio.h>
10
11typedef unsigned long dma_addr_t;
12
13#define unlikely
14
15#define BUG_ON(x) assert(!(x))
16
17#define WARN_ON(condition) ({ \
18 int __ret_warn_on = !!(condition); \
19 unlikely(__ret_warn_on); \
20})
21
22#define WARN_ON_ONCE(condition) ({ \
23 int __ret_warn_on = !!(condition); \
24 if (unlikely(__ret_warn_on)) \
25 assert(0); \
26 unlikely(__ret_warn_on); \
27})
28
29#define PAGE_SIZE (4096)
30#define PAGE_SHIFT (12)
31#define PAGE_MASK (~(PAGE_SIZE-1))
32
33#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
34#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
35#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
36
37#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
38
39#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
40
41#define virt_to_page(x) ((void *)x)
42#define page_address(x) ((void *)x)
43
44static inline unsigned long page_to_phys(struct page *page)
45{
46 assert(0);
47
48 return 0;
49}
50
51#define page_to_pfn(page) ((unsigned long)(page) / PAGE_SIZE)
52#define pfn_to_page(pfn) (void *)((pfn) * PAGE_SIZE)
53#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
54
55#define __min(t1, t2, min1, min2, x, y) ({ \
56 t1 min1 = (x); \
57 t2 min2 = (y); \
58 (void) (&min1 == &min2); \
59 min1 < min2 ? min1 : min2; })
60
61#define ___PASTE(a,b) a##b
62#define __PASTE(a,b) ___PASTE(a,b)
63
64#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
65
66#define min(x, y) \
67 __min(typeof(x), typeof(y), \
68 __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
69 x, y)
70
71#define min_t(type, x, y) \
72 __min(type, type, \
73 __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
74 x, y)
75
76#define preemptible() (1)
77
78static inline void *kmap(struct page *page)
79{
80 assert(0);
81
82 return NULL;
83}
84
85static inline void *kmap_atomic(struct page *page)
86{
87 assert(0);
88
89 return NULL;
90}
91
92static inline void kunmap(void *addr)
93{
94 assert(0);
95}
96
97static inline void kunmap_atomic(void *addr)
98{
99 assert(0);
100}
101
102static inline unsigned long __get_free_page(unsigned int flags)
103{
104 return (unsigned long)malloc(PAGE_SIZE);
105}
106
107static inline void free_page(unsigned long page)
108{
109 free((void *)page);
110}
111
112static inline void *kmalloc(unsigned int size, unsigned int flags)
113{
114 return malloc(size);
115}
116
117#define kfree(x) free(x)
118
119#define kmemleak_alloc(a, b, c, d)
120#define kmemleak_free(a)
121
122#define PageSlab(p) (0)
123#define flush_kernel_dcache_page(p)
124
125#endif
diff --git a/tools/testing/scatterlist/main.c b/tools/testing/scatterlist/main.c
new file mode 100644
index 000000000000..0a1464181226
--- /dev/null
+++ b/tools/testing/scatterlist/main.c
@@ -0,0 +1,79 @@
1#include <stdio.h>
2#include <assert.h>
3
4#include <linux/scatterlist.h>
5
6#define MAX_PAGES (64)
7
8static void set_pages(struct page **pages, const unsigned *array, unsigned num)
9{
10 unsigned int i;
11
12 assert(num < MAX_PAGES);
13 for (i = 0; i < num; i++)
14 pages[i] = (struct page *)(unsigned long)
15 ((1 + array[i]) * PAGE_SIZE);
16}
17
18#define pfn(...) (unsigned []){ __VA_ARGS__ }
19
20int main(void)
21{
22 const unsigned int sgmax = SCATTERLIST_MAX_SEGMENT;
23 struct test {
24 int alloc_ret;
25 unsigned num_pages;
26 unsigned *pfn;
27 unsigned size;
28 unsigned int max_seg;
29 unsigned int expected_segments;
30 } *test, tests[] = {
31 { -EINVAL, 1, pfn(0), PAGE_SIZE, PAGE_SIZE + 1, 1 },
32 { -EINVAL, 1, pfn(0), PAGE_SIZE, 0, 1 },
33 { -EINVAL, 1, pfn(0), PAGE_SIZE, sgmax + 1, 1 },
34 { 0, 1, pfn(0), PAGE_SIZE, sgmax, 1 },
35 { 0, 1, pfn(0), 1, sgmax, 1 },
36 { 0, 2, pfn(0, 1), 2 * PAGE_SIZE, sgmax, 1 },
37 { 0, 2, pfn(1, 0), 2 * PAGE_SIZE, sgmax, 2 },
38 { 0, 3, pfn(0, 1, 2), 3 * PAGE_SIZE, sgmax, 1 },
39 { 0, 3, pfn(0, 2, 1), 3 * PAGE_SIZE, sgmax, 3 },
40 { 0, 3, pfn(0, 1, 3), 3 * PAGE_SIZE, sgmax, 2 },
41 { 0, 3, pfn(1, 2, 4), 3 * PAGE_SIZE, sgmax, 2 },
42 { 0, 3, pfn(1, 3, 4), 3 * PAGE_SIZE, sgmax, 2 },
43 { 0, 4, pfn(0, 1, 3, 4), 4 * PAGE_SIZE, sgmax, 2 },
44 { 0, 5, pfn(0, 1, 3, 4, 5), 5 * PAGE_SIZE, sgmax, 2 },
45 { 0, 5, pfn(0, 1, 3, 4, 6), 5 * PAGE_SIZE, sgmax, 3 },
46 { 0, 5, pfn(0, 1, 2, 3, 4), 5 * PAGE_SIZE, sgmax, 1 },
47 { 0, 5, pfn(0, 1, 2, 3, 4), 5 * PAGE_SIZE, 2 * PAGE_SIZE, 3 },
48 { 0, 6, pfn(0, 1, 2, 3, 4, 5), 6 * PAGE_SIZE, 2 * PAGE_SIZE, 3 },
49 { 0, 6, pfn(0, 2, 3, 4, 5, 6), 6 * PAGE_SIZE, 2 * PAGE_SIZE, 4 },
50 { 0, 6, pfn(0, 1, 3, 4, 5, 6), 6 * PAGE_SIZE, 2 * PAGE_SIZE, 3 },
51 { 0, 0, NULL, 0, 0, 0 },
52 };
53 unsigned int i;
54
55 for (i = 0, test = tests; test->expected_segments; test++, i++) {
56 struct page *pages[MAX_PAGES];
57 struct sg_table st;
58 int ret;
59
60 set_pages(pages, test->pfn, test->num_pages);
61
62 ret = __sg_alloc_table_from_pages(&st, pages, test->num_pages,
63 0, test->size, test->max_seg,
64 GFP_KERNEL);
65 assert(ret == test->alloc_ret);
66
67 if (test->alloc_ret)
68 continue;
69
70 assert(st.nents == test->expected_segments);
71 assert(st.orig_nents == test->expected_segments);
72
73 sg_free_table(&st);
74 }
75
76 assert(i == (sizeof(tests) / sizeof(tests[0])) - 1);
77
78 return 0;
79}
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 3c9c0bbe7dbb..eaf599dc2137 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -1,5 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2TARGETS = bpf 2TARGETS = android
3TARGETS += bpf
3TARGETS += breakpoints 4TARGETS += breakpoints
4TARGETS += capabilities 5TARGETS += capabilities
5TARGETS += cpufreq 6TARGETS += cpufreq
diff --git a/tools/testing/selftests/android/Makefile b/tools/testing/selftests/android/Makefile
new file mode 100644
index 000000000000..1a7492268993
--- /dev/null
+++ b/tools/testing/selftests/android/Makefile
@@ -0,0 +1,46 @@
1SUBDIRS := ion
2
3TEST_PROGS := run.sh
4
5.PHONY: all clean
6
7include ../lib.mk
8
9all:
 10 @# SUBDIR test prog name should be in the form: SUBDIR_test.sh
 11 @for DIR in $(SUBDIRS); do \
 12 BUILD_TARGET=$(OUTPUT)/$$DIR; \
 13 mkdir $$BUILD_TARGET -p; \
 14 make OUTPUT=$$BUILD_TARGET -C $$DIR $@; \
 15 TEST=$$DIR"_test.sh"; \
 16 if [ -e $$DIR/$$TEST ]; then \
 17 rsync -a $$DIR/$$TEST $$BUILD_TARGET/; \
 18 fi; \
 19 done
20
21override define RUN_TESTS
22 @cd $(OUTPUT); ./run.sh
23endef
24
25override define INSTALL_RULE
26 mkdir -p $(INSTALL_PATH)
27 install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES)
28
29 @for SUBDIR in $(SUBDIRS); do \
30 BUILD_TARGET=$(OUTPUT)/$$SUBDIR; \
31 mkdir $$BUILD_TARGET -p; \
32 $(MAKE) OUTPUT=$$BUILD_TARGET -C $$SUBDIR INSTALL_PATH=$(INSTALL_PATH)/$$SUBDIR install; \
33 done;
34endef
35
36override define EMIT_TESTS
37 echo "./run.sh"
38endef
39
40override define CLEAN
41 @for DIR in $(SUBDIRS); do \
42 BUILD_TARGET=$(OUTPUT)/$$DIR; \
43 mkdir $$BUILD_TARGET -p; \
44 make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
45 done
46endef
diff --git a/tools/testing/selftests/android/ion/.gitignore b/tools/testing/selftests/android/ion/.gitignore
new file mode 100644
index 000000000000..67e6f391b2a9
--- /dev/null
+++ b/tools/testing/selftests/android/ion/.gitignore
@@ -0,0 +1,2 @@
1ionapp_export
2ionapp_import
diff --git a/tools/testing/selftests/android/ion/Makefile b/tools/testing/selftests/android/ion/Makefile
new file mode 100644
index 000000000000..96e0c448b39d
--- /dev/null
+++ b/tools/testing/selftests/android/ion/Makefile
@@ -0,0 +1,16 @@
1
2INCLUDEDIR := -I. -I../../../../../drivers/staging/android/uapi/
3CFLAGS := $(CFLAGS) $(INCLUDEDIR) -Wall -O2 -g
4
5TEST_GEN_FILES := ionapp_export ionapp_import
6
7all: $(TEST_GEN_FILES)
8
9$(TEST_GEN_FILES): ipcsocket.c ionutils.c
10
11TEST_PROGS := ion_test.sh
12
13include ../../lib.mk
14
15$(OUTPUT)/ionapp_export: ionapp_export.c ipcsocket.c ionutils.c
16$(OUTPUT)/ionapp_import: ionapp_import.c ipcsocket.c ionutils.c
diff --git a/tools/testing/selftests/android/ion/README b/tools/testing/selftests/android/ion/README
new file mode 100644
index 000000000000..21783e9c451e
--- /dev/null
+++ b/tools/testing/selftests/android/ion/README
@@ -0,0 +1,101 @@
1ION BUFFER SHARING UTILITY
2==========================
3File: ion_test.sh : Utility to test ION driver buffer sharing mechanism.
4Author: Pintu Kumar <pintu.ping@gmail.com>
5
6Introduction:
7-------------
8This is a test utility to verify ION buffer sharing in user space
 9between two independent processes.
 10It uses a unix domain socket (with SCM_RIGHTS) as IPC to transfer an FD to
11another process to share the same buffer.
12This utility demonstrates how ION buffer sharing can be implemented between
13two user space processes, using various heap types.
 14The following heap types are supported by the ION driver.
15ION_HEAP_TYPE_SYSTEM (0)
16ION_HEAP_TYPE_SYSTEM_CONTIG (1)
17ION_HEAP_TYPE_CARVEOUT (2)
18ION_HEAP_TYPE_CHUNK (3)
19ION_HEAP_TYPE_DMA (4)
20
21By default only the SYSTEM and SYSTEM_CONTIG heaps are supported.
 22Each heap is associated with a respective heap id.
 23This utility is designed as a client/server program.
24The server part (ionapp_export) is the exporter of the buffer.
25It is responsible for creating an ION client, allocating the buffer based on
26the heap id, writing some data to this buffer and then exporting the FD
27(associated with this buffer) to another process using socket IPC.
 28This FD is called the buffer FD (which is different from the ION client FD).
29
30The client part (ionapp_import) is the importer of the buffer.
 31It retrieves the FD from the socket data and installs it into its address space.
32This new FD internally points to the same kernel buffer.
33So first it reads the data that is stored in this buffer and prints it.
 34Then it writes a different amount of data (it could also be different data) to
 35the same buffer.
36Finally the buffer FD must be closed by both the exporter and importer.
 37Thus the same kernel buffer is shared between two user space processes using
 38the ION driver, with only a single allocation.
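For illustration only (not part of this patch or of ionapp_export.c), a minimal
sketch of the SCM_RIGHTS step described above; it assumes an already-connected
AF_UNIX stream socket (sock) and a buffer_fd obtained from the ION allocation:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int send_buffer_fd(int sock, int buffer_fd)
{
        char data = 'F';        /* SCM_RIGHTS needs at least one byte of payload */
        struct iovec iov = { .iov_base = &data, .iov_len = sizeof(data) };
        union {
                struct cmsghdr align;
                char buf[CMSG_SPACE(sizeof(int))];
        } u;
        struct msghdr msg = {
                .msg_iov = &iov,
                .msg_iovlen = 1,
                .msg_control = u.buf,
                .msg_controllen = sizeof(u.buf),
        };
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;   /* receiver gets a duplicate of buffer_fd */
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &buffer_fd, sizeof(int));

        return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
}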
39
40Prerequisite:
41-------------
 42This utility works only if the /dev/ion interface is present.
 43The following configs need to be enabled in the kernel to include the ion driver.
44CONFIG_ANDROID=y
45CONFIG_STAGING=y
46CONFIG_ION=y
47CONFIG_ION_SYSTEM_HEAP=y
48
 49This utility must be run as the root user.
50
51
52Compile and test:
53-----------------
 54This utility is designed to run as part of the kernel's kselftest framework.
55To compile and run using kselftest you can simply do the following from the
56kernel top directory.
57linux$ make TARGETS=android kselftest
58Or you can also use:
59linux$ make -C tools/testing/selftests TARGETS=android run_tests
 60The selftest directly executes the ion_test.sh script to test buffer sharing
 61using the ion system heap.
 62Currently the heap size is hard-coded to just 10 bytes inside this script.
 63You need to be root to run it under selftest.
64
65You can also compile and test manually using the following steps:
66ion$ make
 67This will generate two executables: ionapp_export and ionapp_import.
68Now you can run the export and import manually by specifying the heap type
69and the heap size.
70You can also directly execute the shell script to run the test automatically.
71Simply use the following command to run the test.
72ion$ sudo ./ion_test.sh
73
74Test Results:
75-------------
 76The utility was verified on a 32-bit Ubuntu system with Linux kernel 4.14.
 77Here is a snapshot of the test result using kselftest.
78
79linux# make TARGETS=android kselftest
80heap_type: 0, heap_size: 10
81--------------------------------------
82heap type: 0
83 heap id: 1
84heap name: ion_system_heap
85--------------------------------------
86Fill buffer content:
870xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd
88Sharing fd: 6, Client fd: 5
89<ion_close_buffer_fd>: buffer release successfully....
90Received buffer fd: 4
91Read buffer content:
920xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0x0 0x0 0x0 0x0 0x0 0x0
930x0 0x0 0x0 0x0 0x0 0x0 0x0 0x0 0x0 0x0 0x0 0x0 0x0 0x0 0x0 0x0
94Fill buffer content:
950xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd
960xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd 0xfd
970xfd 0xfd
98<ion_close_buffer_fd>: buffer release successfully....
99ion_test.sh: heap_type: 0 - [PASS]
100
101ion_test.sh: done
diff --git a/tools/testing/selftests/android/ion/config b/tools/testing/selftests/android/ion/config
new file mode 100644
index 000000000000..19db6ca9aa2b
--- /dev/null
+++ b/tools/testing/selftests/android/ion/config
@@ -0,0 +1,4 @@
1CONFIG_ANDROID=y
2CONFIG_STAGING=y
3CONFIG_ION=y
4CONFIG_ION_SYSTEM_HEAP=y
diff --git a/tools/testing/selftests/android/ion/ion.h b/tools/testing/selftests/android/ion/ion.h
new file mode 100644
index 000000000000..f7021ac51335
--- /dev/null
+++ b/tools/testing/selftests/android/ion/ion.h
@@ -0,0 +1,143 @@
1/*
2 * ion.h
3 *
4 * Copyright (C) 2011 Google, Inc.
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17/* This file is copied from drivers/staging/android/uapi/ion.h
18 * This local copy is required for the selftest to pass when built
19 * outside the kernel source tree.
20 * Please keep this file in sync with its original file until the
21 * ion driver is moved outside the staging tree.
22 */
23
24#ifndef _UAPI_LINUX_ION_H
25#define _UAPI_LINUX_ION_H
26
27#include <linux/ioctl.h>
28#include <linux/types.h>
29
30/**
31 * enum ion_heap_types - list of all possible types of heaps
32 * @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc
33 * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
34 * @ION_HEAP_TYPE_CARVEOUT: memory allocated from a prereserved
35 * carveout heap, allocations are physically
36 * contiguous
37 * @ION_HEAP_TYPE_DMA: memory allocated via DMA API
38 * @ION_NUM_HEAPS: helper for iterating over heaps, a bit mask
39 * is used to identify the heaps, so only 32
40 * total heap types are supported
41 */
42enum ion_heap_type {
43 ION_HEAP_TYPE_SYSTEM,
44 ION_HEAP_TYPE_SYSTEM_CONTIG,
45 ION_HEAP_TYPE_CARVEOUT,
46 ION_HEAP_TYPE_CHUNK,
47 ION_HEAP_TYPE_DMA,
48 ION_HEAP_TYPE_CUSTOM, /*
49 * must be last so device specific heaps always
50 * are at the end of this enum
51 */
52};
53
54#define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8)
55
56/**
57 * allocation flags - the lower 16 bits are used by core ion, the upper 16
58 * bits are reserved for use by the heaps themselves.
59 */
60
61/*
62 * mappings of this buffer should be cached, ion will do cache maintenance
63 * when the buffer is mapped for dma
64 */
65#define ION_FLAG_CACHED 1
66
67/**
68 * DOC: Ion Userspace API
69 *
70 * create a client by opening /dev/ion
71 * most operations handled via following ioctls
72 *
73 */
74
75/**
76 * struct ion_allocation_data - metadata passed from userspace for allocations
77 * @len: size of the allocation
78 * @heap_id_mask: mask of heap ids to allocate from
79 * @flags: flags passed to heap
80 * @handle: pointer that will be populated with a cookie to use to
81 * refer to this allocation
82 *
83 * Provided by userspace as an argument to the ioctl
84 */
85struct ion_allocation_data {
86 __u64 len;
87 __u32 heap_id_mask;
88 __u32 flags;
89 __u32 fd;
90 __u32 unused;
91};
92
93#define MAX_HEAP_NAME 32
94
95/**
96 * struct ion_heap_data - data about a heap
97 * @name - first 32 characters of the heap name
98 * @type - heap type
99 * @heap_id - heap id for the heap
100 */
101struct ion_heap_data {
102 char name[MAX_HEAP_NAME];
103 __u32 type;
104 __u32 heap_id;
105 __u32 reserved0;
106 __u32 reserved1;
107 __u32 reserved2;
108};
109
110/**
111 * struct ion_heap_query - collection of data about all heaps
112 * @cnt - total number of heaps to be copied
113 * @heaps - buffer to copy heap data
114 */
115struct ion_heap_query {
116 __u32 cnt; /* Total number of heaps to be copied */
117 __u32 reserved0; /* align to 64bits */
118 __u64 heaps; /* buffer to be populated */
119 __u32 reserved1;
120 __u32 reserved2;
121};
122
123#define ION_IOC_MAGIC 'I'
124
125/**
126 * DOC: ION_IOC_ALLOC - allocate memory
127 *
128 * Takes an ion_allocation_data struct and returns it with the handle field
129 * populated with the opaque handle for the allocation.
130 */
131#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
132 struct ion_allocation_data)
133
134/**
135 * DOC: ION_IOC_HEAP_QUERY - information about available heaps
136 *
137 * Takes an ion_heap_query structure and populates information about
138 * available Ion heaps.
139 */
140#define ION_IOC_HEAP_QUERY _IOWR(ION_IOC_MAGIC, 8, \
141 struct ion_heap_query)
142
143#endif /* _UAPI_LINUX_ION_H */
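For reference, a minimal sketch of how the two ioctls above are typically
combined (illustrative only: the function name is made up, error handling is
trimmed, and the full version actually used by the test lives in ionutils.c
further below):

  #include <fcntl.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include "ion.h"

  /* Query the available heaps, then allocate from the first heap of the
   * wanted type. Returns the ION client fd and stores the dma-buf fd in
   * *buf_fd; returns -1 on any failure.
   */
  static int ion_alloc_sketch(unsigned int wanted_type, __u64 len, int *buf_fd)
  {
          struct ion_heap_data heaps[ION_NUM_HEAP_IDS];
          struct ion_allocation_data alloc;
          struct ion_heap_query query;
          unsigned int i;
          int fd;

          fd = open("/dev/ion", O_RDWR);
          if (fd < 0)
                  return -1;

          memset(&query, 0, sizeof(query));
          query.cnt = ION_NUM_HEAP_IDS;
          query.heaps = (__u64)(unsigned long)heaps;
          if (ioctl(fd, ION_IOC_HEAP_QUERY, &query) < 0)
                  goto fail;

          memset(&alloc, 0, sizeof(alloc));
          alloc.len = len;
          for (i = 0; i < query.cnt; i++)
                  if (heaps[i].type == wanted_type)
                          alloc.heap_id_mask = 1u << heaps[i].heap_id;

          if (!alloc.heap_id_mask || ioctl(fd, ION_IOC_ALLOC, &alloc) < 0)
                  goto fail;

          *buf_fd = alloc.fd;     /* a dma-buf fd: mmap() it or pass it over IPC */
          return fd;              /* keep the client fd open while the buffer is in use */
  fail:
          close(fd);
          return -1;
  }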
diff --git a/tools/testing/selftests/android/ion/ion_test.sh b/tools/testing/selftests/android/ion/ion_test.sh
new file mode 100755
index 000000000000..a1aff506f5e6
--- /dev/null
+++ b/tools/testing/selftests/android/ion/ion_test.sh
@@ -0,0 +1,55 @@
1#!/bin/bash
2
3heapsize=4096
4TCID="ion_test.sh"
5errcode=0
6
7run_test()
8{
9 heaptype=$1
10 ./ionapp_export -i $heaptype -s $heapsize &
11 sleep 1
12 ./ionapp_import
13 if [ $? -ne 0 ]; then
14 echo "$TCID: heap_type: $heaptype - [FAIL]"
15 errcode=1
16 else
17 echo "$TCID: heap_type: $heaptype - [PASS]"
18 fi
19 sleep 1
20 echo ""
21}
22
23check_root()
24{
25 uid=$(id -u)
26 if [ $uid -ne 0 ]; then
27 echo $TCID: must be run as root >&2
28 exit 0
29 fi
30}
31
32check_device()
33{
34 DEVICE=/dev/ion
35 if [ ! -e $DEVICE ]; then
36 echo $TCID: No $DEVICE device found >&2
37 echo $TCID: Maybe CONFIG_ION is not set >&2
38 exit 0
39 fi
40}
41
42main_function()
43{
44 check_device
45 check_root
46
47 # ION_SYSTEM_HEAP TEST
48 run_test 0
49 # ION_SYSTEM_CONTIG_HEAP TEST
50 run_test 1
51}
52
53main_function
54echo "$TCID: done"
55exit $errcode
diff --git a/tools/testing/selftests/android/ion/ionapp_export.c b/tools/testing/selftests/android/ion/ionapp_export.c
new file mode 100644
index 000000000000..a944e72621a9
--- /dev/null
+++ b/tools/testing/selftests/android/ion/ionapp_export.c
@@ -0,0 +1,135 @@
1/*
2 * ionapp_export.c
3 *
4 * This is a user space utility to create and export an android
5 * ION memory buffer fd to another process, using a unix domain socket as IPC.
6 * It acts as the server for ionapp_import (the client),
7 * so the server has to be started before the client.
8 *
9 * Copyright (C) 2017 Pintu Kumar <pintu.ping@gmail.com>
10 *
11 * This software is licensed under the terms of the GNU General Public
12 * License version 2, as published by the Free Software Foundation, and
13 * may be copied, distributed, and modified under those terms.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 */
21
22#include <stdio.h>
23#include <stdlib.h>
24#include <string.h>
25#include <unistd.h>
26#include <errno.h>
27#include <sys/time.h>
28#include "ionutils.h"
29#include "ipcsocket.h"
30
31
32void print_usage(int argc, char *argv[])
33{
34 printf("Usage: %s [-h <help>] [-i <heap id>] [-s <size in bytes>]\n",
35 argv[0]);
36}
37
38int main(int argc, char *argv[])
39{
40 int opt, ret, status, heapid;
41 int sockfd, client_fd, shared_fd;
42 unsigned char *map_buf;
43 unsigned long map_len, heap_type, heap_size, flags;
44 struct ion_buffer_info info;
45 struct socket_info skinfo;
46
47 if (argc < 2) {
48 print_usage(argc, argv);
49 return -1;
50 }
51
52 heap_size = 0;
53 flags = 0;
54
55 while ((opt = getopt(argc, argv, "hi:s:")) != -1) {
56 switch (opt) {
57 case 'h':
58 print_usage(argc, argv);
59 exit(0);
60 break;
61 case 'i':
62 heapid = atoi(optarg);
63 switch (heapid) {
64 case 0:
65 heap_type = ION_HEAP_TYPE_SYSTEM;
66 break;
67 case 1:
68 heap_type = ION_HEAP_TYPE_SYSTEM_CONTIG;
69 break;
70 default:
71 printf("ERROR: heap type not supported\n");
72 exit(1);
73 }
74 break;
75 case 's':
76 heap_size = atoi(optarg);
77 break;
78 default:
79 print_usage(argc, argv);
80 exit(1);
81 break;
82 }
83 }
84
85 if (heap_size <= 0) {
86 printf("heap_size cannot be 0\n");
87 print_usage(argc, argv);
88 exit(1);
89 }
90
91 printf("heap_type: %ld, heap_size: %ld\n", heap_type, heap_size);
92 info.heap_type = heap_type;
93 info.heap_size = heap_size;
94 info.flag_type = flags;
95
96 /* This is server: open the socket connection first */
97 /* Here; 1 indicates server or exporter */
98 status = opensocket(&sockfd, SOCKET_NAME, 1);
99 if (status < 0) {
100 fprintf(stderr, "<%s>: Failed opensocket.\n", __func__);
101 goto err_socket;
102 }
103 skinfo.sockfd = sockfd;
104
105 ret = ion_export_buffer_fd(&info);
106 if (ret < 0) {
107 fprintf(stderr, "FAILED: ion_get_buffer_fd\n");
108 goto err_export;
109 }
110 client_fd = info.ionfd;
111 shared_fd = info.buffd;
112 map_buf = info.buffer;
113 map_len = info.buflen;
114 write_buffer(map_buf, map_len);
115
116 /* share ion buf fd with other user process */
117 printf("Sharing fd: %d, Client fd: %d\n", shared_fd, client_fd);
118 skinfo.datafd = shared_fd;
119 skinfo.buflen = map_len;
120
121 ret = socket_send_fd(&skinfo);
122 if (ret < 0) {
123 fprintf(stderr, "FAILED: socket_send_fd\n");
124 goto err_send;
125 }
126
127err_send:
128err_export:
129 ion_close_buffer_fd(&info);
130
131err_socket:
132 closesocket(sockfd, SOCKET_NAME);
133
134 return 0;
135}
diff --git a/tools/testing/selftests/android/ion/ionapp_import.c b/tools/testing/selftests/android/ion/ionapp_import.c
new file mode 100644
index 000000000000..ae2d704cfa46
--- /dev/null
+++ b/tools/testing/selftests/android/ion/ionapp_import.c
@@ -0,0 +1,88 @@
1/*
2 * ionapp_import.c
3 *
4 * This is a user space utility to receive an android ION memory buffer fd
5 * over unix domain socket IPC, as exported by ionapp_export.
6 * It acts as the client for ionapp_export.
7 *
8 * Copyright (C) 2017 Pintu Kumar <pintu.ping@gmail.com>
9 *
10 * This software is licensed under the terms of the GNU General Public
11 * License version 2, as published by the Free Software Foundation, and
12 * may be copied, distributed, and modified under those terms.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 */
20
21#include <stdio.h>
22#include <stdlib.h>
23#include <unistd.h>
24#include <string.h>
25#include "ionutils.h"
26#include "ipcsocket.h"
27
28
29int main(void)
30{
31 int ret, status;
32 int sockfd, shared_fd;
33 unsigned char *map_buf;
34 unsigned long map_len;
35 struct ion_buffer_info info;
36 struct socket_info skinfo;
37
38 /* This is the client part. Here 0 means client or importer */
39 status = opensocket(&sockfd, SOCKET_NAME, 0);
40 if (status < 0) {
41 fprintf(stderr, "No exporter exists...\n");
42 ret = status;
43 goto err_socket;
44 }
45
46 skinfo.sockfd = sockfd;
47
48 ret = socket_receive_fd(&skinfo);
49 if (ret < 0) {
50 fprintf(stderr, "Failed: socket_receive_fd\n");
51 goto err_recv;
52 }
53
54 shared_fd = skinfo.datafd;
55 printf("Received buffer fd: %d\n", shared_fd);
56 if (shared_fd <= 0) {
57 fprintf(stderr, "ERROR: improper buf fd\n");
58 ret = -1;
59 goto err_fd;
60 }
61
62 memset(&info, 0, sizeof(info));
63 info.buffd = shared_fd;
64 info.buflen = ION_BUFFER_LEN;
65
66 ret = ion_import_buffer_fd(&info);
67 if (ret < 0) {
68 fprintf(stderr, "Failed: ion_use_buffer_fd\n");
69 goto err_import;
70 }
71
72 map_buf = info.buffer;
73 map_len = info.buflen;
74 read_buffer(map_buf, map_len);
75
76 /* Write new (possibly different) data back to the same shared buffer */
77 map_len = ION_BUFFER_LEN;
78 write_buffer(map_buf, map_len);
79
80err_import:
81 ion_close_buffer_fd(&info);
82err_fd:
83err_recv:
84err_socket:
85 closesocket(sockfd, SOCKET_NAME);
86
87 return ret;
88}
diff --git a/tools/testing/selftests/android/ion/ionutils.c b/tools/testing/selftests/android/ion/ionutils.c
new file mode 100644
index 000000000000..ce69c14f51fa
--- /dev/null
+++ b/tools/testing/selftests/android/ion/ionutils.c
@@ -0,0 +1,259 @@
1#include <stdio.h>
2#include <string.h>
3#include <unistd.h>
4#include <fcntl.h>
5#include <errno.h>
6//#include <stdint.h>
7#include <sys/ioctl.h>
8#include <sys/mman.h>
9#include "ionutils.h"
10#include "ipcsocket.h"
11
12
13void write_buffer(void *buffer, unsigned long len)
14{
15 int i;
16 unsigned char *ptr = (unsigned char *)buffer;
17
18 if (!ptr) {
19 fprintf(stderr, "<%s>: Invalid buffer...\n", __func__);
20 return;
21 }
22
23 printf("Fill buffer content:\n");
24 memset(ptr, 0xfd, len);
25 for (i = 0; i < len; i++)
26 printf("0x%x ", ptr[i]);
27 printf("\n");
28}
29
30void read_buffer(void *buffer, unsigned long len)
31{
32 int i;
33 unsigned char *ptr = (unsigned char *)buffer;
34
35 if (!ptr) {
36 fprintf(stderr, "<%s>: Invalid buffer...\n", __func__);
37 return;
38 }
39
40 printf("Read buffer content:\n");
41 for (i = 0; i < len; i++)
42 printf("0x%x ", ptr[i]);
43 printf("\n");
44}
45
46int ion_export_buffer_fd(struct ion_buffer_info *ion_info)
47{
48 int i, ret, ionfd, buffer_fd;
49 unsigned int heap_id;
50 unsigned long maplen;
51 unsigned char *map_buffer;
52 struct ion_allocation_data alloc_data;
53 struct ion_heap_query query;
54 struct ion_heap_data heap_data[MAX_HEAP_COUNT];
55
56 if (!ion_info) {
57 fprintf(stderr, "<%s>: Invalid ion info\n", __func__);
58 return -1;
59 }
60
61 /* Create an ION client */
62 ionfd = open(ION_DEVICE, O_RDWR);
63 if (ionfd < 0) {
64 fprintf(stderr, "<%s>: Failed to open ion client: %s\n",
65 __func__, strerror(errno));
66 return -1;
67 }
68
69 memset(&query, 0, sizeof(query));
70 query.cnt = MAX_HEAP_COUNT;
71 query.heaps = (unsigned long int)&heap_data[0];
72 /* Query ION heap_id_mask from ION heap */
73 ret = ioctl(ionfd, ION_IOC_HEAP_QUERY, &query);
74 if (ret < 0) {
75 fprintf(stderr, "<%s>: Failed: ION_IOC_HEAP_QUERY: %s\n",
76 __func__, strerror(errno));
77 goto err_query;
78 }
79
80 heap_id = MAX_HEAP_COUNT + 1;
81 for (i = 0; i < query.cnt; i++) {
82 if (heap_data[i].type == ion_info->heap_type) {
83 printf("--------------------------------------\n");
84 printf("heap type: %d\n", heap_data[i].type);
85 printf(" heap id: %d\n", heap_data[i].heap_id);
86 printf("heap name: %s\n", heap_data[i].name);
87 printf("--------------------------------------\n");
88 heap_id = heap_data[i].heap_id;
89 break;
90 }
91 }
92
93 if (heap_id > MAX_HEAP_COUNT) {
94 fprintf(stderr, "<%s>: ERROR: heap type does not exist\n",
95 __func__);
96 goto err_heap;
97 }
98
99 alloc_data.len = ion_info->heap_size;
100 alloc_data.heap_id_mask = 1 << heap_id;
101 alloc_data.flags = ion_info->flag_type;
102
103 /* Allocate memory for this ION client as per heap_type */
104 ret = ioctl(ionfd, ION_IOC_ALLOC, &alloc_data);
105 if (ret < 0) {
106 fprintf(stderr, "<%s>: Failed: ION_IOC_ALLOC: %s\n",
107 __func__, strerror(errno));
108 goto err_alloc;
109 }
110
111 /* This will return a valid buffer fd */
112 buffer_fd = alloc_data.fd;
113 maplen = alloc_data.len;
114
115 if (buffer_fd < 0 || maplen <= 0) {
116 fprintf(stderr, "<%s>: Invalid map data, fd: %d, len: %ld\n",
117 __func__, buffer_fd, maplen);
118 goto err_fd_data;
119 }
120
121 /* Create memory mapped buffer for the buffer fd */
122 map_buffer = (unsigned char *)mmap(NULL, maplen, PROT_READ|PROT_WRITE,
123 MAP_SHARED, buffer_fd, 0);
124 if (map_buffer == MAP_FAILED) {
125 fprintf(stderr, "<%s>: Failed: mmap: %s\n",
126 __func__, strerror(errno));
127 goto err_mmap;
128 }
129
130 ion_info->ionfd = ionfd;
131 ion_info->buffd = buffer_fd;
132 ion_info->buffer = map_buffer;
133 ion_info->buflen = maplen;
134
135 return 0;
136
137 munmap(map_buffer, maplen);
138
139err_fd_data:
140err_mmap:
141 /* in case of error: close the buffer fd */
142 if (buffer_fd)
143 close(buffer_fd);
144
145err_query:
146err_heap:
147err_alloc:
148 /* In case of error: close the ion client fd */
149 if (ionfd)
150 close(ionfd);
151
152 return -1;
153}
154
155int ion_import_buffer_fd(struct ion_buffer_info *ion_info)
156{
157 int buffd;
158 unsigned char *map_buf;
159 unsigned long map_len;
160
161 if (!ion_info) {
162 fprintf(stderr, "<%s>: Invalid ion info\n", __func__);
163 return -1;
164 }
165
166 map_len = ion_info->buflen;
167 buffd = ion_info->buffd;
168
169 if (buffd < 0 || map_len <= 0) {
170 fprintf(stderr, "<%s>: Invalid map data, fd: %d, len: %ld\n",
171 __func__, buffd, map_len);
172 goto err_buffd;
173 }
174
175 map_buf = (unsigned char *)mmap(NULL, map_len, PROT_READ|PROT_WRITE,
176 MAP_SHARED, buffd, 0);
177 if (map_buf == MAP_FAILED) {
178 printf("<%s>: Failed - mmap: %s\n",
179 __func__, strerror(errno));
180 goto err_mmap;
181 }
182
183 ion_info->buffer = map_buf;
184 ion_info->buflen = map_len;
185
186 return 0;
187
188err_mmap:
189 if (buffd)
190 close(buffd);
191
192err_buffd:
193 return -1;
194}
195
196void ion_close_buffer_fd(struct ion_buffer_info *ion_info)
197{
198 if (ion_info) {
199 /* unmap the buffer properly in the end */
200 munmap(ion_info->buffer, ion_info->buflen);
201 /* close the buffer fd */
202 if (ion_info->buffd > 0)
203 close(ion_info->buffd);
204 /* Finally, close the client fd */
205 if (ion_info->ionfd > 0)
206 close(ion_info->ionfd);
207 printf("<%s>: buffer release successfully....\n", __func__);
208 }
209}
210
211int socket_send_fd(struct socket_info *info)
212{
213 int status;
214 int fd, sockfd;
215 struct socketdata skdata;
216
217 if (!info) {
218 fprintf(stderr, "<%s>: Invalid socket info\n", __func__);
219 return -1;
220 }
221
222 sockfd = info->sockfd;
223 fd = info->datafd;
224 memset(&skdata, 0, sizeof(skdata));
225 skdata.data = fd;
226 skdata.len = sizeof(skdata.data);
227 status = sendtosocket(sockfd, &skdata);
228 if (status < 0) {
229 fprintf(stderr, "<%s>: Failed: sendtosocket\n", __func__);
230 return -1;
231 }
232
233 return 0;
234}
235
236int socket_receive_fd(struct socket_info *info)
237{
238 int status;
239 int fd, sockfd;
240 struct socketdata skdata;
241
242 if (!info) {
243 fprintf(stderr, "<%s>: Invalid socket info\n", __func__);
244 return -1;
245 }
246
247 sockfd = info->sockfd;
248 memset(&skdata, 0, sizeof(skdata));
249 status = receivefromsocket(sockfd, &skdata);
250 if (status < 0) {
251 fprintf(stderr, "<%s>: Failed: receivefromsocket\n", __func__);
252 return -1;
253 }
254
255 fd = (int)skdata.data;
256 info->datafd = fd;
257
258 return status;
259}
diff --git a/tools/testing/selftests/android/ion/ionutils.h b/tools/testing/selftests/android/ion/ionutils.h
new file mode 100644
index 000000000000..9941eb858576
--- /dev/null
+++ b/tools/testing/selftests/android/ion/ionutils.h
@@ -0,0 +1,55 @@
1#ifndef __ION_UTILS_H
2#define __ION_UTILS_H
3
4#include "ion.h"
5
6#define SOCKET_NAME "ion_socket"
7#define ION_DEVICE "/dev/ion"
8
9#define ION_BUFFER_LEN 4096
10#define MAX_HEAP_COUNT ION_HEAP_TYPE_CUSTOM
11
12struct socket_info {
13 int sockfd;
14 int datafd;
15 unsigned long buflen;
16};
17
18struct ion_buffer_info {
19 int ionfd;
20 int buffd;
21 unsigned int heap_type;
22 unsigned int flag_type;
23 unsigned long heap_size;
24 unsigned long buflen;
25 unsigned char *buffer;
26};
27
28
29/* This is used to fill the data into the mapped buffer */
30void write_buffer(void *buffer, unsigned long len);
31
32/* This is used to read the data from the exported buffer */
33void read_buffer(void *buffer, unsigned long len);
34
35/* This is used to create an ION buffer FD for the kernel buffer
36 * So you can export this same buffer to others in the form of FD
37 */
38int ion_export_buffer_fd(struct ion_buffer_info *ion_info);
39
40/* This is used to import or map an exported FD.
41 * So we point to same buffer without making a copy. Hence zero-copy.
42 */
43int ion_import_buffer_fd(struct ion_buffer_info *ion_info);
44
45/* This is used to close all references for the ION client */
46void ion_close_buffer_fd(struct ion_buffer_info *ion_info);
47
48/* This is used to send FD to another process using socket IPC */
49int socket_send_fd(struct socket_info *skinfo);
50
51/* This is used to receive FD from another process using socket IPC */
52int socket_receive_fd(struct socket_info *skinfo);
53
54
55#endif
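A condensed exporter-side usage of this helper API (illustrative only;
ionapp_export.c above is the full version with proper error reporting, and
the wrapper name here is made up):

  #include <string.h>
  #include "ionutils.h"
  #include "ipcsocket.h"

  /* Allocate a SYSTEM-heap buffer, fill it, and hand the buffer fd to the
   * importer over the local socket.
   */
  static int export_once(void)
  {
          struct ion_buffer_info info;
          struct socket_info skinfo;
          int sockfd, ret = -1;

          memset(&info, 0, sizeof(info));
          info.heap_type = ION_HEAP_TYPE_SYSTEM;
          info.heap_size = ION_BUFFER_LEN;

          if (opensocket(&sockfd, SOCKET_NAME, 1) < 0)    /* 1 == server/exporter */
                  return -1;
          skinfo.sockfd = sockfd;

          if (ion_export_buffer_fd(&info) < 0)
                  goto out;
          write_buffer(info.buffer, info.buflen);

          skinfo.datafd = info.buffd;
          skinfo.buflen = info.buflen;
          if (socket_send_fd(&skinfo) == 0)
                  ret = 0;
  out:
          ion_close_buffer_fd(&info);
          closesocket(sockfd, SOCKET_NAME);
          return ret;
  }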
diff --git a/tools/testing/selftests/android/ion/ipcsocket.c b/tools/testing/selftests/android/ion/ipcsocket.c
new file mode 100644
index 000000000000..7dc521002095
--- /dev/null
+++ b/tools/testing/selftests/android/ion/ipcsocket.c
@@ -0,0 +1,227 @@
1#include <stdio.h>
2#include <stdlib.h>
3#include <string.h>
4#include <unistd.h>
5#include <sys/types.h>
6#include <sys/socket.h>
7#include <sys/time.h>
8#include <sys/un.h>
9#include <errno.h>
10
11#include "ipcsocket.h"
12
13
14int opensocket(int *sockfd, const char *name, int connecttype)
15{
16 int ret, temp = 1;
17
18 if (!name || strlen(name) > MAX_SOCK_NAME_LEN) {
19 fprintf(stderr, "<%s>: Invalid socket name.\n", __func__);
20 return -1;
21 }
22
23 ret = socket(PF_LOCAL, SOCK_STREAM, 0);
24 if (ret < 0) {
25 fprintf(stderr, "<%s>: Failed socket: <%s>\n",
26 __func__, strerror(errno));
27 return ret;
28 }
29
30 *sockfd = ret;
31 if (setsockopt(*sockfd, SOL_SOCKET, SO_REUSEADDR,
32 (char *)&temp, sizeof(int)) < 0) {
33 fprintf(stderr, "<%s>: Failed setsockopt: <%s>\n",
34 __func__, strerror(errno));
35 goto err;
36 }
37
38 sprintf(sock_name, "/tmp/%s", name);
39
40 if (connecttype == 1) {
41 /* This is for Server connection */
42 struct sockaddr_un skaddr;
43 int clientfd;
44 socklen_t sklen;
45
46 unlink(sock_name);
47 memset(&skaddr, 0, sizeof(skaddr));
48 skaddr.sun_family = AF_LOCAL;
49 strcpy(skaddr.sun_path, sock_name);
50
51 ret = bind(*sockfd, (struct sockaddr *)&skaddr,
52 SUN_LEN(&skaddr));
53 if (ret < 0) {
54 fprintf(stderr, "<%s>: Failed bind: <%s>\n",
55 __func__, strerror(errno));
56 goto err;
57 }
58
59 ret = listen(*sockfd, 5);
60 if (ret < 0) {
61 fprintf(stderr, "<%s>: Failed listen: <%s>\n",
62 __func__, strerror(errno));
63 goto err;
64 }
65
66 memset(&skaddr, 0, sizeof(skaddr));
67 sklen = sizeof(skaddr);
68
69 ret = accept(*sockfd, (struct sockaddr *)&skaddr,
70 (socklen_t *)&sklen);
71 if (ret < 0) {
72 fprintf(stderr, "<%s>: Failed accept: <%s>\n",
73 __func__, strerror(errno));
74 goto err;
75 }
76
77 clientfd = ret;
78 *sockfd = clientfd;
79 } else {
80 /* This is for client connection */
81 struct sockaddr_un skaddr;
82
83 memset(&skaddr, 0, sizeof(skaddr));
84 skaddr.sun_family = AF_LOCAL;
85 strcpy(skaddr.sun_path, sock_name);
86
87 ret = connect(*sockfd, (struct sockaddr *)&skaddr,
88 SUN_LEN(&skaddr));
89 if (ret < 0) {
90 fprintf(stderr, "<%s>: Failed connect: <%s>\n",
91 __func__, strerror(errno));
92 goto err;
93 }
94 }
95
96 return 0;
97
98err:
99 if (*sockfd)
100 close(*sockfd);
101
102 return ret;
103}
104
105int sendtosocket(int sockfd, struct socketdata *skdata)
106{
107 int ret, buffd;
108 unsigned int len;
109 char cmsg_b[CMSG_SPACE(sizeof(int))];
110 struct cmsghdr *cmsg;
111 struct msghdr msgh;
112 struct iovec iov;
113 struct timeval timeout;
114 fd_set selFDs;
115
116 if (!skdata) {
117 fprintf(stderr, "<%s>: socketdata is NULL\n", __func__);
118 return -1;
119 }
120
121 FD_ZERO(&selFDs);
122 FD_SET(0, &selFDs);
123 FD_SET(sockfd, &selFDs);
124 timeout.tv_sec = 20;
125 timeout.tv_usec = 0;
126
127 ret = select(sockfd+1, NULL, &selFDs, NULL, &timeout);
128 if (ret < 0) {
129 fprintf(stderr, "<%s>: Failed select: <%s>\n",
130 __func__, strerror(errno));
131 return -1;
132 }
133
134 if (FD_ISSET(sockfd, &selFDs)) {
135 buffd = skdata->data;
136 len = skdata->len;
137 memset(&msgh, 0, sizeof(msgh));
138 msgh.msg_control = &cmsg_b;
139 msgh.msg_controllen = CMSG_LEN(len);
140 iov.iov_base = "OK";
141 iov.iov_len = 2;
142 msgh.msg_iov = &iov;
143 msgh.msg_iovlen = 1;
144 cmsg = CMSG_FIRSTHDR(&msgh);
145 cmsg->cmsg_level = SOL_SOCKET;
146 cmsg->cmsg_type = SCM_RIGHTS;
147 cmsg->cmsg_len = CMSG_LEN(len);
148 memcpy(CMSG_DATA(cmsg), &buffd, len);
149
150 ret = sendmsg(sockfd, &msgh, MSG_DONTWAIT);
151 if (ret < 0) {
152 fprintf(stderr, "<%s>: Failed sendmsg: <%s>\n",
153 __func__, strerror(errno));
154 return -1;
155 }
156 }
157
158 return 0;
159}
160
161int receivefromsocket(int sockfd, struct socketdata *skdata)
162{
163 int ret, buffd;
164 unsigned int len = 0;
165 char cmsg_b[CMSG_SPACE(sizeof(int))];
166 struct cmsghdr *cmsg;
167 struct msghdr msgh;
168 struct iovec iov;
169 fd_set recvFDs;
170 char data[32];
171
172 if (!skdata) {
173 fprintf(stderr, "<%s>: socketdata is NULL\n", __func__);
174 return -1;
175 }
176
177 FD_ZERO(&recvFDs);
178 FD_SET(0, &recvFDs);
179 FD_SET(sockfd, &recvFDs);
180
181 ret = select(sockfd+1, &recvFDs, NULL, NULL, NULL);
182 if (ret < 0) {
183 fprintf(stderr, "<%s>: Failed select: <%s>\n",
184 __func__, strerror(errno));
185 return -1;
186 }
187
188 if (FD_ISSET(sockfd, &recvFDs)) {
189 len = sizeof(buffd);
190 memset(&msgh, 0, sizeof(msgh));
191 msgh.msg_control = &cmsg_b;
192 msgh.msg_controllen = CMSG_LEN(len);
193 iov.iov_base = data;
194 iov.iov_len = sizeof(data)-1;
195 msgh.msg_iov = &iov;
196 msgh.msg_iovlen = 1;
197 cmsg = CMSG_FIRSTHDR(&msgh);
198 cmsg->cmsg_level = SOL_SOCKET;
199 cmsg->cmsg_type = SCM_RIGHTS;
200 cmsg->cmsg_len = CMSG_LEN(len);
201
202 ret = recvmsg(sockfd, &msgh, MSG_DONTWAIT);
203 if (ret < 0) {
204 fprintf(stderr, "<%s>: Failed recvmsg: <%s>\n",
205 __func__, strerror(errno));
206 return -1;
207 }
208
209 memcpy(&buffd, CMSG_DATA(cmsg), len);
210 skdata->data = buffd;
211 skdata->len = len;
212 }
213 return 0;
214}
215
216int closesocket(int sockfd, char *name)
217{
218 char sockname[MAX_SOCK_NAME_LEN];
219
220 if (sockfd)
221 close(sockfd);
222 sprintf(sockname, "/tmp/%s", name);
223 unlink(sockname);
224 shutdown(sockfd, 2);
225
226 return 0;
227}
diff --git a/tools/testing/selftests/android/ion/ipcsocket.h b/tools/testing/selftests/android/ion/ipcsocket.h
new file mode 100644
index 000000000000..b3e84498a8a1
--- /dev/null
+++ b/tools/testing/selftests/android/ion/ipcsocket.h
@@ -0,0 +1,35 @@
1
2#ifndef _IPCSOCKET_H
3#define _IPCSOCKET_H
4
5
6#define MAX_SOCK_NAME_LEN 64
7
8char sock_name[MAX_SOCK_NAME_LEN];
9
10/* This structure holds the IPC data
11 * data: holds the buffer fd
12 * len: the length of the 32-bit integer fd
13 */
14struct socketdata {
15 int data;
16 unsigned int len;
17};
18
19/* This API is used to open the IPC socket connection
20 * name: implies a unique socket name in the system
21 * connecttype: implies server(1) or client(0)
22 */
23int opensocket(int *sockfd, const char *name, int connecttype);
24
25/* This is the API to send socket data over IPC socket */
26int sendtosocket(int sockfd, struct socketdata *data);
27
28/* This is the API to receive socket data over IPC socket */
29int receivefromsocket(int sockfd, struct socketdata *data);
30
31/* This is the API to close the socket connection */
32int closesocket(int sockfd, char *name);
33
34
35#endif
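A condensed importer-side usage of this IPC API (illustrative only;
ionapp_import.c above is the full version, the helper name is made up, and
"ion_socket" is the same name that SOCKET_NAME in ionutils.h expands to):

  #include <string.h>
  #include "ipcsocket.h"

  /* Connect to the exporter and receive the shared buffer fd.
   * Returns the received fd, or -1 on failure.
   */
  static int receive_shared_fd(void)
  {
          struct socketdata skdata;
          int sockfd;

          if (opensocket(&sockfd, "ion_socket", 0) < 0)   /* 0 == client/importer */
                  return -1;

          memset(&skdata, 0, sizeof(skdata));
          if (receivefromsocket(sockfd, &skdata) < 0) {
                  closesocket(sockfd, "ion_socket");
                  return -1;
          }

          closesocket(sockfd, "ion_socket");
          return skdata.data;     /* the buffer fd, now valid in this process */
  }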
diff --git a/tools/testing/selftests/android/run.sh b/tools/testing/selftests/android/run.sh
new file mode 100755
index 000000000000..dd8edf291454
--- /dev/null
+++ b/tools/testing/selftests/android/run.sh
@@ -0,0 +1,3 @@
1#!/bin/sh
2
3(cd ion; ./ion_test.sh)
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index eab7644a07b4..9316e648a880 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -1,4 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2
2LIBDIR := ../../../lib 3LIBDIR := ../../../lib
3BPFDIR := $(LIBDIR)/bpf 4BPFDIR := $(LIBDIR)/bpf
4APIDIR := ../../../include/uapi 5APIDIR := ../../../include/uapi
@@ -10,19 +11,20 @@ ifneq ($(wildcard $(GENHDR)),)
10endif 11endif
11 12
12CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include 13CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
13LDLIBS += -lcap -lelf 14LDLIBS += -lcap -lelf -lrt
14 15
15TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ 16TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
16 test_align 17 test_align test_verifier_log test_dev_cgroup
17 18
18TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \ 19TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
19 test_pkt_md_access.o test_xdp_redirect.o sockmap_parse_prog.o sockmap_verdict_prog.o 20 test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \
21 sockmap_verdict_prog.o dev_cgroup.o
20 22
21TEST_PROGS := test_kmod.sh test_xdp_redirect.sh 23TEST_PROGS := test_kmod.sh test_xdp_redirect.sh test_xdp_meta.sh
22 24
23include ../lib.mk 25include ../lib.mk
24 26
25BPFOBJ := $(OUTPUT)/libbpf.a 27BPFOBJ := $(OUTPUT)/libbpf.a $(OUTPUT)/cgroup_helpers.c
26 28
27$(TEST_GEN_PROGS): $(BPFOBJ) 29$(TEST_GEN_PROGS): $(BPFOBJ)
28 30
@@ -35,8 +37,20 @@ $(BPFOBJ): force
35 $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ 37 $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/
36 38
37CLANG ?= clang 39CLANG ?= clang
40LLC ?= llc
41
42PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
43
44# Let newer LLVM versions transparently probe the kernel for availability
45# of full BPF instruction set.
46ifeq ($(PROBE),)
47 CPU ?= probe
48else
49 CPU ?= generic
50endif
38 51
39%.o: %.c 52%.o: %.c
40 $(CLANG) -I. -I./include/uapi -I../../../include/uapi \ 53 $(CLANG) -I. -I./include/uapi -I../../../include/uapi \
41 -Wno-compare-distinct-pointer-types \ 54 -Wno-compare-distinct-pointer-types \
42 -O2 -target bpf -c $< -o $@ 55 -O2 -target bpf -emit-llvm -c $< -o - | \
56 $(LLC) -march=bpf -mcpu=$(CPU) -filetype=obj -o $@
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index 50353c10573c..fd9a17fa8a8b 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -63,14 +63,25 @@ static unsigned long long (*bpf_get_prandom_u32)(void) =
63 (void *) BPF_FUNC_get_prandom_u32; 63 (void *) BPF_FUNC_get_prandom_u32;
64static int (*bpf_xdp_adjust_head)(void *ctx, int offset) = 64static int (*bpf_xdp_adjust_head)(void *ctx, int offset) =
65 (void *) BPF_FUNC_xdp_adjust_head; 65 (void *) BPF_FUNC_xdp_adjust_head;
66static int (*bpf_xdp_adjust_meta)(void *ctx, int offset) =
67 (void *) BPF_FUNC_xdp_adjust_meta;
66static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval, 68static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval,
67 int optlen) = 69 int optlen) =
68 (void *) BPF_FUNC_setsockopt; 70 (void *) BPF_FUNC_setsockopt;
71static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval,
72 int optlen) =
73 (void *) BPF_FUNC_getsockopt;
69static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) = 74static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) =
70 (void *) BPF_FUNC_sk_redirect_map; 75 (void *) BPF_FUNC_sk_redirect_map;
71static int (*bpf_sock_map_update)(void *map, void *key, void *value, 76static int (*bpf_sock_map_update)(void *map, void *key, void *value,
72 unsigned long long flags) = 77 unsigned long long flags) =
73 (void *) BPF_FUNC_sock_map_update; 78 (void *) BPF_FUNC_sock_map_update;
79static int (*bpf_perf_event_read_value)(void *map, unsigned long long flags,
80 void *buf, unsigned int buf_size) =
81 (void *) BPF_FUNC_perf_event_read_value;
82static int (*bpf_perf_prog_read_value)(void *ctx, void *buf,
83 unsigned int buf_size) =
84 (void *) BPF_FUNC_perf_prog_read_value;
74 85
75 86
76/* llvm builtin functions that eBPF C program may use to 87/* llvm builtin functions that eBPF C program may use to
@@ -110,7 +121,47 @@ static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) =
110static int (*bpf_skb_change_head)(void *, int len, int flags) = 121static int (*bpf_skb_change_head)(void *, int len, int flags) =
111 (void *) BPF_FUNC_skb_change_head; 122 (void *) BPF_FUNC_skb_change_head;
112 123
124/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
125#if defined(__TARGET_ARCH_x86)
126 #define bpf_target_x86
127 #define bpf_target_defined
128#elif defined(__TARGET_ARCH_s930x)
129 #define bpf_target_s930x
130 #define bpf_target_defined
131#elif defined(__TARGET_ARCH_arm64)
132 #define bpf_target_arm64
133 #define bpf_target_defined
134#elif defined(__TARGET_ARCH_mips)
135 #define bpf_target_mips
136 #define bpf_target_defined
137#elif defined(__TARGET_ARCH_powerpc)
138 #define bpf_target_powerpc
139 #define bpf_target_defined
140#elif defined(__TARGET_ARCH_sparc)
141 #define bpf_target_sparc
142 #define bpf_target_defined
143#else
144 #undef bpf_target_defined
145#endif
146
147/* Fall back to what the compiler says */
148#ifndef bpf_target_defined
113#if defined(__x86_64__) 149#if defined(__x86_64__)
150 #define bpf_target_x86
151#elif defined(__s390x__)
152 #define bpf_target_s930x
153#elif defined(__aarch64__)
154 #define bpf_target_arm64
155#elif defined(__mips__)
156 #define bpf_target_mips
157#elif defined(__powerpc__)
158 #define bpf_target_powerpc
159#elif defined(__sparc__)
160 #define bpf_target_sparc
161#endif
162#endif
163
164#if defined(bpf_target_x86)
114 165
115#define PT_REGS_PARM1(x) ((x)->di) 166#define PT_REGS_PARM1(x) ((x)->di)
116#define PT_REGS_PARM2(x) ((x)->si) 167#define PT_REGS_PARM2(x) ((x)->si)
@@ -123,7 +174,7 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) =
123#define PT_REGS_SP(x) ((x)->sp) 174#define PT_REGS_SP(x) ((x)->sp)
124#define PT_REGS_IP(x) ((x)->ip) 175#define PT_REGS_IP(x) ((x)->ip)
125 176
126#elif defined(__s390x__) 177#elif defined(bpf_target_s390x)
127 178
128#define PT_REGS_PARM1(x) ((x)->gprs[2]) 179#define PT_REGS_PARM1(x) ((x)->gprs[2])
129#define PT_REGS_PARM2(x) ((x)->gprs[3]) 180#define PT_REGS_PARM2(x) ((x)->gprs[3])
@@ -136,7 +187,7 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) =
136#define PT_REGS_SP(x) ((x)->gprs[15]) 187#define PT_REGS_SP(x) ((x)->gprs[15])
137#define PT_REGS_IP(x) ((x)->psw.addr) 188#define PT_REGS_IP(x) ((x)->psw.addr)
138 189
139#elif defined(__aarch64__) 190#elif defined(bpf_target_arm64)
140 191
141#define PT_REGS_PARM1(x) ((x)->regs[0]) 192#define PT_REGS_PARM1(x) ((x)->regs[0])
142#define PT_REGS_PARM2(x) ((x)->regs[1]) 193#define PT_REGS_PARM2(x) ((x)->regs[1])
@@ -149,7 +200,7 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) =
149#define PT_REGS_SP(x) ((x)->sp) 200#define PT_REGS_SP(x) ((x)->sp)
150#define PT_REGS_IP(x) ((x)->pc) 201#define PT_REGS_IP(x) ((x)->pc)
151 202
152#elif defined(__mips__) 203#elif defined(bpf_target_mips)
153 204
154#define PT_REGS_PARM1(x) ((x)->regs[4]) 205#define PT_REGS_PARM1(x) ((x)->regs[4])
155#define PT_REGS_PARM2(x) ((x)->regs[5]) 206#define PT_REGS_PARM2(x) ((x)->regs[5])
@@ -162,7 +213,7 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) =
162#define PT_REGS_SP(x) ((x)->regs[29]) 213#define PT_REGS_SP(x) ((x)->regs[29])
163#define PT_REGS_IP(x) ((x)->cp0_epc) 214#define PT_REGS_IP(x) ((x)->cp0_epc)
164 215
165#elif defined(__powerpc__) 216#elif defined(bpf_target_powerpc)
166 217
167#define PT_REGS_PARM1(x) ((x)->gpr[3]) 218#define PT_REGS_PARM1(x) ((x)->gpr[3])
168#define PT_REGS_PARM2(x) ((x)->gpr[4]) 219#define PT_REGS_PARM2(x) ((x)->gpr[4])
@@ -173,7 +224,7 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) =
173#define PT_REGS_SP(x) ((x)->sp) 224#define PT_REGS_SP(x) ((x)->sp)
174#define PT_REGS_IP(x) ((x)->nip) 225#define PT_REGS_IP(x) ((x)->nip)
175 226
176#elif defined(__sparc__) 227#elif defined(bpf_target_sparc)
177 228
178#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0]) 229#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
179#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1]) 230#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1])
@@ -183,6 +234,8 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) =
183#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7]) 234#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7])
184#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0]) 235#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
185#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP]) 236#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])
237
238/* Should this also be a bpf_target check for the sparc case? */
186#if defined(__arch64__) 239#if defined(__arch64__)
187#define PT_REGS_IP(x) ((x)->tpc) 240#define PT_REGS_IP(x) ((x)->tpc)
188#else 241#else
@@ -191,10 +244,10 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) =
191 244
192#endif 245#endif
193 246
194#ifdef __powerpc__ 247#ifdef bpf_target_powerpc
195#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; }) 248#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
196#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP 249#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
197#elif defined(__sparc__) 250#elif bpf_target_sparc
198#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); }) 251#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
199#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP 252#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
200#else 253#else
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
new file mode 100644
index 000000000000..f3bca3ade0f3
--- /dev/null
+++ b/tools/testing/selftests/bpf/cgroup_helpers.c
@@ -0,0 +1,178 @@
1// SPDX-License-Identifier: GPL-2.0
2#define _GNU_SOURCE
3#include <sched.h>
4#include <sys/mount.h>
5#include <sys/stat.h>
6#include <sys/types.h>
7#include <linux/limits.h>
8#include <stdio.h>
9#include <linux/sched.h>
10#include <fcntl.h>
11#include <unistd.h>
12#include <ftw.h>
13
14
15#include "cgroup_helpers.h"
16
17/*
18 * To avoid relying on the system setup, when setup_cgroup_environment() is
19 * called we create a new mount namespace and cgroup namespace. The cgroup2
20 * root is mounted at CGROUP_MOUNT_PATH
21 *
22 * Unfortunately, most people don't have cgroupv2 enabled at this point in time.
23 * It's easier to create our own mount namespace and manage it ourselves.
24 *
25 * We assume /mnt exists.
26 */
27
28#define WALK_FD_LIMIT 16
29#define CGROUP_MOUNT_PATH "/mnt"
30#define CGROUP_WORK_DIR "/cgroup-test-work-dir"
31#define format_cgroup_path(buf, path) \
32 snprintf(buf, sizeof(buf), "%s%s%s", CGROUP_MOUNT_PATH, \
33 CGROUP_WORK_DIR, path)
34
35/**
36 * setup_cgroup_environment() - Setup the cgroup environment
37 *
38 * After calling this function, cleanup_cgroup_environment should be called
39 * once testing is complete.
40 *
41 * This function will print an error to stderr and return 1 if it is unable
42 * to setup the cgroup environment. If setup is successful, 0 is returned.
43 */
44int setup_cgroup_environment(void)
45{
46 char cgroup_workdir[PATH_MAX + 1];
47
48 format_cgroup_path(cgroup_workdir, "");
49
50 if (unshare(CLONE_NEWNS)) {
51 log_err("unshare");
52 return 1;
53 }
54
55 if (mount("none", "/", NULL, MS_REC | MS_PRIVATE, NULL)) {
56 log_err("mount fakeroot");
57 return 1;
58 }
59
60 if (mount("none", CGROUP_MOUNT_PATH, "cgroup2", 0, NULL) && errno != EBUSY) {
61 log_err("mount cgroup2");
62 return 1;
63 }
64
65 /* Cleanup existing failed runs, now that the environment is setup */
66 cleanup_cgroup_environment();
67
68 if (mkdir(cgroup_workdir, 0777) && errno != EEXIST) {
69 log_err("mkdir cgroup work dir");
70 return 1;
71 }
72
73 return 0;
74}
75
76static int nftwfunc(const char *filename, const struct stat *statptr,
77 int fileflags, struct FTW *pfwt)
78{
79 if ((fileflags & FTW_D) && rmdir(filename))
80 log_err("Removing cgroup: %s", filename);
81 return 0;
82}
83
84
85static int join_cgroup_from_top(char *cgroup_path)
86{
87 char cgroup_procs_path[PATH_MAX + 1];
88 pid_t pid = getpid();
89 int fd, rc = 0;
90
91 snprintf(cgroup_procs_path, sizeof(cgroup_procs_path),
92 "%s/cgroup.procs", cgroup_path);
93
94 fd = open(cgroup_procs_path, O_WRONLY);
95 if (fd < 0) {
96 log_err("Opening Cgroup Procs: %s", cgroup_procs_path);
97 return 1;
98 }
99
100 if (dprintf(fd, "%d\n", pid) < 0) {
101 log_err("Joining Cgroup");
102 rc = 1;
103 }
104
105 close(fd);
106 return rc;
107}
108
109/**
110 * join_cgroup() - Join a cgroup
111 * @path: The cgroup path, relative to the workdir, to join
112 *
113 * This function expects a cgroup to already be created, relative to the cgroup
114 * work dir, and it joins it. For example, passing "/my-cgroup" as the path
115 * would actually put the calling process into the cgroup
116 * "/cgroup-test-work-dir/my-cgroup"
117 *
118 * On success, it returns 0, otherwise on failure it returns 1.
119 */
120int join_cgroup(char *path)
121{
122 char cgroup_path[PATH_MAX + 1];
123
124 format_cgroup_path(cgroup_path, path);
125 return join_cgroup_from_top(cgroup_path);
126}
127
128/**
129 * cleanup_cgroup_environment() - Cleanup Cgroup Testing Environment
130 *
131 * This is an idempotent function to delete all temporary cgroups that
132 * have been created during the test, including the cgroup testing work
133 * directory.
134 *
135 * At call time, it moves the calling process to the root cgroup, and then
136 * runs the deletion process. It is idempotent, and should not fail, unless
137 * a process is lingering.
138 *
139 * On failure, it will print an error to stderr, and try to continue.
140 */
141void cleanup_cgroup_environment(void)
142{
143 char cgroup_workdir[PATH_MAX + 1];
144
145 format_cgroup_path(cgroup_workdir, "");
146 join_cgroup_from_top(CGROUP_MOUNT_PATH);
147 nftw(cgroup_workdir, nftwfunc, WALK_FD_LIMIT, FTW_DEPTH | FTW_MOUNT);
148}
149
150/**
151 * create_and_get_cgroup() - Create a cgroup, relative to workdir, and get the FD
152 * @path: The cgroup path, relative to the workdir, to join
153 *
154 * This function creates a cgroup under the top level workdir and returns the
155 * file descriptor. It is idempotent.
156 *
157 * On success, it returns the file descriptor. On failure it returns 0.
158 * If there is a failure, it prints the error to stderr.
159 */
160int create_and_get_cgroup(char *path)
161{
162 char cgroup_path[PATH_MAX + 1];
163 int fd;
164
165 format_cgroup_path(cgroup_path, path);
166 if (mkdir(cgroup_path, 0777) && errno != EEXIST) {
167 log_err("mkdiring cgroup %s .. %s", path, cgroup_path);
168 return 0;
169 }
170
171 fd = open(cgroup_path, O_RDONLY);
172 if (fd < 0) {
173 log_err("Opening Cgroup");
174 return 0;
175 }
176
177 return fd;
178}
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.h b/tools/testing/selftests/bpf/cgroup_helpers.h
new file mode 100644
index 000000000000..06485e0002b3
--- /dev/null
+++ b/tools/testing/selftests/bpf/cgroup_helpers.h
@@ -0,0 +1,17 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __CGROUP_HELPERS_H
3#define __CGROUP_HELPERS_H
4#include <errno.h>
5#include <string.h>
6
7#define clean_errno() (errno == 0 ? "None" : strerror(errno))
8#define log_err(MSG, ...) fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \
9 __FILE__, __LINE__, clean_errno(), ##__VA_ARGS__)
10
11
12int create_and_get_cgroup(char *path);
13int join_cgroup(char *path);
14int setup_cgroup_environment(void);
15void cleanup_cgroup_environment(void);
16
17#endif
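The typical call sequence for these helpers, condensed from test_dev_cgroup.c
below (illustrative only; the wrapper name and cgroup path are made up):

  #include "cgroup_helpers.h"

  /* Set up the private cgroup2 mount, create a child cgroup, join it and
   * return its fd so BPF programs can be attached to it.
   */
  static int prepare_test_cgroup(char *path)
  {
          int cgroup_fd;

          if (setup_cgroup_environment())
                  return -1;

          cgroup_fd = create_and_get_cgroup(path);
          if (!cgroup_fd || join_cgroup(path)) {
                  cleanup_cgroup_environment();
                  return -1;
          }

          /* caller attaches its programs, then calls cleanup_cgroup_environment() */
          return cgroup_fd;
  }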
diff --git a/tools/testing/selftests/bpf/dev_cgroup.c b/tools/testing/selftests/bpf/dev_cgroup.c
new file mode 100644
index 000000000000..ce41a3475f27
--- /dev/null
+++ b/tools/testing/selftests/bpf/dev_cgroup.c
@@ -0,0 +1,60 @@
1/* Copyright (c) 2017 Facebook
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
6 */
7
8#include <linux/bpf.h>
9#include <linux/version.h>
10#include "bpf_helpers.h"
11
12SEC("cgroup/dev")
13int bpf_prog1(struct bpf_cgroup_dev_ctx *ctx)
14{
15 short type = ctx->access_type & 0xFFFF;
16#ifdef DEBUG
17 short access = ctx->access_type >> 16;
18 char fmt[] = " %d:%d \n";
19
20 switch (type) {
21 case BPF_DEVCG_DEV_BLOCK:
22 fmt[0] = 'b';
23 break;
24 case BPF_DEVCG_DEV_CHAR:
25 fmt[0] = 'c';
26 break;
27 default:
28 fmt[0] = '?';
29 break;
30 }
31
32 if (access & BPF_DEVCG_ACC_READ)
33 fmt[8] = 'r';
34
35 if (access & BPF_DEVCG_ACC_WRITE)
36 fmt[9] = 'w';
37
38 if (access & BPF_DEVCG_ACC_MKNOD)
39 fmt[10] = 'm';
40
41 bpf_trace_printk(fmt, sizeof(fmt), ctx->major, ctx->minor);
42#endif
43
44 /* Allow access to /dev/zero and /dev/random.
45 * Forbid everything else.
46 */
47 if (ctx->major != 1 || type != BPF_DEVCG_DEV_CHAR)
48 return 0;
49
50 switch (ctx->minor) {
51 case 5: /* 1:5 /dev/zero */
52 case 9: /* 1:9 /dev/urandom */
53 return 1;
54 }
55
56 return 0;
57}
58
59char _license[] SEC("license") = "GPL";
60__u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/tools/testing/selftests/bpf/sockmap_parse_prog.c b/tools/testing/selftests/bpf/sockmap_parse_prog.c
index fae3b96c3aa4..a1dec2b6d9c5 100644
--- a/tools/testing/selftests/bpf/sockmap_parse_prog.c
+++ b/tools/testing/selftests/bpf/sockmap_parse_prog.c
@@ -29,9 +29,6 @@ int bpf_prog1(struct __sk_buff *skb)
29 * fields. 29 * fields.
30 */ 30 */
31 d[7] = 1; 31 d[7] = 1;
32
33 bpf_printk("parse: data[0] = (%u): local_port %i remote %i\n",
34 d[0], lport, bpf_ntohl(rport));
35 return skb->len; 32 return skb->len;
36} 33}
37 34
diff --git a/tools/testing/selftests/bpf/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/sockmap_verdict_prog.c
index 2cd2d552938b..d7bea972cb21 100644
--- a/tools/testing/selftests/bpf/sockmap_verdict_prog.c
+++ b/tools/testing/selftests/bpf/sockmap_verdict_prog.c
@@ -58,8 +58,6 @@ int bpf_prog2(struct __sk_buff *skb)
58 d[6] = 0xe; 58 d[6] = 0xe;
59 d[7] = 0xf; 59 d[7] = 0xf;
60 60
61 bpf_printk("verdict: data[0] = redir(%u:%u)\n", map, sk);
62
63 if (!map) 61 if (!map)
64 return bpf_sk_redirect_map(skb, &sock_map_rx, sk, 0); 62 return bpf_sk_redirect_map(skb, &sock_map_rx, sk, 0);
65 return bpf_sk_redirect_map(skb, &sock_map_tx, sk, 0); 63 return bpf_sk_redirect_map(skb, &sock_map_tx, sk, 0);
diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c
new file mode 100644
index 000000000000..02c85d6c89b0
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_dev_cgroup.c
@@ -0,0 +1,93 @@
1/* Copyright (c) 2017 Facebook
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
6 */
7
8#include <stdio.h>
9#include <stdlib.h>
10#include <string.h>
11#include <errno.h>
12#include <assert.h>
13
14#include <linux/bpf.h>
15#include <bpf/bpf.h>
16#include <bpf/libbpf.h>
17
18#include "cgroup_helpers.h"
19
20#define DEV_CGROUP_PROG "./dev_cgroup.o"
21
22#define TEST_CGROUP "test-bpf-based-device-cgroup/"
23
24int main(int argc, char **argv)
25{
26 struct bpf_object *obj;
27 int error = EXIT_FAILURE;
28 int prog_fd, cgroup_fd;
29 __u32 prog_cnt;
30
31 if (bpf_prog_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE,
32 &obj, &prog_fd)) {
33 printf("Failed to load DEV_CGROUP program\n");
34 goto err;
35 }
36
37 if (setup_cgroup_environment()) {
38 printf("Failed to load DEV_CGROUP program\n");
39 goto err;
40 }
41
42 /* Create a cgroup, get fd, and join it */
43 cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
44 if (!cgroup_fd) {
45 printf("Failed to create test cgroup\n");
46 goto err;
47 }
48
49 if (join_cgroup(TEST_CGROUP)) {
50 printf("Failed to join cgroup\n");
51 goto err;
52 }
53
54 /* Attach bpf program */
55 if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_DEVICE, 0)) {
56 printf("Failed to attach DEV_CGROUP program");
57 goto err;
58 }
59
60 if (bpf_prog_query(cgroup_fd, BPF_CGROUP_DEVICE, 0, NULL, NULL,
61 &prog_cnt)) {
62 printf("Failed to query attached programs");
63 goto err;
64 }
65
66 /* All operations with /dev/zero and /dev/urandom are allowed,
67 * everything else is forbidden.
68 */
69 assert(system("rm -f /tmp/test_dev_cgroup_null") == 0);
70 assert(system("mknod /tmp/test_dev_cgroup_null c 1 3"));
71 assert(system("rm -f /tmp/test_dev_cgroup_null") == 0);
72
73 /* /dev/zero is whitelisted */
74 assert(system("rm -f /tmp/test_dev_cgroup_zero") == 0);
75 assert(system("mknod /tmp/test_dev_cgroup_zero c 1 5") == 0);
76 assert(system("rm -f /tmp/test_dev_cgroup_zero") == 0);
77
78 assert(system("dd if=/dev/urandom of=/dev/zero count=64") == 0);
79
80 /* src is allowed, target is forbidden */
81 assert(system("dd if=/dev/urandom of=/dev/full count=64"));
82
83 /* src is forbidden, target is allowed */
84 assert(system("dd if=/dev/random of=/dev/zero count=64"));
85
86 error = 0;
87 printf("test_dev_cgroup:PASS\n");
88
89err:
90 cleanup_cgroup_environment();
91
92 return error;
93}
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
index f93a333cbf2c..f61480641b6e 100644
--- a/tools/testing/selftests/bpf/test_lpm_map.c
+++ b/tools/testing/selftests/bpf/test_lpm_map.c
@@ -32,6 +32,10 @@ struct tlpm_node {
32 uint8_t key[]; 32 uint8_t key[];
33}; 33};
34 34
35static struct tlpm_node *tlpm_match(struct tlpm_node *list,
36 const uint8_t *key,
37 size_t n_bits);
38
35static struct tlpm_node *tlpm_add(struct tlpm_node *list, 39static struct tlpm_node *tlpm_add(struct tlpm_node *list,
36 const uint8_t *key, 40 const uint8_t *key,
37 size_t n_bits) 41 size_t n_bits)
@@ -39,9 +43,17 @@ static struct tlpm_node *tlpm_add(struct tlpm_node *list,
39 struct tlpm_node *node; 43 struct tlpm_node *node;
40 size_t n; 44 size_t n;
41 45
46 n = (n_bits + 7) / 8;
47
48 /* 'overwrite' an equivalent entry if one already exists */
49 node = tlpm_match(list, key, n_bits);
50 if (node && node->n_bits == n_bits) {
51 memcpy(node->key, key, n);
52 return list;
53 }
54
42 /* add new entry with @key/@n_bits to @list and return new head */ 55 /* add new entry with @key/@n_bits to @list and return new head */
43 56
44 n = (n_bits + 7) / 8;
45 node = malloc(sizeof(*node) + n); 57 node = malloc(sizeof(*node) + n);
46 assert(node); 58 assert(node);
47 59
@@ -93,6 +105,34 @@ static struct tlpm_node *tlpm_match(struct tlpm_node *list,
93 return best; 105 return best;
94} 106}
95 107
108static struct tlpm_node *tlpm_delete(struct tlpm_node *list,
109 const uint8_t *key,
110 size_t n_bits)
111{
112 struct tlpm_node *best = tlpm_match(list, key, n_bits);
113 struct tlpm_node *node;
114
115 if (!best || best->n_bits != n_bits)
116 return list;
117
118 if (best == list) {
119 node = best->next;
120 free(best);
121 return node;
122 }
123
124 for (node = list; node; node = node->next) {
125 if (node->next == best) {
126 node->next = best->next;
127 free(best);
128 return list;
129 }
130 }
131 /* should never get here */
132 assert(0);
133 return list;
134}
135
96static void test_lpm_basic(void) 136static void test_lpm_basic(void)
97{ 137{
98 struct tlpm_node *list = NULL, *t1, *t2; 138 struct tlpm_node *list = NULL, *t1, *t2;
@@ -115,6 +155,13 @@ static void test_lpm_basic(void)
115 assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 15)); 155 assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 15));
116 assert(!tlpm_match(list, (uint8_t[]){ 0x7f, 0xff }, 16)); 156 assert(!tlpm_match(list, (uint8_t[]){ 0x7f, 0xff }, 16));
117 157
158 list = tlpm_delete(list, (uint8_t[]){ 0xff, 0xff }, 16);
159 assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8));
160 assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16));
161
162 list = tlpm_delete(list, (uint8_t[]){ 0xff }, 8);
163 assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 8));
164
118 tlpm_clear(list); 165 tlpm_clear(list);
119} 166}
120 167
@@ -159,7 +206,7 @@ static void test_lpm_order(void)
159 206
160static void test_lpm_map(int keysize) 207static void test_lpm_map(int keysize)
161{ 208{
162 size_t i, j, n_matches, n_nodes, n_lookups; 209 size_t i, j, n_matches, n_matches_after_delete, n_nodes, n_lookups;
163 struct tlpm_node *t, *list = NULL; 210 struct tlpm_node *t, *list = NULL;
164 struct bpf_lpm_trie_key *key; 211 struct bpf_lpm_trie_key *key;
165 uint8_t *data, *value; 212 uint8_t *data, *value;
@@ -171,6 +218,7 @@ static void test_lpm_map(int keysize)
171 */ 218 */
172 219
173 n_matches = 0; 220 n_matches = 0;
221 n_matches_after_delete = 0;
174 n_nodes = 1 << 8; 222 n_nodes = 1 << 8;
175 n_lookups = 1 << 16; 223 n_lookups = 1 << 16;
176 224
@@ -224,15 +272,54 @@ static void test_lpm_map(int keysize)
224 } 272 }
225 } 273 }
226 274
275 /* Remove the first half of the elements in the tlpm and the
276 * corresponding nodes from the bpf-lpm. Then run the same
277 * large number of random lookups in both and make sure they match.
278 * Note: we need to count the number of nodes actually inserted
279 * since there may have been duplicates.
280 */
281 for (i = 0, t = list; t; i++, t = t->next)
282 ;
283 for (j = 0; j < i / 2; ++j) {
284 key->prefixlen = list->n_bits;
285 memcpy(key->data, list->key, keysize);
286 r = bpf_map_delete_elem(map, key);
287 assert(!r);
288 list = tlpm_delete(list, list->key, list->n_bits);
289 assert(list);
290 }
291 for (i = 0; i < n_lookups; ++i) {
292 for (j = 0; j < keysize; ++j)
293 data[j] = rand() & 0xff;
294
295 t = tlpm_match(list, data, 8 * keysize);
296
297 key->prefixlen = 8 * keysize;
298 memcpy(key->data, data, keysize);
299 r = bpf_map_lookup_elem(map, key, value);
300 assert(!r || errno == ENOENT);
301 assert(!t == !!r);
302
303 if (t) {
304 ++n_matches_after_delete;
305 assert(t->n_bits == value[keysize]);
306 for (j = 0; j < t->n_bits; ++j)
307 assert((t->key[j / 8] & (1 << (7 - j % 8))) ==
308 (value[j / 8] & (1 << (7 - j % 8))));
309 }
310 }
311
227 close(map); 312 close(map);
228 tlpm_clear(list); 313 tlpm_clear(list);
229 314
230 /* With 255 random nodes in the map, we are pretty likely to match 315 /* With 255 random nodes in the map, we are pretty likely to match
231 * something on every lookup. For statistics, use this: 316 * something on every lookup. For statistics, use this:
232 * 317 *
233 * printf(" nodes: %zu\n" 318 * printf(" nodes: %zu\n"
234 * "lookups: %zu\n" 319 * " lookups: %zu\n"
235 * "matches: %zu\n", n_nodes, n_lookups, n_matches); 320 * " matches: %zu\n"
321 * "matches(delete): %zu\n",
322 * n_nodes, n_lookups, n_matches, n_matches_after_delete);
236 */ 323 */
237} 324}
238 325
@@ -332,6 +419,108 @@ static void test_lpm_ipaddr(void)
332 close(map_fd_ipv6); 419 close(map_fd_ipv6);
333} 420}
334 421
422static void test_lpm_delete(void)
423{
424 struct bpf_lpm_trie_key *key;
425 size_t key_size;
426 int map_fd;
427 __u64 value;
428
429 key_size = sizeof(*key) + sizeof(__u32);
430 key = alloca(key_size);
431
432 map_fd = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE,
433 key_size, sizeof(value),
434 100, BPF_F_NO_PREALLOC);
435 assert(map_fd >= 0);
436
437 /* Add nodes:
438 * 192.168.0.0/16 (1)
439 * 192.168.0.0/24 (2)
440 * 192.168.128.0/24 (3)
441 * 192.168.1.0/24 (4)
442 *
443 * (1)
444 * / \
445 * (IM) (3)
446 * / \
447 * (2) (4)
448 */
449 value = 1;
450 key->prefixlen = 16;
451 inet_pton(AF_INET, "192.168.0.0", key->data);
452 assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0);
453
454 value = 2;
455 key->prefixlen = 24;
456 inet_pton(AF_INET, "192.168.0.0", key->data);
457 assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0);
458
459 value = 3;
460 key->prefixlen = 24;
461 inet_pton(AF_INET, "192.168.128.0", key->data);
462 assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0);
463
464 value = 4;
465 key->prefixlen = 24;
466 inet_pton(AF_INET, "192.168.1.0", key->data);
467 assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0);
468
469 /* remove non-existent node */
470 key->prefixlen = 32;
471 inet_pton(AF_INET, "10.0.0.1", key->data);
472 assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 &&
473 errno == ENOENT);
474
475 /* assert initial lookup */
476 key->prefixlen = 32;
477 inet_pton(AF_INET, "192.168.0.1", key->data);
478 assert(bpf_map_lookup_elem(map_fd, key, &value) == 0);
479 assert(value == 2);
480
481 /* remove leaf node */
482 key->prefixlen = 24;
483 inet_pton(AF_INET, "192.168.0.0", key->data);
484 assert(bpf_map_delete_elem(map_fd, key) == 0);
485
486 key->prefixlen = 32;
487 inet_pton(AF_INET, "192.168.0.1", key->data);
488 assert(bpf_map_lookup_elem(map_fd, key, &value) == 0);
489 assert(value == 1);
490
491 /* remove leaf (and intermediary) node */
492 key->prefixlen = 24;
493 inet_pton(AF_INET, "192.168.1.0", key->data);
494 assert(bpf_map_delete_elem(map_fd, key) == 0);
495
496 key->prefixlen = 32;
497 inet_pton(AF_INET, "192.168.1.1", key->data);
498 assert(bpf_map_lookup_elem(map_fd, key, &value) == 0);
499 assert(value == 1);
500
501 /* remove root node */
502 key->prefixlen = 16;
503 inet_pton(AF_INET, "192.168.0.0", key->data);
504 assert(bpf_map_delete_elem(map_fd, key) == 0);
505
506 key->prefixlen = 32;
507 inet_pton(AF_INET, "192.168.128.1", key->data);
508 assert(bpf_map_lookup_elem(map_fd, key, &value) == 0);
509 assert(value == 3);
510
511 /* remove last node */
512 key->prefixlen = 24;
513 inet_pton(AF_INET, "192.168.128.0", key->data);
514 assert(bpf_map_delete_elem(map_fd, key) == 0);
515
516 key->prefixlen = 32;
517 inet_pton(AF_INET, "192.168.128.1", key->data);
518 assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 &&
519 errno == ENOENT);
520
521 close(map_fd);
522}
523
335int main(void) 524int main(void)
336{ 525{
337 struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY }; 526 struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
@@ -354,6 +543,8 @@ int main(void)
354 543
355 test_lpm_ipaddr(); 544 test_lpm_ipaddr();
356 545
546 test_lpm_delete();
547
357 printf("test_lpm: OK\n"); 548 printf("test_lpm: OK\n");
358 return 0; 549 return 0;
359} 550}
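
The test_lpm_delete() hunk above exercises the new delete path for BPF_MAP_TYPE_LPM_TRIE maps from user space: build a struct bpf_lpm_trie_key holding the prefix length plus the address bytes and pass it to bpf_map_delete_elem(). A minimal standalone sketch of that pattern, assuming the bpf_create_map()/bpf_map_update_elem()/bpf_map_delete_elem() wrappers from tools/lib/bpf that the selftest also uses (the <bpf/bpf.h> include path and map size are assumptions of the sketch, not part of the patch):

#include <alloca.h>
#include <assert.h>
#include <errno.h>
#include <arpa/inet.h>
#include <sys/resource.h>
#include <linux/bpf.h>
#include <linux/types.h>
#include <bpf/bpf.h>	/* assumed include path for the tools/lib/bpf wrappers */

int main(void)
{
	struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };
	size_t key_size = sizeof(struct bpf_lpm_trie_key) + sizeof(__u32); /* IPv4 */
	struct bpf_lpm_trie_key *key = alloca(key_size);
	__u64 value = 1;
	int map_fd;

	setrlimit(RLIMIT_MEMLOCK, &r);

	/* LPM tries must be created with BPF_F_NO_PREALLOC */
	map_fd = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, key_size, sizeof(value),
				16, BPF_F_NO_PREALLOC);
	assert(map_fd >= 0);

	/* insert 192.168.0.0/16, then delete it again */
	key->prefixlen = 16;
	inet_pton(AF_INET, "192.168.0.0", key->data);
	assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0);
	assert(bpf_map_delete_elem(map_fd, key) == 0);

	/* deleting the same prefix a second time fails with ENOENT */
	assert(bpf_map_delete_elem(map_fd, key) == -1 && errno == ENOENT);
	return 0;
}

As in the test above, once the last node covering a prefix is gone, lookups that previously matched it either fall back to a shorter covering prefix or fail with ENOENT.
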
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 50ce52d2013d..040356ecc862 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -1043,6 +1043,51 @@ static void test_map_parallel(void)
1043 assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT); 1043 assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT);
1044} 1044}
1045 1045
1046static void test_map_rdonly(void)
1047{
1048 int fd, key = 0, value = 0;
1049
1050 fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
1051 MAP_SIZE, map_flags | BPF_F_RDONLY);
1052 if (fd < 0) {
1053 printf("Failed to create map for read only test '%s'!\n",
1054 strerror(errno));
1055 exit(1);
1056 }
1057
1058 key = 1;
1059 value = 1234;
1060 /* Try to insert key=1 element; this must fail with EPERM. */
1061 assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == -1 &&
1062 errno == EPERM);
1063
1064 /* Check that key=1 is not found. */
1065 assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT);
1066 assert(bpf_map_get_next_key(fd, &key, &value) == -1 && errno == ENOENT);
1067}
1068
1069static void test_map_wronly(void)
1070{
1071 int fd, key = 0, value = 0;
1072
1073 fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
1074 MAP_SIZE, map_flags | BPF_F_WRONLY);
1075 if (fd < 0) {
1076 printf("Failed to create map for read only test '%s'!\n",
1077 strerror(errno));
1078 exit(1);
1079 }
1080
1081 key = 1;
1082 value = 1234;
1083 /* Insert key=1 element. */
1084 assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
1085
1086 /* Check that reads on a write-only map fail with EPERM. */
1087 assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == EPERM);
1088 assert(bpf_map_get_next_key(fd, &key, &value) == -1 && errno == EPERM);
1089}
1090
1046static void run_all_tests(void) 1091static void run_all_tests(void)
1047{ 1092{
1048 test_hashmap(0, NULL); 1093 test_hashmap(0, NULL);
@@ -1060,6 +1105,9 @@ static void run_all_tests(void)
1060 test_map_large(); 1105 test_map_large();
1061 test_map_parallel(); 1106 test_map_parallel();
1062 test_map_stress(); 1107 test_map_stress();
1108
1109 test_map_rdonly();
1110 test_map_wronly();
1063} 1111}
1064 1112
1065int main(void) 1113int main(void)
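
The two hunks above cover the new BPF_F_RDONLY and BPF_F_WRONLY map creation flags: a file descriptor created read-only rejects updates with EPERM but still allows lookups, while a write-only descriptor accepts updates and rejects lookups and key iteration with EPERM. A condensed sketch of the read-only case, assuming the same bpf_create_map() wrapper from tools/lib/bpf (the <bpf/bpf.h> include path and the map size are assumptions of the sketch):

#include <assert.h>
#include <errno.h>
#include <sys/resource.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>	/* assumed include path for the tools/lib/bpf wrappers */

int main(void)
{
	struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };
	int fd, key = 1, value = 1234;

	setrlimit(RLIMIT_MEMLOCK, &r);

	fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
			    16, BPF_F_RDONLY);
	assert(fd >= 0);

	/* writes through a read-only fd are rejected ... */
	assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == -1 &&
	       errno == EPERM);

	/* ... while reads go through; nothing was inserted, so ENOENT */
	assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT);
	return 0;
}
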
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 11ee25cea227..6761be18a91f 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -10,6 +10,7 @@
10#include <string.h> 10#include <string.h>
11#include <assert.h> 11#include <assert.h>
12#include <stdlib.h> 12#include <stdlib.h>
13#include <time.h>
13 14
14#include <linux/types.h> 15#include <linux/types.h>
15typedef __u16 __sum16; 16typedef __u16 __sum16;
@@ -19,6 +20,8 @@ typedef __u16 __sum16;
19#include <linux/ip.h> 20#include <linux/ip.h>
20#include <linux/ipv6.h> 21#include <linux/ipv6.h>
21#include <linux/tcp.h> 22#include <linux/tcp.h>
23#include <linux/filter.h>
24#include <linux/unistd.h>
22 25
23#include <sys/wait.h> 26#include <sys/wait.h>
24#include <sys/resource.h> 27#include <sys/resource.h>
@@ -273,16 +276,26 @@ static void test_bpf_obj_id(void)
273 const int nr_iters = 2; 276 const int nr_iters = 2;
274 const char *file = "./test_obj_id.o"; 277 const char *file = "./test_obj_id.o";
275 const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable"; 278 const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
279 const char *expected_prog_name = "test_obj_id";
280 const char *expected_map_name = "test_map_id";
281 const __u64 nsec_per_sec = 1000000000;
276 282
277 struct bpf_object *objs[nr_iters]; 283 struct bpf_object *objs[nr_iters];
278 int prog_fds[nr_iters], map_fds[nr_iters]; 284 int prog_fds[nr_iters], map_fds[nr_iters];
279 /* +1 to test for the info_len returned by kernel */ 285 /* +1 to test for the info_len returned by kernel */
280 struct bpf_prog_info prog_infos[nr_iters + 1]; 286 struct bpf_prog_info prog_infos[nr_iters + 1];
281 struct bpf_map_info map_infos[nr_iters + 1]; 287 struct bpf_map_info map_infos[nr_iters + 1];
288 /* Each prog only uses one map. +1 to test nr_map_ids
289 * returned by kernel.
290 */
291 __u32 map_ids[nr_iters + 1];
282 char jited_insns[128], xlated_insns[128], zeros[128]; 292 char jited_insns[128], xlated_insns[128], zeros[128];
283 __u32 i, next_id, info_len, nr_id_found, duration = 0; 293 __u32 i, next_id, info_len, nr_id_found, duration = 0;
294 struct timespec real_time_ts, boot_time_ts;
284 int sysctl_fd, jit_enabled = 0, err = 0; 295 int sysctl_fd, jit_enabled = 0, err = 0;
285 __u64 array_value; 296 __u64 array_value;
297 uid_t my_uid = getuid();
298 time_t now, load_time;
286 299
287 sysctl_fd = open(jit_sysctl, 0, O_RDONLY); 300 sysctl_fd = open(jit_sysctl, 0, O_RDONLY);
288 if (sysctl_fd != -1) { 301 if (sysctl_fd != -1) {
@@ -307,6 +320,7 @@ static void test_bpf_obj_id(void)
307 /* Check bpf_obj_get_info_by_fd() */ 320 /* Check bpf_obj_get_info_by_fd() */
308 bzero(zeros, sizeof(zeros)); 321 bzero(zeros, sizeof(zeros));
309 for (i = 0; i < nr_iters; i++) { 322 for (i = 0; i < nr_iters; i++) {
323 now = time(NULL);
310 err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER, 324 err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
311 &objs[i], &prog_fds[i]); 325 &objs[i], &prog_fds[i]);
312 /* test_obj_id.o is a dumb prog. It should never fail 326 /* test_obj_id.o is a dumb prog. It should never fail
@@ -316,6 +330,38 @@ static void test_bpf_obj_id(void)
316 error_cnt++; 330 error_cnt++;
317 assert(!err); 331 assert(!err);
318 332
333 /* Insert a magic value into the map */
334 map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
335 assert(map_fds[i] >= 0);
336 err = bpf_map_update_elem(map_fds[i], &array_key,
337 &array_magic_value, 0);
338 assert(!err);
339
340 /* Check getting map info */
341 info_len = sizeof(struct bpf_map_info) * 2;
342 bzero(&map_infos[i], info_len);
343 err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
344 &info_len);
345 if (CHECK(err ||
346 map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
347 map_infos[i].key_size != sizeof(__u32) ||
348 map_infos[i].value_size != sizeof(__u64) ||
349 map_infos[i].max_entries != 1 ||
350 map_infos[i].map_flags != 0 ||
351 info_len != sizeof(struct bpf_map_info) ||
352 strcmp((char *)map_infos[i].name, expected_map_name),
353 "get-map-info(fd)",
354 "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
355 err, errno,
356 map_infos[i].type, BPF_MAP_TYPE_ARRAY,
357 info_len, sizeof(struct bpf_map_info),
358 map_infos[i].key_size,
359 map_infos[i].value_size,
360 map_infos[i].max_entries,
361 map_infos[i].map_flags,
362 map_infos[i].name, expected_map_name))
363 goto done;
364
319 /* Check getting prog info */ 365 /* Check getting prog info */
320 info_len = sizeof(struct bpf_prog_info) * 2; 366 info_len = sizeof(struct bpf_prog_info) * 2;
321 bzero(&prog_infos[i], info_len); 367 bzero(&prog_infos[i], info_len);
@@ -325,8 +371,16 @@ static void test_bpf_obj_id(void)
325 prog_infos[i].jited_prog_len = sizeof(jited_insns); 371 prog_infos[i].jited_prog_len = sizeof(jited_insns);
326 prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns); 372 prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
327 prog_infos[i].xlated_prog_len = sizeof(xlated_insns); 373 prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
374 prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
375 prog_infos[i].nr_map_ids = 2;
376 err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
377 assert(!err);
378 err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
379 assert(!err);
328 err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i], 380 err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
329 &info_len); 381 &info_len);
382 load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
383 + (prog_infos[i].load_time / nsec_per_sec);
330 if (CHECK(err || 384 if (CHECK(err ||
331 prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER || 385 prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
332 info_len != sizeof(struct bpf_prog_info) || 386 info_len != sizeof(struct bpf_prog_info) ||
@@ -334,9 +388,14 @@ static void test_bpf_obj_id(void)
334 (jit_enabled && 388 (jit_enabled &&
335 !memcmp(jited_insns, zeros, sizeof(zeros))) || 389 !memcmp(jited_insns, zeros, sizeof(zeros))) ||
336 !prog_infos[i].xlated_prog_len || 390 !prog_infos[i].xlated_prog_len ||
337 !memcmp(xlated_insns, zeros, sizeof(zeros)), 391 !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
392 load_time < now - 60 || load_time > now + 60 ||
393 prog_infos[i].created_by_uid != my_uid ||
394 prog_infos[i].nr_map_ids != 1 ||
395 *(int *)prog_infos[i].map_ids != map_infos[i].id ||
396 strcmp((char *)prog_infos[i].name, expected_prog_name),
338 "get-prog-info(fd)", 397 "get-prog-info(fd)",
339 "err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d\n", 398 "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
340 err, errno, i, 399 err, errno, i,
341 prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER, 400 prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
342 info_len, sizeof(struct bpf_prog_info), 401 info_len, sizeof(struct bpf_prog_info),
@@ -344,36 +403,12 @@ static void test_bpf_obj_id(void)
344 prog_infos[i].jited_prog_len, 403 prog_infos[i].jited_prog_len,
345 prog_infos[i].xlated_prog_len, 404 prog_infos[i].xlated_prog_len,
346 !!memcmp(jited_insns, zeros, sizeof(zeros)), 405 !!memcmp(jited_insns, zeros, sizeof(zeros)),
347 !!memcmp(xlated_insns, zeros, sizeof(zeros)))) 406 !!memcmp(xlated_insns, zeros, sizeof(zeros)),
348 goto done; 407 load_time, now,
349 408 prog_infos[i].created_by_uid, my_uid,
350 map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id"); 409 prog_infos[i].nr_map_ids, 1,
351 assert(map_fds[i] >= 0); 410 *(int *)prog_infos[i].map_ids, map_infos[i].id,
352 err = bpf_map_update_elem(map_fds[i], &array_key, 411 prog_infos[i].name, expected_prog_name))
353 &array_magic_value, 0);
354 assert(!err);
355
356 /* Check getting map info */
357 info_len = sizeof(struct bpf_map_info) * 2;
358 bzero(&map_infos[i], info_len);
359 err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
360 &info_len);
361 if (CHECK(err ||
362 map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
363 map_infos[i].key_size != sizeof(__u32) ||
364 map_infos[i].value_size != sizeof(__u64) ||
365 map_infos[i].max_entries != 1 ||
366 map_infos[i].map_flags != 0 ||
367 info_len != sizeof(struct bpf_map_info),
368 "get-map-info(fd)",
369 "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X\n",
370 err, errno,
371 map_infos[i].type, BPF_MAP_TYPE_ARRAY,
372 info_len, sizeof(struct bpf_map_info),
373 map_infos[i].key_size,
374 map_infos[i].value_size,
375 map_infos[i].max_entries,
376 map_infos[i].map_flags))
377 goto done; 412 goto done;
378 } 413 }
379 414
@@ -382,6 +417,7 @@ static void test_bpf_obj_id(void)
382 next_id = 0; 417 next_id = 0;
383 while (!bpf_prog_get_next_id(next_id, &next_id)) { 418 while (!bpf_prog_get_next_id(next_id, &next_id)) {
384 struct bpf_prog_info prog_info = {}; 419 struct bpf_prog_info prog_info = {};
420 __u32 saved_map_id;
385 int prog_fd; 421 int prog_fd;
386 422
387 info_len = sizeof(prog_info); 423 info_len = sizeof(prog_info);
@@ -404,16 +440,33 @@ static void test_bpf_obj_id(void)
404 440
405 nr_id_found++; 441 nr_id_found++;
406 442
443 /* Negative test:
444 * prog_info.nr_map_ids = 1
445 * prog_info.map_ids = NULL
446 */
447 prog_info.nr_map_ids = 1;
448 err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
449 if (CHECK(!err || errno != EFAULT,
450 "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
451 err, errno, EFAULT))
452 break;
453 bzero(&prog_info, sizeof(prog_info));
454 info_len = sizeof(prog_info);
455
456 saved_map_id = *(int *)(prog_infos[i].map_ids);
457 prog_info.map_ids = prog_infos[i].map_ids;
458 prog_info.nr_map_ids = 2;
407 err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len); 459 err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
408 prog_infos[i].jited_prog_insns = 0; 460 prog_infos[i].jited_prog_insns = 0;
409 prog_infos[i].xlated_prog_insns = 0; 461 prog_infos[i].xlated_prog_insns = 0;
410 CHECK(err || info_len != sizeof(struct bpf_prog_info) || 462 CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
411 memcmp(&prog_info, &prog_infos[i], info_len), 463 memcmp(&prog_info, &prog_infos[i], info_len) ||
464 *(int *)prog_info.map_ids != saved_map_id,
412 "get-prog-info(next_id->fd)", 465 "get-prog-info(next_id->fd)",
413 "err %d errno %d info_len %u(%lu) memcmp %d\n", 466 "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
414 err, errno, info_len, sizeof(struct bpf_prog_info), 467 err, errno, info_len, sizeof(struct bpf_prog_info),
415 memcmp(&prog_info, &prog_infos[i], info_len)); 468 memcmp(&prog_info, &prog_infos[i], info_len),
416 469 *(int *)prog_info.map_ids, saved_map_id);
417 close(prog_fd); 470 close(prog_fd);
418 } 471 }
419 CHECK(nr_id_found != nr_iters, 472 CHECK(nr_id_found != nr_iters,
@@ -456,7 +509,7 @@ static void test_bpf_obj_id(void)
456 memcmp(&map_info, &map_infos[i], info_len) || 509 memcmp(&map_info, &map_infos[i], info_len) ||
457 array_value != array_magic_value, 510 array_value != array_magic_value,
458 "check get-map-info(next_id->fd)", 511 "check get-map-info(next_id->fd)",
459 "err %d errno %d info_len %u(%lu) memcmp %d array_value %llu(%llu)\n", 512 "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
460 err, errno, info_len, sizeof(struct bpf_map_info), 513 err, errno, info_len, sizeof(struct bpf_map_info),
461 memcmp(&map_info, &map_infos[i], info_len), 514 memcmp(&map_info, &map_infos[i], info_len),
462 array_value, array_magic_value); 515 array_value, array_magic_value);
@@ -495,6 +548,75 @@ static void test_pkt_md_access(void)
495 bpf_object__close(obj); 548 bpf_object__close(obj);
496} 549}
497 550
551static void test_obj_name(void)
552{
553 struct {
554 const char *name;
555 int success;
556 int expected_errno;
557 } tests[] = {
558 { "", 1, 0 },
559 { "_123456789ABCDE", 1, 0 },
560 { "_123456789ABCDEF", 0, EINVAL },
561 { "_123456789ABCD\n", 0, EINVAL },
562 };
563 struct bpf_insn prog[] = {
564 BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
565 BPF_EXIT_INSN(),
566 };
567 __u32 duration = 0;
568 int i;
569
570 for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
571 size_t name_len = strlen(tests[i].name) + 1;
572 union bpf_attr attr;
573 size_t ncopy;
574 int fd;
575
576 /* test different attr.prog_name during BPF_PROG_LOAD */
577 ncopy = name_len < sizeof(attr.prog_name) ?
578 name_len : sizeof(attr.prog_name);
579 bzero(&attr, sizeof(attr));
580 attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
581 attr.insn_cnt = 2;
582 attr.insns = ptr_to_u64(prog);
583 attr.license = ptr_to_u64("");
584 memcpy(attr.prog_name, tests[i].name, ncopy);
585
586 fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
587 CHECK((tests[i].success && fd < 0) ||
588 (!tests[i].success && fd != -1) ||
589 (!tests[i].success && errno != tests[i].expected_errno),
590 "check-bpf-prog-name",
591 "fd %d(%d) errno %d(%d)\n",
592 fd, tests[i].success, errno, tests[i].expected_errno);
593
594 if (fd != -1)
595 close(fd);
596
597 /* test different attr.map_name during BPF_MAP_CREATE */
598 ncopy = name_len < sizeof(attr.map_name) ?
599 name_len : sizeof(attr.map_name);
600 bzero(&attr, sizeof(attr));
601 attr.map_type = BPF_MAP_TYPE_ARRAY;
602 attr.key_size = 4;
603 attr.value_size = 4;
604 attr.max_entries = 1;
605 attr.map_flags = 0;
606 memcpy(attr.map_name, tests[i].name, ncopy);
607 fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
608 CHECK((tests[i].success && fd < 0) ||
609 (!tests[i].success && fd != -1) ||
610 (!tests[i].success && errno != tests[i].expected_errno),
611 "check-bpf-map-name",
612 "fd %d(%d) errno %d(%d)\n",
613 fd, tests[i].success, errno, tests[i].expected_errno);
614
615 if (fd != -1)
616 close(fd);
617 }
618}
619
498int main(void) 620int main(void)
499{ 621{
500 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; 622 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
@@ -507,6 +629,7 @@ int main(void)
507 test_tcp_estats(); 629 test_tcp_estats();
508 test_bpf_obj_id(); 630 test_bpf_obj_id();
509 test_pkt_md_access(); 631 test_pkt_md_access();
632 test_obj_name();
510 633
511 printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt); 634 printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
512 return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS; 635 return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
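
The test_obj_name() addition above probes the new attr.prog_name and attr.map_name fields: a 15-character name is accepted, a 16-character one is rejected with EINVAL (the buffers are BPF_OBJ_NAME_LEN, i.e. 16 bytes including the NUL terminator), and so is a name containing anything other than alphanumerics and underscores, hence the trailing '\n' case. A minimal sketch of naming a map through the raw bpf(2) syscall, mirroring the map half of the test; the name "demo_array_map" is only an example, and the sketch assumes the updated uapi linux/bpf.h from this series, which adds the map_name field:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>	/* assumes the header updated by this series (map_name field) */

int main(void)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_ARRAY;
	attr.key_size = 4;
	attr.value_size = 4;
	attr.max_entries = 1;
	/* 14 characters plus the NUL terminator fits the 16-byte name buffer */
	memcpy(attr.map_name, "demo_array_map", sizeof("demo_array_map"));

	fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	if (fd < 0)
		printf("BPF_MAP_CREATE failed: errno %d\n", errno);
	else
		close(fd);
	return 0;
}
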
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 64ae21f64489..b51017404c62 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -422,9 +422,7 @@ static struct bpf_test tests[] = {
422 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), 422 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
423 BPF_EXIT_INSN(), 423 BPF_EXIT_INSN(),
424 }, 424 },
425 .errstr_unpriv = "R1 subtraction from stack pointer", 425 .errstr = "R1 subtraction from stack pointer",
426 .result_unpriv = REJECT,
427 .errstr = "R1 invalid mem access",
428 .result = REJECT, 426 .result = REJECT,
429 }, 427 },
430 { 428 {
@@ -606,7 +604,6 @@ static struct bpf_test tests[] = {
606 }, 604 },
607 .errstr = "misaligned stack access", 605 .errstr = "misaligned stack access",
608 .result = REJECT, 606 .result = REJECT,
609 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
610 }, 607 },
611 { 608 {
612 "invalid map_fd for function call", 609 "invalid map_fd for function call",
@@ -1797,7 +1794,6 @@ static struct bpf_test tests[] = {
1797 }, 1794 },
1798 .result = REJECT, 1795 .result = REJECT,
1799 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8", 1796 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
1800 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1801 }, 1797 },
1802 { 1798 {
1803 "PTR_TO_STACK store/load - bad alignment on reg", 1799 "PTR_TO_STACK store/load - bad alignment on reg",
@@ -1810,7 +1806,6 @@ static struct bpf_test tests[] = {
1810 }, 1806 },
1811 .result = REJECT, 1807 .result = REJECT,
1812 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8", 1808 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
1813 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1814 }, 1809 },
1815 { 1810 {
1816 "PTR_TO_STACK store/load - out of bounds low", 1811 "PTR_TO_STACK store/load - out of bounds low",
@@ -1862,9 +1857,8 @@ static struct bpf_test tests[] = {
1862 BPF_MOV64_IMM(BPF_REG_0, 0), 1857 BPF_MOV64_IMM(BPF_REG_0, 0),
1863 BPF_EXIT_INSN(), 1858 BPF_EXIT_INSN(),
1864 }, 1859 },
1865 .result = ACCEPT, 1860 .result = REJECT,
1866 .result_unpriv = REJECT, 1861 .errstr = "R1 pointer += pointer",
1867 .errstr_unpriv = "R1 pointer += pointer",
1868 }, 1862 },
1869 { 1863 {
1870 "unpriv: neg pointer", 1864 "unpriv: neg pointer",
@@ -2592,7 +2586,8 @@ static struct bpf_test tests[] = {
2592 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2586 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2593 offsetof(struct __sk_buff, data)), 2587 offsetof(struct __sk_buff, data)),
2594 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4), 2588 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
2595 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), 2589 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2590 offsetof(struct __sk_buff, len)),
2596 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49), 2591 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
2597 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49), 2592 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
2598 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2), 2593 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
@@ -2899,7 +2894,7 @@ static struct bpf_test tests[] = {
2899 BPF_MOV64_IMM(BPF_REG_0, 0), 2894 BPF_MOV64_IMM(BPF_REG_0, 0),
2900 BPF_EXIT_INSN(), 2895 BPF_EXIT_INSN(),
2901 }, 2896 },
2902 .errstr = "invalid access to packet", 2897 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
2903 .result = REJECT, 2898 .result = REJECT,
2904 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2899 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2905 }, 2900 },
@@ -3579,7 +3574,7 @@ static struct bpf_test tests[] = {
3579 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3574 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3580 }, 3575 },
3581 { 3576 {
3582 "helper access to packet: test19, cls helper fail range zero", 3577 "helper access to packet: test19, cls helper range zero",
3583 .insns = { 3578 .insns = {
3584 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 3579 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3585 offsetof(struct __sk_buff, data)), 3580 offsetof(struct __sk_buff, data)),
@@ -3599,8 +3594,7 @@ static struct bpf_test tests[] = {
3599 BPF_MOV64_IMM(BPF_REG_0, 0), 3594 BPF_MOV64_IMM(BPF_REG_0, 0),
3600 BPF_EXIT_INSN(), 3595 BPF_EXIT_INSN(),
3601 }, 3596 },
3602 .result = REJECT, 3597 .result = ACCEPT,
3603 .errstr = "invalid access to packet",
3604 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3598 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3605 }, 3599 },
3606 { 3600 {
@@ -3886,9 +3880,7 @@ static struct bpf_test tests[] = {
3886 BPF_EXIT_INSN(), 3880 BPF_EXIT_INSN(),
3887 }, 3881 },
3888 .fixup_map2 = { 3, 11 }, 3882 .fixup_map2 = { 3, 11 },
3889 .errstr_unpriv = "R0 pointer += pointer", 3883 .errstr = "R0 pointer += pointer",
3890 .errstr = "R0 invalid mem access 'inv'",
3891 .result_unpriv = REJECT,
3892 .result = REJECT, 3884 .result = REJECT,
3893 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 3885 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3894 }, 3886 },
@@ -3929,7 +3921,7 @@ static struct bpf_test tests[] = {
3929 BPF_EXIT_INSN(), 3921 BPF_EXIT_INSN(),
3930 }, 3922 },
3931 .fixup_map1 = { 4 }, 3923 .fixup_map1 = { 4 },
3932 .errstr = "R4 invalid mem access", 3924 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
3933 .result = REJECT, 3925 .result = REJECT,
3934 .prog_type = BPF_PROG_TYPE_SCHED_CLS 3926 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3935 }, 3927 },
@@ -3950,7 +3942,7 @@ static struct bpf_test tests[] = {
3950 BPF_EXIT_INSN(), 3942 BPF_EXIT_INSN(),
3951 }, 3943 },
3952 .fixup_map1 = { 4 }, 3944 .fixup_map1 = { 4 },
3953 .errstr = "R4 invalid mem access", 3945 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
3954 .result = REJECT, 3946 .result = REJECT,
3955 .prog_type = BPF_PROG_TYPE_SCHED_CLS 3947 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3956 }, 3948 },
@@ -3971,7 +3963,7 @@ static struct bpf_test tests[] = {
3971 BPF_EXIT_INSN(), 3963 BPF_EXIT_INSN(),
3972 }, 3964 },
3973 .fixup_map1 = { 4 }, 3965 .fixup_map1 = { 4 },
3974 .errstr = "R4 invalid mem access", 3966 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
3975 .result = REJECT, 3967 .result = REJECT,
3976 .prog_type = BPF_PROG_TYPE_SCHED_CLS 3968 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3977 }, 3969 },
@@ -4378,11 +4370,10 @@ static struct bpf_test tests[] = {
4378 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4370 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4379 BPF_LD_MAP_FD(BPF_REG_1, 0), 4371 BPF_LD_MAP_FD(BPF_REG_1, 0),
4380 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4372 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4381 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 4373 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4382 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4374 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4383 BPF_MOV64_IMM(BPF_REG_2, 0), 4375 BPF_MOV64_IMM(BPF_REG_2, 0),
4384 BPF_MOV64_IMM(BPF_REG_3, 0), 4376 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
4385 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4386 BPF_EXIT_INSN(), 4377 BPF_EXIT_INSN(),
4387 }, 4378 },
4388 .fixup_map2 = { 3 }, 4379 .fixup_map2 = { 3 },
@@ -4482,13 +4473,12 @@ static struct bpf_test tests[] = {
4482 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4473 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4483 BPF_LD_MAP_FD(BPF_REG_1, 0), 4474 BPF_LD_MAP_FD(BPF_REG_1, 0),
4484 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4475 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4485 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 4476 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4486 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4477 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4487 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4478 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4488 offsetof(struct test_val, foo)), 4479 offsetof(struct test_val, foo)),
4489 BPF_MOV64_IMM(BPF_REG_2, 0), 4480 BPF_MOV64_IMM(BPF_REG_2, 0),
4490 BPF_MOV64_IMM(BPF_REG_3, 0), 4481 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
4491 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4492 BPF_EXIT_INSN(), 4482 BPF_EXIT_INSN(),
4493 }, 4483 },
4494 .fixup_map2 = { 3 }, 4484 .fixup_map2 = { 3 },
@@ -4618,13 +4608,12 @@ static struct bpf_test tests[] = {
4618 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4608 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4619 BPF_LD_MAP_FD(BPF_REG_1, 0), 4609 BPF_LD_MAP_FD(BPF_REG_1, 0),
4620 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4610 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4621 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 4611 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4622 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4612 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4623 BPF_MOV64_IMM(BPF_REG_3, 0), 4613 BPF_MOV64_IMM(BPF_REG_3, 0),
4624 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4614 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4625 BPF_MOV64_IMM(BPF_REG_2, 0), 4615 BPF_MOV64_IMM(BPF_REG_2, 0),
4626 BPF_MOV64_IMM(BPF_REG_3, 0), 4616 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
4627 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4628 BPF_EXIT_INSN(), 4617 BPF_EXIT_INSN(),
4629 }, 4618 },
4630 .fixup_map2 = { 3 }, 4619 .fixup_map2 = { 3 },
@@ -4759,15 +4748,14 @@ static struct bpf_test tests[] = {
4759 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4748 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4760 BPF_LD_MAP_FD(BPF_REG_1, 0), 4749 BPF_LD_MAP_FD(BPF_REG_1, 0),
4761 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4750 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4762 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 4751 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4763 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4752 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4764 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 4753 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4765 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 4754 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4766 offsetof(struct test_val, foo), 4), 4755 offsetof(struct test_val, foo), 3),
4767 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4756 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4768 BPF_MOV64_IMM(BPF_REG_2, 0), 4757 BPF_MOV64_IMM(BPF_REG_2, 0),
4769 BPF_MOV64_IMM(BPF_REG_3, 0), 4758 BPF_EMIT_CALL(BPF_FUNC_trace_printk),
4770 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4771 BPF_EXIT_INSN(), 4759 BPF_EXIT_INSN(),
4772 }, 4760 },
4773 .fixup_map2 = { 3 }, 4761 .fixup_map2 = { 3 },
@@ -5200,10 +5188,8 @@ static struct bpf_test tests[] = {
5200 BPF_EXIT_INSN(), 5188 BPF_EXIT_INSN(),
5201 }, 5189 },
5202 .fixup_map2 = { 3 }, 5190 .fixup_map2 = { 3 },
5203 .errstr_unpriv = "R0 bitwise operator &= on pointer", 5191 .errstr = "R0 bitwise operator &= on pointer",
5204 .errstr = "invalid mem access 'inv'",
5205 .result = REJECT, 5192 .result = REJECT,
5206 .result_unpriv = REJECT,
5207 }, 5193 },
5208 { 5194 {
5209 "map element value illegal alu op, 2", 5195 "map element value illegal alu op, 2",
@@ -5219,10 +5205,8 @@ static struct bpf_test tests[] = {
5219 BPF_EXIT_INSN(), 5205 BPF_EXIT_INSN(),
5220 }, 5206 },
5221 .fixup_map2 = { 3 }, 5207 .fixup_map2 = { 3 },
5222 .errstr_unpriv = "R0 32-bit pointer arithmetic prohibited", 5208 .errstr = "R0 32-bit pointer arithmetic prohibited",
5223 .errstr = "invalid mem access 'inv'",
5224 .result = REJECT, 5209 .result = REJECT,
5225 .result_unpriv = REJECT,
5226 }, 5210 },
5227 { 5211 {
5228 "map element value illegal alu op, 3", 5212 "map element value illegal alu op, 3",
@@ -5238,10 +5222,8 @@ static struct bpf_test tests[] = {
5238 BPF_EXIT_INSN(), 5222 BPF_EXIT_INSN(),
5239 }, 5223 },
5240 .fixup_map2 = { 3 }, 5224 .fixup_map2 = { 3 },
5241 .errstr_unpriv = "R0 pointer arithmetic with /= operator", 5225 .errstr = "R0 pointer arithmetic with /= operator",
5242 .errstr = "invalid mem access 'inv'",
5243 .result = REJECT, 5226 .result = REJECT,
5244 .result_unpriv = REJECT,
5245 }, 5227 },
5246 { 5228 {
5247 "map element value illegal alu op, 4", 5229 "map element value illegal alu op, 4",
@@ -5350,7 +5332,7 @@ static struct bpf_test tests[] = {
5350 BPF_EMIT_CALL(BPF_FUNC_probe_read), 5332 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5351 BPF_EXIT_INSN(), 5333 BPF_EXIT_INSN(),
5352 }, 5334 },
5353 .errstr = "invalid stack type R1 off=-64 access_size=0", 5335 .errstr = "invalid indirect read from stack off -64+0 size 64",
5354 .result = REJECT, 5336 .result = REJECT,
5355 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5337 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5356 }, 5338 },
@@ -5505,7 +5487,7 @@ static struct bpf_test tests[] = {
5505 BPF_MOV64_IMM(BPF_REG_0, 0), 5487 BPF_MOV64_IMM(BPF_REG_0, 0),
5506 BPF_EXIT_INSN(), 5488 BPF_EXIT_INSN(),
5507 }, 5489 },
5508 .errstr = "invalid stack type R1 off=-64 access_size=0", 5490 .errstr = "invalid indirect read from stack off -64+0 size 64",
5509 .result = REJECT, 5491 .result = REJECT,
5510 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5492 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5511 }, 5493 },
@@ -5636,7 +5618,7 @@ static struct bpf_test tests[] = {
5636 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5618 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5637 }, 5619 },
5638 { 5620 {
5639 "helper access to variable memory: size = 0 allowed on NULL", 5621 "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
5640 .insns = { 5622 .insns = {
5641 BPF_MOV64_IMM(BPF_REG_1, 0), 5623 BPF_MOV64_IMM(BPF_REG_1, 0),
5642 BPF_MOV64_IMM(BPF_REG_2, 0), 5624 BPF_MOV64_IMM(BPF_REG_2, 0),
@@ -5650,7 +5632,7 @@ static struct bpf_test tests[] = {
5650 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5632 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5651 }, 5633 },
5652 { 5634 {
5653 "helper access to variable memory: size > 0 not allowed on NULL", 5635 "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
5654 .insns = { 5636 .insns = {
5655 BPF_MOV64_IMM(BPF_REG_1, 0), 5637 BPF_MOV64_IMM(BPF_REG_1, 0),
5656 BPF_MOV64_IMM(BPF_REG_2, 0), 5638 BPF_MOV64_IMM(BPF_REG_2, 0),
@@ -5668,7 +5650,7 @@ static struct bpf_test tests[] = {
5668 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5650 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5669 }, 5651 },
5670 { 5652 {
5671 "helper access to variable memory: size = 0 not allowed on != NULL", 5653 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
5672 .insns = { 5654 .insns = {
5673 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5655 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5674 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 5656 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
@@ -5681,11 +5663,201 @@ static struct bpf_test tests[] = {
5681 BPF_EMIT_CALL(BPF_FUNC_csum_diff), 5663 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5682 BPF_EXIT_INSN(), 5664 BPF_EXIT_INSN(),
5683 }, 5665 },
5684 .errstr = "invalid stack type R1 off=-8 access_size=0", 5666 .result = ACCEPT,
5685 .result = REJECT, 5667 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5668 },
5669 {
5670 "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
5671 .insns = {
5672 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5673 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5674 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5675 BPF_LD_MAP_FD(BPF_REG_1, 0),
5676 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5677 BPF_FUNC_map_lookup_elem),
5678 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5679 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5680 BPF_MOV64_IMM(BPF_REG_2, 0),
5681 BPF_MOV64_IMM(BPF_REG_3, 0),
5682 BPF_MOV64_IMM(BPF_REG_4, 0),
5683 BPF_MOV64_IMM(BPF_REG_5, 0),
5684 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5685 BPF_EXIT_INSN(),
5686 },
5687 .fixup_map1 = { 3 },
5688 .result = ACCEPT,
5686 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5689 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5687 }, 5690 },
5688 { 5691 {
5692 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
5693 .insns = {
5694 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5695 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5696 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5697 BPF_LD_MAP_FD(BPF_REG_1, 0),
5698 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5699 BPF_FUNC_map_lookup_elem),
5700 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
5701 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5702 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
5703 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5704 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
5705 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
5706 BPF_MOV64_IMM(BPF_REG_3, 0),
5707 BPF_MOV64_IMM(BPF_REG_4, 0),
5708 BPF_MOV64_IMM(BPF_REG_5, 0),
5709 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5710 BPF_EXIT_INSN(),
5711 },
5712 .fixup_map1 = { 3 },
5713 .result = ACCEPT,
5714 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5715 },
5716 {
5717 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
5718 .insns = {
5719 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5720 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5721 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5722 BPF_LD_MAP_FD(BPF_REG_1, 0),
5723 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5724 BPF_FUNC_map_lookup_elem),
5725 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5726 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5727 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5728 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
5729 BPF_MOV64_IMM(BPF_REG_3, 0),
5730 BPF_MOV64_IMM(BPF_REG_4, 0),
5731 BPF_MOV64_IMM(BPF_REG_5, 0),
5732 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5733 BPF_EXIT_INSN(),
5734 },
5735 .fixup_map1 = { 3 },
5736 .result = ACCEPT,
5737 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5738 },
5739 {
5740 "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
5741 .insns = {
5742 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5743 offsetof(struct __sk_buff, data)),
5744 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5745 offsetof(struct __sk_buff, data_end)),
5746 BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
5747 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5748 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
5749 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
5750 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
5751 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
5752 BPF_MOV64_IMM(BPF_REG_3, 0),
5753 BPF_MOV64_IMM(BPF_REG_4, 0),
5754 BPF_MOV64_IMM(BPF_REG_5, 0),
5755 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5756 BPF_EXIT_INSN(),
5757 },
5758 .result = ACCEPT,
5759 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5760 },
5761 {
5762 "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
5763 .insns = {
5764 BPF_MOV64_IMM(BPF_REG_1, 0),
5765 BPF_MOV64_IMM(BPF_REG_2, 0),
5766 BPF_MOV64_IMM(BPF_REG_3, 0),
5767 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5768 BPF_EXIT_INSN(),
5769 },
5770 .errstr = "R1 type=inv expected=fp",
5771 .result = REJECT,
5772 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5773 },
5774 {
5775 "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
5776 .insns = {
5777 BPF_MOV64_IMM(BPF_REG_1, 0),
5778 BPF_MOV64_IMM(BPF_REG_2, 1),
5779 BPF_MOV64_IMM(BPF_REG_3, 0),
5780 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5781 BPF_EXIT_INSN(),
5782 },
5783 .errstr = "R1 type=inv expected=fp",
5784 .result = REJECT,
5785 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5786 },
5787 {
5788 "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
5789 .insns = {
5790 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5791 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
5792 BPF_MOV64_IMM(BPF_REG_2, 0),
5793 BPF_MOV64_IMM(BPF_REG_3, 0),
5794 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5795 BPF_EXIT_INSN(),
5796 },
5797 .result = ACCEPT,
5798 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5799 },
5800 {
5801 "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
5802 .insns = {
5803 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5804 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5805 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5806 BPF_LD_MAP_FD(BPF_REG_1, 0),
5807 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5808 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5809 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5810 BPF_MOV64_IMM(BPF_REG_2, 0),
5811 BPF_MOV64_IMM(BPF_REG_3, 0),
5812 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5813 BPF_EXIT_INSN(),
5814 },
5815 .fixup_map1 = { 3 },
5816 .result = ACCEPT,
5817 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5818 },
5819 {
5820 "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
5821 .insns = {
5822 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5823 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5824 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5825 BPF_LD_MAP_FD(BPF_REG_1, 0),
5826 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5827 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5828 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5829 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
5830 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5831 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
5832 BPF_MOV64_IMM(BPF_REG_3, 0),
5833 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5834 BPF_EXIT_INSN(),
5835 },
5836 .fixup_map1 = { 3 },
5837 .result = ACCEPT,
5838 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5839 },
5840 {
5841 "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
5842 .insns = {
5843 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5844 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5845 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5846 BPF_LD_MAP_FD(BPF_REG_1, 0),
5847 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5848 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5849 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5850 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5851 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
5852 BPF_MOV64_IMM(BPF_REG_3, 0),
5853 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5854 BPF_EXIT_INSN(),
5855 },
5856 .fixup_map1 = { 3 },
5857 .result = ACCEPT,
5858 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5859 },
5860 {
5689 "helper access to variable memory: 8 bytes leak", 5861 "helper access to variable memory: 8 bytes leak",
5690 .insns = { 5862 .insns = {
5691 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5863 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
@@ -5834,8 +6006,7 @@ static struct bpf_test tests[] = {
5834 BPF_EXIT_INSN(), 6006 BPF_EXIT_INSN(),
5835 }, 6007 },
5836 .fixup_map_in_map = { 3 }, 6008 .fixup_map_in_map = { 3 },
5837 .errstr = "R1 type=inv expected=map_ptr", 6009 .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
5838 .errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
5839 .result = REJECT, 6010 .result = REJECT,
5840 }, 6011 },
5841 { 6012 {
@@ -5932,6 +6103,30 @@ static struct bpf_test tests[] = {
5932 .result = ACCEPT, 6103 .result = ACCEPT,
5933 }, 6104 },
5934 { 6105 {
6106 "ld_abs: tests on r6 and skb data reload helper",
6107 .insns = {
6108 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
6109 BPF_LD_ABS(BPF_B, 0),
6110 BPF_LD_ABS(BPF_H, 0),
6111 BPF_LD_ABS(BPF_W, 0),
6112 BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
6113 BPF_MOV64_IMM(BPF_REG_6, 0),
6114 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
6115 BPF_MOV64_IMM(BPF_REG_2, 1),
6116 BPF_MOV64_IMM(BPF_REG_3, 2),
6117 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6118 BPF_FUNC_skb_vlan_push),
6119 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
6120 BPF_LD_ABS(BPF_B, 0),
6121 BPF_LD_ABS(BPF_H, 0),
6122 BPF_LD_ABS(BPF_W, 0),
6123 BPF_MOV64_IMM(BPF_REG_0, 42),
6124 BPF_EXIT_INSN(),
6125 },
6126 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6127 .result = ACCEPT,
6128 },
6129 {
5935 "ld_ind: check calling conv, r1", 6130 "ld_ind: check calling conv, r1",
5936 .insns = { 6131 .insns = {
5937 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 6132 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
@@ -6115,7 +6310,7 @@ static struct bpf_test tests[] = {
6115 BPF_EXIT_INSN(), 6310 BPF_EXIT_INSN(),
6116 }, 6311 },
6117 .fixup_map1 = { 3 }, 6312 .fixup_map1 = { 3 },
6118 .errstr = "R0 min value is negative", 6313 .errstr = "unbounded min value",
6119 .result = REJECT, 6314 .result = REJECT,
6120 }, 6315 },
6121 { 6316 {
@@ -6139,7 +6334,7 @@ static struct bpf_test tests[] = {
6139 BPF_EXIT_INSN(), 6334 BPF_EXIT_INSN(),
6140 }, 6335 },
6141 .fixup_map1 = { 3 }, 6336 .fixup_map1 = { 3 },
6142 .errstr = "R0 min value is negative", 6337 .errstr = "unbounded min value",
6143 .result = REJECT, 6338 .result = REJECT,
6144 }, 6339 },
6145 { 6340 {
@@ -6165,7 +6360,7 @@ static struct bpf_test tests[] = {
6165 BPF_EXIT_INSN(), 6360 BPF_EXIT_INSN(),
6166 }, 6361 },
6167 .fixup_map1 = { 3 }, 6362 .fixup_map1 = { 3 },
6168 .errstr = "R8 invalid mem access 'inv'", 6363 .errstr = "unbounded min value",
6169 .result = REJECT, 6364 .result = REJECT,
6170 }, 6365 },
6171 { 6366 {
@@ -6190,7 +6385,7 @@ static struct bpf_test tests[] = {
6190 BPF_EXIT_INSN(), 6385 BPF_EXIT_INSN(),
6191 }, 6386 },
6192 .fixup_map1 = { 3 }, 6387 .fixup_map1 = { 3 },
6193 .errstr = "R8 invalid mem access 'inv'", 6388 .errstr = "unbounded min value",
6194 .result = REJECT, 6389 .result = REJECT,
6195 }, 6390 },
6196 { 6391 {
@@ -6238,7 +6433,7 @@ static struct bpf_test tests[] = {
6238 BPF_EXIT_INSN(), 6433 BPF_EXIT_INSN(),
6239 }, 6434 },
6240 .fixup_map1 = { 3 }, 6435 .fixup_map1 = { 3 },
6241 .errstr = "R0 min value is negative", 6436 .errstr = "unbounded min value",
6242 .result = REJECT, 6437 .result = REJECT,
6243 }, 6438 },
6244 { 6439 {
@@ -6309,7 +6504,7 @@ static struct bpf_test tests[] = {
6309 BPF_EXIT_INSN(), 6504 BPF_EXIT_INSN(),
6310 }, 6505 },
6311 .fixup_map1 = { 3 }, 6506 .fixup_map1 = { 3 },
6312 .errstr = "R0 min value is negative", 6507 .errstr = "unbounded min value",
6313 .result = REJECT, 6508 .result = REJECT,
6314 }, 6509 },
6315 { 6510 {
@@ -6360,7 +6555,7 @@ static struct bpf_test tests[] = {
6360 BPF_EXIT_INSN(), 6555 BPF_EXIT_INSN(),
6361 }, 6556 },
6362 .fixup_map1 = { 3 }, 6557 .fixup_map1 = { 3 },
6363 .errstr = "R0 min value is negative", 6558 .errstr = "unbounded min value",
6364 .result = REJECT, 6559 .result = REJECT,
6365 }, 6560 },
6366 { 6561 {
@@ -6387,7 +6582,7 @@ static struct bpf_test tests[] = {
6387 BPF_EXIT_INSN(), 6582 BPF_EXIT_INSN(),
6388 }, 6583 },
6389 .fixup_map1 = { 3 }, 6584 .fixup_map1 = { 3 },
6390 .errstr = "R0 min value is negative", 6585 .errstr = "unbounded min value",
6391 .result = REJECT, 6586 .result = REJECT,
6392 }, 6587 },
6393 { 6588 {
@@ -6413,7 +6608,7 @@ static struct bpf_test tests[] = {
6413 BPF_EXIT_INSN(), 6608 BPF_EXIT_INSN(),
6414 }, 6609 },
6415 .fixup_map1 = { 3 }, 6610 .fixup_map1 = { 3 },
6416 .errstr = "R0 min value is negative", 6611 .errstr = "unbounded min value",
6417 .result = REJECT, 6612 .result = REJECT,
6418 }, 6613 },
6419 { 6614 {
@@ -6442,7 +6637,7 @@ static struct bpf_test tests[] = {
6442 BPF_EXIT_INSN(), 6637 BPF_EXIT_INSN(),
6443 }, 6638 },
6444 .fixup_map1 = { 3 }, 6639 .fixup_map1 = { 3 },
6445 .errstr = "R0 min value is negative", 6640 .errstr = "unbounded min value",
6446 .result = REJECT, 6641 .result = REJECT,
6447 }, 6642 },
6448 { 6643 {
@@ -6472,7 +6667,7 @@ static struct bpf_test tests[] = {
6472 BPF_JMP_IMM(BPF_JA, 0, 0, -7), 6667 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
6473 }, 6668 },
6474 .fixup_map1 = { 4 }, 6669 .fixup_map1 = { 4 },
6475 .errstr = "R0 min value is negative", 6670 .errstr = "unbounded min value",
6476 .result = REJECT, 6671 .result = REJECT,
6477 }, 6672 },
6478 { 6673 {
@@ -6500,8 +6695,7 @@ static struct bpf_test tests[] = {
6500 BPF_EXIT_INSN(), 6695 BPF_EXIT_INSN(),
6501 }, 6696 },
6502 .fixup_map1 = { 3 }, 6697 .fixup_map1 = { 3 },
6503 .errstr_unpriv = "R0 pointer comparison prohibited", 6698 .errstr = "unbounded min value",
6504 .errstr = "R0 min value is negative",
6505 .result = REJECT, 6699 .result = REJECT,
6506 .result_unpriv = REJECT, 6700 .result_unpriv = REJECT,
6507 }, 6701 },
@@ -6557,6 +6751,462 @@ static struct bpf_test tests[] = {
6557 .result = REJECT, 6751 .result = REJECT,
6558 }, 6752 },
6559 { 6753 {
6754 "bounds check based on zero-extended MOV",
6755 .insns = {
6756 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6757 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6758 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6759 BPF_LD_MAP_FD(BPF_REG_1, 0),
6760 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6761 BPF_FUNC_map_lookup_elem),
6762 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6763 /* r2 = 0x0000'0000'ffff'ffff */
6764 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
6765 /* r2 = 0 */
6766 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
6767 /* no-op */
6768 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6769 /* access at offset 0 */
6770 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6771 /* exit */
6772 BPF_MOV64_IMM(BPF_REG_0, 0),
6773 BPF_EXIT_INSN(),
6774 },
6775 .fixup_map1 = { 3 },
6776 .result = ACCEPT
6777 },
6778 {
6779 "bounds check based on sign-extended MOV. test1",
6780 .insns = {
6781 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6782 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6783 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6784 BPF_LD_MAP_FD(BPF_REG_1, 0),
6785 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6786 BPF_FUNC_map_lookup_elem),
6787 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6788 /* r2 = 0xffff'ffff'ffff'ffff */
6789 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
6790 /* r2 = 0xffff'ffff */
6791 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
6792 /* r0 = <oob pointer> */
6793 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6794 /* access to OOB pointer */
6795 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6796 /* exit */
6797 BPF_MOV64_IMM(BPF_REG_0, 0),
6798 BPF_EXIT_INSN(),
6799 },
6800 .fixup_map1 = { 3 },
6801 .errstr = "map_value pointer and 4294967295",
6802 .result = REJECT
6803 },
6804 {
6805 "bounds check based on sign-extended MOV. test2",
6806 .insns = {
6807 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6808 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6809 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6810 BPF_LD_MAP_FD(BPF_REG_1, 0),
6811 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6812 BPF_FUNC_map_lookup_elem),
6813 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6814 /* r2 = 0xffff'ffff'ffff'ffff */
6815 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
6816 /* r2 = 0xfff'ffff */
6817 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
6818 /* r0 = <oob pointer> */
6819 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6820 /* access to OOB pointer */
6821 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6822 /* exit */
6823 BPF_MOV64_IMM(BPF_REG_0, 0),
6824 BPF_EXIT_INSN(),
6825 },
6826 .fixup_map1 = { 3 },
6827 .errstr = "R0 min value is outside of the array range",
6828 .result = REJECT
6829 },
6830 {
6831 "bounds check based on reg_off + var_off + insn_off. test1",
6832 .insns = {
6833 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6834 offsetof(struct __sk_buff, mark)),
6835 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6836 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6837 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6838 BPF_LD_MAP_FD(BPF_REG_1, 0),
6839 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6840 BPF_FUNC_map_lookup_elem),
6841 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6842 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
6843 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
6844 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
6845 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
6846 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
6847 BPF_MOV64_IMM(BPF_REG_0, 0),
6848 BPF_EXIT_INSN(),
6849 },
6850 .fixup_map1 = { 4 },
6851 .errstr = "value_size=8 off=1073741825",
6852 .result = REJECT,
6853 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6854 },
6855 {
6856 "bounds check based on reg_off + var_off + insn_off. test2",
6857 .insns = {
6858 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6859 offsetof(struct __sk_buff, mark)),
6860 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6861 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6862 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6863 BPF_LD_MAP_FD(BPF_REG_1, 0),
6864 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6865 BPF_FUNC_map_lookup_elem),
6866 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6867 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
6868 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
6869 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
6870 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
6871 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
6872 BPF_MOV64_IMM(BPF_REG_0, 0),
6873 BPF_EXIT_INSN(),
6874 },
6875 .fixup_map1 = { 4 },
6876 .errstr = "value 1073741823",
6877 .result = REJECT,
6878 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6879 },
6880 {
6881 "bounds check after truncation of non-boundary-crossing range",
6882 .insns = {
6883 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6884 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6885 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6886 BPF_LD_MAP_FD(BPF_REG_1, 0),
6887 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6888 BPF_FUNC_map_lookup_elem),
6889 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6890 /* r1 = [0x00, 0xff] */
6891 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6892 BPF_MOV64_IMM(BPF_REG_2, 1),
6893 /* r2 = 0x10'0000'0000 */
6894 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
6895 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
6896 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
6897 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
6898 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
6899 /* r1 = [0x00, 0xff] */
6900 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
6901 /* r1 = 0 */
6902 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6903 /* no-op */
6904 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6905 /* access at offset 0 */
6906 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6907 /* exit */
6908 BPF_MOV64_IMM(BPF_REG_0, 0),
6909 BPF_EXIT_INSN(),
6910 },
6911 .fixup_map1 = { 3 },
6912 .result = ACCEPT
6913 },
6914 {
6915 "bounds check after truncation of boundary-crossing range (1)",
6916 .insns = {
6917 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6918 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6919 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6920 BPF_LD_MAP_FD(BPF_REG_1, 0),
6921 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6922 BPF_FUNC_map_lookup_elem),
6923 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6924 /* r1 = [0x00, 0xff] */
6925 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6926 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6927 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
6928 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6929 /* r1 = [0xffff'ff80, 0xffff'ffff] or
6930 * [0x0000'0000, 0x0000'007f]
6931 */
6932 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
6933 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6934 /* r1 = [0x00, 0xff] or
6935 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
6936 */
6937 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6938 /* r1 = 0 or
6939 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
6940 */
6941 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6942 /* no-op or OOB pointer computation */
6943 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6944 /* potentially OOB access */
6945 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6946 /* exit */
6947 BPF_MOV64_IMM(BPF_REG_0, 0),
6948 BPF_EXIT_INSN(),
6949 },
6950 .fixup_map1 = { 3 },
6951 /* not actually fully unbounded, but the bound is very high */
6952 .errstr = "R0 unbounded memory access",
6953 .result = REJECT
6954 },
6955 {
6956 "bounds check after truncation of boundary-crossing range (2)",
6957 .insns = {
6958 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6959 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6960 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6961 BPF_LD_MAP_FD(BPF_REG_1, 0),
6962 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6963 BPF_FUNC_map_lookup_elem),
6964 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6965 /* r1 = [0x00, 0xff] */
6966 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6967 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6968 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
6969 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6970 /* r1 = [0xffff'ff80, 0xffff'ffff] or
6971 * [0x0000'0000, 0x0000'007f]
6972 * difference to previous test: truncation via MOV32
6973 * instead of ALU32.
6974 */
6975 BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
6976 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6977 /* r1 = [0x00, 0xff] or
6978 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
6979 */
6980 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6981 /* r1 = 0 or
6982 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
6983 */
6984 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6985 /* no-op or OOB pointer computation */
6986 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6987 /* potentially OOB access */
6988 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6989 /* exit */
6990 BPF_MOV64_IMM(BPF_REG_0, 0),
6991 BPF_EXIT_INSN(),
6992 },
6993 .fixup_map1 = { 3 },
6994 /* not actually fully unbounded, but the bound is very high */
6995 .errstr = "R0 unbounded memory access",
6996 .result = REJECT
6997 },
6998 {
6999 "bounds check after wrapping 32-bit addition",
7000 .insns = {
7001 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7002 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7003 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7004 BPF_LD_MAP_FD(BPF_REG_1, 0),
7005 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7006 BPF_FUNC_map_lookup_elem),
7007 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7008 /* r1 = 0x7fff'ffff */
7009 BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
7010 /* r1 = 0xffff'fffe */
7011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7012 /* r1 = 0 */
7013 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
7014 /* no-op */
7015 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7016 /* access at offset 0 */
7017 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7018 /* exit */
7019 BPF_MOV64_IMM(BPF_REG_0, 0),
7020 BPF_EXIT_INSN(),
7021 },
7022 .fixup_map1 = { 3 },
7023 .result = ACCEPT
7024 },
7025 {
7026 "bounds check after shift with oversized count operand",
7027 .insns = {
7028 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7029 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7030 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7031 BPF_LD_MAP_FD(BPF_REG_1, 0),
7032 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7033 BPF_FUNC_map_lookup_elem),
7034 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7035 BPF_MOV64_IMM(BPF_REG_2, 32),
7036 BPF_MOV64_IMM(BPF_REG_1, 1),
7037 /* r1 = (u32)1 << (u32)32 = ? */
7038 BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
7039 /* r1 = [0x0000, 0xffff] */
7040 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
7041 /* computes unknown pointer, potentially OOB */
7042 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7043 /* potentially OOB access */
7044 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7045 /* exit */
7046 BPF_MOV64_IMM(BPF_REG_0, 0),
7047 BPF_EXIT_INSN(),
7048 },
7049 .fixup_map1 = { 3 },
7050 .errstr = "R0 max value is outside of the array range",
7051 .result = REJECT
7052 },
7053 {
7054 "bounds check after right shift of maybe-negative number",
7055 .insns = {
7056 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7057 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7058 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7059 BPF_LD_MAP_FD(BPF_REG_1, 0),
7060 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7061 BPF_FUNC_map_lookup_elem),
7062 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7063 /* r1 = [0x00, 0xff] */
7064 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7065 /* r1 = [-0x01, 0xfe] */
7066 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
7067 /* r1 = 0 or 0xff'ffff'ffff'ffff */
7068 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7069 /* r1 = 0 or 0xffff'ffff'ffff */
7070 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7071 /* computes unknown pointer, potentially OOB */
7072 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7073 /* potentially OOB access */
7074 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7075 /* exit */
7076 BPF_MOV64_IMM(BPF_REG_0, 0),
7077 BPF_EXIT_INSN(),
7078 },
7079 .fixup_map1 = { 3 },
7080 .errstr = "R0 unbounded memory access",
7081 .result = REJECT
7082 },
7083 {
7084 "bounds check map access with off+size signed 32bit overflow. test1",
7085 .insns = {
7086 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7087 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7088 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7089 BPF_LD_MAP_FD(BPF_REG_1, 0),
7090 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7091 BPF_FUNC_map_lookup_elem),
7092 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7093 BPF_EXIT_INSN(),
7094 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
7095 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7096 BPF_JMP_A(0),
7097 BPF_EXIT_INSN(),
7098 },
7099 .fixup_map1 = { 3 },
7100 .errstr = "map_value pointer and 2147483646",
7101 .result = REJECT
7102 },
7103 {
7104 "bounds check map access with off+size signed 32bit overflow. test2",
7105 .insns = {
7106 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7107 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7108 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7109 BPF_LD_MAP_FD(BPF_REG_1, 0),
7110 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7111 BPF_FUNC_map_lookup_elem),
7112 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7113 BPF_EXIT_INSN(),
7114 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7115 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7116 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7117 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7118 BPF_JMP_A(0),
7119 BPF_EXIT_INSN(),
7120 },
7121 .fixup_map1 = { 3 },
7122 .errstr = "pointer offset 1073741822",
7123 .result = REJECT
7124 },
7125 {
7126 "bounds check map access with off+size signed 32bit overflow. test3",
7127 .insns = {
7128 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7129 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7130 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7131 BPF_LD_MAP_FD(BPF_REG_1, 0),
7132 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7133 BPF_FUNC_map_lookup_elem),
7134 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7135 BPF_EXIT_INSN(),
7136 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7137 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7138 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7139 BPF_JMP_A(0),
7140 BPF_EXIT_INSN(),
7141 },
7142 .fixup_map1 = { 3 },
7143 .errstr = "pointer offset -1073741822",
7144 .result = REJECT
7145 },
7146 {
7147 "bounds check map access with off+size signed 32bit overflow. test4",
7148 .insns = {
7149 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7150 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7151 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7152 BPF_LD_MAP_FD(BPF_REG_1, 0),
7153 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7154 BPF_FUNC_map_lookup_elem),
7155 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7156 BPF_EXIT_INSN(),
7157 BPF_MOV64_IMM(BPF_REG_1, 1000000),
7158 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
7159 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7160 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7161 BPF_JMP_A(0),
7162 BPF_EXIT_INSN(),
7163 },
7164 .fixup_map1 = { 3 },
7165 .errstr = "map_value pointer and 1000000000000",
7166 .result = REJECT
7167 },
7168 {
7169 "pointer/scalar confusion in state equality check (way 1)",
7170 .insns = {
7171 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7172 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7173 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7174 BPF_LD_MAP_FD(BPF_REG_1, 0),
7175 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7176 BPF_FUNC_map_lookup_elem),
7177 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7178 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7179 BPF_JMP_A(1),
7180 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7181 BPF_JMP_A(0),
7182 BPF_EXIT_INSN(),
7183 },
7184 .fixup_map1 = { 3 },
7185 .result = ACCEPT,
7186 .result_unpriv = REJECT,
7187 .errstr_unpriv = "R0 leaks addr as return value"
7188 },
7189 {
7190 "pointer/scalar confusion in state equality check (way 2)",
7191 .insns = {
7192 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7193 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7194 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7195 BPF_LD_MAP_FD(BPF_REG_1, 0),
7196 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7197 BPF_FUNC_map_lookup_elem),
7198 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
7199 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7200 BPF_JMP_A(1),
7201 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7202 BPF_EXIT_INSN(),
7203 },
7204 .fixup_map1 = { 3 },
7205 .result = ACCEPT,
7206 .result_unpriv = REJECT,
7207 .errstr_unpriv = "R0 leaks addr as return value"
7208 },
7209 {
6560 "variable-offset ctx access", 7210 "variable-offset ctx access",
6561 .insns = { 7211 .insns = {
6562 /* Get an unknown value */ 7212 /* Get an unknown value */
@@ -6598,6 +7248,71 @@ static struct bpf_test tests[] = {
6598 .prog_type = BPF_PROG_TYPE_LWT_IN, 7248 .prog_type = BPF_PROG_TYPE_LWT_IN,
6599 }, 7249 },
6600 { 7250 {
7251 "indirect variable-offset stack access",
7252 .insns = {
7253 /* Fill the top 8 bytes of the stack */
7254 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7255 /* Get an unknown value */
7256 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7257 /* Make it small and 4-byte aligned */
7258 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7259 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
7260 /* add it to fp. We now have either fp-4 or fp-8, but
7261 * we don't know which
7262 */
7263 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
7264 /* dereference it indirectly */
7265 BPF_LD_MAP_FD(BPF_REG_1, 0),
7266 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7267 BPF_FUNC_map_lookup_elem),
7268 BPF_MOV64_IMM(BPF_REG_0, 0),
7269 BPF_EXIT_INSN(),
7270 },
7271 .fixup_map1 = { 5 },
7272 .errstr = "variable stack read R2",
7273 .result = REJECT,
7274 .prog_type = BPF_PROG_TYPE_LWT_IN,
7275 },
7276 {
7277 "direct stack access with 32-bit wraparound. test1",
7278 .insns = {
7279 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7280 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7281 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7282 BPF_MOV32_IMM(BPF_REG_0, 0),
7283 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7284 BPF_EXIT_INSN()
7285 },
7286 .errstr = "fp pointer and 2147483647",
7287 .result = REJECT
7288 },
7289 {
7290 "direct stack access with 32-bit wraparound. test2",
7291 .insns = {
7292 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7293 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7294 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7295 BPF_MOV32_IMM(BPF_REG_0, 0),
7296 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7297 BPF_EXIT_INSN()
7298 },
7299 .errstr = "fp pointer and 1073741823",
7300 .result = REJECT
7301 },
7302 {
7303 "direct stack access with 32-bit wraparound. test3",
7304 .insns = {
7305 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7306 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7307 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7308 BPF_MOV32_IMM(BPF_REG_0, 0),
7309 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7310 BPF_EXIT_INSN()
7311 },
7312 .errstr = "fp pointer offset 1073741822",
7313 .result = REJECT
7314 },
7315 {
6601 "liveness pruning and write screening", 7316 "liveness pruning and write screening",
6602 .insns = { 7317 .insns = {
6603 /* Get an unknown value */ 7318 /* Get an unknown value */
@@ -6658,6 +7373,253 @@ static struct bpf_test tests[] = {
6658 .result = REJECT, 7373 .result = REJECT,
6659 }, 7374 },
6660 { 7375 {
7376 "meta access, test1",
7377 .insns = {
7378 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7379 offsetof(struct xdp_md, data_meta)),
7380 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7381 offsetof(struct xdp_md, data)),
7382 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7383 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7384 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
7385 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7386 BPF_MOV64_IMM(BPF_REG_0, 0),
7387 BPF_EXIT_INSN(),
7388 },
7389 .result = ACCEPT,
7390 .prog_type = BPF_PROG_TYPE_XDP,
7391 },
7392 {
7393 "meta access, test2",
7394 .insns = {
7395 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7396 offsetof(struct xdp_md, data_meta)),
7397 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7398 offsetof(struct xdp_md, data)),
7399 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7400 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
7401 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7402 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
7403 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7404 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7405 BPF_MOV64_IMM(BPF_REG_0, 0),
7406 BPF_EXIT_INSN(),
7407 },
7408 .result = REJECT,
7409 .errstr = "invalid access to packet, off=-8",
7410 .prog_type = BPF_PROG_TYPE_XDP,
7411 },
7412 {
7413 "meta access, test3",
7414 .insns = {
7415 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7416 offsetof(struct xdp_md, data_meta)),
7417 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7418 offsetof(struct xdp_md, data_end)),
7419 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7420 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7421 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
7422 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7423 BPF_MOV64_IMM(BPF_REG_0, 0),
7424 BPF_EXIT_INSN(),
7425 },
7426 .result = REJECT,
7427 .errstr = "invalid access to packet",
7428 .prog_type = BPF_PROG_TYPE_XDP,
7429 },
7430 {
7431 "meta access, test4",
7432 .insns = {
7433 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7434 offsetof(struct xdp_md, data_meta)),
7435 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7436 offsetof(struct xdp_md, data_end)),
7437 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
7438 offsetof(struct xdp_md, data)),
7439 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
7440 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7441 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
7442 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7443 BPF_MOV64_IMM(BPF_REG_0, 0),
7444 BPF_EXIT_INSN(),
7445 },
7446 .result = REJECT,
7447 .errstr = "invalid access to packet",
7448 .prog_type = BPF_PROG_TYPE_XDP,
7449 },
7450 {
7451 "meta access, test5",
7452 .insns = {
7453 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7454 offsetof(struct xdp_md, data_meta)),
7455 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
7456 offsetof(struct xdp_md, data)),
7457 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7458 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7459 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
7460 BPF_MOV64_IMM(BPF_REG_2, -8),
7461 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7462 BPF_FUNC_xdp_adjust_meta),
7463 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
7464 BPF_MOV64_IMM(BPF_REG_0, 0),
7465 BPF_EXIT_INSN(),
7466 },
7467 .result = REJECT,
7468 .errstr = "R3 !read_ok",
7469 .prog_type = BPF_PROG_TYPE_XDP,
7470 },
7471 {
7472 "meta access, test6",
7473 .insns = {
7474 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7475 offsetof(struct xdp_md, data_meta)),
7476 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7477 offsetof(struct xdp_md, data)),
7478 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7479 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7480 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7481 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
7482 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
7483 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7484 BPF_MOV64_IMM(BPF_REG_0, 0),
7485 BPF_EXIT_INSN(),
7486 },
7487 .result = REJECT,
7488 .errstr = "invalid access to packet",
7489 .prog_type = BPF_PROG_TYPE_XDP,
7490 },
7491 {
7492 "meta access, test7",
7493 .insns = {
7494 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7495 offsetof(struct xdp_md, data_meta)),
7496 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7497 offsetof(struct xdp_md, data)),
7498 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7499 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7500 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7501 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
7502 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7503 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7504 BPF_MOV64_IMM(BPF_REG_0, 0),
7505 BPF_EXIT_INSN(),
7506 },
7507 .result = ACCEPT,
7508 .prog_type = BPF_PROG_TYPE_XDP,
7509 },
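In C terms, the pattern accepted by test7 is the usual bounds check of the metadata area against data before dereferencing it. A minimal sketch of an equivalent XDP program, assuming the selftests' bpf_helpers.h for the SEC() macro (function and section names are illustrative):

#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("xdp")
int read_meta(struct xdp_md *ctx)
{
	__u8 *data_meta = (__u8 *)(unsigned long)ctx->data_meta;
	__u8 *data = (__u8 *)(unsigned long)ctx->data;

	/* same check as "meta access, test7": meta + 8 must not pass data */
	if (data_meta + 8 > data)
		return XDP_PASS;

	return data_meta[0] ? XDP_PASS : XDP_DROP;
}

char _license[] SEC("license") = "GPL";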
7510 {
7511 "meta access, test8",
7512 .insns = {
7513 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7514 offsetof(struct xdp_md, data_meta)),
7515 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7516 offsetof(struct xdp_md, data)),
7517 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7518 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
7519 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7520 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7521 BPF_MOV64_IMM(BPF_REG_0, 0),
7522 BPF_EXIT_INSN(),
7523 },
7524 .result = ACCEPT,
7525 .prog_type = BPF_PROG_TYPE_XDP,
7526 },
7527 {
7528 "meta access, test9",
7529 .insns = {
7530 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7531 offsetof(struct xdp_md, data_meta)),
7532 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7533 offsetof(struct xdp_md, data)),
7534 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
7535 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
7536 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
7537 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
7538 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7539 BPF_MOV64_IMM(BPF_REG_0, 0),
7540 BPF_EXIT_INSN(),
7541 },
7542 .result = REJECT,
7543 .errstr = "invalid access to packet",
7544 .prog_type = BPF_PROG_TYPE_XDP,
7545 },
7546 {
7547 "meta access, test10",
7548 .insns = {
7549 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7550 offsetof(struct xdp_md, data_meta)),
7551 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7552 offsetof(struct xdp_md, data)),
7553 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
7554 offsetof(struct xdp_md, data_end)),
7555 BPF_MOV64_IMM(BPF_REG_5, 42),
7556 BPF_MOV64_IMM(BPF_REG_6, 24),
7557 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
7558 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
7559 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
7560 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
7561 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
7562 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
7563 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
7564 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
7565 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
7566 BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
7567 BPF_MOV64_IMM(BPF_REG_0, 0),
7568 BPF_EXIT_INSN(),
7569 },
7570 .result = REJECT,
7571 .errstr = "invalid access to packet",
7572 .prog_type = BPF_PROG_TYPE_XDP,
7573 },
7574 {
7575 "meta access, test11",
7576 .insns = {
7577 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7578 offsetof(struct xdp_md, data_meta)),
7579 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7580 offsetof(struct xdp_md, data)),
7581 BPF_MOV64_IMM(BPF_REG_5, 42),
7582 BPF_MOV64_IMM(BPF_REG_6, 24),
7583 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
7584 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
7585 BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
7586 BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
7587 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
7588 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
7589 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
7590 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
7591 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
7592 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
7593 BPF_MOV64_IMM(BPF_REG_0, 0),
7594 BPF_EXIT_INSN(),
7595 },
7596 .result = ACCEPT,
7597 .prog_type = BPF_PROG_TYPE_XDP,
7598 },
7599 {
7600 "meta access, test12",
7601 .insns = {
7602 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7603 offsetof(struct xdp_md, data_meta)),
7604 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7605 offsetof(struct xdp_md, data)),
7606 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
7607 offsetof(struct xdp_md, data_end)),
7608 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
7609 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
7610 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
7611 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
7612 BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
7613 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
7614 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
7615 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
7616 BPF_MOV64_IMM(BPF_REG_0, 0),
7617 BPF_EXIT_INSN(),
7618 },
7619 .result = ACCEPT,
7620 .prog_type = BPF_PROG_TYPE_XDP,
7621 },
7622 {
6661 "arithmetic ops make PTR_TO_CTX unusable", 7623 "arithmetic ops make PTR_TO_CTX unusable",
6662 .insns = { 7624 .insns = {
6663 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7625 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
@@ -6672,6 +7634,19 @@ static struct bpf_test tests[] = {
6672 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 7634 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6673 }, 7635 },
6674 { 7636 {
7637 "pkt_end - pkt_start is allowed",
7638 .insns = {
7639 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7640 offsetof(struct __sk_buff, data_end)),
7641 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7642 offsetof(struct __sk_buff, data)),
7643 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
7644 BPF_EXIT_INSN(),
7645 },
7646 .result = ACCEPT,
7647 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7648 },
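The "pkt_end - pkt_start is allowed" entry corresponds to the common C idiom of deriving the packet length from the two context pointers. A hedged sketch of such a classifier, assuming the selftests' bpf_helpers.h and the usual "classifier" section name (the 64-byte threshold is arbitrary):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

SEC("classifier")
int pkt_len_cls(struct __sk_buff *skb)
{
	/* pkt_end - pkt_start yields a plain scalar: the packet length */
	long len = (long)skb->data_end - (long)skb->data;

	return len > 64 ? TC_ACT_OK : TC_ACT_SHOT;
}

char _license[] SEC("license") = "GPL";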
7649 {
6675 "XDP pkt read, pkt_end mangling, bad access 1", 7650 "XDP pkt read, pkt_end mangling, bad access 1",
6676 .insns = { 7651 .insns = {
6677 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 7652 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -6686,7 +7661,7 @@ static struct bpf_test tests[] = {
6686 BPF_MOV64_IMM(BPF_REG_0, 0), 7661 BPF_MOV64_IMM(BPF_REG_0, 0),
6687 BPF_EXIT_INSN(), 7662 BPF_EXIT_INSN(),
6688 }, 7663 },
6689 .errstr = "R1 offset is outside of the packet", 7664 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
6690 .result = REJECT, 7665 .result = REJECT,
6691 .prog_type = BPF_PROG_TYPE_XDP, 7666 .prog_type = BPF_PROG_TYPE_XDP,
6692 }, 7667 },
@@ -6705,7 +7680,7 @@ static struct bpf_test tests[] = {
6705 BPF_MOV64_IMM(BPF_REG_0, 0), 7680 BPF_MOV64_IMM(BPF_REG_0, 0),
6706 BPF_EXIT_INSN(), 7681 BPF_EXIT_INSN(),
6707 }, 7682 },
6708 .errstr = "R1 offset is outside of the packet", 7683 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
6709 .result = REJECT, 7684 .result = REJECT,
6710 .prog_type = BPF_PROG_TYPE_XDP, 7685 .prog_type = BPF_PROG_TYPE_XDP,
6711 }, 7686 },
@@ -7151,6 +8126,520 @@ static struct bpf_test tests[] = {
7151 .prog_type = BPF_PROG_TYPE_XDP, 8126 .prog_type = BPF_PROG_TYPE_XDP,
7152 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 8127 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7153 }, 8128 },
8129 {
8130 "XDP pkt read, pkt_meta' > pkt_data, good access",
8131 .insns = {
8132 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8133 offsetof(struct xdp_md, data_meta)),
8134 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8135 offsetof(struct xdp_md, data)),
8136 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8137 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8138 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8139 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8140 BPF_MOV64_IMM(BPF_REG_0, 0),
8141 BPF_EXIT_INSN(),
8142 },
8143 .result = ACCEPT,
8144 .prog_type = BPF_PROG_TYPE_XDP,
8145 },
8146 {
8147 "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
8148 .insns = {
8149 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8150 offsetof(struct xdp_md, data_meta)),
8151 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8152 offsetof(struct xdp_md, data)),
8153 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8154 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8155 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
8156 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8157 BPF_MOV64_IMM(BPF_REG_0, 0),
8158 BPF_EXIT_INSN(),
8159 },
8160 .errstr = "R1 offset is outside of the packet",
8161 .result = REJECT,
8162 .prog_type = BPF_PROG_TYPE_XDP,
8163 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8164 },
8165 {
8166 "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
8167 .insns = {
8168 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8169 offsetof(struct xdp_md, data_meta)),
8170 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8171 offsetof(struct xdp_md, data)),
8172 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8173 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8174 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
8175 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8176 BPF_MOV64_IMM(BPF_REG_0, 0),
8177 BPF_EXIT_INSN(),
8178 },
8179 .errstr = "R1 offset is outside of the packet",
8180 .result = REJECT,
8181 .prog_type = BPF_PROG_TYPE_XDP,
8182 },
8183 {
8184 "XDP pkt read, pkt_data > pkt_meta', good access",
8185 .insns = {
8186 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8187 offsetof(struct xdp_md, data_meta)),
8188 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8189 offsetof(struct xdp_md, data)),
8190 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8191 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8192 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8193 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8194 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8195 BPF_MOV64_IMM(BPF_REG_0, 0),
8196 BPF_EXIT_INSN(),
8197 },
8198 .result = ACCEPT,
8199 .prog_type = BPF_PROG_TYPE_XDP,
8200 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8201 },
8202 {
8203 "XDP pkt read, pkt_data > pkt_meta', bad access 1",
8204 .insns = {
8205 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8206 offsetof(struct xdp_md, data_meta)),
8207 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8208 offsetof(struct xdp_md, data)),
8209 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8210 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8211 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8212 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8213 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8214 BPF_MOV64_IMM(BPF_REG_0, 0),
8215 BPF_EXIT_INSN(),
8216 },
8217 .errstr = "R1 offset is outside of the packet",
8218 .result = REJECT,
8219 .prog_type = BPF_PROG_TYPE_XDP,
8220 },
8221 {
8222 "XDP pkt read, pkt_data > pkt_meta', bad access 2",
8223 .insns = {
8224 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8225 offsetof(struct xdp_md, data_meta)),
8226 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8227 offsetof(struct xdp_md, data)),
8228 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8229 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8230 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
8231 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8232 BPF_MOV64_IMM(BPF_REG_0, 0),
8233 BPF_EXIT_INSN(),
8234 },
8235 .errstr = "R1 offset is outside of the packet",
8236 .result = REJECT,
8237 .prog_type = BPF_PROG_TYPE_XDP,
8238 },
8239 {
8240 "XDP pkt read, pkt_meta' < pkt_data, good access",
8241 .insns = {
8242 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8243 offsetof(struct xdp_md, data_meta)),
8244 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8245 offsetof(struct xdp_md, data)),
8246 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8247 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8248 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8249 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8250 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8251 BPF_MOV64_IMM(BPF_REG_0, 0),
8252 BPF_EXIT_INSN(),
8253 },
8254 .result = ACCEPT,
8255 .prog_type = BPF_PROG_TYPE_XDP,
8256 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8257 },
8258 {
8259 "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
8260 .insns = {
8261 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8262 offsetof(struct xdp_md, data_meta)),
8263 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8264 offsetof(struct xdp_md, data)),
8265 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8266 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8267 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8268 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8269 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8270 BPF_MOV64_IMM(BPF_REG_0, 0),
8271 BPF_EXIT_INSN(),
8272 },
8273 .errstr = "R1 offset is outside of the packet",
8274 .result = REJECT,
8275 .prog_type = BPF_PROG_TYPE_XDP,
8276 },
8277 {
8278 "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
8279 .insns = {
8280 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8281 offsetof(struct xdp_md, data_meta)),
8282 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8283 offsetof(struct xdp_md, data)),
8284 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8285 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8286 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
8287 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8288 BPF_MOV64_IMM(BPF_REG_0, 0),
8289 BPF_EXIT_INSN(),
8290 },
8291 .errstr = "R1 offset is outside of the packet",
8292 .result = REJECT,
8293 .prog_type = BPF_PROG_TYPE_XDP,
8294 },
8295 {
8296 "XDP pkt read, pkt_data < pkt_meta', good access",
8297 .insns = {
8298 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8299 offsetof(struct xdp_md, data_meta)),
8300 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8301 offsetof(struct xdp_md, data)),
8302 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8303 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8304 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8305 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8306 BPF_MOV64_IMM(BPF_REG_0, 0),
8307 BPF_EXIT_INSN(),
8308 },
8309 .result = ACCEPT,
8310 .prog_type = BPF_PROG_TYPE_XDP,
8311 },
8312 {
8313 "XDP pkt read, pkt_data < pkt_meta', bad access 1",
8314 .insns = {
8315 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8316 offsetof(struct xdp_md, data_meta)),
8317 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8318 offsetof(struct xdp_md, data)),
8319 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8320 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8321 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
8322 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8323 BPF_MOV64_IMM(BPF_REG_0, 0),
8324 BPF_EXIT_INSN(),
8325 },
8326 .errstr = "R1 offset is outside of the packet",
8327 .result = REJECT,
8328 .prog_type = BPF_PROG_TYPE_XDP,
8329 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8330 },
8331 {
8332 "XDP pkt read, pkt_data < pkt_meta', bad access 2",
8333 .insns = {
8334 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8335 offsetof(struct xdp_md, data_meta)),
8336 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8337 offsetof(struct xdp_md, data)),
8338 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8339 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8340 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
8341 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8342 BPF_MOV64_IMM(BPF_REG_0, 0),
8343 BPF_EXIT_INSN(),
8344 },
8345 .errstr = "R1 offset is outside of the packet",
8346 .result = REJECT,
8347 .prog_type = BPF_PROG_TYPE_XDP,
8348 },
8349 {
8350 "XDP pkt read, pkt_meta' >= pkt_data, good access",
8351 .insns = {
8352 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8353 offsetof(struct xdp_md, data_meta)),
8354 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8355 offsetof(struct xdp_md, data)),
8356 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8357 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8358 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8359 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8360 BPF_MOV64_IMM(BPF_REG_0, 0),
8361 BPF_EXIT_INSN(),
8362 },
8363 .result = ACCEPT,
8364 .prog_type = BPF_PROG_TYPE_XDP,
8365 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8366 },
8367 {
8368 "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
8369 .insns = {
8370 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8371 offsetof(struct xdp_md, data_meta)),
8372 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8373 offsetof(struct xdp_md, data)),
8374 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8375 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8376 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
8377 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8378 BPF_MOV64_IMM(BPF_REG_0, 0),
8379 BPF_EXIT_INSN(),
8380 },
8381 .errstr = "R1 offset is outside of the packet",
8382 .result = REJECT,
8383 .prog_type = BPF_PROG_TYPE_XDP,
8384 },
8385 {
8386 "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
8387 .insns = {
8388 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8389 offsetof(struct xdp_md, data_meta)),
8390 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8391 offsetof(struct xdp_md, data)),
8392 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8393 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8394 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
8395 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8396 BPF_MOV64_IMM(BPF_REG_0, 0),
8397 BPF_EXIT_INSN(),
8398 },
8399 .errstr = "R1 offset is outside of the packet",
8400 .result = REJECT,
8401 .prog_type = BPF_PROG_TYPE_XDP,
8402 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8403 },
8404 {
8405 "XDP pkt read, pkt_data >= pkt_meta', good access",
8406 .insns = {
8407 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8408 offsetof(struct xdp_md, data_meta)),
8409 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8410 offsetof(struct xdp_md, data)),
8411 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8412 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8413 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8414 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8415 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8416 BPF_MOV64_IMM(BPF_REG_0, 0),
8417 BPF_EXIT_INSN(),
8418 },
8419 .result = ACCEPT,
8420 .prog_type = BPF_PROG_TYPE_XDP,
8421 },
8422 {
8423 "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
8424 .insns = {
8425 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8426 offsetof(struct xdp_md, data_meta)),
8427 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8428 offsetof(struct xdp_md, data)),
8429 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8430 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8431 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8432 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8433 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8434 BPF_MOV64_IMM(BPF_REG_0, 0),
8435 BPF_EXIT_INSN(),
8436 },
8437 .errstr = "R1 offset is outside of the packet",
8438 .result = REJECT,
8439 .prog_type = BPF_PROG_TYPE_XDP,
8440 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8441 },
8442 {
8443 "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
8444 .insns = {
8445 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8446 offsetof(struct xdp_md, data_meta)),
8447 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8448 offsetof(struct xdp_md, data)),
8449 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8450 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8451 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
8452 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8453 BPF_MOV64_IMM(BPF_REG_0, 0),
8454 BPF_EXIT_INSN(),
8455 },
8456 .errstr = "R1 offset is outside of the packet",
8457 .result = REJECT,
8458 .prog_type = BPF_PROG_TYPE_XDP,
8459 },
8460 {
8461 "XDP pkt read, pkt_meta' <= pkt_data, good access",
8462 .insns = {
8463 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8464 offsetof(struct xdp_md, data_meta)),
8465 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8466 offsetof(struct xdp_md, data)),
8467 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8468 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8469 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8470 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8471 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8472 BPF_MOV64_IMM(BPF_REG_0, 0),
8473 BPF_EXIT_INSN(),
8474 },
8475 .result = ACCEPT,
8476 .prog_type = BPF_PROG_TYPE_XDP,
8477 },
8478 {
8479 "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
8480 .insns = {
8481 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8482 offsetof(struct xdp_md, data_meta)),
8483 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8484 offsetof(struct xdp_md, data)),
8485 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8486 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8487 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8488 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
8489 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
8490 BPF_MOV64_IMM(BPF_REG_0, 0),
8491 BPF_EXIT_INSN(),
8492 },
8493 .errstr = "R1 offset is outside of the packet",
8494 .result = REJECT,
8495 .prog_type = BPF_PROG_TYPE_XDP,
8496 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8497 },
8498 {
8499 "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
8500 .insns = {
8501 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8502 offsetof(struct xdp_md, data_meta)),
8503 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8504 offsetof(struct xdp_md, data)),
8505 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8506 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8507 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
8508 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8509 BPF_MOV64_IMM(BPF_REG_0, 0),
8510 BPF_EXIT_INSN(),
8511 },
8512 .errstr = "R1 offset is outside of the packet",
8513 .result = REJECT,
8514 .prog_type = BPF_PROG_TYPE_XDP,
8515 },
8516 {
8517 "XDP pkt read, pkt_data <= pkt_meta', good access",
8518 .insns = {
8519 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8520 offsetof(struct xdp_md, data_meta)),
8521 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8522 offsetof(struct xdp_md, data)),
8523 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8524 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8525 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8526 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8527 BPF_MOV64_IMM(BPF_REG_0, 0),
8528 BPF_EXIT_INSN(),
8529 },
8530 .result = ACCEPT,
8531 .prog_type = BPF_PROG_TYPE_XDP,
8532 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8533 },
8534 {
8535 "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
8536 .insns = {
8537 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8538 offsetof(struct xdp_md, data_meta)),
8539 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8540 offsetof(struct xdp_md, data)),
8541 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8542 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8543 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
8544 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
8545 BPF_MOV64_IMM(BPF_REG_0, 0),
8546 BPF_EXIT_INSN(),
8547 },
8548 .errstr = "R1 offset is outside of the packet",
8549 .result = REJECT,
8550 .prog_type = BPF_PROG_TYPE_XDP,
8551 },
8552 {
8553 "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
8554 .insns = {
8555 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8556 offsetof(struct xdp_md, data_meta)),
8557 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8558 offsetof(struct xdp_md, data)),
8559 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
8560 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8561 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
8562 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
8563 BPF_MOV64_IMM(BPF_REG_0, 0),
8564 BPF_EXIT_INSN(),
8565 },
8566 .errstr = "R1 offset is outside of the packet",
8567 .result = REJECT,
8568 .prog_type = BPF_PROG_TYPE_XDP,
8569 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8570 },
8571 {
8572 "bpf_exit with invalid return code. test1",
8573 .insns = {
8574 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
8575 BPF_EXIT_INSN(),
8576 },
8577 .errstr = "R0 has value (0x0; 0xffffffff)",
8578 .result = REJECT,
8579 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
8580 },
8581 {
8582 "bpf_exit with invalid return code. test2",
8583 .insns = {
8584 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
8585 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
8586 BPF_EXIT_INSN(),
8587 },
8588 .result = ACCEPT,
8589 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
8590 },
8591 {
8592 "bpf_exit with invalid return code. test3",
8593 .insns = {
8594 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
8595 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
8596 BPF_EXIT_INSN(),
8597 },
8598 .errstr = "R0 has value (0x0; 0x3)",
8599 .result = REJECT,
8600 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
8601 },
8602 {
8603 "bpf_exit with invalid return code. test4",
8604 .insns = {
8605 BPF_MOV64_IMM(BPF_REG_0, 1),
8606 BPF_EXIT_INSN(),
8607 },
8608 .result = ACCEPT,
8609 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
8610 },
8611 {
8612 "bpf_exit with invalid return code. test5",
8613 .insns = {
8614 BPF_MOV64_IMM(BPF_REG_0, 2),
8615 BPF_EXIT_INSN(),
8616 },
8617 .errstr = "R0 has value (0x2; 0x0)",
8618 .result = REJECT,
8619 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
8620 },
8621 {
8622 "bpf_exit with invalid return code. test6",
8623 .insns = {
8624 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
8625 BPF_EXIT_INSN(),
8626 },
8627 .errstr = "R0 is not a known value (ctx)",
8628 .result = REJECT,
8629 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
8630 },
8631 {
8632 "bpf_exit with invalid return code. test7",
8633 .insns = {
8634 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
8635 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
8636 BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
8637 BPF_EXIT_INSN(),
8638 },
8639 .errstr = "R0 has unknown scalar value",
8640 .result = REJECT,
8641 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
8642 },
7154}; 8643};
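The "bpf_exit with invalid return code" entries pin down the rule that BPF_PROG_TYPE_CGROUP_SOCK programs may only return 0 or 1. For reference, a minimal program that satisfies the check might look like the sketch below, assuming the selftests' bpf_helpers.h and the usual "cgroup/sock" section-name convention:

#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("cgroup/sock")
int sock_allow(struct bpf_sock *sk)
{
	/* cgroup/sock programs must return 0 (deny) or 1 (allow) */
	return 1;
}

char _license[] SEC("license") = "GPL";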
7155 8644
7156static int probe_filter_length(const struct bpf_insn *fp) 8645static int probe_filter_length(const struct bpf_insn *fp)
@@ -7198,7 +8687,7 @@ static int create_map_in_map(void)
7198 return inner_map_fd; 8687 return inner_map_fd;
7199 } 8688 }
7200 8689
7201 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, 8690 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
7202 sizeof(int), inner_map_fd, 1, 0); 8691 sizeof(int), inner_map_fd, 1, 0);
7203 if (outer_map_fd < 0) 8692 if (outer_map_fd < 0)
7204 printf("Failed to create array of maps '%s'!\n", 8693 printf("Failed to create array of maps '%s'!\n",
diff --git a/tools/testing/selftests/bpf/test_verifier_log.c b/tools/testing/selftests/bpf/test_verifier_log.c
new file mode 100644
index 000000000000..e9626cf5607a
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_verifier_log.c
@@ -0,0 +1,178 @@
1#include <errno.h>
2#include <stdlib.h>
3#include <stdio.h>
4#include <string.h>
5#include <unistd.h>
6#include <sys/time.h>
7#include <sys/resource.h>
8
9#include <linux/bpf.h>
10#include <linux/filter.h>
11#include <linux/unistd.h>
12
13#include <bpf/bpf.h>
14
15#define LOG_SIZE (1 << 20)
16
17#define err(str...) printf("ERROR: " str)
18
19static const struct bpf_insn code_sample[] = {
20 /* We need a few instructions to pass the min log length */
21 BPF_MOV64_IMM(BPF_REG_0, 0),
22 BPF_MOV64_IMM(BPF_REG_0, 0),
23 BPF_MOV64_IMM(BPF_REG_0, 0),
24 BPF_MOV64_IMM(BPF_REG_0, 0),
25 BPF_MOV64_IMM(BPF_REG_0, 0),
26 BPF_MOV64_IMM(BPF_REG_0, 0),
27 BPF_MOV64_IMM(BPF_REG_0, 0),
28 BPF_MOV64_IMM(BPF_REG_0, 0),
29 BPF_MOV64_IMM(BPF_REG_0, 0),
30 BPF_MOV64_IMM(BPF_REG_0, 0),
31 BPF_MOV64_IMM(BPF_REG_0, 0),
32 BPF_MOV64_IMM(BPF_REG_0, 0),
33 BPF_MOV64_IMM(BPF_REG_0, 0),
34 BPF_MOV64_IMM(BPF_REG_0, 0),
35 BPF_MOV64_IMM(BPF_REG_0, 0),
36 BPF_MOV64_IMM(BPF_REG_0, 0),
37 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
38 BPF_FUNC_map_lookup_elem),
39 BPF_EXIT_INSN(),
40};
41
42static inline __u64 ptr_to_u64(const void *ptr)
43{
44 return (__u64) (unsigned long) ptr;
45}
46
47static int load(char *log, size_t log_len, int log_level)
48{
49 union bpf_attr attr;
50
51 bzero(&attr, sizeof(attr));
52 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
53 attr.insn_cnt = (__u32)(sizeof(code_sample) / sizeof(struct bpf_insn));
54 attr.insns = ptr_to_u64(code_sample);
55 attr.license = ptr_to_u64("GPL");
56 attr.log_buf = ptr_to_u64(log);
57 attr.log_size = log_len;
58 attr.log_level = log_level;
59
60 return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
61}
62
63static void check_ret(int ret, int exp_errno)
64{
65 if (ret > 0) {
66 close(ret);
67 err("broken sample loaded successfully!?\n");
68 exit(1);
69 }
70
71 if (!ret || errno != exp_errno) {
72 err("Program load returned: ret:%d/errno:%d, expected ret:%d/errno:%d\n",
73 ret, errno, -1, exp_errno);
74 exit(1);
75 }
76}
77
78static void check_ones(const char *buf, size_t len, const char *msg)
79{
80 while (len--)
81 if (buf[len] != 1) {
82 err("%s", msg);
83 exit(1);
84 }
85}
86
87static void test_log_good(char *log, size_t buf_len, size_t log_len,
88 size_t exp_len, int exp_errno, const char *full_log)
89{
90 size_t len;
91 int ret;
92
93 memset(log, 1, buf_len);
94
95 ret = load(log, log_len, 1);
96 check_ret(ret, exp_errno);
97
98 len = strnlen(log, buf_len);
99 if (len == buf_len) {
100		err("verifier did not NUL-terminate the log\n");
101 exit(1);
102 }
103 if (exp_len && len != exp_len) {
104 err("incorrect log length expected:%zd have:%zd\n",
105 exp_len, len);
106 exit(1);
107 }
108
109 if (strchr(log, 1)) {
110 err("verifier leaked a byte through\n");
111 exit(1);
112 }
113
114 check_ones(log + len + 1, buf_len - len - 1,
115 "verifier wrote bytes past NULL termination\n");
116
117 if (memcmp(full_log, log, LOG_SIZE)) {
118 err("log did not match expected output\n");
119 exit(1);
120 }
121}
122
123static void test_log_bad(char *log, size_t log_len, int log_level)
124{
125 int ret;
126
127 ret = load(log, log_len, log_level);
128 check_ret(ret, EINVAL);
129 if (log)
130 check_ones(log, LOG_SIZE,
131 "verifier touched log with bad parameters\n");
132}
133
134int main(int argc, char **argv)
135{
136 struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
137 char full_log[LOG_SIZE];
138 char log[LOG_SIZE];
139 size_t want_len;
140 int i;
141
142	/* allow unlimited locked memory so the error codes are consistent */
143 if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
144 perror("Unable to lift memlock rlimit");
145
146 memset(log, 1, LOG_SIZE);
147
148 /* Test incorrect attr */
149 printf("Test log_level 0...\n");
150 test_log_bad(log, LOG_SIZE, 0);
151
152 printf("Test log_size < 128...\n");
153 test_log_bad(log, 15, 1);
154
155 printf("Test log_buff = NULL...\n");
156 test_log_bad(NULL, LOG_SIZE, 1);
157
158 /* Test with log big enough */
159 printf("Test oversized buffer...\n");
160 test_log_good(full_log, LOG_SIZE, LOG_SIZE, 0, EACCES, full_log);
161
162 want_len = strlen(full_log);
163
164 printf("Test exact buffer...\n");
165 test_log_good(log, LOG_SIZE, want_len + 2, want_len, EACCES, full_log);
166
167 printf("Test undersized buffers...\n");
168 for (i = 0; i < 64; i++) {
169 full_log[want_len - i + 1] = 1;
170 full_log[want_len - i] = 0;
171
172 test_log_good(log, LOG_SIZE, want_len + 1 - i, want_len - i,
173 ENOSPC, full_log);
174 }
175
176 printf("test_verifier_log: OK\n");
177 return 0;
178}
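The undersized-buffer cases above establish a contract callers can rely on: with log_level set and log_size too small, the load fails with ENOSPC while the buffer still holds a NUL-terminated prefix of the log. A hedged sketch of how a loader might use that to grow its buffer, written as if it lived in this file so it can reuse load() (sizes and names are illustrative):

/* sketch: grow the verifier log buffer until the load no longer
 * fails with ENOSPC
 */
static int load_with_growing_log(void)
{
	size_t sz = 4096;
	char *log;
	int fd;

	for (;;) {
		log = malloc(sz);
		if (!log)
			return -1;
		fd = load(log, sz, 1);
		if (fd >= 0 || errno != ENOSPC) {
			if (fd < 0)
				fprintf(stderr, "%s", log);
			free(log);
			return fd;
		}
		free(log);
		sz *= 2;	/* log was truncated, retry with a bigger buffer */
	}
}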
diff --git a/tools/testing/selftests/bpf/test_xdp_meta.c b/tools/testing/selftests/bpf/test_xdp_meta.c
new file mode 100644
index 000000000000..8d0182650653
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_xdp_meta.c
@@ -0,0 +1,53 @@
1#include <linux/bpf.h>
2#include <linux/if_ether.h>
3#include <linux/pkt_cls.h>
4
5#include "bpf_helpers.h"
6
7#define __round_mask(x, y) ((__typeof__(x))((y) - 1))
8#define round_up(x, y) ((((x) - 1) | __round_mask(x, y)) + 1)
9#define ctx_ptr(ctx, mem) (void *)(unsigned long)ctx->mem
10
11SEC("t")
12int ing_cls(struct __sk_buff *ctx)
13{
14 __u8 *data, *data_meta, *data_end;
15 __u32 diff = 0;
16
17 data_meta = ctx_ptr(ctx, data_meta);
18 data_end = ctx_ptr(ctx, data_end);
19 data = ctx_ptr(ctx, data);
20
21 if (data + ETH_ALEN > data_end ||
22 data_meta + round_up(ETH_ALEN, 4) > data)
23 return TC_ACT_SHOT;
24
25 diff |= ((__u32 *)data_meta)[0] ^ ((__u32 *)data)[0];
26 diff |= ((__u16 *)data_meta)[2] ^ ((__u16 *)data)[2];
27
28 return diff ? TC_ACT_SHOT : TC_ACT_OK;
29}
30
31SEC("x")
32int ing_xdp(struct xdp_md *ctx)
33{
34 __u8 *data, *data_meta, *data_end;
35 int ret;
36
37 ret = bpf_xdp_adjust_meta(ctx, -round_up(ETH_ALEN, 4));
38 if (ret < 0)
39 return XDP_DROP;
40
41 data_meta = ctx_ptr(ctx, data_meta);
42 data_end = ctx_ptr(ctx, data_end);
43 data = ctx_ptr(ctx, data);
44
45 if (data + ETH_ALEN > data_end ||
46 data_meta + round_up(ETH_ALEN, 4) > data)
47 return XDP_DROP;
48
49 __builtin_memcpy(data_meta, data, ETH_ALEN);
50 return XDP_PASS;
51}
52
53char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_xdp_meta.sh b/tools/testing/selftests/bpf/test_xdp_meta.sh
new file mode 100755
index 000000000000..307aa856cee3
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_xdp_meta.sh
@@ -0,0 +1,51 @@
1#!/bin/sh
2
3cleanup()
4{
5 if [ "$?" = "0" ]; then
6 echo "selftests: test_xdp_meta [PASS]";
7 else
8 echo "selftests: test_xdp_meta [FAILED]";
9 fi
10
11 set +e
12 ip netns del ns1 2> /dev/null
13 ip netns del ns2 2> /dev/null
14}
15
16ip link set dev lo xdp off > /dev/null 2>&1
17if [ $? -ne 0 ]; then
18	echo "selftests: [SKIP] Could not run test, ip tool does not support XDP"
19 exit 0
20fi
21set -e
22
23ip netns add ns1
24ip netns add ns2
25
26trap cleanup 0 2 3 6 9
27
28ip link add veth1 type veth peer name veth2
29
30ip link set veth1 netns ns1
31ip link set veth2 netns ns2
32
33ip netns exec ns1 ip addr add 10.1.1.11/24 dev veth1
34ip netns exec ns2 ip addr add 10.1.1.22/24 dev veth2
35
36ip netns exec ns1 tc qdisc add dev veth1 clsact
37ip netns exec ns2 tc qdisc add dev veth2 clsact
38
39ip netns exec ns1 tc filter add dev veth1 ingress bpf da obj test_xdp_meta.o sec t
40ip netns exec ns2 tc filter add dev veth2 ingress bpf da obj test_xdp_meta.o sec t
41
42ip netns exec ns1 ip link set dev veth1 xdp obj test_xdp_meta.o sec x
43ip netns exec ns2 ip link set dev veth2 xdp obj test_xdp_meta.o sec x
44
45ip netns exec ns1 ip link set dev veth1 up
46ip netns exec ns2 ip link set dev veth2 up
47
48ip netns exec ns1 ping -c 1 10.1.1.22
49ip netns exec ns2 ping -c 1 10.1.1.11
50
51exit 0
diff --git a/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c b/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
index 960d02100c26..2d95e5adde72 100644
--- a/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
+++ b/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
@@ -19,6 +19,7 @@
19 19
20#define _GNU_SOURCE 20#define _GNU_SOURCE
21 21
22#include <asm/ptrace.h>
22#include <sys/types.h> 23#include <sys/types.h>
23#include <sys/wait.h> 24#include <sys/wait.h>
24#include <sys/ptrace.h> 25#include <sys/ptrace.h>
diff --git a/tools/testing/selftests/cpu-hotplug/config b/tools/testing/selftests/cpu-hotplug/config
index e6ab090cfbf3..d4aca2ad5069 100644
--- a/tools/testing/selftests/cpu-hotplug/config
+++ b/tools/testing/selftests/cpu-hotplug/config
@@ -1,2 +1 @@
1CONFIG_NOTIFIER_ERROR_INJECTION=y CONFIG_NOTIFIER_ERROR_INJECTION=y
2CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c
index 8d5d1d2ee7c1..67cd4597db2b 100644
--- a/tools/testing/selftests/exec/execveat.c
+++ b/tools/testing/selftests/exec/execveat.c
@@ -147,7 +147,7 @@ static void exe_cp(const char *src, const char *dest)
147} 147}
148 148
149#define XX_DIR_LEN 200 149#define XX_DIR_LEN 200
150static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script) 150static int check_execveat_pathmax(int root_dfd, const char *src, int is_script)
151{ 151{
152 int fail = 0; 152 int fail = 0;
153 int ii, count, len; 153 int ii, count, len;
@@ -156,20 +156,30 @@ static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script)
156 156
157 if (*longpath == '\0') { 157 if (*longpath == '\0') {
158 /* Create a filename close to PATH_MAX in length */ 158 /* Create a filename close to PATH_MAX in length */
159 char *cwd = getcwd(NULL, 0);
160
161 if (!cwd) {
162 printf("Failed to getcwd(), errno=%d (%s)\n",
163 errno, strerror(errno));
164 return 2;
165 }
166 strcpy(longpath, cwd);
167 strcat(longpath, "/");
159 memset(longname, 'x', XX_DIR_LEN - 1); 168 memset(longname, 'x', XX_DIR_LEN - 1);
160 longname[XX_DIR_LEN - 1] = '/'; 169 longname[XX_DIR_LEN - 1] = '/';
161 longname[XX_DIR_LEN] = '\0'; 170 longname[XX_DIR_LEN] = '\0';
162 count = (PATH_MAX - 3) / XX_DIR_LEN; 171 count = (PATH_MAX - 3 - strlen(cwd)) / XX_DIR_LEN;
163 for (ii = 0; ii < count; ii++) { 172 for (ii = 0; ii < count; ii++) {
164 strcat(longpath, longname); 173 strcat(longpath, longname);
165 mkdir(longpath, 0755); 174 mkdir(longpath, 0755);
166 } 175 }
167 len = (PATH_MAX - 3) - (count * XX_DIR_LEN); 176 len = (PATH_MAX - 3 - strlen(cwd)) - (count * XX_DIR_LEN);
168 if (len <= 0) 177 if (len <= 0)
169 len = 1; 178 len = 1;
170 memset(longname, 'y', len); 179 memset(longname, 'y', len);
171 longname[len] = '\0'; 180 longname[len] = '\0';
172 strcat(longpath, longname); 181 strcat(longpath, longname);
182 free(cwd);
173 } 183 }
174 exe_cp(src, longpath); 184 exe_cp(src, longpath);
175 185
@@ -190,7 +200,7 @@ static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script)
190 } 200 }
191 201
192 /* 202 /*
193 * Execute as a long pathname relative to ".". If this is a script, 203 * Execute as a long pathname relative to "/". If this is a script,
194 * the interpreter will launch but fail to open the script because its 204 * the interpreter will launch but fail to open the script because its
195 * name ("/dev/fd/5/xxx....") is bigger than PATH_MAX. 205 * name ("/dev/fd/5/xxx....") is bigger than PATH_MAX.
196 * 206 *
@@ -200,10 +210,10 @@ static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script)
200 * the exit status shall be 126."), so allow either. 210 * the exit status shall be 126."), so allow either.
201 */ 211 */
202 if (is_script) 212 if (is_script)
203 fail += check_execveat_invoked_rc(dot_dfd, longpath, 0, 213 fail += check_execveat_invoked_rc(root_dfd, longpath + 1, 0,
204 127, 126); 214 127, 126);
205 else 215 else
206 fail += check_execveat(dot_dfd, longpath, 0); 216 fail += check_execveat(root_dfd, longpath + 1, 0);
207 217
208 return fail; 218 return fail;
209} 219}
@@ -218,6 +228,7 @@ static int run_tests(void)
218 int subdir_dfd_ephemeral = open_or_die("subdir.ephemeral", 228 int subdir_dfd_ephemeral = open_or_die("subdir.ephemeral",
219 O_DIRECTORY|O_RDONLY); 229 O_DIRECTORY|O_RDONLY);
220 int dot_dfd = open_or_die(".", O_DIRECTORY|O_RDONLY); 230 int dot_dfd = open_or_die(".", O_DIRECTORY|O_RDONLY);
231 int root_dfd = open_or_die("/", O_DIRECTORY|O_RDONLY);
221 int dot_dfd_path = open_or_die(".", O_DIRECTORY|O_RDONLY|O_PATH); 232 int dot_dfd_path = open_or_die(".", O_DIRECTORY|O_RDONLY|O_PATH);
222 int dot_dfd_cloexec = open_or_die(".", O_DIRECTORY|O_RDONLY|O_CLOEXEC); 233 int dot_dfd_cloexec = open_or_die(".", O_DIRECTORY|O_RDONLY|O_CLOEXEC);
223 int fd = open_or_die("execveat", O_RDONLY); 234 int fd = open_or_die("execveat", O_RDONLY);
@@ -353,8 +364,8 @@ static int run_tests(void)
353 /* Attempt to execute relative to non-directory => ENOTDIR */ 364 /* Attempt to execute relative to non-directory => ENOTDIR */
354 fail += check_execveat_fail(fd, "execveat", 0, ENOTDIR); 365 fail += check_execveat_fail(fd, "execveat", 0, ENOTDIR);
355 366
356 fail += check_execveat_pathmax(dot_dfd, "execveat", 0); 367 fail += check_execveat_pathmax(root_dfd, "execveat", 0);
357 fail += check_execveat_pathmax(dot_dfd, "script", 1); 368 fail += check_execveat_pathmax(root_dfd, "script", 1);
358 return fail; 369 return fail;
359} 370}
360 371
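The execveat.c change above switches the long-path test from a dfd for "." to a dfd for "/" and strips the leading slash from longpath, so the relative path stays under PATH_MAX regardless of how deep the current directory is. For reference, a hedged sketch of the raw execveat() call shape relative to a directory fd (assuming __NR_execveat is provided by the installed headers; the binary path is only an example):

#include <fcntl.h>
#include <sys/syscall.h>
#include <unistd.h>

/* sketch: exec a path relative to an fd for "/" */
static int exec_rel_to_root(void)
{
	int root_dfd = open("/", O_DIRECTORY | O_RDONLY);
	char *argv[] = { "true", NULL };
	char *envp[] = { NULL };

	if (root_dfd < 0)
		return -1;
	return syscall(__NR_execveat, root_dfd, "usr/bin/true",
		       argv, envp, 0);
}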
diff --git a/tools/testing/selftests/firmware/fw_fallback.sh b/tools/testing/selftests/firmware/fw_fallback.sh
index a52a3bab532b..34a42c68ebfb 100755
--- a/tools/testing/selftests/firmware/fw_fallback.sh
+++ b/tools/testing/selftests/firmware/fw_fallback.sh
@@ -86,6 +86,11 @@ load_fw_cancel()
86 86
87load_fw_custom() 87load_fw_custom()
88{ 88{
89 if [ ! -e "$DIR"/trigger_custom_fallback ]; then
90 echo "$0: custom fallback trigger not present, ignoring test" >&2
91 return 1
92 fi
93
89 local name="$1" 94 local name="$1"
90 local file="$2" 95 local file="$2"
91 96
@@ -108,11 +113,17 @@ load_fw_custom()
108 113
109 # Wait for request to finish. 114 # Wait for request to finish.
110 wait 115 wait
116 return 0
111} 117}
112 118
113 119
114load_fw_custom_cancel() 120load_fw_custom_cancel()
115{ 121{
122 if [ ! -e "$DIR"/trigger_custom_fallback ]; then
123	echo "$0: custom fallback cancel trigger not present, ignoring test" >&2
124 return 1
125 fi
126
116 local name="$1" 127 local name="$1"
117 local file="$2" 128 local file="$2"
118 129
@@ -133,6 +144,7 @@ load_fw_custom_cancel()
133 144
134 # Wait for request to finish. 145 # Wait for request to finish.
135 wait 146 wait
147 return 0
136} 148}
137 149
138load_fw_fallback_with_child() 150load_fw_fallback_with_child()
@@ -227,20 +239,22 @@ else
227 echo "$0: cancelling fallback mechanism works" 239 echo "$0: cancelling fallback mechanism works"
228fi 240fi
229 241
230load_fw_custom "$NAME" "$FW" 242if load_fw_custom "$NAME" "$FW" ; then
231if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then 243 if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
232 echo "$0: firmware was not loaded" >&2 244 echo "$0: firmware was not loaded" >&2
233 exit 1 245 exit 1
234else 246 else
235 echo "$0: custom fallback loading mechanism works" 247 echo "$0: custom fallback loading mechanism works"
248 fi
236fi 249fi
237 250
238load_fw_custom_cancel "nope-$NAME" "$FW" 251if load_fw_custom_cancel "nope-$NAME" "$FW" ; then
239if diff -q "$FW" /dev/test_firmware >/dev/null ; then 252 if diff -q "$FW" /dev/test_firmware >/dev/null ; then
240 echo "$0: firmware was expected to be cancelled" >&2 253 echo "$0: firmware was expected to be cancelled" >&2
241 exit 1 254 exit 1
242else 255 else
243 echo "$0: cancelling custom fallback mechanism works" 256 echo "$0: cancelling custom fallback mechanism works"
257 fi
244fi 258fi
245 259
246set +e 260set +e
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
index 62f2d6f54929..b1f20fef36c7 100755
--- a/tools/testing/selftests/firmware/fw_filesystem.sh
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -70,9 +70,13 @@ if printf '\000' >"$DIR"/trigger_request 2> /dev/null; then
70 exit 1 70 exit 1
71fi 71fi
72 72
73if printf '\000' >"$DIR"/trigger_async_request 2> /dev/null; then 73if [ ! -e "$DIR"/trigger_async_request ]; then
74 echo "$0: empty filename should not succeed (async)" >&2 74 echo "$0: empty filename: async trigger not present, ignoring test" >&2
75 exit 1 75else
76 if printf '\000' >"$DIR"/trigger_async_request 2> /dev/null; then
77 echo "$0: empty filename should not succeed (async)" >&2
78 exit 1
79 fi
76fi 80fi
77 81
78# Request a firmware that doesn't exist, it should fail. 82# Request a firmware that doesn't exist, it should fail.
@@ -105,17 +109,21 @@ else
105fi 109fi
106 110
107# Try the asynchronous version too 111# Try the asynchronous version too
108if ! echo -n "$NAME" >"$DIR"/trigger_async_request ; then 112if [ ! -e "$DIR"/trigger_async_request ]; then
109 echo "$0: could not trigger async request" >&2 113 echo "$0: firmware loading: async trigger not present, ignoring test" >&2
110 exit 1
111fi
112
113# Verify the contents are what we expect.
114if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
115 echo "$0: firmware was not loaded (async)" >&2
116 exit 1
117else 114else
118 echo "$0: async filesystem loading works" 115 if ! echo -n "$NAME" >"$DIR"/trigger_async_request ; then
116 echo "$0: could not trigger async request" >&2
117 exit 1
118 fi
119
120 # Verify the contents are what we expect.
121 if ! diff -q "$FW" /dev/test_firmware >/dev/null ; then
122 echo "$0: firmware was not loaded (async)" >&2
123 exit 1
124 else
125 echo "$0: async filesystem loading works"
126 fi
119fi 127fi
120 128
121### Batched requests tests 129### Batched requests tests
diff --git a/tools/testing/selftests/ftrace/config b/tools/testing/selftests/ftrace/config
index 8a1c9f949fe0..b01924c71c09 100644
--- a/tools/testing/selftests/ftrace/config
+++ b/tools/testing/selftests/ftrace/config
@@ -1,2 +1,6 @@
1CONFIG_KPROBES=y 1CONFIG_KPROBES=y
2CONFIG_FTRACE=y 2CONFIG_FTRACE=y
3CONFIG_FUNCTION_PROFILER=y
4CONFIG_TRACER_SNAPSHOT=y
5CONFIG_STACK_TRACER=y
6CONFIG_HIST_TRIGGERS=y
diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest
index abc706cf7702..f9a9d424c980 100755
--- a/tools/testing/selftests/ftrace/ftracetest
+++ b/tools/testing/selftests/ftrace/ftracetest
@@ -222,7 +222,14 @@ SIG_RESULT=
222SIG_BASE=36 # Use realtime signals 222SIG_BASE=36 # Use realtime signals
223SIG_PID=$$ 223SIG_PID=$$
224 224
225exit_pass () {
226 exit 0
227}
228
225SIG_FAIL=$((SIG_BASE + FAIL)) 229SIG_FAIL=$((SIG_BASE + FAIL))
230exit_fail () {
231 exit 1
232}
226trap 'SIG_RESULT=$FAIL' $SIG_FAIL 233trap 'SIG_RESULT=$FAIL' $SIG_FAIL
227 234
228SIG_UNRESOLVED=$((SIG_BASE + UNRESOLVED)) 235SIG_UNRESOLVED=$((SIG_BASE + UNRESOLVED))
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/basic4.tc b/tools/testing/selftests/ftrace/test.d/00basic/basic4.tc
index aa51f6c17359..0696098d6408 100644
--- a/tools/testing/selftests/ftrace/test.d/00basic/basic4.tc
+++ b/tools/testing/selftests/ftrace/test.d/00basic/basic4.tc
@@ -2,4 +2,4 @@
2# description: Basic event tracing check 2# description: Basic event tracing check
3test -f available_events -a -f set_event -a -d events 3test -f available_events -a -f set_event -a -d events
4# check scheduler events are available 4# check scheduler events are available
5grep -q sched available_events && exit 0 || exit $FAIL 5grep -q sched available_events && exit_pass || exit_fail
diff --git a/tools/testing/selftests/ftrace/test.d/event/event-enable.tc b/tools/testing/selftests/ftrace/test.d/event/event-enable.tc
index 6ff851a75884..9daf034186f5 100644
--- a/tools/testing/selftests/ftrace/test.d/event/event-enable.tc
+++ b/tools/testing/selftests/ftrace/test.d/event/event-enable.tc
@@ -11,7 +11,7 @@ do_reset() {
11fail() { #msg 11fail() { #msg
12 do_reset 12 do_reset
13 echo $1 13 echo $1
14 exit $FAIL 14 exit_fail
15} 15}
16 16
17yield() { 17yield() {
diff --git a/tools/testing/selftests/ftrace/test.d/event/event-pid.tc b/tools/testing/selftests/ftrace/test.d/event/event-pid.tc
index cc14feec6e1f..132478b305c2 100644
--- a/tools/testing/selftests/ftrace/test.d/event/event-pid.tc
+++ b/tools/testing/selftests/ftrace/test.d/event/event-pid.tc
@@ -13,7 +13,7 @@ do_reset() {
13fail() { #msg 13fail() { #msg
14 do_reset 14 do_reset
15 echo $1 15 echo $1
16 exit $FAIL 16 exit_fail
17} 17}
18 18
19yield() { 19yield() {
diff --git a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
index 85094904aa79..6a37a8642ee6 100644
--- a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
+++ b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
@@ -11,7 +11,7 @@ do_reset() {
11fail() { #msg 11fail() { #msg
12 do_reset 12 do_reset
13 echo $1 13 echo $1
14 exit $FAIL 14 exit_fail
15} 15}
16 16
17yield() { 17yield() {
diff --git a/tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc b/tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc
index cc1cf4d30ef5..4e9b6e2c0219 100644
--- a/tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc
+++ b/tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc
@@ -10,7 +10,7 @@ do_reset() {
10fail() { #msg 10fail() { #msg
11 do_reset 11 do_reset
12 echo $1 12 echo $1
13 exit $FAIL 13 exit_fail
14} 14}
15 15
16yield() { 16yield() {
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter-stack.tc b/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter-stack.tc
index 45df747887e0..1aec99d108eb 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter-stack.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter-stack.tc
@@ -28,7 +28,7 @@ do_reset() {
28fail() { # msg 28fail() { # msg
29 do_reset 29 do_reset
30 echo $1 30 echo $1
31 exit $FAIL 31 exit_fail
32} 32}
33 33
34disable_tracing 34disable_tracing
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter.tc b/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter.tc
index 0387e22e7577..9f8d27ca39cf 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter.tc
@@ -18,7 +18,7 @@ do_reset() {
18fail() { # msg 18fail() { # msg
19 do_reset 19 do_reset
20 echo $1 20 echo $1
21 exit $FAIL 21 exit_fail
22} 22}
23 23
24disable_tracing 24disable_tracing
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
index 78524fcc25ae..524ce24b3c22 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
@@ -51,7 +51,7 @@ do_reset() {
51fail() { # msg 51fail() { # msg
52 do_reset 52 do_reset
53 echo $1 53 echo $1
54 exit $FAIL 54 exit_fail
55} 55}
56 56
57yield() { 57yield() {
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
index 9d4afcca1e36..6fed4cf2db81 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
@@ -27,7 +27,7 @@ do_reset() {
27fail() { # mesg 27fail() { # mesg
28 do_reset 28 do_reset
29 echo $1 29 echo $1
30 exit $FAIL 30 exit_fail
31} 31}
32 32
33SLEEP_TIME=".1" 33SLEEP_TIME=".1"
@@ -48,8 +48,7 @@ test_event_enabled() {
48 48
49 e=`cat $EVENT_ENABLE` 49 e=`cat $EVENT_ENABLE`
50 if [ "$e" != $val ]; then 50 if [ "$e" != $val ]; then
51 echo "Expected $val but found $e" 51 fail "Expected $val but found $e"
52 exit 1
53 fi 52 fi
54} 53}
55 54
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_profiler.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_profiler.tc
index fe0dc5a7ea26..b2d5a8febfe8 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func_profiler.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_profiler.tc
@@ -32,7 +32,7 @@ fail() { # mesg
32 reset_tracer 32 reset_tracer
33 echo > set_ftrace_filter 33 echo > set_ftrace_filter
34 echo $1 34 echo $1
35 exit $FAIL 35 exit_fail
36} 36}
37 37
38echo "Testing function tracer with profiler:" 38echo "Testing function tracer with profiler:"
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
index 5ad723724adb..0f3f92622e33 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
@@ -26,14 +26,14 @@ do_reset() {
26fail() { # mesg 26fail() { # mesg
27 do_reset 27 do_reset
28 echo $1 28 echo $1
29 exit $FAIL 29 exit_fail
30} 30}
31 31
32do_reset 32do_reset
33 33
34FILTER=set_ftrace_filter 34FILTER=set_ftrace_filter
35FUNC1="schedule" 35FUNC1="schedule"
36FUNC2="do_IRQ" 36FUNC2="do_softirq"
37 37
38ALL_FUNCS="#### all functions enabled ####" 38ALL_FUNCS="#### all functions enabled ####"
39 39
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc
index cdc92a371cd7..f6d9ac73268a 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc
@@ -27,7 +27,7 @@ do_reset() {
27fail() { # mesg 27fail() { # mesg
28 do_reset 28 do_reset
29 echo $1 29 echo $1
30 exit $FAIL 30 exit_fail
31} 31}
32 32
33SLEEP_TIME=".1" 33SLEEP_TIME=".1"
diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
index d7f48b55df51..4fa0f79144f4 100644
--- a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
+++ b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
@@ -11,7 +11,7 @@ fail() { # mesg
11 rmdir foo 2>/dev/null 11 rmdir foo 2>/dev/null
12 echo $1 12 echo $1
13 set -e 13 set -e
14 exit $FAIL 14 exit_fail
15} 15}
16 16
17cd instances 17cd instances
diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance.tc b/tools/testing/selftests/ftrace/test.d/instances/instance.tc
index ddda62203366..b84651283bf3 100644
--- a/tools/testing/selftests/ftrace/test.d/instances/instance.tc
+++ b/tools/testing/selftests/ftrace/test.d/instances/instance.tc
@@ -11,7 +11,7 @@ fail() { # mesg
11 rmdir x y z 2>/dev/null 11 rmdir x y z 2>/dev/null
12 echo $1 12 echo $1
13 set -e 13 set -e
14 exit $FAIL 14 exit_fail
15} 15}
16 16
17cd instances 17cd instances
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/busy_check.tc b/tools/testing/selftests/ftrace/test.d/kprobe/busy_check.tc
index 0e6f415c6152..bbc443a9190c 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/busy_check.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/busy_check.tc
@@ -9,7 +9,7 @@ echo > kprobe_events
9echo p:myevent _do_fork > kprobe_events 9echo p:myevent _do_fork > kprobe_events
10test -d events/kprobes/myevent 10test -d events/kprobes/myevent
11echo 1 > events/kprobes/myevent/enable 11echo 1 > events/kprobes/myevent/enable
12echo > kprobe_events && exit 1 # this must fail 12echo > kprobe_events && exit_fail # this must fail
13echo 0 > events/kprobes/myevent/enable 13echo 0 > events/kprobes/myevent/enable
14echo > kprobe_events # this must succeed 14echo > kprobe_events # this must succeed
15clear_trace 15clear_trace
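The busy_check.tc change relies on a detail of errexit: a command that fails on the left-hand side of an && list does not abort the script, so "echo > kprobe_events && exit_fail" lets the expected failure pass through silently while an unexpected success trips exit_fail. A small standalone sketch of that behavior, with a hypothetical must_fail helper expressing the same idea:

    #!/bin/sh
    set -e                        # same errexit mode the ftrace tests run under

    must_fail() { ! "$@"; }       # hypothetical helper: succeed only if the command fails

    false && echo "unexpected"    # 'false' is tested by &&, so errexit does not fire
    must_fail false               # returns 0, script continues
    echo "still running"          # reached; a bare 'false' above would have aborted the script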
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args.tc
index 679bbd23bcc3..8b43c6804fc3 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args.tc
@@ -14,5 +14,5 @@ echo 1 > events/kprobes/testprobe/enable
14echo 0 > events/kprobes/testprobe/enable 14echo 0 > events/kprobes/testprobe/enable
15echo "-:testprobe" >> kprobe_events 15echo "-:testprobe" >> kprobe_events
16clear_trace 16clear_trace
17test -d events/kprobes/testprobe && exit 1 || exit 0 17test -d events/kprobes/testprobe && exit_fail || exit_pass
18 18
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
index 17d33ba192f6..2a1755bfc290 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
@@ -35,4 +35,4 @@ check_types $ARGS
35 35
36echo "-:testprobe" >> kprobe_events 36echo "-:testprobe" >> kprobe_events
37clear_trace 37clear_trace
38test -d events/kprobes/testprobe && exit 1 || exit 0 38test -d events/kprobes/testprobe && exit_fail || exit_pass
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_args.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_args.tc
index f1825bdbe3f3..321954683aaa 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_args.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_args.tc
@@ -14,4 +14,4 @@ echo 1 > events/kprobes/testprobe2/enable
14echo 0 > events/kprobes/testprobe2/enable 14echo 0 > events/kprobes/testprobe2/enable
15echo '-:testprobe2' >> kprobe_events 15echo '-:testprobe2' >> kprobe_events
16clear_trace 16clear_trace
17test -d events/kprobes/testprobe2 && exit 1 || exit 0 17test -d events/kprobes/testprobe2 && exit_fail || exit_pass
diff --git a/tools/testing/selftests/ftrace/test.d/template b/tools/testing/selftests/ftrace/test.d/template
index 5448f7abad5f..5c39ceb18a0d 100644
--- a/tools/testing/selftests/ftrace/test.d/template
+++ b/tools/testing/selftests/ftrace/test.d/template
@@ -4,6 +4,7 @@
4# Note that all tests are run with "errexit" option. 4# Note that all tests are run with "errexit" option.
5 5
6exit 0 # Return 0 if the test is passed, otherwise return !0 6exit 0 # Return 0 if the test is passed, otherwise return !0
7# Or you can call exit_pass for passed test, and exit_fail for failed test.
7# If the test could not run because of lack of feature, call exit_unsupported 8# If the test could not run because of lack of feature, call exit_unsupported
8# If the test returned unclear results, call exit_unresolved 9# If the test returned unclear results, call exit_unresolved
9# If the test is a dummy, or a placeholder, call exit_untested 10# If the test is a dummy, or a placeholder, call exit_untested
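The template now points test authors at a small family of result helpers (exit_pass, exit_fail, exit_unsupported, exit_unresolved, exit_untested). A hedged sketch of how a feature-gated test typically uses them; the probed file and commands here are illustrative, not taken from an existing test:

    # hypothetical feature-gated .tc test; the helpers are provided by ftracetest
    if [ ! -f set_ftrace_filter ]; then
            exit_unsupported      # kernel was built without the needed interface
    fi

    echo schedule > set_ftrace_filter || exit_fail
    grep -q '^schedule$' set_ftrace_filter && exit_pass || exit_fail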
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-eventonoff.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-eventonoff.tc
index 839ac4320b24..28cc355a3a7b 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-eventonoff.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-eventonoff.tc
@@ -12,7 +12,7 @@ do_reset() {
12fail() { #msg 12fail() { #msg
13 do_reset 13 do_reset
14 echo $1 14 echo $1
15 exit $FAIL 15 exit_fail
16} 16}
17 17
18if [ ! -f set_event -o ! -d events/sched ]; then 18if [ ! -f set_event -o ! -d events/sched ]; then
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-filter.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-filter.tc
index 66873c4b12c9..a48e23eb8a8b 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-filter.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-filter.tc
@@ -12,7 +12,7 @@ do_reset() {
12fail() { #msg 12fail() { #msg
13 do_reset 13 do_reset
14 echo $1 14 echo $1
15 exit $FAIL 15 exit_fail
16} 16}
17 17
18if [ ! -f set_event -o ! -d events/sched ]; then 18if [ ! -f set_event -o ! -d events/sched ]; then
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc
index 4237b32769f1..8da80efc44d8 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc
@@ -12,7 +12,7 @@ do_reset() {
12fail() { #msg 12fail() { #msg
13 do_reset 13 do_reset
14 echo $1 14 echo $1
15 exit $FAIL 15 exit_fail
16} 16}
17 17
18if [ ! -f set_event -o ! -d events/sched ]; then 18if [ ! -f set_event -o ! -d events/sched ]; then
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc
index d24e2b8bd863..449fe9ff91a2 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc
@@ -12,7 +12,7 @@ do_reset() {
12fail() { #msg 12fail() { #msg
13 do_reset 13 do_reset
14 echo $1 14 echo $1
15 exit $FAIL 15 exit_fail
16} 16}
17 17
18if [ ! -f set_event -o ! -d events/sched ]; then 18if [ ! -f set_event -o ! -d events/sched ]; then
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc
index 4c0774fff378..c5ef8b9d02b3 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc
@@ -12,7 +12,7 @@ do_reset() {
12fail() { #msg 12fail() { #msg
13 do_reset 13 do_reset
14 echo $1 14 echo $1
15 exit $FAIL 15 exit_fail
16} 16}
17 17
18if [ ! -f set_event -o ! -d events/sched ]; then 18if [ ! -f set_event -o ! -d events/sched ]; then
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc
index 3fc6321e081f..ed38f0050d77 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc
@@ -11,7 +11,7 @@ do_reset() {
11fail() { #msg 11fail() { #msg
12 do_reset 12 do_reset
13 echo $1 13 echo $1
14 exit $FAIL 14 exit_fail
15} 15}
16 16
17if [ ! -f set_event -o ! -d events/sched ]; then 17if [ ! -f set_event -o ! -d events/sched ]; then
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-stacktrace.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-stacktrace.tc
index 3652824f81ed..3121d795a868 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-stacktrace.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-stacktrace.tc
@@ -11,7 +11,7 @@ do_reset() {
11fail() { #msg 11fail() { #msg
12 do_reset 12 do_reset
13 echo $1 13 echo $1
14 exit $FAIL 14 exit_fail
15} 15}
16 16
17if [ ! -f set_event -o ! -d events/sched ]; then 17if [ ! -f set_event -o ! -d events/sched ]; then
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-traceonoff.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-traceonoff.tc
index 6d9051cdf408..c59d9eb546da 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-traceonoff.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-traceonoff.tc
@@ -11,7 +11,7 @@ do_reset() {
11fail() { #msg 11fail() { #msg
12 do_reset 12 do_reset
13 echo $1 13 echo $1
14 exit $FAIL 14 exit_fail
15} 15}
16 16
17if [ ! -f set_event -o ! -d events/sched ]; then 17if [ ! -f set_event -o ! -d events/sched ]; then
diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c
index 845e5f67b6f0..132a54f74e88 100644
--- a/tools/testing/selftests/memfd/memfd_test.c
+++ b/tools/testing/selftests/memfd/memfd_test.c
@@ -515,7 +515,7 @@ static void mfd_assert_grow_write(int fd)
515 515
516 buf = malloc(mfd_def_size * 8); 516 buf = malloc(mfd_def_size * 8);
517 if (!buf) { 517 if (!buf) {
518 printf("malloc(%d) failed: %m\n", mfd_def_size * 8); 518 printf("malloc(%zu) failed: %m\n", mfd_def_size * 8);
519 abort(); 519 abort();
520 } 520 }
521 521
@@ -535,7 +535,7 @@ static void mfd_fail_grow_write(int fd)
535 535
536 buf = malloc(mfd_def_size * 8); 536 buf = malloc(mfd_def_size * 8);
537 if (!buf) { 537 if (!buf) {
538 printf("malloc(%d) failed: %m\n", mfd_def_size * 8); 538 printf("malloc(%zu) failed: %m\n", mfd_def_size * 8);
539 abort(); 539 abort();
540 } 540 }
541 541
diff --git a/tools/testing/selftests/memory-hotplug/Makefile b/tools/testing/selftests/memory-hotplug/Makefile
index 23db11c94b59..86636d207adf 100644
--- a/tools/testing/selftests/memory-hotplug/Makefile
+++ b/tools/testing/selftests/memory-hotplug/Makefile
@@ -4,10 +4,10 @@ all:
4include ../lib.mk 4include ../lib.mk
5 5
6TEST_PROGS := mem-on-off-test.sh 6TEST_PROGS := mem-on-off-test.sh
7override RUN_TESTS := ./mem-on-off-test.sh -r 2 || echo "selftests: memory-hotplug [FAIL]" 7override RUN_TESTS := ./mem-on-off-test.sh -r 2 && echo "selftests: memory-hotplug [PASS]" || echo "selftests: memory-hotplug [FAIL]"
8override EMIT_TESTS := echo "$(RUN_TESTS)" 8override EMIT_TESTS := echo "$(RUN_TESTS)"
9 9
10run_full_test: 10run_full_test:
11 @/bin/bash ./mem-on-off-test.sh || echo "memory-hotplug selftests: [FAIL]" 11 @/bin/bash ./mem-on-off-test.sh && echo "memory-hotplug selftests: [PASS]" || echo "memory-hotplug selftests: [FAIL]"
12 12
13clean: 13clean:
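The Makefile change makes the and-or chain report both outcomes: with only "|| echo ... [FAIL]", a passing run printed nothing at all. The shape of the idiom, with a placeholder test name in place of mem-on-off-test.sh:

    # placeholder command; the real targets substitute mem-on-off-test.sh
    ./some-test.sh && echo "selftests: some-test [PASS]" || echo "selftests: some-test [FAIL]"

One quirk of the idiom worth knowing: the || branch would also run if the PASS echo itself failed, which is harmless here.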
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index e57b4ac40e72..7177bea1fdfa 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -1,3 +1,4 @@
1CONFIG_USER_NS=y 1CONFIG_USER_NS=y
2CONFIG_BPF_SYSCALL=y 2CONFIG_BPF_SYSCALL=y
3CONFIG_TEST_BPF=m 3CONFIG_TEST_BPF=m
4CONFIG_NUMA=y
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 57b5ff576240..5215493166c9 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -15,6 +15,14 @@ check_err()
15 fi 15 fi
16} 16}
17 17
18# same but inverted -- used when command must fail for test to pass
19check_fail()
20{
21 if [ $1 -eq 0 ]; then
22 ret=1
23 fi
24}
25
18kci_add_dummy() 26kci_add_dummy()
19{ 27{
20 ip link add name "$devdummy" type dummy 28 ip link add name "$devdummy" type dummy
@@ -29,6 +37,26 @@ kci_del_dummy()
29 check_err $? 37 check_err $?
30} 38}
31 39
40kci_test_netconf()
41{
42 dev="$1"
43 r=$ret
44
45 ip netconf show dev "$dev" > /dev/null
46 check_err $?
47
48 for f in 4 6; do
49 ip -$f netconf show dev "$dev" > /dev/null
50 check_err $?
51 done
52
53 if [ $ret -ne 0 ] ;then
54 echo "FAIL: ip netconf show $dev"
55 test $r -eq 0 && ret=0
56 return 1
57 fi
58}
59
32# add a bridge with vlans on top 60# add a bridge with vlans on top
33kci_test_bridge() 61kci_test_bridge()
34{ 62{
@@ -55,6 +83,11 @@ kci_test_bridge()
55 check_err $? 83 check_err $?
56 ip r s t all > /dev/null 84 ip r s t all > /dev/null
57 check_err $? 85 check_err $?
86
87 for name in "$devbr" "$vlandev" "$devdummy" ; do
88 kci_test_netconf "$name"
89 done
90
58 ip -6 addr del dev "$vlandev" dead:42::1234/64 91 ip -6 addr del dev "$vlandev" dead:42::1234/64
59 check_err $? 92 check_err $?
60 93
@@ -92,6 +125,9 @@ kci_test_gre()
92 check_err $? 125 check_err $?
93 ip addr > /dev/null 126 ip addr > /dev/null
94 check_err $? 127 check_err $?
128
129 kci_test_netconf "$gredev"
130
95 ip addr del dev "$devdummy" 10.23.7.11/24 131 ip addr del dev "$devdummy" 10.23.7.11/24
96 check_err $? 132 check_err $?
97 133
@@ -235,6 +271,237 @@ kci_test_addrlabel()
235 echo "PASS: ipv6 addrlabel" 271 echo "PASS: ipv6 addrlabel"
236} 272}
237 273
274kci_test_ifalias()
275{
276 ret=0
277 namewant=$(uuidgen)
278 syspathname="/sys/class/net/$devdummy/ifalias"
279
280 ip link set dev "$devdummy" alias "$namewant"
281 check_err $?
282
283 if [ $ret -ne 0 ]; then
284 echo "FAIL: cannot set interface alias of $devdummy to $namewant"
285 return 1
286 fi
287
288 ip link show "$devdummy" | grep -q "alias $namewant"
289 check_err $?
290
291 if [ -r "$syspathname" ] ; then
292 read namehave < "$syspathname"
293 if [ "$namewant" != "$namehave" ]; then
294 echo "FAIL: did set ifalias $namewant but got $namehave"
295 return 1
296 fi
297
298 namewant=$(uuidgen)
299 echo "$namewant" > "$syspathname"
300 ip link show "$devdummy" | grep -q "alias $namewant"
301 check_err $?
302
303	# the sysfs interface also allows the alias to be deleted
304 echo "" > "$syspathname"
305
306 ip link show "$devdummy" | grep -q "alias $namewant"
307 check_fail $?
308
309 for i in $(seq 1 100); do
310 uuidgen > "$syspathname" &
311 done
312
313 wait
314
315 # re-add the alias -- kernel should free mem when dummy dev is removed
316 ip link set dev "$devdummy" alias "$namewant"
317 check_err $?
318 fi
319
320 if [ $ret -ne 0 ]; then
321 echo "FAIL: set interface alias $devdummy to $namewant"
322 return 1
323 fi
324
325 echo "PASS: set ifalias $namewant for $devdummy"
326}
327
328kci_test_vrf()
329{
330 vrfname="test-vrf"
331 ret=0
332
333 ip link show type vrf 2>/dev/null
334 if [ $? -ne 0 ]; then
335 echo "SKIP: vrf: iproute2 too old"
336 return 0
337 fi
338
339 ip link add "$vrfname" type vrf table 10
340 check_err $?
341 if [ $ret -ne 0 ];then
342 echo "FAIL: can't add vrf interface, skipping test"
343 return 0
344 fi
345
346 ip -br link show type vrf | grep -q "$vrfname"
347 check_err $?
348 if [ $ret -ne 0 ];then
349 echo "FAIL: created vrf device not found"
350 return 1
351 fi
352
353 ip link set dev "$vrfname" up
354 check_err $?
355
356 ip link set dev "$devdummy" master "$vrfname"
357 check_err $?
358 ip link del dev "$vrfname"
359 check_err $?
360
361 if [ $ret -ne 0 ];then
362 echo "FAIL: vrf"
363 return 1
364 fi
365
366 echo "PASS: vrf"
367}
368
369kci_test_encap_vxlan()
370{
371 ret=0
372 vxlan="test-vxlan0"
373 vlan="test-vlan0"
374 testns="$1"
375
376 ip netns exec "$testns" ip link add "$vxlan" type vxlan id 42 group 239.1.1.1 \
377 dev "$devdummy" dstport 4789 2>/dev/null
378 if [ $? -ne 0 ]; then
379 echo "FAIL: can't add vxlan interface, skipping test"
380 return 0
381 fi
382 check_err $?
383
384 ip netns exec "$testns" ip addr add 10.2.11.49/24 dev "$vxlan"
385 check_err $?
386
387 ip netns exec "$testns" ip link set up dev "$vxlan"
388 check_err $?
389
390 ip netns exec "$testns" ip link add link "$vxlan" name "$vlan" type vlan id 1
391 check_err $?
392
393 ip netns exec "$testns" ip link del "$vxlan"
394 check_err $?
395
396 if [ $ret -ne 0 ]; then
397 echo "FAIL: vxlan"
398 return 1
399 fi
400 echo "PASS: vxlan"
401}
402
403kci_test_encap_fou()
404{
405 ret=0
406 name="test-fou"
407 testns="$1"
408
409 ip fou help 2>&1 |grep -q 'Usage: ip fou'
410 if [ $? -ne 0 ];then
411 echo "SKIP: fou: iproute2 too old"
412 return 1
413 fi
414
415 ip netns exec "$testns" ip fou add port 7777 ipproto 47 2>/dev/null
416 if [ $? -ne 0 ];then
417 echo "FAIL: can't add fou port 7777, skipping test"
418 return 1
419 fi
420
421 ip netns exec "$testns" ip fou add port 8888 ipproto 4
422 check_err $?
423
424 ip netns exec "$testns" ip fou del port 9999 2>/dev/null
425 check_fail $?
426
427 ip netns exec "$testns" ip fou del port 7777
428 check_err $?
429
430 if [ $ret -ne 0 ]; then
431 echo "FAIL: fou"
432 return 1
433 fi
434
435 echo "PASS: fou"
436}
437
438# test various encap methods, use netns to avoid unwanted interference
439kci_test_encap()
440{
441 testns="testns"
442 ret=0
443
444 ip netns add "$testns"
445 if [ $? -ne 0 ]; then
446 echo "SKIP encap tests: cannot add net namespace $testns"
447 return 1
448 fi
449
450 ip netns exec "$testns" ip link set lo up
451 check_err $?
452
453 ip netns exec "$testns" ip link add name "$devdummy" type dummy
454 check_err $?
455 ip netns exec "$testns" ip link set "$devdummy" up
456 check_err $?
457
458 kci_test_encap_vxlan "$testns"
459 kci_test_encap_fou "$testns"
460
461 ip netns del "$testns"
462}
463
464kci_test_macsec()
465{
466 msname="test_macsec0"
467 ret=0
468
469 ip macsec help 2>&1 | grep -q "^Usage: ip macsec"
470 if [ $? -ne 0 ]; then
471 echo "SKIP: macsec: iproute2 too old"
472 return 0
473 fi
474
475 ip link add link "$devdummy" "$msname" type macsec port 42 encrypt on
476 check_err $?
477 if [ $ret -ne 0 ];then
478 echo "FAIL: can't add macsec interface, skipping test"
479 return 1
480 fi
481
482 ip macsec add "$msname" tx sa 0 pn 1024 on key 01 12345678901234567890123456789012
483 check_err $?
484
485 ip macsec add "$msname" rx port 1234 address "1c:ed:de:ad:be:ef"
486 check_err $?
487
488 ip macsec add "$msname" rx port 1234 address "1c:ed:de:ad:be:ef" sa 0 pn 1 on key 00 0123456789abcdef0123456789abcdef
489 check_err $?
490
491 ip macsec show > /dev/null
492 check_err $?
493
494 ip link del dev "$msname"
495 check_err $?
496
497 if [ $ret -ne 0 ];then
498 echo "FAIL: macsec"
499 return 1
500 fi
501
502 echo "PASS: macsec"
503}
504
238kci_test_rtnl() 505kci_test_rtnl()
239{ 506{
240 kci_add_dummy 507 kci_add_dummy
@@ -249,6 +516,10 @@ kci_test_rtnl()
249 kci_test_gre 516 kci_test_gre
250 kci_test_bridge 517 kci_test_bridge
251 kci_test_addrlabel 518 kci_test_addrlabel
519 kci_test_ifalias
520 kci_test_vrf
521 kci_test_encap
522 kci_test_macsec
252 523
253 kci_del_dummy 524 kci_del_dummy
254} 525}
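check_fail is the inverse of the existing check_err helper and lets the new cases assert that a command is rejected. The new kci_test_* functions above all follow the same shape: reset ret, run commands through check_err/check_fail, then print PASS, FAIL or SKIP. A minimal sketch of a test written in that convention; the dummy-device commands are illustrative only:

    # sketch only; check_err/check_fail are the rtnetlink.sh helpers above
    kci_test_example()
    {
            ret=0

            ip link add name exampledummy type dummy
            check_err $?                     # must succeed

            ip link add name exampledummy type dummy 2>/dev/null
            check_fail $?                    # a duplicate add must fail

            ip link del dev exampledummy
            check_err $?

            if [ $ret -ne 0 ]; then
                    echo "FAIL: example"
                    return 1
            fi
            echo "PASS: example"
    }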
diff --git a/tools/testing/selftests/powerpc/benchmarks/context_switch.c b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
index f4241339edd2..87f1f0252299 100644
--- a/tools/testing/selftests/powerpc/benchmarks/context_switch.c
+++ b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#define _GNU_SOURCE 12#define _GNU_SOURCE
13#include <errno.h>
13#include <sched.h> 14#include <sched.h>
14#include <string.h> 15#include <string.h>
15#include <stdio.h> 16#include <stdio.h>
@@ -75,6 +76,7 @@ static void touch(void)
75 76
76static void start_thread_on(void *(*fn)(void *), void *arg, unsigned long cpu) 77static void start_thread_on(void *(*fn)(void *), void *arg, unsigned long cpu)
77{ 78{
79 int rc;
78 pthread_t tid; 80 pthread_t tid;
79 cpu_set_t cpuset; 81 cpu_set_t cpuset;
80 pthread_attr_t attr; 82 pthread_attr_t attr;
@@ -82,14 +84,23 @@ static void start_thread_on(void *(*fn)(void *), void *arg, unsigned long cpu)
82 CPU_ZERO(&cpuset); 84 CPU_ZERO(&cpuset);
83 CPU_SET(cpu, &cpuset); 85 CPU_SET(cpu, &cpuset);
84 86
85 pthread_attr_init(&attr); 87 rc = pthread_attr_init(&attr);
88 if (rc) {
89 errno = rc;
90 perror("pthread_attr_init");
91 exit(1);
92 }
86 93
87 if (pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset)) { 94 rc = pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
95 if (rc) {
96 errno = rc;
88 perror("pthread_attr_setaffinity_np"); 97 perror("pthread_attr_setaffinity_np");
89 exit(1); 98 exit(1);
90 } 99 }
91 100
92 if (pthread_create(&tid, &attr, fn, arg)) { 101 rc = pthread_create(&tid, &attr, fn, arg);
102 if (rc) {
103 errno = rc;
93 perror("pthread_create"); 104 perror("pthread_create");
94 exit(1); 105 exit(1);
95 } 106 }
diff --git a/tools/testing/selftests/powerpc/dscr/dscr.h b/tools/testing/selftests/powerpc/dscr/dscr.h
index 18ea223bd398..cdb840bc54f2 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr.h
+++ b/tools/testing/selftests/powerpc/dscr/dscr.h
@@ -39,7 +39,7 @@
39#define rmb() asm volatile("lwsync":::"memory") 39#define rmb() asm volatile("lwsync":::"memory")
40#define wmb() asm volatile("lwsync":::"memory") 40#define wmb() asm volatile("lwsync":::"memory")
41 41
42#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) 42#define READ_ONCE(x) (*(volatile typeof(x) *)&(x))
43 43
44/* Prilvilege state DSCR access */ 44/* Prilvilege state DSCR access */
45inline unsigned long get_dscr(void) 45inline unsigned long get_dscr(void)
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_default_test.c b/tools/testing/selftests/powerpc/dscr/dscr_default_test.c
index df17c3bab0a7..9e1a37e93b63 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_default_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_default_test.c
@@ -27,7 +27,7 @@ static void *do_test(void *in)
27 unsigned long d, cur_dscr, cur_dscr_usr; 27 unsigned long d, cur_dscr, cur_dscr_usr;
28 unsigned long s1, s2; 28 unsigned long s1, s2;
29 29
30 s1 = ACCESS_ONCE(sequence); 30 s1 = READ_ONCE(sequence);
31 if (s1 & 1) 31 if (s1 & 1)
32 continue; 32 continue;
33 rmb(); 33 rmb();
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
index 17fb1b43c320..1899bd85121f 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
@@ -53,6 +53,8 @@ static int check_all_cpu_dscr_defaults(unsigned long val)
53 } 53 }
54 54
55 while ((dp = readdir(sysfs))) { 55 while ((dp = readdir(sysfs))) {
56 int len;
57
56 if (!(dp->d_type & DT_DIR)) 58 if (!(dp->d_type & DT_DIR))
57 continue; 59 continue;
58 if (!strcmp(dp->d_name, "cpuidle")) 60 if (!strcmp(dp->d_name, "cpuidle"))
@@ -60,7 +62,9 @@ static int check_all_cpu_dscr_defaults(unsigned long val)
60 if (!strstr(dp->d_name, "cpu")) 62 if (!strstr(dp->d_name, "cpu"))
61 continue; 63 continue;
62 64
63 sprintf(file, "%s%s/dscr", CPU_PATH, dp->d_name); 65 len = snprintf(file, LEN_MAX, "%s%s/dscr", CPU_PATH, dp->d_name);
66 if (len >= LEN_MAX)
67 continue;
64 if (access(file, F_OK)) 68 if (access(file, F_OK))
65 continue; 69 continue;
66 70
diff --git a/tools/testing/selftests/powerpc/tm/.gitignore b/tools/testing/selftests/powerpc/tm/.gitignore
index 2f1f7b013293..241a4a4ee0e4 100644
--- a/tools/testing/selftests/powerpc/tm/.gitignore
+++ b/tools/testing/selftests/powerpc/tm/.gitignore
@@ -12,3 +12,4 @@ tm-signal-context-chk-gpr
12tm-signal-context-chk-vmx 12tm-signal-context-chk-vmx
13tm-signal-context-chk-vsx 13tm-signal-context-chk-vsx
14tm-vmx-unavail 14tm-vmx-unavail
15tm-unavailable
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index fca7c7f5e640..8ed6f8c57230 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -3,7 +3,7 @@ SIGNAL_CONTEXT_CHK_TESTS := tm-signal-context-chk-gpr tm-signal-context-chk-fpu
3 tm-signal-context-chk-vmx tm-signal-context-chk-vsx 3 tm-signal-context-chk-vmx tm-signal-context-chk-vsx
4 4
5TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack \ 5TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack \
6 tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail \ 6 tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable \
7 $(SIGNAL_CONTEXT_CHK_TESTS) 7 $(SIGNAL_CONTEXT_CHK_TESTS)
8 8
9include ../../lib.mk 9include ../../lib.mk
@@ -17,6 +17,7 @@ $(OUTPUT)/tm-syscall: CFLAGS += -I../../../../../usr/include
17$(OUTPUT)/tm-tmspr: CFLAGS += -pthread 17$(OUTPUT)/tm-tmspr: CFLAGS += -pthread
18$(OUTPUT)/tm-vmx-unavail: CFLAGS += -pthread -m64 18$(OUTPUT)/tm-vmx-unavail: CFLAGS += -pthread -m64
19$(OUTPUT)/tm-resched-dscr: ../pmu/lib.o 19$(OUTPUT)/tm-resched-dscr: ../pmu/lib.o
20$(OUTPUT)/tm-unavailable: CFLAGS += -O0 -pthread -m64 -Wno-error=uninitialized -mvsx
20 21
21SIGNAL_CONTEXT_CHK_TESTS := $(patsubst %,$(OUTPUT)/%,$(SIGNAL_CONTEXT_CHK_TESTS)) 22SIGNAL_CONTEXT_CHK_TESTS := $(patsubst %,$(OUTPUT)/%,$(SIGNAL_CONTEXT_CHK_TESTS))
22$(SIGNAL_CONTEXT_CHK_TESTS): tm-signal.S 23$(SIGNAL_CONTEXT_CHK_TESTS): tm-signal.S
diff --git a/tools/testing/selftests/powerpc/tm/tm-unavailable.c b/tools/testing/selftests/powerpc/tm/tm-unavailable.c
new file mode 100644
index 000000000000..96c37f84ce54
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/tm-unavailable.c
@@ -0,0 +1,371 @@
1/*
2 * Copyright 2017, Gustavo Romero, Breno Leitao, Cyril Bur, IBM Corp.
3 * Licensed under GPLv2.
4 *
5 * Force FP, VEC and VSX unavailable exceptions during a transaction in all
6 * possible scenarios regarding the MSR.FP and MSR.VEC state, e.g. when FP
7 * is enabled and VEC is disabled, when FP is disabled and VEC is enabled, and
8 * so on. Then we check that the FP and VEC registers are restored to the
9 * state we set just before entering TM, i.e. we check whether an abort due
10 * to an unavailable exception in TM somehow corrupts the recheckpointed FP
11 * and VEC/Altivec registers.
12 * N.B. In this test we do not check all the FP/Altivec/VSX registers for
13 * corruption, but only vs0 and vs32, which are representative of the FP
14 * and VEC/Altivec register sets, respectively.
15 */
16
17#define _GNU_SOURCE
18#include <stdio.h>
19#include <stdlib.h>
20#include <unistd.h>
21#include <inttypes.h>
22#include <stdbool.h>
23#include <pthread.h>
24#include <sched.h>
25
26#include "tm.h"
27
28#define DEBUG 0
29
30/* Unavailable exceptions to test in HTM */
31#define FP_UNA_EXCEPTION 0
32#define VEC_UNA_EXCEPTION 1
33#define VSX_UNA_EXCEPTION 2
34
35#define NUM_EXCEPTIONS 3
36
37struct Flags {
38 int touch_fp;
39 int touch_vec;
40 int result;
41 int exception;
42} flags;
43
44bool expecting_failure(void)
45{
46 if (flags.touch_fp && flags.exception == FP_UNA_EXCEPTION)
47 return false;
48
49 if (flags.touch_vec && flags.exception == VEC_UNA_EXCEPTION)
50 return false;
51
52 /*
53 * If both FP and VEC are touched it does not mean that touching VSX
54 * won't raise an exception. However since FP and VEC state are already
55 * correctly loaded, the transaction is not aborted (i.e.
56 * treclaimed/trecheckpointed) and MSR.VSX is just set as 1, so a TM
57	 * failure is not expected in this case either.
58 */
59 if ((flags.touch_fp && flags.touch_vec) &&
60 flags.exception == VSX_UNA_EXCEPTION)
61 return false;
62
63 return true;
64}
65
66/* Check if failure occurred whilst in transaction. */
67bool is_failure(uint64_t condition_reg)
68{
69 /*
70 * When failure handling occurs, CR0 is set to 0b1010 (0xa). Otherwise
71	 * the transaction completes without failure and hence reaches the 'tend.'
72 * that sets CR0 to 0b0100 (0x4).
73 */
74 return ((condition_reg >> 28) & 0xa) == 0xa;
75}
76
77void *ping(void *input)
78{
79
80 /*
81 * Expected values for vs0 and vs32 after a TM failure. They must never
82 * change, otherwise they got corrupted.
83 */
84 uint64_t high_vs0 = 0x5555555555555555;
85 uint64_t low_vs0 = 0xffffffffffffffff;
86 uint64_t high_vs32 = 0x5555555555555555;
87 uint64_t low_vs32 = 0xffffffffffffffff;
88
89 /* Counter for busy wait */
90 uint64_t counter = 0x1ff000000;
91
92 /*
93 * Variable to keep a copy of CR register content taken just after we
94 * leave the transactional state.
95 */
96 uint64_t cr_ = 0;
97
98 /*
99 * Wait a bit so thread can get its name "ping". This is not important
100 * to reproduce the issue but it's nice to have for systemtap debugging.
101 */
102 if (DEBUG)
103 sleep(1);
104
105 printf("If MSR.FP=%d MSR.VEC=%d: ", flags.touch_fp, flags.touch_vec);
106
107 if (flags.exception != FP_UNA_EXCEPTION &&
108 flags.exception != VEC_UNA_EXCEPTION &&
109 flags.exception != VSX_UNA_EXCEPTION) {
110 printf("No valid exception specified to test.\n");
111 return NULL;
112 }
113
114 asm (
115 /* Prepare to merge low and high. */
116 " mtvsrd 33, %[high_vs0] ;"
117 " mtvsrd 34, %[low_vs0] ;"
118
119 /*
120		 * Adjust VS0 expected value after a TM failure,
121		 * i.e. vs0 = 0x5555555555555555FFFFFFFFFFFFFFFF
122 */
123 " xxmrghd 0, 33, 34 ;"
124
125 /*
126		 * Adjust VS32 expected value after a TM failure,
127		 * i.e. vs32 = 0x5555555555555555FFFFFFFFFFFFFFFF
128 */
129 " xxmrghd 32, 33, 34 ;"
130
131 /*
132 * Wait an amount of context switches so load_fp and load_vec
133 * overflow and MSR.FP, MSR.VEC, and MSR.VSX become zero (off).
134 */
135 " mtctr %[counter] ;"
136
137 /* Decrement CTR branch if CTR non zero. */
138 "1: bdnz 1b ;"
139
140 /*
141 * Check if we want to touch FP prior to the test in order
142 * to set MSR.FP = 1 before provoking an unavailable
143 * exception in TM.
144 */
145 " cmpldi %[touch_fp], 0 ;"
146 " beq no_fp ;"
147 " fadd 10, 10, 10 ;"
148 "no_fp: ;"
149
150 /*
151 * Check if we want to touch VEC prior to the test in order
152 * to set MSR.VEC = 1 before provoking an unavailable
153 * exception in TM.
154 */
155 " cmpldi %[touch_vec], 0 ;"
156 " beq no_vec ;"
157 " vaddcuw 10, 10, 10 ;"
158 "no_vec: ;"
159
160 /*
161 * Perhaps it would be a better idea to do the
162 * compares outside transactional context and simply
163 * duplicate code.
164 */
165 " tbegin. ;"
166 " beq trans_fail ;"
167
168 /* Do we do FP Unavailable? */
169 " cmpldi %[exception], %[ex_fp] ;"
170 " bne 1f ;"
171 " fadd 10, 10, 10 ;"
172 " b done ;"
173
174 /* Do we do VEC Unavailable? */
175 "1: cmpldi %[exception], %[ex_vec] ;"
176 " bne 2f ;"
177 " vaddcuw 10, 10, 10 ;"
178 " b done ;"
179
180 /*
181 * Not FP or VEC, therefore VSX. Ensure this
182 * instruction always generates a VSX Unavailable.
183 * ISA 3.0 is tricky here.
184 * (xxmrghd will on ISA 2.07 and ISA 3.0)
185 */
186 "2: xxmrghd 10, 10, 10 ;"
187
188 "done: tend. ;"
189
190 "trans_fail: ;"
191
192 /* Give values back to C. */
193 " mfvsrd %[high_vs0], 0 ;"
194 " xxsldwi 3, 0, 0, 2 ;"
195 " mfvsrd %[low_vs0], 3 ;"
196 " mfvsrd %[high_vs32], 32 ;"
197 " xxsldwi 3, 32, 32, 2 ;"
198 " mfvsrd %[low_vs32], 3 ;"
199
200 /* Give CR back to C so that it can check what happened. */
201 " mfcr %[cr_] ;"
202
203 : [high_vs0] "+r" (high_vs0),
204 [low_vs0] "+r" (low_vs0),
205 [high_vs32] "=r" (high_vs32),
206 [low_vs32] "=r" (low_vs32),
207 [cr_] "+r" (cr_)
208 : [touch_fp] "r" (flags.touch_fp),
209 [touch_vec] "r" (flags.touch_vec),
210 [exception] "r" (flags.exception),
211 [ex_fp] "i" (FP_UNA_EXCEPTION),
212 [ex_vec] "i" (VEC_UNA_EXCEPTION),
213 [ex_vsx] "i" (VSX_UNA_EXCEPTION),
214 [counter] "r" (counter)
215
216 : "cr0", "ctr", "v10", "vs0", "vs10", "vs3", "vs32", "vs33",
217 "vs34", "fr10"
218
219 );
220
221 /*
222 * Check if we were expecting a failure and it did not occur by checking
223 * CR0 state just after we leave the transaction. Either way we check if
224 * vs0 or vs32 got corrupted.
225 */
226 if (expecting_failure() && !is_failure(cr_)) {
227 printf("\n\tExpecting the transaction to fail, %s",
228 "but it didn't\n\t");
229 flags.result++;
230 }
231
232	/* Check if we were not expecting a failure and one occurred. */
233 if (!expecting_failure() && is_failure(cr_)) {
234 printf("\n\tUnexpected transaction failure 0x%02lx\n\t",
235 failure_code());
236 return (void *) -1;
237 }
238
239 /*
240 * Check if TM failed due to the cause we were expecting. 0xda is a
241 * TM_CAUSE_FAC_UNAV cause, otherwise it's an unexpected cause.
242 */
243 if (is_failure(cr_) && !failure_is_unavailable()) {
244 printf("\n\tUnexpected failure cause 0x%02lx\n\t",
245 failure_code());
246 return (void *) -1;
247 }
248
249 /* 0x4 is a success and 0xa is a fail. See comment in is_failure(). */
250 if (DEBUG)
251 printf("CR0: 0x%1lx ", cr_ >> 28);
252
253 /* Check FP (vs0) for the expected value. */
254 if (high_vs0 != 0x5555555555555555 || low_vs0 != 0xFFFFFFFFFFFFFFFF) {
255 printf("FP corrupted!");
256 printf(" high = %#16" PRIx64 " low = %#16" PRIx64 " ",
257 high_vs0, low_vs0);
258 flags.result++;
259 } else
260 printf("FP ok ");
261
262 /* Check VEC (vs32) for the expected value. */
263 if (high_vs32 != 0x5555555555555555 || low_vs32 != 0xFFFFFFFFFFFFFFFF) {
264 printf("VEC corrupted!");
265 printf(" high = %#16" PRIx64 " low = %#16" PRIx64,
266 high_vs32, low_vs32);
267 flags.result++;
268 } else
269 printf("VEC ok");
270
271 putchar('\n');
272
273 return NULL;
274}
275
276/* Thread to force context switch */
277void *pong(void *not_used)
278{
279	/* Wait a bit so this thread can get its name "pong". */
280 if (DEBUG)
281 sleep(1);
282
283 /* Classed as an interactive-like thread. */
284 while (1)
285 sched_yield();
286}
287
288/* Function that creates a thread and launches the "ping" task. */
289void test_fp_vec(int fp, int vec, pthread_attr_t *attr)
290{
291 int retries = 2;
292 void *ret_value;
293 pthread_t t0;
294
295 flags.touch_fp = fp;
296 flags.touch_vec = vec;
297
298 /*
299	 * With bad luck it is possible that the transaction aborts not due to
300	 * the unavailable exception we provoke in the middle, as we expect, but,
301	 * for instance, due to a context switch or a KVM reschedule (if
302 * it's running on a VM). Thus we try a few times before giving up,
303 * checking if the failure cause is the one we expect.
304 */
305 do {
306 /* Bind 'ping' to CPU 0, as specified in 'attr'. */
307 pthread_create(&t0, attr, ping, (void *) &flags);
308 pthread_setname_np(t0, "ping");
309 pthread_join(t0, &ret_value);
310 retries--;
311 } while (ret_value != NULL && retries);
312
313 if (!retries) {
314 flags.result = 1;
315 if (DEBUG)
316 printf("All transactions failed unexpectedly\n");
317
318 }
319}
320
321int main(int argc, char **argv)
322{
323 int exception; /* FP = 0, VEC = 1, VSX = 2 */
324 pthread_t t1;
325 pthread_attr_t attr;
326 cpu_set_t cpuset;
327
328 /* Set only CPU 0 in the mask. Both threads will be bound to CPU 0. */
329 CPU_ZERO(&cpuset);
330 CPU_SET(0, &cpuset);
331
332 /* Init pthread attribute. */
333 pthread_attr_init(&attr);
334
335 /* Set CPU 0 mask into the pthread attribute. */
336 pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
337
338 pthread_create(&t1, &attr /* Bind 'pong' to CPU 0 */, pong, NULL);
339 pthread_setname_np(t1, "pong"); /* Name it for systemtap convenience */
340
341 flags.result = 0;
342
343 for (exception = 0; exception < NUM_EXCEPTIONS; exception++) {
344 printf("Checking if FP/VEC registers are sane after");
345
346 if (exception == FP_UNA_EXCEPTION)
347 printf(" a FP unavailable exception...\n");
348
349 else if (exception == VEC_UNA_EXCEPTION)
350 printf(" a VEC unavailable exception...\n");
351
352 else
353 printf(" a VSX unavailable exception...\n");
354
355 flags.exception = exception;
356
357 test_fp_vec(0, 0, &attr);
358 test_fp_vec(1, 0, &attr);
359 test_fp_vec(0, 1, &attr);
360 test_fp_vec(1, 1, &attr);
361
362 }
363
364 if (flags.result > 0) {
365 printf("result: failed!\n");
366 exit(1);
367 } else {
368 printf("result: success\n");
369 exit(0);
370 }
371}
diff --git a/tools/testing/selftests/powerpc/tm/tm.h b/tools/testing/selftests/powerpc/tm/tm.h
index 0ffff04433c5..df4204247d45 100644
--- a/tools/testing/selftests/powerpc/tm/tm.h
+++ b/tools/testing/selftests/powerpc/tm/tm.h
@@ -47,6 +47,11 @@ static inline bool failure_is_syscall(void)
47 return (failure_code() & TM_CAUSE_SYSCALL) == TM_CAUSE_SYSCALL; 47 return (failure_code() & TM_CAUSE_SYSCALL) == TM_CAUSE_SYSCALL;
48} 48}
49 49
50static inline bool failure_is_unavailable(void)
51{
52 return (failure_code() & TM_CAUSE_FAC_UNAV) == TM_CAUSE_FAC_UNAV;
53}
54
50static inline bool failure_is_nesting(void) 55static inline bool failure_is_nesting(void)
51{ 56{
52 return (__builtin_get_texasru() & 0x400000); 57 return (__builtin_get_texasru() & 0x400000);
diff --git a/tools/testing/selftests/rcutorture/bin/config_override.sh b/tools/testing/selftests/rcutorture/bin/config_override.sh
index 49fa51726ce3..ef7fcbac3d42 100755
--- a/tools/testing/selftests/rcutorture/bin/config_override.sh
+++ b/tools/testing/selftests/rcutorture/bin/config_override.sh
@@ -42,7 +42,7 @@ else
42 exit 1 42 exit 1
43fi 43fi
44 44
45T=/tmp/config_override.sh.$$ 45T=${TMPDIR-/tmp}/config_override.sh.$$
46trap 'rm -rf $T' 0 46trap 'rm -rf $T' 0
47mkdir $T 47mkdir $T
48 48
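The config_override.sh change above, and the rcutorture scripts that follow, all apply the same substitution: the scratch directory moves from a hard-coded /tmp to ${TMPDIR-/tmp}, so a caller can point temporary files somewhere else. The idiom, with a placeholder script name:

    # placeholder name; each script uses its own basename
    T=${TMPDIR-/tmp}/myscript.sh.$$     # honor $TMPDIR when set, else fall back to /tmp
    trap 'rm -rf $T' 0                  # remove the scratch directory on exit
    mkdir $T

    # caller side, for example:
    #   TMPDIR=/var/tmp tools/testing/selftests/rcutorture/bin/kvm.sh ...

Note the expansion uses '-' rather than ':-', so a TMPDIR that is set but empty is used as-is rather than replaced by /tmp.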
diff --git a/tools/testing/selftests/rcutorture/bin/configcheck.sh b/tools/testing/selftests/rcutorture/bin/configcheck.sh
index 70fca318a82b..197deece7c7c 100755
--- a/tools/testing/selftests/rcutorture/bin/configcheck.sh
+++ b/tools/testing/selftests/rcutorture/bin/configcheck.sh
@@ -19,7 +19,7 @@
19# 19#
20# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 20# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
21 21
22T=/tmp/abat-chk-config.sh.$$ 22T=${TMPDIR-/tmp}/abat-chk-config.sh.$$
23trap 'rm -rf $T' 0 23trap 'rm -rf $T' 0
24mkdir $T 24mkdir $T
25 25
diff --git a/tools/testing/selftests/rcutorture/bin/configinit.sh b/tools/testing/selftests/rcutorture/bin/configinit.sh
index 3f81a1095206..51f66a7ce876 100755
--- a/tools/testing/selftests/rcutorture/bin/configinit.sh
+++ b/tools/testing/selftests/rcutorture/bin/configinit.sh
@@ -32,7 +32,7 @@
32# 32#
33# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 33# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
34 34
35T=/tmp/configinit.sh.$$ 35T=${TMPDIR-/tmp}/configinit.sh.$$
36trap 'rm -rf $T' 0 36trap 'rm -rf $T' 0
37mkdir $T 37mkdir $T
38 38
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-build.sh b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
index 46752c164676..fb66d0173638 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
@@ -35,7 +35,7 @@ then
35 exit 1 35 exit 1
36fi 36fi
37 37
38T=/tmp/test-linux.sh.$$ 38T=${TMPDIR-/tmp}/test-linux.sh.$$
39trap 'rm -rf $T' 0 39trap 'rm -rf $T' 0
40mkdir $T 40mkdir $T
41 41
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index 0af36a721b9c..ab14b97c942c 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -38,7 +38,7 @@
38# 38#
39# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 39# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
40 40
41T=/tmp/kvm-test-1-run.sh.$$ 41T=${TMPDIR-/tmp}/kvm-test-1-run.sh.$$
42trap 'rm -rf $T' 0 42trap 'rm -rf $T' 0
43mkdir $T 43mkdir $T
44 44
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index b55895fb10ed..ccd49e958fd2 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -30,7 +30,7 @@
30scriptname=$0 30scriptname=$0
31args="$*" 31args="$*"
32 32
33T=/tmp/kvm.sh.$$ 33T=${TMPDIR-/tmp}/kvm.sh.$$
34trap 'rm -rf $T' 0 34trap 'rm -rf $T' 0
35mkdir $T 35mkdir $T
36 36
@@ -222,7 +222,7 @@ do
222 exit 1 222 exit 1
223 fi 223 fi
224done 224done
225sort -k2nr $T/cfgcpu > $T/cfgcpu.sort 225sort -k2nr $T/cfgcpu -T="$T" > $T/cfgcpu.sort
226 226
227# Use a greedy bin-packing algorithm, sorting the list accordingly. 227# Use a greedy bin-packing algorithm, sorting the list accordingly.
228awk < $T/cfgcpu.sort > $T/cfgcpu.pack -v ncpus=$cpus ' 228awk < $T/cfgcpu.sort > $T/cfgcpu.pack -v ncpus=$cpus '
diff --git a/tools/testing/selftests/rcutorture/bin/parse-build.sh b/tools/testing/selftests/rcutorture/bin/parse-build.sh
index a6b57622c2e5..24fe5f822b28 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-build.sh
@@ -28,7 +28,7 @@
28 28
29F=$1 29F=$1
30title=$2 30title=$2
31T=/tmp/parse-build.sh.$$ 31T=${TMPDIR-/tmp}/parse-build.sh.$$
32trap 'rm -rf $T' 0 32trap 'rm -rf $T' 0
33mkdir $T 33mkdir $T
34 34
diff --git a/tools/testing/selftests/rcutorture/bin/parse-torture.sh b/tools/testing/selftests/rcutorture/bin/parse-torture.sh
index e3c5f0705696..f12c38909b00 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-torture.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-torture.sh
@@ -27,7 +27,7 @@
27# 27#
28# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 28# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
29 29
30T=/tmp/parse-torture.sh.$$ 30T=${TMPDIR-/tmp}/parse-torture.sh.$$
31file="$1" 31file="$1"
32title="$2" 32title="$2"
33 33
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h
index be3fdd351937..3f95a768a03b 100644
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h
@@ -35,8 +35,7 @@
35#define rs_smp_mb() do {} while (0) 35#define rs_smp_mb() do {} while (0)
36#endif 36#endif
37 37
38#define ACCESS_ONCE(x) (*(volatile typeof(x) *) &(x)) 38#define READ_ONCE(x) (*(volatile typeof(x) *) &(x))
39#define READ_ONCE(x) ACCESS_ONCE(x) 39#define WRITE_ONCE(x) ((*(volatile typeof(x) *) &(x)) = (val))
40#define WRITE_ONCE(x, val) (ACCESS_ONCE(x) = (val))
41 40
42#endif 41#endif
diff --git a/tools/testing/selftests/seccomp/.gitignore b/tools/testing/selftests/seccomp/.gitignore
index 346d83ca8069..5af29d3a1b0a 100644
--- a/tools/testing/selftests/seccomp/.gitignore
+++ b/tools/testing/selftests/seccomp/.gitignore
@@ -1 +1,2 @@
1seccomp_bpf 1seccomp_bpf
2seccomp_benchmark
diff --git a/tools/testing/selftests/tc-testing/.gitignore b/tools/testing/selftests/tc-testing/.gitignore
index c18dd8d83cee..7a60b85e148f 100644
--- a/tools/testing/selftests/tc-testing/.gitignore
+++ b/tools/testing/selftests/tc-testing/.gitignore
@@ -1 +1,2 @@
1__pycache__/ 1__pycache__/
2*.pyc
diff --git a/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt b/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt
index 4e09257bc443..00438331ba47 100644
--- a/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt
+++ b/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt
@@ -34,6 +34,12 @@ category: A list of single-word descriptions covering what the command
34setup: The list of commands required to ensure the command under test 34setup: The list of commands required to ensure the command under test
35 succeeds. For example: if testing a filter, the command to create 35 succeeds. For example: if testing a filter, the command to create
36 the qdisc would appear here. 36 the qdisc would appear here.
37 This list can be empty.
38 Each command can be a string to be executed, or a list consisting
39 of a string which is a command to be executed, followed by 1 or
40 more acceptable exit codes for this command.
41 If only a string is given for the command, then an exit code of 0
42 will be expected.
37cmdUnderTest: The tc command being tested itself. 43cmdUnderTest: The tc command being tested itself.
38expExitCode: The code returned by the command under test upon its termination. 44expExitCode: The code returned by the command under test upon its termination.
39 tdc will compare this value against the actual returned value. 45 tdc will compare this value against the actual returned value.
@@ -49,6 +55,12 @@ matchCount: How many times the regex in matchPattern should match. A value
49teardown: The list of commands to clean up after the test is completed. 55teardown: The list of commands to clean up after the test is completed.
50 The environment should be returned to the same state as when 56 The environment should be returned to the same state as when
51 this test was started: qdiscs deleted, actions flushed, etc. 57 this test was started: qdiscs deleted, actions flushed, etc.
58 This list can be empty.
59 Each command can be a string to be executed, or a list consisting
60 of a string which is a command to be executed, followed by 1 or
61 more acceptable exit codes for this command.
62 If only a string is given for the command, then an exit code of 0
63 will be expected.
52 64
53 65
54SETUP/TEARDOWN ERRORS 66SETUP/TEARDOWN ERRORS
diff --git a/tools/testing/selftests/tc-testing/creating-testcases/example.json b/tools/testing/selftests/tc-testing/creating-testcases/example.json
new file mode 100644
index 000000000000..5ec501200970
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/creating-testcases/example.json
@@ -0,0 +1,55 @@
1[
2 {
3 "id": "1f",
4 "name": "simple test to test framework",
5 "category": [
6 "example"
7 ],
8 "setup": [
9 "mkdir mytest"
10 ],
11 "cmdUnderTest": "touch mytest/blorfl",
12 "expExitCode": "0",
13 "verifyCmd": "ls mytest/* | grep '[b]lorfl'",
14 "matchPattern": "orfl",
15 "matchCount": "1",
16 "teardown": [
17 "rm -rf mytest"
18 ]
19 },
20 {
21 "id": "2f",
22 "name": "simple test, no need for verify",
23 "category": [
24 "example"
25 ],
26 "setup": [
27 "mkdir mytest",
28 "touch mytest/blorfl"
29 ],
30 "cmdUnderTest": "ls mytest/blorfl",
31 "expExitCode": "0",
32 "verifyCmd": "/bin/true",
33 "matchPattern": " ",
34 "matchCount": "0",
35 "teardown": [
36 "rm -rf mytest"
37 ]
38 },
39 {
40 "id": "3f",
41 "name": "simple test, no need for setup or teardown (or verify)",
42 "category": [
43 "example"
44 ],
45 "setup": [
46 ],
47 "cmdUnderTest": "ip l l lo",
48 "expExitCode": "0",
49 "verifyCmd": "/bin/true",
50 "matchPattern": " ",
51 "matchCount": "0",
52 "teardown": [
53 ]
54 }
55]
diff --git a/tools/testing/selftests/tc-testing/creating-testcases/template.json b/tools/testing/selftests/tc-testing/creating-testcases/template.json
index 87971744bdd4..8b99b86d65bd 100644
--- a/tools/testing/selftests/tc-testing/creating-testcases/template.json
+++ b/tools/testing/selftests/tc-testing/creating-testcases/template.json
@@ -26,7 +26,13 @@
26 "" 26 ""
27 ], 27 ],
28 "setup": [ 28 "setup": [
29 "" 29 "",
30 [
31 "",
32 0,
33 1,
34 255
35 ]
30 ], 36 ],
31 "cmdUnderTest": "", 37 "cmdUnderTest": "",
32 "expExitCode": "", 38 "expExitCode": "",
@@ -34,7 +40,12 @@
34 "matchPattern": "", 40 "matchPattern": "",
35 "matchCount": "", 41 "matchCount": "",
36 "teardown": [ 42 "teardown": [
37 "" 43 "",
44 [
45 "",
46 0,
47 255
48 ]
38 ] 49 ]
39 } 50 }
40] 51]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json b/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
new file mode 100644
index 000000000000..e2187b6e0b7a
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
@@ -0,0 +1,469 @@
1[
2 {
3 "id": "e89a",
4 "name": "Add valid pass action",
5 "category": [
6 "actions",
7 "gact"
8 ],
9 "setup": [
10 [
11 "$TC actions flush action gact",
12 0,
13 1,
14 255
15 ]
16 ],
17 "cmdUnderTest": "$TC actions add action pass index 8",
18 "expExitCode": "0",
19 "verifyCmd": "$TC actions list action gact",
20 "matchPattern": "action order [0-9]*: gact action pass.*index 8 ref",
21 "matchCount": "1",
22 "teardown": [
23 "$TC actions flush action gact"
24 ]
25 },
26 {
27 "id": "a02c",
28 "name": "Add valid pipe action",
29 "category": [
30 "actions",
31 "gact"
32 ],
33 "setup": [
34 [
35 "$TC actions flush action gact",
36 0,
37 1,
38 255
39 ]
40 ],
41 "cmdUnderTest": "$TC actions add action pipe index 6",
42 "expExitCode": "0",
43 "verifyCmd": "$TC actions list action gact",
44 "matchPattern": "action order [0-9]*: gact action pipe.*index 6 ref",
45 "matchCount": "1",
46 "teardown": [
47 "$TC actions flush action gact"
48 ]
49 },
50 {
51 "id": "feef",
52 "name": "Add valid reclassify action",
53 "category": [
54 "actions",
55 "gact"
56 ],
57 "setup": [
58 [
59 "$TC actions flush action gact",
60 0,
61 1,
62 255
63 ]
64 ],
65 "cmdUnderTest": "$TC actions add action reclassify index 5",
66 "expExitCode": "0",
67 "verifyCmd": "$TC actions list action gact",
68 "matchPattern": "action order [0-9]*: gact action reclassify.*index 5 ref",
69 "matchCount": "1",
70 "teardown": [
71 "$TC actions flush action gact"
72 ]
73 },
74 {
75 "id": "8a7a",
76 "name": "Add valid drop action",
77 "category": [
78 "actions",
79 "gact"
80 ],
81 "setup": [
82 [
83 "$TC actions flush action gact",
84 0,
85 1,
86 255
87 ]
88 ],
89 "cmdUnderTest": "$TC actions add action drop index 30",
90 "expExitCode": "0",
91 "verifyCmd": "$TC actions list action gact",
92 "matchPattern": "action order [0-9]*: gact action drop.*index 30 ref",
93 "matchCount": "1",
94 "teardown": [
95 "$TC actions flush action gact"
96 ]
97 },
98 {
99 "id": "9a52",
100 "name": "Add valid continue action",
101 "category": [
102 "actions",
103 "gact"
104 ],
105 "setup": [
106 [
107 "$TC actions flush action gact",
108 0,
109 1,
110 255
111 ]
112 ],
113 "cmdUnderTest": "$TC actions add action continue index 432",
114 "expExitCode": "0",
115 "verifyCmd": "$TC actions list action gact",
116 "matchPattern": "action order [0-9]*: gact action continue.*index 432 ref",
117 "matchCount": "1",
118 "teardown": [
119 "$TC actions flush action gact"
120 ]
121 },
122 {
123 "id": "d700",
124 "name": "Add invalid action",
125 "category": [
126 "actions",
127 "gact"
128 ],
129 "setup": [
130 [
131 "$TC actions flush action gact",
132 0,
133 1,
134 255
135 ]
136 ],
137 "cmdUnderTest": "$TC actions add action pump index 386",
138 "expExitCode": "255",
139 "verifyCmd": "$TC actions list action gact",
140 "matchPattern": "action order [0-9]*: gact action.*index 386 ref",
141 "matchCount": "0",
142 "teardown": [
143 "$TC actions flush action gact"
144 ]
145 },
146 {
147 "id": "9215",
148 "name": "Add action with duplicate index",
149 "category": [
150 "actions",
151 "gact"
152 ],
153 "setup": [
154 [
155 "$TC actions flush action gact",
156 0,
157 1,
158 255
159 ],
160 "$TC actions add action pipe index 15"
161 ],
162 "cmdUnderTest": "$TC actions add action drop index 15",
163 "expExitCode": "255",
164 "verifyCmd": "$TC actions list action gact",
165 "matchPattern": "action order [0-9]*: gact action drop.*index 15 ref",
166 "matchCount": "0",
167 "teardown": [
168 "$TC actions flush action gact"
169 ]
170 },
171 {
172 "id": "798e",
173 "name": "Add action with index exceeding 32-bit maximum",
174 "category": [
175 "actions",
176 "gact"
177 ],
178 "setup": [
179 [
180 "$TC actions flush action gact",
181 0,
182 1,
183 255
184 ]
185 ],
186 "cmdUnderTest": "$TC actions add action drop index 4294967296",
187 "expExitCode": "255",
188 "verifyCmd": "actions list action gact",
189 "matchPattern": "action order [0-9]*: gact action drop.*index 4294967296 ref",
190 "matchCount": "0",
191 "teardown": [
192 "$TC actions flush action gact"
193 ]
194 },
195 {
196 "id": "22be",
197 "name": "Add action with index at 32-bit maximum",
198 "category": [
199 "actions",
200 "gact"
201 ],
202 "setup": [
203 [
204 "$TC actions flush action gact",
205 0,
206 1,
207 255
208 ]
209 ],
210 "cmdUnderTest": "$TC actions add action drop index 4294967295",
211 "expExitCode": "0",
212 "verifyCmd": "$TC actions list action gact",
213 "matchPattern": "action order [0-9]*: gact action drop.*index 4294967295 ref",
214 "matchCount": "1",
215 "teardown": [
216 "$TC actions flush action gact"
217 ]
218 },
219 {
220 "id": "ac2a",
221 "name": "List actions",
222 "category": [
223 "actions",
224 "gact"
225 ],
226 "setup": [
227 [
228 "$TC actions flush action gact",
229 0,
230 1,
231 255
232 ],
233 "$TC actions add action reclassify index 101",
234 "$TC actions add action reclassify index 102",
235 "$TC actions add action reclassify index 103",
236 "$TC actions add action reclassify index 104",
237 "$TC actions add action reclassify index 105"
238 ],
239 "cmdUnderTest": "$TC actions list action gact",
240 "expExitCode": "0",
241 "verifyCmd": "$TC actions list action gact",
242 "matchPattern": "action order [0-9]*: gact action reclassify",
243 "matchCount": "5",
244 "teardown": [
245 "$TC actions flush action gact"
246 ]
247 },
248 {
249 "id": "3edf",
250 "name": "Flush gact actions",
251 "category": [
252 "actions",
253 "gact"
254 ],
255 "setup": [
256 "$TC actions add action reclassify index 101",
257 "$TC actions add action reclassify index 102",
258 "$TC actions add action reclassify index 103",
259 "$TC actions add action reclassify index 104",
260 "$TC actions add action reclassify index 105"
261 ],
262 "cmdUnderTest": "$TC actions flush action gact",
263 "expExitCode": "0",
264 "verifyCmd": "$TC actions list action gact",
265 "matchPattern": "action order [0-9]*: gact action reclassify",
266 "matchCount": "0",
267 "teardown": []
268 },
269 {
270 "id": "63ec",
271 "name": "Delete pass action",
272 "category": [
273 "actions",
274 "gact"
275 ],
276 "setup": [
277 [
278 "$TC actions flush action gact",
279 0,
280 1,
281 255
282 ],
283 "$TC actions add action pass index 1"
284 ],
285 "cmdUnderTest": "$TC actions del action gact index 1",
286 "expExitCode": "0",
287 "verifyCmd": "$TC actions list action gact",
288 "matchPattern": "action order [0-9]*: gact action pass.*index 1 ref",
289 "matchCount": "0",
290 "teardown": [
291 "$TC actions flush action gact"
292 ]
293 },
294 {
295 "id": "46be",
296 "name": "Delete pipe action",
297 "category": [
298 "actions",
299 "gact"
300 ],
301 "setup": [
302 [
303 "$TC actions flush action gact",
304 0,
305 1,
306 255
307 ],
308 "$TC actions add action pipe index 9"
309 ],
310 "cmdUnderTest": "$TC actions del action gact index 9",
311 "expExitCode": "0",
312 "verifyCmd": "$TC actions list action gact",
313 "matchPattern": "action order [0-9]*: gact action pipe.*index 9 ref",
314 "matchCount": "0",
315 "teardown": [
316 "$TC actions flush action gact"
317 ]
318 },
319 {
320 "id": "2e08",
321 "name": "Delete reclassify action",
322 "category": [
323 "actions",
324 "gact"
325 ],
326 "setup": [
327 [
328 "$TC actions flush action gact",
329 0,
330 1,
331 255
332 ],
333 "$TC actions add action reclassify index 65536"
334 ],
335 "cmdUnderTest": "$TC actions del action gact index 65536",
336 "expExitCode": "0",
337 "verifyCmd": "$TC actions list action gact",
338 "matchPattern": "action order [0-9]*: gact action reclassify.*index 65536 ref",
339 "matchCount": "0",
340 "teardown": [
341 "$TC actions flush action gact"
342 ]
343 },
344 {
345 "id": "99c4",
346 "name": "Delete drop action",
347 "category": [
348 "actions",
349 "gact"
350 ],
351 "setup": [
352 [
353 "$TC actions flush action gact",
354 0,
355 1,
356 255
357 ],
358 "$TC actions add action drop index 16"
359 ],
360 "cmdUnderTest": "$TC actions del action gact index 16",
361 "expExitCode": "0",
362 "verifyCmd": "$TC actions list action gact",
363 "matchPattern": "action order [0-9]*: gact action drop.*index 16 ref",
364 "matchCount": "0",
365 "teardown": [
366 "$TC actions flush action gact"
367 ]
368 },
369 {
370 "id": "fb6b",
371 "name": "Delete continue action",
372 "category": [
373 "actions",
374 "gact"
375 ],
376 "setup": [
377 [
378 "$TC actions flush action gact",
379 0,
380 1,
381 255
382 ],
383 "$TC actions add action continue index 32"
384 ],
385 "cmdUnderTest": "$TC actions del action gact index 32",
386 "expExitCode": "0",
387            "verifyCmd": "$TC actions list action gact",
388 "matchPattern": "action order [0-9]*: gact action continue.*index 32 ref",
389 "matchCount": "0",
390 "teardown": [
391 "$TC actions flush action gact"
392 ]
393 },
394 {
395 "id": "0eb3",
396 "name": "Delete non-existent action",
397 "category": [
398 "actions",
399 "gact"
400 ],
401 "setup": [
402 [
403 "$TC actions flush action gact",
404 0,
405 1,
406 255
407 ]
408 ],
409 "cmdUnderTest": "$TC actions del action gact index 2",
410 "expExitCode": "255",
411 "verifyCmd": "$TC actions list action gact",
412 "matchPattern": "action order [0-9]*: gact action",
413 "matchCount": "0",
414 "teardown": [
415 "$TC actions flush action gact"
416 ]
417 },
418 {
419 "id": "f02c",
420 "name": "Replace gact action",
421 "category": [
422 "actions",
423 "gact"
424 ],
425 "setup": [
426 [
427 "$TC actions flush action gact",
428 0,
429 1,
430 255
431 ],
432 "$TC actions add action drop index 10",
433 "$TC actions add action drop index 12"
434 ],
435 "cmdUnderTest": "$TC actions replace action ok index 12",
436 "expExitCode": "0",
437 "verifyCmd": "$TC actions ls action gact",
438 "matchPattern": "action order [0-9]*: gact action pass",
439 "matchCount": "1",
440 "teardown": [
441 "$TC actions flush action gact"
442 ]
443 },
444 {
445 "id": "525f",
446 "name": "Get gact action by index",
447 "category": [
448 "actions",
449 "gact"
450 ],
451 "setup": [
452 [
453 "$TC actions flush action gact",
454 0,
455 1,
456 255
457 ],
458 "$TC actions add action drop index 3900800700"
459 ],
460 "cmdUnderTest": "$TC actions get action gact index 3900800700",
461 "expExitCode": "0",
462 "verifyCmd": "$TC actions get action gact index 3900800700",
463 "matchPattern": "index 3900800700",
464 "matchCount": "1",
465 "teardown": [
466 "$TC actions flush action gact"
467 ]
468 }
469]
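Each test case added above follows the same schema: "setup" commands (a list entry pairs a command with the exit codes it may return), a "cmdUnderTest" whose exit status must equal "expExitCode", a "verifyCmd" whose output must contain exactly "matchCount" occurrences of the regex "matchPattern", and "teardown" commands for cleanup. The sketch below is not the tdc harness shipped with these selftests; it is only a minimal illustration of how one such entry could be driven, assuming $TC is taken from the environment and that the trailing integers of a setup/teardown sub-list are acceptable exit codes.

    #!/usr/bin/env python3
    # Minimal illustration of the JSON test-case schema added in this series.
    # NOT the real tdc.py; the handling of setup/teardown sub-lists and the
    # source of $TC are assumptions made for this sketch.
    import json, os, re, subprocess

    TC = os.environ.get("TC", "tc")  # assumption: $TC comes from the environment

    def sh(cmd):
        """Expand $TC and run the command through the shell, capturing output."""
        return subprocess.run(cmd.replace("$TC", TC), shell=True,
                              capture_output=True, text=True)

    def run_steps(steps):
        """Run setup/teardown entries; list entries carry allowed exit codes."""
        for step in steps:
            cmd, allowed = (step[0], set(step[1:])) if isinstance(step, list) else (step, {0})
            if cmd and sh(cmd).returncode not in allowed:
                raise RuntimeError("step failed: " + cmd)

    def run_case(test):
        run_steps(test.get("setup", []))
        try:
            rc_ok = sh(test["cmdUnderTest"]).returncode == int(test["expExitCode"])
            out = sh(test["verifyCmd"]).stdout
            match_ok = len(re.findall(test["matchPattern"], out)) == int(test["matchCount"])
            return rc_ok and match_ok
        finally:
            run_steps(test.get("teardown", []))

    if __name__ == "__main__":
        with open("gact.json") as f:  # any of the JSON files added above
            for case in json.load(f):
                print(case["id"], "PASS" if run_case(case) else "FAIL", "-", case["name"])

Run as root on a kernel with the relevant act_* modules available; the verify step simply counts regex matches in the command output, which is why the patterns above anchor on "action order [0-9]*:" and the action index.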
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
new file mode 100644
index 000000000000..9f34f0753969
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
@@ -0,0 +1,52 @@
1[
2 {
3 "id": "a568",
4 "name": "Add action with ife type",
5 "category": [
6 "actions",
7 "ife"
8 ],
9 "setup": [
10 [
11 "$TC actions flush action ife",
12 0,
13 1,
14 255
15 ],
16 "$TC actions add action ife encode type 0xDEAD index 1"
17 ],
18 "cmdUnderTest": "$TC actions get action ife index 1",
19 "expExitCode": "0",
20 "verifyCmd": "$TC actions get action ife index 1",
21 "matchPattern": "type 0xDEAD",
22 "matchCount": "1",
23 "teardown": [
24 "$TC actions flush action ife"
25 ]
26 },
27 {
28 "id": "b983",
29 "name": "Add action without ife type",
30 "category": [
31 "actions",
32 "ife"
33 ],
34 "setup": [
35 [
36 "$TC actions flush action ife",
37 0,
38 1,
39 255
40 ],
41 "$TC actions add action ife encode index 1"
42 ],
43 "cmdUnderTest": "$TC actions get action ife index 1",
44 "expExitCode": "0",
45 "verifyCmd": "$TC actions get action ife index 1",
46 "matchPattern": "type 0xED3E",
47 "matchCount": "1",
48 "teardown": [
49 "$TC actions flush action ife"
50 ]
51 }
52]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
new file mode 100644
index 000000000000..0fcccf18399b
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
@@ -0,0 +1,223 @@
1[
2 {
3 "id": "5124",
4 "name": "Add mirred mirror to egress action",
5 "category": [
6 "actions",
7 "mirred"
8 ],
9 "setup": [
10 [
11 "$TC actions flush action mirred",
12 0,
13 1,
14 255
15 ]
16 ],
17 "cmdUnderTest": "$TC actions add action mirred egress mirror index 1 dev lo",
18 "expExitCode": "0",
19 "verifyCmd": "$TC actions list action mirred",
20 "matchPattern": "action order [0-9]*: mirred \\(Egress Mirror to device lo\\).*index 1 ref",
21 "matchCount": "1",
22 "teardown": [
23 "$TC actions flush action mirred"
24 ]
25 },
26 {
27 "id": "6fb4",
28 "name": "Add mirred redirect to egress action",
29 "category": [
30 "actions",
31 "mirred"
32 ],
33 "setup": [
34 [
35 "$TC actions flush action mirred",
36 0,
37 1,
38 255
39 ]
40 ],
41 "cmdUnderTest": "$TC actions add action mirred egress redirect index 2 dev lo action pipe",
42 "expExitCode": "0",
43 "verifyCmd": "$TC actions list action mirred",
44 "matchPattern": "action order [0-9]*: mirred \\(Egress Redirect to device lo\\).*index 2 ref",
45 "matchCount": "1",
46 "teardown": [
47 "$TC actions flush action mirred"
48 ]
49 },
50 {
51 "id": "ba38",
52 "name": "Get mirred actions",
53 "category": [
54 "actions",
55 "mirred"
56 ],
57 "setup": [
58 [
59 "$TC actions flush action mirred",
60 0,
61 1,
62 255
63 ],
64 "$TC actions add action mirred egress mirror index 1 dev lo",
65 "$TC actions add action mirred egress redirect index 2 dev lo"
66 ],
67 "cmdUnderTest": "$TC actions show action mirred",
68 "expExitCode": "0",
69 "verifyCmd": "$TC actions list action mirred",
70 "matchPattern": "[Mirror|Redirect] to device lo",
71 "matchCount": "2",
72 "teardown": [
73 "$TC actions flush action mirred"
74 ]
75 },
76 {
77 "id": "d7c0",
78 "name": "Add invalid mirred direction",
79 "category": [
80 "actions",
81 "mirred"
82 ],
83 "setup": [
84 [
85 "$TC actions flush action mirred",
86 0,
87 1,
88 255
89 ]
90 ],
91 "cmdUnderTest": "$TC actions add action mirred inbound mirror index 20 dev lo",
92 "expExitCode": "255",
93 "verifyCmd": "$TC actions list action mirred",
94 "matchPattern": "action order [0-9]*: mirred \\(.*to device lo\\).*index 20 ref",
95 "matchCount": "0",
96 "teardown": [
97 "$TC actions flush action mirred"
98 ]
99 },
100 {
101 "id": "e213",
102 "name": "Add invalid mirred action",
103 "category": [
104 "actions",
105 "mirred"
106 ],
107 "setup": [
108 [
109 "$TC actions flush action mirred",
110 0,
111 1,
112 255
113 ]
114 ],
115 "cmdUnderTest": "$TC actions add action mirred egress remirror index 20 dev lo",
116 "expExitCode": "255",
117 "verifyCmd": "$TC actions list action mirred",
118 "matchPattern": "action order [0-9]*: mirred \\(Egress.*to device lo\\).*index 20 ref",
119 "matchCount": "0",
120 "teardown": [
121 "$TC actions flush action mirred"
122 ]
123 },
124 {
125 "id": "2d89",
126 "name": "Add mirred action with invalid device",
127 "category": [
128 "actions",
129 "mirred"
130 ],
131 "setup": [
132 [
133 "$TC actions flush action mirred",
134 0,
135 1,
136 255
137 ]
138 ],
139 "cmdUnderTest": "$TC actions add action mirred egress mirror index 20 dev eltoh",
140 "expExitCode": "255",
141 "verifyCmd": "$TC actions list action mirred",
142 "matchPattern": "action order [0-9]*: mirred \\(.*to device eltoh\\).*index 20 ref",
143 "matchCount": "0",
144 "teardown": [
145 "$TC actions flush action mirred"
146 ]
147 },
148 {
149 "id": "300b",
150 "name": "Add mirred action with duplicate index",
151 "category": [
152 "actions",
153 "mirred"
154 ],
155 "setup": [
156 [
157 "$TC actions flush action mirred",
158 0,
159 1,
160 255
161 ],
162 "$TC actions add action mirred egress redirect index 15 dev lo"
163 ],
164 "cmdUnderTest": "$TC actions add action mirred egress mirror index 15 dev lo",
165 "expExitCode": "255",
166 "verifyCmd": "$TC actions list action mirred",
167 "matchPattern": "action order [0-9]*: mirred \\(.*to device lo\\).*index 15 ref",
168 "matchCount": "1",
169 "teardown": [
170 "$TC actions flush action mirred"
171 ]
172 },
173 {
174 "id": "a70e",
175 "name": "Delete mirred mirror action",
176 "category": [
177 "actions",
178 "mirred"
179 ],
180 "setup": [
181 [
182 "$TC actions flush action mirred",
183 0,
184 1,
185 255
186 ],
187 "$TC actions add action mirred egress mirror index 5 dev lo"
188 ],
189 "cmdUnderTest": "$TC actions del action mirred index 5",
190 "expExitCode": "0",
191 "verifyCmd": "$TC actions list action mirred",
192 "matchPattern": "action order [0-9]*: mirred \\(Egress Mirror to device lo\\).*index 5 ref",
193 "matchCount": "0",
194 "teardown": [
195 "$TC actions flush action mirred"
196 ]
197 },
198 {
199 "id": "3fb3",
200 "name": "Delete mirred redirect action",
201 "category": [
202 "actions",
203 "mirred"
204 ],
205 "setup": [
206 [
207 "$TC actions flush action mirred",
208 0,
209 1,
210 255
211 ],
212 "$TC actions add action mirred egress redirect index 5 dev lo"
213 ],
214 "cmdUnderTest": "$TC actions del action mirred index 5",
215 "expExitCode": "0",
216 "verifyCmd": "$TC actions list action mirred",
217 "matchPattern": "action order [0-9]*: mirred \\(Egress Redirect to device lo\\).*index 5 ref",
218 "matchCount": "0",
219 "teardown": [
220 "$TC actions flush action mirred"
221 ]
222 }
223]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
new file mode 100644
index 000000000000..0e602a3f9393
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
@@ -0,0 +1,527 @@
1[
2 {
3 "id": "49aa",
4 "name": "Add valid basic police action",
5 "category": [
6 "actions",
7 "police"
8 ],
9 "setup": [
10 [
11 "$TC actions flush action police",
12 0,
13 1,
14 255
15 ]
16 ],
17 "cmdUnderTest": "$TC actions add action police rate 1kbit burst 10k index 1",
18 "expExitCode": "0",
19 "verifyCmd": "$TC actions ls action police",
20 "matchPattern": "action order [0-9]*: police 0x1 rate 1Kbit burst 10Kb",
21 "matchCount": "1",
22 "teardown": [
23 "$TC actions flush action police"
24 ]
25 },
26 {
27 "id": "3abe",
28 "name": "Add police action with duplicate index",
29 "category": [
30 "actions",
31 "police"
32 ],
33 "setup": [
34 [
35 "$TC actions flush action police",
36 0,
37 1,
38 255
39 ],
40 "$TC actions add action police rate 4Mbit burst 120k index 9"
41 ],
42 "cmdUnderTest": "$TC actions add action police rate 8kbit burst 24k index 9",
43 "expExitCode": "255",
44 "verifyCmd": "$TC actions ls action police",
45 "matchPattern": "action order [0-9]*: police 0x9",
46 "matchCount": "1",
47 "teardown": [
48 "$TC actions flush action police"
49 ]
50 },
51 {
52 "id": "49fa",
53 "name": "Add valid police action with mtu",
54 "category": [
55 "actions",
56 "police"
57 ],
58 "setup": [
59 [
60 "$TC actions flush action police",
61 0,
62 1,
63 255
64 ]
65 ],
66 "cmdUnderTest": "$TC actions add action police rate 90kbit burst 10k mtu 1k index 98",
67 "expExitCode": "0",
68 "verifyCmd": "$TC actions get action police index 98",
69 "matchPattern": "action order [0-9]*: police 0x62 rate 90Kbit burst 10Kb mtu 1Kb",
70 "matchCount": "1",
71 "teardown": [
72 "$TC actions flush action police"
73 ]
74 },
75 {
76 "id": "7943",
77 "name": "Add valid police action with peakrate",
78 "category": [
79 "actions",
80 "police"
81 ],
82 "setup": [
83 [
84 "$TC actions flush action police",
85 0,
86 1,
87 255
88 ]
89 ],
90 "cmdUnderTest": "$TC actions add action police rate 90kbit burst 10k mtu 2kb peakrate 100kbit index 3",
91 "expExitCode": "0",
92 "verifyCmd": "$TC actions ls action police",
93 "matchPattern": "action order [0-9]*: police 0x3 rate 90Kbit burst 10Kb mtu 2Kb peakrate 100Kbit",
94 "matchCount": "1",
95 "teardown": [
96 "$TC actions flush action police"
97 ]
98 },
99 {
100 "id": "055e",
101 "name": "Add police action with peakrate and no mtu",
102 "category": [
103 "actions",
104 "police"
105 ],
106 "setup": [
107 [
108 "$TC actions flush action police",
109 0,
110 1,
111 255
112 ]
113 ],
114 "cmdUnderTest": "$TC actions add action police rate 5kbit burst 6kb peakrate 10kbit index 9",
115 "expExitCode": "255",
116 "verifyCmd": "$TC actions ls action police",
117 "matchPattern": "action order [0-9]*: police 0x9 rate 5Kb burst 10Kb",
118 "matchCount": "0",
119 "teardown": [
120 "$TC actions flush action police"
121 ]
122 },
123 {
124 "id": "f057",
125 "name": "Add police action with valid overhead",
126 "category": [
127 "actions",
128 "police"
129 ],
130 "setup": [
131 [
132 "$TC actions flush action police",
133 0,
134 1,
135 255
136 ]
137 ],
138 "cmdUnderTest": "$TC actions add action police rate 1mbit burst 100k overhead 64 index 64",
139 "expExitCode": "0",
140 "verifyCmd": "$TC actions get action police index 64",
141 "matchPattern": "action order [0-9]*: police 0x40 rate 1Mbit burst 100Kb mtu 2Kb action reclassify overhead 64b",
142 "matchCount": "1",
143 "teardown": [
144 "$TC actions flush action police"
145 ]
146 },
147 {
148 "id": "7ffb",
149 "name": "Add police action with ethernet linklayer type",
150 "category": [
151 "actions",
152 "police"
153 ],
154 "setup": [
155 [
156 "$TC actions flush action police",
157 0,
158 1,
159 255
160 ]
161 ],
162 "cmdUnderTest": "$TC actions add action police rate 2mbit burst 200k linklayer ethernet index 8",
163 "expExitCode": "0",
164 "verifyCmd": "$TC actions show action police",
165 "matchPattern": "action order [0-9]*: police 0x8 rate 2Mbit burst 200Kb mtu 2Kb action reclassify overhead 0b",
166 "matchCount": "1",
167 "teardown": [
168 "$TC actions flush action police"
169 ]
170 },
171 {
172 "id": "3dda",
173 "name": "Add police action with atm linklayer type",
174 "category": [
175 "actions",
176 "police"
177 ],
178 "setup": [
179 [
180 "$TC actions flush action police",
181 0,
182 1,
183 255
184 ]
185 ],
186 "cmdUnderTest": "$TC actions add action police rate 2mbit burst 200k linklayer atm index 8",
187 "expExitCode": "0",
188 "verifyCmd": "$TC actions show action police",
189 "matchPattern": "action order [0-9]*: police 0x8 rate 2Mbit burst 200Kb mtu 2Kb action reclassify overhead 0b linklayer atm",
190 "matchCount": "1",
191 "teardown": [
192 "$TC actions flush action police"
193 ]
194 },
195 {
196 "id": "551b",
197 "name": "Add police actions with conform-exceed control continue/drop",
198 "category": [
199 "actions",
200 "police"
201 ],
202 "setup": [
203 [
204 "$TC actions flush action police",
205 0,
206 1,
207 255
208 ]
209 ],
210 "cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed continue/drop index 1",
211 "expExitCode": "0",
212 "verifyCmd": "$TC actions get action police index 1",
213 "matchPattern": "action order [0-9]*: police 0x1 rate 3Mbit burst 250Kb mtu 2Kb action continue/drop",
214 "matchCount": "1",
215 "teardown": [
216 "$TC actions flush action police"
217 ]
218 },
219 {
220 "id": "0c70",
221 "name": "Add police actions with conform-exceed control pass/reclassify",
222 "category": [
223 "actions",
224 "police"
225 ],
226 "setup": [
227 [
228 "$TC actions flush action police",
229 0,
230 1,
231 255
232 ]
233 ],
234 "cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed pass/reclassify index 4",
235 "expExitCode": "0",
236 "verifyCmd": "$TC actions ls action police",
237 "matchPattern": "action order [0-9]*: police 0x4 rate 3Mbit burst 250Kb mtu 2Kb action pass/reclassify",
238 "matchCount": "1",
239 "teardown": [
240 "$TC actions flush action police"
241 ]
242 },
243 {
244 "id": "d946",
245 "name": "Add police actions with conform-exceed control pass/pipe",
246 "category": [
247 "actions",
248 "police"
249 ],
250 "setup": [
251 [
252 "$TC actions flush action police",
253 0,
254 1,
255 255
256 ]
257 ],
258 "cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed pass/pipe index 5",
259 "expExitCode": "0",
260 "verifyCmd": "$TC actions ls action police",
261 "matchPattern": "action order [0-9]*: police 0x5 rate 3Mbit burst 250Kb mtu 2Kb action pass/pipe",
262 "matchCount": "1",
263 "teardown": [
264 "$TC actions flush action police"
265 ]
266 },
267 {
268 "id": "336e",
269 "name": "Delete police action",
270 "category": [
271 "actions",
272 "police"
273 ],
274 "setup": [
275 [
276 "$TC actions flush action police",
277 0,
278 1,
279 255
280 ],
281 "$TC actions add action police rate 5mbit burst 2m index 12"
282 ],
283 "cmdUnderTest": "$TC actions delete action police index 12",
284 "expExitCode": "0",
285 "verifyCmd": "$TC actions ls action police",
286 "matchPattern": "action order [0-9]*: police 0xc rate 5Mb burst 2Mb",
287 "matchCount": "0",
288 "teardown": [
289 "$TC actions flush action police"
290 ]
291 },
292 {
293 "id": "77fa",
294 "name": "Get single police action from many actions",
295 "category": [
296 "actions",
297 "police"
298 ],
299 "setup": [
300 [
301 "$TC actions flush action police",
302 0,
303 1,
304 255
305 ],
306 "$TC actions add action police rate 1mbit burst 100k index 1",
307 "$TC actions add action police rate 2mbit burst 200k index 2",
308 "$TC actions add action police rate 3mbit burst 300k index 3",
309 "$TC actions add action police rate 4mbit burst 400k index 4",
310 "$TC actions add action police rate 5mbit burst 500k index 5",
311 "$TC actions add action police rate 6mbit burst 600k index 6",
312 "$TC actions add action police rate 7mbit burst 700k index 7",
313 "$TC actions add action police rate 8mbit burst 800k index 8"
314 ],
315 "cmdUnderTest": "$TC actions get action police index 4",
316 "expExitCode": "0",
317 "verifyCmd": "$TC actions get action police index 4",
318 "matchPattern": "action order [0-9]*: police 0x4 rate 4Mbit burst 400Kb",
319 "matchCount": "1",
320 "teardown": [
321 "$TC actions flush action police"
322 ]
323 },
324 {
325 "id": "aa43",
326 "name": "Get single police action without specifying index",
327 "category": [
328 "actions",
329 "police"
330 ],
331 "setup": [
332 [
333 "$TC actions flush action police",
334 0,
335 1,
336 255
337 ],
338 "$TC actions add action police rate 1mbit burst 100k index 1"
339 ],
340 "cmdUnderTest": "$TC actions get action police",
341 "expExitCode": "255",
342 "verifyCmd": "$TC actions get action police",
343 "matchPattern": "action order [0-9]*: police",
344 "matchCount": "0",
345 "teardown": [
346 "$TC actions flush action police"
347 ]
348 },
349 {
350 "id": "858b",
351 "name": "List police actions",
352 "category": [
353 "actions",
354 "police"
355 ],
356 "setup": [
357 [
358 "$TC actions flush action police",
359 0,
360 1,
361 255
362 ],
363 "$TC actions add action police rate 1mbit burst 100k index 1",
364 "$TC actions add action police rate 2mbit burst 200k index 2",
365 "$TC actions add action police rate 3mbit burst 300k index 3",
366 "$TC actions add action police rate 4mbit burst 400k index 4",
367 "$TC actions add action police rate 5mbit burst 500k index 5",
368 "$TC actions add action police rate 6mbit burst 600k index 6",
369 "$TC actions add action police rate 7mbit burst 700k index 7",
370 "$TC actions add action police rate 8mbit burst 800k index 8"
371 ],
372 "cmdUnderTest": "$TC actions list action police",
373 "expExitCode": "0",
374 "verifyCmd": "$TC actions ls action police",
375 "matchPattern": "action order [0-9]*: police 0x[1-8] rate [1-8]Mbit burst [1-8]00Kb",
376 "matchCount": "8",
377 "teardown": [
378 "$TC actions flush action police"
379 ]
380 },
381 {
382 "id": "1c3a",
383 "name": "Flush police actions",
384 "category": [
385 "actions",
386 "police"
387 ],
388 "setup": [
389 "$TC actions add action police rate 1mbit burst 100k index 1",
390 "$TC actions add action police rate 2mbit burst 200k index 2",
391 "$TC actions add action police rate 3mbit burst 300k index 3",
392 "$TC actions add action police rate 4mbit burst 400k index 4",
393 "$TC actions add action police rate 5mbit burst 500k index 5",
394 "$TC actions add action police rate 6mbit burst 600k index 6",
395 "$TC actions add action police rate 7mbit burst 700k index 7",
396 "$TC actions add action police rate 8mbit burst 800k index 8"
397 ],
398 "cmdUnderTest": "$TC actions flush action police",
399 "expExitCode": "0",
400 "verifyCmd": "$TC actions ls action police",
401 "matchPattern": "action order [0-9]*: police",
402 "matchCount": "0",
403 "teardown": [
404 ""
405 ]
406 },
407 {
408 "id": "7326",
409 "name": "Add police action with control continue",
410 "category": [
411 "actions",
412 "police"
413 ],
414 "setup": [
415 [
416 "$TC actions flush action police",
417 0,
418 1,
419 255
420 ]
421 ],
422 "cmdUnderTest": "$TC actions add action police rate 7mbit burst 1m continue index 1",
423 "expExitCode": "0",
424 "verifyCmd": "$TC actions get action police index 1",
425 "matchPattern": "action order [0-9]*: police 0x1 rate 7Mbit burst 1024Kb mtu 2Kb action continue",
426 "matchCount": "1",
427 "teardown": [
428 "$TC actions flush action police"
429 ]
430 },
431 {
432 "id": "34fa",
433 "name": "Add police action with control drop",
434 "category": [
435 "actions",
436 "police"
437 ],
438 "setup": [
439 [
440 "$TC actions flush action police",
441 0,
442 1,
443 255
444 ]
445 ],
446 "cmdUnderTest": "$TC actions add action police rate 7mbit burst 1m drop index 1",
447 "expExitCode": "0",
448 "verifyCmd": "$TC actions ls action police",
449 "matchPattern": "action order [0-9]*: police 0x1 rate 7Mbit burst 1024Kb mtu 2Kb action drop",
450 "matchCount": "1",
451 "teardown": [
452 "$TC actions flush action police"
453 ]
454 },
455 {
456 "id": "8dd5",
457 "name": "Add police action with control ok",
458 "category": [
459 "actions",
460 "police"
461 ],
462 "setup": [
463 [
464 "$TC actions flush action police",
465 0,
466 1,
467 255
468 ]
469 ],
470 "cmdUnderTest": "$TC actions add action police rate 7mbit burst 1m ok index 1",
471 "expExitCode": "0",
472 "verifyCmd": "$TC actions ls action police",
473 "matchPattern": "action order [0-9]*: police 0x1 rate 7Mbit burst 1024Kb mtu 2Kb action pass",
474 "matchCount": "1",
475 "teardown": [
476 "$TC actions flush action police"
477 ]
478 },
479 {
480 "id": "b9d1",
481 "name": "Add police action with control reclassify",
482 "category": [
483 "actions",
484 "police"
485 ],
486 "setup": [
487 [
488 "$TC actions flush action police",
489 0,
490 1,
491 255
492 ]
493 ],
494 "cmdUnderTest": "$TC actions add action police rate 7mbit burst 1m reclassify index 1",
495 "expExitCode": "0",
496 "verifyCmd": "$TC actions get action police index 1",
497 "matchPattern": "action order [0-9]*: police 0x1 rate 7Mbit burst 1024Kb mtu 2Kb action reclassify",
498 "matchCount": "1",
499 "teardown": [
500 "$TC actions flush action police"
501 ]
502 },
503 {
504 "id": "c534",
505 "name": "Add police action with control pipe",
506 "category": [
507 "actions",
508 "police"
509 ],
510 "setup": [
511 [
512 "$TC actions flush action police",
513 0,
514 1,
515 255
516 ]
517 ],
518 "cmdUnderTest": "$TC actions add action police rate 7mbit burst 1m pipe index 1",
519 "expExitCode": "0",
520 "verifyCmd": "$TC actions ls action police",
521 "matchPattern": "action order [0-9]*: police 0x1 rate 7Mbit burst 1024Kb mtu 2Kb action pipe",
522 "matchCount": "1",
523 "teardown": [
524 "$TC actions flush action police"
525 ]
526 }
527]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json b/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json
new file mode 100644
index 000000000000..e89a7aa4012d
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json
@@ -0,0 +1,130 @@
1[
2 {
3 "id": "b078",
4 "name": "Add simple action",
5 "category": [
6 "actions",
7 "simple"
8 ],
9 "setup": [
10 [
11 "$TC actions flush action simple",
12 0,
13 1,
14 255
15 ]
16 ],
17 "cmdUnderTest": "$TC actions add action simple sdata \"A triumph\" index 60",
18 "expExitCode": "0",
19 "verifyCmd": "$TC actions list action simple",
20 "matchPattern": "action order [0-9]*: Simple <A triumph>.*index 60 ref",
21 "matchCount": "1",
22 "teardown": [
23 "$TC actions flush action simple"
24 ]
25 },
26 {
27 "id": "6d4c",
28 "name": "Add simple action with duplicate index",
29 "category": [
30 "actions",
31 "simple"
32 ],
33 "setup": [
34 [
35 "$TC actions flush action simple",
36 0,
37 1,
38 255
39 ],
40 "$TC actions add action simple sdata \"Aruba\" index 4"
41 ],
42 "cmdUnderTest": "$TC actions add action simple sdata \"Jamaica\" index 4",
43 "expExitCode": "255",
44 "verifyCmd": "$TC actions list action simple",
45 "matchPattern": "action order [0-9]*: Simple <Jamaica>.*ref",
46 "matchCount": "0",
47 "teardown": [
48 "$TC actions flush action simple"
49 ]
50 },
51 {
52 "id": "2542",
53 "name": "List simple actions",
54 "category": [
55 "actions",
56 "simple"
57 ],
58 "setup": [
59 [
60 "$TC actions flush action simple",
61 0,
62 1,
63 255
64 ],
65 "$TC actions add action simple sdata \"Rock\"",
66 "$TC actions add action simple sdata \"Paper\"",
67 "$TC actions add action simple sdata \"Scissors\" index 98"
68 ],
69 "cmdUnderTest": "$TC actions list action simple",
70 "expExitCode": "0",
71 "verifyCmd": "$TC actions list action simple",
72 "matchPattern": "action order [0-9]*: Simple <[A-Z][a-z]*>",
73 "matchCount": "3",
74 "teardown": [
75 "$TC actions flush action simple"
76 ]
77 },
78 {
79 "id": "ea67",
80 "name": "Delete simple action",
81 "category": [
82 "actions",
83 "simple"
84 ],
85 "setup": [
86 [
87 "$TC actions flush action simple",
88 0,
89 1,
90 255
91 ],
92 "$TC actions add action simple sdata \"Blinkenlights\" index 1"
93 ],
94 "cmdUnderTest": "$TC actions delete action simple index 1",
95 "expExitCode": "0",
96 "verifyCmd": "$TC actions list action simple",
97 "matchPattern": "action order [0-9]*: Simple <Blinkenlights>.*index 1 ref",
98 "matchCount": "0",
99 "teardown": [
100 "$TC actions flush action simple"
101 ]
102 },
103 {
104 "id": "8ff1",
105 "name": "Flush simple actions",
106 "category": [
107 "actions",
108 "simple"
109 ],
110 "setup": [
111 [
112 "$TC actions flush action simple",
113 0,
114 1,
115 255
116 ],
117 "$TC actions add action simple sdata \"Kirk\"",
118 "$TC actions add action simple sdata \"Spock\" index 50",
119 "$TC actions add action simple sdata \"McCoy\" index 9"
120 ],
121 "cmdUnderTest": "$TC actions flush action simple",
122 "expExitCode": "0",
123 "verifyCmd": "$TC actions list action simple",
124 "matchPattern": "action order [0-9]*: Simple <[A-Z][a-z]*>",
125 "matchCount": "0",
126 "teardown": [
127 ""
128 ]
129 }
130]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
new file mode 100644
index 000000000000..99635ea4722e
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
@@ -0,0 +1,320 @@
1[
2 {
3 "id": "6236",
4 "name": "Add skbedit action with valid mark",
5 "category": [
6 "actions",
7 "skbedit"
8 ],
9 "setup": [
10 [
11 "$TC actions flush action skbedit",
12 0,
13 1,
14 255
15 ]
16 ],
17 "cmdUnderTest": "$TC actions add action skbedit mark 1",
18 "expExitCode": "0",
19 "verifyCmd": "$TC actions list action skbedit",
20 "matchPattern": "action order [0-9]*: skbedit mark 1",
21 "matchCount": "1",
22 "teardown": [
23 "$TC actions flush action skbedit"
24 ]
25 },
26 {
27 "id": "407b",
28 "name": "Add skbedit action with invalid mark",
29 "category": [
30 "actions",
31 "skbedit"
32 ],
33 "setup": [
34 [
35 "$TC actions flush action skbedit",
36 0,
37 1,
38 255
39 ]
40 ],
41 "cmdUnderTest": "$TC actions add action skbedit mark 666777888999",
42 "expExitCode": "255",
43 "verifyCmd": "$TC actions list action skbedit",
44 "matchPattern": "action order [0-9]*: skbedit mark",
45 "matchCount": "0",
46 "teardown": [
47 "$TC actions flush action skbedit"
48 ]
49 },
50 {
51 "id": "081d",
52 "name": "Add skbedit action with priority",
53 "category": [
54 "actions",
55 "skbedit"
56 ],
57 "setup": [
58 [
59 "$TC actions flush action skbedit",
60 0,
61 1,
62 255
63 ]
64 ],
65 "cmdUnderTest": "$TC actions add action skbedit prio 99",
66 "expExitCode": "0",
67 "verifyCmd": "$TC actions list action skbedit",
68 "matchPattern": "action order [0-9]*: skbedit priority :99",
69 "matchCount": "1",
70 "teardown": [
71 "$TC actions flush action skbedit"
72 ]
73 },
74 {
75 "id": "cc37",
76 "name": "Add skbedit action with invalid priority",
77 "category": [
78 "actions",
79 "skbedit"
80 ],
81 "setup": [
82 [
83 "$TC actions flush action skbedit",
84 0,
85 1,
86 255
87 ]
88 ],
89 "cmdUnderTest": "$TC actions add action skbedit prio foo",
90 "expExitCode": "255",
91 "verifyCmd": "$TC actions list action skbedit",
92 "matchPattern": "action order [0-9]*: skbedit priority",
93 "matchCount": "0",
94 "teardown": [
95 "$TC actions flush action skbedit"
96 ]
97 },
98 {
99 "id": "3c95",
100 "name": "Add skbedit action with queue_mapping",
101 "category": [
102 "actions",
103 "skbedit"
104 ],
105 "setup": [
106 [
107 "$TC actions flush action skbedit",
108 0,
109 1,
110 255
111 ]
112 ],
113 "cmdUnderTest": "$TC actions add action skbedit queue_mapping 909",
114 "expExitCode": "0",
115 "verifyCmd": "$TC actions list action skbedit",
116 "matchPattern": "action order [0-9]*: skbedit queue_mapping 909",
117 "matchCount": "1",
118 "teardown": [
119 "$TC actions flush action skbedit"
120 ]
121 },
122 {
123 "id": "985c",
124 "name": "Add skbedit action with invalid queue_mapping",
125 "category": [
126 "actions",
127 "skbedit"
128 ],
129 "setup": [
130 [
131 "$TC actions flush action skbedit",
132 0,
133 1,
134 255
135 ]
136 ],
137 "cmdUnderTest": "$TC actions add action skbedit queue_mapping 67000",
138 "expExitCode": "255",
139 "verifyCmd": "$TC actions list action skbedit",
140 "matchPattern": "action order [0-9]*: skbedit queue_mapping",
141 "matchCount": "0",
142 "teardown": [
143 "$TC actions flush action skbedit"
144 ]
145 },
146 {
147 "id": "224f",
148 "name": "Add skbedit action with ptype host",
149 "category": [
150 "actions",
151 "skbedit"
152 ],
153 "setup": [
154 [
155 "$TC actions flush action skbedit",
156 0,
157 1,
158 255
159 ]
160 ],
161 "cmdUnderTest": "$TC actions add action skbedit ptype host",
162 "expExitCode": "0",
163 "verifyCmd": "$TC actions list action skbedit",
164 "matchPattern": "action order [0-9]*: skbedit ptype host",
165 "matchCount": "1",
166 "teardown": [
167 "$TC actions flush action skbedit"
168 ]
169 },
170 {
171 "id": "d1a3",
172 "name": "Add skbedit action with ptype otherhost",
173 "category": [
174 "actions",
175 "skbedit"
176 ],
177 "setup": [
178 [
179 "$TC actions flush action skbedit",
180 0,
181 1,
182 255
183 ]
184 ],
185 "cmdUnderTest": "$TC actions add action skbedit ptype otherhost",
186 "expExitCode": "0",
187 "verifyCmd": "$TC actions list action skbedit",
188 "matchPattern": "action order [0-9]*: skbedit ptype otherhost",
189 "matchCount": "1",
190 "teardown": [
191 "$TC actions flush action skbedit"
192 ]
193 },
194 {
195 "id": "b9c6",
196 "name": "Add skbedit action with invalid ptype",
197 "category": [
198 "actions",
199 "skbedit"
200 ],
201 "setup": [
202 [
203 "$TC actions flush action skbedit",
204 0,
205 1,
206 255
207 ]
208 ],
209 "cmdUnderTest": "$TC actions add action skbedit ptype openair",
210 "expExitCode": "255",
211 "verifyCmd": "$TC actions list action skbedit",
212 "matchPattern": "action order [0-9]*: skbedit ptype openair",
213 "matchCount": "0",
214 "teardown": [
215 "$TC actions flush action skbedit"
216 ]
217 },
218 {
219 "id": "5172",
220 "name": "List skbedit actions",
221 "category": [
222 "actions",
223 "skbedit"
224 ],
225 "setup": [
226 [
227 "$TC actions flush action skbedit",
228 0,
229 1,
230 255
231 ],
232 "$TC actions add action skbedit ptype otherhost",
233 "$TC actions add action skbedit ptype broadcast",
234 "$TC actions add action skbedit mark 59",
235 "$TC actions add action skbedit mark 409"
236 ],
237 "cmdUnderTest": "$TC actions list action skbedit",
238 "expExitCode": "0",
239 "verifyCmd": "$TC actions list action skbedit",
240 "matchPattern": "action order [0-9]*: skbedit",
241 "matchCount": "4",
242 "teardown": [
243 "$TC actions flush action skbedit"
244 ]
245 },
246 {
247 "id": "a6d6",
248 "name": "Add skbedit action with index",
249 "category": [
250 "actions",
251 "skbedit"
252 ],
253 "setup": [
254 [
255 "$TC actions flush action skbedit",
256 0,
257 1,
258 255
259 ]
260 ],
261 "cmdUnderTest": "$TC actions add action skbedit mark 808 index 4040404040",
262 "expExitCode": "0",
263 "verifyCmd": "$TC actions list action skbedit",
264 "matchPattern": "index 4040404040",
265 "matchCount": "1",
266 "teardown": [
267 "$TC actions flush action skbedit"
268 ]
269 },
270 {
271 "id": "38f3",
272 "name": "Delete skbedit action",
273 "category": [
274 "actions",
275 "skbedit"
276 ],
277 "setup": [
278 [
279 "$TC actions flush action skbedit",
280 0,
281 1,
282 255
283 ],
284 "$TC actions add action skbedit mark 42 index 9009"
285 ],
286 "cmdUnderTest": "$TC actions del action skbedit index 9009",
287 "expExitCode": "0",
288 "verifyCmd": "$TC actions list action skbedit",
289 "matchPattern": "action order [0-9]*: skbedit mark 42",
290 "matchCount": "0",
291 "teardown": [
292 "$TC actions flush action skbedit"
293 ]
294 },
295 {
296 "id": "ce97",
297 "name": "Flush skbedit actions",
298 "category": [
299 "actions",
300 "skbedit"
301 ],
302 "setup": [
303 "$TC actions add action skbedit mark 500",
304 "$TC actions add action skbedit mark 501",
305 "$TC actions add action skbedit mark 502",
306 "$TC actions add action skbedit mark 503",
307 "$TC actions add action skbedit mark 504",
308 "$TC actions add action skbedit mark 505",
309 "$TC actions add action skbedit mark 506"
310 ],
311 "cmdUnderTest": "$TC actions flush action skbedit",
312 "expExitCode": "0",
313 "verifyCmd": "$TC actions list action skbedit",
314 "matchPattern": "action order [0-9]*: skbedit",
315 "matchCount": "0",
316 "teardown": [
317 "$TC actions flush action skbedit"
318 ]
319 }
320]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json
new file mode 100644
index 000000000000..e34075059c26
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json
@@ -0,0 +1,372 @@
1[
2 {
3 "id": "7d50",
4 "name": "Add skbmod action to set destination mac",
5 "category": [
6 "actions",
7 "skbmod"
8 ],
9 "setup": [
10 [
11 "$TC actions flush action skbmod",
12 0,
13 1,
14 255
15 ]
16 ],
17 "cmdUnderTest": "$TC actions add action skbmod set dmac 11:22:33:44:55:66 index 5",
18 "expExitCode": "0",
19 "verifyCmd": "$TC actions ls action skbmod",
20 "matchPattern": "action order [0-9]*: skbmod pipe set dmac 11:22:33:44:55:66\\s+index 5",
21 "matchCount": "1",
22 "teardown": [
23 "$TC actions flush action skbmod"
24 ]
25 },
26 {
27 "id": "9b29",
28 "name": "Add skbmod action to set source mac",
29 "category": [
30 "actions",
31 "skbmod"
32 ],
33 "setup": [
34 [
35 "$TC actions flush action skbmod",
36 0,
37 1,
38 255
39 ]
40 ],
41 "cmdUnderTest": "$TC actions add action skbmod set smac 77:88:99:AA:BB:CC index 7",
42 "expExitCode": "0",
43 "verifyCmd": "$TC actions get action skbmod index 7",
44 "matchPattern": "action order [0-9]*: skbmod pipe set smac 77:88:99:aa:bb:cc\\s+index 7",
45 "matchCount": "1",
46 "teardown": [
47 "$TC actions flush action skbmod"
48 ]
49 },
50 {
51 "id": "1724",
52 "name": "Add skbmod action with invalid mac",
53 "category": [
54 "actions",
55 "skbmod"
56 ],
57 "setup": [
58 [
59 "$TC actions flush action skbmod",
60 0,
61 1,
62 255
63 ]
64 ],
65 "cmdUnderTest": "$TC actions add action skbmod set smac 00:44:55:44:55",
66 "expExitCode": "255",
67 "verifyCmd": "$TC actions ls action skbmod",
68 "matchPattern": "action order [0-9]*: skbmod pipe set smac 00:44:55:44:55",
69 "matchCount": "0",
70 "teardown": [
71 "$TC actions flush action skbmod"
72 ]
73 },
74 {
75 "id": "3cf1",
76 "name": "Add skbmod action with valid etype",
77 "category": [
78 "actions",
79 "skbmod"
80 ],
81 "setup": [
82 [
83 "$TC actions flush action skbmod",
84 0,
85 1,
86 255
87 ]
88 ],
89 "cmdUnderTest": "$TC actions add action skbmod set etype 0xfefe",
90 "expExitCode": "0",
91 "verifyCmd": "$TC actions ls action skbmod",
92 "matchPattern": "action order [0-9]*: skbmod pipe set etype 0xFEFE",
93 "matchCount": "1",
94 "teardown": [
95 "$TC actions flush action skbmod"
96 ]
97 },
98 {
99 "id": "a749",
100 "name": "Add skbmod action with invalid etype",
101 "category": [
102 "actions",
103 "skbmod"
104 ],
105 "setup": [
106 [
107 "$TC actions flush action skbmod",
108 0,
109 1,
110 255
111 ]
112 ],
113 "cmdUnderTest": "$TC actions add action skbmod set etype 0xfefef",
114 "expExitCode": "255",
115 "verifyCmd": "$TC actions ls action skbmod",
116 "matchPattern": "action order [0-9]*: skbmod pipe set etype 0xFEFEF",
117 "matchCount": "0",
118 "teardown": [
119 "$TC actions flush action skbmod"
120 ]
121 },
122 {
123 "id": "bfe6",
124 "name": "Add skbmod action to swap mac",
125 "category": [
126 "actions",
127 "skbmod"
128 ],
129 "setup": [
130 [
131 "$TC actions flush action skbmod",
132 0,
133 1,
134 255
135 ]
136 ],
137 "cmdUnderTest": "$TC actions add action skbmod swap mac",
138 "expExitCode": "0",
139 "verifyCmd": "$TC actions get action skbmod index 1",
140 "matchPattern": "action order [0-9]*: skbmod pipe swap mac",
141 "matchCount": "1",
142 "teardown": [
143 "$TC actions flush action skbmod"
144 ]
145 },
146 {
147 "id": "839b",
148 "name": "Add skbmod action with control pipe",
149 "category": [
150 "actions",
151 "skbmod"
152 ],
153 "setup": [
154 [
155 "$TC actions flush action skbmod",
156 0,
157 1,
158 255
159 ]
160 ],
161 "cmdUnderTest": "$TC actions add action skbmod swap mac pipe",
162 "expExitCode": "0",
163 "verifyCmd": "$TC actions ls action skbmod",
164 "matchPattern": "action order [0-9]*: skbmod pipe swap mac",
165 "matchCount": "1",
166 "teardown": [
167 "$TC actions flush action skbmod"
168 ]
169 },
170 {
171 "id": "c167",
172 "name": "Add skbmod action with control reclassify",
173 "category": [
174 "actions",
175 "skbmod"
176 ],
177 "setup": [
178 [
179 "$TC actions flush action skbmod",
180 0,
181 1,
182 255
183 ]
184 ],
185 "cmdUnderTest": "$TC actions add action skbmod set etype 0xbeef reclassify",
186 "expExitCode": "0",
187 "verifyCmd": "$TC actions ls action skbmod",
188 "matchPattern": "action order [0-9]*: skbmod reclassify set etype 0xBEEF",
189 "matchCount": "1",
190 "teardown": [
191 "$TC actions flush action skbmod"
192 ]
193 },
194 {
195 "id": "0c2f",
196 "name": "Add skbmod action with control drop",
197 "category": [
198 "actions",
199 "skbmod"
200 ],
201 "setup": [
202 [
203 "$TC actions flush action skbmod",
204 0,
205 1,
206 255
207 ]
208 ],
209 "cmdUnderTest": "$TC actions add action skbmod set etype 0x0001 drop",
210 "expExitCode": "0",
211 "verifyCmd": "$TC actions get action skbmod index 1",
212 "matchPattern": "action order [0-9]*: skbmod drop set etype 0x1",
213 "matchCount": "1",
214 "teardown": [
215 "$TC actions flush action skbmod"
216 ]
217 },
218 {
219 "id": "d113",
220 "name": "Add skbmod action with control continue",
221 "category": [
222 "actions",
223 "skbmod"
224 ],
225 "setup": [
226 [
227 "$TC actions flush action skbmod",
228 0,
229 1,
230 255
231 ]
232 ],
233 "cmdUnderTest": "$TC actions add action skbmod set etype 0x1 continue",
234 "expExitCode": "0",
235 "verifyCmd": "$TC actions ls action skbmod",
236 "matchPattern": "action order [0-9]*: skbmod continue set etype 0x1",
237 "matchCount": "1",
238 "teardown": [
239 "$TC actions flush action skbmod"
240 ]
241 },
242 {
243 "id": "7242",
244 "name": "Add skbmod action with control pass",
245 "category": [
246 "actions",
247 "skbmod"
248 ],
249 "setup": [
250 [
251 "$TC actions flush action skbmod",
252 0,
253 1,
254 255
255 ]
256 ],
257 "cmdUnderTest": "$TC actions add action skbmod set smac 00:00:00:00:00:01 pass",
258 "expExitCode": "0",
259 "verifyCmd": "$TC actions ls action skbmod",
260 "matchPattern": "action order [0-9]*: skbmod pass set smac 00:00:00:00:00:01",
261 "matchCount": "1",
262 "teardown": [
263 "$TC actions flush action skbmod"
264 ]
265 },
266 {
267 "id": "58cb",
268 "name": "List skbmod actions",
269 "category": [
270 "actions",
271 "skbmod"
272 ],
273 "setup": [
274 [
275 "$TC actions flush action skbmod",
276 0,
277 1,
278 255
279 ],
280 "$TC actions add action skbmod set etype 0x0001",
281 "$TC actions add action skbmod set etype 0x0011",
282 "$TC actions add action skbmod set etype 0x0021",
283 "$TC actions add action skbmod set etype 0x0031",
284 "$TC actions add action skbmod set etype 0x0041"
285 ],
286 "cmdUnderTest": "$TC actions ls action skbmod",
287 "expExitCode": "0",
288 "verifyCmd": "$TC actions ls action skbmod",
289 "matchPattern": "action order [0-9]*: skbmod",
290 "matchCount": "5",
291 "teardown": [
292 "$TC actions flush action skbmod"
293 ]
294 },
295 {
296 "id": "9aa8",
297 "name": "Get a single skbmod action from a list",
298 "category": [
299 "actions",
300 "skbmod"
301 ],
302 "setup": [
303 [
304 "$TC actions flush action skbmod",
305 0,
306 1,
307 255
308 ],
309 "$TC actions add action skbmod set etype 0x0001",
310 "$TC actions add action skbmod set etype 0x0011",
311 "$TC actions add action skbmod set etype 0x0021",
312 "$TC actions add action skbmod set etype 0x0031",
313 "$TC actions add action skbmod set etype 0x0041"
314 ],
315 "cmdUnderTest": "$TC actions ls action skbmod",
316 "expExitCode": "0",
317 "verifyCmd": "$TC actions get action skbmod index 4",
318 "matchPattern": "action order [0-9]*: skbmod pipe set etype 0x0031",
319 "matchCount": "1",
320 "teardown": [
321 "$TC actions flush action skbmod"
322 ]
323 },
324 {
325 "id": "e93a",
326 "name": "Delete an skbmod action",
327 "category": [
328 "actions",
329 "skbmod"
330 ],
331 "setup": [
332 [
333 "$TC actions flush action skbmod",
334 0,
335 1,
336 255
337 ],
338 "$TC actions add action skbmod set etype 0x1111 index 909"
339 ],
340 "cmdUnderTest": "$TC actions del action skbmod index 909",
341 "expExitCode": "0",
342 "verifyCmd": "$TC actions ls action skbmod",
343 "matchPattern": "action order [0-9]*: skbmod pipe set etype 0x1111\\s+index 909",
344 "matchCount": "0",
345 "teardown": [
346 "$TC actions flush action skbmod"
347 ]
348 },
349 {
350 "id": "40c2",
351 "name": "Flush skbmod actions",
352 "category": [
353 "actions",
354 "skbmod"
355 ],
356 "setup": [
357 "$TC actions add action skbmod set etype 0x0001",
358 "$TC actions add action skbmod set etype 0x0011",
359 "$TC actions add action skbmod set etype 0x0021",
360 "$TC actions add action skbmod set etype 0x0031",
361 "$TC actions add action skbmod set etype 0x0041"
362 ],
363 "cmdUnderTest": "$TC actions flush action skbmod",
364 "expExitCode": "0",
365 "verifyCmd": "$TC actions ls action skbmod",
366 "matchPattern": "action order [0-9]*: skbmod",
367 "matchCount": "0",
368 "teardown": [
369 "$TC actions flush action skbmod"
370 ]
371 }
372]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tests.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tests.json
deleted file mode 100644
index 6973bdc5b5bf..000000000000
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/tests.json
+++ /dev/null
@@ -1,1165 +0,0 @@
1[
2 {
3 "id": "e89a",
4 "name": "Add valid pass action",
5 "category": [
6 "actions",
7 "gact"
8 ],
9 "setup": [
10 [
11 "$TC actions flush action gact",
12 0,
13 1,
14 255
15 ]
16 ],
17 "cmdUnderTest": "$TC actions add action pass index 8",
18 "expExitCode": "0",
19 "verifyCmd": "$TC actions list action gact",
20 "matchPattern": "action order [0-9]*: gact action pass.*index 8 ref",
21 "matchCount": "1",
22 "teardown": [
23 "$TC actions flush action gact"
24 ]
25 },
26 {
27 "id": "a02c",
28 "name": "Add valid pipe action",
29 "category": [
30 "actions",
31 "gact"
32 ],
33 "setup": [
34 [
35 "$TC actions flush action gact",
36 0,
37 1,
38 255
39 ]
40 ],
41 "cmdUnderTest": "$TC actions add action pipe index 6",
42 "expExitCode": "0",
43 "verifyCmd": "$TC actions list action gact",
44 "matchPattern": "action order [0-9]*: gact action pipe.*index 6 ref",
45 "matchCount": "1",
46 "teardown": [
47 "$TC actions flush action gact"
48 ]
49 },
50 {
51 "id": "feef",
52 "name": "Add valid reclassify action",
53 "category": [
54 "actions",
55 "gact"
56 ],
57 "setup": [
58 [
59 "$TC actions flush action gact",
60 0,
61 1,
62 255
63 ]
64 ],
65 "cmdUnderTest": "$TC actions add action reclassify index 5",
66 "expExitCode": "0",
67 "verifyCmd": "$TC actions list action gact",
68 "matchPattern": "action order [0-9]*: gact action reclassify.*index 5 ref",
69 "matchCount": "1",
70 "teardown": [
71 "$TC actions flush action gact"
72 ]
73 },
74 {
75 "id": "8a7a",
76 "name": "Add valid drop action",
77 "category": [
78 "actions",
79 "gact"
80 ],
81 "setup": [
82 [
83 "$TC actions flush action gact",
84 0,
85 1,
86 255
87 ]
88 ],
89 "cmdUnderTest": "$TC actions add action drop index 30",
90 "expExitCode": "0",
91 "verifyCmd": "$TC actions list action gact",
92 "matchPattern": "action order [0-9]*: gact action drop.*index 30 ref",
93 "matchCount": "1",
94 "teardown": [
95 "$TC actions flush action gact"
96 ]
97 },
98 {
99 "id": "9a52",
100 "name": "Add valid continue action",
101 "category": [
102 "actions",
103 "gact"
104 ],
105 "setup": [
106 [
107 "$TC actions flush action gact",
108 0,
109 1,
110 255
111 ]
112 ],
113 "cmdUnderTest": "$TC actions add action continue index 432",
114 "expExitCode": "0",
115 "verifyCmd": "$TC actions list action gact",
116 "matchPattern": "action order [0-9]*: gact action continue.*index 432 ref",
117 "matchCount": "1",
118 "teardown": [
119 "$TC actions flush action gact"
120 ]
121 },
122 {
123 "id": "d700",
124 "name": "Add invalid action",
125 "category": [
126 "actions",
127 "gact"
128 ],
129 "setup": [
130 [
131 "$TC actions flush action gact",
132 0,
133 1,
134 255
135 ]
136 ],
137 "cmdUnderTest": "$TC actions add action pump index 386",
138 "expExitCode": "255",
139 "verifyCmd": "$TC actions list action gact",
140 "matchPattern": "action order [0-9]*: gact action.*index 386 ref",
141 "matchCount": "0",
142 "teardown": [
143 "$TC actions flush action gact"
144 ]
145 },
146 {
147 "id": "9215",
148 "name": "Add action with duplicate index",
149 "category": [
150 "actions",
151 "gact"
152 ],
153 "setup": [
154 [
155 "$TC actions flush action gact",
156 0,
157 1,
158 255
159 ],
160 "$TC actions add action pipe index 15"
161 ],
162 "cmdUnderTest": "$TC actions add action drop index 15",
163 "expExitCode": "255",
164 "verifyCmd": "$TC actions list action gact",
165 "matchPattern": "action order [0-9]*: gact action drop.*index 15 ref",
166 "matchCount": "0",
167 "teardown": [
168 "$TC actions flush action gact"
169 ]
170 },
171 {
172 "id": "798e",
173 "name": "Add action with index exceeding 32-bit maximum",
174 "category": [
175 "actions",
176 "gact"
177 ],
178 "setup": [
179 [
180 "$TC actions flush action gact",
181 0,
182 1,
183 255
184 ]
185 ],
186 "cmdUnderTest": "$TC actions add action drop index 4294967296",
187 "expExitCode": "255",
188 "verifyCmd": "actions list action gact",
189 "matchPattern": "action order [0-9]*: gact action drop.*index 4294967296 ref",
190 "matchCount": "0",
191 "teardown": [
192 "$TC actions flush action gact"
193 ]
194 },
195 {
196 "id": "22be",
197 "name": "Add action with index at 32-bit maximum",
198 "category": [
199 "actions",
200 "gact"
201 ],
202 "setup": [
203 [
204 "$TC actions flush action gact",
205 0,
206 1,
207 255
208 ]
209 ],
210 "cmdUnderTest": "$TC actions add action drop index 4294967295",
211 "expExitCode": "0",
212 "verifyCmd": "$TC actions list action gact",
213 "matchPattern": "action order [0-9]*: gact action drop.*index 4294967295 ref",
214 "matchCount": "1",
215 "teardown": [
216 "$TC actions flush action gact"
217 ]
218 },
219 {
220 "id": "ac2a",
221 "name": "List actions",
222 "category": [
223 "actions",
224 "gact"
225 ],
226 "setup": [
227 [
228 "$TC actions flush action gact",
229 0,
230 1,
231 255
232 ],
233 "$TC actions add action reclassify index 101",
234 "$TC actions add action reclassify index 102",
235 "$TC actions add action reclassify index 103",
236 "$TC actions add action reclassify index 104",
237 "$TC actions add action reclassify index 105"
238 ],
239 "cmdUnderTest": "$TC actions list action gact",
240 "expExitCode": "0",
241 "verifyCmd": "$TC actions list action gact",
242 "matchPattern": "action order [0-9]*: gact action reclassify",
243 "matchCount": "5",
244 "teardown": [
245 "$TC actions flush action gact"
246 ]
247 },
248 {
249 "id": "63ec",
250 "name": "Delete pass action",
251 "category": [
252 "actions",
253 "gact"
254 ],
255 "setup": [
256 [
257 "$TC actions flush action gact",
258 0,
259 1,
260 255
261 ],
262 "$TC actions add action pass index 1"
263 ],
264 "cmdUnderTest": "$TC actions del action gact index 1",
265 "expExitCode": "0",
266 "verifyCmd": "$TC actions list action gact",
267 "matchPattern": "action order [0-9]*: gact action pass.*index 1 ref",
268 "matchCount": "0",
269 "teardown": [
270 "$TC actions flush action gact"
271 ]
272 },
273 {
274 "id": "46be",
275 "name": "Delete pipe action",
276 "category": [
277 "actions",
278 "gact"
279 ],
280 "setup": [
281 [
282 "$TC actions flush action gact",
283 0,
284 1,
285 255
286 ],
287 "$TC actions add action pipe index 9"
288 ],
289 "cmdUnderTest": "$TC actions del action gact index 9",
290 "expExitCode": "0",
291 "verifyCmd": "$TC actions list action gact",
292 "matchPattern": "action order [0-9]*: gact action pipe.*index 9 ref",
293 "matchCount": "0",
294 "teardown": [
295 "$TC actions flush action gact"
296 ]
297 },
298 {
299 "id": "2e08",
300 "name": "Delete reclassify action",
301 "category": [
302 "actions",
303 "gact"
304 ],
305 "setup": [
306 [
307 "$TC actions flush action gact",
308 0,
309 1,
310 255
311 ],
312 "$TC actions add action reclassify index 65536"
313 ],
314 "cmdUnderTest": "$TC actions del action gact index 65536",
315 "expExitCode": "0",
316 "verifyCmd": "$TC actions list action gact",
317 "matchPattern": "action order [0-9]*: gact action reclassify.*index 65536 ref",
318 "matchCount": "0",
319 "teardown": [
320 "$TC actions flush action gact"
321 ]
322 },
323 {
324 "id": "99c4",
325 "name": "Delete drop action",
326 "category": [
327 "actions",
328 "gact"
329 ],
330 "setup": [
331 [
332 "$TC actions flush action gact",
333 0,
334 1,
335 255
336 ],
337 "$TC actions add action drop index 16"
338 ],
339 "cmdUnderTest": "$TC actions del action gact index 16",
340 "expExitCode": "0",
341 "verifyCmd": "$TC actions list action gact",
342 "matchPattern": "action order [0-9]*: gact action drop.*index 16 ref",
343 "matchCount": "0",
344 "teardown": [
345 "$TC actions flush action gact"
346 ]
347 },
348 {
349 "id": "fb6b",
350 "name": "Delete continue action",
351 "category": [
352 "actions",
353 "gact"
354 ],
355 "setup": [
356 [
357 "$TC actions flush action gact",
358 0,
359 1,
360 255
361 ],
362 "$TC actions add action continue index 32"
363 ],
364 "cmdUnderTest": "$TC actions del action gact index 32",
365 "expExitCode": "0",
366 "verifyCmd": "actions list action gact",
367 "matchPattern": "action order [0-9]*: gact action continue.*index 32 ref",
368 "matchCount": "0",
369 "teardown": [
370 "$TC actions flush action gact"
371 ]
372 },
373 {
374 "id": "0eb3",
375 "name": "Delete non-existent action",
376 "category": [
377 "actions",
378 "gact"
379 ],
380 "setup": [
381 [
382 "$TC actions flush action gact",
383 0,
384 1,
385 255
386 ]
387 ],
388 "cmdUnderTest": "$TC actions del action gact index 2",
389 "expExitCode": "255",
390 "verifyCmd": "$TC actions list action gact",
391 "matchPattern": "action order [0-9]*: gact action",
392 "matchCount": "0",
393 "teardown": [
394 "$TC actions flush action gact"
395 ]
396 },
397 {
398 "id": "5124",
399 "name": "Add mirred mirror to egress action",
400 "category": [
401 "actions",
402 "mirred"
403 ],
404 "setup": [
405 [
406 "$TC actions flush action mirred",
407 0,
408 1,
409 255
410 ]
411 ],
412 "cmdUnderTest": "$TC actions add action mirred egress mirror index 1 dev lo",
413 "expExitCode": "0",
414 "verifyCmd": "$TC actions list action mirred",
415 "matchPattern": "action order [0-9]*: mirred \\(Egress Mirror to device lo\\).*index 1 ref",
416 "matchCount": "1",
417 "teardown": [
418 "$TC actions flush action mirred"
419 ]
420 },
421 {
422 "id": "6fb4",
423 "name": "Add mirred redirect to egress action",
424 "category": [
425 "actions",
426 "mirred"
427 ],
428 "setup": [
429 [
430 "$TC actions flush action mirred",
431 0,
432 1,
433 255
434 ]
435 ],
436 "cmdUnderTest": "$TC actions add action mirred egress redirect index 2 dev lo action pipe",
437 "expExitCode": "0",
438 "verifyCmd": "$TC actions list action mirred",
439 "matchPattern": "action order [0-9]*: mirred \\(Egress Redirect to device lo\\).*index 2 ref",
440 "matchCount": "1",
441 "teardown": [
442 "$TC actions flush action mirred"
443 ]
444 },
445 {
446 "id": "ba38",
447 "name": "Get mirred actions",
448 "category": [
449 "actions",
450 "mirred"
451 ],
452 "setup": [
453 [
454 "$TC actions flush action mirred",
455 0,
456 1,
457 255
458 ],
459 "$TC actions add action mirred egress mirror index 1 dev lo",
460 "$TC actions add action mirred egress redirect index 2 dev lo"
461 ],
462 "cmdUnderTest": "$TC actions show action mirred",
463 "expExitCode": "0",
464 "verifyCmd": "$TC actions list action mirred",
465 "matchPattern": "[Mirror|Redirect] to device lo",
466 "matchCount": "2",
467 "teardown": [
468 "$TC actions flush action mirred"
469 ]
470 },
471 {
472 "id": "d7c0",
473 "name": "Add invalid mirred direction",
474 "category": [
475 "actions",
476 "mirred"
477 ],
478 "setup": [
479 [
480 "$TC actions flush action mirred",
481 0,
482 1,
483 255
484 ]
485 ],
486 "cmdUnderTest": "$TC actions add action mirred inbound mirror index 20 dev lo",
487 "expExitCode": "255",
488 "verifyCmd": "$TC actions list action mirred",
489 "matchPattern": "action order [0-9]*: mirred \\(.*to device lo\\).*index 20 ref",
490 "matchCount": "0",
491 "teardown": [
492 "$TC actions flush action mirred"
493 ]
494 },
495 {
496 "id": "e213",
497 "name": "Add invalid mirred action",
498 "category": [
499 "actions",
500 "mirred"
501 ],
502 "setup": [
503 [
504 "$TC actions flush action mirred",
505 0,
506 1,
507 255
508 ]
509 ],
510 "cmdUnderTest": "$TC actions add action mirred egress remirror index 20 dev lo",
511 "expExitCode": "255",
512 "verifyCmd": "$TC actions list action mirred",
513 "matchPattern": "action order [0-9]*: mirred \\(Egress.*to device lo\\).*index 20 ref",
514 "matchCount": "0",
515 "teardown": [
516 "$TC actions flush action mirred"
517 ]
518 },
519 {
520 "id": "2d89",
521 "name": "Add mirred action with invalid device",
522 "category": [
523 "actions",
524 "mirred"
525 ],
526 "setup": [
527 [
528 "$TC actions flush action mirred",
529 0,
530 1,
531 255
532 ]
533 ],
534 "cmdUnderTest": "$TC actions add action mirred egress mirror index 20 dev eltoh",
535 "expExitCode": "255",
536 "verifyCmd": "$TC actions list action mirred",
537 "matchPattern": "action order [0-9]*: mirred \\(.*to device eltoh\\).*index 20 ref",
538 "matchCount": "0",
539 "teardown": [
540 "$TC actions flush action mirred"
541 ]
542 },
543 {
544 "id": "300b",
545 "name": "Add mirred action with duplicate index",
546 "category": [
547 "actions",
548 "mirred"
549 ],
550 "setup": [
551 [
552 "$TC actions flush action mirred",
553 0,
554 1,
555 255
556 ],
557 "$TC actions add action mirred egress redirect index 15 dev lo"
558 ],
559 "cmdUnderTest": "$TC actions add action mirred egress mirror index 15 dev lo",
560 "expExitCode": "255",
561 "verifyCmd": "$TC actions list action mirred",
562 "matchPattern": "action order [0-9]*: mirred \\(.*to device lo\\).*index 15 ref",
563 "matchCount": "1",
564 "teardown": [
565 "$TC actions flush action mirred"
566 ]
567 },
568 {
569 "id": "a70e",
570 "name": "Delete mirred mirror action",
571 "category": [
572 "actions",
573 "mirred"
574 ],
575 "setup": [
576 [
577 "$TC actions flush action mirred",
578 0,
579 1,
580 255
581 ],
582 "$TC actions add action mirred egress mirror index 5 dev lo"
583 ],
584 "cmdUnderTest": "$TC actions del action mirred index 5",
585 "expExitCode": "0",
586 "verifyCmd": "$TC actions list action mirred",
587 "matchPattern": "action order [0-9]*: mirred \\(Egress Mirror to device lo\\).*index 5 ref",
588 "matchCount": "0",
589 "teardown": [
590 "$TC actions flush action mirred"
591 ]
592 },
593 {
594 "id": "3fb3",
595 "name": "Delete mirred redirect action",
596 "category": [
597 "actions",
598 "mirred"
599 ],
600 "setup": [
601 [
602 "$TC actions flush action mirred",
603 0,
604 1,
605 255
606 ],
607 "$TC actions add action mirred egress redirect index 5 dev lo"
608 ],
609 "cmdUnderTest": "$TC actions del action mirred index 5",
610 "expExitCode": "0",
611 "verifyCmd": "$TC actions list action mirred",
612 "matchPattern": "action order [0-9]*: mirred \\(Egress Redirect to device lo\\).*index 5 ref",
613 "matchCount": "0",
614 "teardown": [
615 "$TC actions flush action mirred"
616 ]
617 },
618 {
619 "id": "b078",
620 "name": "Add simple action",
621 "category": [
622 "actions",
623 "simple"
624 ],
625 "setup": [
626 [
627 "$TC actions flush action simple",
628 0,
629 1,
630 255
631 ]
632 ],
633 "cmdUnderTest": "$TC actions add action simple sdata \"A triumph\" index 60",
634 "expExitCode": "0",
635 "verifyCmd": "$TC actions list action simple",
636 "matchPattern": "action order [0-9]*: Simple <A triumph>.*index 60 ref",
637 "matchCount": "1",
638 "teardown": [
639 "$TC actions flush action simple"
640 ]
641 },
642 {
643 "id": "6d4c",
644 "name": "Add simple action with duplicate index",
645 "category": [
646 "actions",
647 "simple"
648 ],
649 "setup": [
650 [
651 "$TC actions flush action simple",
652 0,
653 1,
654 255
655 ],
656 "$TC actions add action simple sdata \"Aruba\" index 4"
657 ],
658 "cmdUnderTest": "$TC actions add action simple sdata \"Jamaica\" index 4",
659 "expExitCode": "255",
660 "verifyCmd": "$TC actions list action simple",
661 "matchPattern": "action order [0-9]*: Simple <Jamaica>.*ref",
662 "matchCount": "0",
663 "teardown": [
664 "$TC actions flush action simple"
665 ]
666 },
667 {
668 "id": "2542",
669 "name": "List simple actions",
670 "category": [
671 "actions",
672 "simple"
673 ],
674 "setup": [
675 [
676 "$TC actions flush action simple",
677 0,
678 1,
679 255
680 ],
681 "$TC actions add action simple sdata \"Rock\"",
682 "$TC actions add action simple sdata \"Paper\"",
683 "$TC actions add action simple sdata \"Scissors\" index 98"
684 ],
685 "cmdUnderTest": "$TC actions list action simple",
686 "expExitCode": "0",
687 "verifyCmd": "$TC actions list action simple",
688 "matchPattern": "action order [0-9]*: Simple <[A-Z][a-z]*>",
689 "matchCount": "3",
690 "teardown": [
691 "$TC actions flush action simple"
692 ]
693 },
694 {
695 "id": "ea67",
696 "name": "Delete simple action",
697 "category": [
698 "actions",
699 "simple"
700 ],
701 "setup": [
702 [
703 "$TC actions flush action simple",
704 0,
705 1,
706 255
707 ],
708 "$TC actions add action simple sdata \"Blinkenlights\" index 1"
709 ],
710 "cmdUnderTest": "$TC actions delete action simple index 1",
711 "expExitCode": "0",
712 "verifyCmd": "$TC actions list action simple",
713 "matchPattern": "action order [0-9]*: Simple <Blinkenlights>.*index 1 ref",
714 "matchCount": "0",
715 "teardown": [
716 "$TC actions flush action simple"
717 ]
718 },
719 {
720 "id": "8ff1",
721 "name": "Flush simple actions",
722 "category": [
723 "actions",
724 "simple"
725 ],
726 "setup": [
727 [
728 "$TC actions flush action simple",
729 0,
730 1,
731 255
732 ],
733 "$TC actions add action simple sdata \"Kirk\"",
734 "$TC actions add action simple sdata \"Spock\" index 50",
735 "$TC actions add action simple sdata \"McCoy\" index 9"
736 ],
737 "cmdUnderTest": "$TC actions flush action simple",
738 "expExitCode": "0",
739 "verifyCmd": "$TC actions list action simple",
740 "matchPattern": "action order [0-9]*: Simple <[A-Z][a-z]*>",
741 "matchCount": "0",
742 "teardown": [
743 ""
744 ]
745 },
746 {
747 "id": "6236",
748 "name": "Add skbedit action with valid mark",
749 "category": [
750 "actions",
751 "skbedit"
752 ],
753 "setup": [
754 [
755 "$TC actions flush action skbedit",
756 0,
757 1,
758 255
759 ]
760 ],
761 "cmdUnderTest": "$TC actions add action skbedit mark 1",
762 "expExitCode": "0",
763 "verifyCmd": "$TC actions list action skbedit",
764 "matchPattern": "action order [0-9]*: skbedit mark 1",
765 "matchCount": "1",
766 "teardown": [
767 "$TC actions flush action skbedit"
768 ]
769 },
770 {
771 "id": "407b",
772 "name": "Add skbedit action with invalid mark",
773 "category": [
774 "actions",
775 "skbedit"
776 ],
777 "setup": [
778 [
779 "$TC actions flush action skbedit",
780 0,
781 1,
782 255
783 ]
784 ],
785 "cmdUnderTest": "$TC actions add action skbedit mark 666777888999",
786 "expExitCode": "255",
787 "verifyCmd": "$TC actions list action skbedit",
788 "matchPattern": "action order [0-9]*: skbedit mark",
789 "matchCount": "0",
790 "teardown": [
791 "$TC actions flush action skbedit"
792 ]
793 },
794 {
795 "id": "081d",
796 "name": "Add skbedit action with priority",
797 "category": [
798 "actions",
799 "skbedit"
800 ],
801 "setup": [
802 [
803 "$TC actions flush action skbedit",
804 0,
805 1,
806 255
807 ]
808 ],
809 "cmdUnderTest": "$TC actions add action skbedit prio 99",
810 "expExitCode": "0",
811 "verifyCmd": "$TC actions list action skbedit",
812 "matchPattern": "action order [0-9]*: skbedit priority :99",
813 "matchCount": "1",
814 "teardown": [
815 "$TC actions flush action skbedit"
816 ]
817 },
818 {
819 "id": "cc37",
820 "name": "Add skbedit action with invalid priority",
821 "category": [
822 "actions",
823 "skbedit"
824 ],
825 "setup": [
826 [
827 "$TC actions flush action skbedit",
828 0,
829 1,
830 255
831 ]
832 ],
833 "cmdUnderTest": "$TC actions add action skbedit prio foo",
834 "expExitCode": "255",
835 "verifyCmd": "$TC actions list action skbedit",
836 "matchPattern": "action order [0-9]*: skbedit priority",
837 "matchCount": "0",
838 "teardown": [
839 "$TC actions flush action skbedit"
840 ]
841 },
842 {
843 "id": "3c95",
844 "name": "Add skbedit action with queue_mapping",
845 "category": [
846 "actions",
847 "skbedit"
848 ],
849 "setup": [
850 [
851 "$TC actions flush action skbedit",
852 0,
853 1,
854 255
855 ]
856 ],
857 "cmdUnderTest": "$TC actions add action skbedit queue_mapping 909",
858 "expExitCode": "0",
859 "verifyCmd": "$TC actions list action skbedit",
860 "matchPattern": "action order [0-9]*: skbedit queue_mapping 909",
861 "matchCount": "1",
862 "teardown": [
863 "$TC actions flush action skbedit"
864 ]
865 },
866 {
867 "id": "985c",
868 "name": "Add skbedit action with invalid queue_mapping",
869 "category": [
870 "actions",
871 "skbedit"
872 ],
873 "setup": [
874 [
875 "$TC actions flush action skbedit",
876 0,
877 1,
878 255
879 ]
880 ],
881 "cmdUnderTest": "$TC actions add action skbedit queue_mapping 67000",
882 "expExitCode": "255",
883 "verifyCmd": "$TC actions list action skbedit",
884 "matchPattern": "action order [0-9]*: skbedit queue_mapping",
885 "matchCount": "0",
886 "teardown": [
887 "$TC actions flush action skbedit"
888 ]
889 },
890 {
891 "id": "224f",
892 "name": "Add skbedit action with ptype host",
893 "category": [
894 "actions",
895 "skbedit"
896 ],
897 "setup": [
898 [
899 "$TC actions flush action skbedit",
900 0,
901 1,
902 255
903 ]
904 ],
905 "cmdUnderTest": "$TC actions add action skbedit ptype host",
906 "expExitCode": "0",
907 "verifyCmd": "$TC actions list action skbedit",
908 "matchPattern": "action order [0-9]*: skbedit ptype host",
909 "matchCount": "1",
910 "teardown": [
911 "$TC actions flush action skbedit"
912 ]
913 },
914 {
915 "id": "d1a3",
916 "name": "Add skbedit action with ptype otherhost",
917 "category": [
918 "actions",
919 "skbedit"
920 ],
921 "setup": [
922 [
923 "$TC actions flush action skbedit",
924 0,
925 1,
926 255
927 ]
928 ],
929 "cmdUnderTest": "$TC actions add action skbedit ptype otherhost",
930 "expExitCode": "0",
931 "verifyCmd": "$TC actions list action skbedit",
932 "matchPattern": "action order [0-9]*: skbedit ptype otherhost",
933 "matchCount": "1",
934 "teardown": [
935 "$TC actions flush action skbedit"
936 ]
937 },
938 {
939 "id": "b9c6",
940 "name": "Add skbedit action with invalid ptype",
941 "category": [
942 "actions",
943 "skbedit"
944 ],
945 "setup": [
946 [
947 "$TC actions flush action skbedit",
948 0,
949 1,
950 255
951 ]
952 ],
953 "cmdUnderTest": "$TC actions add action skbedit ptype openair",
954 "expExitCode": "255",
955 "verifyCmd": "$TC actions list action skbedit",
956 "matchPattern": "action order [0-9]*: skbedit ptype openair",
957 "matchCount": "0",
958 "teardown": [
959 "$TC actions flush action skbedit"
960 ]
961 },
962 {
963 "id": "5172",
964 "name": "List skbedit actions",
965 "category": [
966 "actions",
967 "skbedit"
968 ],
969 "setup": [
970 [
971 "$TC actions flush action skbedit",
972 0,
973 1,
974 255
975 ],
976 "$TC actions add action skbedit ptype otherhost",
977 "$TC actions add action skbedit ptype broadcast",
978 "$TC actions add action skbedit mark 59",
979 "$TC actions add action skbedit mark 409"
980 ],
981 "cmdUnderTest": "$TC actions list action skbedit",
982 "expExitCode": "0",
983 "verifyCmd": "$TC actions list action skbedit",
984 "matchPattern": "action order [0-9]*: skbedit",
985 "matchCount": "4",
986 "teardown": [
987 "$TC actions flush action skbedit"
988 ]
989 },
990 {
991 "id": "a6d6",
992 "name": "Add skbedit action with index",
993 "category": [
994 "actions",
995 "skbedit"
996 ],
997 "setup": [
998 [
999 "$TC actions flush action skbedit",
1000 0,
1001 1,
1002 255
1003 ]
1004 ],
1005 "cmdUnderTest": "$TC actions add action skbedit mark 808 index 4040404040",
1006 "expExitCode": "0",
1007 "verifyCmd": "$TC actions list action skbedit",
1008 "matchPattern": "index 4040404040",
1009 "matchCount": "1",
1010 "teardown": [
1011 "$TC actions flush action skbedit"
1012 ]
1013 },
1014 {
1015 "id": "38f3",
1016 "name": "Delete skbedit action",
1017 "category": [
1018 "actions",
1019 "skbedit"
1020 ],
1021 "setup": [
1022 [
1023 "$TC actions flush action skbedit",
1024 0,
1025 1,
1026 255
1027 ],
1028 "$TC actions add action skbedit mark 42 index 9009"
1029 ],
1030 "cmdUnderTest": "$TC actions del action skbedit index 9009",
1031 "expExitCode": "0",
1032 "verifyCmd": "$TC actions list action skbedit",
1033 "matchPattern": "action order [0-9]*: skbedit mark 42",
1034 "matchCount": "0",
1035 "teardown": [
1036 "$TC actions flush action skbedit"
1037 ]
1038 },
1039 {
1040 "id": "ce97",
1041 "name": "Flush skbedit actions",
1042 "category": [
1043 "actions",
1044 "skbedit"
1045 ],
1046 "setup": [
1047 "$TC actions add action skbedit mark 500",
1048 "$TC actions add action skbedit mark 501",
1049 "$TC actions add action skbedit mark 502",
1050 "$TC actions add action skbedit mark 503",
1051 "$TC actions add action skbedit mark 504",
1052 "$TC actions add action skbedit mark 505",
1053 "$TC actions add action skbedit mark 506"
1054 ],
1055 "cmdUnderTest": "$TC actions flush action skbedit",
1056 "expExitCode": "0",
1057 "verifyCmd": "$TC actions list action skbedit",
1058 "matchPattern": "action order [0-9]*: skbedit",
1059 "matchCount": "0",
1060 "teardown": [
1061 "$TC actions flush action skbedit"
1062 ]
1063 },
1064 {
1065 "id": "f02c",
1066 "name": "Replace gact action",
1067 "category": [
1068 "actions",
1069 "gact"
1070 ],
1071 "setup": [
1072 [
1073 "$TC actions flush action gact",
1074 0,
1075 1,
1076 255
1077 ],
1078 "$TC actions add action drop index 10",
1079 "$TC actions add action drop index 12"
1080 ],
1081 "cmdUnderTest": "$TC actions replace action ok index 12",
1082 "expExitCode": "0",
1083 "verifyCmd": "$TC actions ls action gact",
1084 "matchPattern": "action order [0-9]*: gact action pass",
1085 "matchCount": "1",
1086 "teardown": [
1087 "$TC actions flush action gact"
1088 ]
1089 },
1090 {
1091 "id": "525f",
1092 "name": "Get gact action by index",
1093 "category": [
1094 "actions",
1095 "gact"
1096 ],
1097 "setup": [
1098 [
1099 "$TC actions flush action gact",
1100 0,
1101 1,
1102 255
1103 ],
1104 "$TC actions add action drop index 3900800700"
1105 ],
1106 "cmdUnderTest": "$TC actions get action gact index 3900800700",
1107 "expExitCode": "0",
1108 "verifyCmd": "$TC actions get action gact index 3900800700",
1109 "matchPattern": "index 3900800700",
1110 "matchCount": "1",
1111 "teardown": [
1112 "$TC actions flush action gact"
1113 ]
1114 },
1115 {
1116 "id": "a568",
1117 "name": "Add action with ife type",
1118 "category": [
1119 "actions",
1120 "ife"
1121 ],
1122 "setup": [
1123 [
1124 "$TC actions flush action ife",
1125 0,
1126 1,
1127 255
1128 ],
1129 "$TC actions add action ife encode type 0xDEAD index 1"
1130 ],
1131 "cmdUnderTest": "$TC actions get action ife index 1",
1132 "expExitCode": "0",
1133 "verifyCmd": "$TC actions get action ife index 1",
1134 "matchPattern": "type 0xDEAD",
1135 "matchCount": "1",
1136 "teardown": [
1137 "$TC actions flush action ife"
1138 ]
1139 },
1140 {
1141 "id": "b983",
1142 "name": "Add action without ife type",
1143 "category": [
1144 "actions",
1145 "ife"
1146 ],
1147 "setup": [
1148 [
1149 "$TC actions flush action ife",
1150 0,
1151 1,
1152 255
1153 ],
1154 "$TC actions add action ife encode index 1"
1155 ],
1156 "cmdUnderTest": "$TC actions get action ife index 1",
1157 "expExitCode": "0",
1158 "verifyCmd": "$TC actions get action ife index 1",
1159 "matchPattern": "type 0xED3E",
1160 "matchCount": "1",
1161 "teardown": [
1162 "$TC actions flush action ife"
1163 ]
1164 }
1165] \ No newline at end of file
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
index b8462e1b74f9..fc373fdf2bdc 100755
--- a/tools/testing/selftests/tc-testing/tdc.py
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -50,7 +50,7 @@ def exec_cmd(command, nsonly=True):
50 stderr=subprocess.PIPE) 50 stderr=subprocess.PIPE)
51 (rawout, serr) = proc.communicate() 51 (rawout, serr) = proc.communicate()
52 52
53 if proc.returncode != 0: 53 if proc.returncode != 0 and len(serr) > 0:
54 foutput = serr.decode("utf-8") 54 foutput = serr.decode("utf-8")
55 else: 55 else:
56 foutput = rawout.decode("utf-8") 56 foutput = rawout.decode("utf-8")
@@ -180,15 +180,20 @@ def has_blank_ids(idlist):
180 180
181def load_from_file(filename): 181def load_from_file(filename):
182 """ 182 """
183 Open the JSON file containing the test cases and return them as an 183 Open the JSON file containing the test cases and return them
184 ordered dictionary object. 184 as list of ordered dictionary objects.
185 """ 185 """
186 with open(filename) as test_data: 186 try:
187 testlist = json.load(test_data, object_pairs_hook=OrderedDict) 187 with open(filename) as test_data:
188 idlist = get_id_list(testlist) 188 testlist = json.load(test_data, object_pairs_hook=OrderedDict)
189 if (has_blank_ids(idlist)): 189 except json.JSONDecodeError as jde:
190 for k in testlist: 190 print('IGNORING test case file {}\n\tBECAUSE: {}'.format(filename, jde))
191 k['filename'] = filename 191 testlist = list()
192 else:
193 idlist = get_id_list(testlist)
194 if (has_blank_ids(idlist)):
195 for k in testlist:
196 k['filename'] = filename
192 return testlist 197 return testlist
193 198
194 199
@@ -210,7 +215,7 @@ def set_args(parser):
210 help='Run tests only from the specified category, or if no category is specified, list known categories.') 215 help='Run tests only from the specified category, or if no category is specified, list known categories.')
211 parser.add_argument('-f', '--file', type=str, 216 parser.add_argument('-f', '--file', type=str,
212 help='Run tests from the specified file') 217 help='Run tests from the specified file')
213 parser.add_argument('-l', '--list', type=str, nargs='?', const="", metavar='CATEGORY', 218 parser.add_argument('-l', '--list', type=str, nargs='?', const="++", metavar='CATEGORY',
214 help='List all test cases, or those only within the specified category') 219 help='List all test cases, or those only within the specified category')
215 parser.add_argument('-s', '--show', type=str, nargs=1, metavar='ID', dest='showID', 220 parser.add_argument('-s', '--show', type=str, nargs=1, metavar='ID', dest='showID',
216 help='Display the test case with specified id') 221 help='Display the test case with specified id')
@@ -367,10 +372,10 @@ def set_operation_mode(args):
367 testcases = get_categorized_testlist(alltests, ucat) 372 testcases = get_categorized_testlist(alltests, ucat)
368 373
369 if args.list: 374 if args.list:
370 if (len(args.list) == 0): 375 if (args.list == "++"):
371 list_test_cases(alltests) 376 list_test_cases(alltests)
372 exit(0) 377 exit(0)
373 elif(len(args.list > 0)): 378 elif(len(args.list) > 0):
374 if (args.list not in ucat): 379 if (args.list not in ucat):
375 print("Unknown category " + args.list) 380 print("Unknown category " + args.list)
376 print("Available categories:") 381 print("Available categories:")
diff --git a/tools/testing/selftests/tc-testing/tdc_config.py b/tools/testing/selftests/tc-testing/tdc_config.py
index eb188c729dd6..a023d0d62b25 100644
--- a/tools/testing/selftests/tc-testing/tdc_config.py
+++ b/tools/testing/selftests/tc-testing/tdc_config.py
@@ -18,3 +18,17 @@ NAMES = {
18 # Name of the namespace to use 18 # Name of the namespace to use
19 'NS': 'tcut' 19 'NS': 'tcut'
20 } 20 }
21
22
23ENVIR = { }
24
25# put customizations in tdc_config_local.py
26try:
27 from tdc_config_local import *
28except ImportError as ie:
29 pass
30
31try:
32 NAMES.update(EXTRA_NAMES)
33except NameError as ne:
34 pass
diff --git a/tools/testing/selftests/tc-testing/tdc_config_local_template.py b/tools/testing/selftests/tc-testing/tdc_config_local_template.py
new file mode 100644
index 000000000000..d48fc732a399
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tdc_config_local_template.py
@@ -0,0 +1,23 @@
1"""
2tdc_config_local.py - tdc plugin-writer-specified values
3
4Copyright (C) 2017 bjb@mojatatu.com
5"""
6
7import os
8
9ENVIR = os.environ.copy()
10
11ENV_LD_LIBRARY_PATH = os.getenv('LD_LIBRARY_PATH', '')
12ENV_OTHER_LIB = os.getenv('OTHER_LIB', '')
13
14
15# example adding value to NAMES, without editing tdc_config.py
16EXTRA_NAMES = dict()
17EXTRA_NAMES['SOME_BIN'] = os.path.join(os.getenv('OTHER_BIN', ''), 'some_bin')
18
19
20# example adding values to ENVIR, without editing tdc_config.py
21ENVIR['VALGRIND_LIB'] = '/usr/lib/valgrind'
22ENVIR['VALGRIND_BIN'] = '/usr/bin/valgrind'
23ENVIR['VGDB_BIN'] = '/usr/bin/vgdb'
diff --git a/tools/testing/selftests/tc-testing/tdc_helper.py b/tools/testing/selftests/tc-testing/tdc_helper.py
index ccf2d2458703..db381120a566 100644
--- a/tools/testing/selftests/tc-testing/tdc_helper.py
+++ b/tools/testing/selftests/tc-testing/tdc_helper.py
@@ -16,7 +16,7 @@ def get_categorized_testlist(alltests, ucat):
16 16
17 17
18def get_unique_item(lst): 18def get_unique_item(lst):
19 """ For a list, return a set of the unique items in the list. """ 19 """ For a list, return a list of the unique items in the list. """
20 return list(set(lst)) 20 return list(set(lst))
21 21
22 22
@@ -58,7 +58,7 @@ def print_sll(items):
58def print_test_case(tcase): 58def print_test_case(tcase):
59 """ Pretty-printing of a given test case. """ 59 """ Pretty-printing of a given test case. """
60 for k in tcase.keys(): 60 for k in tcase.keys():
61 if (type(tcase[k]) == list): 61 if (isinstance(tcase[k], list)):
62 print(k + ":") 62 print(k + ":")
63 print_list(tcase[k]) 63 print_list(tcase[k])
64 else: 64 else:
diff --git a/tools/testing/selftests/timers/.gitignore b/tools/testing/selftests/timers/.gitignore
index cc986621f512..2c8ac8416299 100644
--- a/tools/testing/selftests/timers/.gitignore
+++ b/tools/testing/selftests/timers/.gitignore
@@ -18,3 +18,5 @@ threadtest
18valid-adjtimex 18valid-adjtimex
19adjtick 19adjtick
20set-tz 20set-tz
21freq-step
22rtctest_setdate
diff --git a/tools/testing/selftests/vDSO/vdso_test.c b/tools/testing/selftests/vDSO/vdso_test.c
index 8daeb7d7032c..2df26bd0099c 100644
--- a/tools/testing/selftests/vDSO/vdso_test.c
+++ b/tools/testing/selftests/vDSO/vdso_test.c
@@ -19,6 +19,19 @@ extern void *vdso_sym(const char *version, const char *name);
19extern void vdso_init_from_sysinfo_ehdr(uintptr_t base); 19extern void vdso_init_from_sysinfo_ehdr(uintptr_t base);
20extern void vdso_init_from_auxv(void *auxv); 20extern void vdso_init_from_auxv(void *auxv);
21 21
22/*
23 * ARM64's vDSO exports its gettimeofday() implementation with a different
24 * name and version from other architectures, so we need to handle it as
25 * a special case.
26 */
27#if defined(__aarch64__)
28const char *version = "LINUX_2.6.39";
29const char *name = "__kernel_gettimeofday";
30#else
31const char *version = "LINUX_2.6";
32const char *name = "__vdso_gettimeofday";
33#endif
34
22int main(int argc, char **argv) 35int main(int argc, char **argv)
23{ 36{
24 unsigned long sysinfo_ehdr = getauxval(AT_SYSINFO_EHDR); 37 unsigned long sysinfo_ehdr = getauxval(AT_SYSINFO_EHDR);
@@ -31,10 +44,10 @@ int main(int argc, char **argv)
31 44
32 /* Find gettimeofday. */ 45 /* Find gettimeofday. */
33 typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz); 46 typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz);
34 gtod_t gtod = (gtod_t)vdso_sym("LINUX_2.6", "__vdso_gettimeofday"); 47 gtod_t gtod = (gtod_t)vdso_sym(version, name);
35 48
36 if (!gtod) { 49 if (!gtod) {
37 printf("Could not find __vdso_gettimeofday\n"); 50 printf("Could not find %s\n", name);
38 return 1; 51 return 1;
39 } 52 }
40 53
@@ -45,7 +58,7 @@ int main(int argc, char **argv)
45 printf("The time is %lld.%06lld\n", 58 printf("The time is %lld.%06lld\n",
46 (long long)tv.tv_sec, (long long)tv.tv_usec); 59 (long long)tv.tv_sec, (long long)tv.tv_usec);
47 } else { 60 } else {
48 printf("__vdso_gettimeofday failed\n"); 61 printf("%s failed\n", name);
49 } 62 }
50 63
51 return 0; 64 return 0;
diff --git a/tools/testing/selftests/vm/.gitignore b/tools/testing/selftests/vm/.gitignore
index 142c565bb351..1ca2ee4d15b9 100644
--- a/tools/testing/selftests/vm/.gitignore
+++ b/tools/testing/selftests/vm/.gitignore
@@ -8,3 +8,5 @@ on-fault-limit
8transhuge-stress 8transhuge-stress
9userfaultfd 9userfaultfd
10mlock-intersect-test 10mlock-intersect-test
11mlock-random-test
12virtual_address_range
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index e49eca1915f8..7f45806bd863 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -18,6 +18,7 @@ TEST_GEN_FILES += transhuge-stress
18TEST_GEN_FILES += userfaultfd 18TEST_GEN_FILES += userfaultfd
19TEST_GEN_FILES += mlock-random-test 19TEST_GEN_FILES += mlock-random-test
20TEST_GEN_FILES += virtual_address_range 20TEST_GEN_FILES += virtual_address_range
21TEST_GEN_FILES += gup_benchmark
21 22
22TEST_PROGS := run_vmtests 23TEST_PROGS := run_vmtests
23 24
diff --git a/tools/testing/selftests/vm/gup_benchmark.c b/tools/testing/selftests/vm/gup_benchmark.c
new file mode 100644
index 000000000000..36df55132036
--- /dev/null
+++ b/tools/testing/selftests/vm/gup_benchmark.c
@@ -0,0 +1,91 @@
1#include <fcntl.h>
2#include <stdio.h>
3#include <stdlib.h>
4#include <unistd.h>
5
6#include <sys/ioctl.h>
7#include <sys/mman.h>
8#include <sys/prctl.h>
9#include <sys/stat.h>
10#include <sys/types.h>
11
12#include <linux/types.h>
13
14#define MB (1UL << 20)
15#define PAGE_SIZE sysconf(_SC_PAGESIZE)
16
17#define GUP_FAST_BENCHMARK _IOWR('g', 1, struct gup_benchmark)
18
19struct gup_benchmark {
20 __u64 delta_usec;
21 __u64 addr;
22 __u64 size;
23 __u32 nr_pages_per_call;
24 __u32 flags;
25};
26
27int main(int argc, char **argv)
28{
29 struct gup_benchmark gup;
30 unsigned long size = 128 * MB;
31 int i, fd, opt, nr_pages = 1, thp = -1, repeats = 1, write = 0;
32 char *p;
33
34 while ((opt = getopt(argc, argv, "m:r:n:wtT")) != -1) {
35 switch (opt) {
36 case 'm':
37 size = atoi(optarg) * MB;
38 break;
39 case 'r':
40 repeats = atoi(optarg);
41 break;
42 case 'n':
43 nr_pages = atoi(optarg);
44 break;
45 case 't':
46 thp = 1;
47 break;
48 case 'T':
49 thp = 0;
50 break;
51 case 'w':
52 write = 1; break;
53 default:
54 return -1;
55 }
56 }
57
58 gup.nr_pages_per_call = nr_pages;
59 gup.flags = write;
60
61 fd = open("/sys/kernel/debug/gup_benchmark", O_RDWR);
62 if (fd == -1)
63 perror("open"), exit(1);
64
65 p = mmap(NULL, size, PROT_READ | PROT_WRITE,
66 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
67 if (p == MAP_FAILED)
68 perror("mmap"), exit(1);
69 gup.addr = (unsigned long)p;
70
71 if (thp == 1)
72 madvise(p, size, MADV_HUGEPAGE);
73 else if (thp == 0)
74 madvise(p, size, MADV_NOHUGEPAGE);
75
76 for (; (unsigned long)p < gup.addr + size; p += PAGE_SIZE)
77 p[0] = 0;
78
79 for (i = 0; i < repeats; i++) {
80 gup.size = size;
81 if (ioctl(fd, GUP_FAST_BENCHMARK, &gup))
82 perror("ioctl"), exit(1);
83
84 printf("Time: %lld us", gup.delta_usec);
85 if (gup.size != size)
86 printf(", truncated (size: %lld)", gup.size);
87 printf("\n");
88 }
89
90 return 0;
91}
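
The GUP_FAST_BENCHMARK ioctl above fills in how many microseconds it took to pin `size` bytes with get_user_pages_fast(). A minimal sketch (not part of the patch) of turning that report into a pages-per-second figure; the 4096-byte page size is an assumption, the selftest itself uses sysconf(_SC_PAGESIZE):

    #include <stdio.h>

    /*
     * Illustrative helper only: convert the size and delta_usec values
     * reported by the GUP_FAST_BENCHMARK ioctl into pages per second.
     */
    static double gup_pages_per_sec(unsigned long long size,
                                    unsigned long long delta_usec)
    {
            double pages = (double)size / 4096.0;

            return pages / ((double)delta_usec / 1000000.0);
    }

    int main(void)
    {
            /* e.g. 128 MB pinned in 2000 us -> 16384000 pages/s */
            printf("%.0f pages/s\n", gup_pages_per_sec(128ULL << 20, 2000));
            return 0;
    }
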
diff --git a/tools/testing/selftests/x86/5lvl.c b/tools/testing/selftests/x86/5lvl.c
new file mode 100644
index 000000000000..2eafdcd4c2b3
--- /dev/null
+++ b/tools/testing/selftests/x86/5lvl.c
@@ -0,0 +1,177 @@
1#include <stdio.h>
2#include <sys/mman.h>
3
4#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
5
6#define PAGE_SIZE 4096
7#define LOW_ADDR ((void *) (1UL << 30))
8#define HIGH_ADDR ((void *) (1UL << 50))
9
10struct testcase {
11 void *addr;
12 unsigned long size;
13 unsigned long flags;
14 const char *msg;
15 unsigned int low_addr_required:1;
16 unsigned int keep_mapped:1;
17};
18
19static struct testcase testcases[] = {
20 {
21 .addr = NULL,
22 .size = 2 * PAGE_SIZE,
23 .flags = MAP_PRIVATE | MAP_ANONYMOUS,
24 .msg = "mmap(NULL)",
25 .low_addr_required = 1,
26 },
27 {
28 .addr = LOW_ADDR,
29 .size = 2 * PAGE_SIZE,
30 .flags = MAP_PRIVATE | MAP_ANONYMOUS,
31 .msg = "mmap(LOW_ADDR)",
32 .low_addr_required = 1,
33 },
34 {
35 .addr = HIGH_ADDR,
36 .size = 2 * PAGE_SIZE,
37 .flags = MAP_PRIVATE | MAP_ANONYMOUS,
38 .msg = "mmap(HIGH_ADDR)",
39 .keep_mapped = 1,
40 },
41 {
42 .addr = HIGH_ADDR,
43 .size = 2 * PAGE_SIZE,
44 .flags = MAP_PRIVATE | MAP_ANONYMOUS,
45 .msg = "mmap(HIGH_ADDR) again",
46 .keep_mapped = 1,
47 },
48 {
49 .addr = HIGH_ADDR,
50 .size = 2 * PAGE_SIZE,
51 .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
52 .msg = "mmap(HIGH_ADDR, MAP_FIXED)",
53 },
54 {
55 .addr = (void*) -1,
56 .size = 2 * PAGE_SIZE,
57 .flags = MAP_PRIVATE | MAP_ANONYMOUS,
58 .msg = "mmap(-1)",
59 .keep_mapped = 1,
60 },
61 {
62 .addr = (void*) -1,
63 .size = 2 * PAGE_SIZE,
64 .flags = MAP_PRIVATE | MAP_ANONYMOUS,
65 .msg = "mmap(-1) again",
66 },
67 {
68 .addr = (void *)((1UL << 47) - PAGE_SIZE),
69 .size = 2 * PAGE_SIZE,
70 .flags = MAP_PRIVATE | MAP_ANONYMOUS,
71 .msg = "mmap((1UL << 47), 2 * PAGE_SIZE)",
72 .low_addr_required = 1,
73 .keep_mapped = 1,
74 },
75 {
76 .addr = (void *)((1UL << 47) - PAGE_SIZE / 2),
77 .size = 2 * PAGE_SIZE,
78 .flags = MAP_PRIVATE | MAP_ANONYMOUS,
79 .msg = "mmap((1UL << 47), 2 * PAGE_SIZE / 2)",
80 .low_addr_required = 1,
81 .keep_mapped = 1,
82 },
83 {
84 .addr = (void *)((1UL << 47) - PAGE_SIZE),
85 .size = 2 * PAGE_SIZE,
86 .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
87 .msg = "mmap((1UL << 47) - PAGE_SIZE, 2 * PAGE_SIZE, MAP_FIXED)",
88 },
89 {
90 .addr = NULL,
91 .size = 2UL << 20,
92 .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
93 .msg = "mmap(NULL, MAP_HUGETLB)",
94 .low_addr_required = 1,
95 },
96 {
97 .addr = LOW_ADDR,
98 .size = 2UL << 20,
99 .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
100 .msg = "mmap(LOW_ADDR, MAP_HUGETLB)",
101 .low_addr_required = 1,
102 },
103 {
104 .addr = HIGH_ADDR,
105 .size = 2UL << 20,
106 .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
107 .msg = "mmap(HIGH_ADDR, MAP_HUGETLB)",
108 .keep_mapped = 1,
109 },
110 {
111 .addr = HIGH_ADDR,
112 .size = 2UL << 20,
113 .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
114 .msg = "mmap(HIGH_ADDR, MAP_HUGETLB) again",
115 .keep_mapped = 1,
116 },
117 {
118 .addr = HIGH_ADDR,
119 .size = 2UL << 20,
120 .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
121 .msg = "mmap(HIGH_ADDR, MAP_FIXED | MAP_HUGETLB)",
122 },
123 {
124 .addr = (void*) -1,
125 .size = 2UL << 20,
126 .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
127 .msg = "mmap(-1, MAP_HUGETLB)",
128 .keep_mapped = 1,
129 },
130 {
131 .addr = (void*) -1,
132 .size = 2UL << 20,
133 .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
134 .msg = "mmap(-1, MAP_HUGETLB) again",
135 },
136 {
137 .addr = (void *)((1UL << 47) - PAGE_SIZE),
138 .size = 4UL << 20,
139 .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
140 .msg = "mmap((1UL << 47), 4UL << 20, MAP_HUGETLB)",
141 .low_addr_required = 1,
142 .keep_mapped = 1,
143 },
144 {
145 .addr = (void *)((1UL << 47) - (2UL << 20)),
146 .size = 4UL << 20,
147 .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
148 .msg = "mmap((1UL << 47) - (2UL << 20), 4UL << 20, MAP_FIXED | MAP_HUGETLB)",
149 },
150};
151
152int main(int argc, char **argv)
153{
154 int i;
155 void *p;
156
157 for (i = 0; i < ARRAY_SIZE(testcases); i++) {
158 struct testcase *t = testcases + i;
159
160 p = mmap(t->addr, t->size, PROT_NONE, t->flags, -1, 0);
161
162 printf("%s: %p - ", t->msg, p);
163
164 if (p == MAP_FAILED) {
165 printf("FAILED\n");
166 continue;
167 }
168
169 if (t->low_addr_required && p >= (void *)(1UL << 47))
170 printf("FAILED\n");
171 else
172 printf("OK\n");
173 if (!t->keep_mapped)
174 munmap(p, t->size);
175 }
176 return 0;
177}
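
The constants in the test above come from the x86-64 virtual address layout: with 4-level paging user space ends just below 1UL << 47 (128 TiB), while 5-level paging extends it to 1UL << 56 (64 PiB), and the kernel only hands out mappings above the 47-bit boundary when the mmap() hint itself points above it, which is what low_addr_required checks. A small illustrative program (not part of the patch; the limits are assumptions about x86-64 paging modes) that just prints these boundaries:

    #include <stdio.h>

    int main(void)
    {
            unsigned long user_end_4lvl = 1UL << 47;  /* 128 TiB, 4-level paging */
            unsigned long user_end_5lvl = 1UL << 56;  /*  64 PiB, 5-level paging */
            unsigned long high_addr     = 1UL << 50;  /* HIGH_ADDR hint in the test */

            printf("4-level user limit: %#lx\n", user_end_4lvl);
            printf("5-level user limit: %#lx\n", user_end_5lvl);
            printf("HIGH_ADDR hint:     %#lx (only reachable with 5-level paging)\n",
                   high_addr);
            return 0;
    }
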
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 7b1adeee4b0f..939a337128db 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -11,7 +11,7 @@ TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_sysc
11TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \ 11TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
12 test_FCMOV test_FCOMI test_FISTTP \ 12 test_FCMOV test_FCOMI test_FISTTP \
13 vdso_restorer 13 vdso_restorer
14TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip 14TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip 5lvl
15 15
16TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY) 16TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
17TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY) 17TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY)
diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c
index d075ea0e5ca1..361466a2eaef 100644
--- a/tools/testing/selftests/x86/entry_from_vm86.c
+++ b/tools/testing/selftests/x86/entry_from_vm86.c
@@ -95,6 +95,27 @@ asm (
95 "int3\n\t" 95 "int3\n\t"
96 "vmcode_int80:\n\t" 96 "vmcode_int80:\n\t"
97 "int $0x80\n\t" 97 "int $0x80\n\t"
98 "vmcode_umip:\n\t"
99 /* addressing via displacements */
100 "smsw (2052)\n\t"
101 "sidt (2054)\n\t"
102 "sgdt (2060)\n\t"
103 /* addressing via registers */
104 "mov $2066, %bx\n\t"
105 "smsw (%bx)\n\t"
106 "mov $2068, %bx\n\t"
107 "sidt (%bx)\n\t"
108 "mov $2074, %bx\n\t"
109 "sgdt (%bx)\n\t"
110 /* register operands, only for smsw */
111 "smsw %ax\n\t"
112 "mov %ax, (2080)\n\t"
113 "int3\n\t"
114 "vmcode_umip_str:\n\t"
115 "str %eax\n\t"
116 "vmcode_umip_sldt:\n\t"
117 "sldt %eax\n\t"
118 "int3\n\t"
98 ".size vmcode, . - vmcode\n\t" 119 ".size vmcode, . - vmcode\n\t"
99 "end_vmcode:\n\t" 120 "end_vmcode:\n\t"
100 ".code32\n\t" 121 ".code32\n\t"
@@ -103,7 +124,8 @@ asm (
103 124
104extern unsigned char vmcode[], end_vmcode[]; 125extern unsigned char vmcode[], end_vmcode[];
105extern unsigned char vmcode_bound[], vmcode_sysenter[], vmcode_syscall[], 126extern unsigned char vmcode_bound[], vmcode_sysenter[], vmcode_syscall[],
106 vmcode_sti[], vmcode_int3[], vmcode_int80[]; 127 vmcode_sti[], vmcode_int3[], vmcode_int80[], vmcode_umip[],
128 vmcode_umip_str[], vmcode_umip_sldt[];
107 129
108/* Returns false if the test was skipped. */ 130/* Returns false if the test was skipped. */
109static bool do_test(struct vm86plus_struct *v86, unsigned long eip, 131static bool do_test(struct vm86plus_struct *v86, unsigned long eip,
@@ -160,6 +182,68 @@ static bool do_test(struct vm86plus_struct *v86, unsigned long eip,
160 return true; 182 return true;
161} 183}
162 184
185void do_umip_tests(struct vm86plus_struct *vm86, unsigned char *test_mem)
186{
187 struct table_desc {
188 unsigned short limit;
189 unsigned long base;
190 } __attribute__((packed));
191
192 /* Initialize variables with arbitrary values */
193 struct table_desc gdt1 = { .base = 0x3c3c3c3c, .limit = 0x9999 };
194 struct table_desc gdt2 = { .base = 0x1a1a1a1a, .limit = 0xaeae };
195 struct table_desc idt1 = { .base = 0x7b7b7b7b, .limit = 0xf1f1 };
196 struct table_desc idt2 = { .base = 0x89898989, .limit = 0x1313 };
197 unsigned short msw1 = 0x1414, msw2 = 0x2525, msw3 = 3737;
198
199 /* UMIP -- exit with INT3 unless kernel emulation did not trap #GP */
200 do_test(vm86, vmcode_umip - vmcode, VM86_TRAP, 3, "UMIP tests");
201
202 /* Results from displacement-only addressing */
203 msw1 = *(unsigned short *)(test_mem + 2052);
204 memcpy(&idt1, test_mem + 2054, sizeof(idt1));
205 memcpy(&gdt1, test_mem + 2060, sizeof(gdt1));
206
207 /* Results from register-indirect addressing */
208 msw2 = *(unsigned short *)(test_mem + 2066);
209 memcpy(&idt2, test_mem + 2068, sizeof(idt2));
210 memcpy(&gdt2, test_mem + 2074, sizeof(gdt2));
211
212 /* Results when using register operands */
213 msw3 = *(unsigned short *)(test_mem + 2080);
214
215 printf("[INFO]\tResult from SMSW:[0x%04x]\n", msw1);
216 printf("[INFO]\tResult from SIDT: limit[0x%04x]base[0x%08lx]\n",
217 idt1.limit, idt1.base);
218 printf("[INFO]\tResult from SGDT: limit[0x%04x]base[0x%08lx]\n",
219 gdt1.limit, gdt1.base);
220
221 if (msw1 != msw2 || msw1 != msw3)
222 printf("[FAIL]\tAll the results of SMSW should be the same.\n");
223 else
224 printf("[PASS]\tAll the results from SMSW are identical.\n");
225
226 if (memcmp(&gdt1, &gdt2, sizeof(gdt1)))
227 printf("[FAIL]\tAll the results of SGDT should be the same.\n");
228 else
229 printf("[PASS]\tAll the results from SGDT are identical.\n");
230
231 if (memcmp(&idt1, &idt2, sizeof(idt1)))
232 printf("[FAIL]\tAll the results of SIDT should be the same.\n");
233 else
234 printf("[PASS]\tAll the results from SIDT are identical.\n");
235
236 sethandler(SIGILL, sighandler, 0);
237 do_test(vm86, vmcode_umip_str - vmcode, VM86_SIGNAL, 0,
238 "STR instruction");
239 clearhandler(SIGILL);
240
241 sethandler(SIGILL, sighandler, 0);
242 do_test(vm86, vmcode_umip_sldt - vmcode, VM86_SIGNAL, 0,
243 "SLDT instruction");
244 clearhandler(SIGILL);
245}
246
163int main(void) 247int main(void)
164{ 248{
165 struct vm86plus_struct v86; 249 struct vm86plus_struct v86;
@@ -218,6 +302,9 @@ int main(void)
218 v86.regs.eax = (unsigned int)-1; 302 v86.regs.eax = (unsigned int)-1;
219 do_test(&v86, vmcode_int80 - vmcode, VM86_INTx, 0x80, "int80"); 303 do_test(&v86, vmcode_int80 - vmcode, VM86_INTx, 0x80, "int80");
220 304
305 /* UMIP -- should exit with INTx 0x80 unless UMIP was not disabled */
306 do_umip_tests(&v86, addr);
307
221 /* Execute a null pointer */ 308 /* Execute a null pointer */
222 v86.regs.cs = 0; 309 v86.regs.cs = 0;
223 v86.regs.ss = 0; 310 v86.regs.ss = 0;
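
The displacements hard-coded in vmcode_umip and read back in do_umip_tests() form a small result area inside the vm86 test page; the offsets line up because SMSW stores 2 bytes and SIDT/SGDT each store a 2-byte limit plus a 4-byte base (6 bytes, matching the packed struct table_desc). A header-style sketch of that layout, for illustration only (the enum names are made up, the offsets are the ones used above):

    enum umip_result_offsets {
            UMIP_SMSW_DISP = 2052,  /* smsw (2052): 2 bytes               */
            UMIP_SIDT_DISP = 2054,  /* sidt (2054): 6 bytes, ends at 2060 */
            UMIP_SGDT_DISP = 2060,  /* sgdt (2060): 6 bytes, ends at 2066 */
            UMIP_SMSW_BX   = 2066,  /* smsw (%bx):  2 bytes               */
            UMIP_SIDT_BX   = 2068,  /* sidt (%bx):  6 bytes, ends at 2074 */
            UMIP_SGDT_BX   = 2074,  /* sgdt (%bx):  6 bytes, ends at 2080 */
            UMIP_SMSW_AX   = 2080,  /* smsw %ax, stored with mov: 2 bytes */
    };
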
diff --git a/tools/testing/selftests/x86/mpx-hw.h b/tools/testing/selftests/x86/mpx-hw.h
index 3f0093911f03..d1b61ab870f8 100644
--- a/tools/testing/selftests/x86/mpx-hw.h
+++ b/tools/testing/selftests/x86/mpx-hw.h
@@ -52,14 +52,14 @@
52struct mpx_bd_entry { 52struct mpx_bd_entry {
53 union { 53 union {
54 char x[MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES]; 54 char x[MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES];
55 void *contents[1]; 55 void *contents[0];
56 }; 56 };
57} __attribute__((packed)); 57} __attribute__((packed));
58 58
59struct mpx_bt_entry { 59struct mpx_bt_entry {
60 union { 60 union {
61 char x[MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES]; 61 char x[MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES];
62 unsigned long contents[1]; 62 unsigned long contents[0];
63 }; 63 };
64} __attribute__((packed)); 64} __attribute__((packed));
65 65
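
The [1] -> [0] change above turns contents[] into a GNU zero-length array: the union's size is governed by x[] alone, and contents[] becomes a typed view of that same storage rather than a member claiming to hold exactly one element. A standalone sketch of the idiom (sizes here are arbitrary, not the MPX ones):

    #include <stdio.h>

    /* Minimal illustration: x[] sizes the union, contents[] aliases it.
     * Zero-length arrays are a GNU C extension. */
    union bt_entry {
            char x[32];
            unsigned long contents[0];
    };

    int main(void)
    {
            union bt_entry e = { .x = "example" };

            printf("sizeof(union bt_entry) = %zu\n", sizeof(e));   /* 32 */
            printf("first word aliases x[]: %#lx\n", e.contents[0]);
            return 0;
    }
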
diff --git a/tools/testing/selftests/x86/pkey-helpers.h b/tools/testing/selftests/x86/pkey-helpers.h
index 3818f25391c2..b3cb7670e026 100644
--- a/tools/testing/selftests/x86/pkey-helpers.h
+++ b/tools/testing/selftests/x86/pkey-helpers.h
@@ -30,6 +30,7 @@ static inline void sigsafe_printf(const char *format, ...)
30 if (!dprint_in_signal) { 30 if (!dprint_in_signal) {
31 vprintf(format, ap); 31 vprintf(format, ap);
32 } else { 32 } else {
33 int ret;
33 int len = vsnprintf(dprint_in_signal_buffer, 34 int len = vsnprintf(dprint_in_signal_buffer,
34 DPRINT_IN_SIGNAL_BUF_SIZE, 35 DPRINT_IN_SIGNAL_BUF_SIZE,
35 format, ap); 36 format, ap);
@@ -39,7 +40,9 @@ static inline void sigsafe_printf(const char *format, ...)
39 */ 40 */
40 if (len > DPRINT_IN_SIGNAL_BUF_SIZE) 41 if (len > DPRINT_IN_SIGNAL_BUF_SIZE)
41 len = DPRINT_IN_SIGNAL_BUF_SIZE; 42 len = DPRINT_IN_SIGNAL_BUF_SIZE;
42 write(1, dprint_in_signal_buffer, len); 43 ret = write(1, dprint_in_signal_buffer, len);
44 if (ret < 0)
45 abort();
43 } 46 }
44 va_end(ap); 47 va_end(ap);
45} 48}
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
index 7a1cc0e56d2d..bc1b0735bb50 100644
--- a/tools/testing/selftests/x86/protection_keys.c
+++ b/tools/testing/selftests/x86/protection_keys.c
@@ -250,7 +250,7 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
250 unsigned long ip; 250 unsigned long ip;
251 char *fpregs; 251 char *fpregs;
252 u32 *pkru_ptr; 252 u32 *pkru_ptr;
253 u64 si_pkey; 253 u64 siginfo_pkey;
254 u32 *si_pkey_ptr; 254 u32 *si_pkey_ptr;
255 int pkru_offset; 255 int pkru_offset;
256 fpregset_t fpregset; 256 fpregset_t fpregset;
@@ -292,9 +292,9 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
292 si_pkey_ptr = (u32 *)(((u8 *)si) + si_pkey_offset); 292 si_pkey_ptr = (u32 *)(((u8 *)si) + si_pkey_offset);
293 dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr); 293 dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr);
294 dump_mem(si_pkey_ptr - 8, 24); 294 dump_mem(si_pkey_ptr - 8, 24);
295 si_pkey = *si_pkey_ptr; 295 siginfo_pkey = *si_pkey_ptr;
296 pkey_assert(si_pkey < NR_PKEYS); 296 pkey_assert(siginfo_pkey < NR_PKEYS);
297 last_si_pkey = si_pkey; 297 last_si_pkey = siginfo_pkey;
298 298
299 if ((si->si_code == SEGV_MAPERR) || 299 if ((si->si_code == SEGV_MAPERR) ||
300 (si->si_code == SEGV_ACCERR) || 300 (si->si_code == SEGV_ACCERR) ||
@@ -306,7 +306,7 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
306 dprintf1("signal pkru from xsave: %08x\n", *pkru_ptr); 306 dprintf1("signal pkru from xsave: %08x\n", *pkru_ptr);
307 /* need __rdpkru() version so we do not do shadow_pkru checking */ 307 /* need __rdpkru() version so we do not do shadow_pkru checking */
308 dprintf1("signal pkru from pkru: %08x\n", __rdpkru()); 308 dprintf1("signal pkru from pkru: %08x\n", __rdpkru());
309 dprintf1("si_pkey from siginfo: %jx\n", si_pkey); 309 dprintf1("pkey from siginfo: %jx\n", siginfo_pkey);
310 *(u64 *)pkru_ptr = 0x00000000; 310 *(u64 *)pkru_ptr = 0x00000000;
311 dprintf1("WARNING: set PRKU=0 to allow faulting instruction to continue\n"); 311 dprintf1("WARNING: set PRKU=0 to allow faulting instruction to continue\n");
312 pkru_faults++; 312 pkru_faults++;
diff --git a/tools/testing/vsock/.gitignore b/tools/testing/vsock/.gitignore
new file mode 100644
index 000000000000..dc5f11faf530
--- /dev/null
+++ b/tools/testing/vsock/.gitignore
@@ -0,0 +1,2 @@
1*.d
2vsock_diag_test
diff --git a/tools/testing/vsock/Makefile b/tools/testing/vsock/Makefile
new file mode 100644
index 000000000000..66ba0924194d
--- /dev/null
+++ b/tools/testing/vsock/Makefile
@@ -0,0 +1,9 @@
1all: test
2test: vsock_diag_test
3vsock_diag_test: vsock_diag_test.o timeout.o control.o
4
5CFLAGS += -g -O2 -Werror -Wall -I. -I../../include/uapi -I../../include -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -D_GNU_SOURCE
6.PHONY: all test clean
7clean:
8 ${RM} *.o *.d vsock_diag_test
9-include *.d
diff --git a/tools/testing/vsock/README b/tools/testing/vsock/README
new file mode 100644
index 000000000000..2cc6d7302db6
--- /dev/null
+++ b/tools/testing/vsock/README
@@ -0,0 +1,36 @@
1AF_VSOCK test suite
2-------------------
3These tests exercise net/vmw_vsock/ host<->guest sockets for VMware, KVM, and
4Hyper-V.
5
6The following tests are available:
7
8 * vsock_diag_test - exercises the vsock_diag.ko module for listing open sockets
9
10The following prerequisite steps are not automated and must be performed prior
11to running tests:
12
131. Build the kernel and these tests.
142. Install the kernel and tests on the host.
153. Install the kernel and tests inside the guest.
164. Boot the guest and ensure that the AF_VSOCK transport is enabled.
17
18Invoke test binaries in both directions as follows:
19
20 # host=server, guest=client
21 (host)# $TEST_BINARY --mode=server \
22 --control-port=1234 \
23 --peer-cid=3
24 (guest)# $TEST_BINARY --mode=client \
25 --control-host=$HOST_IP \
26 --control-port=1234 \
27 --peer-cid=2
28
29 # host=client, guest=server
30 (guest)# $TEST_BINARY --mode=server \
31 --control-port=1234 \
32 --peer-cid=2
33 (host)# $TEST_BINARY --mode=client \
34 --control-host=$GUEST_IP \
35 --control-port=1234 \
36 --peer-cid=3
diff --git a/tools/testing/vsock/control.c b/tools/testing/vsock/control.c
new file mode 100644
index 000000000000..90fd47f0e422
--- /dev/null
+++ b/tools/testing/vsock/control.c
@@ -0,0 +1,219 @@
1/* Control socket for client/server test execution
2 *
3 * Copyright (C) 2017 Red Hat, Inc.
4 *
5 * Author: Stefan Hajnoczi <stefanha@redhat.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; version 2
10 * of the License.
11 */
12
13/* The client and server may need to coordinate to avoid race conditions like
14 * the client attempting to connect to a socket that the server is not
15 * listening on yet. The control socket offers a communications channel for
16 * such coordination tasks.
17 *
18 * If the client calls control_expectln("LISTENING"), then it will block until
19 * the server calls control_writeln("LISTENING"). This provides a simple
20 * mechanism for coordinating between the client and the server.
21 */
22
23#include <errno.h>
24#include <netdb.h>
25#include <stdio.h>
26#include <stdlib.h>
27#include <string.h>
28#include <unistd.h>
29#include <sys/types.h>
30#include <sys/socket.h>
31
32#include "timeout.h"
33#include "control.h"
34
35static int control_fd = -1;
36
37/* Open the control socket, either in server or client mode */
38void control_init(const char *control_host,
39 const char *control_port,
40 bool server)
41{
42 struct addrinfo hints = {
43 .ai_socktype = SOCK_STREAM,
44 };
45 struct addrinfo *result = NULL;
46 struct addrinfo *ai;
47 int ret;
48
49 ret = getaddrinfo(control_host, control_port, &hints, &result);
50 if (ret != 0) {
51 fprintf(stderr, "%s\n", gai_strerror(ret));
52 exit(EXIT_FAILURE);
53 }
54
55 for (ai = result; ai; ai = ai->ai_next) {
56 int fd;
57 int val = 1;
58
59 fd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
60 if (fd < 0)
61 continue;
62
63 if (!server) {
64 if (connect(fd, ai->ai_addr, ai->ai_addrlen) < 0)
65 goto next;
66 control_fd = fd;
67 printf("Control socket connected to %s:%s.\n",
68 control_host, control_port);
69 break;
70 }
71
72 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR,
73 &val, sizeof(val)) < 0) {
74 perror("setsockopt");
75 exit(EXIT_FAILURE);
76 }
77
78 if (bind(fd, ai->ai_addr, ai->ai_addrlen) < 0)
79 goto next;
80 if (listen(fd, 1) < 0)
81 goto next;
82
83 printf("Control socket listening on %s:%s\n",
84 control_host, control_port);
85 fflush(stdout);
86
87 control_fd = accept(fd, NULL, 0);
88 close(fd);
89
90 if (control_fd < 0) {
91 perror("accept");
92 exit(EXIT_FAILURE);
93 }
94 printf("Control socket connection accepted...\n");
95 break;
96
97next:
98 close(fd);
99 }
100
101 if (control_fd < 0) {
102 fprintf(stderr, "Control socket initialization failed. Invalid address %s:%s?\n",
103 control_host, control_port);
104 exit(EXIT_FAILURE);
105 }
106
107 freeaddrinfo(result);
108}
109
110/* Free resources */
111void control_cleanup(void)
112{
113 close(control_fd);
114 control_fd = -1;
115}
116
117/* Write a line to the control socket */
118void control_writeln(const char *str)
119{
120 ssize_t len = strlen(str);
121 ssize_t ret;
122
123 timeout_begin(TIMEOUT);
124
125 do {
126 ret = send(control_fd, str, len, MSG_MORE);
127 timeout_check("send");
128 } while (ret < 0 && errno == EINTR);
129
130 if (ret != len) {
131 perror("send");
132 exit(EXIT_FAILURE);
133 }
134
135 do {
136 ret = send(control_fd, "\n", 1, 0);
137 timeout_check("send");
138 } while (ret < 0 && errno == EINTR);
139
140 if (ret != 1) {
141 perror("send");
142 exit(EXIT_FAILURE);
143 }
144
145 timeout_end();
146}
147
148/* Return the next line from the control socket (without the trailing newline).
149 *
150 * The program terminates if a timeout occurs.
151 *
152 * The caller must free() the returned string.
153 */
154char *control_readln(void)
155{
156 char *buf = NULL;
157 size_t idx = 0;
158 size_t buflen = 0;
159
160 timeout_begin(TIMEOUT);
161
162 for (;;) {
163 ssize_t ret;
164
165 if (idx >= buflen) {
166 char *new_buf;
167
168 new_buf = realloc(buf, buflen + 80);
169 if (!new_buf) {
170 perror("realloc");
171 exit(EXIT_FAILURE);
172 }
173
174 buf = new_buf;
175 buflen += 80;
176 }
177
178 do {
179 ret = recv(control_fd, &buf[idx], 1, 0);
180 timeout_check("recv");
181 } while (ret < 0 && errno == EINTR);
182
183 if (ret == 0) {
184 fprintf(stderr, "unexpected EOF on control socket\n");
185 exit(EXIT_FAILURE);
186 }
187
188 if (ret != 1) {
189 perror("recv");
190 exit(EXIT_FAILURE);
191 }
192
193 if (buf[idx] == '\n') {
194 buf[idx] = '\0';
195 break;
196 }
197
198 idx++;
199 }
200
201 timeout_end();
202
203 return buf;
204}
205
206/* Wait until a given line is received or a timeout occurs */
207void control_expectln(const char *str)
208{
209 char *line;
210
211 line = control_readln();
212 if (strcmp(str, line) != 0) {
213 fprintf(stderr, "expected \"%s\" on control socket, got \"%s\"\n",
214 str, line);
215 exit(EXIT_FAILURE);
216 }
217
218 free(line);
219}
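
The header comment in control.c describes the LISTENING handshake in the abstract; a hypothetical sketch of how a client/server test pair might use the API (only the control_* functions come from this patch, everything else is illustrative):

    #include <stddef.h>

    #include "control.h"

    /* Hypothetical server side: announce readiness only once the socket
     * under test is actually listening, so the client cannot connect early. */
    static void example_server(void)
    {
            control_init(NULL, "1234", true);
            /* ... bind() and listen() on the socket under test ... */
            control_writeln("LISTENING");
            /* ... accept() and run the test ... */
            control_cleanup();
    }

    /* Hypothetical client side: block on the handshake before connecting. */
    static void example_client(const char *server_addr)
    {
            control_init(server_addr, "1234", false);
            control_expectln("LISTENING");
            /* ... connect() to the socket under test and run the test ... */
            control_cleanup();
    }
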
diff --git a/tools/testing/vsock/control.h b/tools/testing/vsock/control.h
new file mode 100644
index 000000000000..54a07efd267c
--- /dev/null
+++ b/tools/testing/vsock/control.h
@@ -0,0 +1,13 @@
1#ifndef CONTROL_H
2#define CONTROL_H
3
4#include <stdbool.h>
5
6void control_init(const char *control_host, const char *control_port,
7 bool server);
8void control_cleanup(void);
9void control_writeln(const char *str);
10char *control_readln(void);
11void control_expectln(const char *str);
12
13#endif /* CONTROL_H */
diff --git a/tools/testing/vsock/timeout.c b/tools/testing/vsock/timeout.c
new file mode 100644
index 000000000000..c49b3003b2db
--- /dev/null
+++ b/tools/testing/vsock/timeout.c
@@ -0,0 +1,64 @@
1/* Timeout API for single-threaded programs that use blocking
2 * syscalls (read/write/send/recv/connect/accept).
3 *
4 * Copyright (C) 2017 Red Hat, Inc.
5 *
6 * Author: Stefan Hajnoczi <stefanha@redhat.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; version 2
11 * of the License.
12 */
13
14/* Use the following pattern:
15 *
16 * timeout_begin(TIMEOUT);
17 * do {
18 * ret = accept(...);
19 * timeout_check("accept");
20 * } while (ret < 0 && errno == EINTR);
21 * timeout_end();
22 */
23
24#include <stdlib.h>
25#include <stdbool.h>
26#include <unistd.h>
27#include <stdio.h>
28#include "timeout.h"
29
30static volatile bool timeout;
31
32/* SIGALRM handler function. Do not use sleep(2), alarm(2), or
33 * setitimer(2) while using this API - they may interfere with each
34 * other.
35 */
36void sigalrm(int signo)
37{
38 timeout = true;
39}
40
41/* Start a timeout. Call timeout_check() to verify that the timeout hasn't
42 * expired. timeout_end() must be called to stop the timeout. Timeouts cannot
43 * be nested.
44 */
45void timeout_begin(unsigned int seconds)
46{
47 alarm(seconds);
48}
49
50/* Exit with an error message if the timeout has expired */
51void timeout_check(const char *operation)
52{
53 if (timeout) {
54 fprintf(stderr, "%s timed out\n", operation);
55 exit(EXIT_FAILURE);
56 }
57}
58
59/* Stop a timeout */
60void timeout_end(void)
61{
62 alarm(0);
63 timeout = false;
64}
diff --git a/tools/testing/vsock/timeout.h b/tools/testing/vsock/timeout.h
new file mode 100644
index 000000000000..77db9ce9860a
--- /dev/null
+++ b/tools/testing/vsock/timeout.h
@@ -0,0 +1,14 @@
1#ifndef TIMEOUT_H
2#define TIMEOUT_H
3
4enum {
5 /* Default timeout */
6 TIMEOUT = 10 /* seconds */
7};
8
9void sigalrm(int signo);
10void timeout_begin(unsigned int seconds);
11void timeout_check(const char *operation);
12void timeout_end(void);
13
14#endif /* TIMEOUT_H */
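
timeout.h only declares the handler and the begin/check/end calls; a hypothetical example of wiring them around a blocking connect(), assuming the caller is responsible for installing sigalrm() as the SIGALRM handler (as the header comment in timeout.c implies):

    #include <errno.h>
    #include <signal.h>
    #include <sys/socket.h>

    #include "timeout.h"

    /* Hypothetical wrapper: connect() with a TIMEOUT-second deadline. */
    static int connect_with_timeout(int fd, const struct sockaddr *addr,
                                    socklen_t len)
    {
            struct sigaction act = { .sa_handler = sigalrm };
            int ret;

            /* Assumption: the handler is not installed elsewhere. */
            sigaction(SIGALRM, &act, NULL);

            timeout_begin(TIMEOUT);
            do {
                    ret = connect(fd, addr, len);
                    timeout_check("connect");  /* exits if the alarm fired */
            } while (ret < 0 && errno == EINTR);
            timeout_end();

            return ret;
    }
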
diff --git a/tools/testing/vsock/vsock_diag_test.c b/tools/testing/vsock/vsock_diag_test.c
new file mode 100644
index 000000000000..e896a4af52f4
--- /dev/null
+++ b/tools/testing/vsock/vsock_diag_test.c
@@ -0,0 +1,681 @@
1/*
2 * vsock_diag_test - vsock_diag.ko test suite
3 *
4 * Copyright (C) 2017 Red Hat, Inc.
5 *
6 * Author: Stefan Hajnoczi <stefanha@redhat.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; version 2
11 * of the License.
12 */
13
14#include <getopt.h>
15#include <stdio.h>
16#include <stdbool.h>
17#include <stdlib.h>
18#include <string.h>
19#include <errno.h>
20#include <unistd.h>
21#include <signal.h>
22#include <sys/socket.h>
23#include <sys/stat.h>
24#include <sys/types.h>
25#include <linux/list.h>
26#include <linux/net.h>
27#include <linux/netlink.h>
28#include <linux/sock_diag.h>
29#include <netinet/tcp.h>
30
31#include "../../../include/uapi/linux/vm_sockets.h"
32#include "../../../include/uapi/linux/vm_sockets_diag.h"
33
34#include "timeout.h"
35#include "control.h"
36
37enum test_mode {
38 TEST_MODE_UNSET,
39 TEST_MODE_CLIENT,
40 TEST_MODE_SERVER
41};
42
43/* Per-socket status */
44struct vsock_stat {
45 struct list_head list;
46 struct vsock_diag_msg msg;
47};
48
49static const char *sock_type_str(int type)
50{
51 switch (type) {
52 case SOCK_DGRAM:
53 return "DGRAM";
54 case SOCK_STREAM:
55 return "STREAM";
56 default:
57 return "INVALID TYPE";
58 }
59}
60
61static const char *sock_state_str(int state)
62{
63 switch (state) {
64 case TCP_CLOSE:
65 return "UNCONNECTED";
66 case TCP_SYN_SENT:
67 return "CONNECTING";
68 case TCP_ESTABLISHED:
69 return "CONNECTED";
70 case TCP_CLOSING:
71 return "DISCONNECTING";
72 case TCP_LISTEN:
73 return "LISTEN";
74 default:
75 return "INVALID STATE";
76 }
77}
78
79static const char *sock_shutdown_str(int shutdown)
80{
81 switch (shutdown) {
82 case 1:
83 return "RCV_SHUTDOWN";
84 case 2:
85 return "SEND_SHUTDOWN";
86 case 3:
87 return "RCV_SHUTDOWN | SEND_SHUTDOWN";
88 default:
89 return "0";
90 }
91}
92
93static void print_vsock_addr(FILE *fp, unsigned int cid, unsigned int port)
94{
95 if (cid == VMADDR_CID_ANY)
96 fprintf(fp, "*:");
97 else
98 fprintf(fp, "%u:", cid);
99
100 if (port == VMADDR_PORT_ANY)
101 fprintf(fp, "*");
102 else
103 fprintf(fp, "%u", port);
104}
105
106static void print_vsock_stat(FILE *fp, struct vsock_stat *st)
107{
108 print_vsock_addr(fp, st->msg.vdiag_src_cid, st->msg.vdiag_src_port);
109 fprintf(fp, " ");
110 print_vsock_addr(fp, st->msg.vdiag_dst_cid, st->msg.vdiag_dst_port);
111 fprintf(fp, " %s %s %s %u\n",
112 sock_type_str(st->msg.vdiag_type),
113 sock_state_str(st->msg.vdiag_state),
114 sock_shutdown_str(st->msg.vdiag_shutdown),
115 st->msg.vdiag_ino);
116}
117
118static void print_vsock_stats(FILE *fp, struct list_head *head)
119{
120 struct vsock_stat *st;
121
122 list_for_each_entry(st, head, list)
123 print_vsock_stat(fp, st);
124}
125
126static struct vsock_stat *find_vsock_stat(struct list_head *head, int fd)
127{
128 struct vsock_stat *st;
129 struct stat stat;
130
131 if (fstat(fd, &stat) < 0) {
132 perror("fstat");
133 exit(EXIT_FAILURE);
134 }
135
136 list_for_each_entry(st, head, list)
137 if (st->msg.vdiag_ino == stat.st_ino)
138 return st;
139
140 fprintf(stderr, "cannot find fd %d\n", fd);
141 exit(EXIT_FAILURE);
142}
143
144static void check_no_sockets(struct list_head *head)
145{
146 if (!list_empty(head)) {
147 fprintf(stderr, "expected no sockets\n");
148 print_vsock_stats(stderr, head);
149 exit(1);
150 }
151}
152
153static void check_num_sockets(struct list_head *head, int expected)
154{
155 struct list_head *node;
156 int n = 0;
157
158 list_for_each(node, head)
159 n++;
160
161 if (n != expected) {
162 fprintf(stderr, "expected %d sockets, found %d\n",
163 expected, n);
164 print_vsock_stats(stderr, head);
165 exit(EXIT_FAILURE);
166 }
167}
168
169static void check_socket_state(struct vsock_stat *st, __u8 state)
170{
171 if (st->msg.vdiag_state != state) {
172 fprintf(stderr, "expected socket state %#x, got %#x\n",
173 state, st->msg.vdiag_state);
174 exit(EXIT_FAILURE);
175 }
176}
177
178static void send_req(int fd)
179{
180 struct sockaddr_nl nladdr = {
181 .nl_family = AF_NETLINK,
182 };
183 struct {
184 struct nlmsghdr nlh;
185 struct vsock_diag_req vreq;
186 } req = {
187 .nlh = {
188 .nlmsg_len = sizeof(req),
189 .nlmsg_type = SOCK_DIAG_BY_FAMILY,
190 .nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
191 },
192 .vreq = {
193 .sdiag_family = AF_VSOCK,
194 .vdiag_states = ~(__u32)0,
195 },
196 };
197 struct iovec iov = {
198 .iov_base = &req,
199 .iov_len = sizeof(req),
200 };
201 struct msghdr msg = {
202 .msg_name = &nladdr,
203 .msg_namelen = sizeof(nladdr),
204 .msg_iov = &iov,
205 .msg_iovlen = 1,
206 };
207
208 for (;;) {
209 if (sendmsg(fd, &msg, 0) < 0) {
210 if (errno == EINTR)
211 continue;
212
213 perror("sendmsg");
214 exit(EXIT_FAILURE);
215 }
216
217 return;
218 }
219}
220
221static ssize_t recv_resp(int fd, void *buf, size_t len)
222{
223 struct sockaddr_nl nladdr = {
224 .nl_family = AF_NETLINK,
225 };
226 struct iovec iov = {
227 .iov_base = buf,
228 .iov_len = len,
229 };
230 struct msghdr msg = {
231 .msg_name = &nladdr,
232 .msg_namelen = sizeof(nladdr),
233 .msg_iov = &iov,
234 .msg_iovlen = 1,
235 };
236 ssize_t ret;
237
238 do {
239 ret = recvmsg(fd, &msg, 0);
240 } while (ret < 0 && errno == EINTR);
241
242 if (ret < 0) {
243 perror("recvmsg");
244 exit(EXIT_FAILURE);
245 }
246
247 return ret;
248}
249
250static void add_vsock_stat(struct list_head *sockets,
251 const struct vsock_diag_msg *resp)
252{
253 struct vsock_stat *st;
254
255 st = malloc(sizeof(*st));
256 if (!st) {
257 perror("malloc");
258 exit(EXIT_FAILURE);
259 }
260
261 st->msg = *resp;
262 list_add_tail(&st->list, sockets);
263}
264
265/*
266 * Read vsock stats into a list.
267 */
268static void read_vsock_stat(struct list_head *sockets)
269{
270 long buf[8192 / sizeof(long)];
271 int fd;
272
273 fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
274 if (fd < 0) {
275 perror("socket");
276 exit(EXIT_FAILURE);
277 }
278
279 send_req(fd);
280
281 for (;;) {
282 const struct nlmsghdr *h;
283 ssize_t ret;
284
285 ret = recv_resp(fd, buf, sizeof(buf));
286 if (ret == 0)
287 goto done;
288 if (ret < sizeof(*h)) {
289 fprintf(stderr, "short read of %zd bytes\n", ret);
290 exit(EXIT_FAILURE);
291 }
292
293 h = (struct nlmsghdr *)buf;
294
295 while (NLMSG_OK(h, ret)) {
296 if (h->nlmsg_type == NLMSG_DONE)
297 goto done;
298
299 if (h->nlmsg_type == NLMSG_ERROR) {
300 const struct nlmsgerr *err = NLMSG_DATA(h);
301
302 if (h->nlmsg_len < NLMSG_LENGTH(sizeof(*err)))
303 fprintf(stderr, "NLMSG_ERROR\n");
304 else {
305 errno = -err->error;
306 perror("NLMSG_ERROR");
307 }
308
309 exit(EXIT_FAILURE);
310 }
311
312 if (h->nlmsg_type != SOCK_DIAG_BY_FAMILY) {
313 fprintf(stderr, "unexpected nlmsg_type %#x\n",
314 h->nlmsg_type);
315 exit(EXIT_FAILURE);
316 }
317 if (h->nlmsg_len <
318 NLMSG_LENGTH(sizeof(struct vsock_diag_msg))) {
319 fprintf(stderr, "short vsock_diag_msg\n");
320 exit(EXIT_FAILURE);
321 }
322
323 add_vsock_stat(sockets, NLMSG_DATA(h));
324
325 h = NLMSG_NEXT(h, ret);
326 }
327 }
328
329done:
330 close(fd);
331}
332
333static void free_sock_stat(struct list_head *sockets)
334{
335 struct vsock_stat *st;
336 struct vsock_stat *next;
337
338 list_for_each_entry_safe(st, next, sockets, list)
339 free(st);
340}
341
342static void test_no_sockets(unsigned int peer_cid)
343{
344 LIST_HEAD(sockets);
345
346 read_vsock_stat(&sockets);
347
348 check_no_sockets(&sockets);
349
350 free_sock_stat(&sockets);
351}
352
353static void test_listen_socket_server(unsigned int peer_cid)
354{
355 union {
356 struct sockaddr sa;
357 struct sockaddr_vm svm;
358 } addr = {
359 .svm = {
360 .svm_family = AF_VSOCK,
361 .svm_port = 1234,
362 .svm_cid = VMADDR_CID_ANY,
363 },
364 };
365 LIST_HEAD(sockets);
366 struct vsock_stat *st;
367 int fd;
368
369 fd = socket(AF_VSOCK, SOCK_STREAM, 0);
370
371 if (bind(fd, &addr.sa, sizeof(addr.svm)) < 0) {
372 perror("bind");
373 exit(EXIT_FAILURE);
374 }
375
376 if (listen(fd, 1) < 0) {
377 perror("listen");
378 exit(EXIT_FAILURE);
379 }
380
381 read_vsock_stat(&sockets);
382
383 check_num_sockets(&sockets, 1);
384 st = find_vsock_stat(&sockets, fd);
385 check_socket_state(st, TCP_LISTEN);
386
387 close(fd);
388 free_sock_stat(&sockets);
389}
390
391static void test_connect_client(unsigned int peer_cid)
392{
393 union {
394 struct sockaddr sa;
395 struct sockaddr_vm svm;
396 } addr = {
397 .svm = {
398 .svm_family = AF_VSOCK,
399 .svm_port = 1234,
400 .svm_cid = peer_cid,
401 },
402 };
403 int fd;
404 int ret;
405 LIST_HEAD(sockets);
406 struct vsock_stat *st;
407
408 control_expectln("LISTENING");
409
410 fd = socket(AF_VSOCK, SOCK_STREAM, 0);
411
412 timeout_begin(TIMEOUT);
413 do {
414 ret = connect(fd, &addr.sa, sizeof(addr.svm));
415 timeout_check("connect");
416 } while (ret < 0 && errno == EINTR);
417 timeout_end();
418
419 if (ret < 0) {
420 perror("connect");
421 exit(EXIT_FAILURE);
422 }
423
424 read_vsock_stat(&sockets);
425
426 check_num_sockets(&sockets, 1);
427 st = find_vsock_stat(&sockets, fd);
428 check_socket_state(st, TCP_ESTABLISHED);
429
430 control_expectln("DONE");
431 control_writeln("DONE");
432
433 close(fd);
434 free_sock_stat(&sockets);
435}
436
437static void test_connect_server(unsigned int peer_cid)
438{
439 union {
440 struct sockaddr sa;
441 struct sockaddr_vm svm;
442 } addr = {
443 .svm = {
444 .svm_family = AF_VSOCK,
445 .svm_port = 1234,
446 .svm_cid = VMADDR_CID_ANY,
447 },
448 };
449 union {
450 struct sockaddr sa;
451 struct sockaddr_vm svm;
452 } clientaddr;
453 socklen_t clientaddr_len = sizeof(clientaddr.svm);
454 LIST_HEAD(sockets);
455 struct vsock_stat *st;
456 int fd;
457 int client_fd;
458
459 fd = socket(AF_VSOCK, SOCK_STREAM, 0);
460
461 if (bind(fd, &addr.sa, sizeof(addr.svm)) < 0) {
462 perror("bind");
463 exit(EXIT_FAILURE);
464 }
465
466 if (listen(fd, 1) < 0) {
467 perror("listen");
468 exit(EXIT_FAILURE);
469 }
470
471 control_writeln("LISTENING");
472
473 timeout_begin(TIMEOUT);
474 do {
475 client_fd = accept(fd, &clientaddr.sa, &clientaddr_len);
476 timeout_check("accept");
477 } while (client_fd < 0 && errno == EINTR);
478 timeout_end();
479
480 if (client_fd < 0) {
481 perror("accept");
482 exit(EXIT_FAILURE);
483 }
484 if (clientaddr.sa.sa_family != AF_VSOCK) {
485 fprintf(stderr, "expected AF_VSOCK from accept(2), got %d\n",
486 clientaddr.sa.sa_family);
487 exit(EXIT_FAILURE);
488 }
489 if (clientaddr.svm.svm_cid != peer_cid) {
490 fprintf(stderr, "expected peer CID %u from accept(2), got %u\n",
491 peer_cid, clientaddr.svm.svm_cid);
492 exit(EXIT_FAILURE);
493 }
494
495 read_vsock_stat(&sockets);
496
497 check_num_sockets(&sockets, 2);
498 find_vsock_stat(&sockets, fd);
499 st = find_vsock_stat(&sockets, client_fd);
500 check_socket_state(st, TCP_ESTABLISHED);
501
502 control_writeln("DONE");
503 control_expectln("DONE");
504
505 close(client_fd);
506 close(fd);
507 free_sock_stat(&sockets);
508}
509
510static struct {
511 const char *name;
512 void (*run_client)(unsigned int peer_cid);
513 void (*run_server)(unsigned int peer_cid);
514} test_cases[] = {
515 {
516 .name = "No sockets",
517 .run_server = test_no_sockets,
518 },
519 {
520 .name = "Listen socket",
521 .run_server = test_listen_socket_server,
522 },
523 {
524 .name = "Connect",
525 .run_client = test_connect_client,
526 .run_server = test_connect_server,
527 },
528 {},
529};
530
531static void init_signals(void)
532{
533 struct sigaction act = {
534 .sa_handler = sigalrm,
535 };
536
537 sigaction(SIGALRM, &act, NULL);
538 signal(SIGPIPE, SIG_IGN);
539}
540
541static unsigned int parse_cid(const char *str)
542{
543 char *endptr = NULL;
544 unsigned long int n;
545
546 errno = 0;
547 n = strtoul(str, &endptr, 10);
548 if (errno || *endptr != '\0') {
549 fprintf(stderr, "malformed CID \"%s\"\n", str);
550 exit(EXIT_FAILURE);
551 }
552 return n;
553}
554
555static const char optstring[] = "";
556static const struct option longopts[] = {
557 {
558 .name = "control-host",
559 .has_arg = required_argument,
560 .val = 'H',
561 },
562 {
563 .name = "control-port",
564 .has_arg = required_argument,
565 .val = 'P',
566 },
567 {
568 .name = "mode",
569 .has_arg = required_argument,
570 .val = 'm',
571 },
572 {
573 .name = "peer-cid",
574 .has_arg = required_argument,
575 .val = 'p',
576 },
577 {
578 .name = "help",
579 .has_arg = no_argument,
580 .val = '?',
581 },
582 {},
583};
584
585static void usage(void)
586{
587 fprintf(stderr, "Usage: vsock_diag_test [--help] [--control-host=<host>] --control-port=<port> --mode=client|server --peer-cid=<cid>\n"
588 "\n"
589 " Server: vsock_diag_test --control-port=1234 --mode=server --peer-cid=3\n"
590 " Client: vsock_diag_test --control-host=192.168.0.1 --control-port=1234 --mode=client --peer-cid=2\n"
591 "\n"
592 "Run vsock_diag.ko tests. Must be launched in both\n"
593 "guest and host. One side must use --mode=client and\n"
594 "the other side must use --mode=server.\n"
595 "\n"
596 "A TCP control socket connection is used to coordinate tests\n"
597 "between the client and the server. The server requires a\n"
598 "listen address and the client requires an address to\n"
599 "connect to.\n"
600 "\n"
601 "The CID of the other side must be given with --peer-cid=<cid>.\n");
602 exit(EXIT_FAILURE);
603}
604
605int main(int argc, char **argv)
606{
607 const char *control_host = NULL;
608 const char *control_port = NULL;
609 int mode = TEST_MODE_UNSET;
610 unsigned int peer_cid = VMADDR_CID_ANY;
611 int i;
612
613 init_signals();
614
615 for (;;) {
616 int opt = getopt_long(argc, argv, optstring, longopts, NULL);
617
618 if (opt == -1)
619 break;
620
621 switch (opt) {
622 case 'H':
623 control_host = optarg;
624 break;
625 case 'm':
626 if (strcmp(optarg, "client") == 0)
627 mode = TEST_MODE_CLIENT;
628 else if (strcmp(optarg, "server") == 0)
629 mode = TEST_MODE_SERVER;
630 else {
631 fprintf(stderr, "--mode must be \"client\" or \"server\"\n");
632 return EXIT_FAILURE;
633 }
634 break;
635 case 'p':
636 peer_cid = parse_cid(optarg);
637 break;
638 case 'P':
639 control_port = optarg;
640 break;
641 case '?':
642 default:
643 usage();
644 }
645 }
646
647 if (!control_port)
648 usage();
649 if (mode == TEST_MODE_UNSET)
650 usage();
651 if (peer_cid == VMADDR_CID_ANY)
652 usage();
653
654 if (!control_host) {
655 if (mode != TEST_MODE_SERVER)
656 usage();
657 control_host = "0.0.0.0";
658 }
659
660 control_init(control_host, control_port, mode == TEST_MODE_SERVER);
661
662 for (i = 0; test_cases[i].name; i++) {
663 void (*run)(unsigned int peer_cid);
664
665 printf("%s...", test_cases[i].name);
666 fflush(stdout);
667
668 if (mode == TEST_MODE_CLIENT)
669 run = test_cases[i].run_client;
670 else
671 run = test_cases[i].run_server;
672
673 if (run)
674 run(peer_cid);
675
676 printf("ok\n");
677 }
678
679 control_cleanup();
680 return EXIT_SUCCESS;
681}
diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile
index 21169322baea..735a510230c3 100644
--- a/tools/thermal/tmon/Makefile
+++ b/tools/thermal/tmon/Makefile
@@ -1,10 +1,16 @@
 # SPDX-License-Identifier: GPL-2.0
+# We need this for the "cc-option" macro.
+include ../../../scripts/Kbuild.include
+
 VERSION = 1.0
 
 BINDIR=usr/bin
 WARNFLAGS=-Wall -Wshadow -W -Wformat -Wimplicit-function-declaration -Wimplicit-int
-CFLAGS+= -O1 ${WARNFLAGS} -fstack-protector
-CC=$(CROSS_COMPILE)gcc
+CFLAGS+= -O1 ${WARNFLAGS}
+# Add "-fstack-protector" only if toolchain supports it.
+CFLAGS+= $(call cc-option,-fstack-protector)
+CC?= $(CROSS_COMPILE)gcc
+PKG_CONFIG?= pkg-config
 
 CFLAGS+=-D VERSION=\"$(VERSION)\"
 LDFLAGS+=
@@ -19,12 +25,12 @@ STATIC := --static
 endif
 
 TMON_LIBS=-lm -lpthread
-TMON_LIBS += $(shell pkg-config --libs $(STATIC) panelw ncursesw 2> /dev/null || \
-		pkg-config --libs $(STATIC) panel ncurses 2> /dev/null || \
+TMON_LIBS += $(shell $(PKG_CONFIG) --libs $(STATIC) panelw ncursesw 2> /dev/null || \
+		$(PKG_CONFIG) --libs $(STATIC) panel ncurses 2> /dev/null || \
 		echo -lpanel -lncurses)
 
-CFLAGS += $(shell pkg-config --cflags $(STATIC) panelw ncursesw 2> /dev/null || \
-		pkg-config --cflags $(STATIC) panel ncurses 2> /dev/null)
+CFLAGS += $(shell $(PKG_CONFIG) --cflags $(STATIC) panelw ncursesw 2> /dev/null || \
+		$(PKG_CONFIG) --cflags $(STATIC) panel ncurses 2> /dev/null)
 
 OBJS = tmon.o tui.o sysfs.o pid.o
 OBJS +=
diff --git a/tools/usb/usbip/Makefile.am b/tools/usb/usbip/Makefile.am
index da3a430849a8..5961e9c18812 100644
--- a/tools/usb/usbip/Makefile.am
+++ b/tools/usb/usbip/Makefile.am
@@ -2,6 +2,7 @@
 SUBDIRS := libsrc src
 includedir = @includedir@/usbip
 include_HEADERS := $(addprefix libsrc/, \
-		     usbip_common.h vhci_driver.h usbip_host_driver.h)
+		     usbip_common.h vhci_driver.h usbip_host_driver.h \
+		     list.h sysfs_utils.h usbip_host_common.h)
 
 dist_man_MANS := $(addprefix doc/, usbip.8 usbipd.8)
diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c
index 5727dfb15a83..c9c81614a66a 100644
--- a/tools/usb/usbip/libsrc/vhci_driver.c
+++ b/tools/usb/usbip/libsrc/vhci_driver.c
@@ -50,14 +50,14 @@ static int parse_status(const char *value)
 
 	while (*c != '\0') {
 		int port, status, speed, devid;
-		unsigned long socket;
+		int sockfd;
 		char lbusid[SYSFS_BUS_ID_SIZE];
 		struct usbip_imported_device *idev;
 		char hub[3];
 
-		ret = sscanf(c, "%2s %d %d %d %x %lx %31s\n",
+		ret = sscanf(c, "%2s %d %d %d %x %u %31s\n",
 			hub, &port, &status, &speed,
-			&devid, &socket, lbusid);
+			&devid, &sockfd, lbusid);
 
 		if (ret < 5) {
 			dbg("sscanf failed: %d", ret);
@@ -66,7 +66,7 @@ static int parse_status(const char *value)
 
 		dbg("hub %s port %d status %d speed %d devid %x",
 			hub, port, status, speed, devid);
-		dbg("socket %lx lbusid %s", socket, lbusid);
+		dbg("sockfd %u lbusid %s", sockfd, lbusid);
 
 		/* if a device is connected, look at it */
 		idev = &vhci_driver->idev[port];
@@ -106,7 +106,7 @@ static int parse_status(const char *value)
 	return 0;
 }
 
-#define MAX_STATUS_NAME 16
+#define MAX_STATUS_NAME 18
 
 static int refresh_imported_device_list(void)
 {
@@ -329,9 +329,17 @@ err:
 int usbip_vhci_get_free_port(uint32_t speed)
 {
 	for (int i = 0; i < vhci_driver->nports; i++) {
-		if (speed == USB_SPEED_SUPER &&
-		    vhci_driver->idev[i].hub != HUB_SPEED_SUPER)
-			continue;
+
+		switch (speed) {
+		case USB_SPEED_SUPER:
+			if (vhci_driver->idev[i].hub != HUB_SPEED_SUPER)
+				continue;
+			break;
+		default:
+			if (vhci_driver->idev[i].hub != HUB_SPEED_HIGH)
+				continue;
+			break;
+		}
 
 		if (vhci_driver->idev[i].status == VDEV_ST_NULL)
 			return vhci_driver->idev[i].port;
diff --git a/tools/usb/usbip/src/utils.c b/tools/usb/usbip/src/utils.c
index 2b3d6d235015..3d7b42e77299 100644
--- a/tools/usb/usbip/src/utils.c
+++ b/tools/usb/usbip/src/utils.c
@@ -30,6 +30,7 @@ int modify_match_busid(char *busid, int add)
 	char command[SYSFS_BUS_ID_SIZE + 4];
 	char match_busid_attr_path[SYSFS_PATH_MAX];
 	int rc;
+	int cmd_size;
 
 	snprintf(match_busid_attr_path, sizeof(match_busid_attr_path),
 		 "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME,
@@ -37,12 +38,14 @@ int modify_match_busid(char *busid, int add)
 		 attr_name);
 
 	if (add)
-		snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", busid);
+		cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s",
+				    busid);
 	else
-		snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", busid);
+		cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s",
+				    busid);
 
 	rc = write_sysfs_attribute(match_busid_attr_path, command,
-				   sizeof(command));
+				   cmd_size);
 	if (rc < 0) {
 		dbg("failed to write match_busid: %s", strerror(errno));
 		return -1;
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h
index 90b0133004e1..5706e075adf2 100644
--- a/tools/virtio/ringtest/main.h
+++ b/tools/virtio/ringtest/main.h
@@ -110,11 +110,15 @@ static inline void busy_wait(void)
 	barrier();
 }
 
+#if defined(__x86_64__) || defined(__i386__)
+#define smp_mb() asm volatile("lock; addl $0,-128(%%rsp)" ::: "memory", "cc")
+#else
 /*
  * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
  * with other __ATOMIC_SEQ_CST calls.
  */
 #define smp_mb() __sync_synchronize()
+#endif
 
 /*
  * This abuses the atomic builtins for thread fences, and
diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c
index 38bb171aceba..e6e81305ef46 100644
--- a/tools/virtio/ringtest/ptr_ring.c
+++ b/tools/virtio/ringtest/ptr_ring.c
@@ -16,24 +16,41 @@
 #define unlikely(x) (__builtin_expect(!!(x), 0))
 #define likely(x) (__builtin_expect(!!(x), 1))
 #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
+#define SIZE_MAX (~(size_t)0)
+
 typedef pthread_spinlock_t spinlock_t;
 
 typedef int gfp_t;
-static void *kmalloc(unsigned size, gfp_t gfp)
-{
-	return memalign(64, size);
-}
+#define __GFP_ZERO 0x1
 
-static void *kzalloc(unsigned size, gfp_t gfp)
+static void *kmalloc(unsigned size, gfp_t gfp)
 {
 	void *p = memalign(64, size);
 	if (!p)
 		return p;
-	memset(p, 0, size);
 
+	if (gfp & __GFP_ZERO)
+		memset(p, 0, size);
 	return p;
 }
 
+static inline void *kzalloc(unsigned size, gfp_t flags)
+{
+	return kmalloc(size, flags | __GFP_ZERO);
+}
+
+static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+	if (size != 0 && n > SIZE_MAX / size)
+		return NULL;
+	return kmalloc(n * size, flags);
+}
+
+static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+{
+	return kmalloc_array(n, size, flags | __GFP_ZERO);
+}
+
 static void kfree(void *p)
 {
 	if (p)
diff --git a/tools/vm/slabinfo-gnuplot.sh b/tools/vm/slabinfo-gnuplot.sh
index 35b039864b77..0cf28aa6f21c 100644
--- a/tools/vm/slabinfo-gnuplot.sh
+++ b/tools/vm/slabinfo-gnuplot.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 
 # Sergey Senozhatsky, 2015
 # sergey.senozhatsky.work@gmail.com
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
index b0b7ef6d0de1..f82c2eaa859d 100644
--- a/tools/vm/slabinfo.c
+++ b/tools/vm/slabinfo.c
@@ -84,6 +84,7 @@ int output_lines = -1;
 int sort_loss;
 int extended_totals;
 int show_bytes;
+int unreclaim_only;
 
 /* Debug options */
 int sanity;
@@ -133,6 +134,7 @@ static void usage(void)
 	"-L|--Loss Sort by loss\n"
 	"-X|--Xtotals Show extended summary information\n"
 	"-B|--Bytes Show size in bytes\n"
+	"-U|--Unreclaim Show unreclaimable slabs only\n"
 	"\nValid debug options (FZPUT may be combined)\n"
 	"a / A Switch on all debug options (=FZUP)\n"
 	"- Switch off all debug options\n"
@@ -569,6 +571,9 @@ static void slabcache(struct slabinfo *s)
 	if (strcmp(s->name, "*") == 0)
 		return;
 
+	if (unreclaim_only && s->reclaim_account)
+		return;
+
 	if (actual_slabs == 1) {
 		report(s);
 		return;
@@ -1347,6 +1352,7 @@ struct option opts[] = {
 	{ "Loss", no_argument, NULL, 'L'},
 	{ "Xtotals", no_argument, NULL, 'X'},
 	{ "Bytes", no_argument, NULL, 'B'},
+	{ "Unreclaim", no_argument, NULL, 'U'},
 	{ NULL, 0, NULL, 0 }
 };
 
@@ -1358,7 +1364,7 @@ int main(int argc, char *argv[])
 
 	page_size = getpagesize();
 
-	while ((c = getopt_long(argc, argv, "aAd::Defhil1noprstvzTSN:LXB",
+	while ((c = getopt_long(argc, argv, "aAd::Defhil1noprstvzTSN:LXBU",
 				opts, NULL)) != -1)
 		switch (c) {
 		case '1':
@@ -1439,6 +1445,9 @@ int main(int argc, char *argv[])
 		case 'B':
 			show_bytes = 1;
 			break;
+		case 'U':
+			unreclaim_only = 1;
+			break;
 		default:
 			fatal("%s: Invalid option '%c'\n", argv[0], optopt);
 
diff --git a/tools/wmi/Makefile b/tools/wmi/Makefile
new file mode 100644
index 000000000000..e664f1167388
--- /dev/null
+++ b/tools/wmi/Makefile
@@ -0,0 +1,18 @@
1PREFIX ?= /usr
2SBINDIR ?= sbin
3INSTALL ?= install
4CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
5CC = $(CROSS_COMPILE)gcc
6
7TARGET = dell-smbios-example
8
9all: $(TARGET)
10
11%: %.c
12 $(CC) $(CFLAGS) $(LDFLAGS) -o $@ $<
13
14clean:
15 $(RM) $(TARGET)
16
17install: dell-smbios-example
18 $(INSTALL) -D -m 755 $(TARGET) $(DESTDIR)$(PREFIX)/$(SBINDIR)/$(TARGET)
diff --git a/tools/wmi/dell-smbios-example.c b/tools/wmi/dell-smbios-example.c
new file mode 100644
index 000000000000..9d3bde081249
--- /dev/null
+++ b/tools/wmi/dell-smbios-example.c
@@ -0,0 +1,210 @@
1/*
2 * Sample application for SMBIOS communication over WMI interface
3 * Performs the following:
4 * - Simple cmd_class/cmd_select lookup for TPM information
5 * - Simple query of known tokens and their values
6 * - Simple activation of a token
7 *
8 * Copyright (C) 2017 Dell, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <errno.h>
16#include <fcntl.h>
17#include <stdio.h>
18#include <stdlib.h>
19#include <sys/ioctl.h>
20#include <unistd.h>
21
22/* if uapi header isn't installed, this might not yet exist */
23#ifndef __packed
24#define __packed __attribute__((packed))
25#endif
26#include <linux/wmi.h>
27
28/* It would be better to discover these using udev, but for a simple
29 * application they're hardcoded
30 */
31static const char *ioctl_devfs = "/dev/wmi/dell-smbios";
32static const char *token_sysfs =
33 "/sys/bus/platform/devices/dell-smbios.0/tokens";
34
35static void show_buffer(struct dell_wmi_smbios_buffer *buffer)
36{
37 printf("Call: %x/%x [%x,%x,%x,%x]\nResults: [%8x,%8x,%8x,%8x]\n",
38 buffer->std.cmd_class, buffer->std.cmd_select,
39 buffer->std.input[0], buffer->std.input[1],
40 buffer->std.input[2], buffer->std.input[3],
41 buffer->std.output[0], buffer->std.output[1],
42 buffer->std.output[2], buffer->std.output[3]);
43}
44
45static int run_wmi_smbios_cmd(struct dell_wmi_smbios_buffer *buffer)
46{
47 int fd;
48 int ret;
49
50 fd = open(ioctl_devfs, O_NONBLOCK);
51 ret = ioctl(fd, DELL_WMI_SMBIOS_CMD, buffer);
52 close(fd);
53 return ret;
54}
55
56static int find_token(__u16 token, __u16 *location, __u16 *value)
57{
58 char location_sysfs[60];
59 char value_sysfs[57];
60 char buf[4096];
61 FILE *f;
62 int ret;
63
64 ret = sprintf(value_sysfs, "%s/%04x_value", token_sysfs, token);
65 if (ret < 0) {
66 printf("sprintf value failed\n");
67 return 2;
68 }
69 f = fopen(value_sysfs, "rb");
70 if (!f) {
71 printf("failed to open %s\n", value_sysfs);
72 return 2;
73 }
74 fread(buf, 1, 4096, f);
75 fclose(f);
76 *value = (__u16) strtol(buf, NULL, 16);
77
78 ret = sprintf(location_sysfs, "%s/%04x_location", token_sysfs, token);
79 if (ret < 0) {
80 printf("sprintf location failed\n");
81 return 1;
82 }
83 f = fopen(location_sysfs, "rb");
84 if (!f) {
85 printf("failed to open %s\n", location_sysfs);
86 return 2;
87 }
88 fread(buf, 1, 4096, f);
89 fclose(f);
90 *location = (__u16) strtol(buf, NULL, 16);
91
92 if (*location)
93 return 0;
94 return 2;
95}
96
97static int token_is_active(__u16 *location, __u16 *cmpvalue,
98 struct dell_wmi_smbios_buffer *buffer)
99{
100 int ret;
101
102 buffer->std.cmd_class = CLASS_TOKEN_READ;
103 buffer->std.cmd_select = SELECT_TOKEN_STD;
104 buffer->std.input[0] = *location;
105 ret = run_wmi_smbios_cmd(buffer);
106 if (ret != 0 || buffer->std.output[0] != 0)
107 return ret;
108 ret = (buffer->std.output[1] == *cmpvalue);
109 return ret;
110}
111
112static int query_token(__u16 token, struct dell_wmi_smbios_buffer *buffer)
113{
114 __u16 location;
115 __u16 value;
116 int ret;
117
118 ret = find_token(token, &location, &value);
119 if (ret != 0) {
120 printf("unable to find token %04x\n", token);
121 return 1;
122 }
123 return token_is_active(&location, &value, buffer);
124}
125
126static int activate_token(struct dell_wmi_smbios_buffer *buffer,
127 __u16 token)
128{
129 __u16 location;
130 __u16 value;
131 int ret;
132
133 ret = find_token(token, &location, &value);
134 if (ret != 0) {
135 printf("unable to find token %04x\n", token);
136 return 1;
137 }
138 buffer->std.cmd_class = CLASS_TOKEN_WRITE;
139 buffer->std.cmd_select = SELECT_TOKEN_STD;
140 buffer->std.input[0] = location;
141 buffer->std.input[1] = 1;
142 ret = run_wmi_smbios_cmd(buffer);
143 return ret;
144}
145
146static int query_buffer_size(__u64 *buffer_size)
147{
148 FILE *f;
149
150 f = fopen(ioctl_devfs, "rb");
151 if (!f)
152 return -EINVAL;
153 fread(buffer_size, sizeof(__u64), 1, f);
154 fclose(f);
155 return EXIT_SUCCESS;
156}
157
158int main(void)
159{
160 struct dell_wmi_smbios_buffer *buffer;
161 int ret;
162 __u64 value = 0;
163
164 ret = query_buffer_size(&value);
165 if (ret == EXIT_FAILURE || !value) {
166 printf("Unable to read buffer size\n");
167 goto out;
168 }
169 printf("Detected required buffer size %lld\n", value);
170
171 buffer = malloc(value);
172 if (buffer == NULL) {
173 printf("failed to alloc memory for ioctl\n");
174 ret = -ENOMEM;
175 goto out;
176 }
177 buffer->length = value;
178
179 /* simple SMBIOS call for looking up TPM info */
180 buffer->std.cmd_class = CLASS_FLASH_INTERFACE;
181 buffer->std.cmd_select = SELECT_FLASH_INTERFACE;
182 buffer->std.input[0] = 2;
183 ret = run_wmi_smbios_cmd(buffer);
184 if (ret) {
185 printf("smbios ioctl failed: %d\n", ret);
186 ret = EXIT_FAILURE;
187 goto out;
188 }
189 show_buffer(buffer);
190
191 /* query some tokens */
192 ret = query_token(CAPSULE_EN_TOKEN, buffer);
193 printf("UEFI Capsule enabled token is: %d\n", ret);
194 ret = query_token(CAPSULE_DIS_TOKEN, buffer);
195 printf("UEFI Capsule disabled token is: %d\n", ret);
196
197 /* activate UEFI capsule token if disabled */
198 if (ret) {
199 printf("Enabling UEFI capsule token");
200 if (activate_token(buffer, CAPSULE_EN_TOKEN)) {
201 printf("activate failed\n");
202 ret = -1;
203 goto out;
204 }
205 }
206 ret = EXIT_SUCCESS;
207out:
208 free(buffer);
209 return ret;
210}